code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.spark
import java.io.{BufferedWriter, StringWriter}
import org.apache.spark.geomesa.GeoMesaSparkKryoRegistratorEndpoint
import org.apache.spark.rdd.RDD
import org.apache.spark.{Partition, TaskContext}
import org.locationtech.geomesa.features.serialization.GeoJsonSerializer
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
/**
* RDD with a known schema
*
* @param rdd simple feature RDD
* @param sft simple feature type schema
*/
class SpatialRDD(rdd: RDD[SimpleFeature], sft: SimpleFeatureType) extends RDD[SimpleFeature](rdd) with Schema {

  // register the feature type with the kryo registrator before any features are shipped
  GeoMesaSparkKryoRegistrator.register(sft)

  // capture the type as (name, spec string) -- presumably because SimpleFeatureType itself
  // is not serializable -- and re-create the type lazily on each executor (TODO confirm)
  private val typeName = sft.getTypeName
  private val spec = SimpleFeatureTypes.encodeType(sft, includeUserData = true)

  @transient
  override lazy val schema: SimpleFeatureType = SimpleFeatureTypes.createType(typeName, spec)

  // partitioning and computation are delegated straight to the wrapped RDD
  override def compute(split: Partition, context: TaskContext): Iterator[SimpleFeature] =
    firstParent.compute(split, context)

  override def getPartitions: Array[Partition] = firstParent.partitions
}
object SpatialRDD {

  import scala.collection.JavaConverters._

  // initialize the kryo registrator endpoint once, when this object is first referenced
  GeoMesaSparkKryoRegistratorEndpoint.init()

  def apply(rdd: RDD[SimpleFeature], schema: SimpleFeatureType) = new SpatialRDD(rdd, schema)

  /** Converts each feature into the sequence of its attribute values */
  implicit def toValueSeq(in: RDD[SimpleFeature] with Schema): RDD[Seq[AnyRef]] =
    in.map(_.getAttributes.asScala)

  /** Converts each feature into a sequence of (attribute name, value) pairs */
  implicit def toKeyValueSeq(in: RDD[SimpleFeature] with Schema): RDD[Seq[(String, AnyRef)]] =
    in.map(_.getProperties.asScala.map(p => (p.getName.getLocalPart, p.getValue)).toSeq)

  /** Converts each feature into a map of attribute name -> value */
  implicit def toKeyValueMap(in: RDD[SimpleFeature] with Schema): RDD[Map[String, AnyRef]] =
    in.map(_.getProperties.asScala.map(p => (p.getName.getLocalPart, p.getValue)).toMap)

  /** Serializes each feature to a GeoJSON string */
  implicit def toGeoJSONString(in: RDD[SimpleFeature] with Schema): RDD[String] = {
    val sft = in.schema
    in.mapPartitions { features =>
      // one serializer and string buffer per partition, re-used for every feature
      val json = new GeoJsonSerializer(sft)
      val sw = new StringWriter
      // note: we don't need to close this since we're writing to a string
      val jw = GeoJsonSerializer.writer(sw)

      features.map { f =>
        // reset the shared buffer so each feature starts from an empty string
        sw.getBuffer.setLength(0)
        json.write(jw, f)
        jw.flush()
        sw.toString
      }
    }
  }

  /** Enrichment providing explicit method-call syntax for the conversions above */
  implicit class SpatialRDDConversions(in: RDD[SimpleFeature] with Schema) {
    def asGeoJSONString: RDD[String] = toGeoJSONString(in)
    def asKeyValueMap: RDD[Map[String, AnyRef]] = toKeyValueMap(in)
    def asKeyValueSeq: RDD[Seq[(String, AnyRef)]] = toKeyValueSeq(in)
    def asValueSeq: RDD[Seq[AnyRef]] = toValueSeq(in)
  }
}
| locationtech/geomesa | geomesa-spark/geomesa-spark-core/src/main/scala/org/locationtech/geomesa/spark/SpatialRDD.scala | Scala | apache-2.0 | 3,130 |
package mavigator
package cockpit
import org.scalajs.dom.html
import scalatags.JsDom.all._
import util._
/** Provides main cockpit layout. */
trait Layout { self: Page with Instruments =>

  /** Elements to display in the mode control panel (top panel). */
  def mcp = div(id := "mcp")(
    img(src := asset("images/logo-invert.svg"), style:="height: 20px; margin: 5px;"),
    span(`class`:="mode warning")("Demo System"),
    modes
  )

  /** Element to display on heads-up display (main area). */
  def hud = div(id :="hud")(
    horizonOverlay.element,
    attitudeOverlay.element
  )

  // flexbox column layout: #mcp keeps its natural height, #hud stretches to fill the rest
  val layoutStyle = """
    |#cockpit {
    |  width: 100%;
    |  height: 100%;
    |  display: flex;
    |  flex-direction: column;
    |  justify-content: flex-start;
    |  align-items: stretch;
    |
    |  background-color: pink;
    |}
    |
    |#mcp {
    |  flex: 0 1 0;
    |  background-color: #222222;
    |}
    |
    |#hud {
    |  flex: 1 1 auto;
    |  position: relative;
    |  background-color: lightblue;
    |}""".stripMargin

  override def styles = Seq(layoutStyle) ++ instrumentStyles

  override def elements: Seq[html.Element] = Seq(div(id := "cockpit")(
    mcp,
    hud
  ).render)
}
| project-condor/mavigator | mavigator-cockpit/src/main/scala/mavigator/cockpit/Layout.scala | Scala | gpl-3.0 | 1,232 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.expressions.codegen.CodegenFallback
import org.apache.spark.sql.catalyst.errors
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.logical.LeafNode
import org.apache.spark.sql.catalyst.trees.TreeNode
import org.apache.spark.sql.types.DataType
/**
 * Thrown when an invalid attempt is made to access a property of a tree that has yet to be fully
 * resolved.
 *
 * @param tree the unresolved tree node whose property was accessed
 * @param function the name of the property or method that was invalidly accessed
 */
class UnresolvedException[TreeType <: TreeNode[_]](tree: TreeType, function: String) extends
  errors.TreeNodeException(tree, s"Invalid call to $function on unresolved object", null)
/**
 * Holds the name of a relation that has yet to be looked up in a [[Catalog]].
 *
 * @param tableIdentifier the (possibly qualified) name parts, e.g. Seq("db", "table")
 * @param alias an optional alias for the relation
 */
case class UnresolvedRelation(
    tableIdentifier: Seq[String],
    alias: Option[String] = None) extends LeafNode {

  /** Returns a `.` separated name for this relation. */
  def tableName: String = tableIdentifier.mkString(".")

  // no output attributes are known until the relation is looked up in the catalog
  override def output: Seq[Attribute] = Nil

  override lazy val resolved = false
}
/**
 * Holds the name of an attribute that has yet to be resolved.
 */
case class UnresolvedAttribute(nameParts: Seq[String]) extends Attribute with Unevaluable {

  // re-assembles the dotted name, backtick-quoting any part that itself contains a dot
  def name: String =
    nameParts.map(n => if (n.contains(".")) s"`$n`" else n).mkString(".")

  // none of these properties are meaningful before the attribute is resolved
  override def exprId: ExprId = throw new UnresolvedException(this, "exprId")
  override def dataType: DataType = throw new UnresolvedException(this, "dataType")
  override def nullable: Boolean = throw new UnresolvedException(this, "nullable")
  override def qualifiers: Seq[String] = throw new UnresolvedException(this, "qualifiers")

  override lazy val resolved = false

  // transformation operations are no-ops on an unresolved attribute, except renaming,
  // which re-wraps the new name as a single (quoted) name part
  override def newInstance(): UnresolvedAttribute = this
  override def withNullability(newNullability: Boolean): UnresolvedAttribute = this
  override def withQualifiers(newQualifiers: Seq[String]): UnresolvedAttribute = this
  override def withName(newName: String): UnresolvedAttribute = UnresolvedAttribute.quoted(newName)

  override def toString: String = s"'$name"
}
object UnresolvedAttribute {

  /**
   * Creates an [[UnresolvedAttribute]], parsing segments separated by dots ('.').
   */
  def apply(name: String): UnresolvedAttribute = new UnresolvedAttribute(name.split("\\."))

  /**
   * Creates an [[UnresolvedAttribute]], from a single quoted string (for example using backticks
   * in HiveQL). Since the string is considered quoted, no processing is done on the name.
   */
  def quoted(name: String): UnresolvedAttribute = new UnresolvedAttribute(Seq(name))

  /**
   * Creates an [[UnresolvedAttribute]] from a string in an embedded language. In this case
   * we treat it as a quoted identifier, except for '.', which must be further quoted using
   * backticks if it is part of a column name.
   */
  def quotedString(name: String): UnresolvedAttribute =
    new UnresolvedAttribute(parseAttributeName(name))

  /**
   * Used to split attribute name by dot with backticks rule.
   * Backticks must appear in pairs, and the quoted string must be a complete name part,
   * which means `ab..c`e.f is not allowed.
   * Escape character is not supported now, so we can't use backtick inside name part.
   *
   * @throws AnalysisException if the name is malformed
   */
  def parseAttributeName(name: String): Seq[String] = {
    // every malformed name surfaces as the same analysis error
    def e = new AnalysisException(s"syntax error in attribute name: $name")
    val nameParts = scala.collection.mutable.ArrayBuffer.empty[String]
    val tmp = scala.collection.mutable.ArrayBuffer.empty[Char]
    var inBacktick = false
    var i = 0
    while (i < name.length) {
      val char = name(i)
      if (inBacktick) {
        if (char == '`') {
          inBacktick = false
          // a closing backtick must end the name or be immediately followed by a dot
          if (i + 1 < name.length && name(i + 1) != '.') throw e
        } else {
          tmp += char
        }
      } else {
        if (char == '`') {
          // backticks must quote a complete name part, so nothing may precede the opener
          if (tmp.nonEmpty) throw e
          inBacktick = true
        } else if (char == '.') {
          // reject leading dots, empty parts ("a..b") and trailing dots; the `i == 0` guard
          // fixes a crash where a leading dot indexed name(-1) and threw
          // StringIndexOutOfBoundsException instead of the intended AnalysisException
          if (i == 0 || name(i - 1) == '.' || i == name.length - 1) throw e
          nameParts += tmp.mkString
          tmp.clear()
        } else {
          tmp += char
        }
      }
      i += 1
    }
    // an unterminated backtick quote is an error
    if (inBacktick) throw e
    nameParts += tmp.mkString
    nameParts.toSeq
  }
}
/**
 * Holds a function call whose name has yet to be bound to a concrete function.
 *
 * @param name the unresolved name of the function
 * @param children the argument expressions passed to the function
 * @param isDistinct whether the DISTINCT keyword was used in the call
 */
case class UnresolvedFunction(
    name: String,
    children: Seq[Expression],
    isDistinct: Boolean)
  extends Expression with Unevaluable {

  // type information is unavailable until the function is resolved
  override def dataType: DataType = throw new UnresolvedException(this, "dataType")
  override def foldable: Boolean = throw new UnresolvedException(this, "foldable")
  override def nullable: Boolean = throw new UnresolvedException(this, "nullable")

  override lazy val resolved = false

  override def toString: String = s"'$name(${children.mkString(",")})"
}
/**
 * Represents all of the input attributes to a given relational operator, for example in
 * "SELECT * FROM ...". A [[Star]] gets automatically expanded during analysis.
 */
abstract class Star extends LeafExpression with NamedExpression {

  // a star has no identity of its own; named-expression properties are invalid until expansion
  override def name: String = throw new UnresolvedException(this, "name")
  override def exprId: ExprId = throw new UnresolvedException(this, "exprId")
  override def dataType: DataType = throw new UnresolvedException(this, "dataType")
  override def nullable: Boolean = throw new UnresolvedException(this, "nullable")
  override def qualifiers: Seq[String] = throw new UnresolvedException(this, "qualifiers")
  override def toAttribute: Attribute = throw new UnresolvedException(this, "toAttribute")
  override lazy val resolved = false

  /** Expands this star into concrete named expressions over the operator's input attributes. */
  def expand(input: Seq[Attribute], resolver: Resolver): Seq[NamedExpression]
}
/**
 * Represents all of the input attributes to a given relational operator, for example in
 * "SELECT * FROM ...".
 *
 * @param table an optional table that should be the target of the expansion. If omitted all
 *              tables' columns are produced.
 */
case class UnresolvedStar(table: Option[String]) extends Star with Unevaluable {

  override def expand(input: Seq[Attribute], resolver: Resolver): Seq[NamedExpression] = {
    val expandedAttributes: Seq[Attribute] = table match {
      // If there is no table specified, use all input attributes.
      case None => input
      // If there is a table, pick out attributes that are part of this table.
      case Some(t) => input.filter(_.qualifiers.filter(resolver(_, t)).nonEmpty)
    }
    // NOTE(review): zipping the possibly-filtered attributes against the FULL input pairs them
    // positionally; when a table qualifier removed attributes the pairing is offset. The
    // original attribute is only used for its name/qualifiers when aliasing -- confirm intended.
    expandedAttributes.zip(input).map {
      // attributes that are already named pass through unchanged
      case (n: NamedExpression, _) => n
      // anything else is aliased after the attribute at the same input position
      case (e, originalAttribute) =>
        Alias(e, originalAttribute.name)(qualifiers = originalAttribute.qualifiers)
    }
  }

  override def toString: String = table.map(_ + ".").getOrElse("") + "*"
}
/**
 * Used to assign new names to Generator's output, such as hive udtf.
 * For example the SQL expression "stack(2, key, value, key, value) as (a, b)" could be represented
 * as follows:
 *  MultiAlias(stack_function, Seq(a, b))
 *
 * @param child the computation being performed
 * @param names the names to be associated with each output of computing [[child]].
 */
case class MultiAlias(child: Expression, names: Seq[String])
  extends UnaryExpression with NamedExpression with CodegenFallback {

  // single-name/attribute properties are invalid on a multi-output alias until it is expanded
  override def name: String = throw new UnresolvedException(this, "name")
  override def exprId: ExprId = throw new UnresolvedException(this, "exprId")
  override def dataType: DataType = throw new UnresolvedException(this, "dataType")
  override def nullable: Boolean = throw new UnresolvedException(this, "nullable")
  override def qualifiers: Seq[String] = throw new UnresolvedException(this, "qualifiers")
  override def toAttribute: Attribute = throw new UnresolvedException(this, "toAttribute")

  override lazy val resolved = false

  override def toString: String = s"$child AS $names"
}
/**
 * Represents all the resolved input attributes to a given relational operator. This is used
 * in the data frame DSL.
 *
 * @param expressions Expressions to expand.
 */
case class ResolvedStar(expressions: Seq[NamedExpression]) extends Star with Unevaluable {
  // already resolved, so expansion ignores the input and returns the captured expressions
  override def expand(input: Seq[Attribute], resolver: Resolver): Seq[NamedExpression] = expressions
  override def toString: String = expressions.mkString("ResolvedStar(", ", ", ")")
}
/**
 * Extracts a value or values from an Expression
 *
 * @param child The expression to extract value from,
 *              can be Map, Array, Struct or array of Structs.
 * @param extraction The expression to describe the extraction,
 *                   can be key of Map, index of Array, field name of Struct.
 */
case class UnresolvedExtractValue(child: Expression, extraction: Expression)
  extends UnaryExpression with Unevaluable {

  // the result type depends on the (not yet resolved) child and extraction expressions
  override def dataType: DataType = throw new UnresolvedException(this, "dataType")
  override def foldable: Boolean = throw new UnresolvedException(this, "foldable")
  override def nullable: Boolean = throw new UnresolvedException(this, "nullable")

  override lazy val resolved = false

  override def toString: String = s"$child[$extraction]"
}
/**
 * Holds the expression that has yet to be aliased.
 */
case class UnresolvedAlias(child: Expression)
  extends UnaryExpression with NamedExpression with Unevaluable {

  // naming properties are only available once an actual alias has been assigned
  override def toAttribute: Attribute = throw new UnresolvedException(this, "toAttribute")
  override def qualifiers: Seq[String] = throw new UnresolvedException(this, "qualifiers")
  override def exprId: ExprId = throw new UnresolvedException(this, "exprId")
  override def nullable: Boolean = throw new UnresolvedException(this, "nullable")
  override def dataType: DataType = throw new UnresolvedException(this, "dataType")
  override def name: String = throw new UnresolvedException(this, "name")

  override lazy val resolved = false
}
| ArvinDevel/onlineAggregationOnSparkV2 | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala | Scala | apache-2.0 | 10,544 |
package com.nabijaczleweli.fancymagicks.render.model
import java.io.{ByteArrayInputStream, IOException, InputStream}
import java.util.zip.{ZipEntry, ZipException, ZipInputStream}
import javax.xml.parsers.{DocumentBuilderFactory, ParserConfigurationException}
import cpw.mods.fml.common.FMLLog
import cpw.mods.fml.relauncher.{Side, SideOnly}
import net.minecraft.client.Minecraft
import net.minecraft.client.model.{ModelBase, ModelRenderer}
import net.minecraft.util.ResourceLocation
import net.minecraftforge.client.model.{IModelCustom, ModelFormatException}
import org.lwjgl.opengl.GL11
import org.lwjgl.opengl.GL11._
import org.xml.sax.SAXException
import scala.collection.immutable.HashMap
import scala.collection.mutable.{LinkedHashMap => mLinkedHashMap, Map => mMap}
/** A working techne model.<br />
* Shamelessly stolen from Dullkus.<br />
* Brutally `scala`ized by nabijaczleweli.
*
* @see [[https://github.com/drullkus/MateriaMuto/blob/master/src/main/java/com/agilemods/materiamuto/client/model/importer/TechneModel.java]]
* @note I probably didn't quite catch all parts of `IDEA`'s "calling EVERY parameterless method without parens" bullshit, please report
*/
@SideOnly(Side.CLIENT)
class TechneModel @throws(classOf[ModelFormatException]) (resource: ResourceLocation) extends ModelBase with IModelCustom {

  /** Model parts keyed by shape name, preserving document order. */
  val parts: mMap[String, ModelRenderer] = new mLinkedHashMap

  private val fileName = resource.toString
  private var zipContents = new HashMap[String, Array[Byte]]
  private var texture = ""

  /*
  private var textureName : Int = 0
  private var textureNameSet: Boolean = false
  */

  loadTechneModel((Minecraft.getMinecraft.getResourceManager getResource resource).getInputStream)

  /**
   * Parses a Techne model: a zip archive containing a `model(s).xml` document describing
   * the texture, texture size and a list of cube shapes. Populates [[parts]].
   *
   * @throws ModelFormatException if the stream is not a zip, cannot be read,
   *                              contains no model XML, or the XML is invalid
   */
  @throws(classOf[ModelFormatException])
  private def loadTechneModel(stream: InputStream) {
    try {
      // slurp every zip entry into memory, keyed by entry name
      // (assumes each entry records its uncompressed size, i.e. getSize != -1 -- TODO confirm)
      val zipInput = new ZipInputStream(stream)
      var entry: ZipEntry = null
      while({entry = zipInput.getNextEntry; entry} != null) {
        val data = new Array[Byte](entry.getSize.toInt)
        // For some reason, using read(byte[]) makes reading stall upon reaching a 0x1E byte
        for(i <- 0 until data.length if zipInput.available > 0)
          data(i) = zipInput.read().toByte
        zipContents += entry.getName -> data
      }

      // either file name is accepted; prefer models.xml when both are present
      val modelXml =
        (zipContents get "models.xml", zipContents get "model.xml") match {
          case (Some(a), None) =>
            a
          case (None, Some(a)) =>
            a
          case (Some(a0), Some(_)) =>
            a0
          case _ =>
            throw new ModelFormatException("Model " + fileName + " contains no model(s).xml file")
        }

      val document = DocumentBuilderFactory.newInstance.newDocumentBuilder parse new ByteArrayInputStream(modelXml)

      val nodeListTechne = document getElementsByTagName "Techne"
      if(nodeListTechne.getLength < 1)
        throw new ModelFormatException("Model " + fileName + " contains no Techne tag")

      val nodeListModel = document getElementsByTagName "Model"
      if(nodeListModel.getLength < 1)
        throw new ModelFormatException("Model " + fileName + " contains no Model tag")

      val modelAttributes = nodeListModel.item(0).getAttributes
      if(modelAttributes == null)
        throw new ModelFormatException("Model " + fileName + " contains a Model tag with no attributes")

      texture = (Option(modelAttributes getNamedItem "texture") map {_.getTextContent}).orNull

      // "w,h" -> textureWidth/textureHeight (fields inherited from ModelBase)
      val textureSize = document getElementsByTagName "TextureSize"
      for(i <- 0 until textureSize.getLength) {
        val size = textureSize.item(i).getTextContent
        val textureDimensions = size split ","
        textureWidth = textureDimensions(0).toInt
        textureHeight = textureDimensions(1).toInt
      }

      val shapes = document getElementsByTagName "Shape"
      for(i <- 0 until shapes.getLength) {
        val shape = shapes item i
        val shapeAttributes = shape.getAttributes
        if(shapeAttributes == null)
          throw new ModelFormatException("Shape #" + (i + 1) + " in " + fileName + " has no attributes")

        val shapeName = Option(shapeAttributes getNamedItem "name").fold(s"Shape #${i + 1}")(_.getNodeValue)
        val shapeType = (Option(shapeAttributes getNamedItem "type") map {_.getNodeValue}).orNull
        // only plain cube shapes are supported; anything else is skipped with a warning
        if(shapeType != null && !TechneModel.cubeTypes.contains(shapeType))
          FMLLog.warning("Model shape [" + shapeName + "] in " + fileName + " is not a cube, ignoring")
        else
          try {
            var mirrored = false
            var offset = new Array[String](3)
            var position = new Array[String](3)
            var rotation = new Array[String](3)
            var size = new Array[String](3)
            var textureOffset = new Array[String](2)

            // pull the cube parameters out of the shape's child elements
            val shapeChildren = shape.getChildNodes
            for(j <- 0 until shapeChildren.getLength) {
              val shapeChild = shapeChildren item j
              val shapeChildName = shapeChild.getNodeName
              var shapeChildValue = shapeChild.getTextContent
              if(shapeChildValue != null) {
                shapeChildValue = shapeChildValue.trim
                shapeChildName match {
                  case "IsMirrored" =>
                    mirrored = !(shapeChildValue == "False")
                  case "Offset" =>
                    offset = shapeChildValue split ","
                  case "Position" =>
                    position = shapeChildValue split ","
                  case "Rotation" =>
                    rotation = shapeChildValue split ","
                  case "Size" =>
                    size = shapeChildValue split ","
                  case "TextureOffset" =>
                    textureOffset = shapeChildValue split ","
                  case _ =>
                }
              }
            }

            // That's what the ModelBase subclassing is needed for
            val cube = new ModelRenderer(this, shapeName)
            cube.setTextureOffset(textureOffset(0).toInt, textureOffset(1).toInt)
            cube.mirror = mirrored
            cube.addBox(offset(0).toFloat, offset(1).toFloat, offset(2).toFloat, size(0).toInt, size(1).toInt, size(2).toInt)
            // Techne's vertical origin is offset 16 units from Minecraft's renderer origin
            cube.setRotationPoint(position(0).toFloat, position(1).toFloat - 16, position(2).toFloat)
            cube.rotateAngleX = (Math toRadians rotation(0).toFloat).toFloat
            cube.rotateAngleY = (Math toRadians rotation(1).toFloat).toFloat
            cube.rotateAngleZ = (Math toRadians rotation(2).toFloat).toFloat

            if(parts.keysIterator contains shapeName)
              throw new ModelFormatException("Model contained duplicate part name: '" + shapeName + "' node #" + i)
            parts += shapeName -> cube
          } catch {
            case nfe: NumberFormatException =>
              FMLLog.warning("Model shape [" + shapeName + "] in " + fileName + " contains malformed integers within its data, ignoring")
              nfe.printStackTrace()
          }
      }
    } catch {
      case ze: ZipException =>
        throw new ModelFormatException("Model " + fileName + " is not a valid zip file")
      case ioe: IOException =>
        throw new ModelFormatException("Model " + fileName + " could not be read", ioe)
      case pce: ParserConfigurationException =>
        // hush
      case saxe: SAXException =>
        throw new ModelFormatException("Model " + fileName + " contains invalid XML", saxe)
    }
  }

  private def bindTexture() {}

  override val getType = "tcn"

  // Techne's coordinate system is mirrored relative to the renderer's, hence the flip
  private def setup() {
    GL11.glScalef(-1, -1, 1)
  }

  /** Renders every part of the model. */
  def renderAll() {
    glPushMatrix()
    bindTexture()
    setup()
    for(part <- parts.values)
      part render .0625F
    glPopMatrix()
  }

  /** Renders the single named part, if present (exact-case lookup). */
  def renderPart(partName: String) =
    parts get partName match {
      case None =>
      case Some(part) =>
        glPushMatrix()
        setup()
        bindTexture()
        part render .0625F
        glPopMatrix()
    }

  /** Renders only the parts whose names match (case-insensitively) one of `groupNames`. */
  def renderOnly(groupNames: String*) {
    glPushMatrix()
    setup()
    bindTexture()
    // fixed: the loop body used to call glPopMatrix() instead of rendering the matching
    // part, so nothing was ever drawn and the matrix stack was left unbalanced
    for((key, value) <- parts if groupNames.exists(key equalsIgnoreCase _))
      value render .0625F
    glPopMatrix()
  }

  /** Renders the matching parts, each rotated by `angle` around the given axis at its own pivot. */
  def renderOnlyAroundPivot(angle: Double, rotX: Double, rotY: Double, rotZ: Double, groupNames: String*) {
    glPushMatrix()
    setup()
    bindTexture()
    for((key, model) <- parts if groupNames.exists(key equalsIgnoreCase _)) {
      glPushMatrix()
      // rotate about the part's own pivot: translate to the pivot, rotate, translate back
      glTranslatef(model.rotationPointX / 16, model.rotationPointY / 16, model.rotationPointZ / 16)
      glRotated(angle, rotX, rotY, rotZ)
      glTranslatef(-model.rotationPointX / 16, -model.rotationPointY / 16, -model.rotationPointZ / 16)
      model render .0625F
      glPopMatrix()
    }
    glPopMatrix()
  }

  /** Renders every part except those whose names match one of `excludedGroupNames`. */
  def renderAllExcept(excludedGroupNames: String*) {
    glPushMatrix()
    setup()
    // fixed: this used to iterate the cross product of parts and exclusions, rendering a part
    // once for every exclusion it did NOT match -- so with more than one exclusion, excluded
    // parts were still drawn and all other parts were drawn repeatedly
    for((key, value) <- parts if !excludedGroupNames.exists(key equalsIgnoreCase _))
      value render .0625F
    glPopMatrix()
  }
}
@SideOnly(Side.CLIENT)
object TechneModel {
  /** Techne shape-type GUIDs that denote plain cubes -- the only shape kind this loader supports. */
  val cubeTypes = List(
    "d9e621f7-957f-4b77-b1ae-20dcd0da7751",
    "de81aa14-bd60-4228-8d8d-5238bcd3caaa")
}
| nabijaczleweli/Magicks | src/main/scala/com/nabijaczleweli/fancymagicks/render/model/TechneModel.scala | Scala | mit | 8,474 |
/* Copyright 2009-2016 EPFL, Lausanne */
package leon
package utils
import java.io.File
abstract class Position extends Ordered[Position] {
  val line: Int
  val col: Int
  val file: File

  /**
   * Orders positions by file path (a null file sorts before any non-null one),
   * then by line, then by column.
   */
  def compare(that: Position) =
    if (this.file == that.file) {
      val byLine = this.line - that.line
      if (byLine != 0) byLine else this.col - that.col
    } else if (this.file eq null) {
      -1
    } else if (that.file eq null) {
      +1
    } else {
      this.file.getPath.compare(that.file.getPath)
    }

  def fullString: String

  def isDefined: Boolean
}
object Position {

  /** Merges the two positions into the smallest RangePosition containing both positions */
  def between(a: Position, b: Position): Position = {
    if (a.file == b.file) {
      if (a.line == b.line && a.col == b.col) {
        a
      } else {
        // order the endpoints so `from` precedes `to` (uses Position's ordering)
        val (from, to) = if (a < b) (a, b) else (b, a)
        (from, to) match {
          case (p1: OffsetPosition, p2: OffsetPosition) =>
            RangePosition(p1.line, p1.col, p1.point, p2.line, p2.col, p2.point, p1.file)
          case (p1: RangePosition, p2: RangePosition) =>
            RangePosition(p1.lineFrom, p1.colFrom, p1.pointFrom, p2.lineTo, p2.colTo, p2.pointTo, p1.file)
          case (p1: OffsetPosition, p2: RangePosition) =>
            RangePosition(p1.line, p1.col, p1.point, p2.lineTo, p2.colTo, p2.pointTo, p1.file)
          case (p1: RangePosition, p2: OffsetPosition) =>
            RangePosition(p1.lineFrom, p1.colFrom, p1.pointFrom, p2.line, p2.col, p2.point, p1.file)
          case (a,b) =>
            // at least one endpoint is NoPosition (or some other non-defined position):
            // fall back to the first endpoint
            a
        }
      }
    } else {
      // positions in different files cannot be merged; arbitrarily keep the first
      a
    }
  }

  /** Returns true if position a is inside position b.*/
  def isInside(a: Position, b: Position): Boolean = {
    b match {
      case b: OffsetPosition =>
        a match {
          case OffsetPosition(line, col, point, file) =>
            file == b.file && point == b.point
          // a range only fits inside a single point if it is the degenerate range at that point
          case RangePosition(line, col, pointFrom, line2, col2, pointTo, file) =>
            file == b.file && pointFrom == b.point && pointTo == pointFrom
          case _ => false
        }
      case b: RangePosition =>
        a match {
          case OffsetPosition(line, col, point, file) =>
            file == b.file && point >= b.pointFrom && point <= b.pointTo
          case RangePosition(line, col, pointFrom, line2, col2, pointTo, file) =>
            file == b.file && pointFrom >= b.pointFrom && pointTo <= b.pointTo
          case _ => false
        }
      // NoPosition contains nothing
      case _ => false
    }
  }
}
/** A position that actually points into a file (as opposed to [[NoPosition]]). */
abstract class DefinedPosition extends Position {
  override def toString = s"$line:$col"
  override def fullString = s"${file.getPath}:$line:$col"
  override def isDefined = true

  /** The single-point position at the start of this position. */
  def focusBegin: OffsetPosition

  /** The single-point position at the end of this position. */
  def focusEnd: OffsetPosition
}
/** A single point in a file. */
case class OffsetPosition(line: Int, col: Int, point: Int, file: File) extends DefinedPosition {
  // a point is its own begin and end
  def focusBegin = this
  def focusEnd = this
}
/** A contiguous span in a file, from (lineFrom, colFrom) to (lineTo, colTo) inclusive. */
case class RangePosition(lineFrom: Int, colFrom: Int, pointFrom: Int,
                         lineTo: Int, colTo: Int, pointTo: Int,
                         file: File) extends DefinedPosition {
  def focusEnd = OffsetPosition(lineTo, colTo, pointTo, file)
  def focusBegin = OffsetPosition(lineFrom, colFrom, pointFrom, file)

  // a range's nominal line/col is its starting point
  val line = lineFrom
  val col = colFrom
}
/** Sentinel for "no position known"; sorts before every file-bound position. */
case object NoPosition extends Position {
  val line = -1
  val col = -1
  val file = null
  override def toString = "?:?"
  override def fullString = "?:?:?"
  override def isDefined = false
}
/** Mixin for objects that carry a (mutable) source position. */
trait Positioned {

  // starts out unset; mutated in place by the setPos variants
  private[this] var _pos: Position = NoPosition

  /** Attaches the given position and returns this object for chaining. */
  def setPos(pos: Position): this.type = {
    _pos = pos
    this
  }

  /** Copies the position of another positioned object onto this one. */
  def setPos(that: Positioned): this.type = setPos(that.getPos)

  def getPos = _pos
}
| epfl-lara/leon | src/main/scala/leon/utils/Positions.scala | Scala | gpl-3.0 | 3,793 |
package org.psliwa.idea.composerJson.intellij.codeAssist
import com.intellij.json.JsonLanguage
import com.intellij.json.psi.{JsonFile, JsonObject, JsonProperty, JsonStringLiteral}
import com.intellij.patterns.PlatformPatterns._
import com.intellij.patterns.PsiElementPattern
import com.intellij.patterns.StandardPatterns._
import org.psliwa.idea.composerJson._
package object composer {

  /**
   * PSI pattern matching a JSON string literal that belongs to a property inside the
   * `require` or `require-dev` object of a `composer.json` file (i.e. a package name
   * or version constraint).
   */
  private[composer] def packageElement: PsiElementPattern.Capture[JsonStringLiteral] = {
    psiElement(classOf[JsonStringLiteral])
      .inFile(psiFile(classOf[JsonFile]).withName(ComposerJson))
      .withLanguage(JsonLanguage.INSTANCE)
      .withParent(
        // literal -> property -> object -> "require"/"require-dev" property
        psiElement(classOf[JsonProperty]).withParent(
          psiElement(classOf[JsonObject]).withParent(
            or(
              psiElement(classOf[JsonProperty]).withName("require"),
              psiElement(classOf[JsonProperty]).withName("require-dev")
            )
          )
        )
      )
  }
}
| psliwa/idea-composer-plugin | src/main/scala/org/psliwa/idea/composerJson/intellij/codeAssist/composer/package.scala | Scala | mit | 963 |
package fpinscala.parallelism
import java.util.concurrent.Executors
import org.scalatest.{FlatSpec, Matchers}
class NonblockingSpec extends FlatSpec with Matchers {

  import Nonblocking.Par._

  // shared pool for all tests; NOTE(review): never shut down -- relies on JVM exit
  private val executor = Executors.newFixedThreadPool(2)

  // runs the Par on the shared executor and applies the given assertion to its result
  private def testParResult[A, B](p: Nonblocking.Par[A])(test: A => B) =
    test(Nonblocking.Par.run(executor)(p))

  // Exercise 11
  "Nonblocking Par" should "choose between N parallel computations based on the result of Par[Int]" in {
    testParResult(choiceN(unit(1))(List(unit("zero"), unit("one"))))(
      _ shouldBe "one")
  }

  it should "choose between 2 parallel computations based on the result of Par[Boolean]" in {
    testParResult(choiceViaChoiceN(unit(false))(unit(1), unit(2)))(_ shouldBe 2)
  }

  // Exercise 12
  it should "choose a parallel computation based on a key calculated by another Par" in {
    testParResult(
      choiceMap(unit("y"))(Map("x" -> unit("x"), "y" -> unit("y"))))(
      _ shouldBe "y")
  }

  // Exercise 13
  it should "choose a parallel computation based on the value of previous computation and function f (flatMap basically)" in {
    testParResult(chooser(unit(2))(i => unit(i * 3)))(_ shouldBe 6)
  }

  it should "choose between N parallel computations based on the result of Par[Int] using chooser" in {
    testParResult(choiceNChooser(unit(1))(List(unit("zero"), unit("one"))))(
      _ shouldBe "one")
  }

  it should "choose between 2 parallel computations based on the result of Par[Boolean] using chooser" in {
    testParResult(choiceViaChooser(unit(false))(unit(2), unit(1)))(_ shouldBe 2)
  }

  // Exercise 14
  it should "join nested Par computations" in {
    testParResult(join(unit(unit(1))))(_ shouldBe 1)
  }

  it should "join nested Par computations using flatMap" in {
    testParResult(joinViaFlatMap(unit(unit(2))))(_ shouldBe 2)
  }

  it should "implement flatMap using join" in {
    testParResult(flatMapViaJoin(unit(1))(i => unit(i + 3)))(_ shouldBe 4)
  }
}
| goboss/fpinscala | exercises/src/test/scala/fpinscala/parallelism/NonblockingSpec.scala | Scala | mit | 2,002 |
/*
* DocumentHandlerImpl.scala
* (Mellite)
*
* Copyright (c) 2012-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.mellite.impl.document
import de.sciss.desktop
import de.sciss.desktop.{Desktop, DocumentHandler => DH}
import de.sciss.lucre.swing.LucreSwing.defer
import de.sciss.lucre.synth.Txn
import de.sciss.mellite.ActionCloseAllWorkspaces
import de.sciss.mellite.{DocumentHandler, Mellite}
import de.sciss.model.impl.ModelImpl
import de.sciss.proc.Universe
/** We are bridging between the transactional and non-EDT `mellite.DocumentHandler` and
* the GUI-based `de.sciss.desktop.DocumentHandler`. This is a bit ugly. In theory it
* should be fine to call into either, as this bridge is backed up by the peer
* `mellite.DocumentHandler.instance`.
*/
class DocumentHandlerImpl
  extends desktop.DocumentHandler[Mellite.Document]
  with ModelImpl[DH.Update[Mellite.Document]] {

  type Document = Mellite.Document

  // the transactional handler this GUI-facing bridge is backed by
  private def peer = DocumentHandler.instance

  def addDocument(u: Universe[_]): Unit =
    Mellite.withUniverse(u)(addUniverse(_))

  def removeDocument(u: Document): Unit =
    Mellite.withUniverse(u)(removeUniverse(_))

  // adding is performed inside a fresh transaction on the universe's cursor
  private def addUniverse[T <: Txn[T]](u: Universe[T]): Unit =
    u.cursor.step { implicit tx => peer.addDocument(u) }

  // removal is expressed by disposing the workspace; the peer's Closed event (handled
  // below) then takes care of the GUI-side bookkeeping
  private def removeUniverse[T <: Txn[T]](u: Universe[T]): Unit =
    u.cursor.step { implicit tx => u.workspace.dispose() }

  def documents: Iterator[Document] = peer.allDocuments

  // the currently focused document, tracked only on the GUI side
  private[this] var _active = Option.empty[Document]

  def activeDocument: Option[Document] = _active

  def activeDocument_=(value: Option[Document]): Unit =
    if (_active != value) {
      _active = value
      // note: only Activated is dispatched; setting None fires no event
      value.foreach { doc => dispatch(DH.Activated(doc)) }
    }

  // forward the transactional open/close notifications to GUI listeners via `defer`
  // (presumably onto the Swing event thread -- see lucre.swing.LucreSwing.defer)
  peer.addListener {
    case DocumentHandler.Opened(u) => defer {
      Mellite.withUniverse(u)(u1 => dispatch(DH.Added(u1)))
    }
    case DocumentHandler.Closed(u) => defer {
      // drop the active document if it is the one being closed
      if (activeDocument.contains(u)) activeDocument = None
      Mellite.withUniverse(u)(u1 => dispatch(DH.Removed(u1)))
    }
  }

  // quitting the application is only accepted if all workspaces can be closed
  Desktop.addQuitAcceptor(ActionCloseAllWorkspaces.tryCloseAll())
}
| Sciss/Mellite | app/src/main/scala/de/sciss/mellite/impl/document/DocumentHandlerImpl.scala | Scala | agpl-3.0 | 2,310 |
package jp.co.cyberagent.aeromock.template.freemarker
import java.nio.file.Path
import jp.co.cyberagent.aeromock.AeromockTestModule
import jp.co.cyberagent.aeromock.config.definition.ProjectDef
import jp.co.cyberagent.aeromock.core.http.VariableManager
import jp.co.cyberagent.aeromock.helper._
import jp.co.cyberagent.aeromock.template.TemplateService
import jp.co.cyberagent.aeromock.test.SpecSupport
import org.specs2.mutable.{Specification, Tables}
/**
*
* @author stormcat24
*/
class FreemarkerTemplateServiceSpec extends Specification with Tables with SpecSupport {

  "render" should {
    "tutorial" in {
      // Test module pointing at the bundled freemarker tutorial project config.
      implicit val module = new AeromockTestModule {
        override val projectConfigPath: Path = getResourcePath(".").resolve("../../../../tutorial/freemarker/project.yaml").toRealPath()
        override val projectDefArround = (projectDef: ProjectDef) => {}
      }
      val service = inject[Option[TemplateService]].get
      // Seed the request variables the template engine reads during rendering.
      VariableManager.initializeRequestMap(Map(
        "USER_AGENT" -> "test",
        "REQUEST_URI" -> "/test",
        "HOST" -> "localhost:3183",
        "QUERY_STRING" -> "",
        "REMOTE_HOST" -> "localhost"
      ))
      VariableManager.initializeOriginalVariableMap(new java.util.HashMap[String, AnyRef]())
      // Rendering "/test" must succeed, i.e. produce a Right and not throw.
      trye(service.render(request("/test"))).isRight
    }
  }
}
| CyberAgent/aeromock | aeromock-freemarker/src/test/scala/jp/co/cyberagent/aeromock/template/freemarker/FreemarkerTemplateServiceSpec.scala | Scala | mit | 1,331 |
/*
* MilmSearch is a mailing list searching system.
*
* Copyright (C) 2013 MilmSearch Project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 3
* of the License, or any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
* You can contact MilmSearch Project at mailing list
* milm-search-public@lists.sourceforge.jp.
*/
package org.milmsearch.core.service
import org.joda.time.DateTime
import org.joda.time.DateTimeZone
/**
 * Date and time related service.
 */
trait DateTimeService {

  /**
   * Returns the current time.
   *
   * @return the current time
   */
  def now(): DateTime
}
/** Default [[DateTimeService]] backed by the system clock, always reported in UTC. */
class DateTimeServiceImpl extends DateTimeService {
  override def now(): DateTime = new DateTime(DateTimeZone.UTC)
}
} | mzkrelx/milm-search-core | src/main/scala/org/milmsearch/core/service/DateTimeService.scala | Scala | gpl-3.0 | 1,205 |
/**
* Copyright 2010-2012 Alex Jones
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package dates
import dates.ZonedDateTimeExtensions._
import java.time.ZonedDateTime
import org.specs2.mutable.Specification
/**
* The Class DateServiceImplTest.
*
* @author alex
*/
class DateParserFactoryImplSpec extends Specification {

  implicit val zonedDateTimeFactory: ZonedDateTimeFactory = new ZonedDateTimeFactoryImpl()

  // Year inference: a yearless month/day is resolved relative to a known
  // "year determining" date, either before or after it.
  "A day in a month before the current date but meant to be later than it" should {
    "be interpreted as a date in the next year" in {
      January(6) after December(25, 2012) must be_===(January(6, 2013))
    }
  }
  "A day in a month after the current date and meant to be later than it" should {
    "be interpreted as a date in the same year" in {
      March(6) after February(25, 2013) must be_===(March(6, 2013))
    }
  }
  "A day in a month before the current date and meant to be earlier than it" should {
    "be interpreted as a date in the same year" in {
      June(6) before September(5, 2012) must be_===(June(6, 2012))
    }
  }
  "A day in a month after the current date and meant to be earlier than it" should {
    "be interpreted as a date in the previous year" in {
      December(25) before January(6, 2012) must be_===(December(25, 2011))
    }
  }
  // Format handling: brackets mark an optional year component.
  "A date format with an explicit year" should {
    parseAndFind("05/09/1972 09:12", Before(October(10, 2012)) parsedBy "dd/MM[/yyyy] HH:mm",
      Some(September(5, 1972) at (9, 12)))
  }
  "A date format that possibly requires a year to be added" should {
    parseAndFind("05/09 09:12", Before(October(10, 2012)) parsedBy "dd/MM[/yyyy] HH:mm",
      Some(September(5, 2012) at (9, 12)))
  }
  "A date format that definitely requires a year to be added" should {
    parseAndFind("05/09 09:12", Before(October(10, 2012)) parsedBy "dd/MM HH:mm",
      Some(September(5, 2012) at (9, 12)))
  }
  "A date format that fails the first parse but succeeds on the second" should {
    parseAndFind("05/09 09:12", Before(October(10, 2012)) parsedBy ("HH:mm dd/MM[/yyyy]", "dd/MM[/yyyy] HH:mm"),
      Some(September(5, 2012) at (9, 12)))
  }
  "A date format requiring the day of a week but without a year" should {
    parseAndFind("9am Thu 26 Jan", Before(February(18, 2012)) parsedBy "ha EEE dd MMM",
      Some(January(26, 2012) at (9, 0)))
  }
  "An invalid date format" should {
    parseAndFind("05:09 9:12", Before(October(10, 2012)) parsedBy "dd/MM[/yyyy] HH:mm", None)
  }
  "Checking for whether a day is during the working week or not" should {
    // The `day` numbers pick dates in September 1972 so that 3..9 cover a
    // full Sunday-to-Saturday week (per this test's own mapping below).
    sealed class DayOfWeek(val day: Int, val name: String, val isWeekday: Boolean)
    object Sunday extends DayOfWeek(3, "Sunday", false)
    object Monday extends DayOfWeek(4, "Monday", true)
    object Tuesday extends DayOfWeek(5, "Tuesday", true)
    object Wednesday extends DayOfWeek(6, "Wednesday", true)
    object Thursday extends DayOfWeek(7, "Thursday", true)
    object Friday extends DayOfWeek(8, "Friday", true)
    object Saturday extends DayOfWeek(9, "Saturday", false)
    // NOTE(review): Sunday appears at both ends of this list, so its example
    // is generated twice — presumably harmless, but possibly unintended.
    List(Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday) foreach { dayOfWeek =>
      val day = September(dayOfWeek.day, 1972)
      s"${dayOfWeek.name} $day must ${if (dayOfWeek.isWeekday) "" else "not "}be a weekday" in {
        day.toZonedDateTime.isWeekday must be_===(dayOfWeek.isWeekday)
      }
    }
    // Trailing example so the `should` block ends with a result.
    "nothing else" in {
      1 must be_===(1)
    }
  }
  "Checking for whether an zonedDateTime is the fabled Saturday at 3pm" should {
    // Only the first entry (the 9th at 15:00) is expected to match.
    List(
      September(9, 1972) at (15, 0),
      September(9, 1972) at (12, 0),
      September(9, 1972) at (15, 30),
      September(9, 1972) at (19, 45),
      September(10, 1972) at (15, 0),
      September(10, 1972) at (12, 0),
      September(10, 1972) at (15, 30),
      September(10, 1972) at (19, 45)) zip List(true, false, false, false, false, false, false, false) foreach {
      case (day, isSaturday3pm) =>
        s"$day must ${if (isSaturday3pm) "" else "not "}be Saturday 3pm" in {
          day.isThreeOClockOnASaturday must be_===(isSaturday3pm)
        }
    }
    // Trailing example so the `should` block ends with a result.
    "nothing else" in {
      1 must be_===(1)
    }
  }

  /**
   * Generates examples that both `parse` and `find` the given date string with
   * the supplied parsing rules. The `find` examples also exercise the string
   * surrounded by arbitrary padding.
   */
  def parseAndFind(
    date: String,
    parsingRules: (Boolean, Date, Seq[String]),
    expectedZonedDateTime: Option[ZonedDateTime]) = {
    val (yearDeterminingDateIsLaterThanTheDate, yearDeterminingDate, possiblyYearlessDateFormats) = parsingRules
    s"The date string '$date' using formats ${possiblyYearlessDateFormats.mkString(", ")} must parse to $expectedZonedDateTime" in {
      val actualZonedDateTime =
        new DateParserFactoryImpl().makeParser(
          yearDeterminingDate,
          yearDeterminingDateIsLaterThanTheDate,
          possiblyYearlessDateFormats).parse(date)
      actualZonedDateTime must be_===(expectedZonedDateTime)
    }
    // Padded variants: up to 3 'x' characters before, after, and around.
    1 to 3 flatMap { paddingSize =>
      val padding = (1 to paddingSize).map(_ => "x").mkString
      List(date, padding + date, date + padding, padding + date + padding)
    } foreach { date =>
      s"The date string '$date' using formats ${possiblyYearlessDateFormats.mkString(", ")} must find $expectedZonedDateTime" in {
        val actualZonedDateTime =
          new DateParserFactoryImpl().makeParser(
            yearDeterminingDate,
            yearDeterminingDateIsLaterThanTheDate,
            possiblyYearlessDateFormats).find(date)
        actualZonedDateTime must be_===(expectedZonedDateTime)
      }
    }
    // Trailing example so the enclosing block ends with a result.
    "nothing else" in {
      1 must be_===(1)
    }
  }

  /**
   * Syntactic sugar for date parsing information.
   */
  sealed class BeforeOrAfter(date: Date, before: Boolean) {
    def parsedBy(parseStrings: String*) = (before, date, parseStrings)
  }
  case class Before(date: Date) extends BeforeOrAfter(date, true)
  case class After(date: Date) extends BeforeOrAfter(date, false)

  // Allows `January(6) after/before someDate` in the examples above.
  implicit class BeforeAndAfterImplicits(monthAndDay: MonthAndDay) {
    def after(date: Date) = alter(date, yearDeterminingDateIsLaterThanTheDate = false)
    def before(date: Date) = alter(date, yearDeterminingDateIsLaterThanTheDate = true)
    def alter(yearDeterminingDate: Date, yearDeterminingDateIsLaterThanTheDate: Boolean) = {
      val newZonedDateTime = YearSettingDateParserFactory.setYear(
        monthAndDay.toZonedDateTime, yearDeterminingDate.toZonedDateTime, yearDeterminingDateIsLaterThanTheDate)
      Date(newZonedDateTime)
    }
  }
}
| unclealex72/west-ham-calendar | test/dates/DateParserFactoryImplSpec.scala | Scala | apache-2.0 | 7,202 |
package com.peterpotts.snake.coercion
/**
 * Coercer that converts any value to its `toString` representation.
 *
 * Note: passing `null` will throw a `NullPointerException`, exactly as
 * calling `toString` on `null` would.
 */
case object StringCoercer extends Coercer[String] {
  override def apply(any: Any): String = {
    any.toString
  }
}
| peterpotts/snake | src/main/scala/com/peterpotts/snake/coercion/StringCoercer.scala | Scala | mit | 138 |
import sbt._
import Keys._
// sbt 0.13-era project definition using the (since deprecated) `Build` trait.
object ChafeBuild extends Build {

  // Root project: the chafed library itself, rooted at the repository base.
  lazy val chafed = Project(id = "chafed",
    base = file("."))

  // Sample applications in `samples/`, compiled against the root project.
  lazy val samples = Project(id = "chafed-samples",
    base = file("samples")) dependsOn(chafed)
}
| ofrasergreen/chafed | project/ChafeBuild.scala | Scala | apache-2.0 | 279 |
package com.rocketfuel.sdbc.h2.jdbc
import java.nio.ByteBuffer
import java.sql.{Timestamp, Time, Date}
import java.time._
import java.util.UUID
import scalaz.Scalaz._
class GettersSpec
  extends H2Suite {

  /**
   * Registers a test that runs `query` against H2 and compares the single
   * optional result with `expectedValue`, using the implicit row converter.
   */
  def testSelect[T](query: String, expectedValue: Option[T])(implicit converter: Row => Option[T]): Unit = {
    test(query) { implicit connection =>
      val result = Select[Option[T]](query).option().flatten
      (expectedValue, result) match {
        // Arrays lack structural equals; compare element-wise instead.
        case (Some(expectedArray: Array[_]), Some(resultArray: Array[_])) =>
          assert(expectedArray.sameElements(resultArray))
        case (Some(x), Some(y)) =>
          assertResult(x)(y)
        case (None, None) => true
        case _ => false
      }
    }
  }

  val uuid = UUID.randomUUID()

  // One round-trip test per supported getter type.
  testSelect[Int]("SELECT NULL", none[Int])
  testSelect[Byte]("SELECT CAST(1 AS tinyint)", 1.toByte.some)
  testSelect[Short]("SELECT CAST(1 AS smallint)", 1.toShort.some)
  testSelect[Int]("SELECT CAST(1 AS int)", 1.some)
  testSelect[Long]("SELECT CAST(1 AS bigint)", 1L.some)
  testSelect[String]("SELECT 'hello'", "hello".some)
  testSelect[ByteBuffer]("SELECT 0x0001ffa0", ByteBuffer.wrap(Array[Byte](0, 1, -1, -96)).some)
  testSelect[Float]("SELECT CAST(3.14159 AS real)", 3.14159F.some)
  testSelect[Double]("SELECT CAST(3.14159 AS float)", 3.14159.some)
  testSelect[Boolean]("SELECT CAST(1 AS bit)", true.some)
  testSelect[BigDecimal]("SELECT CAST(3.14159 AS numeric(10,5)) --as Scala BigDecimal", BigDecimal("3.14159").some)
  testSelect[java.math.BigDecimal]("SELECT CAST(3.14159 AS numeric(10,5)) --as Java BigDecimal", BigDecimal("3.14159").underlying.some)
  testSelect[Date]("SELECT CAST('2014-12-29' AS date)", Date.valueOf("2014-12-29").some)
  testSelect[Time]("SELECT CAST('03:04:05' AS time) --as JDBC Time", Time.valueOf("03:04:05").some)
  testSelect[Timestamp]("SELECT CAST('2014-12-29 01:02:03.5' AS datetime)", Timestamp.valueOf("2014-12-29 01:02:03.5").some)
  testSelect[LocalDateTime]("SELECT CAST('2014-12-29 01:02:03.5' AS datetime) --as Java 8 LocalDateTime)", LocalDateTime.parse("2014-12-29T01:02:03.5").some)

  {
    // Convert the time being tested into UTC using the default time zone's
    // offset *at that instant*. We can't use the current offset, because of,
    // for example, daylight savings.
    val localTime = LocalDateTime.parse("2014-12-29T01:02:03.5")
    val offset = ZoneId.systemDefault().getRules.getOffset(localTime)
    val expectedTime = localTime.toInstant(offset)
    testSelect[Instant]("SELECT CAST('2014-12-29 01:02:03.5' AS datetime) --as Java 8 Instant", expectedTime.some)
  }

  testSelect[UUID](s"SELECT CAST('$uuid' AS uuid)", uuid.some)
  // H2 row values map to Seq, including nested and nullable elements.
  testSelect[Seq[Int]]("SELECT (1, 2, 3)", Seq(1, 2, 3).some)
  testSelect[Seq[Option[Int]]]("SELECT (1, NULL, 3)", Seq(1.some, none[Int], 3.some).some)
  testSelect[Seq[Seq[Int]]]("SELECT (())", Seq.empty.some)
  testSelect[Seq[Seq[Int]]]("SELECT ((1, 2),)", Seq(Seq(1, 2)).some)
  testSelect[Seq[Seq[Option[Int]]]]("SELECT ((1, NULL), (2, NULL))", Seq(Seq(Some(1), None), Seq(Some(2), None)).some)
}
| wdacom/sdbc | h2/src/test/scala/com/rocketfuel/sdbc/h2/jdbc/GettersSpec.scala | Scala | bsd-3-clause | 3,128 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.jdbc
import java.sql.{Connection, Date, Timestamp}
import java.time.{Instant, LocalDate}
import scala.collection.mutable.ArrayBuilder
import org.apache.commons.lang3.StringUtils
import org.apache.spark.annotation.{DeveloperApi, Since}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.util.{DateFormatter, DateTimeUtils, TimestampFormatter}
import org.apache.spark.sql.connector.catalog.TableChange
import org.apache.spark.sql.connector.catalog.TableChange._
import org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
/**
 * :: DeveloperApi ::
 * A database type definition coupled with the jdbc type needed to send null
 * values to the database.
 *
 * @param databaseTypeDefinition The database type definition
 * @param jdbcNullType The jdbc type (as defined in java.sql.Types) used to
 *                     send a null value to the database.
 */
@DeveloperApi
case class JdbcType(databaseTypeDefinition : String, jdbcNullType : Int)
/**
 * :: DeveloperApi ::
 * Encapsulates everything (extensions, workarounds, quirks) to handle the
 * SQL dialect of a certain database or jdbc driver.
 * Lots of databases define types that aren't explicitly supported
 * by the JDBC spec. Some JDBC drivers also report inaccurate
 * information---for instance, BIT(n{@literal >}1) being reported as a BIT type is quite
 * common, even though BIT in JDBC is meant for single-bit values. Also, there
 * does not appear to be a standard name for an unbounded string or binary
 * type; we use BLOB and CLOB by default but override with database-specific
 * alternatives when these are absent or do not behave correctly.
 *
 * Currently, the only thing done by the dialect is type mapping.
 * `getCatalystType` is used when reading from a JDBC table and `getJDBCType`
 * is used when writing to a JDBC table. If `getCatalystType` returns `null`,
 * the default type handling is used for the given JDBC type. Similarly,
 * if `getJDBCType` returns `(null, None)`, the default type handling is used
 * for the given Catalyst type.
 */
@DeveloperApi
abstract class JdbcDialect extends Serializable with Logging {

  /**
   * Check if this dialect instance can handle a certain jdbc url.
   * @param url the jdbc url.
   * @return True if the dialect can be applied on the given jdbc url.
   * @throws NullPointerException if the url is null.
   */
  def canHandle(url : String): Boolean

  /**
   * Get the custom datatype mapping for the given jdbc meta information.
   * @param sqlType The sql type (see java.sql.Types)
   * @param typeName The sql type name (e.g. "BIGINT UNSIGNED")
   * @param size The size of the type.
   * @param md Result metadata associated with this type.
   * @return The actual DataType (subclasses of [[org.apache.spark.sql.types.DataType]])
   *         or null if the default type mapping should be used.
   */
  def getCatalystType(
    sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] = None

  /**
   * Retrieve the jdbc / sql type for a given datatype.
   * @param dt The datatype (e.g. [[org.apache.spark.sql.types.StringType]])
   * @return The new JdbcType if there is an override for this DataType
   */
  def getJDBCType(dt: DataType): Option[JdbcType] = None

  /**
   * Quotes the identifier. This is used to put quotes around the identifier in case the column
   * name is a reserved keyword, or in case it contains characters that require quotes (e.g. space).
   */
  def quoteIdentifier(colName: String): String = {
    s""""$colName""""
  }

  /**
   * Get the SQL query that should be used to find if the given table exists. Dialects can
   * override this method to return a query that works best in a particular database.
   * @param table The name of the table.
   * @return The SQL query to use for checking the table.
   */
  def getTableExistsQuery(table: String): String = {
    s"SELECT * FROM $table WHERE 1=0"
  }

  /**
   * The SQL query that should be used to discover the schema of a table. It only needs to
   * ensure that the result set has the same schema as the table, such as by calling
   * "SELECT * ...". Dialects can override this method to return a query that works best in a
   * particular database.
   * @param table The name of the table.
   * @return The SQL query to use for discovering the schema.
   */
  @Since("2.1.0")
  def getSchemaQuery(table: String): String = {
    s"SELECT * FROM $table WHERE 1=0"
  }

  /**
   * The SQL query that should be used to truncate a table. Dialects can override this method to
   * return a query that is suitable for a particular database. For PostgreSQL, for instance,
   * a different query is used to prevent "TRUNCATE" affecting other tables.
   * @param table The table to truncate
   * @return The SQL query to use for truncating a table
   */
  @Since("2.3.0")
  def getTruncateQuery(table: String): String = {
    getTruncateQuery(table, isCascadingTruncateTable)
  }

  /**
   * The SQL query that should be used to truncate a table. Dialects can override this method to
   * return a query that is suitable for a particular database. For PostgreSQL, for instance,
   * a different query is used to prevent "TRUNCATE" affecting other tables.
   * @param table The table to truncate
   * @param cascade Whether or not to cascade the truncation
   * @return The SQL query to use for truncating a table
   */
  @Since("2.4.0")
  def getTruncateQuery(
      table: String,
      cascade: Option[Boolean] = isCascadingTruncateTable): String = {
    // The default implementation ignores `cascade`; dialects that support
    // cascading truncation override this.
    s"TRUNCATE TABLE $table"
  }

  /**
   * Override connection specific properties to run before a select is made. This is in place to
   * allow dialects that need special treatment to optimize behavior.
   * @param connection The connection object
   * @param properties The connection properties. This is passed through from the relation.
   */
  def beforeFetch(connection: Connection, properties: Map[String, String]): Unit = {
  }

  /**
   * Escape special characters in SQL string literals.
   * @param value The string to be escaped.
   * @return Escaped string.
   */
  @Since("2.3.0")
  protected[jdbc] def escapeSql(value: String): String =
    // Doubles single quotes; null passes through unchanged.
    if (value == null) null else StringUtils.replace(value, "'", "''")

  /**
   * Converts value to SQL expression.
   * @param value The value to be converted.
   * @return Converted value.
   */
  @Since("2.3.0")
  def compileValue(value: Any): Any = value match {
    case stringValue: String => s"'${escapeSql(stringValue)}'"
    case timestampValue: Timestamp => "'" + timestampValue + "'"
    case timestampValue: Instant =>
      // Instants are rendered in the session-local time zone.
      val timestampFormatter = TimestampFormatter.getFractionFormatter(
        DateTimeUtils.getZoneId(SQLConf.get.sessionLocalTimeZone))
      s"'${timestampFormatter.format(timestampValue)}'"
    case dateValue: Date => "'" + dateValue + "'"
    case dateValue: LocalDate => s"'${DateFormatter().format(dateValue)}'"
    // Arrays become a comma-separated list of compiled elements (e.g. for IN clauses).
    case arrayValue: Array[Any] => arrayValue.map(compileValue).mkString(", ")
    // Anything else (numbers, booleans, ...) is emitted as-is.
    case _ => value
  }

  /**
   * Return Some[true] iff `TRUNCATE TABLE` causes cascading by default.
   * Some[true] : TRUNCATE TABLE causes cascading.
   * Some[false] : TRUNCATE TABLE does not cause cascading.
   * None: The behavior of TRUNCATE TABLE is unknown (default).
   */
  def isCascadingTruncateTable(): Option[Boolean] = None

  /**
   * Rename an existing table.
   *
   * @param oldTable The existing table.
   * @param newTable New name of the table.
   * @return The SQL statement to use for renaming the table.
   */
  def renameTable(oldTable: String, newTable: String): String = {
    s"ALTER TABLE $oldTable RENAME TO $newTable"
  }

  /**
   * Alter an existing table.
   *
   * @param tableName The name of the table to be altered.
   * @param changes Changes to apply to the table.
   * @return The SQL statements to use for altering the table.
   */
  def alterTable(
      tableName: String,
      changes: Seq[TableChange],
      dbMajorVersion: Int): Array[String] = {
    val updateClause = ArrayBuilder.make[String]
    for (change <- changes) {
      // Only single-part (non-nested) field names are supported; anything
      // else falls through to the error case below.
      change match {
        case add: AddColumn if add.fieldNames.length == 1 =>
          val dataType = JdbcUtils.getJdbcType(add.dataType(), this).databaseTypeDefinition
          val name = add.fieldNames
          updateClause += getAddColumnQuery(tableName, name(0), dataType)
        case rename: RenameColumn if rename.fieldNames.length == 1 =>
          val name = rename.fieldNames
          updateClause += getRenameColumnQuery(tableName, name(0), rename.newName, dbMajorVersion)
        case delete: DeleteColumn if delete.fieldNames.length == 1 =>
          val name = delete.fieldNames
          updateClause += getDeleteColumnQuery(tableName, name(0))
        case updateColumnType: UpdateColumnType if updateColumnType.fieldNames.length == 1 =>
          val name = updateColumnType.fieldNames
          val dataType = JdbcUtils.getJdbcType(updateColumnType.newDataType(), this)
            .databaseTypeDefinition
          updateClause += getUpdateColumnTypeQuery(tableName, name(0), dataType)
        case updateNull: UpdateColumnNullability if updateNull.fieldNames.length == 1 =>
          val name = updateNull.fieldNames
          updateClause += getUpdateColumnNullabilityQuery(tableName, name(0), updateNull.nullable())
        case _ =>
          throw new AnalysisException(s"Unsupported TableChange $change in JDBC catalog.")
      }
    }
    updateClause.result()
  }

  def getAddColumnQuery(tableName: String, columnName: String, dataType: String): String =
    s"ALTER TABLE $tableName ADD COLUMN ${quoteIdentifier(columnName)} $dataType"

  // `dbMajorVersion` is unused here; dialects whose rename syntax depends on
  // the server version override this method.
  def getRenameColumnQuery(
      tableName: String,
      columnName: String,
      newName: String,
      dbMajorVersion: Int): String =
    s"ALTER TABLE $tableName RENAME COLUMN ${quoteIdentifier(columnName)} TO" +
      s" ${quoteIdentifier(newName)}"

  def getDeleteColumnQuery(tableName: String, columnName: String): String =
    s"ALTER TABLE $tableName DROP COLUMN ${quoteIdentifier(columnName)}"

  def getUpdateColumnTypeQuery(
      tableName: String,
      columnName: String,
      newDataType: String): String =
    s"ALTER TABLE $tableName ALTER COLUMN ${quoteIdentifier(columnName)} $newDataType"

  def getUpdateColumnNullabilityQuery(
      tableName: String,
      columnName: String,
      isNullable: Boolean): String = {
    val nullable = if (isNullable) "NULL" else "NOT NULL"
    s"ALTER TABLE $tableName ALTER COLUMN ${quoteIdentifier(columnName)} SET $nullable"
  }

  def getTableCommentQuery(table: String, comment: String): String = {
    s"COMMENT ON TABLE $table IS '$comment'"
  }

  def getSchemaCommentQuery(schema: String, comment: String): String = {
    s"COMMENT ON SCHEMA ${quoteIdentifier(schema)} IS '$comment'"
  }

  def removeSchemaCommentQuery(schema: String): String = {
    s"COMMENT ON SCHEMA ${quoteIdentifier(schema)} IS NULL"
  }

  /**
   * Gets a dialect exception, classifies it and wraps it by `AnalysisException`.
   * @param message The error message to be placed to the returned exception.
   * @param e The dialect specific exception.
   * @return `AnalysisException` or its sub-class.
   */
  def classifyException(message: String, e: Throwable): AnalysisException = {
    new AnalysisException(message, cause = Some(e))
  }
}
/**
 * :: DeveloperApi ::
 * Registry of dialects that apply to every new jdbc `org.apache.spark.sql.DataFrame`.
 *
 * If multiple matching dialects are registered then all matching ones will be
 * tried in reverse order. A user-added dialect will thus be applied first,
 * overwriting the defaults.
 *
 * @note All new dialects are applied to new jdbc DataFrames only. Make
 * sure to register your dialects first.
 */
@DeveloperApi
object JdbcDialects {

  /**
   * Register a dialect for use on all new matching jdbc `org.apache.spark.sql.DataFrame`.
   * Reading an existing dialect will cause a move-to-front.
   *
   * @param dialect The new dialect.
   */
  def registerDialect(dialect: JdbcDialect) : Unit = {
    // Move-to-front: drop any existing occurrence, then prepend.
    dialects = dialect :: dialects.filterNot(_ == dialect)
  }

  /**
   * Unregister a dialect. Does nothing if the dialect is not registered.
   *
   * @param dialect The jdbc dialect.
   */
  def unregisterDialect(dialect : JdbcDialect) : Unit = {
    dialects = dialects.filterNot(_ == dialect)
  }

  // Registered dialects, most recently registered first.
  private[this] var dialects = List[JdbcDialect]()

  // Built-in dialects; each registration prepends, so the last one
  // registered here ends up at the front of the list.
  registerDialect(MySQLDialect)
  registerDialect(PostgresDialect)
  registerDialect(DB2Dialect)
  registerDialect(MsSqlServerDialect)
  registerDialect(DerbyDialect)
  registerDialect(OracleDialect)
  registerDialect(TeradataDialect)
  registerDialect(H2Dialect)

  /**
   * Fetch the JdbcDialect class corresponding to a given database url.
   */
  def get(url: String): JdbcDialect = {
    val matchingDialects = dialects.filter(_.canHandle(url))
    matchingDialects.length match {
      case 0 => NoopDialect
      case 1 => matchingDialects.head
      // Several dialects claim this url: combine them into one.
      case _ => new AggregatedDialect(matchingDialects)
    }
  }
}
/**
 * NOOP dialect object, always returning the neutral element.
 *
 * Used as the fallback by `JdbcDialects.get` when no registered dialect
 * matches a url; it inherits all default behavior from [[JdbcDialect]].
 */
private object NoopDialect extends JdbcDialect {
  // Accepts any url so that `JdbcDialects.get` always returns a dialect.
  override def canHandle(url : String): Boolean = true
}
| maropu/spark | sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala | Scala | apache-2.0 | 14,180 |
package controllers
import java.util.UUID
import javax.inject.Inject
import com.mohiva.play.silhouette.api._
import com.mohiva.play.silhouette.api.repositories.AuthInfoRepository
import com.mohiva.play.silhouette.api.util.PasswordHasher
import com.mohiva.play.silhouette.impl.providers._
import forms.SignUpForm
import models.User
import models.daos.UserDAO
import org.webjars.play.WebJarsUtil
import play.api.i18n.{I18nSupport, Messages}
import play.api.mvc.{AbstractController, ControllerComponents}
import utils.DefaultEnv
import scala.concurrent.ExecutionContext.Implicits._
import scala.concurrent.Future
/**
 * Controller handling user registration: validates the sign-up form, creates
 * the user and credentials, and logs the new user in.
 */
class SignUp @Inject()(userDAO: UserDAO,
                       authInfoRepository: AuthInfoRepository,
                       passwordHasher: PasswordHasher,
                       components: ControllerComponents,
                       silhouette: Silhouette[DefaultEnv],
                       signUpTemplate: views.html.signUp)
  extends AbstractController(components) with I18nSupport {

  // NOTE(review): this uses SecuredAction, i.e. only an already authenticated
  // user can create new accounts — confirm that this is intended.
  def signUp = silhouette.SecuredAction.async { implicit request =>
    SignUpForm.form.bindFromRequest.fold(
      // Validation failed: re-render the form with its errors.
      form => Future.successful(BadRequest(signUpTemplate(form))),
      data => {
        val loginInfo = LoginInfo(CredentialsProvider.ID, data.email)
        userDAO.retrieve(loginInfo).flatMap {
          case Some(_) =>
            // E-mail already registered: redirect back with a flash message.
            Future.successful(Redirect(routes.Application.signUp()).flashing("error" -> Messages("user.exists")))
          case None =>
            val authInfo = passwordHasher.hash(data.password)
            val user = User(
              id = UUID.randomUUID(),
              name = data.name,
              email = data.email
            )
            // Persist user and credentials, then create, serialize and embed
            // an authenticator so the new user is logged in immediately.
            for {
              user <- userDAO.save(user)
              _ <- authInfoRepository.add(loginInfo, authInfo)
              authenticator <- silhouette.env.authenticatorService.create(loginInfo)
              value <- silhouette.env.authenticatorService.init(authenticator)
              result <- silhouette.env.authenticatorService.embed(value, Redirect(routes.Application.dashboard()))
            } yield {
              // Notify listeners (e.g. auditing) of the sign-up and login.
              silhouette.env.eventBus.publish(SignUpEvent(user, request))
              silhouette.env.eventBus.publish(LoginEvent(user, request))
              result
            }
        }
      }
    )
  }
}
} | wjglerum/bamboesmanager | app/controllers/SignUp.scala | Scala | mit | 2,322 |
package io.backchat.hookup
package examples
import org.json4s._
import org.json4s.jackson.JsonMethods._
/**
 * Example broadcast chat server: every text message received from one client
 * is relayed to all connected clients, and join/leave notices are announced.
 */
object ChatServer {

  import DefaultConversions._

  /**
   * Builds and starts a hookup server named "ChatServer" on port 8127.
   *
   * @return the started server instance
   */
  def makeServer() = {
    val server = HookupServer(ServerInfo("ChatServer", port = 8127)) {
      new HookupServerClient {
        def receive = {
          case Disconnected(_) ⇒
            // Client left: log locally and broadcast the departure.
            println("%s has left" format id)
            this >< "%s has left".format(id)
          case Connected ⇒
            println("%s has joined" format id)
            broadcast("%s has joined" format id)
          case TextMessage(text) ⇒
            // Relay plain-text messages to every connected client.
            println("broadcasting: " + text + " from " + id)
            this >< text
          case m: JsonMessage ⇒
            // JSON payloads are only logged, not re-broadcast.
            println("JsonMessage(" + pretty(render(m.content)) + ")")
        }
      }
    }
    server.start
    server
  }

  /** Entry point: builds and starts the server. */
  // Was `def main(args: Array[String]) { makeServer }`: procedure syntax is
  // deprecated, and the side-effecting `makeServer()` should be called with
  // parentheses, matching its definition.
  def main(args: Array[String]): Unit = {
    makeServer()
  }
}
| backchatio/hookup | src/main/scala/io/backchat/hookup/examples/ChatServer.scala | Scala | mit | 889 |
package com.nikolastojiljkovic.quilltrait.model
import com.nikolastojiljkovic.annotation.{ Field, Table }
/**
 * Page entity mapped to the `page` table.
 *
 * @param id       primary key (None until assigned)
 * @param isRoot   whether this page is a root page (`is_root` column)
 * @param parentId id of the parent page, if any (`parent_id` column)
 * @param sorting  optional sort order among siblings (`sorting` column)
 * @param title    page title
 * @param path     page path
 */
@Table("page")
case class Page(
  @Field("id") id: Option[Int],
  @Field("is_root") isRoot: Boolean,
  @Field("parent_id") parentId: Option[Int],
  @Field("sorting") sorting: Option[Int],
  @Field("title") title: String,
  @Field("path") path: String
) extends EntityWithId
| nstojiljkovic/quill-trait | quill-trait-core/jvm/src/test/scala/com/nikolastojiljkovic/quilltrait/model/Page.scala | Scala | apache-2.0 | 387 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.convert.osm
import java.util.Date
import com.typesafe.config.ConfigFactory
import org.geotools.util.Converters
import org.junit.runner.RunWith
import org.locationtech.geomesa.convert.SimpleFeatureConverters
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class OsmNodesConverterTest extends Specification {

  // Run examples in declaration order.
  sequential

  // Minimal schema: geometry only, no OSM metadata attributes.
  val sftSimpleConf = ConfigFactory.parseString(
    """{ type-name = "osmSimpleNodeType"
      | attributes = [
      | {name = "geom", type = "Point", default = "true" }
      | ]
      |}
    """.stripMargin)

  // Full schema including user, tags and timestamp metadata.
  val sftConf = ConfigFactory.parseString(
    """{ type-name = "osmNodeType"
      | attributes = [
      | { name = "user", type = "String" }
      | { name = "tags", type = "Map[String,String]" }
      | { name = "dtg", type = "Date" }
      | { name = "geom", type = "Point", default = "true" }
      | ]
      |}
    """.stripMargin)

  val simpleSft = SimpleFeatureTypes.createType(sftSimpleConf)
  val sft = SimpleFeatureTypes.createType(sftConf)

  "OSM Node Converter" should {
    "parse simple attributes" >> {
      val parserConf = ConfigFactory.parseString(
        """
          | {
          | type = "osm-nodes"
          | format = "xml" // or pbf
          | id-field = "$id"
          | fields = [
          | { name = "id", attribute = "id", transform = "toString($0)" }
          | { name = "geom", attribute = "geometry" }
          | ]
          | }
        """.stripMargin)
      val converter = SimpleFeatureConverters.build[Any](simpleSft, parserConf)
      converter must beAnInstanceOf[OsmNodesConverter]
      // No metadata attributes referenced, so metadata parsing is skipped.
      converter.asInstanceOf[OsmNodesConverter].needsMetadata must beFalse
      val features = converter.process(getClass.getClassLoader.getResourceAsStream("small.osm")).toList.sortBy(_.getID)
      features must haveLength(4)
      features.map(_.getID) mustEqual Seq("350151", "350152", "350153", "350154")
      features.map(_.getDefaultGeometry.toString) mustEqual Seq("POINT (-6.3341538 54.1790829)",
        "POINT (-6.3339244 54.179083)", "POINT (-6.3316723 54.179379)", "POINT (-6.3314593 54.1794726)")
    }
    "parse metadata" >> {
      val parserConf = ConfigFactory.parseString(
        """
          | {
          | type = "osm-nodes"
          | format = "xml" // or pbf
          | id-field = "$id"
          | fields = [
          | { name = "id", attribute = "id", transform = "toString($0)" }
          | { name = "user", attribute = "user" }
          | { name = "tags", attribute = "tags" }
          | { name = "dtg", attribute = "timestamp" }
          | { name = "geom", attribute = "geometry" }
          | ]
          | }
        """.stripMargin)
      val converter = SimpleFeatureConverters.build[Any](sft, parserConf)
      converter must beAnInstanceOf[OsmNodesConverter]
      // Metadata attributes are referenced, so metadata must be parsed.
      converter.asInstanceOf[OsmNodesConverter].needsMetadata must beTrue
      val features = converter.process(getClass.getClassLoader.getResourceAsStream("small.osm")).toList.sortBy(_.getID)
      features must haveLength(4)
      features.map(_.getID) mustEqual Seq("350151", "350152", "350153", "350154")
      forall(features.map(_.getAttribute("user")))(_ mustEqual "mackerski")
      forall(features.map(_.getAttribute("tags")))(_ mustEqual new java.util.HashMap[String, String]())
      features.map(_.getAttribute("dtg").asInstanceOf[Date]) mustEqual
        Seq("2015-10-28T21:17:49Z", "2015-10-28T21:17:49Z", "2015-10-28T21:17:49Z", "2015-10-28T21:17:49Z")
          .map(Converters.convert(_, classOf[Date]))
      features.map(_.getDefaultGeometry.toString) mustEqual Seq("POINT (-6.3341538 54.1790829)",
        "POINT (-6.3339244 54.179083)", "POINT (-6.3316723 54.179379)", "POINT (-6.3314593 54.1794726)")
    }
    "handle user data" >> {
      val parserConf = ConfigFactory.parseString(
        """
          | {
          | type = "osm-nodes"
          | format = "xml" // or pbf
          | id-field = "$id"
          | user-data = {
          | my.user.key = "$id"
          | }
          | fields = [
          | { name = "id", attribute = "id", transform = "toString($0)" }
          | { name = "geom", attribute = "geometry" }
          | ]
          | }
        """.stripMargin)
      val converter = SimpleFeatureConverters.build[Any](simpleSft, parserConf)
      converter must beAnInstanceOf[OsmNodesConverter]
      converter.asInstanceOf[OsmNodesConverter].needsMetadata must beFalse
      val features = converter.process(getClass.getClassLoader.getResourceAsStream("small.osm")).toList.sortBy(_.getID)
      features must haveLength(4)
      features.map(_.getID) mustEqual Seq("350151", "350152", "350153", "350154")
      features.map(_.getDefaultGeometry.toString) mustEqual Seq("POINT (-6.3341538 54.1790829)",
        "POINT (-6.3339244 54.179083)", "POINT (-6.3316723 54.179379)", "POINT (-6.3314593 54.1794726)")
      // The user-data expression is evaluated per feature and stored on it.
      features.map(_.getUserData.get("my.user.key")) mustEqual Seq("350151", "350152", "350153", "350154")
    }
  }
}
| elahrvivaz/geomesa | geomesa-convert/geomesa-convert-osm/src/test/scala/org/locationtech/geomesa/convert/osm/OsmNodesConverterTest.scala | Scala | apache-2.0 | 5,745 |
package spark.debugger
import java.io._
import scala.collection.JavaConversions._
import scala.collection.immutable
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import spark.Logging
import spark.RDD
import spark.Dependency
import spark.ShuffleDependency
import spark.SparkContext
import spark.SparkContext._
import spark.scheduler.ResultTask
import spark.scheduler.ShuffleMapTask
import spark.scheduler.Task
/**
* Reads events from an event log and provides replay debugging.
*/
/**
 * Reads events from an event log and provides replay debugging.
 *
 * On construction the reader loads all events already present in the log and
 * subscribes to the context's event reporter to receive new events live.
 *
 * @param sc context used to re-register replayed RDDs and receive live events
 * @param eventLogPath path of the event log; when absent, the
 *                     spark.debugger.logPath system property is consulted
 */
class EventLogReader(sc: SparkContext, eventLogPath: Option[String] = None) extends Logging {
  private val events_ = new ArrayBuffer[EventLogEntry]
  private val checksumVerifier = new ChecksumVerifier
  private val rdds = new mutable.HashMap[Int, RDD[_]]

  private def getEventLogPath(): String =
    eventLogPath orElse { Option(System.getProperty("spark.debugger.logPath")) } match {
      case Some(elp) => elp
      case None => throw new UnsupportedOperationException("No event log path provided")
    }

  // Never reassigned after construction, so a val is sufficient.
  private val objectInputStream: EventLogInputStream = {
    val file = new File(getEventLogPath())
    if (file.exists) {
      new EventLogInputStream(new FileInputStream(file), sc)
    } else {
      // Fix: the "%s" placeholder was previously thrown unformatted, so the
      // message contained the literal text "%s" instead of the path.
      throw new UnsupportedOperationException(
        "Event log %s does not exist".format(getEventLogPath()))
    }
  }

  loadNewEvents()
  // Receive new events as they occur
  sc.env.eventReporter.subscribe(addEvent _)

  /** Looks up an RDD by ID. */
  def rdd(id: Int): RDD[_] = rdds(id)

  /** Set of RDD IDs. */
  def rddIds: scala.collection.Set[Int] = rdds.keySet

  /** Sequence of events in the event log. */
  def events: Seq[EventLogEntry] = events_.readOnly

  /** List of checksum mismatches. */
  def checksumMismatches: Seq[ChecksumEvent] = checksumVerifier.mismatches

  /** Prints a human-readable list of RDDs. */
  def printRDDs() {
    for (RDDRegistration(rdd) <- events) {
      println("#%02d: %-20s %s".format(
        rdd.id, rddType(rdd), firstExternalElement(rdd.creationLocation)))
    }
  }

  /** Reads any new events from the event log. */
  def loadNewEvents() {
    logDebug("Loading new events from " + getEventLogPath())
    try {
      while (true) {
        val event = objectInputStream.readObject.asInstanceOf[EventLogEntry]
        addEvent(event)
      }
    } catch {
      // EOF simply means everything written so far has been consumed.
      case e: EOFException => {}
    }
  }

  /**
   * Selects the elements in startRDD that match p, traces them forward until endRDD, and returns
   * the resulting members of endRDD.
   */
  def traceForward[T, U: ClassManifest](
      startRDD: RDD[T], p: T => Boolean, endRDD: RDD[U]): RDD[U] = {
    val taggedEndRDD: RDD[Tagged[U]] = tagRDD[U, T](
      endRDD, startRDD, startRDD.map((t: T) => Tagged(t, BooleanTag(p(t)))))
    taggedEndRDD.filter(tu => tu.tag.isTagged).map(tu => tu.elem)
  }

  /**
   * Traces the given element elem from startRDD forward until endRDD and returns the resulting
   * members of endRDD.
   */
  def traceForward[T, U: ClassManifest](startRDD: RDD[T], elem: T, endRDD: RDD[U]): RDD[U] =
    traceForward(startRDD, { (x: T) => x == elem }, endRDD)

  /**
   * Traces the given element elem from endRDD backward until startRDD and returns the resulting
   * members of startRDD.
   */
  def traceBackward[T: ClassManifest, U: ClassManifest](
      startRDD: RDD[T], elem: U, endRDD: RDD[U]): RDD[T] =
    traceBackwardUsingMappings(startRDD, { (x: U) => x == elem }, endRDD)

  /**
   * Selects the elements in endRDD that match p, traces them backward until startRDD, and returns
   * the resulting members of startRDD. Implemented by backward-tracing the elements in each stage
   * starting from the last one and maintaining the set of elements of interest from one stage to
   * the previous stage.
   */
  def traceBackwardMaintainingSet[T: ClassManifest, U: ClassManifest](
      startRDD: RDD[T],
      p: U => Boolean,
      endRDD: RDD[U]): RDD[T] = {
    if (endRDD.id == startRDD.id) {
      // Casts between RDD[U] and RDD[T] are legal because startRDD is the same as endRDD, so T is
      // the same as U
      startRDD.asInstanceOf[RDD[U]].filter(p).asInstanceOf[RDD[T]]
    } else {
      val (taggedEndRDD, firstRDDInStage) = tagRDDWithinStage(
        endRDD, startRDD, getParentStageRDDs(endRDD))
      // TODO: find the set of partitions of endRDD that contain elements that match p
      val tags = sc.broadcast(taggedEndRDD.filter(tu => p(tu.elem)).map(tu => tu.tag)
        .fold(IntSetTag.empty)(_ union _))
      val sourceElems = new UniquelyTaggedRDD(firstRDDInStage)
        .filter(taggedElem => (tags.value intersect taggedElem.tag).isTagged)
        .map((tx: Tagged[_]) => tx.elem).collect()
      // Casting from RDD[_] to RDD[Any] is legal because RDD is essentially covariant
      traceBackwardMaintainingSet[T, Any](
        startRDD,
        (x: Any) => sourceElems.contains(x),
        firstRDDInStage.asInstanceOf[RDD[Any]])
    }
  }

  /**
   * Selects the elements in endRDD that match p, traces them backward until startRDD, and returns
   * the resulting members of startRDD. Implemented by uniquely tagging the elements of startRDD,
   * tracing the tags all the way to endRDD in a single step, and returning the elements in startRDD
   * whose tags ended up on the elements of interest in endRDD.
   */
  def traceBackwardSingleStep[T: ClassManifest, U: ClassManifest](
      startRDD: RDD[T], p: U => Boolean, endRDD: RDD[U]): RDD[T] = {
    val taggedEndRDD: RDD[Tagged[U]] = tagRDD[U, T](
      endRDD, startRDD, tagElements(startRDD, (t: T) => true))
    val tags = sc.broadcast(
      taggedEndRDD.filter(tu => p(tu.elem)).map(tu => tu.tag).fold(IntSetTag.empty)(_ union _))
    val taggedStartRDD = new UniquelyTaggedRDD(startRDD)
    taggedStartRDD.filter(tt => (tags.value intersect tt.tag).isTagged).map(tt => tt.elem)
  }

  /**
   * Selects the elements in endRDD that match p, traces them backward until startRDD, and returns
   * the resulting members of startRDD. Implemented using traceBackwardGivenMappings.
   */
  def traceBackwardUsingMappings[T: ClassManifest, U: ClassManifest](
      startRDD: RDD[T], p: U => Boolean, endRDD: RDD[U]): RDD[T] = {
    val stageMappings = buildBackwardTraceMappings(startRDD, endRDD)
    traceBackwardGivenMappings(startRDD, p, endRDD, stageMappings)
  }

  /**
   * Given a set of mappings from the tags of one stage to the next, selects the elements in endRDD
   * that match p, finds the tags associated with those elements, traces the tags backward using the
   * mappings, and returns the elements in startRDD associated with the resulting tags.
   */
  def traceBackwardGivenMappings[T: ClassManifest, U: ClassManifest](
      startRDD: RDD[T],
      p: U => Boolean,
      endRDD: RDD[U],
      stageMappings: List[(Option[RDD[(Tag, Tag)]], RDD[Tagged[_]], RDD[Tagged[_]])]): RDD[T] = {
    stageMappings.lastOption match {
      case Some((_, _, lastStageTaggedEndRDD)) =>
        // Casting RDD[Tagged[_]] to RDD[Tagged[U]] is legal because lastStageTaggedEndRDD is
        // actually a tagged version of endRDD
        val tagsInEndRDD = lastStageTaggedEndRDD.asInstanceOf[RDD[Tagged[U]]]
          .filter(taggedElem => p(taggedElem.elem)).map(_.tag)
        val tagsInStartRDD = stageMappings.flatMap {
          case (mappingOption, _, _) => mappingOption
        }.foldRight(tagsInEndRDD) {
          (mapping, tagsSoFar) =>
            // tagsSoFar contains a subset of the new tags in mapping. We want to trace tagsSoFar
            // backwards by one stage, so we join tagsSoFar and mapping on these new tags and
            // extract the old tag associated with each new tag.
            val mappingNewToOld = mapping.map {
              case (oldTag, newTag) => (newTag, oldTag)
            }
            tagsSoFar.map(tag => (tag, ())).join(mappingNewToOld).map {
              case (newTag, ((), oldTag)) => oldTag
            }
        }
        // Calling head is legal because we know stageMappings is non-empty, since lastOption
        // returned Some
        stageMappings.head match {
          case (_, taggedStartRDD, _) =>
            // Casting RDD[Tagged[_]] to RDD[Tagged[T]] is legal because taggedStartRDD is a tagged
            // version of startRDD
            val elems = taggedStartRDD.asInstanceOf[RDD[Tagged[T]]]
            val tags = sc.broadcast(tagsInStartRDD.fold(IntSetTag.empty)(_ union _))
            elems.filter(tt => (tags.value intersect tt.tag).isTagged).map(tt => tt.elem)
        }
      case None =>
        sc.parallelize(List())
    }
  }

  /**
   * For each stage, produces a mapping from tags propagated from the first RDD of the previous
   * stage to the current RDD, to single-element tags representing the elements in the first RDD of
   * the stage. Returns the list of such mappings, one per stage, along with the tagged start RDD of
   * the stage and the tagged end RDD.
   */
  def buildBackwardTraceMappings(
      startRDD: RDD[_], endRDD: RDD[_])
    : List[(Option[RDD[(Tag, Tag)]], RDD[Tagged[_]], RDD[Tagged[_]])] = {
    val stages: List[(RDD[_], RDD[Tagged[_]])] = tagStages(startRDD, endRDD)
    val prevStages: List[Option[(RDD[_], RDD[Tagged[_]])]] =
      None :: stages.map(stage => Some(stage))
    for (((startRDD, taggedEndRDD), prevStage) <- stages.zip(prevStages))
    yield {
      // Casting from UniquelyTaggedRDD[A] forSome { type A } to RDD[Tagged[_]] is legal because
      // UniquelyTaggedRDD[A] is effectively covariant in A
      val taggedStartRDD = new UniquelyTaggedRDD(startRDD).asInstanceOf[RDD[Tagged[_]]]
      val tagMap = prevStage.map {
        case (prevStartRDD, prevTaggedEndRDD) =>
          // prevTaggedEndRDD and taggedStartRDD are differently-tagged versions of the same RDD. We
          // join the two RDDs on their elements to extract the mapping from old tags to new tags
          // across the two stages.
          val oldTagged = prevTaggedEndRDD.map((tagged: Tagged[_]) => (tagged.elem, tagged.tag))
          val newTagged = taggedStartRDD.map((tagged: Tagged[_]) => (tagged.elem, tagged.tag))
          oldTagged.join(newTagged).map {
            case (elem, (oldTag, newTag)) => (oldTag, newTag)
          }
      }
      (tagMap, taggedStartRDD, taggedEndRDD)
    }
  }

  /**
   * For each stage from startRDD to endRDD, finds the start RDD of the stage and the end RDD tagged
   * within the stage, and returns a list of pairs of these RDDs.
   */
  private def tagStages(startRDD: RDD[_], endRDD: RDD[_]): List[(RDD[_], RDD[Tagged[_]])] = {
    if (endRDD.id == startRDD.id || !rddPathExists(startRDD, endRDD)) {
      List()
    } else {
      val (taggedEndRDD, firstRDDInStage) = tagRDDWithinStage(
        endRDD, startRDD, getParentStageRDDs(endRDD))
      // Casting RDD[Tagged[A]] forSome { type A } to RDD[Tagged[_]] is legal because RDD[Tagged[A]]
      // is effectively covariant in A
      val rddsForStage = (firstRDDInStage, taggedEndRDD.asInstanceOf[RDD[Tagged[_]]])
      tagStages(startRDD, firstRDDInStage) :+ rddsForStage
    }
  }

  // Tags rdd within a single stage, returning the tagged RDD and the first RDD of that stage.
  private def tagRDDWithinStage[A, T](
      rdd: RDD[A],
      startRDD: RDD[T],
      parentStageRDDs: Set[RDD[_]]): (RDD[Tagged[A]], RDD[_]) = {
    if (!rddPathExists(startRDD, rdd)) {
      (rdd.map(elem => Tagged(elem, IntSetTag.empty)), startRDD)
    } else if (rdd.id == startRDD.id || parentStageRDDs.contains(rdd)) {
      (new UniquelyTaggedRDD(rdd), rdd)
    } else {
      val dependencyResults = new ArrayBuffer[RDD[_]]
      val taggedRDD = rdd.tagged(new RDDTagger {
        def apply[B](prev: RDD[B]): RDD[Tagged[B]] = {
          val (taggedPrev, firstRDDInStage) =
            tagRDDWithinStage[B, T](prev, startRDD, parentStageRDDs)
          dependencyResults += firstRDDInStage
          taggedPrev
        }
      })
      (taggedRDD, dependencyResults.max(new Ordering[RDD[_]] {
        def compare(x: RDD[_], y: RDD[_]): Int = x.id - y.id
      }))
    }
  }

  // True when endRDD is reachable from startRDD through the dependency graph.
  private def rddPathExists(startRDD: RDD[_], endRDD: RDD[_]): Boolean = {
    if (startRDD.id == endRDD.id) {
      true
    } else {
      // exists short-circuits on the first matching dependency, unlike the
      // previous foldLeft which always recursed into every dependency; the
      // boolean result is the same.
      endRDD.dependencies.exists(dep => rddPathExists(startRDD, dep.rdd))
    }
  }

  // Rebuilds rdd's lineage with startRDD replaced by its tagged version.
  private def tagRDD[A, T](
      rdd: RDD[A],
      startRDD: RDD[T],
      taggedStartRDD: RDD[Tagged[T]]): RDD[Tagged[A]] = {
    if (rdd.id == startRDD.id) {
      // (rdd: RDD[A]) is the same as (startRDD: RDD[T]), so T is the same as A, so we can cast
      // RDD[Tagged[T]] to RDD[Tagged[A]]
      taggedStartRDD.asInstanceOf[RDD[Tagged[A]]]
    } else {
      rdd.tagged(new RDDTagger {
        def apply[B](prev: RDD[B]): RDD[Tagged[B]] = {
          tagRDD[B, T](prev, startRDD, taggedStartRDD)
        }
      })
    }
  }

  /** Takes an RDD and returns a set of RDDs representing the parent stages. */
  private def getParentStageRDDs(rdd: RDD[_]): Set[RDD[_]] = {
    val parentStageRDDs = new mutable.HashSet[RDD[_]]
    val visited = new mutable.HashSet[RDD[_]]
    def visit(r: RDD[_]) {
      if (!visited(r)) {
        visited += r
        for (dep <- r.dependencies) {
          dep match {
            case shufDep: ShuffleDependency[_,_,_] =>
              // A shuffle dependency marks a stage boundary; do not recurse past it.
              parentStageRDDs.add(dep.rdd)
            case _ =>
              visit(dep.rdd)
          }
        }
      }
    }
    visit(rdd)
    // toSet is necessary because for some reason Scala doesn't think a mutable.HashSet[RDD[_]] is a
    // Set[RDD[_]]
    parentStageRDDs.toSet
  }

  // Uniquely tags every element, emptying the tag on elements that fail p.
  private def tagElements[T](rdd: RDD[T], p: T => Boolean): RDD[Tagged[T]] = {
    new UniquelyTaggedRDD(rdd).map {
      case Tagged(elem, tag) => Tagged(elem, if (p(elem)) tag else IntSetTag.empty)
    }
  }

  // Records the event and keeps the context's ID counters ahead of replayed IDs.
  private def addEvent(event: EventLogEntry) {
    events_ += event
    event match {
      case RDDRegistration(rdd) =>
        // TODO(ankurdave): Check that the RDD ID and shuffle IDs aren't already in use. This may
        // happen if the EventLogReader is passed a SparkContext that has previously been used for
        // some computation.
        logDebug("Updating RDD ID to be greater than " + rdd.id)
        sc.updateRddId(rdd.id)
        if (rdd.dependencies != null) {
          rdd.dependencies.collect {
            case shufDep: ShuffleDependency[_,_,_] => shufDep.shuffleId
          } match {
            case Seq() => {}
            case shuffleIds =>
              val maxShuffleId = shuffleIds.max
              logDebug("Updating shuffle ID to be greater than " + maxShuffleId)
              sc.updateShuffleId(maxShuffleId)
          }
        } else {
          logError("Dependency list for RDD %d (%s) is null".format(rdd.id, rdd))
        }
        rdds(rdd.id) = rdd
      case c: ChecksumEvent =>
        checksumVerifier.verify(c)
      case t: TaskSubmission =>
        t.tasks.map(_.stageId) match {
          case Seq() => {}
          case stageIds =>
            val maxStageId = stageIds.max
            logDebug("Updating stage ID to be greater than " + maxStageId)
            sc.updateStageId(maxStageId)
        }
      case _ => {}
    }
  }

  // First stack frame outside the spark package, used to describe where an RDD was created.
  private def firstExternalElement(location: Array[StackTraceElement]) =
    // drop(1) tolerates an empty stack trace, where tail would throw.
    // Fix: in a triple-quoted string, """spark\\.""" denotes the regex
    // spark\\. (an escaped literal backslash), which can never match class
    // names such as "spark.RDD"; a single backslash escapes the dot as intended.
    (location.drop(1).find(!_.getClassName.matches("""spark\.[A-Z].*"""))
      orElse { location.headOption }
      getOrElse { "" })

  // Short class name of the RDD, with the leading "spark." package prefix removed.
  private def rddType(rdd: RDD[_]): String =
    rdd.getClass.getName.replaceFirst("""^spark\.""", "")
}
| ankurdave/arthur | core/src/main/scala/spark/debugger/EventLogReader.scala | Scala | bsd-3-clause | 15,389 |
package gh2011b.models
import net.liftweb.json.JsonAST.JValue
/** Payload of a GitHub fork event: the repository created by the fork. */
case class ForkEventPayload(forkee: Forkee)

object ForkEventPayload
{
   /**
    * Parses the payload from event JSON, reading the forkee from the
    * "forkee" field.
    *
    * @return the parsed payload, or None when the forkee cannot be parsed
    */
   def apply(json: JValue): Option[ForkEventPayload] =
      // map replaces the previous isDefined/get pair, avoiding the unsafe
      // Option#get call while producing the same result.
      Forkee(json \ "forkee").map(f => ForkEventPayload(f))
}
| mgoeminne/github_etl | src/main/scala/gh2011b/models/ForkEventPayload.scala | Scala | mit | 325 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package expr
import com.intellij.psi.PsiElement
/**
* @author Alexander Podkhalyuzin
* Date: 06.03.2008
*/
trait ScMethodCall extends ScExpression with MethodInvocation {
  /** Unwraps nested/curried calls like f(1)(2)(3), returning the innermost invoked expression. */
  def deepestInvokedExpr: ScExpression = {
    getEffectiveInvokedExpr match {
      case call: ScMethodCall => call.deepestInvokedExpr
      case expr => expr
    }
  }

  def args: ScArgumentExprList = findChildByClassScala(classOf[ScArgumentExprList])

  override def accept(visitor: ScalaElementVisitor) {
    visitor.visitMethodCallExpression(this)
  }

  // A call is an update call when it is the left-hand side of an assignment,
  // e.g. x(i) = value. Pattern matching replaces the previous
  // isInstanceOf/asInstanceOf pair and mirrors updateExpression below.
  override def isUpdateCall: Boolean = getContext match {
    case assign: ScAssignStmt => assign.getLExpression == this
    case _ => false
  }

  /** Right-hand side of the enclosing assignment, when this call is its left-hand side. */
  def updateExpression(): Option[ScExpression] = {
    getContext match {
      case a: ScAssignStmt if a.getLExpression == this => a.getRExpression
      case _ => None
    }
  }

  def argsElement: PsiElement = args

  /**
   * If named parameters enabled for this method even if it is from java; needed for Play 2 support
   */
  def isNamedParametersEnabledEverywhere: Boolean = false
}
object ScMethodCall {
  /** Extractor yielding the invoked expression and its argument expressions. */
  def unapply(call: ScMethodCall) =
    // Explicit tuple: relying on auto-tupling of the two arguments is
    // deprecated and triggers an adaptation warning.
    Some((call.getInvokedExpr, call.argumentExpressions))
}
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.kernel.protocol.v5.content
import org.scalatest.{FunSpec, Matchers}
import play.api.data.validation.ValidationError
import play.api.libs.json._
class KernelInfoReplySpec extends FunSpec with Matchers {
  // JSON wire form that the reply instance below must round-trip through.
  val kernelInfoReplyJson: JsValue = Json.parse("""
  {
    "protocol_version": "x.y.z",
    "implementation": "<name>",
    "implementation_version": "z.y.x",
    "language": "<some language>",
    "language_version": "a.b.c",
    "banner": "<some banner>"
  }
  """)

  // The model instance corresponding field-for-field to the JSON above.
  val kernelInfoReply: KernelInfoReply = KernelInfoReply(
    "x.y.z", "<name>", "z.y.x", "<some language>", "a.b.c", "<some banner>"
  )

  describe("KernelInfoReply") {
    describe("implicit conversions") {
      it("should implicitly convert from valid json to a kernelInfoReply instance") {
        // as[...] throws when the JSON does not match, making it the least
        // safe of the three conversion styles exercised here.
        kernelInfoReplyJson.as[KernelInfoReply] shouldBe kernelInfoReply
      }
      it("should also work with asOpt") {
        // asOpt trades error detail for safety: a failed conversion yields None.
        val parsed: Option[KernelInfoReply] = kernelInfoReplyJson.asOpt[KernelInfoReply]
        parsed.get shouldBe kernelInfoReply
      }
      it("should also work with validate") {
        // validate accumulates every conversion error rather than stopping at
        // the first one, so it is the safest style.
        val validationResult = kernelInfoReplyJson.validate[KernelInfoReply]
        validationResult.fold(
          (invalid: Seq[(JsPath, Seq[ValidationError])]) => println("Failed!"),
          (valid: KernelInfoReply) => valid
        ) shouldBe kernelInfoReply
      }
      it("should implicitly convert from a kernelInfoReply instance to valid json") {
        Json.toJson(kernelInfoReply) shouldBe kernelInfoReplyJson
      }
    }
  }
}
| bpburns/spark-kernel | protocol/src/test/scala/com/ibm/spark/kernel/protocol/v5/content/KernelInfoReplySpec.scala | Scala | apache-2.0 | 2,473 |
package geek.lawsof.physics.lib.block
import geek.lawsof.physics.lib.block.te.TileEntityBase
import geek.lawsof.physics.lib.block.traits.{IconArray, singleSided}
import geek.lawsof.physics.lib.item.ItemDescriptor
import net.minecraft.client.renderer.texture.IIconRegister
import net.minecraft.world.World
/**
* Created by anshuman on 15-07-2014.
*/
/**
 * Describes one sub-block (metadata variant) of a [[BlockBase]]: its internal
 * name, its icon set and its corresponding item form.
 */
class BlockDescriptor(val intName: String, val icons: IconArray, val item: ItemDescriptor) {
  // Convenience constructor: single-sided icons and a default item, both named after intName.
  def this(intName: String) = this(intName, IconArray(intName, singleSided()), new ItemDescriptor(intName))
  // Back-reference to the block this descriptor is registered with; null until registered.
  var block: BlockBase = null
  // Registers this descriptor as the next metadata variant of reg, assigning
  // it reg's current metaCount and bumping the counter.
  // NOTE(review): the guard rejects registration only when metaCount is
  // exactly 15 — presumably the metadata limit — so callers should confirm
  // metaCount never skips past that value.
  def +:(reg: BlockBase) = {
    if (reg.metaCount != 15) {
      block = reg
      block.blocks += block.metaCount -> this
      block.metaCount += 1
    }
    else throw new IndexOutOfBoundsException("The Amount Of SubBlocks Is TOO DAMN HIGH!")
  }
  // Alias for +: with the receiver/argument order flipped.
  def register(reg: BlockBase) = reg +: this
  def registerIcons(reg: IIconRegister) = icons.register(reg)
  // A descriptor has a tile entity only when a subclass overrides teClass.
  def hasTE = teClass != null
  def teClass: Class[_ <: TileEntityBase] = null
  def registerTE() = if (hasTE) teClass.newInstance().registerTE(intName)
  // NOTE(review): the World parameter is currently unused — confirm whether
  // tile entities are meant to be initialised with it.
  def createTE(w: World): TileEntityBase = if (hasTE) teClass.newInstance() else null
}
| GeckoTheGeek42/TheLawsOfPhysics | src/main/scala/geek/lawsof/physics/lib/block/BlockDescriptor.scala | Scala | mit | 1,192 |
package org.jetbrains.plugins.scala.codeInsight.moveLeftRight
/**
* @author Nikolay.Tropin
*/
// Each fixture string encodes the editor state; <caret> marks the cursor and
// the helper methods apply successive move-left/move-right actions, checking
// the document after each step.
class ScalaMoveLeftRightTest extends ScalaMoveLeftRightTestBase {
  def testMethodArgs(): Unit = {
    doTestFromLeftToRight(
      "Seq(<caret>1, 22, 333)",
      "Seq(22, <caret>1, 333)",
      "Seq(22, 333, <caret>1)"
    )
  }
  def testMethodParams(): Unit = {
    doTestFromRightToLeft(
      "def example(s: String, i: Int, <caret>b: Boolean): Unit = {}",
      "def example(s: String, <caret>b: Boolean, i: Int): Unit = {}",
      "def example(<caret>b: Boolean, s: String, i: Int): Unit = {}"
    )
  }
  def testClassParams(): Unit = {
    doTestFromLeftToRight(
      "class Person(val id<caret>: Long, name: String)",
      "class Person(name: String, val id<caret>: Long)"
    )
  }
  def testTypeParams(): Unit = {
    doTestFromLeftToRight(
      "class Pair[T<caret>1, T2](t1: T1, t2: T2)",
      "class Pair[T2, T<caret>1](t1: T1, t2: T2)"
    )
  }
  def testTypeArgs(): Unit = {
    doTestFromLeftToRight(
      "new Pair[Int<caret>, Boolean](0, true)",
      "new Pair[Boolean, Int<caret>](0, true)"
    )
  }
  def testPatternArgs(): Unit = {
    doTestFromRightToLeft (
      "val List(x, <caret>y) = List(1, 2)",
      "val List(<caret>y, x) = List(1, 2)"
    )
  }
  def testTuple(): Unit = {
    doTestFromLeftToRight(
      "val (x, y) = (<caret>1, 2)",
      "val (x, y) = (2, <caret>1)"
    )
  }
  def testTuplePattern(): Unit = {
    doTestFromLeftToRight(
      "val (<caret>x, y) = (1, 2)",
      "val (y, <caret>x) = (1, 2)"
    )
  }
  def testTupleType(): Unit = {
    doTestFromLeftToRight(
      "val x: Option[(<caret>Int, String)] = None",
      "val x: Option[(String, <caret>Int)] = None"
    )
  }
  def testCaseClauses(): Unit = {
    doTestFromRightToLeft(
      """1 match {
        |  case 0 => false
        |  case <caret>1 => true
        |}""".stripMargin.replace("\r", ""),
      """1 match {
        |  case <caret>1 => true
        |  case 0 => false
        |}""".stripMargin.replace("\r", "")
    )
  }
  def testInfixExpr(): Unit ={
    doTestFromLeftToRight(
      "<caret>1 + 2 + 3",
      "2 + <caret>1 + 3",
      "2 + 3 + <caret>1"
    )
  }
  // Operands may only swap with neighbours of the same precedence level.
  def testInfixExprDifferentPriority(): Unit = {
    doTestFromLeftToRight(
      "1 + <caret>2 * 3 + 3 * 4",
      "1 + 3 * <caret>2 + 3 * 4"
    )
    doTestFromRightToLeft(
      "1 + 2 * 3 + 3 <caret>* 4",
      "1 + 3 <caret>* 4 + 2 * 3",
      "3 <caret>* 4 + 1 + 2 * 3"
    )
  }
  // A non-symbolic infix call is not reorderable.
  def testInfixExprNonOperator(): Unit = {
    checkMoveRightIsDisabled("Se<caret>q(1) foreach println")
  }
  // The assignment target itself must stay put; only the right-hand operands move.
  def testInfixExprAssignOperator(): Unit = {
    checkMoveRightIsDisabled("<caret>x += 1 + 2 + 3")
    doTestFromLeftToRight(
      "x += <caret>1 + 2 + 3",
      "x += 2 + <caret>1 + 3",
      "x += 2 + 3 + <caret>1"
    )
  }
  def testInfixPattern(): Unit = {
    doTestFromLeftToRight(
      "val <caret>x :: y :: Nil = 1 :: 2 :: Nil",
      "val y :: <caret>x :: Nil = 1 :: 2 :: Nil",
      "val y :: Nil :: <caret>x = 1 :: 2 :: Nil"
    )
  }
  def testInfixType(): Unit = {
    doTestFromLeftToRight(
      "val hList: <caret>String :: Int :: HNil = ???",
      "val hList: Int :: <caret>String :: HNil = ???",
      "val hList: Int :: HNil :: <caret>String = ???"
    )
  }
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/codeInsight/moveLeftRight/ScalaMoveLeftRightTest.scala | Scala | apache-2.0 | 3,299 |
/*
* Copyright 2013 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s
package headers
import cats.data.NonEmptyList
import cats.syntax.foldable._
import org.http4s.laws.discipline.arbitrary._
import org.http4s.syntax.header._
import org.scalacheck.Prop.forAll
// Exercises parsing/rendering of the Transfer-Encoding header plus the
// generic header laws from HeaderLaws.
class TransferEncodingSuite extends HeaderLaws {
  checkAll("TransferEncoding", headerLaws[`Transfer-Encoding`])
  test("render should include all the encodings") {
    assertEquals(
      `Transfer-Encoding`(TransferCoding.chunked).renderString,
      "Transfer-Encoding: chunked",
    )
    assertEquals(
      `Transfer-Encoding`(TransferCoding.chunked, TransferCoding.gzip).renderString,
      "Transfer-Encoding: chunked, gzip",
    )
  }
  test("parse should accept single codings") {
    assertEquals(
      `Transfer-Encoding`.parse("chunked").map(_.values),
      Right(NonEmptyList.one(TransferCoding.chunked)),
    )
  }
  // Whitespace after the comma separator is optional per the header grammar.
  test("parse should accept multiple codings") {
    assertEquals(
      `Transfer-Encoding`.parse("chunked, gzip").map(_.values),
      Right(NonEmptyList.of(TransferCoding.chunked, TransferCoding.gzip)),
    )
    assertEquals(
      `Transfer-Encoding`.parse("chunked,gzip").map(_.values),
      Right(NonEmptyList.of(TransferCoding.chunked, TransferCoding.gzip)),
    )
  }
  // Property: hasChunked agrees with membership of the chunked coding.
  test("hasChunked should detect chunked") {
    forAll { (t: `Transfer-Encoding`) =>
      assertEquals(t.hasChunked, (t.values.contains_(TransferCoding.chunked)))
    }
  }
}
| http4s/http4s | tests/shared/src/test/scala/org/http4s/headers/TransferEncodingSuite.scala | Scala | apache-2.0 | 1,996 |
package com.datawizards.dqm.alert.dto
/**
 * A single attachment of a Slack alert message, whose field names mirror the
 * keys of Slack's attachment JSON.
 *
 * NOTE(review): field semantics are inferred from Slack's attachment API —
 * confirm against the alert sender that serializes this class.
 *
 * @param color    sidebar color of the attachment
 * @param title    attachment title
 * @param text     main body text
 * @param fallback plain-text summary for clients that cannot render attachments
 * @param footer   short footer line
 * @param ts       footer timestamp (presumably epoch seconds — TODO confirm)
 */
case class SlackMessageAttachment(
   color: String,
   title: String,
   text: String,
   fallback: String,
   footer: String,
   ts: Long
)
| piotr-kalanski/data-quality-monitoring | src/main/scala/com/datawizards/dqm/alert/dto/SlackMessageAttachment.scala | Scala | apache-2.0 | 406 |
/**
* Copyright (c) 2013 Saddle Development Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
package org.saddle
import org.scalacheck.Gen
import org.joda.time._
import org.saddle.time._
// ScalaCheck generators producing Saddle Index instances for property tests.
object IndexArbitraries {
  // Generates int index with duplicates
  def indexIntWithDups: Gen[Index[Int]] = for {
    l <- Gen.choose(0, 20)
    lst <- Gen.listOfN(l, Gen.chooseNum(0, l))
  } yield lst.toIndex
  // Same as above, but routed through a Set so every element is distinct.
  def indexIntNoDups: Gen[Index[Int]] = for {
    l <- Gen.choose(0, 20)
    lst <- Gen.listOfN(l, Gen.chooseNum(0, l))
  } yield lst.toSet[Int].toSeq.toIndex
  // All generated instants are anchored to one zone so ordering is stable.
  val zone = DateTimeZone.forID("America/New_York")
  // Random midnight in 2012-2013; day capped at 28 to stay valid in February.
  def getDate: Gen[DateTime] = for {
    m <- Gen.choose(1,12)
    d <- Gen.choose(1,28)
    y <- Gen.choose(2012, 2013)
  } yield new DateTime(y, m, d, 0, 0, 0, 0, zone)
  // Time index that may contain duplicate instants.
  def indexTimeWithDups: Gen[Index[DateTime]] = for {
    l <- Gen.choose(0, 100)
    lst <- Gen.listOfN(l, getDate)
  } yield lst.toIndex
  // Time index with duplicates removed via an intermediate Set.
  def indexTimeNoDups: Gen[Index[DateTime]] = for {
    l <- Gen.choose(0, 100)
    lst <- Gen.listOfN(l, getDate)
  } yield lst.toSet[DateTime].toSeq.toIndex
}
| amaizing/amaizing-saddle | saddle-test-framework/src/test/scala/org/saddle/IndexArbitraries.scala | Scala | apache-2.0 | 1,611 |
/*
* Copyright (c) 2011 ScalaStuff.org (joint venture of Alexander Dvorkovyy and Ruud Diterwich)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalastuff.proto
import java.nio.charset.Charset
import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
/**
 * Contract for beans exercised by serialization round-trip tests.
 *
 * `set1()` populates the bean with sample values; `assertEquals` compares this
 * bean with another instance, failing the enclosing test on mismatch.
 */
trait TestBean[B <: TestBean[B]] {
  def set1(): B
  def assertEquals(other: B): Unit
}
// Helpers that round-trip beans through every supported serialization format.
object TestFormat {
  import Preamble._
  // Formats under test; each check below runs once per format.
  val formats = List(ProtobufFormat, GraphProtostuffFormat, JsonFormat, JsonFieldTagsFormat, XmlFormat)
  // Round-trips both a default-constructed bean and one populated via set1().
  def checkFormats[T <: TestBean[T]](ctor: () => T)(implicit mf: Manifest[T]) {
    for (format <- formats)
      checkTestBeanSerDeser(ctor, format)
  }
  def checkTestBeanSerDeser[T <: TestBean[T]](ctor: () => T, format: SerializationFormat)(implicit mf: Manifest[T]) {
    checkSerDeser(ctor(), format) { _.assertEquals(_) }
    checkSerDeser(ctor().set1(), format) { _.assertEquals(_) }
  }
  // Round-trips an arbitrary bean through every format using a caller-supplied
  // equality check.
  def checkSerDeserFormats[T <: AnyRef](bean: T)(check: (T, T) => Unit)(implicit mf: Manifest[T]) {
    for (format <- formats)
      checkSerDeser(bean, format)(check)
  }
  // Serializes bean in the given format, prints a readable dump of the bytes
  // (UTF-8 text for JSON/XML, hex otherwise), deserializes and checks; then
  // repeats the round trip through java.io streams.
  def checkSerDeser[T <: AnyRef](bean: T, format: SerializationFormat)(check: (T, T) => Unit)(implicit mf: Manifest[T]) {
    val reader = readerOf[T]
    val writer = writerOf[T]
    val buffer = writer.toByteArray(bean, format)
    val bufferStr = format match {
      case _: JsonFormat | _: XmlFormat.type => new String(buffer, Charset.forName("UTF-8"))
      case _ => buffer map ("%02X" format _) mkString " "
    }
    println("%s (%s), %d bytes: %s".format(
      mf.erasure.getSimpleName,
      format.getClass.getSimpleName stripSuffix "Format$",
      buffer.length,
      bufferStr))
    val deserialized = reader.readFrom(buffer, format)
    check(bean, deserialized)
    // check ser/deser via streams
    val out = new ByteArrayOutputStream()
    writer.writeTo(out, bean, format)
    val buffer2 = out.toByteArray()
    val in = new ByteArrayInputStream(buffer2)
    val deserializedFromStream = reader.readFrom(in, format)
    check(bean, deserializedFromStream)
  }
}
package com.github.mauricio.async.db.exceptions
import com.github.mauricio.async.db.Connection
/**
 * Signals that an operation required a live database connection but the given
 * connection is not currently connected.
 *
 * @param connection the connection that was expected to be connected
 */
class ConnectionNotConnectedException(val connection: Connection)
  extends DatabaseException(
    "The connection %s is not connected to the database".format(connection)
  )
| dripower/postgresql-async | db-async-common/src/main/scala/com/github/mauricio/async/db/exceptions/ConnectionNotConnectedException.scala | Scala | apache-2.0 | 278 |
package com.twitter.finagle.mux
import com.twitter.finagle.stats.NullStatsReceiver
import com.twitter.util.Future
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
// Verifies how FailureDetector selects an implementation from config values
// and the sessionFailureDetector global flag.
class FailureDetectorTest extends FunSuite
{
  // Ping/close stubs that always complete immediately.
  def ping = () => Future.Done
  def close = () => Future.Done
  val statsReceiver = NullStatsReceiver
  test("default settings with flag override") {
    sessionFailureDetector.let("threshold") {
      // Destructure the default Stack param to obtain its config.
      val FailureDetector.Param(failDetectorConfig) = FailureDetector.Param.param.default
      assert(FailureDetector(failDetectorConfig, ping, close, statsReceiver).isInstanceOf[ThresholdFailureDetector])
    }
  }
  test("flag settings with flag set to none") {
    sessionFailureDetector.let("none") {
      assert(NullFailureDetector == FailureDetector(FailureDetector.GlobalFlagConfig, ping, close, statsReceiver))
    }
  }
  // Unrecognized flag values fall back to the null detector.
  test("flag settings with invalid string") {
    sessionFailureDetector.let("tacos") {
      assert(NullFailureDetector == FailureDetector(FailureDetector.GlobalFlagConfig, ping, close, statsReceiver))
    }
  }
  test("flag settings with valid string") {
    sessionFailureDetector.let("threshold") {
      assert(FailureDetector(FailureDetector.GlobalFlagConfig, ping, close, statsReceiver).isInstanceOf[ThresholdFailureDetector])
    }
  }
  // An explicit NullConfig bypasses the flag entirely.
  test("request null gets null") {
    assert(NullFailureDetector == FailureDetector(FailureDetector.NullConfig, ping, close, statsReceiver))
  }
  test("explicit threshold used") {
    assert(FailureDetector(FailureDetector.ThresholdConfig(), ping, close, statsReceiver).isInstanceOf[ThresholdFailureDetector])
  }
}
| almendar/finagle | finagle-mux/src/test/scala/com/twitter/finagle/mux/FailureDetectorTest.scala | Scala | apache-2.0 | 1,689 |
/*
* Copyright 2017 Georgi Krastev
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink
import org.scalacheck._
/** Data definitions. Must be separate due to SI-7046. */
/** Data definitions. Must be separate due to SI-7046. */
object ADTsBench {
  import Gen._
  /** Recursive product. */
  case class NTree[@specialized(Int) +E](value: E, children: List[NTree[E]]) {
    // Total number of nodes in the tree (this node plus all descendants).
    def size: Int = 1 + children.foldLeft(0)(_ + _.size)
  }
  object NTree {
    /** Reasonably sized [[Arbitrary]] [[NTree]]s. */
    implicit def arb[E](implicit element: Arbitrary[E]): Arbitrary[NTree[E]] = {
      // lzy defers evaluation so the recursive generator does not loop at
      // definition time; sized bounds recursion depth via the size budget.
      lazy val tree: Gen[NTree[E]] = lzy(sized {
        size => for {
          value <- element.arbitrary
          children <- if (size <= 1) const(Nil) else for {
            n <- choose(0, size - 1)
            // NOTE(review): the remaining budget is split unevenly — the head
            // child gets size % (n + 1) and each of the n tail children gets
            // size / (n + 1) — presumably to keep the total roughly `size`.
            h <- resize(size % (n + 1), tree)
            t <- listOfN(n, resize(size / (n + 1), tree))
          } yield h :: t
        } yield NTree(value, children)
      })
      Arbitrary(tree)
    }
  }
  /** Recursive coproduct. */
  sealed trait BTree[@specialized(Int) +E] { def size: Int }
  case object BLeaf extends BTree[Nothing] { def size = 0 }
  case class BNode[@specialized(Int) +E](
    left: BTree[E], value: E, right: BTree[E]
  ) extends BTree[E] {
    def size = 1 + left.size + right.size
  }
  object BTree {
    /** Reasonably sized [[Arbitrary]] [[BTree]]s. */
    implicit def arb[E](implicit element: Arbitrary[E]): Arbitrary[BTree[E]] = {
      lazy val tree: Gen[BTree[E]] = lzy(sized { size =>
        // A zero budget terminates recursion with a leaf; otherwise the
        // remaining budget is split between the two subtrees (n - 1 left,
        // size - n right, which sum to size - 1 after spending 1 on the node).
        if (size == 0) BLeaf else for {
          e <- element.arbitrary
          n <- choose(1, size)
          l <- resize(n - 1, tree)
          r <- resize(size - n, tree)
        } yield BNode(l, e, r)
      })
      Arbitrary(tree)
    }
  }
}
| joroKr21/flink-shapeless | src/bench/scala/org/apache/flink/ADTsBench.scala | Scala | apache-2.0 | 2,246 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.instances
import cats.arrow.Profunctor
import monix.execution.Ack.Stop
import monix.execution.{Ack, Cancelable, Scheduler}
import monix.reactive.observers.Subscriber
import monix.reactive.subjects.Subject
import scala.concurrent.Future
import scala.util.control.NonFatal
/** `cats.arrow.Profunctor` type class instance for [[monix.reactive.subjects.Subject Subject]].
*
* References:
*
* - [[https://typelevel.org/cats/ typelevel/cats]]
* - [[https://github.com/typelevel/cats-effect typelevel/cats-effect]]
*/
object CatsProfunctorForSubject extends Profunctor[Subject] {
  // dimap contravariantly maps the input side with `f` and covariantly maps
  // the output side with `g`, producing a new Subject that delegates to
  // `source`.
  def dimap[A, B, C, D](source: Subject[A, B])(f: C => A)(g: B => D): Subject[C, D] =
    new ProfunctorSubject(source)(f)(g)
  private final class ProfunctorSubject[A, B, C, D](source: Subject[A, B])(f: C => A)(g: B => D) extends Subject[C, D] {
    def size: Int = source.size
    def unsafeSubscribeFn(subscriber: Subscriber[D]): Cancelable =
      source.unsafeSubscribeFn(new Subscriber[B] {
        implicit def scheduler: Scheduler = subscriber.scheduler
        def onNext(elem: B): Future[Ack] = {
          // `streamError` distinguishes exceptions thrown by the mapping
          // function `g` (signaled downstream via onError) from exceptions
          // raised by the downstream onNext call itself (not caught here,
          // because the guard is false by then).
          var streamError = true
          try {
            val b = g(elem)
            streamError = false
            subscriber.onNext(b)
          } catch {
            case NonFatal(ex) if streamError =>
              onError(ex)
              Stop
          }
        }
        def onError(ex: Throwable): Unit = subscriber.onError(ex)
        def onComplete(): Unit = subscriber.onComplete()
      })
    def onNext(elem: C): Future[Ack] = {
      // Same protection pattern as above, applied to the input mapping `f`.
      var streamError = true
      try {
        val a = f(elem)
        streamError = false
        source.onNext(a)
      } catch {
        case NonFatal(ex) if streamError =>
          onError(ex)
          Stop
      }
    }
    def onError(ex: Throwable): Unit = source.onError(ex)
    def onComplete(): Unit = source.onComplete()
  }
}
| alexandru/monifu | monix-reactive/shared/src/main/scala/monix/reactive/instances/CatsProfunctorForSubject.scala | Scala | apache-2.0 | 2,599 |
package com.sksamuel.elastic4s
import java.time.LocalDate
import org.scalatest.Matchers
/** Unit tests for the `ElasticDate` date-math expression builder (`show` renders the wire format). */
class ElasticDateTest extends org.scalatest.FlatSpec with Matchers {
  "ElasticDate" should "support 'now' for the date" in {
    ElasticDate.now.show should fullyMatch regex "now"
  }
  it should "support LocalDate date" in {
    // Explicit dates are rendered with the `||` anchor suffix.
    ElasticDate(LocalDate.of(2014, 12, 2)).show shouldBe "2014-12-02||"
  }
  it should "support additions" in {
    ElasticDate.now.add(2, Hours).show shouldBe "now+2h"
    ElasticDate(LocalDate.of(2014, 12, 2)).add(2, Years).show shouldBe "2014-12-02||+2y"
  }
  it should "support subtractions" in {
    ElasticDate.now.subtract(3, Minutes).show shouldBe "now-3m"
    ElasticDate(LocalDate.of(2014, 12, 3)).subtract(3, Seconds).show shouldBe "2014-12-03||-3s"
  }
  it should "support multiple adjustments" in {
    // Adjustments are appended in application order.
    ElasticDate.now.subtract(3, Minutes).add(1, Weeks).show shouldBe "now-3m+1w"
  }
  it should "support rounding" in {
    ElasticDate.now.rounding(Days).show shouldBe "now/d"
    ElasticDate(LocalDate.of(2014, 12, 11)).rounding(Days).show shouldBe "2014-12-11||/d"
  }
  it should "support rounding and adjustments" in {
    // Rounding (`/unit`) always renders after the arithmetic adjustments.
    ElasticDate.now.add(3, Weeks).rounding(Days).show shouldBe "now+3w/d"
    ElasticDate(LocalDate.of(2014, 11, 11)).add(3, Weeks).rounding(Hours).show shouldBe "2014-11-11||+3w/h"
  }
}
| Tecsisa/elastic4s | elastic4s-core/src/test/scala/com/sksamuel/elastic4s/ElasticDateTest.scala | Scala | apache-2.0 | 1,352 |
package com.wavesplatform.lang.v1.compiler
import cats.instances.vector._
import cats.syntax.traverse._
import com.wavesplatform.lang.contract.DApp
import com.wavesplatform.lang.contract.DApp.{CallableFunction, VerifierFunction}
import com.wavesplatform.lang.v1.FunctionHeader
import com.wavesplatform.lang.v1.FunctionHeader.{Native, User}
import com.wavesplatform.lang.v1.compiler.Terms._
import com.wavesplatform.lang.v1.evaluator.FunctionIds
import com.wavesplatform.lang.v1.evaluator.ctx.impl.waves.{ExtractedFuncPostfix, ExtractedFuncPrefix}
import monix.eval.Coeval
import scala.util.Try
/**
 * Pretty-printer that renders compiled RIDE [[Terms]] (expressions, declarations
 * and whole [[DApp]]s) back into source text. All rendering is wrapped in
 * `Coeval` so recursive descent stays lazy and stack-safe at the call sites.
 */
object Decompiler {
  // Whether a decompiled block expression should be wrapped in braces.
  sealed trait BlockBraces
  case object NoBraces extends BlockBraces
  case object BracesWhenNeccessary extends BlockBraces
  // Whether the first emitted line of an expression receives indentation.
  sealed trait FirstLinePolicy
  case object DontIndentFirstLine extends FirstLinePolicy
  case object IdentFirstLine extends FirstLinePolicy
  private[lang] def pure[A](a: A) = Coeval.evalOnce(a)
  // Prefixes `in` with 4 spaces per indentation level.
  private def out(in: String, ident: Int): String =
    Array.fill(4 * ident)(" ").mkString("") + in
  private def pureOut(in: String, ident: Int): Coeval[String] = pure(out(in, ident))
  private val NEWLINE = "\\n"
  // Renders a declaration (func or let). FAILED_DEC nodes are emitted as a
  // literal marker string rather than failing the whole decompilation.
  private def decl(e: Coeval[DECLARATION], ctx: DecompilerContext): Coeval[String] =
    e flatMap {
      case Terms.FUNC(name, args, body) =>
        expr(pure(body), ctx, BracesWhenNeccessary, DontIndentFirstLine).map(
          fb =>
            out("func " + name + " (" + args.mkString(",") + ") = ", ctx.ident) +
              out(fb + NEWLINE, ctx.ident))
      case Terms.LET(name, value) =>
        expr(pure(value), ctx, BracesWhenNeccessary, DontIndentFirstLine).map(e => out("let " + name + " = " + e, ctx.ident))
      case _: FAILED_DEC => Coeval.now("FAILED_DEC")
    }
  // Recognizes the compiled form of a type check on reference `Name`
  // (Native(1) = instance-of) — possibly a chain of OR-ed checks — and
  // collects the matched type names; None when `e` is not such a chain.
  private def extrTypes(Name: String, e: EXPR): Coeval[Option[List[String]]] = {
    e match {
      case FUNCTION_CALL(FunctionHeader.Native(1), List(REF(Name), CONST_STRING(typeName))) => pure(Some(List(typeName)))
      case IF(FUNCTION_CALL(FunctionHeader.Native(1), List(REF(Name), CONST_STRING(typeName))), TRUE, t) =>
        extrTypes(Name, t) map (_.map(tl => typeName :: tl))
      case _ => pure(None)
    }
  }
  // Extractor unifying the two block encodings (LET_BLOCK and BLOCK) holding a let.
  object ANY_LET {
    def unapply(e: EXPR): Option[(String, EXPR, EXPR)] = {
      e match {
        case LET_BLOCK(LET(name, v), body) => Some((name, v, body))
        case BLOCK(LET(name, v), body) => Some((name, v, body))
        case _ => None
      }
    }
  }
  // Renders one `case ...` branch of a desugared match over reference `Name`.
  // Returns the rendered branch plus the remaining (not yet rendered) tail
  // expression, if this branch was part of an if-chain.
  private def caseExpr(Name: String, e: EXPR, ctx: DecompilerContext): Coeval[(String, Option[EXPR])] = {
    e match {
      case IF(
          tc,
          ANY_LET(name, REF(Name), cExpr),
          tailExpr) => extrTypes(Name, tc) flatMap {
        case None => expr(pure(e), ctx.incrementIdent(), NoBraces, IdentFirstLine) map { e =>
          ("case _ => " ++ NEWLINE ++ e, None)
        }
        case Some(tl) => expr(pure(cExpr), ctx.incrementIdent(), NoBraces, IdentFirstLine) map { e =>
          ("case " ++ name ++ ": " ++ tl.mkString("|") ++ " => " ++ NEWLINE ++ e, Some(tailExpr))
        }
      }
      case IF(
          tc,
          cExpr,
          tailExpr) => extrTypes(Name, tc) flatMap {
        case None => expr(pure(e), ctx.incrementIdent(), NoBraces, IdentFirstLine) map { e =>
          ("case _ => " ++ NEWLINE ++ e, None)
        }
        case Some(tl) => expr(pure(cExpr), ctx.incrementIdent(), NoBraces, IdentFirstLine) map { e =>
          ("case _: " ++ tl.mkString("|") ++ " => " ++ NEWLINE ++ e, Some(tailExpr))
        }
      }
      case ANY_LET(name, REF(Name), e) => expr(pure(e), ctx.incrementIdent(), NoBraces, IdentFirstLine) map { e =>
        ("case " ++ name ++ " => " ++ NEWLINE ++ e, None)
      }
      case _ => expr(pure(e), ctx.incrementIdent(), NoBraces, IdentFirstLine) map { e =>
        ("case _ => " ++ NEWLINE ++ e, None)
      }
    }
  }
  // Renders the full body of a match block by repeatedly rendering a case
  // branch and recursing on the remaining tail expression.
  private def matchBlock(name: String, body: Coeval[EXPR], ctx: DecompilerContext): Coeval[String] = {
    for {
      e <- body
      p <- caseExpr(name, e, ctx)
      c = p._1 ++ NEWLINE
      t <- p._2.fold(pure(Option.empty[String]))(e => matchBlock(name, pure(e), ctx).map(Some.apply))
    } yield {
      t.fold(out(c, ctx.ident)) { t => out(c, ctx.ident) ++ t }
    }
  }
  // Synthetic variable name the compiler introduces for match scrutinees.
  val MatchRef = """(\\$match\\d*)""".r
  // Core expression renderer, dispatching on the term constructor.
  private[lang] def expr(e: Coeval[EXPR], ctx: DecompilerContext, braces: BlockBraces, firstLinePolicy: FirstLinePolicy): Coeval[String] = {
    // Sub-expressions that would parse ambiguously are parenthesized.
    def checkBrackets(expr: EXPR) = expr match {
      // no need while all binaty ops is bracked. // case Terms.FUNCTION_CALL(FunctionHeader.Native(id), _) if ctx.binaryOps.contains(id) /* || ctx.unaryOps.contains(id) */ => ("(", ")")
      case Terms.IF(_, _, _) => ("(", ")")
      case Terms.LET_BLOCK(_, _) => ("(", ")")
      case Terms.BLOCK(_, _) => ("(", ")")
      case _ => ("", "")
    }
    def argsStr(args: List[EXPR]) = args.map(argStr).toVector.sequence
    def listStr(elems: List[EXPR]) = argsStr(elems).map(_.mkString("[", ", ", "]"))
    def argStr(elem: EXPR) = expr(pure(elem), ctx, BracesWhenNeccessary, DontIndentFirstLine)
    val i = if (firstLinePolicy == DontIndentFirstLine) 0 else ctx.ident
    e.flatMap(v => (v: @unchecked) match {
      // A let of a $match variable marks a desugared match expression.
      case Terms.BLOCK(Terms.LET(MatchRef(name), e), body) => matchBlock(name, pure(body), ctx.incrementIdent()) flatMap { b =>
        expr(pure(e), ctx.incrementIdent(), NoBraces, DontIndentFirstLine) map { ex =>
          out("match " ++ ex ++ " {" ++ NEWLINE, ctx.ident) ++
            out( b, 0) ++
            out("}", ctx.ident)
        }
      }
      case Terms.BLOCK(declPar, body) =>
        val braceThis = braces match {
          case NoBraces => false
          case BracesWhenNeccessary => true
        }
        val modifiedCtx = if (braceThis) ctx.incrementIdent() else ctx
        for {
          d <- decl(pure(declPar), modifiedCtx)
          b <- expr(pure(body), modifiedCtx, NoBraces, IdentFirstLine)
        } yield {
          if (braceThis)
            out("{" + NEWLINE, ident = 0) +
              out(d + NEWLINE, 0) +
              out(b + NEWLINE, 0) +
              out("}", ctx.ident + 1)
          else
            out(d + NEWLINE, 0) +
              out(b, 0)
        }
      // LET_BLOCK is the legacy encoding of BLOCK; rendered identically.
      case Terms.LET_BLOCK(let, exprPar) => expr(pure(Terms.BLOCK(let, exprPar)), ctx, braces, firstLinePolicy)
      case Terms.TRUE => pureOut("true", i)
      case Terms.FALSE => pureOut("false", i)
      case Terms.CONST_BOOLEAN(b) => pureOut(b.toString.toLowerCase(), i)
      case Terms.CONST_LONG(t) => pureOut(t.toLong.toString, i)
      case Terms.CONST_STRING(s) => pureOut("\\"" ++ s ++ "\\"", i)
      // Short byte strings render as base58, longer ones as base64.
      case Terms.CONST_BYTESTR(bs) => pureOut(if(bs.size <= 128) { "base58'" ++ bs.toString ++ "'" } else { "base64'" ++ bs.base64Raw ++ "'" }, i)
      case Terms.REF(ref) => pureOut(ref, i)
      case Terms.GETTER(getExpr, fld) =>
        val (bs, be) = checkBrackets(getExpr)
        expr(pure(getExpr), ctx, NoBraces, firstLinePolicy).map(a => s"$bs$a$be.$fld")
      case Terms.IF(cond, it, iff) =>
        for {
          c <- expr(pure(cond), ctx, BracesWhenNeccessary, DontIndentFirstLine)
          it <- expr(pure(it), ctx.incrementIdent(), BracesWhenNeccessary, DontIndentFirstLine)
          iff <- expr(pure(iff), ctx.incrementIdent(), BracesWhenNeccessary, DontIndentFirstLine)
        } yield
          out("if (" + c + ")" + NEWLINE, i) +
            out("then " + it + NEWLINE, ctx.ident + 1) +
            out("else " + iff, ctx.ident + 1)
      // cons chains render either as a list literal or as `head :: tailVar`.
      case FUNCTION_CALL(`cons`, args) =>
        collectListArgs(args) match {
          case (elems, None) => listStr(elems)
          case (List(elem), Some(listVar)) => argStr(elem).map(v => s"$v :: $listVar")
          case (elems, Some(listVar)) => listStr(elems).map(v => s"$v :: $listVar")
        }
      case FUNCTION_CALL(`listElem`, List(list, index)) =>
        val (bs,be) = checkBrackets(list)
        for (l <- argStr(list); i <- argStr(index)) yield s"$bs$l$be[$i]"
      case Terms.FUNCTION_CALL(func, args) =>
        val argsCoeval = argsStr(args)
        func match {
          // Known native binary operators are rendered infix.
          case FunctionHeader.Native(id) if ctx.binaryOps.contains(id) && args.size == 2 =>
            val (bs0, be0) = args(0) match {
              case Terms.IF(_,_,_) => ("(", ")")
              case _ => ("", "")
            }
            val (bs1, be1) = args(1) match {
              case Terms.IF(_,_,_) => ("(", ")")
              case _ => ("", "")
            }
            argsCoeval.map(as => out(s"(${bs0}${as(0)}${be0} ${ctx.binaryOps(id)} ${bs1}${as(1)}${be1})", i))
          case FunctionHeader.User(internalName, _) if internalName == "!=" =>
            argsCoeval.map(as => out(s"(${as(0)} != ${as(1)})", i))
          case header =>
            val name = extractFunctionName(ctx, header)
            argsCoeval.map(as => out(s"$name(${as.mkString(", ")})", i))
        }
      case _: Terms.ARR => ??? // never happens
      case obj: Terms.CaseObj => pureOut(obj.toString, i) // never happens
    })
  }
  // Matches names of functions synthesized during contract compaction,
  // e.g. an encoded "User(f)" / "Native(405)" wrapped call.
  private val extractedFuncR = s"$ExtractedFuncPrefix(\\\\w+)\\\\((.+)\\\\)".r
  private def extractFunctionName(ctx: DecompilerContext, header: FunctionHeader) =
    header match {
      case inner@User(_, name) =>
        extractedFuncR.findFirstMatchIn(name)
          .flatMap(m =>
            (m.group(1), m.group(2)) match {
              case ("User", name) => Some(User(name))
              case ("Native", id) => Try(id.toShort).toOption.map(Native)
              case _ => None
            }
          )
          .map(getFunctionName(ctx, _) + ExtractedFuncPostfix)
          .getOrElse(getFunctionName(ctx, inner))
      case h => getFunctionName(ctx, h)
    }
  // Native ids are looked up in the context's opcode table; unknown ids get a
  // `Native<id>` placeholder so decompilation never fails on them.
  private def getFunctionName(ctx: DecompilerContext, header: FunctionHeader) =
    header match {
      case Native(id) => ctx.opCodes.getOrElse(id, s"Native<$id>")
      case User(_, name) => name
    }
  private val nil = REF("nil")
  private val cons = Native(FunctionIds.CREATE_LIST)
  private val listElem = Native(FunctionIds.GET_LIST)
  // Flattens a nested cons chain into its elements plus, when the chain does
  // not end in `nil`, the name of the trailing list variable.
  private def collectListArgs(args: List[EXPR]): (List[EXPR], Option[String]) = {
    def flattenRec(args: List[EXPR]): List[EXPR] = args match {
      case a :: FUNCTION_CALL(`cons`, nextArgs) :: Nil => a :: flattenRec(nextArgs)
      case l => l
    }
    flattenRec(args) match {
      case a :+ `nil` => (a, None)
      case a :+ REF(listVar) => (a, Some(listVar))
      case l => (l, None)
    }
  }
  // Renders a whole DApp: plain declarations, then @Callable functions, then
  // the optional @Verifier, separated by blank lines.
  def apply(e: DApp, ctx: DecompilerContext): String = {
    def intersperse(s: Seq[Coeval[String]]): Coeval[String] = s.toVector.sequence.map(v => v.mkString(NEWLINE + NEWLINE))
    val dApp = ContractScriptCompactor.decompact(e)
    import dApp._
    val decls: Seq[Coeval[String]] = decs.map(expr => decl(pure(expr), ctx))
    val callables: Seq[Coeval[String]] = callableFuncs
      .map {
        case CallableFunction(annotation, u) =>
          Decompiler.decl(pure(u), ctx).map(out(NEWLINE + "@Callable(" + annotation.invocationArgName + ")" + NEWLINE, 0) + _)
      }
    val verifier: Seq[Coeval[String]] = verifierFuncOpt.map {
      case VerifierFunction(annotation, u) =>
        Decompiler.decl(pure(u), ctx).map(out(NEWLINE + "@Verifier(" + annotation.invocationArgName + ")" + NEWLINE, 0) + _)
    }.toSeq
    val result = for {
      d <- intersperse(decls)
      c <- intersperse(callables)
      v <- intersperse(verifier)
    } yield d + NEWLINE + c + NEWLINE + v
    result()
  }
  // Renders a standalone expression script.
  def apply(e0: EXPR, ctx: DecompilerContext): String =
    expr(pure(e0), ctx, NoBraces, IdentFirstLine).apply()
}
| wavesplatform/Waves | lang/shared/src/main/scala/com/wavesplatform/lang/v1/compiler/Decompiler.scala | Scala | mit | 11,701 |
package com.github.mauricio.async.db.postgresql.encoders
import com.github.mauricio.async.db.postgresql.column.PostgreSQLColumnEncoderRegistry
import com.github.mauricio.async.db.postgresql.messages.frontend.PreparedStatementExecuteMessage
import io.netty.util.CharsetUtil
import org.specs2.mutable.Specification
class ExecutePreparedStatementEncoderSpec extends Specification {
  val registry = new PostgreSQLColumnEncoderRegistry()
  val encoder = new ExecutePreparedStatementEncoder(CharsetUtil.UTF_8, registry)
  // Expected wire bytes; the leading bytes of each segment are the PostgreSQL
  // message tags 66='B' (Bind), 69='E' (Execute), 83='S' (Sync), 67='C' (Close).
  val sampleMessage = Array[Byte](66,0,0,0,18,49,0,49,0,0,0,0,1,-1,-1,-1,-1,0,0,69,0,0,0,10,49,0,0,0,0,0,83,0,0,0,4,67,0,0,0,7,80,49,0)
  "encoder" should {
    "correctly handle the case where an encoder returns null" in {
      // Some(null) exercises the path where a column encoder yields null;
      // it must be encoded as the -1 "null value" length marker, not crash.
      val message = new PreparedStatementExecuteMessage(1, "select * from users", List(Some(null)), registry)
      val result = encoder.encode(message)
      val bytes = new Array[Byte](result.readableBytes())
      result.readBytes(bytes)
      bytes === sampleMessage
    }
  }
}
| outbrain/postgresql-async | postgresql-async/src/test/scala/com/github/mauricio/async/db/postgresql/encoders/ExecutePreparedStatementEncoderSpec.scala | Scala | apache-2.0 | 1,033 |
package com.xah.chat.comms
import android.os.Binder
/** Android binder that hands clients a reference to the bound [[XService]]. */
class XBinder(service: XService) extends Binder {
  /** Returns the service instance this binder was created for. */
  def getService(): XService = service
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.functions.aggfunctions
import java.math.BigDecimal
import org.apache.flink.table.functions.AggregateFunction
/**
* Test case for built-in max aggregate function
*
* @tparam T the type for the aggregation result
*/
abstract class MaxAggFunctionTest[T: Numeric] extends AggFunctionTestBase[T, MaxAccumulator[T]] {
  private val numeric: Numeric[T] = implicitly[Numeric[T]]
  // Extreme values for the concrete numeric type, supplied by each subclass.
  def minVal: T
  def maxVal: T
  override def inputValueSets: Seq[Seq[T]] = Seq(
    // Mixed values with nulls and both extremes: expected max is maxVal.
    Seq(
      numeric.fromInt(1),
      null.asInstanceOf[T],
      maxVal,
      numeric.fromInt(-99),
      numeric.fromInt(3),
      numeric.fromInt(56),
      numeric.fromInt(0),
      minVal,
      numeric.fromInt(-20),
      numeric.fromInt(17),
      null.asInstanceOf[T]
    ),
    // All-null input: expected max is null.
    Seq(
      null.asInstanceOf[T],
      null.asInstanceOf[T],
      null.asInstanceOf[T],
      null.asInstanceOf[T],
      null.asInstanceOf[T],
      null.asInstanceOf[T]
    )
  )
  override def expectedResults: Seq[T] = Seq(
    maxVal,
    null.asInstanceOf[T]
  )
}
/** [[MaxAggFunctionTest]] specialization for Byte values. */
class ByteMaxAggFunctionTest extends MaxAggFunctionTest[Byte] {
  // Offset by one from the absolute extremes of the type.
  override def minVal = (Byte.MinValue + 1).toByte
  override def maxVal = (Byte.MaxValue - 1).toByte
  override def aggregator: AggregateFunction[Byte, MaxAccumulator[Byte]] =
    new ByteMaxAggFunction()
}
/** [[MaxAggFunctionTest]] specialization for Short values. */
class ShortMaxAggFunctionTest extends MaxAggFunctionTest[Short] {
  override def minVal = (Short.MinValue + 1).toShort
  override def maxVal = (Short.MaxValue - 1).toShort
  override def aggregator: AggregateFunction[Short, MaxAccumulator[Short]] =
    new ShortMaxAggFunction()
}
/** [[MaxAggFunctionTest]] specialization for Int values. */
class IntMaxAggFunctionTest extends MaxAggFunctionTest[Int] {
  override def minVal = Int.MinValue + 1
  override def maxVal = Int.MaxValue - 1
  override def aggregator: AggregateFunction[Int, MaxAccumulator[Int]] =
    new IntMaxAggFunction()
}
/** [[MaxAggFunctionTest]] specialization for Long values. */
class LongMaxAggFunctionTest extends MaxAggFunctionTest[Long] {
  override def minVal = Long.MinValue + 1
  override def maxVal = Long.MaxValue - 1
  override def aggregator: AggregateFunction[Long, MaxAccumulator[Long]] =
    new LongMaxAggFunction()
}
/** [[MaxAggFunctionTest]] specialization for Float values. */
class FloatMaxAggFunctionTest extends MaxAggFunctionTest[Float] {
  // Halved extremes — presumably to leave headroom for intermediate
  // arithmetic in the test base; TODO confirm.
  override def minVal = Float.MinValue / 2
  override def maxVal = Float.MaxValue / 2
  override def aggregator: AggregateFunction[Float, MaxAccumulator[Float]] =
    new FloatMaxAggFunction()
}
/** [[MaxAggFunctionTest]] specialization for Double values. */
class DoubleMaxAggFunctionTest extends MaxAggFunctionTest[Double] {
  override def minVal = Double.MinValue / 2
  override def maxVal = Double.MaxValue / 2
  override def aggregator: AggregateFunction[Double, MaxAccumulator[Double]] =
    new DoubleMaxAggFunction()
}
/** Max aggregate test for Boolean (true > false); nulls are ignored by the aggregator. */
class BooleanMaxAggFunctionTest extends AggFunctionTestBase[Boolean, MaxAccumulator[Boolean]] {
  override def inputValueSets: Seq[Seq[Boolean]] = Seq(
    // All false -> false.
    Seq(
      false,
      false,
      false
    ),
    // All true -> true.
    Seq(
      true,
      true,
      true
    ),
    // Mixed with nulls -> true.
    Seq(
      true,
      false,
      null.asInstanceOf[Boolean],
      true,
      false,
      true,
      null.asInstanceOf[Boolean]
    ),
    // All null -> null result.
    Seq(
      null.asInstanceOf[Boolean],
      null.asInstanceOf[Boolean],
      null.asInstanceOf[Boolean]
    )
  )
  override def expectedResults: Seq[Boolean] = Seq(
    false,
    true,
    true,
    null.asInstanceOf[Boolean]
  )
  override def aggregator: AggregateFunction[Boolean, MaxAccumulator[Boolean]] =
    new BooleanMaxAggFunction()
}
/** Max aggregate test for java.math.BigDecimal values. */
class DecimalMaxAggFunctionTest
    extends AggFunctionTestBase[BigDecimal, MaxAccumulator[BigDecimal]] {
  override def inputValueSets: Seq[Seq[_]] = Seq(
    // Mixed positives/negatives with nulls: expected max is 1000.000001.
    Seq(
      new BigDecimal("1"),
      new BigDecimal("1000.000001"),
      new BigDecimal("-1"),
      new BigDecimal("-999.998999"),
      null,
      new BigDecimal("0"),
      new BigDecimal("-999.999"),
      null,
      new BigDecimal("999.999")
    ),
    // All-null input: expected max is null.
    Seq(
      null,
      null,
      null,
      null,
      null
    )
  )
  override def expectedResults: Seq[BigDecimal] = Seq(
    new BigDecimal("1000.000001"),
    null
  )
  override def aggregator: AggregateFunction[BigDecimal, MaxAccumulator[BigDecimal]] =
    new DecimalMaxAggFunction()
}
/** Max aggregate test for String values (lexicographic ordering). */
class StringMaxAggFunctionTest extends AggFunctionTestBase[String, MaxAccumulator[String]] {
  override def inputValueSets: Seq[Seq[_]] = Seq(
    Seq(
      new String("a"),
      new String("b"),
      new String("c"),
      null.asInstanceOf[String],
      new String("d")
    ),
    // All-null input: expected max is null.
    Seq(
      null.asInstanceOf[String],
      null.asInstanceOf[String],
      null.asInstanceOf[String]
    ),
    // Case-sensitive comparison: lowercase sorts after uppercase and digits.
    Seq(
      new String("1House"),
      new String("Household"),
      new String("house"),
      new String("household")
    )
  )
  override def expectedResults: Seq[String] = Seq(
    new String("d"),
    null.asInstanceOf[String],
    new String("household")
  )
  override def aggregator: AggregateFunction[String, MaxAccumulator[String]] =
    new StringMaxAggFunction()
}
| hongyuhong/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/functions/aggfunctions/MaxAggFunctionTest.scala | Scala | apache-2.0 | 5,737 |
package com.twitter.server
import com.twitter.app.GlobalFlag
import com.twitter.finagle.{Addr, Resolver, Name}
import com.twitter.util.Var
/**
 * Global flag mapping logical service names to resolver target strings,
 * e.g. `gizmoduck=zk!/gizmoduck`; consumed by [[FlagResolver]].
 */
@deprecated(
  "Users should prefer using Dtabs which are overridable by setting the `dtab.add` flag",
  "2019-04-03")
object resolverMap
    extends GlobalFlag[Map[String, String]](
      Map.empty,
      "A list mapping service names to resolvers (gizmoduck=zk!/gizmoduck)"
    )
/**
 * Indicates that a [[com.twitter.finagle.Resolver]] was not found for the
 * given `name` using the FlagResolver.
 *
 * Resolvers are discovered via the com.twitter.server.resolverMap
 */
class NamedResolverNotFoundException(scheme: String, name: String)
    extends Exception(
      // The message lists all configured keys to make misconfiguration
      // easy to diagnose from the log line alone.
      s"Resolver not found for scheme '$scheme' with name '$name'. " +
        s"resolverMap = ${resolverMap().keySet.toSeq.sorted.mkString(",")}"
    )
/**
 * Resolver for `flag!name` targets: looks up `name` in the deprecated
 * `resolverMap` flag and delegates binding to the mapped target.
 */
class FlagResolver extends Resolver {
  val scheme = "flag"
  // Re-read on every bind so flag parsing order does not matter.
  private[this] def resolvers = resolverMap()
  def bind(arg: String): Var[Addr] = resolvers.get(arg) match {
    case Some(target) =>
      Resolver.eval(target) match {
        case Name.Bound(va) => va
        // Only concrete bound names are supported; path (tree) names fail.
        case Name.Path(_) =>
          Var.value(Addr.Failed(new IllegalArgumentException("Cannot bind to trees")))
      }
    case None =>
      // Unknown name: surface a failed address rather than throwing.
      val a = Addr.Failed(new NamedResolverNotFoundException(scheme, arg))
      Var.value(a)
  }
}
| twitter/twitter-server | server/src/main/scala/com/twitter/server/FlagResolver.scala | Scala | apache-2.0 | 1,369 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.util
import org.apache.calcite.adapter.java.JavaTypeFactory
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.sql.`type`.SqlTypeName
import scala.collection.JavaConverters._
import scala.collection.mutable
/**
 * Fluent helper for assembling a Calcite struct (row) type one field at a
 * time. Not thread-safe: accumulates state in local buffers until [[build]].
 */
class InputTypeBuilder(typeFactory: JavaTypeFactory) {

  private val fieldNames = mutable.ListBuffer.empty[String]
  private val fieldTypes = mutable.ListBuffer.empty[RelDataType]

  /** Adds a field of the given SQL type; returns this builder for chaining. */
  def field(name: String, `type`: SqlTypeName): InputTypeBuilder = {
    fieldNames += name
    fieldTypes += typeFactory.createSqlType(`type`)
    this
  }

  /** Adds a field with a pre-built (possibly nested) row type; returns this builder. */
  def nestedField(name: String, `type`: RelDataType): InputTypeBuilder = {
    fieldNames += name
    fieldTypes += `type`
    this
  }

  /** Materializes the accumulated fields into a Calcite struct type. */
  def build: RelDataType = typeFactory.createStructType(fieldTypes.asJava, fieldNames.asJava)
}
object InputTypeBuilder {
  /** Entry point for the fluent builder: `inputOf(factory).field(...).build`. */
  def inputOf(typeFactory: JavaTypeFactory) = new InputTypeBuilder(typeFactory)
}
| shaoxuan-wang/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/plan/util/InputTypeBuilder.scala | Scala | apache-2.0 | 1,722 |
package metaconfig.cli
import org.typelevel.paiges.Doc
/** CLI subcommand that prints the application's version and exits successfully. */
object VersionCommand extends Command[Unit]("version") {
  // Aliases so `-v`, `--version` and `-version` all dispatch here.
  override def extraNames: List[String] = List("-v", "--version", "-version")
  override def description: Doc = Doc.paragraph("Show version information")
  def run(value: Unit, app: CliApp): Int = {
    app.out.println(app.version)
    0 // exit code: success
  }
}
| olafurpg/metaconfig | metaconfig-core/shared/src/main/scala/metaconfig/cli/VersionCommand.scala | Scala | apache-2.0 | 358 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.yarn
import scala.collection.JavaConverters._
import org.apache.hadoop.metrics2.MetricsRecordBuilder
import org.mockito.Matchers._
import org.mockito.Mockito.{mock, times, verify, when}
import org.scalatest.Matchers
import org.apache.spark.SparkFunSuite
import org.apache.spark.network.server.OneForOneStreamManager
import org.apache.spark.network.shuffle.{ExternalShuffleBlockHandler, ExternalShuffleBlockResolver}
class YarnShuffleServiceMetricsSuite extends SparkFunSuite with Matchers {
  // Mocked shuffle internals: only getRegisteredExecutorsSize is stubbed,
  // which backs the registeredExecutorsSize gauge.
  val streamManager = mock(classOf[OneForOneStreamManager])
  val blockResolver = mock(classOf[ExternalShuffleBlockResolver])
  when(blockResolver.getRegisteredExecutorsSize).thenReturn(42)
  val metrics = new ExternalShuffleBlockHandler(streamManager, blockResolver).getAllMetrics
  test("metrics named as expected") {
    val allMetrics = Set(
      "openBlockRequestLatencyMillis", "registerExecutorRequestLatencyMillis",
      "blockTransferRateBytes", "registeredExecutorsSize")
    metrics.getMetrics.keySet().asScala should be (allMetrics)
  }
  // these three metrics have the same effect on the collector
  for (testname <- Seq("openBlockRequestLatencyMillis",
      "registerExecutorRequestLatencyMillis",
      "blockTransferRateBytes")) {
    test(s"$testname - collector receives correct types") {
      val builder = mock(classOf[MetricsRecordBuilder])
      when(builder.addCounter(any(), anyLong())).thenReturn(builder)
      when(builder.addGauge(any(), anyDouble())).thenReturn(builder)
      YarnShuffleServiceMetrics.collectMetric(builder, testname,
        metrics.getMetrics.get(testname))
      // Each timer/meter metric reports one counter plus four gauges.
      verify(builder).addCounter(anyObject(), anyLong())
      verify(builder, times(4)).addGauge(anyObject(), anyDouble())
    }
  }
  // this metric writes only one gauge to the collector
  test("registeredExecutorsSize - collector receives correct types") {
    val builder = mock(classOf[MetricsRecordBuilder])
    YarnShuffleServiceMetrics.collectMetric(builder, "registeredExecutorsSize",
      metrics.getMetrics.get("registeredExecutorsSize"))
    // only one
    verify(builder).addGauge(anyObject(), anyInt())
  }
}
| michalsenkyr/spark | resource-managers/yarn/src/test/scala/org/apache/spark/network/yarn/YarnShuffleServiceMetricsSuite.scala | Scala | apache-2.0 | 2,977 |
package scredis.protocol.requests
import scala.language.higherKinds
import scredis.protocol._
import scredis.serialization.{ Reader, Writer }
import scala.collection.generic.CanBuildFrom
/**
 * Redis set (S*) command requests: each case class encodes its arguments for
 * the wire and decodes the corresponding RESP reply. `Key` exposes the key
 * used for cluster slot routing; for multi-key commands it is the first key.
 */
object SetRequests {
  // Command name definitions; WriteCommand marks mutating commands.
  object SAdd extends Command("SADD") with WriteCommand
  object SCard extends Command("SCARD")
  object SDiff extends Command("SDIFF")
  object SDiffStore extends Command("SDIFFSTORE") with WriteCommand
  object SInter extends Command("SINTER")
  object SInterStore extends Command("SINTERSTORE") with WriteCommand
  object SIsMember extends Command("SISMEMBER")
  object SMembers extends Command("SMEMBERS")
  object SMove extends Command("SMOVE") with WriteCommand
  object SPop extends Command("SPOP") with WriteCommand
  object SRandMember extends Command("SRANDMEMBER")
  object SRem extends Command("SREM") with WriteCommand
  object SScan extends Command("SSCAN")
  object SUnion extends Command("SUNION")
  object SUnionStore extends Command("SUNIONSTORE") with WriteCommand
  // SADD key member [member ...] -> number of members actually added.
  case class SAdd[W](key: String, members: W*)(implicit writer: Writer[W]) extends Request[Long](
    SAdd, key +: members.map(writer.write): _*
  ) with Key {
    override def decode = {
      case IntegerResponse(value) => value
    }
  }
  // SCARD key -> set cardinality.
  case class SCard(key: String) extends Request[Long](SCard, key) with Key {
    override def decode = {
      case IntegerResponse(value) => value
    }
  }
  case class SDiff[R: Reader](keys: String*)
    extends Request[Set[R]](SDiff, keys: _*) with Key {
    override def decode = {
      case a: ArrayResponse => a.parsed[R, Set] {
        case b: BulkStringResponse => b.flattened[R]
      }
    }
    override val key = keys.head
  }
  case class SDiffStore(destination: String, keys: String*) extends Request[Long](
    SDiffStore, destination +: keys: _*
  ) with Key {
    override def decode = {
      case IntegerResponse(value) => value
    }
    override val key = keys.head
  }
  case class SInter[R: Reader](keys: String*)
    extends Request[Set[R]](SInter, keys: _*) with Key {
    override def decode = {
      case a: ArrayResponse => a.parsed[R, Set] {
        case b: BulkStringResponse => b.flattened[R]
      }
    }
    override val key = keys.head
  }
  case class SInterStore(destination: String, keys: String*) extends Request[Long](
    SInterStore, destination +: keys: _*
  ) with Key {
    override def decode = {
      case IntegerResponse(value) => value
    }
    override val key = keys.head
  }
  // SISMEMBER key member -> membership as a boolean (0/1 reply).
  case class SIsMember[W: Writer](key: String, member: W) extends Request[Boolean](
    SIsMember, key, implicitly[Writer[W]].write(member)
  ) with Key {
    override def decode = {
      case i: IntegerResponse => i.toBoolean
    }
  }
  case class SMembers[R: Reader](key: String) extends Request[Set[R]](SMembers, key) with Key {
    override def decode = {
      case a: ArrayResponse => a.parsed[R, Set] {
        case b: BulkStringResponse => b.flattened[R]
      }
    }
  }
  // SMOVE source destination member -> true iff the member was moved.
  case class SMove[W: Writer](
    source: String, destination: String, member: W
  ) extends Request[Boolean](
    SMove, source, destination, implicitly[Writer[W]].write(member)
  ) {
    override def decode = {
      case i: IntegerResponse => i.toBoolean
    }
  }
  // SPOP key -> random member removed from the set, or None when empty.
  case class SPop[R: Reader](key: String) extends Request[Option[R]](SPop, key) with Key {
    override def decode: Decoder[Option[R]] = {
      case b: BulkStringResponse => b.parsed[R]
    }
  }
  // SPOP key count -> up to `count` removed members (array reply).
  case class SPopCount[R: Reader](key: String, count: Int) extends Request[List[R]](SPop, key, count) with Key {
    override def decode: Decoder[List[R]] = {
      case a: ArrayResponse => a.parsed[Option[R], List] {
        case b: BulkStringResponse => b.parsed[R]
      }.flatten
    }
  }
  // SRANDMEMBER key -> random member without removing it.
  case class SRandMember[R: Reader](key: String) extends Request[Option[R]](SRandMember, key) with Key {
    override def decode = {
      case b: BulkStringResponse => b.parsed[R]
    }
  }
  case class SRandMembers[R: Reader](key: String, count: Int) extends Request[Set[R]](SRandMember, key, count) with Key {
    override def decode = {
      case a: ArrayResponse => a.parsed[R, Set] {
        case b: BulkStringResponse => b.flattened[R]
      }
    }
  }
  case class SRem[W](key: String, members: W*)(implicit writer: Writer[W]) extends Request[Long](
    SRem, key +: members.map(writer.write): _*
  ) with Key {
    override def decode = {
      case IntegerResponse(value) => value
    }
  }
  // SSCAN key cursor [MATCH pattern] [COUNT n] -> (next cursor, page of members).
  case class SScan[R: Reader](
    key: String,
    cursor: Long,
    matchOpt: Option[String],
    countOpt: Option[Int]
  ) extends Request[(Long, Set[R])](
    SScan,
    generateScanLikeArgs(
      keyOpt = Some(key),
      cursor = cursor,
      matchOpt = matchOpt,
      countOpt = countOpt
    ): _*
  ) with Key {
    override def decode = {
      case a: ArrayResponse => a.parsedAsScanResponse[R, Set] {
        case a: ArrayResponse => a.parsed[R, Set] {
          case b: BulkStringResponse => b.flattened[R]
        }
      }
    }
  }
  case class SUnion[R: Reader](keys: String*)
    extends Request[Set[R]](SUnion, keys: _*) with Key {
    override def decode = {
      case a: ArrayResponse => a.parsed[R, Set] {
        case b: BulkStringResponse => b.flattened[R]
      }
    }
    override val key = keys.head
  }
  case class SUnionStore(destination: String, keys: String*) extends Request[Long](
    SUnionStore, destination +: keys: _*
  ) with Key {
    override def decode = {
      case IntegerResponse(value) => value
    }
    override val key = keys.head
  }
} | scredis/scredis | src/main/scala/scredis/protocol/requests/SetRequests.scala | Scala | apache-2.0 | 5,557 |
import leon.lang._
// Regression test: presumably exercises the xlang check that rejects instantiating
// a generic type parameter with a mutable type — do not "fix" the code below,
// the rejection IS the expected outcome (path suggests xlang/error).
object MutableGenerics5 {
  // Mutable case class: `x` is a var, which is what the checker must detect.
  case class C(var x: Int)
  // Generic higher-order function; A is intended to range over immutable types only.
  def hof[A](f: (Int, A) => Int, a: A): Int = f(1, a)
  //shouldn't be able to instantiate with mutable type
  def test(): Int = {
    val state = C(42)
    // The lambda mutates its captured-by-parameter C instance through the generic slot.
    hof((x: Int, s: C) => { s.x = s.x + 1; x }, state)
    assert(state.x == 43)
    0
  }
}
| epfl-lara/leon | src/test/resources/regression/xlang/error/MutableGenerics5.scala | Scala | gpl-3.0 | 323 |
package org.alcaudon.runtime
import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import org.alcaudon.clustering.DataflowTopologyListener
import org.alcaudon.clustering.DataflowTopologyListener.{DataflowNodeAddress, DownstreamDependencies}
import org.alcaudon.core._
import org.alcaudon.core.sources.{SourceCtx, SourceFunc}
import scala.concurrent.Future
object SourceReifier {
  /**
   * Props factory for [[SourceReifier]]. The actor is constructed inside Props'
   * by-name block, as Akka requires — it must not be hoisted into a local val.
   */
  def props(
      dataflowId: String,
      name: String,
      sourceFn: SourceFunc,
      subscribers: Map[String, KeyExtractor] = Map.empty): Props =
    Props(new SourceReifier(dataflowId, name, sourceFn, subscribers))
}
/**
 * Actor that runs a user [[SourceFunc]] and fans every produced record out to the
 * downstream subscribers it learns about via [[DataflowNodeAddress]] messages.
 */
class SourceReifier(dataflowId: String,
                    name: String,
                    sourceFn: SourceFunc,
                    subscribers: Map[String, KeyExtractor] = Map.empty)
    extends Actor
    with ActorConfig
    with ActorLogging
    with SourceCtx {
  // In distributed mode, announce this node's downstream dependencies to the topology listener.
  if (config.computation.distributed) {
    context.actorOf(DataflowTopologyListener.props(dataflowId, name)) ! DownstreamDependencies(
      subscribers.keySet,
      self)
  }
  // Resolved downstream actors, keyed by ref, each with its key-extraction strategy.
  var subscriberRefs: Map[ActorRef, KeyExtractor] = Map.empty
  // SourceCtx callback: broadcast one raw record to every known subscriber.
  // NOTE(review): the stream offset is hard-coded to 0L — confirm offsets are assigned downstream.
  def collect(record: RawRecord): Unit = {
    subscriberRefs.foreach {
      case (ref, extractor) =>
        val key = extractor.extractKey(record.value)
        ref ! StreamRecord(RawStreamRecord(0L, record), Record(key, record))
    }
  }
  import context.dispatcher
  // SourceCtx callback; nothing to release here.
  def close: Unit = {}
  def receive = {
    case DataflowNodeAddress(id, ref) =>
      subscribers.get(id).foreach { keyExtractor =>
        // NOTE(review): setUp/run execute once per matching DataflowNodeAddress message,
        // so a source with several subscribers appears to be set up and started multiple
        // times — confirm this is intended. `run` executes off the actor thread (Future
        // on the actor's dispatcher).
        sourceFn.setUp(this)
        Future(sourceFn.run())
        log.info("New node added {} {} {}", id, ref, subscribers)
        subscriberRefs += (ref -> keyExtractor)
      }
    case _ =>
  }
}
| fcofdez/alcaudon | src/main/scala/org/alcaudon/runtime/SourceReifier.scala | Scala | apache-2.0 | 1,748 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.io.CharArrayWriter
import java.sql.{Date, Timestamp}
import scala.collection.JavaConverters._
import scala.language.implicitConversions
import scala.reflect.runtime.universe.TypeTag
import scala.util.control.NonFatal
import org.apache.commons.lang3.StringUtils
import org.apache.spark.TaskContext
import org.apache.spark.annotation.{DeveloperApi, Experimental, InterfaceStability}
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.api.java.function._
import org.apache.spark.api.python.{PythonRDD, SerDeUtil}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst._
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.catalog.HiveTableRelation
import org.apache.spark.sql.catalyst.encoders._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.json.{JacksonGenerator, JSONOptions}
import org.apache.spark.sql.catalyst.optimizer.CombineUnions
import org.apache.spark.sql.catalyst.parser.{ParseException, ParserUtils}
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.plans.physical.{Partitioning, PartitioningCollection}
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.arrow.{ArrowConverters, ArrowPayload}
import org.apache.spark.sql.execution.command._
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.execution.python.EvaluatePython
import org.apache.spark.sql.execution.stat.StatFunctions
import org.apache.spark.sql.streaming.DataStreamWriter
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.SchemaUtils
import org.apache.spark.storage.StorageLevel
import org.apache.spark.unsafe.types.CalendarInterval
import org.apache.spark.util.Utils
private[sql] object Dataset {
  /** Builds a strongly typed Dataset from a logical plan, taking the encoder from implicit scope. */
  def apply[T: Encoder](sparkSession: SparkSession, logicalPlan: LogicalPlan): Dataset[T] =
    new Dataset(sparkSession, logicalPlan, implicitly[Encoder[T]])

  /** Builds an untyped DataFrame from a logical plan, failing fast if analysis fails. */
  def ofRows(sparkSession: SparkSession, logicalPlan: LogicalPlan): DataFrame = {
    val execution = sparkSession.sessionState.executePlan(logicalPlan)
    execution.assertAnalyzed()
    new Dataset[Row](sparkSession, execution, RowEncoder(execution.analyzed.schema))
  }
}
/**
* A Dataset is a strongly typed collection of domain-specific objects that can be transformed
* in parallel using functional or relational operations. Each Dataset also has an untyped view
* called a `DataFrame`, which is a Dataset of [[Row]].
*
* Operations available on Datasets are divided into transformations and actions. Transformations
* are the ones that produce new Datasets, and actions are the ones that trigger computation and
* return results. Example transformations include map, filter, select, and aggregate (`groupBy`).
* Example actions count, show, or writing data out to file systems.
*
* Datasets are "lazy", i.e. computations are only triggered when an action is invoked. Internally,
* a Dataset represents a logical plan that describes the computation required to produce the data.
* When an action is invoked, Spark's query optimizer optimizes the logical plan and generates a
* physical plan for efficient execution in a parallel and distributed manner. To explore the
* logical plan as well as optimized physical plan, use the `explain` function.
*
* To efficiently support domain-specific objects, an [[Encoder]] is required. The encoder maps
* the domain specific type `T` to Spark's internal type system. For example, given a class `Person`
* with two fields, `name` (string) and `age` (int), an encoder is used to tell Spark to generate
* code at runtime to serialize the `Person` object into a binary structure. This binary structure
* often has much lower memory footprint as well as are optimized for efficiency in data processing
* (e.g. in a columnar format). To understand the internal binary representation for data, use the
* `schema` function.
*
* There are typically two ways to create a Dataset. The most common way is by pointing Spark
* to some files on storage systems, using the `read` function available on a `SparkSession`.
* {{{
* val people = spark.read.parquet("...").as[Person] // Scala
* Dataset<Person> people = spark.read().parquet("...").as(Encoders.bean(Person.class)); // Java
* }}}
*
* Datasets can also be created through transformations available on existing Datasets. For example,
* the following creates a new Dataset by applying a filter on the existing one:
* {{{
* val names = people.map(_.name) // in Scala; names is a Dataset[String]
* Dataset<String> names = people.map((Person p) -> p.name, Encoders.STRING));
* }}}
*
* Dataset operations can also be untyped, through various domain-specific-language (DSL)
* functions defined in: Dataset (this class), [[Column]], and [[functions]]. These operations
* are very similar to the operations available in the data frame abstraction in R or Python.
*
* To select a column from the Dataset, use `apply` method in Scala and `col` in Java.
* {{{
* val ageCol = people("age") // in Scala
* Column ageCol = people.col("age"); // in Java
* }}}
*
* Note that the [[Column]] type can also be manipulated through its various functions.
* {{{
* // The following creates a new column that increases everybody's age by 10.
* people("age") + 10 // in Scala
* people.col("age").plus(10); // in Java
* }}}
*
* A more concrete example in Scala:
* {{{
* // To create Dataset[Row] using SparkSession
* val people = spark.read.parquet("...")
* val department = spark.read.parquet("...")
*
* people.filter("age > 30")
* .join(department, people("deptId") === department("id"))
* .groupBy(department("name"), people("gender"))
* .agg(avg(people("salary")), max(people("age")))
* }}}
*
* and in Java:
* {{{
* // To create Dataset<Row> using SparkSession
* Dataset<Row> people = spark.read().parquet("...");
* Dataset<Row> department = spark.read().parquet("...");
*
* people.filter(people.col("age").gt(30))
* .join(department, people.col("deptId").equalTo(department.col("id")))
* .groupBy(department.col("name"), people.col("gender"))
* .agg(avg(people.col("salary")), max(people.col("age")));
* }}}
*
* @groupname basic Basic Dataset functions
* @groupname action Actions
* @groupname untypedrel Untyped transformations
* @groupname typedrel Typed transformations
*
* @since 1.6.0
*/
@InterfaceStability.Stable
class Dataset[T] private[sql](
@transient val sparkSession: SparkSession,
@DeveloperApi @InterfaceStability.Unstable @transient val queryExecution: QueryExecution,
encoder: Encoder[T])
extends Serializable {
queryExecution.assertAnalyzed()
// Note for Spark contributors: if adding or updating any action in `Dataset`, please make sure
// you wrap it with `withNewExecutionId` if this actions doesn't call other action.
def this(sparkSession: SparkSession, logicalPlan: LogicalPlan, encoder: Encoder[T]) = {
this(sparkSession, sparkSession.sessionState.executePlan(logicalPlan), encoder)
}
def this(sqlContext: SQLContext, logicalPlan: LogicalPlan, encoder: Encoder[T]) = {
this(sqlContext.sparkSession, logicalPlan, encoder)
}
  @transient private[sql] val logicalPlan: LogicalPlan = {
    // For various commands (like DDL) and queries with side effects, we force query execution
    // to happen right away to let these side effects take place eagerly.
    queryExecution.analyzed match {
      case c: Command =>
        // Replace the command with a LocalRelation over its collected result, so any later
        // re-execution of this plan cannot re-run the side effect.
        LocalRelation(c.output, withAction("command", queryExecution)(_.executeCollect()))
      case u @ Union(children) if children.forall(_.isInstanceOf[Command]) =>
        // Same eager treatment for a union made up entirely of commands.
        LocalRelation(u.output, withAction("command", queryExecution)(_.executeCollect()))
      case _ =>
        queryExecution.analyzed
    }
  }
/**
* Currently [[ExpressionEncoder]] is the only implementation of [[Encoder]], here we turn the
* passed in encoder to [[ExpressionEncoder]] explicitly, and mark it implicit so that we can use
* it when constructing new Dataset objects that have the same object type (that will be
* possibly resolved to a different schema).
*/
private[sql] implicit val exprEnc: ExpressionEncoder[T] = encoderFor(encoder)
/**
* Encoder is used mostly as a container of serde expressions in Dataset. We build logical
* plans by these serde expressions and execute it within the query framework. However, for
* performance reasons we may want to use encoder as a function to deserialize internal rows to
* custom objects, e.g. collect. Here we resolve and bind the encoder so that we can call its
* `fromRow` method later.
*/
  // Encoder resolved and bound against this plan's output, so `fromRow` can be used directly.
  private val boundEnc =
    exprEnc.resolveAndBind(logicalPlan.output, sparkSession.sessionState.analyzer)
private implicit def classTag = exprEnc.clsTag
  // sqlContext must be val because a stable identifier is expected when you import implicits
  // (e.g. `import ds.sqlContext.implicits._` requires a stable path).
  @transient lazy val sqlContext: SQLContext = sparkSession.sqlContext
private[sql] def resolve(colName: String): NamedExpression = {
queryExecution.analyzed.resolveQuoted(colName, sparkSession.sessionState.analyzer.resolver)
.getOrElse {
throw new AnalysisException(
s"""Cannot resolve column name "$colName" among (${schema.fieldNames.mkString(", ")})""")
}
}
private[sql] def numericColumns: Seq[Expression] = {
schema.fields.filter(_.dataType.isInstanceOf[NumericType]).map { n =>
queryExecution.analyzed.resolveQuoted(n.name, sparkSession.sessionState.analyzer.resolver).get
}
}
/**
* Compose the string representing rows for output
*
* @param _numRows Number of rows to show
* @param truncate If set to more than 0, truncates strings to `truncate` characters and
* all cells will be aligned right.
* @param vertical If set to true, prints output rows vertically (one line per column value).
*/
  private[sql] def showString(
      _numRows: Int, truncate: Int = 20, vertical: Boolean = false): String = {
    // Clamp the requested row count into [0, Int.MaxValue - 1].
    val numRows = _numRows.max(0).min(Int.MaxValue - 1)
    // Fetch one extra row so we can tell whether there is more data than shown.
    val takeResult = toDF().take(numRows + 1)
    val hasMoreData = takeResult.length > numRows
    val data = takeResult.take(numRows)
    // Lazy: only resolved when a Timestamp cell actually needs formatting.
    lazy val timeZone =
      DateTimeUtils.getTimeZone(sparkSession.sessionState.conf.sessionLocalTimeZone)
    // For array values, replace Seq and Array with square brackets
    // For cells that are beyond `truncate` characters, replace it with the
    // first `truncate-3` and "..."
    val rows: Seq[Seq[String]] = schema.fieldNames.toSeq +: data.map { row =>
      row.toSeq.map { cell =>
        val str = cell match {
          case null => "null"
          case binary: Array[Byte] => binary.map("%02X".format(_)).mkString("[", " ", "]")
          case array: Array[_] => array.mkString("[", ", ", "]")
          case seq: Seq[_] => seq.mkString("[", ", ", "]")
          case d: Date =>
            DateTimeUtils.dateToString(DateTimeUtils.fromJavaDate(d))
          case ts: Timestamp =>
            DateTimeUtils.timestampToString(DateTimeUtils.fromJavaTimestamp(ts), timeZone)
          case _ => cell.toString
        }
        if (truncate > 0 && str.length > truncate) {
          // do not show ellipses for strings shorter than 4 characters.
          if (truncate < 4) str.substring(0, truncate)
          else str.substring(0, truncate - 3) + "..."
        } else {
          str
        }
      }: Seq[String]
    }
    val sb = new StringBuilder
    val numCols = schema.fieldNames.length
    // We set a minimum column width at '3'
    val minimumColWidth = 3
    if (!vertical) {
      // Initialise the width of each column to a minimum value
      val colWidths = Array.fill(numCols)(minimumColWidth)
      // Compute the width of each column
      for (row <- rows) {
        for ((cell, i) <- row.zipWithIndex) {
          colWidths(i) = math.max(colWidths(i), cell.length)
        }
      }
      // Create SeparateLine
      val sep: String = colWidths.map("-" * _).addString(sb, "+", "+", "+\\n").toString()
      // column names: left-padded (right-aligned) when truncating, right-padded otherwise.
      rows.head.zipWithIndex.map { case (cell, i) =>
        if (truncate > 0) {
          StringUtils.leftPad(cell, colWidths(i))
        } else {
          StringUtils.rightPad(cell, colWidths(i))
        }
      }.addString(sb, "|", "|", "|\\n")
      sb.append(sep)
      // data
      rows.tail.foreach {
        _.zipWithIndex.map { case (cell, i) =>
          if (truncate > 0) {
            StringUtils.leftPad(cell.toString, colWidths(i))
          } else {
            StringUtils.rightPad(cell.toString, colWidths(i))
          }
        }.addString(sb, "|", "|", "|\\n")
      }
      sb.append(sep)
    } else {
      // Extended display mode enabled
      val fieldNames = rows.head
      val dataRows = rows.tail
      // Compute the width of field name and data columns
      val fieldNameColWidth = fieldNames.foldLeft(minimumColWidth) { case (curMax, fieldName) =>
        math.max(curMax, fieldName.length)
      }
      val dataColWidth = dataRows.foldLeft(minimumColWidth) { case (curMax, row) =>
        math.max(curMax, row.map(_.length).reduceLeftOption[Int] { case (cellMax, cell) =>
          math.max(cellMax, cell)
        }.getOrElse(0))
      }
      dataRows.zipWithIndex.foreach { case (row, i) =>
        // "+ 5" in size means a character length except for padded names and data
        val rowHeader = StringUtils.rightPad(
          s"-RECORD $i", fieldNameColWidth + dataColWidth + 5, "-")
        sb.append(rowHeader).append("\\n")
        row.zipWithIndex.map { case (cell, j) =>
          val fieldName = StringUtils.rightPad(fieldNames(j), fieldNameColWidth)
          val data = StringUtils.rightPad(cell, dataColWidth)
          s" $fieldName | $data "
        }.addString(sb, "", "\\n", "\\n")
      }
    }
    // Print a footer
    if (vertical && data.isEmpty) {
      // In a vertical mode, print an empty row set explicitly
      sb.append("(0 rows)\\n")
    } else if (hasMoreData) {
      // For Data that has more than "numRows" records
      val rowsString = if (numRows == 1) "row" else "rows"
      sb.append(s"only showing top $numRows $rowsString\\n")
    }
    sb.toString()
  }
  // Renders "[col: type, ...]" showing at most two columns, appending "... N more fields"
  // for wider schemas; falls back to dumping the query execution if schema access fails.
  override def toString: String = {
    try {
      val builder = new StringBuilder
      val fields = schema.take(2).map {
        case f => s"${f.name}: ${f.dataType.simpleString(2)}"
      }
      builder.append("[")
      builder.append(fields.mkString(", "))
      if (schema.length > 2) {
        if (schema.length - fields.size == 1) {
          builder.append(" ... 1 more field")
        } else {
          builder.append(" ... " + (schema.length - 2) + " more fields")
        }
      }
      builder.append("]").toString()
    } catch {
      // NonFatal only: let OOM/interrupts propagate.
      case NonFatal(e) =>
        s"Invalid tree; ${e.getMessage}:\\n$queryExecution"
    }
  }
/**
* Converts this strongly typed collection of data to generic Dataframe. In contrast to the
* strongly typed objects that Dataset operations work on, a Dataframe returns generic [[Row]]
* objects that allow fields to be accessed by ordinal or name.
*
* @group basic
* @since 1.6.0
*/
  // This is declared with parentheses to prevent the Scala compiler from treating
  // `ds.toDF("1")` as invoking this toDF and then apply on the returned DataFrame.
  // Reuses the same QueryExecution; only the encoder changes to a generic RowEncoder.
  def toDF(): DataFrame = new Dataset[Row](sparkSession, queryExecution, RowEncoder(schema))
/**
* :: Experimental ::
* Returns a new Dataset where each record has been mapped on to the specified type. The
* method used to map columns depend on the type of `U`:
* - When `U` is a class, fields for the class will be mapped to columns of the same name
* (case sensitivity is determined by `spark.sql.caseSensitive`).
* - When `U` is a tuple, the columns will be mapped by ordinal (i.e. the first column will
* be assigned to `_1`).
* - When `U` is a primitive type (i.e. String, Int, etc), then the first column of the
* `DataFrame` will be used.
*
* If the schema of the Dataset does not match the desired `U` type, you can use `select`
* along with `alias` or `as` to rearrange or rename as required.
*
* @group basic
* @since 1.6.0
*/
  @Experimental
  @InterfaceStability.Evolving
  // Re-wraps the same logical plan with U's encoder; nothing is converted until an action runs.
  def as[U : Encoder]: Dataset[U] = Dataset[U](sparkSession, logicalPlan)
/**
* Converts this strongly typed collection of data to generic `DataFrame` with columns renamed.
* This can be quite convenient in conversion from an RDD of tuples into a `DataFrame` with
* meaningful names. For example:
* {{{
* val rdd: RDD[(Int, String)] = ...
* rdd.toDF() // this implicit conversion creates a DataFrame with column name `_1` and `_2`
* rdd.toDF("id", "name") // this creates a DataFrame with column name "id" and "name"
* }}}
*
* @group basic
* @since 2.0.0
*/
@scala.annotation.varargs
def toDF(colNames: String*): DataFrame = {
require(schema.size == colNames.size,
"The number of columns doesn't match.\\n" +
s"Old column names (${schema.size}): " + schema.fields.map(_.name).mkString(", ") + "\\n" +
s"New column names (${colNames.size}): " + colNames.mkString(", "))
val newCols = logicalPlan.output.zip(colNames).map { case (oldAttribute, newName) =>
Column(oldAttribute).as(newName)
}
select(newCols : _*)
}
/**
* Returns the schema of this Dataset.
*
* @group basic
* @since 1.6.0
*/
def schema: StructType = queryExecution.analyzed.schema
/**
* Prints the schema to the console in a nice tree format.
*
* @group basic
* @since 1.6.0
*/
  // scalastyle:off println
  // Writes to stdout on purpose (console inspection tool), hence the scalastyle suppression.
  def printSchema(): Unit = println(schema.treeString)
  // scalastyle:on println
/**
* Prints the plans (logical and physical) to the console for debugging purposes.
*
* @group basic
* @since 1.6.0
*/
def explain(extended: Boolean): Unit = {
val explain = ExplainCommand(queryExecution.logical, extended = extended)
sparkSession.sessionState.executePlan(explain).executedPlan.executeCollect().foreach {
// scalastyle:off println
r => println(r.getString(0))
// scalastyle:on println
}
}
/**
* Prints the physical plan to the console for debugging purposes.
*
* @group basic
* @since 1.6.0
*/
def explain(): Unit = explain(extended = false)
/**
* Returns all column names and their data types as an array.
*
* @group basic
* @since 1.6.0
*/
def dtypes: Array[(String, String)] = schema.fields.map { field =>
(field.name, field.dataType.toString)
}
/**
* Returns all column names as an array.
*
* @group basic
* @since 1.6.0
*/
def columns: Array[String] = schema.fields.map(_.name)
/**
* Returns true if the `collect` and `take` methods can be run locally
* (without any Spark executors).
*
* @group basic
* @since 1.6.0
*/
def isLocal: Boolean = logicalPlan.isInstanceOf[LocalRelation]
/**
* Returns true if this Dataset contains one or more sources that continuously
* return data as it arrives. A Dataset that reads data from a streaming source
* must be executed as a `StreamingQuery` using the `start()` method in
* `DataStreamWriter`. Methods that return a single answer, e.g. `count()` or
* `collect()`, will throw an [[AnalysisException]] when there is a streaming
* source present.
*
* @group streaming
* @since 2.0.0
*/
  @InterfaceStability.Evolving
  // Delegates to the logical plan: true if any leaf is a streaming source.
  def isStreaming: Boolean = logicalPlan.isStreaming
/**
* Eagerly checkpoint a Dataset and return the new Dataset. Checkpointing can be used to truncate
* the logical plan of this Dataset, which is especially useful in iterative algorithms where the
* plan may grow exponentially. It will be saved to files inside the checkpoint
* directory set with `SparkContext#setCheckpointDir`.
*
* @group basic
* @since 2.1.0
*/
  @Experimental
  @InterfaceStability.Evolving
  // Eager by default: the checkpoint is materialized immediately.
  def checkpoint(): Dataset[T] = checkpoint(eager = true)
/**
* Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the
* logical plan of this Dataset, which is especially useful in iterative algorithms where the
* plan may grow exponentially. It will be saved to files inside the checkpoint
* directory set with `SparkContext#setCheckpointDir`.
*
* @group basic
* @since 2.1.0
*/
  @Experimental
  @InterfaceStability.Evolving
  def checkpoint(eager: Boolean): Dataset[T] = {
    // Rows are copied before checkpointing — presumably because internal rows are
    // reused by the underlying iterators; confirm before changing.
    val internalRdd = queryExecution.toRdd.map(_.copy())
    internalRdd.checkpoint()
    if (eager) {
      // Force an action so the checkpoint files are written before we return.
      internalRdd.count()
    }
    val physicalPlan = queryExecution.executedPlan
    // Takes the first leaf partitioning whenever we see a `PartitioningCollection`. Otherwise the
    // size of `PartitioningCollection` may grow exponentially for queries involving deep inner
    // joins.
    def firstLeafPartitioning(partitioning: Partitioning): Partitioning = {
      partitioning match {
        case p: PartitioningCollection => firstLeafPartitioning(p.partitionings.head)
        case p => p
      }
    }
    val outputPartitioning = firstLeafPartitioning(physicalPlan.outputPartitioning)
    // Rebuild a Dataset over the checkpointed RDD, preserving output, partitioning,
    // ordering and streaming-ness of the original plan.
    Dataset.ofRows(
      sparkSession,
      LogicalRDD(
        logicalPlan.output,
        internalRdd,
        outputPartitioning,
        physicalPlan.outputOrdering,
        isStreaming
      )(sparkSession)).as[T]
  }
/**
* Defines an event time watermark for this [[Dataset]]. A watermark tracks a point in time
* before which we assume no more late data is going to arrive.
*
* Spark will use this watermark for several purposes:
* - To know when a given time window aggregation can be finalized and thus can be emitted when
* using output modes that do not allow updates.
* - To minimize the amount of state that we need to keep for on-going aggregations,
* `mapGroupsWithState` and `dropDuplicates` operators.
*
* The current watermark is computed by looking at the `MAX(eventTime)` seen across
* all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost
* of coordinating this value across partitions, the actual watermark used is only guaranteed
* to be at least `delayThreshold` behind the actual event time. In some cases we may still
* process records that arrive more than `delayThreshold` late.
*
* @param eventTime the name of the column that contains the event time of the row.
* @param delayThreshold the minimum delay to wait to data to arrive late, relative to the latest
* record that has been processed in the form of an interval
* (e.g. "1 minute" or "5 hours"). NOTE: This should not be negative.
*
* @group streaming
* @since 2.1.0
*/
  @InterfaceStability.Evolving
  // We only accept an existing column name, not a derived column here as a watermark that is
  // defined on a derived column cannot referenced elsewhere in the plan.
  def withWatermark(eventTime: String, delayThreshold: String): Dataset[T] = withTypedPlan {
    // Option(...) maps a null parse result to None, which is converted into an AnalysisException.
    val parsedDelay =
      Option(CalendarInterval.fromString("interval " + delayThreshold))
        .getOrElse(throw new AnalysisException(s"Unable to parse time delay '$delayThreshold'"))
    // Negative delays are rejected: both month and millisecond components must be >= 0.
    require(parsedDelay.milliseconds >= 0 && parsedDelay.months >= 0,
      s"delay threshold ($delayThreshold) should not be negative.")
    EliminateEventTimeWatermark(
      EventTimeWatermark(UnresolvedAttribute(eventTime), parsedDelay, logicalPlan))
  }
/**
* Displays the Dataset in a tabular form. Strings more than 20 characters will be truncated,
* and all cells will be aligned right. For example:
* {{{
* year month AVG('Adj Close) MAX('Adj Close)
* 1980 12 0.503218 0.595103
* 1981 01 0.523289 0.570307
* 1982 02 0.436504 0.475256
* 1983 03 0.410516 0.442194
* 1984 04 0.450090 0.483521
* }}}
*
* @param numRows Number of rows to show
*
* @group action
* @since 1.6.0
*/
def show(numRows: Int): Unit = show(numRows, truncate = true)
/**
* Displays the top 20 rows of Dataset in a tabular form. Strings more than 20 characters
* will be truncated, and all cells will be aligned right.
*
* @group action
* @since 1.6.0
*/
def show(): Unit = show(20)
/**
* Displays the top 20 rows of Dataset in a tabular form.
*
* @param truncate Whether truncate long strings. If true, strings more than 20 characters will
* be truncated and all cells will be aligned right
*
* @group action
* @since 1.6.0
*/
def show(truncate: Boolean): Unit = show(20, truncate)
/**
* Displays the Dataset in a tabular form. For example:
* {{{
* year month AVG('Adj Close) MAX('Adj Close)
* 1980 12 0.503218 0.595103
* 1981 01 0.523289 0.570307
* 1982 02 0.436504 0.475256
* 1983 03 0.410516 0.442194
* 1984 04 0.450090 0.483521
* }}}
* @param numRows Number of rows to show
* @param truncate Whether truncate long strings. If true, strings more than 20 characters will
* be truncated and all cells will be aligned right
*
* @group action
* @since 1.6.0
*/
  // scalastyle:off println
  // Boolean `truncate` maps onto the 20-character default of the Int-based overload.
  def show(numRows: Int, truncate: Boolean): Unit = if (truncate) {
    println(showString(numRows, truncate = 20))
  } else {
    println(showString(numRows, truncate = 0))
  }
  // scalastyle:on println
/**
* Displays the Dataset in a tabular form. For example:
* {{{
* year month AVG('Adj Close) MAX('Adj Close)
* 1980 12 0.503218 0.595103
* 1981 01 0.523289 0.570307
* 1982 02 0.436504 0.475256
* 1983 03 0.410516 0.442194
* 1984 04 0.450090 0.483521
* }}}
*
* @param numRows Number of rows to show
* @param truncate If set to more than 0, truncates strings to `truncate` characters and
* all cells will be aligned right.
* @group action
* @since 1.6.0
*/
def show(numRows: Int, truncate: Int): Unit = show(numRows, truncate, vertical = false)
/**
* Displays the Dataset in a tabular form. For example:
* {{{
* year month AVG('Adj Close) MAX('Adj Close)
* 1980 12 0.503218 0.595103
* 1981 01 0.523289 0.570307
* 1982 02 0.436504 0.475256
* 1983 03 0.410516 0.442194
* 1984 04 0.450090 0.483521
* }}}
*
* If `vertical` enabled, this command prints output rows vertically (one line per column value)?
*
* {{{
* -RECORD 0-------------------
* year | 1980
* month | 12
* AVG('Adj Close) | 0.503218
* AVG('Adj Close) | 0.595103
* -RECORD 1-------------------
* year | 1981
* month | 01
* AVG('Adj Close) | 0.523289
* AVG('Adj Close) | 0.570307
* -RECORD 2-------------------
* year | 1982
* month | 02
* AVG('Adj Close) | 0.436504
* AVG('Adj Close) | 0.475256
* -RECORD 3-------------------
* year | 1983
* month | 03
* AVG('Adj Close) | 0.410516
* AVG('Adj Close) | 0.442194
* -RECORD 4-------------------
* year | 1984
* month | 04
* AVG('Adj Close) | 0.450090
* AVG('Adj Close) | 0.483521
* }}}
*
* @param numRows Number of rows to show
* @param truncate If set to more than 0, truncates strings to `truncate` characters and
* all cells will be aligned right.
* @param vertical If set to true, prints output rows vertically (one line per column value).
* @group action
* @since 2.3.0
*/
  // scalastyle:off println
  // Most general overload: all other show() variants funnel into showString via this one.
  def show(numRows: Int, truncate: Int, vertical: Boolean): Unit =
    println(showString(numRows, truncate, vertical))
  // scalastyle:on println
/**
* Returns a [[DataFrameNaFunctions]] for working with missing data.
* {{{
* // Dropping rows containing any null values.
* ds.na.drop()
* }}}
*
* @group untypedrel
* @since 1.6.0
*/
def na: DataFrameNaFunctions = new DataFrameNaFunctions(toDF())
/**
* Returns a [[DataFrameStatFunctions]] for working statistic functions support.
* {{{
* // Finding frequent items in column with name 'a'.
* ds.stat.freqItems(Seq("a"))
* }}}
*
* @group untypedrel
* @since 1.6.0
*/
def stat: DataFrameStatFunctions = new DataFrameStatFunctions(toDF())
/**
* Join with another `DataFrame`.
*
* Behaves as an INNER JOIN and requires a subsequent join predicate.
*
* @param right Right side of the join operation.
*
* @group untypedrel
* @since 2.0.0
*/
  // Inner join with no condition (None): callers are expected to supply the predicate
  // afterwards, per the scaladoc above.
  def join(right: Dataset[_]): DataFrame = withPlan {
    Join(logicalPlan, right.logicalPlan, joinType = Inner, None)
  }
/**
* Inner equi-join with another `DataFrame` using the given column.
*
* Different from other join functions, the join column will only appear once in the output,
* i.e. similar to SQL's `JOIN USING` syntax.
*
* {{{
* // Joining df1 and df2 using the column "user_id"
* df1.join(df2, "user_id")
* }}}
*
* @param right Right side of the join operation.
* @param usingColumn Name of the column to join on. This column must exist on both sides.
*
* @note If you perform a self-join using this function without aliasing the input
* `DataFrame`s, you will NOT be able to reference any columns after the join, since
* there is no way to disambiguate which side of the join you would like to reference.
*
* @group untypedrel
* @since 2.0.0
*/
def join(right: Dataset[_], usingColumn: String): DataFrame = {
join(right, Seq(usingColumn))
}
/**
* Inner equi-join with another `DataFrame` using the given columns.
*
* Different from other join functions, the join columns will only appear once in the output,
* i.e. similar to SQL's `JOIN USING` syntax.
*
* {{{
* // Joining df1 and df2 using the columns "user_id" and "user_name"
* df1.join(df2, Seq("user_id", "user_name"))
* }}}
*
* @param right Right side of the join operation.
* @param usingColumns Names of the columns to join on. This columns must exist on both sides.
*
* @note If you perform a self-join using this function without aliasing the input
* `DataFrame`s, you will NOT be able to reference any columns after the join, since
* there is no way to disambiguate which side of the join you would like to reference.
*
* @group untypedrel
* @since 2.0.0
*/
def join(right: Dataset[_], usingColumns: Seq[String]): DataFrame = {
join(right, usingColumns, "inner")
}
/**
* Equi-join with another `DataFrame` using the given columns. A cross join with a predicate
* is specified as an inner join. If you would explicitly like to perform a cross join use the
* `crossJoin` method.
*
* Different from other join functions, the join columns will only appear once in the output,
* i.e. similar to SQL's `JOIN USING` syntax.
*
* @param right Right side of the join operation.
* @param usingColumns Names of the columns to join on. This columns must exist on both sides.
* @param joinType Type of join to perform. Default `inner`. Must be one of:
* `inner`, `cross`, `outer`, `full`, `full_outer`, `left`, `left_outer`,
* `right`, `right_outer`, `left_semi`, `left_anti`.
*
* @note If you perform a self-join using this function without aliasing the input
* `DataFrame`s, you will NOT be able to reference any columns after the join, since
* there is no way to disambiguate which side of the join you would like to reference.
*
* @group untypedrel
* @since 2.0.0
*/
  def join(right: Dataset[_], usingColumns: Seq[String], joinType: String): DataFrame = {
    // Analyze the self join. The assumption is that the analyzer will disambiguate left vs right
    // by creating a new instance for one of the branch.
    val joined = sparkSession.sessionState.executePlan(
      Join(logicalPlan, right.logicalPlan, joinType = JoinType(joinType), None))
      .analyzed.asInstanceOf[Join]
    // Rebuild the join as a USING join over the (possibly disambiguated) children,
    // so the join columns appear only once in the output.
    withPlan {
      Join(
        joined.left,
        joined.right,
        UsingJoin(JoinType(joinType), usingColumns),
        None)
    }
  }
/**
* Inner join with another `DataFrame`, using the given join expression.
*
* {{{
* // The following two are equivalent:
* df1.join(df2, $"df1Key" === $"df2Key")
* df1.join(df2).where($"df1Key" === $"df2Key")
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
def join(right: Dataset[_], joinExprs: Column): DataFrame = join(right, joinExprs, "inner")
  /**
   * Join with another `DataFrame`, using the given join expression. The following performs
   * a full outer join between `df1` and `df2`.
   *
   * {{{
   *   // Scala:
   *   import org.apache.spark.sql.functions._
   *   df1.join(df2, $"df1Key" === $"df2Key", "outer")
   *
   *   // Java:
   *   import static org.apache.spark.sql.functions.*;
   *   df1.join(df2, col("df1Key").equalTo(col("df2Key")), "outer");
   * }}}
   *
   * @param right Right side of the join.
   * @param joinExprs Join expression.
   * @param joinType Type of join to perform. Default `inner`. Must be one of:
   *                 `inner`, `cross`, `outer`, `full`, `full_outer`, `left`, `left_outer`,
   *                 `right`, `right_outer`, `left_semi`, `left_anti`.
   *
   * @group untypedrel
   * @since 2.0.0
   */
  def join(right: Dataset[_], joinExprs: Column, joinType: String): DataFrame = {
    // Note that in this function, we introduce a hack in the case of self-join to automatically
    // resolve ambiguous join conditions into ones that might make sense [SPARK-6231].
    // Consider this case: df.join(df, df("key") === df("key"))
    // Since df("key") === df("key") is a trivially true condition, this actually becomes a
    // cartesian join. However, most likely users expect to perform a self join using "key".
    // With that assumption, this hack turns the trivially true condition into equality on join
    // keys that are resolved to both sides.
    // Trigger analysis so in the case of self-join, the analyzer will clone the plan.
    // After the cloning, left and right side will have distinct expression ids.
    val plan = withPlan(
      Join(logicalPlan, right.logicalPlan, JoinType(joinType), Some(joinExprs.expr)))
      .queryExecution.analyzed.asInstanceOf[Join]
    // If auto self join alias is disabled, return the plan.
    if (!sparkSession.sessionState.conf.dataFrameSelfJoinAutoResolveAmbiguity) {
      return withPlan(plan)
    }
    // If left/right have no output set intersection, return the plan.
    // (No shared attribute ids means this cannot be an ambiguous self-join.)
    val lanalyzed = withPlan(this.logicalPlan).queryExecution.analyzed
    val ranalyzed = withPlan(right.logicalPlan).queryExecution.analyzed
    if (lanalyzed.outputSet.intersect(ranalyzed.outputSet).isEmpty) {
      return withPlan(plan)
    }
    // Otherwise, find the trivially true predicates and automatically resolves them to both sides.
    // By the time we get here, since we have already run analysis, all attributes should've been
    // resolved and become AttributeReference.
    // a.sameRef(b) identifies a trivially-true equality (both sides are literally the same
    // attribute); re-resolve that name against each child so it compares left vs right instead.
    val cond = plan.condition.map { _.transform {
      case catalyst.expressions.EqualTo(a: AttributeReference, b: AttributeReference)
          if a.sameRef(b) =>
        catalyst.expressions.EqualTo(
          withPlan(plan.left).resolve(a.name),
          withPlan(plan.right).resolve(b.name))
    }}
    withPlan {
      plan.copy(condition = cond)
    }
  }
/**
* Explicit cartesian join with another `DataFrame`.
*
* @param right Right side of the join operation.
*
* @note Cartesian joins are very expensive without an extra filter that can be pushed down.
*
* @group untypedrel
* @since 2.1.0
*/
def crossJoin(right: Dataset[_]): DataFrame = withPlan {
Join(logicalPlan, right.logicalPlan, joinType = Cross, None)
}
  /**
   * :: Experimental ::
   * Joins this Dataset returning a `Tuple2` for each pair where `condition` evaluates to
   * true.
   *
   * This is similar to the relation `join` function with one important difference in the
   * result schema. Since `joinWith` preserves objects present on either side of the join, the
   * result schema is similarly nested into a tuple under the column names `_1` and `_2`.
   *
   * This type of join can be useful both for preserving type-safety with the original object
   * types as well as working with relational data where either side of the join has column
   * names in common.
   *
   * @param other Right side of the join.
   * @param condition Join expression.
   * @param joinType Type of join to perform. Default `inner`. Must be one of:
   *                 `inner`, `cross`, `outer`, `full`, `full_outer`, `left`, `left_outer`,
   *                 `right`, `right_outer`.
   *
   * @group typedrel
   * @since 1.6.0
   */
  @Experimental
  @InterfaceStability.Evolving
  def joinWith[U](other: Dataset[U], condition: Column, joinType: String): Dataset[(T, U)] = {
    // Creates a Join node and resolve it first, to get join condition resolved, self-join resolved,
    // etc.
    val joined = sparkSession.sessionState.executePlan(
      Join(
        this.logicalPlan,
        other.logicalPlan,
        JoinType(joinType),
        Some(condition.expr))).analyzed.asInstanceOf[Join]
    // Semi/anti joins return only one side, so there is no sensible (T, U) result schema.
    if (joined.joinType == LeftSemi || joined.joinType == LeftAnti) {
      throw new AnalysisException("Invalid join type in joinWith: " + joined.joinType.sql)
    }
    // For both join side, combine all outputs into a single column and alias it with "_1" or "_2",
    // to match the schema for the encoder of the join result.
    // Note that we do this before joining them, to enable the join operator to return null for one
    // side, in cases like outer-join.
    val left = {
      // A flat encoder produces exactly one column, which is kept as-is; otherwise all output
      // columns are wrapped into a single struct.
      val combined = if (this.exprEnc.flat) {
        assert(joined.left.output.length == 1)
        Alias(joined.left.output.head, "_1")()
      } else {
        Alias(CreateStruct(joined.left.output), "_1")()
      }
      Project(combined :: Nil, joined.left)
    }
    val right = {
      val combined = if (other.exprEnc.flat) {
        assert(joined.right.output.length == 1)
        Alias(joined.right.output.head, "_2")()
      } else {
        Alias(CreateStruct(joined.right.output), "_2")()
      }
      Project(combined :: Nil, joined.right)
    }
    // Rewrites the join condition to make the attribute point to correct column/field, after we
    // combine the outputs of each join side.
    val conditionExpr = joined.condition.get transformUp {
      case a: Attribute if joined.left.outputSet.contains(a) =>
        if (this.exprEnc.flat) {
          left.output.head
        } else {
          // Non-flat side: the attribute is now a field of the "_1" struct at the same index.
          val index = joined.left.output.indexWhere(_.exprId == a.exprId)
          GetStructField(left.output.head, index)
        }
      case a: Attribute if joined.right.outputSet.contains(a) =>
        if (other.exprEnc.flat) {
          right.output.head
        } else {
          val index = joined.right.output.indexWhere(_.exprId == a.exprId)
          GetStructField(right.output.head, index)
        }
    }
    implicit val tuple2Encoder: Encoder[(T, U)] =
      ExpressionEncoder.tuple(this.exprEnc, other.exprEnc)
    withTypedPlan(Join(left, right, joined.joinType, Some(conditionExpr)))
  }
/**
* :: Experimental ::
* Using inner equi-join to join this Dataset returning a `Tuple2` for each pair
* where `condition` evaluates to true.
*
* @param other Right side of the join.
* @param condition Join expression.
*
* @group typedrel
* @since 1.6.0
*/
@Experimental
@InterfaceStability.Evolving
def joinWith[U](other: Dataset[U], condition: Column): Dataset[(T, U)] = {
joinWith(other, condition, "inner")
}
/**
* Returns a new Dataset with each partition sorted by the given expressions.
*
* This is the same operation as "SORT BY" in SQL (Hive QL).
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def sortWithinPartitions(sortCol: String, sortCols: String*): Dataset[T] = {
sortWithinPartitions((sortCol +: sortCols).map(Column(_)) : _*)
}
/**
* Returns a new Dataset with each partition sorted by the given expressions.
*
* This is the same operation as "SORT BY" in SQL (Hive QL).
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def sortWithinPartitions(sortExprs: Column*): Dataset[T] = {
sortInternal(global = false, sortExprs)
}
/**
* Returns a new Dataset sorted by the specified column, all in ascending order.
* {{{
* // The following 3 are equivalent
* ds.sort("sortcol")
* ds.sort($"sortcol")
* ds.sort($"sortcol".asc)
* }}}
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def sort(sortCol: String, sortCols: String*): Dataset[T] = {
sort((sortCol +: sortCols).map(Column(_)) : _*)
}
/**
* Returns a new Dataset sorted by the given expressions. For example:
* {{{
* ds.sort($"col1", $"col2".desc)
* }}}
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def sort(sortExprs: Column*): Dataset[T] = {
sortInternal(global = true, sortExprs)
}
/**
* Returns a new Dataset sorted by the given expressions.
* This is an alias of the `sort` function.
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def orderBy(sortCol: String, sortCols: String*): Dataset[T] = sort(sortCol, sortCols : _*)
/**
* Returns a new Dataset sorted by the given expressions.
* This is an alias of the `sort` function.
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def orderBy(sortExprs: Column*): Dataset[T] = sort(sortExprs : _*)
/**
* Selects column based on the column name and return it as a [[Column]].
*
* @note The column name can also reference to a nested column like `a.b`.
*
* @group untypedrel
* @since 2.0.0
*/
def apply(colName: String): Column = col(colName)
  /**
   * Specifies some hint on the current Dataset. As an example, the following code specifies
   * that one of the plans can be broadcast:
   *
   * {{{
   *   df1.join(df2.hint("broadcast"))
   * }}}
   *
   * @group basic
   * @since 2.2.0
   */
  @scala.annotation.varargs
  def hint(name: String, parameters: Any*): Dataset[T] = withTypedPlan {
    // Wrap the current plan in an UnresolvedHint node, left for the analyzer to resolve.
    UnresolvedHint(name, parameters, logicalPlan)
  }
/**
* Selects column based on the column name and return it as a [[Column]].
*
* @note The column name can also reference to a nested column like `a.b`.
*
* @group untypedrel
* @since 2.0.0
*/
def col(colName: String): Column = colName match {
case "*" =>
Column(ResolvedStar(queryExecution.analyzed.output))
case _ =>
if (sqlContext.conf.supportQuotedRegexColumnName) {
colRegex(colName)
} else {
val expr = resolve(colName)
Column(expr)
}
}
  /**
   * Selects column based on the column name specified as a regex and return it as [[Column]].
   * @group untypedrel
   * @since 2.3.0
   */
  def colRegex(colName: String): Column = {
    val caseSensitive = sparkSession.sessionState.conf.caseSensitiveAnalysis
    colName match {
      // Escaped identifier carrying a regex, no qualifier (see ParserUtils.escapedIdentifier).
      case ParserUtils.escapedIdentifier(columnNameRegex) =>
        Column(UnresolvedRegex(columnNameRegex, None, caseSensitive))
      // Qualified escaped identifier: a table/alias prefix plus an escaped regex.
      case ParserUtils.qualifiedEscapedIdentifier(nameParts, columnNameRegex) =>
        Column(UnresolvedRegex(columnNameRegex, Some(nameParts), caseSensitive))
      case _ =>
        // Not an escaped regex: fall back to ordinary column resolution.
        Column(resolve(colName))
    }
  }
  /**
   * Returns a new Dataset with an alias set.
   *
   * @group typedrel
   * @since 1.6.0
   */
  def as(alias: String): Dataset[T] = withTypedPlan {
    // Wraps the plan in a SubqueryAlias node so columns can be qualified by the alias.
    SubqueryAlias(alias, logicalPlan)
  }
  /**
   * (Scala-specific) Returns a new Dataset with an alias set.
   *
   * @group typedrel
   * @since 2.0.0
   */
  def as(alias: Symbol): Dataset[T] = as(alias.name)
  /**
   * Returns a new Dataset with an alias set. Same as `as`.
   *
   * @group typedrel
   * @since 2.0.0
   */
  def alias(alias: String): Dataset[T] = as(alias)
  /**
   * (Scala-specific) Returns a new Dataset with an alias set. Same as `as`.
   *
   * @group typedrel
   * @since 2.0.0
   */
  def alias(alias: Symbol): Dataset[T] = as(alias)
/**
* Selects a set of column based expressions.
* {{{
* ds.select($"colA", $"colB" + 1)
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def select(cols: Column*): DataFrame = withPlan {
Project(cols.map(_.named), logicalPlan)
}
/**
* Selects a set of columns. This is a variant of `select` that can only select
* existing columns using column names (i.e. cannot construct expressions).
*
* {{{
* // The following two are equivalent:
* ds.select("colA", "colB")
* ds.select($"colA", $"colB")
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def select(col: String, cols: String*): DataFrame = select((col +: cols).map(Column(_)) : _*)
/**
* Selects a set of SQL expressions. This is a variant of `select` that accepts
* SQL expressions.
*
* {{{
* // The following are equivalent:
* ds.selectExpr("colA", "colB as newName", "abs(colC)")
* ds.select(expr("colA"), expr("colB as newName"), expr("abs(colC)"))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def selectExpr(exprs: String*): DataFrame = {
select(exprs.map { expr =>
Column(sparkSession.sessionState.sqlParser.parseExpression(expr))
}: _*)
}
  /**
   * :: Experimental ::
   * Returns a new Dataset by computing the given [[Column]] expression for each element.
   *
   * {{{
   *   val ds = Seq(1, 2, 3).toDS()
   *   val newDS = ds.select(expr("value + 1").as[Int])
   * }}}
   *
   * @group typedrel
   * @since 1.6.0
   */
  @Experimental
  @InterfaceStability.Evolving
  def select[U1](c1: TypedColumn[T, U1]): Dataset[U1] = {
    implicit val encoder = c1.encoder
    val project = Project(c1.withInputType(exprEnc, logicalPlan.output).named :: Nil,
      logicalPlan)
    // A flat encoder maps to a single column, so the projection can be used directly.
    if (encoder.flat) {
      new Dataset[U1](sparkSession, project, encoder)
    } else {
      // Flattens inner fields of U1: wrap in Tuple1 so the nested struct is unwrapped by _._1.
      new Dataset[Tuple1[U1]](sparkSession, project, ExpressionEncoder.tuple(encoder)).map(_._1)
    }
  }
  /**
   * Internal helper function for building typed selects that return tuples. For simplicity and
   * code reuse, we do this without the help of the type system and then use helper functions
   * that cast appropriately for the user facing interface.
   */
  protected def selectUntyped(columns: TypedColumn[_, _]*): Dataset[_] = {
    val encoders = columns.map(_.encoder)
    val namedColumns =
      columns.map(_.withInputType(exprEnc, logicalPlan.output).named)
    val execution = new QueryExecution(sparkSession, Project(namedColumns, logicalPlan))
    new Dataset(sparkSession, execution, ExpressionEncoder.tuple(encoders))
  }
  /**
   * :: Experimental ::
   * Returns a new Dataset by computing the given [[Column]] expressions for each element.
   *
   * @group typedrel
   * @since 1.6.0
   */
  @Experimental
  @InterfaceStability.Evolving
  def select[U1, U2](c1: TypedColumn[T, U1], c2: TypedColumn[T, U2]): Dataset[(U1, U2)] =
    // Delegates to the untyped helper and casts the result back to the tuple type.
    selectUntyped(c1, c2).asInstanceOf[Dataset[(U1, U2)]]
  /**
   * :: Experimental ::
   * Returns a new Dataset by computing the given [[Column]] expressions for each element.
   *
   * @group typedrel
   * @since 1.6.0
   */
  @Experimental
  @InterfaceStability.Evolving
  def select[U1, U2, U3](
      c1: TypedColumn[T, U1],
      c2: TypedColumn[T, U2],
      c3: TypedColumn[T, U3]): Dataset[(U1, U2, U3)] =
    selectUntyped(c1, c2, c3).asInstanceOf[Dataset[(U1, U2, U3)]]
  /**
   * :: Experimental ::
   * Returns a new Dataset by computing the given [[Column]] expressions for each element.
   *
   * @group typedrel
   * @since 1.6.0
   */
  @Experimental
  @InterfaceStability.Evolving
  def select[U1, U2, U3, U4](
      c1: TypedColumn[T, U1],
      c2: TypedColumn[T, U2],
      c3: TypedColumn[T, U3],
      c4: TypedColumn[T, U4]): Dataset[(U1, U2, U3, U4)] =
    selectUntyped(c1, c2, c3, c4).asInstanceOf[Dataset[(U1, U2, U3, U4)]]
  /**
   * :: Experimental ::
   * Returns a new Dataset by computing the given [[Column]] expressions for each element.
   *
   * @group typedrel
   * @since 1.6.0
   */
  @Experimental
  @InterfaceStability.Evolving
  def select[U1, U2, U3, U4, U5](
      c1: TypedColumn[T, U1],
      c2: TypedColumn[T, U2],
      c3: TypedColumn[T, U3],
      c4: TypedColumn[T, U4],
      c5: TypedColumn[T, U5]): Dataset[(U1, U2, U3, U4, U5)] =
    selectUntyped(c1, c2, c3, c4, c5).asInstanceOf[Dataset[(U1, U2, U3, U4, U5)]]
/**
* Filters rows using the given condition.
* {{{
* // The following are equivalent:
* peopleDs.filter($"age" > 15)
* peopleDs.where($"age" > 15)
* }}}
*
* @group typedrel
* @since 1.6.0
*/
def filter(condition: Column): Dataset[T] = withTypedPlan {
Filter(condition.expr, logicalPlan)
}
/**
* Filters rows using the given SQL expression.
* {{{
* peopleDs.filter("age > 15")
* }}}
*
* @group typedrel
* @since 1.6.0
*/
def filter(conditionExpr: String): Dataset[T] = {
filter(Column(sparkSession.sessionState.sqlParser.parseExpression(conditionExpr)))
}
  /**
   * Filters rows using the given condition. This is an alias for `filter`.
   * {{{
   *   // The following are equivalent:
   *   peopleDs.filter($"age" > 15)
   *   peopleDs.where($"age" > 15)
   * }}}
   *
   * @group typedrel
   * @since 1.6.0
   */
  def where(condition: Column): Dataset[T] = filter(condition) // pure alias of filter(Column)
/**
* Filters rows using the given SQL expression.
* {{{
* peopleDs.where("age > 15")
* }}}
*
* @group typedrel
* @since 1.6.0
*/
def where(conditionExpr: String): Dataset[T] = {
filter(Column(sparkSession.sessionState.sqlParser.parseExpression(conditionExpr)))
}
/**
* Groups the Dataset using the specified columns, so we can run aggregation on them. See
* [[RelationalGroupedDataset]] for all the available aggregate functions.
*
* {{{
* // Compute the average for all numeric columns grouped by department.
* ds.groupBy($"department").avg()
*
* // Compute the max age and average salary, grouped by department and gender.
* ds.groupBy($"department", $"gender").agg(Map(
* "salary" -> "avg",
* "age" -> "max"
* ))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def groupBy(cols: Column*): RelationalGroupedDataset = {
RelationalGroupedDataset(toDF(), cols.map(_.expr), RelationalGroupedDataset.GroupByType)
}
/**
* Create a multi-dimensional rollup for the current Dataset using the specified columns,
* so we can run aggregation on them.
* See [[RelationalGroupedDataset]] for all the available aggregate functions.
*
* {{{
* // Compute the average for all numeric columns rolluped by department and group.
* ds.rollup($"department", $"group").avg()
*
* // Compute the max age and average salary, rolluped by department and gender.
* ds.rollup($"department", $"gender").agg(Map(
* "salary" -> "avg",
* "age" -> "max"
* ))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def rollup(cols: Column*): RelationalGroupedDataset = {
RelationalGroupedDataset(toDF(), cols.map(_.expr), RelationalGroupedDataset.RollupType)
}
/**
* Create a multi-dimensional cube for the current Dataset using the specified columns,
* so we can run aggregation on them.
* See [[RelationalGroupedDataset]] for all the available aggregate functions.
*
* {{{
* // Compute the average for all numeric columns cubed by department and group.
* ds.cube($"department", $"group").avg()
*
* // Compute the max age and average salary, cubed by department and gender.
* ds.cube($"department", $"gender").agg(Map(
* "salary" -> "avg",
* "age" -> "max"
* ))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def cube(cols: Column*): RelationalGroupedDataset = {
RelationalGroupedDataset(toDF(), cols.map(_.expr), RelationalGroupedDataset.CubeType)
}
/**
* Groups the Dataset using the specified columns, so that we can run aggregation on them.
* See [[RelationalGroupedDataset]] for all the available aggregate functions.
*
* This is a variant of groupBy that can only group by existing columns using column names
* (i.e. cannot construct expressions).
*
* {{{
* // Compute the average for all numeric columns grouped by department.
* ds.groupBy("department").avg()
*
* // Compute the max age and average salary, grouped by department and gender.
* ds.groupBy($"department", $"gender").agg(Map(
* "salary" -> "avg",
* "age" -> "max"
* ))
* }}}
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def groupBy(col1: String, cols: String*): RelationalGroupedDataset = {
val colNames: Seq[String] = col1 +: cols
RelationalGroupedDataset(
toDF(), colNames.map(colName => resolve(colName)), RelationalGroupedDataset.GroupByType)
}
  /**
   * :: Experimental ::
   * (Scala-specific)
   * Reduces the elements of this Dataset using the specified binary function. The given `func`
   * must be commutative and associative or the result may be non-deterministic.
   *
   * @group action
   * @since 1.6.0
   */
  @Experimental
  @InterfaceStability.Evolving
  // Delegates to RDD.reduce — an action that triggers execution of the query.
  def reduce(func: (T, T) => T): T = rdd.reduce(func)
  /**
   * :: Experimental ::
   * (Java-specific)
   * Reduces the elements of this Dataset using the specified binary function. The given `func`
   * must be commutative and associative or the result may be non-deterministic.
   *
   * @group action
   * @since 1.6.0
   */
  @Experimental
  @InterfaceStability.Evolving
  // Adapts the Java functional interface to the Scala overload above.
  def reduce(func: ReduceFunction[T]): T = reduce(func.call(_, _))
  /**
   * :: Experimental ::
   * (Scala-specific)
   * Returns a [[KeyValueGroupedDataset]] where the data is grouped by the given key `func`.
   *
   * @group typedrel
   * @since 2.0.0
   */
  @Experimental
  @InterfaceStability.Evolving
  def groupByKey[K: Encoder](func: T => K): KeyValueGroupedDataset[K, T] = {
    val inputPlan = logicalPlan
    // AppendColumns evaluates `func` per row and appends the grouping key as new column(s)
    // (exposed via withGroupingKey.newColumns below).
    val withGroupingKey = AppendColumns(func, inputPlan)
    val executed = sparkSession.sessionState.executePlan(withGroupingKey)
    new KeyValueGroupedDataset(
      encoderFor[K],
      encoderFor[T],
      executed,
      inputPlan.output,
      withGroupingKey.newColumns)
  }
  /**
   * :: Experimental ::
   * (Java-specific)
   * Returns a [[KeyValueGroupedDataset]] where the data is grouped by the given key `func`.
   *
   * @group typedrel
   * @since 2.0.0
   */
  @Experimental
  @InterfaceStability.Evolving
  // Adapts the Java MapFunction and explicit encoder to the Scala overload above.
  def groupByKey[K](func: MapFunction[T, K], encoder: Encoder[K]): KeyValueGroupedDataset[K, T] =
    groupByKey(func.call(_))(encoder)
/**
* Create a multi-dimensional rollup for the current Dataset using the specified columns,
* so we can run aggregation on them.
* See [[RelationalGroupedDataset]] for all the available aggregate functions.
*
* This is a variant of rollup that can only group by existing columns using column names
* (i.e. cannot construct expressions).
*
* {{{
* // Compute the average for all numeric columns rolluped by department and group.
* ds.rollup("department", "group").avg()
*
* // Compute the max age and average salary, rolluped by department and gender.
* ds.rollup($"department", $"gender").agg(Map(
* "salary" -> "avg",
* "age" -> "max"
* ))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def rollup(col1: String, cols: String*): RelationalGroupedDataset = {
val colNames: Seq[String] = col1 +: cols
RelationalGroupedDataset(
toDF(), colNames.map(colName => resolve(colName)), RelationalGroupedDataset.RollupType)
}
/**
* Create a multi-dimensional cube for the current Dataset using the specified columns,
* so we can run aggregation on them.
* See [[RelationalGroupedDataset]] for all the available aggregate functions.
*
* This is a variant of cube that can only group by existing columns using column names
* (i.e. cannot construct expressions).
*
* {{{
* // Compute the average for all numeric columns cubed by department and group.
* ds.cube("department", "group").avg()
*
* // Compute the max age and average salary, cubed by department and gender.
* ds.cube($"department", $"gender").agg(Map(
* "salary" -> "avg",
* "age" -> "max"
* ))
* }}}
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def cube(col1: String, cols: String*): RelationalGroupedDataset = {
val colNames: Seq[String] = col1 +: cols
RelationalGroupedDataset(
toDF(), colNames.map(colName => resolve(colName)), RelationalGroupedDataset.CubeType)
}
/**
* (Scala-specific) Aggregates on the entire Dataset without groups.
* {{{
* // ds.agg(...) is a shorthand for ds.groupBy().agg(...)
* ds.agg("age" -> "max", "salary" -> "avg")
* ds.groupBy().agg("age" -> "max", "salary" -> "avg")
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
def agg(aggExpr: (String, String), aggExprs: (String, String)*): DataFrame = {
groupBy().agg(aggExpr, aggExprs : _*)
}
  /**
   * (Scala-specific) Aggregates on the entire Dataset without groups.
   * {{{
   *   // ds.agg(...) is a shorthand for ds.groupBy().agg(...)
   *   ds.agg(Map("age" -> "max", "salary" -> "avg"))
   *   ds.groupBy().agg(Map("age" -> "max", "salary" -> "avg"))
   * }}}
   *
   * @group untypedrel
   * @since 2.0.0
   */
  // Global aggregation == grouping by zero columns.
  def agg(exprs: Map[String, String]): DataFrame = groupBy().agg(exprs)
  /**
   * (Java-specific) Aggregates on the entire Dataset without groups.
   * {{{
   *   // ds.agg(...) is a shorthand for ds.groupBy().agg(...)
   *   ds.agg(Map("age" -> "max", "salary" -> "avg"))
   *   ds.groupBy().agg(Map("age" -> "max", "salary" -> "avg"))
   * }}}
   *
   * @group untypedrel
   * @since 2.0.0
   */
  def agg(exprs: java.util.Map[String, String]): DataFrame = groupBy().agg(exprs)
  /**
   * Aggregates on the entire Dataset without groups.
   * {{{
   *   // ds.agg(...) is a shorthand for ds.groupBy().agg(...)
   *   ds.agg(max($"age"), avg($"salary"))
   *   ds.groupBy().agg(max($"age"), avg($"salary"))
   * }}}
   *
   * @group untypedrel
   * @since 2.0.0
   */
  @scala.annotation.varargs
  def agg(expr: Column, exprs: Column*): DataFrame = groupBy().agg(expr, exprs : _*)
/**
* Returns a new Dataset by taking the first `n` rows. The difference between this function
* and `head` is that `head` is an action and returns an array (by triggering query execution)
* while `limit` returns a new Dataset.
*
* @group typedrel
* @since 2.0.0
*/
def limit(n: Int): Dataset[T] = withTypedPlan {
Limit(Literal(n), logicalPlan)
}
  /**
   * Returns a new Dataset containing union of rows in this Dataset and another Dataset.
   *
   * This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union (that does
   * deduplication of elements), use this function followed by a [[distinct]].
   *
   * Also as standard in SQL, this function resolves columns by position (not by name).
   *
   * @group typedrel
   * @since 2.0.0
   */
  @deprecated("use union()", "2.0.0")
  // Deprecated alias of union(); kept for source compatibility.
  def unionAll(other: Dataset[T]): Dataset[T] = union(other)
  /**
   * Returns a new Dataset containing union of rows in this Dataset and another Dataset.
   *
   * This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union (that does
   * deduplication of elements), use this function followed by a [[distinct]].
   *
   * Also as standard in SQL, this function resolves columns by position (not by name).
   *
   * @group typedrel
   * @since 2.0.0
   */
  def union(other: Dataset[T]): Dataset[T] = withSetOperator {
    // This breaks caching, but it's usually ok because it addresses a very specific use case:
    // using union to union many files or partitions.
    // CombineUnions flattens adjacent Union nodes eagerly.
    CombineUnions(Union(logicalPlan, other.logicalPlan))
  }
  /**
   * Returns a new Dataset containing union of rows in this Dataset and another Dataset.
   *
   * This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set
   * union (that does deduplication of elements), use this function followed by a [[distinct]].
   *
   * The difference between this function and [[union]] is that this function
   * resolves columns by name (not by position):
   *
   * {{{
   *   val df1 = Seq((1, 2, 3)).toDF("col0", "col1", "col2")
   *   val df2 = Seq((4, 5, 6)).toDF("col1", "col2", "col0")
   *   df1.unionByName(df2).show
   *
   *   // output:
   *   // +----+----+----+
   *   // |col0|col1|col2|
   *   // +----+----+----+
   *   // |   1|   2|   3|
   *   // |   6|   4|   5|
   *   // +----+----+----+
   * }}}
   *
   * @group typedrel
   * @since 2.3.0
   */
  def unionByName(other: Dataset[T]): Dataset[T] = withSetOperator {
    // Check column name duplication
    val resolver = sparkSession.sessionState.analyzer.resolver
    val leftOutputAttrs = logicalPlan.output
    val rightOutputAttrs = other.logicalPlan.output
    SchemaUtils.checkColumnNameDuplication(
      leftOutputAttrs.map(_.name),
      "in the left attributes",
      sparkSession.sessionState.conf.caseSensitiveAnalysis)
    SchemaUtils.checkColumnNameDuplication(
      rightOutputAttrs.map(_.name),
      "in the right attributes",
      sparkSession.sessionState.conf.caseSensitiveAnalysis)
    // Builds a project list for `other` based on `logicalPlan` output names
    // (every left column must resolve to exactly one right column).
    val rightProjectList = leftOutputAttrs.map { lattr =>
      rightOutputAttrs.find { rattr => resolver(lattr.name, rattr.name) }.getOrElse {
        throw new AnalysisException(
          s"""Cannot resolve column name "${lattr.name}" among """ +
            s"""(${rightOutputAttrs.map(_.name).mkString(", ")})""")
      }
    }
    // Delegates failure checks to `CheckAnalysis`
    // (right-only columns are appended so the later analysis reports them, not this method).
    val notFoundAttrs = rightOutputAttrs.diff(rightProjectList)
    val rightChild = Project(rightProjectList ++ notFoundAttrs, other.logicalPlan)
    // This breaks caching, but it's usually ok because it addresses a very specific use case:
    // using union to union many files or partitions.
    CombineUnions(Union(logicalPlan, rightChild))
  }
/**
* Returns a new Dataset containing rows only in both this Dataset and another Dataset.
* This is equivalent to `INTERSECT` in SQL.
*
* @note Equality checking is performed directly on the encoded representation of the data
* and thus is not affected by a custom `equals` function defined on `T`.
*
* @group typedrel
* @since 1.6.0
*/
def intersect(other: Dataset[T]): Dataset[T] = withSetOperator {
Intersect(logicalPlan, other.logicalPlan)
}
/**
* Returns a new Dataset containing rows in this Dataset but not in another Dataset.
* This is equivalent to `EXCEPT` in SQL.
*
* @note Equality checking is performed directly on the encoded representation of the data
* and thus is not affected by a custom `equals` function defined on `T`.
*
* @group typedrel
* @since 2.0.0
*/
def except(other: Dataset[T]): Dataset[T] = withSetOperator {
Except(logicalPlan, other.logicalPlan)
}
/**
* Returns a new [[Dataset]] by sampling a fraction of rows (without replacement),
* using a user-supplied seed.
*
* @param fraction Fraction of rows to generate, range [0.0, 1.0].
* @param seed Seed for sampling.
*
* @note This is NOT guaranteed to provide exactly the fraction of the count
* of the given [[Dataset]].
*
* @group typedrel
* @since 2.3.0
*/
def sample(fraction: Double, seed: Long): Dataset[T] = {
sample(withReplacement = false, fraction = fraction, seed = seed)
}
/**
* Returns a new [[Dataset]] by sampling a fraction of rows (without replacement),
* using a random seed.
*
* @param fraction Fraction of rows to generate, range [0.0, 1.0].
*
* @note This is NOT guaranteed to provide exactly the fraction of the count
* of the given [[Dataset]].
*
* @group typedrel
* @since 2.3.0
*/
def sample(fraction: Double): Dataset[T] = {
sample(withReplacement = false, fraction = fraction)
}
/**
* Returns a new [[Dataset]] by sampling a fraction of rows, using a user-supplied seed.
*
* @param withReplacement Sample with replacement or not.
* @param fraction Fraction of rows to generate, range [0.0, 1.0].
* @param seed Seed for sampling.
*
* @note This is NOT guaranteed to provide exactly the fraction of the count
* of the given [[Dataset]].
*
* @group typedrel
* @since 1.6.0
*/
def sample(withReplacement: Boolean, fraction: Double, seed: Long): Dataset[T] = {
withTypedPlan {
Sample(0.0, fraction, withReplacement, seed, logicalPlan)
}
}
/**
* Returns a new [[Dataset]] by sampling a fraction of rows, using a random seed.
*
* @param withReplacement Sample with replacement or not.
* @param fraction Fraction of rows to generate, range [0.0, 1.0].
*
* @note This is NOT guaranteed to provide exactly the fraction of the total count
* of the given [[Dataset]].
*
* @group typedrel
* @since 1.6.0
*/
def sample(withReplacement: Boolean, fraction: Double): Dataset[T] = {
sample(withReplacement, fraction, Utils.random.nextLong)
}
  /**
   * Randomly splits this Dataset with the provided weights.
   *
   * @param weights weights for splits, will be normalized if they don't sum to 1.
   * @param seed Seed for sampling.
   *
   * For Java API, use [[randomSplitAsList]].
   *
   * @group typedrel
   * @since 2.0.0
   */
  def randomSplit(weights: Array[Double], seed: Long): Array[Dataset[T]] = {
    require(weights.forall(_ >= 0),
      s"Weights must be nonnegative, but got ${weights.mkString("[", ",", "]")}")
    require(weights.sum > 0,
      s"Sum of weights must be positive, but got ${weights.mkString("[", ",", "]")}")
    // It is possible that the underlying dataframe doesn't guarantee the ordering of rows in its
    // constituent partitions each time a split is materialized which could result in
    // overlapping splits. To prevent this, we explicitly sort each input partition to make the
    // ordering deterministic. Note that MapTypes cannot be sorted and are explicitly pruned out
    // from the sort order.
    val sortOrder = logicalPlan.output
      .filter(attr => RowOrdering.isOrderable(attr.dataType))
      .map(SortOrder(_, Ascending))
    val plan = if (sortOrder.nonEmpty) {
      Sort(sortOrder, global = false, logicalPlan)
    } else {
      // SPARK-12662: If sort order is empty, we materialize the dataset to guarantee determinism
      cache()
      logicalPlan
    }
    val sum = weights.sum
    // Normalize into cumulative bounds: 0.0, w1, w1+w2, ..., 1.0.
    val normalizedCumWeights = weights.map(_ / sum).scanLeft(0.0d)(_ + _)
    // Each adjacent pair [lowerBound, upperBound) becomes one Sample over the same shared plan.
    normalizedCumWeights.sliding(2).map { x =>
      new Dataset[T](
        sparkSession, Sample(x(0), x(1), withReplacement = false, seed, plan), encoder)
    }.toArray
  }
/**
* Returns a Java list that contains randomly split Dataset with the provided weights.
*
* @param weights weights for splits, will be normalized if they don't sum to 1.
* @param seed Seed for sampling.
*
* @group typedrel
* @since 2.0.0
*/
  // Java-friendly wrapper: materializes the splits array into a fixed-size java.util.List.
  def randomSplitAsList(weights: Array[Double], seed: Long): java.util.List[Dataset[T]] = {
    val values = randomSplit(weights, seed)
    java.util.Arrays.asList(values : _*)
  }
/**
* Randomly splits this Dataset with the provided weights.
*
* @param weights weights for splits, will be normalized if they don't sum to 1.
* @group typedrel
* @since 2.0.0
*/
  // Unseeded variant: draws a random seed and delegates to the seeded overload.
  def randomSplit(weights: Array[Double]): Array[Dataset[T]] = {
    randomSplit(weights, Utils.random.nextLong)
  }
/**
* Randomly splits this Dataset with the provided weights. Provided for the Python Api.
*
* @param weights weights for splits, will be normalized if they don't sum to 1.
* @param seed Seed for sampling.
*/
  private[spark] def randomSplit(weights: List[Double], seed: Long): Array[Dataset[T]] = {
    // Py4J passes weights as a List; convert and delegate to the Array overload.
    randomSplit(weights.toArray, seed)
  }
/**
* (Scala-specific) Returns a new Dataset where each row has been expanded to zero or more
* rows by the provided function. This is similar to a `LATERAL VIEW` in HiveQL. The columns of
* the input row are implicitly joined with each row that is output by the function.
*
* Given that this is deprecated, as an alternative, you can explode columns either using
* `functions.explode()` or `flatMap()`. The following example uses these alternatives to count
* the number of books that contain a given word:
*
* {{{
* case class Book(title: String, words: String)
* val ds: Dataset[Book]
*
* val allWords = ds.select('title, explode(split('words, " ")).as("word"))
*
* val bookCountPerWord = allWords.groupBy("word").agg(countDistinct("title"))
* }}}
*
* Using `flatMap()` this can similarly be exploded as:
*
* {{{
* ds.flatMap(_.words.split(" "))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
  @deprecated("use flatMap() or select() with functions.explode() instead", "2.0.0")
  def explode[A <: Product : TypeTag](input: Column*)(f: Row => TraversableOnce[A]): DataFrame = {
    // Derive the schema of the generated rows from the Product type A via reflection.
    val elementSchema = ScalaReflection.schemaFor[A].dataType.asInstanceOf[StructType]
    // Converts each external row produced by `f` into Catalyst's internal representation.
    val convert = CatalystTypeConverters.createToCatalystConverter(elementSchema)
    val rowFunction =
      f.andThen(_.map(convert(_).asInstanceOf[InternalRow]))
    val generator = UserDefinedGenerator(elementSchema, rowFunction, input.map(_.expr))
    withPlan {
      // join = true: generated rows are combined (LATERAL VIEW style) with the input row.
      Generate(generator, join = true, outer = false,
        qualifier = None, generatorOutput = Nil, logicalPlan)
    }
  }
/**
* (Scala-specific) Returns a new Dataset where a single column has been expanded to zero
* or more rows by the provided function. This is similar to a `LATERAL VIEW` in HiveQL. All
* columns of the input row are implicitly joined with each value that is output by the function.
*
* Given that this is deprecated, as an alternative, you can explode columns either using
* `functions.explode()`:
*
* {{{
* ds.select(explode(split('words, " ")).as("word"))
* }}}
*
* or `flatMap()`:
*
* {{{
* ds.flatMap(_.words.split(" "))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
  @deprecated("use flatMap() or select() with functions.explode() instead", "2.0.0")
  def explode[A, B : TypeTag](inputColumn: String, outputColumn: String)(f: A => TraversableOnce[B])
    : DataFrame = {
    // Output column type comes from B via reflection; input values are cast to A unchecked.
    val dataType = ScalaReflection.schemaFor[B].dataType
    val attributes = AttributeReference(outputColumn, dataType)() :: Nil
    // TODO handle the metadata?
    val elementSchema = attributes.toStructType
    def rowFunction(row: Row): TraversableOnce[InternalRow] = {
      // Convert each generated value to Catalyst's internal form, one value per output row.
      val convert = CatalystTypeConverters.createToCatalystConverter(dataType)
      f(row(0).asInstanceOf[A]).map(o => InternalRow(convert(o)))
    }
    val generator = UserDefinedGenerator(elementSchema, rowFunction, apply(inputColumn).expr :: Nil)
    withPlan {
      // join = true: generated values are joined with all columns of the input row.
      Generate(generator, join = true, outer = false,
        qualifier = None, generatorOutput = Nil, logicalPlan)
    }
  }
/**
* Returns a new Dataset by adding a column or replacing the existing column that has
* the same name.
*
* @group untypedrel
* @since 2.0.0
*/
def withColumn(colName: String, col: Column): DataFrame = withColumns(Seq(colName), Seq(col))
/**
* Returns a new Dataset by adding columns or replacing the existing columns that has
* the same names.
*/
private[spark] def withColumns(colNames: Seq[String], cols: Seq[Column]): DataFrame = {
require(colNames.size == cols.size,
s"The size of column names: ${colNames.size} isn't equal to " +
s"the size of columns: ${cols.size}")
SchemaUtils.checkColumnNameDuplication(
colNames,
"in given column names",
sparkSession.sessionState.conf.caseSensitiveAnalysis)
val resolver = sparkSession.sessionState.analyzer.resolver
val output = queryExecution.analyzed.output
val columnMap = colNames.zip(cols).toMap
val replacedAndExistingColumns = output.map { field =>
columnMap.find { case (colName, _) =>
resolver(field.name, colName)
} match {
case Some((colName: String, col: Column)) => col.as(colName)
case _ => Column(field)
}
}
val newColumns = columnMap.filter { case (colName, col) =>
!output.exists(f => resolver(f.name, colName))
}.map { case (colName, col) => col.as(colName) }
select(replacedAndExistingColumns ++ newColumns : _*)
}
/**
* Returns a new Dataset by adding a column with metadata.
*/
  private[spark] def withColumn(colName: String, col: Column, metadata: Metadata): DataFrame = {
    // Attach the metadata via an alias, then reuse the ordinary add/replace path.
    withColumn(colName, col.as(colName, metadata))
  }
/**
* Returns a new Dataset with a column renamed.
* This is a no-op if schema doesn't contain existingName.
*
* @group untypedrel
* @since 2.0.0
*/
def withColumnRenamed(existingName: String, newName: String): DataFrame = {
val resolver = sparkSession.sessionState.analyzer.resolver
val output = queryExecution.analyzed.output
val shouldRename = output.exists(f => resolver(f.name, existingName))
if (shouldRename) {
val columns = output.map { col =>
if (resolver(col.name, existingName)) {
Column(col).as(newName)
} else {
Column(col)
}
}
select(columns : _*)
} else {
toDF()
}
}
/**
* Returns a new Dataset with a column dropped. This is a no-op if schema doesn't contain
* column name.
*
* This method can only be used to drop top level columns. the colName string is treated
* literally without further interpretation.
*
* @group untypedrel
* @since 2.0.0
*/
  def drop(colName: String): DataFrame = {
    // Delegate to the varargs overload with a single name.
    drop(Seq(colName) : _*)
  }
/**
* Returns a new Dataset with columns dropped.
* This is a no-op if schema doesn't contain column name(s).
*
* This method can only be used to drop top level columns. the colName string is treated literally
* without further interpretation.
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def drop(colNames: String*): DataFrame = {
val resolver = sparkSession.sessionState.analyzer.resolver
val allColumns = queryExecution.analyzed.output
val remainingCols = allColumns.filter { attribute =>
colNames.forall(n => !resolver(attribute.name, n))
}.map(attribute => Column(attribute))
if (remainingCols.size == allColumns.size) {
toDF()
} else {
this.select(remainingCols: _*)
}
}
/**
* Returns a new Dataset with a column dropped.
* This version of drop accepts a [[Column]] rather than a name.
* This is a no-op if the Dataset doesn't have a column
* with an equivalent expression.
*
* @group untypedrel
* @since 2.0.0
*/
  def drop(col: Column): DataFrame = {
    // Resolve an unresolved attribute against this plan so it can be compared to output
    // attributes; if resolution fails, fall back to the unresolved expression (no-op drop).
    val expression = col match {
      case Column(u: UnresolvedAttribute) =>
        queryExecution.analyzed.resolveQuoted(
          u.name, sparkSession.sessionState.analyzer.resolver).getOrElse(u)
      case Column(expr: Expression) => expr
    }
    val attrs = this.logicalPlan.output
    // Keep every output attribute that is not equal to the resolved expression.
    val colsAfterDrop = attrs.filter { attr =>
      attr != expression
    }.map(attr => Column(attr))
    select(colsAfterDrop : _*)
  }
/**
* Returns a new Dataset that contains only the unique rows from this Dataset.
* This is an alias for `distinct`.
*
* For a static batch [[Dataset]], it just drops duplicate rows. For a streaming [[Dataset]], it
* will keep all data across triggers as intermediate state to drop duplicates rows. You can use
* [[withWatermark]] to limit how late the duplicate data can be and system will accordingly limit
* the state. In addition, too late data older than watermark will be dropped to avoid any
* possibility of duplicates.
*
* @group typedrel
* @since 2.0.0
*/
def dropDuplicates(): Dataset[T] = dropDuplicates(this.columns)
/**
* (Scala-specific) Returns a new Dataset with duplicate rows removed, considering only
* the subset of columns.
*
* For a static batch [[Dataset]], it just drops duplicate rows. For a streaming [[Dataset]], it
* will keep all data across triggers as intermediate state to drop duplicates rows. You can use
* [[withWatermark]] to limit how late the duplicate data can be and system will accordingly limit
* the state. In addition, too late data older than watermark will be dropped to avoid any
* possibility of duplicates.
*
* @group typedrel
* @since 2.0.0
*/
def dropDuplicates(colNames: Seq[String]): Dataset[T] = withTypedPlan {
val resolver = sparkSession.sessionState.analyzer.resolver
val allColumns = queryExecution.analyzed.output
val groupCols = colNames.toSet.toSeq.flatMap { (colName: String) =>
// It is possibly there are more than one columns with the same name,
// so we call filter instead of find.
val cols = allColumns.filter(col => resolver(col.name, colName))
if (cols.isEmpty) {
throw new AnalysisException(
s"""Cannot resolve column name "$colName" among (${schema.fieldNames.mkString(", ")})""")
}
cols
}
Deduplicate(groupCols, logicalPlan)
}
/**
* Returns a new Dataset with duplicate rows removed, considering only
* the subset of columns.
*
* For a static batch [[Dataset]], it just drops duplicate rows. For a streaming [[Dataset]], it
* will keep all data across triggers as intermediate state to drop duplicates rows. You can use
* [[withWatermark]] to limit how late the duplicate data can be and system will accordingly limit
* the state. In addition, too late data older than watermark will be dropped to avoid any
* possibility of duplicates.
*
* @group typedrel
* @since 2.0.0
*/
def dropDuplicates(colNames: Array[String]): Dataset[T] = dropDuplicates(colNames.toSeq)
/**
* Returns a new [[Dataset]] with duplicate rows removed, considering only
* the subset of columns.
*
* For a static batch [[Dataset]], it just drops duplicate rows. For a streaming [[Dataset]], it
* will keep all data across triggers as intermediate state to drop duplicates rows. You can use
* [[withWatermark]] to limit how late the duplicate data can be and system will accordingly limit
* the state. In addition, too late data older than watermark will be dropped to avoid any
* possibility of duplicates.
*
* @group typedrel
* @since 2.0.0
*/
  @scala.annotation.varargs
  def dropDuplicates(col1: String, cols: String*): Dataset[T] = {
    // Requiring one explicit name keeps this overload unambiguous with dropDuplicates().
    val colNames: Seq[String] = col1 +: cols
    dropDuplicates(colNames)
  }
/**
* Computes basic statistics for numeric and string columns, including count, mean, stddev, min,
* and max. If no columns are given, this function computes statistics for all numerical or
* string columns.
*
* This function is meant for exploratory data analysis, as we make no guarantee about the
* backward compatibility of the schema of the resulting Dataset. If you want to
* programmatically compute summary statistics, use the `agg` function instead.
*
* {{{
* ds.describe("age", "height").show()
*
* // output:
* // summary age height
* // count 10.0 10.0
* // mean 53.3 178.05
* // stddev 11.6 15.7
* // min 18.0 163.0
* // max 92.0 192.0
* }}}
*
* Use [[summary]] for expanded statistics and control over which statistics to compute.
*
* @param cols Columns to compute statistics on.
*
* @group action
* @since 1.6.0
*/
@scala.annotation.varargs
def describe(cols: String*): DataFrame = {
val selected = if (cols.isEmpty) this else select(cols.head, cols.tail: _*)
selected.summary("count", "mean", "stddev", "min", "max")
}
/**
* Computes specified statistics for numeric and string columns. Available statistics are:
*
* - count
* - mean
* - stddev
* - min
* - max
* - arbitrary approximate percentiles specified as a percentage (eg, 75%)
*
* If no statistics are given, this function computes count, mean, stddev, min,
* approximate quartiles (percentiles at 25%, 50%, and 75%), and max.
*
* This function is meant for exploratory data analysis, as we make no guarantee about the
* backward compatibility of the schema of the resulting Dataset. If you want to
* programmatically compute summary statistics, use the `agg` function instead.
*
* {{{
* ds.summary().show()
*
* // output:
* // summary age height
* // count 10.0 10.0
* // mean 53.3 178.05
* // stddev 11.6 15.7
* // min 18.0 163.0
* // 25% 24.0 176.0
* // 50% 24.0 176.0
* // 75% 32.0 180.0
* // max 92.0 192.0
* }}}
*
* {{{
* ds.summary("count", "min", "25%", "75%", "max").show()
*
* // output:
* // summary age height
* // count 10.0 10.0
* // min 18.0 163.0
* // 25% 24.0 176.0
* // 75% 32.0 180.0
* // max 92.0 192.0
* }}}
*
* To do a summary for specific columns first select them:
*
* {{{
* ds.select("age", "height").summary().show()
* }}}
*
* See also [[describe]] for basic statistics.
*
* @param statistics Statistics from above list to be computed.
*
* @group action
* @since 2.3.0
*/
  // Statistic computation is delegated entirely to StatFunctions.summary.
  @scala.annotation.varargs
  def summary(statistics: String*): DataFrame = StatFunctions.summary(this, statistics.toSeq)
/**
* Returns the first `n` rows.
*
* @note this method should only be used if the resulting array is expected to be small, as
* all the data is loaded into the driver's memory.
*
* @group action
* @since 1.6.0
*/
def head(n: Int): Array[T] = withAction("head", limit(n).queryExecution)(collectFromPlan)
/**
* Returns the first row.
* @group action
* @since 1.6.0
*/
def head(): T = head(1).head
/**
* Returns the first row. Alias for head().
* @group action
* @since 1.6.0
*/
def first(): T = head()
/**
* Concise syntax for chaining custom transformations.
* {{{
* def featurize(ds: Dataset[T]): Dataset[U] = ...
*
* ds
* .transform(featurize)
* .transform(...)
* }}}
*
* @group typedrel
* @since 1.6.0
*/
def transform[U](t: Dataset[T] => Dataset[U]): Dataset[U] = t(this)
/**
* :: Experimental ::
* (Scala-specific)
* Returns a new Dataset that only contains elements where `func` returns `true`.
*
* @group typedrel
* @since 1.6.0
*/
  @Experimental
  @InterfaceStability.Evolving
  def filter(func: T => Boolean): Dataset[T] = {
    // Wraps the predicate in a TypedFilter node so the optimizer can see/fuse it.
    withTypedPlan(TypedFilter(func, logicalPlan))
  }
/**
* :: Experimental ::
* (Java-specific)
* Returns a new Dataset that only contains elements where `func` returns `true`.
*
* @group typedrel
* @since 1.6.0
*/
  @Experimental
  @InterfaceStability.Evolving
  def filter(func: FilterFunction[T]): Dataset[T] = {
    // Java variant: TypedFilter accepts the FilterFunction directly.
    withTypedPlan(TypedFilter(func, logicalPlan))
  }
/**
* :: Experimental ::
* (Scala-specific)
* Returns a new Dataset that contains the result of applying `func` to each element.
*
* @group typedrel
* @since 1.6.0
*/
  @Experimental
  @InterfaceStability.Evolving
  def map[U : Encoder](func: T => U): Dataset[U] = withTypedPlan {
    // Per-element transform expressed as a MapElements logical node.
    MapElements[T, U](func, logicalPlan)
  }
/**
* :: Experimental ::
* (Java-specific)
* Returns a new Dataset that contains the result of applying `func` to each element.
*
* @group typedrel
* @since 1.6.0
*/
  @Experimental
  @InterfaceStability.Evolving
  def map[U](func: MapFunction[T, U], encoder: Encoder[U]): Dataset[U] = {
    // Make the explicit encoder available implicitly for the Scala overload's machinery.
    implicit val uEnc = encoder
    withTypedPlan(MapElements[T, U](func, logicalPlan))
  }
/**
* :: Experimental ::
* (Scala-specific)
* Returns a new Dataset that contains the result of applying `func` to each partition.
*
* @group typedrel
* @since 1.6.0
*/
  @Experimental
  @InterfaceStability.Evolving
  def mapPartitions[U : Encoder](func: Iterator[T] => Iterator[U]): Dataset[U] = {
    // Builds the new Dataset directly around a MapPartitions logical node.
    new Dataset[U](
      sparkSession,
      MapPartitions[T, U](func, logicalPlan),
      implicitly[Encoder[U]])
  }
/**
* :: Experimental ::
* (Java-specific)
* Returns a new Dataset that contains the result of applying `f` to each partition.
*
* @group typedrel
* @since 1.6.0
*/
  @Experimental
  @InterfaceStability.Evolving
  def mapPartitions[U](f: MapPartitionsFunction[T, U], encoder: Encoder[U]): Dataset[U] = {
    // Bridge the Java function via asJava/asScala iterator adapters, then delegate.
    val func: (Iterator[T]) => Iterator[U] = x => f.call(x.asJava).asScala
    mapPartitions(func)(encoder)
  }
/**
* Returns a new `DataFrame` that contains the result of applying a serialized R function
* `func` to each partition.
*/
  private[sql] def mapPartitionsInR(
      func: Array[Byte],
      packageNames: Array[Byte],
      broadcastVars: Array[Broadcast[Object]],
      schema: StructType): DataFrame = {
    // NOTE(review): assumes this Dataset's encoder is a Row encoder (i.e. a DataFrame);
    // the cast would fail on a typed Dataset — confirm callers guarantee this.
    val rowEncoder = encoder.asInstanceOf[ExpressionEncoder[Row]]
    Dataset.ofRows(
      sparkSession,
      MapPartitionsInR(func, packageNames, broadcastVars, schema, rowEncoder, logicalPlan))
  }
/**
* :: Experimental ::
* (Scala-specific)
* Returns a new Dataset by first applying a function to all elements of this Dataset,
* and then flattening the results.
*
* @group typedrel
* @since 1.6.0
*/
  @Experimental
  @InterfaceStability.Evolving
  // Expressed as mapPartitions so the per-element flattening happens inside each partition.
  def flatMap[U : Encoder](func: T => TraversableOnce[U]): Dataset[U] =
    mapPartitions(_.flatMap(func))
/**
* :: Experimental ::
* (Java-specific)
* Returns a new Dataset by first applying a function to all elements of this Dataset,
* and then flattening the results.
*
* @group typedrel
* @since 1.6.0
*/
  @Experimental
  @InterfaceStability.Evolving
  def flatMap[U](f: FlatMapFunction[T, U], encoder: Encoder[U]): Dataset[U] = {
    // Adapt the Java FlatMapFunction's java.util.Iterator result to a Scala Iterator.
    val func: (T) => Iterator[U] = x => f.call(x).asScala
    flatMap(func)(encoder)
  }
/**
* Applies a function `f` to all rows.
*
* @group action
* @since 1.6.0
*/
  def foreach(f: T => Unit): Unit = withNewExecutionId {
    // Runs on the underlying RDD; tracked under a new execution id for the listeners/UI.
    rdd.foreach(f)
  }
/**
* (Java-specific)
* Runs `func` on each element of this Dataset.
*
* @group action
* @since 1.6.0
*/
def foreach(func: ForeachFunction[T]): Unit = foreach(func.call(_))
/**
* Applies a function `f` to each partition of this Dataset.
*
* @group action
* @since 1.6.0
*/
  def foreachPartition(f: Iterator[T] => Unit): Unit = withNewExecutionId {
    // Runs on the underlying RDD; tracked under a new execution id for the listeners/UI.
    rdd.foreachPartition(f)
  }
/**
* (Java-specific)
* Runs `func` on each partition of this Dataset.
*
* @group action
* @since 1.6.0
*/
  def foreachPartition(func: ForeachPartitionFunction[T]): Unit = {
    // Java bridge: expose each partition's iterator as a java.util.Iterator.
    foreachPartition((it: Iterator[T]) => func.call(it.asJava))
  }
/**
* Returns the first `n` rows in the Dataset.
*
* Running take requires moving data into the application's driver process, and doing so with
* a very large `n` can crash the driver process with OutOfMemoryError.
*
* @group action
* @since 1.6.0
*/
def take(n: Int): Array[T] = head(n)
/**
* Returns the first `n` rows in the Dataset as a list.
*
* Running take requires moving data into the application's driver process, and doing so with
* a very large `n` can crash the driver process with OutOfMemoryError.
*
* @group action
* @since 1.6.0
*/
def takeAsList(n: Int): java.util.List[T] = java.util.Arrays.asList(take(n) : _*)
/**
* Returns an array that contains all rows in this Dataset.
*
* Running collect requires moving all the data into the application's driver process, and
* doing so on a very large dataset can crash the driver process with OutOfMemoryError.
*
* For Java API, use [[collectAsList]].
*
* @group action
* @since 1.6.0
*/
def collect(): Array[T] = withAction("collect", queryExecution)(collectFromPlan)
/**
* Returns a Java list that contains all rows in this Dataset.
*
* Running collect requires moving all the data into the application's driver process, and
* doing so on a very large dataset can crash the driver process with OutOfMemoryError.
*
* @group action
* @since 1.6.0
*/
  def collectAsList(): java.util.List[T] = withAction("collectAsList", queryExecution) { plan =>
    // Same collection path as collect(), then wrapped in a fixed-size java.util.List.
    val values = collectFromPlan(plan)
    java.util.Arrays.asList(values : _*)
  }
/**
* Return an iterator that contains all rows in this Dataset.
*
* The iterator will consume as much memory as the largest partition in this Dataset.
*
* @note this results in multiple Spark jobs, and if the input Dataset is the result
* of a wide transformation (e.g. join with different partitioners), to avoid
* recomputing the input Dataset should be cached first.
*
* @group action
* @since 2.0.0
*/
  def toLocalIterator(): java.util.Iterator[T] = {
    withAction("toLocalIterator", queryExecution) { plan =>
      // Stream partitions to the driver one at a time, decoding each row via the bound encoder.
      plan.executeToIterator().map(boundEnc.fromRow).asJava
    }
  }
/**
* Returns the number of rows in the Dataset.
* @group action
* @since 1.6.0
*/
  // groupBy() with no keys yields a single global count row; read its only value.
  def count(): Long = withAction("count", groupBy().count().queryExecution) { plan =>
    plan.executeCollect().head.getLong(0)
  }
/**
* Returns a new Dataset that has exactly `numPartitions` partitions.
*
* @group typedrel
* @since 1.6.0
*/
  def repartition(numPartitions: Int): Dataset[T] = withTypedPlan {
    // shuffle = true: a full shuffle, unlike coalesce's narrow repartitioning.
    Repartition(numPartitions, shuffle = true, logicalPlan)
  }
/**
* Returns a new Dataset partitioned by the given partitioning expressions into
* `numPartitions`. The resulting Dataset is hash partitioned.
*
* This is the same operation as "DISTRIBUTE BY" in SQL (Hive QL).
*
* @group typedrel
* @since 2.0.0
*/
  @scala.annotation.varargs
  def repartition(numPartitions: Int, partitionExprs: Column*): Dataset[T] = withTypedPlan {
    // Hash-partitions by the given expressions into exactly numPartitions partitions.
    RepartitionByExpression(partitionExprs.map(_.expr), logicalPlan, numPartitions)
  }
/**
* Returns a new Dataset partitioned by the given partitioning expressions, using
* `spark.sql.shuffle.partitions` as number of partitions.
* The resulting Dataset is hash partitioned.
*
* This is the same operation as "DISTRIBUTE BY" in SQL (Hive QL).
*
* @group typedrel
* @since 2.0.0
*/
  @scala.annotation.varargs
  def repartition(partitionExprs: Column*): Dataset[T] = withTypedPlan {
    // Partition count comes from the session's spark.sql.shuffle.partitions setting.
    RepartitionByExpression(
      partitionExprs.map(_.expr), logicalPlan, sparkSession.sessionState.conf.numShufflePartitions)
  }
/**
* Returns a new Dataset that has exactly `numPartitions` partitions, when the fewer partitions
* are requested. If a larger number of partitions is requested, it will stay at the current
* number of partitions. Similar to coalesce defined on an `RDD`, this operation results in
* a narrow dependency, e.g. if you go from 1000 partitions to 100 partitions, there will not
* be a shuffle, instead each of the 100 new partitions will claim 10 of the current partitions.
*
* However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,
* this may result in your computation taking place on fewer nodes than
* you like (e.g. one node in the case of numPartitions = 1). To avoid this,
* you can call repartition. This will add a shuffle step, but means the
* current upstream partitions will be executed in parallel (per whatever
* the current partitioning is).
*
* @group typedrel
* @since 1.6.0
*/
  def coalesce(numPartitions: Int): Dataset[T] = withTypedPlan {
    // shuffle = false: narrow dependency; only reduces (never increases) partition count.
    Repartition(numPartitions, shuffle = false, logicalPlan)
  }
/**
* Returns a new Dataset that contains only the unique rows from this Dataset.
* This is an alias for `dropDuplicates`.
*
* @note Equality checking is performed directly on the encoded representation of the data
* and thus is not affected by a custom `equals` function defined on `T`.
*
* @group typedrel
* @since 2.0.0
*/
def distinct(): Dataset[T] = dropDuplicates()
/**
* Persist this Dataset with the default storage level (`MEMORY_AND_DISK`).
*
* @group basic
* @since 1.6.0
*/
  def persist(): this.type = {
    // Registers this query with the shared cache manager at the default storage level.
    sparkSession.sharedState.cacheManager.cacheQuery(this)
    this
  }
/**
* Persist this Dataset with the default storage level (`MEMORY_AND_DISK`).
*
* @group basic
* @since 1.6.0
*/
def cache(): this.type = persist()
/**
* Persist this Dataset with the given storage level.
* @param newLevel One of: `MEMORY_ONLY`, `MEMORY_AND_DISK`, `MEMORY_ONLY_SER`,
* `MEMORY_AND_DISK_SER`, `DISK_ONLY`, `MEMORY_ONLY_2`,
* `MEMORY_AND_DISK_2`, etc.
*
* @group basic
* @since 1.6.0
*/
  def persist(newLevel: StorageLevel): this.type = {
    // Same as persist(), but with a caller-chosen storage level (no explicit cache name).
    sparkSession.sharedState.cacheManager.cacheQuery(this, None, newLevel)
    this
  }
/**
* Get the Dataset's current storage level, or StorageLevel.NONE if not persisted.
*
* @group basic
* @since 2.1.0
*/
def storageLevel: StorageLevel = {
sparkSession.sharedState.cacheManager.lookupCachedData(this).map { cachedData =>
cachedData.cachedRepresentation.storageLevel
}.getOrElse(StorageLevel.NONE)
}
/**
* Mark the Dataset as non-persistent, and remove all blocks for it from memory and disk.
*
* @param blocking Whether to block until all blocks are deleted.
*
* @group basic
* @since 1.6.0
*/
  def unpersist(blocking: Boolean): this.type = {
    // Removes this query from the shared cache manager, optionally blocking on block deletion.
    sparkSession.sharedState.cacheManager.uncacheQuery(this, blocking)
    this
  }
/**
* Mark the Dataset as non-persistent, and remove all blocks for it from memory and disk.
*
* @group basic
* @since 1.6.0
*/
def unpersist(): this.type = unpersist(blocking = false)
/**
* Represents the content of the Dataset as an `RDD` of `T`.
*
* @group basic
* @since 1.6.0
*/
  lazy val rdd: RDD[T] = {
    // Wrap the plan in a deserializer so each InternalRow carries the object of type T
    // at ordinal 0, then unwrap it per row. Lazy: the RDD is built once on first access.
    val objectType = exprEnc.deserializer.dataType
    val deserialized = CatalystSerde.deserialize[T](logicalPlan)
    sparkSession.sessionState.executePlan(deserialized).toRdd.mapPartitions { rows =>
      rows.map(_.get(0, objectType).asInstanceOf[T])
    }
  }
/**
* Returns the content of the Dataset as a `JavaRDD` of `T`s.
* @group basic
* @since 1.6.0
*/
def toJavaRDD: JavaRDD[T] = rdd.toJavaRDD()
/**
* Returns the content of the Dataset as a `JavaRDD` of `T`s.
* @group basic
* @since 1.6.0
*/
def javaRDD: JavaRDD[T] = toJavaRDD
/**
* Registers this Dataset as a temporary table using the given name. The lifetime of this
* temporary table is tied to the [[SparkSession]] that was used to create this Dataset.
*
* @group basic
* @since 1.6.0
*/
  @deprecated("Use createOrReplaceTempView(viewName) instead.", "2.0.0")
  def registerTempTable(tableName: String): Unit = {
    // Deprecated alias: identical to createOrReplaceTempView.
    createOrReplaceTempView(tableName)
  }
/**
* Creates a local temporary view using the given name. The lifetime of this
* temporary view is tied to the [[SparkSession]] that was used to create this Dataset.
*
* Local temporary view is session-scoped. Its lifetime is the lifetime of the session that
* created it, i.e. it will be automatically dropped when the session terminates. It's not
* tied to any databases, i.e. we can't use `db1.view1` to reference a local temporary view.
*
* @throws AnalysisException if the view name is invalid or already exists
*
* @group basic
* @since 2.0.0
*/
  @throws[AnalysisException]
  def createTempView(viewName: String): Unit = withPlan {
    // replace = false: fails if a view with this name already exists in the session.
    createTempViewCommand(viewName, replace = false, global = false)
  }
/**
* Creates a local temporary view using the given name. The lifetime of this
* temporary view is tied to the [[SparkSession]] that was used to create this Dataset.
*
* @group basic
* @since 2.0.0
*/
  def createOrReplaceTempView(viewName: String): Unit = withPlan {
    // replace = true: silently overwrites an existing session-scoped view of the same name.
    createTempViewCommand(viewName, replace = true, global = false)
  }
/**
* Creates a global temporary view using the given name. The lifetime of this
* temporary view is tied to this Spark application.
*
* Global temporary view is cross-session. Its lifetime is the lifetime of the Spark application,
* i.e. it will be automatically dropped when the application terminates. It's tied to a system
* preserved database `global_temp`, and we must use the qualified name to refer a global temp
* view, e.g. `SELECT * FROM global_temp.view1`.
*
* @throws AnalysisException if the view name is invalid or already exists
*
* @group basic
* @since 2.1.0
*/
  @throws[AnalysisException]
  def createGlobalTempView(viewName: String): Unit = withPlan {
    // global = true: registered under the application-wide global_temp database.
    createTempViewCommand(viewName, replace = false, global = true)
  }
/**
* Creates or replaces a global temporary view using the given name. The lifetime of this
* temporary view is tied to this Spark application.
*
* Global temporary view is cross-session. Its lifetime is the lifetime of the Spark application,
* i.e. it will be automatically dropped when the application terminates. It's tied to a system
* preserved database `global_temp`, and we must use the qualified name to refer a global temp
* view, e.g. `SELECT * FROM global_temp.view1`.
*
* @group basic
* @since 2.2.0
*/
  def createOrReplaceGlobalTempView(viewName: String): Unit = withPlan {
    // Application-scoped view; overwrites an existing one of the same name.
    createTempViewCommand(viewName, replace = true, global = true)
  }
  /**
   * Builds the CreateViewCommand shared by all temp-view creation methods.
   *
   * @param viewName view name; parsed as a table identifier, so it may be qualified.
   * @param replace  whether an existing view of the same name is overwritten.
   * @param global   whether the view is application-scoped (global_temp) or session-scoped.
   */
  private def createTempViewCommand(
      viewName: String,
      replace: Boolean,
      global: Boolean): CreateViewCommand = {
    val viewType = if (global) GlobalTempView else LocalTempView
    // Surface parser failures as AnalysisException so callers see a uniform error type.
    val tableIdentifier = try {
      sparkSession.sessionState.sqlParser.parseTableIdentifier(viewName)
    } catch {
      case _: ParseException => throw new AnalysisException(s"Invalid view name: $viewName")
    }
    CreateViewCommand(
      name = tableIdentifier,
      userSpecifiedColumns = Nil,
      comment = None,
      properties = Map.empty,
      originalText = None,
      child = logicalPlan,
      allowExisting = false,
      replace = replace,
      viewType = viewType)
  }
/**
* Interface for saving the content of the non-streaming Dataset out into external storage.
*
* @group basic
* @since 1.6.0
*/
  def write: DataFrameWriter[T] = {
    // Batch-only API: reject streaming plans up front with an analysis error.
    if (isStreaming) {
      logicalPlan.failAnalysis(
        "'write' can not be called on streaming Dataset/DataFrame")
    }
    new DataFrameWriter[T](this)
  }
/**
* Interface for saving the content of the streaming Dataset out into external storage.
*
* @group basic
* @since 2.0.0
*/
  @InterfaceStability.Evolving
  def writeStream: DataStreamWriter[T] = {
    // Streaming-only API: the mirror-image guard of `write`.
    if (!isStreaming) {
      logicalPlan.failAnalysis(
        "'writeStream' can be called only on streaming Dataset/DataFrame")
    }
    new DataStreamWriter[T](this)
  }
/**
* Returns the content of the Dataset as a Dataset of JSON strings.
* @since 2.0.0
*/
  def toJSON: Dataset[String] = {
    // Capture schema and timezone on the driver so the closure below is serializable.
    val rowSchema = this.schema
    val sessionLocalTimeZone = sparkSession.sessionState.conf.sessionLocalTimeZone
    mapPartitions { iter =>
      val writer = new CharArrayWriter()
      // create the Generator without separator inserted between 2 records
      val gen = new JacksonGenerator(rowSchema, writer,
        new JSONOptions(Map.empty[String, String], sessionLocalTimeZone))
      new Iterator[String] {
        override def hasNext: Boolean = iter.hasNext
        override def next(): String = {
          gen.write(exprEnc.toRow(iter.next()))
          gen.flush()
          // Capture this record's JSON, then reset the shared writer so the buffer is
          // reused; close the generator only after the last record of the partition.
          val json = writer.toString
          if (hasNext) {
            writer.reset()
          } else {
            gen.close()
          }
          json
        }
      }
    } (Encoders.STRING)
  }
/**
* Returns a best-effort snapshot of the files that compose this Dataset. This method simply
* asks each constituent BaseRelation for its respective files and takes the union of all results.
* Depending on the source relations, this may not find all input files. Duplicates are removed.
*
* @group basic
* @since 2.0.0
*/
  def inputFiles: Array[String] = {
    // Gather files from every file-backed or Hive relation in the optimized plan.
    val files: Seq[String] = queryExecution.optimizedPlan.collect {
      case LogicalRelation(fsBasedRelation: FileRelation, _, _, _) =>
        fsBasedRelation.inputFiles
      case fr: FileRelation =>
        fr.inputFiles
      case r: HiveTableRelation =>
        r.tableMeta.storage.locationUri.map(_.toString).toArray
    }.flatten
    // toSet removes duplicates; resulting order is unspecified.
    files.toSet.toArray
  }
////////////////////////////////////////////////////////////////////////////
// For Python API
////////////////////////////////////////////////////////////////////////////
/**
* Converts a JavaRDD to a PythonRDD.
*/
  private[sql] def javaToPython: JavaRDD[Array[Byte]] = {
    val structType = schema // capture it for closure
    // Convert each internal row to Java objects, then pickle for the Python side.
    val rdd = queryExecution.toRdd.map(EvaluatePython.toJava(_, structType))
    EvaluatePython.javaToPython(rdd)
  }
  private[sql] def collectToPython(): Int = {
    EvaluatePython.registerPicklers()
    withNewExecutionId {
      // Collect on the driver, convert rows to Java objects, pickle in batches, and serve
      // them over a local socket; the returned Int is the port the Python side connects to.
      val toJava: (Any) => Any = EvaluatePython.toJava(_, schema)
      val iter = new SerDeUtil.AutoBatchedPickler(
        queryExecution.executedPlan.executeCollect().iterator.map(toJava))
      PythonRDD.serveIterator(iter, "serve-DataFrame")
    }
  }
/**
* Collect a Dataset as ArrowPayload byte arrays and serve to PySpark.
*/
  private[sql] def collectAsArrowToPython(): Int = {
    withNewExecutionId {
      // Collect Arrow-formatted payloads and serve their byte form over a local socket.
      val iter = toArrowPayload.collect().iterator.map(_.asPythonSerializable)
      PythonRDD.serveIterator(iter, "serve-Arrow")
    }
  }
  private[sql] def toPythonIterator(): Int = {
    withNewExecutionId {
      // Streams pickled rows to Python lazily instead of collecting them all at once.
      PythonRDD.toLocalIteratorAndServe(javaToPython.rdd)
    }
  }
////////////////////////////////////////////////////////////////////////////
// Private Helpers
////////////////////////////////////////////////////////////////////////////
/**
* Wrap a Dataset action to track all Spark jobs in the body so that we can connect them with
* an execution.
*/
  /**
   * Wrap a Dataset action to track all Spark jobs in the body so that we can connect them with
   * an execution (visible in the SQL tab of the UI).
   */
  private def withNewExecutionId[U](body: => U): U = {
    SQLExecution.withNewExecutionId(sparkSession, queryExecution)(body)
  }
/**
 * Wrap a Dataset action to track the QueryExecution and time cost, then report to the
 * user-registered callback functions.
 *
 * On success the listeners receive the wall-clock duration in nanoseconds;
 * on failure they receive the exception, which is then rethrown.
 */
private def withAction[U](name: String, qe: QueryExecution)(action: SparkPlan => U) = {
  try {
    // Clear per-operator metrics so this run's numbers are not polluted by a
    // previous execution of the same physical plan.
    qe.executedPlan.foreach { plan =>
      plan.resetMetrics()
    }
    val start = System.nanoTime()
    // Run the action under a fresh execution id so all jobs it spawns are
    // attributed to this query.
    val result = SQLExecution.withNewExecutionId(sparkSession, qe) {
      action(qe.executedPlan)
    }
    val end = System.nanoTime()
    sparkSession.listenerManager.onSuccess(name, qe, end - start)
    result
  } catch {
    case e: Exception =>
      // NOTE(review): only Exceptions reach onFailure; Errors propagate
      // without notifying listeners — confirm this is intended.
      sparkSession.listenerManager.onFailure(name, qe, e)
      throw e
  }
}
/**
 * Collect all elements from a spark plan.
 *
 * Rows are gathered on the driver via executeCollect and decoded from the
 * internal row format into T using the bound encoder.
 */
private def collectFromPlan(plan: SparkPlan): Array[T] = {
  plan.executeCollect().map(boundEnc.fromRow)
}
/**
 * Builds a Dataset sorted by the given expressions.
 *
 * Columns that already carry an explicit SortOrder keep it; any other
 * expression defaults to ascending order. `global` selects a total sort
 * versus a per-partition sort.
 */
private def sortInternal(global: Boolean, sortExprs: Seq[Column]): Dataset[T] = {
  val sortOrder: Seq[SortOrder] = sortExprs.map(_.expr).map {
    case order: SortOrder => order
    case other => SortOrder(other, Ascending)
  }
  withTypedPlan {
    Sort(sortOrder, global = global, logicalPlan)
  }
}
/** A convenient function to wrap a logical plan and produce a DataFrame. */
@inline private def withPlan(logicalPlan: LogicalPlan): DataFrame =
  Dataset.ofRows(sparkSession, logicalPlan)
/** A convenient function to wrap a logical plan and produce a Dataset. */
@inline private def withTypedPlan[U : Encoder](logicalPlan: LogicalPlan): Dataset[U] =
  Dataset(sparkSession, logicalPlan)
/** A convenient function to wrap a set based logical plan and produce a Dataset. */
@inline private def withSetOperator[U : Encoder](logicalPlan: LogicalPlan): Dataset[U] = {
  if (!classTag.runtimeClass.isAssignableFrom(classOf[Row])) {
    // Typed Dataset: the element encoder is still valid.
    Dataset(sparkSession, logicalPlan)
  } else {
    // Set operators widen types (change the schema), so we cannot reuse the row encoder.
    Dataset.ofRows(sparkSession, logicalPlan).asInstanceOf[Dataset[U]]
  }
}
/** Convert to an RDD of ArrowPayload byte arrays */
private[sql] def toArrowPayload: RDD[ArrowPayload] = {
  // Capture schema and batch size in locals so the partition closure does not
  // serialize `this` (the Dataset / session state).
  val schemaCaptured = this.schema
  val maxRecordsPerBatch = sparkSession.sessionState.conf.arrowMaxRecordsPerBatch
  queryExecution.toRdd.mapPartitionsInternal { iter =>
    // TaskContext is fetched on the executor, inside the task.
    val context = TaskContext.get()
    ArrowConverters.toPayloadIterator(iter, schemaCaptured, maxRecordsPerBatch, context)
  }
}
}
| minixalpha/spark | sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala | Scala | apache-2.0 | 107,524 |
package com.avsystem.commons
package redis
import java.io.File
import org.apache.commons.io.FileUtils
import org.scalatest.{BeforeAndAfterAll, Suite}
import scala.concurrent.Await
import scala.concurrent.duration._
/**
 * Test mixin that launches one Redis server per configured port before the
 * suite runs and tears them all down (plus their working directory) afterwards.
 *
 * Author: ghik
 * Created: 27/06/16.
 */
trait UsesClusterServers extends BeforeAndAfterAll with RedisProcessUtils { this: Suite =>
  // Per-run working directory (timestamped) so concurrent runs do not clash.
  val clusterPath: String = "cluster/" + System.currentTimeMillis()
  val clusterDir: File = new File(clusterPath.replace("/", File.separator))

  /** Ports the cluster nodes listen on; one Redis process is started per port. */
  def ports: Seq[Int]

  lazy val addresses: Seq[NodeAddress] = ports.map(port => NodeAddress(port = port))

  // Assigned in beforeAll; remains null if startup fails before assignment.
  var redisProcesses: Seq[RedisProcess] = _

  /** Hook for subclasses to lay out per-node config/data directories. */
  protected def prepareDirectory(): Unit

  protected def slotKey(slot: Int): String = ClusterUtils.SlotKeys(slot)

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    prepareDirectory()
    // Launch all nodes concurrently; fail the suite if any is not up in 10s.
    redisProcesses = Await.result(Future.traverse(ports)(port => launchRedis(
      "--port", port.toString,
      "--daemonize", "no",
      "--pidfile", "redis.pid",
      "--dbfilename", "dump.rdb",
      "--dir", s"$clusterPath/$port",
      "--appendonly", "yes",
      "--appendfilename", "appendonly.aof",
      "--cluster-enabled", "yes",
      "--cluster-config-file", "nodes.conf"
    )), 10.seconds)
  }

  override protected def afterAll(): Unit = {
    // Guard against beforeAll having failed before the processes were
    // assigned; Future.traverse(null) would otherwise throw a NPE here and
    // mask the original startup failure.
    if (redisProcesses != null) {
      Await.result(Future.traverse(redisProcesses)(shutdownRedis), 10.seconds)
    }
    FileUtils.deleteDirectory(clusterDir)
    super.afterAll()
  }
}
| AVSystem/scala-commons | commons-redis/src/test/scala/com/avsystem/commons/redis/UsesClusterServers.scala | Scala | mit | 1,474 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.config.sampler
import org.specs.Specification
import org.specs.mock.{ClassMocker, JMocker}
/**
 * Verifies that ReadOnlyAdjustableRateConfig never forwards writes to the
 * wrapped (ZooKeeper-backed) AdjustableRateConfig: the empty `expect {}`
 * block makes the mock fail the example on any unexpected interaction.
 */
class AdjustableRateConfigSpec extends Specification with JMocker with ClassMocker {
  "ReadOnlyAdjustableRateConfig" should {
    val sampleRateConfig = mock[AdjustableRateConfig]
    val sr = 0.3

    "not issue zk calls on set" in {
      expect {}
      val config = new ReadOnlyAdjustableRateConfig(sampleRateConfig)
      config.set(sr)
    }

    "not issue zk calls on setIfNotExists" in {
      expect {}
      val config = new ReadOnlyAdjustableRateConfig(sampleRateConfig)
      // Bug fix: this example previously called `set` again (copy-paste),
      // so the behavior named by the test was never exercised.
      config.setIfNotExists(sr)
    }
  }
}
| netconstructor/zipkin | zipkin-server/src/test/scala/com/twitter/zipkin/config/sampler/AdjustableRateConfigSpec.scala | Scala | apache-2.0 | 1,238 |
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.http.cache.client
import play.api.libs.json._
import uk.gov.hmrc.crypto.json.{JsonDecryptor, JsonEncryptor}
import uk.gov.hmrc.crypto.{ApplicationCrypto, CompositeSymmetricCrypto, Protected}
import uk.gov.hmrc.play.audit.http.HeaderCarrier
import uk.gov.hmrc.play.http.HttpResponse
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
/** Default instance wired to the standard HTTP caching client and JSON crypto. */
object ShortLivedCache extends ShortLivedCache {
  override val shortLiveCache = ShortLivedHttpCaching
  override implicit val crypto = ApplicationCrypto.JsonCrypto
}
/**
 * Short-lived cache that encrypts values before writing them to the
 * underlying HTTP cache and decrypts them on read.
 */
trait ShortLivedCache extends CacheUtil {
  val shortLiveCache: ShortLivedHttpCaching
  implicit val crypto: CompositeSymmetricCrypto

  /**
   * Encrypts `body` and stores it under `cacheId`/`formId`; the returned
   * CacheMap is wrapped so entries are decrypted on access.
   */
  def cache[A](cacheId: String, formId: String, body: A)(implicit hc: HeaderCarrier, wts: Writes[A]): Future[CacheMap] = {
    val protectd = Protected(body)
    val encryptionFormat = new JsonEncryptor()
    val fm = shortLiveCache.cache(cacheId, formId, protectd)(hc, encryptionFormat)
    fm.map(cm => new CryptoCacheMap(cm))
  }

  /** Fetches the whole cache map, wrapped so entries are decrypted on access. */
  def fetch(cacheId: String)(implicit hc: HeaderCarrier): Future[Option[CacheMap]] = {
    val fm = shortLiveCache.fetch(cacheId)
    fm.map(om => om.map(cm => new CryptoCacheMap(cm)))
  }

  /**
   * Fetches and decrypts a single entry.
   *
   * NOTE(review): this try only catches a SecurityException thrown
   * synchronously while building the Future; a decryption failure inside the
   * mapped Future fails the Future with the raw exception and is NOT wrapped
   * in CachingException — confirm that is intended.
   */
  def fetchAndGetEntry[T](cacheId: String, key: String)(implicit hc: HeaderCarrier, rds: Reads[T]): Future[Option[T]] =
    try {
      val decryptionFormat = new JsonDecryptor()
      val encrypted: Future[Option[Protected[T]]] = shortLiveCache.fetchAndGetEntry(cacheId, key)(hc, decryptionFormat)
      encrypted.map(op => convert(op))
    } catch {
      case e: SecurityException => throw CachingException(s"Failed to fetch a decrypted entry by cacheId:$cacheId and key:$key", e)
    }

  /** Removes everything stored under `cacheId`. */
  def remove(cacheId: String)(implicit hc: HeaderCarrier): Future[HttpResponse] = shortLiveCache.remove(cacheId)
}
/** Shared helper for unwrapping decrypted cache entries. */
trait CacheUtil {
  def convert[T](entry: Option[Protected[T]]): Option[T] =
    entry.map(_.decryptedValue)
}
/**
 * CacheMap wrapper that transparently decrypts entries when they are read.
 */
class CryptoCacheMap(cm: CacheMap)(implicit crypto: CompositeSymmetricCrypto)
  extends CacheMap(cm.id, cm.data) with CacheUtil {

  override def getEntry[T](key: String)(implicit fjs: Reads[T]): Option[T] =
    try {
      val decryptionFormat = new JsonDecryptor()
      val encryptedEntry = cm.getEntry(key)(decryptionFormat)
      convert(encryptedEntry)
    } catch {
      // Decryption is synchronous here, so the wrap in CachingException
      // actually covers the failure (unlike the async fetch path).
      case e: SecurityException => throw CachingException(s"Failed to fetch a decrypted entry by key:$key", e)
    }
}
| xnejp03/http-caching-client | app/uk/gov/hmrc/http/cache/client/ShortLivedCache.scala | Scala | apache-2.0 | 3,045 |
package nfn.service.Temperature
import akka.actor.ActorRef
import ccn.packet.CCNName
import nfn.service.{NFNIntValue, NFNService, NFNStringValue, NFNValue}
import scala.concurrent.duration._
/**
 * Simulated sensor service: derives a deterministic reading from the requested
 * data point — even points add the point index to the base value, odd points
 * subtract it.
 *
 * Created by blacksheeep on 13/11/15.
 */
class ReadSensorDataSimu() extends NFNService {

  val consttemp = 20
  val constpreasure = 1000

  override def function(interestName: CCNName, args: Seq[NFNValue], ccnApi: ActorRef): NFNValue = {
    // Expected arguments: sensor name followed by the data point index.
    (args.head, args.tail.head) match {
      case (sensorname: NFNStringValue, datapoint: NFNIntValue) =>
        // Alternate above/below the base value depending on parity.
        val delta = if (datapoint.i % 2 == 0) datapoint.i else -datapoint.i
        sensorname.str match {
          case "Temperature" => NFNIntValue(consttemp + delta)
          case "Pressure" => NFNIntValue(constpreasure + delta)
        }
      case _ => ???
    }
  }
}
| cn-uofbasel/nfn-scala | src/main/scala/nfn/service/Temperature/ReadSensorDataSimu.scala | Scala | isc | 1,000 |
package com.timgroup.eventstore.memory
import com.timgroup.clocks.joda.testing.ManualJodaClock
import com.timgroup.eventstore.api.EventStoreTest
import com.timgroup.eventstore.memory.Wrapper._
import org.joda.time.DateTimeZone
import org.scalatest.{FunSpec, MustMatchers, OneInstancePerTest}
/**
 * Runs the shared EventStoreTest behaviors (basic event store contract plus
 * optimistic concurrency control) against both in-memory implementations:
 * the traditional store and the legacy wrapper around the new Java store.
 */
class InMemoryEventStoreTest extends FunSpec with EventStoreTest with MustMatchers with OneInstancePerTest {

  describe("traditional") {
    // A manual clock pins event timestamps so assertions are deterministic.
    val traditionalInMemoryEventStore = new InMemoryEventStore( new ManualJodaClock(effectiveTimestamp.toInstant, DateTimeZone.UTC) )
    it should behave like anEventStore(traditionalInMemoryEventStore)
    it should behave like optimisticConcurrencyControl(traditionalInMemoryEventStore)
  }

  describe("wrapper around new") {
    val newInMemoryEventStore = new JavaInMemoryEventStore(new ManualJodaClock(effectiveTimestamp.toInstant, DateTimeZone.UTC)).toLegacy
    it should behave like anEventStore(newInMemoryEventStore)
    it should behave like optimisticConcurrencyControl(newInMemoryEventStore)
  }
}
| tim-group/tg-eventstore | memory-legacy/src/test/scala/com/timgroup/eventstore/memory/InMemoryEventStoreTest.scala | Scala | bsd-2-clause | 1,049 |
package reactivemongo.api.commands
import reactivemongo.api.SerializationPack
/**
 * Command that drops the index with the given name.
 * Equality and hashing are based solely on the index name.
 */
private[reactivemongo] final class DropIndexes(
    val index: String) extends CollectionCommand with CommandWithResult[DropIndexesResult] {

  val commandKind = CommandKind.DropIndexes

  override def equals(that: Any): Boolean = that match {
    case other: DropIndexes => other.index == index
    case _ => false
  }

  override def hashCode: Int = index.hashCode

  override def toString: String = s"DropIndexes($index)"
}
// Result of a dropIndexes command; `value` carries the server's `nIndexesWas`
// (number of indexes that existed before the drop — see the reader below).
private[reactivemongo] final class DropIndexesResult(
  val value: Int) extends AnyVal
private[reactivemongo] object DropIndexes {
  @inline def apply(index: String): DropIndexes = new DropIndexes(index)

  /** Serializes the command as `{ dropIndexes: <collection>, index: <name> }`. */
  private[api] def writer[P <: SerializationPack](pack: P): pack.Writer[ResolvedCollectionCommand[DropIndexes]] = {
    val builder = pack.newBuilder

    import builder.{ elementProducer => element, string }

    pack.writer[ResolvedCollectionCommand[DropIndexes]] { drop =>
      builder.document(Seq(
        element("dropIndexes", string(drop.collection)),
        element("index", string(drop.command.index))))
    }
  }

  /**
   * Reads the server reply; missing `nIndexesWas` defaults to 0. Generic
   * command errors are surfaced by the wrapping codec.
   */
  private[api] def reader[P <: SerializationPack](pack: P): pack.Reader[DropIndexesResult] = {
    val decoder = pack.newDecoder

    CommandCodecs.dealingWithGenericCommandExceptionsReader[pack.type, DropIndexesResult](pack) { doc =>
      new DropIndexesResult(decoder.int(doc, "nIndexesWas") getOrElse 0)
    }
  }
}
| ReactiveMongo/ReactiveMongo | driver/src/main/scala/api/commands/DropIndexes.scala | Scala | apache-2.0 | 1,472 |
package infra.piece.file
import infra.piece.core.Piece
/**
 * Content piece referencing an uploaded file.
 *
 * `fileId` is presumably the primary stored file and `fileIds` related
 * variants/attachments — TODO confirm against the file-storage service.
 *
 * @author alari (name.alari@gmail.com)
 * @since 08.05.14 14:22
 */
case class FilePiece(fileId: String,
                     filename: String,
                     title: Option[String],
                     fileIds: Seq[String],
                     id: Option[String] = Piece.genSomeId(),
                     kind: String = "file") extends Piece | alari/play-content | module-code/app/infra/piece/file/FilePiece.scala | Scala | mit | 411
/**
* Created by wuyiran on 11/18/16.
*/
package com.iwantfind
import java.sql.DriverManager
import org.apache.hadoop.conf._
import org.apache.spark.{SparkConf, SparkContext}
import java.sql.Connection
/**
 * JDBC helpers for obtaining and closing MySQL connections.
 * (Previous header comment, "Computes an approximation to pi", was a stale
 * copy-paste leftover.)
 */
// TODO: URL and credentials are hard-coded; move them to configuration and do
// not keep the root password in source control.
object DBUtils {
  val url = "jdbc:mysql://localhost:3306/test"
  val username = "root"
  val password = "admin"

  // Touching the class forces the MySQL driver to load and register itself
  // with DriverManager.
  classOf[com.mysql.jdbc.Driver]

  /** Opens a new connection; the caller is responsible for closing it. */
  def getConnection(): Connection = {
    DriverManager.getConnection(url, username, password)
  }

  /**
   * Closes the connection if it is non-null and still open, swallowing (but
   * logging) any failure.
   *
   * Bug fix: the original guard was `!conn.isClosed() || conn != null`, which
   * (a) dereferenced a possibly-null connection before the null check and
   * (b) being a disjunction, was also true for already-closed connections.
   */
  def close(conn: Connection): Unit = {
    try {
      if (conn != null && !conn.isClosed()) {
        conn.close()
      }
    }
    catch {
      case ex: Exception => {
        ex.printStackTrace()
      }
    }
  }
} | Yiran-wu/iwantfind-tool | src/main/scala/com/iwantfind/DBUtils.scala | Scala | apache-2.0 | 739
/*
* The MIT License (MIT)
* <p/>
* Copyright (c) 2016 SWEeneyThreads
* <p/>
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* <p/>
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* <p/>
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
* <p/>
*
* @author SWEeneyThreads
* @version 0.0.1
* @since 0.0.1
*/
package server.messages.query.user
import server.messages.query.PermissionMessages.{NoPermissionMessage, ReadMessage, ReadWriteMessage}
import server.messages.query.ReplyInfo
/**
* DatabaseMessages are used to manage operations on databases.
*/
/**
 * DatabaseMessages are used to manage operations on databases.
 */
object DatabaseMessages {

  /**
   * Trait that every message which belongs to database operations has to extend.
   *
   * @see UserMessage
   */
  trait DatabaseMessage extends UserMessage

  /**
   * A CreateDatabaseMessage is used to request the creation of a new database with the given name. A user does not
   * need a particular permission to request this operation, therefore this message extends NoPermissionMessage.
   * @param name The database name
   *
   * @see DatabaseMessage
   * @see NoPermissionMessage
   */
  case class CreateDatabaseMessage(name: String) extends DatabaseMessage with NoPermissionMessage

  /**
   * A DeleteDatabaseMessage is used to request the deletion of the database with the given name. A user needs
   * read-write permission to request this operation, therefore this message extends ReadWriteMessage.
   * @param name The database name
   *
   * @see DatabaseMessage
   * @see ReadWriteMessage
   */
  case class DeleteDatabaseMessage(name: String) extends DatabaseMessage with ReadWriteMessage

  /**
   * A SelectDatabaseMessage is used to request the selection of the database with the given name. A user needs read
   * permission to request this operation, therefore this message extends ReadMessage.
   * @param name The database name
   *
   * @see DatabaseMessage
   * @see ReadMessage
   */
  case class SelectDatabaseMessage(name: String) extends DatabaseMessage with ReadMessage

  /**
   * A ListDatabaseMessage is used to request the list of databases present on the server. A user needs read
   * permission to request this operation, therefore this message extends ReadMessage.
   *
   * @see DatabaseMessage
   * @see ReadMessage
   */
  case class ListDatabaseMessage() extends DatabaseMessage with ReadMessage

  /**
   * A DBAlreadyExistInfo is used as response to a create-database request when the database requested for creation
   * already exists.
   *
   * @see ReplyInfo
   */
  case class DBAlreadyExistInfo() extends ReplyInfo

  /**
   * A DBDoesNotExistInfo is used as response to a database request which asks for a database that does not exist.
   *
   * @see ReplyInfo
   */
  case class DBDoesNotExistInfo() extends ReplyInfo

  /**
   * A ListDBInfo is used as response to a list-database request.
   * @param dbs The list of databases.
   *
   * @see ReplyInfo
   */
  case class ListDBInfo(dbs: List[String]) extends ReplyInfo

  /**
   * A NoDBInfo is used as response to a list-database request when no databases are present on the server.
   *
   * @see ReplyInfo
   */
  case class NoDBInfo() extends ReplyInfo

  /**
   * A NoDBSelectedInfo is used as response to a request on a map or on a row when no database has previously been
   * selected.
   *
   * @see ReplyInfo
   */
  case class NoDBSelectedInfo() extends ReplyInfo
} | SweeneyThreads/Actorbase | src/main/scala/server/messages/query/user/DatabaseMessages.scala | Scala | mit | 4,356
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.physical.batch
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.plan.`trait`.FlinkRelDistributionTraitDef
import org.apache.flink.table.planner.plan.cost.{FlinkCost, FlinkCostFactory}
import org.apache.flink.table.planner.plan.nodes.exec.batch.BatchExecSortMergeJoin
import org.apache.flink.table.planner.plan.nodes.exec.{ExecNode, InputProperty}
import org.apache.flink.table.planner.plan.utils.{FlinkRelMdUtil, FlinkRelOptUtil, JoinTypeUtil, JoinUtil}
import org.apache.flink.table.runtime.operators.join.FlinkJoinType
import org.apache.calcite.plan._
import org.apache.calcite.rel.core._
import org.apache.calcite.rel.metadata.RelMetadataQuery
import org.apache.calcite.rel.{RelCollationTraitDef, RelNode, RelWriter}
import org.apache.calcite.rex.RexNode
import scala.collection.JavaConversions._
/**
 * Batch physical RelNode for sort-merge [[Join]].
 *
 * `leftSorted`/`rightSorted` record whether each input already arrives sorted
 * on its join keys, which lets the cost model skip the corresponding sort.
 */
class BatchPhysicalSortMergeJoin(
    cluster: RelOptCluster,
    traitSet: RelTraitSet,
    leftRel: RelNode,
    rightRel: RelNode,
    condition: RexNode,
    joinType: JoinRelType,
    // true if LHS is sorted by left join keys, else false
    val leftSorted: Boolean,
    // true if RHS is sorted by right join key, else false
    val rightSorted: Boolean)
  extends BatchPhysicalJoinBase(cluster, traitSet, leftRel, rightRel, condition, joinType) {

  // Sort-merge join handles inner and all outer join types (not semi/anti).
  protected def isMergeJoinSupportedType(joinRelType: FlinkJoinType): Boolean = {
    joinRelType == FlinkJoinType.INNER ||
      joinRelType == FlinkJoinType.LEFT ||
      joinRelType == FlinkJoinType.RIGHT ||
      joinRelType == FlinkJoinType.FULL
  }

  override def copy(
      traitSet: RelTraitSet,
      conditionExpr: RexNode,
      left: RelNode,
      right: RelNode,
      joinType: JoinRelType,
      semiJoinDone: Boolean): Join = {
    // The sorted flags are carried over unchanged into the copy.
    new BatchPhysicalSortMergeJoin(
      cluster,
      traitSet,
      left,
      right,
      conditionExpr,
      joinType,
      leftSorted,
      rightSorted)
  }

  override def explainTerms(pw: RelWriter): RelWriter =
    super.explainTerms(pw)
      .itemIf("leftSorted", leftSorted, leftSorted)
      .itemIf("rightSorted", rightSorted, rightSorted)

  /**
   * Cost = sort cost for each unsorted input (O(n log n) comparisons),
   * buffer-write cost for each already-sorted input, plus a linear merge
   * cost, plus sort memory for each input that still needs sorting.
   */
  override def computeSelfCost(planner: RelOptPlanner, mq: RelMetadataQuery): RelOptCost = {
    val leftRowCnt = mq.getRowCount(getLeft)
    val rightRowCnt = mq.getRowCount(getRight)
    if (leftRowCnt == null || rightRowCnt == null) {
      return null
    }
    val numOfSort = joinInfo.leftKeys.size()
    val leftSortCpuCost: Double = if (leftSorted) {
      // cost of writing lhs data to buffer
      leftRowCnt
    } else {
      // sort cost
      FlinkCost.COMPARE_CPU_COST * numOfSort * leftRowCnt * Math.max(Math.log(leftRowCnt), 1.0)
    }
    val rightSortCpuCost: Double = if (rightSorted) {
      // cost of writing rhs data to buffer
      rightRowCnt
    } else {
      // sort cost
      FlinkCost.COMPARE_CPU_COST * numOfSort * rightRowCnt * Math.max(Math.log(rightRowCnt), 1.0)
    }
    // cost of evaluating each join condition
    val joinConditionCpuCost = FlinkCost.COMPARE_CPU_COST * (leftRowCnt + rightRowCnt)
    val cpuCost = leftSortCpuCost + rightSortCpuCost + joinConditionCpuCost
    val costFactory = planner.getCostFactory.asInstanceOf[FlinkCostFactory]
    // assume memory is big enough, so sort process and mergeJoin process will not spill to disk.
    var sortMemCost = 0D
    if (!leftSorted) {
      sortMemCost += FlinkRelMdUtil.computeSortMemory(mq, getLeft)
    }
    if (!rightSorted) {
      sortMemCost += FlinkRelMdUtil.computeSortMemory(mq, getRight)
    }
    val rowCount = mq.getRowCount(this)
    costFactory.makeCost(rowCount, cpuCost, 0, 0, sortMemCost)
  }

  /**
   * Tries to satisfy the required distribution by hash-distributing both
   * inputs on the join keys, and additionally claims the required collation
   * when the merge output is provably sorted that way.
   */
  override def satisfyTraits(requiredTraitSet: RelTraitSet): Option[RelNode] = {
    val requiredDistribution = requiredTraitSet.getTrait(FlinkRelDistributionTraitDef.INSTANCE)
    val (canSatisfyDistribution, leftRequiredDistribution, rightRequiredDistribution) =
      satisfyHashDistributionOnNonBroadcastJoin(requiredDistribution)
    if (!canSatisfyDistribution) {
      return None
    }

    val requiredCollation = requiredTraitSet.getTrait(RelCollationTraitDef.INSTANCE)
    val requiredFieldCollations = requiredCollation.getFieldCollations
    val shuffleKeysSize = leftRequiredDistribution.getKeys.size

    val newLeft = RelOptRule.convert(getLeft, leftRequiredDistribution)
    val newRight = RelOptRule.convert(getRight, rightRequiredDistribution)

    // SortMergeJoin can provide collation trait, check whether provided collation can satisfy
    // required collations
    val canProvideCollation = if (requiredCollation.getFieldCollations.isEmpty) {
      false
    } else if (requiredFieldCollations.size > shuffleKeysSize) {
      // Sort by [a, b] can satisfy [a], but cannot satisfy [a, b, c]
      false
    } else {
      val leftKeys = leftRequiredDistribution.getKeys
      val leftFieldCnt = getLeft.getRowType.getFieldCount
      // Right-side keys are addressed in the joined row type, hence the offset.
      val rightKeys = rightRequiredDistribution.getKeys.map(_ + leftFieldCnt)
      requiredFieldCollations.zipWithIndex.forall { case (collation, index) =>
        val idxOfCollation = collation.getFieldIndex
        // Full outer join is handled before, so does not need care about it
        if (idxOfCollation < leftFieldCnt && joinType != JoinRelType.RIGHT) {
          val fieldCollationOnLeftSortKey = FlinkRelOptUtil.ofRelFieldCollation(leftKeys.get(index))
          collation == fieldCollationOnLeftSortKey
        } else if (idxOfCollation >= leftFieldCnt &&
          (joinType == JoinRelType.RIGHT || joinType == JoinRelType.INNER)) {
          val fieldCollationOnRightSortKey =
            FlinkRelOptUtil.ofRelFieldCollation(rightKeys.get(index))
          collation == fieldCollationOnRightSortKey
        } else {
          false
        }
      }
    }
    var newProvidedTraitSet = getTraitSet.replace(requiredDistribution)
    if (canProvideCollation) {
      newProvidedTraitSet = newProvidedTraitSet.replace(requiredCollation)
    }
    Some(copy(newProvidedTraitSet, Seq(newLeft, newRight)))
  }

  override def translateToExecNode(): ExecNode[_] = {
    JoinUtil.validateJoinSpec(
      joinSpec,
      FlinkTypeFactory.toLogicalRowType(left.getRowType),
      FlinkTypeFactory.toLogicalRowType(right.getRowType))

    new BatchExecSortMergeJoin(
      JoinTypeUtil.getFlinkJoinType(joinType),
      joinSpec.getLeftKeys,
      joinSpec.getRightKeys,
      joinSpec.getFilterNulls,
      condition,
      // The smaller estimated input is flagged so the runtime can buffer it.
      estimateOutputSize(getLeft) < estimateOutputSize(getRight),
      // Both inputs must be fully consumed (sorted) before output is produced.
      InputProperty.builder().damBehavior(InputProperty.DamBehavior.END_INPUT).build(),
      InputProperty.builder().damBehavior(InputProperty.DamBehavior.END_INPUT).build(),
      FlinkTypeFactory.toLogicalRowType(getRowType),
      getRelDetailedDescription
    )
  }

  // Estimated byte size of a node's output: average row size * row count.
  private def estimateOutputSize(relNode: RelNode): Double = {
    val mq = relNode.getCluster.getMetadataQuery
    mq.getAverageRowSize(relNode) * mq.getRowCount(relNode)
  }
}
| tillrohrmann/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchPhysicalSortMergeJoin.scala | Scala | apache-2.0 | 7,847 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.admin
import joptsimple.OptionParser
import kafka.utils._
import org.I0Itec.zkclient.ZkClient
import javax.management.remote.{JMXServiceURL, JMXConnectorFactory}
import javax.management.ObjectName
import kafka.controller.KafkaController
import scala.Some
/**
 * Command-line tool that asks the current controller (located via ZooKeeper,
 * invoked over JMX) to perform a controlled shutdown of a broker, optionally
 * retrying until the broker no longer leads any partition.
 */
object ShutdownBroker extends Logging {

  private case class ShutdownParams(zkConnect: String, brokerId: java.lang.Integer, jmxUrl: String)

  /**
   * Invokes KafkaController.shutdownBroker on the controller via JMX.
   * @return true iff the broker no longer leads any partition afterwards.
   */
  private def invokeShutdown(params: ShutdownParams): Boolean = {
    var zkClient: ZkClient = null
    try {
      zkClient = new ZkClient(params.zkConnect, 30000, 30000, ZKStringSerializer)
      val controllerBrokerId = ZkUtils.getController(zkClient)
      val controllerOpt = ZkUtils.getBrokerInfo(zkClient, controllerBrokerId)
      controllerOpt match {
        case Some(controller) =>
          val jmxUrl = new JMXServiceURL(params.jmxUrl)
          val jmxc = JMXConnectorFactory.connect(jmxUrl, null)
          val mbsc = jmxc.getMBeanServerConnection
          // The MBean returns the number of partitions the broker still leads.
          val leaderPartitionsRemaining = mbsc.invoke(new ObjectName(KafkaController.MBeanName),
                                                      "shutdownBroker",
                                                      Array(params.brokerId),
                                                      Array(classOf[Int].getName)).asInstanceOf[Int]
          val shutdownComplete = (leaderPartitionsRemaining == 0)
          info("Shutdown status: " + (if (shutdownComplete)
            "complete" else
            "incomplete (broker still leads %d partitions)".format(leaderPartitionsRemaining)))
          shutdownComplete
        case None =>
          error("Operation failed due to controller failure on %d.".format(controllerBrokerId))
          false
      }
    }
    catch {
      // Bug fix: previously caught Throwable, which swallowed fatal VM errors
      // (e.g. OutOfMemoryError). Only Exceptions are treated as a failed attempt.
      case e: Exception =>
        error("Operation failed due to %s.".format(e.getMessage), e)
        false
    }
    finally {
      if (zkClient != null)
        zkClient.close()
    }
  }

  def main(args: Array[String]) {
    val parser = new OptionParser
    val brokerOpt = parser.accepts("broker", "REQUIRED: The broker to shutdown.")
      .withRequiredArg
      .describedAs("Broker Id")
      .ofType(classOf[java.lang.Integer])
    val zkConnectOpt = parser.accepts("zookeeper", "REQUIRED: The connection string for the zookeeper connection in the form host:port. " +
      "Multiple URLS can be given to allow fail-over.")
      .withRequiredArg
      .describedAs("urls")
      .ofType(classOf[String])
    val numRetriesOpt = parser.accepts("num.retries", "Number of attempts to retry if shutdown does not complete.")
      .withRequiredArg
      .describedAs("number of retries")
      .ofType(classOf[java.lang.Integer])
      .defaultsTo(0)
    val retryIntervalOpt = parser.accepts("retry.interval.ms", "Retry interval if retries requested.")
      .withRequiredArg
      .describedAs("retry interval in ms (> 1000)")
      .ofType(classOf[java.lang.Integer])
      .defaultsTo(1000)
    val jmxUrlOpt = parser.accepts("jmx.url", "Controller's JMX URL.")
      .withRequiredArg
      .describedAs("JMX url.")
      .ofType(classOf[String])
      .defaultsTo("service:jmx:rmi:///jndi/rmi://127.0.0.1:9999/jmxrmi")

    val options = parser.parse(args : _*)
    CommandLineUtils.checkRequiredArgs(parser, options, brokerOpt, zkConnectOpt)

    // Enforce a minimum retry interval of one second.
    val retryIntervalMs = options.valueOf(retryIntervalOpt).intValue.max(1000)
    val numRetries = options.valueOf(numRetriesOpt).intValue
    val shutdownParams =
      ShutdownParams(options.valueOf(zkConnectOpt), options.valueOf(brokerOpt), options.valueOf(jmxUrlOpt))
    if (!invokeShutdown(shutdownParams)) {
      // Keep retrying (up to numRetries) until a shutdown attempt succeeds.
      (1 to numRetries).takeWhile(attempt => {
        info("Retry " + attempt)
        try {
          Thread.sleep(retryIntervalMs)
        }
        catch {
          case ie: InterruptedException => // ignore
        }
        !invokeShutdown(shutdownParams)
      })
    }
  }
}
| dchenbecker/kafka-sbt | core/src/main/scala/kafka/admin/ShutdownBroker.scala | Scala | apache-2.0 | 4,728 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions.codegen
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.types._
/**
 * Java can not access Projection (in package object), so generated Java code
 * extends this concrete base class instead.
 */
abstract class BaseProjection extends Projection {}
abstract class CodeGenMutableRow extends MutableRow with BaseGenericInternalRow
/**
* Generates bytecode that produces a new [[InternalRow]] object based on a fixed set of input
* [[Expression Expressions]] and a given input [[InternalRow]]. The returned [[InternalRow]]
* object is custom generated based on the output types of the [[Expression]] to avoid boxing of
* primitive values.
*/
object GenerateProjection extends CodeGenerator[Seq[Expression], Projection] {
// Canonicalize each expression so semantically-equal trees map to the same
// generated class (enables codegen cache hits).
protected def canonicalize(in: Seq[Expression]): Seq[Expression] =
  in.map(ExpressionCanonicalizer.execute)
// Bind attribute references to ordinals in the given input schema.
protected def bind(in: Seq[Expression], inputSchema: Seq[Attribute]): Seq[Expression] =
  in.map(BindReferences.bindReference(_, inputSchema))
// Make Mutablility optional...
protected def create(expressions: Seq[Expression]): Projection = {
val ctx = newCodeGenContext()
val columns = expressions.zipWithIndex.map {
case (e, i) =>
s"private ${ctx.javaType(e.dataType)} c$i = ${ctx.defaultValue(e.dataType)};\n"
}.mkString("\n ")
val initColumns = expressions.zipWithIndex.map {
case (e, i) =>
val eval = e.gen(ctx)
s"""
{
// column$i
${eval.code}
nullBits[$i] = ${eval.isNull};
if (!${eval.isNull}) {
c$i = ${eval.primitive};
}
}
"""
}.mkString("\n")
val getCases = (0 until expressions.size).map { i =>
s"case $i: return c$i;"
}.mkString("\n ")
val updateCases = expressions.zipWithIndex.map { case (e, i) =>
s"case $i: { c$i = (${ctx.boxedType(e.dataType)})value; return;}"
}.mkString("\n ")
val specificAccessorFunctions = ctx.primitiveTypes.map { jt =>
val cases = expressions.zipWithIndex.flatMap {
case (e, i) if ctx.javaType(e.dataType) == jt =>
Some(s"case $i: return c$i;")
case _ => None
}.mkString("\n ")
if (cases.length > 0) {
val getter = "get" + ctx.primitiveTypeName(jt)
s"""
@Override
public $jt $getter(int i) {
if (isNullAt(i)) {
return ${ctx.defaultValue(jt)};
}
switch (i) {
$cases
}
throw new IllegalArgumentException("Invalid index: " + i
+ " in $getter");
}"""
} else {
""
}
}.filter(_.length > 0).mkString("\n")
val specificMutatorFunctions = ctx.primitiveTypes.map { jt =>
val cases = expressions.zipWithIndex.flatMap {
case (e, i) if ctx.javaType(e.dataType) == jt =>
Some(s"case $i: { c$i = value; return; }")
case _ => None
}.mkString("\n ")
if (cases.length > 0) {
val setter = "set" + ctx.primitiveTypeName(jt)
s"""
@Override
public void $setter(int i, $jt value) {
nullBits[i] = false;
switch (i) {
$cases
}
throw new IllegalArgumentException("Invalid index: " + i +
" in $setter}");
}"""
} else {
""
}
}.filter(_.length > 0).mkString("\n")
val hashValues = expressions.zipWithIndex.map { case (e, i) =>
val col = s"c$i"
val nonNull = e.dataType match {
case BooleanType => s"$col ? 0 : 1"
case ByteType | ShortType | IntegerType | DateType => s"$col"
case LongType | TimestampType => s"$col ^ ($col >>> 32)"
case FloatType => s"Float.floatToIntBits($col)"
case DoubleType =>
s"(int)(Double.doubleToLongBits($col) ^ (Double.doubleToLongBits($col) >>> 32))"
case BinaryType => s"java.util.Arrays.hashCode($col)"
case _ => s"$col.hashCode()"
}
s"isNullAt($i) ? 0 : ($nonNull)"
}
val hashUpdates: String = hashValues.map( v =>
s"""
result *= 37; result += $v;"""
).mkString("\n")
val columnChecks = expressions.zipWithIndex.map { case (e, i) =>
s"""
if (nullBits[$i] != row.nullBits[$i] ||
(!nullBits[$i] && !(${ctx.genEqual(e.dataType, s"c$i", s"row.c$i")}))) {
return false;
}
"""
}.mkString("\n")
val copyColumns = expressions.zipWithIndex.map { case (e, i) =>
s"""if (!nullBits[$i]) arr[$i] = c$i;"""
}.mkString("\n ")
val code = s"""
public SpecificProjection generate($exprType[] expr) {
return new SpecificProjection(expr);
}
class SpecificProjection extends ${classOf[BaseProjection].getName} {
private $exprType[] expressions;
${declareMutableStates(ctx)}
${declareAddedFunctions(ctx)}
public SpecificProjection($exprType[] expr) {
expressions = expr;
${initMutableStates(ctx)}
}
@Override
public Object apply(Object r) {
// GenerateProjection does not work with UnsafeRows.
assert(!(r instanceof ${classOf[UnsafeRow].getName}));
return new SpecificRow((InternalRow) r);
}
final class SpecificRow extends ${classOf[CodeGenMutableRow].getName} {
$columns
public SpecificRow(InternalRow i) {
$initColumns
}
public int numFields() { return ${expressions.length};}
protected boolean[] nullBits = new boolean[${expressions.length}];
public void setNullAt(int i) { nullBits[i] = true; }
public boolean isNullAt(int i) { return nullBits[i]; }
@Override
public Object genericGet(int i) {
if (isNullAt(i)) return null;
switch (i) {
$getCases
}
return null;
}
public void update(int i, Object value) {
if (value == null) {
setNullAt(i);
return;
}
nullBits[i] = false;
switch (i) {
$updateCases
}
}
$specificAccessorFunctions
$specificMutatorFunctions
@Override
public int hashCode() {
int result = 37;
$hashUpdates
return result;
}
@Override
public boolean equals(Object other) {
if (other instanceof SpecificRow) {
SpecificRow row = (SpecificRow) other;
$columnChecks
return true;
}
return super.equals(other);
}
@Override
public InternalRow copy() {
Object[] arr = new Object[${expressions.length}];
${copyColumns}
return new ${classOf[GenericInternalRow].getName}(arr);
}
}
}
"""
logDebug(s"MutableRow, initExprs: ${expressions.mkString(",")} code:\n" +
CodeFormatter.format(code))
compile(code).generate(ctx.references.toArray).asInstanceOf[Projection]
}
}
| ArvinDevel/onlineAggregationOnSparkV2 | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateProjection.scala | Scala | apache-2.0 | 7,858 |
package sharry.common
import io.circe.Decoder
import io.circe.Encoder
/** A byte count with human-friendly rendering and simple arithmetic.
  * Comparison and arithmetic operate directly on the underlying `bytes` value.
  */
final case class ByteSize(bytes: Long) {
  /** Renders this size via [[ByteSize.bytesToHuman]] (e.g. "2.00K"). */
  def toHuman: String =
    ByteSize.bytesToHuman(bytes)
  def <=(other: ByteSize) =
    bytes <= other.bytes
  def >=(other: ByteSize) =
    bytes >= other.bytes
  def >(other: ByteSize) =
    bytes > other.bytes
  // NOTE: subtraction/addition can overflow Long for extreme sizes; callers
  // are expected to stay within realistic file-size ranges.
  def -(other: ByteSize) =
    ByteSize(bytes - other.bytes)
  def +(other: ByteSize) =
    ByteSize(bytes + other.bytes)
}
object ByteSize {

  /** The zero byte size. */
  val zero = ByteSize(0L)

  /** Renders a byte count using binary units: B below 1 KiB, then K/M/G with
    * two decimals. `Long.MinValue` is excluded from the fast path because
    * `math.abs(Long.MinValue)` is negative.
    */
  def bytesToHuman(bytes: Long): String =
    if (math.abs(bytes) < 1024 && bytes != Long.MinValue) s"${bytes}B"
    else {
      val k = bytes / 1024.0
      if (math.abs(k) < 1024) f"$k%.02fK"
      else {
        val m = k / 1024.0
        if (math.abs(m) < 1024) f"$m%.02fM"
        else f"${m / 1024.0}%.02fG"
      }
    }

  /** Parses a size string: either a plain number of bytes ("2048") or a number
    * with a (case-insensitive) k/m/g binary-unit suffix ("2k", "1.5m").
    * Returns `Left` with a message for anything unparseable; never throws.
    */
  def parse(str: String): Either[String, ByteSize] =
    str.toLongOption
      .map(ByteSize.apply)
      .toRight(s"Not a valid size string: $str")
      .orElse {
        val (num, unit) = span(str.toLowerCase)
        val factor = unit match {
          case "k" => Some(1024d)
          case "m" => Some(1024d * 1024)
          case "g" => Some(1024d * 1024 * 1024)
          case _   => None
        }
        // Bug fix: use toDoubleOption instead of toDouble so a malformed
        // number part (e.g. "k" or "abcm") yields a Left instead of escaping
        // the Either with a NumberFormatException.
        (for {
          f <- factor
          d <- num.toDoubleOption
        } yield ByteSize(math.round(d * f))).toRight(s"Invalid byte string: $str")
      }

  /** Splits a string into (everything but the last char, last char). */
  private def span(str: String): (String, String) =
    if (str.isEmpty) ("", "")
    else (str.init, str.last.toString)

  /** Like [[parse]] but throws on invalid input; for trusted/static values. */
  def unsafe(str: String): ByteSize =
    parse(str).fold(sys.error, identity)

  implicit val jsonDecoder: Decoder[ByteSize] =
    Decoder.decodeLong.map(ByteSize.apply)

  implicit val jsonEncoder: Encoder[ByteSize] =
    Encoder.encodeLong.contramap(_.bytes)
}
| eikek/sharry | modules/common/src/main/scala/sharry/common/ByteSize.scala | Scala | gpl-3.0 | 1,764 |
package com.arcusys.valamis.web.portlet
import javax.portlet.{RenderRequest, RenderResponse}
import com.arcusys.learn.liferay.services.CompanyHelper
import com.arcusys.learn.liferay.util.PortalUtilHelper
import com.arcusys.valamis.lrssupport.oauth.OAuthPortlet
import com.arcusys.valamis.util.serialization.JsonHelper
import com.arcusys.valamis.web.portlet.base.{LiferayHelpers, PortletBase}
class TinCanStatementViewerView extends OAuthPortlet with PortletBase {

  /** Renders the TinCan statement viewer: collects portal context (language,
    * company, LRS endpoint) and feeds it, together with the UI translations,
    * into the mustache template.
    */
  override def doView(request: RenderRequest, response: RenderResponse) {
    // sendMustacheFile writes through this implicit writer.
    implicit val out = response.getWriter

    val language = LiferayHelpers.getLanguage(request)
    val companyId = PortalUtilHelper.getCompanyId(request)

    val viewData = Map(
      "contextPath" -> getContextPath(request),
      "endpointData" -> JsonHelper.toJson(getLrsEndpointInfo(request)),
      "accountHomePage" -> PortalUtilHelper.getHostName(companyId)
    ) ++ getTranslation("statementViewer", language)

    sendMustacheFile(viewData, "statement_viewer.html")
  }
}
| arcusys/JSCORM | valamis-portlets/src/main/scala/com/arcusys/valamis/web/portlet/TinCanStatementViewerView.scala | Scala | gpl-3.0 | 1,082 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalatest.exceptions.TestFailedException
/*
This has a bit of redundancy with several other specs, but was the
original file I used to develop the matchers syntax, and it has a few
tests that don't exist elsewhere, so I'm keeping it alive for now.
*/
class ShouldMatcherSpec extends Spec with Matchers {

  // Covers the `be` matcher for booleans, null, Nil, None and arbitrary Any
  // values, in both positive and negated (`should not`) forms.
  object `The be matcher` {

    object `(for booleans)` {

      def `should do nothing when false is compared to false` {
        false should be (false)
      }

      def `should do nothing when true is compared to true` {
        true should be (true)
      }

      def `should throw an assertion error when not equal` {
        val caught = intercept[TestFailedException] {
          false should be (true)
        }
        assert(caught.getMessage === "false was not true")
      }
    }

    object `(for null)` {

      def `should do nothing when null is compared to null` {
        val o: String = null
        o should be (null)
        o should equal (null)
      }

      def `should throw an assertion error when non-null compared to null` {
        val caught = intercept[TestFailedException] {
          val o = "Helloooooo"
          o should be (null)
        }
        assert(caught.getMessage === "\\"Helloooooo\\" was not null")
      }

      def `should do nothing when non-null is compared to not null` {
        val o = "Helloooooo"
        o should not { be (null) }
      }

      def `should throw an assertion error when null compared to not null` {
        val caught1 = intercept[TestFailedException] {
          val o: String = null
          o should not { be (null) }
        }
        assert(caught1.getMessage === "The reference was null")
      }

      def `should work when used in a logical expression` {
        val o: String = null
        o should { be (null) and equal (null) }
        o should { equal (null) and be (null) }
      }
    }

    object `(for Nil)` {

      def `should do nothing when an empty list is compared to Nil` {
        val emptyList = List[String]()
        emptyList should be (Nil)
        emptyList should equal (Nil)
      }

      def `should throw an assertion error when a non-empty list is compared to Nil` {
        val nonEmptyList = List("Helloooooo")
        val caught1 = intercept[TestFailedException] {
          nonEmptyList should be (Nil)
        }
        assert(caught1.getMessage === "List(Helloooooo) was not equal to List()")
        val caught2 = intercept[TestFailedException] {
          nonEmptyList should equal (Nil)
        }
        assert(caught2.getMessage === "List(Helloooooo) did not equal List()")
      }

      def `should do nothing when non-null is compared to not null` {
        val nonEmptyList = List("Helloooooo")
        nonEmptyList should not { be (Nil) }
        nonEmptyList should not { equal (Nil) }
      }

      def `should throw an assertion error when null compared to not null` {
        val emptyList = List[String]()
        val caught1 = intercept[TestFailedException] {
          emptyList should not { be (Nil) }
        }
        assert(caught1.getMessage === "List() was equal to List()")
        val caught3 = intercept[TestFailedException] {
          emptyList should not { equal (Nil) }
        }
        assert(caught3.getMessage === "List() equaled List()")
      }

      def `should work when used in a logical expression` {
        val emptyList = List[Int]()
        emptyList should { be (Nil) and equal (Nil) }
        emptyList should { equal (Nil) and be (Nil) } // Nada, and nada is nada
      }
    }

    object `(for None)` {

      /* I think I should have tests for options somewhere
      val option = Some(1)
      option should equal (Some(1))
      val option = Some(1)
      option should not { equal (Some(2)) }
      */

      def `should do nothing when a None option is compared to None` {
        val option: Option[String] = None
        option should be (None)
        option should equal (None)
      }

      def `should throw an assertion error when a Some is compared to None` {
        val someString = Some("Helloooooo")
        val caught1 = intercept[TestFailedException] {
          someString should be (None)
        }
        assert(caught1.getMessage === "Some(Helloooooo) was not equal to None")
        val caught2 = intercept[TestFailedException] {
          someString should equal (None)
        }
        assert(caught2.getMessage === "Some(Helloooooo) did not equal None")
      }

      def `should do nothing when Some is compared to not None` {
        val someString = Some("Helloooooo")
        someString should not { be (None) }
        someString should not { equal (None) }
      }

      def `should throw an assertion error when None compared to not None` {
        val none = None
        val caught1 = intercept[TestFailedException] {
          none should not { be (None) }
        }
        assert(caught1.getMessage === "None was equal to None")
        val caught3 = intercept[TestFailedException] {
          none should not { equal (None) }
        }
        assert(caught3.getMessage === "None equaled None")
        val noString: Option[String] = None
        val caught5 = intercept[TestFailedException] {
          noString should not { be (None) }
        }
        assert(caught5.getMessage === "None was equal to None")
        val caught7 = intercept[TestFailedException] {
          noString should not { equal (None) }
        }
        assert(caught7.getMessage === "None equaled None")
      }

      def `should work when used in a logical expression` {
        val none = None
        none should { be (None) and equal (None) }
        none should { equal (None) and be (None) }
        val noString: Option[String] = None
        noString should { be (None) and equal (None) }
        noString should { equal (None) and be (None) }
      }
    }

    object `(for Any)` {
      def `should do nothing when equal` {
        1 should be (1)
        val option = Some(1)
        option should be (Some(1))
      }

      def `should throw an assertion error when not equal` {
        val caught = intercept[TestFailedException] {
          1 should be (2)
        }
        assert(caught.getMessage === "1 was not equal to 2")
      }

      def `should do nothing when not equal and used with should not` {
        1 should not { be (2) }
        val option = Some(1)
        option should not { be (Some(2)) }
      }

      def `should throw an assertion error when equal but used with should not` {
        val caught = intercept[TestFailedException] {
          1 should not { be (1) }
        }
        assert(caught.getMessage === "1 was equal to 1")
      }
    }
  }

  // Covers `contain key`/`contain value` for maps and `have size` for
  // maps, lists, sets and arrays.
  object `The have word` {

    def `should work with map and key, right after a 'should'` {
      val map = Map(1 -> "Howdy")
      map should contain key (1)
      map should contain key (1)
      map should equal { Map(1 -> "Howdy") }
      val otherMap = Map("Howdy" -> 1)
      otherMap should contain key ("Howdy")
      otherMap should equal { Map("Howdy" -> 1) }
      import scala.collection.immutable.TreeMap
      val treeMap = TreeMap(1 -> "hi", 2 -> "howdy")
      treeMap should contain key (1)
    }

    def `should work with map and key, in a logical expression` {
      val map = Map(1 -> "Howdy")
      // The compiler infer the type of the value to be Nothing if I say: map should { contain key 1 and equal (Map(1 -> "Howdy")) }
      // map should { have.key[Int, String](1) and equal (Map(1 -> "Howdy")) }
      map should { contain key (1) and equal (Map(1 -> "Howdy")) }
      val otherMap = Map("Howdy" -> 1)
      // otherMap should { have.key[String, Int]("Howdy") and equal (Map("Howdy" -> 1)) }
      otherMap should { contain key ("Howdy") and equal (Map("Howdy" -> 1)) }
    }

    def `should work with map and key, right after a 'should not'` {
      val map = Map(1 -> "Howdy")
      map should not { contain key (2) }
    }

    def `should work with map and value, right after a 'should'` {
      val map = Map(1 -> "Howdy")
      map should contain value ("Howdy")
      map should contain value ("Howdy")
      map should equal { Map(1 -> "Howdy") }
      val otherMap = Map("Howdy" -> 1)
      otherMap should contain value (1)
      otherMap should equal { Map("Howdy" -> 1) }
    }

    def `should work with map and value, in a logical expression` {
      val map = Map(1 -> "Howdy")
      map should { equal (Map(1 -> "Howdy")) and (contain value "Howdy") }
      val otherMap = Map("Howdy" -> 1)
      otherMap should { contain value (1) and equal (Map("Howdy" -> 1)) }
    }

    def `should work with map and value, right after a 'should not'` {
      val map = Map(1 -> "Howdy")
      map should not { contain value ("Doody") }
    }

    def `should work with collection and size, in an and expression.` {
      val list = List(1, 2, 3)
      list should { have size (3) and equal (List(1, 2, 3)) }
    }

    def `should work with collection and size, right after a 'should'` {
      val map = Map(1 -> "Howdy")
      map should have size (1)
      val caught1 = intercept[TestFailedException] {
        map should have size (5)
      }
      assert(caught1.getMessage.indexOf("did not have size") != -1)
      val list = List(1, 2, 3, 4, 5)
      list should have size (5)
      val caught2 = intercept[TestFailedException] {
        list should have size (6)
      }
      assert(caught2.getMessage.indexOf("did not have size") != -1)
      val set = Set(1.0, 2.0, 3.0)
      set should have size (3)
      val caught3 = intercept[TestFailedException] {
        set should have size (0)
      }
      assert(caught3.getMessage.indexOf("did not have size") != -1)
      val array = Array[String]()
      array should have size 0
      val caught4 = intercept[TestFailedException] {
        array should have size 2
      }
      assert(caught4.getMessage.indexOf("did not have size") != -1)
    }

    def `should work with collection and size, right after a 'should not'` {
      val map = Map(1 -> "Howdy")
      map should not { have size (2) }
      val caught1 = intercept[TestFailedException] {
        map should not { have size (1) }
      }
      assert(caught1.getMessage.indexOf("had size") != -1, caught1.getMessage)
      val list = List(1, 2, 3, 4, 5)
      list should not { have size (6) }
      val caught2 = intercept[TestFailedException] {
        list should not { have size (5) }
      }
      assert(caught2.getMessage.indexOf("had size") != -1)
      val set = Set(1.0, 2.0, 3.0)
      set should not { have size (0) }
      val caught3 = intercept[TestFailedException] {
        set should not { have size (3) }
      }
      assert(caught3.getMessage.indexOf("had size") != -1)
      val array = Array[String]()
      array should not { have size (2) }
      val caught4 = intercept[TestFailedException] {
        array should not { have size (0) }
      }
      assert(caught4.getMessage.indexOf("had size") != -1)
    }
  }

  // Covers `contain (element)` across sets, lists, arrays and maps.
  object `The contain word` {

    def `should work with a set, list, array, and map right after a 'should'` {
      val set = Set(1, 2, 3)
      set should contain (2)
      val caught1 = intercept[TestFailedException] {
        set should contain (5)
      }
      assert(caught1.getMessage.indexOf("did not contain element") != -1)
      set should { contain (2) and equal (Set(1, 2, 3)) }
      val caught1b = intercept[TestFailedException] {
        set should { contain (5) and equal(Set(1, 2, 3)) }
      }
      assert(caught1b.getMessage.indexOf("did not contain element") != -1)
      val list = List("one", "two", "three")
      list should contain ("two")
      val caught2 = intercept[TestFailedException] {
        list should contain ("five")
      }
      assert(caught2.getMessage.indexOf("did not contain element") != -1)
      val array = Array("one", "two", "three")
      array should contain ("one")
      val caught3 = intercept[TestFailedException] {
        array should contain ("five")
      }
      assert(caught3.getMessage.indexOf("did not contain element") != -1)
      val map = Map(1 -> "one", 2 -> "two", 3 -> "three")
      val tuple2: Tuple2[Int, String] = 1 -> "one"
      map should contain (tuple2)
      val caught4 = intercept[TestFailedException] {
        map should contain (1 -> "won")
      }
      assert(caught4.getMessage.indexOf("did not contain element") != -1)
    }

    def `should work with a set, list, array, and map right after a 'should not'` {
      val set = Set(1, 2, 3)
      set should not { contain (5) }
      val caught1 = intercept[TestFailedException] {
        set should not { contain (2) }
      }
      assert(caught1.getMessage.indexOf("contained element") != -1)
      val list = List("one", "two", "three")
      list should not { contain ("five") }
      val caught2 = intercept[TestFailedException] {
        list should not { contain ("two") }
      }
      assert(caught2.getMessage.indexOf("contained element") != -1)
      val array = Array("one", "two", "three")
      array should not { contain ("five") }
      val caught3 = intercept[TestFailedException] {
        array should not { contain ("one") }
      }
      assert(caught3.getMessage.indexOf("contained element") != -1)
      val map = Map(1 -> "one", 2 -> "two", 3 -> "three")
      val tuple2: Tuple2[Int, String] = 1 -> "won"
      map should not { contain (tuple2) }
      val caught4 = intercept[TestFailedException] {
        map should not { contain (1 -> "one") }
      }
      assert(caught4.getMessage.indexOf("contained element") != -1)
    }
  }

  // Covers reference identity checks (eq semantics) via `be theSameInstanceAs`.
  object `The be theSameInstanceAs syntax` {

    val string = "Hi"
    val obj: AnyRef = string
    val otherString = new String("Hi")

    def `should do nothing if the two objects are the same` {
      string should be theSameInstanceAs (string)
      obj should be theSameInstanceAs (string)
      string should be theSameInstanceAs (obj)
      otherString should not { be theSameInstanceAs (string) }
    }

    def `should throw TestFailedException if the two objects are not the same` {
      val caught1 = intercept[TestFailedException] {
        string should not { be theSameInstanceAs (string) }
      }
      val caught2 = intercept[TestFailedException] {
        obj should not { be theSameInstanceAs (string) }
      }
      val caught3 = intercept[TestFailedException] {
        string should not { be theSameInstanceAs (obj) }
      }
      val caught4 = intercept[TestFailedException] {
        otherString should be theSameInstanceAs (string)
      }
      assert(true) // TODO: test the failure message
    }
  }

  // Exact equality on Double and Float operands.
  object `The floating point numbers when compared with equals` {

    def `should do nothing if the floating point number is exactly equal to the specified value` {
      val sevenDotOh = 7.0
      sevenDotOh should be (7.0)
      sevenDotOh should equal (7.0)
      sevenDotOh should not { be (7.0001) }
      val sixDotOh: Float = 6.0f
      sixDotOh should be (6.0)
      sixDotOh should equal (6.0)
      sixDotOh should not { be (6.0001) }
    }

    def `should throw TestFailedException if the floating point number is not exactly equal to the specified value` {
      val sevenDotOh = 7.0001
      val caught1 = intercept[TestFailedException] {
        sevenDotOh should be (7.0)
        // sevenDotOh should be (7.0 exactly)
      }
      assert(caught1.getMessage === "7.0001 was not equal to 7.0")
      val caught2 = intercept[TestFailedException] {
        sevenDotOh should equal (7.0)
      }
      assert(caught2.getMessage === "7.0001 did not equal 7.0")
      val caught3 = intercept[TestFailedException] {
        sevenDotOh should not { be (7.0001) }
      }
      assert(caught3.getMessage === "7.0001 was equal to 7.0001")
      val sixDotOh: Float = 6.0001f
      val caught4 = intercept[TestFailedException] {
        // sixDotOh should be (6.0f exactly)
        sixDotOh should be (6.0f)
      }
      assert(caught4.getMessage === "6.0001 was not equal to 6.0")
      val caught5 = intercept[TestFailedException] {
        sixDotOh should equal (6.0f)
      }
      assert(caught5.getMessage === "6.0001 did not equal 6.0")
      val caught6 = intercept[TestFailedException] {
        sixDotOh should not { be (6.0001f) }
      }
      assert(caught6.getMessage === "6.0001 was equal to 6.0001")
    }
  }

  // Tolerance-based comparison with `plusOrMinus`, positive and negative values.
  object `The floating point 'plusOrMinus' operator` {

    def `should do nothing if the floating point number is within the specified range` {
      val sevenDotOh = 7.0
      sevenDotOh should be (7.1 plusOrMinus 0.2)
      sevenDotOh should be (6.9 plusOrMinus 0.2)
      sevenDotOh should not { be (7.5 plusOrMinus 0.2) }
      sevenDotOh should not { be (6.5 plusOrMinus 0.2) }
      val minusSevenDotOh = -7.0
      minusSevenDotOh should be (-7.1 plusOrMinus 0.2)
      minusSevenDotOh should be (-6.9 plusOrMinus 0.2)
      minusSevenDotOh should not { be (-7.5 plusOrMinus 0.2) }
      minusSevenDotOh should not { be (-6.5 plusOrMinus 0.2) }
    }

    def `should throw TestFailedException if the floating point number is not within the specified range` {
      val sevenDotOh = 7.0
      val caught1 = intercept[TestFailedException] {
        sevenDotOh should not { be (7.1 plusOrMinus 0.2) }
      }
      assert(caught1.getMessage === "7.0 was 7.1 plus or minus 0.2")
      val caught2 = intercept[TestFailedException] {
        sevenDotOh should not { be (6.9 plusOrMinus 0.2) }
      }
      assert(caught2.getMessage === "7.0 was 6.9 plus or minus 0.2")
      val caught3 = intercept[TestFailedException] {
        sevenDotOh should be (7.5 plusOrMinus 0.2)
      }
      assert(caught3.getMessage === "7.0 was not 7.5 plus or minus 0.2")
      val caught4 = intercept[TestFailedException] {
        sevenDotOh should be (6.5 plusOrMinus 0.2)
      }
      assert(caught4.getMessage === "7.0 was not 6.5 plus or minus 0.2")
      val minusSevenDotOh = -7.0
      val caught5 = intercept[TestFailedException] {
        minusSevenDotOh should not { be (-7.1 plusOrMinus 0.2) }
      }
      assert(caught5.getMessage === "-7.0 was -7.1 plus or minus 0.2")
      val caught6 = intercept[TestFailedException] {
        minusSevenDotOh should not { be (-6.9 plusOrMinus 0.2) }
      }
      assert(caught6.getMessage === "-7.0 was -6.9 plus or minus 0.2")
      val caught7 = intercept[TestFailedException] {
        minusSevenDotOh should be (-7.5 plusOrMinus 0.2)
      }
      assert(caught7.getMessage === "-7.0 was not -7.5 plus or minus 0.2")
      val caught8 = intercept[TestFailedException] {
        minusSevenDotOh should be (-6.5 plusOrMinus 0.2)
      }
      assert(caught8.getMessage === "-7.0 was not -6.5 plus or minus 0.2")
    }
  }
}
| svn2github/scalatest | src/test/scala/org/scalatest/ShouldMatcherSpec.scala | Scala | apache-2.0 | 19,421 |
package spark.streaming.dstream
import spark.streaming.{Duration, DStream, Time}
import spark.RDD
private[streaming]
/** A DStream that applies `mapFunc` element-wise to every RDD of `parent`. */
class MappedDStream[T: ClassManifest, U: ClassManifest] (
    parent: DStream[T],
    mapFunc: T => U
  ) extends DStream[U](parent.ssc) {

  /** This stream depends only on its parent. */
  override def dependencies = List(parent)

  /** Mapping does not change how often batches are produced. */
  override def slideDuration: Duration = parent.slideDuration

  /** Maps the parent's RDD for `validTime`, if one exists. */
  override def compute(validTime: Time): Option[RDD[U]] =
    for (parentRDD <- parent.getOrCompute(validTime)) yield parentRDD.map(mapFunc)
}
| koeninger/spark | streaming/src/main/scala/spark/streaming/dstream/MappedDStream.scala | Scala | bsd-3-clause | 491 |
package net.tobysullivan.shorturl
/** Thrown when a short-URL hash has no corresponding entry.
  *
  * Bug fix: `msg` was previously never passed to the `Exception` superclass,
  * so `getMessage` always returned null; it is now forwarded.
  */
class HashNotFoundException(msg: String) extends Exception(msg)
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import org.apache.spark.sql.{AnalysisException, InternalOutputModes}
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.streaming.OutputMode
/**
* Analyzes the presence of unsupported operations in a logical plan.
*/
object UnsupportedOperationChecker {

  /** Rejects batch queries that contain any streaming source; such plans must
    * be started with `writeStream.start()` instead.
    */
  def checkForBatch(plan: LogicalPlan): Unit = {
    plan.foreachUp {
      case p if p.isStreaming =>
        throwError("Queries with streaming sources must be executed with writeStream.start()")(p)
      case _ =>
    }
  }

  /** Validates a streaming query plan against the given output mode, throwing
    * [[AnalysisException]] on the first unsupported operation found.
    */
  def checkForStreaming(plan: LogicalPlan, outputMode: OutputMode): Unit = {
    if (!plan.isStreaming) {
      throwError(
        "Queries without streaming sources cannot be executed with writeStream.start()")(plan)
    }
    // Disallow multiple streaming aggregations
    val aggregates = plan.collect { case a@Aggregate(_, _, _) if a.isStreaming => a }
    if (aggregates.size > 1) {
      throwError(
        "Multiple streaming aggregations are not supported with " +
          "streaming DataFrames/Datasets")(plan)
    }
    // Disallow some output mode
    outputMode match {
      case InternalOutputModes.Append if aggregates.nonEmpty =>
        val aggregate = aggregates.head
        // Find any attributes that are associated with an eventTime watermark.
        val watermarkAttributes = aggregate.groupingExpressions.collect {
          case a: Attribute if a.metadata.contains(EventTimeWatermark.delayKey) => a
        }
        // We can append rows to the sink once the group is under the watermark. Without this
        // watermark a group is never "finished" so we would never output anything.
        if (watermarkAttributes.isEmpty) {
          throwError(
            s"$outputMode output mode not supported when there are streaming aggregations on " +
              s"streaming DataFrames/DataSets")(plan)
        }
      case InternalOutputModes.Complete | InternalOutputModes.Update if aggregates.isEmpty =>
        throwError(
          s"$outputMode output mode not supported when there are no streaming aggregations on " +
            s"streaming DataFrames/Datasets")(plan)
      case _ =>
    }
    /**
     * Whether the subplan will contain complete data or incremental data in every incremental
     * execution. Some operations may be allowed only when the child logical plan gives complete
     * data.
     */
    def containsCompleteData(subplan: LogicalPlan): Boolean = {
      val aggs = plan.collect { case a@Aggregate(_, _, _) if a.isStreaming => a }
      // Either the subplan has no streaming source, or it has aggregation with Complete mode
      !subplan.isStreaming || (aggs.nonEmpty && outputMode == InternalOutputModes.Complete)
    }
    // Walk the plan bottom-up; each case names an operator (or operator/mode
    // combination) that is unsupported in streaming execution.
    plan.foreachUp { implicit subPlan =>
      // Operations that cannot exists anywhere in a streaming plan
      subPlan match {
        case Aggregate(_, aggregateExpressions, child) =>
          val distinctAggExprs = aggregateExpressions.flatMap { expr =>
            expr.collect { case ae: AggregateExpression if ae.isDistinct => ae }
          }
          throwErrorIf(
            child.isStreaming && distinctAggExprs.nonEmpty,
            "Distinct aggregations are not supported on streaming DataFrames/Datasets, unless " +
              "it is on aggregated DataFrame/Dataset in Complete output mode. Consider using " +
              "approximate distinct aggregation (e.g. approx_count_distinct() instead of count()).")
        case _: Command =>
          throwError("Commands like CreateTable*, AlterTable*, Show* are not supported with " +
            "streaming DataFrames/Datasets")
        case _: InsertIntoTable =>
          throwError("InsertIntoTable is not supported with streaming DataFrames/Datasets")
        case Join(left, right, joinType, _) =>
          joinType match {
            case _: InnerLike =>
              if (left.isStreaming && right.isStreaming) {
                throwError("Inner join between two streaming DataFrames/Datasets is not supported")
              }
            case FullOuter =>
              if (left.isStreaming || right.isStreaming) {
                throwError("Full outer joins with streaming DataFrames/Datasets are not supported")
              }
            case LeftOuter | LeftSemi | LeftAnti =>
              if (right.isStreaming) {
                throwError("Left outer/semi/anti joins with a streaming DataFrame/Dataset " +
                  "on the right is not supported")
              }
            case RightOuter =>
              if (left.isStreaming) {
                throwError("Right outer join with a streaming DataFrame/Dataset on the left is " +
                  "not supported")
              }
            case NaturalJoin(_) | UsingJoin(_, _) =>
              // They should not appear in an analyzed plan.
            case _ =>
              throwError(s"Join type $joinType is not supported with streaming DataFrame/Dataset")
          }
        case c: CoGroup if c.children.exists(_.isStreaming) =>
          throwError("CoGrouping with a streaming DataFrame/Dataset is not supported")
        case u: Union if u.children.map(_.isStreaming).distinct.size == 2 =>
          throwError("Union between streaming and batch DataFrames/Datasets is not supported")
        case Except(left, right) if right.isStreaming =>
          throwError("Except on a streaming DataFrame/Dataset on the right is not supported")
        case Intersect(left, right) if left.isStreaming && right.isStreaming =>
          throwError("Intersect between two streaming DataFrames/Datasets is not supported")
        case GroupingSets(_, _, child, _) if child.isStreaming =>
          throwError("GroupingSets is not supported on streaming DataFrames/Datasets")
        case GlobalLimit(_, _) | LocalLimit(_, _) if subPlan.children.forall(_.isStreaming) =>
          throwError("Limits are not supported on streaming DataFrames/Datasets")
        case Sort(_, _, _) | SortPartitions(_, _) if !containsCompleteData(subPlan) =>
          throwError("Sorting is not supported on streaming DataFrames/Datasets, unless it is on" +
            "aggregated DataFrame/Dataset in Complete output mode")
        case Sample(_, _, _, _, child) if child.isStreaming =>
          throwError("Sampling is not supported on streaming DataFrames/Datasets")
        case Window(_, _, _, child) if child.isStreaming =>
          throwError("Non-time-based windows are not supported on streaming DataFrames/Datasets")
        case ReturnAnswer(child) if child.isStreaming =>
          throwError("Cannot return immediate result on streaming DataFrames/Dataset. Queries " +
            "with streaming DataFrames/Datasets must be executed with writeStream.start().")
        case _ =>
      }
    }
  }

  /** Throws [[AnalysisException]] with `msg` when `condition` holds. */
  private def throwErrorIf(
      condition: Boolean,
      msg: String)(implicit operator: LogicalPlan): Unit = {
    if (condition) {
      throwError(msg)
    }
  }

  /** Raises [[AnalysisException]] annotated with the operator's source position. */
  private def throwError(msg: String)(implicit operator: LogicalPlan): Nothing = {
    throw new AnalysisException(
      msg, operator.origin.line, operator.origin.startPosition, Some(operator))
  }
}
| Panos-Bletsos/spark-cost-model-optimizer | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/UnsupportedOperationChecker.scala | Scala | apache-2.0 | 8,204 |
/** Functor instance for Tree: maps `f` over every leaf, preserving shape. */
implicit val treeFunctor = new Functor[Tree] {
  def fmap[A, B](f: A => B)(fa: Tree[A]): Tree[B] = fa match {
    case Node(left, right) => Node(fmap(f)(left), fmap(f)(right))
    case Leaf(value)       => Leaf(f(value))
  }
}
package breeze.collection.mutable
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import org.scalatest._
import org.scalatest.junit._
import org.scalatest.prop._
import org.junit.runner.RunWith
@RunWith(classOf[JUnitRunner])
// Exercises SparseArray's collection operations, paying particular attention
// to how the set of active (explicitly stored) entries is maintained.
class SparseArrayTest extends FunSuite with Checkers {
  test("Map") {
    val x = SparseArray(1,0,2,0,3,0,-1,-2,-3)
    // Drop the explicitly stored zeros so they become implicit defaults.
    x.compact()
    val y = x.map(_ + 1)
    // Mapping must preserve length but only touch active entries' storage.
    assert(x.length === y.length)
    // Three of the nine entries are the (implicit) default zero.
    assert(x.activeSize === x.length - 3)
    // NOTE(review): the expected values show that map also transforms the
    // default (0 -> 1): inactive slots read back 1 while activeSize stays 6.
    assert(y.activeSize === 6, y)
    assert(y.toList === List(2,1,3,1,4,1,0,-1,-2))
  }
  test("Filter") {
    val x = SparseArray(1,0,2,0,3,0,-1,-2,-3)
    x.compact()
    // Each expectation pairs the surviving elements with the number of
    // entries that remain explicitly stored afterwards.
    assert(x.filter(_ % 2 == 1).toList === List(1,3))
    assert(x.filter(_ % 2 == 1).activeSize === 2)
    assert(x.filter(_ % 2 == 0).toList === List(0,2,0,0,-2))
    assert(x.filter(_ % 2 == 0).activeSize === 2, x.filter(_ % 2 == 0))
    assert(x.filter(_ > 0).toList === List(1,2,3))
    assert(x.filter(_ > 0).activeSize === 3)
    assert(x.filter(_ >= 0).toList === List(1,0,2,0,3,0))
    assert(x.filter(_ >= 0).activeSize === 3)
    // A second fixture whose defaults sit at the front of the array.
    val y = SparseArray(0,1,0,0,-1,-2,-3,-5)
    y.compact()
    assert(y.filter(_ > 0).toList === List(1))
    assert(y.filter(_ >= 0).toList === List(0,1,0,0))
  }
}
| tjhunter/scalanlp-core | math/src/test/scala/breeze/collection/mutable/SparseArrayTest.scala | Scala | apache-2.0 | 1,753 |
package scaliper
import scaliper._
import java.io.File
import java.io.IOException
import scala.collection.JavaConversions._
import scala.collection.mutable
// Numeric codes identifying what kind of measurement a benchmark run collects.
object MeasurementType {
  val TIME = 0     // wall-clock timing runs
  val INSTANCE = 1 // instance-count measurements
  val MEMORY = 2   // memory measurements
  val DEBUG = 3    // debug runs
}
// Launches a benchmark in a forked JVM and parses its measurement output.
object SetupRunner {
  // Splits an argument string on the given pattern, stripping residual
  // whitespace tokens.
  // NOTE(review): "\\\\s+" is the regex \\s+ (a literal backslash then 's'),
  // and "\\t" replaces the two characters '\' 't' — confirm whitespace
  // splitting ("\\s+" / "\t") was what was intended here.
  def split(args:String) = args.split("\\\\s+")
    .map { x => x.replace(" ","").replace("\\t","") }
    .filter { x => x != "" }
    .toSeq
  // Derives a classpath for the forked JVM from the object's classloader,
  // falling back to the JVM's own classpath property.
  def defaultClasspath(o: Object) = o.getClass.getClassLoader match {
    case urlcl: java.net.URLClassLoader => extractClasspath(urlcl)
    case cl => sys.props("java.class.path")
  }
  // Renders a URLClassLoader's file: URLs as a ':'-separated classpath string.
  def extractClasspath(urlclassloader: java.net.URLClassLoader): String = {
    val fileResource = "file:(.*)".r
    val files = urlclassloader.getURLs.map(_.toString) collect {
      case fileResource(file) => file
    }
    files.mkString(":")
  }
  // Deserializes the JSON measurement payload written by the subprocess.
  def parseOutput(s:String):MeasurementSet = {
    JsonConversion.getMeasurementSet(s)
  }
  // Serializes `benchmark` to a temp file, forks a JVM running
  // InProcessRunner against it, streams its log lines to `observer`, and
  // returns the parsed MeasurementSet. Throws if the subprocess cannot be
  // started or produced no measurements.
  def measure(benchmark: Benchmark)(observer: String => Unit): MeasurementSet = {
    // NOTE(review): eventLog is printed on failure below but nothing ever
    // appends to it — likely vestigial. consoleDots is also unused.
    val eventLog = new StringBuilder()
    var resultPath: String = null
    var measurementSet: MeasurementSet = null
    var consoleDots = 0
    // Demultiplexes subprocess output: marked lines carry the result path,
    // everything else is forwarded to the observer as log output.
    val reader = new InterleavedReader {
      def log(s:String) = { observer(s) }
      def handleOutput(s:String) = {
        resultPath = s
        measurementSet = parseOutput(IO.read(resultPath))
      }
    }
    def warmupTime:Long = 3000
    def runTime:Long = 1000
    def trial = 1
    // Hand the benchmark to the child process via a serialized temp file.
    val f = java.io.File.createTempFile("pre", "post")
    f.deleteOnExit
    val p = f.getPath
    KryoSerializer.serializeToFile(benchmark, p)
    var caliperArgs = Seq("--warmupMillis", warmupTime.toString,
                          "--runMillis", runTime.toLong.toString,
                          "--measurementType", MeasurementType.TIME.toString(),
                          "--marker", reader.marker,
                          "--BENCHDATA", p)
    val (cmd,rc) = try {
      JavaRunner.run(
        defaultClasspath(benchmark),
        scaliper.InProcessRunner.getClass.getName.replace("$",""),
        caliperArgs
      )(reader.readLine)
    } catch {
      case e:java.io.IOException => throw new RuntimeException("failed to start subprocess", e)
    } finally {
      // f.delete
      // Clean up the result file written by the subprocess, if any.
      if(resultPath != null) {
        new File(resultPath).delete
      }
    }
    if (measurementSet == null) {
      val message = s"Failed to execute ${cmd}"
      System.err.println(" " + message)
      System.err.println(eventLog.toString())
      throw new ConfigurationException(message)
    }
    measurementSet
  }
}
| azavea/scaliper | src/main/scala/scaliper/SetupRunner.scala | Scala | apache-2.0 | 2,674 |
package neuroflow.core
import breeze.generic.UFunc
import breeze.linalg.DenseMatrix
import breeze.storage.Zero
import neuroflow.dsl.Convolution
import scala.reflect.ClassTag
/**
* Collection of common operators expressed as [[UFunc]].
* The CPU implementations are found here, the GPU implicits
* are found in the [[neuroflow.cuda.CuMatrix]] area.
*
* @author bogdanski
* @since 07.03.18
*/
/**
* Subtracts row maximum from row elements.
* Example given:
* |1 2 1| |-1 0 -1|
* |2 2 2| -> | 0 0 0|
* |0 1 0| |-1 0 -1|
*/
object subRowMax extends UFunc {

  implicit object subRowMaxImplDouble extends subRowMax.Impl[DenseMatrix[Double], DenseMatrix[Double]] {
    def apply(in: DenseMatrix[Double]): DenseMatrix[Double] = gapply(in)
  }

  implicit object subRowMaxImplFloat extends subRowMax.Impl[DenseMatrix[Float], DenseMatrix[Float]] {
    def apply(in: DenseMatrix[Float]): DenseMatrix[Float] = gapply(in)
  }

  /**
   * Generic implementation: for each row, find its maximum and subtract it
   * from every element of that row (see the example above the object).
   */
  private def gapply[V: Numeric](in: DenseMatrix[V]): DenseMatrix[V] = {
    import Numeric.Implicits._
    import Ordering.Implicits._
    val out = in.copy
    var r = 0
    while (r < in.rows) {
      // BUG FIX: the column cursor must restart at 0 for every row. The
      // original declared `c` outside the row loop and never reset it after
      // the subtraction pass, so for rows > 0 the max-scan loop was skipped
      // entirely and in(r, 0) was subtracted instead of the row maximum
      // (e.g. row |0 1 0| produced |0 1 0| instead of |-1 0 -1|).
      var c = 0
      var max = in(r, 0)
      while (c < in.cols) {
        val t = in(r, c)
        if (t > max) max = t
        c += 1
      }
      // Second pass: subtract the row maximum from each element.
      c = 0
      while (c < in.cols) {
        out.update(r, c, in(r, c) - max)
        c += 1
      }
      r += 1
    }
    out
  }
}
/**
* Convolutes [[Tensor3D]], operating on its
* linearized projection matrix `in`.
*/
object convolute extends UFunc {
  implicit object convoluteImplDouble extends convolute.Impl3[DenseMatrix[Double], Convolution[Double], Int, DenseMatrix[Double]] {
    def apply(in: DenseMatrix[Double], l: Convolution[Double], batchSize: Int): DenseMatrix[Double] = gapply(in, l, batchSize)
  }
  implicit object convoluteImplFloat extends convolute.Impl3[DenseMatrix[Float], Convolution[Float], Int, DenseMatrix[Float]] {
    def apply(in: DenseMatrix[Float], l: Convolution[Float], batchSize: Int): DenseMatrix[Float] = gapply(in, l, batchSize)
  }
  // Lowers the convolution input into a (field-size x output-position) matrix
  // so the convolution itself can be computed as a matrix product.
  // NOTE(review): assumes `in` is the Tensor3D's linearized projection with
  // one row per input channel Z and batch entries laid out contiguously along
  // the columns — confirm against the Tensor3D layout used by callers.
  private def gapply[V: ClassTag : Zero](in: DenseMatrix[V], l: Convolution[_], batchSize: Int): DenseMatrix[V] = {
    // Input dims (IX, IY), output dims (X, Y), channels Z, and the layer's
    // field, stride and padding hyper-parameters.
    val IX = l.dimIn._1
    val IY = l.dimIn._2
    val X = l.dimOut._1
    val Y = l.dimOut._2
    val Z = l.dimIn._3
    val XB = X * batchSize
    val FX = l.field._1
    val FY = l.field._2
    val SX = l.stride._1
    val SY = l.stride._2
    val PX = l.padding._1
    val PY = l.padding._2
    // Positions that fall into the padding region keep the Zero default.
    val out = DenseMatrix.zeros[V](FX * FY * Z, XB * Y)
    var (x, y, z) = (0, 0, 0)
    while (x < XB) {
      while (y < Y) {
        while (z < Z) {
          var (fX, fY) = (0, 0)
          while (fX < FX) {
            while (fY < FY) {
              // xs: spatial x within one batch item; xb: batch index.
              val xs = x % X
              val xb = x / X
              // (a, b): padded input coordinate covered by this field cell.
              val a = (xs * SX) + fX
              val b = (y * SY) + fY
              // Copy only coordinates inside the unpadded input.
              if (a >= PX && a < (PX + IX) &&
                  b >= PY && b < (PY + IY)) {
                val aNp = a - PX
                val bNp = b - PY
                out.update((z * FX * FY) + fX * FY + fY,
                           (xb * X * Y) + xs * Y + y,
                           in(z, (xb * IX * IY) + aNp * IY + bNp))
              }
              fY += 1
            }
            fY = 0
            fX += 1
          }
          z += 1
        }
        z = 0
        y += 1
      }
      y = 0
      x += 1
    }
    out
  }
}
/**
* De-Convolutes [[Tensor3D]], operating on its
* linearized projection matrix `in`.
*/
object convolute_backprop extends UFunc {
  implicit object convoluteBpImplDouble extends convolute_backprop.Impl3[DenseMatrix[Double], Convolution[Double], Int, DenseMatrix[Double]] {
    def apply(in: DenseMatrix[Double], l: Convolution[Double], batchSize: Int): DenseMatrix[Double] = gapply(in, l, batchSize)
  }
  implicit object convoluteBpImplFloat extends convolute_backprop.Impl3[DenseMatrix[Float], Convolution[Float], Int, DenseMatrix[Float]] {
    def apply(in: DenseMatrix[Float], l: Convolution[Float], batchSize: Int): DenseMatrix[Float] = gapply(in, l, batchSize)
  }
  // Mirror of convolute.gapply for the backward pass: scatters values from
  // output coordinates back to the input coordinates each field cell covered.
  // Note Z here is dimOut._3 (output channels), unlike convolute's dimIn._3.
  private def gapply[V: ClassTag : Zero](in: DenseMatrix[V], l: Convolution[_], batchSize: Int): DenseMatrix[V] = {
    val IX = l.dimIn._1
    val IY = l.dimIn._2
    val X = l.dimOut._1
    val Y = l.dimOut._2
    val Z = l.dimOut._3
    val XB = X * batchSize
    val FX = l.field._1
    val FY = l.field._2
    val SX = l.stride._1
    val SY = l.stride._2
    val PX = l.padding._1
    val PY = l.padding._2
    // One column per (batch, input position); padding cells stay Zero.
    val out = DenseMatrix.zeros[V](FX * FY * Z, IX * IY * batchSize)
    var (x, y, z) = (0, 0, 0)
    while (x < XB) {
      while (y < Y) {
        while (z < Z) {
          var (fX, fY) = (0, 0)
          while (fX < FX) {
            while (fY < FY) {
              // xs: spatial x within one batch item; xb: batch index.
              val xs = x % X
              val xb = x / X
              // (a, b): padded input coordinate covered by this field cell.
              val a = (xs * SX) + fX
              val b = (y * SY) + fY
              if (a >= PX && a < (PX + IX) &&
                  b >= PY && b < (PY + IY)) {
                val aNp = a - PX
                val bNp = b - PY
                out.update((z * FX * FY) + fX * FY + fY,
                           (xb * IX * IY) + aNp * IY + bNp,
                           in(z, (xb * X * Y) + xs * Y + y))
              }
              fY += 1
            }
            fY = 0
            fX += 1
          }
          z += 1
        }
        z = 0
        y += 1
      }
      y = 0
      x += 1
    }
    out
  }
}
/**
* Reshapes matrix `in` by transposing the batch.
* Examples given:
* |1 2 3| |1 1 1|
* |1 2 3| -> |2 2 2|
* |1 2 3| |3 3 3|
*
* |1 1 2 2| |1 1 1 1 1 1|
* |1 1 2 2| -> |2 2 2 2 2 2|
* |1 1 2 2|
*/
object reshape_batch extends UFunc {

  implicit object reshapeBatchImplDouble extends reshape_batch.Impl3[DenseMatrix[Double], (Int, Int, Int), Int, DenseMatrix[Double]] {
    def apply(in: DenseMatrix[Double], dim: (Int, Int, Int), batchSize: Int): DenseMatrix[Double] = gapply(in, dim, batchSize)
  }

  implicit object convoluteBatchImplFloat extends reshape_batch.Impl3[DenseMatrix[Float], (Int, Int, Int), Int, DenseMatrix[Float]] {
    def apply(in: DenseMatrix[Float], dim: (Int, Int, Int), batchSize: Int): DenseMatrix[Float] = gapply(in, dim, batchSize)
  }

  /**
   * Transposes the batch layout: the (Z x X*Y*batch) input becomes a
   * (batch x X*Y*Z) output, one flattened tensor per row.
   */
  private def gapply[V: ClassTag : Zero](in: DenseMatrix[V], dim: (Int, Int, Int), batchSize: Int): DenseMatrix[V] = {
    val (x, y, z) = dim
    val planeSize = x * y          // elements in one channel plane
    val cols = planeSize * z       // flattened tensor size per batch item
    val out = DenseMatrix.zeros[V](batchSize, cols)
    var col = 0
    while (col < cols) {
      val offset = col % planeSize // position within the channel plane
      val slice = col / planeSize  // channel index (input row)
      var row = 0
      while (row < batchSize) {
        out.update(row, col, in(slice, row * planeSize + offset))
        row += 1
      }
      col += 1
    }
    out
  }
}
/**
* Reshapes matrix `in` by de-transposing the batch.
* Examples given:
* |1 2 3| |1 1 1|
* |1 2 3| <- |2 2 2|
* |1 2 3| |3 3 3|
*
* |1 1 2 2| |1 1 1 1 1 1|
* |1 1 2 2| <- |2 2 2 2 2 2|
* |1 1 2 2|
*/
object reshape_batch_backprop extends UFunc {

  implicit object reshapeBatchBpImplDouble extends reshape_batch_backprop.Impl3[DenseMatrix[Double], (Int, Int, Int), Int, DenseMatrix[Double]] {
    def apply(in: DenseMatrix[Double], dim: (Int, Int, Int), batchSize: Int): DenseMatrix[Double] = gapply(in, dim, batchSize)
  }

  implicit object convoluteBatchBpImplFloat extends reshape_batch_backprop.Impl3[DenseMatrix[Float], (Int, Int, Int), Int, DenseMatrix[Float]] {
    def apply(in: DenseMatrix[Float], dim: (Int, Int, Int), batchSize: Int): DenseMatrix[Float] = gapply(in, dim, batchSize)
  }

  /**
   * Inverse of reshape_batch: the (batch x X*Y*Z) input is laid back out as
   * a (Z x X*Y*batch) matrix, one row per channel.
   */
  private def gapply[V: ClassTag : Zero](in: DenseMatrix[V], dim: (Int, Int, Int), batchSize: Int): DenseMatrix[V] = {
    val (x, y, z) = dim
    val planeSize = x * y          // elements in one channel plane
    val cols = planeSize * z       // flattened tensor size per batch item
    val out = DenseMatrix.zeros[V](z, planeSize * batchSize)
    var col = 0
    while (col < cols) {
      val offset = col % planeSize // position within the channel plane
      val slice = col / planeSize  // channel index (output row)
      var row = 0
      while (row < batchSize) {
        out.update(slice, row * planeSize + offset, in(row, col))
        row += 1
      }
      col += 1
    }
    out
  }
}
| zenecture/neuroflow | core/src/main/scala/neuroflow/core/Operators.scala | Scala | apache-2.0 | 8,222 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.magic
/**
* Line Magics perform some function and don't return anything. I.e. you cannot
* do `val x = %runMyCode 1 2 3` or alter the MIMEType of the cell.
*/
trait LineMagic extends Magic {
  // Narrows Magic's execute to Unit: a line magic runs purely for its side
  // effect and contributes no value or cell output.
  override def execute(code: String): Unit
}
| kapil-malik/incubator-toree | kernel-api/src/main/scala/org/apache/toree/magic/LineMagic.scala | Scala | apache-2.0 | 1,080 |
package org.bitcoins.core.script.constant
import org.bitcoins.core.number.Int64
import org.scalatest.{FlatSpec, MustMatchers}
/**
* Created by chris on 1/25/16.
*/
class ScriptNumberTest extends FlatSpec with MustMatchers {
  // Shared fixtures reused across the test cases below.
  val zero = ScriptNumber.zero
  val one = ScriptNumber.one
  val ten = ScriptNumber(10)
  // Serialization: small positive numbers encode as a single little-endian byte.
  "ScriptNumber" must "derive the correct hex value from a script number" in {
    ScriptNumber(1).hex must be("01")
    ScriptNumber(8).hex must be("08")
  }
  // Arithmetic between ScriptNumbers.
  it must "add two script numbers correctly" in {
    (zero + zero) must be(zero)
    (one + zero) must be(one)
    (one + ten) must be(ScriptNumber(11))
  }
  it must "subtract two script numbers correctly" in {
    (zero - zero) must equal(zero)
    (one - zero) must equal(one)
    (ten - one) must equal(ScriptNumber(9))
  }
  it must "multiply two script numbers correctly" in {
    (zero * zero) must equal(zero)
    (one * zero) must equal(zero)
    (ten * one) must equal(ten)
    (ten * ScriptNumber(5)) must equal(ScriptNumber(50))
  }
  // Mixed-type comparisons against Int64.
  it must "compare ScriptNumbers to Int64 correctly" in {
    (zero < Int64.one) must equal(true)
    (zero <= Int64.zero) must equal(true)
    (one > Int64.zero) must equal(true)
    (one >= Int64.one) must equal(true)
  }
  // Bitwise AND/OR against both Int64 and ScriptNumber operands.
  it must "compute bitwise operations correctly" in {
    (ScriptNumber.one & Int64.one).toInt must be(1)
    (ScriptNumber.one & ScriptNumber.one).toInt must be(1)
    (ScriptNumber.one | ScriptNumber.one).toInt must be(1)
  }
}
| bitcoin-s/bitcoin-s-core | core-test/src/test/scala/org/bitcoins/core/script/constant/ScriptNumberTest.scala | Scala | mit | 1,485 |
package com.twitter.zipkin.storage
import com.google.common.base.Charsets._
import com.twitter.util.Future
import com.twitter.zipkin.Constants._
import com.twitter.zipkin.common.Span
import java.nio.ByteBuffer
/**
* Convenience trait to until existing [[SpanStore]] implementations implement
* [[QueryRequest]] natively. This will be inefficient in storage systems that
* can combine multiple conditions (annotations) into the same select.
*/
@deprecated(message = "Implement SpanStore.getTraces() directly", since = "1.15.0")
trait CollectAnnotationQueries {
  /**
   * Get the trace ids for this particular service and if provided, span name.
   * Only return maximum of limit trace ids from before the endTs.
   */
  protected def getTraceIdsByName(
    serviceName: String,
    spanName: Option[String],
    endTs: Long,
    lookback: Long,
    limit: Int
  ): Future[Seq[IndexedTraceId]]
  /**
   * Get the trace ids for this annotation between the two timestamps. If value is also passed we expect
   * both the annotation key and value to be present in index for a match to be returned.
   * Only return maximum of limit trace ids from before the endTs.
   */
  protected def getTraceIdsByAnnotation(
    serviceName: String,
    annotation: String,
    value: Option[ByteBuffer],
    endTs: Long,
    lookback: Long,
    limit: Int
  ): Future[Seq[IndexedTraceId]]
  /** Only return traces where [[Span.duration]] is between minDuration and maxDuration */
  protected def getTraceIdsByDuration(
    serviceName: String,
    spanName: Option[String],
    minDuration: Long,
    maxDuration: Option[Long],
    endTs: Long,
    lookback: Long,
    limit: Int
  ): Future[Seq[IndexedTraceId]] = Future.exception(new UnsupportedOperationException)
  /** @see [[com.twitter.zipkin.storage.SpanStore.getTracesByIds()]] */
  def getTracesByIds(traceIds: Seq[Long]): Future[Seq[List[Span]]]
  /** @see [[com.twitter.zipkin.storage.SpanStore.getTraces()]] */
  def getTraces(qr: QueryRequest): Future[Seq[List[Span]]] = {
    // Translate each query condition into an independent index lookup
    // ("slice"); flattening drops conditions that are absent.
    var sliceQueries = Seq[Set[SliceQuery]](
      qr.spanName.map(SpanSliceQuery(_)).toSet,
      qr.annotations.map(AnnotationSliceQuery(_, None)),
      qr.binaryAnnotations.map(e => AnnotationSliceQuery(e._1, Some(ByteBuffer.wrap(e._2.getBytes(UTF_8))))),
      qr.minDuration.map(DurationSliceQuery(_, qr.maxDuration, qr.spanName)).toSet
    ).flatten
    // don't lookup traces by span name twice
    if (qr.minDuration.isDefined && qr.spanName.isDefined) {
      sliceQueries = sliceQueries.filterNot(_.isInstanceOf[SpanSliceQuery])
    }
    val ids = sliceQueries match {
      // No conditions: fall back to a plain service-name lookup.
      case Nil =>
        getTraceIdsByName(qr.serviceName, None, qr.endTs, qr.lookback, qr.limit).flatMap(queryResponse(_, qr))
      // One condition: a single slice lookup suffices.
      case slice :: Nil =>
        querySlices(sliceQueries, qr).flatMap(ids => queryResponse(ids.flatten, qr))
      // Multiple conditions: probe once to find a common time bound, re-run
      // each slice within it, then intersect the per-slice trace id sets.
      case _ =>
        // TODO: timestamps endTs is the wrong name for all this
        querySlices(sliceQueries, qr.copy(limit = 1)) flatMap { ids =>
          val ts = padTimestamp(ids.flatMap(_.map(_.timestamp)).reduceOption(_ min _).getOrElse(0))
          querySlices(sliceQueries, qr.copy(endTs = ts / 1000)) flatMap { ids =>
            queryResponse(traceIdsIntersect(ids), qr)
          }
        }
    }
    // only issue a query if trace ids were found
    ids.flatMap(ids => if (ids.isEmpty) Future.value(Seq.empty) else getTracesByIds(ids))
  }
  // Widens a probe timestamp so slightly-later spans are not missed.
  private[this] def padTimestamp(timestamp: Long): Long =
    timestamp + TraceTimestampPadding.inMicroseconds
  private[this] def traceIdsIntersect(idSeqs: Seq[Seq[IndexedTraceId]]): Seq[IndexedTraceId] = {
    /* Find the trace IDs present in all the Seqs */
    val idMaps = idSeqs.map(_.groupBy(_.traceId))
    val traceIds = idMaps.map(_.keys.toSeq)
    val commonTraceIds = traceIds.tail.fold(traceIds(0))(_.intersect(_))
    /*
     * Find the timestamps associated with each trace ID and construct a new IndexedTraceId
     * that has the trace ID's maximum timestamp (ending) as the timestamp
     */
    commonTraceIds.map(id => IndexedTraceId(id, idMaps.flatMap(_(id).map(_.timestamp)).max))
  }
  // Drops ids newer than the query window, trims to the limit, and unwraps.
  private[this] def queryResponse(ids: Seq[IndexedTraceId], qr: QueryRequest): Future[Seq[Long]] = {
    Future.value(ids.filter(_.timestamp <= qr.endTs * 1000).slice(0, qr.limit).map(_.traceId))
  }
  // Internal ADT describing the single-condition lookups supported above.
  private trait SliceQuery
  private case class SpanSliceQuery(name: String) extends SliceQuery
  private case class AnnotationSliceQuery(key: String, value: Option[ByteBuffer]) extends SliceQuery
  private case class DurationSliceQuery(minDuration: Long, maxDuration: Option[Long], name: Option[String]) extends SliceQuery
  // Dispatches every slice to its index lookup and collects the results.
  private[this] def querySlices(slices: Seq[SliceQuery], qr: QueryRequest): Future[Seq[Seq[IndexedTraceId]]] =
    Future.collect(slices map {
      case SpanSliceQuery(name) =>
        getTraceIdsByName(qr.serviceName, Some(name), qr.endTs, qr.lookback, qr.limit)
      case AnnotationSliceQuery(key, value) =>
        getTraceIdsByAnnotation(qr.serviceName, key, value, qr.endTs, qr.lookback, qr.limit)
      case DurationSliceQuery(minDuration, maxDuration, name) =>
        getTraceIdsByDuration(qr.serviceName, name, minDuration, maxDuration, qr.endTs, qr.lookback, qr.limit)
      case s =>
        // NOTE(review): "Uknown" is a typo in this runtime message ("Unknown").
        Future.exception(new Exception("Uknown SliceQuery: %s".format(s)))
    })
}
| prat0318/zipkin | zipkin-common/src/main/scala/com/twitter/zipkin/storage/CollectAnnotationQueries.scala | Scala | apache-2.0 | 5,334 |
package pub.ayada.scala.utils.oracle
/**
 * Thin convenience wrappers around java.sql for Oracle connections.
 * Connections are owned by the caller; statements are closed here except
 * where a ResultSet is returned (closing a Statement closes its ResultSet).
 */
object OraUtils {
    /** Builds a thin-driver JDBC URL that addresses the database by SID. */
    def getJDBC_URL_SID(Server:String, Port:String, SID:String):String = {
        "jdbc:oracle:thin:@"+Server+ ":"+ Port + ":"+SID;
    }
    /** Builds a thin-driver JDBC URL that addresses the database by service name. */
    def getJDBC_URL_SRVC(Server:String, Port:String, Service:String):String = {
        "jdbc:oracle:thin:@//"+Server+ ":"+ Port + "/"+Service;
    }
    /**
     * Returns true when a connection can be established with the given
     * driver/URL/credentials, false otherwise.
     * Bug fix: the original returned true exactly when the connection was
     * null, i.e. it reported success when the attempt failed.
     */
    def testConnection( JDBCDriverClass: String
                      , JDBCUrl: String
                      , UserID: String
                      , Password: String): Boolean = {
        OraUtils.getJDBCConnection(JDBCDriverClass, JDBCUrl, UserID, Password) != null
    }
    /**
     * Opens a JDBC connection, or returns null when the attempt fails
     * (testConnection relies on this null-on-failure contract).
     * Bug fix: the original re-declared `JDBCConn` as a new val inside the
     * try block, shadowing the outer null val and discarding the freshly
     * opened connection — the method always returned null.
     */
    def getJDBCConnection(JDBCDriverClass: String
                      , JDBCUrl: String
                      , UserID: String
                      , Password: String) : java.sql.Connection = {
        Class.forName(JDBCDriverClass);
        try {
            java.sql.DriverManager.getConnection(JDBCUrl, UserID, Password)
        } catch {
            case e: Exception => null
        }
    }
    /** Executes a literal query; the caller owns the returned (scrollable) ResultSet. */
    def executeQuery( JDBCConn: java.sql.Connection
                    , readStatement: String): java.sql.ResultSet = {
        val st: java.sql.Statement = JDBCConn.createStatement( java.sql.ResultSet.TYPE_SCROLL_INSENSITIVE
                                                             , java.sql.ResultSet.CONCUR_READ_ONLY)
        st.executeQuery(readStatement)
    }
    /**
     * Executes a parameterized query. `Commit` is retained for signature
     * compatibility but ignored — a query has nothing to commit.
     * Bug fixes: JDBC bind parameters are 1-based, so values bind at i + 1
     * (binding at 0 throws SQLException); the statement is no longer closed
     * before returning, since closing it would also close the ResultSet.
     */
    def executeQuery( JDBCConn: java.sql.Connection
                    , readStatement: String
                    , Commit:Boolean
                    , Values: java.util.ArrayList[Any]): java.sql.ResultSet = {
        val st: java.sql.PreparedStatement = JDBCConn.prepareStatement(readStatement)
        for ( i <- 0 until Values.size) {
            st.setObject(i + 1, Values.get(i))
        }
        st.executeQuery
    }
    /** Executes a literal UPDATE, optionally committing; returns the row count. */
    def executeUpdate(JDBCConn: java.sql.Connection
                    , UpdateStatement: String
                    , Commit:Boolean): Int = {
        val st: java.sql.Statement = JDBCConn.createStatement( java.sql.ResultSet.TYPE_SCROLL_INSENSITIVE
                                                             , java.sql.ResultSet.CONCUR_READ_ONLY)
        val res:Int = st.executeUpdate(UpdateStatement)
        st.close
        if (Commit) JDBCConn.commit
        res
    }
    /**
     * Executes a parameterized UPDATE; returns the row count.
     * Bug fix: bind indexes are now 1-based (i + 1).
     */
    def executeUpdate( JDBCConn: java.sql.Connection
                     , UpdateStatement: String
                     , Commit:Boolean
                     , Values: java.util.ArrayList[Any]): Int = {
        val st: java.sql.PreparedStatement = JDBCConn.prepareStatement(UpdateStatement)
        for ( i <- 0 until Values.size) {
            st.setObject(i + 1, Values.get(i))
        }
        val res:Int = st.executeUpdate
        st.close
        if (Commit) JDBCConn.commit
        res
    }
    /**
     * Executes a batched UPDATE; each inner list is one batch entry.
     * Bug fix: every value in a row was bound at the constant row index `i`;
     * values now bind at their 1-based column position j + 1.
     */
    def executeBatchUpdate( JDBCConn: java.sql.Connection
                          , UpdateStatement: String
                          , Commit:Boolean
                          , Values: java.util.ArrayList[java.util.ArrayList[Any]]): Array[Int] = {
        val st: java.sql.PreparedStatement = JDBCConn.prepareStatement(UpdateStatement)
        for ( i <- 0 until Values.size) {
            for ( j <- 0 until Values.get(i).size) {
                st.setObject(j + 1, Values.get(i).get(j))
            }
            st.addBatch
        }
        val res:Array[Int] = st.executeBatch
        st.close
        if (Commit) JDBCConn.commit
        res
    }
    /** Executes a literal DELETE, optionally committing; returns the row count. */
    def executeDelete(JDBCConn: java.sql.Connection,
                      DeleteStatement: String,
                      Commit:Boolean): Int = {
        val st: java.sql.Statement = JDBCConn.createStatement( java.sql.ResultSet.TYPE_SCROLL_INSENSITIVE
                                                             , java.sql.ResultSet.CONCUR_READ_ONLY)
        val res:Int = st.executeUpdate(DeleteStatement)
        st.close
        if (Commit) JDBCConn.commit
        res
    }
    /**
     * Executes a parameterized DELETE; returns the row count.
     * Bug fix: bind indexes are now 1-based (i + 1).
     */
    def executeDelete( JDBCConn: java.sql.Connection
                     , DeleteStatement: String
                     , Commit:Boolean
                     , Values: java.util.ArrayList[Any]): Int = {
        val st: java.sql.PreparedStatement = JDBCConn.prepareStatement(DeleteStatement)
        for ( i <- 0 until Values.size) {
            st.setObject(i + 1, Values.get(i))
        }
        val res:Int = st.executeUpdate
        st.close
        if (Commit) JDBCConn.commit
        res
    }
    /**
     * Executes a batched DELETE; each inner list is one batch entry.
     * Bug fix: values now bind at their 1-based column position j + 1.
     */
    def executeBatchDelete( JDBCConn: java.sql.Connection
                          , DeleteStatement: String
                          , Commit:Boolean
                          , Values: java.util.ArrayList[java.util.ArrayList[Any]]): Array[Int] = {
        val st: java.sql.PreparedStatement = JDBCConn.prepareStatement(DeleteStatement)
        for ( i <- 0 until Values.size) {
            for ( j <- 0 until Values.get(i).size) {
                st.setObject(j + 1, Values.get(i).get(j))
            }
            st.addBatch
        }
        val res:Array[Int] = st.executeBatch
        st.close
        if (Commit) JDBCConn.commit
        res
    }
    /**
     * Executes a literal INSERT, optionally committing; returns the row count.
     * Fix: the statement is now closed (the original leaked it).
     */
    def executeInsert( JDBCConn: java.sql.Connection
                     , InsertStatement: String
                     , Commit:Boolean): Int = {
        val st: java.sql.Statement = JDBCConn.createStatement( java.sql.ResultSet.TYPE_SCROLL_INSENSITIVE
                                                             , java.sql.ResultSet.CONCUR_READ_ONLY)
        val res:Int = st.executeUpdate(InsertStatement)
        st.close
        if (Commit) JDBCConn.commit
        res
    }
    /**
     * Executes a parameterized INSERT; returns the row count.
     * Bug fixes: 1-based bind indexes; statement is closed (was leaked).
     */
    def executeInsert( JDBCConn: java.sql.Connection
                     , InsertStatement: String
                     , Commit:Boolean
                     , Values: java.util.ArrayList[Any]): Int = {
        val st: java.sql.PreparedStatement = JDBCConn.prepareStatement(InsertStatement)
        for ( i <- 0 until Values.size) {
            st.setObject(i + 1, Values.get(i))
        }
        val res:Int = st.executeUpdate
        st.close
        if (Commit) JDBCConn.commit
        res
    }
    /**
     * Executes a batched INSERT; each inner list is one batch entry.
     * Bug fixes: 1-based bind indexes (j + 1); statement is closed (was leaked).
     */
    def executeBatchInsert( JDBCConn: java.sql.Connection
                          , InsertStatement: String
                          , Commit:Boolean
                          , Values: java.util.ArrayList[java.util.ArrayList[Any]]): Array[Int] = {
        val st: java.sql.PreparedStatement = JDBCConn.prepareStatement(InsertStatement)
        for ( i <- 0 until Values.size) {
            for ( j <- 0 until Values.get(i).size) {
                st.setObject(j + 1, Values.get(i).get(j))
            }
            st.addBatch
        }
        val res:Array[Int] = st.executeBatch
        st.close
        if (Commit) JDBCConn.commit
        res
    }
}
| k-ayada/ScalaUtils | pub/ayada/scala/utils/oracle/OraUtils.scala | Scala | apache-2.0 | 6,568 |
import scala.language.postfixOps
import scala.tools.nsc.*
// Exercises MutableSettings.processArguments over every distinct permutation
// of a small token set, checking that residual (non-option) arguments are
// exactly the source-file tokens regardless of option order.
object Test {
  val tokens = List("", "-deprecation", "foo.scala")
  // All subsets of the token set, then all orderings of each subset.
  val subsets = tokens.toSet.subsets.toList
  val permutations0 = subsets.flatMap(_.toList.permutations).distinct
  def runWithCp(cp: String) = {
    // Prepend "-cp CPTOKEN" in every position; CPTOKEN is substituted below.
    val permutations = permutations0 flatMap ("-cp CPTOKEN" :: _ permutations)
    for ((p, i) <- permutations.distinct.sortBy(_ mkString "").zipWithIndex) {
      val args = p flatMap (_ split "\\s+") map (x => if (x == "CPTOKEN") cp else x)
      val s = new settings.MutableSettings(println)
      val (ok, residual) = s.processArguments(args, processAll = true)
      // Whatever is not an option must pass through untouched.
      val expected = args filter (_ == "foo.scala")
      assert(residual == expected, residual)
      assert(ok, args)
      println(s"$i) $args ==> $s")
    }
  }
  // Run once with an empty classpath and once with a non-trivial one.
  def main(args0: Array[String]): Unit = {
    runWithCp("")
    runWithCp("/tmp:/bippy")
  }
}
| dotty-staging/dotty | tests/pending/run/settings-parse.scala | Scala | apache-2.0 | 940 |
/*
* Copyright 2020 David Edwards
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.loopfor.zookeeper.cli.command
import com.loopfor.scalop._
import com.loopfor.zookeeper._
import com.loopfor.zookeeper.cli._
import java.io.{FileInputStream, FileNotFoundException, IOException}
import java.nio.charset.Charset
import scala.annotation.tailrec
import scala.collection.mutable.ArrayBuilder
import scala.concurrent.duration._
import scala.language.implicitConversions
import scala.util.{Failure, Success}
// Implements the `mk`/`create` CLI command: parses options, resolves the
// target path, reads optional data (inline or from an @file), and creates
// the ZooKeeper node with the requested disposition and ACL.
object Mk {
  val Usage = """usage: mk|create [OPTIONS] PATH [DATA]
Creates the node specified by PATH with optional DATA.
DATA is optional, and if omitted, creates the node without any attached data.
If DATA does not begin with `@`, it is assumed to be a Unicode string, which
by default, is encoded as UTF-8 at time of storage. The --encoding option is
used to provide an alternative CHARSET, which may be any of the possible
character sets installed on the underlying JRE.
If DATA is prefixed with `@`, this indicates that the remainder of the
argument is a filename and whose contents will be attached to the node when
created.
The parent node of PATH must exist and must not be ephemeral. The --recursive
option can be used to create intermediate nodes, though the first existing
node in PATH must not be ephemeral.
The --ttl option can be used in conjunction with persistent nodes only,
which specifies the time-to-live before becoming eligible for deletion, but
only if all child nodes have been removed.
The --container option creates a special type of node suitable for building
higher order constructs, such as locks and leader election protocols. Such
nodes become eligible for deletion once all child nodes have been removed.
One or more optional ACL entries may be specified with --acl, which must
conform to the following syntax: <scheme>:<id>=[rwcda*]. See *setacl* command
for further explanation of the ACL syntax.
options:
  --recursive, -r            : recursively create intermediate nodes
  --encoding, -e CHARSET     : charset used for encoding DATA (default=UTF-8)
  --sequential, -S           : appends sequence to node name
  --ephemeral, -E            : node automatically deleted when CLI exits
  --ttl, -T TTL              : time-to-live (millis) for persistent nodes
                               TTL must be greater than 0
                               ignored when -E specified
  --container, -C            : node designated as container
                               eligible for deletion when last child deleted
                               takes precedence over -S and -E
  --acl, -A                  : ACL assigned to node (default=world:anyone=*)
"""
  // Option grammar: each entry maps (long name, short name) to a parser and,
  // where applicable, a default. The implicit conversions at the bottom of
  // this object supply the Duration and ACL argument parsers.
  private lazy val opts =
    ("recursive", 'r') ~> just(true) ~~ false ::
    ("encoding", 'e') ~> as[Charset] ~~ Charset.forName("UTF-8") ::
    ("sequential", 'S') ~> just(true) ~~ false ::
    ("ephemeral", 'E') ~> just(true) ~~ false ::
    ("ttl", 'T') ~> as[Duration] ::
    ("container", 'C') ~> just(true) ~~ false ::
    ("acl", 'A') ~>+ as[ACL] ::
    Nil
  // Interactive form: creates the node relative to the current context path
  // and leaves the context unchanged.
  def command(zk: Zookeeper) = new CommandProcessor {
    implicit val _zk = zk
    def apply(cmd: String, args: Seq[String], context: Path): Path = {
      val optr = opts <~ args
      val recurse = optr[Boolean]("recursive")
      val disp = dispOpt(optr)
      val acl = aclOpt(optr)
      val (path, afterPath) = pathArg(optr, false)
      val data = dataArg(optr, afterPath)
      val node = Node(context.resolve(path))
      create(node, recurse, disp, acl, data)
      context
    }
  }
  // Find form: creates the (relativized) path under every node visited.
  def find(zk: Zookeeper, args: Seq[String]) = new FindProcessor {
    implicit val _zk = zk
    val optr = opts <~ args
    val recurse = optr[Boolean]("recursive")
    val disp = dispOpt(optr)
    val acl = aclOpt(optr)
    val (path, afterPath) = pathArg(optr, true)
    val data = dataArg(optr, afterPath)
    def apply(node: Node): Unit = {
      create(node resolve path, recurse, disp, acl, data)
    }
  }
  // Creates `node`, first materializing missing intermediate nodes when
  // `recurse` is set; maps ZooKeeper failures to user-facing complaints.
  private def create(node: Node, recurse: Boolean, disp: Disposition, acl: Seq[ACL], data: Array[Byte])
                    (implicit zk: Zookeeper): Unit = {
    try {
      if (recurse) {
        // Walk every ancestor of the target, creating each as a plain
        // persistent node; nodes that already exist are tolerated.
        node.path.parts.tail.dropRight(1).foldLeft(Path("/")) { case (parent, part) =>
          val node = Node(parent.resolve(part))
          try node.create(Array.empty, ACL.AnyoneAll, Persistent) catch {
            case _: NodeExistsException =>
          }
          node.path
        }
      }
      node.create(data, acl, disp)
    } catch {
      case e: NodeExistsException => complain(s"${Path(e.getPath).normalize}: node already exists")
      case _: NoNodeException => complain(s"${node.parent.path}: no such parent node")
      case e: NoChildrenForEphemeralsException => complain(s"${Path(e.getPath).normalize}: parent node is ephemeral")
      case _: InvalidACLException => complain(s"${acl.mkString(",")}: invalid ACL")
      case e: UnimplementedException => complain(s"${Path(e.getPath).normalize}: feature (such as --ttl) may not be enabled")
    }
  }
  // Derives the node disposition from the option flags; --container wins
  // over -S/-E, and --ttl only applies to the persistent variants.
  private def dispOpt(optr: OptResult): Disposition = {
    if (optr[Boolean]("container"))
      Container
    else {
      val ttl = optr.get[Duration] ("ttl")
      (optr[Boolean]("sequential"), optr[Boolean]("ephemeral")) match {
        case (false, true) =>
          Ephemeral
        case (true, true) =>
          EphemeralSequential
        case (false, false) =>
          ttl match {
            case Some(t) => PersistentTimeToLive(t)
            case None => Persistent
          }
        case (true, false) =>
          ttl match {
            case Some(t) => PersistentSequentialTimeToLive(t)
            case None => PersistentSequential
          }
      }
    }
  }
  // ACL entries from --acl, defaulting to open access.
  private def aclOpt(optr: OptResult): Seq[ACL] = optr.get("acl") match {
    case Some(acl) => acl
    case None => ACL.AnyoneAll
  }
  // Extracts the mandatory PATH argument and returns it with the remaining
  // arguments; when `relative` is set, a leading '/' is stripped so the
  // path resolves under the visited node.
  private def pathArg(optr: OptResult, relative: Boolean): (Path, Seq[String]) = optr.args match {
    case Seq(path, rest @ _*) =>
      val p = Path(path)
      (if (relative) p.path.headOption match {
        case Some('/') => Path(p.path.drop(1))
        case _ => p
      } else p, rest)
    case Seq() => complain("path must be specified")
  }
  // Extracts optional DATA: `@file` reads the file's bytes, anything else is
  // encoded with the --encoding charset, and absence yields an empty array.
  private def dataArg(optr: OptResult, args: Seq[String]): Array[Byte] = args match {
    case Seq(data, _*) => data.headOption match {
      case Some('@') =>
        val name = data.drop(1)
        val file = try new FileInputStream(name) catch {
          case _: FileNotFoundException => complain(s"$name: file not found")
          case _: SecurityException => complain(s"$name: access denied")
        }
        try read(file) catch {
          case e: IOException => complain(s"$name: I/O error: ${e.getMessage}")
        } finally
          file.close()
      case _ => data.getBytes(optr[Charset]("encoding"))
    }
    case Seq() => Array.empty[Byte]
  }
  // Reads the stream to EOF one byte at a time into a byte array.
  private def read(file: FileInputStream): Array[Byte] = {
    @tailrec def read(buffer: ArrayBuilder[Byte]): Array[Byte] = {
      val c = file.read()
      if (c == -1) buffer.result() else read(buffer += c.toByte)
    }
    read(ArrayBuilder.make[Byte])
  }
  // Argument parser for --ttl: a positive millisecond count.
  implicit def argToTTL(arg: String): Either[String, Duration] = {
    try {
      val ttl = arg.toLong
      if (ttl > 0) Right(ttl.millis) else Left("must be greater than zero")
    } catch {
      case _: NumberFormatException => Left("unrecognized TTL")
    }
  }
  // Argument parser for --acl: delegates to ACL.parse.
  implicit def argToACL(arg: String): Either[String, ACL] = ACL.parse(arg) match {
    case Success(acl) => Right(acl)
    case Failure(e) => Left(e.getMessage)
  }
}
| davidledwards/zookeeper | zookeeper-cli/src/main/scala/com/loopfor/zookeeper/cli/command/Mk.scala | Scala | apache-2.0 | 8,138 |
/**
* Exercise 6:
*
* In the Person class of Section 5.1, “Simple Classes and Parameterless Methods,”
* on page 55, provide a primary constructor that turns negative ages to 0.
*
**/
class Person(var name: String, var age: Int = 0) {
  // Primary constructor clamps negative ages to zero.
  age = math.max(age, 0)
}
| ragmha/scala-impatient | solutions/classes/ex6.scala | Scala | mit | 264 |
/*-------------------------------------------------------------------------*\
** ScalaCheck **
** Copyright (c) 2007-2018 Rickard Nilsson. All rights reserved. **
** http://www.scalacheck.org **
** **
** This software is released under the terms of the Revised BSD License. **
** There is NO WARRANTY. See the file LICENSE for the full text. **
\*------------------------------------------------------------------------ */
package org.scalacheck.util
import Pretty.{Params, pretty, format}
import org.scalacheck.Test
/** A [[org.scalacheck.Test.TestCallback]] implementation that prints
* test results directly to the console. This is the callback used by
* ScalaCheck's command line test runner, and when you run
* `org.scalacheck.Prop.check()`.
*/
class ConsoleReporter(val verbosity: Int, val columnWidth: Int)
  extends Test.TestCallback {

  private val prettyPrms = Params(verbosity)

  /** Prints a one-line summary of `res` prefixed with "+ " on success and
    * "! " on failure, prepending the property name when one is given, and
    * wrapping output at `columnWidth`. Silent when `verbosity` is not
    * positive. */
  override def onTestResult(name: String, res: Test.Result): Unit =
    if (verbosity > 0) {
      val prefix = if (res.passed) "+ " else "! "
      val label = if (name == "") "" else name + ": "
      val line = prefix + label + pretty(res, prettyPrms)
      printf("\r%s\n", format(line, "", "", columnWidth))
    }
}
object ConsoleReporter {

  /** Factory method: creates a ConsoleReporter with the given verbosity
   *  that wraps output at the given column width
   *  (use 0 for unlimited width). */
  def apply(verbosity: Int = 0, columnWidth: Int = 75) =
    new ConsoleReporter(verbosity, columnWidth)
}
| martijnhoekstra/scala | src/scalacheck/org/scalacheck/util/ConsoleReporter.scala | Scala | apache-2.0 | 1,840 |
package org.jetbrains.plugins.scala
package codeInspection.collections
import org.jetbrains.plugins.scala.codeInspection.InspectionBundle
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScExpression
/**
* Nikolay.Tropin
* 2014-05-05
*/
/** Inspection that offers to simplify `fold(true)(_ && cond)`-style calls
  * on collections to `forall(cond)`; the rewrite itself is implemented by
  * [[FoldTrueAnd]].
  */
class FoldTrueAndInspection extends OperationOnCollectionInspection {
  override def possibleSimplificationTypes: Array[SimplificationType] =
    Array(FoldTrueAnd)
}
/** Simplification: folding `true` with `&&` over a collection is
  * equivalent to `forall`, so `coll.fold(true)(_ && cond)` can be
  * rewritten as `coll.forall(cond)`. */
object FoldTrueAnd extends SimplificationType(){
  def hint = InspectionBundle.message("fold.true.and.hint")
  override def getSimplification(expr: ScExpression): Option[Simplification] = {
    expr match {
      // Do not rewrite a foldLeft whose condition has side effects:
      // forall may short-circuit and skip some of those effects.
      case qual`.foldLeft`(literal("true"), andCondition(cond)) if hasSideEffects(cond) =>
        None
      // Rewrite to `qual.forall(cond)` and highlight from the qualifier.
      // NOTE(review): `.fold`/`.foldLeft` are project-defined extractors
      // imported outside this view; presumably `.fold` also covers the
      // side-effect-free foldLeft case — confirm against their definitions.
      case qual`.fold`(literal("true"), andCondition(cond)) =>
        Some(replace(expr).withText(invocationText(qual, "forall", cond)).highlightFrom(qual))
      case _ => None
    }
  }
}
| triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/codeInspection/collections/FoldTrueAndInspection.scala | Scala | apache-2.0 | 918 |
package com.nielsen.ecom.wordseg
import scala.concurrent.duration._
import scala.concurrent.forkjoin.ThreadLocalRandom
import com.typesafe.config.ConfigFactory
import akka.actor.Actor
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.Address
import akka.actor.PoisonPill
import akka.actor.Props
import akka.actor.RelativeActorPath
import akka.actor.RootActorPath
import akka.cluster.Cluster
import akka.cluster.ClusterEvent._
import akka.cluster.MemberStatus
/** Entry point that boots the word-segmentation cluster nodes. */
object SegMaster {

  /** With no arguments, starts two seed nodes (ports 2551 and 2552) plus one
    * node on a random port and launches the sample client; otherwise each
    * argument is used as a node port.
    */
  def main(args: Array[String]): Unit = {
    if (args.isEmpty) {
      startup(Seq("2551", "2552", "0"))
      StatsSampleClient.main(Array.empty)
    } else {
      startup(args)
    }
  }

  /** Starts one ActorSystem per port, each hosting a SegWorker and a
    * SegService and carrying the "compute" cluster role. Port "0" lets the
    * runtime pick a free port.
    */
  def startup(ports: Seq[String]): Unit = {
    ports.foreach { port =>
      // Override the configuration of the port when specified as program
      // argument; fall back to the compute role and the shared "seg" config.
      // Fix: use interpolation instead of s"..." + port (the s-interpolator
      // had nothing to interpolate and the port was concatenated manually).
      val config =
        ConfigFactory
          .parseString(s"akka.remote.netty.tcp.port=$port")
          .withFallback(ConfigFactory.parseString("akka.cluster.roles = [compute]"))
          .withFallback(ConfigFactory.load("seg"))
      val system = ActorSystem("ClusterSystem", config)
      system.actorOf(Props[SegWorker], name = "segWorker")
      system.actorOf(Props[SegService], name = "segService")
    }
  }
}
/** Launcher for the sample client actor, which sends segmentation jobs to
  * the "/user/segService" actor on compute nodes. */
object StatsSampleClient {
  def main(args: Array[String]): Unit = {
    // note that client is not a compute node, role not defined
    val system = ActorSystem("ClusterSystem")
    system.actorOf(Props(classOf[StatsSampleClient], "/user/segService"), "client")
  }
}
/** Actor that periodically sends a sample segmentation job to a randomly
  * chosen "compute" cluster node and prints the replies. Cluster membership
  * and reachability events keep the set of known compute addresses current.
  *
  * @param servicePath relative actor path of the segmentation service on
  *                    each node, e.g. "/user/segService"
  */
class StatsSampleClient(servicePath: String) extends Actor {
  val cluster = Cluster(context.system)
  val servicePathElements = servicePath match {
    case RelativeActorPath(elements) => elements
    case _ => throw new IllegalArgumentException(
      "servicePath [%s] is not a valid relative actor path" format servicePath)
  }
  // Sample item description used as the recurring job payload.
  val itemdesc = "雀氏柔薄乐动婴儿纸尿裤L168片宝宝大码尿不湿超薄透气银行股票天秤"
  import context.dispatcher
  // Every 2 seconds re-send the sample description to self, which forwards
  // it to a compute node (see receive).
  val tickTask = context.system.scheduler.schedule(2.seconds, 2.seconds, self, itemdesc)
  // Addresses of cluster members carrying the "compute" role.
  var nodes = Set.empty[Address]
  override def preStart(): Unit = {
    cluster.subscribe(self, classOf[MemberEvent], classOf[ReachabilityEvent])
  }
  override def postStop(): Unit = {
    cluster.unsubscribe(self)
    // Fix: cancel the periodic tick so the scheduler stops sending messages
    // once this actor is stopped (the cancel call was previously commented
    // out, leaking the scheduled task).
    tickTask.cancel()
  }
  def receive = {
    // Pattern variable renamed from `itemdesc` to avoid shadowing the field
    // of the same name; behavior is unchanged (matches any String message).
    case desc: String if nodes.nonEmpty =>
      // just pick any one
      val address = nodes.toIndexedSeq(ThreadLocalRandom.current.nextInt(nodes.size))
      println(nodes)
      val service = context.actorSelection(RootActorPath(address) / servicePathElements)
      service ! SegJob(desc)
    case result: SegResult =>
      println(result)
    case failed: JobFailed =>
      println(failed)
    case state: CurrentClusterState =>
      // Seed the node set from the snapshot received after subscribing.
      nodes = state.members.collect {
        case m if m.hasRole("compute") && m.status == MemberStatus.Up => m.address
      }
    case MemberUp(m) if m.hasRole("compute") => nodes += m.address
    case other: MemberEvent => nodes -= other.member.address
    case UnreachableMember(m) => nodes -= m.address
    case ReachableMember(m) if m.hasRole("compute") => nodes += m.address
  }
}
| adrianwkj/web_akka | src/main/scala/com/nielsen/ecom/wordseg/CodingClient.scala | Scala | cc0-1.0 | 3,283 |
package drt.server.feeds.lcy
import akka.http.scaladsl.marshallers.xml.ScalaXmlSupport._
import akka.http.scaladsl.model._
import akka.http.scaladsl.unmarshalling.{Unmarshal, Unmarshaller}
import drt.shared.Terminals.T1
import drt.shared.api.Arrival
import drt.shared.{ArrivalStatus, LiveFeedSource, Operator, PortCode}
import services.SDate
import services.crunch.CrunchTestLike
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.xml.{NodeSeq, XML}
/** Unit tests for LCYFlightTransform: unmarshalling the LCY AIDX SOAP feed
  * into LCYFlight values, extracting individual operation times from
  * `OperationTime` elements, and converting an LCYFlight into an Arrival.
  */
class LCYFlightTransformSpec extends CrunchTestLike {
  sequential
  isolated
  // Unmarshallers under test: XML payload -> response, and HTTP response -> response.
  implicit val xmlToResUM: Unmarshaller[NodeSeq, LCYFlightsResponse] = LCYFlightTransform.unmarshaller
  implicit val resToBHXResUM: Unmarshaller[HttpResponse, LCYFlightsResponse] = LCYFlightTransform.responseToAUnmarshaller
  "Given some flight xml with one flight, I should get get back a list of 1 arrival" >> {
    val resp = HttpResponse(
      entity = HttpEntity(
        contentType = ContentType(MediaTypes.`application/xml`, HttpCharsets.`UTF-8`),
        lcySoapResponseOneFlightXml
      )
    )
    val result = Await.result(Unmarshal[HttpResponse](resp).to[LCYFlightsResponse], 5 seconds)
      .asInstanceOf[LCYFlightsResponseSuccess]
      .flights
    val expected = List(
      LCYFlight(
        "MMD",
        "5055",
        "SGD",
        "LCY",
        "MT",
        "LND",
        "2019-11-18T13:00:00.000Z",
        arrival = true,
        international = true,
        Option("2019-11-18T12:47:00.000Z"),
        Option("2019-11-18T12:49:00.000Z"),
        None,
        Option("2019-11-18T12:47:00.000Z"),
        Option("MT"),
        None,
        Option(14),
        None
      )
    )
    result === expected
  }
  "Given some flight xml with two flight, I should get get back a list of 2 arrival" >> {
    val resp = HttpResponse(
      entity = HttpEntity(
        contentType = ContentType(MediaTypes.`application/xml`, HttpCharsets.`UTF-8`),
        lcySoapResponseTwoFlightXml
      )
    )
    val result = Await.result(Unmarshal[HttpResponse](resp).to[LCYFlightsResponse], 5 seconds)
      .asInstanceOf[LCYFlightsResponseSuccess]
      .flights
    val expected = List(
      LCYFlight(
        "MMD",
        "5055",
        "SGD",
        "LCY",
        "MT",
        "LND",
        "2019-11-18T13:00:00.000Z",
        arrival = true,
        international = true,
        Option("2019-11-18T12:47:00.000Z"),
        Option("2019-11-18T12:49:00.000Z"),
        None,
        Option("2019-11-18T12:47:00.000Z"),
        Option("MT"),
        None,
        Option(14),
        None
      ),
      LCYFlight(
        "AFP",
        "24",
        "TOJ",
        "LCY",
        "JC",
        "LND",
        "2019-12-03T14:50:00.000Z",
        arrival = true,
        international = true,
        None,
        Option("2019-12-03T12:12:00.000Z"),
        None,
        Option("2019-12-03T12:08:00.000Z"),
        Option("JC"),
        None,
        None,
        None
      )
    )
    result === expected
  }
  // Extraction of individual operation times, keyed by OperationQualifier
  // (ONB = on blocks, TDN = touchdown) and TimeType (SCT/EST/ACT).
  "Given a list of operation times I should be able to extract the scheduled time" >> {
    val xml =
      XML.loadString(
        """
          |<LegData>
          | <OperationTime OperationQualifier="ONB" CodeContext="2005" TimeType="SCT">2018-09-01T23:00:00.000Z</OperationTime>
          | <OperationTime OperationQualifier="ONB" CodeContext="2005" TimeType="ACT">2018-09-01T23:00:00.000Z</OperationTime>
          |</LegData>
        """.stripMargin)
    val expected = "2018-09-01T23:00:00.000Z"
    val node = xml \\ "OperationTime"
    val result = LCYFlightTransform.scheduledTime(node).get
    result === expected
  }
  "Given a list of operation times I should be able to extract the actual chox time" >> {
    val xml =
      XML.loadString(
        """
          |<LegData>
          | <OperationTime OperationQualifier="ONB" CodeContext="2005" TimeType="SCT">2018-09-01T23:00:00.000Z</OperationTime>
          | <OperationTime OperationQualifier="ONB" CodeContext="2005" TimeType="ACT">2018-09-01T24:00:00.000Z</OperationTime>
          |</LegData>
        """.stripMargin)
    val expected = "2018-09-01T24:00:00.000Z"
    val node = xml \\ "OperationTime"
    val result = LCYFlightTransform.actualChox(node).get
    result === expected
  }
  "Given a list of operation times I should be able to extract the estimated chox time" >> {
    val xml =
      XML.loadString(
        """
          |<LegData>
          | <OperationTime OperationQualifier="ONB" CodeContext="2005" TimeType="SCT">2018-09-01T23:00:00.000Z</OperationTime>
          | <OperationTime OperationQualifier="ONB" CodeContext="2005" TimeType="EST">2018-09-01T24:00:00.000Z</OperationTime>
          |</LegData>
        """.stripMargin)
    val expected = "2018-09-01T24:00:00.000Z"
    val node = xml \\ "OperationTime"
    val result = LCYFlightTransform.estChox(node).get
    result === expected
  }
  "Given a list of operation times I should be able to extract the estimated touchdown time" >> {
    val xml =
      XML.loadString(
        """
          |<LegData>
          | <OperationTime OperationQualifier="ONB" CodeContext="2005" TimeType="SCT">2018-09-01T23:00:00.000Z</OperationTime>
          | <OperationTime OperationQualifier="TDN" CodeContext="2005" TimeType="EST">2018-09-01T24:00:00.000Z</OperationTime>
          |</LegData>
        """.stripMargin)
    val expected = "2018-09-01T24:00:00.000Z"
    val node = xml \\ "OperationTime"
    val result = LCYFlightTransform.estTouchDown(node).get
    result === expected
  }
  "Given a LCYFlight, I should get an Arrival back with the same fields - we should not use Est Chox" >> {
    val estimatedOnBlocksTimeString = "2018-09-01T23:05:00.000Z"
    val actualOnBlocksTimeString = "2018-09-01T23:06:00.000Z"
    val estimatedTouchDownTimeString = "2018-09-01T23:07:00.000Z"
    val actualTouchDownTimeString = "2018-09-01T23:08:00.000Z"
    val scheduledTimeString = "2018-09-01T23:00:00.000Z"
    val lcyFlight = LCYFlight(
      "SA",
      "123",
      "JNB",
      "LCY",
      "MT",
      "ARR",
      scheduledTimeString,
      arrival = true,
      international = true,
      Option(estimatedOnBlocksTimeString),
      Option(actualOnBlocksTimeString),
      Option(estimatedTouchDownTimeString),
      Option(actualTouchDownTimeString),
      Option("55"),
      Option("6"),
      Option(175),
      Option(65),
      Nil
    )
    val result = LCYFlightTransform.lcyFlightToArrival(lcyFlight)
    val expected = Arrival(
      Option(Operator("SA")),
      ArrivalStatus("ARRIVED ON STAND"),
      Option(SDate(estimatedTouchDownTimeString).millisSinceEpoch),
      Option(SDate(actualTouchDownTimeString).millisSinceEpoch),
      None,
      Option(SDate(actualOnBlocksTimeString).millisSinceEpoch),
      Option("6"),
      Option("55"),
      Option(175),
      Option(65),
      None,
      None,
      None,
      PortCode("LCY"),
      T1,
      "SA123",
      "SA123",
      PortCode("JNB"),
      SDate(scheduledTimeString).millisSinceEpoch,
      None,
      Set(LiveFeedSource)
    )
    result === expected
  }
  // AIDX SOAP response fixture containing a single FlightLeg.
  val lcySoapResponseOneFlightXml: String =
    """<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">
      |    <s:Body xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
      |        <IATA_AIDX_FlightLegRS TimeStamp="2020-07-03T10:59:35.1977952+01:00" Version="13.2" xmlns="http://www.iata.org/IATA/2007/00">
      |            <Success/>
      |            <FlightLeg>
      |                <LegIdentifier>
      |                    <Airline CodeContext="3">MMD</Airline>
      |                    <FlightNumber>5055</FlightNumber>
      |                    <DepartureAirport CodeContext="3">SGD</DepartureAirport>
      |                    <ArrivalAirport CodeContext="3">LCY</ArrivalAirport>
      |                    <OriginDate>2019-11-18</OriginDate>
      |                </LegIdentifier>
      |                <LegData InternationalStatus="International">
      |                    <PublicStatus xsi:nil="true"/>
      |                    <OperatingAlliance xsi:nil="true"/>
      |                    <ServiceType>N</ServiceType>
      |                    <EstFlightDuration xsi:nil="true"/>
      |                    <OwnerAirline xsi:nil="true"/>
      |                    <CabinClass Class="7">
      |                        <SeatCapacity>14</SeatCapacity>
      |                    </CabinClass>
      |                    <RemarkFreeText>LND</RemarkFreeText>
      |                    <AirportResources Usage="Planned">
      |                        <Resource DepartureOrArrival="Arrival">
      |                            <AirportZone xsi:nil="true"/>
      |                            <AircraftParkingPosition>MT</AircraftParkingPosition>
      |                            <Runway>27</Runway>
      |                            <AircraftTerminal>MT</AircraftTerminal>
      |                            <BaggageClaimUnit>03</BaggageClaimUnit>
      |                        </Resource>
      |                    </AirportResources>
      |                    <OperationTime OperationQualifier="ONB" CodeContext="2005" TimeType="SCT">2019-11-18T13:00:00.000Z</OperationTime>
      |                    <OperationTime OperationQualifier="ONB" CodeContext="2005" TimeType="EST">2019-11-18T12:47:00.000Z</OperationTime>
      |                    <OperationTime OperationQualifier="TDN" CodeContext="2005" TimeType="ACT">2019-11-18T12:47:00.000Z</OperationTime>
      |                    <OperationTime OperationQualifier="ONB" CodeContext="2005" TimeType="ACT">2019-11-18T12:49:00.000Z</OperationTime>
      |                    <AircraftInfo>
      |                        <AircraftType>DF3</AircraftType>
      |                        <AircraftSubType xsi:nil="true"/>
      |                        <Registration>OYRAB</Registration>
      |                        <TailNumber xsi:nil="true"/>
      |                        <AgentInfo DepartureOrArrival="Arrival">J</AgentInfo>
      |                        <FleetNumber xsi:nil="true"/>
      |                        <CallSign>MMD5055</CallSign>
      |                    </AircraftInfo>
      |                </LegData>
      |                <TPA_Extension/>
      |            </FlightLeg>
      |        </IATA_AIDX_FlightLegRS>
      |    </s:Body>
      |</s:Envelope>
    """.stripMargin
  // AIDX SOAP response fixture containing two FlightLegs (the first leg is
  // identical to the single-flight fixture above).
  val lcySoapResponseTwoFlightXml: String =
    """<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">
      |    <s:Body xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
      |        <IATA_AIDX_FlightLegRS TimeStamp="2020-07-03T10:59:35.1977952+01:00" Version="13.2" xmlns="http://www.iata.org/IATA/2007/00">
      |            <Success/>
      |            <FlightLeg>
      |                <LegIdentifier>
      |                    <Airline CodeContext="3">MMD</Airline>
      |                    <FlightNumber>5055</FlightNumber>
      |                    <DepartureAirport CodeContext="3">SGD</DepartureAirport>
      |                    <ArrivalAirport CodeContext="3">LCY</ArrivalAirport>
      |                    <OriginDate>2019-11-18</OriginDate>
      |                </LegIdentifier>
      |                <LegData InternationalStatus="International">
      |                    <PublicStatus xsi:nil="true"/>
      |                    <OperatingAlliance xsi:nil="true"/>
      |                    <ServiceType>N</ServiceType>
      |                    <EstFlightDuration xsi:nil="true"/>
      |                    <OwnerAirline xsi:nil="true"/>
      |                    <CabinClass Class="7">
      |                        <SeatCapacity>14</SeatCapacity>
      |                    </CabinClass>
      |                    <RemarkFreeText>LND</RemarkFreeText>
      |                    <AirportResources Usage="Planned">
      |                        <Resource DepartureOrArrival="Arrival">
      |                            <AirportZone xsi:nil="true"/>
      |                            <AircraftParkingPosition>MT</AircraftParkingPosition>
      |                            <Runway>27</Runway>
      |                            <AircraftTerminal>MT</AircraftTerminal>
      |                            <BaggageClaimUnit>03</BaggageClaimUnit>
      |                        </Resource>
      |                    </AirportResources>
      |                    <OperationTime OperationQualifier="ONB" CodeContext="2005" TimeType="SCT">2019-11-18T13:00:00.000Z</OperationTime>
      |                    <OperationTime OperationQualifier="ONB" CodeContext="2005" TimeType="EST">2019-11-18T12:47:00.000Z</OperationTime>
      |                    <OperationTime OperationQualifier="TDN" CodeContext="2005" TimeType="ACT">2019-11-18T12:47:00.000Z</OperationTime>
      |                    <OperationTime OperationQualifier="ONB" CodeContext="2005" TimeType="ACT">2019-11-18T12:49:00.000Z</OperationTime>
      |                    <AircraftInfo>
      |                        <AircraftType>DF3</AircraftType>
      |                        <AircraftSubType xsi:nil="true"/>
      |                        <Registration>OYRAB</Registration>
      |                        <TailNumber xsi:nil="true"/>
      |                        <AgentInfo DepartureOrArrival="Arrival">J</AgentInfo>
      |                        <FleetNumber xsi:nil="true"/>
      |                        <CallSign>MMD5055</CallSign>
      |                    </AircraftInfo>
      |                </LegData>
      |                <TPA_Extension/>
      |            </FlightLeg>
      |            <FlightLeg>
      |                <LegIdentifier>
      |                    <Airline CodeContext="3">AFP</Airline>
      |                    <FlightNumber>24</FlightNumber>
      |                    <DepartureAirport CodeContext="3">TOJ</DepartureAirport>
      |                    <ArrivalAirport CodeContext="3">LCY</ArrivalAirport>
      |                    <OriginDate>2019-12-03</OriginDate>
      |                </LegIdentifier>
      |                <LegData InternationalStatus="International">
      |                    <PublicStatus xsi:nil="true"/>
      |                    <OperatingAlliance xsi:nil="true"/>
      |                    <ServiceType>D</ServiceType>
      |                    <EstFlightDuration xsi:nil="true"/>
      |                    <OwnerAirline xsi:nil="true"/>
      |                    <CabinClass Class="7">
      |                        <SeatCapacity xsi:nil="true"/>
      |                    </CabinClass>
      |                    <RemarkFreeText>LND</RemarkFreeText>
      |                    <AirportResources Usage="Planned">
      |                        <Resource DepartureOrArrival="Arrival">
      |                            <AirportZone xsi:nil="true"/>
      |                            <AircraftParkingPosition>JC</AircraftParkingPosition>
      |                            <Runway>09</Runway>
      |                            <AircraftTerminal>JC</AircraftTerminal>
      |                        </Resource>
      |                    </AirportResources>
      |                    <OperationTime OperationQualifier="ONB" CodeContext="2005" TimeType="SCT">2019-12-03T14:50:00.000Z</OperationTime>
      |                    <OperationTime OperationQualifier="TDN" CodeContext="2005" TimeType="ACT">2019-12-03T12:08:00.000Z</OperationTime>
      |                    <OperationTime OperationQualifier="ONB" CodeContext="2005" TimeType="ACT">2019-12-03T12:12:00.000Z</OperationTime>
      |                    <AircraftInfo>
      |                        <AircraftType>FA50</AircraftType>
      |                        <AircraftSubType xsi:nil="true"/>
      |                        <Registration>17401</Registration>
      |                        <TailNumber xsi:nil="true"/>
      |                        <AgentInfo DepartureOrArrival="Arrival">J</AgentInfo>
      |                        <FleetNumber xsi:nil="true"/>
      |                        <CallSign>AFP24</CallSign>
      |                    </AircraftInfo>
      |                </LegData>
      |                <TPA_Extension/>
      |            </FlightLeg>
      |        </IATA_AIDX_FlightLegRS>
      |    </s:Body>
      |</s:Envelope>
    """.stripMargin
} | UKHomeOffice/drt-scalajs-spa-exploration | server/src/test/scala/drt/server/feeds/lcy/LCYFlightTransformSpec.scala | Scala | apache-2.0 | 15,796 |
package clientmacros.tojs
import japgolly.scalajs.react.vdom.{TagOf, VdomElement, VdomNode}
import japgolly.scalajs.react.{CallbackTo, raw}
import scala.collection.{GenMap, GenTraversableOnce}
import scala.language.existentials
import scala.language.experimental.macros
import scala.reflect.macros.blackbox
import scala.scalajs.js
/**
* modified version of https://github.com/wav/scala-macros/blob/master/src/main/scala/wav/common/scalajs/macros/Macros.scala
* via https://github.com/chandu0101/macros/blob/master/src/main/scala/chandu0101/macros/tojs/JSMacro.scala
*/
/** Macro that derives a `T => js.Object` conversion for a case class `T`,
  * copying each primary-constructor field into a JS dynamic literal.
  * Optional fields (`Option`/`js.UndefOr`) are only set when present.
  */
object JSMacro {
  // Structural type: anything exposing a `toJS` conversion to a JS object.
  type TOJS = {
    val toJS: js.Object
  }
  def apply[T]: T => js.Object = macro applyImpl[T]
  def applyImpl[T: c.WeakTypeTag](c: blackbox.Context): c.Tree = {
    import c.universe._
    // Fields wrapped in Option/UndefOr are written only when a value exists.
    def isOptional(tpe: Type): Boolean =
      tpe <:< typeOf[Option[_]] || tpe <:< typeOf[js.UndefOr[_]]
    // True for user-defined AnyVal wrappers (value classes), which must be
    // unwrapped via `.value`; `scala.*` primitives are handled separately.
    def isNotPrimitiveAnyVal(tpe: Type) =
      !tpe.typeSymbol.fullName.startsWith("scala.")
    // Flattens nested js.| union types into a list of member types;
    // `breaker` guards against cycles in recursive aliases.
    def flattenUnion(tpe: Type, breaker: Set[Type]): List[Type] =
      if (tpe <:< typeOf[js.|[_, _]] && !(tpe <:< typeOf[Null])) {
        val first = tpe.dealias.typeArgs(0)
        val second = tpe.dealias.typeArgs(1)
        val one = if (breaker(first)) Nil else flattenUnion(first, breaker + first)
        val two = if (breaker(second)) Nil else flattenUnion(second, breaker + second)
        one ++ two
      } else List(tpe)
    // Builds the tree converting `target` (of static type `rt`) to js.Any.
    // NOTE: the order of these branches is significant — more specific
    // checks (e.g. CallbackTo, value classes) must precede the generic
    // AnyVal/String fallthrough.
    def getJSValueTree(target: Tree, rt: Type): Tree = {
      if (rt <:< typeOf[TOJS])
        q"""if ($target != null) $target.toJS else null"""
      /* scala collections */
      else if (rt <:< typeOf[GenMap[String, _]])
        if (rt.typeArgs(1) <:< typeOf[TOJS])
          q"""$target.map{ case (k, o) => k -> (if(o == null) null else o.toJS)}.toJSDictionary"""
        else
          q"""$target.toJSDictionary"""
      else if (rt <:< typeOf[GenTraversableOnce[_]] || (rt <:< typeOf[Array[_]]))
        if (rt.typeArgs.head <:< typeOf[TOJS])
          q"""$target.map(o => if(o == null) null else o.toJS).toJSArray"""
        else
          q"""$target.toJSArray"""
      /* javascript collections. Only need to rewrite if type parameter is <:< TOJS */
      else if (rt <:< typeOf[js.Dictionary[_]] && rt.typeArgs.head <:< typeOf[TOJS])
        q"""$target.map{case(k, o) => (k, if(o == null) null else o.toJS)}.toJSDictionary"""
      else if (rt <:< typeOf[js.Array[_]] && rt.typeArgs.head <:< typeOf[TOJS])
        q"""$target.map(o => if(o == null) null else o.toJS)"""
      /* rewrite functions returning a Callback so that javascript land can call them */
      else if (rt <:< typeOf[CallbackTo[_]])
        q"""$target.toJsFn"""
      else if (rt <:< typeOf[Function0[CallbackTo[_]]])
        q"""$target().toJsFn"""
      else if (rt <:< typeOf[Function1[_, CallbackTo[_]]])
        q"""js.Any.fromFunction1(((t0: ${rt.typeArgs(0)}) => $target(t0).runNow()))"""
      else if (rt <:< typeOf[Function2[_, _, CallbackTo[_]]])
        q"""js.Any.fromFunction2(((t0: ${rt.typeArgs(0)}, t1: ${rt.typeArgs(1)}) => $target(t0, t1).runNow()))"""
      else if (rt <:< typeOf[Function3[_, _, _, CallbackTo[_]]])
        q"""js.Any.fromFunction3(((t0: ${rt.typeArgs(0)}, t1: ${rt.typeArgs(1)}, t2: ${rt.typeArgs(2)}) => $target(t0, t1, t2).runNow()))"""
      else if (rt <:< typeOf[Function0[_]])
        q"""js.Any.fromFunction0($target)"""
      else if (rt <:< typeOf[Function1[_, _]])
        q"""js.Any.fromFunction1($target)"""
      else if (rt <:< typeOf[Function2[_, _, _]])
        q"""js.Any.fromFunction2($target)"""
      else if (rt <:< typeOf[Function3[_, _, _, _]])
        q"""js.Any.fromFunction3($target)"""
      /* other scalajs-react things we need to rewrite */
      else if (rt <:< typeOf[VdomElement])
        q"""$target.rawElement.asInstanceOf[js.Any]"""
      else if (rt <:< typeOf[VdomNode])
        q"""$target.rawNode.asInstanceOf[js.Any]"""
      else if (rt <:< typeOf[TagOf[_]])
        q"""$target.render.rawElement.asInstanceOf[js.Any]"""
      //this is to make raw.React.Node work
      else if (rt <:< typeOf[raw.recursiveTypeAliases.ChildrenArray[_]])
        q"""$target.asInstanceOf[js.Any]"""
      /* Other values. Keep AnyVal below at least CallbackTo */
      else if (rt <:< typeOf[AnyVal] && isNotPrimitiveAnyVal(rt))
        q"""$target.value.asInstanceOf[js.Any]"""
      else if (rt <:< typeOf[AnyVal] || rt <:< typeOf[String] || rt <:< typeOf[js.Any])
        q"""$target.asInstanceOf[js.Any]"""
      else if (rt <:< typeOf[Enumeration#Value])
        q"""$target.toString.asInstanceOf[js.Any]"""
      else if (rt <:< typeOf[js.|[_, _]]) {
        // Union type: dispatch at runtime on the scala members; at most one
        // JS member can be kept as-is (others are indistinguishable).
        val (jsTypes, scalaTypes) = flattenUnion(rt, Set(rt)).distinct.partition(_ <:< typeOf[js.Any])
        val scalaCases = scalaTypes.map(
          tpe => cq"""x: $tpe => ${getJSValueTree(q"x", tpe)}"""
        )
        if (jsTypes.size > 1) {
          c.warning(target.pos, s"Cannot differentiate ${jsTypes.mkString(", ")}")
        }
        val jsCase = jsTypes.take(1).map(tpe => cq"""x => x.asInstanceOf[js.Any]""")
        q"""($target: scala.Any) match {
            case ..$scalaCases
            case ..$jsCase
          }"""
      }
      else {
        // Last resort: look for an implicit view to js.Any in scope.
        val conversion = c.inferImplicitView(target, rt, typeOf[js.Any], silent = true)
        if (conversion == EmptyTree) {
          throw new RuntimeException(s"Don't know how to convert $rt to js.Any")
        }
        q"""$conversion($target)"""
      }
    }
    val tpe = c.weakTypeOf[T]
    val target = c.freshName[TermName](TermName("t"))
    val props = c.freshName[TermName](TermName("p"))
    // Fields of T's primary constructor (first parameter list only).
    val fieldSymbols: List[Symbol] = tpe.decls
      .collectFirst {
        case m: MethodSymbol if m.isPrimaryConstructor => m
      }
      .get
      .paramLists
      .head
    // One `updateDynamic` statement per field; optional fields write only
    // when non-empty (so absent keys stay undefined on the JS object).
    val fieldUpdates = fieldSymbols.map { f =>
      val name = f.asTerm.name
      val decoded = name.decodedName.toString
      val res = if (isOptional(f.typeSignature)) {
        val valueTree = getJSValueTree(q"v", f.typeSignature.typeArgs.head)
        q"""$target.$name.foreach(v => $props.updateDynamic($decoded)($valueTree))"""
      } else {
        val valueTree = getJSValueTree(q"$target.$name", f.typeSignature)
        q"""$props.updateDynamic($decoded)($valueTree)"""
      }
      res
    }
    // Comment this code back in to see what the macro spits out.
    //    println(
    //      s""" ($target: $tpe) => {
    //        val $props = scala.scalajs.js.Dynamic.literal()
    //        ..$fieldUpdates
    //        $props
    //      }""")
    q""" ($target: $tpe) => {
          val $props = scala.scalajs.js.Dynamic.literal()
          ..$fieldUpdates
          $props
        }"""
  }
}
| UKHomeOffice/drt-scalajs-spa-exploration | client-macros/src/main/scala/clientmacros/tojs/JSMacro.scala | Scala | apache-2.0 | 6,704 |
package org.camunda.feel.impl.script
import java.io.{Closeable, IOException, Reader}
import javax.script._
import org.camunda.feel.FeelEngine
import org.camunda.feel.FeelEngine.EvalExpressionResult
import org.camunda.feel.impl.SpiServiceLoader
import org.camunda.feel.syntaxtree.{Exp, ParsedExpression}
import org.camunda.feel.impl.parser.FeelParser._
import scala.collection.JavaConverters._
import scala.annotation.tailrec
/** JSR-223 script engine backed by the FEEL engine.
  *
  * Implementors supply the evaluation function, the parser, and the engine
  * factory; this trait wires them into the javax.script eval/compile API.
  */
trait FeelScriptEngine
    extends AbstractScriptEngine
    with ScriptEngine
    with Compilable {
  // Evaluates an expression against a variable context (provided by implementor).
  val eval: (String, Map[String, Any]) => EvalExpressionResult
  // Parses an expression string into the FEEL AST (provided by implementor).
  val parse: String => ParseResult[Exp]
  val factory: ScriptEngineFactory
  // Lazily built so SPI lookup only happens when the engine is first used.
  lazy val engine: FeelEngine =
    new FeelEngine(functionProvider = SpiServiceLoader.loadFunctionProvider,
                   valueMapper = SpiServiceLoader.loadValueMapper)
  def getFactory: ScriptEngineFactory = factory
  def createBindings(): Bindings = new SimpleBindings
  /** Evaluates the script read from `reader` within `context`. */
  def eval(reader: Reader, context: ScriptContext): Object = {
    val script = readerAsString(reader)
    eval(script, context)
  }
  /** Evaluates `script`, merging global and engine scope bindings into the
    * variable context. Throws ScriptException on evaluation failure. */
  def eval(script: String, context: ScriptContext): Object = {
    val engineContext = getEngineContext(context)
    val result = eval(script, engineContext)
    handleEvaluationResult(result)
  }
  /** Evaluates a previously compiled script against `context`. */
  def eval(script: CompiledFeelScript, context: ScriptContext): Object = {
    val engineContext = getEngineContext(context)
    val result = engine.eval(script.expression, engineContext)
    handleEvaluationResult(result)
  }
  def compile(reader: Reader): CompiledScript = {
    val script = readerAsString(reader)
    compile(script)
  }
  /** Parses `script` eagerly; a parse failure surfaces as ScriptException. */
  def compile(script: String): CompiledScript = parse(script) match {
    case Success(exp, _) =>
      CompiledFeelScript(this, ParsedExpression(exp, script))
    case e: NoSuccess =>
      throw new ScriptException(s"failed to parse expression '$script':\\n$e")
  }
  // Unwraps the Either result; evaluation failures become ScriptExceptions.
  private def handleEvaluationResult(result: EvalExpressionResult): Object =
    result match {
      case Right(value) => value.asInstanceOf[AnyRef]
      case Left(failure) => throw new ScriptException(failure.message)
    }
  // Merges bindings from both scopes; engine scope is flattened last, so
  // engine-scope entries override global-scope entries with the same key.
  private def getEngineContext(context: ScriptContext): Map[String, Any] = {
    List(ScriptContext.GLOBAL_SCOPE, ScriptContext.ENGINE_SCOPE)
      .flatMap(scope => Option(context.getBindings(scope)))
      .flatMap(_.asScala)
      .toMap
  }
  // Drains the reader to a String, always closing it; IO errors become
  // ScriptExceptions.
  private def readerAsString(reader: Reader): String = {
    try {
      read(reader)
    } catch {
      case e: IOException => throw new ScriptException(e)
    } finally {
      closeSilently(reader)
    }
  }
  // Tail-recursive bulk read in 16 KiB chunks until end-of-stream.
  @tailrec
  private def read(reader: Reader,
                   buffer: StringBuffer = new StringBuffer): String = {
    val chars = new Array[Char](16 * 1024)
    reader.read(chars, 0, chars.length) match {
      case -1 => buffer.toString
      case i =>
        buffer.append(chars, 0, i)
        read(reader, buffer)
    }
  }
  // Best-effort close: IO errors on close are deliberately swallowed.
  private def closeSilently(closable: Closeable) {
    try {
      closable.close()
    } catch {
      case _: IOException => // ignore
    }
  }
}
| saig0/feel | feel-engine/src/main/scala/org/camunda/feel/impl/script/FeelScriptEngine.scala | Scala | apache-2.0 | 3,072 |
/**
* The MIT License (MIT)
*
* Copyright (c) 2018 Israel Freitas(israel.araujo.freitas@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
package ifreitas.scalaaiml.elements
/** The AIML `id` template element; renders as an empty `<id/>` tag. */
case class Id() extends TemplateExpression {
  def toXml = <id/>
}
| ifreitas/AimlToXml | src/main/scala/ifreitas/scalaaiml/elements/Id.scala | Scala | mit | 1,287 |
/*
* Copyright (C) 2017 LREN CHUV for Human Brain Project
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package ch.chuv.lren.woken.dao
import acolyte.jdbc.Implicits._
import acolyte.jdbc.RowLists.rowList1
import acolyte.jdbc._
import cats.effect.IO
import cats.scalatest.{ ValidatedMatchers, ValidatedValues }
import ch.chuv.lren.woken.Predefined.FeaturesDatabase._
import ch.chuv.lren.woken.core.fp.runNow
import ch.chuv.lren.woken.messages.query.{ CodeValue, ValidationSpec }
import ch.chuv.lren.woken.messages.query.filters.{ InputType, Operator, SingleFilterRule }
import ch.chuv.lren.woken.validation.KFoldFeaturesSplitterDefinition
import org.scalatest.{ Matchers, WordSpec }
/**
 * Unit tests for the extended-features table DAO, driven by Acolyte mock JDBC
 * handlers that assert the exact SQL emitted for table/view creation and teardown.
 */
class ExtendedFeaturesTableRepositoryDAOTest
    extends WordSpec
    with Matchers
    with DAOTest
    with ValidatedMatchers
    with ValidatedValues {

  // Handler for a table without a primary key: validation must reject the DAO
  // before any SQL is issued, so every statement reaching this handler is an error.
  val sampleTableHandler: ScalaCompositeHandler = AcolyteDSL.handleStatement
    .withQueryDetection("^SELECT ") // regex test from beginning
    .withQueryHandler { e: QueryExecution =>
      e.sql.trim match {
        case _ => throw new IllegalArgumentException(s"Unhandled $e")
      }
    }

  // Scenario 1: extended table with no filter and no extra columns; view holds 99 rows.
  val cdeTableHandler1: ScalaCompositeHandler = AcolyteDSL.handleStatement
    .withQueryDetection("^SELECT ") // regex test from beginning
    .withQueryHandler { e: QueryExecution =>
      e.sql.trim match {
        case """SELECT setseed( 0.67 );""" => rowList1(classOf[Double]) :+ 0.67
        case """SELECT count(*) FROM "cde_features_a__1v"""" =>
          rowList1(classOf[Int]) :+ 99
        case _ =>
          fail(s"Unhandled $e")
      }
    }
    .withUpdateHandler { e: UpdateExecution =>
      // Collapse all whitespace runs (including newlines) to single spaces so the
      // generated multi-line SQL can be compared against a one-line expectation.
      // The regex must be written with single backslashes inside the triple-quoted
      // string: """[\\s\\n]+""" would only match literal backslash characters and
      // leave the SQL un-normalised.
      val sql = e.sql.trim.replaceAll("""[\s\n]+""", " ")
      if (sql.startsWith("CREATE TABLE")) {
        sql shouldBe """CREATE TABLE "cde_features_a__1" ( "subjectcode" varchar(256) PRIMARY KEY, "_rnd" numeric ) WITH ( OIDS=FALSE );"""
        AcolyteDSL.updateResult(1, RowLists.longList.append(1L))
      } else if (sql.startsWith("INSERT INTO")) {
        sql shouldBe """INSERT INTO "cde_features_a__1" ( "subjectcode","_rnd" ) (SELECT "subjectcode" , random() as "_rnd" FROM "cde_features_a" ORDER BY "_rnd" );"""
        AcolyteDSL.updateResult(1, RowLists.longList.append(1L))
      } else if (sql.startsWith("CREATE OR REPLACE VIEW")) {
        sql shouldBe """CREATE OR REPLACE VIEW "cde_features_a__1v" ( "subjectcode","apoe4","lefthippocampus","dataset","_rnd" ) AS SELECT "cde_features_a"."subjectcode","cde_features_a"."apoe4","cde_features_a"."lefthippocampus","cde_features_a"."dataset" , "cde_features_a__1"."_rnd" FROM "cde_features_a" INNER JOIN "cde_features_a__1" ON "cde_features_a"."subjectcode" = "cde_features_a__1"."subjectcode""""
        AcolyteDSL.updateResult(1, RowLists.longList.append(1L))
      } else if (sql.startsWith("DROP VIEW")) {
        sql shouldBe """DROP VIEW IF EXISTS "cde_features_a__1v""""
        AcolyteDSL.updateResult(1, RowLists.longList.append(1L))
      } else if (sql.startsWith("DROP TABLE")) {
        sql shouldBe """DROP TABLE IF EXISTS "cde_features_a__1""""
        AcolyteDSL.updateResult(1, RowLists.longList.append(1L))
      } else {
        fail(s"Unhandled $sql")
      }
    }

  // Scenario 2: extended table built from a filtered source ("apoe4" = 2); 15 rows.
  val cdeTableHandler2: ScalaCompositeHandler = AcolyteDSL.handleStatement
    .withQueryDetection("^SELECT ") // regex test from beginning
    .withQueryHandler { e: QueryExecution =>
      e.sql.trim match {
        case """SELECT setseed( 0.67 );""" => rowList1(classOf[Double]) :+ 0.67
        case """SELECT count(*) FROM "cde_features_a__1v"""" =>
          rowList1(classOf[Int]) :+ 15
        case _ =>
          fail(s"Unhandled $e")
      }
    }
    .withUpdateHandler { e: UpdateExecution =>
      // See cdeTableHandler1: normalise whitespace before comparing SQL.
      val sql = e.sql.trim.replaceAll("""[\s\n]+""", " ")
      if (sql.startsWith("CREATE TABLE")) {
        sql shouldBe """CREATE TABLE "cde_features_a__1" ( "subjectcode" varchar(256) PRIMARY KEY, "_rnd" numeric ) WITH ( OIDS=FALSE );"""
        AcolyteDSL.updateResult(1, RowLists.longList.append(1L))
      } else if (sql.startsWith("INSERT INTO")) {
        sql shouldBe """INSERT INTO "cde_features_a__1" ( "subjectcode","_rnd" ) (SELECT "subjectcode" , random() as "_rnd" FROM "cde_features_a" WHERE "apoe4" = 2 ORDER BY "_rnd" );"""
        AcolyteDSL.updateResult(1, RowLists.longList.append(1L))
      } else if (sql.startsWith("CREATE OR REPLACE VIEW")) {
        sql shouldBe """CREATE OR REPLACE VIEW "cde_features_a__1v" ( "subjectcode","apoe4","lefthippocampus","dataset","_rnd" ) AS SELECT "cde_features_a"."subjectcode","cde_features_a"."apoe4","cde_features_a"."lefthippocampus","cde_features_a"."dataset" , "cde_features_a__1"."_rnd" FROM "cde_features_a" INNER JOIN "cde_features_a__1" ON "cde_features_a"."subjectcode" = "cde_features_a__1"."subjectcode""""
        AcolyteDSL.updateResult(1, RowLists.longList.append(1L))
      } else if (sql.startsWith("DROP VIEW")) {
        sql shouldBe """DROP VIEW IF EXISTS "cde_features_a__1v""""
        AcolyteDSL.updateResult(1, RowLists.longList.append(1L))
      } else if (sql.startsWith("DROP TABLE")) {
        sql shouldBe """DROP TABLE IF EXISTS "cde_features_a__1""""
        AcolyteDSL.updateResult(1, RowLists.longList.append(1L))
      } else {
        fail(s"Unhandled $sql")
      }
    }

  // Scenario 3: filtered source plus a k-fold split column ("_win_kfold_5"); 15 rows.
  val cdeTableHandler3: ScalaCompositeHandler = AcolyteDSL.handleStatement
    .withQueryDetection("^SELECT ") // regex test from beginning
    .withQueryHandler { e: QueryExecution =>
      e.sql.trim match {
        case """SELECT setseed( 0.67 );""" => rowList1(classOf[Double]) :+ 0.67
        case """SELECT count(*) FROM "cde_features_a__1v"""" =>
          rowList1(classOf[Int]) :+ 15
        case _ =>
          fail(s"Unhandled $e")
      }
    }
    .withUpdateHandler { e: UpdateExecution =>
      // See cdeTableHandler1: normalise whitespace before comparing SQL.
      val sql = e.sql.trim.replaceAll("""[\s\n]+""", " ")
      if (sql.startsWith("CREATE TABLE")) {
        sql shouldBe """CREATE TABLE "cde_features_a__1" ( "subjectcode" varchar(256) PRIMARY KEY, "_win_kfold_5" int,"_rnd" numeric ) WITH ( OIDS=FALSE );"""
        AcolyteDSL.updateResult(1, RowLists.longList.append(1L))
      } else if (sql.startsWith("INSERT INTO")) {
        sql shouldBe """INSERT INTO "cde_features_a__1" ( "subjectcode","_rnd" ) (SELECT "subjectcode" , random() as "_rnd" FROM "cde_features_a" WHERE "apoe4" = 2 ORDER BY "_rnd" );"""
        AcolyteDSL.updateResult(1, RowLists.longList.append(1L))
      } else if (sql.startsWith("WITH")) {
        // Pre-fill of the k-fold window column using ntile over the random ordering
        sql shouldBe """WITH "win" as (SELECT "subjectcode" , ntile( 5 ) over (order by "_rnd" ) as win FROM "cde_features_a__1" ) UPDATE "cde_features_a__1" SET "_win_kfold_5" = "win".win FROM win WHERE "cde_features_a__1"."subjectcode" = "win"."subjectcode" ;"""
        AcolyteDSL.updateResult(1, RowLists.longList.append(1L))
      } else if (sql.startsWith("CREATE OR REPLACE VIEW")) {
        sql shouldBe """CREATE OR REPLACE VIEW "cde_features_a__1v" ( "subjectcode","apoe4","lefthippocampus","dataset","_win_kfold_5","_rnd" ) AS SELECT "cde_features_a"."subjectcode","cde_features_a"."apoe4","cde_features_a"."lefthippocampus","cde_features_a"."dataset" , "cde_features_a__1"."_win_kfold_5","cde_features_a__1"."_rnd" FROM "cde_features_a" INNER JOIN "cde_features_a__1" ON "cde_features_a"."subjectcode" = "cde_features_a__1"."subjectcode""""
        AcolyteDSL.updateResult(1, RowLists.longList.append(1L))
      } else if (sql.startsWith("DROP VIEW")) {
        sql shouldBe """DROP VIEW IF EXISTS "cde_features_a__1v""""
        AcolyteDSL.updateResult(1, RowLists.longList.append(1L))
      } else if (sql.startsWith("DROP TABLE")) {
        sql shouldBe """DROP TABLE IF EXISTS "cde_features_a__1""""
        AcolyteDSL.updateResult(1, RowLists.longList.append(1L))
      } else {
        fail(s"Unhandled $sql")
      }
    }

  "ExtendedFeaturesTableRepositoryDAO" should {

    "not be used from a table without a primary key" in withRepository[FeaturesTableRepositoryDAO[
      IO
    ]](
      sampleTableHandler,
      xa => {
        val wokenRepository = new WokenInMemoryRepository[IO]()
        val sourceTable =
          new FeaturesTableRepositoryDAO[IO](xa, churnTable, churnHeaders)
        val extendedTableFromNoKeyTable = ExtendedFeaturesTableRepositoryDAO
          .apply[IO](sourceTable, None, Nil, Nil, Nil, runNow(wokenRepository.nextTableSeqNumber()))
        extendedTableFromNoKeyTable should haveInvalid(
          "Extended features table expects a primary key of one column for table churn"
        )
        sourceTable
      }
    ) { dao =>
      dao.table.table.name shouldBe "churn"
    }

    "create an extended table without any new columns and use it" in withRepositoryResource[
      ExtendedFeaturesTableRepositoryDAO[
        IO
      ]
    ](
      cdeTableHandler1,
      xa => {
        val wokenRepository = new WokenInMemoryRepository[IO]()
        val sourceTable =
          new FeaturesTableRepositoryDAO[IO](xa, cdeTable, cdeHeaders)
        ExtendedFeaturesTableRepositoryDAO
          .apply[IO](sourceTable, None, Nil, Nil, Nil, runNow(wokenRepository.nextTableSeqNumber()))
          .value
      }
    ) { dao =>
      dao.count.unsafeRunSync() shouldBe 99
    }

    "create an extended table filtering original data and without any new columns and use it" in withRepositoryResource[
      ExtendedFeaturesTableRepositoryDAO[
        IO
      ]
    ](
      cdeTableHandler2,
      xa => {
        val wokenRepository = new WokenInMemoryRepository[IO]()
        val sourceTable =
          new FeaturesTableRepositoryDAO[IO](xa, cdeTable, cdeHeaders)
        val filter =
          SingleFilterRule("apoe4", "apoe4", "number", InputType.number, Operator.equal, List("2"))
        ExtendedFeaturesTableRepositoryDAO
          .apply[IO](sourceTable,
                     Some(filter),
                     Nil,
                     Nil,
                     Nil,
                     runNow(wokenRepository.nextTableSeqNumber()))
          .value
      }
    ) { dao =>
      dao.count.unsafeRunSync() shouldBe 15
    }

    "create an extended table filtering original data and with new columns and use it" in withRepositoryResource[
      ExtendedFeaturesTableRepositoryDAO[
        IO
      ]
    ](
      cdeTableHandler3,
      xa => {
        val wokenRepository = new WokenInMemoryRepository[IO]()
        val sourceTable =
          new FeaturesTableRepositoryDAO[IO](xa, cdeTable, cdeHeaders)
        val filter =
          SingleFilterRule("apoe4", "apoe4", "number", InputType.number, Operator.equal, List("2"))
        val validationSpec = ValidationSpec("kfold", List(CodeValue("k", "5")))
        val splitterDef    = KFoldFeaturesSplitterDefinition(validationSpec, 5)
        val newFeatures    = List(splitterDef.splitColumn)
        val prefills       = List(splitterDef)
        ExtendedFeaturesTableRepositoryDAO
          .apply[IO](sourceTable,
                     Some(filter),
                     newFeatures,
                     Nil,
                     prefills,
                     runNow(wokenRepository.nextTableSeqNumber()))
          .value
      }
    ) { dao =>
      dao.count.unsafeRunSync() shouldBe 15
    }
  }
}
| HBPSP8Repo/workflow | src/test/scala/ch/chuv/lren/woken/dao/ExtendedFeaturesTableRepositoryDAOTest.scala | Scala | apache-2.0 | 11,807 |
package org.judal.storage.scala
import javax.jdo.JDOException
import org.judal.storage.EngineFactory
import org.judal.storage.StorageObjectFactory
import org.judal.storage.query.AbstractQuery
import org.judal.storage.query.relational.AbstractRelationalQuery
import org.judal.storage.table.Record
import org.judal.storage.table.RecordSet
import org.judal.storage.relational.RelationalDataSource
import scala.collection.JavaConverters._
/**
 * Scala-friendly relational query wrapper over [[AbstractRelationalQuery]].
 *
 * @param dts relational data source used to open the view
 * @param recClass record class returned by the query
 * @param alias optional table alias (may be null)
 */
class RelationalQuery[R >: Null <: Record](dts: RelationalDataSource, recClass: Class[R], alias: String) extends AbstractRelationalQuery[R](dts, recClass, alias) {

  def this(recClass: Class[R]) = this(EngineFactory.getDefaultRelationalDataSource, recClass, null)

  def this(recClass: Class[R], alias: String) = this(EngineFactory.getDefaultRelationalDataSource(), recClass, alias)

  def this(dts: RelationalDataSource, recClass: Class[R]) = this(dts, recClass, null)

  def this(dts: RelationalDataSource, rec: R, alias: String) = {
    this(dts, if (rec.getClass != null) rec.getClass.asInstanceOf[Class[R]] else null)
    viw = dts.openRelationalView(rec)
    // setAlias is invoked reflectively because the concrete view type is not known here
    if (alias != null && alias.length() > 0)
      viw.getClass().getMethod("setAlias", classOf[String]).invoke(viw, alias)
    qry = viw.newQuery.asInstanceOf[AbstractQuery]
    prd = qry.newPredicate
  }

  def this(rec: R, alias: String) = this(EngineFactory.getDefaultRelationalDataSource, rec, alias)

  def this(rec: R) = this(rec, null)

  def this(dts: RelationalDataSource, rec: R) = this(dts, rec, null)

  // Internal no-arg constructor used only by clone()
  private def this() = this(null: RelationalDataSource, null: Class[R], null: String)

  override def clone(): RelationalQuery[R] = {
    val theClone = new RelationalQuery[R]()
    theClone.clone(this)
    theClone
  }

  /** Runs the query and returns all matching records. */
  override def fetch(): Iterable[R] = {
    // && (not &): with the non-short-circuit operator, prd.parts would be
    // dereferenced even when prd is null, throwing a NullPointerException.
    if (prd != null && prd.parts.size > 0)
      qry.setFilter(prd)
    viw.fetch(qry).asScala
  }

  /** Runs the query binding the given named parameters in order. */
  def fetchWithArray(params: (String, AnyRef)*) = {
    qry.declareParameters(params.map(p => p._1).mkString(","))
    qry.executeWithArray(params.map(p => p._2): _*).asInstanceOf[RecordSet[R]].asScala
  }

  /** Runs the query binding named parameters from an insertion-ordered map. */
  def fetchWithMap(params: scala.collection.mutable.LinkedHashMap[String, AnyRef]) = {
    qry.declareParameters(params.keysIterator.mkString(","))
    qry.executeWithArray(params.values.toSeq: _*).asInstanceOf[RecordSet[R]].asScala
  }

  /** Returns the first matching record, or null when there are no results. */
  def fetchFirst(): R = {
    var rst: RecordSet[R] = null
    // Short-circuit null guard; see fetch() above.
    if (prd != null && prd.parts.size > 0)
      qry.setFilter(prd)
    if (qry.getRangeFromIncl == 0L && qry.getRangeToExcl == 1L) {
      rst = viw.fetch(qry)
    } else {
      // Query a clone restricted to a single row so this query's range is preserved
      val q1 = clone
      q1.setRange(0L, 1L)
      if (prd != null && prd.parts.size > 0)
        q1.setFilter(prd)
      rst = viw.fetch(q1.qry)
    }
    if (rst.isEmpty()) null else rst.get(0)
  }
}
| sergiomt/judal | scala-adaptor/src/main/scala/org/judal/storage/scala/RelationalQuery.scala | Scala | apache-2.0 | 2,683 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.jobs.accumulo.index
import com.beust.jcommander.Parameter
import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat
import org.apache.accumulo.core.client.security.tokens.PasswordToken
import org.apache.accumulo.core.data.Mutation
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce.{Counter, Job, Mapper}
import org.apache.hadoop.util.{Tool, ToolRunner}
import org.geotools.data.{DataStoreFinder, Query}
import org.locationtech.geomesa.accumulo.data._
import org.locationtech.geomesa.accumulo.index.{AccumuloFeatureIndex, AttributeIndex}
import org.locationtech.geomesa.jobs._
import org.locationtech.geomesa.jobs.accumulo.{AccumuloJobUtils, GeoMesaArgs, InputDataStoreArgs, InputFeatureArgs}
import org.locationtech.geomesa.jobs.mapreduce.GeoMesaAccumuloInputFormat
import org.locationtech.geomesa.utils.geotools.RichAttributeDescriptors.RichAttributeDescriptor
import org.locationtech.geomesa.utils.index.IndexMode
import org.locationtech.geomesa.utils.stats.IndexCoverage
import org.opengis.feature.`type`.AttributeDescriptor
import org.opengis.feature.simple.SimpleFeature
import org.opengis.filter.Filter
import scala.collection.JavaConversions._
object AttributeIndexJob {
  // Command-line flag names parsed by AttributeIndexArgs
  final val IndexAttributes = "--geomesa.index.attributes"
  final val IndexCoverage = "--geomesa.index.coverage"

  // Hadoop configuration keys used to pass the parsed options to the mappers
  protected[index] val AttributesKey = "org.locationtech.geomesa.attributes"
  protected[index] val CoverageKey = "org.locationtech.geomesa.coverage"

  /** Entry point: runs the job through Hadoop's ToolRunner and exits with its status code. */
  def main(args: Array[String]): Unit = {
    val result = ToolRunner.run(new AttributeIndexJob(), args)
    System.exit(result)
  }
}
/**
 * Command-line arguments for [[AttributeIndexJob]]: the attributes to index and an
 * optional index coverage (join or full), on top of the input feature/data-store args.
 */
class AttributeIndexArgs(args: Array[String]) extends GeoMesaArgs(args) with InputFeatureArgs with InputDataStoreArgs {
  @Parameter(names = Array(AttributeIndexJob.IndexAttributes), description = "Attributes to index", variableArity = true, required = true)
  var attributes: java.util.List[String] = new java.util.ArrayList[String]()

  @Parameter(names = Array(AttributeIndexJob.IndexCoverage), description = "Type of index (join or full)")
  var coverage: String = null

  /** Re-serializes the parsed options back into a command-line argument array. */
  override def unparse(): Array[String] = {
    // Each attribute becomes a repeated flag/value pair
    val attributeArgs: Array[String] =
      if (attributes == null || attributes.isEmpty) {
        Array.empty[String]
      } else {
        attributes.flatMap(a => Seq(AttributeIndexJob.IndexAttributes, a)).toArray
      }
    // Coverage is optional - only emit the flag when it was set
    val coverageArgs: Array[String] = Option(coverage) match {
      case Some(c) => Array(AttributeIndexJob.IndexCoverage, c)
      case None    => Array.empty[String]
    }
    Array.concat(
      super[InputFeatureArgs].unparse(),
      super[InputDataStoreArgs].unparse(),
      attributeArgs,
      coverageArgs)
  }
}
/**
 * Map-only Hadoop job that (re)builds the GeoMesa attribute index for selected
 * attributes of an existing schema, writing index mutations directly to Accumulo.
 */
class AttributeIndexJob extends Tool {

  private var conf: Configuration = new Configuration

  override def run(args: Array[String]): Int = {
    val parsedArgs = new AttributeIndexArgs(args)
    parsedArgs.parse()

    val typeName   = parsedArgs.inFeature
    val dsInParams = parsedArgs.inDataStore
    val attributes = parsedArgs.attributes
    // Default to JOIN coverage when no --geomesa.index.coverage flag was given
    val coverage   = Option(parsedArgs.coverage).map { c =>
      try { IndexCoverage.withName(c) } catch {
        case e: Exception => throw new IllegalArgumentException(s"Invalid coverage value $c")
      }
    }.getOrElse(IndexCoverage.JOIN)

    // validation and initialization - ensure the types exist before launching distributed job
    val ds = DataStoreFinder.getDataStore(dsInParams).asInstanceOf[AccumuloDataStore]
    require(ds != null, "The specified input data store could not be created - check your job parameters")
    val sft = ds.getSchema(typeName)
    require(sft != null, s"The schema '$typeName' does not exist in the input data store")

    // Reuse the existing attribute index if present, otherwise configure a new one
    val index = AccumuloFeatureIndex.indices(sft, IndexMode.Write)
        .find(_.name == AttributeIndex.name).getOrElse {
      AttributeIndex.configure(sft, ds)
      AttributeIndex
    }
    val tableName = index.getTableName(typeName, ds)

    // Fail fast on attribute names that are not part of the schema
    val valid = sft.getAttributeDescriptors.map(_.getLocalName)
    attributes.foreach(a => assert(valid.contains(a), s"Attribute '$a' does not exist in schema '$typeName'"))

    val job = Job.getInstance(conf,
      s"GeoMesa Attribute Index Job '${sft.getTypeName}' - '${attributes.mkString(", ")}'")

    AccumuloJobUtils.setLibJars(job.getConfiguration)

    job.setJarByClass(SchemaCopyJob.getClass)
    job.setMapperClass(classOf[AttributeMapper])
    job.setInputFormatClass(classOf[GeoMesaAccumuloInputFormat])
    job.setOutputFormatClass(classOf[AccumuloOutputFormat])
    job.setMapOutputKeyClass(classOf[Text])
    job.setMapOutputValueClass(classOf[Mutation])
    // Map-only job: mutations are written straight to the output format
    job.setNumReduceTasks(0)

    // TODO we could use GeoMesaOutputFormat with indices
    val query = new Query(sft.getTypeName, Filter.INCLUDE)
    GeoMesaAccumuloInputFormat.configure(job, dsInParams, query)
    // Pass the attribute selection and coverage through the Hadoop configuration
    job.getConfiguration.set(AttributeIndexJob.AttributesKey, attributes.mkString(","))
    job.getConfiguration.set(AttributeIndexJob.CoverageKey, coverage.toString)

    AccumuloOutputFormat.setConnectorInfo(job, parsedArgs.inUser, new PasswordToken(parsedArgs.inPassword.getBytes))
    // use deprecated method to work with both 1.5/1.6
    AccumuloOutputFormat.setZooKeeperInstance(job, parsedArgs.inInstanceId, parsedArgs.inZookeepers)
    AccumuloOutputFormat.setDefaultTableName(job, tableName)
    AccumuloOutputFormat.setCreateTables(job, true)

    val result = job.waitForCompletion(true)

    if (result) {
      // update the metadata and splits
      // reload the sft, as we nulled out the index flags earlier
      val sft = ds.getSchema(typeName)
      def wasIndexed(ad: AttributeDescriptor) = attributes.contains(ad.getLocalName)
      sft.getAttributeDescriptors.filter(wasIndexed).foreach(_.setIndexCoverage(coverage))
      ds.updateSchema(typeName, sft)
      // schedule a table compaction to clean up the table
      ds.connector.tableOperations().compact(tableName, null, null, true, false)
    }

    ds.dispose()

    if (result) 0 else 1
  }

  override def getConf: Configuration = conf

  override def setConf(conf: Configuration): Unit = this.conf = conf
}
/**
 * Mapper that converts each input SimpleFeature into attribute-index mutations.
 * Output key is null because the Accumulo output format's default table name is used.
 */
class AttributeMapper extends Mapper[Text, SimpleFeature, Text, Mutation] {

  type Context = Mapper[Text, SimpleFeature, Text, Mutation]#Context

  // Initialized in setup() from the job configuration
  private var counter: Counter = null
  private var writer: (AccumuloFeature) => Seq[Mutation] = null
  private var toWritable: (SimpleFeature) => AccumuloFeature = null

  override protected def setup(context: Context): Unit = {
    counter = context.getCounter("org.locationtech.geomesa", "attributes-written")
    val dsParams = GeoMesaConfigurator.getDataStoreInParams(context.getConfiguration)
    val ds = DataStoreFinder.getDataStore(dsParams).asInstanceOf[AccumuloDataStore]
    val sft = ds.getSchema(GeoMesaConfigurator.getFeatureType(context.getConfiguration))
    // Attribute selection and coverage were serialized by the driver (AttributeIndexJob.run)
    val attributes = context.getConfiguration.get(AttributeIndexJob.AttributesKey).split(",").toSet
    val coverage = IndexCoverage.withName(context.getConfiguration.get(AttributeIndexJob.CoverageKey))
    // set the coverage for each descriptor so that we write out the ones we want to index and not others
    sft.getAttributeDescriptors.foreach { d =>
      d.setIndexCoverage(if (attributes.contains(d.getLocalName)) coverage else IndexCoverage.NONE)
    }
    val index = AccumuloFeatureIndex.indices(sft, IndexMode.Write)
        .find(_.name == AttributeIndex.name).getOrElse(AttributeIndex)
    writer = index.writer(sft, ds)
    toWritable = AccumuloFeature.wrapper(sft, ds.config.defaultVisibilities)
    // The data store is only needed to build the writer functions above
    ds.dispose()
  }

  override protected def cleanup(context: Context): Unit = {
  }

  override def map(key: Text, value: SimpleFeature, context: Context) {
    val mutations = writer(toWritable(value))
    mutations.foreach(context.write(null: Text, _)) // default table name is set already
    counter.increment(mutations.length)
  }
}
| nagavallia/geomesa | geomesa-accumulo/geomesa-accumulo-jobs/src/main/scala/org/locationtech/geomesa/jobs/accumulo/index/AttributeIndexJob.scala | Scala | apache-2.0 | 8,365 |
/*
* Copyright 2017-2018 47 Degrees, LLC. <http://www.47deg.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package freestyle.free.cache
import cats.arrow.FunctionK
import cats.{~>, Applicative, Id}
import freestyle.free.FSHandler
import freestyle.free.cache.hashmap._
import org.scalatest.{BeforeAndAfterEach, Suite}
/**
 * Test mixin that wires a freestyle cache backed by an in-memory concurrent hash map,
 * interpreted in the Id monad, and clears it before every test.
 */
trait CacheTestContext extends BeforeAndAfterEach { self: Suite =>

  import Hasher.string
  import freestyle.free.Capture.freeStyleIdCaptureInstance

  // Backing key-value store shared by all tests in the suite
  private[this] implicit val rawMap: KeyValueMap[Id, String, Int] =
    new ConcurrentHashMapWrapper[Id, String, Int]

  // Id ~> Id: no effect translation is needed for these tests
  private[this] implicit val idHandler: FSHandler[Id, Id] = FunctionK.id[Id]

  protected[this] final val provider = new KeyValueProvider[String, Int]

  // Handler interpreting the cache algebra against the raw map above
  protected[this] implicit val interpret: provider.CacheM.Handler[Id] =
    provider.implicits.cacheHandler(rawMap, idHandler)

  // Reset the shared map so state does not leak between tests
  override def beforeEach = rawMap.clear
}
| frees-io/freestyle | modules/cache/shared/src/test/scala/free/TestContext.scala | Scala | apache-2.0 | 1,435 |
package com.github.cuzfrog.scmd.macros.argutils
import com.github.cuzfrog.scmd.macros.argutils.RawArg.{RawCommand, RawPrior}
import scala.collection.immutable
import scala.meta._
private object AddExplicitTypeImpl {
  /**
   * Converts each raw argument definition into a `val` declaration with an explicit
   * type ascription and a DummyApi placeholder initializer.
   */
  def addExplicitType(rawArgs: immutable.Seq[RawArg]): immutable.Seq[Stat] = {
    rawArgs map {
      case r: RawCommand =>
        q"val ${r.name.toPatTerm}: Command = DummyApi.cmdDef"
      case r: RawPrior =>
        q"val ${r.name.toPatTerm}: PriorArg = DummyApi.priorDef"
      case r: RawTypedArg =>
        q"val ${r.name.toPatTerm}:${r.composedTpe} = ${getDummyApi(r.tpe, r.composedTpe)}"
    }
  }

  /**
   * Derives the DummyApi factory method from the composed type by stripping the
   * `with` keyword, whitespace and type-argument lists, e.g.
   * `OptionArg[Int] with Mandatory` becomes `DummyApi.OptionArgMandatory[Int]`.
   */
  private def getDummyApi(tpe: Type, composedTpe: Type): Term.ApplyType = {
    // The regex must use single backslashes inside the triple-quoted string:
    // doubled backslashes would match literal '\' characters (never present in
    // type syntax) and leave spaces/brackets in the generated Term.Name.
    val name = Term.Name(composedTpe.syntax.replaceAll("""(with)|(\s)|(\[[^\[\]]+\])""", ""))
    q"DummyApi.$name[$tpe]"
  }
}
| cuzfrog/simple-cmd | src/main/scala/com/github/cuzfrog/scmd/macros/argutils/AddExplicitTypeImpl.scala | Scala | apache-2.0 | 834 |
/*
* Copyright 2015 Dmitriy Yefremov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.yefremov.sleipnir.data.custom
import com.linkedin.data.DataMap
import com.linkedin.data.schema.MapDataSchema
import com.linkedin.data.template.DataTemplateUtil
import net.yefremov.sleipnir.data.{TypeCoercer, ScalaMapTemplate}
import ScalaMapTemplate._
import TypeCoercer._
import LongMap._
/**
* Custom wrapper for Map[String, Long].
* @author Dmitriy Yefremov
*/
class LongMap protected(override val map: Map[String, Long], mapData: DataMap) extends ScalaMapTemplate(mapData, Schema) {
  // Builds from a Scala map, unwrapping the values into the underlying DataMap
  def this(map: Map[String, Long]) = this(map, ScalaMapTemplate.unwrapAll(map, PrimitiveInputCoercer))
  // Builds from raw Pegasus data, wrapping the values into a Scala map
  def this(data: DataMap) = this(wrapAll(data, Coercer), data)
}
object LongMap {

  // Pegasus schema for a map with long values. Quotes inside this ordinary string
  // literal must be escaped as \" - a doubled backslash would end the literal and
  // corrupt the JSON passed to the schema parser.
  private val SchemaJson: String = "{ \"type\" : \"map\", \"values\" : \"long\" }"

  private val Schema: MapDataSchema = DataTemplateUtil.parseSchema(SchemaJson).asInstanceOf[MapDataSchema]

  // Longs need no conversion: pass the underlying value through unchanged
  private val Coercer: OutputCoercer[Long] = { case x: Long => x }

  def apply(map: Map[String, Long]): LongMap = new LongMap(map)
}
| dmitriy-yefremov/sleipnir | generator/src/main/scala/net/yefremov/sleipnir/data/custom/LongMap.scala | Scala | apache-2.0 | 1,660 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.sj.engine.core.simulation.state
import com.bwsw.sj.common.engine.core.state.StateLoaderInterface
import scala.collection.mutable
/**
* Mock for [[com.bwsw.sj.common.engine.core.state.StateLoaderInterface]]
*
* @param lastState last state
* @author Pavel Tomskikh
*/
class StateLoaderMock(lastState: mutable.Map[String, Any] = mutable.Map.empty)
  extends StateLoaderInterface {

  /**
   * Allows getting last state. Needed for restoring after crashing
   *
   * The mock never persists anything, so the returned state ID is always None
   * and the state variables are exactly the map supplied at construction.
   *
   * @return (ID of the last state, state variables)
   */
  override def loadLastState(): (Option[Long], mutable.Map[String, Any]) = (None, lastState)
}
| bwsw/sj-platform | core/sj-engine-simulators/src/main/scala/com/bwsw/sj/engine/core/simulation/state/StateLoaderMock.scala | Scala | apache-2.0 | 1,454 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.utils.{CoreUtils, TestUtils, ZkUtils}
import kafka.zk.ZooKeeperTestHarness
import org.easymock.EasyMock
import org.junit.Assert._
import org.junit.Test
/**
 * Broker startup tests: ZK chroot creation, port/broker-id conflicts, metadata
 * self-awareness, and broker-state transitions relative to ZK registration.
 * Uses explicit `(): Unit =` method syntax instead of the deprecated procedure syntax.
 */
class ServerStartupTest extends ZooKeeperTestHarness {

  @Test
  def testBrokerCreatesZKChroot(): Unit = {
    val brokerId = 0
    val zookeeperChroot = "/kafka-chroot-for-unittest"
    val props = TestUtils.createBrokerConfig(brokerId, zkConnect)
    val zooKeeperConnect = props.get("zookeeper.connect")
    props.put("zookeeper.connect", zooKeeperConnect + zookeeperChroot)
    val server = TestUtils.createServer(KafkaConfig.fromProps(props))

    // Starting the broker must have created the chroot path in ZooKeeper
    val pathExists = zkUtils.pathExists(zookeeperChroot)
    assertTrue(pathExists)

    server.shutdown()
    CoreUtils.delete(server.config.logDirs)
  }

  @Test
  def testConflictBrokerStartupWithSamePort(): Unit = {
    // Create and start first broker
    val brokerId1 = 0
    val props1 = TestUtils.createBrokerConfig(brokerId1, zkConnect)
    val server1 = TestUtils.createServer(KafkaConfig.fromProps(props1))
    val port = TestUtils.boundPort(server1)

    // Create a second broker with same port
    val brokerId2 = 1
    val props2 = TestUtils.createBrokerConfig(brokerId2, zkConnect, port = port)
    try {
      TestUtils.createServer(KafkaConfig.fromProps(props2))
      fail("Starting a broker with the same port should fail")
    } catch {
      case _: RuntimeException => // expected
    } finally {
      server1.shutdown()
      CoreUtils.delete(server1.config.logDirs)
    }
  }

  @Test
  def testConflictBrokerRegistration(): Unit = {
    // Try starting a broker with the a conflicting broker id.
    // This shouldn't affect the existing broker registration.
    val brokerId = 0
    val props1 = TestUtils.createBrokerConfig(brokerId, zkConnect)
    val server1 = TestUtils.createServer(KafkaConfig.fromProps(props1))
    val brokerRegistration = zkUtils.readData(ZkUtils.BrokerIdsPath + "/" + brokerId)._1

    val props2 = TestUtils.createBrokerConfig(brokerId, zkConnect)
    try {
      TestUtils.createServer(KafkaConfig.fromProps(props2))
      fail("Registering a broker with a conflicting id should fail")
    } catch {
      case _: RuntimeException =>
      // this is expected
    }

    // broker registration shouldn't change
    assertEquals(brokerRegistration, zkUtils.readData(ZkUtils.BrokerIdsPath + "/" + brokerId)._1)

    server1.shutdown()
    CoreUtils.delete(server1.config.logDirs)
  }

  @Test
  def testBrokerSelfAware(): Unit = {
    val brokerId = 0
    val props = TestUtils.createBrokerConfig(brokerId, zkConnect)
    val server = TestUtils.createServer(KafkaConfig.fromProps(props))

    // The broker's own metadata cache should eventually contain itself
    TestUtils.waitUntilTrue(() => server.metadataCache.getAliveBrokers.nonEmpty, "Wait for cache to update")
    assertEquals(1, server.metadataCache.getAliveBrokers.size)
    assertEquals(brokerId, server.metadataCache.getAliveBrokers.head.id)

    server.shutdown()
    CoreUtils.delete(server.config.logDirs)
  }

  @Test
  def testBrokerStateRunningAfterZK(): Unit = {
    val brokerId = 0
    val mockBrokerState = EasyMock.niceMock(classOf[kafka.server.BrokerState])

    // Interceptor that verifies the broker is registered in ZK by the time the
    // state machine transitions (i.e. registration happens before RunningAsBroker)
    class BrokerStateInterceptor() extends BrokerState {
      override def newState(newState: BrokerStates): Unit = {
        val brokers = zkUtils.getAllBrokersInCluster()
        assertEquals(1, brokers.size)
        assertEquals(brokerId, brokers.head.id)
      }
    }

    class MockKafkaServer(override val config: KafkaConfig, override val brokerState: BrokerState = mockBrokerState) extends KafkaServer(config) {}

    val props = TestUtils.createBrokerConfig(brokerId, zkConnect)
    val server = new MockKafkaServer(KafkaConfig.fromProps(props))

    EasyMock.expect(mockBrokerState.newState(RunningAsBroker)).andDelegateTo(new BrokerStateInterceptor).once()
    EasyMock.replay(mockBrokerState)

    server.startup()
    server.shutdown()
    CoreUtils.delete(server.config.logDirs)
  }
}
| ijuma/kafka | core/src/test/scala/unit/kafka/server/ServerStartupTest.scala | Scala | apache-2.0 | 4,711 |
object Test {
  def main(args: Array[String]) {
    // Instantiating Bar exercises the foo package hierarchy below; presumably a
    // compiler regression test for trait linearization (file name t5105) - the
    // harness checks the printed output.
    new foo.Bar
    println("You buttered your bread. Now sleep in it!")
  }
}
package foo {
  // Foo is mixed both into class Bar (via Baz) and into Baz's companion object
  trait Foo { def foo() {} }
  class Bar extends Baz with Foo
  abstract class Baz
  object Baz extends Foo
}
| felixmulder/scala | test/files/run/t5105.scala | Scala | bsd-3-clause | 253 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.tensor
import breeze.linalg.{DenseMatrix => BrzDenseMatrix, DenseVector => BrzDenseVector}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.Table
import org.apache.spark.mllib.linalg.{DenseMatrix, DenseVector, Matrix, Vector}
import scala.reflect.ClassTag
// Base class for quantized tensors: every Tensor operation below deliberately
// throws UnsupportedOperationException; concrete subclasses override only the
// small subset of the Tensor API they actually support.
abstract class QuantizedTensorUnsupported[T: ClassTag] extends Tensor[T] {
  // Message thrown by every unsupported operation in this class.
  val errorString = s"QuantizeTensor doesn't support this operation now"
  /**
   * Fill with a given value. It will change the value of the current tensor and return itself
   *
   * @param v value to fill the tensor
   * @return current tensor
   */
  override def fill(v: T): Tensor[T] = throw new UnsupportedOperationException(errorString)
  /**
   * Fill with zero. It will change the value of the current tensor and return itself
   *
   * @return current tensor
   */
  override def zero(): Tensor[T] = throw new UnsupportedOperationException(errorString)
  /**
   * Fill with random value (normal gaussian distribution).
   * It will change the value of the current tensor and return itself
   *
   * @return current tensor
   */
  override def randn(): Tensor[T] = throw new UnsupportedOperationException(errorString)
  /**
   * Fill with random value (normal gaussian distribution with the specified mean
   * and stdv).
   * It will change the value of the current tensor and return itself
   *
   * @return current tensor
   */
  override def randn(mean: Double, stdv: Double): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * Fill with random value (uniform distribution).
   * It will change the value of the current tensor and return itself
   *
   * @return current tensor
   */
  override def rand(): Tensor[T] = throw new UnsupportedOperationException(errorString)
  /**
   * Fill with random value (uniform distribution between [lowerBound, upperBound]).
   * It will change the value of the current tensor and return itself
   *
   * @return current tensor
   */
  override def rand(lowerBound: Double, upperBound: Double): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * Fill with random value (bernoulli distribution).
   * It will change the value of the current tensor and return itself
   *
   * @return current tensor
   */
  override def bernoulli(p: Double): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * Create a new tensor which exchanges the given dimensions of the current tensor
   *
   * @param dim1 dimension to be exchanged, count from one
   * @param dim2 dimension to be exchanged, count from one
   * @return new tensor
   */
  override def transpose(dim1: Int, dim2: Int): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * Shortcut of transpose(1, 2) for 2D tensor
   *
   * @see transpose()
   */
  override def t(): Tensor[T] = throw new UnsupportedOperationException(errorString)
  /**
   * Query tensor on a given index. Tensor should not be empty
   *
   * @param index count from 1
   * @return
   */
  override def apply(index: Int): Tensor[T] = throw new UnsupportedOperationException(errorString)
  /**
   * Query the value on a given index. Tensor should not be empty
   *
   * @param indexes the indexes length should be the same as the tensor dimension length and each
   *                value counts from 1
   * @return the value on the given index
   */
  override def apply(indexes: Array[Int]): T = throw new UnsupportedOperationException(errorString)
  /**
   * Query the value on a given position. The number of parameters
   * should be equal to the dimension number of the tensor.
   * Tensor should not be empty.
   *
   * @param d1 ,( d2, d3, d4, d5) the given position
   * @return the value on a given position
   */
  override def valueAt(d1: Int): T = throw new UnsupportedOperationException(errorString)
  // Arity-2..5 variants of valueAt; all equally unsupported.
  override def valueAt(d1: Int, d2: Int): T = throw new UnsupportedOperationException(errorString)
  override def valueAt(d1: Int, d2: Int, d3: Int): T =
    throw new UnsupportedOperationException(errorString)
  override def valueAt(d1: Int, d2: Int, d3: Int, d4: Int): T =
    throw new UnsupportedOperationException(errorString)
  override def valueAt(d1: Int, d2: Int, d3: Int, d4: Int, d5: Int): T =
    throw new UnsupportedOperationException(errorString)
  /**
   * Subset the tensor by applying the elements of the given table to the corresponding dimensions
   * of the tensor. The element of the given table can be an Int or another Table.
   * An Int means select on the current dimension; a table means narrow on the current dimension.
   * The table should have two elements, of which the first is the start index and
   * the second is the end index. An empty table is equal to Table(1, size_of_current_dimension).
   * If the table length is less than the tensor dimension, the missing dimension is applied by
   * an empty table
   *
   * @see select
   * @see narrow
   * @param t The table length should be less than or equal to the tensor dimensions
   * @return
   */
  override def apply(t: Table): Tensor[T] = throw new UnsupportedOperationException(errorString)
  /**
   * For tensor(i) = value. If tensor(i) is another tensor, it will fill the selected subset by
   * the given value
   *
   * @param index index
   * @param value value to write
   */
  override def update(index: Int, value: T): Unit =
    throw new UnsupportedOperationException(errorString)
  /**
   * Copy the given tensor value to the selected subset of the current tensor by the given index.
   * The subset should have the same size as the given tensor
   *
   * @param index index
   * @param src tensor to write
   */
  override def update(index: Int, src: Tensor[T]): Unit =
    throw new UnsupportedOperationException(errorString)
  /**
   * Write the value to the position indexed by the given index array
   *
   * @param indexes index array. It should have the same length as the tensor dimension
   * @param value value to write
   */
  override def update(indexes: Array[Int], value: T): Unit =
    throw new UnsupportedOperationException(errorString)
  /**
   * Write the value on a given position. The number of parameters
   * should be equal to the dimension number of the tensor.
   *
   * @param d1 ,( d2, d3, d4, d5) the given position
   * @param value the written value
   * @return
   */
  override def setValue(d1: Int, value: T): this.type =
    throw new UnsupportedOperationException(errorString)
  // Arity-2..5 variants of setValue; all equally unsupported.
  override def setValue(d1: Int, d2: Int, value: T): this.type =
    throw new UnsupportedOperationException(errorString)
  override def setValue(d1: Int, d2: Int, d3: Int, value: T): this.type =
    throw new UnsupportedOperationException(errorString)
  override def setValue(d1: Int, d2: Int, d3: Int, d4: Int, value: T): this.type =
    throw new UnsupportedOperationException(errorString)
  override def setValue(d1: Int, d2: Int, d3: Int, d4: Int, d5: Int,
    value: T): this.type = throw new UnsupportedOperationException(errorString)
  /**
   * Fill the selected subset of the current tensor with the given value.
   * The element of the given table can be an Int or another Table. An Int means select on current
   * dimension; a table means narrow on current dimension. The table should have two elements,
   * of which the first is the start index and the second is the end index. An empty table is equal
   * to Table(1, size_of_current_dimension). If the table length is less than the tensor dimension,
   * the missing dimension is applied by an empty table
   *
   * @param t subset table
   * @param value value to write
   */
  override def update(t: Table, value: T): Unit =
    throw new UnsupportedOperationException(errorString)
  /**
   * Copy the given tensor value to the selected subset of the current tensor.
   * The element of the given table can be an Int or another Table. An Int means select on current
   * dimension; a table means narrow on current dimension. The table should have two elements,
   * of which the first is the start index and the second is the end index. An empty table is equal
   * to Table(1, size_of_current_dimension). If the table length is less than the tensor dimension,
   * the missing dimension is applied by an empty table
   *
   * @param t subset table
   * @param src tensor to copy
   */
  override def update(t: Table, src: Tensor[T]): Unit =
    throw new UnsupportedOperationException(errorString)
  /**
   * Update the values meeting the filter criteria with the given value
   *
   * @param filter filter
   * @param value value to update
   */
  override def update(filter: (T) => Boolean, value: T): Unit =
    throw new UnsupportedOperationException(errorString)
  /**
   * Check if the tensor is contiguous on the storage
   *
   * @return true if it's contiguous
   */
  override def isContiguous(): Boolean = throw new UnsupportedOperationException(errorString)
  /**
   * Get a contiguous tensor from current tensor
   *
   * @return the current tensor if it's contiguous; or a new contiguous tensor with separated
   *         storage
   */
  override def contiguous(): Tensor[T] = throw new UnsupportedOperationException(errorString)
  /**
   * Check if the size is the same as the given tensor's
   *
   * @param other tensor to be compared
   * @return true if they have same size
   */
  override def isSameSizeAs(other: Tensor[_]): Boolean =
    throw new UnsupportedOperationException(errorString)
  /**
   * Resize the current tensor to the same size of the given tensor. It will still use the same
   * storage if the storage is sufficient for the new size
   *
   * @param src target tensor
   * @return current tensor
   */
  override def resizeAs(src: Tensor[_]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * Remove the dim-th dimension and return the subset part. For instance
   * tensor =
   * 1 2 3
   * 4 5 6
   * tensor.select(1, 1) is [1 2 3]
   * tensor.select(1, 2) is [4 5 6]
   * tensor.select(2, 3) is [3 6]
   *
   * @param dim
   * @param index
   * @return
   */
  override def select(dim: Int, index: Int): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * Get the storage
   *
   * @return storage
   */
  override def storage(): Storage[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * tensor offset on the storage
   *
   * @return storage offset, count from 1
   */
  override def storageOffset(): Int =
    throw new UnsupportedOperationException(errorString)
  /**
   * The Tensor is now going to "view" the given storage, starting at position storageOffset (>=1)
   * with the given dimension sizes and the optional given strides. As the result, any
   * modification in the elements of the Storage will have an impact on the elements of the Tensor,
   * and vice-versa. This is an efficient method, as there is no memory copy!
   *
   * If only storage is provided, the whole storage will be viewed as a 1D Tensor.
   *
   * @param storage
   * @param storageOffset
   * @param sizes
   * @param strides
   * @return current tensor
   */
  override def set(storage: Storage[T], storageOffset: Int, sizes: Array[Int],
    strides: Array[Int]): Tensor[T] = throw new UnsupportedOperationException(errorString)
  /**
   * Get a subset of the tensor on dim-th dimension. The offset is given by index, and length is
   * given by size. The important difference with select is that it will not reduce the dimension
   * number. For instance
   * tensor =
   * 1 2 3
   * 4 5 6
   * tensor.narrow(1, 1, 1) is [1 2 3]
   * tensor.narrow(2, 2, 2) is
   * 2 3
   * 5 6
   *
   * @param dim
   * @param index
   * @param size
   * @return
   */
  override def narrow(dim: Int, index: Int, size: Int): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * Apply a function to each element of the tensor and modify its value if it returns a double
   *
   * @param func applied function
   * @return current tensor
   */
  override def apply1(func: (T) => T): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * Map values of another tensor to corresponding values of the current tensor, apply the
   * function on the two values and change the value of the current tensor.
   * The other tensor should have the same size as the current tensor
   *
   * @param other another tensor
   * @param func applied function
   * @return current tensor
   */
  override def map(other: Tensor[T], func: (T, T) => T): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * Removes all singleton dimensions of the tensor
   *
   * @return current tensor
   */
  override def squeeze(): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * Removes the given dimension of the tensor if it's singleton
   *
   * @return current tensor
   */
  override def squeeze(dim: Int): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * Create a new tensor that removes all singleton dimensions of the tensor
   *
   * @return create a new tensor
   */
  override def squeezeNewTensor(): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  // Reshape the tensor as a view with the given sizes; unsupported here.
  override def view(sizes: Array[Int]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * Returns a tensor which contains all slices of size @param size
   * in the dimension @param dim. Step between two slices is given by @param step.
   *
   * @param dim
   * @param size
   * @param step Step between two slices
   * @return new tensor
   */
  override def unfold(dim: Int, size: Int, step: Int): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * Repeating a tensor allocates new memory, unless result is provided, in which case its memory
   * is resized. sizes specify the number of times the tensor is repeated in each dimension.
   *
   * @param sizes
   * @return
   */
  override def repeatTensor(sizes: Array[Int]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * This is equivalent to this.expand(template.size())
   *
   * @param template the given tensor
   * @return
   */
  override def expandAs(template: Tensor[T]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * Expanding a tensor does not allocate new memory: singleton dimensions can be expanded
   * to multiple ones by setting the stride to 0. Any dimension that has size 1 can be expanded
   * to an arbitrary value without new memory allocation. Attempting to expand along a dimension
   * that does not have size 1 will result in an error.
   *
   * @param sizes the size that tensor will expand to
   * @return
   */
  override def expand(sizes: Array[Int]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * Splits current tensor along dimension dim into a result table of Tensors of size size
   * (a number) or less (in the case of the last Tensor). The sizes of the non-dim dimensions
   * remain unchanged. Internally, a series of narrows are performed along dimension dim.
   * Argument dim defaults to 1.
   *
   * @param size
   * @param dim
   * @return
   */
  override def split(size: Int, dim: Int): Array[Tensor[T]] =
    throw new UnsupportedOperationException(errorString)
  /**
   * Split one tensor into multiple tensors along the `dim` dimension
   *
   * @param dim the specific dimension
   * @return
   */
  override def split(dim: Int): Array[Tensor[T]] =
    throw new UnsupportedOperationException(errorString)
  /**
   * convert the tensor to BreezeVector, the dimension of the tensor needs to be 1.
   *
   * @return BrzDenseVector
   */
  override def toBreezeVector(): BrzDenseVector[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * convert the tensor to MLlibVector, the dimension of the
   * tensor needs to be 1, and the tensor needs to be contiguous.
   *
   * @return Vector
   */
  override def toMLlibVector(): Vector =
    throw new UnsupportedOperationException(errorString)
  /**
   * convert the tensor to BreezeMatrix, the dimension of the tensor needs to be 2.
   *
   * @return BrzDenseMatrix
   */
  override def toBreezeMatrix(): BrzDenseMatrix[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * convert the tensor to MLlibMatrix, the dimension of the
   * tensor needs to be 2, and the tensor needs to be contiguous.
   *
   * @return Matrix
   */
  override def toMLlibMatrix(): Matrix =
    throw new UnsupportedOperationException(errorString)
  /**
   * return the tensor datatype (DoubleType or FloatType)
   *
   * @return
   */
  override def getType(): TensorDataType =
    throw new UnsupportedOperationException(errorString)
  /**
   * Compare and print differences between two tensors
   *
   * @param other
   * @param count
   * @return true if there's a difference, vice versa
   */
  override def diff(other: Tensor[T], count: Int, reverse: Boolean): Boolean =
    throw new UnsupportedOperationException(errorString)
  /**
   * view this.tensor and add a singleton dimension at the `dim` dimension
   *
   * @param t source tensor
   * @param dim the specific dimension, default is 1
   * @return this
   */
  override def addSingletonDimension(t: Tensor[T], dim: Int): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * view this.tensor and add multiple dimensions at the `dim` dimensions
   *
   * @param t source tensor
   * @param dims the specific dimension array, default is [1]
   * @return this
   */
  override def addMultiDimension(t: Tensor[T], dims: Array[Int]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * create a new tensor without any change of the tensor
   *
   * @param sizes the size of the new Tensor
   * @return
   */
  override def reshape(sizes: Array[Int]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * Save the tensor to the given path
   *
   * @param path
   * @param overWrite
   * @return
   */
  override def save(path: String, overWrite: Boolean): this.type =
    throw new UnsupportedOperationException(errorString)
  // scalastyle:off methodName
  /**
   * Add all elements of this with value not in place.
   * It will allocate new memory.
   *
   * @param s
   * @return
   */
  override def +(s: T): Tensor[T] = throw new UnsupportedOperationException(errorString)
  /**
   * Add a Tensor to another one, return the result in new allocated memory.
   * The number of elements in the Tensors must match, but the sizes do not matter.
   * The size of the returned Tensor will be the size of the first Tensor
   *
   * @param t
   * @return
   */
  override def +(t: Tensor[T]): Tensor[T] = throw new UnsupportedOperationException(errorString)
  /**
   * subtract all elements of this with the value not in place.
   * It will allocate new memory.
   *
   * @param s
   * @return
   */
  override def -(s: T): Tensor[T] = throw new UnsupportedOperationException(errorString)
  /**
   * Subtract a Tensor from another one, return the result in new allocated memory.
   * The number of elements in the Tensors must match, but the sizes do not matter.
   * The size of the returned Tensor will be the size of the first Tensor
   *
   * @param t
   * @return
   */
  override def -(t: Tensor[T]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  // Element-wise negation, returning a new tensor; unsupported here.
  override def unary_-(): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * divide all elements of this with value not in place.
   * It will allocate new memory.
   *
   * @param s
   * @return
   */
  override def /(s: T): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * Divide a Tensor by another one, return the result in new allocated memory.
   * The number of elements in the Tensors must match, but the sizes do not matter.
   * The size of the returned Tensor will be the size of the first Tensor
   *
   * @param t
   * @return
   */
  override def /(t: Tensor[T]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * multiply all elements of this with value not in place.
   * It will allocate new memory.
   *
   * @param s
   * @return
   */
  override def *(s: T): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * Multiply a Tensor by another one, return the result in new allocated memory.
   * The number of elements in the Tensors must match, but the sizes do not matter.
   * The size of the returned Tensor will be the size of the first Tensor
   *
   * @param t
   * @return
   */
  override def *(t: Tensor[T]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  // scalastyle:on methodName
  /**
   * returns the sum of the elements of this
   *
   * @return
   */
  override def sum(): T =
    throw new UnsupportedOperationException(errorString)
  /**
   * performs the sum operation over the dimension dim
   *
   * @param dim
   * @return
   */
  override def sum(dim: Int): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  // Sum of x over dimension dim, stored into this; unsupported here.
  override def sum(x: Tensor[T], dim: Int): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  // Product of all elements; unsupported here.
  def prod(): T =
    throw new UnsupportedOperationException(errorString)
  // Product of x over dimension dim, stored into this; unsupported here.
  def prod(x: Tensor[T], dim: Int): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * returns the mean of all elements of this.
   *
   * @return
   */
  override def mean(): T =
    throw new UnsupportedOperationException(errorString)
  /**
   * performs the mean operation over the dimension dim.
   *
   * @param dim
   * @return
   */
  override def mean(dim: Int): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * returns the single biggest element of x
   *
   * @return
   */
  override def max(): T =
    throw new UnsupportedOperationException(errorString)
  /**
   * performs the max operation over the dimension n
   *
   * @param dim
   * @return
   */
  override def max(dim: Int): (Tensor[T], Tensor[T]) =
    throw new UnsupportedOperationException(errorString)
  /**
   * performs the max operation over the dimension n
   *
   * @param values
   * @param indices
   * @param dim
   * @return
   */
  override def max(values: Tensor[T], indices: Tensor[T], dim: Int): (Tensor[T], Tensor[T]) =
    throw new UnsupportedOperationException(errorString)
  /**
   * returns the single minimum element of x
   *
   * @return
   */
  override def min(): T =
    throw new UnsupportedOperationException(errorString)
  /**
   * performs the min operation over the dimension n
   *
   * @param dim
   * @return
   */
  override def min(dim: Int): (Tensor[T], Tensor[T]) =
    throw new UnsupportedOperationException(errorString)
  /**
   * performs the min operation over the dimension n
   *
   * @param values
   * @param indices
   * @param dim
   * @return
   */
  override def min(values: Tensor[T], indices: Tensor[T], dim: Int): (Tensor[T], Tensor[T]) =
    throw new UnsupportedOperationException(errorString)
  /**
   * Writes all values from tensor src into this tensor at the specified indices
   *
   * @param dim
   * @param index
   * @param src
   * @return this
   */
  override def scatter(dim: Int, index: Tensor[T], src: Tensor[T]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * change this tensor with values from the original tensor by gathering a number of values
   * from each "row", where the rows are along the dimension dim.
   *
   * @param dim
   * @param index
   * @param src
   * @return this
   */
  override def gather(dim: Int, index: Tensor[T], src: Tensor[T]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * This function computes 2 dimensional convolution of a single image
   * with a single kernel (2D output). The dimensions of input and kernel
   * need to be 2, and the input image needs to be bigger than the kernel. The
   * last argument controls if the convolution is a full ('F') or valid
   * ('V') convolution. The default is valid convolution.
   *
   * @param kernel
   * @param vf full ('F') or valid ('V') convolution.
   * @return
   */
  override def conv2(kernel: Tensor[T], vf: Char): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * This function operates with same options and input/output configurations as conv2,
   * but performs cross-correlation of the input with the kernel k.
   *
   * @param kernel
   * @param vf full ('F') or valid ('V') convolution.
   * @return
   */
  override def xcorr2(kernel: Tensor[T], vf: Char): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * replaces all elements in-place with the square root of the elements of this.
   *
   * @return
   */
  override def sqrt(): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * replaces all elements in-place with the absolute values of the elements of this.
   *
   * @return
   */
  override def abs(): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * x.add(value,y) multiply-accumulates values of y into x.
   *
   * @param value scalar
   * @param y other tensor
   * @return current tensor
   */
  override def add(value: T, y: Tensor[T]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * accumulates all elements of y into this
   *
   * @param y other tensor
   * @return current tensor
   */
  override def add(y: Tensor[T]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * z.add(x, value, y) puts the result of x + value * y in z.
   *
   * @param x
   * @param value
   * @param y
   * @return
   */
  override def add(x: Tensor[T], value: T, y: Tensor[T]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * x.add(value) : add value to all elements of x in place.
   *
   * @param value
   * @return
   */
  override def add(value: T): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  // Element-wise sum of x and y stored into this; unsupported here.
  override def add(x: Tensor[T], y: Tensor[T]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * Performs the dot product. The number of elements must match: both Tensors are seen as a 1D
   * vector.
   *
   * @param y
   * @return
   */
  override def dot(y: Tensor[T]): T =
    throw new UnsupportedOperationException(errorString)
  /**
   * For each element of the tensor, performs the max operation compared with the given value
   * vector.
   *
   * @param value
   * @return
   */
  override def cmax(value: T): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * Performs the p-norm distance calculation between two tensors
   *
   * @param y the second Tensor
   * @param norm the norm of distance
   * @return
   */
  override def dist(y: Tensor[T], norm: Int): T =
    throw new UnsupportedOperationException(errorString)
  /**
   * Performs the element-wise multiplication of tensor1 by tensor2, multiplies the result by the
   * scalar value (1 if not present) and adds it to x. The number of elements must match, but sizes
   * do not matter.
   *
   * @param value
   * @param tensor1
   * @param tensor2
   */
  override def addcmul(value: T, tensor1: Tensor[T], tensor2: Tensor[T]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  // addcmul with an implicit scalar of 1; unsupported here.
  override def addcmul(tensor1: Tensor[T], tensor2: Tensor[T]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * Performs the element-wise division of tensor1 by tensor2, multiplies the result by the scalar
   * value and adds it to x.
   * The number of elements must match, but sizes do not matter.
   *
   * @param value
   * @param tensor1
   * @param tensor2
   * @return
   */
  override def addcdiv(value: T, tensor1: Tensor[T], tensor2: Tensor[T]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  // Subtraction variants mirroring the add(...) overloads above; all unsupported.
  override def sub(value: T, y: Tensor[T]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  override def sub(x: Tensor[T], value: T, y: Tensor[T]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  /**
   * subtracts all elements of y from this
   *
   * @param y other tensor
   * @return current tensor
   */
  override def sub(y: Tensor[T]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  override def sub(x: Tensor[T], y: Tensor[T]): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
  override def sub(value: T): Tensor[T] =
    throw new UnsupportedOperationException(errorString)
/**
* Element-wise multiply
* x.cmul(y) multiplies all elements of x with corresponding elements of y.
* x = x * y
*
* @param y tensor
* @return current tensor
*/
override def cmul(y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Element-wise multiply
* z.cmul(x, y) equals z = x * y
*
* @param x tensor
* @param y tensor
* @return current tensor
*/
override def cmul(x: Tensor[T], y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Element-wise divide
* x.cdiv(y) all elements of x divide all elements of y.
* x = x / y
*
* @param y tensor
* @return current tensor
*/
override def cdiv(y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Element-wise divide
* z.cdiv(x, y) means z = x / y
*
* @param x tensor
* @param y tensor
* @return current tensor
*/
override def cdiv(x: Tensor[T], y: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* multiply all elements of this with value in-place.
*
* @param value
* @return
*/
override def mul(value: T): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* divide all elements of this with value in-place.
*
* @param value
* @return
*/
override def div(value: T): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* put the result of x * value in current tensor
*
* @param value
* @return
*/
override def mul(x: Tensor[T], value: T): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Performs a matrix-matrix multiplication between mat1 (2D tensor) and mat2 (2D tensor).
* Optional values v1 and v2 are scalars that multiply M and mat1 * mat2 respectively.
* Optional value beta is a scalar that scales the result tensor, before accumulating the result
* into the tensor. Defaults to 1.0.
* If mat1 is a n x m matrix, mat2 a m x p matrix, M must be a n x p matrix.
*
* res = (v1 * M) + (v2 * mat1*mat2)
*
* @param v1
* @param M
* @param v2
* @param mat1
* @param mat2
*/
override def addmm(v1: T, M: Tensor[T], v2: T, mat1: Tensor[T], mat2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/** res = M + (mat1*mat2) */
override def addmm(M: Tensor[T], mat1: Tensor[T], mat2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/** res = res + mat1 * mat2 */
override def addmm(mat1: Tensor[T], mat2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/** res = res + v2 * mat1 * mat2 */
override def addmm(v2: T, mat1: Tensor[T], mat2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/** res = v1 * res + v2 * mat1*mat2 */
override def addmm(v1: T, v2: T, mat1: Tensor[T], mat2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/** res = mat1*mat2 */
override def mm(mat1: Tensor[T], mat2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Performs the outer-product between vec1 (1D tensor) and vec2 (1D tensor).
* Optional values v1 and v2 are scalars that multiply mat and vec1 [out] vec2 respectively.
* In other words,
* res_ij = (v1 * mat_ij) + (v2 * vec1_i * vec2_j)
*
* @param t1
* @param t2
* @return
*/
override def addr(t1: Tensor[T], t2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def addr(v1: T, t1: Tensor[T], t2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
override def addr(v1: T, t1: Tensor[T], v2: T, t2: Tensor[T]): Tensor[T] =
throw new UnsupportedOperationException(errorString)
/**
* Performs the outer-product between vec1 (1D Tensor) and vec2 (1D Tensor).
* Optional values v1 and v2 are scalars that multiply mat and vec1 [out] vec2 respectively.
* In other words,res_ij = (v1 * mat_ij) + (v2 * vec1_i * vec2_j)
*
* @param v1
* @param t1
* @param v2
* @param t2
* @param t3
* @return
*/
// NOTE(review): every operation in this section is deliberately unsupported on
// quantized tensors; each fails fast with the shared errorString (defined
// elsewhere in this class).
override def addr(v1: T, t1: Tensor[T], v2: T, t2: Tensor[T], t3: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/**
 * return pseudo-random numbers, require 0<=args.length<=2
 * if args.length = 0, return [0, 1)
 * if args.length = 1, return [1, args(0)] or [args(0), 1]
 * if args.length = 2, return [args(0), args(1)]
 *
 * @param args optional bounds of the sampling range (at most two)
 */
override def uniform(args: T*): T =
  throw new UnsupportedOperationException(errorString)

/**
 * Performs a matrix-vector multiplication between mat (2D Tensor) and vec2 (1D Tensor) and add
 * it to vec1. Optional values v1 and v2 are scalars that multiply vec1 and vec2 respectively.
 *
 * In other words,
 * res = (beta * vec1) + alpha * (mat * vec2)
 *
 * Sizes must respect the matrix-multiplication operation: if mat is a n × m matrix,
 * vec2 must be vector of size m and vec1 must be a vector of size n.
 */
override def addmv(beta: T, vec1: Tensor[T], alpha: T, mat: Tensor[T],
  vec2: Tensor[T]): Tensor[T] = throw new UnsupportedOperationException(errorString)

/** res = beta * res + alpha * (mat * vec2) */
override def addmv(beta: T, alpha: T, mat: Tensor[T], vec2: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/** res = res + alpha * (mat * vec2) */
override def addmv(alpha: T, mat: Tensor[T], vec2: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/** res = res + (mat * vec2) */
override def mv(mat: Tensor[T], vec2: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/**
 * Perform a batch matrix matrix multiplication of matrices and stored in batch1 and batch2
 * with batch add. batch1 and batch2 must be 3D Tensors each containing the same number of
 * matrices. If batch1 is a b × n × m Tensor, batch2 a b × m × p Tensor, res will be a
 * b × n × p Tensor.
 *
 * In other words,
 * res_i = (beta * M_i) + (alpha * batch1_i * batch2_i)
 */
override def baddbmm(beta: T, M: Tensor[T], alpha: T, batch1: Tensor[T],
  batch2: Tensor[T]): Tensor[T] = throw new UnsupportedOperationException(errorString)

/** res_i = (beta * res_i) + (alpha * batch1_i * batch2_i) */
override def baddbmm(beta: T, alpha: T, batch1: Tensor[T], batch2: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/** res_i = res_i + (alpha * batch1_i * batch2_i) */
override def baddbmm(alpha: T, batch1: Tensor[T], batch2: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/** res_i = res_i + batch1_i * batch2_i */
override def bmm(batch1: Tensor[T], batch2: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/**
 * Replaces all elements in-place with the elements of y raised to the power of n
 *
 * @param y base tensor
 * @param n exponent
 * @return current tensor reference
 */
override def pow(y: Tensor[T], n: T): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/** Raises every element of this tensor to the power of n, in place. */
override def pow(n: T): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/** Squares every element of this tensor, in place. */
override def square(): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/**
 * Get the top k values and their indices (smallest when increase is true).
 *
 * @param result result buffer
 * @param indices indices buffer
 * @param k number of values to select
 * @param dim dimension, default is the last dimension
 * @param increase sort order, set it to true if you want to get the smallest top k values
 * @param sortedResult whether the returned values are sorted
 * @return the selected values and their indices
 */
override def topk(k: Int, dim: Int, increase: Boolean, result: Tensor[T],
  indices: Tensor[T], sortedResult: Boolean = true): (Tensor[T], Tensor[T]) =
  throw new UnsupportedOperationException(errorString)
/**
 * Replaces all elements in-place with the natural logarithm of the elements of y
 *
 * @param y source tensor
 * @return current tensor reference
 */
override def log(y: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/** Replaces all elements in-place with exp of the elements of y. */
override def exp(y: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/** Replaces all elements in-place with the square root of the elements of y. */
override def sqrt(y: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/** Replaces all elements in-place with log(1 + y) of the elements of y. */
override def log1p(y: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/** In-place natural logarithm of this tensor's elements. */
override def log(): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/** In-place exponential of this tensor's elements. */
override def exp(): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/** In-place log(1 + x) of this tensor's elements. */
override def log1p(): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/** Replaces all elements in-place with the absolute value of the elements of x. */
override def abs(x: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/**
 * returns the p-norms of the Tensor x computed over the dimension dim.
 *
 * @param y result buffer
 * @param value the order of the norm
 * @param dim dimension over which the norm is computed
 * @return result buffer
 */
override def norm(y: Tensor[T], value: Int, dim: Int): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/**
 * Implements > operator comparing each element in x with y
 *
 * @param x left operand
 * @param y right operand
 * @return current tensor reference
 */
override def gt(x: Tensor[T], y: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/**
 * Implements < operator comparing each element in x with y
 *
 * @param x left operand
 * @param y right operand
 * @return current tensor reference
 */
override def lt(x: Tensor[T], y: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/**
 * Implements <= operator comparing each element in x with y
 *
 * @param x left operand
 * @param y right operand
 * @return current tensor reference
 */
override def le(x: Tensor[T], y: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/**
 * Implements == operator comparing each element in x with the scalar y
 *
 * @param x tensor operand
 * @param y scalar to compare against
 * @return current tensor reference
 */
override def eq(x: Tensor[T], y: T): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/**
 * Fills the masked elements of itself with value val
 *
 * @param mask mask tensor
 * @param e fill value
 * @return current tensor reference
 */
override def maskedFill(mask: Tensor[T], e: T): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/**
 * Copies the elements of tensor into mask locations of itself.
 *
 * @param mask mask tensor
 * @param y source tensor
 * @return current tensor reference
 */
override def maskedCopy(mask: Tensor[T], y: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/**
 * Returns a new Tensor which contains all elements aligned to a 1 in the corresponding mask.
 *
 * @param mask mask tensor
 * @param y source tensor
 * @return current tensor reference
 */
override def maskedSelect(mask: Tensor[T], y: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/**
 * returns the sum of the n-norms on the Tensor x
 *
 * @param value the n-norms
 * @return scalar norm value
 */
override def norm(value: Int): T =
  throw new UnsupportedOperationException(errorString)
/**
 * returns a new Tensor with the sign (+/- 1 or 0) of the elements of x.
 *
 * @return sign tensor
 */
override def sign(): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/**
 * Implements >= operator comparing each element in x with value
 *
 * @param x tensor operand
 * @param value scalar to compare against
 * @return current tensor reference
 */
override def ge(x: Tensor[T], value: Double): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/**
 * Accumulate the elements of tensor into the original tensor by adding to the indices
 * in the order given in index. The shape of tensor must exactly match the elements indexed
 * or an error will be thrown.
 *
 * @param dim dimension along which to index
 * @param index indices to accumulate into
 * @param y tensor whose elements are accumulated
 * @return current tensor reference
 */
override def indexAdd(dim: Int, index: Tensor[T], y: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/**
 * NOTE(review): the previous doc here was a copy of indexAdd's. Per the base
 * Tensor contract this should select elements along dim using the given
 * index — confirm against Tensor#index before relying on this description.
 *
 * @param dim dimension along which to index
 * @param index indices to select
 * @param y source tensor
 * @return current tensor reference
 */
override def index(dim: Int, index: Tensor[T], y: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/**
 * stores the element-wise maximum of x and y in x.
 * x.cmax(y) = max(x, y)
 *
 * @param y tensor
 * @return current tensor
 */
override def cmax(y: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/**
 * stores the element-wise minimum of x and y in x.
 * x.cmin(y) = min(x, y)
 *
 * @param y tensor
 * @return current tensor
 */
override def cmin(y: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/**
 * stores the element-wise maximum of x and y in z.
 * z.cmax(x, y) means z = max(x, y)
 *
 * @param x tensor
 * @param y tensor
 */
override def cmax(x: Tensor[T], y: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/**
 * stores the element-wise minimum of x and y in z.
 * z.cmin(x, y) means z = min(x, y)
 *
 * @param x tensor
 * @param y tensor
 */
override def cmin(x: Tensor[T], y: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)

/**
 * resize this tensor size to floor((xmax - xmin) / step) + 1 and set values from
 * xmin to xmax with step (default to 1).
 *
 * @param xmin lower bound (inclusive)
 * @param xmax upper bound (inclusive)
 * @param step increment between consecutive values
 * @return this tensor
 */
override def range(xmin: Double, xmax: Double, step: Int): Tensor[T] =
  throw new UnsupportedOperationException(errorString)
// Conversion / reshaping / scalar accessors — all unsupported on quantized tensors.
override def toTensor[D](implicit ev: TensorNumeric[D]): Tensor[D] =
  throw new UnsupportedOperationException(errorString)
override def tanh(): Tensor[T] = throw new UnsupportedOperationException(errorString)
override def tanh(y: Tensor[T]): Tensor[T] = throw new UnsupportedOperationException(errorString)
override def resize(sizes: Array[Int], strides: Array[Int]): this.type =
  throw new UnsupportedOperationException(errorString)
override def resize(size1: Int): this.type = throw new UnsupportedOperationException(errorString)
override def resize(size1: Int, size2: Int): this.type =
  throw new UnsupportedOperationException(errorString)
override def resize(size1: Int, size2: Int, size3: Int): this.type =
  throw new UnsupportedOperationException(errorString)
override def resize(size1: Int, size2: Int, size3: Int, size4: Int): this.type =
  throw new UnsupportedOperationException(errorString)
override def resize(size1: Int, size2: Int, size3: Int, size4: Int, size5: Int): this.type =
  throw new UnsupportedOperationException(errorString)
override def isEmpty: Boolean =
  throw new UnsupportedOperationException(errorString)
override def isScalar: Boolean =
  throw new UnsupportedOperationException(errorString)
override def value(): T =
  throw new UnsupportedOperationException(errorString)
override def setValue(value: T): this.type =
  throw new UnsupportedOperationException(errorString)
override def zipWith[A: ClassTag, B: ClassTag](t1: Tensor[A], t2: Tensor[B],
  func: (A, B) => T): Tensor[T] = throw new UnsupportedOperationException(errorString)
override def forceFill(v: Any): Tensor[T] = throw new UnsupportedOperationException(errorString)
override def emptyInstance(): Tensor[T] = throw new UnsupportedOperationException(errorString)
override def applyFun[A: ClassTag](t: Tensor[A], func: (A) => T): Tensor[T] =
  throw new UnsupportedOperationException(errorString)
override def cast[D: ClassTag](castTensor: Tensor[D])(implicit ev: TensorNumeric[D]): Tensor[D] =
  throw new UnsupportedOperationException(errorString)
// Element-wise math — also unsupported.
override def div(y: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)
override def floor(y: Tensor[T]): Tensor[T] = throw new UnsupportedOperationException(errorString)
override def floor(): Tensor[T] = throw new UnsupportedOperationException(errorString)
override def ceil(): Tensor[T] = throw new UnsupportedOperationException(errorString)
override def negative(x: Tensor[T]): Tensor[T] =
  throw new UnsupportedOperationException(errorString)
override def inv(): Tensor[T] =
  throw new UnsupportedOperationException(errorString)
override def reduce(dim: Int, result: Tensor[T], reducer: (T, T) => T): Tensor[T] =
  throw new UnsupportedOperationException(errorString)
override def toArray(): Array[T] =
  throw new UnsupportedOperationException(errorString)
override def erf(): Tensor[T] =
  throw new UnsupportedOperationException(errorString)
override def erfc(): Tensor[T] =
  throw new UnsupportedOperationException(errorString)
override def logGamma(): Tensor[T] =
  throw new UnsupportedOperationException(errorString)
override def digamma(): Tensor[T] =
  throw new UnsupportedOperationException(errorString)
override def clamp(minValue: Double, maxValue: Double): Tensor[T] =
  throw new UnsupportedOperationException(errorString)
override def sumSquare(): T =
  throw new UnsupportedOperationException(errorString)
}
| wzhongyuan/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/tensor/QuantizedTensorUnsupported.scala | Scala | apache-2.0 | 47,033 |
package com.github.lstephen.ootp.ai.value
import com.github.lstephen.ootp.ai.player.Player
import com.github.lstephen.ootp.ai.player.ratings.Position
import com.github.lstephen.ootp.ai.regression.Predictor
import com.github.lstephen.ootp.ai.score._
import com.github.lstephen.ootp.ai.selection.lineup.PlayerDefenseScore
import collection.JavaConversions._
/**
 * Mixin that fills an [[Ability]]'s batting and defense components from
 * future (projected) predictions for a hitter.
 */
trait BatterFutureAbility { this: Ability =>
  // Projected future overall batting score for this player.
  override val batting = Some(predictor.predictFutureBatting(player).overall)
  // Defensive score of this player at the evaluated position.
  override val defense = Some(new PlayerDefenseScore(player, position).score)
}
/**
 * Mixin that fills an [[Ability]]'s pitching component from future (projected)
 * predictions, scaled by an endurance factor.
 */
trait PitcherFutureAbility { this: Ability =>
  // NOTE(review): this match only covers MIDDLE_RELIEVER and STARTING_PITCHER;
  // any other position throws a MatchError — confirm callers never pass one.
  val endurance = position match {
    case Position.MIDDLE_RELIEVER => 0.865
    case Position.STARTING_PITCHER => {
      // Cubic falloff: an endurance rating of 10 gives 1.0, lower ratings
      // shrink the factor toward 0.
      val end = player.getPitchingRatings.getVsRight.getEndurance;
      (1000.0 - Math.pow(10 - end, 3)) / 1000.0;
    }
  }
  // Projected future overall pitching score, scaled by endurance.
  override val pitching = Some(
    endurance *: predictor.predictFuturePitching(player).overall)
}
/**
 * Factory for future (projected) [[Ability]] values.
 *
 * Young players (age 28 or below) evaluated at a matching position get the
 * projection mixin for their role; everyone else gets a plain [[Ability]].
 */
object FutureAbility {

  /** Future ability of player p at position pos. */
  def apply(p: Player, pos: Position)(implicit ps: Predictor): Ability = {
    val youngEnough = p.getAge <= 28
    if (youngEnough && p.isHitter && pos.isHitting)
      new Ability(p, pos) with BatterFutureAbility
    else if (youngEnough && p.isPitcher && pos.isPitching)
      new Ability(p, pos) with PitcherFutureAbility
    else
      new Ability(p, pos)
  }

  /** Best future ability of player p across all hitting and pitching positions. */
  def apply(p: Player)(implicit ps: Predictor): Ability =
    (Position.hitting ++ Position.pitching).map(FutureAbility(p, _)).max
}
/**
 * Component score estimating a player's future value at a given position.
 *
 * Combines projected ability with adjustments versus replacement level, the
 * positional maximum, and the age curve. Players older than 28 get no
 * future-looking adjustments.
 */
class FutureValue(val player: Player, val position: Position)(
    implicit val predictor: Predictor)
    extends ComponentScore {

  // Projected future ability for this player at this position.
  val ability = FutureAbility(player, position)

  // Age cutoff shared by all future-looking adjustments.
  private def youngEnough: Boolean = player.getAge <= 28

  // Average of the vs-ideal and vs-average replacement-level gaps.
  val vsReplacement =
    if (youngEnough) {
      val levels = ReplacementLevels.getForIdeal
      Some(List(levels.get(ability), levels.getVsAverage(ability)).average)
    } else
      None

  // Gap to the positional maximum; only kept when strictly positive.
  val vsMax =
    if (youngEnough) {
      val gap =
        List(MaxLevels.getVsIdeal(ability), MaxLevels.getVsAverage(ability)).average
      if (gap > Score.zero) Some(gap) else None
    } else
      None

  // Current ability relative to the three-year skill-by-age average for the
  // player's role; None when no average is available or the role mismatches.
  val vsAge: Option[Score] =
    if (!youngEnough) None
    else {
      val hittingAvg =
        SkillByAge.getInstance.getHitting.getThreeYearAverage(player.getAge)
      val pitchingAvg =
        SkillByAge.getInstance.getPitching.getThreeYearAverage(player.getAge)
      if (player.isHitter && position.isHitting && hittingAvg.isPresent)
        NowAbility(player, position).batting.map(_ - Score(hittingAvg.getAsDouble))
      else if (player.isPitcher && position.isPitching && pitchingAvg.isPresent)
        NowAbility(player, position).pitching.map(_ - Score(pitchingAvg.getAsDouble))
      else
        None
    }

  def components = ability.components :+ vsAge :+ vsReplacement :+ vsMax

  /** Fixed-width, human-readable rendering of all components and the total. */
  def format: String = {
    val abbrev = if (score.isPositive) position.getAbbreviation else ""
    val cells = components.map(_.map(s => f"${s.toLong}%3d").getOrElse("    "))
    cells.mkString(f"${abbrev}%2s : ", " ", f" : ${score.toLong}%3d")
  }
}
/** Factory for [[FutureValue]], mirroring [[FutureAbility]]'s two entry points. */
object FutureValue {
  /** Future value of player p at position pos. */
  def apply(p: Player, pos: Position)(implicit ps: Predictor) =
    new FutureValue(p, pos)
  /** Best future value of player p across all hitting and pitching positions. */
  def apply(p: Player)(implicit ps: Predictor): FutureValue =
    (Position.hitting ++ Position.pitching).map(FutureValue(p, _)).max
}
| lstephen/ootp-ai | src/main/scala/com/github/lstephen/ootp/ai/value/FutureValue.scala | Scala | apache-2.0 | 3,456 |
/*
* Copyright (c) 2013 Daniel Krzywicki <daniel.krzywicki@agh.edu.pl>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package pl.edu.agh.scalamas.app
import akka.actor.ActorSystem
import com.typesafe.config.{Config, ConfigFactory}
import pl.edu.agh.scalamas.random.ConcurrentRandomGeneratorComponent
import pl.edu.agh.scalamas.stats.{StatsComponent, ConcurrentStatsFactory}
/**
* Application stack for running concurrent applications.
*
* Provides a concurrent agent runtime, stats factory and random generators.
*
* This stacks still needs to be mixed-in with an Environment strategy to use fine or coarse-grained agent concurrency.
*/
class ConcurrentStack(name: String)
  extends ConcurrentAgentRuntimeComponent
  with ConcurrentStatsFactory
  with ConcurrentRandomGeneratorComponent
  with ConcurrentRunner {
  // Cake-pattern self-type: the concrete app must still mix in an
  // environment strategy and a stats component.
  this: EnvironmentStrategy with StatsComponent =>

  // Runtime backing the stack: default Typesafe config plus an actor system
  // named after this stack.
  val agentRuntime = new ConcurrentAgentRuntime {
    val config: Config = ConfigFactory.load()
    val system: ActorSystem = ActorSystem(name)
  }
} | eleaar/scala-mas | core/src/main/scala/pl/edu/agh/scalamas/app/ConcurrentStack.scala | Scala | mit | 2,057 |
package xyz.hyperreal.cramsite
import akka.actor.Actor
import spray.routing._
import spray.http._
import spray.json._
import spray.json.DefaultJsonProtocol._
import spray.httpx.SprayJsonSupport._
import MediaTypes._
import shapeless._
import in.azeemarshad.common.sessionutils.SessionDirectives
import concurrent.duration._
//import concurrent.ExecutionContext.Implicits.global
import util.{Success, Failure}
import models._
/**
 * Spray HTTP service actor for the cram site: serves static resources, the
 * application pages, and the JSON API, with cookie-session based users.
 *
 * NOTE(review): route alternatives are order-sensitive; do not reorder the
 * `~`-chained branches below.
 */
class CramSiteServiceActor extends Actor with HttpService with SessionDirectives {
  def actorRefFactory = context
  import context.dispatcher
  def receive = runRoute(route)
  // Resolves the session cookie (if any) to its stored user.
  def optionalUser: Directive[Option[dao.User] :: HNil] = optionalSession hflatMap {
    case None :: HNil => provide( None )
    case Some( s ) :: HNil => provide( await(dao.Users.find(s.data("id").toInt)) )
  }
  // Creates a fresh guest user (with a private root folder) and starts a session for it.
  def guest: Directive[dao.User :: HNil] = {
    val priv = await( dao.Files.create( dao.Users.count toString, "", Some(privateid), false, None, None ) )
    val pid = priv.id.get
    val u = await( dao.Users.create(None, None, None, pid, GUEST) )
    val uid = u.id.get.toString
    setSession( "id" -> uid ) & provide( u )
  }
  // Existing session user, or a newly created guest.
  def user: Directive[dao.User :: HNil] = optionalUser hflatMap {
    case None :: HNil => guest
    case Some( u ) :: HNil => provide( u )
  }
  // Like `user`, but rejects instead of creating a guest.
  def requireUser: Directive[dao.User :: HNil] = optionalUser hflatMap {
    case None :: HNil => reject
    case Some( u ) :: HNil => provide( u )
  }
  // def admin: Directive[dao.Blog :: models.User :: HNil] = (blog & session) hflatMap {
  // case b :: s :: HNil =>
  // Queries.findUser( s.data("id").toInt ) match {
  // case Some( u ) if u.roles.exists(r => r.blogid == b.id.get && r.role == "admin") => hprovide( b :: u :: HNil )
  // case _ => reject( AuthorizationFailedRejection )
  // }
  // }
  //
  // robots.txt request logging
  //
  val route = (get & pathPrefixTest( "robots.txt" ) & clientIP & unmatchedPath) { (ip, path) =>
  Application.logVisit( ip, path toString, None, None )
  reject } ~
  //
  // resource renaming routes (these will mostly be removed as soon as possible)
  //
  pathPrefix("sass") {
  getFromResourceDirectory("resources/public") } ~
  (pathPrefix("js") | pathPrefix("css")) {
  getFromResourceDirectory("public") } ~
  pathSuffixTest( """.*(?:\\.(?:html|png|ico|txt))"""r ) { _ =>
  getFromResourceDirectory( "public" ) } ~
  pathPrefix("coffee") {
  getFromResourceDirectory("public/js") } ~
  pathPrefix("webjars") {
  getFromResourceDirectory("META-INF/resources/webjars") } ~
  //
  // application request logging (ignores admin and api requests)
  //
  (get & pathPrefixTest( !("api"|"setup-admin"|"admin") ) & clientIP & unmatchedPath & optionalHeaderValueByName( "Referer" ) & optionalUser) {
  (ip, path, referrer, user) =>
  Application.logVisit( ip, path toString, referrer, user )
  reject } ~
  //
  // application routes
  //
  //hostName {h => complete(h)} ~
  (get & pathSingleSlash & user) {
  u => complete( Application.index(u) ) } ~
  (get & path( "image"/IntNumber )) {
  img => complete( Application.image(img) ) } ~
  path( "login" ) {
  (get & user) { u =>
  if (u.status != GUEST)
  redirect( "/", StatusCodes.SeeOther )
  else
  complete( Application.login ) } ~
  (post & formFields( 'email, 'password, 'rememberme ? "no" )) {
  (email, password, rememberme) => Application.authenticate( email, password ) } } ~
  (get & path( "register" ) & user) {
  u =>
  if (u.status != GUEST)
  redirect( "/", StatusCodes.SeeOther )
  else
  complete( Application.register ) } ~
  // (get & path( "admin" ) & admin) {
  // (b, _) => complete( Views.admin(b) ) } ~
  // (post & path( "post" ) & admin & formFields( 'category.as[Int], 'headline, 'text )) {
  // (b, u, category, headline, text) => complete( Application.post(b, u, category, headline, text) ) } ~
  (get & path( "logout" ) & session) {
  _ => clearSession & redirect( "/", StatusCodes.SeeOther ) } ~
  //
  // API routes (JSON, versioned under /api/v1)
  //
  pathPrefix( "api"/"v1" ) {
  (get & path("files")) {
  complete( API.filesUnderRoot ) } ~
  (get & path("files"/IntNumber) & session) { (parentid, _) =>
  complete( API.filesUnder(parentid) ) } ~
  (post & path("files") & parameters("parentid".as[Int], "content".as[Boolean]) & entity(as[FileContent]) & session) { (parentid, _, content, _) =>
  complete( API.filesPostCreate(parentid, content) ) } ~
  (post & path("files") & parameter("parentid".as[Int]) & entity(as[FileInfo]) & session) { (parentid, info, _) =>
  complete( API.filesPostCreate(parentid, info) ) } ~
  (post & path("files"/IntNumber) & entity(as[FileInfo]) & session) { (id, info, _) =>
  complete( API.filesPost(id, info) ) } ~
  (post & path("pairs"/IntNumber) & entity(as[PairJson]) & session) { (id, pair, _) =>
  complete( API.pairsPost(id, pair) ) } ~
  (delete & path("pairs"/IntNumber) & session) { (id, _) =>
  complete( API.pairsDelete(id) ) } ~
  (get & path("lessons"/IntNumber)) { fileid =>
  complete( API.lessonsGet(fileid) ) } ~
  (post & path("lessons"/IntNumber) & entity(as[PairJson]) & session) { (fileid, pair, _) =>
  complete( API.lessonsPost(fileid, pair) ) } ~
  (get & path("tallies"/IntNumber/IntNumber)) { (fileid, userid) =>
  complete( API.talliesGet(fileid, userid) ) } ~
  (post & path("tallies"/IntNumber/IntNumber) & entity(as[TallyUpdate])) { (userid, pairid, update) =>
  complete( API.talliesPost(userid, pairid, update) ) } ~
  (post & path("folders") & parameter("parentid".as[Int]) & entity(as[FileInfo]) & session) { (parentid, info, _) =>
  complete( API.foldersPostCreate(parentid, info) ) } ~
  (post & path("favorites") & entity(as[FavoriteInfo]) & session) { (fav, _) =>
  complete( API.favoritesPost(fav) ) } ~
  (get & path("favorites"/IntNumber) & session) { (userid, _) =>
  complete( API.favoritesGet(userid) ) } ~
  // (post & path("private") & entity(as[FileInfo]) & session) { (f, _) =>
  // complete( API.privatePost(f) ) } ~
  // (get & path("private"/IntNumber) & session) { (userid, _) =>
  // complete( API.privateGet(userid) ) } ~
  (get & path("users"/"exists") & parameter("name")) {
  name => complete( API.usersExistsName(name) ) } ~
  (get & path("users"/"exists") & parameter("email")) {
  email => complete( API.usersExistsEmail(email) ) } ~
  // (get & path( "visits"/"count" ) & admin) {
  // (b, _) => complete( API.visitsCount(b) ) } ~
  // (get & path( "visits" ) & admin) {
  // (b, _) => complete( API.visits(b) ) } ~
  // (get & path("users"/IntNumber)) {
  // userid => complete( API.usersGet(userid) ) } ~
  (post & path("users") & detach(dispatcher) & entity(as[UserJson]) & user) {
  (u, g) => API.usersPost( u, g ) } ~
  // (get & path("users"/Segment)) {
  // email => complete( API.users(email) ) } ~
  (get & path("users")) {
  complete( API.usersGet ) }
  }
} | edadma/cram-site | src/main/scala/CramSiteServiceActor.scala | Scala | mit | 6,779 |
package org.modelfun.paths
/**
 * Arbitrary network of connected nodes.
 */
trait Path {
  /** Root nodes of this path network. */
  def roots: List[PathNode]
}
| zzorn/modelfun | src/main/scala/org/modelfun/paths/Path.scala | Scala | lgpl-3.0 | 120 |
package spire
package math
import scala.math.{ScalaNumber, ScalaNumericConversions}
import scala.collection.compat.immutable.LazyList
import scala.collection.compat.immutable.LazyList.#::
import spire.algebra.{Field, Trig, TruncatedDivisionCRing}
import spire.syntax.nroot._
import spire.util.Opt
sealed trait Real extends ScalaNumber with ScalaNumericConversions { x =>

  // Computable real: `apply(p)` yields a dyadic approximation n such that the
  // value is roughly n / 2^p (p bits after the binary point). Exact rationals
  // are represented by the Exact case; everything else is computed on demand.

  import Real.{roundUp, Exact}

  /** Dyadic approximation of this value at p bits of precision. */
  def apply(p: Int): SafeLong

  /** Rational approximation of this value at p bits of precision. */
  def toRational(p: Int): Rational = this match {
    case Exact(n) => n
    case _ => Rational(x(p), SafeLong.two.pow(p))
  }

  /** Rational approximation at the default precision (Real.bits). */
  def toRational: Rational = toRational(Real.bits)

  // ugh scala.math
  def doubleValue(): Double = toRational.toDouble
  def floatValue(): Float = toRational.toFloat
  def intValue(): Int = toRational.toInt
  def longValue(): Long = toRational.toLong
  def underlying(): Object = this

  // Validity checks go through the default-precision rational approximation.
  override def isValidChar: Boolean = {
    val r = toRational
    r.isWhole && r.isValidChar
  }
  override def isValidByte: Boolean = {
    val r = toRational
    r.isWhole && r.isValidByte
  }
  override def isValidShort: Boolean = {
    val r = toRational
    r.isWhole && r.isValidShort
  }
  override def isValidInt: Boolean = {
    val r = toRational
    r.isWhole && r.isValidInt
  }
  def isValidLong: Boolean = {
    val r = toRational
    r.isWhole && r.isValidLong
  }

  // NOTE(review): hashCode/equals are approximate for inexact values — they
  // compare default-precision approximations, not the exact real numbers.
  override def hashCode(): Int = toRational.hashCode

  override def equals(y: Any): Boolean = y match {
    case y: Real => this === y
    case y => toRational.equals(y)
  }

  def ===(y: Real): Boolean =
    (x compare y) == 0

  def =!=(y: Real): Boolean =
    !(this === y)

  // For inexact values the comparison inspects the sign of the difference at
  // default precision; it cannot prove exact equality of two inexact reals.
  def compare(y: Real): Int = (x, y) match {
    case (Exact(nx), Exact(ny)) => nx compare ny
    case _ => (x - y).signum
  }

  def min(y: Real): Real = (x, y) match {
    case (Exact(nx), Exact(ny)) => Exact(nx min ny)
    case _ => Real(p => x(p) min y(p))
  }

  def max(y: Real): Real = (x, y) match {
    case (Exact(nx), Exact(ny)) => Exact(nx max ny)
    case _ => Real(p => x(p) max y(p))
  }

  def abs(): Real = this match {
    case Exact(n) => Exact(n.abs)
    case _ => Real(p => x(p).abs)
  }

  def signum(): Int = this match {
    case Exact(n) => n.signum
    case _ => x(Real.bits).signum
  }

  def unary_- : Real = this match {
    case Exact(n) => Exact(-n)
    case _ => Real(p => -x(p))
  }

  // Multiplicative inverse. The precision shift 2s+2 compensates for the
  // magnitude of the denominator found at scale s.
  // NOTE(review): findNonzero diverges if this value is exactly zero.
  def reciprocal(): Real = {
    def findNonzero(i: Int): Int =
      if (SafeLong.three <= x(i).abs) i else findNonzero(i + 1)

    this match {
      case Exact(n) => Exact(n.reciprocal)
      case _ => Real({p =>
        val s = findNonzero(0)
        roundUp(Rational(SafeLong.two.pow(2 * p + 2 * s + 2), x(p + 2 * s + 2)))
      })
    }
  }

  // Arithmetic: exact operands stay exact; inexact results request extra
  // guard bits from the operands and round back down.
  def +(y: Real): Real = (x, y) match {
    case (Exact(nx), Exact(ny)) => Exact(nx + ny)
    case (Exact(Rational.zero), _) => y
    case (_, Exact(Rational.zero)) => x
    case _ => Real(p => roundUp(Rational(x(p + 2) + y(p + 2), 4)))
  }

  def -(y: Real): Real = x + (-y)

  def *(y: Real): Real = (x, y) match {
    case (Exact(nx), Exact(ny)) => Exact(nx * ny)
    case (Exact(Rational.zero), _) => Real.zero
    case (_, Exact(Rational.zero)) => Real.zero
    case (Exact(Rational.one), _) => y
    case (_, Exact(Rational.one)) => x
    case _ => Real({p =>
      // Extra precision proportional to each operand's magnitude.
      val x0 = x(0).abs + 2
      val y0 = y(0).abs + 2
      val sx = Real.sizeInBase(x0, 2) + 3
      val sy = Real.sizeInBase(y0, 2) + 3
      roundUp(Rational(x(p + sy) * y(p + sx), SafeLong.two.pow(p + sx + sy)))
    })
  }

  def **(k: Int): Real = pow(k)

  // Integer power by binary exponentiation; negative k goes through the
  // reciprocal.
  def pow(k: Int): Real = {
    @tailrec
    def loop(b: Real, k: Int, extra: Real): Real =
      if (k == 1)
        b * extra
      else
        loop(b * b, k >>> 1, if ((k & 1) == 1) b * extra else extra)

    this match {
      case Exact(n) =>
        Exact(n.pow(k))
      case _ =>
        if (k < 0) {
          reciprocal.pow(-k)
        } else if (k == 0) {
          Real.one
        } else if (k == 1) {
          this
        } else {
          loop(x, k - 1, x)
        }
    }
  }

  def /(y: Real): Real = x * y.reciprocal

  // Truncated modulus: remainder of truncated division (quotient rounded
  // toward zero, decided from a 2-bit approximation of the quotient's sign).
  def tmod(y: Real): Real = (x, y) match {
    case (Exact(nx), Exact(ny)) => Exact(nx tmod ny)
    case _ => Real({ p =>
      val d = x / y
      val s = d(2)
      val d2 = if (s >= 0) d.floor else d.ceil
      (x - d2 * y)(p)
    })
  }

  // Truncated quotient (rounded toward zero).
  def tquot(y: Real): Real = (x, y) match {
    case (Exact(nx), Exact(ny)) => Exact(nx tquot ny)
    case _ => Real({ p =>
      val d = x / y
      val s = d(2)
      val d2 = if (s >= 0) d.floor else d.ceil
      d2(p)
    })
  }

  /* TODO: what to do with this definition of gcd/lcm?
  def gcd(y: Real): Real = (x, y) match {
    case (Exact(nx), Exact(ny)) => Exact(nx gcd ny)
    case _ => Real({ p =>
      val g = x.toRational(p) gcd y.toRational(p)
      roundUp(g * SafeLong.two.pow(p))
    })
  }
  def lcm(y: Real): Real = (x, y) match {
    case (Exact(nx), Exact(ny)) => Exact(nx lcm ny)
    case _ => Real({ p =>
      val g = x.toRational(p) lcm y.toRational(p)
      roundUp(g * SafeLong.two.pow(p))
    })
  }
  */

  // ceil/floor/round work on the dyadic approximation: t = 2^p is one unit,
  // m = n % t is the fractional residue at precision p.
  def ceil(): Real = x match {
    case Exact(n) => Exact(n.ceil)
    case _ => Real({ p =>
      val n = x(p)
      val t = SafeLong.two.pow(p)
      val m = n % t
      if (m == 0) n
      else if (n.signum >= 0) n + t - m
      else n - m
    })
  }

  def floor(): Real = x match {
    case Exact(n) => Exact(n.floor)
    case _ => Real({ p =>
      val n = x(p)
      val t = SafeLong.two.pow(p)
      val m = n % t
      if (n.signum >= 0) n - m else n - t - m
    })
  }

  def round(): Real = x match {
    case Exact(n) => Exact(n.round)
    case _ => Real({ p =>
      val n = x(p)
      val t = SafeLong.two.pow(p)
      val h = t / 2
      val m = n % t
      if (m < h) n - m else n - m + t
    })
  }

  // NOTE(review): for inexact values this checks wholeness of the
  // default-precision approximation, so it is a semi-decision only.
  def isWhole(): Boolean = x match {
    case Exact(n) =>
      n.isWhole
    case _ =>
      val n = x(Real.bits)
      val t = SafeLong.two.pow(Real.bits)
      (n % t) == 0
  }

  // Roots: double the requested precision (or multiply by k) before taking
  // the integer root of the dyadic approximation.
  def sqrt(): Real = Real(p => x(p * 2).sqrt)
  def nroot(k: Int): Real =
    if (k >= 0) Real(p => x(p * k).nroot(k))
    else Real(p => x.reciprocal.nroot(math.abs(k))(p))

  // Rational power: clamp the exponent to int-sized numerator/denominator,
  // then combine integer power and integer root.
  def fpow(r: Rational): Real =
    Real({ p =>
      val r2 = r.limitToInt
      val n = r2.numerator
      val d = r2.denominator
      x.pow(n.toInt).nroot(d.toInt)(p)
    })

  // a bit hand-wavy
  def fpow(y: Real): Real = y match {
    case Exact(n) => x.fpow(n)
    case _ => Real({ p =>
      x.fpow(Rational(y(p), SafeLong.two.pow(p)))(p)
    })
  }

  override def toString: String = x match {
    case Exact(n) => n.toString
    case _ => getString(Real.digits)
  }

  def repr: String = x match {
    case Exact(n) => s"Exact(${n.toString})"
    case _ => s"Inexact(${toRational})"
  }

  /**
   * Decimal rendering with d digits after the point; trailing zeros and a
   * dangling decimal point are stripped.
   */
  def getString(d: Int): String = {
    val b = Real.digitsToBits(d)
    val r = Rational(x(b) * SafeLong.ten.pow(d), SafeLong.two.pow(b))
    val m = roundUp(r)
    val (sign, str) = m.signum match {
      case -1 => ("-", m.abs.toString)
      case 0 => ("", "0")
      case 1 => ("", m.toString)
    }
    val i = str.length - d
    val s = if (i > 0) {
      sign + str.substring(0, i) + "." + str.substring(i)
    } else {
      sign + "0." + ("0" * -i) + str
    }
    s.replaceAll("0+$", "").replaceAll("\\\\.$", "")
  }
}
object Real extends RealInstances {
// Common exact constants.
val zero: Real = Exact(Rational.zero)
val one: Real = Exact(Rational.one)
val two: Real = Exact(Rational(2))
val four: Real = Exact(Rational(4))

// Constructs an inexact real from its dyadic-approximation function.
def apply(f: Int => SafeLong): Real = Inexact(f)

// Implicit conversions from exact numeric types.
implicit def apply(n: Int): Real = Exact(Rational(n))
implicit def apply(n: Long): Real = Exact(Rational(n))
implicit def apply(n: BigInt): Real = Exact(Rational(n))
implicit def apply(n: SafeLong): Real = Exact(Rational(n))
implicit def apply(n: Rational): Real = Exact(n)
implicit def apply(n: Double): Real = Exact(Rational(n))
implicit def apply(n: BigDecimal): Real = Exact(Rational(n))

def apply(s: String): Real = Exact(Rational(s))

// Pi via Machin's formula: 16·atan(1/5) − 4·atan(1/239).
lazy val pi: Real =
  Real(16) * atan(Real(Rational(1, 5))) - Real.four * atan(Real(Rational(1, 239)))

// Euler's number e = exp(1).
lazy val e: Real =
  exp(Real.one)

// Golden ratio (1 + √5) / 2.
lazy val phi: Real =
  (Real.one + Real(5).sqrt) / Real.two
// Natural logarithm with range reduction: small arguments go through the
// reciprocal, mid-range uses the direct series (logDr), large arguments are
// scaled down by 2^n and compensated with n·log2.
def log(x: Real): Real = {
  val t = x(2)
  val n = sizeInBase(t, 2) - 3
  if (t < 0) throw new ArithmeticException("log of negative number")
  else if (t < 4) -log(x.reciprocal)
  else if (t < 8) logDr(x)
  else logDr(div2n(x, n)) + Real(n) * log2
}

// Exponential with range reduction: x = n·log2 + s, so exp(x) = 2^n · exp(s)
// with exp(s) computed by the direct series (expDr).
def exp(x: Real): Real = {
  val u = x / log2
  val n = u(0)
  val s = x - Real(n) * log2
  if (!n.isValidInt) throw new ArithmeticException("invalid power in exp")
  else if (n < 0) div2n(expDr(s), -n.toInt)
  else if (n > 0) mul2n(expDr(s), n.toInt)
  else expDr(s)
}
// sin/cos use octant reduction: the argument is reduced modulo π/4 and the
// result reconstructed from sinDr/cosDr of the residue y, using the octant
// index n to pick signs and the √(1/2) half-angle combinations.
def sin(x: Real): Real = {
  val z = x / piBy4
  val s = roundUp(Rational(z(2), 4))
  val y = x - piBy4 * Real(s)
  val m = (s % 8).toInt
  val n = if (m < 0) m + 8 else m
  n match {
    case 0 => sinDr(y)
    case 1 => sqrt1By2 * (cosDr(y) + sinDr(y))
    case 2 => cosDr(y)
    case 3 => sqrt1By2 * (cosDr(y) - sinDr(y))
    case 4 => -sinDr(y)
    case 5 => -sqrt1By2 * (cosDr(y) + sinDr(y))
    case 6 => -cosDr(y)
    case 7 => -sqrt1By2 * (cosDr(y) - sinDr(y))
  }
}

def cos(x: Real): Real = {
  val z = x / piBy4
  val s = roundUp(Rational(z(2), 4))
  val y = x - piBy4 * Real(s)
  val m = (s % 8).toInt
  val n = if (m < 0) m + 8 else m
  n match {
    case 0 => cosDr(y)
    case 1 => sqrt1By2 * (cosDr(y) - sinDr(y))
    case 2 => -sinDr(y)
    case 3 => -sqrt1By2 * (cosDr(y) + sinDr(y))
    case 4 => -cosDr(y)
    case 5 => -sqrt1By2 * (cosDr(y) - sinDr(y))
    case 6 => sinDr(y)
    case 7 => sqrt1By2 * (cosDr(y) + sinDr(y))
  }
}

def tan(x: Real): Real = sin(x) / cos(x)

// Arctangent with argument reduction into the region where the direct series
// (atanDr) converges; large magnitudes go through the reciprocal identity.
def atan(x: Real): Real = {
  val t = x(2)
  val xp1 = x + Real.one
  val xm1 = x - Real.one
  if (t < -5) atanDr(-x.reciprocal) - piBy2
  else if (t == -4) -piBy4 - atanDr(xp1 / xm1)
  else if (t < 4) atanDr(x)
  else if (t == 4) piBy4 + atanDr(xm1 / xp1)
  else piBy2 - atanDr(x.reciprocal)
}
// Quadrant-aware arctangent. Signs of x and y are probed at increasing
// precision until at least one is non-zero.
// NOTE(review): this loop does not terminate if x and y are both exactly zero
// as inexact values; only the (0, 0) exact case reaches the exception below.
def atan2(y: Real, x: Real): Real = Real({ p =>
  var pp = p
  var sx = x(pp).signum
  var sy = y(pp).signum
  // val maxp = p * p
  // while (sx == 0 && sy == 0 && pp < maxp) {
  while (sx == 0 && sy == 0) {
    sx = x(pp).signum
    sy = y(pp).signum
    pp += 1
  }

  if (sx > 0) {
    atan(y / x)(p)
  } else if (sy >= 0 && sx < 0) {
    (atan(y / x) + Real.pi)(p)
  } else if (sy < 0 && sx < 0) {
    (atan(y / x) - Real.pi)(p)
  } else if (sy > 0) {
    (Real.pi / Real.two)(p)
  } else if (sy < 0) {
    (-Real.pi / Real.two)(p)
  } else {
    throw new IllegalArgumentException("atan2(0, 0) is undefined")
    // // ugh
    // Real.zero
    // //sys.error("undefined sx=%s sy=%s" format (sx, sy))
  }
})

// asin via atan of x / sqrt(1 - x²), branch chosen by the sign of x.
def asin(x: Real): Real = {
  val x0 = x(0)
  val s = (Real.one - x * x).sqrt
  x0.signum match {
    case n if n > 0 => (Real.pi / Real.two) - atan(s / x)
    case 0 => atan(x / s)
    case _ => (-Real.pi / Real.two) - atan(s / x)
  }
}

def acos(x: Real): Real = (Real.pi / Real.two) - asin(x)
def sinh(x: Real): Real = {
val y = exp(x)
(y - y.reciprocal) / Real.two
}
def cosh(x: Real): Real = {
val y = exp(x)
(y + y.reciprocal) / Real.two
}
def tanh(x: Real): Real = {
val y = exp(x);
val y2 = y.reciprocal
(y - y2) / (y + y2)
}
def asinh(x: Real): Real = log(x + (x * x + Real.one).sqrt)
def acosh(x: Real): Real = log(x + (x * x - Real.one).sqrt)
def atanh(x: Real): Real = log((Real.one + x) / (Real.one - x)) / Real.two
  // Default number of decimal digits of precision used by consumers of this object.
  def digits: Int = 40
  // The binary precision corresponding to `digits` decimal digits.
  def bits: Int = digitsToBits(digits)
  // Convert a decimal digit count to a binary bit count: each decimal digit
  // needs log2(10) ~ 3.32 bits; 4 extra guard bits are added for safety.
  def digitsToBits(n: Int): Int =
    spire.math.ceil(n * (spire.math.log(10.0) / spire.math.log(2.0))).toInt + 4
def sizeInBase(n: SafeLong, base: Int): Int = {
def loop(n: SafeLong, acc: Int): Int = if (n <= 1) acc + 1 else loop(n / base, acc + 1)
loop(n.abs, 0)
}
def roundUp(r: Rational): SafeLong = SafeLong(r.round.toBigInt)
  // Divide x by 2^n. When the requested precision p is at least n we can
  // simply ask x for p - n bits; otherwise the shift is applied (with
  // rounding) to x's approximation at precision p.
  def div2n(x: Real, n: Int): Real =
    Real(p => if (p >= n) x(p - n) else roundUp(Rational(x(p), SafeLong.two.pow(n))))
  // Multiply x by 2^n by requesting n extra bits of precision from x.
  def mul2n(x: Real, n: Int): Real =
    Real(p => x(p + n))
  lazy val piBy2 = div2n(pi, 1) // pi / 2
  lazy val piBy4 = div2n(pi, 2) // pi / 4
  // natural log of 2: logDrx(1/2) = -log(1 - 1/2) / (1/2) = 2 log 2, halved
  lazy val log2 = div2n(logDrx(Real.two.reciprocal), 1)
  lazy val sqrt1By2 = Real.two.reciprocal.sqrt // 1 / sqrt(2)
def accumulate(total: SafeLong, xs: LazyList[SafeLong], cs: LazyList[Rational]): SafeLong = {
(xs, cs) match {
case (_, Seq()) => total
case (Seq(), _) => sys.error("nooooo")
case (x #:: xs, c #:: cs) =>
val t = roundUp(c * Rational(x))
if (t == 0) total else accumulate(total + t, xs, cs)
}
}
  // Legacy Stream-based variant of `accumulate`, kept only for binary/source
  // compatibility; identical logic to the LazyList version above.
  @deprecated("prefer LazyList instead", "0.17.0")
  def accumulate(total: SafeLong, xs: Stream[SafeLong], cs: Stream[Rational]): SafeLong = {
    import scala.#::
    (xs, cs) match {
      case (_, Stream.Empty) => total
      case (Stream.Empty, _) => sys.error("nooooo")
      case (x #:: xs, c #:: cs) =>
        val t = roundUp(c * Rational(x))
        if (t == 0) total else accumulate(total + t, xs, cs)
    }
  }
  // Evaluate the power series with coefficients `ps` at the point `x`,
  // truncated to `terms(p)` terms when an approximation at precision p is
  // requested. The extra `l2t` guard bits absorb the rounding error
  // introduced by summing that many terms.
  private[spire] def powerSeries(ps: LazyList[Rational], terms: Int => Int, x: Real): Real = {
    Real({p =>
      val t = terms(p)
      val l2t = 2 * sizeInBase(SafeLong(t) + 1, 2) + 6
      val p2 = p + l2t
      val xr = x(p2)
      val xn = SafeLong.two.pow(p2)
      if (xn == 0) sys.error("oh no")
      // multiply a fixed-point accumulator by x (both scaled by 2^p2)
      def g(yn: SafeLong): SafeLong = roundUp(Rational(yn * xr, xn))
      // lazily generate xn * x^0, xn * x^1, ... paired with the first t coefficients
      val num = accumulate(SafeLong.zero, LazyList.iterate(xn)(g), ps.take(t))
      val denom = SafeLong.two.pow(l2t)
      // drop the guard bits to land back at precision p
      roundUp(Rational(num, denom))
    })
  }
private[spire] def accSeq(f: (Rational, SafeLong) => Rational): LazyList[Rational] = {
def loop(r: Rational, n: SafeLong): LazyList[Rational] =
r #:: loop(f(r, n), n + 1)
loop(Rational.one, SafeLong.one)
}
  // Domain-reduced exponential: Taylor series sum x^n / n!, with the
  // coefficient 1/n! built incrementally (r / n at each step).
  def expDr(x: Real): Real =
    powerSeries(accSeq((r, n) => r / n), n => n, x)
  // Domain-reduced logarithm via the substitution y = (x - 1)/x, so that
  // y * logDrx(y) = -y * log(1 - y)/y = log(x).
  // NOTE(review): convergence relies on callers having already reduced x
  // into a suitable range -- confirm at call sites.
  def logDr(x: Real): Real = {
    val y = (x - Real.one) / x
    y * logDrx(y)
  }
  // Series sum_{n>=0} x^n / (n+1), i.e. -log(1 - x) / x, built from the
  // coefficient stream 1, 1/2, 1/3, ...
  def logDrx(x: Real): Real = {
    powerSeries(LazyList.from(1).map(n => Rational(1, n)), _ + 1, x)
  }
  // Domain-reduced sine: x * sum over n of (-1)^n x^(2n) / (2n+1)!,
  // evaluated as a power series in x^2.
  def sinDr(x: Real): Real =
    x * powerSeries(accSeq((r, n) => -r * Rational(1, 2*n*(2*n+1))), n => n, x * x)
  // Domain-reduced cosine: sum over n of (-1)^n x^(2n) / (2n)!,
  // evaluated as a power series in x^2.
  def cosDr(x: Real): Real =
    powerSeries(accSeq((r, n) => -r * Rational(1, 2*n*(2*n-1))), n => n, x * x)
  // Domain-reduced arctangent: rewrites atan(x) as (x / (1+x^2)) times a
  // series in x^2 / (1+x^2), which converges faster than the naive series.
  def atanDr(x: Real): Real = {
    val y = x * x + Real(1)
    (x / y) * atanDrx((x * x) / y)
  }
  // Series used by atanDr, with coefficients accumulated as products of
  // 2n / (2n+1). The term-count function _ * 2 requests twice as many terms
  // as the precision (the commented line shows the previous, slower choice).
  def atanDrx(x: Real): Real =
    //powerSeries(accSeq((r, n) => r * (Rational(2*n, 2*n + 1))), _ + 1, x)
    powerSeries(accSeq((r, n) => r * (Rational(2*n, 2*n + 1))), _ * 2, x)
  // A Real backed by an exact rational: the approximation at precision p is
  // simply round(2^p * n).
  case class Exact(n: Rational) extends Real {
    def apply(p: Int): SafeLong = Real.roundUp(Rational(2).pow(p) * n)
  }
  // A Real defined by an arbitrary approximation function f: precision -> value.
  // The best (highest-precision) approximation computed so far is memoized;
  // requests for fewer bits are answered by scaling the cached value down.
  case class Inexact(f: Int => SafeLong) extends Real {
    @volatile private[spire] var memo: Option[(Int, SafeLong)] = None
    def apply(p: Int): SafeLong = memo match {
      case Some((bits, value)) if bits >= p =>
        // shift the cached higher-precision value down to p bits, with rounding
        Real.roundUp(Rational(value, SafeLong(2).pow(bits - p)))
      case _ =>
        val result = f(p)
        // check-then-act is not atomic: concurrent callers may both compute
        // and one memo wins -- benign, the worst case is duplicated work
        memo = Some((p, result))
        result
    }
  }
}
/**
 * Implicit instances for [[Real]].
 *
 * Both implicits now carry explicit type annotations: implicit definitions
 * with inferred types are deprecated in Scala 2.13 and rejected by Scala 3,
 * and the inferred type is part of the public API either way.
 */
trait RealInstances {
  implicit final val algebra: RealAlgebra = new RealAlgebra
  import NumberTag._
  // Real is exact and unbounded, hence a LargeTag with Exact resolution.
  implicit final val RealTag: NumberTag[Real] = new LargeTag[Real](Exact, Real.zero)
}
// Concrete, serializable instance of RealIsFractional used as the implicit
// algebra for Real; the fixed SerialVersionUID keeps serialized forms stable.
@SerialVersionUID(0L)
class RealAlgebra extends RealIsFractional
/**
 * Type-class instances for [[Real]]: fractional field operations, truncated
 * division, trigonometry and conversions. Every method delegates directly to
 * the corresponding operation on Real or its companion object.
 */
trait RealIsFractional extends Fractional[Real] with TruncatedDivisionCRing[Real] with Trig[Real] with Field.WithDefaultGCD[Real] {
  // --- order / signed ---
  override def abs(x: Real): Real = x.abs
  override def signum(x: Real): Int = x.signum
  override def eqv(x: Real, y: Real): Boolean = x === y
  def compare(x: Real, y: Real): Int = x compare y
  // --- ring / field structure ---
  def zero: Real = Real.zero
  def one: Real = Real.one
  def negate(x: Real): Real = -x
  def plus(x: Real, y: Real): Real = x + y
  override def minus(x: Real, y: Real): Real = x - y
  def times(x: Real, y: Real): Real = x * y
  def toBigIntOpt(x: Real): Opt[BigInt] = if (x.isWhole) Opt(x.toRational.toBigInt) else Opt.empty[BigInt]
  def tquot(x: Real, y: Real): Real = x tquot y
  def tmod(x: Real, y: Real): Real = x tmod y
  override def reciprocal(x: Real): Real = x.reciprocal
  def div(x: Real, y: Real): Real = x / y
  // --- roots and powers ---
  override def sqrt(x: Real): Real = x.sqrt
  def nroot(x: Real, k: Int): Real = x.nroot(k)
  def fpow(x: Real, y: Real): Real = x fpow y
  // --- trig, delegating to the constructive implementations in object Real ---
  def acos(a: Real): Real = Real.acos(a)
  def asin(a: Real): Real = Real.asin(a)
  def atan(a: Real): Real = Real.atan(a)
  def atan2(y: Real, x: Real): Real = Real.atan2(y, x)
  def cos(a: Real): Real = Real.cos(a)
  def cosh(x: Real): Real = Real.cosh(x)
  def e: Real = Real.e
  def exp(x: Real): Real = Real.exp(x)
  // exact arithmetic, so no precision concern in computing exp(x) - 1 directly
  def expm1(x: Real): Real = Real.exp(x) - Real.one
  def log(x: Real): Real = Real.log(x)
  def log1p(x: Real): Real = Real.log(Real.one + x)
  def pi: Real = Real.pi
  def sin(x: Real): Real = Real.sin(x)
  def sinh(x: Real): Real = Real.sinh(x)
  def tan(x: Real): Real = Real.tan(x)
  def tanh(x: Real): Real = Real.tanh(x)
  def toDegrees(a: Real): Real = a / (Real.two * Real.pi) * Real(360)
  def toRadians(a: Real): Real = a / Real(360) * (Real.two * Real.pi)
  // --- rounding ---
  def ceil(x: Real): Real = x.ceil
  def floor(x: Real): Real = x.floor
  def isWhole(x: Real): Boolean = x.isWhole
  def round(x: Real): Real = x.round
  // --- conversions out: all go through the current best rational approximation ---
  def toByte(x: Real): Byte = x.toRational.toByte
  def toInt(x: Real): Int = x.toRational.toInt
  def toShort(x: Real): Short = x.toRational.toShort
  def toLong(x: Real): Long = x.toRational.toLong
  def toFloat(x: Real): Float = x.toRational.toFloat
  def toDouble(x: Real): Double = x.toRational.toDouble
  def toBigInt(x: Real): BigInt = x.toRational.toBigInt
  // DECIMAL64 bounds the BigDecimal expansion of a possibly-irrational value
  def toBigDecimal(x: Real): BigDecimal = x.toRational.toBigDecimal(java.math.MathContext.DECIMAL64)
  def toRational(x: Real): Rational = x.toRational
  def toAlgebraic(x: Real): Algebraic = Algebraic(x.toRational) //FIXME
  def toReal(x: Real): Real = x
  def toNumber(x: Real): Number = Number(x.toRational)
  def toString(x: Real): String = x.toString
  def toType[B](x: Real)(implicit ev: ConvertableTo[B]): B =
    ev.fromReal(x)
  // --- conversions in ---
  def fromByte(n: Byte): Real = Real(n)
  def fromShort(n: Short): Real = Real(n)
  def fromFloat(n: Float): Real = Real(n)
  def fromLong(n: Long): Real = Real(n)
  override def fromBigInt(n: BigInt): Real = Real(n)
  def fromBigDecimal(n: BigDecimal): Real = Real(n)
  def fromRational(n: Rational): Real = Real(n)
  def fromAlgebraic(n: Algebraic): Real = n.evaluateWith[Real]
  def fromReal(n: Real): Real = n
  def fromType[B](b: B)(implicit ev: ConvertableFrom[B]): Real =
    ev.toReal(b)
}
| non/spire | core/src/main/scala/spire/math/Real.scala | Scala | mit | 18,719 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import java.io._
import java.util.concurrent.atomic._
import junit.framework.Assert._
import org.scalatest.junit.JUnitSuite
import org.junit.{After, Before, Test}
import kafka.message._
import kafka.common.{MessageSizeTooLargeException, OffsetOutOfRangeException, MessageSetSizeTooLargeException}
import kafka.utils._
import kafka.server.KafkaConfig
class LogTest extends JUnitSuite {
  // Temporary directory holding the log's segment files; created in setUp,
  // removed in tearDown.
  var logDir: File = null
  // Mock clock starting at t=0 so time-based roll behavior is deterministic.
  val time = new MockTime(0)
  var config: KafkaConfig = null
  // Baseline log configuration; individual tests override fields via copy(...).
  val logConfig = LogConfig()
@Before
def setUp() {
logDir = TestUtils.tempDir()
val props = TestUtils.createBrokerConfig(0, -1)
config = KafkaConfig.fromProps(props)
}
  @After
  def tearDown() {
    // recursively delete the temp log directory created in setUp
    Utils.rm(logDir)
  }
def createEmptyLogs(dir: File, offsets: Int*) {
for(offset <- offsets) {
Log.logFilename(dir, offset).createNewFile()
Log.indexFilename(dir, offset).createNewFile()
}
}
/**
* Tests for time based log roll. This test appends messages then changes the time
* using the mock clock to force the log to roll and checks the number of segments.
*/
@Test
def testTimeBasedLogRoll() {
val set = TestUtils.singleMessageSet("test".getBytes())
// create a log
val log = new Log(logDir,
logConfig.copy(segmentMs = 1 * 60 * 60L),
recoveryPoint = 0L,
scheduler = time.scheduler,
time = time)
assertEquals("Log begins with a single empty segment.", 1, log.numberOfSegments)
time.sleep(log.config.segmentMs + 1)
log.append(set)
assertEquals("Log doesn't roll if doing so creates an empty segment.", 1, log.numberOfSegments)
log.append(set)
assertEquals("Log rolls on this append since time has expired.", 2, log.numberOfSegments)
for(numSegments <- 3 until 5) {
time.sleep(log.config.segmentMs + 1)
log.append(set)
assertEquals("Changing time beyond rollMs and appending should create a new segment.", numSegments, log.numberOfSegments)
}
val numSegments = log.numberOfSegments
time.sleep(log.config.segmentMs + 1)
log.append(new ByteBufferMessageSet())
assertEquals("Appending an empty message set should not roll log even if succient time has passed.", numSegments, log.numberOfSegments)
}
  /**
   * Test for jitter for time based log roll. This test appends messages then changes the time
   * using the mock clock to force the log to roll and checks the number of segments.
   */
  @Test
  def testTimeBasedLogRollJitter() {
    val set = TestUtils.singleMessageSet("test".getBytes())
    val maxJitter = 20 * 60L
    // create a log
    val log = new Log(logDir,
                      logConfig.copy(segmentMs = 1 * 60 * 60L, segmentJitterMs = maxJitter),
                      recoveryPoint = 0L,
                      scheduler = time.scheduler,
                      time = time)
    assertEquals("Log begins with a single empty segment.", 1, log.numberOfSegments)
    log.append(set)
    // advance to segmentMs minus the maximum possible jitter: still before any deadline
    time.sleep(log.config.segmentMs - maxJitter)
    log.append(set)
    assertEquals("Log does not roll on this append because it occurs earlier than max jitter", 1, log.numberOfSegments);
    // cross the actual (jittered) roll deadline by exactly one tick
    time.sleep(maxJitter - log.activeSegment.rollJitterMs + 1)
    log.append(set)
    assertEquals("Log should roll after segmentMs adjusted by random jitter", 2, log.numberOfSegments)
  }
/**
* Test that appending more than the maximum segment size rolls the log
*/
@Test
def testSizeBasedLogRoll() {
val set = TestUtils.singleMessageSet("test".getBytes)
val setSize = set.sizeInBytes
val msgPerSeg = 10
val segmentSize = msgPerSeg * (setSize - 1) // each segment will be 10 messages
// create a log
val log = new Log(logDir, logConfig.copy(segmentSize = segmentSize), recoveryPoint = 0L, time.scheduler, time = time)
assertEquals("There should be exactly 1 segment.", 1, log.numberOfSegments)
// segments expire in size
for (i<- 1 to (msgPerSeg + 1)) {
log.append(set)
}
assertEquals("There should be exactly 2 segments.", 2, log.numberOfSegments)
}
  /**
   * Test that we can open and append to an empty log
   */
  @Test
  def testLoadEmptyLog() {
    // pre-create empty segment/index files at offset 0, then open a Log over them
    createEmptyLogs(logDir, 0)
    val log = new Log(logDir, logConfig, recoveryPoint = 0L, time.scheduler, time = time)
    log.append(TestUtils.singleMessageSet("test".getBytes))
  }
/**
* This test case appends a bunch of messages and checks that we can read them all back using sequential offsets.
*/
@Test
def testAppendAndReadWithSequentialOffsets() {
val log = new Log(logDir, logConfig.copy(segmentSize = 71), recoveryPoint = 0L, time.scheduler, time = time)
val messages = (0 until 100 by 2).map(id => new Message(id.toString.getBytes)).toArray
for(i <- 0 until messages.length)
log.append(new ByteBufferMessageSet(NoCompressionCodec, messages = messages(i)))
for(i <- 0 until messages.length) {
val read = log.read(i, 100, Some(i+1)).messageSet.head
assertEquals("Offset read should match order appended.", i, read.offset)
assertEquals("Message should match appended.", messages(i), read.message)
}
assertEquals("Reading beyond the last message returns nothing.", 0, log.read(messages.length, 100, None).messageSet.size)
}
  /**
   * This test appends a bunch of messages with non-sequential offsets and checks that we can read the correct message
   * from any offset less than the logEndOffset including offsets not appended.
   */
  @Test
  def testAppendAndReadWithNonSequentialOffsets() {
    val log = new Log(logDir, logConfig.copy(segmentSize = 71), recoveryPoint = 0L, time.scheduler, time = time)
    // offsets 0..49 are dense; above 50 they are sparse with gaps of 7
    val messageIds = ((0 until 50) ++ (50 until 200 by 7)).toArray
    val messages = messageIds.map(id => new Message(id.toString.getBytes))
    // now test the case that we give the offsets and use non-sequential offsets
    for(i <- 0 until messages.length)
      log.append(new ByteBufferMessageSet(NoCompressionCodec, new AtomicLong(messageIds(i)), messages = messages(i)), assignOffsets = false)
    for(i <- 50 until messageIds.max) {
      // reading inside a gap should return the first message at or above the requested offset
      val idx = messageIds.indexWhere(_ >= i)
      val read = log.read(i, 100, None).messageSet.head
      assertEquals("Offset read should match message id.", messageIds(idx), read.offset)
      assertEquals("Message should match appended.", messages(idx), read.message)
    }
  }
  /**
   * This test covers an odd case where we have a gap in the offsets that falls at the end of a log segment.
   * Specifically we create a log where the last message in the first segment has offset 0. If we
   * then read offset 1, we should expect this read to come from the second segment, even though the
   * first segment has the greatest lower bound on the offset.
   */
  @Test
  def testReadAtLogGap() {
    val log = new Log(logDir, logConfig.copy(segmentSize = 300), recoveryPoint = 0L, time.scheduler, time = time)
    // keep appending until we have two segments with only a single message in the second segment
    while(log.numberOfSegments == 1)
      log.append(new ByteBufferMessageSet(NoCompressionCodec, messages = new Message("42".getBytes)))
    // now manually truncate off all but one message from the first segment to create a gap in the messages
    log.logSegments.head.truncateTo(1)
    // the read at offset 1 must resolve to the second segment's single message
    assertEquals("A read should now return the last message in the log", log.logEndOffset-1, log.read(1, 200, None).messageSet.head.offset)
  }
  /**
   * Test reading at the boundary of the log, specifically
   * - reading from the logEndOffset should give an empty message set
   * - reading beyond the log end offset should throw an OffsetOutOfRangeException
   */
  @Test
  def testReadOutOfRange() {
    // the log's single (empty) segment starts at offset 1024
    createEmptyLogs(logDir, 1024)
    val log = new Log(logDir, logConfig.copy(segmentSize = 1024), recoveryPoint = 0L, time.scheduler, time = time)
    assertEquals("Reading just beyond end of log should produce 0 byte read.", 0, log.read(1024, 1000).messageSet.sizeInBytes)
    try {
      // offset 0 is below the log's start offset (1024)
      log.read(0, 1024)
      fail("Expected exception on invalid read.")
    } catch {
      // discarded string expression; the catch clause itself is the assertion
      case e: OffsetOutOfRangeException => "This is good."
    }
    try {
      // offset 1025 is past the log end offset
      log.read(1025, 1000)
      fail("Expected exception on invalid read.")
    } catch {
      case e: OffsetOutOfRangeException => // This is good.
    }
  }
/**
* Test that covers reads and writes on a multisegment log. This test appends a bunch of messages
* and then reads them all back and checks that the message read and offset matches what was appended.
*/
@Test
def testLogRolls() {
/* create a multipart log with 100 messages */
val log = new Log(logDir, logConfig.copy(segmentSize = 100), recoveryPoint = 0L, time.scheduler, time = time)
val numMessages = 100
val messageSets = (0 until numMessages).map(i => TestUtils.singleMessageSet(i.toString.getBytes))
messageSets.foreach(log.append(_))
log.flush
/* do successive reads to ensure all our messages are there */
var offset = 0L
for(i <- 0 until numMessages) {
val messages = log.read(offset, 1024*1024).messageSet
assertEquals("Offsets not equal", offset, messages.head.offset)
assertEquals("Messages not equal at offset " + offset, messageSets(i).head.message, messages.head.message)
offset = messages.head.offset + 1
}
val lastRead = log.read(startOffset = numMessages, maxLength = 1024*1024, maxOffset = Some(numMessages + 1)).messageSet
assertEquals("Should be no more messages", 0, lastRead.size)
// check that rolling the log forced a flushed the log--the flush is asyn so retry in case of failure
TestUtils.retry(1000L){
assertTrue("Log role should have forced flush", log.recoveryPoint >= log.activeSegment.baseOffset)
}
}
  /**
   * Test reads at offsets that fall within compressed message set boundaries.
   */
  @Test
  def testCompressedMessages() {
    /* this log should roll after every messageset */
    val log = new Log(logDir, logConfig.copy(segmentSize = 100), recoveryPoint = 0L, time.scheduler, time = time)
    /* append 2 compressed message sets, each with two messages giving offsets 0, 1, 2, 3 */
    log.append(new ByteBufferMessageSet(DefaultCompressionCodec, new Message("hello".getBytes), new Message("there".getBytes)))
    log.append(new ByteBufferMessageSet(DefaultCompressionCodec, new Message("alpha".getBytes), new Message("beta".getBytes)))
    // deep-iterate into the compressed wrapper returned by a shallow read
    def read(offset: Int) = ByteBufferMessageSet.deepIterator(log.read(offset, 4096).messageSet.head.message)
    /* we should always get the first message in the compressed set when reading any offset in the set */
    assertEquals("Read at offset 0 should produce 0", 0, read(0).next().offset)
    assertEquals("Read at offset 1 should produce 0", 0, read(1).next().offset)
    assertEquals("Read at offset 2 should produce 2", 2, read(2).next().offset)
    assertEquals("Read at offset 3 should produce 2", 2, read(3).next().offset)
  }
/**
* Test garbage collecting old segments
*/
@Test
def testThatGarbageCollectingSegmentsDoesntChangeOffset() {
for(messagesToAppend <- List(0, 1, 25)) {
logDir.mkdirs()
// first test a log segment starting at 0
val log = new Log(logDir, logConfig.copy(segmentSize = 100), recoveryPoint = 0L, time.scheduler, time = time)
for(i <- 0 until messagesToAppend)
log.append(TestUtils.singleMessageSet(i.toString.getBytes))
var currOffset = log.logEndOffset
assertEquals(currOffset, messagesToAppend)
// time goes by; the log file is deleted
log.deleteOldSegments(_ => true)
assertEquals("Deleting segments shouldn't have changed the logEndOffset", currOffset, log.logEndOffset)
assertEquals("We should still have one segment left", 1, log.numberOfSegments)
assertEquals("Further collection shouldn't delete anything", 0, log.deleteOldSegments(_ => true))
assertEquals("Still no change in the logEndOffset", currOffset, log.logEndOffset)
assertEquals("Should still be able to append and should get the logEndOffset assigned to the new append",
currOffset,
log.append(TestUtils.singleMessageSet("hello".toString.getBytes)).firstOffset)
// cleanup the log
log.delete()
}
}
/**
* MessageSet size shouldn't exceed the config.segmentSize, check that it is properly enforced by
* appending a message set larger than the config.segmentSize setting and checking that an exception is thrown.
*/
@Test
def testMessageSetSizeCheck() {
val messageSet = new ByteBufferMessageSet(NoCompressionCodec, new Message ("You".getBytes), new Message("bethe".getBytes))
// append messages to log
val configSegmentSize = messageSet.sizeInBytes - 1
val log = new Log(logDir, logConfig.copy(segmentSize = configSegmentSize), recoveryPoint = 0L, time.scheduler, time = time)
try {
log.append(messageSet)
fail("message set should throw MessageSetSizeTooLargeException.")
} catch {
case e: MessageSetSizeTooLargeException => // this is good
}
}
  // Verifies the append-time constraints on compacted (compact = true) topics:
  // every message must have a key, and compressed sets are rejected unless the
  // broker-side compression type is "uncompressed".
  @Test
  def testCompactedTopicConstraints() {
    val keyedMessage = new Message(bytes = "this message has a key".getBytes, key = "and here it is".getBytes)
    val anotherKeyedMessage = new Message(bytes = "this message also has a key".getBytes, key ="another key".getBytes)
    val unkeyedMessage = new Message(bytes = "this message does not have a key".getBytes)
    val messageSetWithUnkeyedMessage = new ByteBufferMessageSet(NoCompressionCodec, unkeyedMessage, keyedMessage)
    val messageSetWithOneUnkeyedMessage = new ByteBufferMessageSet(NoCompressionCodec, unkeyedMessage)
    val messageSetWithCompressedKeyedMessage = new ByteBufferMessageSet(GZIPCompressionCodec, keyedMessage)
    val messageSetWithKeyedMessage = new ByteBufferMessageSet(NoCompressionCodec, keyedMessage)
    val messageSetWithKeyedMessages = new ByteBufferMessageSet(NoCompressionCodec, keyedMessage, anotherKeyedMessage)
    val log = new Log(logDir, logConfig.copy(compact = true), recoveryPoint = 0L, time.scheduler, time)
    // a set containing any unkeyed message is rejected wholesale
    try {
      log.append(messageSetWithUnkeyedMessage)
      fail("Compacted topics cannot accept a message without a key.")
    } catch {
      case e: InvalidMessageException => // this is good
    }
    try {
      log.append(messageSetWithOneUnkeyedMessage)
      fail("Compacted topics cannot accept a message without a key.")
    } catch {
      case e: InvalidMessageException => // this is good
    }
    try {
      log.append(messageSetWithCompressedKeyedMessage)
      fail("Compacted topics cannot accept compressed messages.")
    } catch {
      case e: InvalidMessageException => // this is good
    }
    // the following should succeed without any InvalidMessageException
    log.append(messageSetWithKeyedMessage)
    log.append(messageSetWithKeyedMessages)
    // test that a compacted topic with broker-side compression type set to uncompressed can accept compressed messages
    val uncompressedLog = new Log(logDir, logConfig.copy(compact = true, compressionType = "uncompressed"),
                                  recoveryPoint = 0L, time.scheduler, time)
    uncompressedLog.append(messageSetWithCompressedKeyedMessage)
    uncompressedLog.append(messageSetWithKeyedMessage)
    uncompressedLog.append(messageSetWithKeyedMessages)
    // the missing-key constraint still applies regardless of compression type
    try {
      uncompressedLog.append(messageSetWithUnkeyedMessage)
      fail("Compacted topics cannot accept a message without a key.")
    } catch {
      case e: InvalidMessageException => // this is good
    }
    try {
      uncompressedLog.append(messageSetWithOneUnkeyedMessage)
      fail("Compacted topics cannot accept a message without a key.")
    } catch {
      case e: InvalidMessageException => // this is good
    }
  }
/**
* We have a max size limit on message appends, check that it is properly enforced by appending a message larger than the
* setting and checking that an exception is thrown.
*/
@Test
def testMessageSizeCheck() {
val first = new ByteBufferMessageSet(NoCompressionCodec, new Message ("You".getBytes), new Message("bethe".getBytes))
val second = new ByteBufferMessageSet(NoCompressionCodec, new Message("change".getBytes))
// append messages to log
val maxMessageSize = second.sizeInBytes - 1
val log = new Log(logDir, logConfig.copy(maxMessageSize = maxMessageSize), recoveryPoint = 0L, time.scheduler, time = time)
// should be able to append the small message
log.append(first)
try {
log.append(second)
fail("Second message set should throw MessageSizeTooLargeException.")
} catch {
case e: MessageSizeTooLargeException => // this is good
}
}
  /**
   * Append a bunch of messages to a log and then re-open it both with and without recovery and check that the log re-initializes correctly.
   */
  @Test
  def testLogRecoversToCorrectOffset() {
    val numMessages = 100
    val messageSize = 100
    val segmentSize = 7 * messageSize
    val indexInterval = 3 * messageSize
    val config = logConfig.copy(segmentSize = segmentSize, indexInterval = indexInterval, maxIndexSize = 4096)
    var log = new Log(logDir, config, recoveryPoint = 0L, time.scheduler, time)
    for(i <- 0 until numMessages)
      log.append(TestUtils.singleMessageSet(TestUtils.randomBytes(messageSize)))
    assertEquals("After appending %d messages to an empty log, the log end offset should be %d".format(numMessages, numMessages), numMessages, log.logEndOffset)
    // capture index state before closing so we can verify it is preserved/rebuilt
    val lastIndexOffset = log.activeSegment.index.lastOffset
    val numIndexEntries = log.activeSegment.index.entries
    val lastOffset = log.logEndOffset
    log.close()
    // reopen with recoveryPoint == logEndOffset: no recovery should be needed
    log = new Log(logDir, config, recoveryPoint = lastOffset, time.scheduler, time)
    assertEquals("Should have %d messages when log is reopened w/o recovery".format(numMessages), numMessages, log.logEndOffset)
    assertEquals("Should have same last index offset as before.", lastIndexOffset, log.activeSegment.index.lastOffset)
    assertEquals("Should have same number of index entries as before.", numIndexEntries, log.activeSegment.index.entries)
    log.close()
    // test recovery case: recoveryPoint 0 forces a full re-validation of the log
    log = new Log(logDir, config, recoveryPoint = 0L, time.scheduler, time)
    assertEquals("Should have %d messages when log is reopened with recovery".format(numMessages), numMessages, log.logEndOffset)
    assertEquals("Should have same last index offset as before.", lastIndexOffset, log.activeSegment.index.lastOffset)
    assertEquals("Should have same number of index entries as before.", numIndexEntries, log.activeSegment.index.entries)
    log.close()
  }
  /**
   * Test that if we manually delete an index segment it is rebuilt when the log is re-opened
   */
  @Test
  def testIndexRebuild() {
    // publish the messages and close the log
    val numMessages = 200
    // indexInterval = 1 ensures every message gets an index entry
    val config = logConfig.copy(segmentSize = 200, indexInterval = 1)
    var log = new Log(logDir, config, recoveryPoint = 0L, time.scheduler, time)
    for(i <- 0 until numMessages)
      log.append(TestUtils.singleMessageSet(TestUtils.randomBytes(10)))
    val indexFiles = log.logSegments.map(_.index.file)
    log.close()
    // delete all the index files
    indexFiles.foreach(_.delete())
    // reopen the log: the missing indices must be rebuilt from the segment data
    log = new Log(logDir, config, recoveryPoint = 0L, time.scheduler, time)
    assertEquals("Should have %d messages when log is reopened".format(numMessages), numMessages, log.logEndOffset)
    for(i <- 0 until numMessages)
      assertEquals(i, log.read(i, 100, None).messageSet.head.offset)
    log.close()
  }
  /**
   * Test the Log truncate operations
   */
  @Test
  def testTruncateTo() {
    val set = TestUtils.singleMessageSet("test".getBytes())
    val setSize = set.sizeInBytes
    val msgPerSeg = 10
    val segmentSize = msgPerSeg * setSize // each segment will be 10 messages
    // create a log
    val log = new Log(logDir, logConfig.copy(segmentSize = segmentSize), recoveryPoint = 0L, scheduler = time.scheduler, time = time)
    assertEquals("There should be exactly 1 segment.", 1, log.numberOfSegments)
    for (i<- 1 to msgPerSeg)
      log.append(set)
    assertEquals("There should be exactly 1 segments.", 1, log.numberOfSegments)
    assertEquals("Log end offset should be equal to number of messages", msgPerSeg, log.logEndOffset)
    val lastOffset = log.logEndOffset
    val size = log.size
    // truncating at or beyond the end offset must be a no-op
    log.truncateTo(log.logEndOffset) // keep the entire log
    assertEquals("Should not change offset", lastOffset, log.logEndOffset)
    assertEquals("Should not change log size", size, log.size)
    log.truncateTo(log.logEndOffset + 1) // try to truncate beyond lastOffset
    assertEquals("Should not change offset but should log error", lastOffset, log.logEndOffset)
    assertEquals("Should not change log size", size, log.size)
    log.truncateTo(msgPerSeg/2) // truncate somewhere in between
    assertEquals("Should change offset", log.logEndOffset, msgPerSeg/2)
    assertTrue("Should change log size", log.size < size)
    log.truncateTo(0) // truncate the entire log
    assertEquals("Should change offset", 0, log.logEndOffset)
    assertEquals("Should change log size", 0, log.size)
    // refilling after a full truncate must restore the original offsets/size
    for (i<- 1 to msgPerSeg)
      log.append(set)
    assertEquals("Should be back to original offset", log.logEndOffset, lastOffset)
    assertEquals("Should be back to original size", log.size, size)
    // truncateFullyAndStartAt wipes the data but keeps the given start offset
    log.truncateFullyAndStartAt(log.logEndOffset - (msgPerSeg - 1))
    assertEquals("Should change offset", log.logEndOffset, lastOffset - (msgPerSeg - 1))
    assertEquals("Should change log size", log.size, 0)
    for (i<- 1 to msgPerSeg)
      log.append(set)
    assertTrue("Should be ahead of to original offset", log.logEndOffset > msgPerSeg)
    assertEquals("log size should be same as before", size, log.size)
    log.truncateTo(0) // truncate before first start offset in the log
    assertEquals("Should change offset", 0, log.logEndOffset)
    assertEquals("Should change log size", log.size, 0)
  }
  /**
   * Verify that when we truncate a log the index of the last segment is resized to the max index size to allow more appends
   */
  @Test
  def testIndexResizingAtTruncation() {
    val set = TestUtils.singleMessageSet("test".getBytes())
    val setSize = set.sizeInBytes
    val msgPerSeg = 10
    val segmentSize = msgPerSeg * setSize // each segment will be 10 messages
    val config = logConfig.copy(segmentSize = segmentSize)
    val log = new Log(logDir, config, recoveryPoint = 0L, scheduler = time.scheduler, time = time)
    assertEquals("There should be exactly 1 segment.", 1, log.numberOfSegments)
    for (i<- 1 to msgPerSeg)
      log.append(set)
    assertEquals("There should be exactly 1 segment.", 1, log.numberOfSegments)
    for (i<- 1 to msgPerSeg)
      log.append(set)
    assertEquals("There should be exactly 2 segment.", 2, log.numberOfSegments)
    // rolling trims the now-inactive first segment's index down to its used size
    assertEquals("The index of the first segment should be trimmed to empty", 0, log.logSegments.toList(0).index.maxEntries)
    log.truncateTo(0)
    assertEquals("There should be exactly 1 segment.", 1, log.numberOfSegments)
    // truncation must grow the surviving segment's index back to full capacity
    // (maxIndexSize / 8 because each index entry occupies 8 bytes)
    assertEquals("The index of segment 1 should be resized to maxIndexSize", log.config.maxIndexSize/8, log.logSegments.toList(0).index.maxEntries)
    for (i<- 1 to msgPerSeg)
      log.append(set)
    assertEquals("There should be exactly 1 segment.", 1, log.numberOfSegments)
  }
  /**
   * When we open a log any index segments without an associated log segment should be deleted.
   */
  @Test
  def testBogusIndexSegmentsAreRemoved() {
    // NOTE(review): these files are never explicitly created in this test, so
    // the assertions below rely on Log's constructor creating/replacing the
    // index at offset 0 and removing any orphan at offset 5 -- confirm that an
    // earlier version of this test did not create them first.
    val bogusIndex1 = Log.indexFilename(logDir, 0)
    val bogusIndex2 = Log.indexFilename(logDir, 5)
    val set = TestUtils.singleMessageSet("test".getBytes())
    val log = new Log(logDir,
                      logConfig.copy(segmentSize = set.sizeInBytes * 5,
                                     maxIndexSize = 1000,
                                     indexInterval = 1),
                      recoveryPoint = 0L,
                      time.scheduler,
                      time)
    assertTrue("The first index file should have been replaced with a larger file", bogusIndex1.length > 0)
    assertFalse("The second index file should have been deleted.", bogusIndex2.exists)
    // check that we can append to the log
    for(i <- 0 until 10)
      log.append(set)
    log.delete()
  }
  /**
   * Verify that truncation works correctly after re-opening the log
   */
  @Test
  def testReopenThenTruncate() {
    val set = TestUtils.singleMessageSet("test".getBytes())
    val config = logConfig.copy(segmentSize = set.sizeInBytes * 5,
                                maxIndexSize = 1000,
                                indexInterval = 10000)
    // create a log
    var log = new Log(logDir,
                      config,
                      recoveryPoint = 0L,
                      time.scheduler,
                      time)
    // add enough messages to roll over several segments then close and re-open and attempt to truncate
    for(i <- 0 until 100)
      log.append(set)
    log.close()
    log = new Log(logDir,
                  config,
                  recoveryPoint = 0L,
                  time.scheduler,
                  time)
    // truncating to an offset inside the first segment must drop all later segments
    log.truncateTo(3)
    assertEquals("All but one segment should be deleted.", 1, log.numberOfSegments)
    assertEquals("Log end offset should be 3.", 3, log.logEndOffset)
  }
  /**
   * Test that deleted files are deleted after the appropriate time.
   */
  @Test
  def testAsyncDelete() {
    val set = TestUtils.singleMessageSet("test".getBytes())
    // deleted files linger for this many (mock) milliseconds before removal
    val asyncDeleteMs = 1000
    val config = logConfig.copy(segmentSize = set.sizeInBytes * 5,
                                fileDeleteDelayMs = asyncDeleteMs,
                                maxIndexSize = 1000,
                                indexInterval = 10000)
    val log = new Log(logDir,
                      config,
                      recoveryPoint = 0L,
                      time.scheduler,
                      time)
    // append some messages to create some segments
    for(i <- 0 until 100)
      log.append(set)
    // files should be renamed
    val segments = log.logSegments.toArray
    val oldFiles = segments.map(_.log.file) ++ segments.map(_.index.file)
    log.deleteOldSegments((s) => true)
    assertEquals("Only one segment should remain.", 1, log.numberOfSegments)
    // deletion happens in two phases: first a rename to *.deleted ...
    assertTrue("All log and index files should end in .deleted", segments.forall(_.log.file.getName.endsWith(Log.DeletedFileSuffix)) &&
                                                                 segments.forall(_.index.file.getName.endsWith(Log.DeletedFileSuffix)))
    assertTrue("The .deleted files should still be there.", segments.forall(_.log.file.exists) &&
                                                            segments.forall(_.index.file.exists))
    assertTrue("The original file should be gone.", oldFiles.forall(!_.exists))
    // ... then, when enough time passes, the renamed files are physically deleted
    val deletedFiles = segments.map(_.log.file) ++ segments.map(_.index.file)
    time.sleep(asyncDeleteMs + 1)
    assertTrue("Files should all be gone.", deletedFiles.forall(!_.exists))
  }
/**
* Any files ending in .deleted should be removed when the log is re-opened.
*/
@Test
def testOpenDeletesObsoleteFiles() {
  val messageSet = TestUtils.singleMessageSet("test".getBytes())
  val config = logConfig.copy(segmentSize = messageSet.sizeInBytes * 5, maxIndexSize = 1000)
  // Roll several segments, mark every one of them deleted, then close the log
  // before the asynchronous file removal has a chance to run.
  var log = new Log(logDir, config, recoveryPoint = 0L, time.scheduler, time)
  (0 until 100).foreach(_ => log.append(messageSet))
  log.deleteOldSegments(_ => true)
  log.close()
  // Re-opening the log must clean up the leftover .deleted files on disk.
  log = new Log(logDir, config, recoveryPoint = 0L, time.scheduler, time)
  assertEquals("The deleted segments should be gone.", 1, log.numberOfSegments)
}
@Test
def testAppendMessageWithNullPayload() {
  val log = new Log(logDir, LogConfig(), recoveryPoint = 0L, time.scheduler, time)
  // A message with a null payload is legal; appending and reading it back
  // must preserve both its offset and its null-ness.
  log.append(new ByteBufferMessageSet(new Message(bytes = null)))
  val readBack = log.read(0, 4096, None).messageSet
  assertEquals(0, readBack.head.offset)
  assertTrue("Message payload should be null.", readBack.head.message.isNull)
}
@Test
def testCorruptLog() {
// append some messages to create some segments
val config = logConfig.copy(indexInterval = 1, maxMessageSize = 64*1024, segmentSize = 1000)
val set = TestUtils.singleMessageSet("test".getBytes())
// recovery point below the end offset forces recovery of the tail on re-open
val recoveryPoint = 50L
// repeat with random message counts / corruption sizes to cover many layouts
for(iteration <- 0 until 50) {
// create a log and write some messages to it
logDir.mkdirs()
var log = new Log(logDir,
config,
recoveryPoint = 0L,
time.scheduler,
time)
val numMessages = 50 + TestUtils.random.nextInt(50)
for(i <- 0 until numMessages)
log.append(set)
// snapshot the message contents before corrupting, for comparison after recovery
val messages = log.logSegments.flatMap(_.log.iterator.toList)
log.close()
// corrupt index and log by appending random bytes
TestUtils.appendNonsenseToFile(log.activeSegment.index.file, TestUtils.random.nextInt(1024) + 1)
TestUtils.appendNonsenseToFile(log.activeSegment.log.file, TestUtils.random.nextInt(1024) + 1)
// attempt recovery
log = new Log(logDir, config, recoveryPoint, time.scheduler, time)
// recovery must truncate away the appended garbage but keep every valid message
assertEquals(numMessages, log.logEndOffset)
assertEquals("Messages in the log after recovery should be the same.", messages, log.logSegments.flatMap(_.log.iterator.toList))
// wipe the directory so the next iteration starts from a fresh log
Utils.rm(logDir)
}
}
@Test
def testCleanShutdownFile() {
  // append some messages to create some segments
  val config = logConfig.copy(indexInterval = 1, maxMessageSize = 64*1024, segmentSize = 1000)
  val set = TestUtils.singleMessageSet("test".getBytes())
  val parentLogDir = logDir.getParentFile
  // Bug fix: the original message was the literal "Data directory %s must exist" -
  // the %s placeholder was never substituted. Interpolate the actual path instead.
  assertTrue(s"Data directory $parentLogDir must exist", parentLogDir.isDirectory)
  val cleanShutdownFile = new File(parentLogDir, Log.CleanShutdownFile)
  cleanShutdownFile.createNewFile()
  assertTrue(".kafka_cleanshutdown must exist", cleanShutdownFile.exists())
  // create a log, write some messages, and close it cleanly
  var log = new Log(logDir, config, recoveryPoint = 0L, time.scheduler, time)
  for (i <- 0 until 100)
    log.append(set)
  log.close()
  // check if recovery was attempted. Even if the recovery point is 0L, recovery
  // should not be attempted as the clean shutdown file exists, so the end offset
  // after re-opening must match the value recorded before the close.
  val recoveryPoint = log.logEndOffset
  log = new Log(logDir, config, 0L, time.scheduler, time)
  assertEquals(recoveryPoint, log.logEndOffset)
  cleanShutdownFile.delete()
}
@Test
def testParseTopicPartitionName() {
  // A well-formed "<topic>-<partition>" directory name must round-trip.
  val topic = "test_topic"
  val partition = "143"
  val dir = new File(logDir + topicPartitionName(topic, partition))
  val parsed = Log.parseTopicPartitionName(dir)
  assertEquals(topic, parsed.asTuple._1)
  assertEquals(partition.toInt, parsed.asTuple._2)
}
@Test
def testParseTopicPartitionNameForEmptyName() {
  // An empty directory name carries no topic/partition and must be rejected.
  val dir = new File("")
  try {
    Log.parseTopicPartitionName(dir)
    fail("KafkaException should have been thrown for dir: " + dir.getCanonicalPath)
  } catch {
    case _: Exception => // expected
  }
}
@Test
def testParseTopicPartitionNameForNull() {
  // A null directory must be rejected rather than dereferenced.
  val dir: File = null
  try {
    Log.parseTopicPartitionName(dir)
    fail("KafkaException should have been thrown for dir: " + dir)
  } catch {
    case _: Exception => // expected
  }
}
@Test
def testParseTopicPartitionNameForMissingSeparator() {
  // "<topic><partition>" with no '-' separator is malformed and must be rejected.
  val topic = "test_topic"
  val partition = "1999"
  val dir = new File(logDir + File.separator + topic + partition)
  try {
    Log.parseTopicPartitionName(dir)
    fail("KafkaException should have been thrown for dir: " + dir.getCanonicalPath)
  } catch {
    case _: Exception => // expected
  }
}
@Test
def testParseTopicPartitionNameForMissingTopic() {
  // "-<partition>" with an empty topic is malformed and must be rejected.
  val topic = ""
  val partition = "1999"
  val dir = new File(logDir + topicPartitionName(topic, partition))
  try {
    Log.parseTopicPartitionName(dir)
    fail("KafkaException should have been thrown for dir: " + dir.getCanonicalPath)
  } catch {
    case _: Exception => // expected
  }
}
@Test
def testParseTopicPartitionNameForMissingPartition() {
  // "<topic>-" with an empty partition is malformed and must be rejected.
  val topic = "test_topic"
  val partition = ""
  val dir = new File(logDir + topicPartitionName(topic, partition))
  try {
    Log.parseTopicPartitionName(dir)
    fail("KafkaException should have been thrown for dir: " + dir.getCanonicalPath)
  } catch {
    case _: Exception => // expected
  }
}
/** Builds the on-disk directory name fragment "<separator><topic>-<partition>". */
def topicPartitionName(topic: String, partition: String): String =
  s"${File.separator}$topic-$partition"
}
| WillCh/cs286A | dataMover/kafka/core/src/test/scala/unit/kafka/log/LogTest.scala | Scala | bsd-2-clause | 34,238 |
package de.htwg.zeta.generator.file
import java.util.UUID
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.Promise
import com.google.inject.Guice
import de.htwg.zeta.common.models.entity.File
import de.htwg.zeta.common.models.entity.Filter
import de.htwg.zeta.common.models.entity.Generator
import de.htwg.zeta.common.models.entity.GeneratorImage
import de.htwg.zeta.common.models.project.instance.GraphicalDslInstance
import de.htwg.zeta.generator.template.Error
import de.htwg.zeta.generator.template.Result
import de.htwg.zeta.generator.template.Settings
import de.htwg.zeta.generator.template.Success
import de.htwg.zeta.generator.template.Template
import de.htwg.zeta.generator.template.Transformer
import de.htwg.zeta.persistence.PersistenceModule
import de.htwg.zeta.persistence.general.FileRepository
import org.slf4j.Logger
import org.slf4j.LoggerFactory
/**
 * Example transformer that counts the nodes and edges of a model instance and
 * persists the summary as a text file associated with the model.
 */
class MyTransformer() extends Transformer {

  private val logger: Logger = LoggerFactory.getLogger(getClass)
  private val injector = Guice.createInjector(new PersistenceModule)
  private val filePersistence = injector.getInstance(classOf[FileRepository])

  /**
   * Writes a small summary file ("example.txt") for the given model instance.
   *
   * Fix: the original routed the result through a hand-completed Promise whose
   * `.recover` only handled `Exception` - any other Throwable would have left
   * the Promise uncompleted forever. Mapping the persistence Future directly is
   * simpler and propagates every failure automatically.
   *
   * @param entity the model instance to summarize
   * @return this transformer, once the file has been persisted
   */
  def transform(entity: GraphicalDslInstance): Future[Transformer] = {
    logger.info("Start example")
    val filename = "example.txt"
    val content =
      s"""
         |Number of nodes : ${entity.nodeMap.size}
         |Number of edges : ${entity.edgeMap.size}
      """.stripMargin
    filePersistence.create(File(entity.id, filename, content)).map { _ =>
      logger.info(s"Successfully saved results to '$filename' for model '${entity.name}' (MetaModel '${entity.graphicalDslId}')")
      this
    }
  }

  /** Signals successful completion of the generator run. */
  def exit(): Future[Result] = Future.successful(Success("The generator finished"))
}
/**
 * Entry point of the file generator.
 *
 * Creates the generator entity (with its demo result file) and hands out
 * [[MyTransformer]] instances for filter- and model-based runs.
 */
object Main extends Template[CreateOptions, String] {

  /**
   * Creates a new generator: reads the image, persists the demo file content,
   * then stores the generator entity referencing both.
   */
  override def createTransformer(options: CreateOptions, imageId: UUID): Future[Result] =
    generatorImagePersistence.read(imageId).flatMap { image =>
      createFileContent().flatMap { file =>
        createGenerator(options, image, file).map(_ => Success())
      }
    }

  // Persist the generator entity linking the image and its single file.
  private def createGenerator(options: CreateOptions, image: GeneratorImage, file: File): Future[Generator] =
    generatorPersistence.create(
      Generator(
        id = UUID.randomUUID(),
        name = options.name,
        imageId = image.id,
        files = Map(file.id -> file.name)
      )
    )

  // Persist the static demo file that documents this generator.
  private def createFileContent(): Future[File] =
    filePersistence.create(
      File(
        UUID.randomUUID,
        Settings.generatorFile,
        "This is a demo to save the results of a generator. No further configuration is required."
      )
    )

  /**
   * Initialize the generator for a filter-based run.
   *
   * @param file the file which was loaded for the generator
   * @return a transformer instance
   */
  override def getTransformer(file: File, filter: Filter): Future[Transformer] =
    Future.successful(new MyTransformer())

  /**
   * Initialize the generator for a model-instance run.
   *
   * @param file the file which was loaded for the generator
   * @return a transformer instance
   */
  override def getTransformer(file: File, model: GraphicalDslInstance): Future[Transformer] =
    Future.successful(new MyTransformer())

  /**
   * Generator-to-generator invocation is not supported by this example.
   *
   * @return an [[Error]] result
   */
  override def runGeneratorWithOptions(options: String): Future[Result] =
    Future.successful(Error("Call a generator from a generator is not supported in this example"))
}
| Zeta-Project/zeta | api/images/generator/file/src/main/scala/de/htwg/zeta/generator/file/Main.scala | Scala | bsd-2-clause | 3,696 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.security
import java.nio.charset.StandardCharsets
import org.apache.accumulo.core.security.{Authorizations, ColumnVisibility, VisibilityEvaluator}
import org.geotools.util.factory.GeoTools
import org.geotools.factory.CommonFactoryFinder
import org.geotools.filter.FunctionExpressionImpl
import org.geotools.filter.capability.FunctionNameImpl
import org.locationtech.geomesa.security
import org.locationtech.geomesa.security._
import org.opengis.feature.simple.SimpleFeature
import org.opengis.filter.Filter
import org.opengis.filter.capability.FunctionName
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
@deprecated
object VisibilityFilterFunction {

  /** Function name under which this filter function is registered ("visibility"). */
  val name: FunctionName = new FunctionNameImpl("visibility", classOf[java.lang.Boolean])

  /** Builds the GeoTools filter `visibility() = true`. */
  def filter: Filter = {
    val factory = CommonFactoryFinder.getFilterFactory2(GeoTools.getDefaultHints)
    val visibilityFn = factory.function(VisibilityFilterFunction.name.getFunctionName)
    factory.equals(visibilityFn, factory.literal(true))
  }
}
@deprecated
class VisibilityFilterFunction
    extends FunctionExpressionImpl(VisibilityFilterFunction.name) {

  // Authorizations for the current user; no per-layer parameters are passed here.
  private val provider = security.getAuthorizationsProvider(Map.empty[String, java.io.Serializable].asJava, Seq())

  // Fix: use the explicit JavaConverters (.asScala/.asJava) already imported by this
  // file instead of relying on the deprecated implicit JavaConversions wildcard.
  private val auths = provider.getAuthorizations.asScala.map(_.getBytes(StandardCharsets.UTF_8)).asJava

  private val vizEvaluator = new VisibilityEvaluator(new Authorizations(auths))

  // Evaluation results cached per visibility expression string, since the same
  // expressions tend to repeat across many features.
  private val vizCache = collection.concurrent.TrieMap.empty[String, Boolean]

  /** Returns TRUE iff the feature has a visibility satisfied by the user's authorizations. */
  def evaluateSF(feature: SimpleFeature): java.lang.Boolean = {
    feature.visibility.exists(v => vizCache.getOrElseUpdate(v, vizEvaluator.evaluate(new ColumnVisibility(v))))
  }

  // Note: the Java @Override annotation was removed - it has no effect on Scala
  // methods; the `override` modifier is what the compiler actually checks.
  override def evaluate(obj: Object): Object = obj match {
    case sf: SimpleFeature => evaluateSF(sf)
    case _ => java.lang.Boolean.FALSE
  }
}
| elahrvivaz/geomesa | geomesa-accumulo/geomesa-accumulo-security/src/main/scala/org/locationtech/geomesa/accumulo/security/VisibilityFilterFunction.scala | Scala | apache-2.0 | 2,351 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset down to a sample of code snippets that match specific criteria, giving a quick overview of the dataset's contents without deeper analysis.