code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming.sources
import java.util
import scala.collection.JavaConverters._
import org.apache.spark.SparkException
import org.apache.spark.sql.{ForeachWriter, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import org.apache.spark.sql.catalyst.expressions.UnsafeRow
import org.apache.spark.sql.connector.catalog.{SupportsWrite, Table, TableCapability}
import org.apache.spark.sql.connector.write.{DataWriter, LogicalWriteInfo, PhysicalWriteInfo, SupportsTruncate, WriteBuilder, WriterCommitMessage}
import org.apache.spark.sql.connector.write.streaming.{StreamingDataWriterFactory, StreamingWrite}
import org.apache.spark.sql.execution.python.PythonForeachWriter
import org.apache.spark.sql.internal.connector.SupportsStreamingUpdateAsAppend
import org.apache.spark.sql.types.StructType
/**
 * A write-only table for forwarding data into the specified [[ForeachWriter]].
 *
 * @param writer The [[ForeachWriter]] to process all data.
 * @param converter An object to convert internal rows to target type T. Either it can be
 *                  a [[ExpressionEncoder]] or a direct converter function.
 * @tparam T The expected type of the sink.
 */
case class ForeachWriterTable[T](
    writer: ForeachWriter[T],
    converter: Either[ExpressionEncoder[T], InternalRow => T])
  extends Table with SupportsWrite {

  override def name(): String = "ForeachSink"

  // The sink accepts any input schema; the real schema arrives later via
  // LogicalWriteInfo in newWriteBuilder, so an empty schema is advertised here.
  override def schema(): StructType = StructType(Nil)

  // Streaming-write-only table: no batch read/write capabilities.
  override def capabilities(): util.Set[TableCapability] = {
    Set(TableCapability.STREAMING_WRITE).asJava
  }

  override def newWriteBuilder(info: LogicalWriteInfo): WriteBuilder = {
    new WriteBuilder with SupportsTruncate with SupportsStreamingUpdateAsAppend {
      // Schema of the incoming data; captured here because it is needed to bind
      // the encoder when the writer factory is created.
      private val inputSchema: StructType = info.schema()

      // Do nothing for truncate. Foreach sink is special and it just forwards all the
      // records to ForeachWriter.
      override def truncate(): WriteBuilder = this

      override def buildForStreaming(): StreamingWrite = {
        new StreamingWrite {
          // No global coordination: commit/abort of an epoch are no-ops; each
          // partition's ForeachDataWriter handles its own lifecycle.
          override def commit(epochId: Long, messages: Array[WriterCommitMessage]): Unit = {}
          override def abort(epochId: Long, messages: Array[WriterCommitMessage]): Unit = {}

          override def createStreamingWriterFactory(
              info: PhysicalWriteInfo): StreamingDataWriterFactory = {
            // Resolve the row converter on the driver so the factory ships a
            // ready-to-use deserializer to executors.
            val rowConverter: InternalRow => T = converter match {
              case Left(enc) =>
                // NOTE(review): assumes an active SparkSession exists on the
                // driver at this point (`.get` would throw otherwise).
                val boundEnc = enc.resolveAndBind(
                  inputSchema.toAttributes,
                  SparkSession.getActiveSession.get.sessionState.analyzer)
                boundEnc.createDeserializer()
              case Right(func) =>
                func
            }
            ForeachWriterFactory(writer, rowConverter)
          }
        }
      }
    }
  }
}
object ForeachWriterTable {

  /**
   * Builds a [[ForeachWriterTable]] for the given writer.
   *
   * A [[PythonForeachWriter]] consumes raw [[UnsafeRow]]s directly, so it gets a
   * pass-through converter; every other writer deserializes rows via the encoder.
   */
  def apply[T](
      writer: ForeachWriter[T],
      encoder: ExpressionEncoder[T]): ForeachWriterTable[_] = {
    writer match {
      case py: PythonForeachWriter =>
        val passThrough: InternalRow => UnsafeRow = _.asInstanceOf[UnsafeRow]
        new ForeachWriterTable[UnsafeRow](py, Right(passThrough))
      case other =>
        new ForeachWriterTable[T](other, Left(encoder))
    }
  }
}
/**
 * Factory shipped to executors; creates one [[ForeachDataWriter]] per
 * partition/epoch. The `taskId` is not needed by the foreach sink.
 */
case class ForeachWriterFactory[T](
    writer: ForeachWriter[T],
    rowConverter: InternalRow => T)
  extends StreamingDataWriterFactory {

  override def createWriter(
      partitionId: Int,
      taskId: Long,
      epochId: Long): ForeachDataWriter[T] =
    new ForeachDataWriter(writer, rowConverter, partitionId, epochId)
}
/**
 * A [[DataWriter]] which writes data in this partition to a [[ForeachWriter]].
 *
 * Lifecycle: `writer.open` is invoked once at construction; if it returns false
 * every row is silently skipped. The first failure seen in `write` (or a
 * task-abort) is remembered and reported to `writer.close` when this writer is
 * closed.
 *
 * @param writer The [[ForeachWriter]] to process all data.
 * @param rowConverter A function which can convert [[InternalRow]] to the required type [[T]]
 * @param partitionId id of the partition this writer handles
 * @param epochId id of the streaming epoch being written
 * @tparam T The type expected by the writer.
 */
class ForeachDataWriter[T](
    writer: ForeachWriter[T],
    rowConverter: InternalRow => T,
    partitionId: Int,
    epochId: Long)
  extends DataWriter[InternalRow] {

  // If open returns false, we should skip writing rows.
  private val opened = writer.open(partitionId, epochId)

  // First error observed while processing rows (or the abort marker); passed to
  // writer.close so user code can react to the failure.
  private var errorOrNull: Throwable = _

  override def write(record: InternalRow): Unit = {
    // Guarded block instead of an early `return`: identical behavior, idiomatic Scala.
    if (opened) {
      try {
        writer.process(rowConverter(record))
      } catch {
        // Intentionally catches everything: the throwable is recorded for
        // writer.close and then rethrown so the task still fails.
        case t: Throwable =>
          errorOrNull = t
          throw t
      }
    }
  }

  override def commit(): WriterCommitMessage = {
    ForeachWriterCommitMessage
  }

  override def abort(): Unit = {
    // If no concrete error was captured in write(), synthesize one so close()
    // still signals the failure to the user's ForeachWriter.
    if (errorOrNull == null) {
      errorOrNull = new SparkException("Foreach writer has been aborted due to a task failure")
    }
  }

  override def close(): Unit = {
    writer.close(errorOrNull)
  }
}
/**
 * An empty [[WriterCommitMessage]]. [[ForeachWriter]] implementations have no global coordination.
 * Returned by every [[ForeachDataWriter]]'s commit; the streaming commit/abort callbacks ignore it.
 */
case object ForeachWriterCommitMessage extends WriterCommitMessage
| BryanCutler/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/ForeachWriterTable.scala | Scala | apache-2.0 | 5,859 |
package org.hqjpa
import scala.collection.JavaConverters.asJavaCollectionConverter
import javax.persistence.criteria.Expression
import javax.persistence.criteria.Order
import javax.persistence.criteria.Selection
import scala.runtime.ScalaWholeNumberProxy
/**
 * Companion object for related class.<br/>
 * <br/>
 * Static members are thread safe, instance members are not.
 */
object ExpressionProxy {

  /**
   * Implicit type converter to add condition extensions (in [[ComparableExtensions]]) to
   * proxies of expressions of comparable types.
   *
   * @param VALUE Type of underlying expression.
   *
   * @param src Instance being converted.
   * @return Extending instance.
   */
  implicit def toComparableExtensions[VALUE <: Comparable[VALUE]](
    src : ExpressionProxy[VALUE]
  ) : ComparableExtensions[VALUE] =
    new ComparableExtensions(src.expr, src.queryBuilder)

  /**
   * Implicit type converter to add string condition extensions (in [[StringExtensions]]) to
   * proxies for expressions of string types.
   *
   * @param src Instance being converted.
   * @return Extending instance.
   */
  implicit def toStringExtensions(src : ExpressionProxy[String]) : StringExtensions =
    new StringExtensions(src.expr, src.queryBuilder)

  /**
   * Implicit type converter to add operators in [[GenericExtensions]] to
   * proxies for expressions.
   *
   * @param VALUE Type of underlying expression of expression being proxied.
   *
   * @param src Instance being converted.
   * @return Extending instance.
   */
  implicit def toGenericExtensions[VALUE](src : ExpressionProxy[VALUE]) : GenericExtensions[VALUE] =
    new GenericExtensions(src.expr, src.queryBuilder)

  /**
   * Implicit type converter to add operators in [[NumberExtensions]] to
   * proxies for expressions.
   *
   * @param VALUE Type of underlying expression of expression being proxied.
   *
   * @param src Instance being converted.
   * @return Extending instance.
   */
  implicit def toNumberExtensions[VALUE <: Number](src : ExpressionProxy[VALUE]) : NumberExtensions[VALUE] =
    new NumberExtensions(src.expr, src.queryBuilder)

  /**
   * Implicit type converter to add operators in [[FloatExtensions]] to
   * proxies for expressions.
   *
   * @param src Instance being converted.
   * @return Extending instance.
   */
  implicit def toFloatExtensions(src : ExpressionProxy[java.lang.Float]) : FloatExtensions =
    new FloatExtensions(src.expr, src.queryBuilder)

  /**
   * Implicit type converter to add operators in [[IntegerExtensions]] to
   * proxies for expressions.
   *
   * @param src Instance being converted.
   * @return Extending instance.
   */
  implicit def toIntegerExtensions(src : ExpressionProxy[java.lang.Integer]) : IntegerExtensions =
    new IntegerExtensions(src.expr, src.queryBuilder)

  /**
   * Implicit type converter to add operators in [[BooleanExtensions]] to
   * proxies for expressions.
   *
   * @param src Instance being converted.
   * @return Extending instance.
   */
  implicit def toBooleanExtensions(src : ExpressionProxy[java.lang.Boolean]) : BooleanExtensions =
    new BooleanExtensions(src.expr, src.queryBuilder)

  /**
   * Extensions for proxies over comparable expressions.<br/>
   * <br/>
   * Static methods are thread safe, instance methods are not.
   *
   * @param VALUE Type of underlying expression.
   *
   * @param expr Expression being proxied.
   * @param queryBuilder Host query builder.
   */
  class ComparableExtensions[VALUE <: Comparable[VALUE]](
    override val expr : Expression[VALUE],
    override val queryBuilder : IQueryBuilder
  )
  extends
    ExpressionProxy[VALUE](expr, queryBuilder) with
    OperatorExtensions.ComparableExtensions[VALUE]
  {
    /** Extractor for left side of the expression. */
    override val __leftSideExpr : (() => Expression[VALUE]) = { () => expr }

    /**
     * Allows forcing comparable extensions on compatible expression proxies in
     * scopes having ambiguous implicit conversions.
     */
    def cmp = this
  }

  /**
   * Additional extensions for expressions over strings.<br/>
   * <br/>
   * Static methods are thread safe, instance methods are not.
   *
   * @param expr Expression being proxied.
   * @param queryBuilder Host query builder.
   */
  class StringExtensions(
    override val expr : Expression[String],
    override val queryBuilder : IQueryBuilder
  )
  extends
    ExpressionProxy[String](expr, queryBuilder) with
    OperatorExtensions.StringExtensions
  {
    /** Extractor for left side of the expression. */
    override val __leftSideExpr : (() => Expression[String]) = { () => expr }

    /**
     * Allows forcing string extensions on compatible expression proxies in
     * scopes having ambiguous implicit conversions.
     */
    def str = this
  }

  /**
   * Extensions for expressions over all types.<br/>
   * <br/>
   * Static methods are thread safe, instance methods are not.
   *
   * @param VALUE Type of underlying expression of expression being proxied.
   *
   * @param expr Expression being proxied.
   * @param queryBuilder Host query builder.
   */
  class GenericExtensions[VALUE](
    override val expr : Expression[VALUE],
    override val queryBuilder : IQueryBuilder
  )
  extends
    ExpressionProxy[VALUE](expr, queryBuilder) with
    OperatorExtensions.GeneralExtensions[VALUE]
  {
    /** Extractor for left side of the expression. */
    override val __leftSideExpr : (() => Expression[VALUE]) = { () => expr }

    /**
     * Allows forcing aggregate extensions on compatible expression proxies in
     * scopes having ambiguous implicit conversions.
     */
    def agg = this
  }

  /**
   * Extensions for expressions over numbers.<br/>
   * <br/>
   * Static methods are thread safe, instance methods are not.
   *
   * @param VALUE Type of underlying attribute of attribute being proxied.
   *
   * @param expr Expression being proxied.
   * @param queryBuilder Host query builder.
   */
  class NumberExtensions[VALUE <: Number](
    override val expr : Expression[VALUE],
    override val queryBuilder : IQueryBuilder
  )
  extends
    ExpressionProxy[VALUE](expr, queryBuilder) with
    OperatorExtensions.NumberExtensions[VALUE]
  {
    /** Extractor for left side of the expression. */
    override val __leftSideExpr : (() => Expression[VALUE]) = { () => expr }

    /**
     * Allows forcing number extensions on compatible attribute proxies in
     * scopes having ambiguous implicit conversions.
     */
    def num = this
  }

  /**
   * Extensions for expressions over float numbers.<br/>
   * <br/>
   * Static methods are thread safe, instance methods are not.
   *
   * @param expr Expression being proxied.
   * @param queryBuilder Host query builder.
   */
  class FloatExtensions(
    override val expr : Expression[java.lang.Float],
    override val queryBuilder : IQueryBuilder
  )
  extends
    ExpressionProxy[java.lang.Float](expr, queryBuilder) with
    OperatorExtensions.FloatExtensions
  {
    /** Extractor for left side of the expression. */
    override val __leftSideExpr : (() => Expression[java.lang.Float]) = { () => expr }

    /**
     * Allows forcing float extensions on compatible attribute proxies in
     * scopes having ambiguous implicit conversions.
     */
    def flt = this
  }

  /**
   * Extensions for expressions over integer numbers.<br/>
   * <br/>
   * Static methods are thread safe, instance methods are not.
   *
   * @param expr Expression being proxied.
   * @param queryBuilder Host query builder.
   */
  class IntegerExtensions(
    override val expr : Expression[java.lang.Integer],
    override val queryBuilder : IQueryBuilder
  )
  extends
    ExpressionProxy[java.lang.Integer](expr, queryBuilder) with
    OperatorExtensions.IntegerExtensions
  {
    /** Extractor for left side of the expression. */
    override val __leftSideExpr : (() => Expression[java.lang.Integer]) = { () => expr }

    /**
     * Allows forcing integer extensions on compatible attribute proxies in
     * scopes having ambiguous implicit conversions.
     */
    def int = this
  }

  /**
   * Extensions for expressions over boolean values.<br/>
   * <br/>
   * Static methods are thread safe, instance methods are not.
   *
   * @param expr Expression being proxied.
   * @param queryBuilder Host query builder.
   */
  class BooleanExtensions(
    override val expr : Expression[java.lang.Boolean],
    override val queryBuilder : IQueryBuilder
  )
  extends
    ExpressionProxy[java.lang.Boolean](expr, queryBuilder) with
    OperatorExtensions.BooleanExtensions
  {
    /** Extractor for left side of the expression. */
    override val __leftSideExpr : (() => Expression[java.lang.Boolean]) = { () => expr }

    /**
     * Allows forcing boolean extensions on compatible attribute proxies in
     * scopes having ambiguous implicit conversions.
     */
    def bool = this
  }
}
/**
 * Proxy for expressions.<br/>
 * <br/>
 * Uses in WHERE and HAVING clauses are enabled by implicit cast to one of extension classes
 * defined in companion object. <br/>
 * <br/>
 * Uses in results sets are enabled by implicit cast to VALUE, exposing actual value of the
 * expression.<br/>
 * <br/>
 * Static members are thread safe, instance members are not.
 *
 * @param VALUE Type of underlying value of expression being proxied.
 *
 * @param expr Expression being proxied.
 * @param queryBuilder Host query builder.
 */
class ExpressionProxy[VALUE](
  val expr : Expression[VALUE],
  val queryBuilder : IQueryBuilder
)
extends
  ISelectionProvider[VALUE] with
  IExpressionProvider[VALUE]
{
  /**
   * Get selection compatible part of the proxied object for SELECT statement.
   * @return Selection compatible part of the proxied object for SELECT statement.
   */
  override def __getSelection() : Selection[VALUE] = expr

  /**
   * Get expression compatible part of the proxied object.
   * @return Expression compatible part of the proxied object.
   */
  override def __getExpression() : Expression[VALUE] = expr

  /**
   * Ascending ordering for this expression.
   */
  def asc : Order = queryBuilder.criteriaBuilder.asc(expr)

  /**
   * Descending ordering for this expression.
   */
  def desc : Order = queryBuilder.criteriaBuilder.desc(expr)

  /**
   * Derive ordering from this expression.
   * @param asc True to derive ascending ordering, false to derive descending ordering.
   * @return Ordering derived.
   */
  def order(asc : Boolean) : Order = if( asc ) this.asc else this.desc
}
/*
* Copyright 2020 David Edwards
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.loopfor.zookeeper.cli.command
import com.loopfor.scalop._
import com.loopfor.zookeeper._
import java.nio.charset.Charset
import scala.annotation.tailrec
object Get {
  val Usage = """usage: get [OPTIONS] [PATH...]
Gets the data for the node specified by each PATH. PATH may be omitted, in
which case the current working path is assumed.
By default, data is displayed in a hex/ASCII table with offsets, though the
output format can be changed using --string or --binary. If --string is
chosen, it may be necessary to also specify the character encoding if the
default of `UTF-8` is incorrect. The CHARSET in --encoding refers to any of
the possible character sets installed on the underlying JRE.
options:
--hex, -h : display data as hex/ASCII (default)
--string, -s : display data as string (see --encoding)
--binary, -b : display data as binary
--encoding, -e CHARSET : charset for use with --string (default=UTF-8)
"""

  private val UTF_8 = Charset forName "UTF-8"

  // Renders a node's raw data to stdout in one of the supported formats.
  private type DisplayFunction = Array[Byte] => Unit

  // Option spec: three boolean display-mode flags plus the charset used by --string.
  private lazy val opts =
    ("hex", 'h') ~> just(true) ~~ false ::
    ("string", 's') ~> just(true) ~~ false ::
    ("binary", 'b') ~> just(true) ~~ false ::
    ("encoding", 'e') ~> as[Charset] ~~ UTF_8 ::
    Nil

  /** Builds the interactive `get` command; paths are resolved against the current context. */
  def command(zk: Zookeeper) = new CommandProcessor {
    implicit val _zk = zk
    def apply(cmd: String, args: Seq[String], context: Path): Path = {
      val optr = opts <~ args
      val display = displayOpt(optr)
      val nodes = pathArgs(optr).map { path => Node(context.resolve(path)) }
      get(nodes, display)
      context
    }
  }

  /** Builds the `find`-style processor that displays each matched node. */
  def find(zk: Zookeeper, args: Seq[String]) = new FindProcessor {
    val optr = opts <~ args
    val display = displayOpt(optr)
    def apply(node: Node): Unit = {
      get(Seq(node), display)
    }
  }

  // Fetches and displays each node, prefixing output with the node path when
  // more than one node is requested; missing nodes are reported, not fatal.
  private def get(nodes: Seq[Node], display: DisplayFunction): Unit = {
    val count = nodes.size
    nodes.foldLeft(1) { case (i, node) =>
      try {
        val (data, _) = node.get()
        if (count > 1) println(s"${node.path}:")
        display(data)
        if (count > 1 && i < count) println()
      } catch {
        case _: NoNodeException => println(s"${node.path}: no such node")
      }
      i + 1
    }
  }

  // Selects the display function; --hex takes precedence over --binary over --string.
  private def displayOpt(optr: OptResult): DisplayFunction = {
    if (optr[Boolean]("hex")) displayHex _
    else if (optr[Boolean]("binary")) displayBinary _
    else if (optr[Boolean]("string")) displayString(optr[Charset]("encoding")) _
    else displayHex _
  }

  // No PATH argument means "the current working path" (empty relative path).
  private def pathArgs(optr: OptResult): Seq[Path] = optr.args match {
    case Seq() => Seq(Path(""))
    case paths => paths.map { Path(_) }
  }

  // Classic 16-bytes-per-row hex dump: offset column, hex bytes, then an ASCII
  // gutter with non-printable bytes shown as '.'.
  private def displayHex(data: Array[Byte]): Unit = {
    @tailrec def display(n: Int): Unit = {
      def charOf(b: Byte) = { if (b >= 32 && b < 127) b.asInstanceOf[Char] else '.' }
      def pad(n: Int) = { " ".repeat(n) }
      if (n < data.length) {
        val l = Math.min(n + 16, data.length) - n
        print("%08x ".format(n))
        // BUG FIX: `.mkString` must apply to the yielded sequence, not to each
        // formatted String; otherwise `print` receives the IndexedSeq itself and
        // emits "Vector(...)" instead of the hex bytes.
        print((for (i <- n until (n + l)) yield "%02x ".format(data(i))).mkString)
        print(pad((16 - l) * 3))
        print(" |")
        print((for (i <- n until (n + l)) yield charOf(data(i))).mkString)
        print(pad(16 - l))
        println("|")
        display(n + l)
      }
    }
    display(0)
  }

  // Decodes the bytes with the requested charset and prints as one string.
  private def displayString(encoding: Charset)(data: Array[Byte]) = {
    println(new String(data, encoding))
  }

  // Writes the raw bytes straight to stdout.
  private def displayBinary(data: Array[Byte]) = {
    Console.out.write(data, 0, data.length)
  }
}
| davidledwards/zookeeper | zookeeper-cli/src/main/scala/com/loopfor/zookeeper/cli/command/Get.scala | Scala | apache-2.0 | 4,191 |
package equellatests.tests
import cats.instances.vector._
import cats.syntax.all._
import equellatests.domain.{ItemId, RandomWords, TestLogon, Uniqueify}
import equellatests.instgen.workflow
import equellatests.pages.search.ManageResourcesPage
import equellatests.restapi._
import equellatests.sections.search.{BulkOpConfirm, BulkOperationDialog, ResetToTaskConfigPage}
import equellatests.{SimpleSeleniumBrowser, SimpleTestCase, StatefulProperties}
import io.circe.generic.semiauto._
import io.circe.generic.auto._
import io.circe.{Decoder, Encoder}
import org.scalacheck.{Arbitrary, Gen, Prop}
import org.scalacheck.Prop._
// Stateful property test driving the "bulk item operations" admin UI and
// verifying the resulting item state over the REST API.
object BulkItemProperties extends StatefulProperties("BulkItemOps") with SimpleTestCase {

  // The bulk operations this test knows how to run, with circe codecs so
  // generated test cases can be serialized/replayed.
  object BulkItemOp extends Enumeration {
    type BulkItemOp = Value
    val removeWorkflow, resetToTask = Value
    implicit val encJson = Encoder.enumEncoder(BulkItemOp)
    implicit val decJson = Decoder.enumDecoder(BulkItemOp)
  }

  // A concrete operation to execute; `typ` identifies which BulkItemOp it is.
  sealed trait BulkOp {
    def typ: BulkItemOp.Value
  }
  // Operation that needs no extra configuration in the bulk-op dialog.
  case class NoParamOp(typ: BulkItemOp.Value) extends BulkOp
  // "Reset to workflow task" with the target task name and moderation comment.
  case class ResetToTask(task: String, message: String) extends BulkOp {
    def typ = BulkItemOp.resetToTask
  }

  // One test-case command: create items with these names, then run `op` on them.
  case class RunBulkOp(names: Seq[String], op: BulkOp)
  // State tracks which operation types have already been exercised, so each
  // generated sequence runs every op at most once.
  case class BulkItemState(ops: BulkItemOp.ValueSet = BulkItemOp.ValueSet.empty)

  override type Command = RunBulkOp
  override type State = BulkItemState
  override implicit val testCaseDecoder: Decoder[RunBulkOp] = deriveDecoder
  override implicit val testCaseEncoder: Encoder[RunBulkOp] = deriveEncoder
  override def initialState: BulkItemState = BulkItemState()
  // Running a command marks its op type as done.
  override def runCommand(c: RunBulkOp, s: BulkItemState): BulkItemState = s.copy(s.ops + c.op.typ)

  // Drives the op-specific pages of the bulk-operation dialog wizard.
  def configOp(bog: BulkOperationDialog, op: BulkOp): Unit = op match {
    case NoParamOp(t) =>
      val title = t match {
        case BulkItemOp.removeWorkflow => "Removing from workflow"
      }
      bog.next(BulkOpConfirm(title))
    case ResetToTask(t, msg) =>
      val cp = bog.next(ResetToTaskConfigPage)
      cp.selectTask(t)
      cp.comment = msg
  }

  // UI label of each operation in the bulk-action dropdown.
  def opName(op: BulkItemOp.Value): String = op match {
    case BulkItemOp.removeWorkflow => "Remove from workflow..."
    case BulkItemOp.resetToTask => "Reset to workflow task..."
  }

  // Builds the REST-API assertion for each op: removeWorkflow items must end up
  // live; resetToTask items must sit at the target task with the comment recorded
  // as a task-move history event.
  def checkProp(op: BulkOp, itemIds: Vector[ItemId]): ERest[Prop] = op match {
    case NoParamOp(o) =>
      o match {
        case BulkItemOp.removeWorkflow =>
          for {
            items <- itemIds.traverse(RItems.get)
          } yield all(items.map(i => i.status ?= RStatus.live): _*)
      }
    case ResetToTask(task, msg) =>
      for {
        items <- itemIds.traverse(i => RItems.getModeration(i).product(RItems.getHistory(i)))
      } yield
        all(items.map {
          case (mod, history) =>
            all(
              mod.firstIncompleteTask
                .map(_.name ?= task)
                .getOrElse(Prop.falsified.label(s"Meant to be at task $task")),
              history.collectFirst {
                case he: RHistoryEvent
                    if he.`type` == RHistoryEventType.taskMove &&
                      he.comment.isDefined && he.toStepName.contains(task) =>
                  he.comment.get
              } ?= Some(msg)
            )
        }: _*)
  }

  // Creates the items via REST, selects them in the Manage Resources UI, runs
  // the bulk op through the dialog, then verifies via checkProp.
  override def runCommandInBrowser(c: RunBulkOp, s: BulkItemState, b: SimpleSeleniumBrowser): Prop =
    b.verify {
      b.resetUnique()
      val ctx = b.page.ctx
      val opType = c.op.typ
      // Create the test items up-front through the REST API.
      val itemIds = ERest.run(ctx) {
        c.names.toVector.traverse[ERest, ItemId] { n =>
          val item = RCreateItem(RCollectionRef(workflow.threeStepWMUuid),
                                 workflow.simpleMetadata(b.uniquePrefix(n)))
          RItems.create(item)
        }
      }
      val mrp = ManageResourcesPage(ctx).load()
      mrp.query = b.allUniqueQuery
      mrp.search()
      // resetToTask only applies to items in moderation, so narrow the filters;
      // otherwise make sure no stale filters hide the items.
      c.op match {
        case ResetToTask(_, _) =>
          val filters = mrp.openFilters()
          filters.onlyModeration(true)
          filters.filterByWorkflow(Some("3 Step with multiple users"))
        case NoParamOp(_) => mrp.clearFiltersIfSet()
      }
      c.names.foreach { n =>
        mrp.resultForName(b.uniquePrefix(n)).select()
      }
      val bog = mrp.performOperation()
      bog.selectAction(opName(opType))
      configOp(bog, c.op)
      bog.execute()
      mrp -> ERest.run(ctx)(checkProp(c.op, itemIds))
    }

  override def logon: TestLogon = workflow.adminLogon

  // Command generator: pick a not-yet-run op, 3-10 unique item names, and (for
  // resetToTask) a random target task and comment.
  statefulProp("run bulk ops") {
    generateCommands { s =>
      val remainingOps = BulkItemOp.values -- s.ops
      if (remainingOps.isEmpty) List()
      else {
        for {
          opEnum <- Gen.oneOf(remainingOps.toSeq)
          numItems <- Gen.chooseNum(3, 10)
          names <- Gen
            .listOfN(numItems, Arbitrary.arbitrary[RandomWords])
            .map(Uniqueify.uniqueSeq(RandomWords.withNumberAfter))
            .map(_.map(_.asString))
          op <- opEnum match {
            case BulkItemOp.resetToTask =>
              for {
                task <- Gen.oneOf(workflow.workflow3StepTasks)
                msg <- Arbitrary.arbitrary[RandomWords]
              } yield ResetToTask(task, msg.asString)
            case o => Gen.const(NoParamOp(o))
          }
        } yield List(RunBulkOp(names, op))
      }
    }
  }
}
| equella/Equella | autotest/Tests/src/test/scala/equellatests/tests/BulkItemProperties.scala | Scala | apache-2.0 | 5,351 |
package com.norbitltd.spoiwo.model.enums
/** Predefined cell fill styles; values wrap the fill-pattern name as a string. */
object CellFill {
  // Plain fills.
  lazy val None = CellFill("None")
  lazy val Solid = CellFill("Solid")

  /** Patterned fills, grouped by pattern family. */
  object Pattern {
    lazy val AltBars = CellFill("AltBars")
    lazy val BigSpots = CellFill("BigSpots")
    lazy val Bricks = CellFill("Bricks")
    lazy val Diamonds = CellFill("Diamonds")
    lazy val Squares = CellFill("Squares")

    /** Dot patterns at increasing sparsity. */
    object Dots {
      lazy val Fine = CellFill("Fine")
      lazy val Least = CellFill("Least")
      lazy val Less = CellFill("Less")
      lazy val Sparse = CellFill("Sparse")
    }

    /** Diagonal line patterns by direction and weight. */
    object Diagonals {
      lazy val ThinBackward = CellFill("ThinBackward")
      lazy val ThinForward = CellFill("ThinForward")
      lazy val ThickBackward = CellFill("ThickBackward")
      lazy val ThickForward = CellFill("ThickForward")
    }

    /** Horizontal/vertical band patterns by weight. */
    object Bands {
      lazy val ThickHorizontal = CellFill("ThickHorizontal")
      lazy val ThickVertical = CellFill("ThickVertical")
      lazy val ThinHorizontal = CellFill("ThinHorizontal")
      lazy val ThinVertical = CellFill("ThinVertical")
    }
  }
}
/**
 * A named cell fill style. The constructor is private: obtain instances only
 * through the predefined values in the [[CellFill]] companion object.
 */
case class CellFill private(value: String){
  // Render as the underlying fill-pattern name.
  override def toString = value
}
| intracer/spoiwo | src/main/scala/com/norbitltd/spoiwo/model/enums/CellFill.scala | Scala | mit | 1,155 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.zookeeper
import java.util
import java.util.Locale
import java.util.concurrent.locks.{ReentrantLock, ReentrantReadWriteLock}
import java.util.concurrent._
import com.yammer.metrics.core.{Gauge, MetricName}
import kafka.metrics.KafkaMetricsGroup
import kafka.utils.CoreUtils.{inLock, inReadLock, inWriteLock}
import kafka.utils.{KafkaScheduler, Logging}
import org.apache.kafka.common.utils.Time
import org.apache.zookeeper.AsyncCallback._
import org.apache.zookeeper.KeeperException.Code
import org.apache.zookeeper.Watcher.Event.{EventType, KeeperState}
import org.apache.zookeeper.ZooKeeper.States
import org.apache.zookeeper.data.{ACL, Stat}
import org.apache.zookeeper._
import scala.collection.JavaConverters._
import scala.collection.Seq
import scala.collection.mutable.Set
/**
* A ZooKeeper client that encourages pipelined requests.
*
* @param connectString comma separated host:port pairs, each corresponding to a zk server
* @param sessionTimeoutMs session timeout in milliseconds
* @param connectionTimeoutMs connection timeout in milliseconds
* @param maxInFlightRequests maximum number of unacknowledged requests the client will send before blocking.
* @param name name of the client instance
*/
class ZooKeeperClient(connectString: String,
sessionTimeoutMs: Int,
connectionTimeoutMs: Int,
maxInFlightRequests: Int,
time: Time,
metricGroup: String,
metricType: String,
name: Option[String]) extends Logging with KafkaMetricsGroup {
// Convenience constructor for callers that do not name the client instance;
// delegates with name = None (which selects the default log ident).
def this(connectString: String,
         sessionTimeoutMs: Int,
         connectionTimeoutMs: Int,
         maxInFlightRequests: Int,
         time: Time,
         metricGroup: String,
         metricType: String) = {
  this(connectString, sessionTimeoutMs, connectionTimeoutMs, maxInFlightRequests, time, metricGroup, metricType, None)
}
// Prefix every log line with the optional client name for disambiguation.
this.logIdent = name match {
  case Some(n) => s"[ZooKeeperClient $n] "
  case _ => "[ZooKeeperClient] "
}

// Guards (re)initialization of the underlying ZooKeeper handle: request senders
// take the read lock, session re-creation takes the write lock.
private val initializationLock = new ReentrantReadWriteLock()

// Lock + condition used to wait for the session to reach connected/expired state.
private val isConnectedOrExpiredLock = new ReentrantLock()
private val isConnectedOrExpiredCondition = isConnectedOrExpiredLock.newCondition()

// Registered watch handlers, keyed by znode path.
private val zNodeChangeHandlers = new ConcurrentHashMap[String, ZNodeChangeHandler]().asScala
private val zNodeChildChangeHandlers = new ConcurrentHashMap[String, ZNodeChildChangeHandler]().asScala

// Bounds the number of unacknowledged requests in flight (back-pressure).
private val inFlightRequests = new Semaphore(maxInFlightRequests)
private val stateChangeHandlers = new ConcurrentHashMap[String, StateChangeHandler]().asScala

// Single-threaded scheduler used for session-expiry handling.
private[zookeeper] val expiryScheduler = new KafkaScheduler(threads = 1, "zk-session-expiry-handler")

// Names of metrics registered by this client, collected so they can be removed later.
private val metricNames = Set[String]()

// The state map has to be created before creating ZooKeeper since it's needed in the ZooKeeper callback.
private val stateToMeterMap = {
  import KeeperState._
  val stateToEventTypeMap = Map(
    Disconnected -> "Disconnects",
    SyncConnected -> "SyncConnects",
    AuthFailed -> "AuthFailures",
    ConnectedReadOnly -> "ReadOnlyConnects",
    SaslAuthenticated -> "SaslAuthentications",
    Expired -> "Expires"
  )
  // One per-second meter per ZooKeeper session-state transition.
  stateToEventTypeMap.map { case (state, eventType) =>
    val name = s"ZooKeeper${eventType}PerSec"
    metricNames += name
    state -> newMeter(name, eventType.toLowerCase(Locale.ROOT), TimeUnit.SECONDS)
  }
}

info(s"Initializing a new session to $connectString.")
// Fail-fast if there's an error during construction (so don't call initialize, which retries forever)
@volatile private var zooKeeper = new ZooKeeper(connectString, sessionTimeoutMs, ZooKeeperClientWatcher)
newGauge("SessionState", new Gauge[String] {
  override def value: String = Option(connectionState.toString).getOrElse("DISCONNECTED")
})
metricNames += "SessionState"
expiryScheduler.startup()
// Wait (bounded) for the initial connection; on any failure clean up and rethrow
// so the constructor never leaks a half-initialized client.
try waitUntilConnected(connectionTimeoutMs, TimeUnit.MILLISECONDS)
catch {
  case e: Throwable =>
    close()
    throw e
}
// Route all KafkaMetricsGroup metrics through the group/type supplied to the constructor.
override def metricName(name: String, metricTags: scala.collection.Map[String, String]): MetricName = {
  explicitMetricName(metricGroup, metricType, name, metricTags)
}
/**
 * Return the state of the ZooKeeper connection.
 */
def connectionState: States = zooKeeper.getState
/**
 * Send a request and wait for its response. See handle(Seq[AsyncRequest]) for details.
 *
 * @param request a single request to send and wait on.
 * @return an instance of the response with the specific type (e.g. CreateRequest -> CreateResponse).
 */
def handleRequest[Req <: AsyncRequest](request: Req): Req#Response = {
  // Single-request convenience wrapper; the Seq always has exactly one response.
  handleRequests(Seq(request)).head
}
/**
 * Send a pipelined sequence of requests and wait for all of their responses.
 *
 * The watch flag on each outgoing request will be set if we've already registered a handler for the
 * path associated with the request.
 *
 * @param requests a sequence of requests to send and wait on.
 * @return the responses for the requests. If all requests have the same type, the responses will have the respective
 *         response type (e.g. Seq[CreateRequest] -> Seq[CreateResponse]). Otherwise, the most specific common supertype
 *         will be used (e.g. Seq[AsyncRequest] -> Seq[AsyncResponse]).
 */
def handleRequests[Req <: AsyncRequest](requests: Seq[Req]): Seq[Req#Response] = {
  if (requests.isEmpty)
    Seq.empty
  else {
    // Latch counts down once per completed request; the queue collects responses
    // in completion (not submission) order.
    val countDownLatch = new CountDownLatch(requests.size)
    val responseQueue = new ArrayBlockingQueue[Req#Response](requests.size)
    requests.foreach { request =>
      // Acquire an in-flight permit before sending (back-pressure); the permit is
      // released by the response callback, or immediately below if send throws.
      inFlightRequests.acquire()
      try {
        // Read lock: sending must not race with session re-initialization.
        inReadLock(initializationLock) {
          send(request) { response =>
            responseQueue.add(response)
            inFlightRequests.release()
            countDownLatch.countDown()
          }
        }
      } catch {
        case e: Throwable =>
          // The callback will never fire for a failed send, so release here.
          inFlightRequests.release()
          throw e
      }
    }
    // Block until every callback has fired, then drain the responses.
    countDownLatch.await()
    responseQueue.asScala.toBuffer
  }
}
// Visibility to override for testing
// Issues a single asynchronous ZooKeeper call for the given request and adapts the Java-style
// callback into a typed Req#Response delivered to processResponse. Does not block.
private[zookeeper] def send[Req <: AsyncRequest](request: Req)(processResponse: Req#Response => Unit): Unit = {
  // Safe to cast as we always create a response of the right type
  def callback(response: AsyncResponse): Unit = processResponse(response.asInstanceOf[Req#Response])

  // Captures the send time so each response carries its round-trip metadata.
  def responseMetadata(sendTimeMs: Long) = new ResponseMetadata(sendTimeMs, receivedTimeMs = time.hiResClockMs())

  val sendTimeMs = time.hiResClockMs()
  // One branch per request type; each maps onto the matching ZooKeeper async API.
  request match {
    case ExistsRequest(path, ctx) =>
      zooKeeper.exists(path, shouldWatch(request), new StatCallback {
        override def processResult(rc: Int, path: String, ctx: Any, stat: Stat): Unit =
          callback(ExistsResponse(Code.get(rc), path, Option(ctx), stat, responseMetadata(sendTimeMs)))
      }, ctx.orNull)
    case GetDataRequest(path, ctx) =>
      zooKeeper.getData(path, shouldWatch(request), new DataCallback {
        override def processResult(rc: Int, path: String, ctx: Any, data: Array[Byte], stat: Stat): Unit =
          callback(GetDataResponse(Code.get(rc), path, Option(ctx), data, stat, responseMetadata(sendTimeMs)))
      }, ctx.orNull)
    case GetChildrenRequest(path, ctx) =>
      zooKeeper.getChildren(path, shouldWatch(request), new Children2Callback {
        override def processResult(rc: Int, path: String, ctx: Any, children: java.util.List[String], stat: Stat): Unit =
          callback(GetChildrenResponse(Code.get(rc), path, Option(ctx),
            // children can be null on failure; normalize to an empty Seq.
            Option(children).map(_.asScala).getOrElse(Seq.empty), stat, responseMetadata(sendTimeMs)))
      }, ctx.orNull)
    case CreateRequest(path, data, acl, createMode, ctx) =>
      zooKeeper.create(path, data, acl.asJava, createMode, new StringCallback {
        override def processResult(rc: Int, path: String, ctx: Any, name: String): Unit =
          callback(CreateResponse(Code.get(rc), path, Option(ctx), name, responseMetadata(sendTimeMs)))
      }, ctx.orNull)
    case SetDataRequest(path, data, version, ctx) =>
      zooKeeper.setData(path, data, version, new StatCallback {
        override def processResult(rc: Int, path: String, ctx: Any, stat: Stat): Unit =
          callback(SetDataResponse(Code.get(rc), path, Option(ctx), stat, responseMetadata(sendTimeMs)))
      }, ctx.orNull)
    case DeleteRequest(path, version, ctx) =>
      zooKeeper.delete(path, version, new VoidCallback {
        override def processResult(rc: Int, path: String, ctx: Any): Unit =
          callback(DeleteResponse(Code.get(rc), path, Option(ctx), responseMetadata(sendTimeMs)))
      }, ctx.orNull)
    case GetAclRequest(path, ctx) =>
      // null Stat argument: the stat is delivered through the callback instead.
      zooKeeper.getACL(path, null, new ACLCallback {
        override def processResult(rc: Int, path: String, ctx: Any, acl: java.util.List[ACL], stat: Stat): Unit = {
          callback(GetAclResponse(Code.get(rc), path, Option(ctx), Option(acl).map(_.asScala).getOrElse(Seq.empty),
            stat, responseMetadata(sendTimeMs)))
        }}, ctx.orNull)
    case SetAclRequest(path, acl, version, ctx) =>
      zooKeeper.setACL(path, acl.asJava, version, new StatCallback {
        override def processResult(rc: Int, path: String, ctx: Any, stat: Stat): Unit =
          callback(SetAclResponse(Code.get(rc), path, Option(ctx), stat, responseMetadata(sendTimeMs)))
      }, ctx.orNull)
    case MultiRequest(zkOps, ctx) =>
      zooKeeper.multi(zkOps.map(_.toZookeeperOp).asJava, new MultiCallback {
        override def processResult(rc: Int, path: String, ctx: Any, opResults: util.List[OpResult]): Unit = {
          // opResults may be null when the whole multi op failed before producing per-op results.
          callback(MultiResponse(Code.get(rc), path, Option(ctx),
            if (opResults == null)
              null
            else
              zkOps.zip(opResults.asScala) map { case (zkOp, result) => ZkOpResult(zkOp, result) },
            responseMetadata(sendTimeMs)))
        }
      }, ctx.orNull)
  }
}
/**
 * Wait indefinitely until the underlying zookeeper client to reaches the CONNECTED state.
 * @throws ZooKeeperClientAuthFailedException if the authentication failed either before or while waiting for connection.
 * @throws ZooKeeperClientExpiredException if the session expired either before or while waiting for connection.
 */
def waitUntilConnected(): Unit = inLock(isConnectedOrExpiredLock) {
  // NOTE(review): the timed overload below acquires the same lock again — this assumes
  // isConnectedOrExpiredLock is reentrant; verify against its declaration.
  waitUntilConnected(Long.MaxValue, TimeUnit.MILLISECONDS)
}

// Waits for the CONNECTED state for at most the given timeout, or throws if the session
// terminally failed (auth failure / closed) or the timeout elapses.
private def waitUntilConnected(timeout: Long, timeUnit: TimeUnit): Unit = {
  info("Waiting until connected.")
  var nanos = timeUnit.toNanos(timeout)
  inLock(isConnectedOrExpiredLock) {
    var state = connectionState
    while (!state.isConnected && state.isAlive) {
      if (nanos <= 0) {
        throw new ZooKeeperClientTimeoutException(s"Timed out waiting for connection while in state: $state")
      }
      // awaitNanos returns the remaining wait budget; the watcher signals this
      // condition on every connection state change.
      nanos = isConnectedOrExpiredCondition.awaitNanos(nanos)
      state = connectionState
    }
    if (state == States.AUTH_FAILED) {
      throw new ZooKeeperClientAuthFailedException("Auth failed either before or while waiting for connection")
    } else if (state == States.CLOSED) {
      throw new ZooKeeperClientExpiredException("Session expired either before or while waiting for connection")
    }
  }
  info("Connected.")
}
// If this method is changed, the documentation for registerZNodeChangeHandler and/or registerZNodeChildChangeHandler
// may need to be updated.
/** Whether a watch should be set for this request, i.e. a handler is registered for its path. */
private def shouldWatch(request: AsyncRequest): Boolean = request match {
  case r: GetChildrenRequest => zNodeChildChangeHandlers.contains(r.path)
  case r @ (_: ExistsRequest | _: GetDataRequest) => zNodeChangeHandlers.contains(r.path)
  case other => throw new IllegalArgumentException(s"Request $other is not watchable")
}
/**
 * Register the handler to ZooKeeperClient. This is just a local operation. This does not actually register a watcher.
 *
 * The watcher is only registered once the user calls handle(AsyncRequest) or handle(Seq[AsyncRequest])
 * with either a GetDataRequest or ExistsRequest.
 *
 * NOTE: zookeeper only allows registration to a nonexistent znode with ExistsRequest.
 *
 * @param zNodeChangeHandler the handler to register
 */
def registerZNodeChangeHandler(zNodeChangeHandler: ZNodeChangeHandler): Unit = {
  zNodeChangeHandlers.put(zNodeChangeHandler.path, zNodeChangeHandler)
}

/**
 * Unregister the handler from ZooKeeperClient. This is just a local operation.
 * @param path the path of the handler to unregister
 */
def unregisterZNodeChangeHandler(path: String): Unit = {
  zNodeChangeHandlers.remove(path)
}

/**
 * Register the handler to ZooKeeperClient. This is just a local operation. This does not actually register a watcher.
 *
 * The watcher is only registered once the user calls handle(AsyncRequest) or handle(Seq[AsyncRequest]) with a GetChildrenRequest.
 *
 * @param zNodeChildChangeHandler the handler to register
 */
def registerZNodeChildChangeHandler(zNodeChildChangeHandler: ZNodeChildChangeHandler): Unit = {
  zNodeChildChangeHandlers.put(zNodeChildChangeHandler.path, zNodeChildChangeHandler)
}

/**
 * Unregister the handler from ZooKeeperClient. This is just a local operation.
 * @param path the path of the handler to unregister
 */
def unregisterZNodeChildChangeHandler(path: String): Unit = {
  zNodeChildChangeHandlers.remove(path)
}

/**
 * Register a handler for connection state changes (session initialization, auth failure).
 * This is just a local operation; a null handler is silently ignored.
 *
 * @param stateChangeHandler the handler to register, keyed by its name
 */
def registerStateChangeHandler(stateChangeHandler: StateChangeHandler): Unit = inReadLock(initializationLock) {
  if (stateChangeHandler != null)
    stateChangeHandlers.put(stateChangeHandler.name, stateChangeHandler)
}

/**
 * Unregister the state change handler registered under the given name. This is just a local operation.
 *
 * @param name the name the handler was registered with
 */
def unregisterStateChangeHandler(name: String): Unit = inReadLock(initializationLock) {
  stateChangeHandlers.remove(name)
}
/**
 * Close the client: stop the expiry scheduler, clear all locally registered handlers,
 * close the underlying ZooKeeper session and remove this client's metrics.
 */
def close(): Unit = {
  info("Closing.")
  // Shutdown scheduler outside of lock to avoid deadlock if scheduler
  // is waiting for lock to process session expiry. Close expiry thread
  // first to ensure that new clients are not created during close().
  expiryScheduler.shutdown()
  inWriteLock(initializationLock) {
    zNodeChangeHandlers.clear()
    zNodeChildChangeHandlers.clear()
    stateChangeHandlers.clear()
    zooKeeper.close()
    metricNames.foreach(removeMetric(_))
  }
  info("Closed.")
}

/** Session id of the current underlying ZooKeeper session. */
def sessionId: Long = inReadLock(initializationLock) {
  zooKeeper.getSessionId
}

// Only for testing
private[kafka] def currentZooKeeper: ZooKeeper = inReadLock(initializationLock) {
  zooKeeper
}
/**
 * Re-establish the ZooKeeper session if the current one is no longer alive, invoking all
 * registered state change handlers before and after the new session is created.
 */
private def reinitialize(): Unit = {
  // Initialization callbacks are invoked outside of the lock to avoid deadlock potential since their completion
  // may require additional Zookeeper requests, which will block to acquire the initialization lock
  stateChangeHandlers.values.foreach(callBeforeInitializingSession _)
  inWriteLock(initializationLock) {
    // Re-check liveness under the write lock: another thread may already have reinitialized.
    if (!connectionState.isAlive) {
      zooKeeper.close()
      info(s"Initializing a new session to $connectString.")
      // retry forever until ZooKeeper can be instantiated
      var connected = false
      while (!connected) {
        try {
          zooKeeper = new ZooKeeper(connectString, sessionTimeoutMs, ZooKeeperClientWatcher)
          connected = true
        } catch {
          case e: Exception =>
            info("Error when recreating ZooKeeper, retrying after a short sleep", e)
            Thread.sleep(1000)
        }
      }
    }
  }
  stateChangeHandlers.values.foreach(callAfterInitializingSession _)
}

/**
 * Close the zookeeper client to force session reinitialization. This is visible for testing only.
 */
private[zookeeper] def forceReinitialize(): Unit = {
  zooKeeper.close()
  reinitialize()
}
/** Invoke beforeInitializingSession on the handler, logging (not propagating) anything it throws. */
private def callBeforeInitializingSession(handler: StateChangeHandler): Unit = {
  try handler.beforeInitializingSession()
  catch {
    case throwable: Throwable =>
      error(s"Uncaught error in handler ${handler.name}", throwable)
  }
}

/** Invoke afterInitializingSession on the handler, logging (not propagating) anything it throws. */
private def callAfterInitializingSession(handler: StateChangeHandler): Unit = {
  try handler.afterInitializingSession()
  catch {
    case throwable: Throwable =>
      error(s"Uncaught error in handler ${handler.name}", throwable)
  }
}
// Visibility for testing
// Runs session re-initialization on the expiry scheduler rather than inline,
// so the ZooKeeper event thread that observed the expiry is not blocked.
private[zookeeper] def scheduleSessionExpiryHandler(): Unit = {
  expiryScheduler.scheduleOnce("zk-session-expired", () => {
    info("Session expired.")
    reinitialize()
  })
}
// package level visibility for testing only
// Single watcher for both connection state events and znode change events.
private[zookeeper] object ZooKeeperClientWatcher extends Watcher {
  override def process(event: WatchedEvent): Unit = {
    debug(s"Received event: $event")
    // Events without a path are connection state changes; events with a path are znode changes.
    Option(event.getPath) match {
      case None =>
        val state = event.getState
        stateToMeterMap.get(state).foreach(_.mark())
        // Wake up any thread blocked in waitUntilConnected so it can re-read the state.
        inLock(isConnectedOrExpiredLock) {
          isConnectedOrExpiredCondition.signalAll()
        }
        if (state == KeeperState.AuthFailed) {
          error("Auth failed.")
          stateChangeHandlers.values.foreach(_.onAuthFailure())
        } else if (state == KeeperState.Expired) {
          scheduleSessionExpiryHandler()
        }
      case Some(path) =>
        // NOTE(review): @unchecked assumes only these four event types ever carry a path;
        // any other type would surface as a MatchError — confirm against ZooKeeper's EventType.
        (event.getType: @unchecked) match {
          case EventType.NodeChildrenChanged => zNodeChildChangeHandlers.get(path).foreach(_.handleChildChange())
          case EventType.NodeCreated => zNodeChangeHandlers.get(path).foreach(_.handleCreation())
          case EventType.NodeDeleted => zNodeChangeHandlers.get(path).foreach(_.handleDeletion())
          case EventType.NodeDataChanged => zNodeChangeHandlers.get(path).foreach(_.handleDataChange())
        }
    }
  }
}
}
/** Callbacks around ZooKeeper session lifecycle events; all methods default to no-ops. */
trait StateChangeHandler {
  // Unique key under which the handler is registered.
  val name: String
  def beforeInitializingSession(): Unit = {}
  def afterInitializingSession(): Unit = {}
  def onAuthFailure(): Unit = {}
}

/** Callbacks for changes to a single znode; all methods default to no-ops. */
trait ZNodeChangeHandler {
  // Path of the znode this handler watches.
  val path: String
  def handleCreation(): Unit = {}
  def handleDeletion(): Unit = {}
  def handleDataChange(): Unit = {}
}

/** Callback for child changes under a znode; defaults to a no-op. */
trait ZNodeChildChangeHandler {
  // Path of the parent znode this handler watches.
  val path: String
  def handleChildChange(): Unit = {}
}
// Thin wrapper for zookeeper.Op
sealed trait ZkOp {
  // Convert this wrapper into the raw zookeeper.Op used inside multi requests.
  def toZookeeperOp: Op
}

/** Create a znode with the given data, ACLs and create mode. */
case class CreateOp(path: String, data: Array[Byte], acl: Seq[ACL], createMode: CreateMode) extends ZkOp {
  override def toZookeeperOp: Op = Op.create(path, data, acl.asJava, createMode)
}

/** Delete a znode if its version matches. */
case class DeleteOp(path: String, version: Int) extends ZkOp {
  override def toZookeeperOp: Op = Op.delete(path, version)
}

/** Overwrite a znode's data if its version matches. */
case class SetDataOp(path: String, data: Array[Byte], version: Int) extends ZkOp {
  override def toZookeeperOp: Op = Op.setData(path, data, version)
}

/** Assert that a znode exists with the given version, without modifying it. */
case class CheckOp(path: String, version: Int) extends ZkOp {
  override def toZookeeperOp: Op = Op.check(path, version)
}

// Pairs each submitted ZkOp with the raw OpResult returned for it by a multi request.
case class ZkOpResult(zkOp: ZkOp, rawOpResult: OpResult)
/** Base trait for all asynchronous ZooKeeper requests handled by this client. */
sealed trait AsyncRequest {
  /**
   * This type member allows us to define methods that take requests and return responses with the correct types.
   * See ``ZooKeeperClient.handleRequests`` for example.
   */
  type Response <: AsyncResponse
  // Znode path the request operates on (null for MultiRequest, which spans several paths).
  def path: String
  // Optional opaque context carried through to the response.
  def ctx: Option[Any]
}
// Concrete request types; each fixes the Response type member to its matching response class.
case class CreateRequest(path: String, data: Array[Byte], acl: Seq[ACL], createMode: CreateMode,
                         ctx: Option[Any] = None) extends AsyncRequest {
  type Response = CreateResponse
}

case class DeleteRequest(path: String, version: Int, ctx: Option[Any] = None) extends AsyncRequest {
  type Response = DeleteResponse
}

case class ExistsRequest(path: String, ctx: Option[Any] = None) extends AsyncRequest {
  type Response = ExistsResponse
}

case class GetDataRequest(path: String, ctx: Option[Any] = None) extends AsyncRequest {
  type Response = GetDataResponse
}

case class SetDataRequest(path: String, data: Array[Byte], version: Int, ctx: Option[Any] = None) extends AsyncRequest {
  type Response = SetDataResponse
}

case class GetAclRequest(path: String, ctx: Option[Any] = None) extends AsyncRequest {
  type Response = GetAclResponse
}

case class SetAclRequest(path: String, acl: Seq[ACL], version: Int, ctx: Option[Any] = None) extends AsyncRequest {
  type Response = SetAclResponse
}

case class GetChildrenRequest(path: String, ctx: Option[Any] = None) extends AsyncRequest {
  type Response = GetChildrenResponse
}

case class MultiRequest(zkOps: Seq[ZkOp], ctx: Option[Any] = None) extends AsyncRequest {
  type Response = MultiResponse

  // A multi request spans several ops/paths, so it has no single path of its own.
  override def path: String = null
}
/** Base class for all asynchronous ZooKeeper responses. */
sealed abstract class AsyncResponse {
  // Result code reported by ZooKeeper for the originating request.
  def resultCode: Code
  // Path of the znode the originating request targeted.
  def path: String
  // Opaque context carried over from the request.
  def ctx: Option[Any]

  /** Return None if the result code is OK and KeeperException otherwise. */
  def resultException: Option[KeeperException] =
    if (resultCode == Code.OK) None else Some(KeeperException.create(resultCode, path))

  /**
   * Throw KeeperException if the result code is not OK.
   */
  def maybeThrow(): Unit = {
    if (resultCode != Code.OK)
      throw KeeperException.create(resultCode, path)
  }

  // Timing information for the request/response round trip.
  def metadata: ResponseMetadata
}

// Send/receive timestamps (hi-res clock milliseconds) for a single request.
case class ResponseMetadata(sendTimeMs: Long, receivedTimeMs: Long) {
  // Round-trip latency of the request in milliseconds.
  def responseTimeMs: Long = receivedTimeMs - sendTimeMs
}
// Concrete response types mirroring the request hierarchy above.
case class CreateResponse(resultCode: Code, path: String, ctx: Option[Any], name: String,
                          metadata: ResponseMetadata) extends AsyncResponse
case class DeleteResponse(resultCode: Code, path: String, ctx: Option[Any],
                          metadata: ResponseMetadata) extends AsyncResponse
case class ExistsResponse(resultCode: Code, path: String, ctx: Option[Any], stat: Stat,
                          metadata: ResponseMetadata) extends AsyncResponse
case class GetDataResponse(resultCode: Code, path: String, ctx: Option[Any], data: Array[Byte], stat: Stat,
                           metadata: ResponseMetadata) extends AsyncResponse
case class SetDataResponse(resultCode: Code, path: String, ctx: Option[Any], stat: Stat,
                           metadata: ResponseMetadata) extends AsyncResponse
case class GetAclResponse(resultCode: Code, path: String, ctx: Option[Any], acl: Seq[ACL], stat: Stat,
                          metadata: ResponseMetadata) extends AsyncResponse
case class SetAclResponse(resultCode: Code, path: String, ctx: Option[Any], stat: Stat,
                          metadata: ResponseMetadata) extends AsyncResponse
case class GetChildrenResponse(resultCode: Code, path: String, ctx: Option[Any], children: Seq[String], stat: Stat,
                               metadata: ResponseMetadata) extends AsyncResponse
// zkOpResults is null when the whole multi operation failed before producing per-op results.
case class MultiResponse(resultCode: Code, path: String, ctx: Option[Any], zkOpResults: Seq[ZkOpResult],
                         metadata: ResponseMetadata) extends AsyncResponse

// Client-level exception hierarchy, distinct from ZooKeeper's own KeeperException.
class ZooKeeperClientException(message: String) extends RuntimeException(message)
class ZooKeeperClientExpiredException(message: String) extends ZooKeeperClientException(message)
class ZooKeeperClientAuthFailedException(message: String) extends ZooKeeperClientException(message)
class ZooKeeperClientTimeoutException(message: String) extends ZooKeeperClientException(message)
| noslowerdna/kafka | core/src/main/scala/kafka/zookeeper/ZooKeeperClient.scala | Scala | apache-2.0 | 24,257 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.common.json
import com.twitter.zipkin.common.Endpoint
import java.nio.ByteBuffer
import java.net.InetAddress
/**
 * Container for sanitized endpoint data.
 * This differs from thrift endpoint in that port is unsigned
 * and the address is a dotted quad string.
 *
 * @param ipv4 dotted-quad IPv4 address string
 * @param port unsigned port number
 * @param serviceName name of the service running at this endpoint
 */
case class JsonEndpoint(ipv4: String, port: Int, serviceName: String)
object JsonEndpoint {
  /**
   * Build a sanitized [[JsonEndpoint]] from a thrift [[Endpoint]]: the address as a
   * dotted-quad string and the port widened to an unsigned value.
   *
   * Public factory methods should declare their result type explicitly rather than
   * relying on inference.
   */
  def apply(host: Endpoint): JsonEndpoint =
    new JsonEndpoint(host.getHostAddress, host.getUnsignedPort, host.serviceName)
}
| dduvnjak/zipkin | zipkin-web/src/main/scala/com/twitter/zipkin/common/json/JsonEndpoint.scala | Scala | apache-2.0 | 1,105 |
package eu.gruchala.trampolines
import eu.gruchala.BaseSpec
/**
 * Specs for the trampolined State implementation.
 *
 * NOTE(review): the whole suite is `ignore`d because it reportedly behaves
 * non-deterministically on Scala 2.12 — re-enable once that is resolved.
 */
class TrampolinedStateSpecs extends BaseSpec {

  //Seem to be working randomly on Scala 2.12
  "Trampolined State implementation" ignore {
    "safely finish execution" when {
      "combined with zipIndex function" in {
        // 10000 elements exercises stack safety of the trampolined zipIndex.
        TStateOps.zipIndex(List.fill(10000)("A")) should not be empty
      }
    }
  }
}
| leszekgruchala/scala-exercises | src/test/scala/eu/gruchala/trampolines/TrampolinedStateSpecs.scala | Scala | apache-2.0 | 374 |
package org.neold
import TestingTools._
import org.scalatest._
import org.neold.adapters.Adapters._
import org.neold.adapters.Adapters.BatchAdapter._
/** Exercises Neold's batch endpoint: statements are buffered and then executed in one batch. */
class BatchTest extends FlatSpec with Matchers {

  "[BATCH ENDPOINT] Neold" can "buffer and execute statements" in {
    // Extracts count(n) from row 0 of the batch result at position `id` via the Option adapter;
    // -1 signals a missing or unparsable result.
    def countOpt(results : String, id : Int) = {
      mapResultRow(toOption(results), 0, id){
        _("count(n)").toInt
      }.getOrElse(-1)
    }
    // Same extraction via the Either adapter.
    // NOTE(review): countEither is never called in this test — dead code or a leftover example?
    def countEither(results : String, id : Int) = {
      toEither(results).fold(
        _ => -1,
        _(id)(0)("count(n)").toInt
      )
    }
    // Batch of five statements: count, insert, count, delete, count.
    // The middle count (index 2) must therefore see exactly one extra node.
    neo.bufferQuery(countQuery, params)
    neo.bufferQuery(insertQueryObjectParam, paramsObject)
    neo.bufferQuery(countQuery, params)
    neo.bufferQuery(deleteQuery, params)
    neo.bufferQuery(countQuery, params)
    val results = waitCompletion(neo.performBatch()())
    val beforeCount = countOpt(results, 0)
    val duringCount = countOpt(results, 2)
    val afterCount = countOpt(results, 4)
    beforeCount should equal (duringCount - 1)
    beforeCount should equal (afterCount)
  }
}
| elbywan/neold | src/test/scala/org/neold/BatchTest.scala | Scala | gpl-3.0 | 1,184 |
package org.jetbrains.plugins.scala.codeInspection.typeChecking
import com.intellij.codeInspection.{ProblemHighlightType, ProblemsHolder}
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.annotator.PatternAnnotator
import org.jetbrains.plugins.scala.codeInspection.typeChecking.PatternMayNeverMatchInspection.{ScPatternExpectedAndPatternType, inspectionName}
import org.jetbrains.plugins.scala.codeInspection.{AbstractInspection, InspectionBundle}
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScPattern
import org.jetbrains.plugins.scala.lang.psi.types.ComparingUtil._
import org.jetbrains.plugins.scala.lang.psi.types.api.ScTypePresentation
import org.jetbrains.plugins.scala.lang.psi.types.{ScType, ScTypeExt}
/**
* Author: Svyatoslav Ilinskiy
* Date: 21.12.15.
*/
/** Registers a warning on patterns whose type can never match the expected (scrutinee) type. */
class PatternMayNeverMatchInspection extends AbstractInspection(inspectionName) {

  override def actionFor(implicit holder: ProblemsHolder): PartialFunction[PsiElement, Any] = {
    case pat@ScPatternExpectedAndPatternType(exTp, patType) =>
      // Only report when the pattern neither matches, conforms to, nor could ever be a
      // subtype of the expected type.
      if (!PatternAnnotator.matchesPattern(exTp, patType) && !patType.conforms(exTp) &&
        !isNeverSubType(exTp, patType)) {
        //need to check so inspection highlighting doesn't interfere with PatterAnnotator's
        val message = PatternMayNeverMatchInspection.message(exTp, patType)
        holder.registerProblem(pat, message, ProblemHighlightType.GENERIC_ERROR_OR_WARNING)
      }
  }
}
object PatternMayNeverMatchInspection {
  // Stable id used to enable/suppress the inspection.
  val inspectionId = "PatternMayNeverMatch"
  val inspectionName = InspectionBundle.message("pattern.may.never.match")

  /** Renders the warning text, qualifying the two types only as far as needed to tell them apart. */
  def message(_expected: ScType, _found: ScType): String = {
    val (expected, found) = ScTypePresentation.different(_expected, _found)
    InspectionBundle.message("pattern.may.never.match", expected, found)
  }

  /** Extractor yielding (expected type, pattern type) when both can be computed for the pattern. */
  object ScPatternExpectedAndPatternType {
    def unapply(pat: ScPattern): Option[(ScType, ScType)] = {
      (pat.expectedType, PatternAnnotator.patternType(pat)) match {
        case (Some(expected), Some(pattern)) => Option((expected, pattern))
        case _ => None
      }
    }
  }
}
| jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/codeInspection/typeChecking/PatternMayNeverMatchInspection.scala | Scala | apache-2.0 | 2,127 |
package gapt.testing
import ammonite.ops.FilePath
import gapt.expr.Const
import gapt.expr.formula.Atom
import gapt.logic.hol.CNFn
import gapt.proofs.context.Context
import gapt.proofs.context.mutable.MutableContext
import gapt.proofs.expansion.{ eliminateCutsET, eliminateDefsET }
import gapt.proofs.lk.transformations.LKToExpansionProof
import gapt.proofs.resolution._
import gapt.provers.prover9.Prover9Importer
import gapt.utils.{ LogHandler, Logger, MetricsPrinter }
/**
 * Benchmark driver: imports a prover9 proof and extracts an expansion proof either via LK
 * ("vialk") or directly from resolution ("restoexp"), emitting timing/size metrics throughout.
 *
 * Usage: testResolutionToExpansion <prover9 proof file> <vialk|restoexp>
 */
object testResolutionToExpansion extends scala.App {
  val logger = Logger( "testResolutionToExpansion" )
  import logger._

  val metricsPrinter = new MetricsPrinter
  LogHandler.current.value = metricsPrinter

  try time( "total" ) {
    // Destructuring fails (MatchError) unless exactly two arguments are supplied.
    val Seq( p9proofFile, method ) = args.toSeq
    metric( "file", p9proofFile )
    metric( "method", method )

    val ( resolution0, endSequent ) = time( "p9import" ) {
      Prover9Importer.robinsonProofWithReconstructedEndSequent( FilePath( p9proofFile ), runFixDerivation = false )
    }
    metric( "size_res_dag", resolution0.dagLike.size )
    metric( "size_res_tree", resolution0.treeLike.size )

    val equational = containsEquationalReasoning( resolution0 )
    metric( "equational", equational )

    val proof = time( "extraction" ) {
      method match {
        case "vialk" =>
          // Route 1: resolution -> LK proof (via clause projections) -> expansion proof.
          val resolution = time( "fixderivation" ) { fixDerivation( resolution0, CNFn( endSequent.toImplication ).toSeq ) }
          val projections = time( "projections" ) {
            Map() ++ resolution.subProofs.collect {
              case in @ Input( clause ) =>
                in -> PCNF( endSequent, clause.map( _.asInstanceOf[Atom] ) )
            }
          }
          metric( "no_projs", projections.size )
          metric( "size_projs", projections.view.map( _._2.treeLike.size ).sum )
          val lk = time( "restolk" ) { ResolutionToLKProof( resolution, projections ) }
          metric( "size_lk_tree", lk.treeLike.size )
          metric( "size_lk_dag", lk.dagLike.size )
          time( "lktoexp" ) { LKToExpansionProof( lk ) }
        case "restoexp" =>
          // Route 2: resolution -> expansion proof with definitions, then eliminate cuts/definitions.
          val resolution = time( "fixderivation" ) { fixDerivation( resolution0, endSequent ) }
          metric( "size_res2_dag", resolution.dagLike.size )
          metric( "size_res2_tree", resolution.treeLike.size )
          implicit val ctx: Context = MutableContext.guess( resolution )
          val expansionWithDefs = time( "withdefs" ) {
            ResolutionToExpansionProof.withDefs(
              resolution,
              ResolutionToExpansionProof.inputsAsExpansionSequent )
          }
          metric( "size_withdefs", expansionWithDefs.size )
          // none of the stuff below should actually happen with prover9 proofs
          val defConsts = resolution.subProofs collect { case d: DefIntro => d.defConst: Const }
          val withDefsCE = time( "cutelim1" ) { eliminateCutsET( expansionWithDefs ) }
          val withoutDefs = time( "defelim" ) { eliminateDefsET( withDefsCE, !equational, defConsts ) }
          time( "cutelim2" ) { eliminateCutsET( withoutDefs ) }
      }
    }
    metric( "size_exp", proof.size )
    metric( "status", "ok" )
  } catch {
    case t: Throwable =>
      // Record the failure instead of crashing so the metrics run completes cleanly.
      metric( "status", "exception" )
      metric( "exception", t.getMessage )
  }
}
| gapt/gapt | testing/src/main/scala/testResolutionToExpansion.scala | Scala | gpl-3.0 | 3,286 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.process.knn
import org.geotools.data.{DataStoreFinder, Query}
import org.geotools.factory.Hints
import org.geotools.feature.DefaultFeatureCollection
import org.geotools.feature.simple.SimpleFeatureBuilder
import org.geotools.filter.text.ecql.ECQL
import org.joda.time.DateTime
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo.data.{AccumuloDataStore, AccumuloFeatureStore}
import org.locationtech.geomesa.accumulo.index.{Constants, IndexSchemaBuilder}
import org.locationtech.geomesa.features.avro.AvroSimpleFeatureFactory
import org.locationtech.geomesa.utils.geohash.VincentyModel
import org.locationtech.geomesa.utils.geotools.Conversions._
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.text.WKTUtils
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
import scala.util.Random
// A test feature: WKT point geometry, a feature id, and a timestamp (defaults to "now").
case class TestEntry(wkt: String, id: String, dt: DateTime = new DateTime())
/**
 * Tests for the K-nearest-neighbor search process against a mock Accumulo store.
 * The setup below writes a dense cluster of named landmarks plus 1000 randomly
 * distributed points; the examples then query both through the KNN process and
 * verify sizes and distance ordering.
 */
@RunWith(classOf[JUnitRunner])
class KNearestNeighborSearchProcessTest extends Specification {

  // Run examples in declaration order: they all read the data written by this setup.
  sequential

  val sftName = "geomesaKNNTestType"
  val sft = SimpleFeatureTypes.createType(sftName, "geom:Point:srid=4326,dtg:Date,dtg_end_time:Date")
  sft.getUserData.put(Constants.SF_PROPERTY_START_TIME,"dtg")

  val ds = createStore
  ds.createSchema(sft)
  val fs = ds.getFeatureSource(sftName).asInstanceOf[AccumuloFeatureStore]
  val featureCollection = new DefaultFeatureCollection(sftName, sft)

  // Tight cluster of named landmark points used by the ordering tests below.
  val clusterOfPoints = List[TestEntry](
    TestEntry("POINT( -78.503547 38.035475 )", "rotunda"),
    TestEntry("POINT( -78.503923 38.035536 )", "pavilion I"),
    TestEntry("POINT( -78.504059 38.035308 )", "pavilion III"),
    TestEntry("POINT( -78.504276 38.034971 )", "pavilion V"),
    TestEntry("POINT( -78.504424 38.034628 )", "pavilion VII"),
    TestEntry("POINT( -78.504617 38.034208 )", "pavilion IX"),
    TestEntry("POINT( -78.503833 38.033938 )", "pavilion X"),
    TestEntry("POINT( -78.503601 38.034343 )", "pavilion VIII"),
    TestEntry("POINT( -78.503424 38.034721 )", "pavilion VI"),
    TestEntry("POINT( -78.503180 38.035039 )", "pavilion IV"),
    TestEntry("POINT( -78.503109 38.035278 )", "pavilion II"),
    TestEntry("POINT( -78.505152 38.032704 )", "cabell"),
    TestEntry("POINT( -78.510295 38.034283 )", "beams"),
    TestEntry("POINT( -78.522288 38.032844 )", "mccormick"),
    TestEntry("POINT( -78.520019 38.034511 )", "hep")
  )

  val distributedPoints = generateTestData(1000, 38.149894, -79.073639, 0.30)

  // add the test points to the feature collection
  addTestData(clusterOfPoints)
  addTestData(distributedPoints)

  // write the feature to the store
  fs.addFeatures(featureCollection)

  def createStore: AccumuloDataStore =
    // the specific parameter values should not matter, as we
    // are requesting a mock data store connection to Accumulo
    DataStoreFinder.getDataStore(Map(
      "instanceId" -> "mycloud",
      "zookeepers" -> "zoo1:2181,zoo2:2181,zoo3:2181",
      "user" -> "myuser",
      "password" -> "mypassword",
      "auths" -> "A,B,C",
      "tableName" -> "testwrite",
      "useMock" -> "true",
      "indexSchemaFormat" -> new IndexSchemaBuilder("~").randomNumber(3).constant("TEST").geoHash(0, 3).date("yyyyMMdd").nextPart().geoHash(3, 2).nextPart().id().build(),
      "featureEncoding" -> "avro")).asInstanceOf[AccumuloDataStore]

  // utility method to generate random points about a central point
  // note that these points will be uniform in cartesian space only
  // (fixed seed keeps the generated data stable across runs)
  def generateTestData(num: Int, centerLat: Double, centerLon: Double, width: Double) = {
    val rng = new Random(0)
    (1 to num).map(i => {
      val wkt = "POINT(" +
        (centerLon + width * (rng.nextDouble() - 0.5)).toString + " " +
        (centerLat + width * (rng.nextDouble() - 0.5)).toString + " " +
        ")"
      val dt = new DateTime()
      TestEntry(wkt, (100000 + i).toString, dt)
    }).toList
  }

  // load data into the featureCollection
  def addTestData(points: List[TestEntry]) = {
    points.foreach { case e: TestEntry =>
      val sf = AvroSimpleFeatureFactory.buildAvroFeature(sft, List(), e.id)
      sf.setDefaultGeometry(WKTUtils.read(e.wkt))
      sf.getUserData()(Hints.USE_PROVIDED_FID) = java.lang.Boolean.TRUE
      featureCollection.add(sf)
    }
  }

  // generates a single SimpleFeature
  def queryFeature(label: String, lat: Double, lon: Double) = {
    val sf = AvroSimpleFeatureFactory.buildAvroFeature(sft, List(), label)
    sf.setDefaultGeometry(WKTUtils.read(f"POINT($lon $lat)"))
    sf.getUserData()(Hints.USE_PROVIDED_FID) = java.lang.Boolean.TRUE
    sf
  }

  // generates a very loose query
  // (a 10x10 degree bounding box around Charlottesville)
  def wideQuery = {
    val lat = 38.0
    val lon = -78.50
    val siteSize = 5.0
    val minLat = lat - siteSize
    val maxLat = lat + siteSize
    val minLon = lon - siteSize
    val maxLon = lon + siteSize
    val queryString = s"BBOX(geom,$minLon, $minLat, $maxLon, $maxLat)"
    val ecqlFilter = ECQL.toFilter(queryString)
    //val fs = getTheFeatureSource(tableName, featureName)
    //new Query(featureName, ecqlFilter, transform)
    new Query(sftName, ecqlFilter)
  }

  // begin tests ------------------------------------------------
  "GeoMesaKNearestNeighborSearch" should {
    "find nothing within 10km of a single query point " in {
      val inputFeatures = new DefaultFeatureCollection(sftName, sft)
      inputFeatures.add(queryFeature("fan mountain", 37.878219, -78.692649))
      val dataFeatures = fs.getFeatures()
      val knn = new KNearestNeighborSearchProcess
      knn.execute(inputFeatures, dataFeatures, 5, 500.0, 10000.0).size must equalTo(0)
    }

    "find 11 points within 400m of a point when k is set to 15 " in {
      val inputFeatures = new DefaultFeatureCollection(sftName, sft)
      inputFeatures.add(queryFeature("madison", 38.036871, -78.502720))
      val dataFeatures = fs.getFeatures()
      val knn = new KNearestNeighborSearchProcess
      knn.execute(inputFeatures, dataFeatures, 15, 50.0, 400.0).size should be equalTo 11
    }

    "handle three query points, one of which will return nothing" in {
      val inputFeatures = new DefaultFeatureCollection(sftName, sft)
      inputFeatures.add(queryFeature("madison", 38.036871, -78.502720))
      inputFeatures.add(queryFeature("fan mountain", 37.878219, -78.692649))
      inputFeatures.add(queryFeature("blackfriars", 38.149185, -79.070569))
      val dataFeatures = fs.getFeatures()
      val knn = new KNearestNeighborSearchProcess
      knn.execute(inputFeatures, dataFeatures, 5, 500.0, 5000.0).size must greaterThan(0)
    }

    "handle an empty query point collection" in {
      val inputFeatures = new DefaultFeatureCollection(sftName, sft)
      val dataFeatures = fs.getFeatures()
      val knn = new KNearestNeighborSearchProcess
      knn.execute(inputFeatures, dataFeatures, 100, 500.0, 5000.0).size must equalTo(0)
    }

    "handle non-point geometries in inputFeatures by ignoring them" in {
      // A LineString query feature must be skipped rather than searched around.
      val sft = SimpleFeatureTypes.createType("lineStringKnn", "geom:LineString:srid=4326")
      val inputFeatures = new DefaultFeatureCollection("lineStringKnn", sft)
      val lineSF = SimpleFeatureBuilder.build(sft, List(), "route 29")
      lineSF.setDefaultGeometry(WKTUtils.read(f"LINESTRING(-78.491 38.062, -78.474 38.082)"))
      inputFeatures.add(lineSF)
      val dataFeatures = fs.getFeatures()
      val knn = new KNearestNeighborSearchProcess
      val res = knn.execute(inputFeatures, dataFeatures, 100, 500.0, 5000.0)
      res.size mustEqual 0
    }
  }

  "runNewKNNQuery" should {
    "return a NearestNeighbors object with features around Charlottesville in correct order" in {
      // Expected order is by increasing distance from the "madison" reference point.
      val orderedFeatureIDs = List("rotunda",
        "pavilion II",
        "pavilion I",
        "pavilion IV",
        "pavilion III",
        "pavilion VI",
        "pavilion V",
        "pavilion VII",
        "pavilion VIII",
        "pavilion IX",
        "pavilion X",
        "cabell",
        "beams",
        "hep",
        "mccormick")
      val knnResults =
        KNNQuery.runNewKNNQuery(fs, wideQuery, 15, 500.0, 2500.0, queryFeature("madison", 38.036871, -78.502720))
      // return the ordered neighbors and extract the SimpleFeatures
      val knnFeatures = knnResults.getK.map { _.sf }
      val knnIDs = knnFeatures.map { _.getID }
      knnIDs must equalTo(orderedFeatureIDs)
    }

    "return a nearestNeighbors object with features around Staunton in correct order" in {
      val k = 10
      val referenceFeature = queryFeature("blackfriars", 38.149185, -79.070569)
      val knnResults =
        KNNQuery.runNewKNNQuery(fs, wideQuery, k, 5000.0, 50000.0, referenceFeature)
      val knnFeatureIDs = knnResults.getK.map { _.sf.getID }
      // Cross-check against a brute-force sort of all features by Vincenty distance.
      val directFeatures = fs.getFeatures().features.toList
      val sortedByDist = directFeatures.sortBy (
        a => VincentyModel.getDistanceBetweenTwoPoints(referenceFeature.point, a.point).getDistanceInMeters).take(k)
      knnFeatureIDs.equals(sortedByDist.map{_.getID}) must beTrue
    }
  }
}
| mdzimmerman/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/accumulo/process/knn/KNearestNeighborSearchProcessTest.scala | Scala | apache-2.0 | 9,593 |
package com.googlecode.kanbanik.builders
import org.bson.types.ObjectId
import com.googlecode.kanbanik.model.Board
import com.googlecode.kanbanik.model.Workflowitem
import com.googlecode.kanbanik.model.Workflow
import com.googlecode.kanbanik.dtos.{WorkflowitemDto, WorkflowDto}
/**
 * Converts between [[WorkflowitemDto]] transfer objects and [[Workflowitem]]
 * entities. "Shallow" variants skip resolving the nested workflow, which is
 * only computed by the non-shallow variants when actually present/needed.
 */
class WorkflowitemBuilder extends BaseBuilder {

  /**
   * Builds an entity from the DTO without resolving the nested workflow.
   * Reuses the DTO's id when present, otherwise mints a fresh ObjectId.
   */
  def buildShallowEntity(dto: WorkflowitemDto, parentWorkflow: Option[Workflow], board: Option[Board]): Workflowitem = {
    // Option(dto.id).flatten also guards against a null Option reference coming
    // from deserialization (the original code checked `dto.id == null` explicitly).
    val id = Option(dto.id).flatten.fold(new ObjectId)(new ObjectId(_))
    new Workflowitem(
      Some(id),
      dto.name,
      dto.wipLimit.getOrElse(0),
      dto.verticalSize.getOrElse(-1),
      dto.itemType,
      dto.version.getOrElse(0),
      // don't calculate the nested workflow if not needed
      Workflow(),
      parentWorkflow
    )
  }

  /** Builds an entity, additionally resolving the nested workflow when the DTO carries one. */
  def buildEntity(dto: WorkflowitemDto, parentWorkflow: Option[Workflow], board: Option[Board]): Workflowitem = {
    val shallow = buildShallowEntity(dto, parentWorkflow, board)
    dto.nestedWorkflow.fold(shallow) { nested =>
      shallow.copy(nestedWorkflow = workflowBuilder.buildEntity(nested, board))
    }
  }

  /**
   * Builds a DTO without the nested workflow. The parent workflow DTO is taken
   * from `parentWorkflow` when provided, otherwise derived from the entity.
   * Note: assumes the entity has a persisted id (`workflowitem.id` is defined).
   */
  def buildShallowDto(workflowitem: Workflowitem, parentWorkflow: Option[WorkflowDto]): WorkflowitemDto = {
    WorkflowitemDto(
      workflowitem.name,
      Some(workflowitem.id.get.toString()),
      Some(workflowitem.wipLimit),
      workflowitem.itemType,
      Some(workflowitem.version),
      None,
      Some(parentWorkflow.getOrElse(workflowBuilder.buildShallowDto(workflowitem.parentWorkflow, parentBoard(parentWorkflow)))),
      Some(workflowitem.verticalSize)
    )
  }

  /** Builds a full DTO, including the nested and parent workflow DTOs. */
  def buildDto(workflowitem: Workflowitem, parentWorkflow: Option[WorkflowDto]): WorkflowitemDto = {
    val dto = buildShallowDto(workflowitem, parentWorkflow)
    dto.copy(
      nestedWorkflow = Some(workflowBuilder.buildDto(workflowitem.nestedWorkflow, parentBoard(parentWorkflow))),
      parentWorkflow = Some(parentWorkflow.getOrElse(workflowBuilder.buildDto(workflowitem.parentWorkflow, parentBoard(parentWorkflow))))
    )
  }

  /** Extracts the board from the parent workflow DTO, if any. */
  def parentBoard(parentWorkflow: Option[WorkflowDto]) =
    parentWorkflow.map(_.board)

  /** Fresh builder per call to avoid cyclic initialization with WorkflowBuilder. */
  def workflowBuilder = new WorkflowBuilder
}
| mortenpoulsen/kanbanik | kanbanik-server/src/main/scala/com/googlecode/kanbanik/builders/WorkflowitemBuilder.scala | Scala | apache-2.0 | 2,333 |
/*
* Copyright (c) 2014 Pascal Voitot
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scaledn
package macros
import org.scalatest._
import scala.util.{Try, Success, Failure}
import scaledn._
import scaledn.macros._
/**
 * Verifies that the compile-time EDN macros parse literal EDN strings into the
 * expected Scala values: EDN(...) for a single typed value, EDNs(...) for a
 * sequence, and the H/HR/Hs/HRs variants for (recursive) shapeless HLists.
 */
class MacrosSpec extends FlatSpec with Matchers with TryValues {
  "EDN Macros" should "parse basic types" in {
    // scalar types
    val e: String = EDN("\\"toto\\"")
    e should equal ("toto")
    val bt: Boolean = EDN("true")
    bt should equal (true)
    val bf: Boolean = EDN("false")
    bf should equal (false)
    val l: Long = EDN("123")
    l should equal (123L)
    val d: Double = EDN("123.456")
    d should equal (123.456)
    // M suffix -> arbitrary-precision integer, N suffix -> arbitrary-precision decimal
    val bi: BigInt = EDN("123M")
    bi should equal (BigInt("123"))
    val bd: BigDecimal = EDN("123.456N")
    bd should equal (BigDecimal("123.456"))
    // symbols and keywords carry their namespace ("foo" here)
    val s: EDNSymbol = EDN("foo/bar")
    s should equal (EDNSymbol("foo/bar", Some("foo")))
    val kw: EDNKeyword = EDN(":foo/bar")
    kw should equal (EDNKeyword(EDNSymbol("foo/bar", Some("foo"))))
    val nil: EDNNil.type = EDN("nil")
    nil should equal (EDNNil)
    // collections: homogeneous forms get precise element types, mixed forms fall back to Any
    val list: List[Long] = EDN("(1 2 3)")
    list should equal (List(1L, 2L, 3L))
    val list2: List[Any] = EDN("""(1 "toto" 3)""")
    list2 should equal (List(1L, "toto", 3L))
    val vector: Vector[String] = EDN("""["tata" "toto" "tutu"]""")
    vector should equal (Vector("tata", "toto", "tutu"))
    val vector2: Vector[Any] = EDN("""[1 "toto" 3]""")
    vector2 should equal (Vector(1L, "toto", 3L))
    val set: Set[Double] = EDN("#{1.23 2.45 3.23}")
    set should equal (Set(1.23, 2.45, 3.23))
    val set2: Set[Any] = EDN("""#{1 "toto" 3}""")
    set2 should equal (Set(1L, "toto", 3L))
    val map: Map[Long, String] = EDN("""{ 1 "toto", 2 "tata", 3 "tutu" }""")
    map should equal (Map(1 -> "toto", 2 -> "tata", 3 -> "tutu"))
    val map2: Map[Any, Any] = EDN("""{ 1 "toto", "tata" 2, 3 "tutu" }""")
    map2 should equal (Map(1 -> "toto", "tata" -> 2, 3 -> "tutu"))
  }
  it should "parse seq" in {
    // EDNs parses multiple top-level EDN forms into a Seq
    val s: Seq[Any] = EDNs("""(1 2 3) "toto" [true false] :foo/bar""")
    s should equal (Seq(
      Seq(1L, 2L, 3L),
      "toto",
      Vector(true, false),
      EDNKeyword(EDNSymbol("foo/bar", Some("foo")))
    ))
  }
  it should "parse single to hlist" in {
    import shapeless.{HNil, ::}
    import shapeless.record._
    import shapeless.syntax.singleton._
    // EDNH turns a single collection into an HList; EDNHR does so recursively
    val s: Long :: String :: Boolean :: HNil = EDNH("""(1 "toto" true)""")
    s should equal (1L :: "toto" :: true :: HNil)
    val s2: Long = EDNH("""1""")
    s2 should equal (1L)
    // maps become shapeless records (key ->> value entries)
    val s3 = EDNH("""{1 "toto" true 1.234 "foo" (1 2 3)}""")
    s3 should equal (
      1L ->> "toto" ::
      true ->> 1.234 ::
      "foo" ->> List(1L, 2L, 3L) ::
      HNil
    )
    val s4 = EDNHR("""{1 "toto" true 1.234 "foo" (1 2 3)}""")
    s4 should equal (
      1L ->> "toto" ::
      true ->> 1.234 ::
      "foo" ->> (1L :: 2L :: 3L :: HNil) ::
      HNil
    )
  }
  it should "parse multiple to hlist" in {
    import shapeless.{HNil, ::}
    // EDNHs: multiple top-level forms to an HList; EDNHRs: recursively HList-ified
    val s: List[Long] :: String :: Vector[Boolean] :: EDNKeyword :: HNil = EDNHs("""(1 2 3) "toto" [true false] :foo/bar""")
    s should equal (
      Seq(1L, 2L, 3L) ::
      "toto" ::
      Vector(true, false) ::
      EDNKeyword(EDNSymbol("foo/bar", Some("foo"))) ::
      HNil
    )
    val s2 = EDNHRs("""(1 2 3) "toto" [true false] :foo/bar""")
    s2 should equal (
      (1L :: 2L :: 3L :: HNil) ::
      "toto" ::
      (true :: false :: HNil) ::
      EDNKeyword(EDNSymbol("foo/bar", Some("foo"))) ::
      HNil
    )
  }
  it should "use string interpolation" in {
    import shapeless.{HNil, ::}
    // scala values can be spliced into EDN literals via standard interpolation
    val l = 123L
    val s = List("foo", "bar")
    val r: Long = EDN(s"$l")
    val r1: Seq[Any] = EDN(s"($l $s)")
    val r2: Long :: List[String] :: HNil = EDNH(s"($l $s)")
  }
}
package controllers
import javax.inject.Inject
import models.BidHelper
import models.dao._
import models.images.Images
import play.api.Configuration
import play.api.cache.Cached
import play.api.data.Forms._
import play.api.data._
import play.api.i18n.I18nSupport
import play.api.mvc._
import play.twirl.api.HtmlFormat
import views._
/**
 * Controller for auction items: item detail pages, bid submission,
 * highest-bid lookup, and item listings (optionally filtered by category
 * and sold status). Detail pages are cached briefly to reduce DB load.
 */
class Items @Inject() (
  mainMenu: MainMenu,
  itemDAO: ItemDAO,
  bidDAO: BidDAO,
  categoryDAO: CategoryDAO,
  val userDAO: UserDAO,
  bidHelper: BidHelper,
  cached: Cached,
  val controllerComponents: ControllerComponents,
  indexTemplate: views.html.index
)(implicit configuration: Configuration, images: Images)
    extends SecuredController
    with I18nSupport {

  // Bid form: bidder email, bid value (must be at least minValue), and
  // whether the bidder wants notifications when outbid.
  def bidForm(minValue: Int = 1): Form[(String, Int, Boolean)] = Form(
    tuple(
      "email" -> email,
      "value" -> number(min = minValue),
      "notifyBetterBids" -> boolean
    )
  )

  // Renders the item detail page. The .get on the user lookup is justified by
  // the session only ever holding emails of existing users (hence the wart
  // suppression), but a missing user would throw — NOTE(review): verify.
  @SuppressWarnings(Array("org.wartremover.warts.OptionPartial"))
  def itemDetailsPage(item: Item, form: Form[(String, Int, Boolean)] = bidForm())(implicit
    request: Request[AnyContent]
  ): HtmlFormat.Appendable = {
    val user: Option[User] = request.session.get("email").map(emailToUser(_).get)
    indexTemplate(body = html.itemDetails(item, bidDAO.highest(item.id), form), menu = mainMenu.menu, user = user)
  }

  // Handles a bid submission: the minimum accepted value is the larger of
  // (current highest bid + 1) and the item's configured minimum.
  def newBid(itemId: Int): Action[AnyContent] = Action { implicit request =>
    itemDAO.findById(itemId) match {
      case Some(item) =>
        val maxBid = bidDAO.highest(itemId).map(_.value.toInt + 1).getOrElse(1)
        val minValue = math.max(maxBid, item.minValue.toInt)
        bidForm(minValue)
          .bindFromRequest()
          .fold(
            formWithErrors => BadRequest(itemDetailsPage(item, formWithErrors)),
            {
              case (email, value, notify) =>
                bidHelper.processBid(email, value, notify, itemId, routes.Items.details(itemId).absoluteURL())
                Redirect(routes.Items.details(itemId))
            }
          )
      case None => NotFound("")
    }
  }

  // Item detail page, cached per item for 5 seconds.
  def details(itemId: Int): EssentialAction = cached((_: RequestHeader) => s"item-${itemId}", 5) {
    Action { implicit request =>
      itemDAO.findById(itemId) match {
        case Some(item) => Ok(itemDetailsPage(item))
        case None => NotFound("that product doesn't exist!") //TODO: create a nice 404 page
      }
    }
  }

  // Plain-text highest bid value for an item (used e.g. for AJAX polling).
  def highestBid(itemId: Int): Action[AnyContent] = Action {
    bidDAO.highest(itemId) match {
      case Some(bid) => Ok(bid.value.toString)
      case None => NotFound
    }
  }

  // Listing endpoints: all items / sold items, optionally within a category.
  def list: Action[AnyContent] = l(sold = false)
  def listSold: Action[AnyContent] = l(sold = true)
  def listCat(cat: String): Action[AnyContent] = l(Some(cat), false)
  def listCatSold(cat: String): Action[AnyContent] = l(Some(cat), true)

  // Shared listing implementation; an unknown category redirects to the root.
  def l(category: Option[String] = None, sold: Boolean): Action[AnyContent] = Action { implicit request =>
    category
      .map { cat =>
        categoryDAO
          .findByName(cat)
          .map { c =>
            Ok(indexTemplate(body = html.body(itemsHigherBids(itemDAO.all(c, sold))), menu = mainMenu.menu))
          }
          .getOrElse(Redirect("/"))
      }
      .getOrElse {
        Ok(indexTemplate(body = html.body(itemsHigherBids(itemDAO.all(sold))), menu = mainMenu.menu))
      }
  }

  // Pairs each item with its current highest bid (if any) for display.
  def itemsHigherBids(items: Seq[Item]): Seq[(Item, Option[Bid])] = items.map(i => (i, bidDAO.highest(i.id)))
}
| jcranky/lojinha | app/controllers/Items.scala | Scala | gpl-3.0 | 3,479 |
package formacion.example
/**
 * Simple interface for anything that can perform a kill.
 */
trait Asesino {
  /** Performs the kill and returns a textual description of the outcome. */
  def asesina(): String
}
| anavidad3/PoC-spark-scala-maven | src/test/scala/formacion/example/Asesino.scala | Scala | apache-2.0 | 101 |
/*
* Copyright 2008-present MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package tour
import org.mongodb.scala._
import org.mongodb.scala.bson.ObjectId
import org.mongodb.scala.model.Filters._
import org.mongodb.scala.model.Sorts._
import org.mongodb.scala.model.Updates._
import tour.Helpers._
/**
* The QuickTour code example
*/
/**
 * The QuickTour code example, using a case class (Person) with a macro-derived
 * codec instead of raw Documents. Demonstrates connect, insert, query, update,
 * delete, and cleanup against a "mydb.test" collection.
 */
object QuickTourCaseClass {
  //scalastyle:off method.length
  /**
   * Run this main method to see the output of this quick example.
   *
   * @param args takes an optional single argument for the connection string
   * @throws Throwable if an operation fails
   */
  def main(args: Array[String]): Unit = {
    // Create the case class
    object Person {
      def apply(firstName: String, lastName: String): Person = Person(new ObjectId(), firstName, lastName);
    }
    case class Person(_id: ObjectId, firstName: String, lastName: String)
    // Create a codec for the Person case class (macro-derived, merged with the defaults)
    import org.mongodb.scala.bson.codecs.Macros._
    import org.mongodb.scala.bson.codecs.DEFAULT_CODEC_REGISTRY
    import org.bson.codecs.configuration.CodecRegistries.{ fromRegistries, fromProviders }
    val codecRegistry = fromRegistries(fromProviders(classOf[Person]), DEFAULT_CODEC_REGISTRY)
    // Create the client (defaults to localhost when no connection string is given)
    val mongoClient: MongoClient = if (args.isEmpty) MongoClient() else MongoClient(args.head)
    // get handle to "mydb" database
    val database: MongoDatabase = mongoClient.getDatabase("mydb").withCodecRegistry(codecRegistry)
    // get a handle to the "test" collection; typed directly to Person via the registry
    val collection: MongoCollection[Person] = database.getCollection("test")
    collection.drop().results()
    // make a document and insert it
    val person: Person = Person("Ada", "Lovelace")
    collection.insertOne(person).results()
    // get it (since it's the only one in there since we dropped the rest earlier on)
    collection.find.first().printResults()
    // now, lets add lots of little documents to the collection so we can explore queries and cursors
    val people: Seq[Person] = Seq(
      Person("Charles", "Babbage"),
      Person("George", "Boole"),
      Person("Gertrude", "Blanch"),
      Person("Grace", "Hopper"),
      Person("Ida", "Rhodes"),
      Person("Jean", "Bartik"),
      Person("John", "Backus"),
      Person("Lucy", "Sanders"),
      Person("Tim", "Berners Lee"),
      Person("Zaphod", "Beeblebrox")
    )
    collection.insertMany(people).printResults()
    // Querying
    collection.find().first().printHeadResult()
    // Query Filters
    collection.find(equal("firstName", "Ida")).first().printHeadResult()
    // now use a range query to get a larger subset
    collection.find(regex("firstName", "^G")).sort(ascending("lastName")).printResults()
    // Update One
    collection.updateOne(equal("lastName", "Berners Lee"), set("lastName", "Berners-Lee")).printHeadResult("Update Result: ")
    // Delete One
    collection.deleteOne(equal("firstName", "Zaphod")).printHeadResult("Delete Result: ")
    // Clean up
    collection.drop().results()
    // release resources
    mongoClient.close()
  }
}
| rozza/mongo-scala-driver | examples/src/test/scala/tour/QuickTourCaseClass.scala | Scala | apache-2.0 | 3,634 |
package org.jetbrains.plugins.scala.decompiler.scalasig
import scala.collection.immutable.ArraySeq
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
/**
* Nikolay.Tropin
* 19-Jul-17
*/
class ScalaSig(val entries: Array[Entry]) {
  // Set to true once parsing has fully populated the caches below.
  private var initialized: Boolean = false

  /** Returns the entry at the given table index. */
  def get(idx: Int): Entry = entries(idx)

  def isInitialized: Boolean = initialized

  /** Marks the signature as fully parsed; called by the parser when done. */
  def finished(): Unit = initialized = true

  // Caches populated incrementally during parsing via the add* methods.
  private val classes = ArrayBuffer.empty[ClassSymbol]
  private val objects = ArrayBuffer.empty[ObjectSymbol]
  private val symAnnots = ArrayBuffer.empty[SymAnnot]
  // Maps a parent symbol's entry index to its child symbols.
  private val parentToChildren = mutable.HashMap.empty[Int, ArrayBuffer[Symbol]]

  def topLevelClasses: Iterable[ClassSymbol] = classes.filter(isTopLevelClass)
  def topLevelObjects: Iterable[ObjectSymbol] = objects.filter(isTopLevel)

  /** Finds the class with the same owner and name as the given object, if any. */
  def findCompanionClass(objectSymbol: ObjectSymbol): Option[ClassSymbol] = {
    val owner: Symbol = objectSymbol.symbolInfo.owner.get
    val name = objectSymbol.name
    classes.find(c => c.info.owner.get.eq(owner) && c.name == name)
  }

  /** Children of the given symbol; keys are resolved by entry-table identity. */
  def children(symbol: ScalaSigSymbol): Iterable[Symbol] = {
    parentToChildren.keysIterator.find(get(_) eq symbol) match {
      case None => Iterable.empty
      case Some(i) => parentToChildren(i)
    }
  }

  /**
   * Annotations attached to the given symbol, de-duplicated by annotation type.
   * Methods are compared with `equiv` to tolerate NullaryMethodType wrapping.
   */
  def attributes(symbol: ScalaSigSymbol): Iterable[SymAnnot] = {
    def sameSymbol(ann: SymAnnot) = (ann.symbol.get, symbol) match {
      case (s, t) if s == t => true
      case (m1: MethodSymbol, m2: MethodSymbol) if equiv(m1, m2) => true
      case _ => false
    }
    val forSameSymbol = symAnnots.filter(sameSymbol)
    forSameSymbol.iterator.distinctBy(_.typeRef).to(ArraySeq)
  }

  // Registration hooks used by the parser while building the signature.
  def addClass(c: ClassSymbol): Unit = classes += c
  def addObject(o: ObjectSymbol): Unit = objects += o
  def addAttribute(a: SymAnnot): Unit = symAnnots += a
  def addChild(parent: Option[Ref[Symbol]], child: Symbol): Unit = {
    parent.foreach { ref =>
      val children = parentToChildren.getOrElseUpdate(ref.index, ArrayBuffer.empty)
      children += child
    }
  }

  // A symbol is top-level when its parent is external (defined outside this sig).
  private def isTopLevel(symbol: Symbol): Boolean = symbol.parent match {
    case Some(_: ExternalSymbol) => true
    case _ => false
  }
  private def isTopLevelClass(symbol: Symbol): Boolean = !symbol.isModule && isTopLevel(symbol)

  // Two methods are equivalent when name, parent and (unwrapped) type all match;
  // NullaryMethodType is unwrapped so `def f: T` matches its underlying type.
  private def equiv(m1: MethodSymbol, m2: MethodSymbol) = {
    def unwrapType(t: Type) = t match {
      case NullaryMethodType(Ref(tp)) => tp
      case _ => t
    }
    m1.name == m2.name && m1.parent == m2.parent &&
      unwrapType(m1.infoType) == unwrapType(m2.infoType)
  }

  /** All symbols registered as children that are marked synthetic. */
  def syntheticSymbols(): Seq[Symbol] =
    parentToChildren.valuesIterator.flatten.filter(_.isSynthetic).toList
}
| JetBrains/intellij-scala | scala/decompiler/src/org/jetbrains/plugins/scala/decompiler/scalasig/ScalaSig.scala | Scala | apache-2.0 | 2,701 |
/*
* The MIT License
*
* Copyright (c) 2019 Fulcrum Genomics LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.fulcrumgenomics.vcf.api
import com.fulcrumgenomics.FgBioDef._
import com.fulcrumgenomics.commons.io.Writer
import com.fulcrumgenomics.util.Io
import htsjdk.samtools.Defaults
import htsjdk.variant.variantcontext.writer.{Options, VariantContextWriter, VariantContextWriterBuilder}
import java.nio.file.Files
import java.nio.file.LinkOption.NOFOLLOW_LINKS
/**
* Writes [[Variant]]s to a file or other storage mechanism.
*
* @param writer the underlying HTSJDK writer
* @param header the header of the VCF
*/
class VcfWriter private(private val writer: VariantContextWriter, val header: VcfHeader) extends Writer[Variant] {
  /** Converts the variant to the HTSJDK representation and appends it to the underlying writer. */
  override def write(variant: Variant): Unit = writer.add(VcfConversions.toJavaVariant(variant, header))

  /** Closes the underlying HTSJDK writer, flushing any buffered output. */
  override def close(): Unit = writer.close()
}
object VcfWriter {
  // Default for async IO, taken from the HTSJDK global defaults; mutable so
  // applications can override it process-wide.
  var DefaultUseAsyncIo: Boolean = Defaults.USE_ASYNC_IO_WRITE_FOR_TRIBBLE
  /**
   * Creates a [[VcfWriter]] that will write to the give path. The path must end in either
   *   - `.vcf` to create an uncompressed VCF file
   *   - `.vcf.gz` to create a block-gzipped VCF file
   *   - `.bcf` to create a binary BCF file
   *
   * @param path the path to write to
   * @param header the header of the VCF
   * @param async whether to use HTSJDK's asynchronous writing
   * @return a VariantWriter to write to the given path
   */
  def apply(path: PathToVcf, header: VcfHeader, async: Boolean = DefaultUseAsyncIo): VcfWriter = {
    import com.fulcrumgenomics.fasta.Converters.ToSAMSequenceDictionary
    val javaHeader = VcfConversions.toJavaHeader(header)
    val builder = new VariantContextWriterBuilder()
      .setOutputPath(path)
      .setReferenceDictionary(header.dict.asSam)
      .setOption(Options.ALLOW_MISSING_FIELDS_IN_HEADER)
      .setBuffer(Io.bufferSize)
    // Only index when writing to a regular file (indexing is meaningless for
    // pipes/devices, and NOFOLLOW_LINKS avoids treating a symlink target as such).
    if (Files.isRegularFile(path, NOFOLLOW_LINKS)) {
      builder.setOption(Options.INDEX_ON_THE_FLY)
    } else {
      builder.unsetOption(Options.INDEX_ON_THE_FLY)
      builder.setIndexCreator(null)
    }
    if (async) builder.setOption(Options.USE_ASYNC_IO) else builder.unsetOption(Options.USE_ASYNC_IO)
    val writer = builder.build()
    writer.writeHeader(javaHeader)
    new VcfWriter(writer, header)
  }
}
| fulcrumgenomics/fgbio | src/main/scala/com/fulcrumgenomics/vcf/api/VcfWriter.scala | Scala | mit | 3,313 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.ssg.bdt.nlp
import scala.collection.mutable.ArrayBuffer
import breeze.linalg.{Vector => BV}
/**
 * A node in the CRF lattice: position `x`, label `y`, with forward (`alpha`)
 * and backward (`beta`) log-scores computed by the forward-backward algorithm,
 * and left/right paths connecting it to adjacent positions.
 */
private[nlp] class Node extends Serializable {
  var x = 0
  var y = 0
  var alpha = 0.0
  var beta = 0.0
  var cost = 0.0
  var bestCost = 0.0
  var prev: Option[Node] = None
  // Index into the feature cache for this node's unigram features.
  var fVector = 0
  val lPath = new ArrayBuffer[Path]()
  val rPath = new ArrayBuffer[Path]()
  /**
   * simplify the log likelihood.
   * Numerically stable log(exp(x) + exp(y)); when `flg` is set the accumulator
   * is (re)initialized to y instead of being combined.
   */
  def logSumExp(x: Double, y: Double, flg: Boolean): Double = {
    val MINUS_LOG_EPSILON = 50.0
    if (flg) y
    else {
      val vMin: Double = math.min(x, y)
      val vMax: Double = math.max(x, y)
      if (vMax > vMin + MINUS_LOG_EPSILON) vMax else vMax + math.log(math.exp(vMin - vMax) + 1.0)
    }
  }
  // Forward pass: alpha = cost + logsumexp over left paths of (path cost + left node's alpha).
  def calcAlpha(nodes: ArrayBuffer[Node]): Unit = {
    alpha = 0.0
    var i = 0
    while (i < lPath.length) {
      alpha = logSumExp(alpha, lPath(i).cost + nodes(lPath(i).lNode).alpha, i == 0)
      i += 1
    }
    alpha += cost
  }
  // Backward pass: beta = cost + logsumexp over right paths of (path cost + right node's beta).
  def calcBeta(nodes: ArrayBuffer[Node]): Unit = {
    beta = 0.0
    var i = 0
    while (i < rPath.length) {
      beta = logSumExp(beta, rPath(i).cost + nodes(rPath(i).rNode).beta, i == 0)
      i += 1
    }
    beta += cost
  }
  /**
   * Accumulates this node's marginal probability exp(alpha + beta - cost - Z)
   * into the expectation vector for each active unigram feature (feature ids
   * in the cache are terminated by -1), then recurses into the left paths for
   * the transition features.
   */
  def calExpectation(
      expected: BV[Double],
      Z: Double,
      size: Int,
      featureCache: ArrayBuffer[Int],
      nodes: ArrayBuffer[Node]): Unit = {
    val c: Double = math.exp(alpha + beta - cost - Z)
    var idx: Int = fVector
    while (featureCache(idx) != -1) {
      expected(featureCache(idx) + y) += c
      idx += 1
    }
    var i = 0
    while (i < lPath.length) {
      lPath(i).calExpectation(expected, Z, size, featureCache, nodes)
      i += 1
    }
  }
}
/**
 * An edge between two adjacent lattice nodes (lNode -> rNode), carrying the
 * transition cost and an index into the feature cache for its bigram features.
 */
private[nlp] class Path extends Serializable {
  var rNode = 0
  var lNode = 0
  var cost = 0.0
  var fVector = 0
  /**
   * Accumulates the edge marginal exp(alpha(l) + cost + beta(r) - Z) into the
   * expectation vector for each active bigram feature; the feature block is
   * addressed by (feature id + leftLabel * size + rightLabel) and the cache is
   * terminated by -1.
   */
  def calExpectation(
      expected: BV[Double],
      Z: Double,
      size: Int,
      featureCache: ArrayBuffer[Int],
      nodes: ArrayBuffer[Node]): Unit = {
    val c: Double = math.exp(nodes(lNode).alpha + cost + nodes(rNode).beta - Z)
    var idx: Int = fVector
    while (featureCache(idx) != -1) {
      expected(featureCache(idx) + nodes(lNode).y * size + nodes(rNode).y) += c
      idx += 1
    }
  }
  /** Wires this path between the given node indices, registering it on both endpoints. */
  def add(lnd: Int, rnd: Int, nodes: ArrayBuffer[Node]): Unit = {
    lNode = lnd
    rNode = rnd
    nodes(lNode).rPath.append(this)
    nodes(rNode).lPath.append(this)
  }
}
| Intel-bigdata/CRF-Spark | src/main/scala/com/intel/ssg/bdt/nlp/Graph.scala | Scala | apache-2.0 | 3,217 |
/*
* Copyright 2012 Pellucid and Zenexity
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package datomisca
package macros
import scala.language.experimental.macros
import scala.reflect.macros.Context
import scala.collection.mutable
import scala.collection.JavaConverters._
import java.{lang => jl}
import java.{math => jm}
import clojure.{lang => clj}
/**
 * Macro-bundle helper that lifts parsed EDN/Clojure runtime values into
 * compile-time trees (`c.Tree`) reconstructing the equivalent value.
 * The `stk` stack carries spliced Scala expressions, consumed when the
 * reserved `!` symbol is encountered.
 */
private[datomisca] class Helper[C <: Context](val c: C) {
  import c.universe._

  private def abortWithMessage(message: String) =
    c.abort(c.enclosingPosition, message)

  /** Dispatches on the runtime EDN value type and delegates to the matching literal* method. */
  def literalEDN(edn: Any, stk: mutable.Stack[c.Tree] = mutable.Stack.empty[c.Tree]): c.Tree =
    edn match {
      case b: java.lang.Boolean =>
        literalBoolean(b)
      case s: java.lang.String =>
        q"$s"
      case c: java.lang.Character =>
        literalCharacter(c)
      case s: clj.Symbol =>
        literalCljSymbol(s, stk)
      case k: clj.Keyword =>
        literalCljKeyword(k)
      case l: java.lang.Long =>
        literalLong(l)
      case d: java.lang.Double =>
        literalDouble(d)
      case d: java.math.BigDecimal =>
        literalBigDecimal(d)
      case i: clj.BigInt =>
        literalCljBigInt(i)
      case r: clj.Ratio =>
        literalCljRatio(r)
      case coll: clj.PersistentVector =>
        literalVector(coll, stk)
      case coll: clj.PersistentList =>
        literalList(coll, stk)
      case coll: clj.IPersistentMap =>
        literalMap(coll, stk)
      case coll: clj.PersistentHashSet =>
        literalSet(coll, stk)
      case x =>
        if (x == null)
          abortWithMessage("nil is not supported")
        else
          abortWithMessage(s"unexpected value $x with ${x.getClass}")
    }

  def literalBoolean(b: jl.Boolean): c.Tree =
    q"new _root_.java.lang.Boolean(${b.booleanValue})"

  /**
   * Symbols are rebuilt by interning; the reserved symbol `!` instead pops a
   * spliced Scala expression from the stack and re-reads it as EDN at runtime.
   */
  def literalCljSymbol(s: clj.Symbol, stk: mutable.Stack[c.Tree]): c.Tree = {
    val m = s.meta
    if (m == null) {
      if (s.getName() == "!")
        try {
          val t = stk.pop()
          if (t.tpe =:= typeOf[String]) {
            // String splices must be re-quoted so they read back as EDN strings.
            q"""_root_.datomic.Util.read("\\"%s\\"".format($t))"""
          } else {
            q"_root_.datomic.Util.read($t.toString)"
          }
        } catch {
          case ex: NoSuchElementException =>
            abortWithMessage("The symbol '!' is reserved by Datomisca")
        }
      else
        q"_root_.clojure.lang.Symbol.intern(${s.getNamespace()}, ${s.getName()})"
    } else {
      // Preserve Clojure metadata attached to the symbol.
      val metaT = literalMap(m, stk)
      q"_root_.clojure.lang.Symbol.intern(${s.getNamespace()}, ${s.getName()}).withMeta($metaT).asInstanceOf[clojure.lang.Symbol]"
    }
  }

  def literalCljKeyword(k: clj.Keyword): c.Tree =
    q"_root_.clojure.lang.Keyword.intern(${k.getNamespace()}, ${k.getName()})"

  def literalLong(l: jl.Long): c.Tree =
    q"new _root_.java.lang.Long(${l.longValue})"

  def literalDouble(d: jl.Double): c.Tree =
    q"new _root_.java.lang.Double(${d.doubleValue})"

  // Fix: the generated path previously contained a duplicated "_root_._root_."
  // prefix, which cannot resolve when the macro expansion is compiled.
  def literalCljBigInt(k: clj.BigInt): c.Tree =
    q"_root_.clojure.lang.BigInt.fromBigInteger(new _root_.java.math.BigInteger(${k.toString}))"

  def literalCljRatio(r: clj.Ratio): c.Tree =
    q"new _root_.clojure.lang.Ratio(new _root_.java.math.BigInteger(${r.numerator.toString}), new _root_.java.math.BigInteger(${r.denominator.toString}))"

  def literalBigDecimal(d: jm.BigDecimal): c.Tree =
    q"new _root_.java.math.BigDecimal(${d.toString})"

  def literalCharacter(char: jl.Character): c.Tree =
    q"_root_.java.lang.Character.valueOf(${char.charValue()})"

  def literalVector(coll: clj.PersistentVector, stk: mutable.Stack[c.Tree]): c.Tree = {
    val args = coll.iterator.asScala.map(literalEDN(_, stk)).toList
    q"_root_.clojure.lang.PersistentVector.create(_root_.java.util.Arrays.asList(..$args))"
  }

  def literalList(coll: clj.PersistentList, stk: mutable.Stack[c.Tree]): c.Tree = {
    val args = coll.iterator.asScala.map(literalEDN(_, stk)).toList
    q"_root_.clojure.lang.PersistentList.create(_root_.java.util.Arrays.asList(..$args))"
  }

  /** Maps are rebuilt via a temporary HashMap populated entry by entry. */
  def literalMap(coll: clj.IPersistentMap, stk: mutable.Stack[c.Tree]): c.Tree = {
    val freshName = newTermName(c.fresh("map$"))
    val builder = List.newBuilder[c.Tree]
    builder += q"val $freshName = new _root_.java.util.HashMap[AnyRef, AnyRef](${coll.count()})"
    for (o <- coll.iterator.asScala) {
      val e = o.asInstanceOf[clj.MapEntry]
      val keyT = literalEDN(e.key(), stk)
      val valT = literalEDN(e.`val`(), stk)
      builder += q"${freshName}.put($keyT, $valT)"
    }
    builder += q"_root_.clojure.lang.PersistentArrayMap.create($freshName)"
    q"{ ..${builder.result} }"
  }

  def literalSet(coll: clj.PersistentHashSet, stk: mutable.Stack[c.Tree]): c.Tree = {
    val args = coll.iterator.asScala.map(literalEDN(_, stk)).toList
    q"_root_.clojure.lang.PersistentHashSet.create(java.util.Arrays.asList(..$args))"
  }

  def literalQueryRules(rules: c.Tree): c.Expr[QueryRules] =
    c.Expr[QueryRules](q"new _root_.datomisca.QueryRules($rules)")

  /**
   * Wraps a query tree into the TypedQueryN class matching its input arity,
   * with the output type derived from the output arity (Unit / Any / TupleN).
   */
  def literalQuery(query: c.Tree, inputSize: Int, outputSize: Int): c.Expr[AbstractQuery] = {
    val typeArgs =
      List.fill(inputSize)(tq"AnyRef") :+
        (outputSize match {
          case 0 => tq"Unit"
          case 1 => tq"Any"
          case n =>
            val typeName = newTypeName("Tuple" + n)
            val args = List.fill(n)(tq"Any")
            tq"$typeName[..$args]"
        })
    val queryClassName =
      Select(
        Select(
          Select(
            Ident(newTermName("_root_")),
            newTermName("datomisca")),
          newTermName("gen")),
        newTypeName("TypedQuery" + inputSize))
    c.Expr[AbstractQuery](q"new $queryClassName[..$typeArgs]($query)")
  }
}
| Enalmada/datomisca | macros/src/main/scala_2.10/datomisca/macros/Helper.scala | Scala | apache-2.0 | 6,213 |
/**
* Copyright 2013 Gianluca Amato
*
* This file is part of JANDOM: JVM-based Analyzer for Numerical DOMains
* JANDOM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* JANDOM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty ofa
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with JANDOM. If not, see <http://www.gnu.org/licenses/>.
*/
package it.unich.jandom.domains.numerical.ppl
import it.unich.jandom.domains.numerical.LinearForm
import parma_polyhedra_library._
/**
* This is a collection of methods used by the PPL-based numerical domains.
* @author Gianluca Amato <gamato@unich.it>
*/
private[jandom] object PPLUtils {
  /**
   * Converts a `LinearForm` into a pair made of a `Linear_Expression` object and a
   * `Coefficient` object, which is the denumerator to be used in linear assignments.
   * The result is cached on the linear form (`lf.toPPL`) to avoid recomputation.
   *
   * @param lf the linear form to convert; its decimal coefficients are scaled
   *           to integers by a common power-of-ten denominator.
   */
  def toPPLLinearExpression(lf: LinearForm[Double]): (Linear_Expression, Coefficient) = {
    if (lf.toPPL != null)
      lf.toPPL.asInstanceOf[(Linear_Expression, Coefficient)]
    else {
      val coeffs = lf.coeffs map { BigDecimal.exact(_) }
      val maxScale = (coeffs map { _.scale }).max
      // Common denominator making every coefficient integral.
      val denumerator = BigDecimal(10) pow maxScale
      val newcoeffs = coeffs map { (x: BigDecimal) => (x * denumerator).toBigIntExact.get.bigInteger }
      // coeffs(0) is the in-homogeneous term, the rest multiply variables 0..dim-1.
      var le: Linear_Expression = new Linear_Expression_Coefficient(new Coefficient(newcoeffs(0)))
      for (i <- 0 until lf.dimension) {
        le = le.sum((new Linear_Expression_Variable(new Variable(i)).times(new Coefficient(newcoeffs(i + 1)))))
      }
      val result = (le, new Coefficient(denumerator.toBigIntExact.get.bigInteger))
      lf.toPPL = result
      result
    }
  }

  /**
   * Converts a PPL linear expression into a LinearForm.
   */
  def fromPPLExpression(e: Linear_Expression): LinearForm[Double] = {
    e match {
      case e: Linear_Expression_Coefficient => LinearForm.c(e.argument().getBigInteger().doubleValue())
      case e: Linear_Expression_Difference => fromPPLExpression(e.left_hand_side()) - fromPPLExpression(e.right_hand_side())
      // Fix: a Sum must add its operands; this case previously subtracted,
      // copy-pasted from the Difference case above.
      case e: Linear_Expression_Sum => fromPPLExpression(e.left_hand_side()) + fromPPLExpression(e.right_hand_side())
      case e: Linear_Expression_Times => fromPPLExpression(e.linear_expression()) * e.coefficient().getBigInteger().doubleValue()
      case e: Linear_Expression_Unary_Minus => - fromPPLExpression(e.argument())
      case e: Linear_Expression_Variable => LinearForm.v(e.argument().id().toInt)
    }
  }

  /**
   * Converts a PPL Constraint into a sequence of LinearForms (each meaning
   * `lf <= 0`). The conversion is only approximate since we cannot represent
   * open (strict) constraints; NOT_EQUAL yields no constraint at all.
   */
  def fromPPLConstraint(c: Constraint): Seq[LinearForm[Double]] = {
    val exp = c.left_hand_side().subtract(c.right_hand_side())
    val lf = fromPPLExpression(exp)
    c.kind match {
      case Relation_Symbol.EQUAL => Seq(lf, -lf)
      case Relation_Symbol.LESS_OR_EQUAL | Relation_Symbol.LESS_THAN => Seq(lf)
      case Relation_Symbol.GREATER_THAN | Relation_Symbol.GREATER_OR_EQUAL => Seq(-lf)
      case Relation_Symbol.NOT_EQUAL => Seq()
    }
  }

  /**
   * Determines whether a PPL constraint has an exact representation as a sequence
   * of linear forms (i.e. it is non-strict).
   */
  def isRepresentableAsLinearForms(c: Constraint): Boolean = {
    c.kind match {
      case Relation_Symbol.EQUAL | Relation_Symbol.LESS_OR_EQUAL | Relation_Symbol.GREATER_OR_EQUAL => true
      case _ => false
    }
  }

  /**
   * Generates a string representation of a constraint system.
   * Temporarily installs a Variable_Stringifier mapping PPL variable ids to
   * the provided names, and restores the default afterwards.
   *
   * @param cs a constraint system
   * @param vars the variables to use for the string form
   */
  def constraintsToString(cs: Constraint_System, vars: Seq[String]): String = {
    import collection.JavaConversions._
    val vs = new Variable_Stringifier {
      def stringify(x: Long) = vars(x.toInt)
    }
    Variable.setStringifier(vs)
    val result = for (c <- cs) yield c.toString
    Variable.setStringifier(null)
    result.mkString("[ ", " , ", " ]")
  }

  /**
   * Converts a sequence into a partial function.
   * @param rho the original sequence. If `rho(i)=j` and `j>=0`, the resulting partial
   * function maps `i` to `j`. If `j<0`, then `i` is not in the domain of the resulting
   * function.
   */
  def sequenceToPartialFunction(rho: Seq[Int]): Partial_Function = {
    val pf = new Partial_Function
    for ((newi, i) <- rho.zipWithIndex; if newi >= 0) {
      pf.insert(i, newi)
    }
    pf
  }
}
} | francescaScozzari/Jandom | core/src/main/ppl/it/unich/jandom/domains/numerical/ppl/PPLUtils.scala | Scala | lgpl-3.0 | 4,926 |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.db
import com.netflix.atlas.core.model.DefaultSettings
import com.netflix.atlas.core.model.DsType
import com.netflix.atlas.core.model.FunctionTimeSeq
import com.netflix.atlas.core.model.TimeSeries
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
/**
 * Simple database with a predefined set of time series. The data set is looked up
 * via [[DataSet.get]] using the `dataset` key of the supplied configuration.
 *
 * @param config config object whose `dataset` entry names the predefined data set to load
 */
class StaticDatabase(config: Config)
    extends SimpleStaticDatabase(DataSet.get(config.getString("dataset")), config)
object StaticDatabase {

  /** Create a simple database with a range of fixed integer values. Mostly used for testing. */
  def range(s: Int, e: Int): Database = {
    // Zero-pad names to the width of the largest value so they sort lexicographically.
    val len = e.toString.length
    val series = (s to e).toList.map { i =>
      val base = Map(
        "name"  -> s"%0${len}d".format(i),
        "class" -> (if (i % 2 == 1) "odd" else "even")
      )
      val tags = if (probablyPrime(i)) base + ("prime" -> "probably") else base
      TimeSeries(tags, new FunctionTimeSeq(DsType.Gauge, DefaultSettings.stepSize, _ => i))
    }
    new SimpleStaticDatabase(series, config)
  }

  /** Generate a database with some synthetic data used for demos and examples. */
  def demo: Database = new SimpleStaticDatabase(DataSet.staticAlertSet, config)

  private def config: Config = ConfigFactory.load().getConfig("atlas.core.db")

  private def probablyPrime(v: Int): Boolean = BigInt(v).isProbablePrime(100)
}
| brharrington/atlas | atlas-core/src/main/scala/com/netflix/atlas/core/db/StaticDatabase.scala | Scala | apache-2.0 | 2,096 |
// Negative compilation test: the quoted identifiers `$_parent` and `$_` inside the
// custom `q` interpolator are expected to be rejected by the compiler (the markers
// on the offending line below are matched by the test framework — do not edit them).
object Test {
  implicit class Foo(sc: StringContext) {
    // Minimal stand-in for a quasiquote-style interpolator; always returns 3.
    object q {
      def apply(arg: Any*): Int = 3
    }
  }
  def f = {
    val _parent = 3
    q"val hello = $_parent"
    q"class $_" // error // error
  }
}
| dotty-staging/dotty | tests/neg/i1779.scala | Scala | apache-2.0 | 218 |
package scala.meta.eden
import org.scalatest.FunSuite
import scala.{meta => m}
import dotty.tools.dotc._
import ast.{tpd, untpd}
import parsing.Parsers.Parser
import util.SourceFile
import core.Contexts.ContextBase
import scala.meta.dialects.Dotty
trait EdenSuite extends FunSuite {
  implicit val ctx = (new ContextBase).initialCtx

  /** Parses `code` as a template statement sequence using the Dotty parser. */
  implicit def dottyParse(code: String): untpd.Tree = {
    val (_, stats) = new Parser(new SourceFile("<meta>", code.toCharArray)).templateStatSeq()
    stats match { case List(stat) => stat; case stats => untpd.Thicket(stats) }
  }

  /** Parses `code` as a statement using the scala.meta parser (Dotty dialect). */
  implicit def metaParse(code: String): m.Stat = {
    import scala.meta._
    code.parse[m.Stat].get
  }

  /** Asserts that converting the Dotty tree for `code` is structurally equal to `expect`. */
  def syntactic(code: String, expect: m.Stat) = {
    test(code) {
      val dTree: untpd.Tree = code
      // Conversion untpd.Tree => m.Tree is supplied by an implicit in scope.
      // (Was a `var` that is never reassigned — tightened to `val`.)
      val convertedTree: m.Tree = dTree
      assert(expect.structure == convertedTree.structure)
    }
  }

  /**
   * Asserts that converting the Dotty tree for `code` is structurally equal to the
   * tree produced by the scala.meta parser for the same source. Debug information is
   * printed when the conversion fails, the trees differ, or `verbose` is set.
   */
  def syntactic(code: String, verbose: Boolean = false): Unit = {
    test(code) {
      val mTree: m.Tree = code
      val dTree: untpd.Tree = code
      // `var` + `null` is intentional here: the conversion may throw, and the
      // finally-block uses the null sentinel to detect that case for debugging.
      var convertedTree: m.Tree = null
      try { convertedTree = dTree } finally {
        if (convertedTree == null || mTree.structure != convertedTree.structure || verbose)
          debug
      }

      def debug = {
        println("<------------")
        println("code:" + code)
        println("dotty:" + dTree)
        println("meta:" + mTree.structure)
        if (convertedTree != null) println("conv:" + convertedTree.structure)
        println("------------>")
      }

      assert(mTree.structure == convertedTree.structure)
    }
  }
}
| liufengyun/eden | src/test/scala/dotty/eden/EdenSuite.scala | Scala | bsd-3-clause | 1,592 |
package katas.scala.classic_katas.datamunging
import org.scalatest.FunSuite
import scala.io.Source
/**
* User: dima
* Date: 24/06/2012
*/
class DataMunging2 extends FunSuite {
  // Day 14 has the smallest temperature spread; Aston Villa the smallest goal difference.
  // NOTE(review): the fixture paths are absolute and machine-specific — TODO make them
  // relative to the project root so the test runs on other machines.
  test("should find rows with min temperature spread and min different between for and against goals") {
    assert(findMinRow("/Users/dima/IdeaProjects/katas/src/main/scala/ru/katas/n4/weather.dat", 1, 2, 3) == ("14", 2))
    assert(findMinRow("/Users/dima/IdeaProjects/katas/src/main/scala/ru/katas/n4/football.dat", 2, 7, 9) == ("Aston_Villa", 1))
  }
def findMinRow(fileName: String, idCol: Int, minCol: Int, maxCol: Int): (String, Int) = {
Source.fromFile(fileName).getLines().toSeq.filter(_.matches(" +\\\\d+.*"))
.map(_.replace("*", "").split(" +")).map { row => (row(idCol), (row(minCol).toInt - row(maxCol).toInt).abs)}.minBy(_._2)
}
} | dkandalov/katas | scala/src/katas/scala/classic_katas/datamunging/DataMunging2.scala | Scala | unlicense | 828 |
package org.openapitools.models
import io.circe._
import io.finch.circe._
import io.circe.generic.semiauto._
import io.circe.java8.time._
import org.openapitools._
import org.openapitools.models.PipelineRunNodeedges
import scala.collection.immutable.Seq
/**
 * Generated API model for a single node of a pipeline run.
 *
 * @param Underscoreclass raw `_class` discriminator returned by the API — presumably the
 *        fully qualified server-side model class name; confirm against the API spec
 * @param displayName human-readable name of the node
 * @param durationInMillis duration of the node, in milliseconds
 * @param edges outgoing edges linking this node to downstream nodes
 * @param id identifier of the node
 * @param result result of the node (value set defined by the API; not validated here)
 * @param startTime time the node started, as a string timestamp
 * @param state execution state of the node (value set defined by the API; not validated here)
 */
case class PipelineRunNode(Underscoreclass: Option[String],
                displayName: Option[String],
                durationInMillis: Option[Int],
                edges: Option[Seq[PipelineRunNodeedges]],
                id: Option[String],
                result: Option[String],
                startTime: Option[String],
                state: Option[String]
                )
object PipelineRunNode {
  /**
   * Creates the codec for converting PipelineRunNode from and to JSON.
   * Both codecs are semi-automatically derived by circe from the case class fields.
   */
  implicit val decoder: Decoder[PipelineRunNode] = deriveDecoder
  implicit val encoder: ObjectEncoder[PipelineRunNode] = deriveEncoder
}
| cliffano/swaggy-jenkins | clients/scala-finch/generated/src/main/scala/org/openapitools/models/PipelineRunNode.scala | Scala | mit | 1,076 |
package com.karasiq.bootstrap.alert
import com.karasiq.bootstrap.context.RenderingContext
import com.karasiq.bootstrap.utils.Utils
trait UniversalAlerts { self: RenderingContext with Alerts with Utils ⇒
  import scalaTags.all._
  import BootstrapAttrs._

  type Alert = UniversalAlert

  /** Factory producing dismissible Bootstrap alerts. */
  object Alert extends AlertFactory {
    def create(style: AlertStyle): UniversalAlert = new UniversalAlert(style)

    // Anchor styled for use inside an alert body.
    lazy val link: Tag = a("alert-link".addClass)
  }

  /** A dismissible, fading alert rendered with the given contextual style. */
  class UniversalAlert(val style: AlertStyle) extends AbstractAlert {
    private[this] val alertClasses = "alert" :: "alert-dismissible" :: "fade" :: "in" :: Nil

    // Standard Bootstrap close button that dismisses the alert.
    def closeButton: Tag =
      button(`type` := "button", "close".addClass, `data-dismiss` := "alert",
        aria.label := "Close", span(aria.hidden := true, raw("×")))

    override def renderTag(md: ModifierT*): TagT =
      div(alertClasses.map(_.addClass), style, role := "alert")(
        closeButton,
        md
      )
  }
}
| Karasiq/scalajs-bootstrap | library/shared/src/main/scala/com/karasiq/bootstrap/alert/UniversalAlerts.scala | Scala | mit | 994 |
/*
*
* __________ ______ __ ________ _ __ ______
* /_ __/ __ \\/ _/ / / / / _/ __ \\/ | / / / ____/
* / / / /_/ // // / / / / // / / / |/ / / / __
* / / / _, _// // /___/ /____/ // /_/ / /| / / /_/ /
* /_/ /_/ |_/___/_____/_____/___/\\____/_/ |_/ \\____/
*
* Copyright (C) 2017 Himchan Park (chan150@dgist.ac.kr)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package kr.acon.io
import java.io.DataOutputStream
import it.unimi.dsi.fastutil.longs.LongOpenHashBigSet
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.io.compress.GzipCodec
import org.apache.hadoop.mapred.{FileOutputFormat, JobConf, RecordWriter}
import org.apache.hadoop.util.{Progressable, ReflectionUtils}
/**
 * Base Hadoop output format for (vertex id, adjacency set) records. Subclasses only
 * provide the record writer for a raw stream; compression handling is shared here.
 */
abstract class BaseOutputFormat extends FileOutputFormat[Long, LongOpenHashBigSet] {
  /** Wraps the given stream into a writer for (vertex id, adjacency set) records. */
  @inline def getRecordWriter(out: DataOutputStream): RecordWriter[Long, LongOpenHashBigSet]

  @inline override def getRecordWriter(ignored: FileSystem,
                                       job: JobConf,
                                       name: String,
                                       progress: Progressable) = {
    val isCompressed = FileOutputFormat.getCompressOutput(job)
    if (!isCompressed) {
      getRecordWriter(rawOutputStream(job, name, progress))
    } else {
      // Resolve the configured codec (GzipCodec by default) and append its
      // extension to the output file name before wrapping the stream.
      val codecClass = FileOutputFormat.getOutputCompressorClass(job, classOf[GzipCodec])
      val codec = ReflectionUtils.newInstance(codecClass, job)
      val fileOut = rawOutputStream(job, name + codec.getDefaultExtension(), progress)
      getRecordWriter(new DataOutputStream(codec.createOutputStream(fileOut)))
    }
  }

  /** Creates the task output file `name` and opens a stream to it (shared by both branches). */
  private def rawOutputStream(job: JobConf, name: String, progress: Progressable) = {
    val file = FileOutputFormat.getTaskOutputPath(job, name)
    val fs = file.getFileSystem(job)
    fs.create(file, progress)
  }
}
| chan150/TrillionG | src/main/scala/kr/acon/io/BaseOutputFormat.scala | Scala | apache-2.0 | 2,446 |
package spgui.widgets.abilityhandler
import java.util.UUID
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.html_<^._
import spgui.communication._
import sp.domain._
import Logic._
object AbilityHandlerWidget {
import sp.devicehandler.{APIVirtualDevice => vdapi}
import sp.abilityhandler.{APIAbilityHandler => abapi}
  // Widget state: known VD resources, ability definitions, and the latest state value per ability id.
  case class State(resources: List[vdapi.Resource], abilities: List[abapi.Ability], abilityState: Map[ID, SPValue])
private class Backend($: BackendScope[Unit, State]) {
    // Request the current abilities/resources as soon as the websocket reports up.
    val abObs = BackendCommunication.getWebSocketStatusObserver( mess => {
      if (mess) sendToAB(abapi.GetAbilities)
    }, abapi.topicResponse)
    val vdObs = BackendCommunication.getWebSocketStatusObserver( mess => {
      if (mess) sendToVD(vdapi.GetResources)
    }, vdapi.topicResponse)
    // Route response messages from the VD and ability topics to the handlers below;
    // both are killed in onUnmount.
    val vdapiHandler = BackendCommunication.getMessageObserver(handleVDMess, vdapi.topicResponse)
    val abapiHandler = BackendCommunication.getMessageObserver(handleABMess, abapi.topicResponse)
    // Handles virtual-device responses: currently only the resource list is stored in state.
    def handleVDMess(mess: SPMessage): Unit = {
      mess.body.to[vdapi.Response].map{
        case vdapi.Resources(r) =>
          $.modState(s => s.copy(resources = r)).runNow()
        case x =>
          // Other VD responses are not handled yet.
          println(s"AbilityHandlerWidget - TODO: $x")
      }
    }
    // Handles ability responses: stores the ability list and merges per-ability state updates.
    def handleABMess(mess: SPMessage): Unit = {
      mess.body.to[abapi.Response].map{
        case abapi.Abilities(a) =>
          $.modState(s => s.copy(abilities = a)).runNow()
        case abapi.AbilityState(id, state) =>
          $.modState{s =>
            // Merge the new per-ability values over the previously known ones.
            val ns = s.abilityState ++ state
            s.copy(abilityState = ns)}.runNow()
        case x =>
          println(s"AbilityHandlerWidget - answers - TODO: $x")
      }
    }
    // Top-level view: header, manual refresh buttons, and the resource/ability tables.
    def render(s: State) = {
      <.div(
        <.h2("Ability Handler"),
        <.br(),
        <.button(
          ^.className := "btn btn-default",
          ^.onClick --> sendToVD(vdapi.GetResources), "Get resources"
        ),
        <.button(
          ^.className := "btn btn-default",
          ^.onClick --> sendToAB(abapi.GetAbilities), "Get abilities"
        ),
        renderResources(s),
        renderAbilities(s)
      )
    }
    // Table listing the names of all known resources.
    def renderResources(s: State) = {
      <.table(
        ^.width:="400px",
        <.caption("Resources"),
        <.thead(
          <.tr(
            <.th(^.width:="100px","Name")
          )
        ),
        <.tbody(
          s.resources.map(r=> {
            <.tr(
              <.td(r.name)
            )
          }).toTagMod
        )
      )
    }
def getAbilityState(s: SPValue): String = {
s.getAs[String]("state").getOrElse("")
}
def getAbilityCount(s: SPValue): Int = {
s.getAs[Int]("count").getOrElse(0)
}
    // Table listing all abilities (sorted by name) with live state, count, and
    // start/reset command buttons.
    // NOTE(review): the caption "Abilties" is a misspelled user-facing string; left
    // unchanged here since this pass only adds documentation.
    def renderAbilities(s: State) = {
      <.table(
        ^.width:="550px",
        <.caption("Abilties"),
        <.thead(
          <.tr(
            <.th(^.width:="200px","Name"),
            <.th(^.width:="100px","State"),
            <.th(^.width:="50px","Count"),
            <.th(^.width:="100px","Start"),
            <.th(^.width:="100px","Reset")
          )
        ),
        <.tbody(
          s.abilities.sortBy(a=>a.name).map(a=> {
            <.tr(
              <.td(a.name),
              <.td(getAbilityState(s.abilityState.getOrElse(a.id, SPValue.empty))),
              <.td(getAbilityCount(s.abilityState.getOrElse(a.id, SPValue.empty))),
              <.td(<.button(
                ^.className := "btn btn-sm",
                ^.onClick --> sendToAB(abapi.StartAbility(a.id)), "Start"
              )),
              <.td(<.button(
                ^.className := "btn btn-sm",
                ^.onClick --> sendToAB(abapi.ForceResetAbility(a.id)), "Reset"
              ))
            )
          }).toTagMod
        )
      )
    }
    // Stops listening to the backend topics when the component is removed from the page.
    def onUnmount() = {
      println("Unmounting")
      vdapiHandler.kill()
      abapiHandler.kill()
      Callback.empty
    }
def sendToVD(mess: vdapi.Request): Callback = {
val h = SPHeader(from = "AbilityHandlerWidget", to = vdapi.service,
reply = SPValue("AbilityHandlerWidget"), reqID = java.util.UUID.randomUUID())
val json = SPMessage.make(SPValue(h), SPValue(mess))
BackendCommunication.publish(json, vdapi.topicRequest)
Callback.empty
}
def sendToAB(mess: abapi.Request): Callback = {
val h = SPHeader(from = "AbilityHandlerWidget", to = abapi.service,
reply = SPValue("AbilityHandlerWidget"), reqID = java.util.UUID.randomUUID())
val json = SPMessage.make(SPValue(h), SPValue(mess))
BackendCommunication.publish(json, abapi.topicRequest)
Callback.empty
}
}
  // React component definition: starts with empty state and tears down the
  // backend observers on unmount.
  private val component = ScalaComponent.builder[Unit]("AbilityHandlerWidget")
    .initialState(State(resources = List(), abilities = List(), abilityState = Map()))
    .renderBackend[Backend]
    .componentWillUnmount(_.backend.onUnmount())
    .build
  // Entry point used by the widget framework.
  def apply() = spgui.SPWidget(spwb => component())
}
| kristoferB/SP | spcontrol/frontend/src/main/scala/spgui/widgets/abilityhandler/AbilityHandler.scala | Scala | mit | 4,953 |
package spark.bagel.examples
import spark._
import spark.SparkContext._
import spark.bagel._
import spark.bagel.Bagel._
import scala.xml.{XML,NodeSeq}
/**
* Run PageRank on XML Wikipedia dumps from http://wiki.freebase.com/wiki/WEX. Uses the "articles"
* files from there, which contains one line per wiki article in a tab-separated format
* (http://wiki.freebase.com/wiki/WEX/Documentation#articles).
*/
object WikipediaPageRank {
  def main(args: Array[String]) {
    if (args.length < 5) {
      System.err.println("Usage: WikipediaPageRank <inputFile> <threshold> <numSplits> <host> <usePartitioner>")
      System.exit(-1)
    }
    // Kryo serialization with custom registrations for the vertex/message classes.
    System.setProperty("spark.serializer", "spark.KryoSerializer")
    System.setProperty("spark.kryo.registrator", classOf[PRKryoRegistrator].getName)
    val inputFile = args(0)
    val threshold = args(1).toDouble
    val numSplits = args(2).toInt
    val host = args(3)
    val usePartitioner = args(4).toBoolean
    val sc = new SparkContext(host, "WikipediaPageRank")
    // Parse the Wikipedia page data into a graph
    val input = sc.textFile(inputFile)
    println("Counting vertices...")
    val numVertices = input.count()
    println("Done counting vertices.")
    println("Parsing input file...")
    var vertices = input.map(line => {
      // WEX article row: tab-separated fields; field 1 is the title, field 3 the XML body.
      val fields = line.split("\\t")
      val (title, body) = (fields(1), fields(3).replace("\\\\n", "\\n"))
      val links =
        if (body == "\\\\N")
          NodeSeq.Empty
        else
          try {
            XML.loadString(body) \\\\ "link" \\ "target"
          } catch {
            case e: org.xml.sax.SAXParseException =>
              // Skip malformed articles rather than failing the whole job.
              System.err.println("Article \\""+title+"\\" has malformed XML in body:\\n"+body)
              NodeSeq.Empty
          }
      val outEdges = links.map(link => new String(link.text)).toArray
      val id = new String(title)
      // Every vertex starts with uniform rank 1/N.
      (id, new PRVertex(1.0 / numVertices, outEdges))
    })
    if (usePartitioner)
      vertices = vertices.partitionBy(new HashPartitioner(sc.defaultParallelism)).cache
    else
      vertices = vertices.cache
    println("Done parsing input file.")
    // Do the computation
    val epsilon = 0.01 / numVertices
    val messages = sc.parallelize(Array[(String, PRMessage)]())
    val utils = new PageRankUtils
    val result =
      Bagel.run(
        sc, vertices, messages, combiner = new PRCombiner(),
        numSplits = numSplits)(
        utils.computeWithCombiner(numVertices, epsilon))
    // Print the result
    System.err.println("Articles with PageRank >= "+threshold+":")
    val top =
      (result
       .filter { case (id, vertex) => vertex.value >= threshold }
       .map { case (id, vertex) => "%s\\t%s\\n".format(id, vertex.value) }
       .collect.mkString)
    println(top)
  }
}
| joeywen/spark_cpp_api | bagel/src/main/scala/spark/bagel/examples/WikipediaPageRank.scala | Scala | bsd-3-clause | 2,777 |
package synthesis
// Placeholder object: the original extractor implementation (visible in the commented
// block below) is disabled, and this empty declaration keeps the file compiling.
object Extractors
//
// import scala.tools.nsc._
//
// /** Contains extractors to pull-out interesting parts of the Scala ASTs. */
// trait Extractors {
// val global: Global
// val pluginInstance: FunCheckPlugin
//
// import global._
// import global.definitions._
//
// object StructuralExtractors {
// object ScalaPredef {
// /** Extracts method calls from scala.Predef. */
// def unapply(tree: Tree): Option[String] = tree match {
// case Select(Select(This(scalaName),predefName),symName)
// if("scala".equals(scalaName.toString) && "Predef".equals(predefName.toString)) =>
// Some(symName.toString)
// case _ => None
// }
// }
//
// object EnsuredExpression {
// /** Extracts the 'ensuring' contract from an expression. */
// def unapply(tree: Tree): Option[(Tree,Function)] = tree match {
// case Apply(
// Select(
// Apply(
// TypeApply(
// ScalaPredef("any2Ensuring"),
// TypeTree() :: Nil),
// body :: Nil),
// ensuringName),
// (anonymousFun @ Function(ValDef(_, resultName, resultType, EmptyTree) :: Nil,
// contractBody)) :: Nil)
// if("ensuring".equals(ensuringName.toString)) => Some((body,anonymousFun))
// case _ => None
// }
// }
//
// object RequiredExpression {
// /** Extracts the 'require' contract from an expression (only if it's the
// * first call in the block). */
// def unapply(tree: Tree): Option[(Tree,Tree)] = tree match {
// case Block(Apply(ScalaPredef("require"), contractBody :: Nil) :: Nil, body) =>
// Some((body,contractBody))
// case _ => None
// }
// }
//
// object ExObjectDef {
// /** Matches an object with no type parameters, and regardless of its
// * visibility. Does not match on the automatically generated companion
// * objects of case classes (or any synthetic class). */
// def unapply(cd: ClassDef): Option[(String,Template)] = cd match {
// case ClassDef(_, name, tparams, impl) if (cd.symbol.isModuleClass && tparams.isEmpty && !cd.symbol.hasFlag(symtab.Flags.SYNTHETIC)) => {
// Some((name.toString, impl))
// }
// case _ => None
// }
// }
//
// object ExAbstractClass {
// /** Matches an abstract class or a trait with no type parameters, no
// * constrctor args (in the case of a class), no implementation details,
// * no abstract members. */
// def unapply(cd: ClassDef): Option[(String)] = cd match {
// case ClassDef(_, name, tparams, impl) if (cd.symbol.isTrait && tparams.isEmpty && impl.body.length == 2) => {
// println(name + " seems to be a cool trait")
// Some(name.toString)
// }
// case _ => None
// }
// }
//
// object ExCaseClass {
//
// }
//
// object ExFunctionDef {
// /** Matches a function with a single list of arguments, no type
// * parameters and regardless of its visibility. */
// def unapply(dd: DefDef): Option[(String,Seq[ValDef],Tree,Tree)] = dd match {
// case DefDef(_, name, tparams, vparamss, tpt, rhs) if(tparams.isEmpty && vparamss.size == 1) => Some((name.toString, vparamss(0), tpt, rhs))
// case _ => None
// }
// }
// }
//
// object ExpressionExtractors {
// object ExBooleanLiteral {
// def unapply(tree: Tree): Option[Boolean] = tree match {
// case Literal(Constant(true)) => Some(true)
// case Literal(Constant(false)) => Some(false)
// case _ => None
// }
// }
//
// object ExInt32Literal {
// def unapply(tree: Tree): Option[Int] = tree match {
// case Literal(c @ Constant(i)) if c.tpe == IntClass.tpe => Some(c.intValue)
// case _ => None
// }
// }
// }
//
// object TypeExtractors {
//
// }
// }
| epfl-lara/comfusy | src/main/scala/Extractors.scala | Scala | bsd-2-clause | 4,046 |
/*
* Copyright (c) 2014 Paul Bernard
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Spectrum Finance is based in part on:
* QuantLib. http://quantlib.org/
*
*/
package org.quantintel.ql.indexes
import org.quantintel.ql.math.{Constants, Closeness}
import org.quantintel.ql.time.{TimeSeries, Date, Calendar}
import org.quantintel.ql.util.{Observability, Observer, DefaultObservable, Observable}
/**
*
* Abstract representation of a Financial Index.
*
* A statistic measure of change in an economy or a securities market. In the case
* of financial markets, and index is an imaginary portfolio of securities representing a
* particular market or a portion of it. Each index has its own calculation methodology
* and is usually expressed in terms of a change from a base value. Thus, the percentage
* change is more important than the actual numeric value.
* source - Investopedia 2014
*
*
* @author Paul Bernard
*/
abstract class Index extends Observable with Observability {
  /**
   * Name of the index.
   *
   * @return name of the index
   */
  def name() : String
  /**
   * Calendar used to decide which dates are valid fixing dates.
   *
   * @return the calendar that defines the valid fixing dates
   */
  def fixingCalendar() : Calendar
  /**
   * Tests whether a given date is a valid fixing date for this index.
   *
   * @param fixingDate the fixing date to be tested
   * @return true if the fixing date is valid
   */
  def isValidFixingDate(fixingDate: Date): Boolean
  /**
   * Fixing of the index at the given date.
   *
   * @param fixingDate the actual calendar date of the fixing (no settlement days)
   * @param forecastTodaysFixing forecast todays fixing
   * @return the fixing at the given date.
   */
  def fixing (fixingDate: Date, forecastTodaysFixing: Boolean) : Double
  // Historical fixings for this index, keyed by index name in the global IndexManager.
  def timeSeries : TimeSeries[Double] = IndexManager.getHistory(name())
  // Stores a single fixing without overwriting an existing value for the same date.
  def addFixing(date: Date, value: Double) {
    addFixing(date, value, forceOverwrite = false)
  }
  // Stores a single fixing in the history for this index.
  // NOTE(review): noInvalidFixing/noDuplicateFixing are computed but never acted upon,
  // so invalid dates and conflicting duplicates are silently ignored — presumably the
  // QuantLib-style error reporting was dropped in this port; confirm intended behavior.
  def addFixing(date: Date, value: Double, forceOverwrite: Boolean) {
    val tag : String = name()
    var missingFixing: Boolean = false
    var validFixing : Boolean = false
    var noInvalidFixing: Boolean = true
    var noDuplicateFixing : Boolean = true
    val h : TimeSeries[Double] = IndexManager.getHistory(tag)
    validFixing = isValidFixingDate(date)
    val currentValue: Double = h.get(date)
    // A fixing may be (re)written when forced or when no real value is stored yet.
    missingFixing = forceOverwrite || Closeness.isClose(currentValue, Constants.NULL_REAL)
    if (validFixing) {
      if (missingFixing) {
        h.put(date, value)
      } else if (Closeness.isClose(currentValue, value)) {
        // no op
      } else {
        noDuplicateFixing = false
      }
    } else {
      noInvalidFixing = false
    }
    IndexManager.setHistory(tag, h)
  }
  // Stores a batch of fixings, pairing each date with the next value from `values`.
  // NOTE(review): the parameter name `forceOverwite` is misspelled (left unchanged —
  // renaming is a code change); same silently-ignored flags as the overload above.
  def addFixing(dates: Iterator[Date], values: Iterator[Double], forceOverwite: Boolean){
    val tag : String = name()
    var missingFixing : Boolean = false
    var validFixing : Boolean = false
    var noInvalidFixing: Boolean = true
    var noDuplicateFixing : Boolean = true
    val h : TimeSeries[Double] = IndexManager.getHistory(tag)
    for (date: Date <- dates.toIterator){
      val value : Double = values.next()
      validFixing = isValidFixingDate(date)
      val currentValue : Double = h.get(date)
      missingFixing = forceOverwite || Closeness.isClose(currentValue, Constants.NULL_REAL)
      if(validFixing){
        if(missingFixing) {
          h.put(date, value)
        } else if (Closeness.isClose(currentValue, value)){
          // no op
        } else {
          noDuplicateFixing = false
        }
      } else {
        noInvalidFixing = false
      }
    }
    IndexManager.setHistory(tag, h)
  }
  // Removes all stored fixings for this index.
  def clearFixing() {
    IndexManager.clearHistory(name())
  }
  // Convenience overload: fixing at the date without forecasting today's value.
  def fixing(fixingDate: Date) : Double = fixing(fixingDate, forecastTodaysFixing = false)
}
| pmularien/spectrum-old | financial/src/main/scala/org/quantintel/ql/indexes/Index.scala | Scala | apache-2.0 | 4,217 |
package org.buttercoin.engine
import java.util.UUID
import org.buttercoin.common.util._
import org.buttercoin.common.models.core.AccountID
import org.buttercoin.common.models.money._
import org.buttercoin.common.models.order
import order.{ TradeCompletionType, LiquidityMaker, LiquidityTaker }
import org.buttercoin.common.models.currency._
import org.buttercoin.engine.messages.LedgerDeposit
import org.buttercoin.common.messages.{ Request, CreditTrade }
import scalaz.{ Order => zOrder, Ordering => zOrdering, _ }
import Scalaz._
import shapeless.{ HNil, :: }
import shapeless.syntax.singleton._
import shapeless.record._
package object book {
  // kept for tests
  // Builds an anonymous Stack wired with the implicit price ordering and the
  // currency factories for the price (PC) and quantity (QC) currencies.
  object Stack {
    def apply[PC <: Currency : math.Ordering : CurrencyFactory, QC <: Currency : CurrencyFactory]
      (implicit ne1: PC =!= QC) =
      new Stack[PC, QC] {
        var priceOrdering = implicitly[math.Ordering[PC]]
        var priceFactory = implicitly[CurrencyFactory[PC]]
        var qtyFactory = implicitly[CurrencyFactory[QC]]
      }
  }
trait Stack[PC <: Currency, QC <: Currency] extends orders.Stack[PC, QC] {
import market.Output
    /** Mutable storage interface for the resting orders of one book side. */
    trait Store {
      self: Book =>
      def insert(order: OrderT): Unit
      def remove(order: OrderT): OrderT
      // NOTE(review): the parameter is named `order` but actually receives an order id.
      def removeById(order: UUID): Option[OrderT]
      def removeAll(orders: Set[OrderT]): Unit
      def size: Int
      def top: Option[OrderT]
      def allOrders: Iterator[OrderT]
    }
    /**
     * Core matching logic: consumes resting orders from the book with an incoming
     * fill order and turns the outcome into engine output messages.
     */
    trait OrderMatching {
      self: Book with Store with OutputGenerator =>
      // NOTE(review): FillResult2/NoMatch2 and the `partial` flag below appear unused
      // within this view — candidates for removal once confirmed.
      trait FillResult2
      case object NoMatch2 extends FillResult2
      implicit val rFactory: CurrencyFactory[RC]
      implicit val oFactory: CurrencyFactory[OC]
      // Walks the book in priority order, consuming resting orders until the fill
      // order is exhausted or stops matching. Returns a shapeless record with the
      // closed orders, totals, residual/reopened orders, and the last trade price.
      def matchOrders[T <: FillOrder](fillWith: T)
        = {
        var curFill: Option[T] = Some(fillWith)
        var spent = rFactory(0)
        var earned = oFactory(0)
        var partial = false
        var reopen: Option[(OrderT, (OrderT, OrderT))] = None
        var lastPrice: Option[PC] = None
        // Accumulates the amounts exchanged with a consumed resting order.
        def tally(order: OrderT): Unit = {
          spent = spent + order.received
          earned = earned + order.offered
        }
        val completed = allOrders.takeWhile { curOrder =>
          curFill map { x =>
            x.compareMatch(curOrder) match {
              case MatchAll =>
                // The current order fully matches the fill order
                curFill = None
                lastPrice = Some(curOrder.price)
                val q = x.quantityAtPrice(curOrder.price)
                if (q === curOrder.quantity) {
                  // We're exactly equal, nothing to reopen
                  tally(curOrder)
                } else {
                  // The current order is bigger, split it and reopen the rest
                  val (fill, remaining) = curOrder.split(q)
                  reopen = Some(curOrder, fill -> remaining)
                  tally(fill)
                }
                true
              case MatchSome =>
                // Resting order fully consumed; the fill order continues with the remainder.
                val (_, open) = x.split(curOrder)
                tally(curOrder)
                curFill = Some(open.asInstanceOf[T]) //.asInstanceOf[FillOrder[T]])
                lastPrice = Some(curOrder.price)
                true
              case MatchNone => false
            }
          } getOrElse(false)
        }
        ("closed" ->> completed.toSet) ::
          ("earned" ->> earned) ::
          ("spent" ->> spent) ::
          ("residual" ->> curFill) ::
          ("reopened" ->> reopen) ::
          ("lastPrice" ->> lastPrice) :: HNil
      }
      // Applies a match result to the book: removes closed orders, reinserts the
      // reopened remainder, and folds all generated events/messages together.
      def fill[T <: FillOrder](fillWith: T): ((Option[T], Option[PC]), Output) = {
        val res = matchOrders(fillWith)
        removeAll(res("closed"))
        val filled = res("reopened").map { x =>
          val (original, (_, r)) = x
          insert(r)
          res("closed") - original
        } getOrElse(res("closed"))
        // Only emit a fill/reopen event for the incoming order if something traded.
        val earned = if(res("spent").amount > 0 && res("earned").amount > 0 ) {
          if(res("residual").isEmpty) {
            genFilled(fillWith, res("spent"), res("earned"))
          } else {
            genReopened(fillWith, res("spent"), res("earned"))
          }
        } else {
          mzero[Output]
        }
        val outputs = filled foldMap (genFilled(_))
        val reOut = res("reopened") map { x =>
          val (_, (f, _)) = x
          genReopened(f)
        } getOrElse(mzero[Output])
        val missOut = res("residual") map(genMiss(_)) getOrElse(mzero[Output])
        (res("residual"), res("lastPrice")) -> (outputs |+| reOut |+| earned |+| missOut)
      }
    }
    /**
     * One side of an order book: resting orders kept in priority order, with
     * matching and event-generation behaviour mixed in.
     */
    trait Book extends Store with OrderParity with OrderMatching with OutputGenerator with Serializable {
      import scala.collection.immutable.{ SortedSet, HashMap }
      type OrderT <: orders.Meta with Limit
        with OrderParity.Of[this.type]
        with BookOrdered[OrderT]
        with LimitSplittable[OrderT]
      type FillOrder = orders.Meta with Matching.Aux[OrderT] with Splittable[_] with Offered
      // Resting orders in sorted (priority) order, plus an id index for direct lookup.
      // Both structures must be kept in sync by insert/remove/removeAll below.
      var set = SortedSet[OrderT]()
      var idMap = Map[UUID, OrderT]()
      // Rests a new order on the book and emits the corresponding "opened" event.
      def open(order: OrderT) = {
        insert(order)
        genOpened(order)
      }
      // Cancels an order only when it belongs to the requesting account; returns
      // None otherwise (unknown id or mismatched account).
      def cancel(orderId: UUID, accountId: AccountID) = {
        idMap.get(orderId) flatMap { order =>
          if(order.accountId == accountId) {
            Some(genCanceled(remove(order)))
          } else {
            None
          }
        }
      }
      def insert(order: OrderT) = {
        set = set + order
        idMap = idMap + (order.orderId -> order)
      }
      def remove(order: OrderT) = {
        set = set - order
        idMap = idMap - order.orderId
        order
      }
      // NOTE - This doesn't seem to be called by anyone externally,
      // but it should likely include accountId to ensure the order
      // being removed by the caller belongs to account (i.e. cancel)
      def removeById(id: UUID) = idMap.get(id).map(remove)
      def removeAll(orders: Set[OrderT]) = {
        set = set &~ orders
        idMap = idMap -- orders.map(_.orderId)
      }
      def size = set.size
      def top = set.headOption
      def allOrders = set.toIterator
    }
    /** Bid (buy) side of the book. */
    class BidBook extends Book with Bid {
      type OrderT = BidLimitOrder
      val rFactory = implicitly[CurrencyFactory[RC]]
      val oFactory = implicitly[CurrencyFactory[OC]]
    }
    /** Ask (sell) side of the book. */
    class AskBook extends Book with Ask {
      type OrderT = AskLimitOrder
      val rFactory = implicitly[CurrencyFactory[RC]]
      val oFactory = implicitly[CurrencyFactory[OC]]
    }
    /** Builds the (order events, engine requests) output pairs for book actions. */
    trait OutputGenerator {
      self: Book =>
      import shapeless._
      import org.buttercoin.common.models.orderInfo.OrderInfo
      // Unfilled residual: market orders are canceled and their funds redeposited;
      // anything else produces no output here.
      def genMiss(miss: FillOrder): Output = miss match {
        case _: Market =>
          order.Event(
            miss.orderId,
            miss.accountId,
            order.Canceled(miss.offered)
          ).point[Vector] ->
            LedgerDeposit(miss.accountId, miss.offered).point[Vector]
        case _ => mzero[Output]
      }
      // Event emitted when an order is rested on the book; no engine requests.
      def genOpened(opened: OrderT): Output =
        order.Event(
          opened.orderId,
          opened.accountId,
          order.Opened(opened.quantity)
        ).point[Vector] -> mzero[Vector[Request]]
      // Resting (maker) order completely filled.
      def genFilled(order: OrderT): Output = {
        genFilled(order, order.offered, order.received, LiquidityMaker)
      }
      // Fill event plus the credit request for the earned amount.
      def genFilled(filled: orders.Meta, spent: Currency, earned: Currency, liqType: TradeCompletionType=LiquidityTaker): Output = {
        order.Event(
          filled.orderId,
          filled.accountId,
          order.Filled(spent, earned, liqType)
        ).point[Vector] ->
          CreditTrade(filled.accountId, filled.orderId, earned, liqType).point[Vector]
      }
      // Resting (maker) order partially filled and reopened.
      def genReopened(filled: OrderT): Output = {
        genReopened(filled, filled.offered, filled.received, LiquidityMaker)
      }
      // Reopen event plus the credit request for the traded portion.
      def genReopened(filled: orders.Meta, spent: Currency, earned: Currency, liqType: TradeCompletionType=LiquidityTaker): Output = {
        order.Event(
          filled.orderId,
          filled.accountId,
          order.Reopened(spent, earned, liqType)
        ).point[Vector] ->
          CreditTrade(filled.accountId, filled.orderId, earned, liqType).point[Vector]
      }
      // Cancel event plus redeposit of the order's remaining offered funds.
      def genCanceled: OrderT => Output = { canceled =>
        order.Event(
          canceled.orderId,
          canceled.accountId,
          order.Canceled(canceled.offered)
        ).point[Vector] ->
          LedgerDeposit(canceled.accountId, canceled.offered).point[Vector]
      }
    }
}
}
| buttercoin/engine | core/src/main/scala/datastore/book.scala | Scala | mit | 8,586 |
package org.jetbrains.plugins.scala.editor.enterHandler
import com.intellij.application.options.CodeStyle
import com.intellij.codeInsight.CodeInsightSettings
import com.intellij.codeInsight.editorActions.enter.EnterHandlerDelegate.Result
import com.intellij.codeInsight.editorActions.enter.EnterHandlerDelegateAdapter
import com.intellij.formatting.IndentInfo
import com.intellij.openapi.actionSystem.DataContext
import com.intellij.openapi.editor.actionSystem.EditorActionHandler
import com.intellij.openapi.editor.{Document, Editor, EditorModificationUtilEx}
import com.intellij.openapi.util.Ref
import com.intellij.psi._
import com.intellij.psi.codeStyle.CommonCodeStyleSettings.IndentOptions
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.util.DocumentUtil
import com.intellij.util.text.CharArrayUtil
import org.jetbrains.annotations.NotNull
import org.jetbrains.plugins.scala.editor.enterHandler.Scala3IndentationBasedSyntaxEnterHandler._
import org.jetbrains.plugins.scala.editor.{AutoBraceUtils, DocumentExt, PsiWhiteSpaceOps, ScalaEditorUtils}
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.formatting.ScalaBlock
import org.jetbrains.plugins.scala.lang.lexer.{ScalaTokenType, ScalaTokenTypes}
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScCaseClause
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScCommentOwner, ScEnumCases, ScExtensionBody}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.ScExportStmt
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScTemplateBody
import org.jetbrains.plugins.scala.util.IndentUtil
/**
* Other indentation-related Platform logic:
* - [[com.intellij.codeInsight.editorActions.EnterHandler#executeWriteActionInner]]
* - [[com.intellij.formatting.FormatProcessor#getIndent]]
* - [[com.intellij.psi.codeStyle.lineIndent.LineIndentProvider#getLineIndent]]<br>
* [[com.intellij.psi.impl.source.codeStyle.lineIndent.IndentCalculator#getIndentString]]
* (when [[com.intellij.psi.impl.source.codeStyle.lineIndent.JavaLikeLangLineIndentProvider]] is used)
*
* Other indentation-related Scala Plugin logic:
* - [[org.jetbrains.plugins.scala.lang.formatting.ScalaBlock.getChildAttributes]]<br>
* used to calculate alignment and indent for new blocks when pressing Enter
* - [[org.jetbrains.plugins.scala.lang.formatting.processors.ScalaIndentProcessor.getChildIndent]]
 *  - used to calculate indent for existing elements
* - [[org.jetbrains.plugins.scala.lang.formatting.ScalaBlock.isIncomplete]]<br>
* used when typing after incomplete block, in the beginning of some structure, e.g.: {{{
* def foo = <caret>
* }}}
* - [[org.jetbrains.plugins.scala.editor.ScalaLineIndentProvider.getLineIndent]]
*/
class Scala3IndentationBasedSyntaxEnterHandler extends EnterHandlerDelegateAdapter {
  // NOTE: maybe we could move some logic here? investigate whether it has any advantages
  override def invokeInsideIndent(newLineCharOffset: Int, editor: Editor, dataContext: DataContext): Boolean =
    super.invokeInsideIndent(newLineCharOffset, editor, dataContext)
  /**
   * Handles Enter in Scala files with indentation-based (Scala 3) syntax before the platform inserts the new line.
   *
   * When the caret is at the end of a line (only blanks until EOL), the handler searches for the previous element
   * in the same indentation context and, if one is found, inserts the new line plus indentation itself and
   * returns `Result.Stop` so the platform does no further processing.
   * Otherwise it only applies the "space after empty case clause arrow" workaround and returns `Result.Continue`.
   */
  override def preprocessEnter(
    file: PsiFile,
    editor: Editor,
    caretOffsetRef: Ref[Integer],
    caretAdvance: Ref[Integer],
    dataContext: DataContext,
    originalHandler: EditorActionHandler
  ): Result = {
    // Only handle Scala files that actually use indentation-based syntax,
    // and only when smart indent on Enter is enabled in the IDE settings
    if (!file.is[ScalaFile])
      return Result.Continue
    if (!file.useIndentationBasedSyntax)
      return Result.Continue
    if (!CodeInsightSettings.getInstance.SMART_INDENT_ON_ENTER)
      return Result.Continue
    val caretOffset = caretOffsetRef.get.intValue
    val document = editor.getDocument
    val caretIsAtTheEndOfLine = isCaretAtTheEndOfLine(caretOffset, document)
    val result = if (caretIsAtTheEndOfLine) {
      // from [[com.intellij.codeInsight.editorActions.enter.EnterHandlerDelegate.preprocessEnter]]:
      // Important Note: A document associated with the editor may have modifications which are not reflected yet in the PSI file.
      // If any operations with PSI are needed including a search for PSI elements, the document must be committed first to update the PSI.
      document.commit(editor.getProject)
      val elementAtCaret = ScalaEditorUtils.findElementAtCaret_WithFixedEOF(file, document, caretOffset)
      if (elementAtCaret == null)
        return Result.Continue
      val indentOptions = CodeStyle.getIndentOptions(file)
      val documentText = document.getCharsSequence
      val caretIndent = EnterHandlerUtils.calcCaretIndent(caretOffset, documentText, indentOptions.TAB_SIZE)
      val caretIndentSize = caretIndent.getOrElse(Int.MaxValue) // using MaxValue if the caret isn't inside code indent
      checkCaretAfterEmptyCaseClauseArrow(elementAtCaret, caretIndentSize, indentOptions) match {
        case Some(clause) =>
          // WORKAROUND:
          // press Enter after the case clause WITHOUT any code in the body
          // `case _ =><caret>` (with potential spaces around caret)
          insertNewLineWithSpacesAtCaret(editor, document, clause, indentOptions, extraSpaces = CodeStyle.getIndentSize(file), needRemoveTrailingSpaces = true)
          Result.Stop
        case _ =>
          val indentedElementOpt = previousElementInIndentationContext(elementAtCaret, caretIndentSize, indentOptions)
          indentedElementOpt match {
            case Some((indentedElement, _)) =>
              insertNewLineWithSpacesAtCaret(editor, document, indentedElement, indentOptions, needRemoveTrailingSpaces = true)
              Result.Stop
            case _ =>
              Result.Continue
          }
      }
    }
    else {
      // looks like document commit is not required in this particular case
      val elementAtCaret = ScalaEditorUtils.findElementAtCaret_WithFixedEOF(file, document, caretOffset)
      if (elementAtCaret != null) {
        indentCodeAfterCaseClauseArrow(document, elementAtCaret, caretOffset)
      }
      Result.Continue
    }
    //println(s"preprocessEnter: $result")
    result
  }
}
object Scala3IndentationBasedSyntaxEnterHandler {
/**
* The method returns '''Some(element)''' - if the element before the caret is in indentation context
*
* The method returns '''None''' - otherwise, or for incomplete blocks
* Examples of incomplete blocks:<br> {{{
* def foo = <caret>
* Option(42) match <caret>
* }}}
*
*
* Examples:
* {{{
* // returns `statement2`
* def foo =
* statement1
* statement2<caret>
*
* // returns `println(2)`
* class A {
* def foo = {
* if 2 + 2 == 42 then
* println(1)
* println(2) <caret>
* }
* }}}
*
   * If the caret is at a blank line, then its indent is checked against the "indented element" candidate.
   * If the caret is indented more than or equal to the previous element, then the caret is considered to be inside
   * the same indentation context. If the caret is indented less, then the element is skipped.<br>
*
* Examples (using 4-space indent for a better visibility)
* {{{
* // returns `println(2)`
* class A:
* def foo =
* println("start")
* if 2 + 2 == 42 then
* println(1)
* println(2)
* <caret>
*
* // returns `if ...`
* class A:
* def foo =
* println("start")
* if 2 + 2 == 42 then
* println(1)
* println(2)
* <caret>
*
* // returns `def foo ...`
* class A:
* def foo =
* println("start")
* if 2 + 2 == 42 then
* println(1)
* println(2)
* <caret>
* }}}
*
*
* @todo extract to [[AutoBraceUtils]] if want to reuse e.g. in backspace handlers?
*/
  private[editor] def previousElementInIndentationContext(
    @NotNull elementAtCaret: PsiElement,
    caretIndentSize: Int,
    indentOptions: IndentOptions
  ): Option[(PsiElement, Int)] = {
    // NOTE 1: there are still some issues with Scala3 + tabs, see: SCL-18817
    // NOTE 2: compiler indent calculation in Scala3 is a bit different from ours,
    //  according to http://dotty.epfl.ch/docs/reference/other-new-features/indentation.html
    //  "Indentation prefixes can consist of spaces and/or tabs. Indentation widths are the indentation prefixes themselves"
    val lastRealElement = getLastRealElement(elementAtCaret)
    val result = if (lastRealElement == null)
      None
    else if (lastRealElement.is[PsiErrorElement])
      None // incomplete code before the caret; the platform will indent it later
    else {
      val elementAtCaretEndOffset = elementAtCaret.endOffset
      var result: Option[(PsiElement, Int)] = None
      // For a given last real element on the line traverse the tree searching for an indented element
      // (yes, it's not very pretty, but the logic of tree traversal is not simple and it's easier to modify the imperative code)
      var current = lastRealElement
      var continue = true
      while (continue) {
        // Only consider elements that end at or before the caret element; stop at the file level
        if (current != null && current.endOffset <= elementAtCaretEndOffset && !current.is[PsiFile]) {
          toIndentedElement(current, caretIndentSize, indentOptions) match {
            case Some(value) =>
              result = Some(value)
              continue = false
            case None =>
              // Not an indented element: go to the previous sibling inside a semicolon-separated
              // list, otherwise climb to the parent and retry
              val nextElementToProcess = {
                val prevCode = current.prevSiblingNotWhitespace.orNull
                val isInSemicolonSeparatedList =
                  current.elementType == ScalaTokenTypes.tSEMICOLON ||
                    prevCode != null && prevCode.elementType == ScalaTokenTypes.tSEMICOLON
                if (isInSemicolonSeparatedList)
                  prevCode
                else
                  current.getParent
              }
              current = nextElementToProcess
          }
        }
        else {
          continue = false
        }
      }
      result
    }
    //println(s"indentedElement: $result")
    result
  }
/**
* @param elementAtCaret non-whitespace - if the caret located is in the end of document<br>
* whitespace - otherwise
* @example
* input:{{{
* def foo =
* 42 //comment<caret>
* }}}
* output: {{{
* 42
* }}}
* input:{{{
* def foo =
* 1; 2; 3; //comment<caret>
* }}}
* output: {{{
* ;
* }}}
* NOTE: the semicolons are handled later for Scala 3
*/
  private[editor] def getLastRealElement(elementAtCaret: PsiElement): PsiElement = {
    // Step 1: if the caret sits in whitespace, move to the leaf just before it
    val beforeWhitespace = elementAtCaret match {
      case ws: PsiWhiteSpace => PsiTreeUtil.prevLeaf(ws) match {
        case null =>
          return null // can be null when getLastRealElement is called during typing in "auto-braces" feature
        case prev => prev
      }
      case el => el
    }
    // Step 2: skip a trailing line comment that continues the code on the same line
    val withLineCommentSkipped = beforeWhitespace match {
      // for line comment we use prevCodeLeaf instead of prevSibling
      // because currently line comments are not attached to the line in indentation-based block
      case c: PsiComment if !c.startsFromNewLine() => PsiTreeUtil.prevCodeLeaf(c) match {
        case null => c
        case prev => prev
      }
      case el => el
    }
    withLineCommentSkipped
  }
private def toIndentedElement(
element: PsiElement,
caretIndentSize: Int,
indentOptions: IndentOptions
): Option[(PsiElement, Int)] = {
if (isElementInIndentationContext(element))
for {
elementIndentSize <- elementIndentSize(element, maxElementIndentSize = caretIndentSize, indentOptions.TAB_SIZE)
} yield {
(element, elementIndentSize)
}
else None
}
  /** Whether `element` is the kind of element that can terminate an indentation region
    * (i.e. a new line after it may stay inside the same indentation context).
    */
  private def isElementInIndentationContext(element: PsiElement): Boolean = {
    // TODO: it should be just ScBlockStatement, without ScCommentOwner:
    //  according to the language spec, definitions are also block statements,
    //  but in our hierarchy they are not, we should try adding ScBlockStatement to all Definition PSI hierarchy
    val isBlockChild = element.is[ScBlockStatement, ScExportStmt] ||
      element.isInstanceOf[ScCommentOwner] ||
      element.elementType == ScalaTokenTypes.tSEMICOLON
    element match {
      // An indentation region can start after one of the following tokens:
      // =  =>  ?=>  <-  catch  do  else  finally  for
      // if  match  return  then  throw  try  while  yield
      case _ if isBlockChild =>
        val parent = element.getParent
        // Case A: element belongs to a braceless (indentation-based) block
        val isInsideIndentationBlock = parent match {
          case block: ScBlock =>
            !block.isEnclosedByBraces
          case _ => false
        }
        // This check is actual when body consists from a single element.
        // In this case parser doesn't wrap it into a ScBodyExpr PSI element
        val isInsideIndentationBlock_AsSingleBlockElement1 = {
          val prevCodeLeaf = PsiTreeUtil.prevCodeLeaf(element)
          prevCodeLeaf != null && (prevCodeLeaf.elementType match {
            case ScalaTokenTypes.tASSIGN |
                 ScalaTokenTypes.tFUNTYPE |
                 ScalaTokenType.ImplicitFunctionArrow |
                 ScalaTokenTypes.tCHOOSE |
                 ScalaTokenTypes.kYIELD |
                 ScalaTokenTypes.kDO |
                 ScalaTokenType.ThenKeyword |
                 ScalaTokenTypes.kELSE |
                 ScalaTokenTypes.kTRY |
                 ScalaTokenTypes.kFINALLY |
                 // NOTE: these expressions are handled specially, using some PSI extractors,
                 // For them not enough to just check the previous token: previous token can be ')'
                 // or some element of condition / enumerator
                 //
                 //ScalaTokenTypes.kIF |
                 //ScalaTokenTypes.kFOR |
                 //ScalaTokenTypes.kWHILE |
                 ScalaTokenTypes.kCATCH |
                 //ScalaTokenTypes.kMATCH // case clauses are handled specially
                 ScalaTokenTypes.kRETURN |
                 ScalaTokenTypes.kTHROW =>
              true
            case _ =>
              false
          })
        }
        // Case B: element is the single-expression body/branch of if/while/for
        val isInsideIndentationBlock_AsSingleBlockElement2 = parent match {
          case ScIf(_, thenBranch, elseBranch) => thenBranch.contains(element) || elseBranch.contains(element)
          case ScWhile(_, Some(`element`)) => true // TODO: use just expression extractor (condition is ignored, but calculated redundantly)
          case ScFor(_, `element`) => true // TODO: use just body extractor (same reason)
          case _ => false
        }
        val isInIndentationContext =
          parent.is[ScTemplateBody] ||
            parent.is[ScExtensionBody] ||
            isInsideIndentationBlock ||
            isInsideIndentationBlock_AsSingleBlockElement1 ||
            isInsideIndentationBlock_AsSingleBlockElement2
        isInIndentationContext
      case clause: ScCaseClause =>
        /**
         * WORKAROUND:
         * press Enter / Backspace after the LAST case clause WITH some code on the same line with clause arrow
         * before: {{{
         *   ref match
         *     case _ => doSomething()<caret>
         * }}}
         *
         * after: {{{
         *   ref match
         *     case _ => doSomething()
         *     <caret>
         * }}}
         *
         * NOTE: in case clauses with braces, this automatically works via `ScalaBlock.getChildAttributes`.
         * However with braceless clauses selected formatter block belongs to the parent scope, not to the clauses
         * so wrong indent is used without this workaround
         */
        val isLastClause = clause.getNextSibling == null
        val isBracelessClauses: Boolean = {
          val clauses = clause.getParent
          val prev = PsiTreeUtil.prevCodeLeaf(clauses)
          prev != null && prev.elementType != ScalaTokenTypes.tLBRACE
        }
        isLastClause && isBracelessClauses
      case _: PsiComment =>
        true
      case _: ScEnumCases =>
        true
      case _ =>
        false
    }
  }
private def elementIndentSize(element: PsiElement, maxElementIndentSize: Int, tabSize: Int): Option[Int] = {
val indentWs = EnterHandlerUtils.precededIndentWhitespace(element)
indentWs match {
case Some(ws) =>
val elementIndentSize = IndentUtil.calcLastLineIndent(ws.getChars, tabSize)
/** see docs and examples in [[previousElementInIndentationContext]] */
if (elementIndentSize <= maxElementIndentSize) {
// Incomplete elements are handled in the end of enter handling by IntelliJ when adjusting indents.
// see: com.intellij.codeInsight.editorActions.EnterHandler.executeWriteActionInner
// todo (optimization): we could try to stop processing parents of the original elementAtCaret,
// once we detect some incomplete parent
// (currently we continue processing parents and do this check each time
if (!ScalaBlock.isIncomplete(element.getNode))
Some(elementIndentSize)
else None
}
else None
case _ => None
}
}
/**
* When the caret is just after case clause arrow `=>` and just before some code position {{{
* expr match
* case 42 => <caret>println("hello")
* }}}
* we need to insert an extra space before the code.
* Otherwise Scala 3 parser will not parse the code as a child of the cause clause and it will not be indented:
* {{{
* BAD:
* 1 match
* case 2 =>
* <CARET>3
*
* GOOD:
* 1 match
* case 2 =>
* <CARET> 3
* }}}
*/
private def indentCodeAfterCaseClauseArrow(
document: Document,
elementAtCaret: PsiElement,
caretOffset: Int
): Unit =
if (isCaretAfterCaseClauseArrowBeforeCode(elementAtCaret, caretOffset)) {
document.insertString(caretOffset, " ")
}
// `case _ => <caret>ref`
  // True for `case _ => <caret>ref`: the previous code leaf is a case-clause arrow
  // and the caret sits exactly at the start of the element after it
  private def isCaretAfterCaseClauseArrowBeforeCode(elementAtCaret: PsiElement, caretOffset: Int): Boolean = {
    val prevLeaf = PsiTreeUtil.prevCodeLeaf(elementAtCaret)
    prevLeaf match {
      case ElementType(ScalaTokenTypes.tFUNTYPE) && Parent(_: ScCaseClause) if caretOffset == elementAtCaret.startOffset =>
        true
      case _ =>
        false
    }
  }
/** @return Some(caseClause) if element before the caret represents a
* case clause without any code after the caret:
* {{{case _ =><caret><new line> (with optional spaces around caret)}}}
*/
  private def checkCaretAfterEmptyCaseClauseArrow(
    elementAtCaret: PsiElement,
    caretIndentSize: Int,
    indentOptions: IndentOptions,
  ): Option[ScCaseClause] = {
    // First, cheaply rule out positions that cannot be an empty case clause body
    val canBeAfterCaseClauseArrow =
      elementAtCaret match {
        // `case _ =><caret>EOF` (no whitespaces around caret, caret is at the end of file)
        // in this case element at caret represents empty case clause body
        case block: ScBlock => block.getFirstChild == null
        case _: PsiWhiteSpace => true
        case _ => false
      }
    if (canBeAfterCaseClauseArrow) {
      // Skip an empty block wrapper, if any, to reach the arrow token itself
      val prevLeaf = PsiTreeUtil.prevLeaf(elementAtCaret) match {
        case b: ScBlock => PsiTreeUtil.prevLeaf(b)
        case el => el
      }
      prevLeaf match {
        case ElementType(ScalaTokenTypes.tFUNTYPE) && Parent(clause: ScCaseClause) =>
          // Only treat the caret as "inside" the clause when it is indented at least as much as the clause
          val caretIsIndentedFromClause = elementIndentSize(clause, caretIndentSize, indentOptions.TAB_SIZE).isDefined
          if (caretIsIndentedFromClause) Some(clause)
          else None
        case _ => None
      }
    }
    else None
  }
private val SpaceOrTab = " \\t"
/** The logic is inspired by [[com.intellij.openapi.editor.actions.EnterAction.insertNewLineAtCaret]] */
  /** Inserts a line break at the caret followed by the indentation of `indentedElement`'s line
    * (plus `extraSpaces`), then moves the caret to the new position and optionally removes
    * trailing blanks after it.
    */
  private def insertNewLineWithSpacesAtCaret(
    editor: Editor,
    document: Document,
    indentedElement: PsiElement,
    indentOptions: IndentOptions,
    needRemoveTrailingSpaces: Boolean = false,
    extraSpaces: Int = 0
  ): Unit = {
    val text = document.getCharsSequence
    val caretOffset = editor.getCaretModel.getOffset
    val prevIndentLineStartOffset = DocumentUtil.getLineStartOffset(indentedElement.startOffset, document)
    val prevIndentWsEndOffset = CharArrayUtil.shiftForward(text, prevIndentLineStartOffset, SpaceOrTab)
    // in case caret is placed before some element inside whitespace:
    // def foo =
    //   <caret>  42
    val prevIndentWsEndOffsetUntilCaret = prevIndentWsEndOffset.min(caretOffset)
    val spacesOnNewLine = text.subSequence(prevIndentLineStartOffset, prevIndentWsEndOffsetUntilCaret)
    val indentSize = IndentUtil.calcIndent(spacesOnNewLine, indentOptions.TAB_SIZE) + extraSpaces
    // IndentInfo(lineFeeds = 1, ...) produces "\n" + the indent whitespace (spaces or tabs per indentOptions)
    val indentString = new IndentInfo(1, indentSize, 0).generateNewWhiteSpace(indentOptions)
    document.insertString(caretOffset, indentString)
    val newCaretOffset = caretOffset + indentString.length
    editor.getCaretModel.moveToOffset(newCaretOffset)
    EditorModificationUtilEx.scrollToCaret(editor)
    editor.getSelectionModel.removeSelection()
    if (needRemoveTrailingSpaces) {
      removeTrailingSpaces(document, newCaretOffset)
    }
  }
private def removeTrailingSpaces(document: Document, startOffset: Int): Unit = {
val documentText = document.getCharsSequence
val endOffset = CharArrayUtil.shiftForward(documentText, startOffset, SpaceOrTab)
if (endOffset == documentText.length() || documentText.charAt(endOffset) == '\\n') {
document.deleteString(startOffset, endOffset)
}
}
private def isCaretAtTheEndOfLine(caretOffset: Int, document: Document): Boolean = {
val documentText = document.getCharsSequence
val shifted = CharArrayUtil.shiftForward(documentText, caretOffset, SpaceOrTab)
shifted == documentText.length || documentText.charAt(shifted) == '\\n'
}
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/editor/enterHandler/Scala3IndentationBasedSyntaxEnterHandler.scala | Scala | apache-2.0 | 22,177 |
package 四法
// Base type of the result chain below.
// NOTE(review): all known implementations live in this file — consider `sealed trait Result`
// for exhaustive pattern-match checking (verify first that no other file extends it).
trait Result
// A node wrapping the rest of the chain — presumably a Peano-style "successor"; confirm intended semantics.
case class ResultS(tail: Result) extends Result
// The terminal value that ends a chain of ResultS wrappers.
case object ResultT extends Result
| djx314/ubw | a60-四/src/main/scala/四法/Result.scala | Scala | bsd-3-clause | 125 |
package info.hupel.isabelle.sbt
import sbt._
import sbtassembly.AssemblyPlugin
import sbtassembly.AssemblyPlugin.autoImport._
/** sbt auto-plugin adjusting sbt-assembly so that libisabelle resource indices from multiple jars are merged. */
object LibisabelleAssemblyPlugin extends AutoPlugin {
  // Only meaningful for projects that have both LibisabellePlugin and sbt-assembly
  override def requires = LibisabellePlugin && AssemblyPlugin
  // Activate automatically whenever the required plugins are enabled (no explicit enablePlugins needed)
  override def trigger = allRequirements
  override def projectSettings: Seq[Setting[_]] = Seq(
    assemblyMergeStrategy in assembly := {
      // Concatenate the ".libisabelle/.files" resource lists from all jars instead of picking a single one
      case PathList(".libisabelle", ".files") => MergeStrategy.concat
      // Everything else falls back to the previously configured merge strategy
      case path => (assemblyMergeStrategy in assembly).value(path)
    }
  )
}
| larsrh/sbt-libisabelle | src/main/scala/LibisabelleAssemblyPlugin.scala | Scala | apache-2.0 | 536 |
package rx.lang.scala.schedulers
import rx.lang.scala.Scheduler
object ComputationScheduler {
  /**
   * Returns the [[rx.lang.scala.Scheduler]] intended for computational work,
   * such as event-loops and processing of callbacks.
   *
   * Do not perform IO-bound work on this scheduler;
   * use [[rx.lang.scala.schedulers.IOScheduler]] for that instead.
   *
   * @return [[rx.lang.scala.Scheduler]] for computation-bound work.
   */
  def apply(): ComputationScheduler =
    new ComputationScheduler(rx.schedulers.Schedulers.computation())
}
// Thin Scala facade over the underlying RxJava scheduler instance; construction is restricted to the library.
class ComputationScheduler private[scala] (val asJavaScheduler: rx.Scheduler)
  extends Scheduler {}
package com.arcusys.valamis.lesson.scorm.service.sequencing
import com.arcusys.valamis.lesson.scorm.model.sequencing._
import com.arcusys.valamis.lesson.scorm.model.tracking.ActivityStateTree
import com.escalatesoft.subcut.inject.{ Injectable, BindingModule }
/**
* Termination Request Process [TB.2.3]
* Ends the current attempt on the Current Activity
*/
class TerminationRequestService(implicit val bindingModule: BindingModule) extends TerminationRequestServiceContract with Injectable {
  // Collaborating SCORM sequencing services, resolved via dependency injection
  private val rollupService = inject[RollupServiceContract]
  private val endAttemptService = inject[EndAttemptServiceContract]
  /**
   * Termination Request Process [TB.2.3]
   * Ends the current attempt on the Current Activity
   * @param tree Activity state tree for attempt
   * @param requestType termination request
   * @return validity of the termination request; may return a sequencing request; may return an exception code
   */
  def apply(tree: ActivityStateTree, requestType: TerminationRequestType.Value): TerminationResponse =
    tree.currentActivity match {
      // If the sequencing session has not begun, there is nothing to terminate.
      case None => TerminationResponse.invalid("Current Activity is not defined / Sequencing session has not begun")
      case Some(currentActivity) => {
        if ((requestType == TerminationRequestType.Exit || requestType == TerminationRequestType.Abandon) && !currentActivity.item.active)
          // If the current activity has already been terminated, there is nothing to terminate.
          TerminationResponse.invalid("Current Activity already terminated")
        else requestType match {
          case TerminationRequestType.Exit => exit(tree)
          case TerminationRequestType.ExitAll => exitAll(tree)
          case TerminationRequestType.SuspendAll => suspendAll(tree)
          case TerminationRequestType.Abandon => abandon(tree)
          case TerminationRequestType.AbandonAll => abandonAll(tree)
          case _ => TerminationResponse.invalid("Undefined termination request")
        }
      }
    }
  /**
   * Termination request is Exit
   * @return Termination request process response
   */
  private def exit(tree: ActivityStateTree): TerminationResponse = {
    // Applies post-condition rules after termination; recurses when the rules request ExitParent,
    // walking the Current Activity up the tree one level per recursion
    def applyPostConditionRules: TerminationResponse =
      tree.applyPostConditionRules match {
        case SequencingRulesResponse(Some(TerminationRequestType.ExitAll), _) => exitAll(tree)
        case SequencingRulesResponse(Some(TerminationRequestType.ExitParent), _) => {
          //If we exit the parent of the current activity, move the current activity to the parent of the current activity
          if (!tree.currentActivity.get.isRoot) {
            tree.currentActivity = tree.currentActivity.get.parent
            endAttemptService(tree.currentActivity.get)
            applyPostConditionRules // on the new current activity
          } else TerminationResponse.invalid("Activity tree root has no parent")
        }
        case SequencingRulesResponse(_, sequencingRequestOption) =>
          // If the attempt on the root of the Activity Tree is ending without a Retry, the Sequencing Session also ends
          if (tree.currentActivity.get.isRoot && sequencingRequestOption != Some(SequencingRequestType.Retry)) TerminationResponse(SequencingRequestType.Exit)
          else TerminationResponseValid(sequencingRequestOption)
      }
    // Ensure the state of the current activity is up to date
    endAttemptService(tree.currentActivity.get)
    // Check if any of the current activity's ancestors need to terminate
    // Part of [TB.2.1]. May change the Current Activity
    tree.applyExitConditionRules match {
      case Some(exitActivity) => {
        //End the current attempt on all active descendents (Terminate Descendent Attempts Process [UP.3]) + End the current attempt on the 'exiting' activity
        tree.currentActivity.get.pathTo(exitActivity, includeAncestor = true, includeThis = false) foreach endAttemptService.apply
        // Move the current activity to the activity that has been identified for termination
        tree.currentActivity = Some(exitActivity)
      }
      case _ => {}
    }
    applyPostConditionRules
  }
  /**
   * Termination request is ExitAll
   * @return Termination request process response
   */
  private def exitAll(tree: ActivityStateTree): TerminationResponse = {
    // End the attempt on the current activity (if still active), then on the whole tree,
    // and reset the Current Activity to the root
    if (tree.currentActivity.get.item.active) endAttemptService(tree.currentActivity.get)
    endAttemptService(tree)
    tree.currentActivity = Some(tree)
    TerminationResponse(SequencingRequestType.Exit)
  }
  /**
   * Termination request is SuspendAll
   * @return Termination request process response
   */
  private def suspendAll(tree: ActivityStateTree): TerminationResponse = {
    val currentActivity = tree.currentActivity.get
    // If the current activity is active or already suspended, suspend it and all of its descendants
    if (currentActivity.item.active || currentActivity.item.suspended) {
      // Ensure that any status change to this activity is propagated through the entire activity tree
      rollupService(currentActivity)
      tree.suspendedActivity = Some(currentActivity)
    } else {
      // An inactive non-root activity suspends its parent instead; an inactive root cannot be suspended
      if (!currentActivity.isRoot) tree.suspendedActivity = Some(currentActivity.parent.get)
      else return TerminationResponse.invalid("Cannot suspend an inactive root")
    }
    // Form the activity path as the ordered series of all activities from the Suspended Activity to the root of the activity tree, inclusive
    val path = tree.suspendedActivity.get.pathToRoot
    path.foreach(node => {
      node.item.active = false
      node.item.suspended = true
    })
    tree.currentActivity = Some(tree)
    TerminationResponse(SequencingRequestType.Exit)
  }
  /**
   * Termination request is Abandon
   * @return Termination request process response
   */
  private def abandon(tree: ActivityStateTree): TerminationResponse = {
    // Abandon only deactivates the current activity; no sequencing request is produced
    tree.currentActivity.get.item.active = false
    TerminationResponseValid()
  }
  /**
   * Termination request is AbandonAll
   * @return Termination request process response
   */
  private def abandonAll(tree: ActivityStateTree): TerminationResponse = {
    // Deactivate every activity from the current one up to the root, inclusive
    val path = tree.currentActivity.get.pathToRoot
    path.foreach(activity => {
      activity.item.active = false
    })
    tree.currentActivity = Some(tree)
    TerminationResponse(SequencingRequestType.Exit)
  }
}
} | ViLPy/Valamis | valamis-scorm-lesson/src/main/scala/com/arcusys/valamis/lesson/scorm/service/sequencing/TerminationRequestService.scala | Scala | lgpl-3.0 | 6,624 |
package tryp
package core
import org.log4s.getLogger
trait Logging { x =>
  /** Extra logger-name segments appended under [[moduleName]]; empty means "use the class name". */
  protected def loggerName: List[String] = Nil
  // Root segment of every logger name created through this trait
  protected val moduleName = "tryp"
  // Main logger: "<moduleName>.<segments>" or "<moduleName>.<class name>" when no segments are given
  protected implicit val log = {
    val parts = if (loggerName.isEmpty) List(this.className) else loggerName
    getLogger(parts.foldLeft(moduleName) { case (a, b) => s"$a.$b" })
  }
  // Separate logger for ad-hoc development output
  protected val logd = getLogger("tryp.dev")
  /** Logs `a` on the dev logger (highlighted) and returns it unchanged — handy for inspecting values mid-expression. */
  protected def dbg[A](a: A) = {
    logd.info(a.toString.yellow)
    a
  }
  /** Logs each element of `a` on its own line on the dev logger and returns the collection unchanged. */
  protected def dbgLines[A](a: collection.GenTraversableOnce[A]) = {
    a.toList map (_.toString) foreach (logd.info(_))
    a
  }
  // Syntax: `value.dbg` as a shortcut for `dbg(value)`
  protected implicit class ToDebugPrinter[A](a: A)
  {
    def dbg = x dbg a
  }
  // Syntax: `collection.dbgLines` as a shortcut for `dbgLines(collection)`
  protected implicit class ToDebugLinesPrinter[A](
    a: collection.GenTraversableOnce[A])
  {
    def dbgLines = x dbgLines a
  }
}
| tek/pulsar | core/src/log.scala | Scala | mit | 818 |
package org.zalando.jsonapi
import org.zalando.jsonapi.model.Links.Link
import org.zalando.jsonapi.model.RootObject.ResourceObjects
import scala.collection.immutable.{Seq ⇒ ImmutableSeq}
/**
* The model package, containing partially covered Jsonapi specification.
*/
package object model {
  /**
   * A root, top-level object.
   */
  case class RootObject(data: Option[RootObject.Data] = None,
                        links: Option[Links] = None,
                        errors: Option[Errors] = None,
                        meta: Option[Meta] = None,
                        included: Option[Included] = None,
                        jsonApi: Option[JsonApi] = None)
  /**
   * A companion object for root level support types.
   */
  object RootObject {
    // Primary data of a document: either a single resource object or an array of them
    sealed trait Data
    case class ResourceObject(`type`: String,
                              id: Option[String] = None,
                              attributes: Option[Attributes] = None,
                              relationships: Option[Relationships] = None,
                              links: Option[Links] = None,
                              meta: Option[Meta] = None)
        extends Data
    /**
     * A collection of [[ResourceObject]] objects.
     */
    case class ResourceObjects(array: ImmutableSeq[ResourceObject]) extends Data
  }
  /**
   * A collection of [[Link]] objects.
   */
  type Links = ImmutableSeq[Link]
  /**
   * Companion object for links.
   */
  object Links {
    // A single hyperlink; the concrete subtype encodes the JSON API link name ("self", "related", ...)
    sealed trait Link
    /**
     * A link of the "self" type.
     * @param url The url to link to.
     * @param meta The optional meta to link to.
     */
    case class Self(url: String, meta: Option[Meta]) extends Link
    /**
     * A link of the "related" type.
     * @param url The url to link to.
     * @param meta The optional meta to link to.
     */
    case class Related(url: String, meta: Option[Meta]) extends Link
    /**
     * A link of the "first" type.
     * @param url The url to link to.
     * @param meta The optional meta to link to.
     */
    case class First(url: String, meta: Option[Meta]) extends Link
    /**
     * A link of the "last" type.
     * @param url The url to link to.
     * @param meta The optional meta to link to.
     */
    case class Last(url: String, meta: Option[Meta]) extends Link
    /**
     * A link of the "next" type.
     * @param url The url to link to.
     * @param meta The optional meta to link to.
     */
    case class Next(url: String, meta: Option[Meta]) extends Link
    /**
     * A link of the "prev" type.
     * @param url The url to link to.
     * @param meta The optional meta to link to.
     */
    case class Prev(url: String, meta: Option[Meta]) extends Link
    /**
     * A link of the "about" type.
     * @param url The url to link to.
     * @param meta The optional meta to link to.
     */
    case class About(url: String, meta: Option[Meta]) extends Link
  }
  /**
   * A collection of [[Attribute]] objects.
   */
  type Attributes = ImmutableSeq[Attribute]
  /**
   * The representation of an attribute of the root object.
   * @param name the name of the attribute
   * @param value the value of the attribute
   */
  case class Attribute(name: String, value: JsonApiObject.Value)
  /**
   * A collection of [[Error]] objects.
   */
  type Errors = ImmutableSeq[Error]
  /**
   * The representation of an error object.
   * @param id an unique identifier of the error
   * @param links the links of the error
   * @param status the HTTP status code of the error
   * @param code an application specific code of the error
   * @param title a short human-readable description about the error
   * @param detail a detailed human-readable description about the error
   * @param source the source of the error
   * @param meta the meta information about the error
   */
  case class Error(id: Option[String] = None,
                   links: Option[Links] = None,
                   status: Option[String] = None,
                   code: Option[String] = None,
                   title: Option[String] = None,
                   detail: Option[String] = None,
                   source: Option[ErrorSource] = None,
                   meta: Option[Meta] = None)
  /**
   * An object containing references to the source of the error.
   * @param pointer the optional pointer based on <a href="https://tools.ietf.org/html/rfc6901">RFC6901</a> standard
   * @param parameter an optional string indicating which URI query parameter caused the error
   */
  case class ErrorSource(pointer: Option[String] = None, parameter: Option[String] = None)
  /**
   * The meta object as a map of string - json object value pairs
   */
  type Meta = Map[String, JsonApiObject.Value]
  /**
   * An array of resource objects.
   * @param resourceObjects the array
   */
  case class Included(resourceObjects: ResourceObjects)
  /**
   * A collection of [[JsonApiProperty]] objects.
   */
  type JsonApi = ImmutableSeq[JsonApiProperty]
  /**
   * An information about the implementation.
   * @param name the name of the json api implementation detail
   * @param value the value of the json api implementation detail
   */
  case class JsonApiProperty(name: String, value: JsonApiObject.Value)
  /**
   * A companion object for json api implementation specific data.
   */
  object JsonApiObject {
    // JSON value ADT used for attributes, meta entries and json-api properties
    sealed trait Value
    /**
     * An attribute value that is string-typed.
     * @param value the string value
     */
    case class StringValue(value: String) extends Value
    /**
     * An attribute value that is number-typed.
     * @param value the number value
     */
    case class NumberValue(value: BigDecimal) extends Value
    /**
     * An attribute value that is boolean-typed.
     * @param value the boolean value
     */
    case class BooleanValue(value: Boolean) extends Value
    /**
     * An attribute value that is list(key, value)-typed.
     * @param value the list of key-value pairs
     */
    case class JsObjectValue(value: Attributes) extends Value
    /**
     * An attribute value that is array-typed.
     * @param value the array value
     */
    case class JsArrayValue(value: Seq[Value]) extends Value
    /**
     * An attribute value that is null.
     */
    case object NullValue extends Value
    /**
     * An attribute value that is true
     */
    val TrueValue = BooleanValue(true)
    /**
     * An attribute value that is false
     */
    val FalseValue = BooleanValue(false)
  }
  /**
   * A collection of [[Relationship]] objects.
   */
  type Relationships = Map[String, Relationship]
  /**
   * An object represents the relationship and describes underlying object.
   * @param links the links of underlying object
   * @param data the data of underlying object
   */
  case class Relationship(links: Option[Links] = None, data: Option[RootObject.Data] = None)
}
| texvex/scala-jsonapi | src/main/scala/org/zalando/jsonapi/model/package.scala | Scala | mit | 7,007 |
/*
* Copyright 2011-2017 Chris de Vreeze
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eu.cdevreeze.yaidom
/**
 * This package contains element representations that can be compared for (some notion of "value") equality, unlike normal yaidom nodes.
 * That notion of equality is simple to understand, but "naive". The user of the API must take control over what is compared
 * for equality.
 *
 * See [[eu.cdevreeze.yaidom.resolved.Node]] for why this package is named `resolved`.
 *
 * The most important difference with normal `Elem`s is that qualified names do not occur,
 * but only expanded (element and attribute) names. This reminds of James Clark notation for XML trees and
 * expanded names, where qualified names are absent.
 *
 * Moreover, the only nodes in this package are element and text nodes.
 *
 * Below follows a simple example query, using the uniform query API:
 * {{{
 * // Note the import of package resolved, and not of its members. That is indeed a best practice!
 * import eu.cdevreeze.yaidom.resolved
 *
 * val resolvedBookstoreElem = resolved.Elem.from(bookstoreElem)
 *
 * val scalaBookAuthors =
 *   for {
 *     bookElem <- resolvedBookstoreElem \ EName("{http://bookstore/book}Book")
 *     if (bookElem \@ EName("ISBN")).contains("978-0981531649")
 *     authorElem <- bookElem \\ EName("{http://bookstore/author}Author")
 *   } yield authorElem
 * }}}
 * The query for Scala book authors would have been exactly the same if normal `Elem`s had been used instead of `resolved.Elem`s
 * (replacing `resolvedBookstoreElem` by `bookstoreElem`)!
 *
 * @author Chris de Vreeze
 */
package object resolved
| dvreeze/yaidom | shared/src/main/scala/eu/cdevreeze/yaidom/resolved/package.scala | Scala | apache-2.0 | 2,157 |
/*
* Copyright (c) 2018. Fengguo Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License v2.0
* which accompanies this distribution, and is available at
* https://www.apache.org/licenses/LICENSE-2.0
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.jnsaf.secret
/**
* Created by fgwei on 3/16/17.
*/
/**
 * Minimal mutable holder used as a source/sink fixture in taint-analysis tests:
 * a value flows in via [[set]] and back out via [[get]].
 */
class Data {
  // Backing field for the stored value; remains null until set() is called.
  private var value: String = _

  /** Stores the given value, overwriting any previous one. */
  def set(d: String): Unit = { value = d }

  /** Returns the most recently stored value, or null if none was set. */
  def get(): String = value
}
| arguslab/Argus-SAF | jnsaf/src/test/scala/org/argus/jnsaf/secret/Data.scala | Scala | apache-2.0 | 542 |
// Copyright (c) 2013, Johns Hopkins University. All rights reserved.
// This software is released under the 2-clause BSD license.
// See /LICENSE.txt
// Travis Wolfe, twolfe18@gmail.com, 30 July 2013
package edu.jhu.hlt.parma.diagnostics
import edu.jhu.hlt.parma.inference._
import edu.jhu.hlt.parma.types._
import edu.jhu.hlt.parma.experiments._
import edu.jhu.hlt.parma.util._
import scala.collection.JavaConversions._
import java.util.logging._
import java.io.File
/**
 * Writes a human-readable diagnostic report of canonical-mention selection to the
 * file named by the `diagnostics.canonical.mention.file` config key. Skipped when
 * that key is unset.
 */
object CanonicalMentionDiagnostics {
  val log = Logger.getLogger(this.getClass.getName)
  // parma.config key naming the output file for this diagnostic report.
  val OUTPUT = "diagnostics.canonical.mention.file"
  def printCanonicalMentions(corpus: DocAlignmentCorpus[DocAlignment]) {
    val outfile = ParmaConfig.getFile(OUTPUT)
    // NOTE(review): this also skips when the configured file does not yet exist,
    // which seems odd for an output file — confirm that is intended.
    if(outfile == null || !outfile.exists) {
      log.info("[printCanonicalMentions] skipping because no file was specified in parma.config with " + OUTPUT)
      return
    }
    // "diagnotics" is a typo, but it is used consistently for start/end below,
    // so the profiler task pairs up correctly; do not change one without the other.
    Profiler.startTask("diagnotics:canonical_mentions")
    log.info("[printCanonicalMentions] writing canonical mentions to " + outfile)
    val writer = FileUtils.getWriter(outfile)
    // For one coref chain: dump every mention with its POS/NER tags and canonical
    // score, then the mention chosen as canonical.
    val pcm = (ac: ArgumentCoref, doc: Document) => {
      val canonical = CanonicalMentionFinder.canonicalMention(ac, doc)
      writer.write("coref chain =\\n")
      ac.foreach(m => {
        val score = CanonicalMentionFinder.scoreCanonicalMention(doc, m.location)
        val s = Describe.argument(m, doc)
        val pos = doc.getMentionTokens(m).map(_.getPosTag).mkString(" ")
        val ner = doc.getMentionTokens(m).map(_.getNerTag).mkString(" ")
        writer.write("\\t%s \\t POS = %s \\t NER = %s \\t score = %.3g\\n".format(s, pos, ner, score))
      })
      writer.write("canonical = %s\\n".format(Describe.argument(canonical, doc)))
      writer.newLine
    }
    // only go over train mentions for now; chains of size 1 are trivially canonical
    corpus.trainAlignments.foreach(da => {
      da.report.corefs.filter(_.size > 1).foreach(ac => pcm(ac, da.report))
      da.passage.corefs.filter(_.size > 1).foreach(ac => pcm(ac, da.passage))
    })
    // for each positive alignment, just write out the headwords next to each other
    writer.newLine
    writer.newLine
    writer.write("====================== just head words for (all) true alignments ======================\\n")
    for(da <- corpus.trainAlignments ++ corpus.testAlignments) {
      writer.write("alignment %s\\n".format(da.id))
      for(a <- da.possibleAlignments)
        writer.write(Describe.justHeadWord(a, da.report, da.passage) + "\\n")
    }
    writer.close
    Profiler.endTask("diagnotics:canonical_mentions")
  }
}
| hltcoe/parma | src/main/scala/edu/jhu/hlt/parma/diagnostics/CanonicalMentionDiagnostics.scala | Scala | bsd-2-clause | 2,735 |
package de.tu_berlin.impro3.spark
import de.tu_berlin.impro3.core.{Algorithm => CoreAlgorithm}
import net.sourceforge.argparse4j.inf.{Namespace, Subparser}
/** Companion holding the shared command-line plumbing for Spark-backed algorithms. */
object Algorithm {

  object Command {
    // argument names
    val KEY_MASTER = "master"
  }

  /** Base command that registers the common `--master` CLI option on the parser. */
  abstract class Command[A <: Algorithm](implicit m: scala.reflect.Manifest[A]) extends CoreAlgorithm.Command {

    override def setup(parser: Subparser): Unit = {
      // argparse4j's Argument builder is fluent (each call returns the same
      // Argument instance), so configuring it statement by statement is equivalent.
      val master = parser.addArgument(s"--${Command.KEY_MASTER}")
      master.`type`[String](classOf[String])
      master.dest(Command.KEY_MASTER)
      master.metavar("URL")
      master.help("Spark master (default: local[*])")
      parser.setDefault(Command.KEY_MASTER, "local[*]")
    }
  }
}
/**
 * Base class for Spark-backed algorithms; carries the Spark master URL.
 *
 * @param sparkMaster the Spark master URL (e.g. "local[*]")
 */
abstract class Algorithm(val sparkMaster: String) extends CoreAlgorithm {
  // Convenience constructor: read the master URL from the parsed CLI namespace.
  def this(ns: Namespace) = this(ns.get[String](Algorithm.Command.KEY_MASTER))
}
| joroKr21/spatio-temporal-dynamics | impro3-ws14-spark/src/main/scala/de/tu_berlin/impro3/spark/Algorithm.scala | Scala | apache-2.0 | 873 |
package com.twitter.finagle
/** Internal helpers shared by the ThriftMux client/server implementations. */
private object ThriftMuxUtil {
  val role = Stack.Role("ProtocolRecorder")

  /**
   * Loads the class with the given name, translating a missing class into an
   * `IllegalArgumentException` so callers see an invalid-argument error rather
   * than a raw `ClassNotFoundException`.
   */
  def classForName(name: String) = {
    try {
      Class.forName(name)
    } catch {
      case missing: ClassNotFoundException =>
        throw new IllegalArgumentException("Iface is not a valid thrift iface", missing)
    }
  }

  /**
   * A stack module that registers a gauge marking this stack as speaking the
   * ThriftMux protocol. It passes the service factory through unmodified.
   */
  val protocolRecorder: Stackable[ServiceFactory[mux.Request, mux.Response]] =
    new Stack.Module1[param.Stats, ServiceFactory[mux.Request, mux.Response]] {
      val role = ThriftMuxUtil.role
      val description = "Record ThriftMux protocol usage"

      def make(_stats: param.Stats, next: ServiceFactory[mux.Request, mux.Response]) = {
        // Unwrap the stats receiver and publish a constant gauge under protocol/thriftmux.
        val param.Stats(stats) = _stats
        stats.scope("protocol").provideGauge("thriftmux")(1)
        next
      }
    }
}
| sveinnfannar/finagle | finagle-thriftmux/src/main/scala/com/twitter/finagle/ThriftMuxUtil.scala | Scala | apache-2.0 | 786 |
package com.eevolution.context.dictionary.infrastructure.service
import java.util.UUID
import akka.NotUsed
import com.eevolution.context.dictionary.domain._
import com.eevolution.context.dictionary.domain.model.Chart
import com.eevolution.utils.PaginatedSequence
import com.lightbend.lagom.scaladsl.api.{Service, ServiceCall}
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: emeris.hernandez@e-evolution.com, http://www.e-evolution.com , http://github.com/EmerisScala
* Created by emeris.hernandez@e-evolution.com , www.e-evolution.com on 15/11/17.
*/
/**
 * Chart Service: Lagom service interface exposing read-only access to [[Chart]]
 * entities by id, by UUID, and as (optionally paginated) listings.
 */
trait ChartService extends Service with api.service.ChartService {

  override def getAll() : ServiceCall[NotUsed, List[Chart]]
  override def getById(id: Int): ServiceCall[NotUsed, Chart]
  override def getByUUID(uuid :UUID): ServiceCall[NotUsed, Chart]
  override def getAllByPage(pageNo: Option[Int], pageSize: Option[Int]): ServiceCall[NotUsed, PaginatedSequence[Chart]]

  def descriptor = {
    import Service._
    named("chart").withCalls(
      pathCall("/api/v1_0_0/chart/all", getAll _) ,
      pathCall("/api/v1_0_0/chart/:id", getById _),
      // Bug fix: "/chart/:id" and "/chart/:uuid" describe the identical URL shape,
      // so the UUID call conflicted with (and was shadowed by) the Int call. Give
      // the UUID lookup a distinct path segment so it is actually routable.
      pathCall("/api/v1_0_0/chart/uuid/:uuid", getByUUID _) ,
      pathCall("/api/v1_0_0/chart?pageNo&pageSize", getAllByPage _)
    )
  }
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/service/ChartService.scala | Scala | gpl-3.0 | 1,981 |
/*
* Copyright 2017 helloscala.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package helloscala.http.session.converters
import scala.collection.JavaConverters._
import scala.language.implicitConversions
/** Implicit conversions from Java maps to Scala immutable maps. */
object MapConverters {

  /**
   * Implicitly converts a `java.util.Map` into a Scala immutable `Map`.
   * The entries are copied, so later mutation of the Java map is not reflected
   * in the returned Scala map.
   */
  implicit def toImmutableMap[A, B](m: java.util.Map[A, B]): scala.collection.immutable.Map[A, B] = {
    val entries = m.asScala.toSeq
    scala.collection.immutable.Map(entries: _*)
  }
}
| helloscala/helloscala | hs-akka-http-session/src/main/scala/helloscala/http/session/converters/MapConverters.scala | Scala | apache-2.0 | 874 |
package io.buoyant.router.h2
import com.twitter.concurrent.AsyncQueue
import com.twitter.conversions.time._
import com.twitter.finagle.Service
import com.twitter.finagle.buoyant.h2.service.{H2Classifier, H2ReqRep, H2ReqRepFrame}
import com.twitter.finagle.buoyant.h2.{Frame, Headers, Request, Response, Status, Stream}
import com.twitter.finagle.service.{ResponseClass, RetryBudget}
import com.twitter.finagle.stats.{InMemoryStatsReceiver, StatsReceiver}
import com.twitter.finagle.util.DefaultTimer
import com.twitter.io.Buf
import com.twitter.util._
import io.buoyant.test.FunSuite
import scala.{Stream => SStream}
/**
 * Unit tests for `ClassifiedRetryFilter`: retries are driven by an [[H2Classifier]]
 * that reads a "retry" header/trailer, and are disabled when the request/response
 * buffers overflow or classification times out.
 */
class ClassifiedRetryFilterTest extends FunSuite {

  // Classifier under test: a response (or trailing frame) carrying "retry: true" is a
  // retryable failure, "retry: false" a non-retryable failure; anything else succeeds.
  val classifier: H2Classifier = new H2Classifier {
    override val responseClassifier: PartialFunction[H2ReqRep, ResponseClass] = {
      case H2ReqRep(_, Throw(_)) => ResponseClass.NonRetryableFailure
      case H2ReqRep(_, Return(rsp)) if rsp.headers.get("retry") == Some("true") => ResponseClass.RetryableFailure
      case H2ReqRep(_, Return(rsp)) if rsp.headers.get("retry") == Some("false") => ResponseClass.NonRetryableFailure
    }
    override val streamClassifier: PartialFunction[H2ReqRepFrame, ResponseClass] = {
      case H2ReqRepFrame(_, Return((_, Some(Return(f: Frame.Trailers))))) if f.get("retry") == Some("true") =>
        ResponseClass.RetryableFailure
      case H2ReqRepFrame(_, Return((_, Some(Return(f: Frame.Trailers))))) if f.get("retry") == Some("false") =>
        ResponseClass.NonRetryableFailure
      case _ =>
        ResponseClass.Success
    }
  }

  implicit val timer = DefaultTimer

  // Filter with no backoff between attempts and an unlimited retry budget, so test
  // outcomes depend only on classification and buffer limits.
  def filter(stats: StatsReceiver) = new ClassifiedRetryFilter(
    stats,
    classifier,
    SStream.continually(0.millis),
    RetryBudget.Infinite
  )

  // Drains a stream, concatenating data frames and capturing the trailers (if any).
  // Fails for an empty stream; releases every frame it reads.
  def read(stream: Stream): Future[(Buf, Option[Frame.Trailers])] = {
    if (stream.isEmpty) Future.exception(new IllegalStateException("empty stream"))
    else stream.read().flatMap {
      case f: Frame.Data if f.isEnd =>
        f.release()
        Future.value((f.buf, None))
      case f: Frame.Trailers =>
        f.release()
        Future.value((Buf.Empty, Some(f)))
      case f: Frame.Data =>
        f.release()
        read(stream).map { case (next, trailers) => (f.buf.concat(next), trailers) }
    }
  }

  // Drains a stream to a UTF-8 string, discarding trailers.
  def readStr(stream: Stream): Future[String] = read(stream).map {
    case (buf, _) =>
      Buf.Utf8.unapply(buf).get
  }

  /**
   * Service that expects the request body "hello" and replies "good" + "bye" with
   * trailers "retry" (true until the `tries`-th call) and "i" (the call count), so
   * the classifier keeps retrying until attempt number `tries`.
   */
  class TestService(tries: Int = 3) extends Service[Request, Response] {
    @volatile var i = 0
    override def apply(request: Request): Future[Response] = {
      readStr(request.stream).map { str =>
        i += 1
        assert(str == "hello")
        val rspQ = new AsyncQueue[Frame]()
        rspQ.offer(Frame.Data("good", eos = false))
        rspQ.offer(Frame.Data("bye", eos = false))
        rspQ.offer(Frame.Trailers("retry" -> (i != tries).toString, "i" -> i.toString))
        Response(Status.Ok, Stream(rspQ))
      }
    }
  }

  // Happy path: two retries, success on the third attempt, counters reflect it.
  test("retries") {
    val reqQ = new AsyncQueue[Frame]
    reqQ.offer(Frame.Data("hel", eos = false))
    reqQ.offer(Frame.Data("lo", eos = true))
    val reqStream = Stream(reqQ)
    val req = Request(Headers.empty, reqStream)
    val stats = new InMemoryStatsReceiver
    val svc = filter(stats).andThen(new TestService())
    val rsp = await(svc(req))
    val (buf, Some(trailers)) = await(read(rsp.stream))
    assert(Buf.Utf8("goodbye") == buf)
    assert(trailers.get("i") == Some("3"))
    assert(trailers.get("retry") == Some("false"))
    assert(stats.counters(Seq("retries", "total")) == 2)
    assert(stats.stats(Seq("retries", "per_request")) == Seq(2f))
    assert(stats.counters.get(Seq("retries", "request_stream_too_long")) == None)
    assert(stats.counters.get(Seq("retries", "response_stream_too_long")) == None)
    assert(stats.counters.get(Seq("retries", "classification_timeout")) == None)
  }

  // First response is classified non-retryable (tries = 1), so no retries happen.
  test("response not retryable") {
    val reqQ = new AsyncQueue[Frame]
    reqQ.offer(Frame.Data("hel", eos = false))
    reqQ.offer(Frame.Data("lo", eos = true))
    val reqStream = Stream(reqQ)
    val req = Request(Headers.empty, reqStream)
    val stats = new InMemoryStatsReceiver
    val svc = filter(stats).andThen(new TestService(tries = 1))
    val rsp = await(svc(req))
    val (buf, Some(trailers)) = await(read(rsp.stream))
    assert(Buf.Utf8("goodbye") == buf)
    assert(trailers.get("i") == Some("1"))
    assert(trailers.get("retry") == Some("false"))
    assert(stats.counters.get(Seq("retries", "total")) == None)
    assert(stats.stats(Seq("retries", "per_request")) == Seq(0f))
    assert(stats.counters.get(Seq("retries", "request_stream_too_long")) == None)
    assert(stats.counters.get(Seq("retries", "response_stream_too_long")) == None)
    assert(stats.counters.get(Seq("retries", "classification_timeout")) == None)
  }

  // Request buffer (3 bytes) is smaller than the 5-byte body, so the request cannot
  // be replayed even though the response was classified retryable.
  test("request stream too long to retry") {
    val reqQ = new AsyncQueue[Frame]
    reqQ.offer(Frame.Data("hel", eos = false))
    reqQ.offer(Frame.Data("lo", eos = true))
    val reqStream = Stream(reqQ)
    val req = Request(Headers.empty, reqStream)
    val stats = new InMemoryStatsReceiver
    val svc = new ClassifiedRetryFilter(
      stats,
      classifier,
      SStream.continually(0.millis),
      RetryBudget.Infinite,
      requestBufferSize = 3
    ).andThen(new TestService())
    val rsp = await(svc(req))
    val (buf, Some(trailers)) = await(read(rsp.stream))
    assert(Buf.Utf8("goodbye") == buf)
    assert(trailers.get("i") == Some("1"))
    assert(trailers.get("retry") == Some("true")) // response is retryable but req stream too long
    assert(stats.counters.get(Seq("retries", "total")) == None)
    assert(stats.stats(Seq("retries", "per_request")) == Seq(0f))
    assert(stats.counters(Seq("retries", "request_stream_too_long")) == 1)
    assert(stats.counters.get(Seq("retries", "response_stream_too_long")) == None)
    assert(stats.counters.get(Seq("retries", "classification_timeout")) == None)
  }

  // Response buffer (4 bytes) is smaller than the 7-byte response body, so the
  // already-delivered response cannot be discarded for a retry.
  test("response stream too long to retry") {
    val reqQ = new AsyncQueue[Frame]
    reqQ.offer(Frame.Data("hel", eos = false))
    reqQ.offer(Frame.Data("lo", eos = true))
    val reqStream = Stream(reqQ)
    val req = Request(Headers.empty, reqStream)
    val stats = new InMemoryStatsReceiver
    val svc = new ClassifiedRetryFilter(
      stats,
      classifier,
      SStream.continually(0.millis),
      RetryBudget.Infinite,
      responseBufferSize = 4
    ).andThen(new TestService())
    val rsp = await(svc(req))
    val (buf, Some(trailers)) = await(read(rsp.stream))
    assert(Buf.Utf8("goodbye") == buf)
    assert(trailers.get("i") == Some("1"))
    assert(trailers.get("retry") == Some("true")) // response is retryable but response stream too long
    assert(stats.counters.get(Seq("retries", "total")) == None)
    assert(stats.stats(Seq("retries", "per_request")) == Seq(0f))
    assert(stats.counters.get(Seq("retries", "request_stream_too_long")) == None)
    assert(stats.counters(Seq("retries", "response_stream_too_long")) == 1)
    assert(stats.counters.get(Seq("retries", "classification_timeout")) == None)
  }

  // When the response headers alone classify the response (retry=false), the stream
  // does not need to be buffered and frames flow straight through.
  test("early classification") {
    val rspQ = new AsyncQueue[Frame]()
    val stats = new InMemoryStatsReceiver
    val svc = filter(stats).andThen(Service.mk { req: Request =>
      Future.value(Response(Headers("retry" -> "false", ":status" -> "200"), Stream(rspQ)))
    })
    // if early classification is possible, the response should not be buffered
    val rsp = await(svc(Request(Headers.empty, Stream.empty())))
    rspQ.offer(Frame.Data("foo", eos = true))
    val frame = await(rsp.stream.read()).asInstanceOf[Frame.Data]
    assert(frame.buf == Buf.Utf8("foo"))
    await(frame.release())
    assert(stats.counters.get(Seq("retries", "total")) == None)
    assert(stats.stats(Seq("retries", "per_request")) == Seq(0f))
    assert(stats.counters.get(Seq("retries", "request_stream_too_long")) == None)
    assert(stats.counters.get(Seq("retries", "response_stream_too_long")) == None)
    assert(stats.counters.get(Seq("retries", "classification_timeout")) == None)
  }

  // If no trailers arrive within the classification timeout, the response is
  // released as-is and the timeout counter is bumped (no retry).
  test("classification timeout") {
    val rspQ = new AsyncQueue[Frame]()
    val stats = new InMemoryStatsReceiver
    val timer = new MockTimer
    val svc = new ClassifiedRetryFilter(
      stats,
      classifier,
      SStream.continually(0.millis),
      RetryBudget.Infinite,
      classificationTimeout = 1.second
    )(timer).andThen(Service.mk { req: Request =>
      Future.value(Response(Status.Ok, Stream(rspQ)))
    })
    Time.withCurrentTimeFrozen { tc =>
      val rspF = svc(Request(Headers.empty, Stream.empty()))
      assert(!rspF.isDefined)
      tc.advance(1.second)
      timer.tick()
      val rsp = await(rspF)
      rspQ.offer(Frame.Data("foo", eos = true))
      val frame = await(rsp.stream.read()).asInstanceOf[Frame.Data]
      assert(frame.buf == Buf.Utf8("foo"))
      frame.release()
    }
    assert(stats.counters.get(Seq("retries", "total")) == None)
    assert(stats.stats(Seq("retries", "per_request")) == Seq(0f))
    assert(stats.counters.get(Seq("retries", "request_stream_too_long")) == None)
    assert(stats.counters.get(Seq("retries", "response_stream_too_long")) == None)
    assert(stats.counters(Seq("retries", "classification_timeout")) == 1)
  }
}
| denverwilliams/linkerd | router/h2/src/test/scala/io/buoyant/router/h2/ClassifiedRetryFilterTest.scala | Scala | apache-2.0 | 9,311 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.invoker
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.database.StaleParameter
import org.apache.openwhisk.core.entity.{Identity, View}
import org.apache.openwhisk.core.entity.types.AuthStore
import scala.concurrent.{ExecutionContext, Future}
import spray.json.DefaultJsonProtocol._
import scala.concurrent.duration.FiniteDuration
/**
 * The namespace blacklist gets all namespaces that are throttled to 0 or blocked from the database.
 *
 * The caller is responsible for periodically updating the blacklist with `refreshBlacklist`.
 *
 * @param authStore Subjects database with the limit-documents.
 */
class NamespaceBlacklist(authStore: AuthStore) {
  // `refreshBlacklist` replaces this set (the class doc says the caller refreshes it
  // periodically, typically from a scheduler) while `isBlacklisted` reads it from
  // other call paths; @volatile ensures readers observe the latest published set.
  @volatile private var blacklist: Set[String] = Set.empty

  /**
   * Check if the identity, who invoked the activation is in the blacklist.
   *
   * @param identity which invoked the action.
   * @return whether or not the current identity is considered blacklisted
   */
  def isBlacklisted(identity: Identity): Boolean = blacklist.contains(identity.namespace.name.asString)

  /**
   * Refreshes the current blacklist from the database.
   *
   * Queries the `namespaceThrottlings/blockedNamespaces` view (accepting a stale
   * read that is refreshed afterwards), extracts each row's "key" field as the
   * namespace name, publishes the new set, and returns it.
   *
   * @return the newly fetched set of blacklisted namespace names
   */
  def refreshBlacklist()(implicit ec: ExecutionContext, tid: TransactionId): Future[Set[String]] = {
    authStore
      .query(
        table = NamespaceBlacklist.view.name,
        startKey = List.empty,
        endKey = List.empty,
        skip = 0,
        limit = Int.MaxValue,
        includeDocs = false,
        descending = true,
        reduce = false,
        stale = StaleParameter.UpdateAfter)
      .map(_.map(_.fields("key").convertTo[String]).toSet)
      .map { newBlacklist =>
        blacklist = newBlacklist
        newBlacklist
      }
  }
}
/** Identifies the database design document/view that lists blocked namespaces. */
object NamespaceBlacklist {
  val view = View("namespaceThrottlings", "blockedNamespaces")
}
/** Configuration relevant to the namespace blacklist */
case class NamespaceBlacklistConfig(pollInterval: FiniteDuration)
| starpit/openwhisk | core/invoker/src/main/scala/org/apache/openwhisk/core/invoker/NamespaceBlacklist.scala | Scala | apache-2.0 | 2,741 |
/**
* Copyright (C) 2010-2011 LShift Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.lshift.diffa.client
import com.sun.jersey.core.util.MultivaluedMapImpl
import net.lshift.diffa.kernel.participants._
import com.sun.jersey.api.client.ClientResponse
import org.apache.commons.io.IOUtils
import net.lshift.diffa.kernel.util.MissingObjectException
import net.lshift.diffa.kernel.util.AlertCodes._
import org.apache.http.util.EntityUtils
import org.slf4j.LoggerFactory
import net.lshift.diffa.kernel.config._
/**
 * JSON/REST content adapter client.
 *
 * Retrieves entity content from a downstream participant over HTTP, reusing the
 * URL-building and authentication machinery of [[InternalRestClient]].
 */
class ContentParticipantRestClient(pair: PairRef,
                                   scanUrl: String,
                                   serviceLimitsView: PairServiceLimitsView,
                                   credentialsLookup:DomainCredentialsLookup)
    extends InternalRestClient(pair, scanUrl, serviceLimitsView, credentialsLookup)
    with ContentParticipantRef {

  val log = LoggerFactory.getLogger(getClass)

  /**
   * Fetches the content body for the entity with the given identifier.
   *
   * Issues a GET carrying an `identifier` query parameter. Returns the response
   * body on HTTP 200, throws [[MissingObjectException]] on 404, and for any other
   * status logs the body with an alert code and throws a generic exception. The
   * HTTP client is shut down in all cases.
   */
  def retrieveContent(identifier: String) = {
    val params = new MultivaluedMapImpl()
    params.add("identifier", identifier)
    // NOTE(review): presumably `maybeAuthenticate` invokes this with query-parameter
    // credentials when the domain has them configured — confirm in InternalRestClient.
    def prepareRequest(query:Option[QueryParameterCredentials]) = buildGetRequest(params, query)
    val (httpClient, httpGet) = maybeAuthenticate(prepareRequest)
    try {
      val response = httpClient.execute(httpGet)
      response.getStatusLine.getStatusCode match {
        case 200 => EntityUtils.toString(response.getEntity)
        case 404 => throw new MissingObjectException(identifier)
        case _ =>
          log.error("%s - %s".format(formatAlertCode(pair, CONTENT_RETRIEVAL_FAILED), EntityUtils.toString(response.getEntity)))
          throw new Exception("Participant content retrieval failed")
      }
    }
    finally {
      shutdownImmediate(httpClient)
    }
  }
}
/*
* Copyright 1998-2016 Linux.org.ru
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ru.org.linux.topic
import java.sql.ResultSet
import javax.sql.DataSource
import com.google.common.collect.ImmutableMap
import org.springframework.dao.DuplicateKeyException
import org.springframework.jdbc.core.RowMapper
import org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate
import org.springframework.scala.jdbc.core.JdbcTemplate
import org.springframework.stereotype.Repository
import ru.org.linux.tag.TagInfo
import scala.collection.JavaConverters._
@Repository
class TopicTagDao(ds:DataSource) {
  private val jdbcTemplate = new JdbcTemplate(ds)
  private val namedJdbcTemplate = new NamedParameterJdbcTemplate(ds)
  /**
   * Adds a tag to a topic.
   *
   * @param msgId topic id
   * @param tagId tag id
   */
  def addTag(msgId:Int, tagId:Int):Unit = {
    try {
      jdbcTemplate.update("INSERT INTO tags VALUES(?,?)", msgId, tagId)
    } catch {
      // a duplicate (msgid, tagid) pair means the tag is already set; ignore it
      case _:DuplicateKeyException ⇒
    }
  }
  /**
   * Removes a tag from a topic.
   *
   * @param msgId topic id
   * @param tagId tag id
   */
  def deleteTag(msgId:Int, tagId:Int):Unit = {
    jdbcTemplate.update("DELETE FROM tags WHERE msgid=? and tagid=?", msgId, tagId)
  }
  /**
   * Gets the list of tags for a topic.
   *
   * @param msgid topic id
   * @return the topic's tags, ordered by tag value
   */
  def getTags(msgid:Int):Seq[TagInfo] = {
    jdbcTemplate.queryAndMap(
      "SELECT tags_values.value, tags_values.counter, tags_values.id FROM tags, tags_values WHERE tags.msgid=? AND tags_values.id=tags.tagid ORDER BY value",
      msgid
    ) { (rs, _) => TagInfo(rs.getString("value"), rs.getInt("counter"), rs.getInt("id")) }
  }
  /**
   * Gets the number of topic-tag rows that would be changed by replacing one tag
   * with another (i.e. the usage-counter increase for the new tag).
   *
   * @param oldTagId id of the old tag
   * @param newTagId id of the new tag
   * @return the usage-counter increase for the new tag
   */
  def getCountReplacedTags(oldTagId:Int, newTagId:Int):Int = {
    jdbcTemplate.queryForSeq[Integer](
      "SELECT count (tagid) FROM tags WHERE tagid=? AND msgid NOT IN (SELECT msgid FROM tags WHERE tagid=?)",
      oldTagId,
      newTagId
    ).head
  }
  /**
   * Replaces a tag in topics with another tag (only in topics that do not already
   * carry the new tag, to avoid duplicate rows).
   *
   * @param oldTagId id of the old tag
   * @param newTagId id of the new tag
   */
  def replaceTag(oldTagId:Int, newTagId:Int):Unit = {
    jdbcTemplate.update(
      "UPDATE tags SET tagid=? WHERE tagid=? AND msgid NOT IN (SELECT msgid FROM tags WHERE tagid=?)",
      newTagId,
      oldTagId,
      newTagId
    )
  }
  /**
   * Removes a tag from all topics.
   *
   * @param tagId tag id
   */
  def deleteTag(tagId:Int): Unit = {
    jdbcTemplate.update("DELETE FROM tags WHERE tagid=?", tagId)
  }
  /**
   * Recalculates all tag usage counters from the non-deleted topics.
   */
  def reCalculateAllCounters():Unit = {
    jdbcTemplate.update("update tags_values set counter = (select count(*) from tags join topics on tags.msgid=topics.id where tags.tagid=tags_values.id and not deleted)")
  }
  /**
   * Gets the tags for a batch of topics.
   *
   * @param topics topic ids
   * @return (topic id, tag) pairs, ordered by tag value
   */
  def getTags(topics: Seq[Int]): Seq[(Int, TagInfo)] = {
    if (topics.isEmpty) {
      Vector.empty
    } else {
      val topicIds = topics.asJava
      namedJdbcTemplate.query(
        "SELECT msgid, tags_values.value, tags_values.counter, tags_values.id FROM tags, tags_values WHERE tags.msgid in (:list) AND tags_values.id=tags.tagid ORDER BY value",
        ImmutableMap.of("list", topicIds),
        new RowMapper[(Int, TagInfo)]() {
          def mapRow(resultSet: ResultSet, rowNum: Int): (Int, TagInfo) =
            resultSet.getInt("msgid") -> TagInfo(
              resultSet.getString("value"),
              resultSet.getInt("counter"),
              resultSet.getInt("id")
            )
        }).asScala
    }
  }
  /**
   * Adjusts a tag's usage counter.
   *
   * @param tagId tag id
   * @param tagCount amount to change the counter by (may be negative)
   */
  def increaseCounterById(tagId: Int, tagCount: Int):Unit = {
    jdbcTemplate.update("UPDATE tags_values SET counter=counter+? WHERE id=?", tagCount, tagId)
  }
  /**
   * Invokes `f` with the id of every topic carrying the given tag.
   *
   * @param tagId tag id
   * @param f callback receiving each topic id
   */
  def processTopicsByTag(tagId: Int, f: (Int ⇒ Unit)): Unit = {
    jdbcTemplate.queryAndProcess("SELECT msgid FROM tags WHERE tags.tagid=?", tagId) { rs ⇒ f(rs.getInt(1)) }
  }
}
| fat0troll/lorsource | src/main/scala/ru/org/linux/topic/TopicTagDao.scala | Scala | apache-2.0 | 5,704 |
package stdlib
import byteR.JVMInstruction
import frontend._
/**
 * Base class for standard-library packages exposed to compiled programs.
 * A concrete package maps qualified member names to their types and declares
 * which name prefixes refer to it.
 */
abstract class LibraryPackage {
  // Resolves a (qualified) member name to its type; presumably returns None when
  // the name is not defined by this package — TODO confirm against implementations.
  def apply(name: List[String]): Option[ASTType]
  /* This should list all the prefixes with which to
   * refer to a certain package. e.g. for Real.fromInt,
   * 'Real' would be the package, and 'fromInt' the name. */
  val prefixesAccepted: List[List[String]]
}
| j-c-w/mlc | src/main/scala/stdlib/LibraryPackage.scala | Scala | gpl-3.0 | 360 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import scala.collection.JavaConverters._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hive.ql.metadata.{Partition => HivePartition}
import org.apache.hadoop.hive.ql.plan.TableDesc
import org.apache.hadoop.hive.serde.serdeConstants
import org.apache.hadoop.hive.serde2.objectinspector._
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.CastSupport
import org.apache.spark.sql.catalyst.catalog.HiveTableRelation
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.QueryPlan
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.metric.SQLMetrics
import org.apache.spark.sql.hive._
import org.apache.spark.sql.hive.client.HiveClientImpl
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.{BooleanType, DataType}
import org.apache.spark.util.Utils
/**
 * The Hive table scan operator. Column and partition pruning are both handled.
 *
 * @param requestedAttributes Attributes to be fetched from the Hive table.
 * @param relation The Hive table to be scanned.
 * @param partitionPruningPred An optional partition pruning predicate for partitioned table.
 */
private[hive]
case class HiveTableScanExec(
    requestedAttributes: Seq[Attribute],
    relation: HiveTableRelation,
    partitionPruningPred: Seq[Expression])(
    @transient private val sparkSession: SparkSession)
  extends LeafExecNode with CastSupport {

  require(partitionPruningPred.isEmpty || relation.isPartitioned,
    "Partition pruning predicates only supported for partitioned tables.")

  override def conf: SQLConf = sparkSession.sessionState.conf

  override def nodeName: String = s"Scan hive ${relation.tableMeta.qualifiedName}"

  override lazy val metrics = Map(
    "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"))

  // The pruning predicate references partition columns that may not be in `output`, so declare
  // them as produced here to keep this node's `missingInput` empty.
  override def producedAttributes: AttributeSet = outputSet ++
    AttributeSet(partitionPruningPred.flatMap(_.references))

  private val originalAttributes = AttributeMap(relation.output.map(a => a -> a))

  override val output: Seq[Attribute] = {
    // Retrieve the original attributes based on expression ID so that capitalization matches.
    requestedAttributes.map(originalAttributes)
  }

  // Bind all partition key attribute references in the partition pruning predicate for later
  // evaluation.
  private lazy val boundPruningPred = partitionPruningPred.reduceLeftOption(And).map { pred =>
    require(pred.dataType == BooleanType,
      s"Data type of predicate $pred must be ${BooleanType.catalogString} rather than " +
        s"${pred.dataType.catalogString}.")
    BindReferences.bindReference(pred, relation.partitionCols)
  }

  @transient private lazy val hiveQlTable = HiveClientImpl.toHiveTable(relation.tableMeta)
  @transient private lazy val tableDesc = new TableDesc(
    hiveQlTable.getInputFormatClass,
    hiveQlTable.getOutputFormatClass,
    hiveQlTable.getMetadata)

  // Create a local copy of hadoopConf, so that scan specific modifications should not impact
  // other queries
  @transient private lazy val hadoopConf = {
    val c = sparkSession.sessionState.newHadoopConf()
    // append columns ids and names before broadcast
    addColumnMetadataToConf(c)
    c
  }

  @transient private lazy val hadoopReader = new HadoopTableReader(
    output,
    relation.partitionCols,
    tableDesc,
    sparkSession,
    hadoopConf)

  // Reuses Catalyst's cast machinery so Hive's string-typed partition values are converted with
  // the exact semantics of SQL casts.
  private def castFromString(value: String, dataType: DataType) = {
    cast(Literal(value), dataType).eval(null)
  }

  /**
   * Pushes the scanned column ids/names and their serde type names into the job configuration so
   * the underlying input format and deserializer only materialize the needed columns.
   */
  private def addColumnMetadataToConf(hiveConf: Configuration): Unit = {
    // Specifies needed column IDs for those non-partitioning columns.
    val columnOrdinals = AttributeMap(relation.dataCols.zipWithIndex)
    val neededColumnIDs = output.flatMap(columnOrdinals.get).map(o => o: Integer)

    HiveShim.appendReadColumns(hiveConf, neededColumnIDs, output.map(_.name))

    // Class.newInstance is deprecated since Java 9; going through the no-arg constructor is the
    // documented, behaviorally equivalent replacement.
    val deserializer = tableDesc.getDeserializerClass.getConstructor().newInstance()
    deserializer.initialize(hiveConf, tableDesc.getProperties)

    // Specifies types and object inspectors of columns to be scanned.
    val structOI = ObjectInspectorUtils
      .getStandardObjectInspector(
        deserializer.getObjectInspector,
        ObjectInspectorCopyOption.JAVA)
      .asInstanceOf[StructObjectInspector]

    val columnTypeNames = structOI
      .getAllStructFieldRefs.asScala
      .map(_.getFieldObjectInspector)
      .map(TypeInfoUtils.getTypeInfoFromObjectInspector(_).getTypeName)
      .mkString(",")

    hiveConf.set(serdeConstants.LIST_COLUMN_TYPES, columnTypeNames)
    hiveConf.set(serdeConstants.LIST_COLUMNS, relation.dataCols.map(_.name).mkString(","))
  }

  /**
   * Prunes partitions not involved in the query plan.
   *
   * @param partitions All partitions of the relation.
   * @return Partitions that are involved in the query plan.
   */
  private[hive] def prunePartitions(partitions: Seq[HivePartition]) = {
    boundPruningPred match {
      case None => partitions
      case Some(shouldKeep) => partitions.filter { part =>
        val dataTypes = relation.partitionCols.map(_.dataType)
        val castedValues = part.getValues.asScala.zip(dataTypes)
          .map { case (value, dataType) => castFromString(value, dataType) }

        // Only partitioned values are needed here, since the predicate has already been bound to
        // partition key attribute references.
        val row = InternalRow.fromSeq(castedValues)
        shouldKeep.eval(row).asInstanceOf[Boolean]
      }
    }
  }

  // exposed for tests
  @transient lazy val rawPartitions = {
    val prunedPartitions =
      if (sparkSession.sessionState.conf.metastorePartitionPruning &&
          partitionPruningPred.nonEmpty) {
        // Retrieve the original attributes based on expression ID so that capitalization matches.
        val normalizedFilters = partitionPruningPred.map(_.transform {
          case a: AttributeReference => originalAttributes(a)
        })
        sparkSession.sessionState.catalog.listPartitionsByFilter(
          relation.tableMeta.identifier,
          normalizedFilters)
      } else {
        sparkSession.sessionState.catalog.listPartitions(relation.tableMeta.identifier)
      }
    prunedPartitions.map(HiveClientImpl.toHivePartition(_, hiveQlTable))
  }

  protected override def doExecute(): RDD[InternalRow] = {
    // Using dummyCallSite, as getCallSite can turn out to be expensive with
    // multiple partitions.
    val rdd = if (!relation.isPartitioned) {
      Utils.withDummyCallSite(sqlContext.sparkContext) {
        hadoopReader.makeRDDForTable(hiveQlTable)
      }
    } else {
      Utils.withDummyCallSite(sqlContext.sparkContext) {
        hadoopReader.makeRDDForPartitionedTable(prunePartitions(rawPartitions))
      }
    }
    val numOutputRows = longMetric("numOutputRows")
    // Avoid to serialize MetastoreRelation because schema is lazy. (see SPARK-15649)
    val outputSchema = schema
    rdd.mapPartitionsWithIndexInternal { (index, iter) =>
      val proj = UnsafeProjection.create(outputSchema)
      proj.initialize(index)
      iter.map { r =>
        numOutputRows += 1
        proj(r)
      }
    }
  }

  override def doCanonicalize(): HiveTableScanExec = {
    val input: AttributeSeq = relation.output
    HiveTableScanExec(
      requestedAttributes.map(QueryPlan.normalizeExprId(_, input)),
      relation.canonicalized.asInstanceOf[HiveTableRelation],
      QueryPlan.normalizePredicates(partitionPruningPred, input))(sparkSession)
  }

  // sparkSession lives in a second parameter list, so it must be supplied explicitly for copy().
  override def otherCopyArgs: Seq[AnyRef] = Seq(sparkSession)
}
| michalsenkyr/spark | sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScanExec.scala | Scala | apache-2.0 | 8,709 |
/**
 * Signals an invalid command in the memcache client.
 * NOTE(review): exact semantics inferred from the name only — confirm against the
 * MemcacheClientException hierarchy. The trailing "| repo | path |" text on the line below is
 * dataset-extraction residue, not Scala source.
 */
class InvalidCommandException extends MemcacheClientException | dallasgutauckis/xsltd | src/InvalidCommandException.scala | Scala | bsd-3-clause | 61 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.bin
import java.io.ByteArrayOutputStream
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.bin.BinaryEncodeCallback.{ByteArrayCallback, ByteStreamCallback}
import org.locationtech.geomesa.utils.bin.BinaryOutputEncoder
import org.locationtech.geomesa.utils.bin.BinaryOutputEncoder.EncodedValues
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
/**
 * Round-trip tests for the BIN encoding callbacks: values encoded through ByteArrayCallback /
 * ByteStreamCallback must decode back via BinaryOutputEncoder.decode. Records are 16 bytes
 * without a label and 24 bytes with one; dates are only compared to within 1000 ms because the
 * encoding truncates them to whole seconds.
 */
class BinaryEncodeCallbackTest extends Specification {

  "BinaryEncodeCallback" should {
    "encode and decode simple attributes" in {
      // label -1L means "no label": the 4-argument apply produces a 16-byte record
      val initial = EncodedValues(1200, 45.0f, 49.0f, System.currentTimeMillis(), -1L)
      ByteArrayCallback.apply(initial.trackId, initial.lat, initial.lon, initial.dtg)
      val encoded = ByteArrayCallback.result
      encoded must haveLength(16)
      val decoded = BinaryOutputEncoder.decode(encoded)
      decoded.lat mustEqual initial.lat
      decoded.lon mustEqual initial.lon
      Math.abs(decoded.dtg - initial.dtg) must beLessThan(1000L) // dates get truncated to nearest second
      decoded.trackId mustEqual initial.trackId
    }

    "encode and decode optional simple attributes" in {
      // same as above but with trackId 0, exercising the "empty" track id case
      val initial = EncodedValues(0, 45.0f, 49.0f, System.currentTimeMillis(), -1L)
      ByteArrayCallback.apply(initial.trackId, initial.lat, initial.lon, initial.dtg)
      val encoded = ByteArrayCallback.result
      encoded must haveLength(16)
      val decoded = BinaryOutputEncoder.decode(encoded)
      decoded.lat mustEqual initial.lat
      decoded.lon mustEqual initial.lon
      Math.abs(decoded.dtg - initial.dtg) must beLessThan(1000L) // dates get truncated to nearest second
      decoded.trackId mustEqual initial.trackId
    }

    "encode and decode extended attributes" in {
      // passing a label uses the 5-argument apply and widens the record to 24 bytes
      val initial = EncodedValues(1200, 45.0f, 49.0f, System.currentTimeMillis(), 10L)
      ByteArrayCallback.apply(initial.trackId, initial.lat, initial.lon, initial.dtg, initial.label)
      val encoded = ByteArrayCallback.result
      encoded must haveLength(24)
      val decoded = BinaryOutputEncoder.decode(encoded)
      decoded.lat mustEqual initial.lat
      decoded.lon mustEqual initial.lon
      Math.abs(decoded.dtg - initial.dtg) must beLessThan(1000L) // dates get truncated to nearest second
      decoded.trackId mustEqual initial.trackId
      decoded.label mustEqual initial.label
    }

    "encode and decode to an output stream" in {
      // two 24-byte records written back-to-back to one stream; decode each half separately
      val time = System.currentTimeMillis()
      val one = EncodedValues(1200, 45.0f, 49.0f, time, 1000L)
      val two = EncodedValues(1201, 45.0f, 49.0f, time - 100, 3000L)

      val out = new ByteArrayOutputStream(48)
      val callback = new ByteStreamCallback(out)

      callback.apply(one.trackId, one.lat, one.lon, one.dtg, one.label)
      callback.apply(two.trackId, two.lat, two.lon, two.dtg, two.label)

      val array = out.toByteArray
      array must haveLength(48)

      val decodedOne = BinaryOutputEncoder.decode(array.splitAt(24)._1)
      decodedOne.lat mustEqual one.lat
      decodedOne.lon mustEqual one.lon
      Math.abs(decodedOne.dtg - one.dtg) must beLessThan(1000L) // dates get truncated to nearest second
      decodedOne.trackId mustEqual one.trackId
      decodedOne.label mustEqual one.label

      val decodedTwo = BinaryOutputEncoder.decode(array.splitAt(24)._2)
      decodedTwo.lat mustEqual two.lat
      decodedTwo.lon mustEqual two.lon
      Math.abs(decodedTwo.dtg - two.dtg) must beLessThan(1000L) // dates get truncated to nearest second
      decodedTwo.trackId mustEqual two.trackId
      decodedTwo.label mustEqual two.label
    }

    "encode faster to an output stream" in {
      // manual micro-benchmark, not a correctness test — skipped in normal runs
      skipped("integration")
      val times = 10000

      val one = EncodedValues(1200, 45.0f, 49.0f, System.currentTimeMillis(), 10000L)

      val out = new ByteArrayOutputStream(24 * times)
      val streamCallback = new ByteStreamCallback(out)

      // the first test run always takes a long time, even with some initialization...
      // flip the order to get a sense of how long each takes
      val start2 = System.currentTimeMillis()
      (0 to times).foreach(_ => ByteArrayCallback.apply(one.trackId, one.lat, one.lon, one.dtg, one.label))
      val total2 = System.currentTimeMillis() - start2
      println(s"array took $total2 ms")

      val start = System.currentTimeMillis()
      (0 to times).foreach(_ => streamCallback.apply(one.trackId, one.lat, one.lon, one.dtg, one.label))
      val total = System.currentTimeMillis() - start
      println(s"stream took $total ms")
      println
      success
    }
  }
// NOTE(review): the "| repo | path |" text on the closing line is dataset-extraction residue.
} | elahrvivaz/geomesa | geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/bin/BinaryEncodeCallbackTest.scala | Scala | apache-2.0 | 5,080 |
package spark.deploy
/**
 * Describes an application submitted to the standalone deploy module: its display name, the
 * requested core count, per-slave memory, the launch [[Command]] and the Spark home path.
 * Extends [[Serializable]] so instances can be serialized.
 *
 * @param name           human-readable application name
 * @param cores          total number of cores requested
 * @param memoryPerSlave memory (MB) requested on each slave
 * @param command        command used to launch the application
 * @param sparkHome      path to the Spark installation
 */
private[spark] class ApplicationDescription(
    val name: String,
    val cores: Int,
    val memoryPerSlave: Int,
    val command: Command,
    val sparkHome: String)
  extends Serializable {

  // Captured from the submitting JVM; "<unknown>" when the user.name property is absent.
  val user: String = System.getProperty("user.name", "<unknown>")

  override def toString: String = s"ApplicationDescription($name)"
}
| koeninger/spark | core/src/main/scala/spark/deploy/ApplicationDescription.scala | Scala | bsd-3-clause | 351 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.{FunctionIdentifier, InternalRow, TableIdentifier}
import org.apache.spark.sql.catalyst.errors.TreeNodeException
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode}
import org.apache.spark.sql.catalyst.parser.ParserUtils
import org.apache.spark.sql.catalyst.plans.logical.{LeafNode, LogicalPlan, UnaryNode}
import org.apache.spark.sql.catalyst.trees.TreeNode
import org.apache.spark.sql.catalyst.util.quoteIdentifier
import org.apache.spark.sql.types.{DataType, Metadata, StructType}
/**
 * Thrown when an invalid attempt is made to access a property of a tree that has yet to be fully
 * resolved.
 */
class UnresolvedException[TreeType <: TreeNode[_]](tree: TreeType, function: String)
  extends TreeNodeException(tree, s"Invalid call to $function on unresolved object", null)

/**
 * Holds the name of a relation that has yet to be looked up in a catalog.
 *
 * @param tableIdentifier table name
 */
case class UnresolvedRelation(tableIdentifier: TableIdentifier)
  extends LeafNode {

  /** Returns a `.` separated name for this relation. */
  def tableName: String = tableIdentifier.unquotedString

  // No attributes can be known until the relation has been looked up in the catalog.
  override def output: Seq[Attribute] = Nil

  override lazy val resolved = false
}

/**
 * An inline table that has not been resolved yet. Once resolved, it is turned by the analyzer into
 * a [[org.apache.spark.sql.catalyst.plans.logical.LocalRelation]].
 *
 * @param names list of column names
 * @param rows expressions for the data
 */
case class UnresolvedInlineTable(
    names: Seq[String],
    rows: Seq[Seq[Expression]])
  extends LeafNode {

  // True once every cell expression is resolved, even though the table node itself never is.
  lazy val expressionsResolved: Boolean = rows.forall(_.forall(_.resolved))
  override lazy val resolved = false
  override def output: Seq[Attribute] = Nil
}

/**
 * A table-valued function, e.g.
 * {{{
 *   select id from range(10);
 *
 *   // Assign alias names
 *   select t.a from range(10) t(a);
 * }}}
 *
 * @param functionName name of this table-value function
 * @param functionArgs list of function arguments
 * @param outputNames alias names of function output columns. If these names given, an analyzer
 *                    adds [[Project]] to rename the output columns.
 */
case class UnresolvedTableValuedFunction(
    functionName: String,
    functionArgs: Seq[Expression],
    outputNames: Seq[String])
  extends LeafNode {

  override def output: Seq[Attribute] = Nil

  override lazy val resolved = false
}

/**
 * Holds the name of an attribute that has yet to be resolved.
 */
case class UnresolvedAttribute(nameParts: Seq[String]) extends Attribute with Unevaluable {

  // Parts containing a dot are re-quoted with backticks so the printed name re-parses correctly.
  def name: String =
    nameParts.map(n => if (n.contains(".")) s"`$n`" else n).mkString(".")

  override def exprId: ExprId = throw new UnresolvedException(this, "exprId")
  override def dataType: DataType = throw new UnresolvedException(this, "dataType")
  override def nullable: Boolean = throw new UnresolvedException(this, "nullable")
  override def qualifier: Seq[String] = throw new UnresolvedException(this, "qualifier")
  override lazy val resolved = false

  // Rewriting operations are no-ops on an unresolved attribute (there is nothing to rewrite yet),
  // except withName, which re-wraps the new name as a single already-quoted part.
  override def newInstance(): UnresolvedAttribute = this
  override def withNullability(newNullability: Boolean): UnresolvedAttribute = this
  override def withQualifier(newQualifier: Seq[String]): UnresolvedAttribute = this
  override def withName(newName: String): UnresolvedAttribute = UnresolvedAttribute.quoted(newName)
  override def withMetadata(newMetadata: Metadata): Attribute = this
  override def withExprId(newExprId: ExprId): UnresolvedAttribute = this

  override def toString: String = s"'$name"

  override def sql: String = name match {
    case ParserUtils.escapedIdentifier(_) | ParserUtils.qualifiedEscapedIdentifier(_, _) => name
    case _ => quoteIdentifier(name)
  }
}
object UnresolvedAttribute {
  /**
   * Creates an [[UnresolvedAttribute]], parsing segments separated by dots ('.').
   */
  def apply(name: String): UnresolvedAttribute = new UnresolvedAttribute(name.split("\\."))

  /**
   * Creates an [[UnresolvedAttribute]], from a single quoted string (for example using backticks in
   * HiveQL. Since the string is consider quoted, no processing is done on the name.
   */
  def quoted(name: String): UnresolvedAttribute = new UnresolvedAttribute(Seq(name))

  /**
   * Creates an [[UnresolvedAttribute]] from a string in an embedded language. In this case
   * we treat it as a quoted identifier, except for '.', which must be further quoted using
   * backticks if it is part of a column name.
   */
  def quotedString(name: String): UnresolvedAttribute =
    new UnresolvedAttribute(parseAttributeName(name))

  /**
   * Used to split attribute name by dot with backticks rule.
   * Backticks must appear in pairs, and the quoted string must be a complete name part,
   * which means `ab..c`e.f is not allowed.
   * Escape character is not supported now, so we can't use backtick inside name part.
   *
   * Malformed names — including a leading, trailing, or doubled dot — raise an AnalysisException.
   * (Previously a leading dot escaped as a raw StringIndexOutOfBoundsException.)
   */
  def parseAttributeName(name: String): Seq[String] = {
    // Built lazily so the message always reflects the offending name.
    def e = new AnalysisException(s"syntax error in attribute name: $name")
    val nameParts = scala.collection.mutable.ArrayBuffer.empty[String]
    val tmp = scala.collection.mutable.ArrayBuffer.empty[Char]
    var inBacktick = false
    var i = 0
    while (i < name.length) {
      val char = name(i)
      if (inBacktick) {
        if (char == '`') {
          inBacktick = false
          // A closing backtick must end the name part: only end-of-string or '.' may follow.
          if (i + 1 < name.length && name(i + 1) != '.') throw e
        } else {
          tmp += char
        }
      } else {
        if (char == '`') {
          // An opening backtick is only legal at the very start of a name part.
          if (tmp.nonEmpty) throw e
          inBacktick = true
        } else if (char == '.') {
          // Reject a leading dot (i == 0), an empty part as in "a..b", and a trailing dot.
          // The i == 0 guard also prevents the out-of-bounds access name(-1) below.
          if (i == 0 || name(i - 1) == '.' || i == name.length - 1) throw e
          nameParts += tmp.mkString
          tmp.clear()
        } else {
          tmp += char
        }
      }
      i += 1
    }
    // An unclosed backtick is malformed.
    if (inBacktick) throw e
    nameParts += tmp.mkString
    nameParts.toSeq
  }
}
/**
 * Represents an unresolved generator, which will be created by the parser for
 * the [[org.apache.spark.sql.catalyst.plans.logical.Generate]] operator.
 * The analyzer will resolve this generator.
 */
case class UnresolvedGenerator(name: FunctionIdentifier, children: Seq[Expression])
  extends Generator {

  // NOTE(review): the message string says "elementTypes" while the member is elementSchema;
  // kept as-is since it is runtime-visible text.
  override def elementSchema: StructType = throw new UnresolvedException(this, "elementTypes")
  override def dataType: DataType = throw new UnresolvedException(this, "dataType")
  override def foldable: Boolean = throw new UnresolvedException(this, "foldable")
  override def nullable: Boolean = throw new UnresolvedException(this, "nullable")
  override lazy val resolved = false

  override def prettyName: String = name.unquotedString
  override def toString: String = s"'$name(${children.mkString(", ")})"

  // A generator must be resolved to a concrete implementation before any evaluation or codegen.
  override def eval(input: InternalRow = null): TraversableOnce[InternalRow] =
    throw new UnsupportedOperationException(s"Cannot evaluate expression: $this")

  override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode =
    throw new UnsupportedOperationException(s"Cannot generate code for expression: $this")

  override def terminate(): TraversableOnce[InternalRow] =
    throw new UnsupportedOperationException(s"Cannot terminate expression: $this")
}

/**
 * A function call whose implementation has not yet been looked up in the function registry.
 *
 * @param name parsed function identifier
 * @param children argument expressions
 * @param isDistinct whether DISTINCT was specified (relevant for aggregate functions)
 */
case class UnresolvedFunction(
    name: FunctionIdentifier,
    children: Seq[Expression],
    isDistinct: Boolean)
  extends Expression with Unevaluable {

  override def dataType: DataType = throw new UnresolvedException(this, "dataType")
  override def foldable: Boolean = throw new UnresolvedException(this, "foldable")
  override def nullable: Boolean = throw new UnresolvedException(this, "nullable")
  override lazy val resolved = false

  override def prettyName: String = name.unquotedString
  override def toString: String = s"'$name(${children.mkString(", ")})"
}

object UnresolvedFunction {
  /** Convenience constructor for an unqualified (database-less) function name. */
  def apply(name: String, children: Seq[Expression], isDistinct: Boolean): UnresolvedFunction = {
    UnresolvedFunction(FunctionIdentifier(name, None), children, isDistinct)
  }
}

/**
 * Represents all of the input attributes to a given relational operator, for example in
 * "SELECT * FROM ...". A [[Star]] gets automatically expanded during analysis.
 */
abstract class Star extends LeafExpression with NamedExpression {

  override def name: String = throw new UnresolvedException(this, "name")
  override def exprId: ExprId = throw new UnresolvedException(this, "exprId")
  override def dataType: DataType = throw new UnresolvedException(this, "dataType")
  override def nullable: Boolean = throw new UnresolvedException(this, "nullable")
  override def qualifier: Seq[String] = throw new UnresolvedException(this, "qualifier")
  override def toAttribute: Attribute = throw new UnresolvedException(this, "toAttribute")
  override def newInstance(): NamedExpression = throw new UnresolvedException(this, "newInstance")
  override lazy val resolved = false

  // Subclasses define how the star expands into the concrete output expressions of `input`.
  def expand(input: LogicalPlan, resolver: Resolver): Seq[NamedExpression]
}
/**
 * Represents all of the input attributes to a given relational operator, for example in
 * "SELECT * FROM ...".
 *
 * This is also used to expand structs. For example:
 * "SELECT record.* from (SELECT struct(a,b,c) as record ...)
 *
 * @param target an optional name that should be the target of the expansion. If omitted all
 *              targets' columns are produced. This can either be a table name or struct name. This
 *              is a list of identifiers that is the path of the expansion.
 */
case class UnresolvedStar(target: Option[Seq[String]]) extends Star with Unevaluable {

  /**
   * Returns true if the nameParts match the qualifier of the attribute
   *
   * There are two checks: i) Check if the nameParts match the qualifier fully.
   * E.g. SELECT db.t1.* FROM db1.t1  In this case, the nameParts is Seq("db1", "t1") and
   * qualifier of the attribute is Seq("db1","t1")
   * ii) If (i) is not true, then check if nameParts is only a single element and it
   * matches the table portion of the qualifier
   *
   * E.g. SELECT t1.* FROM db1.t1  In this case nameParts is Seq("t1") and
   * qualifier is Seq("db1","t1")
   * SELECT a.* FROM db1.t1 AS a
   * In this case nameParts is Seq("a") and qualifier for
   * attribute is Seq("a")
   */
  private def matchedQualifier(
      attribute: Attribute,
      nameParts: Seq[String],
      resolver: Resolver): Boolean = {
    val qualifierList = attribute.qualifier

    val matched = nameParts.corresponds(qualifierList)(resolver) || {
      // check if it matches the table portion of the qualifier
      if (nameParts.length == 1 && qualifierList.nonEmpty) {
        resolver(nameParts.head, qualifierList.last)
      } else {
        false
      }
    }
    matched
  }

  override def expand(
      input: LogicalPlan,
      resolver: Resolver): Seq[NamedExpression] = {
    // If there is no table specified, use all input attributes.
    if (target.isEmpty) return input.output

    // First try the target as a table/alias qualifier.
    val expandedAttributes = input.output.filter(matchedQualifier(_, target.get, resolver))

    if (expandedAttributes.nonEmpty) return expandedAttributes

    // Try to resolve it as a struct expansion. If there is a conflict and both are possible,
    // (i.e. [name].* is both a table and a struct), the struct path can always be qualified.
    val attribute = input.resolve(target.get, resolver)
    if (attribute.isDefined) {
      // This target resolved to an attribute in child. It must be a struct. Expand it.
      attribute.get.dataType match {
        case s: StructType => s.zipWithIndex.map {
          case (f, i) =>
            val extract = GetStructField(attribute.get, i)
            Alias(extract, f.name)()
        }
        case _ =>
          throw new AnalysisException("Can only star expand struct data types. Attribute: `" +
            target.get + "`")
      }
    } else {
      val from = input.inputSet.map(_.name).mkString(", ")
      val targetString = target.get.mkString(".")
      throw new AnalysisException(s"cannot resolve '$targetString.*' given input columns '$from'")
    }
  }

  override def toString: String = target.map(_ + ".").getOrElse("") + "*"
}
/**
 * Represents all of the input attributes to a given relational operator, for example in
 * "SELECT `(id)?+.+` FROM ...".
 *
 * @param table an optional table that should be the target of the expansion. If omitted all
 *              tables' columns are produced.
 */
case class UnresolvedRegex(regexPattern: String, table: Option[String], caseSensitive: Boolean)
  extends Star with Unevaluable {

  override def expand(input: LogicalPlan, resolver: Resolver): Seq[NamedExpression] = {
    // (?i) makes the regex itself case-insensitive when the session is.
    val pattern = if (caseSensitive) regexPattern else s"(?i)$regexPattern"
    table match {
      // If there is no table specified, use all input attributes that match expr
      case None => input.output.filter(_.name.matches(pattern))
      // If there is a table, pick out attributes that are part of this table that match expr
      case Some(t) => input.output.filter(a => a.qualifier.nonEmpty &&
        resolver(a.qualifier.last, t)).filter(_.name.matches(pattern))
    }
  }

  override def toString: String = table.map(_ + "." + regexPattern).getOrElse(regexPattern)
}

/**
 * Used to assign new names to Generator's output, such as hive udtf.
 * For example the SQL expression "stack(2, key, value, key, value) as (a, b)" could be represented
 * as follows:
 * MultiAlias(stack_function, Seq(a, b))
 *
 * @param child the computation being performed
 * @param names the names to be associated with each output of computing [[child]].
 */
case class MultiAlias(child: Expression, names: Seq[String])
  extends UnaryExpression with NamedExpression with Unevaluable {

  override def name: String = throw new UnresolvedException(this, "name")
  override def exprId: ExprId = throw new UnresolvedException(this, "exprId")
  override def dataType: DataType = throw new UnresolvedException(this, "dataType")
  override def nullable: Boolean = throw new UnresolvedException(this, "nullable")
  override def qualifier: Seq[String] = throw new UnresolvedException(this, "qualifier")
  override def toAttribute: Attribute = throw new UnresolvedException(this, "toAttribute")
  override def newInstance(): NamedExpression = throw new UnresolvedException(this, "newInstance")
  override lazy val resolved = false

  override def toString: String = s"$child AS $names"
}

/**
 * Represents all the resolved input attributes to a given relational operator. This is used
 * in the data frame DSL.
 *
 * @param expressions Expressions to expand.
 */
case class ResolvedStar(expressions: Seq[NamedExpression]) extends Star with Unevaluable {
  override def newInstance(): NamedExpression = throw new UnresolvedException(this, "newInstance")
  // Already resolved: expansion just returns the pre-computed expressions, ignoring the input.
  override def expand(input: LogicalPlan, resolver: Resolver): Seq[NamedExpression] = expressions
  override def toString: String = expressions.mkString("ResolvedStar(", ", ", ")")
}

/**
 * Extracts a value or values from an Expression
 *
 * @param child The expression to extract value from,
 *              can be Map, Array, Struct or array of Structs.
 * @param extraction The expression to describe the extraction,
 *                   can be key of Map, index of Array, field name of Struct.
 */
case class UnresolvedExtractValue(child: Expression, extraction: Expression)
  extends BinaryExpression with Unevaluable {

  override def left: Expression = child
  override def right: Expression = extraction
  override def dataType: DataType = throw new UnresolvedException(this, "dataType")
  override def foldable: Boolean = throw new UnresolvedException(this, "foldable")
  override def nullable: Boolean = throw new UnresolvedException(this, "nullable")
  override lazy val resolved = false

  override def toString: String = s"$child[$extraction]"
  override def sql: String = s"${child.sql}[${extraction.sql}]"
}
/**
 * Holds the expression that has yet to be aliased.
 *
 * @param child The computation that is needs to be resolved during analysis.
 * @param aliasFunc The function if specified to be called to generate an alias to associate
 *                  with the result of computing [[child]]
 *
 */
case class UnresolvedAlias(
    child: Expression,
    aliasFunc: Option[Expression => String] = None)
  extends UnaryExpression with NamedExpression with Unevaluable {

  override def toAttribute: Attribute = throw new UnresolvedException(this, "toAttribute")
  override def qualifier: Seq[String] = throw new UnresolvedException(this, "qualifier")
  override def exprId: ExprId = throw new UnresolvedException(this, "exprId")
  override def nullable: Boolean = throw new UnresolvedException(this, "nullable")
  override def dataType: DataType = throw new UnresolvedException(this, "dataType")
  override def name: String = throw new UnresolvedException(this, "name")
  override def newInstance(): NamedExpression = throw new UnresolvedException(this, "newInstance")

  override lazy val resolved = false
}

/**
 * Aliased column names resolved by positions for subquery. We could add alias names for output
 * columns in the subquery:
 * {{{
 *   // Assign alias names for output columns
 *   SELECT col1, col2 FROM testData AS t(col1, col2);
 * }}}
 *
 * @param outputColumnNames the alias names, applied by position to the output columns of `child`.
 * @param child the logical plan of the subquery these column aliases apply to.
 */
case class UnresolvedSubqueryColumnAliases(
    outputColumnNames: Seq[String],
    child: LogicalPlan)
  extends UnaryNode {

  override def output: Seq[Attribute] = Nil

  override lazy val resolved = false
}
/**
 * Holds the deserializer expression and the attributes that are available during the resolution
 * for it. Deserializer expression is a special kind of expression that is not always resolved by
 * children output, but by given attributes, e.g. the `keyDeserializer` in `MapGroups` should be
 * resolved by `groupingAttributes` instead of children output.
 *
 * @param deserializer The unresolved deserializer expression
 * @param inputAttributes The input attributes used to resolve deserializer expression, can be empty
 *                        if we want to resolve deserializer by children output.
 */
case class UnresolvedDeserializer(deserializer: Expression, inputAttributes: Seq[Attribute] = Nil)
  extends UnaryExpression with Unevaluable with NonSQLExpression {

  // The input attributes used to resolve deserializer expression must be all resolved.
  require(inputAttributes.forall(_.resolved), "Input attributes must all be resolved.")

  override def child: Expression = deserializer
  override def dataType: DataType = throw new UnresolvedException(this, "dataType")
  override def foldable: Boolean = throw new UnresolvedException(this, "foldable")
  override def nullable: Boolean = throw new UnresolvedException(this, "nullable")
  override lazy val resolved = false
}

/**
 * A placeholder that refers to a column of the input row by position rather than by name; the
 * analyzer replaces it with a bound reference.
 */
case class GetColumnByOrdinal(ordinal: Int, dataType: DataType) extends LeafExpression
  with Unevaluable with NonSQLExpression {

  override def foldable: Boolean = throw new UnresolvedException(this, "foldable")
  override def nullable: Boolean = throw new UnresolvedException(this, "nullable")
  override lazy val resolved = false
}

/**
 * Represents unresolved ordinal used in order by or group by.
 *
 * For example:
 * {{{
 *   select a from table order by 1
 *   select a from table group by 1
 * }}}
 * @param ordinal ordinal starts from 1, instead of 0
 */
case class UnresolvedOrdinal(ordinal: Int)
  extends LeafExpression with Unevaluable with NonSQLExpression {

  override def dataType: DataType = throw new UnresolvedException(this, "dataType")
  override def foldable: Boolean = throw new UnresolvedException(this, "foldable")
  override def nullable: Boolean = throw new UnresolvedException(this, "nullable")
  override lazy val resolved = false
}
| icexelloss/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala | Scala | apache-2.0 | 20,818 |
import org.scalatest.FunSuite
import scala.annotation.StaticAnnotation
// TODO: DavidDudson: simplify with argument macros
/**
 * Tests for macro-annotation expansion (macro paradise). The annotations used here
 * (@identity, @helloWorld, @appendA/B/C, @placebo, @companion, @tparam, etc.) are custom macros
 * defined elsewhere in this project; each test checks what code the annotation expanded into,
 * typically by observing side effects appended to a local `letters` string.
 */
class Expansion extends FunSuite {
  test("Nested macro should expand with identity") {
    @identity
    object Foo {
      @helloWorld def bar() = "test"
    }
    assert(Foo.bar() === "hello world")
  }

  test("Nested macro of the same name should expand") {
    var letters = ""

    @appendA
    def foo() = {
      @appendA
      def bar() = {}
      bar()
    }

    foo()
    assert(letters === "aa")
  }

  test("Nested macro should expand with different macros") {
    var letters = ""

    @appendA
    def foo() = {
      @appendB
      def bar() = {}
      bar()
    }

    foo()
    assert(letters === "ba")
  }

  // Provided the above test passes... This proves that
  // Macros expansion order:
  // Left -> Right
  // In -> Out
  test("Verify expansion order") {
    var letters = ""

    @appendB
    @appendC
    def foo() = {
      @appendA
      def bar() = {}
      bar()
    }

    foo()
    assert(letters === "abc")
  }

  test("Nested macro should expand with inner identity macro") {
    var letters = ""

    @appendA
    def foo() = {
      @identity
      def bar() = {}
      bar()
    }

    foo()
    assert(letters === "a")
  }

  test("Nested macro should expand with outer identity macro") {
    var letters = ""

    @identity
    def foo() = {
      @appendA
      def bar() = {}
      bar()
    }

    foo()
    assert(letters === "a")
  }

  test("Placebo after expandee should compile and work") {
    var letters = ""

    @appendA
    @placebo
    def bar() = {}

    bar()
    assert(letters === "a")
  }

  test("Placebo before expandee should compile and work") {
    var letters = ""

    @placebo
    @appendA
    def bar() = {}

    bar()
    assert(letters === "a")
  }

  test("Multiple expandees of same kinds with others in between should expand") {
    var letters = ""

    @appendA
    @identity
    @appendB
    def bar() = {}

    bar()
    assert(letters === "ab")
  }

  test("Multiple expandees of similar kinds should expand in the correct order") {
    var letters = ""

    @appendA
    @appendB
    def bar() = {}

    bar()
    assert(letters === "ab")
  }

  test("Identity expandee followed by regular expandee should expand correctly") {
    var letters = ""

    @identity
    @appendA
    def bar() = {}

    bar()
    assert(letters === "a")
  }

  test("Regular expandee followed by Identity expandee should expand correctly") {
    var letters = ""

    @appendA
    @identity
    def bar() = {}

    bar()
    assert(letters === "a")
  }

  test("Placebo in package doesn't accidentally get removed if second") {
    var letters = ""

    @appendA
    @placebo.appendA
    def bar() = {}

    bar()
    assert(letters === "a")
  }

  test("Placebo in package doesn't accidentally get removed if first") {
    var letters = ""

    @placebo.appendA
    @appendA
    def bar() = {}

    bar()
    assert(letters === "a")
  }

  // The following three only need to compile: they exercise annotation argument forms.
  test("Regular arguments are supported") {
    @param("hello world")
    class SomeClass1
  }

  test("Named arguments are supported") {
    @namedParam(some = "text")
    class SomeClass2
  }

  test("Repeated arguments are supported") {
    @repeatedParam(foos: _*)
    class SomeClass3
  }

  test("Annotations on classes expand not only classes, but also companion objects") {
    trait Bar {
      val k: Int = 3
    }

    @companion
    class Foo(id: Int) {
      val i: Int = 1
    }

    object Foo extends Bar {
      val j: Int = 2
    }

    @companion
    class Baz(id: Int) {
      val a: String = "abc"
    }
  }

  test("Type parameters are supported") {
    @tparam[Int]
    class Foo
  }

  test("Private in block return") {
    @genLargeNumberOfStats
    class foo
  }

  test("Expansion of type def with companion") {
    object SomeObject {
      @identity type A = Int
      object A
    }
  }

  test("Ensure companion is passed into macro with typedef") {
    object AnotherObject {
      @typeWithCompanion type A = Int
      object A
    }
  }

  test("Ensure typedefs without companions are not in a block") {
    object AThirdObject {
      @typeWithoutCompanion type A = Int
    }
  }
}
// Note: We cannot actually wrap this in test()
// as it cannot be an inner class definition.
// The best we can do is make sure this compiles
// TODO: David Dudson -> Make a more elaborate test case,
// e.g. a macro that creates a macro that is then used,
// requiring 3 separate compilation steps
// A macro-annotation class that is itself annotated with the @identity
// expansion macro: compiling this verifies that expansion leaves a valid
// macro-annotation definition behind. `inline def apply ... = meta(a)` is
// new-style (scala.meta) macro-paradise syntax.
@identity
class SomeClass4 extends StaticAnnotation {
  inline def apply(a: Any): Any = meta(a)
}
| xeno-by/paradise | tests/meta/src/test/scala/compile/Expansion.scala | Scala | bsd-3-clause | 4,652 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fetch
import java.net.URL
import java.util
import javax.xml.namespace.QName
import com.github.sardine.DavResource
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatestplus.play.PlaySpec
import scala.collection.JavaConverters._
@RunWith(classOf[JUnitRunner])
class WebDavTreeTest extends PlaySpec {
import WebDavTree.readyToCollectFile
val base = "http://somedavserver.com:81"
// Builds a directory node of the fake WebDAV tree. Callers pass `path` with
// its own trailing slash; the URL is simply base + "/" + path + name.
private def folder(path: String, name: String, files: List[WebDavFile]) = {
  WebDavFile(new URL(s"$base/$path$name"), name, kb = 0L, files = files,
    isDirectory = true,
    isDataFile = false,
    isPlainText = false)
}
/** Builds a leaf (childless) `WebDavFile` under `base`, classified purely by
  * file extension: `.zip`/`.csv` count as data files, `.txt` as plain text. */
private def leafFile(path: String, name: String, kb: Long = 0L) = {
  val url = new URL(s"$base/$path$name")
  val dataSuffixes = Seq(".zip", ".csv")
  WebDavFile(url, name, kb = kb,
    isDataFile = dataSuffixes.exists(suffix => name.endsWith(suffix)),
    isPlainText = name.endsWith(".txt"))
}
// "fix app.conf" should { "" ignore { fail() }}
"WebDavTree.name" should {
  "handle name with dot" in {
    // NOTE(review): `name` apparently strips the extension
    // ("DVD2.txt" -> "DVD2") — confirm against WebDavFile.name.
    leafFile("webdav/abp/40", "DVD2.txt").name must be("DVD2")
  }
  "handle name without dot" in {
    // Directory names have no extension and pass through unchanged.
    val dir = folder("abi/39", "full", Nil)
    dir.name must be("full")
  }
}
"find available" should {
"""
discover one specified product with two epochs
and ignore any unimportant files
and ignore products except those asked for
""" in {
// given
val tree = WebDavTree(
folder("/", "webdav", List(
folder("webdav/", "abi", List(
folder("webdav/abi/", "39", List(
folder("webdav/abi/39/", "full", List(
leafFile("webdav/abi/39/full/", "x1.zip"),
leafFile("webdav/abi/39/full/", readyToCollectFile)
))
))
)),
folder("webdav/", "abp", List(
folder("webdav/abp/", "39", List(
folder("webdav/abp/39/", "full", List(
leafFile("webdav/abp/39/full/", "abp1.zip"),
leafFile("webdav/abp/39/full/", readyToCollectFile)
))
)),
folder("webdav/abp/", "40", List(
folder("webdav/abp/40/", "full", List(
folder("webdav/abp/40/full/", "data", List(
leafFile("webdav/abp/40/full/data/", "DVD1.zip"),
leafFile("webdav/abp/40/full/data/", "DVD2.zip")
)),
leafFile("webdav/abp/40/full/", readyToCollectFile)
))
)),
folder("webdav/abp/", "41", List(
folder("webdav/abp/41/", "full", List(
folder("webdav/abp/41/full/", "data", List(
leafFile("webdav/abp/41/full/data/", "file1.zip"),
leafFile("webdav/abp/41/full/data/", "file2.zip"),
leafFile("webdav/abp/41/full/data/", readyToCollectFile)
))
))
))
))
))
)
// when
val list = tree.findAvailableFor("abp")
// then
list must be(List(
OSGBProduct("abp", 39, List(leafFile("webdav/abp/39/full/", "abp1.zip"))),
OSGBProduct("abp", 40, List(leafFile("webdav/abp/40/full/data/", "DVD1.zip"), leafFile("webdav/abp/40/full/data/", "DVD2.zip"))),
OSGBProduct("abp", 41, List(leafFile("webdav/abp/41/full/data/", "file1.zip"), leafFile("webdav/abp/41/full/data/", "file2.zip")))
))
}
"""
discover one specified product with one specified epoch
and ignore any unrelated files
and ignore products except those asked for
""" in {
// given
val tree = WebDavTree(
folder("/", "webdav", List(
folder("webdav/", "abi", List(
folder("webdav/abi/", "39", List(
folder("webdav/abi/39/", "full", List(
leafFile("webdav/abi/39/full/", "DVD1.zip")
))
))
)),
folder("webdav/", "abp", List(
folder("webdav/abp/", "39", List(
folder("webdav/abp/39/", "full", List(
leafFile("webdav/abp/39/full/", "DVD1.zip"),
leafFile("webdav/abp/39/full", readyToCollectFile),
leafFile("webdav/abp/39/full/", "ignore.this")
))
)),
folder("webdav/abp/", "40", List(
folder("webdav/abp/40/", "full", List(
leafFile("webdav/abp/40/full/", "DVD1.zip"),
leafFile("webdav/abp/40/full/", "DVD2.zip"),
leafFile("webdav/abp/40/full", readyToCollectFile)
))
))
))
)))
// when
val product = tree.findAvailableFor("abp", 39)
// then
product must be(Some(OSGBProduct("abp", 39, List(leafFile("webdav/abp/39/full/", "DVD1.zip")))))
}
"""
discover nothing when the set is incomplete
""" in {
// given
val tree = WebDavTree(
folder("/", "webdav", List(
folder("webdav/", "abp", List(
folder("webdav/abp/", "40", List(
folder("webdav/abp/40/", "full", List(
leafFile("webdav/abp/40/full/", "DVD1.zip"),
leafFile("webdav/abp/40/full/", "DVD2.zip") // missing ready-to-collect.txt
))
))
))
)))
// when
val list = tree.findAvailableFor("abp")
// then
list must be(Nil)
}
"""
discover two products with one epoch in subdirectories
and ignore any unrelated files
""" in {
// given
val tree = WebDavTree(
folder("/", "webdav", List(
folder("webdav/", "abi", List(
folder("webdav/abi/", "39", List(
folder("webdav/abi/39/", "full", List(
folder("webdav/abp/39/full/", "data", List(
leafFile("webdav/abi/39/full/data/", "001.zip")
))
)),
leafFile("webdav/abi/39/full/", readyToCollectFile)
))
)),
folder("webdav/", "abp", List(
folder("webdav/abp/", "39", List(
folder("webdav/abp/39/", "full", List(
folder("webdav/abp/39/full/", "data", List(
leafFile("webdav/abp/39/full/data/", "001.zip")
))
)),
leafFile("webdav/abp/39/", readyToCollectFile) // not in the right place
)),
folder("webdav/abp/", "40", List(
folder("webdav/abp/40/", "full", List(
folder("webdav/abp/40/full/", "data", List(
leafFile("webdav/abp/40/full/data/", "001.zip"),
leafFile("webdav/abp/40/full/data/", "002.zip"),
leafFile("webdav/abp/40/full/data/", "003.zip"),
leafFile("webdav/abp/40/full/data/", "ignore.this")
)),
leafFile("webdav/abp/40/full", readyToCollectFile) // correct
))
))
))
)))
// when
val list = tree.findAvailableFor("abp")
// then
list must be(List(
OSGBProduct("abp", 40,
List(leafFile("webdav/abp/40/full/data/", "001.zip"), leafFile("webdav/abp/40/full/data/", "002.zip"), leafFile("webdav/abp/40/full/data/", "003.zip")))))
}
}
"find latest" should {
"""
discover one product with one epoch where all products have corresponding ready-to-collect marker
and ignore any unimportant files
""" in {
// given
val tree = WebDavTree(
folder("/", "webdav", List(
folder("webdav/", "abi", List(
folder("webdav/abi/", "39", List(
folder("webdav/abi/39/", "full", List(
leafFile("webdav/abi/39/full/", "DVD1.zip"),
leafFile("webdav/abi/39/full/", readyToCollectFile)
))
))
)),
folder("webdav/", "abp", List(
folder("webdav/abp/", "39", List(
folder("webdav/abp/39/", "full", List(
leafFile("webdav/abp/39/full/", "DVD1.csv"),
leafFile("webdav/abp/39/full", readyToCollectFile)
))
)),
folder("webdav/abp/", "40", List(
folder("webdav/abp/40/", "full", List(
leafFile("webdav/abp/40/full/", "DVD1.csv"),
leafFile("webdav/abp/40/full/", "DVD2.csv"),
leafFile("webdav/abp/40/full/", readyToCollectFile),
leafFile("webdav/abp/40/full/", "ignore.this")
))
))
))
)))
// when
val list = tree.findLatestFor("abp")
// then
list must be(Some(OSGBProduct("abp", 40, List(leafFile("webdav/abp/40/full/", "DVD1.csv"), leafFile("webdav/abp/40/full/", "DVD2.csv")))))
}
"""
discover one product with one epoch where a ready-to-collect.txt marker exists
but there are no other txt marker files
and ignore any unimportant files
""" in {
// given
val tree = WebDavTree(
folder("/", "webdav", List(
folder("webdav/", "abp", List(
folder("webdav/abp/", "39", List(
folder("webdav/abp/39/", "full", List(
leafFile("webdav/abp/39/full/", "DVD1.zip"),
leafFile("webdav/abp/39/full/", readyToCollectFile)
))
)),
folder("webdav/abp/", "40", List(
folder("webdav/abp/40/", "full", List(
folder("webdav/abp/40/full/", "data", List(
leafFile("webdav/abp/40/full/data/", "ABP_2016-05-18_001_csv.zip"),
leafFile("webdav/abp/40/full/data/", "ABP_2016-05-18_002_csv.zip"),
leafFile("webdav/abp/40/full/data/", "ignore.this")
)),
leafFile("webdav/abp/40/full/", readyToCollectFile)
))
))
))
)))
// when
val list = tree.findLatestFor("abp")
// then
list must be(Some(OSGBProduct("abp", 40,
List(leafFile("webdav/abp/40/full/data/", "ABP_2016-05-18_001_csv.zip"), leafFile("webdav/abp/40/full/data/", "ABP_2016-05-18_002_csv.zip")))))
}
}
"toString" should {
"""
produce helpful listings of files and directories
""" in {
// given
val tree = WebDavTree(
folder("/", "webdav", List(
folder("webdav/", "abi", List(
folder("webdav/abi/", "39", List(
folder("webdav/abi/39/", "full", List(
leafFile("webdav/abi/39/full/", "DVD1.zip", 123456L),
leafFile("webdav/abi/39/full/", "DVD1.txt", 1L)
))
))
)),
folder("webdav/", "abp", List(
folder("webdav/abp/", "39", List(
folder("webdav/abp/39/", "full", List(
leafFile("webdav/abp/39/full/", "DVD1.csv", 999L),
leafFile("webdav/abp/39/full/", "DVD1.txt", 0L)
))
)),
folder("webdav/abp/", "40", List(
folder("webdav/abp/40/", "full", List(
folder("webdav/abp/40/full/", "data", List(
leafFile("webdav/abp/40/full/data/", "AddressBasePremium_FULL_2016-05-18_001_csv.zip", 8877L),
leafFile("webdav/abp/40/full/data/", "AddressBasePremium_FULL_2016-05-18_001_csv.txt"),
leafFile("webdav/abp/40/full/data/", "AddressBasePremium_FULL_2016-05-18_002_csv.zip", 9988L),
leafFile("webdav/abp/40/full/data/", "AddressBasePremium_FULL_2016-05-18_002_csv.txt"),
leafFile("webdav/abp/40/full/data/", "ignore.this", 4321L)
))
))
))
))
)))
// when
val info = tree.indentedString
// then
info must be(
"""webdav/
| abi/
| 39/
| full/
| DVD1.zip (data) 123456 KiB
| DVD1.txt (txt) 1 KiB
| abp/
| 39/
| full/
| DVD1.csv (data) 999 KiB
| DVD1.txt (txt) 0 KiB
| 40/
| full/
| data/
| AddressBasePremium_FULL_2016-05-18_001_csv.zip (data) 8877 KiB
| AddressBasePremium_FULL_2016-05-18_001_csv.txt (txt) 0 KiB
| AddressBasePremium_FULL_2016-05-18_002_csv.zip (data) 9988 KiB
| AddressBasePremium_FULL_2016-05-18_002_csv.txt (txt) 0 KiB
| ignore.this 4321 KiB
|""".stripMargin)
}
}
}
class StubDavResource(href: String, creation: util.Date, modified: util.Date, contentType: String, contentLength: Long,
etag: String, displayName: String, resourceTypes: util.List[QName],
contentLanguage: String, supportedReports: util.List[QName],
customProps: util.Map[QName, String])
extends DavResource(href, creation, modified,
contentType, contentLength,
etag, displayName, resourceTypes,
contentLanguage, supportedReports,
customProps)
object StubDavResource {
def dir(href: String, name: String): StubDavResource = file(href, name, "httpd/unix-directory")
def file(href: String, name: String, contentType: String): StubDavResource = {
val now = new util.Date()
new StubDavResource(href, now, now, contentType, 123, "etag", name,
Nil.asJava, "en", Nil.asJava, Map[QName, String]().asJava)
}
}
| andywhardy/address-reputation-ingester | test/unit/fetch/WebDavTreeTest.scala | Scala | apache-2.0 | 14,519 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import cats.effect.IO
import minitest.TestSuite
import monix.eval.Task
import monix.execution.Ack.Continue
import monix.execution.schedulers.TestScheduler
import monix.reactive.{Observable, Observer}
import monix.execution.exceptions.DummyException
/** Tests for `doOnSubscribe` / `doAfterSubscribe` (and their `F`-suffixed
  * variants): the callback must take effect as part of subscription, and a
  * failing callback must surface its error via `onError` rather than throw. */
object DoOnSubscribeSuite extends TestSuite[TestScheduler] {
  def setup(): TestScheduler = TestScheduler()

  // Every test must leave the virtual scheduler fully drained.
  def tearDown(s: TestScheduler): Unit = {
    assert(s.state.tasks.isEmpty,
      "TestScheduler should have no pending tasks")
  }

  test("doOnSubscribe should work") { implicit s =>
    var elem = 0
    // The subscribe callback (elem = 20) runs before the single element (10)
    // is emitted: 20 / 10 == 2. Had the division run first, the final value
    // would be 20, so the assertion pins the ordering.
    Observable
      .now(10)
      .doOnSubscribeF { () => elem = 20 }
      .foreach { x => elem = elem / x }
    s.tick()
    assertEquals(elem, 2)
  }

  test("doOnSubscribe should protect against error") { implicit s =>
    val dummy = DummyException("dummy")
    var wasThrown: Throwable = null
    // A failing subscribe Task must be routed to onError, not thrown.
    Observable
      .range(1,10)
      .doOnSubscribe(Task.raiseError[Unit](dummy))
      .unsafeSubscribeFn(new Observer[Long] {
        def onNext(elem: Long) = Continue
        def onComplete() = ()
        def onError(ex: Throwable) = wasThrown = ex
      })
    s.tick()
    assertEquals(wasThrown, dummy)
  }

  test("doAfterSubscribe should work") { implicit s =>
    var elem = 0
    Observable
      .now(10)
      .doAfterSubscribeF { () => elem = 20 }
      .foreach { x => elem = elem / x }
    s.tick()
    assertEquals(elem, 2)
  }

  test("doAfterSubscribe should protect against error") { implicit s =>
    val dummy = DummyException("dummy")
    var wasThrown: Throwable = null
    // Same as the Task variant above, but using a cats-effect IO callback.
    Observable
      .range(1,10)
      .doAfterSubscribeF(IO.raiseError[Unit](dummy))
      .unsafeSubscribeFn(new Observer[Long] {
        def onNext(elem: Long) = Continue
        def onComplete() = ()
        def onError(ex: Throwable) = wasThrown = ex
      })
    s.tick()
    assertEquals(wasThrown, dummy)
  }
}
| Wogan/monix | monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/DoOnSubscribeSuite.scala | Scala | apache-2.0 | 2,600 |
package webserviceclients.audit2
import scala.concurrent.Future
import uk.gov.dvla.vehicles.presentation.common.clientsidesession.TrackingId
/** Abstraction over the (version 2) audit web-service client. */
trait AuditService {
  /** Asynchronously sends `auditRequest` to the audit service.
    * NOTE(review): `trackingId` presumably correlates the event with the
    * originating session — confirm against the concrete implementation. */
  def send(auditRequest: AuditRequest, trackingId: TrackingId): Future[Unit]
}
package models.slick
import bay.driver.CustomizedPgDriver
import java.time._
import io.circe._
import shared.models.slick.default._
// Concrete instantiation of the generated `Default` schema trait, binding the
// Slick profile through an early-initializer (`extends { ... } with`) so the
// trait's abstract `profile` is available during trait initialization.
// NOTE(review): the accompanying trait reads like Slick code-generator output —
// prefer regenerating over hand edits.
object Default extends {
  val profile = bay.driver.CustomizedPgDriver
} with Default
trait Default {
val profile: bay.driver.CustomizedPgDriver
import profile.api._
import slick.model.ForeignKeyAction
// NOTE: GetResult mappers for plain SQL are only generated for tables where Slick knows how to map the types of all columns.
import slick.jdbc.{GetResult => GR}
/** GetResult implicit for fetching UserGroup objects using plain SQL queries */
implicit def GetResultUserGroup(implicit e0: GR[String]): GR[UserGroup] = GR{
prs => import prs._
UserGroup(<<[String])
}
/** Table description of table UserGroup. Objects of this class serve as prototypes for rows in queries. */
class UserGroupTable(_tableTag: Tag) extends profile.api.Table[UserGroup](_tableTag, "UserGroup") {
def * = name <> (UserGroup, UserGroup.unapply)
/** Maps whole row to an option. Useful for outer joins. */
def ? = Rep.Some(name).shaped.<>(r => r.map(_=> UserGroup(r.get)), (_:Any) => throw new Exception("Inserting into ? projection not supported."))
/** Database column name SqlType(varchar), PrimaryKey */
val name: Rep[String] = column[String]("name", O.PrimaryKey)
}
/** Collection-like TableQuery object for table userGroups */
lazy val userGroups = new TableQuery(tag => new UserGroupTable(tag))
/** GetResult implicit for fetching User objects using plain SQL queries */
implicit def GetResultUser(implicit e0: GR[String], e1: GR[OffsetDateTime], e2: GR[Option[OffsetDateTime]], e3: GR[Option[String]], e4: GR[Option[Int]]): GR[User] = GR{
prs => import prs._
val r = (<<?[Int], <<[String], <<[String], <<[OffsetDateTime], <<?[OffsetDateTime], <<?[OffsetDateTime], <<?[String])
import r._
User.tupled((_2, _3, _4, _5, _6, _7, _1)) // putting AutoInc last
}
/** Table description of table User. Objects of this class serve as prototypes for rows in queries. */
class UserTable(_tableTag: Tag) extends profile.api.Table[User](_tableTag, "User") {
def * = (email, password, created, lastLogin, lastAction, resetPasswordToken, Rep.Some(id)) <> (User.tupled, User.unapply)
/** Maps whole row to an option. Useful for outer joins. */
def ? = (Rep.Some(email), Rep.Some(password), Rep.Some(created), lastLogin, lastAction, resetPasswordToken, Rep.Some(id)).shaped.<>({r=>import r._; _1.map(_=> User.tupled((_1.get, _2.get, _3.get, _4, _5, _6, _7)))}, (_:Any) => throw new Exception("Inserting into ? projection not supported."))
/** Database column email SqlType(varchar) */
val email: Rep[String] = column[String]("email")
/** Database column password SqlType(varchar) */
val password: Rep[String] = column[String]("password")
/** Database column created SqlType(timestamptz) */
val created: Rep[OffsetDateTime] = column[OffsetDateTime]("created")
/** Database column lastLogin SqlType(timestamptz), Default(None) */
val lastLogin: Rep[Option[OffsetDateTime]] = column[Option[OffsetDateTime]]("lastLogin", O.Default(None))
/** Database column lastAction SqlType(timestamptz), Default(None) */
val lastAction: Rep[Option[OffsetDateTime]] = column[Option[OffsetDateTime]]("lastAction", O.Default(None))
/** Database column resetPasswordToken SqlType(varchar), Default(None) */
val resetPasswordToken: Rep[Option[String]] = column[Option[String]]("resetPasswordToken", O.Default(None))
/** Database column id SqlType(serial), AutoInc, PrimaryKey */
val id: Rep[Int] = column[Int]("id", O.AutoInc, O.PrimaryKey)
/** Uniqueness Index over (email) (database name User_email_key) */
val index1 = index("User_email_key", email, unique=true)
}
/** Collection-like TableQuery object for table users */
lazy val users = new TableQuery(tag => new UserTable(tag))
/** GetResult implicit for fetching UserToUserGroup objects using plain SQL queries */
implicit def GetResultUserToUserGroup(implicit e0: GR[Int], e1: GR[String]): GR[UserToUserGroup] = GR{
prs => import prs._
val r = (<<[Int], <<[String])
import r._
UserToUserGroup.tupled((_1, _2)) // putting AutoInc last
}
/** Table description of table UserToUserGroup. Objects of this class serve as prototypes for rows in queries. */
class UserToUserGroupTable(_tableTag: Tag) extends profile.api.Table[UserToUserGroup](_tableTag, "UserToUserGroup") {
def * = (userId, groupName) <> (UserToUserGroup.tupled, UserToUserGroup.unapply)
/** Maps whole row to an option. Useful for outer joins. */
def ? = (Rep.Some(userId), Rep.Some(groupName)).shaped.<>({r=>import r._; _1.map(_=> UserToUserGroup.tupled((_1.get, _2.get)))}, (_:Any) => throw new Exception("Inserting into ? projection not supported."))
/** Database column userId SqlType(int4) */
val userId: Rep[Int] = column[Int]("userId")
/** Database column groupName SqlType(varchar) */
val groupName: Rep[String] = column[String]("groupName")
/** Foreign key referencing users (database name UserToUserGroup_userId_fkey) */
lazy val userTableFk = foreignKey("UserToUserGroup_userId_fkey", userId, users)(r => r.id, onUpdate=ForeignKeyAction.NoAction, onDelete=ForeignKeyAction.NoAction)
/** Foreign key referencing userGroups (database name UserToUserGroup_groupName_fkey) */
lazy val userGroupTableFk = foreignKey("UserToUserGroup_groupName_fkey", groupName, userGroups)(r => r.name, onUpdate=ForeignKeyAction.NoAction, onDelete=ForeignKeyAction.NoAction)
}
/** Collection-like TableQuery object for table userToUserGroups */
lazy val userToUserGroups = new TableQuery(tag => new UserToUserGroupTable(tag))
}
| Daxten/BayTemplate-ScalaJs | dbschema/src/main/scala/models/slick/Default.scala | Scala | apache-2.0 | 5,830 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy
import java.io.File
import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.deploy.master.{ApplicationInfo, DriverInfo, WorkerInfo, WorkerResourceInfo}
import org.apache.spark.deploy.worker.{DriverRunner, ExecutorRunner}
import org.apache.spark.resource.{ResourceInformation, ResourceRequirement}
import org.apache.spark.resource.ResourceUtils.{FPGA, GPU}
/** Factory helpers producing standalone-deploy model objects (applications,
  * drivers, workers, runners) populated with canned values for deploy tests. */
private[deploy] object DeployTestUtils {

  // Minimal application description wrapping a dummy launch command.
  def createAppDesc(): ApplicationDescription = {
    val cmd = new Command("mainClass", List("arg1", "arg2"), Map(), Seq(), Seq(), Seq())
    new ApplicationDescription("name", Some(4), 1234, cmd, "appUiUrl")
  }

  // ApplicationInfo whose timestamps match the constants in JsonConstants and
  // whose executors carry the GPU/FPGA resource requirements below.
  def createAppInfo() : ApplicationInfo = {
    val appDesc = createAppDesc()
    val appInfo = new ApplicationInfo(JsonConstants.appInfoStartTime,
      "id", appDesc.copy(resourceReqsPerExecutor = createResourceRequirement),
      JsonConstants.submitDate, null, Int.MaxValue)
    appInfo.endTime = JsonConstants.currTimeInMillis
    appInfo
  }

  // Driver command with env vars, classpath, library path and java opts set.
  def createDriverCommand(): Command = new Command(
    "org.apache.spark.FakeClass", Seq("WORKER_URL", "USER_JAR", "mainClass"),
    Map(("K1", "V1"), ("K2", "V2")), Seq("cp1", "cp2"), Seq("lp1", "lp2"), Seq("-Dfoo")
  )

  def createDriverDesc(): DriverDescription =
    new DriverDescription("hdfs://some-dir/some.jar", 100, 3, false, createDriverCommand())

  // DriverInfo carrying both resource requirements and concrete assignments.
  def createDriverInfo(): DriverInfo = {
    val dDesc = createDriverDesc().copy(resourceReqs = createResourceRequirement)
    val dInfo = new DriverInfo(3, "driver-3", dDesc, JsonConstants.submitDate)
    dInfo.withResources(createResourceInformation)
    dInfo
  }

  // Worker advertising three GPU and three FPGA addresses; heartbeat matches
  // the JsonConstants timestamp used by the JSON-protocol tests.
  def createWorkerInfo(): WorkerInfo = {
    val gpuResource = new WorkerResourceInfo(GPU, Seq("0", "1", "2"))
    val fpgaResource = new WorkerResourceInfo(FPGA, Seq("3", "4", "5"))
    val resources = Map(GPU -> gpuResource, FPGA -> fpgaResource)
    val workerInfo = new WorkerInfo("id", "host", 8080, 4, 1234, null,
      "http://publicAddress:80", resources)
    workerInfo.lastHeartbeat = JsonConstants.currTimeInMillis
    workerInfo
  }

  // ExecutorRunner in RUNNING state; resource assignments are attached only
  // when `withResources` is set, otherwise the map is empty.
  def createExecutorRunner(execId: Int, withResources: Boolean = false): ExecutorRunner = {
    val resources = if (withResources) {
      createResourceInformation
    } else {
      Map.empty[String, ResourceInformation]
    }
    new ExecutorRunner(
      "appId",
      execId,
      createAppDesc(),
      4,
      1234,
      null,
      "workerId",
      "http://",
      "host",
      123,
      "publicAddress",
      new File("sparkHome"),
      new File("workDir"),
      "spark://worker",
      new SparkConf,
      Seq("localDir"),
      ExecutorState.RUNNING,
      resources)
  }

  def createDriverRunner(driverId: String): DriverRunner = {
    val conf = new SparkConf()
    new DriverRunner(
      conf,
      driverId,
      new File("workDir"),
      new File("sparkHome"),
      createDriverDesc(),
      null,
      "spark://worker",
      new SecurityManager(conf))
  }

  // Concrete GPU/FPGA address assignments (three of each).
  private def createResourceInformation: Map[String, ResourceInformation] = {
    val gpuResource = new ResourceInformation(GPU, Array("0", "1", "2"))
    val fpgaResource = new ResourceInformation(FPGA, Array("3", "4", "5"))
    Map(GPU -> gpuResource, FPGA -> fpgaResource)
  }

  // Requirements for three GPUs and three FPGAs.
  private def createResourceRequirement: Seq[ResourceRequirement] = {
    Seq(ResourceRequirement("gpu", 3), ResourceRequirement("fpga", 3))
  }
}
}
| pgandhi999/spark | core/src/test/scala/org/apache/spark/deploy/DeployTestUtils.scala | Scala | apache-2.0 | 4,217 |
// Automatically-generated code
// === Init ===
// Transfo time: 0ms Stringifying time: 124ms
{
val xs_0 = scala.collection.Seq.apply[scala.Int](1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
val x_1 = xs_0.toIndexedSeq;
val sch_2 = ((x_1): scala.collection.IndexedSeq[scala.Int]);
val x_3 = sch_2.size;
val x_4 = new sfusion.Sequence[scala.Int]((() => sfusion.impl.`package`.fromIndexed[scala.Int](sch_2)), scala.`package`.Left.apply[scala.Int, scala.Nothing](x_3));
x_4.show(10)
}
// === HL ===
// Transfo time: 3ms Stringifying time: 21ms
// Same as above.
// === Impl ===
// Transfo time: 61ms Stringifying time: 135ms
{
val sch_0 = "";
val xs_1 = scala.collection.Seq.apply[scala.Int](1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
val x_2 = xs_1.toIndexedSeq;
val sch_3 = ((x_2): scala.collection.IndexedSeq[scala.Int]);
val x_4 = sch_3.size;
val x_5 = new sfusion.Sequence[scala.Int]((() => sfusion.impl.`package`.fromIndexed[scala.Int](sch_3)), scala.`package`.Left.apply[scala.Int, scala.Nothing](x_4));
val x_6 = scala.StringContext.apply("Sequence(", ")");
var truncated_7: scala.Boolean = true;
val x_8 = x_5.under;
val x_9 = x_8.apply();
val x_10 = sfusion.impl.`package`.onFinish[scala.Int](x_9)((() => truncated_7 = false));
val withSep_15 = sfusion.impl.`package`.mapHeadTail[scala.Int, java.lang.String](x_10)(((a_11: scala.Int) => {
val x_12 = scala.StringContext.apply(sch_0, sch_0);
x_12.s(a_11)
}))(((a_13: scala.Int) => {
val x_14 = scala.StringContext.apply(",", sch_0);
x_14.s(a_13)
}));
val withTrunc_16 = sfusion.impl.`package`.take[java.lang.String](withSep_15)(10);
val flat_19 = sfusion.impl.`package`.fold[java.lang.String, java.lang.String](withTrunc_16)(sch_0)(((x$4_17: java.lang.String, x$5_18: java.lang.String) => x$4_17.+(x$5_18)));
val x_20 = truncated_7;
x_6.s(if (x_20)
flat_19.+(",...")
else
flat_19)
}
// === CtorInline ===
// Transfo time: 28ms Stringifying time: 89ms
{
val sch_0 = "";
val xs_1 = scala.collection.Seq.apply[scala.Int](1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
val x_2 = xs_1.toIndexedSeq;
val sch_3 = ((x_2): scala.collection.IndexedSeq[scala.Int]);
val x_4 = sch_3.size;
val x_5 = scala.StringContext.apply("Sequence(", ")");
var truncated_6: scala.Boolean = true;
val x_7 = sfusion.impl.`package`.fromIndexed[scala.Int](sch_3);
val x_8 = sfusion.impl.`package`.onFinish[scala.Int](x_7)((() => truncated_6 = false));
val withSep_13 = sfusion.impl.`package`.mapHeadTail[scala.Int, java.lang.String](x_8)(((a_9: scala.Int) => {
val x_10 = scala.StringContext.apply(sch_0, sch_0);
x_10.s(a_9)
}))(((a_11: scala.Int) => {
val x_12 = scala.StringContext.apply(",", sch_0);
x_12.s(a_11)
}));
val withTrunc_14 = sfusion.impl.`package`.take[java.lang.String](withSep_13)(10);
val flat_17 = sfusion.impl.`package`.fold[java.lang.String, java.lang.String](withTrunc_14)(sch_0)(((x$4_15: java.lang.String, x$5_16: java.lang.String) => x$4_15.+(x$5_16)));
val x_18 = truncated_6;
x_5.s(if (x_18)
flat_17.+(",...")
else
flat_17)
}
// === ImplOptim ===
// Transfo time: 23ms Stringifying time: 85ms
{
val lsch_0 = squid.utils.Lazy.apply[java.lang.String]("");
val xs_1 = scala.collection.Seq.apply[scala.Int](1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
val x_2 = xs_1.toIndexedSeq;
val sch_3 = ((x_2): scala.collection.IndexedSeq[scala.Int]);
val x_4 = sch_3.size;
val x_5 = scala.StringContext.apply("Sequence(", ")");
var truncated_6: scala.Boolean = true;
val x_7 = sfusion.impl.`package`.fromIndexed[scala.Int](sch_3);
val x_8 = sfusion.impl.`package`.onFinish[scala.Int](x_7)((() => truncated_6 = false));
val withSep_13 = sfusion.impl.`package`.mapHeadTail[scala.Int, java.lang.String](x_8)(((a_9: scala.Int) => {
val x_10 = scala.StringContext.apply(lsch_0.value, lsch_0.value);
x_10.s(a_9)
}))(((a_11: scala.Int) => {
val x_12 = scala.StringContext.apply(",", lsch_0.value);
x_12.s(a_11)
}));
val withTrunc_14 = sfusion.impl.`package`.take[java.lang.String](withSep_13)(10);
val strAcc_15 = new scala.collection.mutable.StringBuilder();
sfusion.impl.`package`.foreach[java.lang.String](withTrunc_14)(((s_16: java.lang.String) => {
strAcc_15.++=(s_16.toString());
()
}));
val x_17 = strAcc_15.result();
val x_18 = truncated_6;
x_5.s(if (x_18)
x_17.+(",...")
else
x_17)
}
// === Imperative ===
// Transfo time: 110ms Stringifying time: 109ms
{
val xs_0 = scala.collection.Seq.apply[scala.Int](1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
val x_1 = xs_0.toIndexedSeq;
val sch_2 = ((x_1): scala.collection.IndexedSeq[scala.Int]);
val x_3 = sch_2.size;
val x_4 = scala.StringContext.apply("Sequence(", ")");
var truncated_5: scala.Boolean = true;
val x_6 = sch_2.length;
var i_7: scala.Int = 0;
var first_8: scala.Boolean = true;
var taken_9: scala.Int = 0;
val strAcc_10 = new scala.collection.mutable.StringBuilder();
while ({
val x_11 = i_7;
x_11.<(x_6).&&({
val sch_12 = "";
val x_13 = i_7;
val x_14 = sch_2.apply(x_13);
val x_15 = i_7;
i_7 = x_15.+(1);
val x_16 = first_8;
val x_19 = if (x_16)
{
first_8 = false;
val x_17 = scala.StringContext.apply(sch_12, sch_12);
x_17.s(x_14)
}
else
{
val x_18 = scala.StringContext.apply(",", sch_12);
x_18.s(x_14)
};
val x_20 = taken_9;
x_20.<(10).&&({
val x_21 = taken_9;
taken_9 = x_21.+(1);
strAcc_10.++=(x_19.toString());
true.&&({
val x_22 = taken_9;
x_22.<(10)
})
})
})
})
()
;
val x_23 = i_7;
val sch_24 = x_23.==(x_6);
if (sch_24)
truncated_5 = false
else
();
sch_24.||({
val x_25 = taken_9;
x_25.==(10)
});
val x_26 = strAcc_10.result();
val x_27 = truncated_5;
x_4.s(if (x_27)
x_26.+(",...")
else
x_26)
}
// === FlatMapFusion ===
// Transfo time: 2ms Stringifying time: 59ms
// Same as above.
// === LateImperative ===
// Transfo time: 0ms Stringifying time: 53ms
// Same as above.
// === VarFlattening ===
// Transfo time: 2ms Stringifying time: 52ms
// Same as above.
// === Low-Level Norm ===
// Transfo time: 60ms Stringifying time: 104ms
{
val xs_0 = scala.collection.Seq.apply[scala.Int](1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
val x_1 = xs_0.toIndexedSeq;
val sch_2 = ((x_1): scala.collection.IndexedSeq[scala.Int]);
val x_3 = sch_2.size;
val x_4 = scala.StringContext.apply("Sequence(", ")");
var truncated_5: scala.Boolean = true;
val x_6 = sch_2.length;
var i_7: scala.Int = 0;
var first_8: scala.Boolean = true;
var taken_9: scala.Int = 0;
val strAcc_10 = new scala.collection.mutable.StringBuilder();
while ({
val x_11 = i_7;
x_11.<(x_6).&&({
val sch_12 = "";
val x_13 = i_7;
val x_14 = sch_2.apply(x_13);
val x_15 = i_7;
i_7 = x_15.+(1);
val x_16 = first_8;
val x_19 = if (x_16)
{
first_8 = false;
val x_17 = scala.StringContext.apply(sch_12, sch_12);
x_17.s(x_14)
}
else
{
val x_18 = scala.StringContext.apply(",", sch_12);
x_18.s(x_14)
};
val x_20 = taken_9;
x_20.<(10).&&({
val x_21 = taken_9;
taken_9 = x_21.+(1);
strAcc_10.++=(x_19.toString());
val x_22 = taken_9;
x_22.<(10)
})
})
})
()
;
val x_23 = i_7;
val sch_24 = x_23.==(x_6);
if (sch_24)
truncated_5 = false
else
();
val x_26 = sch_24.`unary_!`.&&({
val x_25 = taken_9;
x_25.==(10).`unary_!`
});
val x_27 = strAcc_10.result();
val x_28 = truncated_5;
x_4.s(if (x_28)
x_27.+(",...")
else
x_27)
}
// === ReNorm (should be the same) ===
// Transfo time: 50ms Stringifying time: 48ms
// Same as above.
| epfldata/squid | example_gen/test/sfusion/Basics.scala | Scala | apache-2.0 | 7,927 |
package com.tritondigital.consul.http.client
import com.ning.http.client.{AsyncHttpClient, Response}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
class ConsulRequestBuilder(requestBuilder: AsyncHttpClient#BoundRequestBuilder, host: String = "localhost", port: Int = 8500, cache: Option[Cache] = None) {

  /** Returns a builder targeting the given Consul agent.
    *
    * Fix: the previous version rebuilt the instance without the configured
    * `cache`, so calling `withCache(...)` before `withConsul(...)` silently
    * disabled caching. The cache is now carried over, mirroring how
    * `withCache` preserves host/port.
    */
  def withConsul(host: String = "localhost", port: Int = 8500): ConsulRequestBuilder =
    new ConsulRequestBuilder(requestBuilder, host, port, cache)

  /** Returns a builder that resolves Consul services through the given cache. */
  def withCache(cache: Cache): ConsulRequestBuilder =
    new ConsulRequestBuilder(requestBuilder, host, port, Some(cache))

  /** Rewrites the request's host/port via Consul resolution if necessary,
    * then executes the HTTP request asynchronously.
    */
  def execute(): Future[Response] = {
    val resolve: (String, String, Int) => Future[Node] = ConsulClient.resolve(_, _, _, cache)
    val futureRequestBuilder = RequestBuilderUtils.replaceIpAndPortIfNecessary(requestBuilder, resolve(_, host, port))
    futureRequestBuilder.flatMap(new FutureRequestBuilder(_).execute())
  }
}
| tritondigital/ConsulHttpClient | src/main/scala/com/tritondigital/consul/http/client/ConsulRequestBuilder.scala | Scala | mit | 943 |
package capitulo02
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.internal.runners.JUnit4ClassRunner
@RunWith(classOf[JUnit4ClassRunner])
class ProdutoHelloJUnit {
/** Product of the character code points of `s`, accumulated as a Long
  * (e.g. "Hello" yields 9415087488).
  */
def prod(s: String): Long = s.foldLeft(1L)((acc, ch) => acc * ch.toLong)
// Regression check: the character-code product of "Hello" must equal the
// precomputed constant 9415087488.
@Test
def test(){
assert(prod("Hello") == 9415087488L)
}
} | celioeduardo/scala-impatient | src/test/scala/capitulo02/ProdutoHelloJUnit.scala | Scala | mit | 363 |
/*
* SpreadsheetViewImpl.scala
* (SysSon)
*
* Copyright (c) 2013-2017 Institute of Electronic Music and Acoustics, Graz.
* Copyright (c) 2014-2019 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package at.iem.sysson
package gui
package impl
import at.iem.sysson.gui.impl.AbstractPlotViewImpl.PlotData
import de.sciss.desktop.UndoManager
import de.sciss.lucre.matrix.gui.DimensionIndex
import de.sciss.lucre.stm.Sys
import de.sciss.lucre.swing.{View, deferTx}
import de.sciss.swingplus.ListView
import de.sciss.synth.proc.Universe
import javax.swing.table.{AbstractTableModel, DefaultTableColumnModel, TableColumn}
import scala.swing.ScrollPane.BarPolicy
import scala.swing.Table.{AutoResizeMode, ElementMode}
import scala.swing.{ScrollPane, Table}
object SpreadsheetViewImpl {
/** Creates a spreadsheet view for the given plot.
  *
  * @param plot  the plot whose matrix data is rendered as a table
  * @param stats the statistics view handed through to the implementation
  */
def apply[S <: Sys[S]](plot: Plot[S], stats: PlotStatsView[S])(implicit tx: S#Tx, universe: Universe[S],
undoManager: UndoManager): View[S] = {
new Impl[S](stats).init(plot)
}
private final class Impl[S <: Sys[S]](statsView: PlotStatsView[S])(implicit val universe: Universe[S],
val undoManager: UndoManager)
extends AbstractPlotViewImpl[S] {
// Must be called inside a transaction right after construction; the Swing
// component tree is then built on the EDT via `deferTx`.
override def init(plot: Plot[S])(implicit tx: S#Tx): Impl.this.type = {
super.init(plot)
deferTx(guiInit())
this
}
// Last data delivered to `updatePlot`; starts out empty so the first real
// update always registers as a change.
private[this] var _plotData = new PlotData(
"", "", new Array[Float](0), "", "", new Array[Float](0), "", "", new Array[Array[Float]](0), is1D = false)
// called on EDT
// Diffs the incoming data against the previous snapshot so only affected
// table parts refresh: a changed horizontal axis forces a full structure
// rebuild (headers + cells), otherwise only cell contents are invalidated;
// the row-header list is updated independently when the vertical axis changed.
protected def updatePlot(data: PlotData): Unit = {
val colSizeChanged = data.hData.length != _plotData.hData.length
val colDataChanged = colSizeChanged || !data.hData.sameElements(_plotData.hData)
val rowSizeChanged = data.vData.length != _plotData.vData.length
val rowDataChanged = rowSizeChanged || !data.vData.sameElements(_plotData.vData)
// swap in the new snapshot before notifying the models, which read `_plotData`
_plotData = data
if (colDataChanged) {
mTableColumn.updateHeader()
mTable.fireTableStructureChanged()
} else {
mTable.fireTableDataChanged()
}
if (rowDataChanged) {
updateRows()
}
}
// Table model reading straight out of `_plotData`; no copies are made.
private[this] object mTable extends AbstractTableModel {
def getRowCount : Int = _plotData.vData.length
// A 1-D plot is rendered as a single column.
def getColumnCount: Int = if (_plotData.is1D) 1 else _plotData.hData.length
def getValueAt(rowIdx: Int, colIdx: Int): AnyRef = {
// val f = if (_plotData.mData.length > rowIdx) {
// val row = _plotData.mData(rowIdx)
// if (row.length > colIdx) row(colIdx) else Float.NaN
// } else Float.NaN
// NOTE(review): unlike the commented-out variant above, this indexes
// without bounds checks — assumes the model is only queried after
// `updatePlot` ran on the EDT with consistent row/column counts; confirm.
val f = if (_plotData.is1D) _plotData.mData(0)(rowIdx) else _plotData.mData(rowIdx)(colIdx)
f.toString // XXX TODO
}
}
// Column model kept in sync with `_plotData` by `updatePlot`.
private[this] object mTableColumn extends DefaultTableColumnModel {
  /** Rebuilds/relabels the columns to match the current plot data. */
  def updateHeader(): Unit =
    if (_plotData.is1D) updateHeader1D() else updateHeader2D()

  /** Appends a new column labelled `name`.
    *
    * Fix: the previous implementation ignored `name` and always installed an
    * empty header (`setHeaderValue("")`), so columns freshly created by
    * `updateHeader2D` showed no label until a later update with an unchanged
    * column count happened to relabel them.
    */
  private def mkColumn(colIdx: Int, name: String): Unit = {
    val col = new TableColumn(colIdx)
    col.setHeaderValue(name)
    col.setMinWidth (80)
    col.setPreferredWidth(80)
    addColumn(col)
  }

  /** A 1-D plot shows exactly one, unlabelled column. */
  private def updateHeader1D(): Unit = {
    val oldNum = getColumnCount
    val newNum = 1
    val stop1 = math.min(oldNum, newNum)
    var colIdx = 0
    // relabel the columns both old and new state share
    while (colIdx < stop1) {
      val col = getColumn(colIdx)
      col.setHeaderValue("")
      colIdx += 1
    }
    // grow: add missing columns
    while (colIdx < newNum) {
      mkColumn(colIdx, "")
      colIdx += 1
    }
    // shrink: remove surplus columns (always the one now at index `newNum`)
    while (colIdx < oldNum) {
      val col = getColumn(newNum)
      removeColumn(col)
      colIdx += 1
    }
  }

  /** Labels the columns with the horizontal axis values, formatted according
    * to the axis units where applicable.
    */
  private def updateHeader2D(): Unit = {
    import DimensionIndex.{shouldUseUnitsString, unitsStringFormatter}
    val units = _plotData.hUnits
    val lbUnits = shouldUseUnitsString(units)
    val data = _plotData.hData
    val labels = if (lbUnits) {
      val fmt = unitsStringFormatter(units)
      data.map(fmt(_))
    } else {
      data.map(_.toString)
    }
    val oldNum = getColumnCount
    val newNum = data.length
    val stop1 = math.min(oldNum, newNum)
    var colIdx = 0
    // relabel the columns both old and new state share
    while (colIdx < stop1) {
      val col = getColumn(colIdx)
      col.setHeaderValue(labels(colIdx))
      colIdx += 1
    }
    // grow: new columns now receive their label immediately (see mkColumn fix)
    while (colIdx < newNum) {
      mkColumn(colIdx, labels(colIdx))
      colIdx += 1
    }
    // shrink: remove surplus columns
    while (colIdx < oldNum) {
      val col = getColumn(newNum)
      removeColumn(col)
      colIdx += 1
    }
  }
}
private[this] val mList = ListView.Model.empty[String]
// Rebuilds the row-header labels from the vertical axis data, formatting via
// the axis units where applicable. Reuses existing list slots, appends new
// ones, and trims surplus entries in place to minimise list-model events.
private def updateRows(): Unit = {
import DimensionIndex.{shouldUseUnitsString, unitsStringFormatter}
val units = _plotData.vUnits
val lbUnits = shouldUseUnitsString(units)
val data = _plotData.vData
val labels = if (lbUnits) {
val fmt = unitsStringFormatter(units)
data.map(fmt(_))
} else {
data.map(_.toString)
}
val oldNum = mList.size
val newNum = data.length
val stop1 = math.min(oldNum, newNum)
var colIdx = 0
// overwrite the slots both old and new state share
while (colIdx < stop1) {
mList.update(colIdx, labels(colIdx))
colIdx += 1
}
// append labels for newly added rows
while (colIdx < newNum) {
mList += labels(colIdx)
colIdx += 1
}
// trim labels of removed rows
if (oldNum > newNum) {
mList.remove(newNum, oldNum - newNum)
}
}
// Builds the Swing component tree: a table over the matrix cells with a list
// view as row header, wrapped in a scroll pane. Runs on the EDT (invoked via
// `deferTx` from `init`).
private def guiInit(): Unit = {
val ggTable = new Table
// columns are managed manually by `mTableColumn`, not derived from the model
ggTable.peer.setAutoCreateColumnsFromModel(false)
ggTable.peer.setColumnModel(mTableColumn)
ggTable.autoResizeMode = AutoResizeMode.Off
ggTable.model = mTable
ggTable.selection.elementMode = ElementMode.Cell
// cf. http://www.java2s.com/Code/Java/Swing-Components/TableRowHeaderExample.htm
// XXX TODO -- this looks nicer:
// http://stackoverflow.com/questions/8187639/jtable-with-titled-rows-and-columns#8187799
// row header: axis labels, kept in sync by `updateRows`; disabled so it is
// display-only and not selectable
val ggRows = new ListView[String](mList)
ggRows.fixedCellHeight = ggTable.rowHeight
ggRows.enabled = false
// fixedCellWidth = 160 // maxRow.toString.length * 13
// fixedCellHeight = 24 // rowHeightIn
// visibleRowCount = 12 // inVisRows
// }
val ggScroll = new ScrollPane(ggTable)
ggScroll.horizontalScrollBarPolicy = BarPolicy.Always
ggScroll.verticalScrollBarPolicy = BarPolicy.Always
ggScroll.rowHeaderView = Some(ggRows)
component = ggScroll
}
}
}
| iem-projects/sysson | src/main/scala/at/iem/sysson/gui/impl/SpreadsheetViewImpl.scala | Scala | gpl-3.0 | 6,920 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.catalyst.expressions.{Attribute, GenericRowWithSchema}
import org.apache.spark.sql.connector.catalog.{CatalogV2Util, SupportsNamespaces}
import org.apache.spark.sql.types.StructType
/**
* Physical plan node for describing a namespace.
*/
case class DescribeNamespaceExec(
    output: Seq[Attribute],
    catalog: SupportsNamespaces,
    namespace: Seq[String],
    isExtended: Boolean) extends V2CommandExec {

  // Serializer turning external rows (matching `output`) into Catalyst rows.
  private val toRow =
    RowEncoder(StructType.fromAttributes(output)).resolveAndBind().createSerializer()

  override protected def run(): Seq[InternalRow] = {
    val ns = namespace.toArray
    val metadata = catalog.loadNamespaceMetadata(ns)

    // First row: the namespace itself (last name part).
    val nameRow = toCatalystRow("Namespace Name", ns.last)

    // One row per reserved property that is present in the metadata.
    val reservedRows = CatalogV2Util.NAMESPACE_RESERVED_PROPERTIES.flatMap { p =>
      Option(metadata.get(p)).map(toCatalystRow(p.capitalize, _))
    }

    // With EXTENDED, append the remaining (non-reserved) properties, if any.
    val propertyRows =
      if (isExtended) {
        val properties = metadata.asScala -- CatalogV2Util.NAMESPACE_RESERVED_PROPERTIES
        if (properties.nonEmpty) {
          Seq(toCatalystRow("Properties", properties.toSeq.mkString("(", ",", ")")))
        } else {
          Nil
        }
      } else {
        Nil
      }

    (nameRow +: reservedRows) ++ propertyRows
  }

  // Converts a sequence of strings into one InternalRow matching `schema`.
  private def toCatalystRow(strs: String*): InternalRow = {
    toRow(new GenericRowWithSchema(strs.toArray, schema)).copy()
  }
}
| dbtsai/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DescribeNamespaceExec.scala | Scala | apache-2.0 | 2,381 |
package momijikawa.p2pscalaproto
import akka.actor._
import scala.concurrent.duration._
// Placeholder: the class currently has no members and `path` is unused.
// NOTE(review): looks like work in progress — confirm intent before relying
// on it (the file-level akka/duration imports are likewise unused so far).
class ActorFinder(path: String) {
}
| windymelt/p2pScalaProto | src/main/scala/momijikawa/p2pscalaproto/ActorFinder.scala | Scala | bsd-3-clause | 127 |
package com.shocktrade.server.dao
package securities
import com.shocktrade.common.forms.ResearchOptions
import com.shocktrade.common.models.quote._
import com.shocktrade.server.common.LoggerFactory
import io.scalajs.npm.mongodb._
import io.scalajs.util.JsUnderOrHelper._
import scala.concurrent.{ExecutionContext, Future}
import scala.scalajs.js
/**
* Securities DAO
* @author Lawrence Daniels <lawrence.daniels@gmail.com>
*/
@js.native
trait SecuritiesDAO extends Collection
/**
* Securities DAO Companion
* @author Lawrence Daniels <lawrence.daniels@gmail.com>
*/
object SecuritiesDAO {
/**
* Securities DAO Enrichment
* @param dao the given [[SecuritiesDAO securities DAO]]
*/
implicit class SecuritiesDAOEnrichment(val dao: SecuritiesDAO) extends AnyVal {
// Counts active securities per industry within the given sector
// (group by "industry", sum 1 per document).
@inline
def exploreIndustries(sector: String): js.Promise[js.Array[ExploreQuote]] = {
dao.aggregate[ExploreQuote](js.Array(
$match("active" $eq true, "sector" $eq sector, "industry" $ne null),
$group("_id" -> "$industry", "total" $sum 1)
)).toArray()
}
// Counts active securities per sector.
// NOTE(review): the implicit ExecutionContext is accepted but unused, unlike
// the sibling explore* methods which take none — confirm whether it can be
// dropped without breaking callers.
@inline
def exploreSectors(implicit ec: ExecutionContext): js.Promise[js.Array[ExploreQuote]] = {
dao.aggregate[ExploreQuote](js.Array(
$match("active" $eq true, "sector" $ne null),
$group("_id" -> "$sector", "total" $sum 1)
)).toArray()
}
// Counts active securities per sub-industry within a sector/industry pair.
@inline
def exploreSubIndustries(sector: String, industry: String): js.Promise[js.Array[ExploreQuote]] = {
dao.aggregate[ExploreQuote](js.Array(
$match("active" $eq true, "sector" $eq sector, "industry" $eq industry, "subIndustry" $ne null),
$group("_id" -> "$subIndustry", "total" $sum 1)
)).toArray()
}
// Fetches the full quote document for a symbol.
@inline
def findCompleteQuote(symbol: String)(implicit ec: ExecutionContext): Future[Option[CompleteQuote]] = {
dao.findOneFuture[CompleteQuote]("symbol" $eq symbol)
}
// Research quotes for an industry that has no sub-industry breakdown
// (subIndustry is null, empty, or absent).
@inline
def findQuotesByIndustry(sector: String, industry: String): js.Promise[js.Array[ResearchQuote]] = {
val query = doc("active" $eq true, "sector" $eq sector, "industry" $eq industry, $or("subIndustry" $eq null, "subIndustry" $eq "", "subIndustry" $exists false))
dao.find[ResearchQuote](query, projection = ResearchQuote.Fields.toProjection).toArray()
}
// Research quotes for a specific sector/industry/sub-industry triple.
@inline
def findQuotesBySubIndustry(sector: String, industry: String, subIndustry: String): js.Promise[js.Array[ResearchQuote]] = {
val query = doc("active" $eq true, "sector" $eq sector, "industry" $eq industry, "subIndustry" $eq subIndustry)
dao.find[ResearchQuote](query, projection = ResearchQuote.Fields.toProjection).toArray()
}
// Fetches only the given fields of a single quote.
@inline
def findQuote[T <: js.Any](symbol: String, fields: Seq[String])(implicit ec: ExecutionContext): Future[Option[T]] = {
dao.findOneFuture[T]("symbol" $eq symbol, fields = js.Array(fields: _*))
}
// Generic projection query over the securities collection.
@inline
def findQuotes[T <: js.Any](selector: js.Any, fields: Seq[String]): js.Promise[js.Array[T]] = {
dao.find[T](selector, projection = fields.toProjection).toArray()
}
// Fetches the given fields for a batch of symbols.
@inline
def findQuotesBySymbols[T <: js.Any](symbols: Seq[String], fields: Seq[String]): js.Promise[js.Array[T]] = {
dao.find[T]("symbol" $in js.Array(symbols: _*), projection = fields.toProjection).toArray()
}
// Runs a stock-research query: builds a range selector from the options,
// sorts by the requested field(s), and caps the result count (default 25).
@inline
def research(options: ResearchOptions): js.Promise[js.Array[ResearchQuote]] = {
// build the query
val selector = doc("active" $eq true, "symbol" $ne null)
toRange("beta", options.betaMin, options.betaMax) foreach (selector ++= _)
toRange("changePct", options.changeMin, options.changeMax) foreach (selector ++= _)
toRange("lastTrade", options.priceMin, options.priceMax) foreach (selector ++= _)
toRange("spread", options.spreadMin, options.spreadMax) foreach (selector ++= _)
toRange("volume", options.volumeMin, options.volumeMax) foreach (selector ++= _)
toRange("avgVolume10Day", options.avgVolumeMin, options.avgVolumeMax) foreach (selector ++= _)
LoggerFactory.getLogger(getClass()).info("query: %j", selector)
// is there an array of sort fields?
// (flattened to a [field, direction, field, direction, ...] array;
// otherwise fall back to single sortBy/reverse, defaulting to "symbol" asc)
val sortFields: js.Array[js.Any] = options.sortFields map (_ flatMap { sf =>
js.Array(sf.field, sf.direction).asInstanceOf[js.Array[js.Any]]
}) getOrElse {
val sortField = options.sortBy.flat.getOrElse("symbol")
val sortDirection = if (options.reverse.isTrue) -1 else 1
js.Array(sortField, sortDirection)
}
// determine the maximum number of results
val maxResults = options.maxResults.flat.getOrElse(25)
// perform the query
dao.find[ResearchQuote](selector, projection = ResearchQuote.Fields.toProjection)
.limit(maxResults)
.sort(sortFields)
.toArray()
}
// Prefix auto-complete over symbol and company name (case-insensitive),
// sorted by name, capped at `maxResults`.
@inline
def search(searchTerm: String, maxResults: Int): js.Promise[js.Array[AutoCompleteQuote]] = {
dao.find[AutoCompleteQuote](
// { active : true, $or : [ {symbol : { $regex: ^?0, $options:'i' }}, {name : { $regex: ^?0, $options:'i' }} ] }
selector = doc(
"active" $eq true, "symbol" $ne null,
$or("symbol" $regex(s"^$searchTerm", ignoreCase = true), "name" $regex(s"^$searchTerm", ignoreCase = true))
),
projection = AutoCompleteQuote.Fields.toProjection)
.sort(js.Array("name", 1))
.limit(maxResults)
.toArray()
}
/** Builds an optional selector restricting `field` to the given bounds:
  * both bounds -> between, one bound -> $gte/$lte, neither -> None.
  */
@inline
private def toRange(field: String, minValue: js.UndefOr[Double], maxValue: js.UndefOr[Double]) = {
  (minValue.flat.toOption, maxValue.flat.toOption) match {
    // Fix: use the extracted Double bounds; the original bound `min`/`max`
    // here but passed the raw js.UndefOr values to `between` instead.
    case (Some(min), Some(max)) => Some(doc(field between min -> max))
    case (Some(min), None) => Some(doc(field $gte min))
    case (None, Some(max)) => Some(doc(field $lte max))
    case (None, None) => None
  }
}
}
/**
* Securities DAO Constructors
* @param db the given [[Db database]]
*/
implicit class SecuritiesDAOConstructors(val db: Db) extends AnyVal {

  /** Returns the DAO view over the "Securities" collection.
    * The cast is safe by construction: `SecuritiesDAO` is a native facade
    * trait over `Collection`.
    */
  @inline
  def getSecuritiesDAO: SecuritiesDAO =
    db.collection("Securities").asInstanceOf[SecuritiesDAO]
}
} | ldaniels528/shocktrade.js | app/server/dao/src/main/scala/com/shocktrade/server/dao/securities/SecuritiesDAO.scala | Scala | apache-2.0 | 6,252 |
// Build settings for the rapture json-lift module.
object project extends ProjectSettings {
def scalaVersion = "2.10.4"
def version = "1.1.0"
def name = "json-lift"
def description = "Rapture JSON/Lift provides support the Lift parser in Rapture JSON"
// Intra-Rapture module dependencies: module name -> version.
def dependencies = Seq(
"json" -> "1.1.0"
)
// External artifacts: (groupId, artifactId, version).
def thirdPartyDependencies = Seq(
("net.liftweb", "lift-json_2.10", "2.5")
)
// Import lines associated with this module — presumably injected for
// documentation/REPL use by ProjectSettings; confirm against the build plugin.
def imports = Seq(
"rapture.core._",
"rapture.json._",
"jsonBackends.lift._"
)
}
| joescii/rapture-json-lift | project/project.scala | Scala | apache-2.0 | 450 |
package org.apache.spot.proxy
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql._
import org.apache.spark.sql.types.{StructType, StructField, StringType}
import scala.io.Source
import org.apache.spot.proxy.ProxySchema._
object ProxyFeedback {

  /**
   * Load the feedback file for proxy data.
   *
   * @param sc Spark context.
   * @param sqlContext Spark SQL context.
   * @param feedbackFile Local machine path to the proxy feedback file.
   * @param duplicationFactor Number of words to create per flagged feedback entry.
   * @return DataFrame of the feedback events.
   */
  def loadFeedbackDF(sc: SparkContext,
                     sqlContext: SQLContext,
                     feedbackFile: String,
                     duplicationFactor: Int): DataFrame = {

    // Output schema: the nine proxy columns retained from the feedback file.
    val feedbackSchema = StructType(
      List(StructField(Date, StringType, nullable = true),
        StructField(Time, StringType, nullable = true),
        StructField(ClientIP, StringType, nullable = true),
        StructField(Host, StringType, nullable = true),
        StructField(ReqMethod, StringType, nullable = true),
        StructField(UserAgent, StringType, nullable = true),
        StructField(ResponseContentType, StringType, nullable = true),
        StructField(RespCode, StringType, nullable = true),
        StructField(FullURI, StringType, nullable = true)))

    if (new java.io.File(feedbackFile).exists) {

      // Column positions within the tab-separated feedback export.
      val dateIndex = 0
      val timeIndex = 1
      val clientIpIndex = 2
      val hostIndex = 3
      val reqMethodIndex = 4
      val userAgentIndex = 5
      val resContTypeIndex = 6
      val respCodeIndex = 11
      val fullURIIndex = 18
      val fullURISeverityIndex = 22

      // Fix: close the file handle. The original
      // `Source.fromFile(feedbackFile).getLines()` never closed the
      // underlying source, leaking a file descriptor per invocation.
      val source = scala.io.Source.fromFile(feedbackFile)
      val lines = try {
        source.getLines().toArray.drop(1) // drop the header row
      } finally {
        source.close()
      }

      val feedback: RDD[String] = sc.parallelize(lines)
      // Keep only rows flagged with severity 3, project the nine columns,
      // and duplicate each resulting row `duplicationFactor` times.
      sqlContext.createDataFrame(feedback.map(_.split("\\t"))
        .filter(row => row(fullURISeverityIndex).trim.toInt == 3)
        .map(row => Row.fromSeq(List(row(dateIndex),
          row(timeIndex),
          row(clientIpIndex),
          row(hostIndex),
          row(reqMethodIndex),
          row(userAgentIndex),
          row(resContTypeIndex),
          row(respCodeIndex),
          row(fullURIIndex))))
        .flatMap(row => List.fill(duplicationFactor)(row)), feedbackSchema)
        .select(Date, Time, ClientIP, Host, ReqMethod, UserAgent, ResponseContentType, RespCode, FullURI)
    } else {
      // Missing feedback file: return an empty DataFrame with the same schema.
      sqlContext.createDataFrame(sc.emptyRDD[Row], feedbackSchema)
    }
  }
}
| kpeiruza/incubator-spot | spot-ml/src/main/scala/org/apache/spot/proxy/ProxyFeedback.scala | Scala | apache-2.0 | 2,556 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming
import java.io.{File, NotSerializableException}
import java.util.Locale
import java.util.concurrent.{CountDownLatch, TimeUnit}
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.Queue
import org.apache.commons.io.FileUtils
import org.scalatest.{Assertions, BeforeAndAfter, PrivateMethodTester}
import org.scalatest.concurrent.Eventually._
import org.scalatest.concurrent.Timeouts
import org.scalatest.exceptions.TestFailedDueToTimeoutException
import org.scalatest.time.SpanSugar._
import org.apache.spark._
import org.apache.spark.internal.Logging
import org.apache.spark.metrics.MetricsSystem
import org.apache.spark.metrics.source.Source
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.receiver.Receiver
import org.apache.spark.util.Utils
class StreamingContextSuite extends SparkFunSuite with BeforeAndAfter with Timeouts with Logging {
val master = "local[2]"
val appName = this.getClass.getSimpleName
val batchDuration = Milliseconds(500)
val sparkHome = "someDir"
val envPair = "key" -> "value"
val conf = new SparkConf().setMaster(master).setAppName(appName)
var sc: SparkContext = null
var ssc: StreamingContext = null
after {
if (ssc != null) {
ssc.stop()
ssc = null
}
if (sc != null) {
sc.stop()
sc = null
}
}
test("from no conf constructor") {
ssc = new StreamingContext(master, appName, batchDuration)
assert(ssc.sparkContext.conf.get("spark.master") === master)
assert(ssc.sparkContext.conf.get("spark.app.name") === appName)
}
test("from no conf + spark home") {
ssc = new StreamingContext(master, appName, batchDuration, sparkHome, Nil)
assert(ssc.conf.get("spark.home") === sparkHome)
}
test("from no conf + spark home + env") {
ssc = new StreamingContext(master, appName, batchDuration,
sparkHome, Nil, Map(envPair))
assert(ssc.conf.getExecutorEnv.contains(envPair))
}
test("from conf with settings") {
val myConf = SparkContext.updatedConf(new SparkConf(false), master, appName)
myConf.set("spark.dummyTimeConfig", "10s")
ssc = new StreamingContext(myConf, batchDuration)
assert(ssc.conf.getTimeAsSeconds("spark.dummyTimeConfig", "-1") === 10)
}
test("from existing SparkContext") {
sc = new SparkContext(master, appName)
ssc = new StreamingContext(sc, batchDuration)
}
test("from existing SparkContext with settings") {
val myConf = SparkContext.updatedConf(new SparkConf(false), master, appName)
myConf.set("spark.dummyTimeConfig", "10s")
ssc = new StreamingContext(myConf, batchDuration)
assert(ssc.conf.getTimeAsSeconds("spark.dummyTimeConfig", "-1") === 10)
}
test("from checkpoint") {
val myConf = SparkContext.updatedConf(new SparkConf(false), master, appName)
myConf.set("spark.dummyTimeConfig", "10s")
val ssc1 = new StreamingContext(myConf, batchDuration)
addInputStream(ssc1).register()
ssc1.start()
val cp = new Checkpoint(ssc1, Time(1000))
assert(
Utils.timeStringAsSeconds(cp.sparkConfPairs
.toMap.getOrElse("spark.dummyTimeConfig", "-1")) === 10)
ssc1.stop()
val newCp = Utils.deserialize[Checkpoint](Utils.serialize(cp))
assert(
newCp.createSparkConf().getTimeAsSeconds("spark.dummyTimeConfig", "-1") === 10)
ssc = new StreamingContext(null, newCp, null)
assert(ssc.conf.getTimeAsSeconds("spark.dummyTimeConfig", "-1") === 10)
}
test("checkPoint from conf") {
val checkpointDirectory = Utils.createTempDir().getAbsolutePath()
val myConf = SparkContext.updatedConf(new SparkConf(false), master, appName)
myConf.set("spark.streaming.checkpoint.directory", checkpointDirectory)
ssc = new StreamingContext(myConf, batchDuration)
assert(ssc.checkpointDir != null)
}
test("state matching") {
import StreamingContextState._
assert(INITIALIZED === INITIALIZED)
assert(INITIALIZED != ACTIVE)
}
test("start and stop state check") {
ssc = new StreamingContext(master, appName, batchDuration)
addInputStream(ssc).register()
assert(ssc.getState() === StreamingContextState.INITIALIZED)
ssc.start()
assert(ssc.getState() === StreamingContextState.ACTIVE)
ssc.stop()
assert(ssc.getState() === StreamingContextState.STOPPED)
// Make sure that the SparkContext is also stopped by default
intercept[Exception] {
ssc.sparkContext.makeRDD(1 to 10)
}
}
test("start with non-serializable DStream checkpoints") {
val checkpointDir = Utils.createTempDir()
ssc = new StreamingContext(conf, batchDuration)
ssc.checkpoint(checkpointDir.getAbsolutePath)
addInputStream(ssc).foreachRDD { rdd =>
// Refer to this.appName from inside closure so that this closure refers to
// the instance of StreamingContextSuite, and is therefore not serializable
rdd.count() + appName
}
// Test whether start() fails early when checkpointing is enabled
val exception = intercept[NotSerializableException] {
ssc.start()
}
assert(exception.getMessage().contains("DStreams with their functions are not serializable"))
assert(ssc.getState() !== StreamingContextState.ACTIVE)
assert(StreamingContext.getActive().isEmpty)
}
test("start failure should stop internal components") {
ssc = new StreamingContext(conf, batchDuration)
val inputStream = addInputStream(ssc)
val updateFunc = (values: Seq[Int], state: Option[Int]) => {
Some(values.sum + state.getOrElse(0))
}
inputStream.map(x => (x, 1)).updateStateByKey[Int](updateFunc)
// Require that the start fails because checkpoint directory was not set
intercept[Exception] {
ssc.start()
}
assert(ssc.getState() === StreamingContextState.STOPPED)
assert(ssc.scheduler.isStarted === false)
}
test("start should set local properties of streaming jobs correctly") {
ssc = new StreamingContext(conf, batchDuration)
ssc.sc.setJobGroup("non-streaming", "non-streaming", true)
val sc = ssc.sc
@volatile var jobGroupFound: String = ""
@volatile var jobDescFound: String = ""
@volatile var jobInterruptFound: String = ""
@volatile var customPropFound: String = ""
@volatile var allFound: Boolean = false
addInputStream(ssc).foreachRDD { rdd =>
jobGroupFound = sc.getLocalProperty(SparkContext.SPARK_JOB_GROUP_ID)
jobDescFound = sc.getLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION)
jobInterruptFound = sc.getLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL)
customPropFound = sc.getLocalProperty("customPropKey")
allFound = true
}
ssc.sc.setLocalProperty("customPropKey", "value1")
ssc.start()
// Local props set after start should be ignored
ssc.sc.setLocalProperty("customPropKey", "value2")
eventually(timeout(10 seconds), interval(10 milliseconds)) {
assert(allFound === true)
}
// Verify streaming jobs have expected thread-local properties
assert(jobGroupFound === null)
assert(jobDescFound.contains("Streaming job from"))
assert(jobInterruptFound === "false")
assert(customPropFound === "value1")
// Verify current thread's thread-local properties have not changed
assert(sc.getLocalProperty(SparkContext.SPARK_JOB_GROUP_ID) === "non-streaming")
assert(sc.getLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION) === "non-streaming")
assert(sc.getLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL) === "true")
assert(sc.getLocalProperty("customPropKey") === "value2")
}
test("start multiple times") {
ssc = new StreamingContext(master, appName, batchDuration)
addInputStream(ssc).register()
ssc.start()
assert(ssc.getState() === StreamingContextState.ACTIVE)
ssc.start()
assert(ssc.getState() === StreamingContextState.ACTIVE)
}
test("stop multiple times") {
ssc = new StreamingContext(master, appName, batchDuration)
addInputStream(ssc).register()
ssc.start()
ssc.stop()
assert(ssc.getState() === StreamingContextState.STOPPED)
ssc.stop()
assert(ssc.getState() === StreamingContextState.STOPPED)
}
test("stop before start") {
ssc = new StreamingContext(master, appName, batchDuration)
addInputStream(ssc).register()
ssc.stop() // stop before start should not throw exception
assert(ssc.getState() === StreamingContextState.STOPPED)
}
test("start after stop") {
// Regression test for SPARK-4301
ssc = new StreamingContext(master, appName, batchDuration)
addInputStream(ssc).register()
ssc.stop()
intercept[IllegalStateException] {
ssc.start() // start after stop should throw exception
}
assert(ssc.getState() === StreamingContextState.STOPPED)
}
test("stop only streaming context") {
val conf = new SparkConf().setMaster(master).setAppName(appName)
// Explicitly do not stop SparkContext
ssc = new StreamingContext(conf, batchDuration)
sc = ssc.sparkContext
addInputStream(ssc).register()
ssc.start()
ssc.stop(stopSparkContext = false)
assert(ssc.getState() === StreamingContextState.STOPPED)
assert(sc.makeRDD(1 to 100).collect().size === 100)
sc.stop()
// Implicitly do not stop SparkContext
conf.set("spark.streaming.stopSparkContextByDefault", "false")
ssc = new StreamingContext(conf, batchDuration)
sc = ssc.sparkContext
addInputStream(ssc).register()
ssc.start()
ssc.stop()
assert(sc.makeRDD(1 to 100).collect().size === 100)
sc.stop()
}
test("stop(stopSparkContext=true) after stop(stopSparkContext=false)") {
ssc = new StreamingContext(master, appName, batchDuration)
addInputStream(ssc).register()
ssc.stop(stopSparkContext = false)
assert(ssc.sc.makeRDD(1 to 100).collect().size === 100)
ssc.stop(stopSparkContext = true)
// Check that the SparkContext is actually stopped:
intercept[Exception] {
ssc.sc.makeRDD(1 to 100).collect()
}
}
// Repeatedly starts a receiver-based streaming job and stops it gracefully,
// asserting that every record handed to the receiver was also processed.
// TestReceiver.counter is pre-set to 1, hence the `runningCount + 1` check.
test("stop gracefully") {
val conf = new SparkConf().setMaster(master).setAppName(appName)
conf.set("spark.dummyTimeConfig", "3600s")
sc = new SparkContext(conf)
for (i <- 1 to 4) {
logInfo("==================================\n\n\n")
ssc = new StreamingContext(sc, Milliseconds(100))
@volatile var runningCount = 0
TestReceiver.counter.set(1)
val input = ssc.receiverStream(new TestReceiver)
input.count().foreachRDD { rdd =>
val count = rdd.first()
runningCount += count.toInt
logInfo("Count = " + count + ", Running count = " + runningCount)
}
ssc.start()
// wait until at least one batch has actually been processed
eventually(timeout(10.seconds), interval(10.millis)) {
assert(runningCount > 0)
}
ssc.stop(stopSparkContext = false, stopGracefully = true)
logInfo("Running count = " + runningCount)
logInfo("TestReceiver.counter = " + TestReceiver.counter.get())
assert(
TestReceiver.counter.get() == runningCount + 1,
"Received records = " + TestReceiver.counter.get() + ", " +
"processed records = " + runningCount
)
Thread.sleep(100)
}
}
test("stop gracefully even if a receiver misses StopReceiver") {
// This is not a deterministic unit. But if this unit test is flaky, then there is definitely
// something wrong. See SPARK-5681
val conf = new SparkConf().setMaster(master).setAppName(appName)
sc = new SparkContext(conf)
ssc = new StreamingContext(sc, Milliseconds(100))
val input = ssc.receiverStream(new TestReceiver)
input.foreachRDD(_ => {})
ssc.start()
// Call `ssc.stop` at once so that it's possible that the receiver will miss "StopReceiver"
failAfter(30000 millis) {
ssc.stop(stopSparkContext = true, stopGracefully = true)
}
}
test("stop slow receiver gracefully") {
val conf = new SparkConf().setMaster(master).setAppName(appName)
conf.set("spark.streaming.gracefulStopTimeout", "20000s")
sc = new SparkContext(conf)
logInfo("==================================\n\n\n")
ssc = new StreamingContext(sc, Milliseconds(100))
var runningCount = 0
SlowTestReceiver.receivedAllRecords = false
// Create test receiver that sleeps in onStop()
val totalNumRecords = 15
val recordsPerSecond = 1
val input = ssc.receiverStream(new SlowTestReceiver(totalNumRecords, recordsPerSecond))
input.count().foreachRDD { rdd =>
val count = rdd.first()
runningCount += count.toInt
logInfo("Count = " + count + ", Running count = " + runningCount)
}
ssc.start()
ssc.awaitTerminationOrTimeout(500)
ssc.stop(stopSparkContext = false, stopGracefully = true)
logInfo("Running count = " + runningCount)
assert(runningCount > 0)
assert(runningCount == totalNumRecords)
Thread.sleep(100)
}
test ("registering and de-registering of streamingSource") {
val conf = new SparkConf().setMaster(master).setAppName(appName)
ssc = new StreamingContext(conf, batchDuration)
assert(ssc.getState() === StreamingContextState.INITIALIZED)
addInputStream(ssc).register()
ssc.start()
val sources = StreamingContextSuite.getSources(ssc.env.metricsSystem)
val streamingSource = StreamingContextSuite.getStreamingSource(ssc)
assert(sources.contains(streamingSource))
assert(ssc.getState() === StreamingContextState.ACTIVE)
ssc.stop()
val sourcesAfterStop = StreamingContextSuite.getSources(ssc.env.metricsSystem)
val streamingSourceAfterStop = StreamingContextSuite.getStreamingSource(ssc)
assert(ssc.getState() === StreamingContextState.STOPPED)
assert(!sourcesAfterStop.contains(streamingSourceAfterStop))
}
// End-to-end checks of awaitTermination(): start() must not block,
// awaitTerminationOrTimeout() must return after its timeout,
// awaitTermination() must block while the context is active, and must
// unblock once the context is stopped (from another thread).
test("awaitTermination") {
ssc = new StreamingContext(master, appName, batchDuration)
val inputStream = addInputStream(ssc)
inputStream.map(x => x).register()
// test whether start() blocks indefinitely or not
failAfter(2000 millis) {
ssc.start()
}
// test whether awaitTerminationOrTimeout() exits after the given amount of time
failAfter(1000 millis) {
ssc.awaitTerminationOrTimeout(500)
}
// test whether awaitTermination() does not exit if no time is given
val exception = intercept[Exception] {
failAfter(1000 millis) {
ssc.awaitTermination()
throw new Exception("Did not wait for stop")
}
}
assert(exception.isInstanceOf[TestFailedDueToTimeoutException], "Did not wait for stop")
var t: Thread = null
// test whether wait exits if context is stopped
failAfter(10000 millis) { // 10 seconds because spark takes a long time to shutdown
t = new Thread() {
override def run() {
// Stop the context from another thread; awaitTermination() below must return.
Thread.sleep(500)
ssc.stop()
}
}
t.start()
ssc.awaitTermination()
}
// SparkContext.stop will set SparkEnv.env to null. We need to make sure SparkContext is stopped
// before running the next test. Otherwise, it's possible that we set SparkEnv.env to null after
// the next test creates the new SparkContext and fail the test.
t.join()
}
// awaitTermination() must return immediately on an already-stopped context.
test("awaitTermination after stop") {
ssc = new StreamingContext(master, appName, batchDuration)
val inputStream = addInputStream(ssc)
inputStream.map(x => x).register()
failAfter(10000 millis) {
ssc.start()
ssc.stop()
ssc.awaitTermination()
}
}
// An exception thrown inside a map task must surface through awaitTermination*.
test("awaitTermination with error in task") {
ssc = new StreamingContext(master, appName, batchDuration)
val inputStream = addInputStream(ssc)
inputStream
.map { x => throw new TestException("error in map task"); x }
.foreachRDD(_.count())
val exception = intercept[Exception] {
ssc.start()
ssc.awaitTerminationOrTimeout(5000)
}
assert(exception.getMessage.contains("map task"), "Expected exception not thrown")
}
// An exception thrown during job generation (driver-side transform) must
// surface as the original TestException, not be wrapped away.
test("awaitTermination with error in job generation") {
ssc = new StreamingContext(master, appName, batchDuration)
val inputStream = addInputStream(ssc)
inputStream.transform { rdd => throw new TestException("error in transform"); rdd }.register()
val exception = intercept[TestException] {
ssc.start()
ssc.awaitTerminationOrTimeout(5000)
}
assert(exception.getMessage.contains("transform"), "Expected exception not thrown")
}
// awaitTerminationOrTimeout() must return false on timeout while the context is
// running, and true once the context has been stopped.
test("awaitTerminationOrTimeout") {
ssc = new StreamingContext(master, appName, batchDuration)
val inputStream = addInputStream(ssc)
inputStream.map(x => x).register()
ssc.start()
// test whether awaitTerminationOrTimeout() returns false after the given amount of time
failAfter(1000 millis) {
assert(ssc.awaitTerminationOrTimeout(500) === false)
}
var t: Thread = null
// test whether awaitTerminationOrTimeout() returns true if context is stopped
failAfter(10000 millis) { // 10 seconds because spark takes a long time to shutdown
t = new Thread() {
override def run() {
// Stop from another thread while the main thread waits below.
Thread.sleep(500)
ssc.stop()
}
}
t.start()
assert(ssc.awaitTerminationOrTimeout(10000) === true)
}
// SparkContext.stop will set SparkEnv.env to null. We need to make sure SparkContext is stopped
// before running the next test. Otherwise, it's possible that we set SparkEnv.env to null after
// the next test creates the new SparkContext and fail the test.
t.join()
}
// Exercises StreamingContext.getOrCreate(checkpointPath, creatingFunc):
// creates a new context for an empty path, fails on a corrupted checkpoint
// unless createOnError = true, and recovers context + checkpointed SparkConf
// from a valid checkpoint (unless an existing SparkContext supplies the conf).
test("getOrCreate") {
val conf = new SparkConf().setMaster(master).setAppName(appName)
// Function to create StreamingContext that has a config to identify it to be new context
var newContextCreated = false
def creatingFunction(): StreamingContext = {
newContextCreated = true
new StreamingContext(conf, batchDuration)
}
// Call ssc.stop after a body of code
def testGetOrCreate(body: => Unit): Unit = {
newContextCreated = false
try {
body
} finally {
if (ssc != null) {
ssc.stop()
}
ssc = null
}
}
val emptyPath = Utils.createTempDir().getAbsolutePath()
// getOrCreate should create new context with empty path
testGetOrCreate {
ssc = StreamingContext.getOrCreate(emptyPath, creatingFunction _)
assert(ssc != null, "no context created")
assert(newContextCreated, "new context not created")
}
val corruptedCheckpointPath = createCorruptedCheckpoint()
// getOrCreate should throw exception with fake checkpoint file and createOnError = false
intercept[Exception] {
ssc = StreamingContext.getOrCreate(corruptedCheckpointPath, creatingFunction _)
}
// getOrCreate should throw exception with fake checkpoint file
intercept[Exception] {
ssc = StreamingContext.getOrCreate(
corruptedCheckpointPath, creatingFunction _, createOnError = false)
}
// getOrCreate should create new context with fake checkpoint file and createOnError = true
testGetOrCreate {
ssc = StreamingContext.getOrCreate(
corruptedCheckpointPath, creatingFunction _, createOnError = true)
assert(ssc != null, "no context created")
assert(newContextCreated, "new context not created")
}
val checkpointPath = createValidCheckpoint()
// getOrCreate should recover context with checkpoint path, and recover old configuration
testGetOrCreate {
ssc = StreamingContext.getOrCreate(checkpointPath, creatingFunction _)
assert(ssc != null, "no context created")
assert(!newContextCreated, "old context not recovered")
assert(ssc.conf.get("someKey") === "someValue", "checkpointed config not recovered")
}
// getOrCreate should recover StreamingContext with existing SparkContext;
// in that case the existing SparkContext's conf wins over the checkpointed one.
testGetOrCreate {
sc = new SparkContext(conf)
ssc = StreamingContext.getOrCreate(checkpointPath, creatingFunction _)
assert(ssc != null, "no context created")
assert(!newContextCreated, "old context not recovered")
assert(!ssc.conf.contains("someKey"), "checkpointed config unexpectedly recovered")
}
}
// Verifies the active-context registry: getActive() returns a context only
// between start() and stop(), and getActiveOrCreate(creatingFunc) returns the
// active one or creates a fresh one otherwise.
test("getActive and getActiveOrCreate") {
require(StreamingContext.getActive().isEmpty, "context exists from before")
sc = new SparkContext(conf)
var newContextCreated = false
def creatingFunc(): StreamingContext = {
newContextCreated = true
val newSsc = new StreamingContext(sc, batchDuration)
val input = addInputStream(newSsc)
input.foreachRDD { rdd => rdd.count }
newSsc
}
// Runs `body` and always stops/clears ssc (keeping the shared SparkContext).
def testGetActiveOrCreate(body: => Unit): Unit = {
newContextCreated = false
try {
body
} finally {
if (ssc != null) {
ssc.stop(stopSparkContext = false)
}
ssc = null
}
}
// getActiveOrCreate should create new context and getActive should return it only
// after starting the context
testGetActiveOrCreate {
ssc = StreamingContext.getActiveOrCreate(creatingFunc _)
assert(ssc != null, "no context created")
assert(newContextCreated === true, "new context not created")
assert(StreamingContext.getActive().isEmpty,
"new initialized context returned before starting")
ssc.start()
assert(StreamingContext.getActive() === Some(ssc),
"active context not returned")
assert(StreamingContext.getActiveOrCreate(creatingFunc _) === ssc,
"active context not returned")
ssc.stop()
assert(StreamingContext.getActive().isEmpty,
"inactive context returned")
assert(StreamingContext.getActiveOrCreate(creatingFunc _) !== ssc,
"inactive context returned")
}
// getActiveOrCreate and getActive should return independently created context after activating
testGetActiveOrCreate {
ssc = creatingFunc() // Create
assert(StreamingContext.getActive().isEmpty,
"new initialized context returned before starting")
ssc.start()
assert(StreamingContext.getActive() === Some(ssc),
"active context not returned")
assert(StreamingContext.getActiveOrCreate(creatingFunc _) === ssc,
"active context not returned")
ssc.stop()
assert(StreamingContext.getActive().isEmpty,
"inactive context returned")
}
}
// Exercises StreamingContext.getActiveOrCreate(checkpointPath, creatingFunc):
//   1. returns the currently active context when one exists,
//   2. creates a new context when the checkpoint path is empty,
//   3. fails on a corrupted checkpoint unless createOnError = true,
//   4. recovers the context (and its checkpointed SparkConf) otherwise.
test("getActiveOrCreate with checkpoint") {
  // Function to create StreamingContext that has a config to identify it to be new context
  var newContextCreated = false
  def creatingFunction(): StreamingContext = {
    newContextCreated = true
    new StreamingContext(conf, batchDuration)
  }
  // Runs `body` and always stops/clears `ssc` afterwards so that each case
  // starts with no active context (enforced by the require below).
  def testGetActiveOrCreate(body: => Unit): Unit = {
    require(StreamingContext.getActive().isEmpty) // no active context
    newContextCreated = false
    try {
      body
    } finally {
      if (ssc != null) {
        ssc.stop()
      }
      ssc = null
    }
  }
  val emptyPath = Utils.createTempDir().getAbsolutePath()
  val corruptedCheckpointPath = createCorruptedCheckpoint()
  val checkpointPath = createValidCheckpoint()
  // getActiveOrCreate should return the current active context if there is one
  testGetActiveOrCreate {
    ssc = new StreamingContext(
      conf.clone.set("spark.streaming.clock", "org.apache.spark.util.ManualClock"), batchDuration)
    addInputStream(ssc).register()
    ssc.start()
    val returnedSsc = StreamingContext.getActiveOrCreate(checkpointPath, creatingFunction _)
    assert(!newContextCreated, "new context created instead of returning")
    assert(returnedSsc.eq(ssc), "returned context is not the activated context")
  }
  // getActiveOrCreate should create new context with empty path
  testGetActiveOrCreate {
    ssc = StreamingContext.getActiveOrCreate(emptyPath, creatingFunction _)
    assert(ssc != null, "no context created")
    assert(newContextCreated, "new context not created")
  }
  // getActiveOrCreate should throw exception with fake checkpoint file and createOnError = false
  // (Fixed: this previously called getOrCreate by copy-paste, so the method
  // under test was never exercised with its default createOnError here.)
  intercept[Exception] {
    ssc = StreamingContext.getActiveOrCreate(corruptedCheckpointPath, creatingFunction _)
  }
  // getActiveOrCreate should throw exception with fake checkpoint file
  intercept[Exception] {
    ssc = StreamingContext.getActiveOrCreate(
      corruptedCheckpointPath, creatingFunction _, createOnError = false)
  }
  // getActiveOrCreate should create new context with fake
  // checkpoint file and createOnError = true
  testGetActiveOrCreate {
    ssc = StreamingContext.getActiveOrCreate(
      corruptedCheckpointPath, creatingFunction _, createOnError = true)
    assert(ssc != null, "no context created")
    assert(newContextCreated, "new context not created")
  }
  // getActiveOrCreate should recover context with checkpoint path, and recover old configuration
  testGetActiveOrCreate {
    ssc = StreamingContext.getActiveOrCreate(checkpointPath, creatingFunction _)
    assert(ssc != null, "no context created")
    assert(!newContextCreated, "old context not recovered")
    assert(ssc.conf.get("someKey") === "someValue")
  }
}
// Only one StreamingContext may be started at a time: creating a second one on
// the same SparkContext is allowed, but starting it must fail with
// IllegalStateException mentioning the already-active StreamingContext.
test("multiple streaming contexts") {
sc = new SparkContext(
conf.clone.set("spark.streaming.clock", "org.apache.spark.util.ManualClock"))
ssc = new StreamingContext(sc, Seconds(1))
val input = addInputStream(ssc)
input.foreachRDD { rdd => rdd.count }
ssc.start()
// Creating another streaming context should not create errors
val anotherSsc = new StreamingContext(sc, Seconds(10))
val anotherInput = addInputStream(anotherSsc)
anotherInput.foreachRDD { rdd => rdd.count }
val exception = intercept[IllegalStateException] {
anotherSsc.start()
}
assert(exception.getMessage.contains("StreamingContext"), "Did not get the right exception")
}
// Delegates to the testPackage helper below, which asserts DStream/RDD creation sites.
test("DStream and generated RDD creation sites") {
testPackage.test()
}
// After start() or stop(), the DStream graph is frozen: adding inputs,
// transformations, or output operations must throw IllegalStateException whose
// message mentions the offending state ("start" / "stop").
test("throw exception on using active or stopped context") {
val conf = new SparkConf()
.setMaster(master)
.setAppName(appName)
.set("spark.streaming.clock", "org.apache.spark.util.ManualClock")
ssc = new StreamingContext(conf, batchDuration)
require(ssc.getState() === StreamingContextState.INITIALIZED)
val input = addInputStream(ssc)
val transformed = input.map { x => x}
transformed.foreachRDD { rdd => rdd.count }
// Asserts that `body` throws IllegalStateException containing `expectedErrorMsg`
// (case-insensitive); `clue` is reported on failure.
def testForException(clue: String, expectedErrorMsg: String)(body: => Unit): Unit = {
withClue(clue) {
val ex = intercept[IllegalStateException] {
body
}
assert(ex.getMessage.toLowerCase(Locale.ROOT).contains(expectedErrorMsg))
}
}
ssc.start()
require(ssc.getState() === StreamingContextState.ACTIVE)
testForException("no error on adding input after start", "start") {
addInputStream(ssc) }
testForException("no error on adding transformation after start", "start") {
input.map { x => x * 2 } }
testForException("no error on adding output operation after start", "start") {
transformed.foreachRDD { rdd => rdd.collect() } }
ssc.stop()
require(ssc.getState() === StreamingContextState.STOPPED)
testForException("no error on adding input after stop", "stop") {
addInputStream(ssc) }
testForException("no error on adding transformation after stop", "stop") {
input.map { x => x * 2 } }
testForException("no error on adding output operation after stop", "stop") {
transformed.foreachRDD { rdd => rdd.collect() } }
}
// Checkpoint recovery of a graph containing a queueStream must fail with a
// SparkException carrying an explanatory message.
test("queueStream doesn't support checkpointing") {
val checkpointDirectory = Utils.createTempDir().getAbsolutePath()
def creatingFunction(): StreamingContext = {
val _ssc = new StreamingContext(conf, batchDuration)
val rdd = _ssc.sparkContext.parallelize(1 to 10)
_ssc.checkpoint(checkpointDirectory)
_ssc.queueStream[Int](Queue(rdd)).register()
_ssc
}
ssc = StreamingContext.getOrCreate(checkpointDirectory, creatingFunction _)
ssc.start()
// Wait until at least one checkpoint has actually been written.
eventually(timeout(10000 millis)) {
assert(Checkpoint.getCheckpointFiles(checkpointDirectory).size > 1)
}
ssc.stop()
// Recovery from the checkpoint must now be rejected.
val e = intercept[SparkException] {
ssc = StreamingContext.getOrCreate(checkpointDirectory, creatingFunction _)
}
// StreamingContext.validate changes the message, so use "contains" here
assert(e.getCause.getMessage.contains("queueStream doesn't support checkpointing. " +
"Please don't use queueStream when checkpointing is enabled."))
}
// Regression test: an InputDStream with no attached output operation must not
// crash batch processing.
test("Creating an InputDStream but not using it should not crash") {
ssc = new StreamingContext(master, appName, batchDuration)
val input1 = addInputStream(ssc)
val input2 = addInputStream(ssc)
val output = new TestOutputStream(input2)
output.register()
val batchCount = new BatchCounter(ssc)
ssc.start()
// Just wait for completing 2 batches to make sure it triggers
// `DStream.getMaxInputStreamRememberDuration`
batchCount.waitUntilBatchesCompleted(2, 10000)
// Throw the exception if crash
ssc.awaitTerminationOrTimeout(1)
ssc.stop()
}
// Regression test for SPARK-18560: remote block fetches of receiver data must
// deserialize correctly. Stops the context from a daemon thread once data flows.
test("SPARK-18560 Receiver data should be deserialized properly.") {
// Start a two nodes cluster, so receiver will use one node, and Spark jobs will use the
// other one. Then Spark jobs need to fetch remote blocks and it will trigger SPARK-18560.
val conf = new SparkConf().setMaster("local-cluster[2,1,1024]").setAppName(appName)
ssc = new StreamingContext(conf, Milliseconds(100))
val input = ssc.receiverStream(new TestReceiver)
val latch = new CountDownLatch(1)
@volatile var stopping = false
input.count().foreachRDD { rdd =>
// Make sure we can read from BlockRDD
if (rdd.collect().headOption.getOrElse(0L) > 0 && !stopping) {
// Stop StreamingContext to unblock "awaitTerminationOrTimeout"
stopping = true
new Thread() {
setDaemon(true)
override def run(): Unit = {
ssc.stop(stopSparkContext = true, stopGracefully = false)
latch.countDown()
}
}.start()
}
}
ssc.start()
ssc.awaitTerminationOrTimeout(60000)
// Wait until `ssc.stop` returns. Otherwise, we may finish this test too fast and leak an active
// SparkContext. Note: the stop codes in `after` will just do nothing if `ssc.stop` in this test
// is running.
assert(latch.await(60, TimeUnit.SECONDS))
}
/**
 * Attaches a TestInputStream to `s` where batch i (for i in 1..100) carries
 * the values 1 to i, using a single partition per batch.
 */
def addInputStream(s: StreamingContext): DStream[Int] = {
  val batches = for (i <- 1 to 100) yield 1 to i
  new TestInputStream(s, batches, 1)
}
// Builds a real checkpoint by running a context (tagged with marker config
// "someKey" -> "someValue") until checkpoint files exist; returns the dir.
def createValidCheckpoint(): String = {
val testDirectory = Utils.createTempDir().getAbsolutePath()
val checkpointDirectory = Utils.createTempDir().getAbsolutePath()
ssc = new StreamingContext(conf.clone.set("someKey", "someValue"), batchDuration)
ssc.checkpoint(checkpointDirectory)
ssc.textFileStream(testDirectory).foreachRDD { rdd => rdd.count() }
ssc.start()
try {
eventually(timeout(30000 millis)) {
assert(Checkpoint.getCheckpointFiles(checkpointDirectory).size > 1)
}
} finally {
ssc.stop()
}
checkpointDirectory
}
// Writes garbage into a correctly-named checkpoint file so recovery must fail.
def createCorruptedCheckpoint(): String = {
val checkpointDirectory = Utils.createTempDir().getAbsolutePath()
val fakeCheckpointFile = Checkpoint.checkpointFile(checkpointDirectory, Time(1000))
FileUtils.write(new File(fakeCheckpointFile.toString()), "blablabla")
assert(Checkpoint.getCheckpointFiles(checkpointDirectory).nonEmpty)
checkpointDirectory
}
}
/** Exception thrown deliberately inside tasks/transforms by the tests above. */
class TestException(msg: String) extends Exception(msg)
/** Custom receiver for testing whether all data received by a receiver gets processed or not */
class TestReceiver extends Receiver[Int](StorageLevel.MEMORY_ONLY) with Logging {
// Receiving thread, created in onStart() and joined in onStop().
var receivingThreadOption: Option[Thread] = None
def onStart() {
val thread = new Thread() {
override def run() {
logInfo("Receiving started")
// Store ever-increasing values until the receiver is asked to stop.
while (!isStopped) {
store(TestReceiver.counter.getAndIncrement)
}
logInfo("Receiving stopped at count value of " + TestReceiver.counter.get())
}
}
receivingThreadOption = Some(thread)
thread.start()
}
def onStop() {
// no cleanup to be done, the receiving thread should stop on its own, so just wait for it.
receivingThreadOption.foreach(_.join())
}
}
// Counter shared by all TestReceiver instances in the JVM.
object TestReceiver {
val counter = new AtomicInteger(1)
}
/** Custom receiver for testing whether a slow receiver can be shutdown gracefully or not */
class SlowTestReceiver(totalRecords: Int, recordsPerSecond: Int)
extends Receiver[Int](StorageLevel.MEMORY_ONLY) with Logging {
// Receiving thread, created in onStart(); onStop() waits for it to finish.
var receivingThreadOption: Option[Thread] = None
def onStart() {
val thread = new Thread() {
override def run() {
logInfo("Receiving started")
// Emit records at roughly recordsPerSecond until all have been produced.
for(i <- 1 to totalRecords) {
Thread.sleep(1000 / recordsPerSecond)
store(i)
}
SlowTestReceiver.receivedAllRecords = true
logInfo(s"Received all $totalRecords records")
}
}
receivingThreadOption = Some(thread)
thread.start()
}
def onStop() {
// Simulate slow receiver by waiting for all records to be produced
while (!SlowTestReceiver.receivedAllRecords) {
Thread.sleep(100)
}
// no cleanup to be done, the receiving thread should stop on its own
}
}
// Flag read by onStop() and reset by the graceful-shutdown test before each run.
object SlowTestReceiver {
var receivedAllRecords = false
}
/** Streaming application for testing DStream and RDD creation sites */
package object testPackage extends Assertions {
// Runs a short streaming app and asserts that the call-site info captured on
// the DStream and on the RDDs it generates points back at this suite.
def test() {
val conf = new SparkConf().setMaster("local").setAppName("CreationSite test")
val ssc = new StreamingContext(conf, Milliseconds(100))
try {
val inputStream = ssc.receiverStream(new TestReceiver)
// Verify creation site of DStream
val creationSite = inputStream.creationSite
assert(creationSite.shortForm.contains("receiverStream") &&
creationSite.shortForm.contains("StreamingContextSuite")
)
assert(creationSite.longForm.contains("testPackage"))
// Verify creation site of generated RDDs
var rddGenerated = false
var rddCreationSiteCorrect = false
var foreachCallSiteCorrect = false
inputStream.foreachRDD { rdd =>
rddCreationSiteCorrect = rdd.creationSite == creationSite
foreachCallSiteCorrect =
rdd.sparkContext.getCallSite().shortForm.contains("StreamingContextSuite")
rddGenerated = true
}
ssc.start()
// Flags are set on the batch thread; poll until a batch has run.
eventually(timeout(10000 millis), interval(10 millis)) {
assert(rddGenerated && rddCreationSiteCorrect, "RDD creation site was not correct")
assert(rddGenerated && foreachCallSiteCorrect, "Call site in foreachRDD was not correct")
}
} finally {
ssc.stop()
}
}
}
/**
* Helper methods for testing StreamingContextSuite
* This includes methods to access private methods and fields in StreamingContext and MetricsSystem
*/
private object StreamingContextSuite extends PrivateMethodTester {
// Reflectively reads MetricsSystem's private `sources` buffer.
private val _sources = PrivateMethod[ArrayBuffer[Source]]('sources)
private def getSources(metricsSystem: MetricsSystem): ArrayBuffer[Source] = {
metricsSystem.invokePrivate(_sources())
}
// Reflectively reads StreamingContext's private `streamingSource` field.
private val _streamingSource = PrivateMethod[StreamingSource]('streamingSource)
private def getStreamingSource(streamingContext: StreamingContext): StreamingSource = {
streamingContext.invokePrivate(_streamingSource())
}
}
| aokolnychyi/spark | streaming/src/test/scala/org/apache/spark/streaming/StreamingContextSuite.scala | Scala | apache-2.0 | 36,515 |
/*
* Copyright (C) 2012 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.plugin.environment.batch.authentication
import org.openmole.tool.crypto.Cypher
/**
 * Mix-in for authentications that store an encrypted password.
 * `password` decrypts it on demand, treating a null or empty stored value
 * as "no password" and returning the empty string in that case.
 */
trait CypheredPassword {
  def cypheredPassword: String
  def password(implicit decrypt: Cypher): String =
    Option(cypheredPassword).filter(_.nonEmpty).fold("")(decrypt.decrypt)
}
| openmole/openmole | openmole/plugins/org.openmole.plugin.environment.batch/src/main/scala/org/openmole/plugin/environment/batch/authentication/CypheredPassword.scala | Scala | agpl-3.0 | 1,005 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.testsuite.library
import org.junit.Assert._
import org.junit.Test
import scala.scalajs.js
class FinalizationRegistryTest {

  @Test def testMethods(): Unit = {
    // Held values are Strings; unregister tokens may be any object.
    val registry =
      new js.FinalizationRegistry[js.Date, String, Any]((_: String) => ())

    // Registered without an unregister token: can never be unregistered.
    val noToken = new js.Date()
    registry.register(noToken, "foo")

    // Registered with itself as the unregister token.
    val selfToken = new js.Date()
    registry.register(selfToken, "bar", selfToken)

    // Registered with a distinct unregister token.
    val target = new js.Date()
    val token = new js.Object()
    registry.register(target, "bar", token)

    // Unknown tokens, or targets registered without a token, do not unregister.
    assertFalse(registry.unregister(new js.Object()))
    assertFalse(registry.unregister(noToken))

    // Each successful unregister works exactly once.
    assertTrue(registry.unregister(selfToken))
    assertFalse(registry.unregister(selfToken))
    assertTrue(registry.unregister(token))
    assertFalse(registry.unregister(token))
  }
}
| scala-js/scala-js | test-suite/js/src/test/scala/org/scalajs/testsuite/library/FinalizationRegistryTest.scala | Scala | apache-2.0 | 1,175 |
package org.orbroker
/**
* Broker execution callback.
*/
package object callback {
} | nilskp/orbroker | src/main/scala/org/orbroker/callback/package.scala | Scala | mit | 95 |
package filodb.jmh
import java.util.concurrent.TimeUnit
import org.openjdk.jmh.annotations.{Mode, Scope, State}
import org.openjdk.jmh.annotations.Benchmark
import org.openjdk.jmh.annotations.BenchmarkMode
import org.openjdk.jmh.annotations.OutputTimeUnit
import spire.syntax.cfor._
import filodb.memory.NativeMemoryManager
import filodb.memory.format._
/**
* Measures the speed of encoding different types of data,
* including just Filo vector encoding and encoding from RowReaders.
*
* For a description of the JMH measurement modes, see
* https://github.com/ktoso/sbt-jmh/blob/master/src/sbt-test/sbt-jmh/jmh-run/
* src/main/scala/org/openjdk/jmh/samples/JMHSample_02_BenchmarkModes.scala
*/
@State(Scope.Thread)
class EncodingBenchmark {
import scala.util.Random.{alphanumeric, nextInt}
import vectors._
// Ok, create an IntColumn and benchmark it.
val numValues = 10000
// Off-heap memory pool shared by all vectors built in this benchmark.
val memFactory = new NativeMemoryManager(100 * 1024 * 1024)
val randomInts = (0 until numValues).map(i => util.Random.nextInt)
val randomLongs = randomInts.map(_.toLong)
// NOTE: results show that time spent is heavily influenced by ratio of unique strings...
val numUniqueStrings = 500
val maxStringLength = 15
val minStringLength = 5
val naChance = 0.05 //5% of values will be NA
def randString(len: Int): String = alphanumeric.take(len).mkString
val uniqueStrings = (0 until numUniqueStrings).map { i =>
randString(minStringLength + nextInt(maxStringLength - minStringLength))
}
val randomStrings = (0 until numValues).map(i => uniqueStrings(nextInt(numUniqueStrings)))
val intArray = randomInts.toArray
// Measures building an appending int vector, filling it, encoding (optimize),
// then freeing it — the full encode lifecycle.
@Benchmark
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
def newIntVectorEncoding(): Unit = {
val cb = IntBinaryVector.appendingVector(memFactory, numValues)
cforRange { 0 until numValues } { i =>
cb.addData(intArray(i))
}
cb.optimize(memFactory)
cb.dispose()
}
val cbAdder = IntBinaryVector.appendingVector(memFactory, numValues)
// TODO: post method to free up space
// Measures raw addData throughput into a reusable appending vector (no encoding).
@Benchmark
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
def growableIntVectorAddData(): Unit = {
cbAdder.reset()
cforRange { 0 until numValues } { i =>
cbAdder.addData(intArray(i))
}
}
val noNAAdder = IntBinaryVector.appendingVectorNoNA(memFactory, numValues)
// Same as above but for the no-NA (non-nullable) appending vector variant.
@Benchmark
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
def noNAIntVectorAddData(): Unit = {
noNAAdder.reset()
cforRange { 0 until numValues } { i =>
noNAAdder.addData(intArray(i))
}
}
val utf8strings = randomStrings.map(ZeroCopyUTF8String.apply).toArray
// Full encode lifecycle for UTF8 string vectors.
@Benchmark
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
def newUtf8VectorEncoding(): Unit = {
val cb = UTF8Vector.appendingVector(memFactory, numValues, maxStringLength * numUniqueStrings)
cforRange { 0 until numValues } { i =>
cb.addData(utf8strings(i))
}
cb.optimize(memFactory)
cb.dispose()
}
// TODO: RowReader based vector building
// Pre-built UTF8 vector used as the source for the bulk addVector benchmark below.
val utf8cb = UTF8Vector.appendingVector(memFactory, numValues, maxStringLength * numUniqueStrings)
cforRange { 0 until numValues } { i =>
utf8cb.addData(utf8strings(i))
}
// Measures bulk-copying one UTF8 vector into a fresh appending vector.
@Benchmark
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
def newUtf8AddVector(): Unit = {
val cb = UTF8Vector.appendingVector(memFactory, numValues, maxStringLength * numUniqueStrings)
cb.addVector(utf8cb)
cb.dispose()
}
// Measures dictionary encoding of UTF8 strings with 50% sampling.
@Benchmark
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
def newDictUtf8VectorEncoding(): Unit = {
val hint = Encodings.AutoDictString(samplingRate = 0.5)
UTF8Vector(memFactory, utf8strings).optimize(memFactory, hint)
}
}
| tuplejump/FiloDB | jmh/src/main/scala/filodb.jmh/EncodingBenchmark.scala | Scala | apache-2.0 | 3,841 |
import com.typesafe.config.ConfigFactory
import com.typesafe.config.ConfigRenderOptions
import akka.actor._
//import java.io.File
import java.io.PrintWriter
import Task._
object Workflow extends RunCommand with WhoAmI {
// Realtime scheduling flag reported by inspect(); presumably set by workflow
// definitions elsewhere — TODO confirm.
var realtime = false
// Registry of tasks by name, reported by inspect().
var tasks = scala.collection.mutable.Map[String,Task]()
// Bootstraps the workflow: ensures ~/.chiltepin/{var,etc} exist, loads user
// config layered over defaults, rewrites the config file with the merged
// result, then starts the actor system and tells the gateway to run.
def run():Unit = {
// Get owner of this process
val whoami = whoAmI()
// Compute the user's chiltepin directory
val chiltepinDir = whoami.home + "/.chiltepin"
// Create chiltepin var dir
val varDir = new java.io.File(chiltepinDir + "/var")
if (! varDir.exists) varDir.mkdirs
// Create chiltepin etc dir
val etcDir = new java.io.File(chiltepinDir + "/etc")
if (! etcDir.exists) etcDir.mkdirs
// Compute the name of the user config file
val chiltepinConfigFile = new java.io.File(chiltepinDir + "/etc/chiltepin.conf")
// Get default configuration for the chiltepin
val defaultConfig = ConfigFactory.load()
// Get the current user config, falling back to defaults for missing keys
val config = if (chiltepinConfigFile.exists) {
ConfigFactory.parseFile(chiltepinConfigFile).withFallback(defaultConfig)
}
else {
defaultConfig
}
// Get the server mode
val serverMode = config.getString("chiltepin.server-mode")
// Get the workflow config for the selected server mode
val gatewayConfig = config.getConfig(s"chiltepin.wfGateway.$serverMode").withFallback(config.getConfig("chiltepin.wfGateway")).withFallback(config.getConfig("chiltepin.workflow"))
// Update the user config file to make sure it is up-to-date with the current options
new PrintWriter(chiltepinDir + "/etc/chiltepin.conf") { write("chiltepin " + config.getConfig("chiltepin").root.render(ConfigRenderOptions.defaults().setOriginComments(false))); close }
// Set up actor system
val systemGateway = ActorSystem("WFGateway",gatewayConfig)
// Create the Workflow Gateway actor
val wfGateway = systemGateway.actorOf(Props[WFGateway], name = "wfGateway")
// Run the workflow
wfGateway ! WFGateway.Run
}
// Dumps the realtime flag and every task's cmd/opt/env to stdout for debugging.
def inspect():Unit = {
println(s"realtime = $realtime")
for ((name, task) <- tasks) {
println (s"$name.cmd = ${task.cmd}")
println (s"$name.opt = ${task.opt}")
println (s"$name.env = ${task.env}")
}
}
}
| christopherwharrop/chiltepin | src/main/scala/workflow.scala | Scala | apache-2.0 | 2,316 |
package ee.cone.c4gate.deep_session
import ee.cone.c4gate.FilterPredicateBuilderApp
// App mix-in for deep-session filter-predicate wiring; currently adds no
// members of its own beyond the base FilterPredicateBuilderApp.
trait DeepFilterPredicateBuilderApp extends FilterPredicateBuilderApp
| conecenter/c4proto | extra_lib/src/main/scala/ee/cone/c4gate/deep_session/DeepFilterPredicateBuilderApp.scala | Scala | apache-2.0 | 156 |
package go.echo
import io.gatling.core.Predef._;
import io.gatling.http.Predef._;
/**
* Created by fox on 8/15/16.
*/
// Gatling load test: 20000 virtual users, injected at once, each issuing a
// single GET /ping against the echo service and expecting HTTP 200.
class EchoPingPongSimulation extends Simulation {
// Target service; all requests go to this base URL.
val httpProtocol = http
.baseURL("http://192.168.10.66:9700")
.inferHtmlResources()
.acceptHeader("*/*")
// Number of virtual users injected simultaneously.
val users = 20000
// Scenario: one GET /ping per user, checked for a 200 response.
val scn = scenario("ping")
.exec(http("ping")
.get("/ping")
.check(status.is(200)))
setUp(scn.inject(atOnceUsers(users)).protocols(httpProtocol))
}
| foxundermoon/gatling-test | src/gatling/scala/go/echo/EcchoPingPongSimulation.scala | Scala | mit | 481 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.testsuite.javalib.util.concurrent
import java.util.concurrent.ConcurrentLinkedQueue
import java.{util => ju}
import org.junit.Assert._
import org.junit.Test
import org.scalajs.testsuite.javalib.util.{AbstractCollectionFactory, AbstractCollectionTest}
import scala.collection.JavaConverters._
import scala.language.implicitConversions
import scala.reflect.ClassTag
class ConcurrentLinkedQueueTest extends AbstractCollectionTest {
override def factory: ConcurrentLinkedQueueFactory = new ConcurrentLinkedQueueFactory
@Test def should_store_and_remove_ordered_integers(): Unit = {
val pq = factory.empty[Int]
assertEquals(0, pq.size())
assertTrue(pq.add(111))
assertEquals(1, pq.size())
assertTrue(pq.add(222))
assertEquals(2, pq.size())
assertEquals(111, pq.poll())
assertEquals(1, pq.size())
assertEquals(222, pq.poll())
assertTrue(pq.add(222))
assertTrue(pq.add(222))
assertTrue(pq.remove(222))
assertTrue(pq.remove(222))
assertFalse(pq.remove(222))
}
@Test def should_store_and_remove_strings(): Unit = {
val pq = factory.empty[String]
assertEquals(0, pq.size())
assertTrue(pq.add("aaa"))
assertEquals(1, pq.size())
assertTrue(pq.add("bbb"))
assertEquals(2, pq.size())
assertEquals("aaa", pq.poll())
assertEquals(1, pq.size())
assertEquals("bbb", pq.poll())
assertTrue(pq.add("bbb"))
assertTrue(pq.add("bbb"))
assertTrue(pq.remove("bbb"))
assertTrue(pq.remove("bbb"))
assertFalse(pq.remove("bbb"))
assertNull(pq.poll())
}
@Test def should_store_Double_even_in_corner_cases(): Unit = {
val pq = factory.empty[Double]
assertTrue(pq.add(1.0))
assertTrue(pq.add(+0.0))
assertTrue(pq.add(-0.0))
assertTrue(pq.add(Double.NaN))
assertTrue(pq.poll.equals(1.0))
assertTrue(pq.poll.equals(+0.0))
assertTrue(pq.poll.equals(-0.0))
assertTrue(pq.peek.isNaN)
assertTrue(pq.remove(Double.NaN))
assertTrue(pq.isEmpty)
}
@Test def could_be_instantiated_with_a_prepopulated_Collection(): Unit = {
val l = Set(1, 5, 2, 3, 4).asJavaCollection
val pq = factory.newFrom(l)
assertEquals(5, pq.size())
for (i <- l.asScala) {
assertEquals(i, pq.poll())
}
assertTrue(pq.isEmpty)
}
@Test def should_be_cleared_in_a_single_operation(): Unit = {
val l = Set(1, 5, 2, 3, 4).asJavaCollection
val pq = factory.newFrom(l)
assertEquals(5, pq.size())
pq.clear()
assertEquals(0, pq.size())
}
@Test def should_add_multiple_elemnt_in_one_operation(): Unit = {
val l = Set(1, 5, 2, 3, 4).asJavaCollection
val pq = factory.empty[Int]
assertEquals(0, pq.size())
pq.addAll(l)
assertEquals(5, pq.size())
pq.add(6)
assertEquals(6, pq.size())
}
@Test def should_check_contained_values_even_in_double_corner_cases(): Unit = {
val pq = factory.empty[Double]
assertTrue(pq.add(11111.0))
assertEquals(1, pq.size())
assertTrue(pq.contains(11111.0))
assertEquals(11111.0, pq.iterator.next(), 0.0)
assertTrue(pq.add(Double.NaN))
assertEquals(2, pq.size())
assertTrue(pq.contains(Double.NaN))
assertFalse(pq.contains(+0.0))
assertFalse(pq.contains(-0.0))
assertTrue(pq.remove(Double.NaN))
assertTrue(pq.add(+0.0))
assertEquals(2, pq.size())
assertFalse(pq.contains(Double.NaN))
assertTrue(pq.contains(+0.0))
assertFalse(pq.contains(-0.0))
assertTrue(pq.remove(+0.0))
assertTrue(pq.add(-0.0))
assertEquals(2, pq.size())
assertFalse(pq.contains(Double.NaN))
assertFalse(pq.contains(+0.0))
assertTrue(pq.contains(-0.0))
assertTrue(pq.add(+0.0))
assertTrue(pq.add(Double.NaN))
assertTrue(pq.contains(Double.NaN))
assertTrue(pq.contains(+0.0))
assertTrue(pq.contains(-0.0))
}
  /** The iterator is only weakly consistent: a live iterator may still observe
   *  elements removed from the queue after its creation, and supports remove().
   */
  @Test def should_provide_a_weakly_consistent_iterator(): Unit = {
    val queue = factory.empty[Int]
    queue.add(1)
    queue.add(2)
    val iter1 = queue.iterator()
    assertEquals(1, iter1.next())
    assertTrue(iter1.hasNext)
    // Concurrent removal does not invalidate the iterator; this particular
    // iterator still yields the removed element.
    queue.remove(2)
    assertTrue(iter1.hasNext)
    assertEquals(2, iter1.next())
    assertFalse(iter1.hasNext)
    val queue2 = factory.empty[Int]
    queue2.add(1)
    queue2.add(2)
    queue2.add(3)
    val iter2 = queue2.iterator()
    assertEquals(1, iter2.next())
    // Iterator.remove() deletes the last returned element; iteration continues.
    iter2.remove()
    assertEquals(2, iter2.next())
    assertEquals(3, iter2.next())
  }
}
/** Collection factory producing [[java.util.concurrent.ConcurrentLinkedQueue]]
 *  instances for the shared collection test suite.
 */
class ConcurrentLinkedQueueFactory extends AbstractCollectionFactory {
  override def implementationName: String =
    "java.util.concurrent.ConcurrentLinkedQueue"

  override def empty[E: ClassTag]: ConcurrentLinkedQueue[E] =
    new ConcurrentLinkedQueue[E]()

  // Builds a queue pre-populated with the elements of `coll`, in its iteration order.
  def newFrom[E](coll: ju.Collection[E]): ConcurrentLinkedQueue[E] =
    new ConcurrentLinkedQueue[E](coll)
}
| SebsLittleHelpers/scala-js | test-suite/shared/src/test/scala/org/scalajs/testsuite/javalib/util/concurrent/ConcurrentLinkedQueueTest.scala | Scala | apache-2.0 | 5,094 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package forms
import models.api.returns._
import play.api.data.Forms.{of, single}
import play.api.data.format.Formatter
import play.api.data.{Form, FormError}
object PaymentFrequencyForm {

  /** Form field key under which the payment frequency is submitted. */
  val paymentFrequency: String = "value"

  // Accepted raw form values.
  val quarterly = "quarterly"
  val monthly = "monthly"

  // Message key used when no (or an unrecognised) frequency was submitted.
  val paymentFrequencyNotProvidedKey = "aas.paymentFrequency.notProvided"

  /** Builds the single-field form binding to a [[PaymentFrequency]]. */
  def apply(): Form[PaymentFrequency] = Form(
    single(
      paymentFrequency -> of(formatter)
    )
  )

  /** Two-way mapping between the raw form value and the [[PaymentFrequency]] ADT. */
  def formatter: Formatter[PaymentFrequency] = new Formatter[PaymentFrequency] {

    override def bind(key: String, data: Map[String, String]): Either[Seq[FormError], PaymentFrequency] =
      data.get(key)
        .collect {
          case `quarterly` => QuarterlyPayment
          case `monthly`   => MonthlyPayment
        }
        .toRight(Seq(FormError(key, paymentFrequencyNotProvidedKey)))

    override def unbind(key: String, value: PaymentFrequency): Map[String, String] = {
      val raw = value match {
        case QuarterlyPayment => quarterly
        case MonthlyPayment   => monthly
      }
      Map(key -> raw)
    }
  }
}
| hmrc/vat-registration-frontend | app/forms/PaymentFrequencyForm.scala | Scala | apache-2.0 | 1,770 |
object funcionalSideEffect {
  def main(args: Array[String]): Unit = {
    sideEffect(args)
  }

  // Functional in style (no vars), but printing each argument is still a side effect.
  def sideEffect(args: Array[String]): Unit = for (arg <- args) println(arg)
}
| jmlb23/scala | libro_odersky/scripts_CH3/funcional/funcionalSideEffect.scala | Scala | gpl-3.0 | 229 |
package com.criteo.dev.cluster.config
import java.time.Instant
/**
* Checkpoint for data copy
* @param created
* @param updated
* @param todo Tables to be copied
* @param finished Tables copied with success
* @param failed Tables failed to copy
* @param invalid Invalid tables
*/
case class Checkpoint(
created: Instant,
updated: Instant,
todo: Set[String] = Set.empty,
finished: Set[String] = Set.empty,
failed: Set[String] = Set.empty,
invalid: Set[String] = Set.empty
)
| criteo/berilia | src/main/scala/com/criteo/dev/cluster/config/Checkpoint.scala | Scala | apache-2.0 | 650 |
package com.yiguang.perf
/**
* Created by yigli on 14-12-22.
*/
object TestFor extends App {
  // Crude benchmark: measure wall-clock time of a deliberately slow,
  // exponential-time Fibonacci computation.
  val start = System.currentTimeMillis()
  fibonacci(35)
  val end = System.currentTimeMillis()
  println(end - start)

  /** Naive recursive Fibonacci (1-indexed: fib(1) = fib(2) = 1).
   *  O(2^n) on purpose — the slowness is what is being measured.
   */
  private def fibonacci(n: Int): Int =
    if (n <= 2) 1
    else fibonacci(n - 1) + fibonacci(n - 2)
}
| liyiguang/perftool | src/test/scala/com/yiguang/perf/TestFor.scala | Scala | apache-2.0 | 362 |
package com.softwaremill.bootzooka.test
import com.typesafe.scalalogging.LazyLogging
/**
 * A process started from a shell command that can later be terminated by
 * sending POSIX signals to every pid matching the given grep strings.
 *
 * @param shellCommand Command to run the process.
 * @param processGrepStrings Strings which will be used when grepping the process list to determine the process's pid.
 */
class KillableProcess(shellCommand: String, processGrepStrings: String*) extends LazyLogging {

  // Handle to the most recently started process; null before start() is called
  // and again after a signal has been sent.
  var process: Process = _

  /** Starts the command through `sh -c` and returns (and remembers) the handle. */
  def start(): Process = {
    process = new ProcessBuilder(
      "sh",
      "-c",
      shellCommand
    ).start()
    process
  }

  /** Sends SIGINT (interrupt, like Ctrl-C) to every matching pid. */
  def sendSigInt(): Unit = {
    sendSig(2)
  }

  /** Sends SIGKILL (forced, uncatchable termination) to every matching pid. */
  def sendSigKill(): Unit = {
    sendSig(9)
  }

  /** Sends SIGTERM (polite termination request) to every matching pid. */
  def sendSigTerm(): Unit = {
    sendSig(15)
  }

  /**
   * Sends the given signal number to every pid matching `processGrepStrings`,
   * waiting for each `kill` invocation to finish, then forgets the handle.
   */
  def sendSig(sig: Int): Unit = {
    for (pid <- readPids()) {
      logger.info(s"Sending signal $sig to pid $pid")
      Shell.runShellCommand("kill -" + sig + " " + pid).waitFor()
    }
    process = null
  }

  /** Pids of the currently running processes matching `processGrepStrings`. */
  def readPids(): Iterator[String] = {
    Shell.readProcessPids(processGrepStrings: _*)
  }
}
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.kernel.protocol.v5.client.socket
import java.util.UUID
import akka.actor.{Props, ActorRef, ActorSystem}
import com.ibm.spark.communication.actors.{DealerSocketActor, ReqSocketActor, SubSocketActor}
object SocketFactory {
  /** Convenience constructor mirroring `new SocketFactory(socketConfig)`. */
  def apply(socketConfig: SocketConfig) = {
    new SocketFactory(socketConfig)
  }
}
/**
 * A Factor class to provide various socket connections for IPython Kernel Spec.
 *
 * @param socketConfig The configuration for the sockets to be properly
 *                     instantiated
 */
class SocketFactory(socketConfig: SocketConfig) {
  val HeartbeatConnection = SocketConnection(
    socketConfig.transport, socketConfig.ip, socketConfig.hb_port)
  val ShellConnection = SocketConnection(
    socketConfig.transport, socketConfig.ip, socketConfig.shell_port)
  val IOPubConnection = SocketConnection(
    socketConfig.transport, socketConfig.ip, socketConfig.iopub_port)
  val StdinConnection = SocketConnection(
    socketConfig.transport, socketConfig.ip, socketConfig.stdin_port)

  /**
   * Creates a ZeroMQ request socket representing the client endpoint for
   * heartbeat messages.
   *
   * @param system The actor system the socket actor will belong
   * @param listener The actor who will receive
   *
   * @return The ActorRef created for the socket connection
   */
  def HeartbeatClient(system: ActorSystem, listener: ActorRef) : ActorRef = {
    system.actorOf(Props(classOf[ReqSocketActor], HeartbeatConnection.toString, listener))
  }

  /**
   * Creates a ZeroMQ dealer socket representing the client endpoint for shell
   * messages.
   *
   * @param system The actor system the socket actor will belong
   * @param listener The actor who will receive
   *
   * @return The ActorRef created for the socket connection
   */
  def ShellClient(system: ActorSystem, listener: ActorRef) : ActorRef = {
    system.actorOf(Props(classOf[DealerSocketActor], ShellConnection.toString, listener))
  }

  /**
   * Creates a ZeroMQ dealer socket representing the client endpoint for stdin
   * messages.
   *
   * @param system The actor system the socket actor will belong
   * @param listener The actor who will receive
   *
   * @return The ActorRef created for the socket connection
   */
  def StdinClient(system: ActorSystem, listener: ActorRef) : ActorRef = {
    system.actorOf(Props(classOf[DealerSocketActor], StdinConnection.toString, listener))
  }

  /**
   * Creates a ZeroMQ subscribe socket representing the client endpoint for
   * IOPub messages.
   *
   * @param system The actor system the socket actor will belong
   * @param listener The actor who will receive
   *
   * @return The ActorRef created for the socket connection
   */
  def IOPubClient(system: ActorSystem, listener: ActorRef) : ActorRef = {
    system.actorOf(Props(classOf[SubSocketActor], IOPubConnection.toString, listener))
  }
}
} | yeghishe/spark-kernel | client/src/main/scala/com/ibm/spark/kernel/protocol/v5/client/socket/SocketFactory.scala | Scala | apache-2.0 | 4,544 |
object Test extends App {
  // Prints the macro's result with implicit resolution enabled, then disabled.
  println(Macros.foo_with_implicits_enabled)
  println(Macros.foo_with_implicits_disabled)
}
| scala/scala | test/files/run/macro-typecheck-implicitsdisabled/Test_2.scala | Scala | apache-2.0 | 119 |
package de.htwg.zeta.common.format.project.gdsl.diagram
import de.htwg.zeta.common.models.project.gdsl.diagram.Palette
import de.htwg.zeta.common.models.project.gdsl.shape.Edge
import de.htwg.zeta.common.models.project.gdsl.shape.Node
import de.htwg.zeta.common.models.project.gdsl.shape.Resizing
import de.htwg.zeta.common.models.project.gdsl.shape.Size
import de.htwg.zeta.common.models.project.gdsl.style.Style
import org.scalatest.freespec.AnyFreeSpec
import org.scalatest.matchers.should.Matchers
import play.api.libs.json.JsSuccess
import play.api.libs.json.Json
//noinspection ScalaStyle
class PaletteFormatTest extends AnyFreeSpec with Matchers {

  "A PaletteFormat should" - {
    "write an object" in {
      // Only the palette name and the names of its nodes are serialized; the
      // full node definitions are not part of the palette JSON.
      val result = PaletteFormat().writes(Palette("testPalette", List(
        Node(
          name = "TestNode",
          conceptElement = "TextNodeConcept",
          edges = List(Edge(
            name = "TestEdge",
            conceptElement = "LinkTest",
            target = "TestNode",
            style = Style.defaultStyle,
            placings = List()
          )),
          size = Size(10, 15, 15, 5, 20, 10),
          style = Style.defaultStyle,
          resizing = Resizing(horizontal = true, vertical = true, proportional = false),
          geoModels = List()
        )
      )))
      result.toString() shouldBe
        """{"name":"testPalette","nodes":["TestNode"]}"""
    }
    "read an object" in {
      // Reading only recovers the palette name; node details are not
      // reconstructable from the JSON, so the node list comes back empty.
      val result = PaletteFormat().reads(Json.parse(
        """{"name":"testPalette","nodes":["TestNode"]}"""
      ))
      result shouldBe JsSuccess(Palette("testPalette", List()))
    }
    "fail in reading an invalid input" in {
      val result = PaletteFormat().reads(Json.parse(
        """{"invalid":{"r":23}}"""
      ))
      result.isSuccess shouldBe false
    }
  }
}
| Zeta-Project/zeta | api/common/src/test/scala/de/htwg/zeta/common/format/project/gdsl/diagram/PaletteFormatTest.scala | Scala | bsd-2-clause | 1,821 |
package tadp_grupo5
import scala.collection.mutable.Buffer
/** A branch office holding inbound/outbound packages and a fleet of transports. */
class Sucursal(volumenDeposito: Int, val pais: String) {

  // Mutable state: packages scheduled to leave / arrive, and the transports.
  var paquetesEnSalir: Buffer[Paquete] = Buffer()
  var paquetesEnEntrar: Buffer[Paquete] = Buffer()
  var transportes: Buffer[Transporte] = Buffer()

  /** Remaining deposit capacity: total volume minus everything currently stored. */
  def capacidad: Int = volumenDeposito - paquetesEnEntrar.map(_.volumen).sum - paquetesEnSalir.map(_.volumen).sum

  def esCasaCentral: Boolean = false

  /**
   * Assigns the package to the first transport able to carry it.
   * If no transport can take it right now, the package simply stays pending and
   * is retried later by `asignarPendientes`. (Previously this called Option.get
   * and crashed with NoSuchElementException when no suitable transport existed.)
   */
  def asignarPaquete(paquete: Paquete) {
    transportes.find(_.puedeLlevarPaquete(paquete)).foreach(_.asignarPaquete(paquete))
  }

  /** Outbound packages that no transport has picked up yet. */
  def paquetesPendientes: Buffer[Paquete] = paquetesEnSalir.filterNot(x => transportes.exists(_.pedidos.contains(x)))

  /**
   * Retries assignment of every pending package.
   * Fixes the original `paquetes != 0` guard, which compared a Buffer with an
   * Int and was therefore always true; foreach on an empty buffer is a no-op.
   */
  def asignarPendientes() {
    paquetesPendientes.foreach(asignarPaquete)
  }

  /** Registers an inbound package after validating free capacity. */
  def notificarPaqueteAEntrar(paquete: Paquete) {
    validarCapacidad(paquete)
    paquetesEnEntrar += paquete
  }

  /** Registers an outbound package, tries to assign it, then retries pendings. */
  def notificarPaqueteASalir(paquete: Paquete) {
    validarCapacidad(paquete)
    paquetesEnSalir += paquete
    asignarPaquete(paquete)
    asignarPendientes()
  }

  def descargarEnvios(pedidos: Buffer[Paquete]) {
    pedidos.foreach(descargarEnvio)
  }

  /** Removes a delivered package from the queue it belongs to. */
  def descargarEnvio(pedido: Paquete) {
    if (pedido.sucursalDestino == this) {
      paquetesEnEntrar = paquetesEnEntrar.filterNot(_ == pedido)
    } else {
      paquetesEnSalir = paquetesEnSalir.filterNot(_ == pedido)
    }
  }

  /** Fails fast when the deposit cannot hold the package. */
  def validarCapacidad(paquete: Paquete) = if (capacidad < paquete.volumen) throw new SucursalSinCapacidad()
}
/** Head office: behaves like a regular branch but reports itself as central. */
case class CasaCentral(volumenDeposito : Int, override val pais : String) extends Sucursal(volumenDeposito, pais){
  override def esCasaCentral : Boolean = true
}
case class SucursalSinCapacidad() extends Exception | JonaC22/TADP_2C2014_GRUPO_5 | TP2_Scala/Objeto-Puro/src/tadp_grupo5/Sucursal.scala | Scala | mit | 1,862 |
package com.avsystem.commons
package mongo.core.ops
import com.avsystem.commons.mongo.DocKey
import org.bson.BsonValue
trait DocKeyKeyHandling[T] extends Any with KeyHandling {
protected def docKey: DocKey[T, _ <: BsonValue]
override protected def key: String = docKey.key
}
| AVSystem/scala-commons | commons-mongo/jvm/src/main/scala/com/avsystem/commons/mongo/core/ops/DocKeyKeyHandling.scala | Scala | mit | 282 |
/*
// Copyright 2012/2013 de Gustavo Steinberg, Flavio Soares, Pierre Andrews, Gustavo Salazar Torres, Thomaz Abramo
//
// Este arquivo é parte do programa Vigia Político. O projeto Vigia
// Político é um software livre; você pode redistribuí-lo e/ou
// modificá-lo dentro dos termos da GNU Affero General Public License
// como publicada pela Fundação do Software Livre (FSF); na versão 3 da
// Licença. Este programa é distribuído na esperança que possa ser útil,
// mas SEM NENHUMA GARANTIA; sem uma garantia implícita de ADEQUAÇÃO a
// qualquer MERCADO ou APLICAÇÃO EM PARTICULAR. Veja a licença para
// maiores detalhes. Você deve ter recebido uma cópia da GNU Affero
// General Public License, sob o título "LICENCA.txt", junto com este
// programa, se não, acesse http://www.gnu.org/licenses/
*/
package models
import play.api.db._
import play.api.Play.current
import play.api.libs.json._
import play.api.libs.json.util._
import play.api.libs.json.Writes._
import play.api.libs.functional.syntax._
import java.util.Date
import anorm._
import anorm.SqlParser._
// A parliamentary commission. NOTE(review): "Comission" is a misspelling of
// "Commission", but renaming would touch the DB mapping and JSON consumers.
case class Comission(id: Pk[Long], name: String, prefix: String)

object Comission {

  // anorm row parser mapping a `comissions` row to a Comission instance.
  val simple = {
    (get[Pk[Long]]("id") ~
      get[String]("name") ~
      get[String]("prefix")) map {
        case id ~ name ~ prefix =>
          Comission(id, name, prefix)
      }
  }

  // JSON (de)serialization of anorm primary keys: a JSON number becomes Id,
  // anything else (null/absent) becomes NotAssigned.
  implicit object PkFormat extends Format[Pk[Long]] {
    def reads(json: JsValue): JsResult[Pk[Long]] = JsSuccess(
      json.asOpt[Long].map(id => Id(id)).getOrElse(NotAssigned))
    def writes(id: Pk[Long]): JsValue = id.map(JsNumber(_)).getOrElse(JsNull)
  }

  implicit val comissionWrites = Json.writes[Comission]
  implicit val comissionReads = Json.reads[Comission]

  /** Looks a commission up by its exact name. */
  def find(name: String): Option[Comission] = {
    DB.withConnection { implicit connection =>
      SQL("select * from comissions where name={name}").on(
        'name -> name).as(Comission.simple singleOpt)
    }
  }

  /** All commissions the given user belongs to, ordered by commission id. */
  def findByUser(user: User): Seq[Comission] = {
    DB.withConnection { implicit connection =>
      SQL("select * from comissions inner join user_comissions on comissions.id=user_comissions.comission_id where user_comissions.user_id={userId} order by comissions.id").on(
        'userId -> user.id).as(Comission.simple *)
    }
  }

  def findAll(): Seq[Comission] = {
    DB.withConnection { implicit connection =>
      SQL("select * from comissions").as(Comission.simple *)
    }
  }

  /**
   * Returns the commission with the given name, creating it if absent.
   * NOTE(review): find + insert are not atomic; two concurrent calls could
   * both insert — consider a unique constraint on `name` or a transaction.
   */
  def findOrCreate(name: String, prefix: String): Option[Comission] = {
    DB.withConnection { implicit connection =>
      find(name) match {
        case Some(com) => Some(com)
        case None =>
          val idOpt: Option[Long] = SQL("""INSERT INTO comissions
            (name, prefix)
            VALUES
            ({name}, {prefix})""")
            .on(
              'name -> name,
              'prefix -> prefix).executeInsert()
          idOpt.map { id => Comission(Id(id), name, prefix) }
      }
    }
  }
}
| cidadao-automatico/cidadao-server | app/models/Comission.scala | Scala | agpl-3.0 | 2,985 |
package settings.membership
import java.util.UUID
import scala.annotation.implicitNotFound
import auth.models.User
import log.OrgaEvent
import log.EventsSource
import log.events.joinRequest.JoinResponseEventDto
import play.api.libs.json.JsString
import play.api.libs.json.Json
import play.api.libs.json.Writes
import settings.account.Account
import utils.TransacMode
import utils.Transition
import settings.account.Account
import common.typeAliases._
import common.Command
import war.central.AddParticipantCmd
import war.central.AddAttackCmd
import war.central.AddDefenseCmd
import war.central.AddGuessCmd
import war.central.StartBattleCmd
import war.central.EndWarCmd
import war.central.UndoCmd
import war.central.WarEvent
import war.central.StartBattleEvent
import war.central.EndWarEvent
import war.central.AddParticipantEvent
import war.central.AddAttackEvent
import war.central.AddDefenseEvent
import war.central.AddGuessEvent
object Role {
  // Roles are persisted as a single character.
  type Code = Char

  // Registry of every role keyed by its persistence code.
  private[this] val codeMap = Map(Leader.code -> Leader, Coleader.code -> Coleader, Veteran.code -> Veteran, Novice.code -> Novice)

  /** Resolves a role from its code; throws NoSuchElementException for unknown codes. */
  def decode(code: Code) = codeMap(code)

  implicit val jsonWrites: Writes[Role] = Writes[Role](_.toJson)
}

/** A member's role within an organization, determining which actions are allowed. */
trait Role {
  val canAcceptJoinRequests: Boolean
  val canRejectJoinRequests: Boolean
  val canJoinDirectly: Boolean
  /** Whether a member with this role may execute the given command. */
  def canDo(cmd: Command, actorIcon: Icon): Boolean
  /** Whether a member with this role may undo the given war event. */
  def canUndo(event: WarEvent, actorIcon: Icon): Boolean
  /** Single-character persistence code (see [[Role.decode]]). */
  val code: Role.Code
  def toJson = JsString(code.toString())
}

/** Top role: delegates all command/undo permissions to Coleader's rules. */
case object Leader extends Role {
  override val canAcceptJoinRequests = true
  override val canRejectJoinRequests = true
  override val canJoinDirectly = true
  override def canDo(cmd: Command, actorIcon: Icon): Boolean = Coleader.canDo(cmd, actorIcon)
  override def canUndo(eventToUndo: WarEvent, actorIcon: Icon): Boolean = Coleader.canUndo(eventToUndo, actorIcon)
  override val code = 'L'
}

/** Veteran permissions plus starting battles and ending wars. */
case object Coleader extends Role {
  override val canAcceptJoinRequests = true
  override val canRejectJoinRequests = true
  override val canJoinDirectly = true
  override def canDo(cmd: Command, actorIcon: Icon): Boolean = Veteran.canDo(cmd, actorIcon) || (cmd match {
    case _: StartBattleCmd => true
    case _: EndWarCmd => true
    case _ => false
  })
  override def canUndo(eventToUndo: WarEvent, actorIcon: Icon): Boolean = Veteran.canUndo(eventToUndo, actorIcon) || (eventToUndo match {
    case _: StartBattleEvent => true
    case _: EndWarEvent => true
    case _ => false
  })
  override val code = 'C'
}

/** Novice permissions plus adding any participant, attacks and defenses. */
case object Veteran extends Role {
  override val canAcceptJoinRequests = true
  override val canRejectJoinRequests = true
  override val canJoinDirectly = false
  override def canDo(cmd: Command, actorIcon: Icon): Boolean = Novice.canDo(cmd, actorIcon) || (cmd match {
    case _: AddParticipantCmd => true
    case _: AddAttackCmd => true
    case _: AddDefenseCmd => true
    case _ => false
  })
  override def canUndo(eventToUndo: WarEvent, actorIcon: Icon): Boolean = Novice.canUndo(eventToUndo, actorIcon) || (eventToUndo match {
    case _: AddParticipantEvent => true
    case _: AddAttackEvent => true
    case _: AddDefenseEvent => true
    case _ => false
  })
  override val code = 'V'
}

/** Lowest role: may only add/remove himself as participant, and add guesses. */
case object Novice extends Role {
  override val canAcceptJoinRequests = false
  override val canRejectJoinRequests = false
  override val canJoinDirectly = false
  override def canDo(cmd: Command, actorIcon: Icon): Boolean = cmd match {
    case ap: AddParticipantCmd => ap.iconTag == actorIcon.tag // can add only himself
    case _: AddGuessCmd => true
    case _: UndoCmd => true
    case _ => false
  }
  override def canUndo(eventToUndo: WarEvent, actorIcon: Icon): Boolean = eventToUndo match {
    case ap: AddParticipantEvent => ap.iconName == actorIcon.name // can remove only himself
    case _: AddGuessEvent => true
    case _ => false
  }
  override val code = 'N'
}
/** An organization (clan) that accounts can be members of. */
case class Organization(id: Organization.Id, clanName: String, clanTag: String, description: Option[String])

object Organization {
  type Id = Int
  implicit val jsonFormat = Json.writes[Organization]
}
/**
 * Each organization member, present and past, have a single icon which represents him inside the organization. Even after having left it.
 * When an account joins back, he gets the same icon. Icons are owned by the organization, not by the account that holds it.
 */
case class Icon(organizationId: Organization.Id, tag: Icon.Tag, name: String, role: Role, holder: Account.Id)

object Icon {
  // Integer tag identifying an icon — presumably unique per organization; verify against schema.
  type Tag = Int
}
/**Association between an account and an organization */
//case class Membership(organizationId: Organization.Id, memberTag: Icon.Tag, accountId: Account.Id, requestEventId: Event.Id, responseEventId: Event.Id, accepterMemberTag: Icon.Tag)
/** Service managing account <-> organization membership: searching, joining,
 *  leaving, and organization creation. All operations run inside a
 *  [[Transition]] over a [[TransacMode]].
 */
trait MembershipSrv extends EventsSource[OrgaEvent] {
  def getMembershipStatusOf(accountId: Account.Id): Transition[TransacMode, MembershipStatusDto]
  /**Gives all the [[Organization]]s that fulfill the received criteria.*/
  def searchOrganizations(criteria: SearchOrganizationsCmd): Transition[TransacMode, Seq[Organization]]
  def sendJoinRequest(userId: User.Id, sendJoinRequestCmd: SendJoinRequestCmd): Transition[TransacMode, MembershipStatusDto]
  // `becauseAccepted` distinguishes cancellation due to acceptance from a user-initiated one.
  def cancelJoinRequest(accountId: Account.Id, becauseAccepted: Boolean): Transition[TransacMode, MembershipStatusDto]
  def leaveOrganization(accountId: Account.Id): Transition[TransacMode, MembershipStatusDto]
  /**
   * Creates a new [[Organization]] and stores it into the underlying DB.
   * @return the created [[Organization]]
   */
  def createOrganization(userId: User.Id, project: CreateOrganizationCmd): Transition[TransacMode, (Organization, IconDto)]
  def getOrganizationOf(accountId: Account.Id): Transition[TransacMode, Option[Organization.Id]]
}
| readren/coc-war-organizer | app/settings/membership/MembershipSrv.scala | Scala | apache-2.0 | 5,950 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.scheduler
import java.util.concurrent.TimeUnit
import scala.util.{Failure, Success, Try}
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.{Checkpoint, CheckpointWriter, Time}
import org.apache.spark.streaming.api.python.PythonDStream
import org.apache.spark.streaming.util.RecurringTimer
import org.apache.spark.util.{Clock, EventLoop, ManualClock, Utils}
/** Event classes for JobGenerator */
private[scheduler] sealed trait JobGeneratorEvent
// Generate the jobs for the given batch time.
private[scheduler] case class GenerateJobs(time: Time) extends JobGeneratorEvent
// Clear DStream metadata for the given batch time.
private[scheduler] case class ClearMetadata(time: Time) extends JobGeneratorEvent
// Checkpoint the graph at `time`; optionally clear checkpoint data afterwards.
private[scheduler] case class DoCheckpoint(
    time: Time, clearCheckpointDataLater: Boolean) extends JobGeneratorEvent
// Clear DStream checkpoint data for the given batch time.
private[scheduler] case class ClearCheckpointData(time: Time) extends JobGeneratorEvent
/**
* This class generates jobs from DStreams as well as drives checkpointing and cleaning
* up DStream metadata.
*/
private[streaming]
class JobGenerator(jobScheduler: JobScheduler) extends Logging {
private val ssc = jobScheduler.ssc
private val conf = ssc.conf
private val graph = ssc.graph
val clock = {
val clockClass = ssc.sc.conf.get(
"spark.streaming.clock", "org.apache.spark.util.SystemClock")
try {
Utils.classForName[Clock](clockClass).getConstructor().newInstance()
} catch {
case e: ClassNotFoundException if clockClass.startsWith("org.apache.spark.streaming") =>
val newClockClass = clockClass.replace("org.apache.spark.streaming", "org.apache.spark")
Utils.classForName[Clock](newClockClass).getConstructor().newInstance()
}
}
private val timer = new RecurringTimer(clock, ssc.graph.batchDuration.milliseconds,
longTime => eventLoop.post(GenerateJobs(new Time(longTime))), "JobGenerator")
// This is marked lazy so that this is initialized after checkpoint duration has been set
// in the context and the generator has been started.
private lazy val shouldCheckpoint = ssc.checkpointDuration != null && ssc.checkpointDir != null
private lazy val checkpointWriter = if (shouldCheckpoint) {
new CheckpointWriter(this, ssc.conf, ssc.checkpointDir, ssc.sparkContext.hadoopConfiguration)
} else {
null
}
// eventLoop is created when generator starts.
// This not being null means the scheduler has been started and not stopped
private var eventLoop: EventLoop[JobGeneratorEvent] = null
// last batch whose completion,checkpointing and metadata cleanup has been completed
private var lastProcessedBatch: Time = null
/** Start generation of jobs */
def start(): Unit = synchronized {
if (eventLoop != null) return // generator has already been started
// Call checkpointWriter here to initialize it before eventLoop uses it to avoid a deadlock.
// See SPARK-10125
checkpointWriter
eventLoop = new EventLoop[JobGeneratorEvent]("JobGenerator") {
override protected def onReceive(event: JobGeneratorEvent): Unit = processEvent(event)
override protected def onError(e: Throwable): Unit = {
jobScheduler.reportError("Error in job generator", e)
}
}
eventLoop.start()
if (ssc.isCheckpointPresent) {
restart()
} else {
startFirstTime()
}
}
/**
* Stop generation of jobs. processReceivedData = true makes this wait until jobs
* of current ongoing time interval has been generated, processed and corresponding
* checkpoints written.
*/
def stop(processReceivedData: Boolean): Unit = synchronized {
if (eventLoop == null) return // generator has already been stopped
if (processReceivedData) {
logInfo("Stopping JobGenerator gracefully")
val timeWhenStopStarted = System.nanoTime()
val stopTimeoutMs = conf.getTimeAsMs(
"spark.streaming.gracefulStopTimeout", s"${10 * ssc.graph.batchDuration.milliseconds}ms")
val pollTime = 100
// To prevent graceful stop to get stuck permanently
def hasTimedOut: Boolean = {
val diff = TimeUnit.NANOSECONDS.toMillis((System.nanoTime() - timeWhenStopStarted))
val timedOut = diff > stopTimeoutMs
if (timedOut) {
logWarning("Timed out while stopping the job generator (timeout = " + stopTimeoutMs + ")")
}
timedOut
}
// Wait until all the received blocks in the network input tracker has
// been consumed by network input DStreams, and jobs have been generated with them
logInfo("Waiting for all received blocks to be consumed for job generation")
while(!hasTimedOut && jobScheduler.receiverTracker.hasUnallocatedBlocks) {
Thread.sleep(pollTime)
}
logInfo("Waited for all received blocks to be consumed for job generation")
// Stop generating jobs
val stopTime = timer.stop(interruptTimer = false)
logInfo("Stopped generation timer")
// Wait for the jobs to complete and checkpoints to be written
def haveAllBatchesBeenProcessed: Boolean = {
lastProcessedBatch != null && lastProcessedBatch.milliseconds == stopTime
}
logInfo("Waiting for jobs to be processed and checkpoints to be written")
while (!hasTimedOut && !haveAllBatchesBeenProcessed) {
Thread.sleep(pollTime)
}
logInfo("Waited for jobs to be processed and checkpoints to be written")
graph.stop()
} else {
logInfo("Stopping JobGenerator immediately")
// Stop timer and graph immediately, ignore unprocessed data and pending jobs
timer.stop(true)
graph.stop()
}
// First stop the event loop, then stop the checkpoint writer; see SPARK-14701
eventLoop.stop()
if (shouldCheckpoint) checkpointWriter.stop()
logInfo("Stopped JobGenerator")
}
/**
* Callback called when a batch has been completely processed.
*/
def onBatchCompletion(time: Time) {
eventLoop.post(ClearMetadata(time))
}
/**
* Callback called when the checkpoint of a batch has been written.
*/
def onCheckpointCompletion(time: Time, clearCheckpointDataLater: Boolean) {
if (clearCheckpointDataLater) {
eventLoop.post(ClearCheckpointData(time))
}
}
/** Processes all events */
private def processEvent(event: JobGeneratorEvent) {
logDebug("Got event " + event)
event match {
case GenerateJobs(time) => generateJobs(time)
case ClearMetadata(time) => clearMetadata(time)
case DoCheckpoint(time, clearCheckpointDataLater) =>
doCheckpoint(time, clearCheckpointDataLater)
case ClearCheckpointData(time) => clearCheckpointData(time)
}
}
/** Starts the generator for the first time */
private def startFirstTime() {
val startTime = new Time(timer.getStartTime())
graph.start(startTime - graph.batchDuration)
timer.start(startTime.milliseconds)
logInfo("Started JobGenerator at " + startTime)
}
/** Restarts the generator based on the information in checkpoint */
private def restart() {
// If manual clock is being used for testing, then
// either set the manual clock to the last checkpointed time,
// or if the property is defined set it to that time
if (clock.isInstanceOf[ManualClock]) {
val lastTime = ssc.initialCheckpoint.checkpointTime.milliseconds
val jumpTime = ssc.sc.conf.getLong("spark.streaming.manualClock.jump", 0)
clock.asInstanceOf[ManualClock].setTime(lastTime + jumpTime)
}
val batchDuration = ssc.graph.batchDuration
// Batches when the master was down, that is,
// between the checkpoint and current restart time
val checkpointTime = ssc.initialCheckpoint.checkpointTime
val restartTime = new Time(timer.getRestartTime(graph.zeroTime.milliseconds))
val downTimes = checkpointTime.until(restartTime, batchDuration)
logInfo("Batches during down time (" + downTimes.size + " batches): "
+ downTimes.mkString(", "))
// Batches that were unprocessed before failure
val pendingTimes = ssc.initialCheckpoint.pendingTimes.sorted(Time.ordering)
logInfo("Batches pending processing (" + pendingTimes.length + " batches): " +
pendingTimes.mkString(", "))
// Reschedule jobs for these times
val timesToReschedule = (pendingTimes ++ downTimes).filter { _ < restartTime }
.distinct.sorted(Time.ordering)
logInfo("Batches to reschedule (" + timesToReschedule.length + " batches): " +
timesToReschedule.mkString(", "))
timesToReschedule.foreach { time =>
// Allocate the related blocks when recovering from failure, because some blocks that were
// added but not allocated, are dangling in the queue after recovering, we have to allocate
// those blocks to the next batch, which is the batch they were supposed to go.
jobScheduler.receiverTracker.allocateBlocksToBatch(time) // allocate received blocks to batch
jobScheduler.submitJobSet(JobSet(time, graph.generateJobs(time)))
}
// Restart the timer
timer.start(restartTime.milliseconds)
logInfo("Restarted JobGenerator at " + restartTime)
}
/** Generate jobs and perform checkpointing for the given `time`. */
private def generateJobs(time: Time) {
// Checkpoint all RDDs marked for checkpointing to ensure their lineages are
// truncated periodically. Otherwise, we may run into stack overflows (SPARK-6847).
ssc.sparkContext.setLocalProperty(RDD.CHECKPOINT_ALL_MARKED_ANCESTORS, "true")
Try {
jobScheduler.receiverTracker.allocateBlocksToBatch(time) // allocate received blocks to batch
graph.generateJobs(time) // generate jobs using allocated block
} match {
case Success(jobs) =>
val streamIdToInputInfos = jobScheduler.inputInfoTracker.getInfo(time)
jobScheduler.submitJobSet(JobSet(time, jobs, streamIdToInputInfos))
case Failure(e) =>
jobScheduler.reportError("Error generating jobs for time " + time, e)
PythonDStream.stopStreamingContextIfPythonProcessIsDead(e)
}
eventLoop.post(DoCheckpoint(time, clearCheckpointDataLater = false))
}
/** Clear DStream metadata for the given `time`. */
private def clearMetadata(time: Time): Unit = {
  ssc.graph.clearMetadata(time)
  if (shouldCheckpoint) {
    // Record the metadata removal in a checkpoint; the batch is marked
    // fully processed only after that checkpoint completes.
    eventLoop.post(DoCheckpoint(time, clearCheckpointDataLater = true))
  } else {
    // Without checkpointing there is nothing to persist: delete metadata
    // about received blocks (block data is not saved in any case) and
    // finish the batch immediately.
    val maxRememberDuration = graph.getMaxInputStreamRememberDuration()
    val cleanupThreshold = time - maxRememberDuration
    jobScheduler.receiverTracker.cleanupOldBlocksAndBatches(cleanupThreshold)
    jobScheduler.inputInfoTracker.cleanup(cleanupThreshold)
    markBatchFullyProcessed(time)
  }
}
/** Clear DStream checkpoint data for the given `time`. */
private def clearCheckpointData(time: Time): Unit = {
  ssc.graph.clearCheckpointData(time)
  // The record of which batches have been processed is already saved in the
  // checkpoint, so it's safe to drop block metadata and data WAL files up
  // to the remember horizon.
  val horizon = time - graph.getMaxInputStreamRememberDuration()
  jobScheduler.receiverTracker.cleanupOldBlocksAndBatches(horizon)
  jobScheduler.inputInfoTracker.cleanup(horizon)
  markBatchFullyProcessed(time)
}
/** Perform checkpoint for the given `time`. */
private def doCheckpoint(time: Time, clearCheckpointDataLater: Boolean): Unit = {
  val checkpointDue =
    shouldCheckpoint && (time - graph.zeroTime).isMultipleOf(ssc.checkpointDuration)
  if (checkpointDue) {
    logInfo("Checkpointing graph for time " + time)
    ssc.graph.updateCheckpointData(time)
    checkpointWriter.write(new Checkpoint(ssc, time), clearCheckpointDataLater)
  } else if (clearCheckpointDataLater) {
    // No checkpoint will be written for this batch, so nothing defers its
    // completion; mark it processed right away.
    markBatchFullyProcessed(time)
  }
}
/**
 * Record `time` as the most recent batch whose processing (including any
 * checkpoint-related cleanup) has fully completed.
 */
private def markBatchFullyProcessed(time: Time) {
  lastProcessedBatch = time
}
}
| techaddict/spark | streaming/src/main/scala/org/apache/spark/streaming/scheduler/JobGenerator.scala | Scala | apache-2.0 | 12,964 |
/*
* Copyright (c) 2010 Sony Pictures Imageworks Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the
* distribution. Neither the name of Sony Pictures Imageworks nor the
* names of its contributors may be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.imageworks.migration
import org.slf4j.LoggerFactory
import java.sql.{
Connection,
PreparedStatement,
ResultSet
}
/**
* Due to JVM type erasure, the scala.Predef.ArrowAssoc.->
* method generates a Tuple2 and the following cannot be distinguished
*
* "table_name" -> "column1"
*
* "table_name" -> ("column1", "column2")
*
* After erasure a Tuple2[String,String] is identical to a
* Tuple2[String,Tuple2[String,String]]. So to work around this, the
* -> operator is redefined to operate only on String's, which
* effectively removes the type from the first type of the Tuple2 and
* allows it to be overloaded on the second type of the Tuple2. The
* MigrationArrowAssoc class has the new -> method.
*/
/**
 * Wraps a table name and provides `->` overloads that build a
 * [[TableColumnDefinition]] from either a single column name or a pair of
 * column names.  See the comment above for why scala.Predef.ArrowAssoc
 * cannot be used directly.
 */
class MigrationArrowAssoc(s: String) {
  /** Pair the table name with a single column. */
  def `->`(other: String): TableColumnDefinition =
    new TableColumnDefinition(s, Array(other))

  /** Pair the table name with two columns. */
  def `->`(other: (String, String)): TableColumnDefinition = {
    val (first, second) = other
    new TableColumnDefinition(s, Array(first, second))
  }
}
abstract class Migration {
// Logger is looked up via this.getClass, so messages appear under the
// concrete migration subclass's name.
private final val logger = LoggerFactory.getLogger(this.getClass)
/**
 * Concrete migration classes must define this method to migrate the
 * database up to a new migration.
 */
def up()
/**
 * Concrete migration classes must define this method to back out of
 * this migration.  If the migration cannot be reversed, then an
 * IrreversibleMigrationException should be thrown.
 */
def down()
/**
 * The raw connection to the database that underlies the logging
 * connection.  This is provided in case the real database
 * connection is needed because the logging connection does not
 * provide a required feature.  This connection should not be used
 * in normal use.
 *
 * This is set using property style dependency injection instead of
 * constructor style injection, which makes for cleaner code for the
 * users of this migration framework.
 */
private[migration] var rawConnectionOpt: Option[Connection] = None
/**
 * Get the raw connection to the database the migration can use for
 * any custom work.  This connection is the raw connection that
 * underlies the logging connection and does not log any operations
 * performed on it.  It should only be used when the logging
 * connection does not provide a required feature.  The Migration
 * subclass must be careful with this connection and leave it in a
 * good state, as all of the other migration methods defined in
 * Migration use the same connection.
 *
 * NOTE: throws NoSuchElementException (via Option#get) if the
 * framework has not injected the connection yet.
 */
def rawConnection = rawConnectionOpt.get
/**
 * The connection to the database that is used for the migration.
 * This connection also logs all operations performed on it.
 *
 * This is set using property style dependency injection instead of
 * constructor style injection, which makes for cleaner code for the
 * users of this migration framework.
 */
private[migration] var connectionOpt: Option[Connection] = None
/**
 * Get the connection to the database the migration can use for any
 * custom work.  This connection logs all operations performed on
 * it.  The Migration subclass must be careful with this connection
 * and leave it in a good state, as all of the other migration
 * methods defined in Migration use the same connection.
 *
 * NOTE: throws NoSuchElementException (via Option#get) if the
 * framework has not injected the connection yet.
 */
def connection = connectionOpt.get
/**
 * The database adapter that will be used for the migration.
 *
 * This is set using property style dependency injection instead of
 * constructor style injection, which makes for cleaner code for the
 * users of this migration framework.
 */
private[migration] var adapterOpt: Option[DatabaseAdapter] = None
/**
 * The database adapter that will be used for the migration.
 */
private def adapter = adapterOpt.get
/**
 * The vendor of the database the migration is being run on.
 */
def databaseVendor: Vendor = adapter.vendor
/**
 * Replace the default -> implicit with one yielding a
 * MigrationArrowAssoc instead of scala.Predef.ArrowAssoc.  See the
 * comment on the MigrationArrowAssoc class for why this is done.
 */
implicit def stringToMigrationArrowAssoc(s: String): MigrationArrowAssoc =
  new MigrationArrowAssoc(s)

/**
 * Convert a table and column name definition into an On foreign key
 * instance.
 */
def on(definition: TableColumnDefinition): On = new On(definition)

/**
 * Convert a table and column name definition into a References
 * foreign key instance.
 */
def references(definition: TableColumnDefinition): References =
  new References(definition)

/**
 * True if the database implicitly adds an index on a column that has
 * a foreign key constraint added to it.
 *
 * The following SQL can be used to test a database; the last
 * statement fails with a message that an index already exists on the
 * column:
 *
 *   create table parent (pk int primary key);
 *   create table child (pk int primary key, pk_parent int not null);
 *   alter table child
 *     add constraint idx_child_pk_parent foreign key (pk_parent)
 *     references parent (pk);
 *   create index idx_child_pk_parent on child (pk_parent);
 */
def addingForeignKeyConstraintCreatesIndex: Boolean =
  adapter.addingForeignKeyConstraintCreatesIndex
/**
 * Execute the given SQL string using the migration's logging
 * connection, closing the statement when done.
 *
 * @param sql the SQL to execute
 */
final def execute(sql: String) {
  With.autoClosingStatement(connection.createStatement) { stmt =>
    stmt.execute(sql)
  }
}

/**
 * Run `f` with a prepared statement for `sql` inside a transaction:
 * auto-commit is turned off, a statement is prepared and handed to
 * `f`, and the transaction is committed on normal return or rolled
 * back (re-throwing the cause) on exception.  The connection's
 * original auto-commit setting is restored afterwards.  `f` must not
 * perform the commit itself.
 *
 * @param sql the SQL text that will be prepared
 * @param f the function that will be given the new prepared statement
 */
final def withPreparedStatement(sql: String)(f: PreparedStatement => Unit) {
  With.autoCommittingConnection(connection,
                                CommitUponReturnOrRollbackUponException) { txn =>
    With.autoClosingStatement(txn.prepareStatement(sql))(f)
  }
}

/**
 * Pass the result set to `f` and close it afterwards, whether `f`
 * returns normally or throws.
 *
 * @param rs the SQL result set
 * @param f the function that will be given the result set
 * @return the result of `f` if it returns normally
 */
final def withResultSet[R](rs: ResultSet)(f: ResultSet => R): R =
  With.autoClosingResultSet(rs)(f)
/**
 * Create a new table.  The body closure populates the table
 * definition with columns before the CREATE TABLE statement is built
 * and executed.
 */
final def createTable(tableName: String,
                      options: TableOption*)(body: TableDefinition => Unit) {
  val tableDefinition = new TableDefinition(adapter, tableName)
  body(tableDefinition)
  execute(s"CREATE TABLE ${adapter.quoteTableName(tableName)} " +
          s"(${tableDefinition.toSql})")
}

/**
 * Add a single column to an existing table.
 */
final def addColumn(tableName: String,
                    columnName: String,
                    columnType: SqlType,
                    options: ColumnOption*) {
  val tableDefinition = new TableDefinition(adapter, tableName)
  tableDefinition.column(columnName, columnType, options: _*)
  execute(s"ALTER TABLE ${adapter.quoteTableName(tableName)} " +
          s"ADD ${tableDefinition.toSql}")
}
/**
 * Alter the definition of an existing column.
 *
 * NOTE: if the original column definition uses CharacterSet() then
 * it must be used here again, unless the base SQL data type is
 * being changed.  For example, on Oracle, creating a column without
 * CharacterSet uses VARCHAR2 while using CharacterSet(Unicode) uses
 * NVARCHAR2, so if the original column used CharacterSet(Unicode)
 * and #alterColumn() is not passed CharacterSet(Unicode), then the
 * column's data type will change from NVARCHAR2 to VARCHAR2.
 *
 * @param tableName the name of the table with the column
 * @param columnName the name of the column
 * @param columnType the type the column is being altered to
 * @param options a possibly empty array of column options to
 *        customize the column
 */
final def alterColumn(tableName: String,
                      columnName: String,
                      columnType: SqlType,
                      options: ColumnOption*) {
  execute(adapter.alterColumnSql(tableName, columnName, columnType, options: _*))
}

/** Remove a column from a table. */
final def removeColumn(tableName: String,
                       columnName: String) {
  execute(adapter.removeColumnSql(tableName, columnName))
}

/** Drop a table. */
final def dropTable(tableName: String) {
  execute(s"DROP TABLE ${adapter.quoteTableName(tableName)}")
}
/**
 * Determine the index name from the options: the last Name() option
 * wins; otherwise a default "idx_<table>_<col1>_..._<colN>" name is
 * generated.  A warning is logged each time a later Name() contradicts
 * an earlier one.
 *
 * @return a two-tuple of the chosen name and the remaining options
 *         with all Name() instances removed
 */
private def indexNameFor(tableName: String,
                         columnNames: Array[String],
                         options: IndexOption*): (String, List[IndexOption]) = {
  val allOptions = options.toList
  var remaining = allOptions
  var chosenName: Option[String] = None
  allOptions.foreach {
    case opt @ Name(n) =>
      remaining = remaining.filter(_ ne opt)
      if (chosenName.exists(_ != n)) {
        logger.warn("Redefining the index name from '{}' to '{}'.",
                    Array[AnyRef](chosenName.get, n): _*)
      }
      chosenName = Some(n)
    case _ =>
  }
  val indexName = chosenName.getOrElse {
    "idx_" + tableName + "_" + columnNames.mkString("_")
  }
  (indexName, remaining)
}
/**
 * Add an index to a table on a non-empty list of column names.  The
 * name of the index is automatically generated unless Name() is
 * given as an option.
 *
 * @param tableName the table to add the index to
 * @param columnNames a list of one or more column names that the
 *        index will be on
 * @param options a possibly empty list of index options to
 *        customize the creation of the index
 */
final def addIndex(tableName: String,
                   columnNames: Array[String],
                   options: IndexOption*) {
  if (columnNames.isEmpty) {
    throw new IllegalArgumentException("Adding an index requires at " +
                                       "least one column name.")
  }
  val (name, remaining) = indexNameFor(tableName, columnNames, options: _*)
  // The Unique option is consumed here and rendered as "UNIQUE" in the SQL.
  val uniqueClause = if (remaining.contains(Unique)) "UNIQUE " else ""
  val quotedColumnNames = columnNames.map(adapter.quoteColumnName(_)).mkString(", ")
  execute("CREATE " + uniqueClause + "INDEX " +
          adapter.quoteIndexName(None, name) +
          " ON " + adapter.quoteTableName(tableName) +
          " (" + quotedColumnNames + ")")
}

/**
 * Add an index to a table on a column.  The name of the index is
 * automatically generated unless Name() is given as an option.
 *
 * @param tableName the table to add the index to
 * @param columnName the name of the column that the index will be on
 * @param options a possibly empty list of index options to
 *        customize the creation of the index
 */
final def addIndex(tableName: String,
                   columnName: String,
                   options: IndexOption*) {
  addIndex(tableName, Array(columnName), options: _*)
}
/**
 * Remove an index on a table that is composed of a non-empty list of
 * column names.  The name of the index to remove is automatically
 * generated unless Name() is given as an option.
 *
 * @param tableName the table to remove the index from
 * @param columnNames a list of one or more column names that the
 *        index is on
 * @param options a possibly empty list of index options to customize
 *        the removal of the index
 */
final def removeIndex(tableName: String,
                      columnNames: Array[String],
                      options: Name*) {
  if (columnNames.isEmpty) {
    throw new IllegalArgumentException("Removing an index requires at " +
                                       "least one column name.")
  }
  val (name, _) = indexNameFor(tableName, columnNames, options: _*)
  execute(adapter.removeIndexSql(tableName, name))
}

/**
 * Remove an index on a column in a table.  The name of the index to
 * remove is automatically generated unless Name() is given as an
 * option.
 *
 * @param tableName the table to remove the index from
 * @param columnName the name of the column the index is on
 * @param options a possibly empty list of index options to customize
 *        the removal of the index
 */
final def removeIndex(tableName: String,
                      columnName: String,
                      options: Name*) {
  removeIndex(tableName, Array(columnName), options: _*)
}
/**
 * Given a foreign key relationship, create a name for it, using a
 * Name() if it is provided in the options; otherwise a default
 * "fk_<on table>_<on cols>_<ref table>_<ref cols>" name is generated.
 * A warning is logged each time a later Name() contradicts an earlier
 * one.
 *
 * @param on the table and columns the foreign key constraint is on
 * @param references the table and columns the foreign key
 *        constraint references
 * @param options a varargs list of ForeignKeyOption's
 * @return a two-tuple of the chosen name and the remaining options
 *         with all Name() instances removed
 */
private def foreignKeyNameFor(on: On,
                              references: References,
                              options: ForeignKeyOption*): (String, List[ForeignKeyOption]) = {
  val allOptions = options.toList
  var remaining = allOptions
  var chosenName: Option[String] = None
  allOptions.foreach {
    case opt @ Name(n) =>
      remaining = remaining.filter(_ ne opt)
      if (chosenName.exists(_ != n)) {
        logger.warn("Redefining the foreign key name from '{}' to '{}'.",
                    Array[AnyRef](chosenName.get, n): _*)
      }
      chosenName = Some(n)
    case _ =>
  }
  val fkName = chosenName.getOrElse {
    "fk_" +
      on.tableName + "_" + on.columnNames.mkString("_") + "_" +
      references.tableName + "_" + references.columnNames.mkString("_")
  }
  (fkName, remaining)
}
/**
 * Add a foreign key to a table.  The name of the foreign key is
 * automatically generated unless Name() is given as an option.
 *
 * @param on the table and column name(s) to place the foreign key on
 * @param references the table and column name(s) that the foreign
 *        key references
 * @param options a possibly empty list of foreign key options to
 *        customize the creation of the foreign key
 */
def addForeignKey(on: On,
                  references: References,
                  options: ForeignKeyOption*) {
  if (on.columnNames.length == 0) {
    throw new IllegalArgumentException("Adding a foreign key constraint " +
                                       "requires at least one column name " +
                                       "in the table adding the constraint.")
  }
  if (references.columnNames.length == 0) {
    throw new IllegalArgumentException("Adding a foreign key constraint " +
                                       "requires at least one column name " +
                                       "from the table being referenced.")
  }
  val (name, namedOpts) = foreignKeyNameFor(on, references, options: _*)

  // Consume any OnDelete() options; the last one wins and conflicting
  // actions are warned about.
  var remaining = namedOpts
  var onDeleteOpt: Option[OnDelete] = None
  namedOpts.foreach {
    case opt @ OnDelete(action) =>
      if (onDeleteOpt.isDefined && action != onDeleteOpt.get.action) {
        logger.warn("Overriding the ON DELETE action from '{}' to '{}'.",
                    Array[AnyRef](onDeleteOpt.get.action, action): _*)
      }
      remaining = remaining.filter(_ ne opt)
      onDeleteOpt = Some(opt)
    case _ =>
  }
  // Consume any OnUpdate() options the same way.
  var onUpdateOpt: Option[OnUpdate] = None
  remaining.foreach {
    case opt @ OnUpdate(action) =>
      if (onUpdateOpt.isDefined && action != onUpdateOpt.get.action) {
        logger.warn("Overriding the ON UPDATE action from '{}' to '{}'.",
                    Array[AnyRef](onUpdateOpt.get.action, action): _*)
      }
      onUpdateOpt = Some(opt)
    case _ =>
  }

  val quotedOnColumnNames =
    on.columnNames.map(adapter.quoteColumnName(_)).mkString(", ")
  val quotedReferencesColumnNames =
    references.columnNames.map(adapter.quoteColumnName(_)).mkString(", ")
  val base = "ALTER TABLE " + adapter.quoteTableName(on.tableName) +
             " ADD CONSTRAINT " + name +
             " FOREIGN KEY (" + quotedOnColumnNames +
             ") REFERENCES " + adapter.quoteTableName(references.tableName) +
             " (" + quotedReferencesColumnNames + ")"
  // Append the ON DELETE clause before the ON UPDATE clause, each only
  // when the adapter produces non-empty SQL for it.
  val clauses = Seq(adapter.onDeleteSql(onDeleteOpt),
                    adapter.onUpdateSql(onUpdateOpt))
  val suffix = clauses.filter(!_.isEmpty).map(" " + _).mkString
  execute(base + suffix)
}

/**
 * Add a foreign key to a table.  The name of the foreign key is
 * automatically generated unless Name() is given as an option.
 *
 * @param references the table and column name(s) that the foreign
 *        key references
 * @param on the table and column name(s) to place the foreign key on
 * @param options a possibly empty list of foreign key options to
 *        customize the creation of the foreign key
 */
def addForeignKey(references: References,
                  on: On,
                  options: ForeignKeyOption*) {
  addForeignKey(on, references, options: _*)
}
/**
 * Remove a foreign key from a table.  The name of the foreign key is
 * automatically generated unless Name() is given as an option.
 *
 * @param on the table and column name(s) to remove the foreign key
 *        from
 * @param references the table and column name(s) that the foreign
 *        key references
 * @param options a possibly empty list of foreign key options to
 *        customize the removal of the foreign key
 */
def removeForeignKey(on: On,
                     references: References,
                     options: Name*) {
  if (on.columnNames.length == 0) {
    throw new IllegalArgumentException("Removing a foreign key constraint " +
                                       "requires at least one column name " +
                                       "in the table adding the constraint.")
  }
  if (references.columnNames.length == 0) {
    throw new IllegalArgumentException("Removing a foreign key constraint " +
                                       "requires at least one column name " +
                                       "from the table being referenced.")
  }
  val (name, _) = foreignKeyNameFor(on, references, options: _*)
  execute(s"ALTER TABLE ${adapter.quoteTableName(on.tableName)} " +
          s"DROP ${adapter.alterTableDropForeignKeyConstraintPhrase} $name")
}

/**
 * Remove a foreign key from a table.  The name of the foreign key is
 * automatically generated unless Name() is given as an option.
 *
 * @param references the table and column name(s) that the foreign
 *        key references
 * @param on the table and column name(s) to remove the foreign key
 *        from
 * @param options a possibly empty list of foreign key options to
 *        customize the removal of the foreign key
 */
def removeForeignKey(references: References,
                     on: On,
                     options: Name*) {
  removeForeignKey(on, references, options: _*)
}
/**
 * Add a grant on a table to one or more grantees.
 *
 * @param tableName the table name to add the grants to
 * @param grantees a non-empty array of grantees
 * @param privileges a non-empty array of privileges to grant to the
 *        grantees
 */
final def grant(tableName: String,
                grantees: Array[User],
                privileges: GrantPrivilegeType*) {
  if (grantees.isEmpty) {
    throw new IllegalArgumentException("Granting privileges requires " +
                                       "at least one grantee.")
  }
  if (privileges.isEmpty) {
    throw new IllegalArgumentException("Granting privileges requires " +
                                       "at least one privilege.")
  }
  execute(adapter.grantOnTableSql(tableName, grantees, privileges: _*))
}

/**
 * Add a grant on a table to one or more grantees named by strings.
 *
 * @param tableName the table name to add the grants to
 * @param grantees a non-empty array of grantee names
 * @param privileges a non-empty array of privileges to grant to the
 *        grantees
 */
final def grant(tableName: String,
                grantees: Array[String],
                privileges: GrantPrivilegeType*) {
  val users = grantees.map(adapter.userFactory.nameToUser(_))
  grant(tableName, users, privileges: _*)
}

/**
 * Add a grant on a table to a single grantee.
 *
 * @param tableName the table name to add the grants to
 * @param grantee the grantee to grant the privileges to
 * @param privileges a non-empty array of privileges to grant to the
 *        grantee
 */
final def grant(tableName: String,
                grantee: User,
                privileges: GrantPrivilegeType*) {
  grant(tableName, Array(grantee), privileges: _*)
}

/**
 * Add a grant on a table to a single grantee named by a string.
 *
 * @param tableName the table name to add the grants to
 * @param grantee the grantee to grant the privileges to
 * @param privileges a non-empty array of privileges to grant to the
 *        grantee
 */
final def grant(tableName: String,
                grantee: String,
                privileges: GrantPrivilegeType*) {
  grant(tableName,
        Array[User](adapter.userFactory.nameToUser(grantee)),
        privileges: _*)
}
/**
 * Remove privileges on a table from one or more grantees.
 *
 * @param tableName the table name to remove the grants from
 * @param grantees a non-empty array of grantees
 * @param privileges a non-empty array of privileges to remove from
 *        the grantees
 */
final def revoke(tableName: String,
                 grantees: Array[User],
                 privileges: GrantPrivilegeType*) {
  if (grantees.isEmpty) {
    throw new IllegalArgumentException("Revoking privileges requires " +
                                       "at least one grantee.")
  }
  if (privileges.isEmpty) {
    throw new IllegalArgumentException("Revoking privileges requires " +
                                       "at least one privilege.")
  }
  execute(adapter.revokeOnTableSql(tableName, grantees, privileges: _*))
}

/**
 * Remove privileges on a table from one or more grantees named by
 * strings.
 *
 * @param tableName the table name to remove the grants from
 * @param grantees a non-empty array of grantee names
 * @param privileges a non-empty array of privileges to remove from
 *        the grantees
 */
final def revoke(tableName: String,
                 grantees: Array[String],
                 privileges: GrantPrivilegeType*) {
  val users = grantees.map(adapter.userFactory.nameToUser(_))
  revoke(tableName, users, privileges: _*)
}

/**
 * Remove privileges on a table from a single grantee.
 *
 * @param tableName the table name to remove the grants from
 * @param grantee the grantee to revoke privileges from
 * @param privileges a non-empty array of privileges to remove from
 *        the grantee
 */
final def revoke(tableName: String,
                 grantee: User,
                 privileges: GrantPrivilegeType*) {
  revoke(tableName, Array(grantee), privileges: _*)
}

/**
 * Remove privileges on a table from a single grantee named by a
 * string.
 *
 * @param tableName the table name to remove the grants from
 * @param grantee the grantee to revoke privileges from
 * @param privileges a non-empty array of privileges to remove from
 *        the grantee
 */
final def revoke(tableName: String,
                 grantee: String,
                 privileges: GrantPrivilegeType*) {
  revoke(tableName,
         Array[User](adapter.userFactory.nameToUser(grantee)),
         privileges: _*)
}
/**
 * Grant one or more privileges on the schema to one or more grantees.
 *
 * @param grantees a non-empty array of grantees
 * @param privileges a non-empty array of privileges to grant to the
 *        grantees
 */
final def grantSchemaPrivilege(grantees: Array[User],
                               privileges: SchemaPrivilege*) {
  if (grantees.isEmpty) {
    throw new IllegalArgumentException("Granting privileges requires " +
                                       "at least one grantee.")
  }
  if (privileges.isEmpty) {
    throw new IllegalArgumentException("Granting privileges requires " +
                                       "at least one privilege.")
  }
  execute(adapter.grantOnSchemaSql(grantees, privileges: _*))
}

/**
 * Grant one or more privileges on the schema to one or more grantees
 * named by strings.
 *
 * @param grantees a non-empty array of grantee names
 * @param privileges a non-empty array of privileges to grant to the
 *        grantees
 */
final def grantSchemaPrivilege(grantees: Array[String],
                               privileges: SchemaPrivilege*) {
  val users = grantees.map(adapter.userFactory.nameToUser(_))
  grantSchemaPrivilege(users, privileges: _*)
}

/**
 * Grant one or more privileges on the schema to a single grantee.
 *
 * @param grantee the grantee to grant the privileges to
 * @param privileges a non-empty array of privileges to grant to the
 *        grantee
 */
final def grantSchemaPrivilege(grantee: User,
                               privileges: SchemaPrivilege*) {
  grantSchemaPrivilege(Array(grantee), privileges: _*)
}

/**
 * Grant one or more privileges on the schema to a single grantee
 * named by a string.
 *
 * @param grantee the grantee to grant the privileges to
 * @param privileges a non-empty array of privileges to grant to the
 *        grantee
 */
final def grantSchemaPrivilege(grantee: String,
                               privileges: SchemaPrivilege*) {
  grantSchemaPrivilege(adapter.userFactory.nameToUser(grantee),
                       privileges: _*)
}
/**
 * Revoke one or more schema privileges from one or more grantees.
 *
 * @param grantees a non-empty array of grantees
 * @param privileges a non-empty array of privileges to revoke from
 *        the grantees
 */
final def revokeSchemaPrivilege(grantees: Array[User],
                                privileges: SchemaPrivilege*) {
  if (grantees.isEmpty) {
    throw new IllegalArgumentException("Revoking privileges requires " +
                                       "at least one grantee.")
  }
  if (privileges.isEmpty) {
    throw new IllegalArgumentException("Revoking privileges requires " +
                                       "at least one privilege.")
  }
  execute(adapter.revokeOnSchemaSql(grantees, privileges: _*))
}

/**
 * Revoke one or more schema privileges from one or more grantees
 * named by strings.
 *
 * @param grantees a non-empty array of grantee names
 * @param privileges a non-empty array of privileges to revoke from
 *        the grantees
 */
final def revokeSchemaPrivilege(grantees: Array[String],
                                privileges: SchemaPrivilege*) {
  val users = grantees.map(adapter.userFactory.nameToUser(_))
  revokeSchemaPrivilege(users, privileges: _*)
}

/**
 * Revoke one or more schema privileges from a single grantee.
 *
 * @param grantee the grantee to revoke the privileges from
 * @param privileges a non-empty array of privileges to revoke from
 *        the grantee
 */
final def revokeSchemaPrivilege(grantee: User,
                                privileges: SchemaPrivilege*) {
  revokeSchemaPrivilege(Array(grantee), privileges: _*)
}

/**
 * Revoke one or more schema privileges from a single grantee named
 * by a string.
 *
 * @param grantee the grantee to revoke the privileges from
 * @param privileges a non-empty array of privileges to revoke from
 *        the grantee
 */
final def revokeSchemaPrivilege(grantee: String,
                                privileges: SchemaPrivilege*) {
  revokeSchemaPrivilege(adapter.userFactory.nameToUser(grantee),
                        privileges: _*)
}
/**
 * Add a CHECK constraint on a table and one or more columns.  The
 * constraint name is automatically generated unless Name() is given
 * as an option.  If the database does not support CHECK constraints
 * the request is logged and ignored.
 *
 * @param on the table and columns to add the CHECK constraint on
 * @param expr the expression to check
 * @param options a possibly empty list of check options to customize
 *        the creation of the CHECK constraint
 */
def addCheck(on: On,
             expr: String,
             options: CheckOption*) {
  if (on.columnNames.isEmpty) {
    throw new IllegalArgumentException("Adding a check constraint " +
                                       "requires at least one column name " +
                                       "in the table adding the constraint.")
  }
  val (name, _) = adapter.generateCheckConstraintName(on, options: _*)
  val sql = s"ALTER TABLE ${adapter.quoteTableName(on.tableName)} " +
            s"ADD CONSTRAINT $name CHECK ($expr)"
  if (adapter.supportsCheckConstraints) {
    execute(sql)
  } else {
    logger.warn("Database does not support CHECK constraints; ignoring " +
                "request to add a CHECK constraint: {}",
                sql)
  }
}
/**
 * Remove a CHECK constraint on a table and one or more columns.  The
 * constraint name is automatically generated unless Name() is given
 * as an option.  If the database does not support CHECK constraints
 * the request is logged and ignored.
 *
 * @param on the table and columns to remove the CHECK constraint
 *        from
 * @param options a possibly empty list of check options to customize
 *        the removal of the CHECK constraint
 */
def removeCheck(on: On,
                options: Name*) {
  if (on.columnNames.isEmpty) {
    throw new IllegalArgumentException("Removing a check constraint " +
                                       "requires at least one column " +
                                       "name in the table removing " +
                                       "the constraint.")
  }
  val (name, _) = adapter.generateCheckConstraintName(on, options: _*)
  val sql = s"ALTER TABLE ${adapter.quoteTableName(on.tableName)} " +
            s"DROP CONSTRAINT $name"
  if (adapter.supportsCheckConstraints) {
    execute(sql)
  } else {
    logger.warn("Database does not support CHECK constraints; ignoring " +
                "request to remove a CHECK constraint: {}",
                sql)
  }
}
}
| imageworks/scala-migrations | src/main/scala/com/imageworks/migration/Migration.scala | Scala | bsd-3-clause | 32,987 |
package org.jetbrains.plugins.scala
package lang.refactoring.changeSignature
import javax.swing.table.TableCellEditor
import com.intellij.codeInsight.daemon.impl.analysis.{FileHighlightingSetting, HighlightLevelUtil}
import com.intellij.openapi.project.Project
import com.intellij.psi._
import com.intellij.refactoring.changeSignature.ParameterTableModelBase.{NameColumn, TypeColumn}
import com.intellij.refactoring.changeSignature._
import com.intellij.util.ui.ColumnInfo
import org.jetbrains.plugins.scala.debugger.evaluation.ScalaCodeFragment
import org.jetbrains.plugins.scala.lang.refactoring.changeSignature.ScalaParameterTableModel._
import org.jetbrains.plugins.scala.lang.refactoring.ui.ScalaCodeFragmentTableCellEditor
import scala.collection.mutable.ArrayBuffer
/**
* Nikolay.Tropin
* 2014-08-29
*/
// Table model backing the change-signature dialog's parameter table for
// Scala methods.  Each row wraps a ScalaParameterInfo together with editable
// code fragments for the parameter's type and default value.
class ScalaParameterTableModel(typeContext: PsiElement,
                               defaultValueContext: PsiElement,
                               methodDescriptor: ScalaMethodDescriptor,
                               columnInfos: ColumnInfo[_, _]*)
  extends ParameterTableModelBase[ScalaParameterInfo, ScalaParameterTableModelItem](typeContext, defaultValueContext, columnInfos: _*) {

  val project = defaultValueContext.getProject

  // Parameters grouped by clause, as declared on the method being changed.
  val initialParams: Seq[Seq[ScalaParameterInfo]] = methodDescriptor.parameters

  // Last parameter of each clause except the final one.
  // NOTE(review): not referenced anywhere in this class as shown — possibly
  // dead or used via reflection/subclass; confirm before removing.
  private val lastParams = initialParams.flatMap(_.lastOption).dropRight(1)

  // Every code fragment created by createRowItem, retained so clear() can
  // reset their highlighting level later.
  private val codeFragments = ArrayBuffer[PsiElement]()

  // Convenience constructor with the standard name/type/default-value columns.
  def this(typeContext: PsiElement, defaultValueContext: PsiElement, methodDescriptor: ScalaMethodDescriptor) {
    this(typeContext, defaultValueContext, methodDescriptor,
      new ScalaNameColumn(typeContext.getProject),
      new ScalaTypeColumn(typeContext.getProject),
      new ScalaDefaultValueColumn(typeContext.getProject))
  }

  // Builds one table row: a (possibly blank) parameter info plus code
  // fragments for editing its type and default value in table cells.
  override def createRowItem(parameterInfo: ScalaParameterInfo): ScalaParameterTableModelItem = {
    // A null parameterInfo means a freshly added row; start from a blank info.
    val info = Option(parameterInfo).getOrElse(ScalaParameterInfo(project))
    val paramTypeCodeFragment = new ScalaCodeFragment(project, info.typeText)
    val defaultValueCodeFragment = new ScalaCodeFragment(project, info.getDefaultValue)
    val fragments = Seq(paramTypeCodeFragment, defaultValueCodeFragment)
    codeFragments ++= fragments
    // Fragments are edited inline in cells; skip regular file highlighting.
    fragments.foreach(HighlightLevelUtil.forceRootHighlighting(_, FileHighlightingSetting.SKIP_HIGHLIGHTING))
    paramTypeCodeFragment.setContext(typeContext.getParent, typeContext)
    defaultValueCodeFragment.setContext(defaultValueContext.getParent, defaultValueContext)
    defaultValueCodeFragment.setVisibilityChecker(JavaCodeFragment.VisibilityChecker.EVERYTHING_VISIBLE)
    // The head of every clause after the first marks the start of a new
    // parameter clause in the rendered signature.
    val newClauseParams = initialParams.flatMap(_.headOption).drop(1)
    val startsNewClause = newClauseParams.contains(parameterInfo)
    new ScalaParameterTableModelItem(info, paramTypeCodeFragment, defaultValueCodeFragment, startsNewClause)
  }

  // Resets highlighting on all fragments created by this model.
  // NOTE(review): sets FileHighlightingSetting.NONE — presumably disables
  // analysis when the model is discarded; confirm intended setting.
  def clear(): Unit = {
    codeFragments.foreach(HighlightLevelUtil.forceRootHighlighting(_, FileHighlightingSetting.NONE))
  }
}
object ScalaParameterTableModel {

  /** Column showing the parameter name. */
  class ScalaNameColumn(project: Project) extends NameColumn[ScalaParameterInfo, ScalaParameterTableModelItem](project)

  /** Column that edits the parameter type as a Scala code fragment. */
  class ScalaTypeColumn(project: Project) extends TypeColumn[ScalaParameterInfo, ScalaParameterTableModelItem](project, ScalaFileType.SCALA_FILE_TYPE) {
    override def doCreateEditor(modelItem: ScalaParameterTableModelItem): TableCellEditor =
      new ScalaCodeFragmentTableCellEditor(project)
  }

  /** Column that edits the parameter's default value as a Scala code fragment. */
  class ScalaDefaultValueColumn(project: Project)
          extends ParameterTableModelBase.DefaultValueColumn[ScalaParameterInfo, ScalaParameterTableModelItem](project, ScalaFileType.SCALA_FILE_TYPE) {
    override def doCreateEditor(modelItem: ScalaParameterTableModelItem): TableCellEditor =
      new ScalaCodeFragmentTableCellEditor(project)
  }
}
| LPTK/intellij-scala | src/org/jetbrains/plugins/scala/lang/refactoring/changeSignature/ScalaParameterTableModel.scala | Scala | apache-2.0 | 3,816 |
package com.evojam.mongodb.client.util
import scala.collection.JavaConversions._
import com.mongodb.async.{ AsyncBatchCursor, SingleResultCallback }
import rx.lang.scala.Observable
import rx.lang.scala.Subscriber
trait AsyncEnriched {

  /** Enriches the async driver's [[AsyncBatchCursor]] with rx Observable views. */
  implicit class AsyncBatchCursorEnriched[T](wrapped: AsyncBatchCursor[T]) {

    /**
     * Observable emitting only the first element of the cursor, closing the
     * underlying cursor once that element has been delivered.
     */
    def takeFirstAsObservable = {
      wrapped.setBatchSize(1)
      // BUG FIX: `doOnNext` does not mutate its receiver — it returns a NEW
      // Observable. The original code discarded that result, so the
      // close-on-first-element side effect was silently dropped and the cursor
      // was never closed. Chain the operators instead.
      asObservable
        .doOnNext(_ => wrapped.close())
        .take(1)
    }

    /** Observable emitting every element of the cursor, one `onNext` per element. */
    def asObservable: Observable[T] =
      Observable[T](subscriber =>
        wrapped.next(onNextCallback(subscriber, _.foreach(subscriber.onNext))))

    /** Observable emitting the cursor's elements in chunks of `batchSize`. */
    def asBatchObservable(batchSize: Int): Observable[List[T]] = {
      wrapped.setBatchSize(batchSize)
      Observable[List[T]](subscriber =>
        wrapped.next(onNextCallback(subscriber, chunk => subscriber.onNext(chunk.toList))))
    }

    // Adapts the driver's callback style to rx: forwards each non-null batch
    // through `f`, re-arms itself until the cursor reports closed (then
    // completes the subscriber), and on error notifies the subscriber and
    // closes the cursor.
    private def onNextCallback[R](
      subscriber: Subscriber[R],
      f: java.util.List[T] => Unit): SingleResultCallback[java.util.List[T]] =
      new SingleResultCallback[java.util.List[T]] {
        override def onResult(result: java.util.List[T], t: Throwable) =
          if (t == null) {
            Option(result).foreach(f)
            if (!wrapped.isClosed()) {
              wrapped.next(onNextCallback(subscriber, f))
            } else {
              subscriber.onCompleted()
            }
          } else {
            subscriber.onError(t)
            wrapped.close()
          }
      }
  }
}
| evojam/mongodb-driver-scala | src/main/scala/com/evojam/mongodb/client/util/AsyncEnriched.scala | Scala | apache-2.0 | 1,511 |
package org.leialearns.crystallize.util
import java.io.{StringWriter, Writer}
import scala.language.implicitConversions
/**
 * A value that can render itself as a textual description.
 */
trait Describable {

  /** The full textual description of this value. */
  def description: String

  /** Length of the description — units not evident from this trait alone; TODO confirm. */
  def descriptionLength: Long

  /** Writes the description to `writer`; the default simply emits `description`. */
  def writeDescription(writer: Writer): Unit = {
    writer.write(description)
  }
}
/**
 * A [[Describable]] assembled from sub-parts, written out separated by single
 * spaces. The description length is the sum of the parts' lengths.
 */
trait CompositeDescribable extends Describable {

  /** Renders the composite by delegating to [[writeDescription]]. */
  override def description: String = {
    val writer = new StringWriter
    writeDescription(writer)
    writer.toString
  }

  /** Sum of the parts' lengths; 0 for an empty composite. */
  override def descriptionLength: Long = {
    parts().map(_.descriptionLength).sum
  }

  /**
   * Writes each part, separated by single spaces.
   *
   * ROBUSTNESS FIX: the original called `stream.head` unconditionally, which
   * throws NoSuchElementException when `parts()` is empty (and `description`
   * inherited that crash). An empty composite now simply writes nothing,
   * consistent with `descriptionLength` returning 0.
   */
  override def writeDescription(writer: Writer): Unit = {
    val stream = parts()
    stream.headOption.foreach { first =>
      first.writeDescription(writer)
      stream.drop(1).foreach { d => writer.append(' '); d.writeDescription(writer) }
    }
  }

  /** Supplier of the sub-parts; a thunk so parts may be produced lazily. */
  def parts: () => Stream[Describable]
}
object Describable {

  /** Bundles a collection of describables into a single composite describable. */
  def fromParts(traversable: Traversable[Describable]): Describable = {
    val partsThunk: () => Stream[Describable] = () => traversable.toStream
    new CompositeDescribable {
      override def parts: () => Stream[Describable] = partsThunk
    }
  }

  /** Implicitly views any value with a [[PrefixFreeIntegral]] instance as a Describable. */
  implicit def toDescribable[T](n: T)(implicit prefixFree: PrefixFreeIntegral[T]): Describable = {
    new Describable {
      override def descriptionLength: Long = prefixFree.descriptionLength(n)
      override def description: String = prefixFree.prefixEncode(n)
    }
  }

  /** Implicitly views a Boolean as a single-symbol Describable. */
  implicit def booleanToDescribable(b: Boolean): Describable = {
    new Describable {
      override def descriptionLength: Long = 1
      override def description: String = Bit(b).asChar.toString
    }
  }
}
package blended.security.ssl.internal
import java.io.File
import java.util.{Calendar, Date, GregorianCalendar}
import blended.testsupport.BlendedTestSupport
import blended.testsupport.pojosr.PojoSrTestHelper
import org.scalatest.FreeSpec
class CertificateRefresherSpec extends FreeSpec with PojoSrTestHelper {

  /** Test output directory used as base dir for the Pojo registry helper. */
  override def baseDir: String = new File(BlendedTestSupport.projectTestOutput).getAbsolutePath()

  // Builds a Date with the seconds and milliseconds fields zeroed out.
  // NOTE(review): GregorianCalendar takes absolute years, so `100` here is the
  // year 100 AD, not 2000 — the assertions only compare these dates with each
  // other, so the absolute epoch does not matter for the tests.
  def newDate(year: Int, month: Int, day: Int, hour: Int = 0, minute: Int = 0): Date = {
    val calendar = new GregorianCalendar(year, month, day, hour, minute, 0)
    calendar.set(Calendar.MILLISECOND, 0)
    calendar.getTime()
  }

  // Comparison sugar for java.util.Date.
  // NOTE(review): none of these operators are used in this spec — possibly
  // leftover; confirm nothing else relies on them before removing.
  implicit class RichDate(self: Date) {
    def <(other: Date): Boolean = self.before(other)
    def <=(other: Date): Boolean = !self.after(other)
    def >(other: Date): Boolean = self.after(other)
    def >=(other: Date): Boolean = !self.before(other)
  }

  "Automatic certificate refresher" - {

    "Calculation of the best next schedule time for an refresh attempt" - {

      // Per the expectations below: refresh 10 days before expiry, at 01:30.
      val refresherConfig = RefresherConfig(
        minValidDays = 10,
        hourOfDay = 1,
        minuteOfDay = 30,
        onRefreshAction = RefresherConfig.Refresh)

      "cert end + threshold is in future" in {
        val now = newDate(100, 5, 1)
        val validEnd = newDate(100, 5, 20)
        val scheduled = CertificateRefresher.nextRefreshScheduleTime(validEnd, refresherConfig, Some(now))
        assert(scheduled === newDate(100, 5, 10, 1, 30))
      }

      "cert end is in future, cert end + threshold not" - {

        "schedule should be on same day" in {
          val now = newDate(100, 5, 1)
          val validEnd = newDate(100, 5, 5)
          val scheduled = CertificateRefresher.nextRefreshScheduleTime(validEnd, refresherConfig, Some(now))
          assert(scheduled === newDate(100, 5, 1, 1, 30))
        }

        "schedule should be on next day" in {
          val now = newDate(100, 5, 1, 2, 0)
          val validEnd = newDate(100, 5, 5)
          val scheduled = CertificateRefresher.nextRefreshScheduleTime(validEnd, refresherConfig, Some(now))
          assert(scheduled === newDate(100, 5, 2, 1, 30))
        }
      }

      "cert end is in not in the future" - {

        "schedule should be on same day" in {
          val now = newDate(100, 5, 1)
          val validEnd = newDate(100, 4, 20)
          val scheduled = CertificateRefresher.nextRefreshScheduleTime(validEnd, refresherConfig, Some(now))
          assert(scheduled === newDate(100, 5, 1, 1, 30))
        }

        "schedule should be on next day" in {
          val now = newDate(100, 5, 1, 2, 0)
          val validEnd = newDate(100, 4, 20)
          val scheduled = CertificateRefresher.nextRefreshScheduleTime(validEnd, refresherConfig, Some(now))
          assert(scheduled === newDate(100, 5, 2, 1, 30))
        }
      }
    }
  }
}
| lefou/blended | blended.security.ssl/src/test/scala/blended/security/ssl/internal/CertificateRefresherSpec.scala | Scala | apache-2.0 | 2,829 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets matching specific criteria, giving a basic overview of the dataset's contents without deeper analysis.