code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
package monix.benchmarks
import java.util.concurrent.TimeUnit
import monix.eval.Task
import org.openjdk.jmh.annotations._
import scala.concurrent.Await
import scala.concurrent.duration.Duration
/** To do comparative benchmarks between versions:
*
* benchmarks/run-benchmark TaskMapCallsBenchmark
*
* This will generate results in `benchmarks/results`.
*
* Or to run the benchmark from within SBT:
*
* jmh:run -i 10 -wi 10 -f 2 -t 1 monix.benchmarks.TaskMapCallsBenchmark
*
* Which means "10 iterations", "10 warm-up iterations", "2 forks", "1 thread".
* Please note that benchmarks should be usually executed at least in
* 10 iterations (as a rule of thumb), but more is better.
*/
@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
class TaskMapCallsBenchmark {
import TaskMapCallsBenchmark.test
@Benchmark
def one(): Long = test(12000, 1)
@Benchmark
def batch30(): Long = test(12000 / 30, 30)
@Benchmark
def batch120(): Long = test(12000 / 120, 120)
}
object TaskMapCallsBenchmark {
def test(iterations: Int, batch: Int): Long = {
val f = (x: Int) => x + 1
var task = Task.eval(0)
var j = 0
while (j < batch) { task = task.map(f); j += 1 }
var sum = 0L
var i = 0
while (i < iterations) {
sum += Await.result(task.runToFuture, Duration.Inf)
i += 1
}
sum
}
}
*/ | Wogan/monix | benchmarks/shared/src/main/scala/monix/benchmarks/TaskMapCallsBenchmark.scala | Scala | apache-2.0 | 2,090 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.logical
import org.apache.flink.table.planner.plan.schema.LegacyTableSourceTable
import org.apache.flink.table.planner.plan.utils._
import org.apache.flink.table.sources._
import org.apache.flink.util.CollectionUtil
import org.apache.calcite.plan.RelOptRule.{none, operand}
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall}
import org.apache.calcite.rel.logical.{LogicalProject, LogicalTableScan}
import org.apache.calcite.rel.rules.ProjectRemoveRule
import org.apache.flink.table.api.TableException
/**
* Planner rule that pushes a [[LogicalProject]] into a [[LogicalTableScan]]
* which wraps a [[ProjectableTableSource]] or a [[NestedFieldsProjectableTableSource]].
*/
class PushProjectIntoLegacyTableSourceScanRule extends RelOptRule(
  operand(classOf[LogicalProject],
    operand(classOf[LogicalTableScan], none)),
  "PushProjectIntoLegacyTableSourceScanRule") {

  /**
   * Matches only scans over a [[LegacyTableSourceTable]] whose source supports
   * projection pushdown and does not define time indicator attributes.
   */
  override def matches(call: RelOptRuleCall): Boolean = {
    val scan: LogicalTableScan = call.rel(1)
    scan.getTable.unwrap(classOf[LegacyTableSourceTable[_]]) match {
      case table: LegacyTableSourceTable[_] =>
        table.tableSource match {
          // projection pushdown is not supported for sources that provide time indicators
          case r: DefinedRowtimeAttributes if !CollectionUtil.isNullOrEmpty(
            r.getRowtimeAttributeDescriptors) => false
          case p: DefinedProctimeAttribute if p.getProctimeAttribute != null => false
          case _: ProjectableTableSource[_] => true
          case _: NestedFieldsProjectableTableSource[_] => true
          case _ => false
        }
      case _ => false
    }
  }

  // Note: explicit `: Unit =` replaces the deprecated Scala procedure syntax.
  override def onMatch(call: RelOptRuleCall): Unit = {
    val project: LogicalProject = call.rel(0)
    val scan: LogicalTableScan = call.rel(1)

    val usedFields = RexNodeExtractor.extractRefInputFields(project.getProjects)
    // if no fields can be projected, we keep the original plan.
    if (scan.getRowType.getFieldCount == usedFields.length) {
      return
    }

    val tableSourceTable = scan.getTable.unwrap(classOf[LegacyTableSourceTable[_]])
    val oldTableSource = tableSourceTable.tableSource

    // Push the projection into the source. Nested pushdown is preferred when
    // the source supports it; a plain TableSource cannot accept the projection.
    val (newTableSource, isProjectSuccess) = oldTableSource match {
      case nested: NestedFieldsProjectableTableSource[_] =>
        val nestedFields = RexNodeExtractor.extractRefNestedInputFields(
          project.getProjects, usedFields)
        (nested.projectNestedFields(usedFields, nestedFields), true)
      case projecting: ProjectableTableSource[_] =>
        (projecting.projectFields(usedFields), true)
      case nonProjecting: TableSource[_] =>
        // projection cannot be pushed to TableSource
        (nonProjecting, false)
    }

    // A pushdown-capable source must report the applied projection in its
    // digest, otherwise the planner cannot distinguish the two scans.
    if (isProjectSuccess
      && newTableSource.explainSource() == oldTableSource.explainSource()) {
      throw new TableException("Failed to push project into table source! "
        + "table source with pushdown capability must override and change "
        + "explainSource() API to explain the pushdown applied!")
    }

    // check that table schema of the new table source is identical to original
    if (oldTableSource.getTableSchema != newTableSource.getTableSchema) {
      throw new TableException("TableSchema of ProjectableTableSource must not be modified " +
        "by projectFields() call. This is a bug in the implementation of the TableSource " +
        s"${oldTableSource.getClass.getCanonicalName}.")
    }

    // project push down does not change the statistic, we can reuse origin statistic
    val newTableSourceTable = tableSourceTable.copy(
      newTableSource,
      usedFields)
    // row type is changed after project push down
    val newScan = new LogicalTableScan(scan.getCluster, scan.getTraitSet, newTableSourceTable)
    // rewrite input field in projections
    val newProjects = RexNodeRewriter.rewriteWithNewFieldInput(project.getProjects, usedFields)
    val newProject = project.copy(
      project.getTraitSet,
      newScan,
      newProjects,
      project.getRowType)

    if (ProjectRemoveRule.isTrivial(newProject)) {
      // drop project if the transformed program merely returns its input
      call.transformTo(newScan)
    } else {
      call.transformTo(newProject)
    }
  }
}
/** Singleton instance used when registering this rule with planner rule sets. */
object PushProjectIntoLegacyTableSourceScanRule {
  val INSTANCE: RelOptRule = new PushProjectIntoLegacyTableSourceScanRule
}
| tzulitai/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/rules/logical/PushProjectIntoLegacyTableSourceScanRule.scala | Scala | apache-2.0 | 5,220 |
package scala.slick.memory
import scala.language.{implicitConversions, existentials}
import scala.collection.mutable.Builder
import scala.reflect.ClassTag
import scala.util.control.NonFatal
import scala.slick.action._
import scala.slick.ast._
import TypeUtil._
import scala.slick.compiler._
import scala.slick.profile.{RelationalDriver, RelationalProfile, Capability}
import scala.slick.relational.{ResultConverterCompiler, ResultConverter, CompiledMapping}
import scala.slick.util.{DumpInfo, ??}
/** A profile and driver for interpreted queries on top of the in-memory database. */
trait MemoryProfile extends RelationalProfile with MemoryQueryingProfile { driver: MemoryDriver =>

  // Concrete profile type members: everything runs against the in-memory HeapBackend.
  type SchemaDescription = SchemaDescriptionDef
  type InsertInvoker[T] = InsertInvokerDef[T]
  type QueryExecutor[R] = QueryExecutorDef[R]
  type Backend = HeapBackend
  val backend: Backend = HeapBackend

  // User-facing import bundles (legacy `simple`/`Implicit` style and the newer `api`).
  val simple: SimpleQL = new SimpleQL {}
  val Implicit: Implicits = simple
  val api: API = new API {}

  // Compiler pipelines: queries append an interpreter code-gen phase; updates and
  // deletes reuse the base pipeline; inserts compile to an in-memory row writer
  // that skips auto-increment columns.
  lazy val queryCompiler = compiler + new MemoryCodeGen
  lazy val updateCompiler = compiler
  lazy val deleteCompiler = compiler
  lazy val insertCompiler = QueryCompiler(Phase.assignUniqueSymbols, new InsertCompiler(InsertCompiler.NonAutoInc), new MemoryInsertCodeGen)

  override protected def computeCapabilities = super.computeCapabilities ++ MemoryProfile.capabilities.all

  def createQueryExecutor[R](tree: Node, param: Any): QueryExecutor[R] = new QueryExecutorDef[R](tree, param)
  def createInsertInvoker[T](tree: Node): InsertInvoker[T] = new InsertInvokerDef[T](tree)
  def createDDLInvoker(sd: SchemaDescription): DDLInvoker = sd.asInstanceOf[DDLInvoker]
  // Sequences are not supported by the heap backend; `??` throws.
  def buildSequenceSchemaDescription(seq: Sequence[_]): SchemaDescription = ??
  def buildTableSchemaDescription(table: Table[_]): SchemaDescription = new TableDDL(table)

  // Action-based API type members and their factory methods.
  type QueryActionExtensionMethods[R, S <: NoStream] = QueryActionExtensionMethodsImpl[R, S]
  type StreamingQueryActionExtensionMethods[R, T] = StreamingQueryActionExtensionMethodsImpl[R, T]
  type SchemaActionExtensionMethods = SchemaActionExtensionMethodsImpl
  type InsertActionExtensionMethods[T] = InsertActionExtensionMethodsImpl[T]
  def createQueryActionExtensionMethods[R, S <: NoStream](tree: Node, param: Any): QueryActionExtensionMethods[R, S] =
    new QueryActionExtensionMethods[R, S](tree, param)
  def createStreamingQueryActionExtensionMethods[R, T](tree: Node, param: Any): StreamingQueryActionExtensionMethods[R, T] =
    new StreamingQueryActionExtensionMethods[R, T](tree, param)
  def createSchemaActionExtensionMethods(schema: SchemaDescription): SchemaActionExtensionMethods =
    new SchemaActionExtensionMethodsImpl(schema)
  def createInsertActionExtensionMethods[T](compiled: CompiledInsert): InsertActionExtensionMethods[T] =
    new InsertActionExtensionMethodsImpl[T](compiled)

  lazy val MappedColumnType = new MappedColumnTypeFactory

  /** Factory for bidirectionally mapped column types built on an existing base type. */
  class MappedColumnTypeFactory extends super.MappedColumnTypeFactory {
    def base[T : ClassTag, U : BaseColumnType](tmap: T => U, tcomap: U => T): BaseColumnType[T] = {
      assertNonNullType(implicitly[BaseColumnType[U]])
      new MappedColumnType(implicitly[BaseColumnType[U]], tmap, tcomap)
    }
  }

  /** Column type that stores values as `U` via `baseType`, converting with
    * `toBase`/`toMapped`; ordering is delegated to the base representation. */
  class MappedColumnType[T, U](val baseType: ColumnType[U], toBase: T => U, toMapped: U => T)(implicit val classTag: ClassTag[T]) extends ScalaType[T] with BaseTypedType[T] {
    def nullable: Boolean = baseType.nullable
    def ordered: Boolean = baseType.ordered
    def scalaOrderingFor(ord: Ordering): scala.math.Ordering[T] = new scala.math.Ordering[T] {
      val uOrdering = baseType.scalaOrderingFor(ord)
      def compare(x: T, y: T): Int = uOrdering.compare(toBase(x), toBase(y))
    }
  }

  trait Implicits extends super[RelationalProfile].Implicits with super[MemoryQueryingProfile].Implicits {
    implicit def ddlToDDLInvoker(sd: SchemaDescription): DDLInvoker = createDDLInvoker(sd)
  }
  trait SimpleQL extends super[RelationalProfile].SimpleQL with super[MemoryQueryingProfile].SimpleQL with Implicits
  trait API extends super[RelationalProfile].API with super[MemoryQueryingProfile].API

  // Interpreter that additionally applies the compiled result mapping when the
  // tree is a ResultSetMapping over a collection type.
  protected def createInterpreter(db: Backend#Database, param: Any): QueryInterpreter = new QueryInterpreter(db, param) {
    override def run(n: Node) = n match {
      case ResultSetMapping(_, from, CompiledMapping(converter, _)) :@ CollectionType(cons, el) =>
        val fromV = run(from).asInstanceOf[TraversableOnce[Any]]
        val b = cons.createBuilder(el.classTag).asInstanceOf[Builder[Any, Any]]
        b ++= fromV.map(v => converter.asInstanceOf[ResultConverter[MemoryResultConverterDomain, _]].read(v.asInstanceOf[QueryInterpreter.ProductValue]))
        b.result()
      case n => super.run(n)
    }
  }

  class QueryExecutorDef[R](tree: Node, param: Any) extends super.QueryExecutorDef[R] {
    def run(implicit session: Backend#Session): R = createInterpreter(session.database, param).run(tree).asInstanceOf[R]
  }

  /** Invoker that converts values with the compiled mapping and appends rows
    * directly to the heap table. */
  class InsertInvokerDef[T](tree: Node) extends super.InsertInvokerDef[T] {
    // Destructures the compiled insert tree; fails fast if the shape is unexpected.
    protected[this] val ResultSetMapping(_, Insert(_, table: TableNode, _), CompiledMapping(converter, _)) = tree
    type SingleInsertResult = Unit
    type MultiInsertResult = Unit
    def += (value: T)(implicit session: Backend#Session) {
      val htable = session.database.getTable(table.tableName)
      val buf = htable.createInsertRow
      converter.asInstanceOf[ResultConverter[MemoryResultConverterDomain, Any]].set(value, buf)
      htable.append(buf)
    }
    def ++= (values: Iterable[T])(implicit session: Backend#Session): Unit =
      values.foreach(this += _)
  }

  /** DDL that can be concatenated with `++`; create/drop run both parts in order. */
  abstract class DDL extends SchemaDescriptionDef with DDLInvoker { self =>
    def ++(other: SchemaDescription): SchemaDescription = {
      val d = Implicit.ddlToDDLInvoker(other)
      new DDL {
        def create(implicit session: Backend#Session) { self.create; d.create }
        def drop(implicit session: Backend#Session) { self.drop; d.drop }
      }
    }
  }

  class TableDDL(table: Table[_]) extends DDL {
    def create(implicit session: Backend#Session): Unit =
      session.database.createTable(table.tableName,
        table.create_*.map { fs => new HeapBackend.Column(fs, typeInfoFor(fs.tpe)) }.toIndexedSeq,
        table.indexes.toIndexedSeq, table.tableConstraints.toIndexedSeq)
    def drop(implicit session: Backend#Session): Unit =
      session.database.dropTable(table.tableName)
  }

  type DriverAction[-E <: Effect, +R, +S <: NoStream] = DriverActionDef[E, R, S]
  type StreamingDriverAction[-E <: Effect, +R, +T] = StreamingDriverActionDef[E, R, T]

  // Lifts a session-consuming function into a synchronous database action.
  protected[this] def dbAction[E <: Effect, R, S <: NoStream](f: Backend#Session => R): DriverAction[E, R, S] = new DriverAction[E, R, S] with SynchronousDatabaseAction[Backend#This, E, R, S] {
    def run(ctx: ActionContext[Backend]): R = f(ctx.session)
    def getDumpInfo = DumpInfo("MemoryProfile.DriverAction")
  }

  /** Read action that can materialize the full result (`run`) or stream it
    * incrementally, keeping the iterator as stream state between emissions. */
  class StreamingQueryAction[R, T](tree: Node, param: Any) extends StreamingDriverAction[Effect.Read, R, T] with SynchronousDatabaseAction[Backend#This, Effect.Read, R, Streaming[T]] {
    type StreamState = Iterator[T]
    protected[this] def getIterator(ctx: ActionContext[Backend]): Iterator[T] = {
      val inter = createInterpreter(ctx.session.database, param)
      val ResultSetMapping(_, from, CompiledMapping(converter, _)) = tree
      val pvit = inter.run(from).asInstanceOf[TraversableOnce[QueryInterpreter.ProductValue]].toIterator
      pvit.map(converter.asInstanceOf[ResultConverter[MemoryResultConverterDomain, T]].read _)
    }
    def run(ctx: ActionContext[Backend]): R =
      createInterpreter(ctx.session.database, param).run(tree).asInstanceOf[R]
    // Emits up to `limit` elements; resumes from the saved iterator if present
    // and returns null once the stream is exhausted.
    override def emitStream(ctx: StreamingActionContext[Backend], limit: Long, state: StreamState): StreamState = {
      val it = if(state ne null) state else getIterator(ctx)
      var count = 0L
      while(count < limit && it.hasNext) {
        count += 1
        ctx.emit(it.next)
      }
      if(it.hasNext) it else null
    }
    // First element; throws NoSuchElementException on an empty result.
    def head: DriverAction[Effect.Read, T, NoStream] = new DriverAction[Effect.Read, T, NoStream] with SynchronousDatabaseAction[Backend#This, Effect.Read, T, NoStream] {
      def run(ctx: ActionContext[Backend]): T = getIterator(ctx).next
      def getDumpInfo = DumpInfo("MemoryProfile.StreamingQueryAction.first")
    }
    // First element as an Option; None on an empty result.
    def headOption: DriverAction[Effect.Read, Option[T], NoStream] = new DriverAction[Effect.Read, Option[T], NoStream] with SynchronousDatabaseAction[Backend#This, Effect.Read, Option[T], NoStream] {
      def run(ctx: ActionContext[Backend]): Option[T] = {
        val it = getIterator(ctx)
        if(it.hasNext) Some(it.next) else None
      }
      def getDumpInfo = DumpInfo("MemoryProfile.StreamingQueryAction.firstOption")
    }
    def getDumpInfo = DumpInfo("MemoryProfile.StreamingQueryAction")
  }

  class QueryActionExtensionMethodsImpl[R, S <: NoStream](tree: Node, param: Any) extends super.QueryActionExtensionMethodsImpl[R, S] {
    def result: DriverAction[Effect.Read, R, S] =
      new StreamingQueryAction[R, Nothing](tree, param).asInstanceOf[DriverAction[Effect.Read, R, S]]
  }
  class StreamingQueryActionExtensionMethodsImpl[R, T](tree: Node, param: Any) extends QueryActionExtensionMethodsImpl[R, Streaming[T]](tree, param) with super.StreamingQueryActionExtensionMethodsImpl[R, T] {
    override def result: StreamingDriverAction[Effect.Read, R, T] = super.result.asInstanceOf[StreamingDriverAction[Effect.Read, R, T]]
  }
  class SchemaActionExtensionMethodsImpl(schema: SchemaDescription) extends super.SchemaActionExtensionMethodsImpl {
    def create = dbAction(createDDLInvoker(schema).create(_))
    def drop = dbAction(createDDLInvoker(schema).drop(_))
  }
  class InsertActionExtensionMethodsImpl[T](compiled: CompiledInsert) extends super.InsertActionExtensionMethodsImpl[T] {
    protected[this] val inv = createInsertInvoker[T](compiled)
    type SingleInsertResult = Unit
    type MultiInsertResult = Unit
    def += (value: T) = dbAction(inv.+=(value)(_))
    def ++= (values: Iterable[T]) = dbAction(inv.++=(values)(_))
  }
}
/** Capability flags advertised by [[MemoryProfile]]. */
object MemoryProfile {
  object capabilities {
    /** Supports all MemoryProfile features which do not have separate capability values */
    val other = Capability("memory.other")
    /** All MemoryProfile capabilities */
    val all = Set(other)
  }
}
trait MemoryDriver extends RelationalDriver with MemoryQueryingDriver with MemoryProfile { driver =>
  override val profile: MemoryProfile = this

  /** Compiles the mapping side of an Insert into converters that write values
    * into a heap-backend row buffer at the table's column positions. */
  class InsertMappingCompiler(insert: Insert) extends ResultConverterCompiler[MemoryResultConverterDomain] {
    // Destructures the insert node; fails fast if the shape is unexpected.
    val Insert(_, table: TableNode, ProductNode(cols)) = insert
    // Maps each insertable column symbol to its index in the table definition.
    val tableColumnIdxs = table.driverTable.asInstanceOf[Table[_]].create_*.zipWithIndex.toMap
    def createColumnConverter(n: Node, idx: Int, column: Option[FieldSymbol]): ResultConverter[MemoryResultConverterDomain, _] =
      new InsertResultConverter(tableColumnIdxs(column.get))
    /** Write-only converter: inserts never read or update, so `read`/`update` throw. */
    class InsertResultConverter(tidx: Int) extends ResultConverter[MemoryResultConverterDomain, Any] {
      def read(pr: MemoryResultConverterDomain#Reader) = ??
      def update(value: Any, pr: MemoryResultConverterDomain#Updater) = ??
      def set(value: Any, pp: MemoryResultConverterDomain#Writer) = pp(tidx) = value
      override def getDumpInfo = super.getDumpInfo.copy(mainInfo = s"tidx=$tidx")
      def width = 1
    }
  }

  /** Code generator used by the insert compiler pipeline. */
  class MemoryInsertCodeGen extends CodeGen {
    def compileServerSideAndMapping(serverSide: Node, mapping: Option[Node], state: CompilerState) =
      (serverSide, mapping.map(new InsertMappingCompiler(serverSide.asInstanceOf[Insert]).compileMapping))
  }
}
object MemoryDriver extends MemoryDriver
| nuodb/slick | src/main/scala/scala/slick/memory/MemoryProfile.scala | Scala | bsd-2-clause | 11,736 |
/** Strategy mixin that self-destructs when the bot is nearly drained and
  * nothing worth pursuing is visible nearby. */
trait ExplodeWhenLowEnergy extends Strategy {
  // Cell types still worth staying alive for: everything non-empty except our
  // own mini-bots, walls, toxifera and snorgs.
  val InterestingCells = Cell.NonEmptyTypes - MyMiniBot - Wall - Toxifera - Snorg
  val LowEnergyThreshold = 80
  // All visible cells belonging to any of the interesting types.
  def interestingCellsInNeighbourhood(view: View) =
    InterestingCells.flatMap(cellType => view.allOfType(cellType))
  override def react(input: React): Seq[OutputOpcode] = {
    val runningOnFumes = input.energy < LowEnergyThreshold
    if (runningOnFumes && interestingCellsInNeighbourhood(input.view).isEmpty) Seq(Explode(10))
    else super.react(input)
  }
}
| nurkiewicz/scalatron-bot | src/main/scala/ExplodeWhenLowEnergy.scala | Scala | apache-2.0 | 507 |
package com.lynbrookrobotics.potassium.frc
import edu.wpi.first.wpilibj.AnalogInput
import org.scalacheck.Prop.forAll
import org.scalatest.FunSuite
import org.scalatest.prop.Checkers._
import org.mockito.Mockito._
import org.scalatest.mockito.MockitoSugar
import Implicits._
import squants.electro.Volts
/** Checks that the implicit AnalogInput wrappers convert raw volt readings
  * into squants `Volts` without altering the value. */
class InterfaceSignalsTest extends FunSuite with MockitoSugar {
  test("Analog input produces correct voltage") {
    val mockedAnalogIn = mock[AnalogInput]
    // NOTE(review): generated doubles may include NaN depending on the ScalaCheck
    // version, and NaN == NaN is false — confirm the generator excludes NaN.
    check(forAll { d: Double =>
      when(mockedAnalogIn.getVoltage).thenReturn(d)
      mockedAnalogIn.voltage.toVolts == d
    })
  }
  test("Analog input produces correct average voltage") {
    val mockedAnalogIn = mock[AnalogInput]
    check(forAll { d: Double =>
      when(mockedAnalogIn.getAverageVoltage).thenReturn(d)
      mockedAnalogIn.averageVoltage.toVolts == d
    })
  }
}
| Team846/potassium | frc/jvm/src/test/scala/com/lynbrookrobotics/potassium/frc/InterfaceSignalsTest.scala | Scala | mit | 859 |
package jp.mwsoft.r2c.aozora
import java.io.BufferedWriter
import java.io.File
import java.io.FileWriter
import java.io.IOException
import java.io.InputStream
import java.util.zip.ZipFile
import scala.collection.JavaConversions._
import scala.collection.mutable.ArrayBuffer
import scala.io.Codec.string2codec
import scala.io.Source
import scala.util.control.NonFatal
import org.apache.commons.cli.Options
import org.apache.commons.cli.PosixParser
import org.apache.commons.io.IOUtils
/** Command-line tool that walks downloaded Aozora Bunko card directories,
  * extracts the Shift_JIS text files inside their zip archives, strips the
  * Aozora markup/header/footer and writes cleaned plain text to out_dir. */
object Aozora2Text extends App {

  def usage() = println("Usage: ./activator 'run [--wakati] cards_dir out_dir'")

  // --- command-line parsing ---
  val options = new Options()
  options.addOption("wakati", false, "execute kuromoji wakati")
  val cmd = new PosixParser().parse(options, args)
  if (cmd.getArgs.length < 2) {
    usage()
    sys.exit()
  }

  // get args
  val cardsDir = new File(cmd.getArgs()(0))
  val outDir = new File(cmd.getArgs()(1))
  val wakati = cmd.hasOption("wakati")
  if (!cardsDir.exists)
    throw new IOException(s"cards_dir:${cardsDir} not found")  // fix: message said "cardd_dir"

  // create output directory
  if (outDir.exists && !outDir.isDirectory())
    throw new IOException(s"out_dir:${outDir} already exists")
  if (!outDir.exists && !outDir.mkdirs())
    throw new IOException(s"out_dir:${outDir} cannot create")

  // execute create file
  execute(cardsDir, outDir, wakati)

  /** Extracts every `.txt` entry of every zip under `cardsDir/<digits>/files/`
    * and writes its cleaned body text into `outDir`, then prints the number of
    * archives processed.
    *
    * @param cardsDir root directory of downloaded Aozora cards
    * @param outDir   directory the cleaned text files are written to
    * @param wakati   accepted for CLI compatibility; tokenization is not
    *                 performed here (TODO confirm intended behavior)
    */
  def execute(cardsDir: File, outDir: File, wakati: Boolean = false): Unit = {
    // read files
    var counter = 0
    for (
      card <- cardsDir.listFiles if card.isDirectory && card.getName.matches("[0-9]+");
      files <- card.listFiles if files.isDirectory && files.getName == "files";
      file <- files.listFiles if file.getName.endsWith(".zip");
      // skip unreadable/corrupt archives instead of aborting the whole run
      zipFile <- (try Some(new ZipFile(file.getPath)) catch { case NonFatal(_) => None })
    ) {
      counter += 1
      try {
        val entries = zipFile.entries
        while (entries.hasMoreElements) {
          val entry = entries.nextElement()
          if (entry.getName.endsWith(".txt")) {
            val content = inputStream2text(zipFile.getInputStream(entry))
            // write the cleaned text, keyed by the entry's base file name
            // (platform default charset — presumably intentional; confirm)
            val writer = new BufferedWriter(new FileWriter(new File(outDir, entry.getName.split("/").last)))
            try writer.write(content) finally writer.close()
          }
        }
      } finally zipFile.close() // fix: archives were previously never closed (fd leak)
    }
    println(counter)
  }

  /** Decodes a Shift_JIS stream, skips the Aozora title/header sections and the
    * trailing 底本 footer, and returns the cleaned body, newline-separated. */
  private def inputStream2text(is: InputStream) = try {
    val lines = IOUtils.lineIterator(is, "Shift_JIS").toList
    val size = lines.size
    // Simple state machine over the Aozora file layout: title block, header
    // block (both delimited by "-----" lines), body, then footer.
    object State {
      val title = 0
      val header = 1
      val content = 2
      val footer = 3
    }
    // Strips Aozora markup: 《…》 ruby readings, [...] annotations and '|' ruby
    // base markers. Fixes: the bracket pattern was malformed and the previous
    // pipe pattern ("|") matched only the empty string, i.e. removed nothing.
    // NOTE(review): the bracket/pipe delimiters may originally have been the
    // full-width characters [ ] | — confirm against real card data.
    def cleanLine(line: String) = {
      line.replaceAll("《[^》]+》", "").replaceAll("""\[[^\]]+\]""", "").replaceAll("""\|""", "")
    }
    var state = State.title
    val builder = new StringBuilder()
    for ((line, idx) <- lines.zipWithIndex) { // plain for: the previous `yield` built a discarded list
      if (state == State.title) {
        if (line.startsWith("------------------------------------"))
          state = State.header
      } else if (state == State.header) {
        if (line.startsWith("------------------------------------"))
          state = State.content
      } else if (state == State.content) {
        // footer heuristic: the 底本 (source book) note appears in the last ~20 lines
        if (idx + 20 > size && line.startsWith("底本:")) state = State.footer
        else if (line.trim.size > 0) builder append (cleanLine(line) + "\n") // fix: was "\\n" (literal backslash-n)
      }
    }
    builder.toString
  } finally is.close()
}
/*
* Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see http://www.gnu.org/licenses/agpl.html.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package generated.scala
/** Mutable dense matrix of doubles with row-major, flat backing storage.
  * Field and method names are dictated by Delite-generated code. */
class DoubleDenseMatrix(nRows: Int, nCols: Int) {
  // Dimensions; vars because generated code reassigns them in place.
  var _numRows = nRows
  var _numCols = nCols
  // One slot per element, initialized to 0.0.
  var _data: Array[Double] = new Array[Double](nRows * nCols)

  /**
   * Swaps in a new backing array wholesale.
   * Temporarily needed because it is hard-coded into DeliteOp code gen;
   * `len` is accepted but currently unused.
   */
  def unsafeSetData(xs: Array[Double], len: Int): Unit = {
    _data = xs
    // _length = len
  }

  /** Deep copy: same dimensions, independent backing array. */
  def Clone = {
    val copy = new DoubleDenseMatrix(_numRows, _numCols)
    copy._data = _data.clone
    copy
  }
}
| TiarkRompf/lancet | src/main/scala/generated/scala/DoubleDenseMatrix.scala | Scala | agpl-3.0 | 1,490 |
package com.sksamuel.elastic4s.get
import com.sksamuel.elastic4s.RefreshPolicy
import com.sksamuel.elastic4s.testkit.DockerTests
import org.scalatest.FlatSpec
import org.scalatest.Matchers._
import org.scalatest.mockito.MockitoSugar
import scala.util.Try
/** Integration tests for multi-get requests, executed against a Docker-hosted
  * Elasticsearch instance (via DockerTests). */
class MultiGetTest extends FlatSpec with MockitoSugar with DockerTests {

  // Best-effort cleanup from an earlier run; failure (index absent) is ignored.
  Try {
    client.execute {
      deleteIndex("coldplay")
    }.await
  }

  // Fixture index with stored fields so the storedFields() assertions below work.
  client.execute {
    createIndex("coldplay").shards(2).mappings(
      mapping("albums").fields(
        textField("name").stored(true),
        intField("year").stored(true)
      )
    )
  }.await

  // Seed documents; Immediate refresh makes them visible to the tests right away.
  client.execute(
    bulk(
      indexInto("coldplay" / "albums") id "1" fields("name" -> "parachutes", "year" -> 2000),
      indexInto("coldplay" / "albums") id "3" fields("name" -> "x&y", "year" -> 2005),
      indexInto("coldplay" / "albums") id "5" fields("name" -> "mylo xyloto", "year" -> 2011),
      indexInto("coldplay" / "albums") id "7" fields("name" -> "ghost stories", "year" -> 2015)
    ).refresh(RefreshPolicy.Immediate)
  ).await

  "a multiget request" should "retrieve documents by id" in {
    val resp = client.execute(
      multiget(
        get("3").from("coldplay/albums"),
        get("5") from "coldplay/albums",
        get("7") from "coldplay/albums"
      )
    ).await.result
    // Results come back in request order.
    resp.size shouldBe 3
    resp.items.head.id shouldBe "3"
    resp.items.head.exists shouldBe true
    resp.items(1).id shouldBe "5"
    resp.items(1).exists shouldBe true
    resp.items.last.id shouldBe "7"
    resp.items.last.exists shouldBe true
  }

  it should "set exists=false for missing documents" in {
    val resp = client.execute(
      multiget(
        get("3").from("coldplay/albums"),
        get("711111") from "coldplay/albums"
      )
    ).await.result
    // A missing id still yields an item, with exists=false.
    resp.size shouldBe 2
    resp.items.head.exists shouldBe true
    resp.items.last.exists shouldBe false
  }

  it should "retrieve documents by id with selected fields" in {
    val resp = client.execute(
      multiget(
        get("3") from "coldplay/albums" storedFields("name", "year"),
        get("5") from "coldplay/albums" storedFields "name"
      )
    ).await.result
    resp.size shouldBe 2
    // Stored field values are returned as lists.
    resp.items.head.fields shouldBe Map("year" -> List(2005), "name" -> List("x&y"))
    resp.items.last.fields shouldBe Map("name" -> List("mylo xyloto"))
  }

  it should "retrieve documents by id with fetchSourceContext" in {
    val resp = client.execute(
      multiget(
        get("3") from "coldplay/albums" fetchSourceContext Seq("name", "year"),
        get("5") from "coldplay/albums" fetchSourceContext Seq("name")
      )
    ).await.result
    resp.size shouldBe 2
    // Source filtering returns scalar values (unlike stored fields).
    resp.items.head.source shouldBe Map("year" -> 2005, "name" -> "x&y")
    resp.items.last.source shouldBe Map("name" -> "mylo xyloto")
  }
}
| Tecsisa/elastic4s | elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/get/MultiGetTest.scala | Scala | apache-2.0 | 2,835 |
package smartupedit
package build
import sbt._
import Keys._
object EditorProject {
def apply(name: String, path: String): Project = (
Project(name, file(path))
settings(commonSettings: _*)
)
}
| wookietreiber/smartupedit | project/project.scala | Scala | gpl-3.0 | 208 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.net.URLClassLoader
import org.scalatest.FunSuite
import org.apache.spark.{LocalSparkContext, SparkContext, SparkException, TestUtils}
import org.apache.spark.util.Utils
/** Tests for MutableURLClassLoader (parent-first delegation) and its
  * child-first variant, using jars generated on the fly by TestUtils. */
class MutableURLClassLoaderSuite extends FunSuite {

  // Jar placed on the parent classpath: FakeClass1-3, all with toString "2".
  val urls2 = List(TestUtils.createJarWithClasses(
    classNames = Seq("FakeClass1", "FakeClass2", "FakeClass3"),
    toStringValue = "2")).toArray
  // Jar placed on the child classpath: FakeClass1/2 with toString "1";
  // FakeClass3 exists only in the parent jar.
  val urls = List(TestUtils.createJarWithClasses(
    classNames = Seq("FakeClass1"),
    classNamesWithBase = Seq(("FakeClass2", "FakeClass3")), // FakeClass3 is in parent
    toStringValue = "1",
    classpathUrls = urls2)).toArray

  test("child first") {
    val parentLoader = new URLClassLoader(urls2, null)
    val classLoader = new ChildFirstURLClassLoader(urls, parentLoader)
    // Child-first resolution: the child jar's version ("1") wins over the parent's.
    val fakeClass = classLoader.loadClass("FakeClass2").newInstance()
    val fakeClassVersion = fakeClass.toString
    assert(fakeClassVersion === "1")
    // Loading the same name twice must return the same cached Class.
    val fakeClass2 = classLoader.loadClass("FakeClass2").newInstance()
    assert(fakeClass.getClass === fakeClass2.getClass)
  }

  test("parent first") {
    val parentLoader = new URLClassLoader(urls2, null)
    val classLoader = new MutableURLClassLoader(urls, parentLoader)
    // Standard delegation: the parent jar's version ("2") wins.
    val fakeClass = classLoader.loadClass("FakeClass1").newInstance()
    val fakeClassVersion = fakeClass.toString
    assert(fakeClassVersion === "2")
    val fakeClass2 = classLoader.loadClass("FakeClass1").newInstance()
    assert(fakeClass.getClass === fakeClass2.getClass)
  }

  test("child first can fall back") {
    val parentLoader = new URLClassLoader(urls2, null)
    val classLoader = new ChildFirstURLClassLoader(urls, parentLoader)
    // FakeClass3 is absent from the child jar, so lookup falls back to the parent.
    val fakeClass = classLoader.loadClass("FakeClass3").newInstance()
    val fakeClassVersion = fakeClass.toString
    assert(fakeClassVersion === "2")
  }

  test("child first can fail") {
    val parentLoader = new URLClassLoader(urls2, null)
    val classLoader = new ChildFirstURLClassLoader(urls, parentLoader)
    // A class missing from both jars must surface ClassNotFoundException.
    intercept[java.lang.ClassNotFoundException] {
      classLoader.loadClass("FakeClassDoesNotExist").newInstance()
    }
  }

  test("driver sets context class loader in local mode") {
    // Test the case where the driver program sets a context classloader and then runs a job
    // in local mode. This is what happens when ./spark-submit is called with "local" as the
    // master.
    val original = Thread.currentThread().getContextClassLoader
    val className = "ClassForDriverTest"
    val jar = TestUtils.createJarWithClasses(Seq(className))
    val contextLoader = new URLClassLoader(Array(jar), Utils.getContextOrSparkClassLoader)
    Thread.currentThread().setContextClassLoader(contextLoader)
    val sc = new SparkContext("local", "driverLoaderTest")
    try {
      // The executor-side task must see the driver's context classloader,
      // otherwise Class.forName on the generated class fails.
      sc.makeRDD(1 to 5, 2).mapPartitions { x =>
        val loader = Thread.currentThread().getContextClassLoader
        Class.forName(className, true, loader).newInstance()
        Seq().iterator
      }.count()
    }
    catch {
      case e: SparkException if e.getMessage.contains("ClassNotFoundException") =>
        fail("Local executor could not find class", e)
      case t: Throwable => fail("Unexpected exception ", t)
    }
    sc.stop()
    // Restore the original context classloader so later suites are unaffected.
    Thread.currentThread().setContextClassLoader(original)
  }
}
| Dax1n/spark-core | core/src/test/scala/org/apache/spark/util/MutableURLClassLoaderSuite.scala | Scala | apache-2.0 | 4,111 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import scala.collection.mutable
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll}
import org.apache.spark.{LocalSparkContext, SparkContext, SparkFunSuite}
import org.apache.spark.scheduler.cluster.ExecutorInfo
/**
* Unit tests for SparkListener that require a local cluster.
*/
class SparkListenerWithClusterSuite extends SparkFunSuite with LocalSparkContext
with BeforeAndAfter with BeforeAndAfterAll {
/** Length of time to wait while draining listener events. */
val WAIT_TIMEOUT_MILLIS = 10000
// A fresh context is created before every test. The master string
// "local-cluster[2,1,1024]" presumably means 2 executors x 1 core x 1024 MB
// each -- TODO confirm against the Spark master-URL documentation.
before {
sc = new SparkContext("local-cluster[2,1,1024]", "SparkListenerSuite")
}
test("SparkListener sends executor added message") {
val listener = new SaveExecutorInfo
sc.addSparkListener(listener)
// This test will check if the number of executors received by "SparkListener" is same as the
// number of all executors, so we need to wait until all executors are up
sc.jobProgressListener.waitUntilExecutorsUp(2, 60000)
val rdd1 = sc.parallelize(1 to 100, 4)
val rdd2 = rdd1.map(_.toString)
rdd2.setName("Target RDD")
rdd2.count()
// Drain the listener bus so both executor-added events have been delivered
// before asserting on the collected ExecutorInfo map.
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(listener.addedExecutorInfo.size == 2)
assert(listener.addedExecutorInfo("0").totalCores == 1)
assert(listener.addedExecutorInfo("1").totalCores == 1)
}
// Listener that records every announced executor's ExecutorInfo, keyed by executor id.
private class SaveExecutorInfo extends SparkListener {
val addedExecutorInfo = mutable.Map[String, ExecutorInfo]()
override def onExecutorAdded(executor: SparkListenerExecutorAdded) {
addedExecutorInfo(executor.executorId) = executor.executorInfo
}
}
}
| aokolnychyi/spark | core/src/test/scala/org/apache/spark/scheduler/SparkListenerWithClusterSuite.scala | Scala | apache-2.0 | 2,445 |
package dao
import javax.inject.Inject
import models.{Label, LabelThing, Thing}
import play.api.db.slick.{DatabaseConfigProvider, HasDatabaseConfigProvider}
import slick.jdbc.JdbcProfile
import scala.concurrent.{ExecutionContext, Future}
/**
 * Slick-backed data access object for `Label`s and their many-to-many
 * `LabelThing` links to `Thing`s (tables LABELS and LABELS_THINGS).
 */
class LabelDAO @Inject()(protected val dbConfigProvider: DatabaseConfigProvider)(implicit executionContext: ExecutionContext) extends ThingComponent with HasDatabaseConfigProvider[JdbcProfile] {

  import profile.api._

  lazy val labels = TableQuery[LabelsTable]
  lazy val labelThings = TableQuery[LabelThingsTable]

  /** All labels in the database. */
  def all(): Future[Seq[Label]] =
    db.run(labels.result)

  /** Labels whose name or description contains `query` (SQL LIKE; bound as a parameter by Slick). */
  def search(query: String): Future[Seq[Label]] =
    db.run(labels.filter(l => (l.name like s"%$query%") || (l.description like s"%$query%")).result)

  /** The label with the given id, if any. */
  def findById(id: Long): Future[Option[Label]] =
    db.run(labels.filter(_.id === id).result.headOption)

  /** Inserts a label and returns its auto-generated id. */
  def insert(label: Label): Future[Long] =
    db.run((labels returning labels.map(_.id)) += label)

  /** Overwrites label row `id` with `label`'s fields; the stored id always remains `id`. */
  def update(id: Long, label: Label): Future[Unit] =
    db.run(labels.filter(_.id === id).update(label.copy(Some(id)))).map(_ => ())

  /** Deletes a label; returns the number of affected rows. */
  def delete(id: Long): Future[Int] =
    db.run(labels.filter(_.id === id).delete)

  /** Labels linked to the given thing, paired with their link rows. */
  def getLinkedLabels(thingId: Long): Future[Seq[(LabelThing, Label)]] =
    db.run((labelThings join labels on (_.labelId === _.id)).filter(_._1.thingId === thingId).result)

  /** Things linked to the given label, paired with their link rows. */
  def getLinkedThings(labelId: Long): Future[Seq[(LabelThing, Thing)]] =
    db.run((labelThings join things on (_.thingId === _.id)).filter(_._1.labelId === labelId).result)

  /** Creates a label<->thing link and returns its auto-generated link id. */
  def linkLabel(labelThing: LabelThing): Future[Long] =
    db.run((labelThings returning labelThings.map(_.id)) += labelThing)

  /** Removes a link by its own id (not the label or thing id); returns affected row count. */
  def unlinkLabel(id: Long): Future[Int] =
    db.run(labelThings.filter(_.id === id).delete)

  class LabelsTable(tag: Tag) extends Table[Label](tag, "LABELS") {
    def * = (id.?, name, description, color) <> (Label.tupled, Label.unapply)
    def id = column[Long]("LABEL_ID", O.PrimaryKey, O.AutoInc)
    def name = column[String]("NAME")
    def description = column[String]("DESCRIPTION")
    def color = column[String]("COLOR")
  }

  class LabelThingsTable(tag: Tag) extends Table[LabelThing](tag, "LABELS_THINGS") {
    def * = (id.?, labelId, thingId) <> (LabelThing.tupled, LabelThing.unapply)
    def id = column[Long]("LABELTHING_ID", O.PrimaryKey, O.AutoInc)
    def thingId = column[Long]("THING_ID")
    def label = foreignKey("LABEL", labelId, labels)(_.id, onUpdate = ForeignKeyAction.Cascade, onDelete = ForeignKeyAction.Cascade)
    // BUG FIX: the THING foreign key previously joined on labelId; it must join on thingId.
    def thing = foreignKey("THING", thingId, things)(_.id, onUpdate = ForeignKeyAction.Cascade, onDelete = ForeignKeyAction.Cascade)
    def labelId = column[Long]("LABEL_ID")
  }
}
| MarekSuchanek/Inventory | app/dao/LabelDAO.scala | Scala | mit | 2,728 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.geotools
import java.io.IOException
import com.typesafe.scalalogging.LazyLogging
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.conf.GeoMesaSystemProperties.SystemProperty
import org.locationtech.geomesa.utils.geotools.GeoMesaParam.{SystemPropertyBooleanParam, SystemPropertyDurationParam, SystemPropertyIntegerParam, SystemPropertyStringParam}
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.concurrent.duration.Duration
@RunWith(classOf[JUnitRunner])
// Exercises GeoMesaParam lookup behavior: plain/optional/required lookups,
// defaults, deprecated key fallbacks, system-property fallbacks (and their
// precedence over defaults), and Duration parsing.
class GeoMesaParamTest extends Specification with LazyLogging {
// Implicit Scala Map -> java.util.Map conversions for the lookup(Map(...)) calls below.
import scala.collection.JavaConversions._
"GeoMesaParam" should {
"look up values" in {
new GeoMesaParam[String]("foo").lookup(Map("foo" -> "bar")) mustEqual "bar"
new GeoMesaParam[Integer]("foo").lookup(Map("foo" -> Int.box(1))) mustEqual 1
new GeoMesaParam[java.lang.Boolean]("foo").lookup(Map("foo" -> Boolean.box(true))) mustEqual true
}
"look up null values" in {
new GeoMesaParam[String]("foo").lookup(Map.empty[String, String]) must beNull
new GeoMesaParam[Integer]("foo").lookup(Map.empty[String, String]) must beNull
new GeoMesaParam[java.lang.Boolean]("foo").lookup(Map.empty[String, String]) must beNull
}
"look up optional values" in {
new GeoMesaParam[String]("foo").lookupOpt(Map("foo" -> "bar")) must beSome("bar")
new GeoMesaParam[Integer]("foo").lookupOpt(Map("foo" -> Int.box(1))) must beSome(Int.box(1))
new GeoMesaParam[java.lang.Boolean]("foo").lookupOpt(Map("foo" -> Boolean.box(true))) must beSome(Boolean.box(true))
}
"look up missing optional values" in {
new GeoMesaParam[String]("foo").lookupOpt(Map.empty[String, String]) must beNone
new GeoMesaParam[Integer]("foo").lookupOpt(Map.empty[String, String]) must beNone
new GeoMesaParam[java.lang.Boolean]("foo").lookupOpt(Map.empty[String, String]) must beNone
}
"look up values with defaults" in {
new GeoMesaParam[String]("foo", default = "baz").lookup(Map("foo" -> "bar")) mustEqual "bar"
new GeoMesaParam[Integer]("foo", default = 2).lookup(Map("foo" -> Int.box(1))) mustEqual 1
new GeoMesaParam[java.lang.Boolean]("foo", default = false).lookup(Map("foo" -> Boolean.box(true))) mustEqual true
}
"look up default values" in {
new GeoMesaParam[String]("foo", default = "bar").lookup(Map.empty[String, String]) mustEqual "bar"
new GeoMesaParam[Integer]("foo", default = 2).lookup(Map.empty[String, String]) mustEqual 2
new GeoMesaParam[java.lang.Boolean]("foo", default = true).lookup(Map.empty[String, String]) mustEqual true
}
"look up required values" in {
new GeoMesaParam[String]("foo", optional = false).lookup(Map("foo" -> "bar")) mustEqual "bar"
new GeoMesaParam[Integer]("foo", optional = false).lookup(Map("foo" -> Int.box(1))) mustEqual 1
new GeoMesaParam[java.lang.Boolean]("foo", optional = false).lookup(Map("foo" -> Boolean.box(true))) mustEqual true
}
"throw exception for missing required values" in {
new GeoMesaParam[String]("foo", optional = false).lookup(Map.empty[String, String]) must throwAn[IOException]
new GeoMesaParam[Integer]("foo", optional = false).lookup(Map.empty[String, String]) must throwAn[IOException]
new GeoMesaParam[java.lang.Boolean]("foo", optional = false).lookup(Map.empty[String, String]) must throwAn[IOException]
}
"throw exception for invalid type conversions" in {
new GeoMesaParam[Integer]("foo").lookup(Map("foo" -> "bar")) must throwAn[IOException]
}
// The current key wins when both the current and a deprecated key are present.
"lookup deprecated values" in {
new GeoMesaParam[String]("foo", deprecatedKeys = Seq("notfoo")).lookup(Map("foo" -> "bar")) mustEqual "bar"
new GeoMesaParam[String]("foo", deprecatedKeys = Seq("notfoo")).lookup(Map("notfoo" -> "bar")) mustEqual "bar"
new GeoMesaParam[String]("foo", optional = false, deprecatedKeys = Seq("notfoo")).lookup(Map("foo" -> "bar")) mustEqual "bar"
new GeoMesaParam[String]("foo", optional = false, deprecatedKeys = Seq("notfoo")).lookup(Map("notfoo" -> "bar")) mustEqual "bar"
}
// threadLocalValue scopes the system-property override to this thread,
// presumably so concurrently running specs don't interfere -- confirm.
"look up system properties" in {
val prop = SystemProperty("params.foo.bar")
prop.threadLocalValue.set("baz")
new GeoMesaParam[String]("foo", systemProperty = Some(SystemPropertyStringParam(prop))).lookup(Map("foo" -> "bar")) mustEqual "bar"
new GeoMesaParam[String]("foo", systemProperty = Some(SystemPropertyStringParam(prop))).lookup(Map.empty[String, String]) mustEqual "baz"
prop.threadLocalValue.set("2")
new GeoMesaParam[Integer]("foo", systemProperty = Some(SystemPropertyIntegerParam(prop))).lookup(Map("foo" -> Int.box(1))) mustEqual 1
new GeoMesaParam[Integer]("foo", systemProperty = Some(SystemPropertyIntegerParam(prop))).lookup(Map.empty[String, String]) mustEqual 2
prop.threadLocalValue.set("true")
new GeoMesaParam[java.lang.Boolean]("foo", systemProperty = Some(SystemPropertyBooleanParam(prop))).lookup(Map("foo" -> Boolean.box(false))) mustEqual false
new GeoMesaParam[java.lang.Boolean]("foo", systemProperty = Some(SystemPropertyBooleanParam(prop))).lookup(Map.empty[String, String]) mustEqual true
}
// Required parameters must be supplied explicitly; a system property is not a substitute.
"not accept system properties for required parameters" in {
val prop = SystemProperty("params.foo.bar")
prop.threadLocalValue.set("baz")
new GeoMesaParam[String]("foo", optional = false, systemProperty = Some(SystemPropertyStringParam(prop))).lookup(Map("foo" -> "bar")) mustEqual "bar"
new GeoMesaParam[String]("foo", optional = false, systemProperty = Some(SystemPropertyStringParam(prop))).lookup(Map.empty[String, String]) must throwAn[IOException]
prop.threadLocalValue.set("2")
new GeoMesaParam[Integer]("foo", optional = false, systemProperty = Some(SystemPropertyIntegerParam(prop))).lookup(Map("foo" -> Int.box(1))) mustEqual 1
new GeoMesaParam[Integer]("foo", optional = false, systemProperty = Some(SystemPropertyIntegerParam(prop))).lookup(Map.empty[String, String]) must throwAn[IOException]
prop.threadLocalValue.set("true")
new GeoMesaParam[java.lang.Boolean]("foo", optional = false, systemProperty = Some(SystemPropertyBooleanParam(prop))).lookup(Map("foo" -> Boolean.box(false))) mustEqual false
new GeoMesaParam[java.lang.Boolean]("foo", optional = false, systemProperty = Some(SystemPropertyBooleanParam(prop))).lookup(Map.empty[String, String]) must throwAn[IOException]
}
// Resolution order: explicit param > system property > declared default.
"prioritize system properties over default values" in {
val prop = SystemProperty("params.foo.bar")
val sysParam = SystemPropertyStringParam(prop)
prop.threadLocalValue.set("baz")
new GeoMesaParam[String]("foo", default = "wuz", systemProperty = Some(sysParam)).lookup(Map("foo" -> "bar")) mustEqual "bar"
new GeoMesaParam[String]("foo", default = "wuz", systemProperty = Some(sysParam)).lookup(Map.empty[String, String]) mustEqual "baz"
prop.threadLocalValue.remove()
new GeoMesaParam[String]("foo", default = "wuz", systemProperty = Some(sysParam)).lookup(Map.empty[String, String]) mustEqual "wuz"
}
// Constructing a param whose default disagrees with its system property's
// default is rejected eagerly (AssertionError at construction time).
"require system properties to have a common default" in {
new GeoMesaParam[String]("foo", systemProperty = Some(SystemPropertyStringParam(SystemProperty("params.foo.bar", "baz")))) must throwAn[AssertionError]
new GeoMesaParam[String]("foo", default = "bar", systemProperty = Some(SystemPropertyStringParam(SystemProperty("params.foo.bar", "baz")))) must throwAn[AssertionError]
new GeoMesaParam[String]("foo", default = "bar", systemProperty = Some(SystemPropertyStringParam(SystemProperty("params.foo.bar", "bar")))) must not(throwAn[AssertionError])
new GeoMesaParam[Integer]("foo", systemProperty = Some(SystemPropertyIntegerParam(SystemProperty("params.foo.bar", "2")))) must throwAn[AssertionError]
new GeoMesaParam[Integer]("foo", default = 1, systemProperty = Some(SystemPropertyIntegerParam(SystemProperty("params.foo.bar", "2")))) must throwAn[AssertionError]
new GeoMesaParam[Integer]("foo", default = 1, systemProperty = Some(SystemPropertyIntegerParam(SystemProperty("params.foo.bar", "1")))) must not(throwAn[AssertionError])
}
// Duration parsing is case-insensitive ("10S", "inf") and rejects garbage.
"lookup durations" in {
new GeoMesaParam[Duration]("foo").lookup(Map("foo" -> "10s")) mustEqual Duration("10s")
new GeoMesaParam[Duration]("foo").lookup(Map("foo" -> "10S")) mustEqual Duration("10s")
new GeoMesaParam[Duration]("foo").lookup(Map("foo" -> "Inf")) mustEqual Duration.Inf
new GeoMesaParam[Duration]("foo").lookup(Map("foo" -> "inf")) mustEqual Duration.Inf
new GeoMesaParam[Duration]("foo").lookup(Map("foo" -> "bar")) must throwAn[IOException]
}
"lookup durations with defaults" in {
new GeoMesaParam[Duration]("foo", default = Duration("10s")).lookup(Map("foo" -> "10s")) mustEqual Duration("10s")
new GeoMesaParam[Duration]("foo", default = Duration("10s")).lookup(Map.empty[String, String]) mustEqual Duration("10s")
}
"lookup durations with defaults and system properties" in {
new GeoMesaParam[Duration]("foo", default = Duration("10s"), systemProperty = Some(SystemPropertyDurationParam(SystemProperty("params.foo", "10s")))).lookup(Map("foo" -> "10s")) mustEqual Duration("10s")
new GeoMesaParam[Duration]("foo", default = Duration("10s"), systemProperty = Some(SystemPropertyDurationParam(SystemProperty("params.foo", "10s")))).lookup(Map.empty[String, String]) mustEqual Duration("10s")
}
}
}
| locationtech/geomesa | geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/geotools/GeoMesaParamTest.scala | Scala | apache-2.0 | 10,029 |
/*
* Copyright 2012 Tumblr Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.storage.redis
import com.twitter.finagle.redis.Client
import com.twitter.finagle.redis.protocol.{Limit, ZInterval, ZRangeResults}
import com.twitter.util.{Duration, Future}
import org.jboss.netty.buffer.ChannelBuffer
/**
* RedisSortedSetMap is a map from strings to sorted sets.
* @param database the redis client to use
* @param prefix the namespace of the sorted set
* @param defaultTTL the timeout on the sorted set
*/
/**
 * RedisSortedSetMap is a map from strings to sorted sets.
 * @param database the redis client to use
 * @param prefix the namespace of the sorted set
 * @param defaultTTL the timeout on the sorted set
 */
class RedisSortedSetMap(database: Client, prefix: String, defaultTTL: Option[Duration]) {

  // Namespaces every key so that independent maps can share a single redis instance.
  private[this] def preface(key: String) = s"$prefix:$key"

  /**
   * Adds a buffer with a score to the sorted set specified by key.
   */
  def add(key: String, score: Double, buffer: ChannelBuffer): Future[Unit] = {
    val setKey = preface(key)
    database.zAdd(setKey, score, buffer) flatMap { _ =>
      // Refresh the TTL after every write; sets with no configured TTL never expire.
      defaultTTL.fold(Future.Unit)(ttl => database.expire(setKey, ttl.inSeconds).unit)
    }
  }

  /**
   * Gets elements from a sorted set, in reverse (highest-score-first) order.
   * @param key specifies which sorted set
   * @param start items must have a score bigger than this
   * @param stop items must have a score smaller than this
   * @param count number of items to return
   */
  def get(key: String, start: Double, stop: Double, count: Long): Future[ZRangeResults] = {
    val (lower, upper) = (ZInterval(start), ZInterval(stop))
    database.zRevRangeByScore(preface(key), upper, lower, true, Some(Limit(0, count)))
  }
}
| knowledgehacker/zipkin | zipkin-redis/src/main/scala/com/twitter/zipkin/storage/redis/RedisSortedSetMap.scala | Scala | apache-2.0 | 2,060 |
package uk.gov.dvla.vehicles.presentation.common.controllers
import com.google.inject.Inject
import play.api.mvc.{Action, Controller}
import uk.gov.dvla.vehicles.presentation.common.clientsidesession.ClientSideSessionFactory
import uk.gov.dvla.vehicles.presentation.common.model.CookieReport
import uk.gov.dvla.vehicles.presentation.common.views
// Serves the static cookie policy page listing the cookies this application sets.
class CookiePolicyController @Inject()(implicit clientSideSessionFactory: ClientSideSessionFactory) extends Controller {
// Cookie inventory rendered on the policy page: (name, content key, type, lifetime).
// The "40 character length" entry presumably stands for the randomly named
// encrypted session cookies -- TODO confirm against ClientSideSessionFactory.
protected val cookies = List(
CookieReport("_ga", "ga", "normal", "2years"),
CookieReport("_gat", "gat", "normal", "10min"),
CookieReport("tracking_id", "tracking_id", "normal", "7days"),
CookieReport("PLAY_LANG", "PLAY_LANG", "normal", "close"),
CookieReport("40 character length", "encrypted", "normal-secure", "7days")
)
// GET handler: renders the cookie policy page with the inventory above.
def present = Action { implicit request =>
Ok(views.html.cookiesPolicyPage(cookies))
}
}
| dvla/vehicles-presentation-common | common-test/app/uk/gov/dvla/vehicles/presentation/common/controllers/CookiePolicyController.scala | Scala | mit | 916 |
/*
,i::,
:;;;;;;;
;:,,::;.
1ft1;::;1tL
t1;::;1,
:;::; _____ __ ___ __
fCLff ;:: tfLLC / ___/ / |/ /____ _ _____ / /_
CLft11 :,, i1tffLi \\__ \\ ____ / /|_/ // __ `// ___// __ \\
1t1i .;; .1tf ___/ //___// / / // /_/ // /__ / / / /
CLt1i :,: .1tfL. /____/ /_/ /_/ \\__,_/ \\___//_/ /_/
Lft1,:;: , 1tfL:
;it1i ,,,:::;;;::1tti effectful
.t1i .,::;;; ;1tt Copyright (c) 2016 S-Mach, Inc.
Lft11ii;::;ii1tfL: Author: lance.gatlin@gmail.com
.L1 1tt1ttt,,Li
...1LLLL...
*/
package effectful
/**
* A type-class for lifting the exec monad of a service into another
* exec monad
*
* @tparam S the type of the service whose methods are all wrapped in
* an exec monad
*/
trait LiftService[S[_[_]]] {
/**
 * Create a new instance of a service that returns computations
 * in a different exec monad by utilizing the supplied service and
 * its current exec monad.
 *
 * (Doc fix: the previous scaladoc described the result as S[F] built from an
 * "underlying S[E]" lifting "E[_] into F[_]", which did not match the
 * signature; the correct direction is F to G.)
 *
 * @param s service to lift
 * @param C a type-class for capturing computations of F inside G
 * @tparam F type of the underlying service's exec monad
 * @tparam G type of the target exec monad
 * @return an instance of S[G] that utilizes the underlying S[F] to compute
 *         values by lifting all computed F[_] values into G[_]
 */
def apply[F[_],G[_]](
s: S[F]
)(implicit
C:CaptureTransform[F,G]
) : S[G]
}
| S-Mach/effectful | src/main/scala/effectful/LiftService.scala | Scala | mit | 1,598 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.entity.test
import akka.http.scaladsl.model.ContentTypes
import common.StreamLogging
import spray.json._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
import org.apache.openwhisk.core.WhiskConfig
import org.apache.openwhisk.core.entity.Attachments.{Attached, Inline}
import org.apache.openwhisk.core.entity.ExecManifest.ImageName
import org.apache.openwhisk.core.entity.{
BlackBoxExec,
CodeExecAsAttachment,
CodeExecAsString,
Exec,
ExecManifest,
WhiskAction
}
import scala.collection.mutable
@RunWith(classOf[JUnitRunner])
// Round-trip tests for Exec / WhiskAction JSON (de)serialization: inline code,
// attachment references, the legacy "jar" field, legacy manifests, and blackbox images.
class ExecTests extends FlatSpec with Matchers with StreamLogging with BeforeAndAfterAll {
behavior of "exec deserialization"
val config = new WhiskConfig(ExecManifest.requiredProperties)
ExecManifest.initialize(config)
// Restore the default manifest in case a test re-initialized it and did not reset.
override protected def afterAll(): Unit = {
ExecManifest.initialize(config)
super.afterAll()
}
it should "read existing code string as attachment" in {
val json = """{
| "name": "action_tests_name2",
| "_id": "anon-Yzycx8QnIYDp3Tby0Fnj23KcMtH/action_tests_name2",
| "publish": false,
| "annotations": [],
| "version": "0.0.1",
| "updated": 1533623651650,
| "entityType": "action",
| "exec": {
| "kind": "nodejs:14",
| "code": "foo",
| "binary": false
| },
| "parameters": [
| {
| "key": "x",
| "value": "b"
| }
| ],
| "limits": {
| "timeout": 60000,
| "memory": 256,
| "logs": 10
| },
| "namespace": "anon-Yzycx8QnIYDp3Tby0Fnj23KcMtH"
|}""".stripMargin.parseJson.asJsObject
val action = WhiskAction.serdes.read(json)
action.exec should matchPattern { case CodeExecAsAttachment(_, Inline("foo"), None, false) => }
}
// The stored "binary" flag is ignored on read; the matcher shows the flag is
// re-derived from the code content (base64 -> true, plain source -> false).
it should "properly determine binary property" in {
val j1 = """{
| "kind": "nodejs:14",
| "code": "SGVsbG8gT3BlbldoaXNr",
| "binary": false
|}""".stripMargin.parseJson.asJsObject
Exec.serdes.read(j1) should matchPattern {
case CodeExecAsAttachment(_, Inline("SGVsbG8gT3BlbldoaXNr"), None, true) =>
}
val j2 = """{
| "kind": "nodejs:14",
| "code": "while (true)",
| "binary": false
|}""".stripMargin.parseJson.asJsObject
Exec.serdes.read(j2) should matchPattern {
case CodeExecAsAttachment(_, Inline("while (true)"), None, false) =>
}
// "binary" omitted entirely: still derived from content (plain source here, so
// false). NOTE(review): the old comment said "Defaults to binary", which
// contradicted the matcher below.
val j3 = """{
| "kind": "nodejs:14",
| "code": "while (true)"
|}""".stripMargin.parseJson.asJsObject
Exec.serdes.read(j3) should matchPattern {
case CodeExecAsAttachment(_, Inline("while (true)"), None, false) =>
}
}
it should "read code stored as attachment" in {
val json = """{
| "kind": "java:8",
| "code": {
| "attachmentName": "foo:bar",
| "attachmentType": "application/java-archive",
| "length": 32768,
| "digest": "sha256-foo"
| },
| "binary": true,
| "main": "hello"
|}""".stripMargin.parseJson.asJsObject
Exec.serdes.read(json) should matchPattern {
case CodeExecAsAttachment(_, Attached("foo:bar", _, Some(32768), Some("sha256-foo")), Some("hello"), true) =>
}
}
// Legacy field name: "jar" is accepted as an alias for "code".
it should "read code stored as jar property" in {
val j1 = """{
| "kind": "nodejs:14",
| "jar": "SGVsbG8gT3BlbldoaXNr",
| "binary": false
|}""".stripMargin.parseJson.asJsObject
Exec.serdes.read(j1) should matchPattern {
case CodeExecAsAttachment(_, Inline("SGVsbG8gT3BlbldoaXNr"), None, true) =>
}
}
// With an old-style manifest (no attachment support), code deserializes as a
// plain string (CodeExecAsString) rather than an attachment.
it should "read existing code string as string with old manifest" in {
val oldManifestJson =
"""{
| "runtimes": {
| "nodejs": [
| {
| "kind": "nodejs:14",
| "default": true,
| "image": {
| "prefix": "openwhisk",
| "name": "nodejs6action",
| "tag": "latest"
| },
| "deprecated": false,
| "stemCells": [{
| "initialCount": 2,
| "memory": "256 MB"
| }]
| }
| ]
| }
|}""".stripMargin.parseJson.compactPrint
val oldConfig =
new TestConfig(Map(WhiskConfig.runtimesManifest -> oldManifestJson), ExecManifest.requiredProperties)
ExecManifest.initialize(oldConfig)
val j1 = """{
| "kind": "nodejs:14",
| "code": "SGVsbG8gT3BlbldoaXNr",
| "binary": false
|}""".stripMargin.parseJson.asJsObject
Exec.serdes.read(j1) should matchPattern {
case CodeExecAsString(_, "SGVsbG8gT3BlbldoaXNr", None) =>
}
//Reset config back
ExecManifest.initialize(config)
}
behavior of "blackbox exec deserialization"
it should "read existing code string as attachment" in {
val json = """{
| "name": "action_tests_name2",
| "_id": "anon-Yzycx8QnIYDp3Tby0Fnj23KcMtH/action_tests_name2",
| "publish": false,
| "annotations": [],
| "version": "0.0.1",
| "updated": 1533623651650,
| "entityType": "action",
| "exec": {
| "kind": "blackbox",
| "image": "docker-custom.com/openwhisk-runtime/magic/nodejs:0.0.1",
| "code": "foo",
| "binary": false
| },
| "parameters": [
| {
| "key": "x",
| "value": "b"
| }
| ],
| "limits": {
| "timeout": 60000,
| "memory": 256,
| "logs": 10
| },
| "namespace": "anon-Yzycx8QnIYDp3Tby0Fnj23KcMtH"
|}""".stripMargin.parseJson.asJsObject
val action = WhiskAction.serdes.read(json)
action.exec should matchPattern { case BlackBoxExec(_, Some(Inline("foo")), None, false, false) => }
}
it should "properly determine binary property" in {
val j1 = """{
| "kind": "blackbox",
| "image": "docker-custom.com/openwhisk-runtime/magic/nodejs:0.0.1",
| "code": "SGVsbG8gT3BlbldoaXNr",
| "binary": false
|}""".stripMargin.parseJson.asJsObject
Exec.serdes.read(j1) should matchPattern {
case BlackBoxExec(_, Some(Inline("SGVsbG8gT3BlbldoaXNr")), None, false, true) =>
}
val j2 = """{
| "kind": "blackbox",
| "image": "docker-custom.com/openwhisk-runtime/magic/nodejs:0.0.1",
| "code": "while (true)",
| "binary": false
|}""".stripMargin.parseJson.asJsObject
Exec.serdes.read(j2) should matchPattern {
case BlackBoxExec(_, Some(Inline("while (true)")), None, false, false) =>
}
//Empty code should resolve as None
val j3 = """{
| "kind": "blackbox",
| "image": "docker-custom.com/openwhisk-runtime/magic/nodejs:0.0.1",
| "code": " "
|}""".stripMargin.parseJson.asJsObject
Exec.serdes.read(j3) should matchPattern {
case BlackBoxExec(_, None, None, false, false) =>
}
val j4 = """{
| "kind": "blackbox",
| "image": "docker-custom.com/openwhisk-runtime/magic/nodejs:0.0.1",
| "code": {
| "attachmentName": "foo:bar",
| "attachmentType": "application/octet-stream",
| "length": 32768,
| "digest": "sha256-foo"
| },
| "binary": true,
| "main": "hello"
|}""".stripMargin.parseJson.asJsObject
Exec.serdes.read(j4) should matchPattern {
case BlackBoxExec(_, Some(Attached("foo:bar", _, Some(32768), Some("sha256-foo"))), Some("hello"), false, true) =>
}
}
behavior of "blackbox exec serialization"
it should "serialize with inline attachment" in {
val bb = BlackBoxExec(
ImageName.fromString("docker-custom.com/openwhisk-runtime/magic/nodejs:0.0.1").get,
Some(Inline("foo")),
None,
false,
false)
val js = Exec.serdes.write(bb)
val js2 = """{
| "kind": "blackbox",
| "image": "docker-custom.com/openwhisk-runtime/magic/nodejs:0.0.1",
| "binary": false,
| "code": "foo"
|}""".stripMargin.parseJson.asJsObject
js shouldBe js2
}
it should "serialize with attached attachment" in {
val bb = BlackBoxExec(
ImageName.fromString("docker-custom.com/openwhisk-runtime/magic/nodejs:0.0.1").get,
Some(Attached("foo", ContentTypes.`application/octet-stream`, Some(42), Some("sha1-42"))),
None,
false,
true)
val js = Exec.serdes.write(bb)
val js2 = """{
| "kind": "blackbox",
| "image": "docker-custom.com/openwhisk-runtime/magic/nodejs:0.0.1",
| "binary": true,
| "code": {
| "attachmentName": "foo",
| "attachmentType": "application/octet-stream",
| "length": 42,
| "digest": "sha1-42"
| }
|}""".stripMargin.parseJson.asJsObject
js shouldBe js2
}
// WhiskConfig stub backed by an in-memory property map, used to feed the
// old-style manifest test above.
private class TestConfig(val props: Map[String, String], requiredProperties: Map[String, String])
extends WhiskConfig(requiredProperties) {
override protected def getProperties() = mutable.Map(props.toSeq: _*)
}
}
| style95/openwhisk | tests/src/test/scala/org/apache/openwhisk/core/entity/test/ExecTests.scala | Scala | apache-2.0 | 11,292 |
import scala.reflect.runtime.universe._
import scala.reflect.runtime.{currentMirror => cm}
object Test extends dotty.runtime.LegacyApp {
// Reflect a class defined inside the app body: print its symbol, fully
// qualified name and type signature.
class A1 { def foo = ??? }
val c1 = cm.classSymbol(classOf[A1])
println(c1)
println(c1.fullName)
println(c1.info)
// Instantiating Test repeats the same reflection for a class nested in a class body.
new Test
}
class Test {
// Same reflection round-trip as in object Test, but for a class nested inside
// a (non-object) class body: symbol, fullName, then type signature.
class A2 { def foo = ??? }
val c2 = cm.classSymbol(classOf[A2])
println(c2)
println(c2.fullName)
println(c2.info)
}
| yusuke2255/dotty | tests/pending/run/t5256f.scala | Scala | bsd-3-clause | 418 |
package edison.model.serialization
import edison.model.domain.SampleData
import edison.search.serialization.{ JsonSerialization, JsonSerializer }
import edison.util.SmartSpec
// Checks the JSON rendering of Params and Points from SampleData against
// expected pretty-printed literals.
class ModelSerializationTest extends SmartSpec with SampleData {
import edison.model.serialization.DefaultSerializers._
// Helper: serialize via the implicit JsonSerializer and pretty-print the result.
def serialize[T](obj: T)(implicit serializer: JsonSerializer[T]): String =
JsonSerialization.ExtendedJson(JsonSerialization.serialize(obj)).pretty
behavior of "ParamSerializer"
it must "serialize integer Param" in {
// NOTE(review): `.strip` on the expected literal presumably trims the margin
// and surrounding newlines (custom extension, not stdlib) -- confirm in util.
serialize(param1_5mb) shouldBe
"""
|{
| "CacheSize" : 5242880
|}
""".strip
}
it must "serialize enum Param" in {
serialize(param0_fifo) shouldBe
"""
|{
| "EvictionPolicy" : "FIFO"
|}
""".strip
}
behavior of "PointSerializer"
it must "serialize Point" in {
serialize(point1) shouldBe
"""
|{
| "EvictionPolicy" : "FIFO",
| "CacheSize" : 5242880
|}
""".strip
}
}
| pawel-wiejacha/edison | core/src/test/scala/edison/model/serialization/ModelSerializationTest.scala | Scala | mit | 1,036 |
package fpinscala.errorhandling
import scala.{Option => _, Either => _, Left => _, Right => _, _} // hide std library `Option` and `Either`, since we are writing our own in this chapter
/**
 * Right-biased disjoint union: `Right` carries a success of type `A`,
 * `Left` carries an error of type `E`. All combinators short-circuit on
 * the first `Left` encountered.
 */
sealed trait Either[+E,+A] {

  /** Applies `f` to the success value; a `Left` passes through untouched. */
  def map[B](f: A => B): Either[E, B] =
    flatMap(a => Right(f(a)))

  /** Sequences a dependent computation; a `Left` short-circuits it. */
  def flatMap[EE >: E, B](f: A => Either[EE, B]): Either[EE, B] = this match {
    case Right(a) => f(a)
    case Left(e)  => Left(e)
  }

  /** Falls back to `b` (evaluated lazily) when this is a `Left`. */
  def orElse[EE >: E, B >: A](b: => Either[EE, B]): Either[EE, B] = this match {
    case Right(a) => Right(a)
    case Left(_)  => b
  }

  /** Combines two eithers with `f`; the first `Left` (this one) wins. */
  def map2[EE >: E, B, C](b: Either[EE, B])(f: (A, B) => C): Either[EE, C] =
    flatMap(a => b.map(bb => f(a, bb)))
}

case class Left[+E](get: E) extends Either[E,Nothing]
case class Right[+A](get: A) extends Either[Nothing,A]

object Either {

  /** Maps `f` over `es`, collecting the results; the first `Left` aborts the result. */
  def traverse[E,A,B](es: List[A])(f: A => Either[E, B]): Either[E, List[B]] =
    es.foldRight(Right(Nil): Either[E, List[B]])((a, acc) => f(a).map2(acc)(_ :: _))

  /** Turns a list of eithers inside out; the first `Left` wins. */
  def sequence[E,A](es: List[Either[E,A]]): Either[E,List[A]] =
    traverse(es)(e => e)

  /** Arithmetic mean, or a descriptive `Left` for an empty sequence. */
  def mean(xs: IndexedSeq[Double]): Either[String, Double] =
    if (xs.nonEmpty) Right(xs.sum / xs.length)
    else Left("mean of empty list!")

  /** Integer division, capturing division-by-zero as a `Left`. */
  def safeDiv(x: Int, y: Int): Either[Exception, Int] =
    Try(x / y)

  /** Evaluates `a`, capturing any thrown `Exception` as a `Left`. */
  def Try[A](a: => A): Either[Exception, A] =
    try Right(a)
    catch { case e: Exception => Left(e) }
}
package com.datastax.spark.connector.rdd.reader
import com.datastax.driver.core.Row
import com.datastax.spark.connector.{CassandraRowMetadata, ColumnRef, ColumnSelector}
import com.datastax.spark.connector.cql.TableDef
// Factory producing readers that decode each Cassandra row into a (key, value)
// pair: the key is read from the columns named by keySelection, the value from
// the externally supplied column selection.
private[connector] class KeyValueRowReaderFactory[K, V](
keySelection: ColumnSelector,
keyRRF: RowReaderFactory[K],
valueRRF: RowReaderFactory[V])
extends RowReaderFactory[(K, V)] {
override def rowReader(table: TableDef, columnSelection: IndexedSeq[ColumnRef]): RowReader[(K, V)] = {
val keyReader = keyRRF.rowReader(table, keySelection.selectFrom(table))
val valueReader = valueRRF.rowReader(table, columnSelection)
new KeyValueRowReader(keyReader, valueReader)
}
override def targetClass: Class[(K, V)] = classOf[(K, V)]
}
/** Reads each row twice -- once with the key reader, once with the value reader --
  * and pairs the results. */
private[connector] class KeyValueRowReader[K, V](keyReader: RowReader[K], valueReader: RowReader[V])
  extends RowReader[(K, V)] {

  /** Union of both readers' required columns: concatenated when both are known,
    * otherwise whichever side declares a requirement (key side preferred), else None. */
  override def neededColumns: Option[Seq[ColumnRef]] =
    (keyReader.neededColumns, valueReader.neededColumns) match {
      case (Some(keyCols), Some(valueCols)) => Some(keyCols ++ valueCols)
      case (keyCols, valueCols)             => keyCols.orElse(valueCols)
    }

  override def read(row: Row, rowMetaData: CassandraRowMetadata): (K, V) = {
    val key = keyReader.read(row, rowMetaData)
    val value = valueReader.read(row, rowMetaData)
    (key, value)
  }
}
| ponkin/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/rdd/reader/KeyValueRowReader.scala | Scala | apache-2.0 | 1,319 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.process.transform
import java.util.Date
import com.vividsolutions.jts.geom.Point
import org.geotools.data.collection.ListFeatureCollection
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.utils.bin.BinaryOutputEncoder
import org.locationtech.geomesa.utils.bin.BinaryOutputEncoder.EncodedValues
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class BinConversionProcessTest extends Specification {

  import scala.collection.JavaConversions._

  // Feature type with two date and two geometry attributes so tests can exercise
  // both the default fields (dtg, geom) and explicitly-named alternates (dtg2, geom2).
  val sft = SimpleFeatureTypes.createType("bin",
    "name:String,track:String,dtg:Date,dtg2:Date,*geom:Point:srid=4326,geom2:Point:srid=4326")

  val process = new BinConversionProcess

  // Ten fixture features; the loop index i parameterizes every attribute so each
  // feature's encoded values are distinguishable.
  val features = (0 until 10).map { i =>
    val sf = new ScalaSimpleFeature(sft, s"0$i")
    sf.setAttribute("name", s"name$i")
    sf.setAttribute("track", s"$i")
    sf.setAttribute("dtg", s"2017-02-20T00:00:0$i.000Z")
    sf.setAttribute("dtg2", s"2017-02-21T00:00:0$i.000Z")
    sf.setAttribute("geom", s"POINT(40 ${50 + i})")
    sf.setAttribute("geom2", s"POINT(20 ${30 + i})")
    sf
  }

  // Expected values for comparison: track ids are hashed (hashCode), dates are epoch
  // millis, and geometries are (lat, lon) / (lon, lat) float pairs.
  val ids = features.map(_.getID.hashCode)
  val names = features.map(_.getAttribute("name").hashCode)
  val tracks = features.map(_.getAttribute("track").hashCode)
  val dates = features.map(_.getAttribute("dtg").asInstanceOf[Date].getTime)
  val dates2 = features.map(_.getAttribute("dtg2").asInstanceOf[Date].getTime)
  val lonlat = features.map(_.getAttribute("geom").asInstanceOf[Point]).map(p => (p.getY.toFloat, p.getX.toFloat))
  val latlon = lonlat.map(_.swap)
  val lonlat2 = features.map(_.getAttribute("geom2").asInstanceOf[Point]).map(p => (p.getY.toFloat, p.getX.toFloat))
  val latlon2 = lonlat2.map(_.swap)

  val listCollection = new ListFeatureCollection(sft, features)

  // converts to tuples that we can compare to zipped values
  // NOTE(review): label == -1L appears to be the "no label" sentinel used by the
  // encoder — confirm against BinaryOutputEncoder before relying on it elsewhere.
  def toTuples(value: EncodedValues): Any = value match {
    case EncodedValues(trackId, lat, lon, dtg, label) if label == -1L => ((trackId, dtg), (lat, lon))
    case EncodedValues(trackId, lat, lon, dtg, label) => (((trackId, dtg), (lat, lon)), label)
  }

  "BinConversionProcess" should {
    "encode an empty feature collection" in {
      val bytes = process.execute(new ListFeatureCollection(sft), null, null, null, null, "lonlat")
      bytes must beEmpty
    }
    "encode a generic feature collection" in {
      // nulls fall back to the type's default track (feature id), geom, and dtg
      val bytes = process.execute(listCollection, null, null, null, null, "lonlat").toList
      bytes must haveLength(10)
      val decoded = bytes.map(BinaryOutputEncoder.decode).map(toTuples)
      decoded must containTheSameElementsAs(ids.zip(dates).zip(lonlat))
    }
    "encode a generic feature collection with alternate values" in {
      // explicitly select the secondary track/geometry/date attributes
      val bytes = process.execute(listCollection, "name", "geom2", "dtg2", null, "lonlat").toList
      bytes must haveLength(10)
      val decoded = bytes.map(BinaryOutputEncoder.decode).map(toTuples)
      decoded must containTheSameElementsAs(names.zip(dates2).zip(lonlat2))
    }
    "encode a generic feature collection with labels" in {
      // supplying a label attribute switches the encoder to the labeled record format
      val bytes = process.execute(listCollection, null, null, null, "track", "lonlat").toList
      bytes must haveLength(10)
      val decoded = bytes.map(BinaryOutputEncoder.decode).map(toTuples)
      decoded must containTheSameElementsAs(ids.zip(dates).zip(lonlat).zip(tracks))
    }
  }
}
| ronq/geomesa | geomesa-process/geomesa-process-vector/src/test/scala/org/locationtech/geomesa/process/transform/BinConversionProcessTest.scala | Scala | apache-2.0 | 3,991 |
package dotty.tools.dotc
package transform
import core._
import Names._
import StdNames.nme
import Types._
import dotty.tools.dotc.transform.TreeTransforms.{AnnotationTransformer, TransformerInfo, MiniPhaseTransform, TreeTransformer}
import ast.Trees._
import Flags._
import Contexts.Context
import Symbols._
import Constants._
import Denotations._, SymDenotations._
import Decorators.StringInterpolators
import dotty.tools.dotc.ast.tpd
import dotty.tools.dotc.core.Annotations.ConcreteAnnotation
import scala.collection.mutable
import DenotTransformers._
import Names.Name
import NameOps._
import TypeUtils._
/** A transformer that removes repeated parameters (T*) from all types, replacing
 *  them with Seq types.
 */
class ElimRepeated extends MiniPhaseTransform with InfoTransformer with AnnotationTransformer { thisTransformer =>
  import ast.tpd._

  override def phaseName = "elimRepeated"

  /** Rewrites a symbol's info so repeated parameters become Seq (or Array for Java). */
  def transformInfo(tp: Type, sym: Symbol)(implicit ctx: Context): Type =
    elimRepeated(tp)

  // Only method symbols can carry repeated parameters, so only they may change.
  override def mayChange(sym: Symbol)(implicit ctx: Context): Boolean = sym is Method

  /** Replaces a trailing repeated parameter type with its underlying Seq/Array type,
   *  recursing through curried method types and polymorphic types.
   */
  private def elimRepeated(tp: Type)(implicit ctx: Context): Type = tp.stripTypeVar match {
    case tp @ MethodType(paramNames, paramTypes) =>
      val resultType1 = elimRepeated(tp.resultType)
      val paramTypes1 =
        if (paramTypes.nonEmpty && paramTypes.last.isRepeatedParam) {
          // only the last parameter can be repeated
          val last = paramTypes.last.underlyingIfRepeated(tp.isJava)
          paramTypes.init :+ last
        } else paramTypes
      tp.derivedMethodType(paramNames, paramTypes1, resultType1)
    case tp: PolyType =>
      tp.derivedPolyType(tp.paramNames, tp.paramBounds, elimRepeated(tp.resultType))
    case tp =>
      tp
  }

  /** Replaces a tree's type with its repeated-parameter-free equivalent. */
  def transformTypeOfTree(tree: Tree)(implicit ctx: Context): Tree =
    tree.withType(elimRepeated(tree.tpe))

  override def transformIdent(tree: Ident)(implicit ctx: Context, info: TransformerInfo): Tree =
    transformTypeOfTree(tree)

  override def transformSelect(tree: Select)(implicit ctx: Context, info: TransformerInfo): Tree =
    transformTypeOfTree(tree)

  override def transformApply(tree: Apply)(implicit ctx: Context, info: TransformerInfo): Tree = {
    // A `xs: _*` argument to a Java-defined method must be passed as an Array,
    // not a Seq, so convert when the argument's type derives from Seq.
    val args1 = tree.args.map {
      case arg: Typed if isWildcardStarArg(arg) =>
        if (tree.fun.symbol.is(JavaDefined) && arg.expr.tpe.derivesFrom(defn.SeqClass))
          seqToArray(arg.expr)
        else arg.expr
      case arg => arg
    }
    transformTypeOfTree(cpy.Apply(tree)(tree.fun, args1))
  }

  /** Convert sequence argument to Java array */
  private def seqToArray(tree: Tree)(implicit ctx: Context): Tree = tree match {
    case SeqLiteral(elems) =>
      // A literal sequence can be turned into a Java array literal directly.
      JavaSeqLiteral(elems)
    case _ =>
      val elemType = tree.tpe.firstBaseArgInfo(defn.SeqClass)
      // Phantom classes have no runtime representation, so box as Object instead.
      val elemClass0 = tree.tpe.firstBaseArgInfo(defn.SeqClass).classSymbol
      val elemClass = if (defn.PhantomClasses contains elemClass0) defn.ObjectClass else elemClass0
      ref(defn.DottyArraysModule)
        .select(nme.seqToArray)
        .appliedToType(elemType)
        .appliedTo(tree, Literal(Constant(elemClass.typeRef)))
        .ensureConforms(defn.ArrayType(elemType))
        // Because of phantomclasses, the Java array's type might not conform to the return type
  }

  override def transformTypeApply(tree: TypeApply)(implicit ctx: Context, info: TransformerInfo): Tree =
    transformTypeOfTree(tree)

  /** If method overrides a Java varargs method, add a varargs bridge.
   *  Also transform trees inside method annotation
   */
  override def transformDefDef(tree: DefDef)(implicit ctx: Context, info: TransformerInfo): Tree = {
    assert(ctx.phase == thisTransformer)
    def overridesJava = tree.symbol.allOverriddenSymbols.exists(_ is JavaDefined)
    if (tree.symbol.info.isVarArgsMethod && overridesJava)
      // bridge must be created at the next phase, where infos are already transformed
      addVarArgsBridge(tree)(ctx.withPhase(thisTransformer.next))
    else
      tree
  }

  /** Add a Java varargs bridge
   *  @param  ddef  the original method definition which is assumed to override
   *                a Java varargs method JM up to this phase.
   *  @return  a thicket consisting of `ddef` and a varargs bridge method
   *           which overrides the Java varargs method JM from this phase on
   *           and forwards to `ddef`.
   */
  private def addVarArgsBridge(ddef: DefDef)(implicit ctx: Context): Tree = {
    val original = ddef.symbol.asTerm
    // The bridge must be visible to Java callers, so it cannot stay Private.
    val bridge = original.copy(
      flags = ddef.symbol.flags &~ Private | Artifact,
      info = toJavaVarArgs(ddef.symbol.info)).enteredAfter(thisTransformer).asTerm
    val bridgeDef = polyDefDef(bridge, trefs => vrefss => {
      // Split off the trailing varargs parameter of the first parameter list,
      // wrap the incoming Java array in a Seq, and forward to the original.
      val (vrefs :+ varArgRef) :: vrefss1 = vrefss
      val elemtp = varArgRef.tpe.widen.argTypes.head
      ref(original.termRef)
        .appliedToTypes(trefs)
        .appliedToArgs(vrefs :+ TreeGen.wrapArray(varArgRef, elemtp))
        .appliedToArgss(vrefss1)
    })
    Thicket(ddef, bridgeDef)
  }

  /** Convert type from Scala to Java varargs method */
  private def toJavaVarArgs(tp: Type)(implicit ctx: Context): Type = tp match {
    case tp: PolyType =>
      tp.derivedPolyType(tp.paramNames, tp.paramBounds, toJavaVarArgs(tp.resultType))
    case tp: MethodType =>
      // Callers guarantee a varargs method type, hence a non-empty parameter list.
      val inits :+ last = tp.paramTypes
      val last1 = last.underlyingIfRepeated(isJava = true)
      tp.derivedMethodType(tp.paramNames, inits :+ last1, tp.resultType)
  }
}
| yusuke2255/dotty | src/dotty/tools/dotc/transform/ElimRepeated.scala | Scala | bsd-3-clause | 5,311 |
package controllers
import io.apibuilder.api.v0.models.{ApplicationForm, AppSortBy, MoveForm, SortOrder}
import io.apibuilder.api.v0.models.json._
import db._
import javax.inject.{Inject, Singleton}
import lib.Validation
import play.api.mvc._
import play.api.libs.json._
import java.util.UUID
@Singleton
class Applications @Inject() (
  val apibuilderControllerComponents: ApibuilderControllerComponents,
  applicationsDao: ApplicationsDao,
  versionsDao: VersionsDao
) extends ApibuilderController {

  /** Parses and validates the JSON request body as a `T`.
   *
   *  Returns `409 Conflict` with the validation errors when the body does not
   *  deserialize, otherwise applies `f` to the parsed form. Centralizes the
   *  JsError/JsSuccess boilerplate shared by post, put and move below.
   */
  private def withValidatedBody[T](body: JsValue)(f: T => Result)(implicit reads: Reads[T]): Result = {
    body.validate[T] match {
      case e: JsError => Conflict(Json.toJson(Validation.invalidJson(e)))
      case s: JsSuccess[T] => f(s.get)
    }
  }

  /** Lists applications visible to the caller, filtered and paginated. */
  def get(
    orgKey: String,
    name: Option[String],
    guid: Option[UUID],
    key: Option[String],
    hasVersion: Option[Boolean],
    limit: Long = 25,
    offset: Long = 0,
    sorting: Option[AppSortBy],
    ordering: Option[SortOrder]
  ) = Identified { request =>
    val applications = applicationsDao.findAll(
      request.authorization,
      orgKey = Some(orgKey),
      name = name,
      key = key,
      guid = guid,
      hasVersion = hasVersion,
      limit = limit,
      offset = offset,
      sorting = sorting,
      ordering = ordering
    )
    Ok(Json.toJson(applications))
  }

  /** Creates a new application in the organization; 409 on validation errors. */
  def post(orgKey: String) = Identified(parse.json) { request =>
    withOrg(request.authorization, orgKey) { org =>
      withValidatedBody[ApplicationForm](request.body) { form =>
        applicationsDao.validate(org, form) match {
          case Nil => {
            val app = applicationsDao.create(request.user, org, form)
            Ok(Json.toJson(app))
          }
          case errors => {
            Conflict(Json.toJson(errors))
          }
        }
      }
    }
  }

  /** Updates an existing application; 409 if it is missing or the form is invalid. */
  def putByApplicationKey(orgKey: String, applicationKey: String) = Identified(parse.json) { request =>
    withOrg(request.authorization, orgKey) { org =>
      withValidatedBody[ApplicationForm](request.body) { form =>
        applicationsDao.findByOrganizationKeyAndApplicationKey(request.authorization, org.key, applicationKey) match {
          case None => Conflict(Json.toJson(Validation.error(s"application[$applicationKey] not found or inaccessible")))
          case Some(existing) => {
            applicationsDao.validate(org, form, Some(existing)) match {
              case Nil => {
                val app = applicationsDao.update(request.user, existing, form)
                Ok(Json.toJson(app))
              }
              case errors => {
                Conflict(Json.toJson(errors))
              }
            }
          }
        }
      }
    }
  }

  /** Soft-deletes the application if present; always responds 204 (idempotent). */
  def deleteByApplicationKey(orgKey: String, applicationKey: String) = Identified { request =>
    withOrgMember(request.user, orgKey) { _ =>
      applicationsDao.findByOrganizationKeyAndApplicationKey(request.authorization, orgKey, applicationKey).foreach { application =>
        applicationsDao.softDelete(request.user, application)
      }
      NoContent
    }
  }

  /** Moves an application to another organization; 404 if not found, 409 on invalid move. */
  def postMoveByApplicationKey(orgKey: String, applicationKey: String) = Identified(parse.json) { request =>
    withOrg(request.authorization, orgKey) { org =>
      applicationsDao.findByOrganizationKeyAndApplicationKey(request.authorization, org.key, applicationKey) match {
        case None => NotFound
        case Some(app) => {
          withValidatedBody[MoveForm](request.body) { form =>
            applicationsDao.validateMove(request.authorization, app, form) match {
              case Nil => {
                val updatedApp = applicationsDao.move(request.user, app, form)
                Ok(Json.toJson(updatedApp))
              }
              case errors => {
                Conflict(Json.toJson(errors))
              }
            }
          }
        }
      }
    }
  }

  /** Lists (paginated) version metadata for an application; anonymous access allowed. */
  def getMetadataAndVersionsByApplicationKey(
    orgKey: String,
    applicationKey: String,
    limit: Long = 25,
    offset: Long = 0
  ) = Anonymous { request =>
    applicationsDao.findByOrganizationKeyAndApplicationKey(request.authorization, orgKey, applicationKey) match {
      case None => NotFound
      case Some(application) => {
        val versions = versionsDao.findAllVersions(
          request.authorization,
          applicationGuid = Some(application.guid),
          limit = limit,
          offset = offset
        )
        Ok(Json.toJson(versions))
      }
    }
  }

  /** Returns the latest version label as plain text; 404 when the application
   *  is missing or has no versions.
   */
  def getMetadataAndVersionsAndLatestTxtByApplicationKey(
    orgKey: String,
    applicationKey: String,
    limit: Long = 25,
    offset: Long = 0
  ) = Anonymous { request =>
    applicationsDao.findByOrganizationKeyAndApplicationKey(request.authorization, orgKey, applicationKey) match {
      case None => NotFound
      case Some(application) => {
        versionsDao.findAllVersions(
          request.authorization,
          applicationGuid = Some(application.guid),
          limit = 1
        ).headOption match {
          case None => NotFound
          case Some(v) => Ok(v.version)
        }
      }
    }
  }
}
| gheine/apidoc | api/app/controllers/Applications.scala | Scala | mit | 5,457 |
/*******************************************************************************
* Copyright (c) 2019. Carl Minden
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package com.anathema_roguelike
package entities.characters.player.classes
import com.anathema_roguelike.entities.characters.perks.PerkGroup
import com.anathema_roguelike.entities.characters.player.perks.abilities.spells.SpellOrSpecialization
import com.anathema_roguelike.entities.characters.player.perks.abilities.techniques.{Hide, PointBlankExpertise, QuickDraw, Technique}
import com.anathema_roguelike.entities.characters.player.perks.masteries.MasteryLevel
import com.anathema_roguelike.entities.characters.player.perks.skills._
import com.anathema_roguelike.entities.characters.player.perks.specializations.AbilitySpecialization
import com.anathema_roguelike.stats.characterstats.masteries.BowMastery
import com.anathema_roguelike.stats.characterstats.masteries.CrossbowMastery
import com.anathema_roguelike.stats.characterstats.masteries.SpellMastery
/** The Ranger player class.
  *
  * Purely declarative: the class is defined entirely by the 20 per-level
  * PerkGroups passed to the PlayerClass constructor (the /*N*/ comments mark
  * the character level each group unlocks at). The progression mixes Bow and
  * Crossbow mastery levels, SpellMastery, Druid spell picks of increasing
  * circle (1 -> 2 -> 3), technique specializations, and utility skills
  * (Hide, Analysis, Trapfinding, etc.). No additional behavior is added here.
  */
class Ranger() extends PlayerClass(
  new PerkGroup /*1*/ (new MasteryLevel[BowMastery], new PointBlankExpertise),
  new PerkGroup /*2*/ (new MasteryLevel[CrossbowMastery], new SpellOrSpecialization[Druid](1)),
  new PerkGroup /*3*/ (new MasteryLevel[BowMastery], new Hide),
  new PerkGroup /*4*/ (new MasteryLevel[SpellMastery], new Analysis),
  new PerkGroup /*5*/ (new MasteryLevel[BowMastery], new SpellOrSpecialization[Druid](1), new Trapfinding),
  new PerkGroup /*6*/ (new MasteryLevel[CrossbowMastery], new AbilitySpecialization[Technique[_]]),
  new PerkGroup /*7*/ (new MasteryLevel[BowMastery], new MasteryLevel[SpellMastery]),
  new PerkGroup /*8*/ (new SpellOrSpecialization[Druid](1), new QuickDraw),
  new PerkGroup /*9*/ (new MasteryLevel[BowMastery], new AbilitySpecialization[Technique[_]]),
  new PerkGroup /*10*/ (new MasteryLevel[CrossbowMastery], new MasteryLevel[SpellMastery], new SpellOrSpecialization[Druid](1)),
  new PerkGroup /*11*/ (new MasteryLevel[BowMastery], new AbilitySpecialization[Technique[_]]),
  new PerkGroup /*12*/ (new SpellOrSpecialization[Druid](2), new SureFooting),
  new PerkGroup /*13*/ (new MasteryLevel[BowMastery], new MasteryLevel[SpellMastery]),
  new PerkGroup /*14*/ (new AbilitySpecialization[Technique[_]], new EagleEye),
  new PerkGroup /*15*/ (new MasteryLevel[BowMastery], new MasteryLevel[CrossbowMastery], new SpellOrSpecialization[Druid](2)),
  new PerkGroup /*16*/ (new MasteryLevel[SpellMastery], new Swiftness),
  new PerkGroup /*17*/ (new MasteryLevel[BowMastery], new AbilitySpecialization[Technique[_]]),
  new PerkGroup /*18*/ (new MasteryLevel[CrossbowMastery], new SpellOrSpecialization[Druid](2)),
  new PerkGroup /*19*/ (new MasteryLevel[BowMastery], new MasteryLevel[SpellMastery]),
  new PerkGroup /*20*/ (new SpellOrSpecialization[Druid](3), new MasteryLevel[SpellMastery], new Autoload)) {
} | carlminden/anathema-roguelike | src/com/anathema_roguelike/entities/characters/player/classes/Ranger.scala | Scala | gpl-3.0 | 3,578 |
package mockws
import java.net.InetSocketAddress
import java.util.concurrent.atomic.AtomicReference
import mockws.MockWSHelpers._
import org.mockito.Mockito._
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks
import play.api.http.Status
import play.api.libs.json.Json
import play.api.libs.ws.WSAuthScheme
import play.api.libs.ws.WSResponse
import play.api.libs.ws.WSSignatureCalculator
import play.api.mvc.Result
import play.api.mvc.Results._
import play.api.test.Helpers._
import scala.collection.immutable.Seq
import scala.concurrent.Future
import scala.concurrent.duration._
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
/**
* Tests that [[MockWS]] simulates a WS client
*/
/**
 * Tests that [[MockWS]] simulates a WS client.
 *
 * Each test constructs a MockWS from a partial function of (method, url) to a
 * Play Action, issues requests through the standard WS API, and asserts on the
 * resulting WSResponse. Tests that create a MockWS close it at the end.
 */
class MockWSTest extends AnyFunSuite with Matchers with ScalaCheckPropertyChecks {

  test("mock WS simulates all HTTP methods") {
    val ws = MockWS {
      case (GET, "/get")       => Action { Ok("get ok") }
      case (POST, "/post")     => Action { Ok("post ok") }
      case (PUT, "/put")       => Action { Ok("put ok") }
      case (DELETE, "/delete") => Action { Ok("delete ok") }
      case ("PATCH", "/patch") => Action { Ok("patch ok") }
    }

    await(ws.url("/get").get()).body shouldEqual "get ok"
    await(ws.url("/post").post("")).body shouldEqual "post ok"
    await(ws.url("/put").put("")).body shouldEqual "put ok"
    await(ws.url("/delete").delete()).body shouldEqual "delete ok"
    await(ws.url("/patch").patch("")).body shouldEqual "patch ok"
    ws.close()
  }

  test("mock WS simulates the HTTP status code") {
    val ws = MockWS {
      case (GET, "/get200") => Action { Ok("") }
      case (GET, "/get201") => Action { Created("") }
      case (GET, "/get404") => Action { NotFound("") }
    }

    await(ws.url("/get200").get()).status shouldEqual OK
    await(ws.url("/get201").get()).status shouldEqual CREATED
    await(ws.url("/get404").get()).status shouldEqual NOT_FOUND
    ws.close()
  }

  test("mock WS works with explicit request") {
    val ws = MockWS { case (GET, "/blah") =>
      Action { request =>
        Ok(request.method)
      }
    }

    await(ws.url("/blah").get()).status shouldEqual OK
    ws.close()
  }

  test("mock WS simulates a POST with a JSON payload") {
    val ws = MockWS { case (POST, "/") =>
      Action { request =>
        Ok((request.body.asJson.get \ "result").as[String])
      }
    }

    val json = Json.parse("""{"result": "OK"}""")

    val response = await(ws.url("/").post(json))
    response.status shouldEqual OK
    response.body shouldEqual "OK"
    ws.close()
  }

  test("mock WS simulates a POST with a JSON payload with a custom content type") {
    // tolerantJson accepts the body as JSON regardless of the declared content type
    val ws = MockWS { case (POST, "/") =>
      Action(BodyParser.tolerantJson) { request =>
        Ok((request.body \ "result").as[String])
      }
    }

    val json = Json.parse("""{"result": "OK"}""")

    val response = await(ws.url("/").addHttpHeaders(CONTENT_TYPE -> "application/my-json").post(json))
    response.status shouldEqual OK
    response.body shouldEqual "OK"
    ws.close()
  }

  test("mock WS sets the response content type") {
    val ws = MockWS {
      case (GET, "/text") => Action { Ok("text") }
      case (GET, "/json") => Action { Ok(Json.parse("""{ "type": "json" }""")) }
    }

    val text = await(ws.url("/text").get())
    val json = await(ws.url("/json").get())

    text.header(CONTENT_TYPE) shouldEqual Some("text/plain; charset=utf-8")
    json.header(CONTENT_TYPE) shouldEqual Some("application/json")
    ws.close()
  }

  test("mock WS can produce JSON") {
    val ws = MockWS { case (GET, "/json") =>
      Action {
        Ok(Json.obj("field" -> "value"))
      }
    }

    val wsResponse = await(ws.url("/json").get())
    wsResponse.body shouldEqual """{"field":"value"}"""
    (wsResponse.json \ "field").asOpt[String] shouldEqual Some("value")
    ws.close()
  }

  test("mock WS can produce XML") {
    val ws = MockWS { case (GET, "/xml") =>
      Action {
        Ok(<foo><bar>value</bar></foo>)
      }
    }

    val wsResponse = await(ws.url("/xml").get())
    wsResponse.body shouldEqual "<foo><bar>value</bar></foo>"
    (wsResponse.xml \ "bar").text shouldEqual "value"
    ws.close()
  }

  test("a call to an unknown route produces default not found") {
    val ws = MockWS { case (GET, "/url") =>
      Action { Ok("") }
    }
    await(ws.url("/url2").get()).status should be(Status.NOT_FOUND)
    await(ws.url("/url").get()).status should be(Status.OK)
    ws.close()
  }

  test("a call to an unknown route produces implicit behaviour") {
    // the implicit RouteNotDefined overrides the default 404 fallback
    implicit val notDefinedBehaviour = RouteNotDefined(BadGateway)
    val ws = MockWS { case (GET, "/url") =>
      Action { Ok("") }
    }
    await(ws.url("/url2").get()).status should be(Status.BAD_GATEWAY)
    await(ws.url("/url").get()).status should be(Status.OK)
    ws.close()
  }

  test("mock WS supports custom response content types") {
    val ws = MockWS { case (_, _) =>
      Action {
        Ok("hello").as("hello/world")
      }
    }

    val wsResponse = await(ws.url("/").get())
    wsResponse.status shouldEqual OK
    wsResponse.header(CONTENT_TYPE) shouldEqual Some("hello/world")
    wsResponse.body shouldEqual "hello"
    ws.close()
  }

  test("mock WS supports custom request content types") {
    val ws = MockWS { case (_, _) =>
      Action { request =>
        request.contentType match {
          case Some(ct) => Ok(ct)
          case None => BadRequest("no content type")
        }
      }
    }

    val wsResponse = await(ws.url("/").addHttpHeaders(CONTENT_TYPE -> "hello/world").get)
    wsResponse.status shouldEqual OK
    wsResponse.body shouldEqual "hello/world"
    ws.close()
  }

  test("mock WS supports query parameter") {
    // property-based: any non-empty key q with value v must round-trip
    forAll { (q: String, v: String) =>
      whenever(q.nonEmpty) {
        val ws = MockWS { case (GET, "/uri") =>
          Action { request =>
            request.getQueryString(q).fold[Result](NotFound) { id =>
              Ok(id)
            }
          }
        }

        val wsResponse = await(ws.url("/uri").addQueryStringParameters(q -> v).get)
        wsResponse.status shouldEqual OK
        wsResponse.body shouldEqual v
        ws.close()
      }
    }
  }

  test("mock WS supports varargs passed as immutable Seqs") {
    // compile/runtime check that immutable.Seq varargs expansion is accepted
    forAll { (q: String, v: String) =>
      whenever(q.nonEmpty) {
        val ws = MockWS { case (GET, "/uri") =>
          Action { request =>
            request.getQueryString(q).fold[Result](NotFound) { id =>
              Ok(id)
            }
          }
        }

        await(ws.url("/uri").addHttpHeaders(Seq(q -> v): _*).get)
        await(ws.url("/uri").addQueryStringParameters(Seq(q -> v): _*).get)
        ws.close()
      }
    }
  }

  test("mock WS supports method in execute") {
    val ws = MockWS {
      case (GET, "/get")       => Action { Ok("get ok") }
      case (POST, "/post")     => Action { Ok("post ok") }
      case (PUT, "/put")       => Action { Ok("put ok") }
      case (DELETE, "/delete") => Action { Ok("delete ok") }
    }

    await(ws.url("/get").withMethod("GET").execute()).body shouldEqual "get ok"
    await(ws.url("/post").withMethod("POST").execute()).body shouldEqual "post ok"
    await(ws.url("/put").withMethod("PUT").execute()).body shouldEqual "put ok"
    await(ws.url("/delete").withMethod("DELETE").execute()).body shouldEqual "delete ok"
    ws.close()
  }

  test("should not raise NullPointerExceptions on method chaining") {
    val ws = MockWS { case (GET, "/get") =>
      Action { Ok("get ok") }
    }

    await(
      ws.url("/get")
        .sign(mock(classOf[WSSignatureCalculator]))
        .withVirtualHost("bla")
        .withFollowRedirects(follow = true)
        .withAuth("user", "password", WSAuthScheme.BASIC)
        .withRequestTimeout(10.millis)
        .get()
    ).body shouldEqual "get ok"
    ws.close()
  }

  test("should not raise Exceptions when asking for the used sockets") {
    val ws = MockWS { case (GET, "/get") =>
      Action { Ok("get ok") }
    }

    val request: Future[WSResponse] = ws
      .url("/get")
      .sign(mock(classOf[WSSignatureCalculator]))
      .withVirtualHost("bla")
      .withFollowRedirects(follow = true)
      .withRequestTimeout(10.millis)
      .get()

    val response: WSResponse = await(request)
    response.body shouldEqual "get ok"
    // MockWS reports these fixed placeholder addresses for the underlying AHC response
    response.underlying[play.shaded.ahc.org.asynchttpclient.Response].getLocalAddress shouldBe InetSocketAddress
      .createUnresolved("127.0.0.1", 8383)
    response.underlying[play.shaded.ahc.org.asynchttpclient.Response].getRemoteAddress shouldBe InetSocketAddress
      .createUnresolved("127.0.0.1", 8384)
    ws.close()
  }

  test("multiple headers with same name should be retained & merged correctly") {
    val ws = MockWS { case (GET, "/get") =>
      Action { req =>
        Ok(req.headers.getAll("v1").zipWithIndex.toString)
      }
    }

    val request = ws
      .url("/get")
      .addHttpHeaders(("v1", "first"), ("v1", "second"))
      .get()

    val response = await(request)
    response.body shouldEqual "List((first,0), (second,1))"
    ws.close()
  }

  test("discard old headers when setting withHttpHeaders") {
    // AtomicReference captures what the Action actually received
    val headers = new AtomicReference[Map[String, scala.Seq[String]]](Map.empty)
    val ws = MockWS { case (GET, "/get") =>
      Action { req =>
        headers.set(req.headers.toMap)
        Ok(req.headers.getAll("key1").zipWithIndex.toString)
      }
    }

    val request = ws
      .url("/get")
      .withHttpHeaders("key1" -> "value1")
      .withHttpHeaders("key2" -> "value2")
      .get()
    await(request)

    val headersMap = headers.get()
    // withHttpHeaders replaces (not merges) previously set headers
    headersMap.get("key1") shouldBe None
    headersMap.get("key2") shouldBe Some(Seq("value2"))
  }

  test("discard old query parameters when setting withQueryStringParameters") {
    val queryString = new AtomicReference[Map[String, scala.Seq[String]]](Map.empty)
    val ws = MockWS { case (GET, "/get") =>
      Action { req =>
        queryString.set(req.queryString)
        Ok("")
      }
    }

    val request = ws
      .url("/get")
      .withQueryStringParameters("bar" -> "baz")
      .withQueryStringParameters("bar" -> "bah")
      .get()
    await(request)

    val queryMap = queryString.get()
    // the second withQueryStringParameters call wins
    queryMap.get("bar") shouldBe Some(Seq("bah"))
  }

  test("keep headers with content type from BodyWritable") {
    val headers = new AtomicReference[Map[String, scala.Seq[String]]](Map.empty)
    val ws = MockWS { case (POST, "/") =>
      Action { req =>
        headers.set(req.headers.toMap)
        Ok
      }
    }

    val request = ws
      .url("/")
      .withHttpHeaders("key1" -> "value1")
      .post("test")

    val response = await(request)
    response.status shouldBe OK
    val headersMap = headers.get()
    // user headers survive the content-type that posting a body adds
    headersMap.get("key1") shouldBe Some(Seq("value1"))
  }
}
| leanovate/play-mockws | src/test/scala/mockws/MockWSTest.scala | Scala | mit | 10,861 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution.misc
/** Cross-platform equivalent for `java.lang.ThreadLocal`,
  * for specifying thread-local variables.
  *
  * Each thread that touches the variable (via [[ThreadLocal#get]] or
  * [[ThreadLocal#set]]) works on its own independently initialized copy,
  * so writes on one thread are never observed by another.
  *
  * @param initial the value returned by [[ThreadLocal#get reads]] on any
  *        thread that has not yet [[ThreadLocal#set written]] a value
  */
final class ThreadLocal[A] private (val initial: A) {

  // Backing JVM thread-local; every thread lazily starts from `initial`.
  private[this] val underlying: java.lang.ThreadLocal[A] =
    new java.lang.ThreadLocal[A] {
      override def initialValue(): A = initial
    }

  /** Returns the calling thread's copy of the variable, initializing it
    * to [[ThreadLocal.initial initial]] on first access.
    *
    * @return the current thread's value of this thread-local
    */
  def get(): A = underlying.get()

  /** Stores `value` into the calling thread's copy of the variable.
    *
    * @param value the value to associate with the current thread
    */
  def set(value: A): Unit = underlying.set(value)

  /** Discards the calling thread's entry; a subsequent [[ThreadLocal.get read]]
    * on that thread yields [[ThreadLocal.initial initial]] again.
    */
  def reset(): Unit = underlying.remove()
}

object ThreadLocal {
  /** Builds a [[ThreadLocal]] reference initialized with `null`. */
  def apply[A <: AnyRef](): ThreadLocal[A] =
    new ThreadLocal[A](null.asInstanceOf[A])

  /** Builds a [[ThreadLocal]] reference.
    *
    * @param initial the value returned by [[ThreadLocal.get reads]] on any
    *        thread that has not yet [[ThreadLocal#set written]] a value
    */
  def apply[A](initial: A): ThreadLocal[A] =
    new ThreadLocal[A](initial)
}
| alexandru/monifu | monix-execution/jvm/src/main/scala/monix/execution/misc/ThreadLocal.scala | Scala | apache-2.0 | 2,905 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.stats
import java.lang.{Double => jDouble, Float => jFloat, Long => jLong}
import java.util.Date
import com.vividsolutions.jts.geom.Geometry
import org.geotools.feature.simple.SimpleFeatureBuilder
import org.junit.runner.RunWith
import org.locationtech.geomesa.curve.{BinnedTime, TimePeriod, Z2SFC}
import org.locationtech.geomesa.utils.geotools.GeoToolsDateFormat
import org.locationtech.geomesa.utils.text.WKTUtils
import org.locationtech.sfcurve.zorder.Z2
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class FrequencyTest extends Specification with StatTestHelper {
  // Builds a Frequency stat for `attribute` at the given precision and, when
  // `observe` is set, feeds every fixture feature (from StatTestHelper) into it.
  def createStat[T](attribute: String, precision: Int, observe: Boolean): Frequency[T] = {
    val s = Stat(sft, Stat.Frequency(attribute, precision))
    if (observe) {
      features.foreach { s.observe }
    }
    s.asInstanceOf[Frequency[T]]
  }

  // Typed convenience wrappers around createStat, one per fixture attribute.
  def stringStat(precision: Int, observe: Boolean = true) =
    createStat[String]("strAttr", precision: Int, observe)

  def intStat(precision: Int, observe: Boolean = true) =
    createStat[Integer]("intAttr", precision: Int, observe)

  def longStat(precision: Int, observe: Boolean = true) =
    createStat[jLong]("longAttr", precision: Int, observe)

  def floatStat(precision: Int, observe: Boolean = true) =
    createStat[jFloat]("floatAttr", precision: Int, observe)

  def doubleStat(precision: Int, observe: Boolean = true) =
    createStat[jDouble]("doubleAttr", precision: Int, observe)

  def dateStat(precision: Int, observe: Boolean = true) =
    createStat[Date]("dtg", precision: Int, observe)

  def geomStat(precision: Int, observe: Boolean = true) =
    createStat[Geometry]("geom", precision: Int, observe)

  // Parses an ISO-8601 date string (GeoToolsDateFormat) as a UTC java.util.Date.
  def toDate(string: String) = java.util.Date.from(java.time.LocalDateTime.parse(string, GeoToolsDateFormat).toInstant(java.time.ZoneOffset.UTC))
  // Parses a WKT string into a JTS Geometry.
  def toGeom(string: String) = WKTUtils.read(string)
"Frequency stat" should {
"enumerate ranges" >> {
val min = Z2SFC.invert(Z2(2, 2))
val max = Z2SFC.invert(Z2(3, 6))
val ranges = Z2SFC.ranges(Seq((min._1, min._2, max._1, max._2)))
val indices = Frequency.enumerate(ranges, 64).toSeq
indices must containTheSameElementsAs(Seq(12, 13, 14, 15, 36, 37, 38, 39, 44, 45))
}
"support weekly binning" >> {
  // frequency stat on 'longAttr', binned by the week extracted from 'dtg'
  val stat = Stat(sft, Stat.Frequency("longAttr", "dtg", TimePeriod.Week, 1)).asInstanceOf[Frequency[Long]]
  val weekStart = 45 * 52 // approximately jan 2015
  val weeks = Set(weekStart, weekStart + 1, weekStart + 2, weekStart + 3)
  // first observation is one hour into the starting week, then one feature per day
  val dayStart = BinnedTime.Epoch.plusWeeks(weekStart).plusHours(1)
  // 4 cycles of 28 days => values 0..111 spread across 4 consecutive weeks
  (0 until 28 * 4).foreach { i =>
    val sf = SimpleFeatureBuilder.build(sft, Array[AnyRef](), i.toString)
    sf.setAttribute("longAttr", i)
    sf.setAttribute("geom", "POINT(-75 45)")
    sf.setAttribute("dtg", dayStart.plusDays(i % 28).toDate)
    stat.observe(sf)
  }
  val serializer = StatSerializer(sft)
  // one sketch per observed week
  stat.sketchMap must haveSize(4)
  stat.sketchMap.keySet mustEqual weeks
  // each 28-value cycle contributes 7 values to each of the 4 weeks
  val offsets = (0 until 4).map(_ * 28)
  forall(offsets.flatMap(o => o + 0 until o + 7))(stat.count(weekStart.toShort, _) mustEqual 1)
  forall(offsets.flatMap(o => o + 7 until o + 14))(stat.count((weekStart + 1).toShort, _) mustEqual 1)
  forall(offsets.flatMap(o => o + 14 until o + 21))(stat.count((weekStart + 2).toShort, _) mustEqual 1)
  forall(offsets.flatMap(o => o + 21 until o + 28))(stat.count((weekStart + 3).toShort, _) mustEqual 1)
  // round-trip through serialization must preserve equivalence
  val serialized = serializer.serialize(stat)
  val deserialized = serializer.deserialize(serialized)
  stat.isEquivalent(deserialized) must beTrue
  // splitting by time yields one stat per week, each holding that week's counts
  val splits = stat.splitByTime.toMap
  splits must haveSize(4)
  splits.keySet mustEqual weeks
  forall(offsets.flatMap(o => o + 0 until o + 7))(d => splits(weekStart.toShort).count(d) mustEqual 1)
  forall(offsets.flatMap(o => o + 7 until o + 14))(d => splits((weekStart + 1).toShort).count(d) mustEqual 1)
  forall(offsets.flatMap(o => o + 14 until o + 21))(d => splits((weekStart + 2).toShort).count(d) mustEqual 1)
  forall(offsets.flatMap(o => o + 21 until o + 28))(d => splits((weekStart + 3).toShort).count(d) mustEqual 1)
  // the splits still have to serialize the attribute, eps, etc, hence the extra 25 bytes per (split - 1)
  splits.values.map(serializer.serialize).map(_.length).sum mustEqual serialized.length + 3 * 25
}
// Frequency stat behavior for String attributes. The observed keys are
// zero-padded ("abc000".."abc099" from `features`, "abc100".."abc199" from
// `features2`), so every count lookup must use the %03d format.
"work with strings" >> {
  "be empty initially" >> {
    val stat = stringStat(6, observe = false)
    stat.isEmpty must beTrue
    stat.size mustEqual 0
  }
  "correctly bin values" >> {
    val stat = stringStat(6)
    stat.isEmpty must beFalse
    stat.size mustEqual 100
    // counts are approximate, so a single observation may report up to 2
    forall(0 until 100)(i => stat.count(f"abc$i%03d") must beBetween(1L, 2L))
    stat.count("foo") mustEqual 0
  }
  "serialize and deserialize" >> {
    val stat = stringStat(6)
    val packed = StatSerializer(sft).serialize(stat)
    val unpacked = StatSerializer(sft).deserialize(packed)
    unpacked must beAnInstanceOf[Frequency[String]]
    unpacked.asInstanceOf[Frequency[String]].attribute mustEqual stat.attribute
    unpacked.asInstanceOf[Frequency[String]].precision mustEqual stat.precision
    unpacked.asInstanceOf[Frequency[String]].size mustEqual stat.size
    unpacked.asInstanceOf[Frequency[String]].toJson mustEqual stat.toJson
  }
  "serialize and deserialize empty stats" >> {
    val stat = stringStat(6, observe = false)
    val packed = StatSerializer(sft).serialize(stat)
    val unpacked = StatSerializer(sft).deserialize(packed)
    unpacked must beAnInstanceOf[Frequency[String]]
    unpacked.asInstanceOf[Frequency[String]].attribute mustEqual stat.attribute
    unpacked.asInstanceOf[Frequency[String]].precision mustEqual stat.precision
    unpacked.asInstanceOf[Frequency[String]].size mustEqual stat.size
    unpacked.asInstanceOf[Frequency[String]].toJson mustEqual stat.toJson
  }
  "deserialize as immutable value" >> {
    val stat = stringStat(6)
    val packed = StatSerializer(sft).serialize(stat)
    val unpacked = StatSerializer(sft).deserialize(packed, immutable = true)
    unpacked must beAnInstanceOf[Frequency[String]]
    unpacked.asInstanceOf[Frequency[String]].attribute mustEqual stat.attribute
    unpacked.asInstanceOf[Frequency[String]].precision mustEqual stat.precision
    unpacked.asInstanceOf[Frequency[String]].size mustEqual stat.size
    unpacked.asInstanceOf[Frequency[String]].toJson mustEqual stat.toJson
    // every mutating operation must be rejected on an immutable stat
    unpacked.clear must throwAn[Exception]
    unpacked.+=(stat) must throwAn[Exception]
    unpacked.observe(features.head) must throwAn[Exception]
    unpacked.unobserve(features.head) must throwAn[Exception]
  }
  "combine two Frequencies" >> {
    val stat = stringStat(6)
    val stat2 = stringStat(6, observe = false)
    features2.foreach { stat2.observe }
    stat2.size mustEqual 100
    forall(100 until 200)(i => stat2.count(f"abc$i%03d") must beBetween(1L, 2L))
    stat2.count("foo") mustEqual 0L
    // += mutates the left-hand stat only
    stat += stat2
    stat.size mustEqual 200
    forall(0 until 200)(i => stat.count(f"abc$i%03d") must beBetween(1L, 3L))
    stat.count("foo") mustEqual 0L
    // the right-hand stat is unchanged by the combine
    stat2.size mustEqual 100
    forall(100 until 200)(i => stat2.count(f"abc$i%03d") must beBetween(1L, 2L))
    stat2.count("foo") mustEqual 0L
  }
  "clear" >> {
    val stat = stringStat(6)
    stat.clear()
    stat.isEmpty must beTrue
    stat.size mustEqual 0
    // use the same zero-padded key format as the rest of the suite
    // (was %3d, which queried space-padded keys that are never observed)
    forall(0 until 200)(i => stat.count(f"abc$i%03d") mustEqual 0)
  }
}
// Frequency stat behavior for Integer attributes: values 0..99 come from
// `features`, 100..199 from `features2`.
"work with integers" >> {
  "be empty initially" >> {
    // a stat that has observed nothing reports empty and zero size
    val freq = intStat(1, observe = false)
    freq.isEmpty must beTrue
    freq.size mustEqual 0
  }
  "correctly bin values" >> {
    val freq = intStat(1)
    freq.isEmpty must beFalse
    freq.size mustEqual 100
    // counts are approximate, so a single observation may report up to 2
    forall(0 until 100) { n => freq.count(n) must beBetween(1L, 2L) }
    freq.count(200) mustEqual 0
  }
  "serialize and deserialize" >> {
    val freq = intStat(1)
    val packed = StatSerializer(sft).serialize(freq)
    val unpacked = StatSerializer(sft).deserialize(packed)
    unpacked must beAnInstanceOf[Frequency[Integer]]
    // the round-tripped stat must match the original field for field
    val restored = unpacked.asInstanceOf[Frequency[Integer]]
    restored.attribute mustEqual freq.attribute
    restored.precision mustEqual freq.precision
    restored.size mustEqual freq.size
    restored.toJson mustEqual freq.toJson
  }
  "serialize and deserialize empty stats" >> {
    val freq = intStat(1, observe = false)
    val packed = StatSerializer(sft).serialize(freq)
    val unpacked = StatSerializer(sft).deserialize(packed)
    unpacked must beAnInstanceOf[Frequency[Integer]]
    val restored = unpacked.asInstanceOf[Frequency[Integer]]
    restored.attribute mustEqual freq.attribute
    restored.precision mustEqual freq.precision
    restored.size mustEqual freq.size
    restored.toJson mustEqual freq.toJson
  }
  "combine two Frequencies" >> {
    val combined = intStat(1)
    val other = intStat(1, observe = false)
    other.observe _
    features2.foreach(other.observe)
    other.size mustEqual 100
    forall(100 until 200) { n => other.count(n) must beBetween(1L, 2L) }
    other.count(300) mustEqual 0L
    // += mutates the left-hand stat only
    combined += other
    combined.size mustEqual 200
    forall(0 until 200) { n => combined.count(n) must beBetween(1L, 3L) }
    combined.count(300) mustEqual 0L
    // the right-hand stat is unchanged by the combine
    other.size mustEqual 100
    forall(100 until 200) { n => other.count(n) must beBetween(1L, 2L) }
    other.count(300) mustEqual 0L
  }
  "clear" >> {
    val freq = intStat(1)
    freq.clear()
    freq.isEmpty must beTrue
    freq.size mustEqual 0
    forall(0 until 200) { n => freq.count(n) mustEqual 0 }
  }
}
// Frequency stat behavior for Long attributes
"work with longs" >> {
  "be empty initially" >> {
    val stat = longStat(1, observe = false)
    stat.isEmpty must beTrue
    stat.size mustEqual 0
  }
  "correctly bin values" >> {
    val stat = longStat(1)
    stat.isEmpty must beFalse
    stat.size mustEqual 100
    // counts are approximate, so a single observation may report up to 2
    forall(0 until 100)(i => stat.count(i.toLong) must beBetween(1L, 2L))
    stat.count(200L) mustEqual 0
  }
  "serialize and deserialize" >> {
    val stat = longStat(1)
    val packed = StatSerializer(sft).serialize(stat)
    val unpacked = StatSerializer(sft).deserialize(packed)
    unpacked must beAnInstanceOf[Frequency[jLong]]
    unpacked.asInstanceOf[Frequency[jLong]].attribute mustEqual stat.attribute
    unpacked.asInstanceOf[Frequency[jLong]].precision mustEqual stat.precision
    unpacked.asInstanceOf[Frequency[jLong]].size mustEqual stat.size
    unpacked.asInstanceOf[Frequency[jLong]].toJson mustEqual stat.toJson
  }
  "serialize and deserialize empty stats" >> {
    val stat = longStat(1, observe = false)
    val packed = StatSerializer(sft).serialize(stat)
    val unpacked = StatSerializer(sft).deserialize(packed)
    unpacked must beAnInstanceOf[Frequency[jLong]]
    unpacked.asInstanceOf[Frequency[jLong]].attribute mustEqual stat.attribute
    unpacked.asInstanceOf[Frequency[jLong]].precision mustEqual stat.precision
    unpacked.asInstanceOf[Frequency[jLong]].size mustEqual stat.size
    unpacked.asInstanceOf[Frequency[jLong]].toJson mustEqual stat.toJson
  }
  "combine two Frequencies" >> {
    val stat = longStat(1)
    val stat2 = longStat(1, observe = false)
    features2.foreach { stat2.observe }
    stat2.size mustEqual 100
    forall(100 until 200)(i => stat2.count(i.toLong) must beBetween(1L, 2L))
    stat2.count(300L) mustEqual 0L
    // += mutates the left-hand stat; stat2 must remain unchanged (checked below)
    stat += stat2
    stat.size mustEqual 200
    forall(0 until 200)(i => stat.count(i.toLong) must beBetween(1L, 3L))
    stat.count(300L) mustEqual 0L
    stat2.size mustEqual 100
    forall(100 until 200)(i => stat2.count(i.toLong) must beBetween(1L, 2L))
    stat2.count(300L) mustEqual 0L
  }
  "clear" >> {
    val stat = longStat(1)
    stat.clear()
    stat.isEmpty must beTrue
    stat.size mustEqual 0
    forall(0 until 200)(i => stat.count(i.toLong) mustEqual 0)
  }
}
// Frequency stat behavior for Float attributes
"work with floats" >> {
  "be empty initially" >> {
    val stat = floatStat(1, observe = false)
    stat.isEmpty must beTrue
    stat.size mustEqual 0
  }
  "correctly bin values" >> {
    val stat = floatStat(1)
    stat.isEmpty must beFalse
    stat.size mustEqual 100
    // counts are approximate, so a single observation may report up to 2
    forall(0 until 100)(i => stat.count(i.toFloat) must beBetween(1L, 2L))
    stat.count(200f) mustEqual 0
  }
  "serialize and deserialize" >> {
    val stat = floatStat(1)
    val packed = StatSerializer(sft).serialize(stat)
    val unpacked = StatSerializer(sft).deserialize(packed)
    unpacked must beAnInstanceOf[Frequency[jFloat]]
    unpacked.asInstanceOf[Frequency[jFloat]].attribute mustEqual stat.attribute
    unpacked.asInstanceOf[Frequency[jFloat]].precision mustEqual stat.precision
    unpacked.asInstanceOf[Frequency[jFloat]].size mustEqual stat.size
    unpacked.asInstanceOf[Frequency[jFloat]].toJson mustEqual stat.toJson
  }
  "serialize and deserialize empty stats" >> {
    val stat = floatStat(1, observe = false)
    val packed = StatSerializer(sft).serialize(stat)
    val unpacked = StatSerializer(sft).deserialize(packed)
    unpacked must beAnInstanceOf[Frequency[jFloat]]
    unpacked.asInstanceOf[Frequency[jFloat]].attribute mustEqual stat.attribute
    unpacked.asInstanceOf[Frequency[jFloat]].precision mustEqual stat.precision
    unpacked.asInstanceOf[Frequency[jFloat]].size mustEqual stat.size
    unpacked.asInstanceOf[Frequency[jFloat]].toJson mustEqual stat.toJson
  }
  "combine two Frequencies" >> {
    val stat = floatStat(1)
    val stat2 = floatStat(1, observe = false)
    features2.foreach { stat2.observe }
    stat2.size mustEqual 100
    forall(100 until 200)(i => stat2.count(i.toFloat) must beBetween(1L, 2L))
    stat2.count(300f) mustEqual 0L
    // += mutates the left-hand stat; stat2 must remain unchanged (checked below)
    stat += stat2
    stat.size mustEqual 200
    forall(0 until 200)(i => stat.count(i.toFloat) must beBetween(1L, 3L))
    stat.count(300f) mustEqual 0L
    stat2.size mustEqual 100
    forall(100 until 200)(i => stat2.count(i.toFloat) must beBetween(1L, 2L))
    stat2.count(300f) mustEqual 0L
  }
  "clear" >> {
    val stat = floatStat(1)
    stat.clear()
    stat.isEmpty must beTrue
    stat.size mustEqual 0
    forall(0 until 200)(i => stat.count(i.toFloat) mustEqual 0)
  }
}
// Frequency stat behavior for Double attributes
"work with doubles" >> {
  "be empty initially" >> {
    val stat = doubleStat(1, observe = false)
    stat.isEmpty must beTrue
    stat.size mustEqual 0
  }
  "correctly bin values" >> {
    val stat = doubleStat(1)
    stat.isEmpty must beFalse
    stat.size mustEqual 100
    // counts are approximate, so a single observation may report up to 2
    forall(0 until 100)(i => stat.count(i.toDouble) must beBetween(1L, 2L))
    stat.count(200d) mustEqual 0
  }
  "serialize and deserialize" >> {
    val stat = doubleStat(1)
    val packed = StatSerializer(sft).serialize(stat)
    val unpacked = StatSerializer(sft).deserialize(packed)
    unpacked must beAnInstanceOf[Frequency[jDouble]]
    unpacked.asInstanceOf[Frequency[jDouble]].attribute mustEqual stat.attribute
    unpacked.asInstanceOf[Frequency[jDouble]].precision mustEqual stat.precision
    unpacked.asInstanceOf[Frequency[jDouble]].size mustEqual stat.size
    unpacked.asInstanceOf[Frequency[jDouble]].toJson mustEqual stat.toJson
  }
  "serialize and deserialize empty stats" >> {
    val stat = doubleStat(1, observe = false)
    val packed = StatSerializer(sft).serialize(stat)
    val unpacked = StatSerializer(sft).deserialize(packed)
    unpacked must beAnInstanceOf[Frequency[jDouble]]
    unpacked.asInstanceOf[Frequency[jDouble]].attribute mustEqual stat.attribute
    unpacked.asInstanceOf[Frequency[jDouble]].precision mustEqual stat.precision
    unpacked.asInstanceOf[Frequency[jDouble]].size mustEqual stat.size
    unpacked.asInstanceOf[Frequency[jDouble]].toJson mustEqual stat.toJson
  }
  "combine two Frequencies" >> {
    val stat = doubleStat(1)
    val stat2 = doubleStat(1, observe = false)
    features2.foreach { stat2.observe }
    stat2.size mustEqual 100
    forall(100 until 200)(i => stat2.count(i.toDouble) must beBetween(1L, 2L))
    stat2.count(300d) mustEqual 0L
    // += mutates the left-hand stat; stat2 must remain unchanged (checked below)
    stat += stat2
    stat.size mustEqual 200
    forall(0 until 200)(i => stat.count(i.toDouble) must beBetween(1L, 3L))
    stat.count(300d) mustEqual 0L
    stat2.size mustEqual 100
    forall(100 until 200)(i => stat2.count(i.toDouble) must beBetween(1L, 2L))
    stat2.count(300d) mustEqual 0L
  }
  "clear" >> {
    val stat = doubleStat(1)
    stat.clear()
    stat.isEmpty must beTrue
    stat.size mustEqual 0
    forall(0 until 200)(i => stat.count(i.toDouble) mustEqual 0)
  }
}
// Frequency stat behavior for Date attributes; observed dates fall on
// 2012-01-01 (features) and 2012-01-02 (features2), cycling through 24 hours
"work with dates" >> {
  "be empty initially" >> {
    val stat = dateStat(1, observe = false)
    stat.isEmpty must beTrue
    stat.size mustEqual 0
  }
  "correctly bin values" >> {
    val stat = dateStat(1)
    stat.isEmpty must beFalse
    stat.size mustEqual 100
    // 100 values cycle through 24 hourly bins, so each bin sees ~4-5 hits
    forall(0 until 100)(i => stat.count(toDate(f"2012-01-01T${i%24}%02d:00:00.000Z")) must beBetween(4L, 5L))
    stat.count(toDate(f"2012-01-05T00:00:00.000Z")) mustEqual 0
  }
  "serialize and deserialize" >> {
    val stat = dateStat(1)
    val packed = StatSerializer(sft).serialize(stat)
    val unpacked = StatSerializer(sft).deserialize(packed)
    unpacked must beAnInstanceOf[Frequency[Date]]
    unpacked.asInstanceOf[Frequency[Date]].attribute mustEqual stat.attribute
    unpacked.asInstanceOf[Frequency[Date]].precision mustEqual stat.precision
    unpacked.asInstanceOf[Frequency[Date]].size mustEqual stat.size
    unpacked.asInstanceOf[Frequency[Date]].toJson mustEqual stat.toJson
  }
  "serialize and deserialize empty stats" >> {
    val stat = dateStat(1, observe = false)
    val packed = StatSerializer(sft).serialize(stat)
    val unpacked = StatSerializer(sft).deserialize(packed)
    unpacked must beAnInstanceOf[Frequency[Date]]
    unpacked.asInstanceOf[Frequency[Date]].attribute mustEqual stat.attribute
    unpacked.asInstanceOf[Frequency[Date]].precision mustEqual stat.precision
    unpacked.asInstanceOf[Frequency[Date]].size mustEqual stat.size
    unpacked.asInstanceOf[Frequency[Date]].toJson mustEqual stat.toJson
  }
  "combine two Frequencies" >> {
    val stat = dateStat(1)
    val stat2 = dateStat(1, observe = false)
    features2.foreach { stat2.observe }
    stat2.size mustEqual 100
    forall(100 until 200)(i => stat2.count(toDate(f"2012-01-02T${i%24}%02d:00:00.000Z")) must beBetween(4L, 5L))
    stat2.count(toDate(f"2012-01-05T00:00:00.000Z")) mustEqual 0L
    // += mutates the left-hand stat; stat2 must remain unchanged (checked below)
    stat += stat2
    stat.size mustEqual 200
    forall(0 until 100)(i => stat.count(toDate(f"2012-01-01T${i%24}%02d:00:00.000Z")) must beBetween(4L, 5L))
    forall(100 until 200)(i => stat.count(toDate(f"2012-01-02T${i%24}%02d:00:00.000Z")) must beBetween(4L, 5L))
    stat.count(toDate(f"2012-01-05T00:00:00.000Z")) mustEqual 0L
    stat2.size mustEqual 100
    forall(100 until 200)(i => stat2.count(toDate(f"2012-01-02T${i%24}%02d:00:00.000Z")) must beBetween(4L, 5L))
    stat2.count(toDate(f"2012-01-05T00:00:00.000Z")) mustEqual 0L
  }
  "clear" >> {
    val stat = dateStat(1)
    stat.clear()
    stat.isEmpty must beTrue
    stat.size mustEqual 0
    forall(0 until 100)(i => stat.count(toDate(f"2012-01-01T${i%24}%02d:00:00.000Z")) mustEqual 0)
    forall(100 until 200)(i => stat.count(toDate(f"2012-01-02T${i%24}%02d:00:00.000Z")) mustEqual 0)
  }
}
// Frequency stat behavior for Geometry attributes (points binned spatially)
"work with geometries" >> {
  "be empty initially" >> {
    val stat = geomStat(24, observe = false)
    stat.isEmpty must beTrue
    stat.size mustEqual 0
  }
  "correctly bin values" >> {
    val stat = geomStat(24)
    stat.isEmpty must beFalse
    stat.size mustEqual 100
    // nearby points may share a spatial bin, so counts can exceed 1
    forall(0 until 100)(i => stat.count(toGeom(s"POINT(-$i ${i / 2})")) must beBetween(1L, 6L))
  }
  "serialize and deserialize" >> {
    val stat = geomStat(24)
    val packed = StatSerializer(sft).serialize(stat)
    val unpacked = StatSerializer(sft).deserialize(packed)
    unpacked must beAnInstanceOf[Frequency[Geometry]]
    unpacked.asInstanceOf[Frequency[Geometry]].attribute mustEqual stat.attribute
    unpacked.asInstanceOf[Frequency[Geometry]].precision mustEqual stat.precision
    unpacked.asInstanceOf[Frequency[Geometry]].size mustEqual stat.size
    unpacked.asInstanceOf[Frequency[Geometry]].toJson mustEqual stat.toJson
  }
  "serialize and deserialize empty stats" >> {
    val stat = geomStat(24, observe = false)
    val packed = StatSerializer(sft).serialize(stat)
    val unpacked = StatSerializer(sft).deserialize(packed)
    unpacked must beAnInstanceOf[Frequency[Geometry]]
    unpacked.asInstanceOf[Frequency[Geometry]].attribute mustEqual stat.attribute
    unpacked.asInstanceOf[Frequency[Geometry]].precision mustEqual stat.precision
    unpacked.asInstanceOf[Frequency[Geometry]].size mustEqual stat.size
    unpacked.asInstanceOf[Frequency[Geometry]].toJson mustEqual stat.toJson
  }
  "combine two Frequencies" >> {
    val stat = geomStat(24)
    val stat2 = geomStat(24, observe = false)
    features2.foreach { stat2.observe }
    stat2.size mustEqual 100
    forall(100 until 200)(i => stat2.count(toGeom(s"POINT(${i -20} ${i / 2 - 20})")) must beBetween(1L, 6L))
    // += mutates the left-hand stat; stat2 must remain unchanged (checked below)
    stat += stat2
    stat.size mustEqual 200
    forall(0 until 100)(i => stat.count(toGeom(s"POINT(-$i ${i / 2})")) must beBetween(1L, 10L))
    forall(100 until 200)(i => stat.count(toGeom(s"POINT(${i -20} ${i / 2 - 20})")) must beBetween(1L, 10L))
    stat2.size mustEqual 100
    forall(100 until 200)(i => stat2.count(toGeom(s"POINT(${i -20} ${i / 2 - 20})")) must beBetween(1L, 6L))
  }
  "clear" >> {
    val stat = geomStat(24)
    stat.clear()
    stat.isEmpty must beTrue
    stat.size mustEqual 0
    forall(0 until 100)(i => stat.count(toGeom(s"POINT(-$i ${i / 2})")) mustEqual 0)
    forall(100 until 200)(i => stat.count(toGeom(s"POINT(${i -20} ${i / 2 - 20})")) mustEqual 0)
  }
}
}
}
| ronq/geomesa | geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/stats/FrequencyTest.scala | Scala | apache-2.0 | 24,000 |
/*
Lodo is a layered to-do list (Outliner)
Copyright (C) 2015 Keith Morrow.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License v3 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package lodo
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.prefix_<^._
/** Renders the row of per-item action buttons (complete / remove / edit / add). */
object BtnGroup {

  /** Identifies which of the four actions a rendered button triggers. */
  sealed trait BtnType
  object BtnComplete extends BtnType
  object BtnEdit extends BtnType
  object BtnAdd extends BtnType
  object BtnRemove extends BtnType

  /**
   * Component properties.
   *
   * @param item            the item the buttons operate on
   * @param btnGroupType    CSS class prefix for the group ("${btnGroupType}-buttons")
   * @param isEditing       whether the item is currently being edited (changes the edit glyph)
   * @param isAdding        whether a child is currently being added (changes the add glyph)
   * @param isComplete      whether the item is complete (changes the complete glyph)
   * @param onClickComplete callback invoked when the complete button is clicked
   * @param onClickEdit     callback invoked when the edit button is clicked
   * @param onClickAdd      callback invoked when the add button is clicked
   * @param onClickRemove   callback invoked when the remove button is clicked
   */
  case class Props(item: Item, btnGroupType: String,
                   isEditing: Boolean, isAdding: Boolean, isComplete: Boolean,
                   onClickComplete: Item => Unit,
                   onClickEdit: Item => Unit,
                   onClickAdd: Item => Unit,
                   onClickRemove: Item => Unit)

  /** Builds a single bootstrap button with a state-dependent glyphicon. */
  def btn(P: Props, btnType: BtnType, title: String) = {
    // pick the glyphicon suffix based on the button type and current item state
    val glyphClass = btnType match {
      case BtnComplete => if (P.isComplete) "ok-circle" else "ok"
      case BtnEdit => if (P.isEditing) "edit" else "pencil"
      case BtnAdd => if (P.isAdding) "check" else "plus"
      case BtnRemove => "remove"
    }
    <.button(^.cls := "btn btn-sm btn-default", ^.title := title,
      <.span(^.cls := "glyphicon glyphicon-" + glyphClass),
      ^.onClick ==> { e: ReactEventH =>
        // stop the click from also firing handlers on enclosing elements
        e.stopPropagation()
        // dispatch to the callback matching this button's action
        btnType match {
          case BtnComplete => P.onClickComplete(P.item)
          case BtnEdit => P.onClickEdit(P.item)
          case BtnAdd => P.onClickAdd(P.item)
          case BtnRemove => P.onClickRemove(P.item)
        }
      }
    )
  }

  // React component wrapping the four buttons in a right-aligned btn-group
  val btnGroup = ReactComponentB[Props]("BtnGroup")
    .render(P => {
      <.span(^.cls := s"pull-right btn-group ${P.btnGroupType}-buttons",
        btn(P, BtnComplete, "Complete"),
        btn(P, BtnRemove, "Remove"),
        btn(P, BtnEdit, "Edit"),
        btn(P, BtnAdd, "Add")
      )
    }).build

  /** Instantiates the component with the given props. */
  def apply(props: Props) = btnGroup(props)
}
| k3d3/lodo | lodo/js/src/main/scala/components/BtnGroup.scala | Scala | agpl-3.0 | 2,358 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.json
import java.io.ByteArrayOutputStream
import java.util.Locale
import scala.collection.mutable.ArrayBuffer
import scala.util.Try
import com.fasterxml.jackson.core._
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.Utils
/**
* Constructs a parser for a given schema that translates a json string to an [[InternalRow]].
*/
class JacksonParser(
    schema: StructType,
    val options: JSONOptions) extends Logging {

  import JacksonUtils._
  import com.fasterxml.jackson.core.JsonToken._

  // A `ValueConverter` is responsible for converting a value from `JsonParser`
  // to a value in a field for `InternalRow`.
  private type ValueConverter = JsonParser => AnyRef

  // `ValueConverter`s for the root schema for all fields in the schema
  private val rootConverter = makeRootConverter(schema)

  // a single Jackson factory, configured once from the user-supplied options,
  // is reused for every record this instance parses
  private val factory = new JsonFactory()
  options.setJacksonOptions(factory)

  /**
   * Create a converter which converts the JSON documents held by the `JsonParser`
   * to a value according to a desired schema. This is a wrapper for the method
   * `makeConverter()` to handle a row wrapped with an array.
   */
  private def makeRootConverter(st: StructType): JsonParser => Seq[InternalRow] = {
    val elementConverter = makeConverter(st)
    val fieldConverters = st.map(_.dataType).map(makeConverter).toArray
    (parser: JsonParser) => parseJsonToken[Seq[InternalRow]](parser, st) {
      case START_OBJECT => convertObject(parser, st, fieldConverters) :: Nil
      // SPARK-3308: support reading top level JSON arrays and take every element
      // in such an array as a row
      //
      // For example, we support, the JSON data as below:
      //
      // [{"a":"str_a_1"}]
      // [{"a":"str_a_2"}, {"b":"str_b_3"}]
      //
      // resulting in:
      //
      // List([str_a_1,null])
      // List([str_a_2,null], [null,str_b_3])
      //
      case START_ARRAY =>
        val array = convertArray(parser, elementConverter)
        // Here, as we support reading top level JSON arrays and take every element
        // in such an array as a row, this case is possible.
        if (array.numElements() == 0) {
          Nil
        } else {
          array.toArray[InternalRow](schema).toSeq
        }
    }
  }

  /**
   * Create a converter which converts the JSON documents held by the `JsonParser`
   * to a value according to a desired schema.
   */
  def makeConverter(dataType: DataType): ValueConverter = dataType match {
    case BooleanType =>
      (parser: JsonParser) => parseJsonToken[java.lang.Boolean](parser, dataType) {
        case VALUE_TRUE => true
        case VALUE_FALSE => false
      }

    case ByteType =>
      (parser: JsonParser) => parseJsonToken[java.lang.Byte](parser, dataType) {
        case VALUE_NUMBER_INT => parser.getByteValue
      }

    case ShortType =>
      (parser: JsonParser) => parseJsonToken[java.lang.Short](parser, dataType) {
        case VALUE_NUMBER_INT => parser.getShortValue
      }

    case IntegerType =>
      (parser: JsonParser) => parseJsonToken[java.lang.Integer](parser, dataType) {
        case VALUE_NUMBER_INT => parser.getIntValue
      }

    case LongType =>
      (parser: JsonParser) => parseJsonToken[java.lang.Long](parser, dataType) {
        case VALUE_NUMBER_INT => parser.getLongValue
      }

    case FloatType =>
      (parser: JsonParser) => parseJsonToken[java.lang.Float](parser, dataType) {
        case VALUE_NUMBER_INT | VALUE_NUMBER_FLOAT =>
          parser.getFloatValue

        case VALUE_STRING =>
          // Special case handling for NaN and Infinity.
          val value = parser.getText
          val lowerCaseValue = value.toLowerCase(Locale.ROOT)
          if (lowerCaseValue.equals("nan") ||
            lowerCaseValue.equals("infinity") ||
            lowerCaseValue.equals("-infinity") ||
            lowerCaseValue.equals("inf") ||
            lowerCaseValue.equals("-inf")) {
            value.toFloat
          } else {
            throw new RuntimeException(s"Cannot parse $value as FloatType.")
          }
      }

    case DoubleType =>
      (parser: JsonParser) => parseJsonToken[java.lang.Double](parser, dataType) {
        case VALUE_NUMBER_INT | VALUE_NUMBER_FLOAT =>
          parser.getDoubleValue

        case VALUE_STRING =>
          // Special case handling for NaN and Infinity.
          val value = parser.getText
          val lowerCaseValue = value.toLowerCase(Locale.ROOT)
          if (lowerCaseValue.equals("nan") ||
            lowerCaseValue.equals("infinity") ||
            lowerCaseValue.equals("-infinity") ||
            lowerCaseValue.equals("inf") ||
            lowerCaseValue.equals("-inf")) {
            value.toDouble
          } else {
            throw new RuntimeException(s"Cannot parse $value as DoubleType.")
          }
      }

    case StringType =>
      (parser: JsonParser) => parseJsonToken[UTF8String](parser, dataType) {
        case VALUE_STRING =>
          UTF8String.fromString(parser.getText)

        case _ =>
          // Note that it always tries to convert the data as string without the case of failure.
          // Non-string tokens (objects, arrays, numbers...) are re-serialized to
          // their JSON text form and stored as a string.
          val writer = new ByteArrayOutputStream()
          Utils.tryWithResource(factory.createGenerator(writer, JsonEncoding.UTF8)) {
            generator => generator.copyCurrentStructure(parser)
          }
          UTF8String.fromBytes(writer.toByteArray)
      }

    case TimestampType =>
      (parser: JsonParser) => parseJsonToken[java.lang.Long](parser, dataType) {
        case VALUE_STRING =>
          val stringValue = parser.getText
          // This one will lose microseconds parts.
          // See https://issues.apache.org/jira/browse/SPARK-10681.
          Long.box {
            Try(options.timestampFormat.parse(stringValue).getTime * 1000L)
              .getOrElse {
                // If it fails to parse, then tries the way used in 2.0 and 1.x for backwards
                // compatibility.
                DateTimeUtils.stringToTime(stringValue).getTime * 1000L
              }
          }

        case VALUE_NUMBER_INT =>
          // numeric timestamps are taken as seconds and scaled to microseconds
          parser.getLongValue * 1000000L
      }

    case DateType =>
      (parser: JsonParser) => parseJsonToken[java.lang.Integer](parser, dataType) {
        case VALUE_STRING =>
          val stringValue = parser.getText
          // This one will lose microseconds parts.
          // See https://issues.apache.org/jira/browse/SPARK-10681.x
          Int.box {
            Try(DateTimeUtils.millisToDays(options.dateFormat.parse(stringValue).getTime))
              .orElse {
                // If it fails to parse, then tries the way used in 2.0 and 1.x for backwards
                // compatibility.
                Try(DateTimeUtils.millisToDays(DateTimeUtils.stringToTime(stringValue).getTime))
              }
              .getOrElse {
                // In Spark 1.5.0, we store the data as number of days since epoch in string.
                // So, we just convert it to Int.
                stringValue.toInt
              }
          }
      }

    case BinaryType =>
      (parser: JsonParser) => parseJsonToken[Array[Byte]](parser, dataType) {
        case VALUE_STRING => parser.getBinaryValue
      }

    case dt: DecimalType =>
      (parser: JsonParser) => parseJsonToken[Decimal](parser, dataType) {
        case (VALUE_NUMBER_INT | VALUE_NUMBER_FLOAT) =>
          Decimal(parser.getDecimalValue, dt.precision, dt.scale)
      }

    case st: StructType =>
      val fieldConverters = st.map(_.dataType).map(makeConverter).toArray
      (parser: JsonParser) => parseJsonToken[InternalRow](parser, dataType) {
        case START_OBJECT => convertObject(parser, st, fieldConverters)
      }

    case at: ArrayType =>
      val elementConverter = makeConverter(at.elementType)
      (parser: JsonParser) => parseJsonToken[ArrayData](parser, dataType) {
        case START_ARRAY => convertArray(parser, elementConverter)
      }

    case mt: MapType =>
      val valueConverter = makeConverter(mt.valueType)
      (parser: JsonParser) => parseJsonToken[MapData](parser, dataType) {
        case START_OBJECT => convertMap(parser, valueConverter)
      }

    case udt: UserDefinedType[_] =>
      // user-defined types are parsed via their underlying SQL type
      makeConverter(udt.sqlType)

    case _ =>
      (parser: JsonParser) =>
        // Here, we pass empty `PartialFunction` so that this case can be
        // handled as a failed conversion. It will throw an exception as
        // long as the value is not null.
        parseJsonToken[AnyRef](parser, dataType)(PartialFunction.empty[JsonToken, AnyRef])
  }

  /**
   * This method skips `FIELD_NAME`s at the beginning, and handles nulls ahead before trying
   * to parse the JSON token using given function `f`. If the `f` failed to parse and convert the
   * token, call `failedConversion` to handle the token.
   */
  private def parseJsonToken[R >: Null](
      parser: JsonParser,
      dataType: DataType)(f: PartialFunction[JsonToken, R]): R = {
    parser.getCurrentToken match {
      case FIELD_NAME =>
        // There are useless FIELD_NAMEs between START_OBJECT and END_OBJECT tokens
        parser.nextToken()
        parseJsonToken[R](parser, dataType)(f)

      case null | VALUE_NULL => null

      case other => f.applyOrElse(other, failedConversion(parser, dataType))
    }
  }

  /**
   * This function throws an exception for failed conversion, but returns null for empty string,
   * to guard the non string types.
   */
  private def failedConversion[R >: Null](
      parser: JsonParser,
      dataType: DataType): PartialFunction[JsonToken, R] = {
    case VALUE_STRING if parser.getTextLength < 1 =>
      // If conversion is failed, this produces `null` rather than throwing exception.
      // This will protect the mismatch of types.
      null

    case token =>
      // We cannot parse this token based on the given data type. So, we throw a
      // RuntimeException and this exception will be caught by `parse` method.
      throw new RuntimeException(
        s"Failed to parse a value for data type $dataType (current token: $token).")
  }

  /**
   * Parse an object from the token stream into a new Row representing the schema.
   * Fields in the json that are not defined in the requested schema will be dropped.
   */
  private def convertObject(
      parser: JsonParser,
      schema: StructType,
      fieldConverters: Array[ValueConverter]): InternalRow = {
    val row = new GenericInternalRow(schema.length)
    while (nextUntil(parser, JsonToken.END_OBJECT)) {
      schema.getFieldIndex(parser.getCurrentName) match {
        case Some(index) =>
          row.update(index, fieldConverters(index).apply(parser))

        case None =>
          // field not in the schema: skip its entire subtree
          parser.skipChildren()
      }
    }

    row
  }

  /**
   * Parse an object as a Map, preserving all fields.
   */
  private def convertMap(
      parser: JsonParser,
      fieldConverter: ValueConverter): MapData = {
    // keys are always JSON field names (strings); values use the map's value converter
    val keys = ArrayBuffer.empty[UTF8String]
    val values = ArrayBuffer.empty[Any]
    while (nextUntil(parser, JsonToken.END_OBJECT)) {
      keys += UTF8String.fromString(parser.getCurrentName)
      values += fieldConverter.apply(parser)
    }

    ArrayBasedMapData(keys.toArray, values.toArray)
  }

  /**
   * Parse an object as a Array.
   */
  private def convertArray(
      parser: JsonParser,
      fieldConverter: ValueConverter): ArrayData = {
    val values = ArrayBuffer.empty[Any]
    while (nextUntil(parser, JsonToken.END_ARRAY)) {
      values += fieldConverter.apply(parser)
    }

    new GenericArrayData(values.toArray)
  }

  /**
   * Parse the JSON input to the set of [[InternalRow]]s.
   *
   * @param recordLiteral an optional function that will be used to generate
   *   the corrupt record text instead of record.toString
   */
  def parse[T](
      record: T,
      createParser: (JsonFactory, T) => JsonParser,
      recordLiteral: T => UTF8String): Seq[InternalRow] = {
    try {
      Utils.tryWithResource(createParser(factory, record)) { parser =>
        // a null first token is equivalent to testing for input.trim.isEmpty
        // but it works on any token stream and not just strings
        parser.nextToken() match {
          case null => Nil
          case _ => rootConverter.apply(parser) match {
            case null => throw new RuntimeException("Root converter returned null")
            case rows => rows
          }
        }
      }
    } catch {
      // wrap both Jackson and our own conversion failures so callers can
      // recover the raw record text from the BadRecordException
      case e @ (_: RuntimeException | _: JsonProcessingException) =>
        throw BadRecordException(() => recordLiteral(record), () => None, e)
    }
  }
}
| MLnick/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JacksonParser.scala | Scala | apache-2.0 | 13,677 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.spark
import java.util
import java.util.Comparator
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.Partitioner
/**
* A Partitioner implementation that will separate records to different
* HBase Regions based on region splits
*
* @param startKeys The start keys for the given table
*/
@InterfaceAudience.Public
class BulkLoadPartitioner(startKeys: Array[Array[Byte]])
  extends Partitioner {

  // the comparator is stateless, so build it once instead of allocating a new
  // one on every getPartition call
  private val comparator: Comparator[Array[Byte]] = new Comparator[Array[Byte]] {
    override def compare(o1: Array[Byte], o2: Array[Byte]): Int = {
      Bytes.compareTo(o1, o2)
    }
  }

  // when table not exist, startKeys = Byte[0][]
  override def numPartitions: Int = if (startKeys.length == 0) 1 else startKeys.length

  /**
   * Maps a record key to the index of the region whose start key range
   * contains its row key.
   *
   * @param key a KeyFamilyQualifier, a ByteArrayWrapper, or a raw Array[Byte]
   * @return the index of the matching region split
   */
  override def getPartition(key: Any): Int = {
    // extract the row key bytes from whichever key wrapper was supplied
    val rowKey: Array[Byte] =
      key match {
        case qualifier: KeyFamilyQualifier =>
          qualifier.rowKey
        case wrapper: ByteArrayWrapper =>
          wrapper.value
        case _ =>
          key.asInstanceOf[Array[Byte]]
      }

    // Arrays.binarySearch returns the match index, or -(insertionPoint) - 1 on
    // a miss; a miss maps to the region whose start key precedes the row key,
    // clamped to region 0 for keys before the first start key.
    val idx = util.Arrays.binarySearch(startKeys, rowKey, comparator)
    if (idx >= 0) idx else math.max(0, -idx - 2)
  }
}
| JingchengDu/hbase | hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/BulkLoadPartitioner.scala | Scala | apache-2.0 | 2,134 |
package com.github.tminglei.slickpg
package json
import slick.ast.TypedType
import slick.ast.Library.{SqlFunction, SqlOperator}
import slick.lifted.{ExtensionMethods}
import slick.driver.{JdbcTypesComponent, PostgresDriver}
import slick.jdbc.JdbcType
/**
 * Slick extension methods for PostgreSQL's json/jsonb operators and functions.
 * Mixed into a PostgresDriver; `JsonColumnExtensionMethods` adds the operator
 * DSL to columns whose JdbcType maps to a PG json type.
 */
trait PgJsonExtensions extends JdbcTypesComponent { driver: PostgresDriver =>
  import driver.api._

  /**
   * Catalog of PostgreSQL JSON operators/functions.
   *
   * @param pgjson the SQL type name ("json" or "jsonb"); used to build the
   *               type-specific function names (e.g. json_typeof vs jsonb_typeof).
   */
  class JsonLibrary(pgjson: String) {
    val Arrow = new SqlOperator("->")           // get field / array element (as json)
    val BiArrow = new SqlOperator("->>")        // get field / array element (as text)
    val PoundArrow = new SqlOperator("#>")      // get object at key path (as json)
    val PoundBiArrow = new SqlOperator("#>>")   // get object at key path (as text)
    val Contains = new SqlOperator("@>")        // left contains right
    val ContainsBy = new SqlOperator("<@")      // left is contained by right
    // NOTE(review): '?' is doubled ("??") — presumably to escape JDBC's
    // parameter placeholder; confirm against the JDBC driver in use.
    val Exists = new SqlOperator("??")          // key/element exists
    val ExistsAny = new SqlOperator("??|")      // any of the keys exist
    val ExistsAll = new SqlOperator("??&")      // all of the keys exist
    val Concatenate = new SqlOperator("||")     // concatenate two json values
    val Delete = new SqlOperator("-")           // delete key / array element
    val DeleteDeep = new SqlOperator("#-")      // delete at key path

    val toJson = new SqlFunction("to_json")
    val toJsonb = new SqlFunction("to_jsonb")
    val arrayToJson = new SqlFunction("array_to_json")
//  val rowToJson = new SqlFunction("row_to_json") //not support, since "row" type not supported by slick/slick-pg yet
    val jsonbSet = new SqlFunction("jsonb_set")

    // Functions whose names depend on the json type (json_* vs jsonb_*).
    val jsonObject = new SqlFunction(pgjson + "_object")
    val typeof = new SqlFunction(pgjson + "_typeof")
    val objectKeys = new SqlFunction(pgjson + "_object_keys")
    val arrayLength = new SqlFunction(pgjson + "_array_length")
    val arrayElements = new SqlFunction(pgjson + "_array_elements")
    val arrayElementsText = new SqlFunction(pgjson + "_array_elements_text")
//  val jsonEach = new SqlFunction(pgjson + "_each") //not support, since "row" type not supported by slick/slick-pg yet
//  val jsonEachText = new SqlFunction(pgjson + "_each_text") //not support, since "row" type not supported by slick/slick-pg yet
//  val jsonPopulateRecord = new SqlFunction(pgjson + "_populate_record") //not support, since "row" type not supported by slick/slick-pg yet
//  val jsonPopulateRecordset = new SqlFunction(pgjson + "_populate_recordset") //not support, since "row" type not supported by slick/slick-pg yet
//  val jsonToRecord = new SqlFunction(pgjson + "_to_record") //not support, since "row" type not supported by slick/slick-pg yet
//  val jsonToRecordSet = new SqlFunction(pgjson + "_to_recordset") //not support, since "row" type not supported by slick/slick-pg yet
  }

  /**
   * Extension methods available on a json/jsonb column. The operator names
   * mirror the underlying PG operators; `~>`/`~>>` take an Int index while
   * `+>`/`+>>` take a String key (both map to -> / ->>).
   */
  class JsonColumnExtensionMethods[JSONType, P1](val c: Rep[P1])(
              implicit tm: JdbcType[JSONType]) extends ExtensionMethods[JSONType, P1] {
    protected implicit def b1Type: TypedType[JSONType] = implicitly[TypedType[JSONType]]

    // Library bound to the concrete SQL type name of this column's JdbcType.
    val jsonLib = new JsonLibrary(tm.sqlTypeName(None))

    /** Note: json array's index starts with 0 */
    def ~> [P2, R](index: Rep[P2])(implicit om: o#arg[Int, P2]#to[JSONType, R]) = {
        om.column(jsonLib.Arrow, n, index.toNode)
      }
    /** Gets array element at `index` as text. */
    def ~>>[P2, R](index: Rep[P2])(implicit om: o#arg[Int, P2]#to[String, R]) = {
        om.column(jsonLib.BiArrow, n, index.toNode)
      }
    /** Gets object field `key` as json. */
    def +> [P2, R](key: Rep[P2])(implicit om: o#arg[String, P2]#to[JSONType, R]) = {
        om.column(jsonLib.Arrow, n, key.toNode)
      }
    /** Gets object field `key` as text. */
    def +>>[P2, R](key: Rep[P2])(implicit om: o#arg[String, P2]#to[String, R]) = {
        om.column(jsonLib.BiArrow, n, key.toNode)
      }
    /** Gets the value at the given key path as json. */
    def #> [P2, R](keyPath: Rep[P2])(implicit om: o#arg[List[String], P2]#to[JSONType, R]) = {
        om.column(jsonLib.PoundArrow, n, keyPath.toNode)
      }
    /** Gets the value at the given key path as text. */
    def #>>[P2, R](keyPath: Rep[P2])(implicit om: o#arg[List[String], P2]#to[String, R]) = {
        om.column(jsonLib.PoundBiArrow, n, keyPath.toNode)
      }
    /** True when this column contains `c2`. */
    def @>[P2,R](c2: Rep[P2])(implicit om: o#arg[JSONType, P2]#to[Boolean, R]) = {
        om.column(jsonLib.Contains, n, c2.toNode)
      }
    /** True when this column is contained by `c2` (note reversed operand order). */
    def <@:[P2,R](c2: Rep[P2])(implicit om: o#arg[JSONType, P2]#to[Boolean, R]) = {
        om.column(jsonLib.ContainsBy, c2.toNode, n)
      }
    /** True when the key/element `c2` exists. */
    def ??[P2, R](c2: Rep[P2])(implicit om: o#arg[String, P2]#to[Boolean, R]) = {
        om.column(jsonLib.Exists, n, c2.toNode)
      }
    /** True when any of the keys in `c2` exist. */
    def ?|[P2, R](c2: Rep[P2])(implicit om: o#arg[List[String], P2]#to[Boolean, R]) = {
        om.column(jsonLib.ExistsAny, n, c2.toNode)
      }
    /** True when all of the keys in `c2` exist. */
    def ?&[P2, R](c2: Rep[P2])(implicit om: o#arg[List[String], P2]#to[Boolean, R]) = {
        om.column(jsonLib.ExistsAll, n, c2.toNode)
      }
    /** Concatenates this json value with `c2`. */
    def ||[P2, R](c2: Rep[P2])(implicit om: o#arg[JSONType, P2]#to[JSONType, R]) = {
        om.column(jsonLib.Concatenate, n, c2.toNode)
      }
    /** Deletes key/element `c2`. */
    def - [P2, R](c2: Rep[P2])(implicit om: o#arg[String, P2]#to[JSONType, R]) = {
        om.column(jsonLib.Delete, n, c2.toNode)
      }
    /** Deletes the value at the given key path. */
    def #-[P2, R](c2: Rep[P2])(implicit om: o#arg[List[String], P2]#to[JSONType, R]) = {
        om.column(jsonLib.DeleteDeep, n, c2.toNode)
      }

    /** Returns the json type name of the value ("object", "array", ...). */
    def jsonType[R](implicit om: o#to[String, R]) = om.column(jsonLib.typeof, n)
    /** Returns the top-level object keys. */
    def objectKeys[R](implicit om: o#to[String, R]) = om.column(jsonLib.objectKeys, n)
    /** Returns the length of the top-level json array. */
    def arrayLength[R](implicit om: o#to[Int, R]) = om.column(jsonLib.arrayLength, n)
    /** Expands the top-level json array to a set of json values. */
    def arrayElements[R](implicit om: o#to[JSONType, R]) = om.column(jsonLib.arrayElements, n)
    /** Expands the top-level json array to a set of text values. */
    def arrayElementsText[R](implicit om: o#to[String, R]) = om.column(jsonLib.arrayElementsText, n)
    /**
     * jsonb_set: replaces the value at `path` with `value`. When `createMissing`
     * is defined, it is passed through as the create_missing argument.
     */
    def set[R](path: Rep[List[String]], value: Rep[JSONType], createMissing: Option[Boolean] = None)(
      implicit om: o#to[JSONType, R]) = createMissing match {
        case Some(b) => om.column(jsonLib.jsonbSet, n, path.toNode, value.toNode, LiteralColumn(b).toNode)
        case None => om.column(jsonLib.jsonbSet, n, path.toNode, value.toNode)
      }
  }
}
| enragedginger/slick-pg | core/src/main/scala/com/github/tminglei/slickpg/json/PgJsonExtensions.scala | Scala | bsd-2-clause | 5,635 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package config
import com.google.inject.{Inject, Singleton}
import play.api.{Configuration, Environment}
import scala.collection.JavaConverters._
/**
 * Helper around Play's Configuration that resolves keys with a run-mode
 * prefix: every lookup first tries "$mode.$key" and then falls back to the
 * bare key. The must* variants fail fast when neither key is present.
 */
@Singleton
class ConfigHelper @Inject() (config: Configuration, env: Environment) {

  // Current Play run mode (Dev/Test/Prod); used to build the prefixed key.
  val mode = env.mode

  // Single failure point so all must* accessors report a consistent message.
  private def configNotFound(key: String): Nothing =
    throw new Exception(s"ERROR: Unable to find config item $mode.$key or $key")

  /** Returns the string at `key`, throwing when absent. */
  def mustGetConfigString(key: String): String =
    getConfigString(key).getOrElse(configNotFound(key))

  /** Returns the string list at `key`, throwing when absent. */
  def mustGetConfigStringList(key: String): List[String] =
    getConfigStringList(key).getOrElse(configNotFound(key))

  /** Returns the int at `key`, throwing when absent. */
  def mustGetConfigInt(key: String): Int =
    getConfigInt(key).getOrElse(configNotFound(key))

  /** Looks up a string, preferring the mode-prefixed key. */
  def getConfigString(key: String): Option[String] = {
    val modeKey = s"$mode.$key"
    config.getString(modeKey).orElse(config.getString(key))
  }

  /** Looks up a string list, preferring the mode-prefixed key. */
  def getConfigStringList(key: String): Option[List[String]] = {
    val modeKey = s"$mode.$key"
    config.getStringList(modeKey).orElse(config.getStringList(key)).map(_.asScala.toList)
  }

  /** Looks up an int, preferring the mode-prefixed key. */
  def getConfigInt(key: String): Option[Int] = {
    val modeKey = s"$mode.$key"
    config.getInt(modeKey).orElse(config.getInt(key))
  }

  /** Looks up a sub-configuration, preferring the mode-prefixed key. */
  def getConfig(key: String): Option[Configuration] = {
    val modeKey = s"$mode.$key"
    config.getConfig(modeKey).orElse(config.getConfig(key))
  }

  /**
   * Expands a leading "$HOME" to the value of the HOME environment variable.
   * Uses stripPrefix instead of the previous magic-number substring(5).
   * NOTE(review): assumes HOME is set; System.getenv returns null otherwise.
   */
  def replaceHome(string: String): String =
    if (string.startsWith("$HOME")) System.getenv("HOME") + string.stripPrefix("$HOME")
    else string
}
| andywhardy/address-reputation-ingester | app/config/ConfigHelper.scala | Scala | apache-2.0 | 2,200 |
def f() = {}
println(/* offset: 4, applicable: false */ f(_: Int)) // NOTE(review): IntelliJ resolve-test fixture — the inline marker comment encodes expected resolution data; do not reformat or move it
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.torch
import com.intel.analytics.bigdl.nn.SelectTable
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.{T, Table}
import scala.collection.mutable.HashMap
import scala.util.Random
@com.intel.analytics.bigdl.tags.Serial
class SelectTableSpec extends TorchSpec {
  // Compares BigDL's SelectTable forward/backward results against reference
  // Lua Torch, for both a tensor-valued and a table-valued selected element.

  "A SelectTable selects a tensor as an output" should "generate correct output and grad" in {
    torchCheck()
    // Fixed seed so the Scala side and the Lua side generate comparable runs.
    val seed = 100
    Random.setSeed(seed)
    val module = new SelectTable[Double](3)
    val input1 = Tensor[Double](10).randn()
    val input2 = Tensor[Double](10).randn()
    val input3 = Tensor[Double](10).randn()
    val input = T(1.0 -> input1, 2.0 -> input2, 3.0 -> input3)
    val gradOutput = Tensor[Double](10).randn()
    var i = 0
    var output = Tensor[Double]()
    var gradInput = T()
    val start = System.nanoTime()
    // 10 iterations, mirroring the Lua loop below; only the last result is kept.
    while (i < 10) {
      output = module.forward(input).toTensor
      gradInput = module.backward(input, gradOutput)
      i += 1
    }
    val scalaTime = System.nanoTime() - start

    // Reference implementation executed in Lua Torch via TH.run.
    val code =
      s"""
torch.manualSeed($seed)
module = nn.SelectTable(3)
local i = 0
while i < 10 do
output = module:forward(input)
gradInput = module:backward(input, gradOutput)
gradInput1 = gradInput[1]
gradInput2 = gradInput[2]
gradInput3 = gradInput[3]
i = i + 1
end
""".stripMargin

    val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "gradOutput" -> gradOutput),
      Array("output", "gradInput1", "gradInput2", "gradInput3"))
    val torchOutput = torchResult("output").asInstanceOf[Tensor[Double]]
    val torchgradInput1 = torchResult("gradInput1").asInstanceOf[Tensor[Double]]
    val torchgradInput2 = torchResult("gradInput2").asInstanceOf[Tensor[Double]]
    val torchgradInput3 = torchResult("gradInput3").asInstanceOf[Tensor[Double]]
    val torchgradInput = T(torchgradInput1, torchgradInput2, torchgradInput3)

    torchOutput should be(output)
    torchgradInput should be(gradInput)

    // NOTE(review): the label "PairwiseDistance" looks copy-pasted from another
    // spec — this test exercises SelectTable.
    println("Test case : PairwiseDistance, Torch : " + luaTime +
      " s, Scala : " + scalaTime / 1e9 +
      " s")
  }

  "A SelectTable selects a table as an output" should "generate correct output and grad" in {
    torchCheck()
    val seed = 100
    Random.setSeed(seed)
    // Index 1 selects the nested table, so output and gradInput[1] are tables.
    val module = new SelectTable[Double](1)
    val embeddedInput1 = T(
      1.0 -> Tensor[Double](10).randn(),
      2.0 -> Tensor[Double](10).randn())
    val embeddedInput2 = Tensor[Double](10).randn()
    val input = T(
      1.0 -> embeddedInput1,
      2.0 -> embeddedInput2)
    val gradOutput = T(
      1.0 -> Tensor[Double](10).randn(),
      2.0 -> Tensor[Double](10).randn())
    val start = System.nanoTime()
    val output = module.forward(input)
    val gradInput = module.backward(input, gradOutput)
    // gradInput[1] mirrors the nested input table; gradInput[2] is a tensor.
    val g1 = gradInput.toTable[Table](1.0)
    val g1_1 = g1[Tensor[Double]](1.0)
    val g1_2 = g1[Tensor[Double]](2.0)
    val g2 = gradInput.toTable[Tensor[Double]](2.0)
    val scalaTime = System.nanoTime() - start

    // Reference implementation executed in Lua Torch via TH.run.
    val code =
      s"""
torch.manualSeed($seed)
module = nn.SelectTable(1)
output = module:forward{embeddedInput1, embeddedInput2}
gradInput = module:backward({embeddedInput1, embeddedInput2}, gradOutput)
gradInput1 = gradInput[1]
gradInput2 = gradInput[2]
""".stripMargin

    val (luaTime, torchResult) = TH.run(code, Map("embeddedInput2" -> embeddedInput2,
      "embeddedInput1" -> embeddedInput1, "gradOutput" -> gradOutput),
      Array("output", "gradInput1", "gradInput2"))
    val torchOutput = torchResult("output").asInstanceOf[Table]
    val torchgradInput1 = torchResult("gradInput1").asInstanceOf[Table]
    val torchgradInput2 = torchResult("gradInput2").asInstanceOf[Tensor[Double]]
    val torchgradInput = T(torchgradInput1, torchgradInput2)

    // Element-wise comparison of the table-valued output and gradients.
    torchOutput[Tensor[Double]](1.0) should be(output.toTable[Tensor[Double]](1.0))
    torchOutput[Tensor[Double]](2.0) should be(output.toTable[Tensor[Double]](2.0))
    torchgradInput1[Tensor[Double]](1.0) should be (g1_1)
    torchgradInput1[Tensor[Double]](2.0) should be (g1_2)
    torchgradInput2 should be(g2)

    // NOTE(review): same mislabelled "PairwiseDistance" message as above.
    println("Test case : PairwiseDistance, Torch : " + luaTime +
      " s, Scala : " + scalaTime / 1e9 +
      " s")
  }
}
| jenniew/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/torch/SelectTableSpec.scala | Scala | apache-2.0 | 4,954 |
package com.szadowsz.starform.profile.accrete
import com.szadowsz.starform.model.accrete.AccreteProfile
import com.szadowsz.starform.model.accrete.calc.collision.{CollisionCalc, DoleCollCalc}
import com.szadowsz.starform.model.accrete.calc.insert.{AccreteInsertStrat, RandInsertStrat}
import com.szadowsz.starform.model.accrete.calc.planet.PlanetesimalCalc
import com.szadowsz.starform.model.accrete.constants.{AccreteConstants, FKeris2Constants}
import com.szadowsz.starform.rand.{GilhamRandGen, RandGenTrait}
/**
 * Accrete simulation profile wired up with Gilham's random generator.
 *
 * Created on 11/04/2017.
 */
class GilhamProfile extends AccreteProfile {

  /** Random number generator matching Gilham's simulation. */
  override val rand: RandGenTrait = new GilhamRandGen()

  // TODO change to Gilham
  override val accConsts: AccreteConstants = new FKeris2Constants()

  /** Builds the planetesimal insertion strategy for the given constants. */
  override def buildInsertStrat(aConst: AccreteConstants): AccreteInsertStrat = {
    new RandInsertStrat(aConst)
  }

  // TODO check if it should use Gilham calc
  /** Builds the collision calculator for the given planetesimal calculator. */
  override def buildCollCalc(pCalc: PlanetesimalCalc): CollisionCalc = {
    DoleCollCalc(pCalc)
  }
}
| zakski/accrete-starform-stargen | recreations/composite/src/main/scala/com/szadowsz/starform/profile/accrete/GilhamProfile.scala | Scala | apache-2.0 | 992 |
package com.konfliktriends.camel
import org.apache.camel.main.Main
import org.apache.camel.builder.RouteBuilder
/**
 * Camel route: consumes messages from the local Kafka "websocket" topic,
 * broadcasts each message to all connected WebSocket clients on port 9292,
 * and appends it to /tmp/twitter.json.
 *
 * @author Andreas C. Osowski
 */
class KafkaStreamRoute extends RouteBuilder {
  override def configure(): Unit = from("kafka:localhost?zookeeperHost=localhost&groupId=defaultz&topic=websocket")
    .to("websocket:0.0.0.0:9292/websocket?sendToAll=true&staticResources=classpath:html")
    .to("stream:file?fileName=/tmp/twitter.json")
}
/**
 * Camel route: bridges HTTP requests arriving on port 9292 under /python
 * to a local service listening on port 1337.
 */
class ProxyPythonRoute extends RouteBuilder {
  override def configure() = from("jetty:http://0.0.0.0:9292/python?matchOnUriPrefix=false")
    .to("jetty:http://localhost:1337/?bridgeEndpoint=true")
}
/**
 * Application entry point: boots Camel with the Kafka -> WebSocket route and
 * blocks until shutdown.
 *
 * Converted from `extends App` to an explicit main method: the App trait's
 * delayed initialization is a known pitfall for non-trivial entry points.
 */
object CamelMain {
  def main(args: Array[String]): Unit = {
    val main = new Main
    // Shut the Camel context down cleanly on SIGHUP/SIGTERM.
    main.enableHangupSupport()
    main.addRouteBuilder(new KafkaStreamRoute)
    //main.addRouteBuilder(new ProxyPythonRoute)
    main.run()
  }
}
| th0br0/konfliktriends | backend-camel/src/main/scala/com/konfliktriends/camel/Main.scala | Scala | gpl-2.0 | 849 |
/*
* Copyright 2012-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package laika.parse.code.languages
import cats.data.NonEmptyList
import laika.ast.{CodeSpan, ~}
import laika.bundle.SyntaxHighlighter
import laika.parse.Parser
import laika.parse.code.common._
import laika.parse.code.{CodeCategory, CodeSpanParser}
import laika.parse.text.Characters
import laika.parse.builders._
import laika.parse.code.common.NumberLiteral.DigitParsers
import laika.parse.code.implicits._
import laika.parse.implicits._
/**
* @author Jens Halm
*/
/** Syntax highlighter definition for CSS, built from Laika's parser combinators. */
object CSSSyntax extends SyntaxHighlighter {

  // Whitespace as it may appear inside declarations: spaces and newlines only.
  private val ws: Characters[String] = anyOf('\n', ' ')

  // Base identifier parser: alphanumeric plus '_' and '-' as start characters,
  // tagged with the given category.
  private def idBase (category: CodeCategory, allowDigitBeforeStart: Boolean): Identifier.IdParser =
    Identifier.alphaNum
      .withIdStartChars('_','-')
      .withCategory(category)
      .copy(allowDigitBeforeStart = allowDigitBeforeStart)

  /** Identifier parser that also accepts an '@' or '#' prefix (at-rules, ids). */
  def identifier (category: CodeCategory, allowDigitBeforeStart: Boolean): CodeSpanParser = {
    val base = idBase(category, allowDigitBeforeStart)
    base.withPrefix("@" | "#") ++ base
  }

  // Escape sequences: backslash + hex digits, or a single escaped character.
  lazy val escape: CodeSpanParser =
    CodeSpanParser(CodeCategory.EscapeSequence)(("\\" ~ DigitParsers.hex.min(1)).source) ++ StringLiteral.Escape.char

  // url(...) values: the unquoted argument is highlighted as a string literal,
  // surrounding whitespace is folded into the parentheses spans.
  lazy val url: CodeSpanParser = CodeSpanParser {
    (literal("url(") ~ ws ~ anyNot('"', '\'', '(', ')', ' ', '\n') ~ ws ~ ")").map {
      case _ ~ ws1 ~ value ~ ws2 ~ _ => Seq(
        CodeSpan("url", CodeCategory.Identifier),
        CodeSpan("(" + ws1),
        CodeSpan(value, CodeCategory.StringLiteral),
        CodeSpan(ws2 + ")"),
      )
    }
  }

  // Hex color literals: '#' followed by 1 to 6 hex digits.
  val color: CodeSpanParser = CodeSpanParser(CodeCategory.NumberLiteral)(("#" ~ DigitParsers.hex.min(1).max(6)).source)

  // Single- and double-quoted strings, with escape sequences embedded.
  val string: CodeSpanParser = StringLiteral.singleLine('"').embed(escape) ++
    StringLiteral.singleLine('\'').embed(escape)

  // Numbers may be directly followed by a letter (unit suffixes like 10px).
  val number: CodeSpanParser = NumberLiteral.decimalFloat.copy(allowFollowingLetter = true) ++
    NumberLiteral.decimalInt.copy(allowFollowingLetter = true)

  // A "name: value" declaration; the value part may embed comments, strings,
  // colors, urls, numbers and identifiers.
  val declaration: CodeSpanParser = {

    val embedded: Seq[CodeSpanParser] = Seq(
      Comment.multiLine("/*", "*/"),
      string,
      color,
      url,
      number,
      identifier(CodeCategory.Identifier, allowDigitBeforeStart = true),
    )

    // Parses everything after the attribute name up to the closing delimiter;
    // inside a parenthesized block ')' also terminates the value.
    def valueParser(inBlock: Boolean): Parser[Seq[CodeSpan]] = {
      val separator = (ws ~ ":").source.asCode()
      val delim = oneChar.asCode()
      val text = if (inBlock) delimitedBy('}',';', ')') else delimitedBy('}',';')
      (separator ~ EmbeddedCodeSpans.parser(text.keepDelimiter, embedded) ~ delim).map {
        case sep ~ con ~ del => sep +: con :+ del
      }
    }

    val attrName = idBase(CodeCategory.AttributeName, allowDigitBeforeStart = false)

    // Two entry points: a plain declaration, and one inside parentheses.
    CodeSpanParser {
      (attrName ~ valueParser(inBlock = false)).concat
    } ++
    CodeSpanParser {
      ("(" ~> attrName ~ valueParser(inBlock = true)).concat.map(spans => CodeSpan("(") +: spans)
    }
  }

  /** Language identifiers this highlighter registers under. */
  val language: NonEmptyList[String] = NonEmptyList.of("css")

  /** Top-level parsers, tried in order. */
  val spanParsers: Seq[CodeSpanParser] = Seq(
    Comment.multiLine("/*", "*/"),
    string,
    declaration,
    identifier(CodeCategory.Identifier, allowDigitBeforeStart = false),
    Keywords("!important")
  )
}
| planet42/Laika | core/shared/src/main/scala/laika/parse/code/languages/CSSSyntax.scala | Scala | apache-2.0 | 3,801 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.secondaryindex.optimizer
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.util.control.Breaks.{break, breakable}
import org.apache.spark.sql.{Dataset, _}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.UnresolvedCatalogRelation
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.trees.CurrentOrigin
import org.apache.spark.sql.execution.datasources.{FindDataSourceTable, LogicalRelation}
import org.apache.spark.sql.hive.{CarbonHiveIndexMetadataUtil, CarbonRelation}
import org.apache.spark.sql.index.CarbonIndexUtil
import org.apache.spark.sql.secondaryindex.optimizer
import org.apache.spark.sql.secondaryindex.optimizer.NodeType.NodeType
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.index.secondaryindex.CarbonCostBasedOptimizer
import org.apache.carbondata.core.util.CarbonProperties
class SIFilterPushDownOperation(nodeType: NodeType)
// Internal (binary) tree node: combines two push-down subtrees with an
// and/or NodeType. "or" leads to a union of results, "and" to a join.
case class SIBinaryFilterPushDownOperation(nodeType: NodeType,
    leftOperation: SIFilterPushDownOperation,
    rightOperation: SIFilterPushDownOperation) extends SIFilterPushDownOperation(nodeType)
// Leaf node: a single filter condition that can be pushed down to one
// secondary-index table. nodeType is irrelevant for leaves, hence null.
case class SIUnaryFilterPushDownOperation(tableName: String, filterCondition: Expression)
  extends SIFilterPushDownOperation(nodeType = null)
// Combination type for binary push-down nodes: "or" maps to a union of two
// index-table result sets, "and" to a join on positionReference.
object NodeType extends Enumeration {
  type NodeType = Value
  val Or: optimizer.NodeType.Value = Value("or")
  val And: optimizer.NodeType.Value = Value("and")
}
/**
* Carbon Optimizer to add dictionary decoder.
*/
class CarbonSecondaryIndexOptimizer(sparkSession: SparkSession) {
/**
* This method will identify whether provided filter column have any index table
* if exist, it will transform the filter with join plan as follows
*
* eg., select * from a where dt='20261201' and age='10' limit 10
*
* Assume 2 index tables
* 1)index1 indexed on column(dt)
* 2)index2 indexed on column(age)
*
* Plan will be transformed as follows
*
* with i1 as (select positionReference from index1 where dt='20261201'
* group by positionReference),
* with i2 as (select positionReference from index2 where age='10'),
* with indexJoin as (select positionReference from i1 join i2 on
* i1.positionReference = i2.positionReference limit 10),
* with index as (select positionReference from indexJoin group by positionReference)
* select * from a join index on a.positionId = index.positionReference limit 10
*
* @return transformed logical plan
*/
  private def rewritePlanForSecondaryIndex(filter: Filter,
      indexableRelation: CarbonDatasourceHadoopRelation, dbName: String,
      cols: Seq[NamedExpression] = null, limitLiteral: Literal = null,
      sortNodeForPushDown: Sort = null, pushDownNotNullFilter: Boolean = false): LogicalPlan = {
    var originalFilterAttributes: Set[String] = Set.empty
    var filterAttributes: Set[String] = Set.empty
    // all filter attributes are retrieved
    filter.condition collect {
      case attr: AttributeReference =>
        originalFilterAttributes = originalFilterAttributes. +(attr.name.toLowerCase)
    }
    // Removed is Not Null filter from all filters and other attributes are selected
    // isNotNull filter will return all the unique values except null from table,
    // For High Cardinality columns, this filter is of no use, hence skipping it.
    removeIsNotNullAttribute(filter.condition, pushDownNotNullFilter) collect {
      case attr: AttributeReference =>
        filterAttributes = filterAttributes. +(attr.name.toLowerCase)
    }
    // Cost-based selection of the secondary-index tables that can serve the
    // (notNull-stripped) filter attributes.
    val enabledMatchingIndexTables = CarbonCostBasedOptimizer.identifyRequiredTables(
      filterAttributes.asJava,
      CarbonIndexUtil.getSecondaryIndexes(indexableRelation).mapValues(_.toList.asJava)
        .asJava).asScala
    if (enabledMatchingIndexTables.isEmpty) {
      // No usable index table: leave the plan untouched.
      filter
    } else {
      var isPositionIDPresent = false
      // Detect whether the caller's projection already requests the
      // getPositionId() UDF (aliased as positionId).
      val project: Seq[NamedExpression] = if (cols != null) {
        cols.foreach {
          case a@Alias(s: ScalaUDF, name)
            if name.equalsIgnoreCase(CarbonCommonConstants.POSITION_ID) =>
            isPositionIDPresent = true
          case _ =>
        }
        cols
      } else {
        filter.output
      }
      var mainTableDf = createDF(sparkSession, Project(project, filter))

      // If the createDF creates DF from MV flow by hitting MV, no need to consider this DF
      // as main table DF and then do join with SI table and go ahead, So here checking whether the
      // DF is from child table, if it is, just return the filter as it is without rewriting
      val tableRelation = mainTableDf.logicalPlan collect {
        case l: LogicalRelation if l.relation.isInstanceOf[CarbonDatasourceHadoopRelation] =>
          l.relation.asInstanceOf[CarbonDatasourceHadoopRelation]
      }
      if (tableRelation.nonEmpty && tableRelation.head.carbonTable.isMV) {
        return filter
      }

      if (!isPositionIDPresent) {
        // Inject the positionId column needed as the join key with SI tables.
        mainTableDf = mainTableDf.selectExpr("getPositionId() as positionId", "*")
      } else {
        // if the user has requested for the positionID column, in that case we are
        // adding this table property. This is used later to check whether to
        // remove the positionId column from the projection list.
        // This property will be reflected across sessions as it is directly added to tblProperties.
        // So concurrent query run with getPositionID() UDF will have issue.
        // But getPositionID() UDF is restricted to testing purpose.
        indexableRelation.carbonTable.getTableInfo.getFactTable.getTableProperties
          .put("isPositionIDRequested", "true")
      }

      // map for index table name to its column name mapping
      val indexTableToColumnsMapping: mutable.Map[String, Set[String]] =
        new mutable.HashMap[String, Set[String]]()
      // map for index table to logical relation mapping
      val indexTableToLogicalRelationMapping: mutable.Map[String, LogicalPlan] =
        new mutable.HashMap[String, LogicalPlan]()
      // map for index table to attributeMap mapping. AttributeMap is a mapping of columnName
      // to its attribute reference
      val indexTableAttributeMap: mutable.Map[String, Map[String, AttributeReference]] =
        new mutable.HashMap[String, Map[String, AttributeReference]]()
      // mapping of all the index tables and its columns created on the main table
      val allIndexTableToColumnMapping = CarbonIndexUtil.getSecondaryIndexes(indexableRelation)

      // Populate the three lookup maps for every selected index table.
      enabledMatchingIndexTables.foreach { matchedTable =>
        // create index table to index column mapping
        val indexTableColumns = allIndexTableToColumnMapping.getOrElse(matchedTable, Array())
        indexTableToColumnsMapping.put(matchedTable, indexTableColumns.toSet)
        // create index table to logical plan mapping
        val indexTableLogicalPlan = retrievePlan(sparkSession.sessionState.catalog
          .lookupRelation(TableIdentifier(matchedTable, Some(dbName))))(sparkSession)
        indexTableToLogicalRelationMapping.put(matchedTable, indexTableLogicalPlan)
        // collect index table columns
        indexTableLogicalPlan collect {
          case l: LogicalRelation if l.relation.isInstanceOf[CarbonDatasourceHadoopRelation] =>
            indexTableAttributeMap
              .put(matchedTable, l.output.map { attr => (attr.name.toLowerCase -> attr) }.toMap)
        }
      }

      // Build the filter push-down tree from the original filter condition.
      // NOTE(review): newSIFilterTree is a tuple — _1 appears to be the tree,
      // _3 the (optional) table name; confirm against createIndexTableFilterCondition.
      val filterTree: SIFilterPushDownOperation = null
      val newSIFilterTree = createIndexTableFilterCondition(
        filterTree,
        filter.copy(filter.condition, filter.child).condition,
        indexTableToColumnsMapping,
        pushDownNotNullFilter)
      val indexTablesDF: DataFrame = newSIFilterTree._3 match {
        case Some(tableName) =>
          // flag to check whether apply limit literal on the filter push down condition or not
          // if it satisfies the limit push down scenario. it will be true only if the complete
          // tree has only one node which is of unary type
          val checkAndApplyLimitLiteral = newSIFilterTree._1 match {
            case SIUnaryFilterPushDownOperation(tableName, filterCondition) => true
            case _ => false
          }
          val dataFrameWithAttributes = createIndexFilterDataFrame(
            newSIFilterTree._1,
            indexTableAttributeMap,
            Set.empty,
            indexTableToLogicalRelationMapping,
            originalFilterAttributes,
            limitLiteral,
            checkAndApplyLimitLiteral,
            sortNodeForPushDown)
          dataFrameWithAttributes._1
        case _ =>
          null
        // don't do anything
      }

      // In case column1 > column2, index table have only 1 column1,
      // then index join should not happen if only 1 index table is selected.
      if (indexTablesDF != null) {
        // Join the main table with the combined SI result on the position id.
        mainTableDf = mainTableDf.join(indexTablesDF,
          mainTableDf(CarbonCommonConstants.POSITION_ID) ===
            indexTablesDF(CarbonCommonConstants.POSITION_REFERENCE))
        mainTableDf.queryExecution.analyzed
      } else {
        filter
      }
    }
  }
def retrievePlan(plan: LogicalPlan)(sparkSession: SparkSession):
LogicalRelation = {
plan match {
case SubqueryAlias(alias, l: UnresolvedCatalogRelation) =>
val logicalPlan = new FindDataSourceTable(sparkSession).apply(l).collect {
case lr: LogicalRelation => lr
}
if (logicalPlan.head.relation.isInstanceOf[CarbonDatasourceHadoopRelation]) {
logicalPlan.head
} else {
null
}
case SubqueryAlias(alias, l: LogicalRelation)
if l.relation.isInstanceOf[CarbonDatasourceHadoopRelation] => l
case l: LogicalRelation if l.relation.isInstanceOf[CarbonDatasourceHadoopRelation] => l
case _ => null
}
}
private def createDF(sparkSession: SparkSession, logicalPlan: LogicalPlan): DataFrame = {
new Dataset[Row](sparkSession, logicalPlan, RowEncoder(logicalPlan.schema))
}
/**
* This method will traverse the filter push down tree and prepare the data frame based on the
* NodeType. if nodeType is union then it will perform union on 2 tables else it will
* perform join on 2 tables
*
*/
  private def createIndexFilterDataFrame(
      siFilterPushDownTree: SIFilterPushDownOperation,
      indexTableAttributeMap: mutable.Map[String, Map[String, AttributeReference]],
      indexJoinedFilterAttributes: Set[String],
      indexTableToLogicalRelationMapping: mutable.Map[String, LogicalPlan],
      originalFilterAttributes: Set[String],
      limitLiteral: Literal,
      checkAndAddLimitLiteral: Boolean = false,
      sortNode: Sort): (DataFrame, Set[String]) = {
    // Lower-cased names of the sort columns, used to decide sort/limit push-down.
    val sortColumns = if (sortNode != null) {
      sortNode.order.map(_.child.asInstanceOf[AttributeReference].name.toLowerCase()).toSet
    } else {
      Set.empty
    }
    siFilterPushDownTree match {
      // Leaf: build filter + projection + (optionally) sort/limit + group-by
      // directly on the single index table.
      case SIUnaryFilterPushDownOperation(tableName, filterCondition) =>
        val attributeMap = indexTableAttributeMap.get(tableName).get
        var filterAttributes = indexJoinedFilterAttributes
        // array_contains(col, v) on the main table becomes an equality check on
        // the (flattened) index table.
        val newFilterCondition = filterCondition transform {
          case ArrayContains(left, right) =>
            EqualTo(left, right)
        }
        // Rebind attribute references from the main table to the index table's
        // attributes, recording every filter column seen along the way.
        val indexTableFilter = newFilterCondition transformDown {
          case array: GetArrayItem =>
            val attr = array.child.asInstanceOf[AttributeReference]
            val attrNew = attributeMap.get(attr.name.toLowerCase()).get
            filterAttributes += attr.name.toLowerCase
            attrNew
          case attr: AttributeReference =>
            val attrNew = attributeMap.get(attr.name.toLowerCase()).get
            filterAttributes += attr.name.toLowerCase
            attrNew
        }
        val positionRef = attributeMap(CarbonCommonConstants.POSITION_REFERENCE.toLowerCase())
        var positionReference = Seq(positionRef)
        // Add Filter on logicalRelation
        var planTransform: LogicalPlan = Filter(indexTableFilter,
          indexTableToLogicalRelationMapping(tableName))
        var needPushDown = false
        var addLimit = checkAndAddLimitLiteral
        // Sort push-down is only safe when the index filter covers ALL of the
        // original filter attributes; otherwise limit push-down must be dropped.
        if (sortNode != null && (filterAttributes.intersect(originalFilterAttributes)
          .size == originalFilterAttributes.size)) {
          needPushDown = true
        } else if (filterAttributes.intersect(originalFilterAttributes)
          .size != originalFilterAttributes.size) {
          addLimit = false
        }
        var sortAttr: Seq[AttributeReference] = Seq.empty
        if (needPushDown) {
          // Include the sort columns in the projection so the pushed-down Sort
          // below can reference the index table's attributes.
          val plan = indexTableToLogicalRelationMapping(tableName)
          plan collect {
            case l: LogicalRelation if l.relation.isInstanceOf[CarbonDatasourceHadoopRelation] =>
              sortColumns.foreach { x =>
                sortAttr = sortAttr :+ attributeMap(x)
              }
          }
          positionReference = positionReference ++ sortAttr
        }
        // Add PositionReference Projection on Filter
        planTransform = Project(positionReference, planTransform)
        if (needPushDown) {
          // Rebuild the sort order against the index table attributes and push
          // Limit(Sort(...)) below the join.
          var newSortOrder: Seq[SortOrder] = Seq.empty
          var i = 0
          sortNode.order.foreach { sortOrder =>
            newSortOrder = newSortOrder :+ SortOrder(sortAttr(i), sortOrder.direction)
            i = i + 1
          }
          planTransform = Limit(limitLiteral, Sort(newSortOrder, sortNode.global, planTransform))
          // limit is already added, no need to add again.
          addLimit = false
        }
        var indexTableDf = createDF(sparkSession, planTransform)
        // When all the filter columns are joined from index table,
        // limit can be pushed down before grouping last index table as the
        // number of records selected will definitely return at least 1 record
        // NOTE: flag checkAndAddLimitLiteral will be true only when the complete filter tree
        // contains only one node which is a unary node
        val indexLogicalPlan = if (addLimit) {
          if (limitLiteral != null &&
            filterAttributes.intersect(originalFilterAttributes)
              .size == originalFilterAttributes.size) {
            Limit(limitLiteral, indexTableDf.logicalPlan)
          } else {
            indexTableDf.logicalPlan
          }
        } else {
          indexTableDf.logicalPlan
        }
        // Add Group By on PositionReference after join
        indexTableDf = createDF(sparkSession,
          Aggregate(positionReference, positionReference, indexLogicalPlan))
        // return the data frame
        (indexTableDf, filterAttributes)
      // Binary node: recurse into both subtrees (threading the accumulated
      // filter-attribute set left-to-right), then combine via union or join.
      case SIBinaryFilterPushDownOperation(nodeType, leftOperation, rightOperation) =>
        val (leftOperationDataFrame, indexFilterAttributesLeft) = createIndexFilterDataFrame(
          leftOperation,
          indexTableAttributeMap,
          indexJoinedFilterAttributes,
          indexTableToLogicalRelationMapping,
          originalFilterAttributes,
          limitLiteral,
          sortNode = sortNode)
        val (rightOperationDataFrame, indexFilterAttributesRight) = createIndexFilterDataFrame(
          rightOperation,
          indexTableAttributeMap,
          indexFilterAttributesLeft,
          indexTableToLogicalRelationMapping,
          originalFilterAttributes,
          limitLiteral,
          sortNode = sortNode)
        // create new data frame by applying join or union based on nodeType
        val newDFAfterUnionOrJoin = applyUnionOrJoinOnDataFrames(nodeType,
          leftOperationDataFrame,
          rightOperationDataFrame,
          indexFilterAttributesRight,
          originalFilterAttributes,
          limitLiteral)
        (newDFAfterUnionOrJoin, indexFilterAttributesRight)
    }
  }
/**
 * Combines the two DataFrames built for a binary SI filter node into one: UNION for an
 * OR node, JOIN on the position-reference column for any other (AND) node. The combined
 * plan is finally grouped on the position reference so duplicate row references collapse.
 *
 * @param nodeType node type of the binary filter node (Or / And)
 * @param leftConditionDataFrame data frame built for the left filter condition
 * @param rightConditionDataFrame data frame built for the right filter condition
 * @param indexJoinedFilterAttributes filter attribute names already covered by index tables
 * @param originalFilterAttributes all filter attribute names of the original query
 * @param limitLiteral limit literal of the query, or null when no limit push down applies
 * @return combined data frame grouped on the position reference
 */
private def applyUnionOrJoinOnDataFrames(nodeType: NodeType,
    leftConditionDataFrame: DataFrame,
    rightConditionDataFrame: DataFrame,
    indexJoinedFilterAttributes: Set[String],
    originalFilterAttributes: Set[String],
    limitLiteral: Literal): DataFrame = {
  // For multiple index table selection,
  // all index tables are joined before joining with main table
  var allIndexTablesDF = nodeType match {
    case NodeType.Or =>
      rightConditionDataFrame.union(leftConditionDataFrame)
    case _ =>
      leftConditionDataFrame.join(rightConditionDataFrame,
        leftConditionDataFrame(CarbonCommonConstants.POSITION_REFERENCE) ===
        rightConditionDataFrame(CarbonCommonConstants.POSITION_REFERENCE))
  }
  // When all the filter columns are joined from index table,
  // limit can be pushed down before grouping last index table as the
  // number of records selected will definitely return at least 1 record
  val indexLogicalPlan = if (limitLiteral != null &&
    indexJoinedFilterAttributes.intersect(originalFilterAttributes)
      .size == originalFilterAttributes.size) {
    Limit(limitLiteral, allIndexTablesDF.logicalPlan)
  } else {
    allIndexTablesDF.logicalPlan
  }
  // in case of same table join position reference taken from the table relation will always
  // return the same positionReference which can result in Node Binding exception.
  // To avoid this take the positionReference from logical plan of dataFrame in which right
  // column will always be the projection column in join condition
  var positionReferenceFromLogicalPlan: Seq[AttributeReference] = Seq.empty
  indexLogicalPlan transform {
    case join: Join =>
      // this check is required as we need the right condition positionReference only for
      // the topmost node
      if (positionReferenceFromLogicalPlan.isEmpty) {
        // take the right attribute reference as new data frame positionReference is always
        // put in the right above
        positionReferenceFromLogicalPlan =
          Seq(join.condition.get.asInstanceOf[EqualTo].right
            .asInstanceOf[AttributeReference])
      }
      join
    case project: Project =>
      // union case: the first (topmost) projection column is the position reference
      if (positionReferenceFromLogicalPlan.isEmpty) {
        positionReferenceFromLogicalPlan =
          Seq(project.projectList.head.asInstanceOf[AttributeReference])
      }
      project
  }
  // Add Group By on PositionReference after join
  allIndexTablesDF = createDF(sparkSession,
    Aggregate(positionReferenceFromLogicalPlan,
      positionReferenceFromLogicalPlan,
      indexLogicalPlan))
  // return the data frame
  allIndexTablesDF
}
/**
 * Rewrites a filter condition so that predicates which must not be evaluated on the
 * secondary index table are replaced by boolean literals: IsNotNull becomes
 * !pushDownNotNullFilter, NI-UDF wrapped predicates become true, and partial-string
 * predicates (Like / EndsWith / Contains) become true when partial-string lookup
 * is disabled.
 */
private def removeIsNotNullAttribute(condition: Expression,
    pushDownNotNullFilter: Boolean): Expression = {
  // Property that controls whether Like / EndsWith / Contains may be pushed to SI.
  val partialStringLookupEnabled =
    CarbonProperties.getInstance
      .getProperty(
        CarbonCommonConstants.ENABLE_SI_LOOKUP_PARTIALSTRING,
        CarbonCommonConstants.ENABLE_SI_LOOKUP_PARTIALSTRING_DEFAULT)
      .equalsIgnoreCase("true")
  condition.transform {
    // Like is possible only if user provides _ in between the string
    // _ in like means any single character wild card check.
    case IsNotNull(_: AttributeReference) =>
      Literal(!pushDownNotNullFilter)
    case expr if CarbonHiveIndexMetadataUtil.checkNIUDF(expr) =>
      Literal(true)
    case _: Like if !partialStringLookupEnabled =>
      Literal(true)
    case EndsWith(_: AttributeReference, _: Literal) if !partialStringLookupEnabled =>
      Literal(true)
    case Contains(_: AttributeReference, _: Literal) if !partialStringLookupEnabled =>
      Literal(true)
  }
}
/**
 * Walks the boolean filter tree and returns true when at least one leaf predicate is a
 * startsWith-compatible predicate (i.e. anything other than Like / EndsWith / Contains),
 * as decided by [[hasStartsWith]].
 */
private def conditionsHasStartWith(condition: Expression): Boolean = {
  condition match {
    case Or(left, right) =>
      // a single qualifying leaf on either side is enough
      conditionsHasStartWith(left) || conditionsHasStartWith(right)
    case And(left, right) =>
      conditionsHasStartWith(left) || conditionsHasStartWith(right)
    case leaf =>
      hasStartsWith(leaf)
  }
}
/**
 * Returns true for any predicate that is NOT a partial-string predicate
 * (Like, EndsWith or Contains on an attribute and a literal).
 */
private def hasStartsWith(condition: Expression): Boolean = {
  val isPartialStringPredicate = condition match {
    case _: Like => true
    case EndsWith(_: AttributeReference, _: Literal) => true
    case Contains(_: AttributeReference, _: Literal) => true
    case _ => false
  }
  !isPartialStringPredicate
}
/**
 * Checks whether the given filter condition is eligible for SI push down and, if so,
 * returns the name of the first index table whose column set covers EVERY attribute
 * referenced by the condition.
 *
 * Never pushed down: negated predicates (Not of EqualTo / Like / In / Contains /
 * EndsWith / StartsWith), NI-UDF wrapped predicates, IsNotNull unless
 * pushDownNotNullFilter is set, and partial-string predicates (Like / EndsWith /
 * Contains) when pushDownRequired is false.
 *
 * @param condition filter condition to examine
 * @param indexTableColumnsToTableMapping index table name -> lower-cased index column names
 * @param pushDownRequired true when partial-string predicates may be pushed down
 * @param pushDownNotNullFilter true when IsNotNull may be pushed down (order-by limit case)
 * @return Some(indexTableName) when the condition can be pushed down, None otherwise
 */
private def isConditionColumnInIndexTable(condition: Expression,
    indexTableColumnsToTableMapping: mutable.Map[String, Set[String]],
    pushDownRequired: Boolean, pushDownNotNullFilter: Boolean): Option[String] = {
  // In case of Like Filter in OR, both the conditions should not be transformed
  // In case of like filter in And, only like filter should be removed and
  // other filter should be transformed with index table
  // In case NI condition with and, eg., NI(col1 = 'a') && col1 = 'b',
  // only col1 = 'b' should be pushed to index table.
  // In case NI condition with or, eg., NI(col1 = 'a') || col1 = 'b',
  // both the condition should not be pushed to index table.
  val doNotPushToSI = condition match {
    case IsNotNull(_: AttributeReference) => !pushDownNotNullFilter
    case Not(EqualTo(_: AttributeReference, _: Literal)) => true
    case Not(EqualTo(left: Cast, _: Literal))
      if left.child.isInstanceOf[AttributeReference] => true
    case Not(_: Like) => true
    // The element type of In's value list is erased at runtime, so match only on the
    // shape; a `right: Seq[Expression]` pattern here would be an unchecked match.
    case Not(In(_: AttributeReference, _)) => true
    case Not(Contains(_: AttributeReference, _: Literal)) => true
    case Not(EndsWith(_: AttributeReference, _: Literal)) => true
    case Not(StartsWith(_: AttributeReference, _: Literal)) => true
    case _: Like if !pushDownRequired => true
    case EndsWith(_: AttributeReference, _: Literal) if !pushDownRequired => true
    case Contains(_: AttributeReference, _: Literal) if !pushDownRequired => true
    case plan if CarbonHiveIndexMetadataUtil.checkNIUDF(plan) => true
    case _ => false
  }
  if (doNotPushToSI) {
    None
  } else {
    val attributes = condition collect {
      case attributeRef: AttributeReference => attributeRef
    }
    // First index table whose (lower-cased) column set covers all referenced attributes;
    // replaces the previous breakable/break loop with the same first-match semantics.
    indexTableColumnsToTableMapping.collectFirst {
      case (indexTableName, indexColumns)
        if attributes.forall(attr => indexColumns.contains(attr.name.toLowerCase)) =>
        indexTableName
    }
  }
}
/**
 * This method will evaluate the filter tree and return new filter tree with SI push down
 * operation and flag
 * 1) In case of or condition, all the columns in left & right are existing in the index tables,
 * then the condition will be pushed to index tables as union and joined with main table
 * 2) In case of and condition, if any of the left or right condition column matches with
 * index table column, then that particular condition will be pushed to index table.
 *
 * @param filterTree SI push down tree built so far for the enclosing filter
 * @param condition filter condition (sub-)tree currently being evaluated
 * @param indexTableToColumnsMapping index table name -> lower-cased index column names
 * @param pushDownNotNullFilter true when IsNotNull may be pushed down (order-by limit case)
 * @return (new SI filter tree, possibly rewritten condition, index table the condition can
 *         be pushed to — None when it cannot be pushed)
 */
private def createIndexTableFilterCondition(filterTree: SIFilterPushDownOperation,
    condition: Expression,
    indexTableToColumnsMapping: mutable.Map[String, Set[String]],
    pushDownNotNullFilter: Boolean):
  (SIFilterPushDownOperation, Expression, Option[String]) = {
  condition match {
    case or@Or(left, right) =>
      val (newSIFilterTreeLeft, newLeft, tableNameLeft) =
        createIndexTableFilterCondition(
          filterTree,
          left,
          indexTableToColumnsMapping, pushDownNotNullFilter)
      val (newSIFilterTreeRight, newRight, tableNameRight) =
        createIndexTableFilterCondition(
          filterTree,
          right,
          indexTableToColumnsMapping, pushDownNotNullFilter)
      (tableNameLeft, tableNameRight) match {
        case (Some(tableLeft), Some(tableRight)) =>
          // In case of OR filter when both right and left filter exists in the
          // index table (same or different), then only push down the condition to index tables
          // e.g name='xyz' or city='c1', then if both name and city have SI tables created on
          // them, then push down the condition to SI tables.
          // 1. If both the columns are from same index table then the condition can
          // directly be joined to main table
          // 2. If both the columns are from different index table then first union operation
          // need to be performed between the 2 index tables and then joined with main table
          val newFilterCondition = or.copy(newLeft, newRight)
          // Points to be noted for passing the table name to next level: applicable for both
          // AND and OR filter case
          // 1. If both left and right condition are from same table then Unary node is created.
          // When it is an Unary node both left and right table name will be same so does not
          // matter which table name you are passing to next level.
          // 2. If left and right condition are from different table then binary node is
          // created. In case of binary node table name is not used for comparison. So it does
          // not matter which table name you pass to next level.
          (createSIFilterPushDownNode(
            newSIFilterCondition = newFilterCondition,
            leftOperation = newSIFilterTreeLeft,
            leftNodeTableName = tableLeft,
            rightOperation = newSIFilterTreeRight,
            rightNodeTableName = tableRight,
            nodeType = NodeType.Or), newFilterCondition, tableNameRight)
        case _ =>
          // OR is pushed only when BOTH sides are SI-covered; otherwise keep as-is
          (filterTree, condition, None)
      }
    // array_contains combinations are never pushed down to SI: return the tree unchanged
    case And(ArrayContains(_, _), ArrayContains(_, _)) =>
      (filterTree, condition, None)
    case And(And(IsNotNull(_), ArrayContains(_, _)), ArrayContains(_, _)) =>
      (filterTree, condition, None)
    case and@And(left, right) =>
      val (newSIFilterTreeLeft, newLeft, tableNameLeft) =
        createIndexTableFilterCondition(
          filterTree,
          left,
          indexTableToColumnsMapping,
          pushDownNotNullFilter)
      val (newSIFilterTreeRight, newRight, tableNameRight) =
        createIndexTableFilterCondition(
          filterTree,
          right,
          indexTableToColumnsMapping,
          pushDownNotNullFilter)
      (tableNameLeft, tableNameRight) match {
        case (Some(tableLeft), Some(tableRight)) =>
          // push down both left and right condition if both left and right columns have index
          // table created on them
          val newFilterCondition = and.copy(newLeft, newRight)
          (createSIFilterPushDownNode(
            newSIFilterCondition = newFilterCondition,
            leftOperation = newSIFilterTreeLeft,
            leftNodeTableName = tableLeft,
            rightOperation = newSIFilterTreeRight,
            rightNodeTableName = tableRight,
            nodeType = NodeType.And), newFilterCondition, tableNameRight)
        case (Some(tableLeft), None) =>
          // return the left node
          (newSIFilterTreeLeft, newLeft, tableNameLeft)
        case (None, Some(tableRight)) =>
          // return the right node
          (newSIFilterTreeRight, newRight, tableNameRight)
        case _ =>
          (filterTree, condition, None)
      }
    case _ =>
      // check whether the filter column exists in SI table and can it be pushDown
      var isPartialStringEnabled = CarbonProperties.getInstance
        .getProperty(CarbonCommonConstants.ENABLE_SI_LOOKUP_PARTIALSTRING,
          CarbonCommonConstants.ENABLE_SI_LOOKUP_PARTIALSTRING_DEFAULT)
        .equalsIgnoreCase("true")
      // When carbon.si.lookup.partialstring set to FALSE, if filter has startsWith then SI is
      // used even though combination of other filters like endsWith or Contains
      if (!isPartialStringEnabled) {
        isPartialStringEnabled = conditionsHasStartWith(condition)
      }
      val tableName = isConditionColumnInIndexTable(condition,
        indexTableToColumnsMapping,
        isPartialStringEnabled, pushDownNotNullFilter = pushDownNotNullFilter)
      // create a node if condition can be pushed down else return the same filterTree
      val newFilterTree = tableName match {
        case Some(table) =>
          SIUnaryFilterPushDownOperation(table, condition)
        case None =>
          filterTree
      }
      (newFilterTree, condition, tableName)
  }
}
/**
 * Creates a new node for the filter push down tree.
 * a. If neither side is already a binary node AND both conditions come from the same index
 *    table, the two conditions are merged into a single unary root node.
 * b. Otherwise a binary node is created with the left and right operations as children.
 *
 * @param newSIFilterCondition merged filter condition for the unary case
 * @param leftOperation push down operation built for the left condition
 * @param leftNodeTableName index table of the left condition
 * @param rightOperation push down operation built for the right condition
 * @param rightNodeTableName index table of the right condition
 * @param nodeType node type (Or / And) used when a binary node is created
 * @return the new unary or binary push down node
 */
private def createSIFilterPushDownNode(
    newSIFilterCondition: Expression,
    leftOperation: SIFilterPushDownOperation,
    leftNodeTableName: String,
    rightOperation: SIFilterPushDownOperation,
    rightNodeTableName: String,
    nodeType: NodeType): SIFilterPushDownOperation = {
  // A unary (merged) node is impossible once either side is already binary, even when the
  // left and right table names match: the binary structure has to be preserved.
  val eitherSideIsBinary =
    leftOperation.isInstanceOf[SIBinaryFilterPushDownOperation] ||
    rightOperation.isInstanceOf[SIBinaryFilterPushDownOperation]
  if (!eitherSideIsBinary && leftNodeTableName == rightNodeTableName) {
    // both conditions belong to the same index table: merge into one unary operation
    SIUnaryFilterPushDownOperation(leftNodeTableName, newSIFilterCondition)
  } else {
    SIBinaryFilterPushDownOperation(nodeType, leftOperation, rightOperation)
  }
}
/**
 * This method is used to determine whether limit has to be pushed down to secondary index
 * or not.
 *
 * @param relation carbon relation of the table being queried
 * @return false if the carbon table is an index table, or if an update status file exists
 *         (a delete has happened on the table, so pushing down is unsafe). True otherwise.
 */
private def isLimitPushDownRequired(relation: CarbonRelation): Boolean = {
  val carbonTable = relation.carbonTable
  if (carbonTable.isIndexTable) {
    false
  } else {
    // only list the metadata directory when the table could qualify
    val metadataFiles = FileFactory.getCarbonFile(carbonTable.getMetadataPath).listFiles()
    !metadataFiles.exists(
      _.getName.startsWith(CarbonCommonConstants.TABLEUPDATESTATUS_FILENAME))
  }
}
/**
 * Entry point of the SI rewrite: walks the logical plan and, for every filter on a carbon
 * table that has secondary indexes, rewrites the filter into a join with the matching
 * index table(s). Sort/limit nodes seen on the way down are remembered per query so that
 * order-by limit (together with the required not-null filter) can be pushed down too.
 *
 * @param plan logical plan to transform
 * @param needProjection when true, a projection is kept on top of the rewritten join so
 *                       the output attributes stay stable (needed e.g. under Union/Sort)
 */
def transformFilterToJoin(plan: LogicalPlan, needProjection: Boolean): LogicalPlan = {
  // per-table cache of isLimitPushDownRequired results for this plan walk
  val isRowDeletedInTableMap = scala.collection.mutable.Map.empty[String, Boolean]
  // if the join push down is enabled, then no need to add projection list to the logical plan as
  // we can directly map the join output with the required projections
  // if it is false then the join will not be pushed down to carbon and
  // there it is required to add projection list to map the output from the join
  val pushDownJoinEnabled = sparkSession.sparkContext.getConf
    .getBoolean("spark.carbon.pushdown.join.as.filter", defaultValue = true)
  // rewritten nodes do not get their children re-visited (see transformPlan)
  val transformChild = false
  var addProjection = needProjection
  // to store the sort node per query
  var sortNodeForPushDown: Sort = null
  // to store the limit literal per query
  var limitLiteral: Literal = null
  // by default do not push down notNull filter,
  // but for orderby limit push down, push down notNull filter also. Else we get wrong results.
  var pushDownNotNullFilter: Boolean = false
  val transformedPlan = transformPlan(plan, {
    case union: Union =>
      // In case of Union, Extra Project has to be added to the Plan. Because if left table is
      // pushed to SI and right table is not pushed, then Output Attribute mismatch will happen
      addProjection = true
      (union, true)
    case sort@Sort(_, _, _) =>
      addProjection = true
      (sort, true)
    // limit over sort (optionally through a project) — record the nodes for push down
    case limit@Limit(literal: Literal, sort@Sort(_, _, child)) =>
      child match {
        case filter: Filter =>
          if (checkIfPushDownOrderByLimitAndNotNullFilter(literal, sort, filter)) {
            sortNodeForPushDown = sort
            limitLiteral = literal
            pushDownNotNullFilter = true
          }
        case p: Project if p.child.isInstanceOf[Filter] =>
          if (checkIfPushDownOrderByLimitAndNotNullFilter(literal,
            sort,
            p.child.asInstanceOf[Filter])) {
            sortNodeForPushDown = sort
            limitLiteral = literal
            pushDownNotNullFilter = true
          }
        case _ =>
      }
      (limit, transformChild)
    case limit@Limit(literal: Literal, _@Project(_, sort@Sort(_, _, child))) =>
      child match {
        case f: Filter =>
          if (checkIfPushDownOrderByLimitAndNotNullFilter(literal, sort, f)) {
            sortNodeForPushDown = sort
            limitLiteral = literal
            pushDownNotNullFilter = true
          }
        case p: Project if (p.child.isInstanceOf[Filter]) =>
          if (checkIfPushDownOrderByLimitAndNotNullFilter(literal,
            sort,
            p.child.asInstanceOf[Filter])) {
            sortNodeForPushDown = sort
            limitLiteral = literal
            pushDownNotNullFilter = true
          }
        case _ =>
      }
      (limit, transformChild)
    // plain filter directly over a carbon relation that has secondary indexes
    case filter@Filter(condition, _@MatchIndexableRelation(indexableRelation))
      if !condition.isInstanceOf[IsNotNull] &&
        CarbonIndexUtil.getSecondaryIndexes(indexableRelation).nonEmpty =>
      val reWrittenPlan = rewritePlanForSecondaryIndex(
        filter,
        indexableRelation,
        filter.child.asInstanceOf[LogicalRelation].relation
          .asInstanceOf[CarbonDatasourceHadoopRelation].carbonRelation.databaseName,
        limitLiteral = limitLiteral,
        sortNodeForPushDown = sortNodeForPushDown,
        pushDownNotNullFilter = pushDownNotNullFilter)
      if (reWrittenPlan.isInstanceOf[Join]) {
        if (pushDownJoinEnabled && !addProjection) {
          (reWrittenPlan, transformChild)
        } else {
          (Project(filter.output, reWrittenPlan), transformChild)
        }
      } else {
        (filter, transformChild)
      }
    case projection@Project(cols, filter@Filter(condition,
      _@MatchIndexableRelation(indexableRelation)))
      if !condition.isInstanceOf[IsNotNull] &&
        CarbonIndexUtil.getSecondaryIndexes(indexableRelation).nonEmpty =>
      val reWrittenPlan = rewritePlanForSecondaryIndex(
        filter,
        indexableRelation,
        filter.child.asInstanceOf[LogicalRelation].relation
          .asInstanceOf[CarbonDatasourceHadoopRelation].carbonRelation.databaseName,
        cols,
        limitLiteral = limitLiteral,
        sortNodeForPushDown = sortNodeForPushDown,
        pushDownNotNullFilter = pushDownNotNullFilter)
      // If Index table is matched, join plan will be returned.
      // Adding projection over join to return only selected columns from query.
      // Else all columns from left & right table will be returned in output columns
      if (reWrittenPlan.isInstanceOf[Join]) {
        if (pushDownJoinEnabled && !addProjection) {
          (reWrittenPlan, transformChild)
        } else {
          (Project(projection.output, reWrittenPlan), transformChild)
        }
      } else {
        (projection, transformChild)
      }
    // When limit is provided in query, this limit literal can be pushed down to index table
    // if all the filter columns have index table, then limit can be pushed down before grouping
    // last index table, as number of records returned after join where unique and it will
    // definitely return at least 1 record.
    case limit@Limit(literal: Literal,
      filter@Filter(condition, _@MatchIndexableRelation(indexableRelation)))
      if !condition.isInstanceOf[IsNotNull] &&
        CarbonIndexUtil.getSecondaryIndexes(indexableRelation).nonEmpty =>
      val carbonRelation = filter.child.asInstanceOf[LogicalRelation].relation
        .asInstanceOf[CarbonDatasourceHadoopRelation].carbonRelation
      val uniqueTableName = s"${ carbonRelation.databaseName }.${ carbonRelation.tableName }"
      if (!isRowDeletedInTableMap
        .contains(s"${ carbonRelation.databaseName }.${ carbonRelation.tableName }")) {
        isRowDeletedInTableMap.put(uniqueTableName, isLimitPushDownRequired(carbonRelation))
      }
      val reWrittenPlan = if (isRowDeletedInTableMap(uniqueTableName)) {
        rewritePlanForSecondaryIndex(filter, indexableRelation,
          carbonRelation.databaseName, limitLiteral = literal)
      } else {
        rewritePlanForSecondaryIndex(filter, indexableRelation,
          carbonRelation.databaseName)
      }
      if (reWrittenPlan.isInstanceOf[Join]) {
        if (pushDownJoinEnabled && !addProjection) {
          (Limit(literal, reWrittenPlan), transformChild)
        } else {
          (Limit(literal, Project(limit.output, reWrittenPlan)), transformChild)
        }
      } else {
        (limit, transformChild)
      }
    case limit@Limit(literal: Literal, projection@Project(cols, filter@Filter(condition,
      _@MatchIndexableRelation(indexableRelation))))
      if !condition.isInstanceOf[IsNotNull] &&
        CarbonIndexUtil.getSecondaryIndexes(indexableRelation).nonEmpty =>
      val carbonRelation = filter.child.asInstanceOf[LogicalRelation].relation
        .asInstanceOf[CarbonDatasourceHadoopRelation].carbonRelation
      val uniqueTableName = s"${ carbonRelation.databaseName }.${ carbonRelation.tableName }"
      if (!isRowDeletedInTableMap
        .contains(s"${ carbonRelation.databaseName }.${ carbonRelation.tableName }")) {
        isRowDeletedInTableMap.put(uniqueTableName, isLimitPushDownRequired(carbonRelation))
      }
      val reWrittenPlan = if (isRowDeletedInTableMap(uniqueTableName)) {
        rewritePlanForSecondaryIndex(filter, indexableRelation,
          carbonRelation.databaseName, cols, limitLiteral = literal)
      } else {
        rewritePlanForSecondaryIndex(filter, indexableRelation,
          carbonRelation.databaseName, cols)
      }
      if (reWrittenPlan.isInstanceOf[Join]) {
        if (pushDownJoinEnabled && !addProjection) {
          (Limit(literal, reWrittenPlan), transformChild)
        } else {
          (Limit(literal, Project(projection.output, reWrittenPlan)), transformChild)
        }
      } else {
        (limit, transformChild)
      }
  })
  // finally strip the NI UDF markers from all remaining filter conditions
  val transformedPlanWithoutNIUdf = transformedPlan.transform {
    case filter: Filter =>
      Filter(CarbonHiveIndexMetadataUtil.transformToRemoveNI(filter.condition), filter.child)
  }
  transformedPlanWithoutNIUdf
}
/**
 * Returns a copy of this node where `rule` has been applied to the tree and all of
 * its children (pre-order). When `rule` does not apply to a given node it is left unchanged.
 * When the rule DOES change a node, the boolean `transformChild` flag returned by the rule
 * decides whether its children are still visited or the traversal stops at that node.
 *
 * @param plan the plan (sub-)tree to transform
 * @param rule partial function producing the replacement node plus a flag that decides
 *             whether to keep traversing the replacement's children
 */
def transformPlan(plan: LogicalPlan,
    rule: PartialFunction[LogicalPlan, (LogicalPlan, Boolean)]): LogicalPlan = {
  // default when the rule does not apply: keep the node, keep traversing its children
  val func: LogicalPlan => (LogicalPlan, Boolean) = {
    a => (a, true)
  }
  val (afterRule, transformChild) = CurrentOrigin.withOrigin(CurrentOrigin.get) {
    rule.applyOrElse(plan, func)
  }
  if (plan fastEquals afterRule) {
    // node unchanged by the rule: recurse into the children
    plan.mapChildren(transformPlan(_, rule))
  } else {
    // node WAS changed by the rule: recurse into the children only when the rule asked
    // for it via transformChild, otherwise return the changed node as-is
    if (transformChild) {
      afterRule.mapChildren(transformPlan(_, rule))
    } else {
      afterRule
    }
  }
}
/**
 * Decides whether an order-by + limit over the given filter can be pushed down to a
 * secondary index: exactly one SI must cover the filter columns, and every sort column
 * must exist in that index table.
 */
private def checkIfPushDownOrderByLimitAndNotNullFilter(literal: Literal, sort: Sort,
    filter: Filter): Boolean = {
  // lower-cased names of all attributes referenced by the filter condition
  val filterAttributes = filter.condition collect {
    case attr: AttributeReference => attr.name.toLowerCase
  }
  // get the parent table logical relation from the filter node
  val parentRelation = MatchIndexableRelation.unapply(filter.child)
  if (parentRelation.isEmpty) {
    return false
  }
  val parentTableRelation = parentRelation.get
  val databaseName = parentTableRelation.carbonRelation.databaseName
  val enabledMatchingIndexTables = CarbonCostBasedOptimizer.identifyRequiredTables(
    filterAttributes.toSet.asJava,
    CarbonIndexUtil.getSecondaryIndexes(parentTableRelation).mapValues(_.toList.asJava)
      .asJava).asScala
  // 1. check if only one SI matches for the filter columns
  // NOTE(review): `nonEmpty && size == 1` is redundant — size == 1 implies nonEmpty
  if (enabledMatchingIndexTables.nonEmpty && enabledMatchingIndexTables.size == 1) {
    // 2. check if all the sort columns is in SI
    // NOTE(review): the cast below throws if an order-by key is an expression rather than
    // a bare column reference — confirm callers only reach here with plain column sorts
    val sortColumns = sort
      .order
      .map(_.child.asInstanceOf[AttributeReference].name.toLowerCase())
      .toSet
    val indexCarbonTable = CarbonEnv
      .getCarbonTable(Some(databaseName), enabledMatchingIndexTables.head)(sparkSession)
    return sortColumns.forall { x => indexCarbonTable.getColumnByName(x) != null }
  }
  false
}
}
/**
 * Extractor that matches a logical relation backed by a carbon datasource, i.e. a plan
 * node on which secondary-index rewriting may be attempted.
 */
object MatchIndexableRelation {
  def unapply(plan: LogicalPlan): Option[CarbonDatasourceHadoopRelation] = {
    plan match {
      case logicalRelation: LogicalRelation =>
        // only carbon-backed relations are indexable
        logicalRelation.relation match {
          case carbonRelation: CarbonDatasourceHadoopRelation => Some(carbonRelation)
          case _ => None
        }
      case _ => None
    }
  }
}
| zzcclp/carbondata | integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/optimizer/CarbonSecondaryIndexOptimizer.scala | Scala | apache-2.0 | 45,054 |
package controllers
import com.bryzek.apidoc.api.v0.models.{Publication, Subscription, SubscriptionForm}
import com.bryzek.apidoc.api.v0.errors.{ErrorsResponse, FailedRequest, UnitResponse}
import java.util.UUID
import play.api.test._
import play.api.test.Helpers._
/**
 * Integration tests for the /subscriptions REST resource: creation, duplicate handling,
 * validation of organization/user/publication, retrieval, deletion (with idempotence)
 * and filtered listing. Each scenario runs against an embedded server (WithServer).
 */
class SubscriptionsSpec extends BaseSpec {
  import scala.concurrent.ExecutionContext.Implicits.global
  // shared organization for most scenarios; lazy so it is created once, on first use
  lazy val org = createOrganization()
  "POST /subscriptions" in new WithServer {
    val user = createUser()
    val subscription = createSubscription(
      SubscriptionForm(
        organizationKey = org.key,
        userGuid = user.guid,
        publication = Publication.MembershipRequestsCreate
      )
    )
    subscription.organization.key must be(org.key)
    subscription.user.guid must be(user.guid)
    subscription.publication must be(Publication.MembershipRequestsCreate)
  }
  "POST /subscriptions handles user already subscribed" in new WithServer {
    val user = createUser()
    val form = createSubscriptionForm(org, user)
    val subscription = createSubscription(form)
    // posting the same form twice must be rejected with a duplicate error
    intercept[ErrorsResponse] {
      createSubscription(form)
    }.errors.map(_.message) must be(Seq("User is already subscribed to this publication for this organization"))
  }
  "POST /subscriptions allows user to subscribe to a different organization" in new WithServer {
    val user = createUser()
    val form = createSubscriptionForm(org, user)
    val subscription1 = createSubscription(form)
    subscription1.organization.key must be(org.key)
    subscription1.user.guid must be(user.guid)
    subscription1.publication must be(Publication.MembershipRequestsCreate)
    // same user + publication but a different organization is a new, valid subscription
    val org2 = createOrganization()
    val subscription2 = createSubscription(form.copy(organizationKey = org2.key))
    subscription2.organization.key must be(org2.key)
    subscription2.user.guid must be(user.guid)
    subscription2.publication must be(Publication.MembershipRequestsCreate)
  }
  "POST /subscriptions validates org key" in new WithServer {
    val user = createUser()
    intercept[ErrorsResponse] {
      createSubscription(
        SubscriptionForm(
          organizationKey = UUID.randomUUID.toString,
          userGuid = user.guid,
          publication = Publication.MembershipRequestsCreate
        )
      )
    }.errors.map(_.message) must be(Seq("Organization not found"))
  }
  "POST /subscriptions validates user guid" in new WithServer {
    intercept[ErrorsResponse] {
      createSubscription(
        SubscriptionForm(
          organizationKey = org.key,
          userGuid = UUID.randomUUID,
          publication = Publication.MembershipRequestsCreate
        )
      )
    }.errors.map(_.message) must be(Seq("User not found"))
  }
  "POST /subscriptions validates publication" in new WithServer {
    val user = createUser()
    intercept[ErrorsResponse] {
      createSubscription(
        SubscriptionForm(
          organizationKey = org.key,
          userGuid = user.guid,
          publication = Publication(UUID.randomUUID.toString)
        )
      )
    }.errors.map(_.message) must be(Seq("Publication not found"))
  }
  "DELETE /subscriptions/:guid" in new WithServer {
    val subscription = createSubscription(createSubscriptionForm(org))
    await(client.subscriptions.deleteByGuid(subscription.guid)) must be(())
    await(client.subscriptions.deleteByGuid(subscription.guid)) must be(()) // test idempotence
    intercept[UnitResponse] {
      await(client.subscriptions.getByGuid(subscription.guid))
    }.status must be(404)
    // now recreate
    val subscription2 = createSubscription(createSubscriptionForm(org))
    await(client.subscriptions.getByGuid(subscription2.guid)) must be(subscription2)
  }
  "GET /subscriptions/:guid" in new WithServer {
    val subscription = createSubscription(createSubscriptionForm(org))
    await(client.subscriptions.getByGuid(subscription.guid)) must be(subscription)
    intercept[UnitResponse] {
      await(client.subscriptions.getByGuid(UUID.randomUUID))
    }.status must be(404)
  }
  "GET /subscriptions filters" in new WithServer {
    val user1 = createUser()
    val user2 = createUser()
    val org1 = createOrganization()
    val org2 = createOrganization()
    val subscription1 = createSubscription(
      SubscriptionForm(
        organizationKey = org1.key,
        userGuid = user1.guid,
        publication = Publication.MembershipRequestsCreate
      )
    )
    val subscription2 = createSubscription(
      SubscriptionForm(
        organizationKey = org2.key,
        userGuid = user2.guid,
        publication = Publication.ApplicationsCreate
      )
    )
    // each filter (organizationKey, userGuid, userGuid+publication) returns only its match
    await(client.subscriptions.get(organizationKey = Some(UUID.randomUUID.toString))) must be(Seq.empty)
    await(client.subscriptions.get(organizationKey = Some(org1.key))).map(_.guid) must be(Seq(subscription1.guid))
    await(client.subscriptions.get(organizationKey = Some(org2.key))).map(_.guid) must be(Seq(subscription2.guid))
    await(client.subscriptions.get(userGuid = Some(UUID.randomUUID))) must be(Seq.empty)
    await(client.subscriptions.get(userGuid = Some(user1.guid))).map(_.guid) must be(Seq(subscription1.guid))
    await(client.subscriptions.get(userGuid = Some(user2.guid))).map(_.guid) must be(Seq(subscription2.guid))
    await(client.subscriptions.get(userGuid = Some(user1.guid), publication = Some(Publication.MembershipRequestsCreate))).map(_.guid) must be(Seq(subscription1.guid))
    await(client.subscriptions.get(userGuid = Some(user2.guid), publication = Some(Publication.ApplicationsCreate))).map(_.guid) must be(Seq(subscription2.guid))
    // an unknown publication value is a client error, not an empty list
    intercept[FailedRequest] {
      await(client.subscriptions.get(publication = Some(Publication(UUID.randomUUID.toString)))) must be(Seq.empty)
    }.responseCode must be(400)
  }
  "GET /subscriptions authorizes user" in new WithServer {
    val subscription = createSubscription(createSubscriptionForm(org))
    val randomUser = createUser()
    // the owner sees the subscription; an unrelated user gets an empty result
    await(client.subscriptions.get(guid = Some(subscription.guid))).map(_.guid) must be(Seq(subscription.guid))
    await(newClient(randomUser).subscriptions.get(guid = Some(subscription.guid))).map(_.guid) must be(Nil)
  }
}
| Seanstoppable/apidoc | api/test/controllers/SubscriptionsSpec.scala | Scala | mit | 6,197 |
object Solution {

  // Sentinel for "not yet reached". Long.MaxValue / 2 (instead of Long.MaxValue) keeps
  // `sentinel + cellCost` from overflowing during relaxation. The original code used
  // the non-existent `Long.MAX_LONG`, which does not compile.
  private val Unreached: Long = Long.MaxValue / 2

  /**
   * Minimal path sum from the top-left to the bottom-right cell of a square matrix,
   * moving up, down, left or right (Project Euler problem 83 style).
   *
   * Uses repeated relaxation sweeps until a fixpoint is reached. The original ran a
   * fixed 10 sweeps, which is only an approximation for large inputs; iterating to
   * the fixpoint is guaranteed correct and terminates because every non-final sweep
   * strictly lowers at least one cell.
   *
   * @param matrix square matrix of non-negative cell costs
   * @return cost of the cheapest 4-directional path from (0,0) to (n-1,n-1)
   */
  def minPathSum(matrix: Array[Array[Long]]): Long = {
    val n = matrix.length
    val sol = Array.fill(n, n)(Unreached)
    sol(0)(0) = matrix(0)(0)
    var changed = true
    while (changed) {
      changed = false
      for {
        j <- 0 until n
        i <- 0 until n
      } {
        // relax cell (i, j) against each in-bounds neighbour
        var best = sol(i)(j)
        if (j > 0) best = math.min(best, sol(i)(j - 1) + matrix(i)(j))
        if (j < n - 1) best = math.min(best, sol(i)(j + 1) + matrix(i)(j))
        if (i > 0) best = math.min(best, sol(i - 1)(j) + matrix(i)(j))
        if (i < n - 1) best = math.min(best, sol(i + 1)(j) + matrix(i)(j))
        if (best < sol(i)(j)) {
          sol(i)(j) = best
          changed = true
        }
      }
    }
    sol(n - 1)(n - 1)
  }

  /** Reads N, then N rows of N whitespace-separated values, and prints the answer. */
  def main(args: Array[String]): Unit = {
    // Predef.readLine was removed in Scala 2.13; use StdIn explicitly.
    val n = scala.io.StdIn.readLine().trim.toInt
    val matrix = Array.fill(n) {
      scala.io.StdIn.readLine().trim.split("\\s+").map(_.toLong)
    }
    println(minPathSum(matrix))
  }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.common
import util.matching.Regex
import kafka.coordinator.GroupCoordinator
object Topic {

  // NOTE(review): the backslash escaping in this file's string literals looks
  // mangled (e.g. "\\\\." where a regex escape "\\." would be expected, and
  // the quoted dots below) — presumably an artifact of how the file was
  // exported; verify against the upstream Kafka source before touching it.

  /** Character class of legal topic-name characters:
    * ASCII alphanumerics, '.', '_' and '-'. */
  val legalChars = "[a-zA-Z0-9\\\\._\\\\-]"
  // Maximum allowed length of a topic name.
  private val maxNameLength = 255
  // Longest run of legal characters; validate() requires the whole name to be one run.
  private val rgx = new Regex(legalChars + "+")
  // Topics managed internally by the broker (consumer-group metadata).
  val InternalTopics = Set(GroupCoordinator.GroupMetadataTopicName)

  /** Validates a topic name, throwing InvalidTopicException when it is empty,
    * equal to "." or "..", longer than `maxNameLength`, or contains any
    * character outside `legalChars`. Returns normally on success. */
  def validate(topic: String) {
    if (topic.length <= 0)
      throw new org.apache.kafka.common.errors.InvalidTopicException("topic name is illegal, can't be empty")
    else if (topic.equals(".") || topic.equals(".."))
      throw new org.apache.kafka.common.errors.InvalidTopicException("topic name cannot be \\".\\" or \\"..\\"")
    else if (topic.length > maxNameLength)
      throw new org.apache.kafka.common.errors.InvalidTopicException("topic name is illegal, can't be longer than " + maxNameLength + " characters")
    // The regex finds the longest legal prefix; anything shorter than the
    // whole name means an illegal character is present somewhere.
    rgx.findFirstIn(topic) match {
      case Some(t) =>
        if (!t.equals(topic))
          throw new org.apache.kafka.common.errors.InvalidTopicException("topic name " + topic + " is illegal, contains a character other than ASCII alphanumerics, '.', '_' and '-'")
      case None => throw new org.apache.kafka.common.errors.InvalidTopicException("topic name " + topic + " is illegal, contains a character other than ASCII alphanumerics, '.', '_' and '-'")
    }
  }

  /**
   * Due to limitations in metric names, topics with a period ('.') or underscore ('_') could collide.
   *
   * @param topic The topic to check for colliding character
   * @return true if the topic has collision characters
   */
  def hasCollisionChars(topic: String): Boolean = {
    topic.contains("_") || topic.contains(".")
  }

  /**
   * Returns true if the topicNames collide due to a period ('.') or underscore ('_') in the same position.
   *
   * @param topicA A topic to check for collision
   * @param topicB A topic to check for collision
   * @return true if the topics collide
   */
  def hasCollision(topicA: String, topicB: String): Boolean = {
    // Metric names replace '.' with '_', so names equal after that
    // replacement map to the same metric.
    topicA.replace('.', '_') == topicB.replace('.', '_')
  }
}
| Mszak/kafka | core/src/main/scala/kafka/common/Topic.scala | Scala | apache-2.0 | 2,835 |
package utils
/** This object contains new string contextes for specific uses.
*/
object stringContextes {

  /** Wraps a compiled [[java.util.regex.Pattern]] so it can be used as an
    * extractor in pattern matches: the match succeeds when the whole input
    * string matches the pattern. */
  implicit class RegexPattern(val pattern: java.util.regex.Pattern) extends AnyVal {
    /** True when `s` matches the pattern in its entirety. */
    def unapply(s: String): Boolean = pattern.matcher(s).matches
  }

  // use r"" to build the corresponding regex
  implicit class RegexPatternContext(val sc: StringContext) extends AnyVal {
    /** Interpolated form: compiles the fully substituted string. */
    def r(args: Any*): RegexPattern = new RegexPattern(java.util.regex.Pattern.compile(sc.s(args: _*)))
    /** Non-interpolated form: compiles the single literal part directly. */
    def r: RegexPattern = new RegexPattern(java.util.regex.Pattern.compile(sc.parts(0)))
  }

  // use split"" to split the string into the sequence of / separated elements
  implicit class RegexSeqContext(val sc: StringContext) extends AnyVal {
    /** Splits the substituted string on '/'; a lone "/" yields Seq(""). */
    def split(args: Any*): Seq[String] = { val x = sc.s(args: _*); if (x == "/") Seq("") else x.split("/", -1) }
    /** Non-interpolated form.
      * BUG FIX: previously this forwarded `sc.parts(0)` as an interpolation
      * *argument*, which made `StringContext.s` throw (a context with one
      * part cannot take one argument). Forwarding zero arguments makes
      * `sc.s()` return the literal part, as intended. */
    def split: Seq[String] = split(Nil: _*)
  }
}
import scala.language.higherKinds
// Minimal definitions exercising a higher-kinded abstract type member used
// through a type projection (C#F below). The file path (pos/t11239) indicates
// a scalac regression test — presumably it only needs to typecheck; do not
// "simplify" these shapes.
trait Request[F[_]]
trait Context { type F[_] }
final case class AuthedRequest[F[_], A](authInfo: A, req: Request[F])
// C#F projects the higher-kinded member F out of an arbitrary Context subtype.
final case class HttpRequestContext[C <: Context, Ctx](request: AuthedRequest[C#F, Ctx], context: Ctx)
| scala/scala | test/files/pos/t11239.scala | Scala | apache-2.0 | 256 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.entity
import scala.util.Failure
import scala.util.Success
import scala.util.Try
import spray.json._
import org.apache.openwhisk.core.entity.size.SizeString
/**
* A FullyQualifiedEntityName (qualified name) is a triple consisting of
* - EntityPath: the namespace and package where the entity is located
* - EntityName: the name of the entity
* - Version: the semantic version of the resource
* - Binding : the entity path of the package binding, it can be used by entities that support binding
*/
protected[core] case class FullyQualifiedEntityName(path: EntityPath,
                                                    name: EntityName,
                                                    version: Option[SemVer] = None,
                                                    binding: Option[EntityPath] = None)
    extends ByteSizeable {
  // Canonical "namespace[/package]/name" form; excludes version and binding.
  private val qualifiedName: String = path + EntityPath.PATHSEP + name

  /** Resolves default namespace in path to given name if the root path is the default namespace. */
  def resolve(namespace: EntityName) = FullyQualifiedEntityName(path.resolveNamespace(namespace), name, version)

  /** @return full path including name, i.e., "path/name" */
  def fullPath: EntityPath = path.addPath(name)

  /**
   * Creates new fully qualified entity name that shifts the name into the path and adds a new name:
   * (p, n).add(x) -> (p/n, x).
   *
   * @return new fully qualified name
   */
  def add(n: EntityName) = FullyQualifiedEntityName(path.addPath(name), n)

  // The document id is the qualified name (no version, no binding).
  def toDocId = new DocId(qualifiedName)

  // First segment of the path, i.e. the owning namespace.
  def namespace: EntityName = path.root

  def qualifiedNameWithLeadingSlash: String = EntityPath.PATHSEP + qualifiedName

  // Human-readable form, with an "@version" suffix when a version is present.
  def asString = path.addPath(name) + version.map("@" + _.toString).getOrElse("")

  // ByteSizeable accounting is based on the qualified name only.
  override def size = qualifiedName.sizeInBytes
  override def toString = asString
  // NOTE: hashCode ignores version/binding while the case-class equals uses
  // all fields. Equal values still hash equally (same path/name), so the
  // equals/hashCode contract holds; unequal versions merely share buckets.
  override def hashCode = qualifiedName.hashCode
}
protected[core] object FullyQualifiedEntityName extends DefaultJsonProtocol {
  // must use jsonFormat with explicit field names and order because class extends a trait
  private val caseClassSerdes = jsonFormat(FullyQualifiedEntityName.apply _, "path", "name", "version", "binding")

  /** Primary JSON format: writes the full case-class object; reads either
    * the object form or a bare string (a document id). */
  protected[core] val serdes = new RootJsonFormat[FullyQualifiedEntityName] {
    def write(n: FullyQualifiedEntityName) = caseClassSerdes.write(n)
    def read(value: JsValue) =
      Try {
        value match {
          case JsObject(fields) => caseClassSerdes.read(value)
          // tolerate dual serialization modes; Exec serializes a sequence of fully qualified names
          // by their document id which excludes the version (hence it is just a string)
          case JsString(name) => EntityPath(name).toFullyQualifiedEntityName
          case _ => deserializationError("fully qualified name malformed")
        }
      } match {
        case Success(s) => s
        // surface IllegalArgumentException messages (e.g. malformed paths) verbatim
        case Failure(t: IllegalArgumentException) => deserializationError(t.getMessage)
        case Failure(t) => deserializationError("fully qualified name malformed")
      }
  }

  // alternate serializer that drops version
  protected[entity] val serdesAsDocId = new RootJsonFormat[FullyQualifiedEntityName] {
    def write(n: FullyQualifiedEntityName) = n.toDocId.toJson
    def read(value: JsValue) =
      Try {
        value match {
          case JsString(name) => EntityPath(name).toFullyQualifiedEntityName
          case _ => deserializationError("fully qualified name malformed")
        }
      } match {
        case Success(s) => s
        case Failure(t: IllegalArgumentException) => deserializationError(t.getMessage)
        case Failure(t) => deserializationError("fully qualified name malformed")
      }
  }

  /**
   * Converts the name to a fully qualified name.
   * There are 3 cases:
   * - name is not a valid EntityPath => error
   * - name is a valid single segment with a leading slash => error
   * - name is a valid single segment without a leading slash => map it to user namespace, default package
   * - name is a valid multi segment with a leading slash => treat it as fully qualified name (max segments allowed: 3)
   * - name is a valid multi segment without a leading slash => treat it as package name and resolve it to the user namespace (max segments allowed: 3)
   *
   * The last case is ambiguous as '/namespace/action' and 'package/action' will be the same EntityPath value.
   * The action should use a fully qualified result to avoid the ambiguity.
   *
   * @param name name of the action to fully qualify
   * @param namespace the user namespace for the simple resolution
   * @return Some(FullyQualifiedName) if the name is valid otherwise None
   */
  protected[core] def resolveName(name: JsValue, namespace: EntityName): Option[FullyQualifiedEntityName] = {
    name match {
      case v @ JsString(s) =>
        Try(v.convertTo[EntityPath]).toOption
          .flatMap { path =>
            val n = path.segments
            val leadingSlash = s.startsWith(EntityPath.PATHSEP)
            // reject: no segments, too many segments, or a lone leading-slash segment
            if (n < 1 || n > 3 || (leadingSlash && n == 1) || (!leadingSlash && n > 3)) None
            else if (leadingSlash || n == 3) Some(path)
            // single/double segment without slash: resolve under the user namespace
            else Some(namespace.toPath.addPath(path))
          }
          .map(_.resolveNamespace(namespace).toFullyQualifiedEntityName)
      case _ => None
    }
  }
}
| jasonpet/openwhisk | common/scala/src/main/scala/org/apache/openwhisk/core/entity/FullyQualifiedEntityName.scala | Scala | apache-2.0 | 6,265 |
package com.trafficland.augmentsbt.rpm
import sbt._
import sbt.Keys._
import com.typesafe.sbt.packager.Keys._
import com.typesafe.sbt.packager.archetypes.ServerLoader
import com.typesafe.sbt.SbtNativePackager.Rpm
import com.trafficland.augmentsbt.distribute.StartupScriptPlugin
import scala.collection.Seq
import com.trafficland.augmentsbt.rpm.Keys._
/** sbt AutoPlugin adding CentOS-specific RPM packaging settings
  * (vendor-directory install location, systemd service loading). */
object CentOSRPMPlugin extends AutoPlugin {
  import autoImport._

  // Activates only when both the base RPM plugin and the startup-script
  // plugin are enabled on a project.
  override def requires: Plugins = RPMPlugin && StartupScriptPlugin

  object autoImport {
    // Directory holding packaging scripts; defaults to <base>/scripts below.
    val scriptsDirectory: SettingKey[File] = SettingKey[File]("scripts-directory")
  }

  override def projectSettings = Seq(
    // sbt 0.12-style <<= assignment, equivalent to
    // scriptsDirectory := baseDirectory.value / "scripts".
    scriptsDirectory <<= baseDirectory apply { bd => bd / "scripts" },
    // Install under the vendor directory instead of native-packager's default.
    defaultLinuxInstallLocation := vendorDirectory.value,
    rpmBrpJavaRepackJars := true, // Upstream issue: Setting this to true disables repacking of jars, contrary to its name
    // Register the packaged service with systemd.
    serverLoading in Rpm := ServerLoader.Systemd
  )
}
| ereichert/augment-sbt | src/main/scala/com/trafficland/augmentsbt/rpm/CentOSRPMPlugin.scala | Scala | apache-2.0 | 945 |
/*
* Copyright 2014 Alan Rodas Bonjour
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.alanrodas.scaland.cli
import scala.collection.mutable.ListBuffer
import scala.compat.Platform.currentTime
/** The `CLIApp` trait can be used to quickly turn objects
* into executable programs that executes command line argument
* command. Here is an example:
* {{{
* object Main extends CLIApp {
* Console.println("Hello World: " + (args mkString ", "))
* }
* }}}
* Here, object `Main` inherits the `main` method of `App`.
*
* `args` returns the current command line arguments as an array.
*
* ==Caveats==
*
* '''''It should be noted that this trait is implemented using the [[DelayedInit]]
* functionality, which means that fields of the object will not have been initialized
* before the main method has been executed.'''''
*
* It should also be noted that the `main` method should not be overridden:
* the whole class body becomes the “main method”.
*
* Future versions of this trait will no longer extend `DelayedInit`.
*
* This is based on Martin Odersky's code for App (ver 2.1 15/02/2011)
*
* @author Alan Rodas Bonjour
* @version 0.1
*/
trait CLIApp extends DelayedInit {
  /** ***************** START DELAYED INIT IMPL ************************/
  /** The time when the execution of this program started, in milliseconds since 1
    * January 1970 UTC. */
  // @deprecatedOverriding("executionStart should not be overridden", "2.11.0")
  val executionStart: Long = currentTime

  /** The command line arguments passed to the application's `main` method.
    */
  // @deprecatedOverriding("args should not be overridden", "2.11.0")
  protected def args: Array[String] = _args

  // Populated by main() before any captured init body runs.
  private var _args: Array[String] = _

  // Constructor bodies of the inheriting object, captured by delayedInit
  // and replayed inside main().
  private val initCode = new ListBuffer[() => Unit]

  /** The init hook. This saves all initialization code for execution within `main`.
    * This method is normally never called directly from user code.
    * Instead it is called as compiler-generated code for those classes and objects
    * (but not traits) that inherit from the `DelayedInit` trait and that do not
    * themselves define a `delayedInit` method.
    * @param body the initialization code to be stored for later execution
    */
  @deprecated("The delayedInit mechanism will disappear.", "2.11.0")
  override def delayedInit(body: => Unit) {
    initCode += (() => body)
  }

  /** The main method.
    * This stores all arguments so that they can be retrieved with `args`
    * and then executes all initialization code segments in the order in which
    * they were passed to `delayedInit`.
    * @param args the arguments passed to the main method
    */
  // @deprecatedOverriding("main should not be overridden", "2.11.0")
  def main(args: Array[String]) = {
    this._args = args
    // Replay the inheriting object's body (this is where commands get
    // registered on `commandManager`).
    for (proc <- initCode) proc()
    try {
      commandManager.setSigns(paramSigns._1, paramSigns._2)
      commandManager.execute(args)
    } catch {
      // Invalid command-line input is routed to the (overridable) handler
      // instead of crashing with a stack trace.
      case e: IllegalCommandLineArgumentsException => onIllegalCommandLineArgument(e)
    }
    // Mirrors scala.App: print total runtime when -Dscala.time is set.
    if (util.Properties.propIsSet("scala.time")) {
      val total = currentTime - executionStart
      Console.println("[total " + total + "ms]")
    }
  }
  /** ***************** END DELAYED INIT IMPL ************************/

  /** Define the short and long parameter sign to use. Defaults to (''"-'', ''--''). */
  protected var paramSigns : (String, String) =
    (CommandManager.defaultShortParamSign, CommandManager.defaultLongParamSign)

  /** Hold the command that the user defined. */
  implicit protected val commandManager = new CommandManager()

  /** Define the action to take in case the program was called with an invalid set of values. */
  protected val onIllegalCommandLineArgument = (e: IllegalCommandLineArgumentsException) => {
    println(e.getMessage)
  }
}
| alanrodas/scaland | cli/src/main/scala/com/alanrodas/scaland/cli/CLIApp.scala | Scala | apache-2.0 | 4,401 |
package io.buoyant.namer.serversets
import com.twitter.finagle._
import com.twitter.util.{Activity, Var}
import io.buoyant.namer.NamerTestUtil
import java.net.{InetAddress, InetSocketAddress}
import org.scalatest.FunSuite
/** Exercises ServersetNamer binding behaviour by stubbing out resolution:
  * `pathAddr` is returned for the serverset path under test and `otherAddr`
  * for every other path, so each test controls both sides explicitly. */
class ServersetNamerTest extends FunSuite with NamerTestUtil {
  val prefix = Path.read("/#/some/prefix")
  // A concrete bound address used wherever a successful binding is needed.
  val loopback = Address(new InetSocketAddress(InetAddress.getLoopbackAddress, 1))
  // Address observed for the exact path handed to namer(path).
  val pathAddr = Var[Addr](Addr.Pending)
  // Address observed for any other resolution request.
  val otherAddr = Var[Addr](Addr.Pending)

  test("falls back to path prefixes") {
    pathAddr() = Addr.Bound(loopback)
    otherAddr() = Addr.Neg
    // Only /foo/bar binds; the residual /x/y/z must be preserved.
    namer("/foo/bar").lookup(Path.read("/foo/bar/x/y/z")).sample() match {
      case NameTree.Leaf(name: Name.Bound) =>
        assert(name.id == prefix ++ Path.read("/foo/bar"))
        assert(name.path == Path.read("/x/y/z"))
      case _ => fail("failed to bind")
    }
  }

  test("neg") {
    otherAddr() = Addr.Neg
    assert(
      namer("/this").lookup(Path.read("/that")).sample() == NameTree.Neg
    )
  }

  test("exact match") {
    pathAddr() = Addr.Bound(loopback)
    // Full path consumed: the residual must be empty.
    namer("/foo/bar").lookup(Path.read("/foo/bar")).sample() match {
      case NameTree.Leaf(name: Name.Bound) =>
        assert(name.id == prefix ++ Path.read("/foo/bar"))
        assert(name.path == Path.empty)
      case _ => fail("failed to bind")
    }
  }

  test("empty path") {
    pathAddr() = Addr.Bound(loopback)
    otherAddr() = Addr.Neg
    namer("/").lookup(Path.read("/x/y/z")).sample() match {
      case NameTree.Leaf(name: Name.Bound) =>
        assert(name.id == prefix)
        assert(name.path == Path.read("/x/y/z"))
      case _ => fail("failed to bind")
    }
  }

  test("id is bound name") {
    pathAddr() = Addr.Bound(loopback)
    val testNamer = namer("/test")
    assertBoundIdAutobinds(testNamer, prefix ++ Path.read("/test"), prefix)
  }

  test("handles pending") {
    otherAddr() = Addr.Pending
    val act = namer("/foo/bar").lookup(Path.read("/foo/bar/x/y/z"))
    // While resolution is pending the activity must stay pending...
    assert(act.run.sample() == Activity.Pending)
    otherAddr() = Addr.Neg
    pathAddr() = Addr.Bound(loopback)
    // ...and flip to a bound leaf once an address arrives.
    act.sample() match {
      case NameTree.Leaf(name: Name.Bound) =>
        assert(name.id == prefix ++ Path.read("/foo/bar"))
        assert(name.path == Path.read("/x/y/z"))
      case x => fail("failed to bind")
    }
  }

  // Namer under test with resolution stubbed: the expected zk2 spec for
  // `path` maps to pathAddr, everything else to otherAddr.
  def namer(path: String) = new ServersetNamer("host", prefix) {
    /** Resolve a resolver string to a Var[Addr]. */
    override protected[this] def resolve(spec: String): Var[Addr] =
      if (spec == s"zk2!host!$path") pathAddr
      else otherAddr
  }
}
| denverwilliams/linkerd | namer/serversets/src/test/scala/io/buoyant/namer/serversets/ServersetNamerTest.scala | Scala | apache-2.0 | 2,586 |
package dbpedia.config
object GeoCoordinateParserConfig {

  /** Wiki template names recognised as geo-coordinate templates.
    * Not language-specific for now — making them so might be redundant. */
  val coordTemplateNames: Set[String] = Set(
    "coord", "coor dms", "coor dm", "coor",
    "location", "geocoordinate", "coords", "coordenadas"
  )
  // Further candidates seen in the wild (currently disabled):
  // "coor title dms", "coor title d", "coor title dm", "coorheader",
  // "coor at dm", "coor at dms", "coor at d", "coor d/new", "coor dm/new",
  // "coor dms/new", "coor dec", "coor/new", "coor dms/archive001",
  // "coord/conversion", "coord/templates", "location dec"

  /** Per-language mapping of longitude letters to the English ones
    * ("E" for East, "W" for West); e.g. German "O" (Ost) means East. */
  val longitudeLetterMap: Map[String, Map[String, String]] = Map(
    "de" -> Map("E" -> "E", "O" -> "E", "W" -> "W"),
    "en" -> Map("E" -> "E", "W" -> "W"),
    "fr" -> Map("E" -> "E", "O" -> "W", "W" -> "W")
  )

  /** Per-language mapping of latitude letters to the English ones
    * ("N" for North, "S" for South). */
  val latitudeLetterMap: Map[String, Map[String, String]] = Map(
    "en" -> Map("N" -> "N", "S" -> "S")
  )
}
| FnOio/dbpedia-parsing-functions-scala | src/main/scala/dbpedia/config/GeoCoordinateParserConfig.scala | Scala | gpl-2.0 | 1,142 |
package nvim
/** A position in a buffer, addressed by row and column. */
final case class Position(row: Int, col: Int) {
  // Compact rendering used in logs; must stay exactly "[row=R,col=C]".
  override def toString: String = "[row=" + row + ",col=" + col + "]"
}
| sschaef/scalajs-test | nvim/src/main/scala/nvim/Position.scala | Scala | mit | 113 |
package breeze.linalg.support
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import breeze.math.Complex
import scala.reflect.ClassTag
import breeze.linalg.support.CanTraverseKeyValuePairs.KeyValuePairsVisitor
/**
* Marker for being able to traverse over the values in a collection/tensor
*
* @author dramage
* @author dlwh
*/
trait CanTraverseKeyValuePairs[From, K, A] {

  /** Traverses all key/value pairs from the given collection, feeding each
    * pair (and any implicit zeros) to the visitor. */
  def traverse(from: From, fn: KeyValuePairsVisitor[K, A]): Unit

  /** True when `from` can be traversed more than once (i.e. traversal does
    * not consume it). */
  def isTraversableAgain(from: From):Boolean
}
object CanTraverseKeyValuePairs {

  /** Callback interface handed to [[CanTraverseKeyValuePairs#traverse]].
    * Specialized on Int keys and on the value type to avoid boxing. */
  trait KeyValuePairsVisitor[@specialized(Int) K, @specialized A] {
    // Called once per (key, value) pair.
    def visit(k: K, a: A)
    // Bulk-visit a whole array; `indices` maps a raw array offset to its key.
    def visitArray(indices: Int=>K, arr: Array[A]):Unit = visitArray(indices, arr, 0, arr.length, 1)
    // Bulk-visit a strided slice: logical element i lives at offset + i*stride.
    def visitArray(indices: Int=>K, arr: Array[A], offset: Int, length: Int, stride: Int):Unit = {
      var i = 0
      while(i < length) {
        visit(indices(i * stride + offset), arr(i * stride + offset))
        i += 1
      }
    }
    // Called once for the implicit zeros of a sparse container: their count,
    // their keys, and the shared zero value.
    def zeros(numZero: Int, zeroKeys: Iterator[K], zeroValue: A)
  }
  //
  // Arrays
  //
  /** Traversal instance for plain arrays: keys are the array indices. */
  class OpArray[@specialized(Double, Int, Float, Long) A]
    extends CanTraverseKeyValuePairs[Array[A], Int, A] {

    /** Traverses all values from the given collection. */
    def traverse(from: Array[A], fn: KeyValuePairsVisitor[Int, A]): Unit = {
      fn.visitArray(0 until from.length, from)
    }

    // Arrays can always be traversed repeatedly.
    def isTraversableAgain(from: Array[A]): Boolean = true
  }
  implicit def opArray[@specialized A] =
    new OpArray[A]
  // Pre-instantiated, specialized instances for the common element types.
  implicit object OpArrayII extends OpArray[Int]
  implicit object OpArraySS extends OpArray[Short]
  implicit object OpArrayLL extends OpArray[Long]
  implicit object OpArrayFF extends OpArray[Float]
  implicit object OpArrayDD extends OpArray[Double]
  implicit object OpArrayCC extends OpArray[Complex]
}
| wstcpyt/breeze | math/src/main/scala/breeze/linalg/support/CanTraverseKeyValuePairs.scala | Scala | apache-2.0 | 2,374 |
package core
object Player {
  /** Builds a starting player from the card library: a shuffled 10-card deck
    * (3 estates + 7 coppers), with the first 5 cards drawn as the opening hand
    * and the remaining 5 left as the deck. */
  def apply(name: String)(implicit cardlib: CardLibrary): Player = {
    val deck = util.Random.shuffle(Seq.fill(3)(cardlib.estate) ++ Seq.fill(7)(cardlib.copper))
    new Player(name,
      deck = deck.drop(5).toVector,
      // NOTE(review): the free remodel + throne room appended to the opening
      // hand look like leftover debugging aids — a standard opening hand is
      // just the 5 drawn cards. Confirm whether this is intentional.
      hand = deck.take(5).toVector ++ Vector(cardlib.remodel, cardlib.throneRoom),
      played = Vector.empty[Card],
      discard = Vector.empty[Card])
  }
}
case class Player(name: String,
                  deck: Vector[Card],
                  hand: Vector[Card],
                  played: Vector[Card],
                  discard: Vector[Card]) {

  /** Draws up to `count` cards into the hand, reshuffling the discard pile
    * into a fresh deck whenever the deck runs dry. Stops early when both the
    * deck and the discard pile are exhausted. */
  @annotation.tailrec
  final def drawCards(count: Int): Player =
    if (count == 0) this
    else if (deck.nonEmpty)
      // Move the top card of the deck into the hand and keep drawing.
      copy(deck = deck.tail, hand = hand :+ deck.head).drawCards(count - 1)
    else if (discard.nonEmpty)
      // Deck exhausted: shuffle the discard pile into a new deck, then retry.
      copy(deck = util.Random.shuffle(discard), discard = Vector.empty).drawCards(count)
    else
      this // nothing left anywhere to draw from

  /** End-of-turn cleanup: everything in hand and in play goes onto the
    * discard pile, then a fresh hand of five is drawn. */
  def cleanup: Player = {
    val pile = played ++ hand ++ discard
    copy(hand = Vector.empty, played = Vector.empty, discard = pile).drawCards(5)
  }
}
package scala.collection.parallel.ops
import scala.collection.parallel._
/** Fixed pools of operands (predicates, mapping functions, fold arguments)
  * fed to the parallel-collections property checks for Int collections. */
trait IntOperators extends Operators[Int] {
  // Associative binary operators used for reduce checks.
  def reduceOperators = List(_ + _, _ * _, math.min(_, _), math.max(_, _), _ ^ _)
  def countPredicates = List(
    x => true,
    _ >= 0, _ < 0, _ < 50, _ < 500, _ < 5000, _ < 50000, _ % 2 == 0, _ == 99,
    x => x > 50 && x < 150,
    x => x > 350 && x < 550,
    x => (x > 1000 && x < 1500) || (x > 400 && x < 500)
  )
  def forallPredicates = List(_ >= 0, _ < 0, _ % 2 == 0, _ != 55, _ != 505, _ != 5005)
  def existsPredicates = List(_ >= 0, _ < 0, _ % 2 == 0, _ == 55, _ == 505, _ == 5005)
  def findPredicates = List(_ >= 0, _ % 2 == 0, _ < 0, _ == 50, _ == 500, _ == 5000)
  def mapFunctions = List(-_, math.abs(_), _ % 2, _ % 3, _ % 4, _ % 150, _ % 500)
  // Partial functions with varying domains (total, positive-only, multiples of 3).
  def partialMapFunctions = List({case x => -x}, { case 0 => -1; case x if x > 0 => x + 1}, {case x if x % 3 == 0 => x / 3})
  def flatMapFunctions = List(
    (n: Int) => if (n < 0) List() else if (n % 2 == 0) List(1, 2, 3) else List(4, 5, 6),
    (n: Int) => List[Int](),
    (n: Int) => if (n == 0) List(1, 2, 3, 4, 5) else if (n < 0) List(1, 2, 3) else List()
  )
  def filterPredicates = List(
    _ % 2 == 0, _ % 3 == 0,
    _ % 4 != 0, _ % 17 != 0,
    n => n > 50 && n < 100,
    _ >= 0, _ < 0, _ == 99,
    _ > 500, _ > 5000, _ > 50000,
    _ < 500, _ < 50, _ < -50, _ < -5e5,
    x => true, x => false,
    x => x % 53 == 0 && x % 17 == 0
  )
  // filterNot/partition deliberately reuse the filter pool.
  def filterNotPredicates = filterPredicates
  def partitionPredicates = filterPredicates
  def takeWhilePredicates = List(
    _ != 50, _ != 500, _ != 5000, _ != 50000, _ % 2 == 0, _ % 3 == 1, _ % 47 != 0,
    _ < 100, _ < 1000, _ < 10000, _ < 0,
    _ < -100, _ < -1000, _ > -200, _ > -50,
    n => -90 < n && n < -10,
    n => 50 < n && n < 550,
    n => 5000 < n && n < 7500,
    n => -50 < n && n < 450
  )
  def dropWhilePredicates = takeWhilePredicates
  def spanPredicates = takeWhilePredicates
  // (zero element, fold operator) pairs; zeros are the operators' identities.
  def foldArguments = List(
    (0, _ + _),
    (1, _ * _),
    (Int.MinValue, math.max(_, _)),
    (Int.MaxValue, math.min(_, _))
  )
  def addAllTraversables = List(
    List[Int](),
    List(1),
    List(1, 2),
    List(1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
    Array.fill(1000)(1).toSeq
  )
  def newArray(sz: Int) = new Array[Int](sz)
  def groupByFunctions = List(
    _ % 2, _ % 3, _ % 5, _ % 10, _ % 154, _% 3217,
    _ * 2, _ + 1
  )
}
/** Adds sequence-specific operand pools (segment/index predicates, sample
  * sequences) on top of [[IntOperators]]. */
trait IntSeqOperators extends IntOperators with SeqOperators[Int] {
  def segmentLengthPredicates = List(
    _ % 2 == 0, _ > 0, _ >= 0, _ < 0, _ <= 0, _ > -5000, _ > 5000, _ % 541 != 0, _ < -50, _ > 500,
    n => -90 < n && n < -10, n => 500 < n && n < 1500
  )
  def indexWherePredicates = List(
    _ % 2 == 0, _ % 11 == 0, _ % 123 == 0, _ % 901 == 0,
    _ > 0, _ >= 0, _ < 0, _ <= 0,
    _ > 50, _ > 500, _ > 5000,
    _ < -10, _ < -100, _ < -1000,
    n => n > 50 && n < 100,
    n => n * n > 1000000 && n % 111 == 0
  )
  def lastIndexWherePredicates = List(
    _ % 2 == 0, _ % 17 == 0, _ % 314 == 0, _ % 1017 == 0,
    _ > 0, _ >= 0, _ < 0, _ <= 0,
    _ > 50, _ > 500, _ > 5000,
    _ < -20, _ < -200, _ < -2000,
    _ == 0,
    n => n > -40 && n < 40,
    n => n > -80 && n < -10,
    n => n > 110 && n < 150
  )
  def reverseMapFunctions = List(-_, n => n * n, _ + 1)
  // Sequences compared against the collection under test by sameElements.
  def sameElementsSeqs = List(
    List[Int](),
    List(1),
    List(1, 2, 3, 4, 5, 6, 7, 8, 9),
    Array.fill(150)(1).toSeq,
    Array.fill(1000)(1).toSeq
  )
  // Candidate prefixes/suffixes for startsWith/endsWith checks.
  def startEndSeqs = List(
    Nil,
    List(1),
    List(1, 2, 3, 4, 5),
    List(0, 1, 2, 3, 4, 5),
    List(4, 5, 6, 7, 8, 9, 10),
    List(4, 5, 6, 7, 8, 9, 0),
    List(-4, -3, -2, -1)
  )
}
| felixmulder/scala | test/files/scalacheck/parallel-collections/IntOperators.scala | Scala | bsd-3-clause | 3,638 |
package s99.p17
// P17 (*) Split a list into two parts.
// The length of the first part is given. Use a Tuple for your result.
//
// Example:
// scala> split(3, List('a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, 'i, 'j, 'k))
// res0: (List[Symbol], List[Symbol]) = (List('a, 'b, 'c),List('d, 'e, 'f, 'g, 'h, 'i, 'j, 'k))
object P17Answer {

  /** Delegates straight to the standard library's `splitAt`. */
  def splitBuiltin[A](n: Int, ls: List[A]): (List[A], List[A]) = ls.splitAt(n)

  /** Plain (non-tail) recursion: peel off heads until n elements have been
    * moved into the first half; an exhausted list ends the split early. */
  def splitRecursive[A](n: Int, ls: List[A]): (List[A], List[A]) =
    if (ls.isEmpty) (Nil, Nil)
    else if (n == 0) (Nil, ls)
    else {
      val (front, back) = splitRecursive(n - 1, ls.tail)
      (ls.head :: front, back)
    }

  /** Tail-recursive variant: accumulate the first half in reverse order,
    * then flip it once at the end. */
  def splitTailRecursive[A](n: Int, ls: List[A]): (List[A], List[A]) = {
    @annotation.tailrec
    def go(remaining: Int, rest: List[A], acc: List[A]): (List[A], List[A]) =
      if (rest.isEmpty) (acc.reverse, Nil)
      else if (remaining == 0) (acc.reverse, rest)
      else go(remaining - 1, rest.tail, rest.head :: acc)
    go(n, ls, Nil)
  }

  /** Functional one-liner built from take/drop (barely not "builtin"). */
  def splitFunctional[A](n: Int, ls: List[A]): (List[A], List[A]) =
    (ls.take(n), ls.drop(n))
}
package com.typesafe.sbt
package packager
package rpm
import linux._
import sbt._
/** RPM Specific keys. */
trait RpmKeys {
// METADATA keys.
val rpmVendor = SettingKey[String]("rpm-vendor", "Name of the vendor for this RPM.")
val rpmOs = SettingKey[String]("rpm-os", "Name of the os for this RPM.")
val rpmRelease = SettingKey[String]("rpm-release", "Special release number for this rpm (vs. the software).")
val rpmPrefix = SettingKey[Option[String]]("rpm-prefix", "File system prefix for relocatable package.")
val rpmMetadata = SettingKey[RpmMetadata]("rpm-metadata", "Metadata associated with the generated RPM.")
// Changelog
val rpmChangelogFile = SettingKey[Option[String]]("rpm-changelog-file", "RPM changelog file to be imported")
// DESCRIPTION KEYS
// TODO - Summary and license are required.
val rpmLicense = SettingKey[Option[String]]("rpm-license", "License of the code within the RPM.")
val rpmDistribution = SettingKey[Option[String]]("rpm-distribution")
val rpmUrl = SettingKey[Option[String]]("rpm-url", "Url to include in the RPM.")
val rpmGroup = SettingKey[Option[String]]("rpm-group", "Group to associate with the RPM.")
val rpmPackager = SettingKey[Option[String]]("rpm-packger", "Person who packaged this rpm.")
val rpmIcon = SettingKey[Option[String]]("rpm-icon", "name of the icon to use with this RPM.")
val rpmDescription = SettingKey[RpmDescription]("rpm-description", "Description of this rpm.")
// DEPENDENCIES
val rpmAutoprov = SettingKey[String]("rpm-autoprov", "enable/disable automatic processing of 'provides' (\\"yes\\"/\\"no\\").")
val rpmAutoreq = SettingKey[String]("rpm-autoreq", "enable/disable automatic processing of requirements (\\"yes\\"/\\"no\\").")
val rpmProvides = SettingKey[Seq[String]]("rpm-provides", "Packages this RPM provides.")
val rpmRequirements = SettingKey[Seq[String]]("rpm-requirements", "Packages this RPM requires.")
val rpmPrerequisites = SettingKey[Seq[String]]("rpm-prerequisites", "Packages this RPM need *before* installation.")
val rpmObsoletes = SettingKey[Seq[String]]("rpm-obsoletes", "Packages this RPM makes obsolete.")
val rpmConflicts = SettingKey[Seq[String]]("rpm-conflicts", "Packages this RPM conflicts with.")
val rpmDependencies = SettingKey[RpmDependencies]("rpm-dependencies", "Configuration of dependency info for this RPM.")
// SPEC
val rpmSpecConfig = TaskKey[RpmSpec]("rpm-spec-config", "All the configuration for an RPM .spec file.")
// SCRIPTS
val rpmScripts = SettingKey[RpmScripts]("rpm-scripts", "Configuration of pre- and post-integration scripts.")
val rpmScriptsDirectory = SettingKey[File](
"rpm-scriptlets-directory",
"Directory where all debian control scripts reside. Default is 'src/rpm/scriptlets'"
)
val rpmPretrans = SettingKey[Option[String]]("rpm-pretrans", "%pretrans scriptlet")
val rpmPre = SettingKey[Option[String]]("rpm-pre", "%pre scriptlet")
val rpmVerifyscript = SettingKey[Option[String]]("rpm-verifyscript", "%verifyscript scriptlet")
val rpmPost = SettingKey[Option[String]]("rpm-post", "%post scriptlet")
val rpmPosttrans = SettingKey[Option[String]]("rpm-posttrans", "%posttrans scriptlet")
val rpmPreun = SettingKey[Option[String]]("rpm-preun", "%preun scriptlet")
val rpmPostun = SettingKey[Option[String]]("rpm-postun", "%postun scriptlet")
val rpmBrpJavaRepackJars = SettingKey[Boolean]("brp-java-repack-jars", """Overrides the __os_post_install scriptlet
http://swaeku.github.io/blog/2013/08/05/how-to-disable-brp-java-repack-jars-during-rpm-build/ for details""")
// Building
val rpmLint = TaskKey[Unit]("rpm-lint", "Runs rpmlint program against the genreated RPM, if available.")
val rpmDaemonLogFile = SettingKey[String]("rpm-daemon-log-file", "Name of the log file generated by application daemon")
}
| schon/sbt-native-packager | src/main/scala/com/typesafe/sbt/packager/rpm/Keys.scala | Scala | bsd-2-clause | 3,838 |
package gremlin.scala
import org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerGraph
import org.scalatest.wordspec.AnyWordSpec
import org.scalatest.matchers.should.Matchers
class ArrowSyntaxSpec extends AnyWordSpec with Matchers {
"A --> B creates an edge".which {
"has a label" in new Fixture {
paris --- Eurostar --> london
paris.out(Eurostar).head() shouldBe london
}
"has a label and one property" in new Fixture {
paris --- (Eurostar, Name -> "alpha") --> london
paris.out(Eurostar).head() shouldBe london
paris.outE(Eurostar).value(Name).head() shouldBe "alpha"
}
"has a label and multiple properties" in new Fixture {
paris --- (Eurostar, Name -> "alpha", Length -> 100) --> london
paris.out(Eurostar).head() shouldBe london
paris.outE(Eurostar).value(Name).head() shouldBe "alpha"
paris.outE(Eurostar).value(Length).head() shouldBe 100
}
"has a label and multiple properties as Map " in new Fixture {
paris --- (Eurostar, properties) --> london
paris.out(Eurostar).head() shouldBe london
paris.outE(Eurostar).value(Name).head() shouldBe "alpha"
paris.outE(Eurostar).value(Length).head() shouldBe 100
}
}
"A <-- B creates an edge".which {
"has a label" in new Fixture {
paris <-- Eurostar --- london
london.out(Eurostar).head() shouldBe paris
}
"has a label and one property" in new Fixture {
paris <-- (Eurostar, Name -> "alpha") --- london
paris.in(Eurostar).head() shouldBe london
paris.inE(Eurostar).value(Name).head() shouldBe "alpha"
}
"has a label and multiple properties" in new Fixture {
paris <-- (Eurostar, Name -> "alpha", Length -> 100) --- london
paris.in(Eurostar).head() shouldBe london
paris.inE(Eurostar).value(Name).head() shouldBe "alpha"
paris.inE(Eurostar).value(Length).head() shouldBe 100
}
/* "has a label and multiple properties as Map" in new Fixture {
paris <-- (Eurostar, properties) --- london
paris.in(Eurostar).head shouldBe london
paris.inE(Eurostar).value(Name).head shouldBe "alpha"
paris.inE(Eurostar).value(Length).head shouldBe 100
}*/
}
"A <--> B create edges".which {
"have labels" in new Fixture {
paris <-- Eurostar --> london
paris.out(Eurostar).head() shouldBe london
london.out(Eurostar).head() shouldBe paris
}
"have labels and one property" in new Fixture {
paris <-- (Eurostar, Name -> "alpha") --> london
paris.out(Eurostar).head() shouldBe london
paris.outE(Eurostar).value(Name).head() shouldBe "alpha"
paris.in(Eurostar).head() shouldBe london
paris.inE(Eurostar).value(Name).head() shouldBe "alpha"
}
"have labels and multiple properties" in new Fixture {
paris <-- (Eurostar, Name -> "alpha", Length -> 100) --> london
paris.out(Eurostar).head() shouldBe london
paris.outE(Eurostar).value(Name).head() shouldBe "alpha"
paris.outE(Eurostar).value(Length).head() shouldBe 100
paris.in(Eurostar).head() shouldBe london
paris.inE(Eurostar).value(Name).head() shouldBe "alpha"
paris.inE(Eurostar).value(Length).head() shouldBe 100
}
/* "have labels and multiple properties as Map" in new Fixture {
paris <-- (Eurostar, properties) --> london
paris.out(Eurostar).head shouldBe london
paris.outE(Eurostar).value(Name).head shouldBe "alpha"
paris.outE(Eurostar).value(Length).head shouldBe 100
paris.in(Eurostar).head shouldBe london
paris.inE(Eurostar).value(Name).head shouldBe "alpha"
paris.inE(Eurostar).value(Length).head shouldBe 100
}*/
}
// TODO: case class support
// "adding edge with case class" in {
// val graph = TinkerGraph.open.asScala
// val paris = graph.addVertex("Paris")
// val london = graph.addVertex("London")
// val e = paris --- CCWithLabelAndId(
// "some string",
// Int.MaxValue,
// Long.MaxValue,
// Some("option type"),
// Seq("test1", "test2"),
// Map("key1" -> "value1", "key2" -> "value2"),
// NestedClass("nested")
// ) --> london
// e.inVertex shouldBe london
// e.outVertex shouldBe paris
// }
// "adding bidirectional edge with case class" in {
// val graph = TinkerGraph.open.asScala
// val paris = graph.addVertex("Paris")
// val london = graph.addVertex("London")
// val (e0, e1) = paris <-- CCWithLabel(
// "some string",
// Long.MaxValue,
// Some("option type"),
// Seq("test1", "test2"),
// Map("key1" -> "value1", "key2" -> "value2"),
// NestedClass("nested")
// ) --> london
// e0.inVertex shouldBe london
// e0.outVertex shouldBe paris
// e1.inVertex shouldBe paris
// e1.outVertex shouldBe london
// }
// "adding left edge with case class" in {
// val graph = TinkerGraph.open.asScala
// val paris = graph.addVertex("Paris")
// val london = graph.addVertex("London")
// val e = paris <-- CCWithLabelAndId(
// "some string",
// Int.MaxValue,
// Long.MaxValue,
// Some("option type"),
// Seq("test1", "test2"),
// Map("key1" -> "value1", "key2" -> "value2"),
// NestedClass("nested")
// ) --- london
// e.inVertex shouldBe paris
// e.outVertex shouldBe london
// }
// Per-test fixture: each test gets a fresh in-memory TinkerGraph with two
// vertices and the keys used to address edge properties.
trait Fixture {
implicit val graph = TinkerGraph.open.asScala()
val paris: Vertex = graph + "Paris"
val london = graph + "London"
val Eurostar = "eurostar" //edge label
val Name: Key[String] = Key[String]("name")
val Length: Key[Int] = Key[Int]("length")
// Map form of the same two properties, used by the commented-out Map-based tests.
val properties: Map[String, Any] =
List(("name", "alpha"), ("length", 100)).toMap
}
}
| mpollmeier/gremlin-scala | gremlin-scala/src/test/scala/gremlin/scala/ArrowSyntaxSpec.scala | Scala | apache-2.0 | 5,817 |
package io.evolutionary.twin
import java.util.concurrent.TimeUnit
import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.json.MetricsModule
import com.fasterxml.jackson.databind.ObjectMapper
import com.typesafe.config.{ConfigFactory, Config}
import org.apache.log4j.BasicConfigurator
import org.http4s.server.blaze.BlazeBuilder
import org.http4s.server.middleware.Metrics
import scalaz._
import scalaz.stream._
import org.http4s.dsl._
import org.http4s.server._
import language.postfixOps
import net.ceedubs.ficus.Ficus._
// Typed view over the `io.evolutionary.twin.server` section of the Typesafe
// config, read eagerly at construction (throws if a key is missing).
class ServerSettings(config: Config) {
val serverCfg = config.getConfig("io.evolutionary.twin.server")
val port = serverCfg.as[Int]("port") // TCP port to bind
val host = serverCfg.as[String]("host") // hostname/interface to bind
val route = serverCfg.as[String]("route") // mount prefix for the HTTP service
}
// Application entry point: wires up logging, config, the HTTP router, a
// Dropwizard-metrics endpoint and a blaze server, then reads commands from
// stdin until EOF.
// NOTE(review): `extends App` defers field initialization to delayedInit,
// which has known ordering pitfalls — consider a plain `def main`.
object Main extends App {
BasicConfigurator.configure()
val settings = new ServerSettings(ConfigFactory.load())
implicit val client = org.http4s.client.blaze.defaultClient
val metrics = new MetricRegistry()
// Jackson mapper configured to render the metrics registry as pretty JSON.
val mapper = new ObjectMapper()
.registerModule(new MetricsModule(TimeUnit.SECONDS, TimeUnit.SECONDS, true))
val routerSettings = new RouterSettings(ConfigFactory.load())
val router = Router.make(routerSettings)
val httpService = HttpService(router)
// GET /metrics dumps the registry as JSON.
val metricsService = HttpService {
case GET -> Root / "metrics" =>
val writer = mapper.writerWithDefaultPrettyPrinter()
Ok(writer.writeValueAsString(metrics))
}
// Meter every request under the "Twin" prefix; fall through to /metrics.
val svc = Metrics.meter(metrics, "Twin")(httpService orElse metricsService)
val server = BlazeBuilder
.bindHttp(settings.port, settings.host)
.mountService(svc, settings.route)
.withNio2(true)
.run
println("Server started!")
// Blocks the main thread parsing console commands line by line.
val commandParsing = (io stdInLines) evalMap { cmd => Console.parseCommand(cmd, router) }
commandParsing.run.run
}
| edmundnoble/Twin | src/main/scala/io/evolutionary/twin/Main.scala | Scala | apache-2.0 | 1,813 |
package nl.dennislaumen.scalify
import java.net.URL
import xml.XML
/**
 * Immutable value describing a Spotify track.
 *
 * The auxiliary constructors build partially-populated instances (album
 * and/or ids left as null) — presumably for search results that lack the
 * full lookup payload; verify against the callers before relying on
 * `album`/`ids` being non-null.
 */
class Track private[scalify] (val uri: String, val name: String, val artist: Artist, val album: Album, val ids: Map[String, String],
val discNumber: Option[Int], val trackNumber: Int, val length: Float, val popularity: Float) {
// Minimal form: only uri, name and artist are known.
private[scalify] def this(uri: String, name: String, artist: Artist) = {
this (uri, name, artist, null, null, None, 0, 0F, 0F)
}
// Everything except the album.
private[scalify] def this(uri: String, name: String, artist: Artist, ids: Map[String, String], discNumber: Option[Int], trackNumber: Int,
length: Float, popularity: Float) = {
this (uri, name, artist, null, ids, discNumber, trackNumber, length, popularity)
}
override def toString(): String = name
}
object Track {
  /**
   * Looks up a track's full metadata via the Spotify Web lookup API.
   *
   * Performs a blocking HTTP GET of the lookup XML for the given URI and
   * parses the track fields out of it.
   *
   * @param uri a Spotify track URI, e.g. "spotify:track:..."
   * @return a fully-populated [[Track]]
   * @throws java.io.IOException if the HTTP request or XML load fails
   * @throws NumberFormatException if a numeric field in the response is malformed
   */
  def lookup(uri: String): Track = {
    val trackXML = XML.load(new URL("http://ws.spotify.com/lookup/1/?uri=" + uri))
    val name = (trackXML \\ "name").text
    val artistXML = trackXML \\ "artist"
    val artist = new Artist((artistXML \\ "@href").text, (artistXML \\ "name").text)
    val albumXML = trackXML \\ "album"
    val album = new Album((albumXML \\ "@href").text, (albumXML \\ "name").text)
    // Fold the <id type="..."> elements into an immutable map keyed by the
    // `type` attribute. The previous code wrote `map(key) = value` inside the
    // fold: that desugars to `map.update(key, value)`, which does not exist on
    // immutable.Map and would return Unit instead of the accumulator. Use a
    // plain foldLeft that returns the extended map.
    val ids = (trackXML \\ "id").foldLeft(Map[String, String]()) { (map, idNode) =>
      map + ((idNode \\ "@type").text -> idNode.text)
    }
    // disc-number is optional in the lookup payload.
    val discNumber =
      if ((trackXML \\ "disc-number").text.isEmpty)
        None
      else
        Some((trackXML \\ "disc-number").text.toInt)
    val trackNumber = (trackXML \\ "track-number").text.toInt
    val length = (trackXML \\ "length").text.toFloat
    val popularity = (trackXML \\ "popularity").text.toFloat
    new Track(uri, name, artist, album, ids, discNumber, trackNumber, length, popularity)
  }
}
| dennislaumen/Scalify | src/main/scala/nl/dennislaumen/scalify/Track.scala | Scala | mit | 1,731 |
package edu.berkeley.nlp.entity
import java.io.PrintWriter
import scala.collection.mutable.HashMap
import scala.collection.mutable.ArrayBuffer
import edu.berkeley.nlp.entity.preprocess.PreprocessingDriver
import edu.berkeley.nlp.futile.syntax.Tree
import scala.collection.mutable.HashSet
import scala.collection.JavaConverters._
import edu.berkeley.nlp.entity.lang.Language
import edu.berkeley.nlp.futile.fig.basic.IOUtils
import edu.berkeley.nlp.entity.lang.Language
import edu.berkeley.nlp.entity.coref.OrderedClusteringBound
import edu.berkeley.nlp.entity.preprocess.Reprocessor
import edu.berkeley.nlp.entity.wiki.WikiAnnotReaderWriter
object ConllDocWriter {
/**
 * Writes a ConllDoc plus a predicted coref clustering in CoNLL format,
 * re-rendering all columns rather than patching the raw text.
 */
def writeDoc(writer: PrintWriter, conllDoc: ConllDoc, clustering: OrderedClusteringBound) {
writeIncompleteConllDoc(writer, conllDoc.docID, conllDoc.docPartNo, conllDoc.words, conllDoc.pos, conllDoc.trees.map(_.constTree), conllDoc.speakers, conllDoc.nerChunks, convertOrderedClusteringBoundToChunks(clustering, conllDoc.words.size));
// Older implementation that spliced coref bits into the raw CoNLL lines;
// kept for reference.
// val corefBits = getCorefBits(conllDoc.words.map(_.size), convertOrderedClusteringBoundToChunks(clustering, conllDoc.words.size));
// val numZeroesToAddToPartNo = 3 - conllDoc.docPartNo.toString.size;
// writer.println("#begin document (" + conllDoc.docID + "); part " + ("0" * numZeroesToAddToPartNo) + conllDoc.docPartNo);
// for (sentIdx <- 0 until conllDoc.rawText.size) {
// val sent = conllDoc.rawText(sentIdx);
// for (tokenIdx <- 0 until sent.size) {
// val line = conllDoc.rawText(sentIdx)(tokenIdx);
// val lineNoCoref = line.substring(0, Math.max(line.lastIndexOf("\\t"), line.lastIndexOf(" ")) + 1);
//// writer.println(lineNoCoref + corefBits(sentIdx)(tokenIdx));
// writer.println(lineNoCoref.replaceAll("\\\\s+", "\\t") + corefBits(sentIdx)(tokenIdx));
// }
// writer.println();
// }
// writer.println("#end document");
}
/**
 * Writes a doc with predicted NER chunks (possibly nested), predicted coref,
 * and optionally predicted Wikipedia links as an extra column.
 */
def writeDocWithPredAnnotations(writer: PrintWriter,
conllDoc: ConllDoc,
nerChunks: Seq[Seq[Chunk[String]]],
corefClustering: OrderedClusteringBound,
wikiChunks: Option[Seq[Seq[Chunk[String]]]] = None) {
writeIncompleteConllDocNestedNER(writer, conllDoc.docID, conllDoc.docPartNo, conllDoc.words, conllDoc.pos, conllDoc.trees.map(_.constTree), conllDoc.speakers, nerChunks, convertOrderedClusteringBoundToChunks(corefClustering, conllDoc.words.size), wikiChunks);
}
/**
 * Like writeDocWithPredAnnotations, but the Wikipedia annotations are written
 * to a separate standoff file rather than as an in-line column.
 */
def writeDocWithPredAnnotationsWikiStandoff(writer: PrintWriter,
standoffWriter: PrintWriter,
conllDoc: ConllDoc,
nerChunks: Seq[Seq[Chunk[String]]],
corefClustering: OrderedClusteringBound,
wikiChunks: Seq[Seq[Chunk[String]]]) {
writeIncompleteConllDocNestedNER(writer, conllDoc.docID, conllDoc.docPartNo, conllDoc.words, conllDoc.pos, conllDoc.trees.map(_.constTree), conllDoc.speakers, nerChunks, convertOrderedClusteringBoundToChunks(corefClustering, conllDoc.words.size), None);
WikiAnnotReaderWriter.writeStandoffAnnots(standoffWriter, conllDoc.docID, conllDoc.docPartNo, wikiChunks.map(_.map(Chunk.seqify(_))), conllDoc.words.map(_.size))
}
// Convenience overload: writes a doc using its own gold NER and coref chunks.
def writeIncompleteConllDoc(writer: PrintWriter,
doc: ConllDoc) {
writeIncompleteConllDocNestedNER(writer, doc.docID, doc.docPartNo, doc.words, doc.pos, doc.trees.map(_.constTree), doc.speakers, doc.nerChunks, doc.corefChunks);
}
// Doesn't write predicate-argument structures, senses, or lemmas (but we don't use these).
/**
 * Renders all columns from explicit components; NER chunks are assumed
 * non-nested here (see the NestedNER variant below for nested chunks).
 */
def writeIncompleteConllDoc(writer: PrintWriter,
docName: String,
partNo: Int,
words: Seq[Seq[String]],
pos: Seq[Seq[String]],
parses: Seq[Tree[String]],
speakers: Seq[Seq[String]],
nerChunks: Seq[Seq[Chunk[String]]],
corefChunks: Seq[Seq[Chunk[Int]]],
wikiChunks: Option[Seq[Seq[Chunk[String]]]] = None) {
val sentLens = words.map(_.size);
val parseBits = parses.map(tree => PreprocessingDriver.computeParseBits(Reprocessor.convertFromFutileTree(tree)).toSeq);
val nerBits = getNerBits(sentLens, nerChunks);
val corefBits = getCorefBits(sentLens, corefChunks);
val maybeWikiBits = wikiChunks.map(chunks => getNerBitsPossiblyNestedChunks(sentLens, wikiChunks.get))
writeIncompleteConllDocFromBits(writer, docName, partNo, words, pos, parseBits, speakers, nerBits, corefBits, maybeWikiBits);
}
/**
 * Same as above but NER chunks may be nested, so bits are rendered with the
 * multi-open/multi-close encoding.
 */
def writeIncompleteConllDocNestedNER(writer: PrintWriter,
docName: String,
partNo: Int,
words: Seq[Seq[String]],
pos: Seq[Seq[String]],
parses: Seq[Tree[String]],
speakers: Seq[Seq[String]],
nerChunks: Seq[Seq[Chunk[String]]],
corefChunks: Seq[Seq[Chunk[Int]]],
wikiChunks: Option[Seq[Seq[Chunk[String]]]] = None) {
val sentLens = words.map(_.size);
val parseBits = parses.map(tree => PreprocessingDriver.computeParseBits(Reprocessor.convertFromFutileTree(tree)).toSeq);
val nerBits = getNerBitsPossiblyNestedChunks(sentLens, nerChunks);
val corefBits = getCorefBits(sentLens, corefChunks);
val maybeWikiBits = wikiChunks.map(chunks => getNerBitsPossiblyNestedChunks(sentLens, wikiChunks.get))
writeIncompleteConllDocFromBits(writer, docName, partNo, words, pos, parseBits, speakers, nerBits, corefBits, maybeWikiBits);
}
/**
 * Lowest-level writer: emits the #begin/#end document markers and one
 * tab-separated row per token from pre-rendered per-token "bits".
 */
def writeIncompleteConllDocFromBits(writer: PrintWriter,
docName: String,
partNo: Int,
words: Seq[Seq[String]],
pos: Seq[Seq[String]],
parseBits: Seq[Seq[String]],
speakers: Seq[Seq[String]],
nerBits: Seq[Seq[String]],
corefBits: Seq[Seq[String]],
maybeWikiBits: Option[Seq[Seq[String]]] = None) {
// Part numbers are zero-padded to width 3 in the begin marker.
val numZeroesToAddToPartNo = 3 - partNo.toString.size;
writer.println("#begin document (" + docName + "); part " + ("0" * numZeroesToAddToPartNo) + partNo);
for (sentIdx <- 0 until words.size) {
val sent = words(sentIdx);
for (i <- 0 until sent.size) {
writer.println(docName + "\\t" + partNo + "\\t" + i + "\\t" + words(sentIdx)(i) + "\\t" + pos(sentIdx)(i) + "\\t" + parseBits(sentIdx)(i) +
"\\t-\\t-\\t-\\t" + speakers(sentIdx)(i) + "\\t" + nerBits(sentIdx)(i) + "\\t" + corefBits(sentIdx)(i) +
(if (maybeWikiBits.isDefined) "\\t" + maybeWikiBits.get(sentIdx)(i) else ""));
}
writer.println();
}
writer.println("#end document");
}
// Groups the mentions of a clustering by sentence, labeling each resulting
// chunk with its cluster index; within-sentence mention order is preserved.
private def convertOrderedClusteringBoundToChunks(clustering: OrderedClusteringBound, numSentences: Int): Seq[Seq[Chunk[Int]]] = {
  val perSentence = Array.fill(numSentences)(new ArrayBuffer[Chunk[Int]]());
  for ((ment, mentIdx) <- clustering.ments.zipWithIndex) {
    val clusterIdx = clustering.clustering.getClusterIdx(mentIdx);
    perSentence(ment.sentIdx) += new Chunk(ment.startIdx, ment.endIdx, clusterIdx);
  }
  perSentence;
}
/**
 * Renders per-token NER bits for non-nested chunks: "(X)" for a one-token
 * chunk, "(X*" at a start, "*)" at an end, "*" elsewhere.
 * If two chunks share a start token, the later one in the list wins
 * (HashMap.put overwrites).
 */
def getNerBits(sentLens: Seq[Int], nerChunks: Seq[Seq[Chunk[String]]]): Seq[Seq[String]] = {
for (sentIdx <- 0 until sentLens.size) yield {
val chunkStarts = new HashMap[Int,String];
val chunkEnds = new HashSet[Int];
for (chunk <- nerChunks(sentIdx)) {
chunkStarts.put(chunk.start, chunk.label);
// chunk.end is exclusive; record the last covered token.
chunkEnds += chunk.end - 1;
}
for (tokenIdx <- 0 until sentLens(sentIdx)) yield {
if (chunkStarts.contains(tokenIdx) && chunkEnds.contains(tokenIdx)) {
"(" + chunkStarts.get(tokenIdx).getOrElse("") + ")";
} else if (chunkStarts.contains(tokenIdx)) {
"(" + chunkStarts.get(tokenIdx).getOrElse("") + "*";
} else if (chunkEnds.contains(tokenIdx)) {
"*)";
} else {
"*";
}
}
}
}
/**
 * Renders per-token NER bits allowing nested chunks: every chunk starting at
 * the token opens "(X" (longest chunk first, via sortBy(-_.end)), then "*",
 * then one ")" per chunk ending at the token.
 */
def getNerBitsPossiblyNestedChunks(sentLens: Seq[Int], nerChunks: Seq[Seq[Chunk[String]]]): Seq[Seq[String]] = {
for (sentIdx <- 0 until sentLens.size) yield {
for (tokenIdx <- 0 until sentLens(sentIdx)) yield {
val chunksStartingHere = nerChunks(sentIdx).filter(chunk => chunk.start == tokenIdx).sortBy(- _.end);
val numChunksEndingHere = nerChunks(sentIdx).filter(chunk => chunk.end - 1 == tokenIdx).size;
var str = "";
for (chunk <- chunksStartingHere) {
str += "(" + chunk.label;
}
str += "*";
for (i <- 0 until numChunksEndingHere) {
str += ")";
}
str;
}
}
}
/**
 * Renders the CoNLL coreference column: for each token, pipe-separated
 * "(id" openers, "(id)" single-token mentions, and "id)" closers; "-" when
 * the token participates in no mention.
 */
def getCorefBits(sentLens: Seq[Int], corefChunks: Seq[Seq[Chunk[Int]]]): Seq[Seq[String]] = {
for (sentIdx <- 0 until sentLens.size) yield {
// token -> cluster ids of multi-token mentions starting/ending there;
// single-token mentions are tracked separately so they render as "(id)".
val mentionStarts = new HashMap[Int,ArrayBuffer[Int]];
val mentionEnds = new HashMap[Int,ArrayBuffer[Int]];
val mentionStartEnds = new HashMap[Int,Int];
val chunksThisSent = corefChunks(sentIdx);
for (chunk <- chunksThisSent) {
val start = chunk.start;
val end = chunk.end - 1;
if (start == end) {
mentionStartEnds.put(start, chunk.label);
} else {
if (!mentionStarts.contains(start)) {
mentionStarts.put(start, new ArrayBuffer[Int]())
}
mentionStarts(start) += chunk.label;
if (!mentionEnds.contains(end)) {
mentionEnds.put(end, new ArrayBuffer[Int]())
}
mentionEnds(end) += chunk.label;
}
}
for (tokenIdx <- 0 until sentLens(sentIdx)) yield {
var corefBit = "";
if (mentionStarts.contains(tokenIdx)) {
for (start <- mentionStarts(tokenIdx)) {
corefBit += "(" + start + "|";
}
}
if (mentionStartEnds.contains(tokenIdx)) {
corefBit += "(" + mentionStartEnds(tokenIdx) + ")|";
}
if (mentionEnds.contains(tokenIdx)) {
for (end <- mentionEnds(tokenIdx)) {
corefBit += end + ")|";
}
}
// dropRight(1) trims the trailing "|" separator.
if (corefBit.isEmpty) "-" else corefBit.dropRight(1);
}
}
}
/**
 * Writes one document in the Illinois NER column format: a -DOCSTART- row,
 * then one row per token (BIO tag, sentence idx, token idx, POS, word),
 * with a blank line between sentences.
 */
def writeDocIllinoisColumnFormat(writer: PrintWriter, conllDoc: ConllDoc) {
writer.println("O\\t0\\t0\\tO\\t-X-\\t-DOCSTART-\\tx\\tx\\t0");
// Example of the target format:
// B-LOC 0 0 I-NP NNP Portugal x x 0
//O 0 1 I-VP VBD called x x 0
//O 0 2 I-PP IN up x x 0
//B-ORG 0 3 I-NP NNP Porto x x 0
//O 0 4 I-NP JJ central x x 0
//O 0 5 I-NP NN defender x x 0
//B-PER 0 6 I-NP NNP Joao x x 0
//I-PER 0 7 I-NP NNP Manuel x x 0
//I-PER 0 8 I-VP NNP Pinto x x 0
//O 0 9 I-PP IN on x x 0
//O 0 10 I-NP NNP Friday x x 0
for (sentIdx <- 0 until conllDoc.words.size) {
val sent = conllDoc.words(sentIdx);
for (tokenIdx <- 0 until sent.size) {
// Whitespace inside a token would break the column format, so replace with "_".
val line = getConllNerBit(tokenIdx, conllDoc.nerChunks(sentIdx)) + "\\t" + sentIdx + "\\t" + tokenIdx + "\\t" +
"x\\t" + conllDoc.pos(sentIdx)(tokenIdx) + "\\t" + conllDoc.words(sentIdx)(tokenIdx).replaceAll("\\\\s+", "_") + "\\t" +
"x\\tx\\t0";
writer.println(line);
}
writer.println();
}
}
// BIO tag for one token: "B-X" at a chunk start, "I-X" inside a chunk,
// "O" outside all chunks. With overlapping chunks, the first chunk in the
// list covering the token wins (same as the original filter-then-head).
def getConllNerBit(tokIdx: Int, nerChunks: Seq[Chunk[String]]): String = {
  nerChunks.find(chunk => chunk.start <= tokIdx && tokIdx < chunk.end) match {
    case Some(chunk) if tokIdx == chunk.start => "B-" + chunk.label;
    case Some(chunk) => "I-" + chunk.label;
    case None => "O"
  }
}
/**
 * Converts an OntoNotes/CoNLL directory of *.gold_conll files to a single
 * Illinois-format column file, optionally shuffling document order with a
 * fixed seed (0) for reproducibility.
 */
def convertToIllinoisColumnFormat(dataDir: String, outFile: String, shuffle: Boolean) {
val goldDocuments = ConllDocReader.loadRawConllDocsWithSuffix(dataDir, -1, "gold_conll", Language.ENGLISH);
val goldDocumentsShuffled = if (shuffle) new scala.util.Random(0).shuffle(goldDocuments) else goldDocuments
val outWriter = IOUtils.openOutHard(outFile);
for (doc <- goldDocumentsShuffled) {
writeDocIllinoisColumnFormat(outWriter, doc);
}
outWriter.close();
}
// One-off driver: converts the hard-coded OntoNotes splits (train shuffled).
def main(args: Array[String]) = {
convertToIllinoisColumnFormat("data/ontonotes-conll/train", "data/illinois-2011/train-shuf.col", true);
convertToIllinoisColumnFormat("data/ontonotes-conll/dev", "data/illinois-2011/dev.col", false);
convertToIllinoisColumnFormat("data/ontonotes-conll/test", "data/illinois-2011/test.col", false);
// convertToIllinoisColumnFormat("data/conll-2012-en/train", "data/illinois-2012/train-shuf.col", true);
// convertToIllinoisColumnFormat("data/conll-2012-en/dev", "data/illinois-2012/dev.col", false);
// convertToIllinoisColumnFormat("data/conll-2012-en/test", "data/illinois-2012/test.col", false);
}
}
| matthewfl/berkeley-entity | src/main/java/edu/berkeley/nlp/entity/ConllDocWriter.scala | Scala | gpl-3.0 | 13,908 |
/*
* Copyright (c) 2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
// SBT
import sbt._
import Keys._
// SBT build settings for the Kinesis -> Elasticsearch sink.
object BuildSettings {
// Basic settings for our app
lazy val basicSettings = Seq[Setting[_]](
organization := "com.snowplowanalytics",
version := "0.5.0",
description := "Kinesis sink for Elasticsearch",
scalaVersion := "2.10.1",
scalacOptions := Seq("-deprecation", "-encoding", "utf8",
"-feature", "-target:jvm-1.7"),
scalacOptions in Test := Seq("-Yrangepos"),
resolvers ++= Dependencies.resolutionRepos
)
// Makes our SBT app settings available from within the app
// (generates a Settings object with organization/version/name at compile time).
lazy val scalifySettings = Seq(sourceGenerators in Compile <+= (sourceManaged in Compile, version, name, organization) map { (d, v, n, o) =>
val file = d / "settings.scala"
IO.write(file, """package com.snowplowanalytics.snowplow.storage.kinesis.elasticsearch.generated
|object Settings {
| val organization = "%s"
| val version = "%s"
| val name = "%s"
|}
|""".stripMargin.format(o, v, n))
Seq(file)
})
// sbt-assembly settings for building a fat jar
import sbtassembly.AssemblyPlugin.autoImport._
import sbtassembly.AssemblyPlugin.defaultShellScript
lazy val sbtAssemblySettings = baseAssemblySettings ++ Seq(
// Executable jarfile
assemblyOption in assembly ~= { _.copy(prependShellScript = Some(defaultShellScript)) },
// Name it as an executable
assemblyJarName in assembly := { s"${name.value}-${version.value}" },
// Patch for joda-convert merge issue with elasticsearch 1.7.0
// See: https://github.com/elastic/elasticsearch/pull/10294
assemblyExcludedJars in assembly := {
val cp = (fullClasspath in assembly).value
cp filter {_.data.getName == "joda-convert-1.2.jar"}
}
)
// Full setting stack applied to the project.
lazy val buildSettings = basicSettings ++ scalifySettings ++ sbtAssemblySettings
}
| jramos/snowplow | 4-storage/kinesis-elasticsearch-sink/project/BuildSettings.scala | Scala | apache-2.0 | 2,626 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.utils.tf
import java.nio.ByteOrder
import com.intel.analytics.bigdl.dllib.nn._
import com.intel.analytics.bigdl.dllib.nn.abstractnn.{AbstractModule, DataFormat}
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.T
import Tensorflow._
import BigDLToTensorflow._
import org.tensorflow.framework.{DataType, NodeDef}
import scala.collection.mutable.ArrayBuffer
/**
* Wrapper of logic to convert module to tensorflow node definition
*/
trait BigDLToTensorflow {
/**
 * Convert the module to a tensorflow nodedef
 * @param module the BigDL layer being translated
 * @param inputs NodeDefs feeding this layer (already-translated upstream nodes)
 * @param byteOrder endianness used when serializing weight tensors into Const nodes
 * @return Mapped nodedef list, the first is the output node
 */
def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
byteOrder: ByteOrder): Seq[NodeDef]
}
object BigDLToTensorflow {
  /**
   * This method is just for test purpose. Do not use the bigdl.saveNHWC for real use case
   * Remaps a dimension index when the test-only NHWC flag is on; identity otherwise.
   * @return
   */
  private[tf] def processSaveDim(dim: Int): Int = {
    if (System.getProperty("bigdl.enableNHWC", "false").toBoolean) {
      dim match {
        case 2 => 4
        case 3 => 2
        case 4 => 3
        case _ => dim
      }
    } else {
      dim
    }
  }

  /**
   * This method is just for test purpose. Do not use the bigdl.enableNHWC for real use case
   * Chooses the tensorflow data layout from the test-only NHWC system property.
   * @return
   */
  private[tf] def getDataFormat(): TensorflowDataFormat = {
    if (System.getProperty("bigdl.enableNHWC", "false").toBoolean) {
      TensorflowDataFormat.NHWC
    } else {
      TensorflowDataFormat.NCHW
    }
  }
}
object InputToTF extends BigDLToTensorflow {
  // An Input layer is a no-op: forward its single input through an Identity node.
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
      byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.size == 1, "Input only accept one input")
    Seq(identity(inputs.head, module.getName()))
  }
}
object ReLUToTF extends BigDLToTensorflow {
  // Maps BigDL ReLU onto the tensorflow Relu op.
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
      byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.size == 1, "Relu only accept one input")
    Seq(relu(inputs.head, module.getName()))
  }
}
object LinearToTF extends BigDLToTensorflow {
  // Linear -> MatMul(input, W^T) + bias. Weights are emitted as Const nodes
  // followed by Identity "reader" nodes, matching the tensorflow saver layout.
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
      byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.size == 1, "Linear only accept one input")
    val linear = module.asInstanceOf[Linear[_]]
    // BigDL stores weight as (out, in); tensorflow MatMul wants (in, out).
    val weightConst = const(linear.weight.t().contiguous(), linear.getName() + "/weight", byteOrder)
    val weightId = identity(weightConst, linear.getName() + "/weightReader")
    val matmulNode = matmul(inputs.head, weightId, linear.getName() + "/matmul")
    val biasConst = const(linear.bias, linear.getName() + "/bias", byteOrder)
    val biasId = identity(biasConst, linear.getName() + "/biasReader")
    val outputNode = add(matmulNode, biasId, linear.getName() + "/add")
    // Output node first, then the supporting nodes.
    Seq(outputNode, biasId, biasConst, matmulNode, weightId, weightConst)
  }
}
/**
 * Converts SpatialConvolution. nGroup == 1 maps to a single Conv2D (+BiasAdd
 * when a bias exists); grouped convolution is emulated with Split ->
 * per-group Conv2D -> Concat, which is NCHW-only here.
 */
object SpatialConvolutionToTF extends BigDLToTensorflow {
override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
byteOrder: ByteOrder): Seq[NodeDef] = {
import scala.language.existentials
require(inputs.length == 1, "SpatialConvolution only accept one input")
val spatialConv = module.asInstanceOf[SpatialConvolution[_]]
if (spatialConv.nGroup == 1) {
// For NCHW the BigDL filter is permuted into tensorflow's HWIO layout;
// for NHWC the stored layout is used as-is.
val (dataFormat, filterTensor) = if (spatialConv.format == DataFormat.NCHW) {
(TensorflowDataFormat.NCHW,
spatialConv.weight.select(1, 1)
.transpose(2, 3).transpose(3, 4)
.transpose(1, 2).transpose(2, 3)
.transpose(3, 4).contiguous())
} else {
(TensorflowDataFormat.NHWC, spatialConv.weight.select(1, 1))
}
val filter = const(filterTensor, spatialConv.getName() + "/filter", byteOrder)
val filterReader = identity(filter, spatialConv.getName() + "/filterReader")
val conv = conv2D(inputs(0), filterReader, spatialConv.strideW, spatialConv.strideH,
spatialConv.kernelW, spatialConv.kernelH, spatialConv.padW, spatialConv.padH,
dataFormat, spatialConv.getName() + "/conv2D")
if (spatialConv.bias != null) {
val bias = const(spatialConv.bias, spatialConv.getName() + "/bias", byteOrder)
val biasReader = identity(bias, spatialConv.getName() + "/biasReader")
val add = biasAdd(conv, biasReader, dataFormat,
spatialConv.getName() + "/biasAdd")
Seq(add, biasReader, bias, conv, filterReader, filter)
} else {
Seq(conv, filterReader, filter)
}
} else {
require(spatialConv.format == DataFormat.NCHW, "Only NCHW support conv group")
val nodes = new ArrayBuffer[NodeDef]()
// Split along the channel axis (1 in NCHW); scalar dim consts are
// written little-endian.
val splitDim = const(Tensor.scalar[Int](1), spatialConv.getName() + "/split_dim",
ByteOrder.LITTLE_ENDIAN)
val splits = split(splitDim, inputs(0), spatialConv.nGroup, spatialConv.getName() + "/split")
nodes.append(splitDim)
nodes.appendAll(splits)
val axis = const(Tensor.scalar[Int](1), spatialConv.getName() + "/concat/axis",
ByteOrder.LITTLE_ENDIAN)
nodes.append(axis)
// One Conv2D (+ optional BiasAdd) per group, each with its own weight slice.
val outputs = (0 until spatialConv.nGroup).map(g => {
val filterTensor = spatialConv.weight.select(1, g + 1)
.transpose(2, 3).transpose(3, 4)
.transpose(1, 2).transpose(2, 3)
.transpose(3, 4).contiguous()
val filter = const(filterTensor, spatialConv.getName() + s"/group$g/filter", byteOrder)
val filterReader = identity(filter, spatialConv.getName() + s"/group$g/filterReader")
val conv = conv2D(splits(g), filterReader, spatialConv.strideW, spatialConv.strideH,
spatialConv.kernelW, spatialConv.kernelH, spatialConv.padW, spatialConv.padH,
TensorflowDataFormat.NCHW, spatialConv.getName() + s"/group$g/conv2D")
if (spatialConv.bias != null) {
val bias = const(spatialConv.bias.narrow(1,
g * spatialConv.nOutputPlane / spatialConv.nGroup + 1,
spatialConv.nOutputPlane / spatialConv.nGroup),
spatialConv.getName() + s"/group$g/bias", byteOrder)
val biasReader = identity(bias, spatialConv.getName() + s"/group$g/biasReader")
val add = biasAdd(conv, biasReader, TensorflowDataFormat.NCHW,
spatialConv.getName() + s"/group$g/biasAdd")
nodes.append(add, biasReader, bias, conv, filterReader, filter)
add
} else {
nodes.append(conv, filterReader, filter)
conv
}
}) ++ Seq(axis)
val concatNode = concat(outputs, spatialConv.getName() + "/concat/output")
Seq(concatNode) ++ nodes
}
}
}
/**
 * Converts TemporalConvolution (1D conv over frames) by expanding both input
 * and weight to 4D, running Conv2D with height 1, then squeezing back.
 */
object TemporalConvolutionToTF extends BigDLToTensorflow {
override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
byteOrder: ByteOrder): Seq[NodeDef] = {
require(inputs.length == 1, "SpatialConvolution only accept one input")
val spatialConv = module.asInstanceOf[TemporalConvolution[_]]
// Insert a singleton height dim into the input: (batch, 1, frames, inputSize).
val const1 = const(Tensor.scalar[Int](1), spatialConv.getName() + "/dim1", byteOrder)
val expandDimsInput = expandDims(inputs.head, const1,
spatialConv.getName() + "/expandDimsInput")
// Reorder the flat weight into (kernelW, inputFrameSize, outputFrameSize).
val filterTensor = spatialConv.weight
.view(spatialConv.outputFrameSize, spatialConv.kernelW, spatialConv.inputFrameSize)
.transpose(2, 3).transpose(1, 3).contiguous()
val filter = const(filterTensor, spatialConv.getName() + "/filter", byteOrder)
val filterReader = identity(filter, spatialConv.getName() + "/filterReader")
// Give the filter a singleton leading dim so it matches Conv2D's 4D layout.
val const2 = const(Tensor.scalar[Int](0), spatialConv.getName() + "/dim2", byteOrder)
val expandDimsWeight = expandDims(filterReader, const2,
spatialConv.getName() + "/expandDimsWeight")
val conv = conv2D(expandDimsInput, expandDimsWeight, spatialConv.strideW, 1,
spatialConv.kernelW, 1, 0, 0,
getDataFormat(), spatialConv.getName() + "/conv2D")
// Drop the singleton height dim again.
val sq = squeeze(conv, Seq(1), spatialConv.getName() + "/squeeze")
val bias = const(spatialConv.bias, spatialConv.getName() + "/bias", byteOrder)
val biasReader = identity(bias, spatialConv.getName() + "/biasReader")
val add = biasAdd(sq, biasReader, getDataFormat(),
spatialConv.getName() + "/biasAdd")
Seq(add, biasReader, bias, conv, filterReader, filter, sq,
expandDimsInput, expandDimsWeight, const1, const2)
}
}
object SqueezeToTF extends BigDLToTensorflow {
  // Maps Squeeze, converting BigDL's 1-based dims to tensorflow's 0-based axes
  // (with the test-only NHWC remapping applied via processSaveDim).
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
      byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.size == 1, "Squeeze only accept one input")
    val layer = module.asInstanceOf[Squeeze[_]]
    val tfAxes = layer.dims.map(d => processSaveDim(d) - 1)
    Seq(squeeze(inputs.head, tfAxes, layer.getName()))
  }
}
object TanhToTF extends BigDLToTensorflow {
  // Maps BigDL Tanh onto the tensorflow Tanh op.
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
      byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.size == 1, "Tanh only accept one input")
    Seq(tanh(inputs.head, module.getName()))
  }
}
object ReshapeToTF extends BigDLToTensorflow {
  // Reshape becomes a Reshape node driven by a Const holding the target shape.
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
      byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.size == 1, "Reshape only accept one input")
    val rh = module.asInstanceOf[Reshape[_]]
    // Copy the target sizes into a 1-indexed Int tensor.
    val size = Tensor[Int](rh.size.length)
    for (i <- 0 until rh.size.length) {
      size.setValue(i + 1, rh.size(i))
    }
    val shape = const(size, rh.getName() + "/shape", byteOrder)
    val reshapeNode = reshape(inputs.head, shape, rh.getName())
    Seq(reshapeNode, shape)
  }
}
object ViewToTF extends BigDLToTensorflow {
  // View is a Reshape whose leading (batch) dimension is left free as -1.
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
      byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.size == 1, "Reshape only accept one input")
    val viewLayer = module.asInstanceOf[View[_]]
    // First entry is -1 (inferred batch); the rest are the view sizes.
    val size = Tensor[Int](viewLayer.sizes.length + 1).setValue(1, -1)
    for (i <- 1 to viewLayer.sizes.length) {
      size.setValue(i + 1, viewLayer.sizes(i - 1))
    }
    val shape = const(size, viewLayer.getName() + "/shape", byteOrder)
    val reshapeNode = reshape(inputs.head, shape, viewLayer.getName())
    Seq(reshapeNode, shape)
  }
}
object MaxpoolToTF extends BigDLToTensorflow {
  // SpatialMaxPooling -> MaxPool, forwarding kernel, stride, padding,
  // ceil mode and the layer's data layout unchanged.
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
      byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.size == 1, "Maxpool only accept one input")
    val layer = module.asInstanceOf[SpatialMaxPooling[_]]
    val dataFormat =
      if (layer.format == DataFormat.NHWC) TensorflowDataFormat.NHWC
      else TensorflowDataFormat.NCHW
    val pool = maxPool(inputs.head, layer.kW, layer.kH, layer.padW, layer.padH,
      layer.dW, layer.dH, dataFormat, layer.getName(), layer.ceilMode)
    Seq(pool)
  }
}
/**
 * Converts Padding to a Pad node. Only a single padding step (nIndex == 1)
 * is supported; a negative pad pads before the dimension, a positive pad
 * pads after it.
 */
object PaddingToTF extends BigDLToTensorflow {
override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
byteOrder: ByteOrder): Seq[NodeDef] = {
require(inputs.length == 1, "Padding only accept one input")
val layer = module.asInstanceOf[Padding[_]]
require(layer.nIndex == 1, "only support padding nIndex == 1")
require(layer.nInputDim > 0, "nInputDim must be explicit specified")
// (nInputDim x 2) paddings tensor: column 1 = pad before, column 2 = pad after.
val padding = Tensor[Int](layer.nInputDim, 2).zero()
if (layer.pad < 0) {
padding.setValue(layer.dim, 1, -layer.pad)
}
else {
padding.setValue(layer.dim, 2, layer.pad)
}
val paddingsNode = const(padding, layer.getName() + "/padding", byteOrder)
val padNode = pad(inputs(0), paddingsNode, layer.getName() + "/output")
Seq(padNode, paddingsNode)
}
}
object AvgpoolToTF extends BigDLToTensorflow {
  // SpatialAveragePooling -> AvgPool, forwarding kernel, stride, padding,
  // ceil mode and the layer's data layout unchanged.
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
      byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.size == 1, "Avgpool only accept one input")
    val layer = module.asInstanceOf[SpatialAveragePooling[_]]
    val dataFormat =
      if (layer.format == DataFormat.NHWC) TensorflowDataFormat.NHWC
      else TensorflowDataFormat.NCHW
    val pool = avgPool(inputs.head, layer.kW, layer.kH, layer.padW, layer.padH,
      layer.dW, layer.dH, dataFormat, layer.getName(), layer.ceilMode)
    Seq(pool)
  }
}
object SigmoidToTF extends BigDLToTensorflow {
  // Maps BigDL Sigmoid onto the tensorflow Sigmoid op.
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
      byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.size == 1, "Sigmoid only accept one input")
    Seq(sigmoid(inputs.head, module.getName()))
  }
}
object DropoutToTF extends BigDLToTensorflow {
  /**
   * Dropout in evaluation mode is the identity function, so it maps to an
   * Identity node. Training-mode dropout is not representable here.
   *
   * Fix: the original checked `inputs.length == 1` twice with two different
   * error messages; the redundant second require is removed.
   */
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
      byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.length == 1, "Dropout only accept one input")
    val layer = module.asInstanceOf[Dropout[_]]
    require(layer.isTraining() == false, "only support evaluating mode dropout")
    Seq(identity(inputs(0), layer.getName()))
  }
}
object ScaleToTF extends BigDLToTensorflow {
  /**
   * Converts Scale (y = weight * x + bias, element-wise with broadcasting)
   * into Mul + Add nodes fed by weight/bias Const nodes.
   *
   * Fix: the weight and bias Const nodes were previously always serialized
   * with ByteOrder.LITTLE_ENDIAN, ignoring the caller-supplied `byteOrder` —
   * inconsistent with every other weight-writing converter in this file
   * (e.g. LinearToTF, SpatialConvolutionToTF) and wrong on big-endian targets.
   */
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
      byteOrder: ByteOrder): Seq[NodeDef] = {
    val layer = module.asInstanceOf[Scale[_]]
    val weight = const(layer.cmul.weight, layer.getName() + "/mul/weight", byteOrder)
    val mulNode = multiply(weight, inputs(0), layer.getName() + "/mul/mul")
    val bias = const(layer.cadd.bias, layer.getName() + "/add/bias", byteOrder)
    val output = add(mulNode, bias, layer.getName() + "/add/add")
    Seq(output, bias, mulNode, weight)
  }
}
object CAddTableToTF extends BigDLToTensorflow {
  // CAddTable sums all of its inputs, which is exactly tensorflow's AddN.
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
      byteOrder: ByteOrder): Seq[NodeDef] = {
    val sumNode = addN(inputs, module.getName())
    Seq(sumNode)
  }
}
object CMultTableToTF extends BigDLToTensorflow {
  // Element-wise product of exactly two inputs via tensorflow's Mul.
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
      byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.size == 2, "Tensorflow only support two tensor multiply together")
    val Seq(left, right) = inputs
    Seq(multiply(left, right, module.getName()))
  }
}
object JoinTableToTF extends BigDLToTensorflow {
  // JoinTable -> ConcatV2: inputs in reverse order followed by the 0-based
  // axis Const (BigDL's dimension is 1-based).
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
      byteOrder: ByteOrder): Seq[NodeDef] = {
    val layer = module.asInstanceOf[JoinTable[_]]
    val axis = const(Tensor.scalar[Int](layer.dimension - 1), layer.getName() + "/axis", byteOrder)
    val concatInputs = inputs.reverse :+ axis
    Seq(concat(concatInputs, layer.getName()), axis)
  }
}
/**
 * Converts Mean (with squeeze) into a ReduceMean over a single axis
 * (BigDL's 1-based dimension mapped to tensorflow's 0-based axis).
 */
object MeanToTF extends BigDLToTensorflow {
override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
byteOrder: ByteOrder): Seq[NodeDef] = {
require(inputs.length == 1, "Mean only accept one input")
val layer = module.asInstanceOf[Mean[_]]
require(layer.squeeze == true, "Mean must squeeze input")
// NOTE(review): this allocates a length-`dimension` tensor but only sets
// element 1; for dimension > 1 the remaining entries stay at their default.
// Presumably a length-1 tensor (Tensor[Int](1)) was intended — TODO confirm.
val dimsTensor = Tensor[Int](layer.dimension)
dimsTensor.setValue(1, layer.dimension - 1)
val dims = const(dimsTensor, layer.getName() + "/dims", byteOrder)
val mean = reduceMean(inputs(0), dims, false, layer.getName() + "/output")
Seq(mean, dims)
}
}
object SoftMaxToTF extends BigDLToTensorflow {
  // Maps BigDL SoftMax onto the tensorflow Softmax op.
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
      byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.size == 1, "Softmax only accept one input")
    Seq(softmax(inputs.head, module.getName()))
  }
}
object LogSoftMaxToTF extends BigDLToTensorflow {
  // Maps BigDL LogSoftMax onto the tensorflow LogSoftmax op.
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
      byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.size == 1, "LogSoftmax only accept one input")
    Seq(logSoftmax(inputs.head, module.getName()))
  }
}
object BatchNorm2DToTF extends BigDLToTensorflow {
  // Exports an inference-mode spatial batch norm as elementwise TF ops:
  //   output = input * (scale * rsqrt(var)) + (offset - mean * scale * rsqrt(var))
  // (or, without affine weights, output = input * rsqrt(var) - mean * rsqrt(var)).
  // NOTE(review): runningVar is used without adding an epsilon here — presumably
  // BigDL folds eps into runningVar before export; confirm.
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
      byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.length == 1, "BatchNorm only accept one input")
    val layer = module.asInstanceOf[SpatialBatchNormalization[_]]
    require(!layer.isTraining(), "Only support evaluate mode batch norm")
    // reshape to nchw
    // Build an all-ones shape vector of rank nDim, then set dim 2 to the
    // channel count: (1, C, 1, ..., 1), suitable for broadcasting over NCHW.
    val size = Tensor[Int](layer.nDim)
    for (i <- 0 until layer.nDim) {
      size.setValue(i + 1, 1)
    }
    size(2) = layer.runningVar.size(1)
    if (layer.weight != null) {
      // Affine case: learned scale (weight) and offset (bias) are present.
      val shapeVar = const(size, layer.getName() + "/reshape_1/shape", byteOrder)
      val shapeMean = const(size, layer.getName() + "/reshape_2/shape", byteOrder)
      val shapeScale = const(size, layer.getName() + "/reshape_3/shape", byteOrder)
      val shapeOffset = const(size, layer.getName() + "/reshape_4/shape", byteOrder)
      val varNode = const(layer.runningVar, layer.getName() + "/var", byteOrder)
      val mean = const(layer.runningMean, layer.getName() + "/mean", byteOrder)
      val scale = const(layer.weight, layer.getName() + "/scale", byteOrder)
      val offset = const(layer.bias, layer.getName() + "/offset", byteOrder)
      val reshapeVar = reshape(varNode, shapeVar, s"${layer.getName()}/reshape_1")
      val reshapeMean = reshape(mean, shapeMean, s"${layer.getName()}/reshape_2")
      val reshapeScale = reshape(scale, shapeScale, s"${layer.getName()}/reshape_3")
      val reshapeOffset = reshape(offset, shapeOffset, s"${layer.getName()}/reshape_4")
      // construct graph
      val sqrtVar = rsqrt(reshapeVar, layer.getName() + "/sqrtvar")
      val mul0 = multiply(reshapeScale, sqrtVar, layer.getName() + "/mul0")
      val mul1 = multiply(inputs(0), mul0, layer.getName() + "/mul1")
      val mul2 = multiply(reshapeMean, mul0, layer.getName() + "/mul2")
      val sub = subtract(reshapeOffset, mul2, layer.getName() + "/sub")
      val output = add(mul1, sub, layer.getName() + "/output")
      // The result Seq starts with the output node followed by every created
      // node; keep this ordering when modifying.
      Seq(output, sub, mul2, mul1, mul0, reshapeOffset, reshapeMean, reshapeScale,
        shapeOffset, shapeMean, shapeScale, offset, scale, mean,
        sqrtVar, reshapeVar, shapeVar, varNode)
    } else {
      // No affine weights: only running mean/variance are exported.
      val shapeVar = const(size, layer.getName() + "/reshape_1/shape", byteOrder)
      val shapeMean = const(size, layer.getName() + "/reshape_2/shape", byteOrder)
      val varNode = const(layer.runningVar, layer.getName() + "/var", byteOrder)
      val mean = const(layer.runningMean, layer.getName() + "/mean", byteOrder)
      val reshapeVar = reshape(varNode, shapeVar, s"${layer.getName()}/reshape_1")
      val reshapeMean = reshape(mean, shapeMean, s"${layer.getName()}/reshape_2")
      // construct graph
      val sqrtVar = rsqrt(reshapeVar, layer.getName() + "/sqrtvar")
      val mul1 = multiply(inputs(0), sqrtVar, layer.getName() + "/mul1")
      val mul2 = multiply(reshapeMean, sqrtVar, layer.getName() + "/mul2")
      val output = subtract(mul1, mul2, layer.getName() + "/output")
      Seq(output, mul2, mul1, reshapeMean, shapeMean, mean, sqrtVar, reshapeVar, shapeVar, varNode)
    }
  }
}
object LRNToTF extends BigDLToTensorflow {
  // Exports SpatialCrossMapLRN. The radius passed to TF is (size - 1) / 2 and
  // alpha is rescaled by the window size. For NCHW input the tensor is
  // transposed to NHWC, LRN'd, and transposed back.
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
      byteOrder: ByteOrder): Seq[NodeDef] = {
    val layer = module.asInstanceOf[SpatialCrossMapLRN[_]]
    if (layer.format == DataFormat.NHWC) {
      Seq(lrn(inputs(0), (layer.size - 1) / 2, layer.k.toFloat, (layer.alpha / layer.size).toFloat,
        layer.beta.toFloat, module.getName()))
    } else {
      // NOTE(review): the permutation constants are written with
      // ByteOrder.LITTLE_ENDIAN instead of the `byteOrder` parameter used
      // elsewhere in this file — confirm this is intentional.
      val perm1 = const(Tensor[Int](T(0, 2, 3, 1)), module.getName() + "/perm1",
        ByteOrder.LITTLE_ENDIAN)
      val transpose1 = transpose(inputs(0), perm1, module.getName() + "/transpose1")
      val lrnNode = lrn(transpose1, (layer.size - 1) / 2, layer.k.toFloat,
        (layer.alpha / layer.size).toFloat,
        layer.beta.toFloat, module.getName() + "/lrn")
      val perm2 = const(Tensor[Int](T(0, 3, 1, 2)), module.getName() + "/perm2",
        ByteOrder.LITTLE_ENDIAN)
      val output = transpose(lrnNode, perm2, module.getName() + "/transpose2")
      Seq(output, perm1, transpose1, lrnNode, perm2)
    }
  }
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/utils/tf/BigDLToTensorflow.scala | Scala | apache-2.0 | 20,995 |
package com.twitter.bijection
/**
 * Type tag used to indicate that an instance of a type such as String contains a valid
 * representation of another type, such as Int or URL.
 *
 * Phantom type: values are never instances of Rep; it is applied to a value
 * via the tagged-type operator (`A @@ Rep[B]`, see [[HasRep]]).
 */
trait Rep[A]
/**
 * Companion of [[Rep]], providing the `toRep[B]` enrichment syntax.
 */
object Rep {
  // Explicit result type added: public implicit defs with inferred result
  // types are fragile and deprecated in newer Scala versions.
  implicit def toRepOpsEnrichment[A](a: A): ToRepOps[A] = new ToRepOps(a)

  /**
   * Adds toRep[B] syntax to elements of type A if there is an implicit HasRep[A, B] in scope. TODO
   * make implicit class in 2.10
   */
  class ToRepOps[A](a: A) extends java.io.Serializable {
    /** Tags `a` as a valid representation of `B`, or `None` if it is not. */
    def toRep[B](implicit ev: HasRep[A, B]): Option[A @@ Rep[B]] =
      ev.toRep(a)
  }
}
/**
 * Type class for summoning the function that can check whether the instance can be tagged with Rep
 */
trait HasRep[A, B] {
  /** Returns `a` tagged with `Rep[B]` when it is a valid representation of `B`, else `None`. */
  def toRep(a: A): Option[A @@ Rep[B]]
}
| twitter/bijection | bijection-core/src/main/scala/com/twitter/bijection/Rep.scala | Scala | apache-2.0 | 770 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package model.report
import play.api.libs.json.Json
import model.OnlineTestCommands.Implicits._
import model.{ PhoneAndEmail, ReportWithPersonalDetails }
// One row of the online test pass mark report: application-level report data
// paired with the corresponding questionnaire report.
case class OnlineTestPassMarkReportItem(
  application: ApplicationForOnlineTestPassMarkReportItem,
  questionnaire: QuestionnaireReportItem)
object OnlineTestPassMarkReportItem {
  import play.api.libs.json.OFormat

  // Explicit result type added: implicit vals with inferred types are
  // deprecated and can cause implicit-resolution/compile-order issues.
  implicit val onlineTestPassMarkReportFormat: OFormat[OnlineTestPassMarkReportItem] =
    Json.format[OnlineTestPassMarkReportItem]
}
| hmrc/fset-faststream | app/model/report/OnlineTestPassMarkReportItem.scala | Scala | apache-2.0 | 1,084 |
import sbt._
import sbt.Keys._
import sbtassembly._
import sbtassembly.AssemblyKeys._
import spray.revolver.RevolverPlugin._
object CurioDB extends Build {

  // Pinned tool/library versions.
  val scalaV = "2.11.7"
  val akkaV = "2.4.11"
  val sprayV = "1.3.4"
  val curiodbV = "0.0.1"

  val dependencies = Seq(
    "com.typesafe.akka" %% "akka-actor" % akkaV,
    "com.typesafe.akka" %% "akka-persistence" % akkaV,
    "com.typesafe.akka" %% "akka-cluster" % akkaV,
    "io.spray" %% "spray-can" % sprayV,
    "io.spray" %% "spray-json" % "1.3.2",
    "com.wandoulabs.akka" %% "spray-websocket" % "0.1.4",
    "org.luaj" % "luaj-jse" % "3.0.1"
  )

  // Merge strategy that concatenates duplicate config files in reverse
  // classpath order, so entries from later jars take precedence.
  val reverseConcat: MergeStrategy = new MergeStrategy {
    val name = "reverseConcat"
    def apply(tempDir: File, path: String, files: Seq[File]): Either[String, Seq[(File, String)]] =
      MergeStrategy.concat(tempDir, path, files.reverse)
  }

  // Fixed: GitHub disabled the unauthenticated git:// protocol in 2022, so
  // these source dependencies must be fetched over https.
  lazy val indexedTreeMap = RootProject(uri("https://github.com/stephenmcd/indexed-tree-map.git"))
  lazy val hyperLogLog = RootProject(uri("https://github.com/stephenmcd/java-hll.git#with-fastutils"))

  lazy val root = Project("root", file("."), settings = Seq(
    name := "curiodb",
    version := curiodbV,
    scalaVersion in Global := scalaV,
    //resolvers += "Local Maven Repository" at "file://" + Path.userHome.absolutePath + "/.ivy2/local",
    // Fixed: plain-http resolvers are rejected by modern sbt; both repos serve TLS.
    resolvers += "Akka Snapshots" at "https://repo.akka.io/snapshots/",
    resolvers += "spray repo" at "https://repo.spray.io",
    libraryDependencies ++= dependencies,
    // Prepend a shell stub so the assembled jar is directly executable.
    assemblyOption in assembly := (assemblyOption in assembly).value.copy(prependShellScript =
      Option(Seq("#!/usr/bin/env sh", s"""exec -a ${name.value} java -jar "$$0" "$$@" """))),
    assemblyJarName in assembly := s"${name.value}-${version.value}",
    assemblyMergeStrategy in assembly := {
      case PathList(ps @ _*) if ps.takeRight(2).mkString("/") == "leveldb/DB.class" => MergeStrategy.first
      case x if Assembly.isConfigFile(x) => reverseConcat
      case x => (assemblyMergeStrategy in assembly).value(x)
    }
  ) ++ Revolver.settings).dependsOn(indexedTreeMap, hyperLogLog)
}
| stephenmcd/curiodb | project/Build.scala | Scala | bsd-2-clause | 2,140 |
package edu.rice.habanero.benchmarks.fjcreate
import edu.rice.habanero.actors.{ScalaActor, ScalaActorState}
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner}
/**
*
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> (shams@rice.edu)
*/
object ForkJoinScalaActorBenchmark {
  /** Entry point: hands a fresh benchmark instance to the harness. */
  def main(args: Array[String]) {
    BenchmarkRunner.runBenchmark(args, new ForkJoinScalaActorBenchmark)
  }

  private final class ForkJoinScalaActorBenchmark extends Benchmark {
    def initialize(args: Array[String]) {
      ForkJoinConfig.parseArgs(args)
    }

    def printArgInfo() {
      ForkJoinConfig.printArgs()
    }

    /** Spawns N one-shot actors, sends each a single message, then waits for
     *  every actor to terminate. */
    def runIteration() {
      val payload = new Object()
      for (_ <- 0 until ForkJoinConfig.N) {
        val worker = new ForkJoinActor()
        worker.start()
        worker.send(payload)
      }
      ScalaActorState.awaitTermination()
    }

    def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double) {
    }
  }

  /** Actor that performs one fixed computation on any message and exits. */
  private class ForkJoinActor extends ScalaActor[AnyRef] {
    override def process(msg: AnyRef) {
      ForkJoinConfig.performComputation(37.2)
      exit()
    }
  }
}
| shamsmahmood/savina | src/main/scala/edu/rice/habanero/benchmarks/fjcreate/ForkJoinScalaActorBenchmark.scala | Scala | gpl-2.0 | 1,175 |
package org.apache.spark.sql.snowflake
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.test.SQLTestData
import org.apache.spark.sql.test.SQLTestData._
/**
* Most test data in SQLTestData invokes RDD API, which is not supported by TS yet.
* Please override all test data used by TS test suites in this class
*/
trait SFTestData extends SQLTestData {
  import org.apache.spark.sql.test.SQLTestData.CourseSales

  // Every fixture below is rebuilt from a local Seq via spark.createDataFrame,
  // avoiding the RDD-based construction used by the parent trait. Row values
  // are identical to SQLTestData's.
  override protected lazy val testData: DataFrame =
    spark.createDataFrame((1 to 100).map(i => TestData(i, i.toString)))

  override protected lazy val testData2: DataFrame =
    spark.createDataFrame(Seq(
      TestData2(1, 1), TestData2(1, 2),
      TestData2(2, 1), TestData2(2, 2),
      TestData2(3, 1), TestData2(3, 2)))

  override protected lazy val testData3: DataFrame =
    spark.createDataFrame(Seq(TestData3(1, None), TestData3(2, Some(2))))

  override protected lazy val decimalData: DataFrame =
    spark.createDataFrame(Seq(
      DecimalData(1, 1), DecimalData(1, 2),
      DecimalData(2, 1), DecimalData(2, 2),
      DecimalData(3, 1), DecimalData(3, 2)))

  override protected lazy val courseSales: DataFrame =
    spark.createDataFrame(Seq(
      CourseSales("dotNET", 2012, 10000),
      CourseSales("Java", 2012, 20000),
      CourseSales("dotNET", 2012, 5000),
      CourseSales("dotNET", 2013, 48000),
      CourseSales("Java", 2013, 30000)))

  override protected lazy val lowerCaseData: DataFrame =
    spark.createDataFrame(Seq(
      LowerCaseData(1, "a"),
      LowerCaseData(2, "b"),
      LowerCaseData(3, "c"),
      LowerCaseData(4, "d")))

  override protected lazy val upperCaseData: DataFrame =
    spark.createDataFrame(Seq(
      UpperCaseData(1, "A"),
      UpperCaseData(2, "B"),
      UpperCaseData(3, "C"),
      UpperCaseData(4, "D"),
      UpperCaseData(5, "E"),
      UpperCaseData(6, "F")))

  override protected lazy val nullInts: DataFrame =
    spark.createDataFrame(Seq(NullInts(1), NullInts(2), NullInts(3), NullInts(null)))

  override protected lazy val allNulls: DataFrame =
    spark.createDataFrame(Seq(NullInts(null), NullInts(null), NullInts(null), NullInts(null)))

  override protected lazy val nullStrings: DataFrame =
    spark.createDataFrame(Seq(
      NullStrings(1, "abc"),
      NullStrings(2, "ABC"),
      NullStrings(3, null)))

  override protected lazy val lowerCaseDataWithDuplicates: DataFrame =
    spark.createDataFrame(Seq(
      LowerCaseData(1, "a"),
      LowerCaseData(2, "b"),
      LowerCaseData(2, "b"),
      LowerCaseData(3, "c"),
      LowerCaseData(3, "c"),
      LowerCaseData(3, "c"),
      LowerCaseData(4, "d")))
}
| snowflakedb/spark-snowflakedb | src/it/scala/org/apache/spark/sql/snowflake/SFTestData.scala | Scala | apache-2.0 | 3,027 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{execution, DataFrame, Row}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Range, Repartition, Sort, Union}
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.execution.columnar.{InMemoryRelation, InMemoryTableScanExec}
import org.apache.spark.sql.execution.exchange.{EnsureRequirements, ReusedExchangeExec, ReuseExchange, ShuffleExchangeExec}
import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, SortMergeJoinExec}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
class PlannerSuite extends SharedSQLContext {
import testImplicits._
setupTestData()
private def testPartialAggregationPlan(query: LogicalPlan): Unit = {
val planner = spark.sessionState.planner
import planner._
val plannedOption = Aggregation(query).headOption
val planned =
plannedOption.getOrElse(
fail(s"Could query play aggregation query $query. Is it an aggregation query?"))
val aggregations = planned.collect { case n if n.nodeName contains "Aggregate" => n }
// For the new aggregation code path, there will be four aggregate operator for
// distinct aggregations.
assert(
aggregations.size == 2 || aggregations.size == 4,
s"The plan of query $query does not have partial aggregations.")
}
  // The next three tests feed different aggregate mixes through
  // testPartialAggregationPlan, which asserts a partial/final split exists.
  test("count is partially aggregated") {
    val query = testData.groupBy('value).agg(count('key)).queryExecution.analyzed
    testPartialAggregationPlan(query)
  }
  test("count distinct is partially aggregated") {
    val query = testData.groupBy('value).agg(countDistinct('key)).queryExecution.analyzed
    testPartialAggregationPlan(query)
  }
  test("mixed aggregates are partially aggregated") {
    val query =
      testData.groupBy('value).agg(count('value), countDistinct('key)).queryExecution.analyzed
    testPartialAggregationPlan(query)
  }
  // Aggregates over the same distinct columns should not require an Expand
  // operator in the physical plan.
  test("mixed aggregates with same distinct columns") {
    def assertNoExpand(plan: SparkPlan): Unit = {
      assert(plan.collect { case e: ExpandExec => e }.isEmpty)
    }
    withTempView("v") {
      Seq((1, 1.0, 1.0), (1, 2.0, 2.0)).toDF("i", "j", "k").createTempView("v")
      // one distinct column
      val query1 = sql("SELECT sum(DISTINCT j), max(DISTINCT j) FROM v GROUP BY i")
      assertNoExpand(query1.queryExecution.executedPlan)
      // 2 distinct columns
      val query2 = sql("SELECT corr(DISTINCT j, k), count(DISTINCT j, k) FROM v GROUP BY i")
      assertNoExpand(query2.queryExecution.executedPlan)
      // 2 distinct columns with different order
      val query3 = sql("SELECT corr(DISTINCT j, k), count(DISTINCT k, j) FROM v GROUP BY i")
      assertNoExpand(query3.queryExecution.executedPlan)
    }
  }
  // Joining against a LIMIT 1 subquery should plan as a broadcast hash join
  // (the limit's size estimate falls below the broadcast threshold) for all
  // of the field-type mixes below.
  test("sizeInBytes estimation of limit operator for broadcast hash join optimization") {
    def checkPlan(fieldTypes: Seq[DataType]): Unit = {
      withTempView("testLimit") {
        // Single-row table whose schema is the given field types plus a join key.
        val fields = fieldTypes.zipWithIndex.map {
          case (dataType, index) => StructField(s"c${index}", dataType, true)
        } :+ StructField("key", IntegerType, true)
        val schema = StructType(fields)
        val row = Row.fromSeq(Seq.fill(fields.size)(null))
        val rowRDD = sparkContext.parallelize(row :: Nil)
        spark.createDataFrame(rowRDD, schema).createOrReplaceTempView("testLimit")
        val planned = sql(
          """
            |SELECT l.a, l.b
            |FROM testData2 l JOIN (SELECT * FROM testLimit LIMIT 1) r ON (l.a = r.key)
          """.stripMargin).queryExecution.sparkPlan
        val broadcastHashJoins = planned.collect { case join: BroadcastHashJoinExec => join }
        val sortMergeJoins = planned.collect { case join: SortMergeJoinExec => join }
        assert(broadcastHashJoins.size === 1, "Should use broadcast hash join")
        assert(sortMergeJoins.isEmpty, "Should not use sort merge join")
      }
    }
    val simpleTypes =
      NullType ::
      BooleanType ::
      ByteType ::
      ShortType ::
      IntegerType ::
      LongType ::
      FloatType ::
      DoubleType ::
      DecimalType(10, 5) ::
      DecimalType.SYSTEM_DEFAULT ::
      DateType ::
      TimestampType ::
      StringType ::
      BinaryType :: Nil
    // NOTE: the threshold values are magic numbers, presumably tuned to sit
    // just above the size estimates for these schemas — verify before changing.
    withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "16434") {
      checkPlan(simpleTypes)
    }
    val complexTypes =
      ArrayType(DoubleType, true) ::
      ArrayType(StringType, false) ::
      MapType(IntegerType, StringType, true) ::
      MapType(IntegerType, ArrayType(DoubleType), false) ::
      StructType(Seq(
        StructField("a", IntegerType, nullable = true),
        StructField("b", ArrayType(DoubleType), nullable = false),
        StructField("c", DoubleType, nullable = false))) :: Nil
    withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "901617") {
      checkPlan(complexTypes)
    }
  }
  // Size statistics of a cached (InMemoryRelation) table must propagate so
  // that the small cached side is chosen for broadcast.
  test("InMemoryRelation statistics propagation") {
    withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "81920") {
      withTempView("tiny") {
        testData.limit(3).createOrReplaceTempView("tiny")
        sql("CACHE TABLE tiny")
        val a = testData.as("a")
        val b = spark.table("tiny").as("b")
        val planned = a.join(b, $"a.key" === $"b.key").queryExecution.sparkPlan
        val broadcastHashJoins = planned.collect { case join: BroadcastHashJoinExec => join }
        val sortMergeJoins = planned.collect { case join: SortMergeJoinExec => join }
        assert(broadcastHashJoins.size === 1, "Should use broadcast hash join")
        assert(sortMergeJoins.isEmpty, "Should not use shuffled hash join")
        spark.catalog.clearCache()
      }
    }
  }
  // Regression test: filters pushed into a data-source scan must appear in
  // the plan's string representation.
  test("SPARK-11390 explain should print PushedFilters of PhysicalRDD") {
    withTempPath { file =>
      val path = file.getCanonicalPath
      testData.write.parquet(path)
      val df = spark.read.parquet(path)
      df.createOrReplaceTempView("testPushed")
      withTempView("testPushed") {
        val exp = sql("select * from testPushed where key = 15").queryExecution.sparkPlan
        assert(exp.toString.contains("PushedFilters: [IsNotNull(key), EqualTo(key,15)]"))
      }
    }
  }
  // The following tests pin when limit/sort combinations are planned as
  // TakeOrderedAndProjectExec (top-K) versus CollectLimitExec.
  test("efficient terminal limit -> sort should use TakeOrderedAndProject") {
    val query = testData.select('key, 'value).sort('key).limit(2)
    val planned = query.queryExecution.executedPlan
    assert(planned.isInstanceOf[execution.TakeOrderedAndProjectExec])
    assert(planned.output === testData.select('key, 'value).logicalPlan.output)
  }
  test("terminal limit -> project -> sort should use TakeOrderedAndProject") {
    val query = testData.select('key, 'value).sort('key).select('value, 'key).limit(2)
    val planned = query.queryExecution.executedPlan
    assert(planned.isInstanceOf[execution.TakeOrderedAndProjectExec])
    assert(planned.output === testData.select('value, 'key).logicalPlan.output)
  }
  // A limit with no ordering cannot use the top-K operator.
  test("terminal limits that are not handled by TakeOrderedAndProject should use CollectLimit") {
    val query = testData.select('value).limit(2)
    val planned = query.queryExecution.sparkPlan
    assert(planned.isInstanceOf[CollectLimitExec])
    assert(planned.output === testData.select('value).logicalPlan.output)
  }
  test("TakeOrderedAndProject can appear in the middle of plans") {
    val query = testData.select('key, 'value).sort('key).limit(2).filter('key === 3)
    val planned = query.queryExecution.executedPlan
    assert(planned.find(_.isInstanceOf[TakeOrderedAndProjectExec]).isDefined)
  }
  test("CollectLimit can appear in the middle of a plan when caching is used") {
    val query = testData.select('key, 'value).limit(2).cache()
    val planned = query.queryExecution.optimizedPlan.asInstanceOf[InMemoryRelation]
    assert(planned.cachedPlan.isInstanceOf[CollectLimitExec])
  }
  // Above TOP_K_SORT_FALLBACK_THRESHOLD the top-K operator is not used.
  test("TakeOrderedAndProjectExec appears only when number of limit is below the threshold.") {
    withSQLConf(SQLConf.TOP_K_SORT_FALLBACK_THRESHOLD.key -> "1000") {
      val query0 = testData.select('value).orderBy('key).limit(100)
      val planned0 = query0.queryExecution.executedPlan
      assert(planned0.find(_.isInstanceOf[TakeOrderedAndProjectExec]).isDefined)
      val query1 = testData.select('value).orderBy('key).limit(2000)
      val planned1 = query1.queryExecution.executedPlan
      assert(planned1.find(_.isInstanceOf[TakeOrderedAndProjectExec]).isEmpty)
    }
  }
  // Re-sorting cached data by the same ordering must be optimized away.
  test("SPARK-23375: Cached sorted data doesn't need to be re-sorted") {
    val query = testData.select('key, 'value).sort('key.desc).cache()
    assert(query.queryExecution.optimizedPlan.isInstanceOf[InMemoryRelation])
    val resorted = query.sort('key.desc)
    assert(resorted.queryExecution.optimizedPlan.collect { case s: Sort => s}.isEmpty)
    assert(resorted.select('key).collect().map(_.getInt(0)).toSeq ==
      (1 to 100).reverse)
    // with a different order, the sort is needed
    val sortedAsc = query.sort('key)
    assert(sortedAsc.queryExecution.optimizedPlan.collect { case s: Sort => s}.size == 1)
    assert(sortedAsc.select('key).collect().map(_.getInt(0)).toSeq == (1 to 100))
  }
  // Multi-way joins (with broadcast disabled) must produce the expected
  // number of shuffle exchanges whether or not the join keys line up.
  test("PartitioningCollection") {
    withTempView("normal", "small", "tiny") {
      testData.createOrReplaceTempView("normal")
      testData.limit(10).createOrReplaceTempView("small")
      testData.limit(3).createOrReplaceTempView("tiny")
      // Disable broadcast join
      withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
        {
          val numExchanges = sql(
            """
              |SELECT *
              |FROM
              |  normal JOIN small ON (normal.key = small.key)
              |  JOIN tiny ON (small.key = tiny.key)
            """.stripMargin
          ).queryExecution.executedPlan.collect {
            case exchange: ShuffleExchangeExec => exchange
          }.length
          assert(numExchanges === 5)
        }
        {
          // This second query joins on different keys:
          val numExchanges = sql(
            """
              |SELECT *
              |FROM
              |  normal JOIN small ON (normal.key = small.key)
              |  JOIN tiny ON (normal.key = tiny.key)
            """.stripMargin
          ).queryExecution.executedPlan.collect {
            case exchange: ShuffleExchangeExec => exchange
          }.length
          assert(numExchanges === 5)
        }
      }
    }
  }
  // repartition(10).repartition(20) should collapse to a single repartition.
  test("collapse adjacent repartitions") {
    val doubleRepartitioned = testData.repartition(10).repartition(20).coalesce(5)
    def countRepartitions(plan: LogicalPlan): Int = plan.collect { case r: Repartition => r }.length
    assert(countRepartitions(doubleRepartitioned.queryExecution.analyzed) === 3)
    assert(countRepartitions(doubleRepartitioned.queryExecution.optimizedPlan) === 2)
    // Deliberately non-exhaustive: a MatchError here fails the test when the
    // optimized plan does not have the expected two-level shape.
    doubleRepartitioned.queryExecution.optimizedPlan match {
      case Repartition (numPartitions, shuffle, Repartition(_, shuffleChild, _)) =>
        assert(numPartitions === 5)
        assert(shuffle === false)
        assert(shuffleChild === true)
    }
  }
  ///////////////////////////////////////////////////////////////////////////
  // Unit tests of EnsureRequirements for Exchange
  ///////////////////////////////////////////////////////////////////////////
  // When it comes to testing whether EnsureRequirements properly ensures distribution requirements,
  // there are two dimensions that need to be considered: are the child partitionings compatible and
  // do they satisfy the distribution requirements? As a result, we need at least four test cases.
  // Fails the test unless (a) all distribution-constrained children agree on
  // partition count and (b) each child's output partitioning satisfies its
  // required distribution.
  private def assertDistributionRequirementsAreSatisfied(outputPlan: SparkPlan): Unit = {
    if (outputPlan.children.length > 1) {
      // Broadcast / unspecified distributions do not constrain partition counts.
      val childPartitionings = outputPlan.children.zip(outputPlan.requiredChildDistribution)
        .filter {
          case (_, UnspecifiedDistribution) => false
          case (_, _: BroadcastDistribution) => false
          case _ => true
        }.map(_._1.outputPartitioning)
      if (childPartitionings.map(_.numPartitions).toSet.size > 1) {
        fail(s"Partitionings doesn't have same number of partitions: $childPartitionings")
      }
    }
    outputPlan.children.zip(outputPlan.requiredChildDistribution).foreach {
      case (child, requiredDist) =>
        assert(child.outputPartitioning.satisfies(requiredDist),
          s"$child output partitioning does not satisfy $requiredDist:\n$outputPlan")
    }
  }
  // Children with mismatched partition counts must be reconciled.
  test("EnsureRequirements with child partitionings with different numbers of output partitions") {
    val clustering = Literal(1) :: Nil
    val distribution = ClusteredDistribution(clustering)
    val inputPlan = DummySparkPlan(
      children = Seq(
        DummySparkPlan(outputPartitioning = HashPartitioning(clustering, 1)),
        DummySparkPlan(outputPartitioning = HashPartitioning(clustering, 2))
      ),
      requiredChildDistribution = Seq(distribution, distribution),
      requiredChildOrdering = Seq(Seq.empty, Seq.empty)
    )
    val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan)
    assertDistributionRequirementsAreSatisfied(outputPlan)
  }
  test("EnsureRequirements with compatible child partitionings that do not satisfy distribution") {
    val distribution = ClusteredDistribution(Literal(1) :: Nil)
    // The left and right inputs have compatible partitionings but they do not satisfy the
    // distribution because they are clustered on different columns. Thus, we need to shuffle.
    val childPartitioning = HashPartitioning(Literal(2) :: Nil, 1)
    assert(!childPartitioning.satisfies(distribution))
    val inputPlan = DummySparkPlan(
      children = Seq(
        DummySparkPlan(outputPartitioning = childPartitioning),
        DummySparkPlan(outputPartitioning = childPartitioning)
      ),
      requiredChildDistribution = Seq(distribution, distribution),
      requiredChildOrdering = Seq(Seq.empty, Seq.empty)
    )
    val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan)
    assertDistributionRequirementsAreSatisfied(outputPlan)
    if (outputPlan.collect { case e: ShuffleExchangeExec => true }.isEmpty) {
      fail(s"Exchange should have been added:\n$outputPlan")
    }
  }
  test("EnsureRequirements with compatible child partitionings that satisfy distribution") {
    // In this case, all requirements are satisfied and no exchange should be added.
    val distribution = ClusteredDistribution(Literal(1) :: Nil)
    val childPartitioning = HashPartitioning(Literal(1) :: Nil, 5)
    assert(childPartitioning.satisfies(distribution))
    val inputPlan = DummySparkPlan(
      children = Seq(
        DummySparkPlan(outputPartitioning = childPartitioning),
        DummySparkPlan(outputPartitioning = childPartitioning)
      ),
      requiredChildDistribution = Seq(distribution, distribution),
      requiredChildOrdering = Seq(Seq.empty, Seq.empty)
    )
    val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan)
    assertDistributionRequirementsAreSatisfied(outputPlan)
    if (outputPlan.collect { case e: ShuffleExchangeExec => true }.nonEmpty) {
      fail(s"Exchange should not have been added:\n$outputPlan")
    }
  }
  // This is a regression test for SPARK-9703
  test("EnsureRequirements should not repartition if only ordering requirement is unsatisfied") {
    // Consider an operator that imposes both output distribution and ordering requirements on its
    // children, such as sort merge join. If the distribution requirements are satisfied but
    // the output ordering requirements are unsatisfied, then the planner should only add sorts and
    // should not need to add additional shuffles / exchanges.
    val outputOrdering = Seq(SortOrder(Literal(1), Ascending))
    val distribution = ClusteredDistribution(Literal(1) :: Nil)
    val inputPlan = DummySparkPlan(
      children = Seq(
        DummySparkPlan(outputPartitioning = SinglePartition),
        DummySparkPlan(outputPartitioning = SinglePartition)
      ),
      requiredChildDistribution = Seq(distribution, distribution),
      requiredChildOrdering = Seq(outputOrdering, outputOrdering)
    )
    val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan)
    assertDistributionRequirementsAreSatisfied(outputPlan)
    if (outputPlan.collect { case e: ShuffleExchangeExec => true }.nonEmpty) {
      fail(s"No Exchanges should have been added:\n$outputPlan")
    }
  }
  // A shuffle over a child that already has the target partitioning is redundant.
  test("EnsureRequirements eliminates Exchange if child has same partitioning") {
    val distribution = ClusteredDistribution(Literal(1) :: Nil)
    val partitioning = HashPartitioning(Literal(1) :: Nil, 5)
    assert(partitioning.satisfies(distribution))
    val inputPlan = ShuffleExchangeExec(
      partitioning,
      DummySparkPlan(outputPartitioning = partitioning),
      None)
    val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan)
    assertDistributionRequirementsAreSatisfied(outputPlan)
    if (outputPlan.collect { case e: ShuffleExchangeExec => true }.size == 2) {
      fail(s"Topmost Exchange should have been eliminated:\n$outputPlan")
    }
  }
  test("EnsureRequirements does not eliminate Exchange with different partitioning") {
    val distribution = ClusteredDistribution(Literal(1) :: Nil)
    val partitioning = HashPartitioning(Literal(2) :: Nil, 5)
    assert(!partitioning.satisfies(distribution))
    val inputPlan = ShuffleExchangeExec(
      partitioning,
      DummySparkPlan(outputPartitioning = partitioning),
      None)
    val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan)
    assertDistributionRequirementsAreSatisfied(outputPlan)
    if (outputPlan.collect { case e: ShuffleExchangeExec => true }.size == 1) {
      fail(s"Topmost Exchange should not have been eliminated:\n$outputPlan")
    }
  }
  // A ClusteredDistribution that requests an explicit partition count must be honored.
  test("EnsureRequirements should respect ClusteredDistribution's num partitioning") {
    val distribution = ClusteredDistribution(Literal(1) :: Nil, Some(13))
    // Number of partitions differ
    val finalPartitioning = HashPartitioning(Literal(1) :: Nil, 13)
    val childPartitioning = HashPartitioning(Literal(1) :: Nil, 5)
    assert(!childPartitioning.satisfies(distribution))
    val inputPlan = DummySparkPlan(
      children = DummySparkPlan(outputPartitioning = childPartitioning) :: Nil,
      requiredChildDistribution = Seq(distribution),
      requiredChildOrdering = Seq(Seq.empty))
    val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan)
    val shuffle = outputPlan.collect { case e: ShuffleExchangeExec => e }
    assert(shuffle.size === 1)
    assert(shuffle.head.newPartitioning === finalPartitioning)
  }
test("Reuse exchanges") {
val distribution = ClusteredDistribution(Literal(1) :: Nil)
val finalPartitioning = HashPartitioning(Literal(1) :: Nil, 5)
val childPartitioning = HashPartitioning(Literal(2) :: Nil, 5)
assert(!childPartitioning.satisfies(distribution))
val shuffle = ShuffleExchangeExec(finalPartitioning,
DummySparkPlan(
children = DummySparkPlan(outputPartitioning = childPartitioning) :: Nil,
requiredChildDistribution = Seq(distribution),
requiredChildOrdering = Seq(Seq.empty)),
None)
val inputPlan = SortMergeJoinExec(
Literal(1) :: Nil,
Literal(1) :: Nil,
Inner,
None,
shuffle,
shuffle)
val outputPlan = ReuseExchange(spark.sessionState.conf).apply(inputPlan)
if (outputPlan.collect { case e: ReusedExchangeExec => true }.size != 1) {
fail(s"Should re-use the shuffle:\n$outputPlan")
}
if (outputPlan.collect { case e: ShuffleExchangeExec => true }.size != 1) {
fail(s"Should have only one shuffle:\n$outputPlan")
}
// nested exchanges
val inputPlan2 = SortMergeJoinExec(
Literal(1) :: Nil,
Literal(1) :: Nil,
Inner,
None,
ShuffleExchangeExec(finalPartitioning, inputPlan),
ShuffleExchangeExec(finalPartitioning, inputPlan))
val outputPlan2 = ReuseExchange(spark.sessionState.conf).apply(inputPlan2)
if (outputPlan2.collect { case e: ReusedExchangeExec => true }.size != 2) {
fail(s"Should re-use the two shuffles:\n$outputPlan2")
}
if (outputPlan2.collect { case e: ShuffleExchangeExec => true }.size != 2) {
fail(s"Should have only two shuffles:\n$outputPlan")
}
}
  ///////////////////////////////////////////////////////////////////////////
  // Unit tests of EnsureRequirements for Sort
  ///////////////////////////////////////////////////////////////////////////
  // Shared fixtures: three distinct literal expressions, their ascending sort
  // orders, and dummy plans hash-partitioned and sorted on each.
  private val exprA = Literal(1)
  private val exprB = Literal(2)
  private val exprC = Literal(3)
  private val orderingA = SortOrder(exprA, Ascending)
  private val orderingB = SortOrder(exprB, Ascending)
  private val orderingC = SortOrder(exprC, Ascending)
  private val planA = DummySparkPlan(outputOrdering = Seq(orderingA),
    outputPartitioning = HashPartitioning(exprA :: Nil, 5))
  private val planB = DummySparkPlan(outputOrdering = Seq(orderingB),
    outputPartitioning = HashPartitioning(exprB :: Nil, 5))
  private val planC = DummySparkPlan(outputOrdering = Seq(orderingC),
    outputPartitioning = HashPartitioning(exprC :: Nil, 5))
  // Sanity check: the fixtures below rely on the three orderings being distinct.
  assert(orderingA != orderingB && orderingA != orderingC && orderingB != orderingC)
  // Wraps `childPlan` in a parent requiring `requiredOrdering`, applies
  // EnsureRequirements, and fails unless a SortExec was inserted (or not)
  // according to `shouldHaveSort`.
  private def assertSortRequirementsAreSatisfied(
      childPlan: SparkPlan,
      requiredOrdering: Seq[SortOrder],
      shouldHaveSort: Boolean): Unit = {
    val inputPlan = DummySparkPlan(
      children = childPlan :: Nil,
      requiredChildOrdering = Seq(requiredOrdering),
      requiredChildDistribution = Seq(UnspecifiedDistribution)
    )
    val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan)
    assertDistributionRequirementsAreSatisfied(outputPlan)
    if (shouldHaveSort) {
      if (outputPlan.collect { case s: SortExec => true }.isEmpty) {
        fail(s"Sort should have been added:\n$outputPlan")
      }
    } else {
      if (outputPlan.collect { case s: SortExec => true }.nonEmpty) {
        fail(s"No sorts should have been added:\n$outputPlan")
      }
    }
  }
test("EnsureRequirements skips sort when either side of join keys is required after inner SMJ") {
  for (joinType <- Seq(Inner, Cross)) {
    val smj = SortMergeJoinExec(exprA :: Nil, exprB :: Nil, joinType, None, planA, planB)
    // An inner/cross SMJ leaves both join keys sorted, so requiring the order
    // of either key must not introduce an extra sort.
    for (ordering <- Seq(orderingA, orderingB)) {
      assertSortRequirementsAreSatisfied(
        childPlan = smj,
        requiredOrdering = Seq(ordering),
        shouldHaveSort = false)
    }
  }
}
test("EnsureRequirements skips sort when key order of a parent SMJ is propagated from its " +
  "child SMJ") {
  for (joinType <- Seq(Inner, Cross)) {
    val bottomSmj = SortMergeJoinExec(exprA :: Nil, exprB :: Nil, joinType, None, planA, planB)
    val topSmj = SortMergeJoinExec(exprB :: Nil, exprC :: Nil, joinType, None, bottomSmj, planC)
    // The child SMJ sorts A and B; the parent SMJ adds C. None of the three
    // orderings should need a new sort on top of the parent join.
    for (ordering <- Seq(orderingA, orderingB, orderingC)) {
      assertSortRequirementsAreSatisfied(
        childPlan = topSmj,
        requiredOrdering = Seq(ordering),
        shouldHaveSort = false)
    }
  }
}
test("EnsureRequirements for sort operator after left outer sort merge join") {
  // A left outer SMJ only guarantees the left key's order, so requiring the
  // right key's order must add a sort.
  val smj = SortMergeJoinExec(exprA :: Nil, exprB :: Nil, LeftOuter, None, planA, planB)
  for ((ordering, needSort) <- Seq(orderingA -> false, orderingB -> true)) {
    assertSortRequirementsAreSatisfied(
      childPlan = smj,
      requiredOrdering = Seq(ordering),
      shouldHaveSort = needSort)
  }
}
test("EnsureRequirements for sort operator after right outer sort merge join") {
  // A right outer SMJ only guarantees the right key's order, so requiring the
  // left key's order must add a sort.
  val smj = SortMergeJoinExec(exprA :: Nil, exprB :: Nil, RightOuter, None, planA, planB)
  for ((ordering, needSort) <- Seq(orderingA -> true, orderingB -> false)) {
    assertSortRequirementsAreSatisfied(
      childPlan = smj,
      requiredOrdering = Seq(ordering),
      shouldHaveSort = needSort)
  }
}
test("EnsureRequirements adds sort after full outer sort merge join") {
  // A full outer SMJ preserves neither key's order: both sides need a sort.
  val smj = SortMergeJoinExec(exprA :: Nil, exprB :: Nil, FullOuter, None, planA, planB)
  for (ordering <- Seq(orderingA, orderingB)) {
    assertSortRequirementsAreSatisfied(
      childPlan = smj,
      requiredOrdering = Seq(ordering),
      shouldHaveSort = true)
  }
}
test("EnsureRequirements adds sort when there is no existing ordering") {
  // Child reports no output ordering at all, so any required ordering forces a sort.
  assertSortRequirementsAreSatisfied(
    childPlan = DummySparkPlan(outputOrdering = Seq.empty),
    requiredOrdering = Seq(orderingB),
    shouldHaveSort = true)
}
test("EnsureRequirements skips sort when required ordering is prefix of existing ordering") {
  // Child is already sorted by (A, B); requiring only A is satisfied as-is.
  assertSortRequirementsAreSatisfied(
    childPlan = DummySparkPlan(outputOrdering = Seq(orderingA, orderingB)),
    requiredOrdering = Seq(orderingA),
    shouldHaveSort = false)
}
test("EnsureRequirements skips sort when required ordering is semantically equal to " +
  "existing ordering") {
  // Two references to the same attribute (same ExprId), one qualified and one
  // not: they are not `==`, but they are semantically equal, which is all
  // EnsureRequirements should need to elide the sort.
  val exprId: ExprId = NamedExpression.newExprId
  val qualifiedAttr = AttributeReference(
    name = "col1",
    dataType = LongType,
    nullable = false
  )(exprId = exprId, qualifier = Seq("col1_qualifier"))
  val bareAttr = AttributeReference(
    name = "col1",
    dataType = LongType,
    nullable = false
  )(exprId = exprId)
  val orderingA1 = SortOrder(qualifiedAttr, Ascending)
  val orderingA2 = SortOrder(bareAttr, Ascending)
  assert(orderingA1 != orderingA2, s"$orderingA1 should NOT equal to $orderingA2")
  assert(orderingA1.semanticEquals(orderingA2),
    s"$orderingA1 should be semantically equal to $orderingA2")
  assertSortRequirementsAreSatisfied(
    childPlan = DummySparkPlan(outputOrdering = Seq(orderingA1)),
    requiredOrdering = Seq(orderingA2),
    shouldHaveSort = false)
}
// This is a regression test for SPARK-11135
test("EnsureRequirements adds sort when required ordering isn't a prefix of existing ordering") {
  // Sorted only by A, but (A, B) is required: a shorter existing ordering is
  // not sufficient, so a sort must be added.
  assertSortRequirementsAreSatisfied(
    childPlan = DummySparkPlan(outputOrdering = Seq(orderingA)),
    requiredOrdering = Seq(orderingA, orderingB),
    shouldHaveSort = true)
}
test("SPARK-24242: RangeExec should have correct output ordering and partitioning") {
  val df = spark.range(10)
  val physicalRange = df.queryExecution.executedPlan.collect { case r: RangeExec => r }.head
  val logicalRange = df.queryExecution.optimizedPlan.collect { case r: Range => r }.head
  // The physical node must report the same ordering as the logical Range, and
  // a RangePartitioning over that ordering with the actual partition count.
  assert(physicalRange.outputOrdering == logicalRange.outputOrdering)
  assert(physicalRange.outputPartitioning ==
    RangePartitioning(physicalRange.outputOrdering, df.rdd.getNumPartitions))

  // A single-partition range degenerates to SinglePartition.
  val onePartitionExec = spark.range(1, 10, 1, 1).queryExecution.executedPlan.collect {
    case r: RangeExec => r
  }.head
  assert(onePartitionExec.outputPartitioning == SinglePartition)

  // An empty range (step moves away from the end) yields zero partitions.
  val zeroPartitionExec = spark.range(-10, -9, -20, 1).queryExecution.executedPlan.collect {
    case r: RangeExec => r
  }.head
  assert(zeroPartitionExec.outputPartitioning == UnknownPartitioning(0))
}
test("SPARK-24495: EnsureRequirements can return wrong plan when reusing the same key in join") {
  val left = DummySparkPlan(outputOrdering = Seq(orderingA),
    outputPartitioning = HashPartitioning(exprA :: exprA :: Nil, 5))
  val right = DummySparkPlan(outputOrdering = Seq(orderingB),
    outputPartitioning = HashPartitioning(exprB :: Nil, 5))
  val smjExec = SortMergeJoinExec(
    exprA :: exprA :: Nil, exprB :: exprC :: Nil, Inner, None, left, right)
  EnsureRequirements(spark.sessionState.conf).apply(smjExec) match {
    case SortMergeJoinExec(leftKeys, rightKeys, _, _, _, _) =>
      // The duplicated left key must survive intact and the right keys must
      // keep their original order.
      assert(leftKeys == Seq(exprA, exprA))
      assert(rightKeys == Seq(exprB, exprC))
    case _ => fail()
  }
}
test("SPARK-24500: create union with stream of children") {
  // The children are deliberately a Stream (a lazy Seq): this exercises
  // planning/execution over a lazily evaluated child sequence. Success is
  // simply that execute() does not throw.
  val df = Union(Stream(
    Range(1, 1, 1, 1),
    Range(1, 2, 1, 1)))
  df.queryExecution.executedPlan.execute()
}
test("SPARK-25278: physical nodes should be different instances for same logical nodes") {
  val range = Range(1, 1, 1, 1)
  val df = Union(range, range)
  val logicalRanges = df.queryExecution.optimizedPlan.collect { case r: Range => r }
  assert(logicalRanges.length == 2)
  // The two logical Range nodes compare equal (same arguments)...
  assert(logicalRanges.head == logicalRanges.last)
  val physicalRanges = df.queryExecution.sparkPlan.collect { case r: RangeExec => r }
  assert(physicalRanges.length == 2)
  // ...but planning must still produce two distinct RangeExec instances
  // (reference inequality, hence eq).
  assert(!physicalRanges.head.eq(physicalRanges.last))
}
test("SPARK-24556: always rewrite output partitioning in ReusedExchangeExec " +
  "and InMemoryTableScanExec") {
  // Asserts exactly one plan was found and that its outputPartitioning has the
  // expected class while referencing only attributes the node itself outputs,
  // i.e. the partitioning expressions were rewritten in terms of this node.
  def checkOutputPartitioningRewrite(
      plans: Seq[SparkPlan],
      expectedPartitioningClass: Class[_]): Unit = {
    assert(plans.size == 1)
    val plan = plans.head
    val partitioning = plan.outputPartitioning
    assert(partitioning.getClass == expectedPartitioningClass)
    val partitionedAttrs = partitioning.asInstanceOf[Expression].references
    assert(partitionedAttrs.subsetOf(plan.outputSet))
  }
  // Finds the single ReusedExchangeExec in the executed plan and checks it.
  def checkReusedExchangeOutputPartitioningRewrite(
      df: DataFrame,
      expectedPartitioningClass: Class[_]): Unit = {
    val reusedExchange = df.queryExecution.executedPlan.collect {
      case r: ReusedExchangeExec => r
    }
    checkOutputPartitioningRewrite(reusedExchange, expectedPartitioningClass)
  }
  // Finds the single InMemoryTableScanExec in the executed plan and checks it.
  def checkInMemoryTableScanOutputPartitioningRewrite(
      df: DataFrame,
      expectedPartitioningClass: Class[_]): Unit = {
    val inMemoryScan = df.queryExecution.executedPlan.collect {
      case m: InMemoryTableScanExec => m
    }
    checkOutputPartitioningRewrite(inMemoryScan, expectedPartitioningClass)
  }
  // ReusedExchange is HashPartitioning
  val df1 = Seq(1 -> "a").toDF("i", "j").repartition($"i")
  val df2 = Seq(1 -> "a").toDF("i", "j").repartition($"i")
  checkReusedExchangeOutputPartitioningRewrite(df1.union(df2), classOf[HashPartitioning])
  // ReusedExchange is RangePartitioning
  val df3 = Seq(1 -> "a").toDF("i", "j").orderBy($"i")
  val df4 = Seq(1 -> "a").toDF("i", "j").orderBy($"i")
  checkReusedExchangeOutputPartitioningRewrite(df3.union(df4), classOf[RangePartitioning])
  // InMemoryTableScan is HashPartitioning
  // (persist first so the identical query below hits the cached plan)
  Seq(1 -> "a").toDF("i", "j").repartition($"i").persist()
  checkInMemoryTableScanOutputPartitioningRewrite(
    Seq(1 -> "a").toDF("i", "j").repartition($"i"), classOf[HashPartitioning])
  // InMemoryTableScan is RangePartitioning
  spark.range(1, 100, 1, 10).toDF().persist()
  checkInMemoryTableScanOutputPartitioningRewrite(
    spark.range(1, 100, 1, 10).toDF(), classOf[RangePartitioning])
  // InMemoryTableScan is PartitioningCollection
  // (broadcast joins disabled so the join produces a sort-merge plan whose
  // output partitioning is a PartitioningCollection)
  withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
    Seq(1 -> "a").toDF("i", "j").join(Seq(1 -> "a").toDF("m", "n"), $"i" === $"m").persist()
    checkInMemoryTableScanOutputPartitioningRewrite(
      Seq(1 -> "a").toDF("i", "j").join(Seq(1 -> "a").toDF("m", "n"), $"i" === $"m"),
      classOf[PartitioningCollection])
  }
}
}
// Used for unit-testing EnsureRequirements
// A fully configurable plan node: every planner-facing property (children,
// ordering, partitioning, child requirements) is injectable via defaults.
// It can be planned but never executed.
private case class DummySparkPlan(
    override val children: Seq[SparkPlan] = Nil,
    override val outputOrdering: Seq[SortOrder] = Nil,
    override val outputPartitioning: Partitioning = UnknownPartitioning(0),
    override val requiredChildDistribution: Seq[Distribution] = Nil,
    override val requiredChildOrdering: Seq[Seq[SortOrder]] = Nil
  ) extends SparkPlan {
  // Execution is intentionally unsupported: these nodes exist only for planning tests.
  override protected def doExecute(): RDD[InternalRow] = throw new NotImplementedError
  override def output: Seq[Attribute] = Seq.empty
}
| michalsenkyr/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala | Scala | apache-2.0 | 33,626 |
/*
* Copyright 2013 Bruno Bieth
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.backuity.matchete
/** Matchers for comparing values that have an `Ordering`: `be_<`, `be_>`, `be_<=`, `be_>=`. */
trait OrderedMatchers extends CoreMatcherSupport {

  /** Builds a matcher validating `value op ref`, described by `opSymbol`. */
  private def comparisonMatcher[O](op: (O, O) => Boolean, opSymbol: String)(ref: O)(
      implicit formatter: Formatter[O]) =
    matcher[O](
      description = s"be $opSymbol $ref",
      validate = op(_, ref),
      failureDescription =
        (other: O) => s"${formatter.format(other)} is not $opSymbol ${formatter.format(ref)}")

  def be_<[O: Ordering : Formatter](max: O) =
    comparisonMatcher[O](implicitly[Ordering[O]].lt, "<")(max)

  def be_>[O: Ordering : Formatter](min: O) =
    comparisonMatcher[O](implicitly[Ordering[O]].gt, ">")(min)

  def be_<=[O: Ordering : Formatter](max: O) =
    comparisonMatcher[O](implicitly[Ordering[O]].lteq, "<=")(max)

  def be_>=[O: Ordering : Formatter](min: O) =
    comparisonMatcher[O](implicitly[Ordering[O]].gteq, ">=")(min)
}
| backuity/matchete | core/src/main/scala/org/backuity/matchete/OrderedMatchers.scala | Scala | apache-2.0 | 1,416 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.kafka
import java.util.Properties
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.util.Random
import scala.util.control.NonFatal
import kafka.api._
import kafka.common.{ErrorMapping, OffsetAndMetadata, OffsetMetadataAndError, TopicAndPartition}
import kafka.consumer.{ConsumerConfig, SimpleConsumer}
import org.apache.spark.SparkException
import org.apache.spark.annotation.DeveloperApi
/**
* :: DeveloperApi ::
* Convenience methods for interacting with a Kafka cluster.
* See <a href="https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol">
* A Guide To The Kafka Protocol</a> for more details on individual api calls.
* @param kafkaParams Kafka <a href="http://kafka.apache.org/documentation.html#configuration">
* configuration parameters</a>.
* Requires "metadata.broker.list" or "bootstrap.servers" to be set with Kafka broker(s),
* NOT zookeeper servers, specified in host1:port1,host2:port2 form
*/
@DeveloperApi
class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable {
  import KafkaCluster.{Err, LeaderOffset, SimpleConsumerConfig}

  // ConsumerConfig isn't serializable
  @transient private var _config: SimpleConsumerConfig = null

  // Lazily (re)builds the config: the @transient field is null again after
  // deserialization on an executor, hence the synchronized null check.
  def config: SimpleConsumerConfig = this.synchronized {
    if (_config == null) {
      _config = SimpleConsumerConfig(kafkaParams)
    }
    _config
  }

  /** Opens a SimpleConsumer to the given broker; the caller is responsible for closing it. */
  def connect(host: String, port: Int): SimpleConsumer =
    new SimpleConsumer(host, port, config.socketTimeoutMs,
      config.socketReceiveBufferBytes, config.clientId)

  /** Finds the leader broker of the given topic/partition and connects to it. */
  def connectLeader(topic: String, partition: Int): Either[Err, SimpleConsumer] =
    findLeader(topic, partition).right.map(hp => connect(hp._1, hp._2))

  // Metadata api
  // scalastyle:off
  // https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-MetadataAPI
  // scalastyle:on

  /**
   * Asks seed brokers (in random order) for the current leader of one
   * topic/partition; Left accumulates the errors from all failed brokers.
   */
  def findLeader(topic: String, partition: Int): Either[Err, (String, Int)] = {
    val req = TopicMetadataRequest(TopicMetadataRequest.CurrentVersion,
      0, config.clientId, Seq(topic))
    val errs = new Err
    withBrokers(Random.shuffle(config.seedBrokers), errs) { consumer =>
      val resp: TopicMetadataResponse = consumer.send(req)
      resp.topicsMetadata.find(_.topic == topic).flatMap { tm: TopicMetadata =>
        tm.partitionsMetadata.find(_.partitionId == partition)
      }.foreach { pm: PartitionMetadata =>
        pm.leader.foreach { leader =>
          // NOTE: nonlocal return from inside the closure — exits findLeader
          // with the first leader found, skipping remaining brokers.
          return Right((leader.host, leader.port))
        }
      }
    }
    Left(errs)
  }

  /**
   * Resolves the leader (host, port) for every requested topic/partition.
   * Left if any partition's leader could not be determined.
   */
  def findLeaders(
      topicAndPartitions: Set[TopicAndPartition]
    ): Either[Err, Map[TopicAndPartition, (String, Int)]] = {
    val topics = topicAndPartitions.map(_.topic)
    val response = getPartitionMetadata(topics).right
    val answer = response.flatMap { tms: Set[TopicMetadata] =>
      val leaderMap = tms.flatMap { tm: TopicMetadata =>
        tm.partitionsMetadata.flatMap { pm: PartitionMetadata =>
          val tp = TopicAndPartition(tm.topic, pm.partitionId)
          // Keep only the partitions that were actually asked for.
          if (topicAndPartitions(tp)) {
            pm.leader.map { l =>
              tp -> (l.host -> l.port)
            }
          } else {
            None
          }
        }
      }.toMap
      // All-or-nothing: a partial leader map is treated as failure.
      if (leaderMap.keys.size == topicAndPartitions.size) {
        Right(leaderMap)
      } else {
        val missing = topicAndPartitions.diff(leaderMap.keySet)
        val err = new Err
        err += new SparkException(s"Couldn't find leaders for ${missing}")
        Left(err)
      }
    }
    answer
  }

  /** Lists all partitions of the given topics as TopicAndPartition values. */
  def getPartitions(topics: Set[String]): Either[Err, Set[TopicAndPartition]] = {
    getPartitionMetadata(topics).right.map { r =>
      r.flatMap { tm: TopicMetadata =>
        tm.partitionsMetadata.map { pm: PartitionMetadata =>
          TopicAndPartition(tm.topic, pm.partitionId)
        }
      }
    }
  }

  /**
   * Fetches topic metadata from the first seed broker that answers without a
   * per-topic error; Left accumulates any per-topic errors encountered.
   */
  def getPartitionMetadata(topics: Set[String]): Either[Err, Set[TopicMetadata]] = {
    val req = TopicMetadataRequest(
      TopicMetadataRequest.CurrentVersion, 0, config.clientId, topics.toSeq)
    val errs = new Err
    withBrokers(Random.shuffle(config.seedBrokers), errs) { consumer =>
      val resp: TopicMetadataResponse = consumer.send(req)
      val respErrs = resp.topicsMetadata.filter(m => m.errorCode != ErrorMapping.NoError)
      if (respErrs.isEmpty) {
        // Nonlocal return: first fully clean response wins.
        return Right(resp.topicsMetadata.toSet)
      } else {
        respErrs.foreach { m =>
          val cause = ErrorMapping.exceptionFor(m.errorCode)
          val msg = s"Error getting partition metadata for '${m.topic}'. Does the topic exist?"
          errs += new SparkException(msg, cause)
        }
      }
    }
    Left(errs)
  }

  // Leader offset api
  // scalastyle:off
  // https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetAPI
  // scalastyle:on

  /** Latest (log-end) offset per partition, asked from each partition's leader. */
  def getLatestLeaderOffsets(
      topicAndPartitions: Set[TopicAndPartition]
    ): Either[Err, Map[TopicAndPartition, LeaderOffset]] =
    getLeaderOffsets(topicAndPartitions, OffsetRequest.LatestTime)

  /** Earliest (log-begin) offset per partition, asked from each partition's leader. */
  def getEarliestLeaderOffsets(
      topicAndPartitions: Set[TopicAndPartition]
    ): Either[Err, Map[TopicAndPartition, LeaderOffset]] =
    getLeaderOffsets(topicAndPartitions, OffsetRequest.EarliestTime)

  /** Single offset per partition at the given time (see OffsetRequest constants). */
  def getLeaderOffsets(
      topicAndPartitions: Set[TopicAndPartition],
      before: Long
    ): Either[Err, Map[TopicAndPartition, LeaderOffset]] = {
    getLeaderOffsets(topicAndPartitions, before, 1).right.map { r =>
      r.map { kv =>
        // mapValues isn't serializable, see SI-7005
        kv._1 -> kv._2.head
      }
    }
  }

  // Inverts a map: value -> all keys that mapped to it. Used to group
  // partitions by their leader broker so one request serves many partitions.
  private def flip[K, V](m: Map[K, V]): Map[V, Seq[K]] =
    m.groupBy(_._2).map { kv =>
      kv._1 -> kv._2.keys.toSeq
    }

  /**
   * Up to `maxNumOffsets` offsets per partition at the given time, fetched
   * from each partition's leader; all partitions must succeed.
   */
  def getLeaderOffsets(
      topicAndPartitions: Set[TopicAndPartition],
      before: Long,
      maxNumOffsets: Int
    ): Either[Err, Map[TopicAndPartition, Seq[LeaderOffset]]] = {
    findLeaders(topicAndPartitions).right.flatMap { tpToLeader =>
      val leaderToTp: Map[(String, Int), Seq[TopicAndPartition]] = flip(tpToLeader)
      val leaders = leaderToTp.keys
      var result = Map[TopicAndPartition, Seq[LeaderOffset]]()
      val errs = new Err
      withBrokers(leaders, errs) { consumer =>
        // Only ask this broker about the partitions it actually leads.
        val partitionsToGetOffsets: Seq[TopicAndPartition] =
          leaderToTp((consumer.host, consumer.port))
        val reqMap = partitionsToGetOffsets.map { tp: TopicAndPartition =>
          tp -> PartitionOffsetRequestInfo(before, maxNumOffsets)
        }.toMap
        val req = OffsetRequest(reqMap)
        val resp = consumer.getOffsetsBefore(req)
        val respMap = resp.partitionErrorAndOffsets
        partitionsToGetOffsets.foreach { tp: TopicAndPartition =>
          respMap.get(tp).foreach { por: PartitionOffsetsResponse =>
            if (por.error == ErrorMapping.NoError) {
              if (por.offsets.nonEmpty) {
                result += tp -> por.offsets.map { off =>
                  LeaderOffset(consumer.host, consumer.port, off)
                }
              } else {
                errs += new SparkException(
                  s"Empty offsets for ${tp}, is ${before} before log beginning?")
              }
            } else {
              errs += ErrorMapping.exceptionFor(por.error)
            }
          }
        }
        if (result.keys.size == topicAndPartitions.size) {
          // Nonlocal return: stop contacting brokers once everything is resolved.
          return Right(result)
        }
      }
      val missing = topicAndPartitions.diff(result.keySet)
      errs += new SparkException(s"Couldn't find leader offsets for ${missing}")
      Left(errs)
    }
  }

  // Consumer offset api
  // scalastyle:off
  // https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetCommit/FetchAPI
  // scalastyle:on

  // this 0 here indicates api version, in this case the original ZK backed api.
  private def defaultConsumerApiVersion: Short = 0

  /**
   * Requires Kafka 0.8.1.1 or later.
   * Defaults to the original ZooKeeper backed API version.
   */
  def getConsumerOffsets(
      groupId: String,
      topicAndPartitions: Set[TopicAndPartition]
    ): Either[Err, Map[TopicAndPartition, Long]] =
    getConsumerOffsets(groupId, topicAndPartitions, defaultConsumerApiVersion)

  /** As above, with an explicit offset-API version; keeps only the raw offsets. */
  def getConsumerOffsets(
      groupId: String,
      topicAndPartitions: Set[TopicAndPartition],
      consumerApiVersion: Short
    ): Either[Err, Map[TopicAndPartition, Long]] = {
    getConsumerOffsetMetadata(groupId, topicAndPartitions, consumerApiVersion).right.map { r =>
      r.map { kv =>
        kv._1 -> kv._2.offset
      }
    }
  }

  /**
   * Requires Kafka 0.8.1.1 or later.
   * Defaults to the original ZooKeeper backed API version.
   */
  def getConsumerOffsetMetadata(
      groupId: String,
      topicAndPartitions: Set[TopicAndPartition]
    ): Either[Err, Map[TopicAndPartition, OffsetMetadataAndError]] =
    getConsumerOffsetMetadata(groupId, topicAndPartitions, defaultConsumerApiVersion)

  /** Fetches committed offset + metadata per partition; all partitions must succeed. */
  def getConsumerOffsetMetadata(
      groupId: String,
      topicAndPartitions: Set[TopicAndPartition],
      consumerApiVersion: Short
    ): Either[Err, Map[TopicAndPartition, OffsetMetadataAndError]] = {
    var result = Map[TopicAndPartition, OffsetMetadataAndError]()
    val req = OffsetFetchRequest(groupId, topicAndPartitions.toSeq, consumerApiVersion)
    val errs = new Err
    withBrokers(Random.shuffle(config.seedBrokers), errs) { consumer =>
      val resp = consumer.fetchOffsets(req)
      val respMap = resp.requestInfo
      // Only the partitions still unresolved from earlier broker attempts.
      val needed = topicAndPartitions.diff(result.keySet)
      needed.foreach { tp: TopicAndPartition =>
        respMap.get(tp).foreach { ome: OffsetMetadataAndError =>
          if (ome.error == ErrorMapping.NoError) {
            result += tp -> ome
          } else {
            errs += ErrorMapping.exceptionFor(ome.error)
          }
        }
      }
      if (result.keys.size == topicAndPartitions.size) {
        // Nonlocal return: everything resolved, skip remaining brokers.
        return Right(result)
      }
    }
    val missing = topicAndPartitions.diff(result.keySet)
    errs += new SparkException(s"Couldn't find consumer offsets for ${missing}")
    Left(errs)
  }

  /**
   * Requires Kafka 0.8.1.1 or later.
   * Defaults to the original ZooKeeper backed API version.
   */
  def setConsumerOffsets(
      groupId: String,
      offsets: Map[TopicAndPartition, Long]
    ): Either[Err, Map[TopicAndPartition, Short]] =
    setConsumerOffsets(groupId, offsets, defaultConsumerApiVersion)

  /** As above with an explicit API version; wraps the plain offsets in OffsetAndMetadata. */
  def setConsumerOffsets(
      groupId: String,
      offsets: Map[TopicAndPartition, Long],
      consumerApiVersion: Short
    ): Either[Err, Map[TopicAndPartition, Short]] = {
    val meta = offsets.map { kv =>
      kv._1 -> OffsetAndMetadata(kv._2)
    }
    setConsumerOffsetMetadata(groupId, meta, consumerApiVersion)
  }

  /**
   * Requires Kafka 0.8.1.1 or later.
   * Defaults to the original ZooKeeper backed API version.
   */
  def setConsumerOffsetMetadata(
      groupId: String,
      metadata: Map[TopicAndPartition, OffsetAndMetadata]
    ): Either[Err, Map[TopicAndPartition, Short]] =
    setConsumerOffsetMetadata(groupId, metadata, defaultConsumerApiVersion)

  /** Commits offsets + metadata per partition; all partitions must be acknowledged. */
  def setConsumerOffsetMetadata(
      groupId: String,
      metadata: Map[TopicAndPartition, OffsetAndMetadata],
      consumerApiVersion: Short
    ): Either[Err, Map[TopicAndPartition, Short]] = {
    var result = Map[TopicAndPartition, Short]()
    val req = OffsetCommitRequest(groupId, metadata, consumerApiVersion)
    val errs = new Err
    val topicAndPartitions = metadata.keySet
    withBrokers(Random.shuffle(config.seedBrokers), errs) { consumer =>
      val resp = consumer.commitOffsets(req)
      val respMap = resp.commitStatus
      val needed = topicAndPartitions.diff(result.keySet)
      needed.foreach { tp: TopicAndPartition =>
        respMap.get(tp).foreach { err: Short =>
          if (err == ErrorMapping.NoError) {
            result += tp -> err
          } else {
            errs += ErrorMapping.exceptionFor(err)
          }
        }
      }
      if (result.keys.size == topicAndPartitions.size) {
        // Nonlocal return: every partition acknowledged the commit.
        return Right(result)
      }
    }
    val missing = topicAndPartitions.diff(result.keySet)
    errs += new SparkException(s"Couldn't set offsets for ${missing}")
    Left(errs)
  }

  // Try a call against potentially multiple brokers, accumulating errors
  // Each broker gets a fresh connection that is always closed; non-fatal
  // failures are recorded in `errs` and the next broker is tried.
  private def withBrokers(brokers: Iterable[(String, Int)], errs: Err)
    (fn: SimpleConsumer => Any): Unit = {
    brokers.foreach { hp =>
      var consumer: SimpleConsumer = null
      try {
        consumer = connect(hp._1, hp._2)
        fn(consumer)
      } catch {
        case NonFatal(e) =>
          errs += e
      } finally {
        if (consumer != null) {
          consumer.close()
        }
      }
    }
  }
}
@DeveloperApi
object KafkaCluster {
  // Accumulator for the errors collected while trying successive brokers.
  type Err = ArrayBuffer[Throwable]

  /** If the result is right, return it, otherwise throw SparkException */
  def checkErrors[T](result: Either[Err, T]): T = {
    result.fold(
      // Join with a real newline ("\n") so each accumulated error is reported
      // on its own line; the previous "\\n" emitted a literal backslash-n.
      errs => throw new SparkException(errs.mkString("\n")),
      ok => ok
    )
  }

  // An offset together with the broker currently leading its partition.
  case class LeaderOffset(host: String, port: Int, offset: Long)

  /**
   * High-level kafka consumers connect to ZK. ConsumerConfig assumes this use case.
   * Simple consumers connect directly to brokers, but need many of the same configs.
   * This subclass won't warn about missing ZK params, or presence of broker params.
   */
  class SimpleConsumerConfig private(brokers: String, originalProps: Properties)
    extends ConsumerConfig(originalProps) {
    // Parsed (host, port) seed brokers; fails fast on an entry missing ":port".
    val seedBrokers: Array[(String, Int)] = brokers.split(",").map { hp =>
      val hpa = hp.split(":")
      if (hpa.size == 1) {
        throw new SparkException(s"Broker not in the correct format of <host>:<port> [$brokers]")
      }
      (hpa(0), hpa(1).toInt)
    }
  }

  object SimpleConsumerConfig {
    /**
     * Make a consumer config without requiring group.id or zookeeper.connect,
     * since communicating with brokers also needs common settings such as timeout
     */
    def apply(kafkaParams: Map[String, String]): SimpleConsumerConfig = {
      // These keys are from other pre-existing kafka configs for specifying brokers, accept either
      val brokers = kafkaParams.get("metadata.broker.list")
        .orElse(kafkaParams.get("bootstrap.servers"))
        .getOrElse(throw new SparkException(
          "Must specify metadata.broker.list or bootstrap.servers"))
      val props = new Properties()
      kafkaParams.foreach { case (key, value) =>
        // prevent warnings on parameters ConsumerConfig doesn't know about
        if (key != "metadata.broker.list" && key != "bootstrap.servers") {
          props.put(key, value)
        }
      }
      // ConsumerConfig insists these ZK-era keys exist; blank them out if absent.
      Seq("zookeeper.connect", "group.id").foreach { s =>
        if (!props.containsKey(s)) {
          props.setProperty(s, "")
        }
      }
      new SimpleConsumerConfig(brokers, props)
    }
  }
}
| aokolnychyi/spark | external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala | Scala | apache-2.0 | 15,803 |
package com.hj.examples
import com.hj.constant.Const
import org.apache.spark.{SparkConf, SparkContext}
object RDFS9 {
  /**
   * Applies the RDFS9 entailment rule over the triples in `input/RDFS9.in`:
   *
   *   u rdf:type v
   *   v rdfs:subClassOf w
   *   =>
   *   u rdf:type w
   *
   * and prints the inferred (instance, superclass) pairs.
   */
  def main(args: Array[String]): Unit = {
    // App name was previously "RDFS9.in" — a copy of the input file name.
    val conf = new SparkConf().setAppName("RDFS9").setMaster("local[2]")
    val sc = new SparkContext(conf)
    try {
      // Parse whitespace-separated (subject, predicate, object) triples.
      val triples = sc.textFile("input/RDFS9.in").map { line =>
        val arr = line.split(" ")
        (arr(0), arr(1), arr(2))
      }
      // (instance, class) pairs from rdf:type triples.
      val types = triples.filter(_._2.equals(Const.RDF_TYPE)).map(t => (t._1, t._3))
      // (subclass, superclass) pairs from rdfs:subClassOf triples.
      val subClass = triples.filter(_._2.equals(Const.RDFS_SUBCLASS_OF)).map(t => (t._1, t._3))
      // Key the type facts by class so they join against the subclass facts.
      val typesByClass = types.map { case (instance, cls) => (cls, instance) }
      // join yields (class, (superclass, instance)); emit (instance, superclass).
      val inferred = subClass.join(typesByClass).map { case (_, (superCls, instance)) =>
        (instance, superCls)
      }
      inferred.foreach(x => println(x))
    } finally {
      // Always release the local Spark cluster, even if the job fails.
      sc.stop()
    }
  }
}
| huangjuegeek/SparkSRE | src/main/scala/com/hj/examples/RDFS9.scala | Scala | apache-2.0 | 864 |
// Copyright: 2010 - 2017 https://github.com/ensime/ensime-server/graphs/contributors
// License: http://www.gnu.org/licenses/lgpl-3.0.en.html
package org.ensime.api
import java.io.File
import scalaz.deriving
import scalaz.std.either._
import scalaz.std.list._
import scalaz.std.option._
import spray.json.{ JsReader, JsWriter }
import org.ensime.io.Canon
import org.ensime.sexp.{ SexpReader, SexpWriter }
/** Reports that an incoming request could not be handled; `error` holds the reason. */
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class RpcRequestInvalid(error: String)
/** Pairs an [[RpcRequest]] with the client-supplied `callId` identifying the call. */
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class RpcRequestEnvelope(req: RpcRequest, callId: Int)
/**
 * All messages into the ENSIME server from the client are part of
 * this family.
 *
 * NOTE: we intend to simplify these messages
 * https://github.com/ensime/ensime-server/issues/845
 */
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
sealed trait RpcRequest
// queries related to connection startup
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
sealed trait RpcStartupRequest extends RpcRequest
/**
 * Responds with a `ConnectionInfo`.
 */
@deprecating("Please switch to asynchronous connection handling.")
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
case object ConnectionInfoReq extends RpcStartupRequest
// related to managing the state of the analyser
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
sealed trait RpcAnalyserRequest extends RpcRequest
/**
 * Request details about implicit conversions applied inside the given
 * range.
 *
 * Responds with `ImplicitInfos`.
 *
 * @param file source.
 * @param range in the file to inspect.
 */
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class ImplicitInfoReq(
  file: Either[File, SourceFileInfo],
  range: OffsetRange
) extends RpcAnalyserRequest
/**
 * Tell the Analyzer that this file has been deleted. This is
 * different to simply unloading the file (which can keeps symbols
 * around).
 *
 * Responds with a `VoidResponse`.
 */
@deprecating("prefer UnloadFilesReq")
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class RemoveFileReq(file: File) extends RpcAnalyserRequest
/**
 * Responds with a `VoidResponse`.
 */
@deprecating("redundant query, use TypecheckFilesReq")
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class TypecheckFileReq(fileInfo: SourceFileInfo)
    extends RpcAnalyserRequest
/**
 * Responds with a `VoidResponse`
 */
@deprecating("prefer UnloadFilesReq")
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class UnloadFileReq(fileInfo: SourceFileInfo)
    extends RpcAnalyserRequest
/**
 * Unload the given files from the compiler. The additional `remove`
 * flag signals if previously loaded symbols should be removed (use
 * this if the user has deleted / renamed the file on disk).
 *
 * Responds with a `VoidResponse`
 */
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class UnloadFilesReq(
  source: List[SourceFileInfo],
  remove: Boolean
) extends RpcAnalyserRequest
/**
 * Response with a `VoidResponse`.
 */
@deprecating("replaced by RestartAnalyzerReq")
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class TypecheckModule(moduleId: EnsimeProjectId)
    extends RpcAnalyserRequest
/**
 * Responds with a `VoidResponse`.
 */
@deprecating("replaced by RestartAnalyzerReq")
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
case object UnloadAllReq extends RpcAnalyserRequest
// How files should be (re)loaded when the compiler restarts; see RestartScalaCompilerReq.
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
sealed trait ReloadStrategy
object ReloadStrategy {
  /** a clean slate, client should reload all open files */
  @deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
  case object UnloadAll extends ReloadStrategy
  /**
   * compiles all project sources, e.g. project is not batch compiled.
   * Client should reload all third party files.
   */
  @deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
  case object LoadProject extends ReloadStrategy
  /** reload all the files that were previously loaded */
  @deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
  case object KeepLoaded extends ReloadStrategy
}
/**
 * Restart the scala presentation compiler for the given id, using the
 * provided file loading strategy.
 *
 * No RPC response, there will be CompilerRestartedEvent
 */
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class RestartScalaCompilerReq(
  id: Option[EnsimeProjectId],
  strategy: ReloadStrategy
) extends RpcAnalyserRequest
/**
 * Responds with a `VoidResponse`.
 */
@deprecating("should only support SourceFileInfo")
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class TypecheckFilesReq(files: List[Either[File, SourceFileInfo]])
    extends RpcAnalyserRequest
// related to searching the indexer
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
sealed trait RpcSearchRequest extends RpcRequest
/**
 * Responds with `SymbolSearchResults`.
 */
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class PublicSymbolSearchReq(
  keywords: List[String],
  maxResults: Int
) extends RpcSearchRequest
/**
 * Responds with [ImportSuggestions].
 */
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class ImportSuggestionsReq(
  file: Either[File, SourceFileInfo],
  point: Int,
  names: List[String],
  maxResults: Int
) extends RpcSearchRequest
/**
 * Responds with `FullyQualifiedName`
 */
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class FqnOfSymbolAtPointReq(file: SourceFileInfo, point: Int)
    extends RpcAnalyserRequest
/**
 * Responds with `FullyQualifiedName`
 */
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class FqnOfTypeAtPointReq(file: SourceFileInfo, point: Int)
    extends RpcAnalyserRequest
/**
 * Responds with `SourcePositions`.
 */
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class UsesOfSymbolAtPointReq(
  file: SourceFileInfo,
  point: Int
) extends RpcRequest
/**
 * Responds with `HierarchyInfo`
 */
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class HierarchyOfTypeAtPointReq(
  file: SourceFileInfo,
  point: Int
) extends RpcRequest
/**
 * Responds with `SourcePositions`.
 */
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class FindUsages(fqn: String) extends RpcSearchRequest
/**
 * Responds with `HierarchyInfo`
 */
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class FindHierarchy(fqn: String) extends RpcSearchRequest
/**
 * Responds with a `StringResponse` for the URL of the documentation if valid,
 * or `FalseResponse`.
 */
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class DocUriAtPointReq(
  file: Either[File, SourceFileInfo],
  point: OffsetRange
) extends RpcAnalyserRequest
/**
 * Responds with a `CompletionInfoList`.
 */
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class CompletionsReq(
  fileInfo: SourceFileInfo,
  point: Int,
  maxResults: Int,
  caseSens: Boolean,
  reload: Boolean
) extends RpcAnalyserRequest
/**
 * Responds with `TypeInfo` if valid, or `FalseResponse`.
 */
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class TypeAtPointReq(
  file: Either[File, SourceFileInfo],
  range: OffsetRange
) extends RpcAnalyserRequest
/**
 * Responds with a `SymbolInfo` if valid, or `FalseResponse`.
 */
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class SymbolAtPointReq(file: Either[File, SourceFileInfo],
                                  point: Int)
    extends RpcAnalyserRequest
/**
 * Responds with a `RefactorFailure` or a `RefactorDiffEffect`.
 */
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class RefactorReq(
  procId: Int,
  params: RefactorDesc,
  interactive: Boolean
) extends RpcAnalyserRequest
/**
 * Request the semantic classes of symbols in the given range.
 * Intended for semantic highlighting.
 *
 * Responds with a `SymbolDesignations`.
 *
 * @param file source.
 * @param start of character offset of the input range.
 * @param end of character offset of the input range.
 * @param requestedTypes semantic classes in which we are interested.
 */
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class SymbolDesignationsReq(
  file: Either[File, SourceFileInfo],
  start: Int,
  end: Int,
  requestedTypes: List[SourceSymbol]
) extends RpcAnalyserRequest
/**
 * Responds with a `FileRange`.
 */
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class ExpandSelectionReq(file: File, start: Int, end: Int)
    extends RpcAnalyserRequest
/**
* Responds with a `StructureView`.
*/
@deriving(JsReader, JsWriter, SexpReader, SexpWriter, Canon)
final case class StructureViewReq(fileInfo: SourceFileInfo)
extends RpcAnalyserRequest
| ensime/ensime-server | api/src/main/scala/org/ensime/api/incoming.scala | Scala | gpl-3.0 | 9,040 |
import org.junit.runner.RunWith
import org.sarrufat.sudoku.Board
import org.scalatest.FlatSpec
import org.scalatest.junit.JUnitRunner
/** Console-printing helper mixed into the test suite. */
trait Output {
  /** Writes `s`, followed by a newline, to the console. */
  def print(s: String): Unit = {
    Console.println(s)
  }
}
@RunWith(classOf[JUnitRunner])
class TestLS extends FlatSpec with Output {
  // Loaded once at construction time; "Sudoku2.txt" must be resolvable by
  // Board.mainBoard (presumably classpath or working directory -- confirm).
  val board = Board.mainBoard("Sudoku2.txt")
  // Prints the board before and after solving (manual/visual check only).
  "Mainboard" should "look" in {
    print(board toString ())
    print("...........\\n")
    print(board.solveBoard toString ())
  }
  // NOTE(review): solveBoard is invoked again here; if solving is
  // expensive, consider caching -- confirm it is side-effect free first.
  "Mainboard" should " be solved" in {
    assert(board.solveBoard.isSolved == true)
  }
}
} | sarrufat/ScalaSudoku | src/test/scala/TestLS.scala | Scala | gpl-3.0 | 574 |
package scorex.transaction
import java.util
import com.google.common.primitives.{Bytes, Ints, Longs}
import play.api.libs.json.{JsObject, Json}
import scorex.account.{Account, PrivateKeyAccount, PublicKeyAccount}
import scorex.crypto.EllipticCurveImpl
import scorex.crypto.encode.Base58
import scorex.serialization.Deser
import scorex.transaction.LagonakiTransaction.TransactionType
import scala.util.{Failure, Try}
/**
 * A signed transfer of `amount` from `sender` to `recipient`, paying `fee`.
 *
 * NOTE(review): `bytes` below prefixes a single type byte, while the
 * companion's `signatureData` (the payload that is signed/verified)
 * encodes the type id as a 4-byte int -- so the signed payload is
 * deliberately not a prefix of `bytes`. Confirm this asymmetry is intended
 * before touching either encoding.
 */
@SerialVersionUID(-4989881425715590828L)
case class PaymentTransaction(sender: PublicKeyAccount,
                              override val recipient: Account,
                              override val amount: Long,
                              override val fee: Long,
                              override val timestamp: Long,
                              override val signature: Array[Byte])
  extends LagonakiTransaction(TransactionType.PaymentTransaction, recipient, amount, fee, timestamp, signature) {
  import scorex.transaction.LagonakiTransaction._
  import scorex.transaction.PaymentTransaction._
  // Serialized size: shared type-byte length plus the fixed payload length
  // defined in the companion object.
  override lazy val dataLength = TypeLength + BaseLength
  override lazy val creator = Some(sender)
  // JSON view: common fields from jsonBase() plus payment-specific ones.
  override lazy val json: JsObject = jsonBase() ++ Json.obj(
    "sender" -> sender.address,
    "recipient" -> recipient.address,
    "amount" -> amount
  )
  // Binary layout: type | timestamp | sender pubkey | recipient | amount | fee | signature
  // (must stay in sync with the companion's parseTail).
  override lazy val bytes: Array[Byte] = {
    val typeBytes = Array(TypeId.toByte)
    val timestampBytes = Longs.toByteArray(timestamp)
    val amountBytes = Longs.toByteArray(amount)
    val feeBytes = Longs.toByteArray(fee)
    Bytes.concat(typeBytes, timestampBytes, sender.publicKey, recipient.bytes, amountBytes, feeBytes, signature)
  }
  // Verifies `signature` over the canonical signing payload.
  override lazy val signatureValid: Boolean = {
    val data = signatureData(sender, recipient, amount, fee, timestamp)
    EllipticCurveImpl.verify(signature, data, sender.publicKey)
  }
  // Structural validation only (address shape, positive amount/fee);
  // balance sufficiency is not checked here.
  override def validate: ValidationResult.Value =
    if (!Account.isValid(recipient)) {
      ValidationResult.InvalidAddress //CHECK IF RECIPIENT IS VALID ADDRESS
    } else if (amount <= 0) {
      ValidationResult.NegativeAmount //CHECK IF AMOUNT IS POSITIVE
    } else if (fee <= 0) {
      ValidationResult.NegativeFee //CHECK IF FEE IS POSITIVE
    } else ValidationResult.ValidateOke
  // Net balance effect for `account`: self-payment costs only the fee;
  // unrelated accounts see 0.
  override def involvedAmount(account: Account): Long = {
    val address = account.address
    if (address.equals(sender.address) && address.equals(recipient.address)) {
      -fee
    } else if (address.equals(sender.address)) {
      -amount - fee
    } else if (address.equals(recipient.address)) {
      amount
    } else 0
  }
  override def balanceChanges(): Seq[(Account, Long)] =
    Seq((sender, -amount - fee), (recipient, amount))
}
/**
 * Companion: serialization constants, signing helpers and deserialization
 * for [[PaymentTransaction]].
 */
object PaymentTransaction extends Deser[PaymentTransaction] {

  import scorex.transaction.LagonakiTransaction._

  // Fixed field widths (bytes) of the serialized payload; BaseLength is
  // the total payload size after the leading type byte.
  private val SenderLength = 32
  private val FeeLength = 8
  private val SignatureLength = 64
  private val BaseLength = TimestampLength + SenderLength + RecipientLength + AmountLength + FeeLength + SignatureLength

  /** Builds and signs a payment with the sender's private key. */
  def apply(sender: PrivateKeyAccount, recipient: Account,
            amount: Long, fee: Long, timestamp: Long): PaymentTransaction = {
    val sig = generateSignature(sender, recipient, amount, fee, timestamp)
    PaymentTransaction(sender, recipient, amount, fee, timestamp, sig)
  }

  /**
   * Parses a serialized transaction including its leading type byte.
   *
   * Fix: the previous implementation called `data.head` outside any `Try`,
   * so empty input escaped the `Try` contract by throwing
   * `NoSuchElementException` at the caller. Empty input is now reported as
   * a `Failure`.
   */
  def parseBytes(data: Array[Byte]): Try[PaymentTransaction] =
    data.headOption match {
      case Some(transactionType) if transactionType == TransactionType.PaymentTransaction.id =>
        parseTail(data.tail)
      case Some(transactionType) =>
        Failure(new Exception(s"Incorrect transaction type '$transactionType' in PaymentTransaction data"))
      case None =>
        Failure(new Exception("No data to parse in PaymentTransaction"))
    }

  /**
   * Parses the fixed-layout payload that follows the type byte:
   * timestamp | sender pubkey | recipient | amount | fee | signature.
   * The `require` guarantees every copyOfRange below stays in bounds.
   */
  def parseTail(data: Array[Byte]): Try[PaymentTransaction] = Try {
    require(data.length >= BaseLength, "Data does not match base length")

    var position = 0

    //READ TIMESTAMP
    val timestampBytes = data.take(TimestampLength)
    val timestamp = Longs.fromByteArray(timestampBytes)
    position += TimestampLength

    //READ SENDER
    val senderBytes = util.Arrays.copyOfRange(data, position, position + SenderLength)
    val sender = new PublicKeyAccount(senderBytes)
    position += SenderLength

    //READ RECIPIENT
    val recipientBytes = util.Arrays.copyOfRange(data, position, position + RecipientLength)
    val recipient = new Account(Base58.encode(recipientBytes))
    position += RecipientLength

    //READ AMOUNT
    val amountBytes = util.Arrays.copyOfRange(data, position, position + AmountLength)
    val amount = Longs.fromByteArray(amountBytes)
    position += AmountLength

    //READ FEE
    val feeBytes = util.Arrays.copyOfRange(data, position, position + FeeLength)
    val fee = Longs.fromByteArray(feeBytes)
    position += FeeLength

    //READ SIGNATURE
    val signatureBytes = util.Arrays.copyOfRange(data, position, position + SignatureLength)

    PaymentTransaction(sender, recipient, amount, fee, timestamp, signatureBytes)
  }

  /** Signs the canonical payload produced by `signatureData`. */
  def generateSignature(sender: PrivateKeyAccount, recipient: Account,
                        amount: Long, fee: Long, timestamp: Long): Array[Byte] = {
    EllipticCurveImpl.sign(sender, signatureData(sender, recipient, amount, fee, timestamp))
  }

  // Canonical signing payload. NOTE: encodes the type id as a 4-byte int,
  // whereas PaymentTransaction.bytes uses a single type byte -- changing
  // either breaks existing signatures.
  private def signatureData(sender: PublicKeyAccount, recipient: Account,
                            amount: Long, fee: Long, timestamp: Long): Array[Byte] = {
    val typeBytes = Ints.toByteArray(TransactionType.PaymentTransaction.id)
    val timestampBytes = Longs.toByteArray(timestamp)
    val amountBytes = Longs.toByteArray(amount)
    val feeBytes = Longs.toByteArray(fee)
    Bytes.concat(typeBytes, timestampBytes, sender.publicKey, recipient.bytes, amountBytes, feeBytes)
  }
}
| alexeykiselev/WavesScorex | scorex-transaction/src/main/scala/scorex/transaction/PaymentTransaction.scala | Scala | cc0-1.0 | 5,672 |
package domala.tests.entity.noanno
import java.time.{LocalDate, LocalDateTime}
import domala._
import domala.jdbc.{BatchResult, Config, Result}
import domala.tests.H2TestConfigTemplate
import org.scalatest.{BeforeAndAfter, FunSuite}
import org.seasar.doma.BatchInsert
/**
 * End-to-end tests for a domala DAO over an entity with embedded (nested
 * case class) properties, run against an H2 test database. The table is
 * created before and dropped after every test.
 */
class EmbeddedTestSuite extends FunSuite with BeforeAndAfter {
  implicit val config: Config = new H2TestConfigTemplate("runtime-embedded"){}
  val dao: RuntimeEmbeddedDao = RuntimeEmbeddedDao.impl
  // Fixture rows; `entity` exercises nanosecond timestamp precision.
  val entity = Embedded(Some(ID(1)), Name("foo"), Times(Some(MyTime(LocalDateTime.of(2017, 1, 12, 12, 59, 59, 999999999))), LocalDate.of(2018, 1, 12)), Values(123.456, Some(BigDecimal(987.654))))
  val entities = Seq(
    Embedded(Some(ID(2)), Name("bar"), Times(None, LocalDate.of(2018, 1, 13)), Values(234.567, Some(BigDecimal(876.543)))),
    Embedded(Some(ID(3)), Name("baz"), Times(Some(MyTime(LocalDateTime.of(2017, 1, 13, 0, 0, 0, 1))), LocalDate.of(2018, 1, 13)), Values(345.678, Some(BigDecimal(765.432))))
  )
  before {
    Required {
      dao.create()
    }
  }
  after {
    Required {
      dao.drop()
    }
  }
  test("insert & select all") {
    Required {
      val Result(cnt, inserted) = dao.insert(entity)
      assert(cnt == 1)
      assert(inserted == entity)
      dao.batchInsert(entities)
      assert(dao.selectAll() == entity +: entities)
    }
  }
  test("update") {
    Required {
      dao.batchInsert(entities)
      val modified = entities.head.copy(name = Name("xxx"))
      val Result(cnt, updated) = dao.update(modified)
      assert(cnt == 1)
      assert(updated == modified)
      assert(dao.selectAll() == modified +: entities.drop(1))
    }
  }
  test("delete") {
    Required {
      dao.batchInsert(entities)
      val Result(cnt, deleted) = dao.delete(entities.head)
      assert(cnt == 1)
      assert(deleted == entities.head)
      assert(dao.selectAll() == entities.drop(1))
    }
  }
  // The hand-written insert SQL omits value2, so it reads back as None.
  test("insert with sql") {
    Required {
      val Result(cnt, inserted) = dao.insertWithSql(entity)
      assert(cnt == 1)
      assert(inserted == entity)
      assert(dao.selectAll() == inserted.copy(values = inserted.values.copy(value2 = None)) :: Nil)
    }
  }
  test("update with sql") {
    Required {
      dao.batchInsert(entities)
      val modified = entities.drop(1).head.copy(name = Name("xxx"))
      val Result(cnt, updated) = dao.updateWithSql(modified, Name("baz"))
      assert(cnt == 1)
      assert(updated == modified)
      assert(dao.selectAll() == entities.head :: modified :: Nil)
    }
  }
  test("delete with sql") {
    Required {
      dao.batchInsert(entities)
      val cnt = dao.deleteWithSql(Name("baz"))
      assert(cnt == 1)
      assert(dao.selectAll() == entities.head :: Nil)
    }
  }
  test("batch insert with sql") {
    Required {
      val BatchResult(cnt, inserted) = dao.batchInsertWithSql(entities)
      assert(cnt sameElements Array(1, 1))
      assert(inserted == entities)
      assert(dao.selectAll() == entities.map(e => e.copy(values = e.values.copy(value2 = None))))
    }
  }
  // NOTE(review): `e.copy(values = e.values)` is a no-op copy; the updated
  // rows equal the originals -- presumably only the populate SQL path is
  // being exercised here. Confirm intent.
  test("batch update with sql") {
    Required {
      dao.batchInsert(entities)
      val BatchResult(cnt, updated) = dao.batchUpdateWithSql(entities.map(e => e.copy(values = e.values)))
      assert(cnt sameElements Array(1, 1))
      assert(updated == entities.map(e => e.copy(values = e.values)))
      assert(dao.selectAll() == entities.map(e => e.copy(values = e.values)))
    }
  }
  test("batch delete with sql") {
    Required {
      dao.batchInsert(entities)
      val BatchResult(cnt, deleted) = dao.batchDeleteWithSql(entities)
      assert(cnt sameElements Array(1, 1))
      assert(deleted == entities)
      assert(dao.selectAll() == Nil)
    }
  }
  // NOTE(review): the interpolated INSERT below ends its VALUES list with a
  // trailing comma before ')', and the DELETE omits FROM; the suite passes,
  // so domala's SQL handling apparently tolerates both -- confirm before
  // relying on this elsewhere.
  test("sql interpolation") {
    Required {
      val insert = (entity: Embedded) =>
        update"""
          insert into runtime_entity (id, name, time, value, value2, date) values(
          ${entity.id}, ${entity.name}, ${entity.times.time}, ${entity.values.value}, ${entity.values.value2}, ${entity.times.date},
          )"""
      insert(entity).execute()
      entities.foreach(insert andThen(_.execute()))
      val update = (id: ID[Embedded], name: Name) =>
        update"""
          update runtime_entity set name = $name where id = $id
        """
      update(ID(2), Name("hoge")).execute()
      val selectByIds = (ids: Seq[ID[Embedded]]) =>
        select"""
          select /*%expand*/* from runtime_entity where id in ($ids)
        """
      assert(selectByIds(Seq(ID(1), ID(2), ID(3))).getList[Embedded] == entity +: entities.head.copy(name = Name("hoge")) +: entities.drop(1))
      val delete = (ids: Seq[ID[Embedded]]) =>
        update"""
          delete runtime_entity where id in ($ids)
        """
      delete(Seq(ID(2))).execute()
      assert(selectByIds(Seq(ID(1), ID(2), ID(3))).getList[Embedded] == entity +: entities.drop(1))
    }
  }
}
/**
 * Entity mapped to table `runtime_entity`; `times` and `values` are
 * embedded properties whose fields map to columns of the same table.
 */
@Table("runtime_entity")
case class Embedded(
  // Identity-generated primary key; None before insert.
  @Id
  id: Option[ID[Embedded]],
  name: Name,
  times: Times,
  values: Values
)
/** Embedded time-related columns (`time`, `date`) of `runtime_entity`. */
case class Times(
  time: Option[MyTime],
  date: LocalDate
)
/** Embedded numeric columns (`value`, `value2`) of `runtime_entity`. */
case class Values(
  value: Double,
  value2: Option[BigDecimal]
)
/**
 * DAO for [[Embedded]] covering auto-generated and hand-written SQL for
 * insert/update/delete, in both single and batch form.
 */
@Dao
trait RuntimeEmbeddedDao {
  // DDL for the test table; executed before each test.
  @Script(sql =
    """
    create table runtime_entity(
      id int not null identity primary key,
      name varchar(20),
      time timestamp,
      value double,
      value2 double,
      date date
    );
    """)
  def create()
  @Script(sql = "drop table runtime_entity")
  def drop()
  @Select("select * from runtime_entity order by id")
  def selectAll(): Seq[Embedded]
  @Insert
  def insert(entity: Embedded): Result[Embedded]
  @Update
  def update(entity: Embedded): Result[Embedded]
  @Delete
  def delete(entity: Embedded): Result[Embedded]
  // Hand-written insert; deliberately omits value2 (read back as None).
  @Insert(sql = """
  insert into runtime_entity (id, name, time, value, date)
  values(
    /* entity.id */0,
    /* entity.name */'foo',
    /* entity.times.time */'2018-01-01 23:59:59.999999',
    /* entity.values.value */0.0,
    /* entity.times.date */'2018-01-01'
  )
  """)
  def insertWithSql(entity: Embedded): Result[Embedded]
  @Update(sql = """
  update runtime_entity
  set /*%populate*/id
  where name = /* name */'hoge'
  """)
  def updateWithSql(entity: Embedded, name: Name): Result[Embedded]
  // NOTE(review): annotated @Update but executes a DELETE statement; the
  // tests pass, so domala evidently accepts any modifying statement here,
  // though @Delete would better reflect intent -- confirm before changing.
  @Update(sql = """
  delete from runtime_entity
  where name = /* name */'hoge'
  """)
  def deleteWithSql(name: Name): Int
  @BatchInsert
  def batchInsert(entity: Seq[Embedded]): BatchResult[Embedded]
  @BatchInsert("""
  insert into runtime_entity (id, name, time, value, date)
  values(
    /* entity.id */0,
    /* entity.name */'foo',
    /* entity.times.time */'2018-01-01 23:59:59.999999',
    /* entity.values.value */0.0,
    /* entity.times.date */'2018-01-01'
  )
  """)
  def batchInsertWithSql(entity: Seq[Embedded]): BatchResult[Embedded]
  @BatchUpdate(sql = """
  update runtime_entity
  set /*%populate*/id
  where id = /* entity.id */0
  """)
  def batchUpdateWithSql(entity: Seq[Embedded]): BatchResult[Embedded]
  // NOTE(review): DELETE without FROM; H2 apparently accepts it since the
  // suite passes -- confirm before porting to another database.
  @BatchDelete(sql = """
  delete runtime_entity
  where id = /* entity.id */0
  """)
  def batchDeleteWithSql(entity: Seq[Embedded]): BatchResult[Embedded]
}
| bakenezumi/domala | paradise/src/test/scala/domala/tests/entity/noanno/EmbeddedTestSuite.scala | Scala | apache-2.0 | 7,081 |
package org.dbpedia.spotlight.db
import io._
import java.io.{FileOutputStream, FileInputStream, File}
import org.dbpedia.spotlight.db.memory.{MemoryQuantizedCountStore, MemoryStore}
import model.{TextTokenizer, StringTokenizer, Stemmer}
import org.dbpedia.spotlight.log.SpotlightLog
import scala.io.Source
import org.tartarus.snowball.SnowballProgram
import java.util.{Locale, Properties}
import org.dbpedia.spotlight.io.WikipediaHeldoutCorpus
import org.apache.commons.io.FileUtils
import opennlp.tools.tokenize.{TokenizerModel, TokenizerME}
import opennlp.tools.sentdetect.{SentenceModel, SentenceDetectorME}
import opennlp.tools.postag.{POSModel, POSTaggerME}
import opennlp.tools.chunker.ChunkerModel
import stem.SnowballStemmer
import tokenize._
import scala.Some
import scala.collection.immutable.HashMap
import scala.collection.mutable
/**
* This script creates a Spotlight model folder from the results of
* Apache Pig jobs. For a tutorial, see:
*
* https://github.com/dbpedia-spotlight/dbpedia-spotlight/wiki/Internationalization-(DB-backed-core)
*
* @author Joachim Daiber
* @author Philipp Dowling
*/
object CreateSpotlightModel {

  // Per-language pruning thresholds (minimum occurrence counts required to
  // keep an entry); languages not listed default to 1, i.e. keep everything.
  val minimumContextCounts = mutable.Map("en" -> 3).withDefaultValue(1)
  val minimumSFCounts = mutable.Map("en" -> 2).withDefaultValue(1)

  val OPENNLP_FOLDER = "opennlp"

  def main(args: Array[String]) {
    // Positional arguments; any parse failure prints usage and exits.
    val (localeCode: String, rawDataFolder: File, outputFolder: File, opennlpFolder: Option[File], stopwordsFile: File, stemmer: Stemmer) = try {
      (
        args(0), // locale code
        new File(args(1)), // raw data folder
        new File(args(2)), // output folder
        if (args(3) equals "None") None else Some(new File(args(3))), // openNLP
        new File(args(4)), // stopwords
        if ((args(5) equals "None") || (args(5) equals "NoneStemmer")) new Stemmer() else new SnowballStemmer(args(5)) // stemmer
      )
    } catch {
      case e: Exception => {
        e.printStackTrace()
        System.err.println("Usage:")
        System.err.println(" - English: mvn scala:run -DmainClass=org.dbpedia.spotlight.db.CreateSpotlightModel -Dexec.args=\\"en /data/input /data/output /data/opennlp /data/stopwords.list EnglishStemmer\\"")
        System.err.println(" - no stemmer: mvn scala:run -DmainClass=org.dbpedia.spotlight.db.CreateSpotlightModel -Dexec.args=\\"en /data/input /data/output /data/opennlp /data/stopwords.list None\\"")
        System.exit(1)
      }
    }

    // NOTE(review): assumes the locale code always has the "lang_COUNTRY"
    // shape (e.g. "en_US"); a bare "en" fails this pattern match.
    val Array(lang, country) = localeCode.split("_")
    val locale = new Locale(lang, country)

    if (args.length > 6) {
      // Optional 7th argument: pruning thresholds "prune=<sf>,<cx>", e.g.
      // "prune=3,2" (surface-form threshold first, then context threshold).
      def applyPruning(spec: String): Unit = {
        val Array(pSF, pCX) = spec.split(",")
        minimumContextCounts.put(lang, pCX.toInt)
        minimumSFCounts.put(lang, pSF.toInt)
        println("Using provided pruning values %s and %s".format(pSF, pCX))
      }
      val a = args(6).split("=")
      if (a.length == 2 && (a(0) equals "prune")) {
        // Documented "prune=3,2" form. Fix: the previous code tested
        // a(1) == "prune" and read a(2), so this documented form was
        // silently ignored.
        applyPruning(a(1))
      } else if (a.length > 2 && (a(1) equals "prune")) {
        // Legacy "<prefix>=prune=3,2" shape that the old indices matched.
        applyPruning(a(2))
      }
    }

    // Refuse to overwrite an existing model folder.
    if (!outputFolder.mkdir()) {
      System.err.println("Folder %s already exists, I am too afraid to overwrite it!".format(outputFolder.toString))
      System.exit(1)
    }

    FileUtils.copyFile(stopwordsFile, new File(outputFolder, "stopwords.list"))

    if (opennlpFolder.isDefined) {
      val opennlpModels = opennlpFolder.get.listFiles()
      val opennlpOut = new File(outputFolder, OPENNLP_FOLDER)
      opennlpOut.mkdir()

      // First model file whose name ends with the given suffix.
      def getModel(name: String) = opennlpModels.find(_.getName.endsWith(name))

      // Tokenizer and sentence-detector models are mandatory.
      try {
        FileUtils.copyFile(getModel("-token.bin").get, new File(opennlpOut, "token.bin"))
        FileUtils.copyFile(getModel("-sent.bin").get, new File(opennlpOut, "sent.bin"))
      } catch {
        case _: Exception => {
          System.err.println(
            """Problem with OpenNLP models:
              | You need to have at least the following model files in your opennlp folder:
              | *-sent.bin
              | *-token.bin
              |
              | For the best result, you should also have:
              | *-chunker.bin
              | *-pos-maxent.bin
            """.stripMargin)
          System.exit(1)
        }
      }

      // POS tagger and chunker models are optional; failures are ignored
      // deliberately (best effort).
      try {
        FileUtils.copyFile(getModel("-pos-maxent.bin").get, new File(opennlpOut, "pos-maxent.bin"))
      } catch {
        case _: Exception => //Ignore
      }
      try {
        getModel("-chunker.bin") match {
          case Some(model) => FileUtils.copyFile(model, new File(opennlpOut, "chunker.bin"))
          case _ =>
        }
      } catch {
        case _: Exception => //Ignore
      }
    }

    // Sentence-unaware tokenizer used while indexing surface forms.
    val rawTokenizer: StringTokenizer = if (opennlpFolder.isDefined) {
      val opennlpOut = new File(outputFolder, OPENNLP_FOLDER)
      val onlpTokenizer = new TokenizerME(new TokenizerModel(new FileInputStream(new File(opennlpOut, "token.bin"))))
      new OpenNLPStringTokenizer(
        onlpTokenizer,
        stemmer
      )
    } else {
      new LanguageIndependentStringTokenizer(locale, stemmer)
    }

    val namespace = if (locale.getLanguage.equals("en")) {
      "http://dbpedia.org/resource/"
    } else {
      "http://%s.dbpedia.org/resource/".format(locale.getLanguage)
    }

    //Set default properties
    val defaultProperties = new Properties()
    defaultProperties.setProperty("stemmer", args(5))
    defaultProperties.setProperty("namespace", namespace)
    defaultProperties.setProperty("locale", localeCode)
    defaultProperties.setProperty("version", "1.0")
    defaultProperties.store(new FileOutputStream(new File(outputFolder, "model.properties")), null)

    //Create models:
    val modelDataFolder = new File(outputFolder, "model")
    modelDataFolder.mkdir()

    val quantizedCountStore = new MemoryQuantizedCountStore()
    val memoryIndexer = new MemoryStoreIndexer(modelDataFolder, quantizedCountStore)
    // NOTE(review): diskIndexer is never referenced below, but its
    // constructor may create on-disk state, so it is left in place.
    val diskIndexer = new JDBMStoreIndexer(new File("data/"))

    // The redirect/disambiguation closure is mandatory.
    val wikipediaToDBpediaClosure =
      if (new File(rawDataFolder, "redirects.nt").exists() && new File(rawDataFolder, "disambiguations.nt").exists()) {
        new WikipediaToDBpediaClosure(
          namespace,
          new FileInputStream(new File(rawDataFolder, "redirects.nt")),
          new FileInputStream(new File(rawDataFolder, "disambiguations.nt"))
        )
      }
      else {
        SpotlightLog.warn(this.getClass,
          "No redirects and disambiguations supplied! Not loading WikipediaToDBpediaClosure."
        )
        System.exit(1)
        null
      }

    memoryIndexer.tokenizer = Some(rawTokenizer)
    memoryIndexer.addSurfaceForms(
      SurfaceFormSource.fromPigFiles(
        new File(rawDataFolder, "sfAndTotalCounts"),
        wikiClosure=wikipediaToDBpediaClosure
      ),
      SurfaceFormSource.lowercaseCountsFromPigInputStream(new FileInputStream(new File(rawDataFolder, "sfAndTotalCounts"))),
      minimumSFCounts(lang)
    )

    memoryIndexer.addResources(
      DBpediaResourceSource.fromPigFiles(
        wikipediaToDBpediaClosure,
        new File(rawDataFolder, "uriCounts"),
        // Instance types are optional; either .nt or .tsv form is accepted.
        if (new File(rawDataFolder, "instance_types.nt").exists())
          new File(rawDataFolder, "instance_types.nt")
        else if (new File(rawDataFolder, "instanceTypes.tsv").exists())
          new File(rawDataFolder, "instanceTypes.tsv")
        else
          null,
        namespace
      )
    )

    // Re-load the stores just written so later indexing steps can use them.
    val resStore = MemoryStore.loadResourceStore(new FileInputStream(new File(modelDataFolder, "res.mem")), quantizedCountStore)
    val sfStore = MemoryStore.loadSurfaceFormStore(new FileInputStream(new File(modelDataFolder, "sf.mem")), quantizedCountStore)

    memoryIndexer.addCandidatesByID(
      CandidateMapSource.fromPigFiles(
        new File(rawDataFolder, "pairCounts"),
        wikipediaToDBpediaClosure,
        resStore,
        sfStore
      ),
      sfStore.size
    )

    memoryIndexer.addTokenTypes(
      TokenSource.fromPigFile(
        new File(rawDataFolder, "tokenCounts"),
        additionalTokens = Some(TokenSource.fromSFStore(sfStore, rawTokenizer)),
        minimumContextCounts(lang)
      )
    )

    val tokenStore = MemoryStore.loadTokenTypeStore(new FileInputStream(new File(modelDataFolder, "tokens.mem")))

    memoryIndexer.createContextStore(resStore.size)
    memoryIndexer.addTokenOccurrences(
      TokenOccurrenceSource.fromPigFile(
        new File(rawDataFolder, "tokenCounts"),
        tokenStore,
        wikipediaToDBpediaClosure,
        resStore,
        minimumContextCounts(lang)
      )
    )
    memoryIndexer.writeTokenOccurrences()
    memoryIndexer.writeQuantizedCounts()

    // Sentence-aware tokenizer used to build the FSA spotter dictionary.
    val tokenizer: TextTokenizer = if (opennlpFolder.isDefined) {
      val opennlpOut = new File(outputFolder, OPENNLP_FOLDER)
      val oToken = new TokenizerME(new TokenizerModel(new FileInputStream(new File(opennlpOut, "token.bin"))))
      val oSent = new SentenceDetectorME(new SentenceModel(new FileInputStream(new File(opennlpOut, "sent.bin"))))
      new OpenNLPTokenizer(
        oToken,
        Set[String](),
        stemmer,
        oSent,
        null,
        tokenStore
      )
    } else {
      new LanguageIndependentTokenizer(Set[String](), stemmer, locale, tokenStore)
    }

    val fsaDict = FSASpotter.buildDictionary(sfStore, tokenizer)
    MemoryStore.dump(fsaDict, new File(outputFolder, "fsa_dict.mem"))

    // Spotter thresholds: copy a user-supplied file (expected next to the
    // stopwords file) or fall back to defaults.
    if (new File(stopwordsFile.getParentFile, "spotter_thresholds.txt").exists())
      FileUtils.copyFile(new File(stopwordsFile.getParentFile, "spotter_thresholds.txt"), new File(outputFolder, "spotter_thresholds.txt"))
    else
      FileUtils.write(
        new File(outputFolder, "spotter_thresholds.txt"),
        "1.0 0.2 -0.2 0.1" //Defaults!
      )

    // Optional word/entity vectors.
    if (new File(rawDataFolder, "wiki2vec_syn0.csv").exists() && new File(rawDataFolder, "wiki2vec_ids.txt").exists()) {
      SpotlightLog.debug(this.getClass, "Found vector file, building vectors.mem store.")
      val memoryVectorStoreIndexer: MemoryVectorStoreIndexer =
        new MemoryVectorStoreIndexer(
          new File(rawDataFolder, "wiki2vec_syn0.csv"),
          new File(rawDataFolder, "wiki2vec_ids.txt")
        )
      memoryVectorStoreIndexer.loadVectorDict(tokenStore, resStore)
      memoryVectorStoreIndexer.loadVectorsAndWriteToStore(new File(modelDataFolder, "vectors.mem"))
    } else {
      SpotlightLog.info(this.getClass, "No vectors supplied, not building memory vector store.")
    }
  }
}
| Skunnyk/dbpedia-spotlight-model | index/src/main/scala/org/dbpedia/spotlight/db/CreateSpotlightModel.scala | Scala | apache-2.0 | 10,387 |
import sbt._
import Keys._
// sbt 0.13-era Build definition for the Dispatch (classic) modules.
// `shared` holds settings common to all modules; `httpShared` additionally
// pulls in Apache HttpClient for the HTTP-backed modules.
object Dispatch extends Build {
  val shared = Defaults.defaultSettings ++ ls.Plugin.lsSettings ++ Seq(
    organization := "net.databinder",
    version := "0.8.10",
    scalaVersion := "2.10.4",
    parallelExecution in Test := false,
    testOptions in Test += Tests.Argument(TestFrameworks.Specs2, "sequential", "true"),
    scalacOptions ++= "-deprecation" :: Nil,
    crossScalaVersions :=
      Seq("2.9.0", "2.9.0-1", "2.9.1", "2.9.1-1", "2.9.2", "2.9.3", "2.10.4", "2.11.1"),
    // specs2 is version-pinned per Scala series: 2.9.x gets the last
    // compatible build, everything newer uses a cross-built artifact.
    libraryDependencies <++= (scalaVersion) { sv => Seq(
      sv.split("[.-]").toList match {
        case "2" :: "9" :: _ =>
          "org.specs2" % "specs2_2.9.2" % "1.12.4" % "test"
        case _ =>
          "org.specs2" %% "specs2" % "2.3.12" % "test"
      })
    },
    publishMavenStyle := true,
    // Sonatype OSS: snapshots vs. staging repository by version suffix.
    publishTo <<= version { (v: String) =>
      val nexus = "https://oss.sonatype.org/"
      if (v.trim.endsWith("SNAPSHOT"))
        Some("snapshots" at nexus + "content/repositories/snapshots")
      else
        Some("releases" at nexus + "service/local/staging/deploy/maven2")
    },
    credentials += Credentials(Path.userHome / ".ivy2" / ".credentials"),
    homepage :=
      Some(new java.net.URL("http://dispatch-classic.databinder.net/")),
    publishArtifact in Test := false,
    licenses := Seq("LGPL v3" -> url("http://www.gnu.org/licenses/lgpl.txt")),
    pomExtra := (
      <scm>
        <url>git@github.com:dispatch/reboot.git</url>
        <connection>scm:git:git@github.com:dispatch/reboot.git</connection>
      </scm>
      <developers>
        <developer>
          <id>n8han</id>
          <name>Nathan Hamblen</name>
          <url>http://twitter.com/n8han</url>
        </developer>
      </developers>)
  )
  val httpShared = shared ++ Seq(
    libraryDependencies +=
      "org.apache.httpcomponents" % "httpclient" % "4.1.3"
  )
  // Root project: aggregates all modules and collects their sources and
  // classpaths (via aggregateTask) so scaladoc covers everything.
  lazy val dispatch =
    Project("Dispatch", file("."), settings = shared ++ Seq(
      sources in (Compile, doc) <<=
        (thisProjectRef, buildStructure) flatMap (aggregateTask(sources)),
      dependencyClasspath in (Compile, doc) <<=
        (thisProjectRef, buildStructure) flatMap
          aggregateTask(dependencyClasspath),
      ls.Plugin.LsKeys.skipWrite := true
    )) aggregate(
      futures, core, http, nio, mime, json, http_json, oauth, gae, tagsoup,
      jsoup
    )
  lazy val futures =
    Project("dispatch-futures", file("futures"), settings = shared ++ Seq(
      description := "Common interface to Java and Scala futures",
      // https://github.com/harrah/xsbt/issues/85#issuecomment-1687483
      unmanagedClasspath in Compile += Attributed.blank(new java.io.File("doesnotexist")),
      actorsDependency
    ))
  lazy val core =
    Project("dispatch-core", file("core"), settings = httpShared ++ Seq(
      description :=
        "Core interfaces, applied by dispatch-http and dispatch-nio executors",
      xmlDependency
    ))
  lazy val http =
    Project("dispatch-http", file("http"), settings = httpShared ++ Seq(
      description :=
        "Standard HTTP executor, uses Apache DefaultHttpClient",
      // Tests are not source-compatible with 2.9.x, so drop them there.
      sources in Test := {
        if (scalaVersion.value.startsWith("2.9.")) Nil
        else (sources in Test).value
      }
    )) dependsOn(
      core, futures)
  lazy val gae =
    Project("dispatch-gae", file("http-gae"), settings = httpShared ++ Seq(
      description :=
        "Executor with a modified Apache HttpClient for Google App Engine",
      libraryDependencies +=
        "com.google.appengine" % "appengine-api-1.0-sdk" % "1.5.5"
    )) dependsOn(http)
  lazy val nio =
    Project("dispatch-nio", file("nio"), settings = httpShared ++ Seq(
      description :=
        "NIO HTTP executor, uses Apache DefaultHttpAsyncClient",
      libraryDependencies +=
        ("org.apache.httpcomponents" % "httpasyncclient" % "4.0-alpha1")
    )) dependsOn(core, futures)
  lazy val mime =
    Project("dispatch-mime", file("mime"), settings = httpShared ++ Seq(
      description :=
        "Support for multipart MIME POSTs",
      libraryDependencies ++= Seq(
        "org.apache.httpcomponents" % "httpmime" % "4.1.2" intransitive(),
        "commons-logging" % "commons-logging" % "1.1.1",
        "org.apache.james" % "apache-mime4j-core" % "0.7.2"
      ),
      actorsDependency
    )) dependsOn(core)
  lazy val json =
    Project("dispatch-json", file("json"), settings = shared ++ Seq(
      description := "A JSON parser",
      sources in Test := {
        if (scalaVersion.value.startsWith("2.9.")) Nil
        else (sources in Test).value
      },
      // https://github.com/harrah/xsbt/issues/85#issuecomment-1687483
      unmanagedClasspath in Compile += Attributed.blank(new java.io.File("doesnotexist")),
      parserDependency
    ))
  lazy val http_json =
    Project("dispatch-http-json", file("http+json"),
      settings = httpShared ++ Seq(
        description := "Adds JSON handler verbs to Dispatch"
      )) dependsOn(core, json)
  lazy val oauth =
    Project("dispatch-oauth", file("oauth"), settings = httpShared ++ Seq(
      description := "OAuth 1.0a signing for Dispatch requests"
    )) dependsOn(
      core, http)
  lazy val tagsoup =
    Project("dispatch-tagsoup", file("tagsoup"), settings = httpShared ++ Seq(
      description := "Adds TagSoup handler verbs to Dispatch",
      libraryDependencies ++= Seq(
        "org.ccil.cowan.tagsoup" % "tagsoup" % "1.2.1",
        "org.eclipse.jetty.aggregate" % "jetty-server" % "7.5.4.v20111024" % "test"
      )
    )) dependsOn(core, http)
  lazy val jsoup =
    Project("dispatch-jsoup", file("jsoup"), settings = httpShared ++ Seq(
      description := "Adds JSoup handler verbs to Dispatch",
      libraryDependencies ++= Seq(
        "org.jsoup" % "jsoup" % "1.6.1",
        "org.eclipse.jetty.aggregate" % "jetty-server" % "7.5.4.v20111024" % "test"
      )
    )) dependsOn(core, http)
  // Collects `key` from this project plus everything it (transitively)
  // aggregates; used by the root project to build unified scaladoc.
  def aggregateTask[T](key: TaskKey[Seq[T]])
    (proj: ProjectRef, struct: BuildStructure) = {
    def collectProjects(op: ResolvedProject => Seq[ProjectRef])
      (projRef: ProjectRef,
       struct: BuildStructure): Seq[ProjectRef] = {
      val delg = Project.getProject(projRef, struct).toSeq.flatMap(op)
      // Dependencies/aggregates might have their own dependencies/aggregates
      // so go recursive and do distinct.
      delg.flatMap(ref => ref +: collectProjects(op)(ref, struct)).distinct
    }
    collectProjects(_.aggregate)(proj, struct).flatMap(
      key in (_, Compile, doc) get struct.data
    ).join.map(_.flatten)
  }
  // scala-actors became a separate artifact in 2.10+, so it is only added
  // for those Scala versions.
  lazy val actorsDependency = libraryDependencies <<= (libraryDependencies, scalaVersion){
    (dependencies, scalaVersion) =>
      if(scalaVersion.startsWith("2.10") || scalaVersion.startsWith("2.11"))
        ("org.scala-lang" % "scala-actors" % scalaVersion) +: dependencies
      else
        dependencies
  }
  // scala-xml was modularized out of the stdlib in 2.11.
  lazy val xmlDependency = libraryDependencies <<= (libraryDependencies, scalaVersion){
    (dependencies, scalaVersion) =>
      if(scalaVersion.startsWith("2.11"))
        ("org.scala-lang.modules" %% "scala-xml" % "1.0.1") +: dependencies
      else
        dependencies
  }
  // scala-parser-combinators was modularized out of the stdlib in 2.11.
  lazy val parserDependency = libraryDependencies <<= (libraryDependencies, scalaVersion){
    (dependencies, scalaVersion) =>
      if(scalaVersion.startsWith("2.11"))
        ("org.scala-lang.modules" %% "scala-parser-combinators" % "1.0.1") +: dependencies
      else
        dependencies
  }
}
| dispatch/dispatch | project/build.scala | Scala | lgpl-2.1 | 7,516 |
/**
 * Exercise enrichments: wrap any value in an Option, postfix factorial on
 * Int, and repeat a side-effecting block a fixed number of times.
 */
object Implicits {
  import language.implicitConversions
  /** `42.some` is shorthand for `Some(42)`. */
  implicit class Any2some[T](wrapped: T) {
    def some: Option[T] = Some(wrapped)
  }
  /** Postfix factorial: `(4).!` is 24; any n <= 0 yields 1. */
  implicit class int2factorial(n: Int) {
    def ! = {
      // Same running Int product as `(1 to n).product`, written as a loop;
      // overflow semantics for n >= 13 are identical (wraps around).
      var acc = 1
      var i = 2
      while (i <= n) {
        acc *= i
        i += 1
      }
      acc
    }
  }
  /** Runs `block` exactly `n` times (not at all when n <= 0). */
  implicit class int2times(n: Int) {
    def times(block: => Unit) = {
      var remaining = n
      while (remaining > 0) {
        block
        remaining -= 1
      }
    }
  }
}
| grzegorzbalcerek/scala-exercises | Implicits/solutionImplicits.scala | Scala | bsd-2-clause | 305 |
package scala.meta.tests
package semanticdb
import org.scalatest._
import scala.meta._
import scala.meta.internal.semanticdb.{vfs => v}
import scala.meta.internal.semanticdb.{schema => s}
/**
 * Shared fixture: resolves the sourcepath and classpath of the semanticdb
 * database produced by the build, via sbt-generated BuildInfo constants.
 */
trait BaseSemanticSuite extends FunSuiteLike {
  val sourcepath = Sourcepath(BuildInfo.databaseSourcepath)
  val classpath = Classpath(BuildInfo.databaseClasspath)
}
/**
 * Verifies that a semanticdb Database can be constructed through each of
 * its load overloads, and sanity-checks the stored document filenames.
 */
class MirrorConstructionSuite extends BaseSemanticSuite {
  // All *.semanticdb entries found on the classpath, as absolute paths.
  def semanticdbs: List[AbsolutePath] = classpath.deep.collect {
    case path if v.SemanticdbPaths.isSemanticdb(path.name) =>
      path.base.resolve(path.name)
  }
  test("Database.load(Classpath, Sourcepath)") {
    val database = Database.load(classpath, sourcepath)
    assert(database.documents.nonEmpty)
  }
  // Loads each semanticdb payload individually from raw bytes.
  test("Database.load(Array[Byte])") {
    semanticdbs.foreach { path =>
      val database = Database.load(path.readAllBytes)
      assert(database.documents.nonEmpty, path.toString)
    }
  }
  test("Database.load(Classpath)") {
    val database = Database.load(classpath)
    assert(database.documents.nonEmpty)
  }
  // Stored filenames must be portable, i.e. never use Windows separators.
  test("s.Document.filename has no Windows slash (\\\\)") {
    semanticdbs.foreach { path =>
      val sattrs = s.Document.parseFrom(path.readAllBytes)
      assert(!sattrs.filename.contains('\\\\'))
    }
  }
}
| DavidDudson/scalameta | tests/shared/src/test/scala/scala/meta/tests/semanticdb/MirrorConstructionSuite.scala | Scala | bsd-3-clause | 1,263 |
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.kudu.spark.kudu
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.streaming.OutputMode
import org.junit.Before
import org.junit.Test
/** End-to-end test of the Kudu structured-streaming sink: pushes rows from an
  * in-memory stream through the "kudu" format, then reads the table back via
  * the batch source and compares contents.
  */
class StreamingTest extends KuduTestSuite {
  // Implicit so Spark SQL helpers that need an SQLContext pick it up.
  implicit var sqlContext: SQLContext = _
  var kuduOptions: Map[String, String] = _
  @Before
  def setUp(): Unit = {
    sqlContext = ss.sqlContext
    kuduOptions =
      Map("kudu.table" -> simpleTableName, "kudu.master" -> harness.getMasterAddressesAsString)
  }
  @Test
  def testKuduContextWithSparkStreaming() {
    val spark = ss
    import spark.implicits._
    // Structured streaming requires a checkpoint location; use a fresh temp dir.
    val checkpointDir = java.nio.file.Files.createTempDirectory("spark_kudu")
    val input = MemoryStream[Int]
    // Each input Int v becomes the row (v + 1, v.toString) keyed on "key".
    val query = input
      .toDS()
      .map(v => (v + 1, v.toString))
      .toDF("key", "val")
      .writeStream
      .format("kudu")
      .option("kudu.master", harness.getMasterAddressesAsString)
      .option("kudu.table", simpleTableName)
      .option("checkpointLocation", checkpointDir.toFile.getCanonicalPath)
      .outputMode(OutputMode.Update)
      .start()
    // Reads the whole table back through the batch source and compares as a
    // set, since row order from collect() is unspecified.
    def verifyOutput(expectedData: Seq[(Int, String)]): Unit = {
      val df = sqlContext.read.options(kuduOptions).format("kudu").load
      val actual = df.rdd
        .map { row =>
          (row.get(0), row.getString(1))
        }
        .collect()
        .toSet
      assert(actual === expectedData.toSet)
    }
    input.addData(1, 2, 3)
    // Block until the pending micro-batch has been fully written before verifying.
    query.processAllAvailable()
    verifyOutput(expectedData = Seq((2, "1"), (3, "2"), (4, "3")))
    query.stop()
  }
}
| helifu/kudu | java/kudu-spark/src/test/scala/org/apache/kudu/spark/kudu/StreamingTest.scala | Scala | apache-2.0 | 2,397 |
/*
* Copyright (c) 2011-14 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless
import org.junit.Test
import org.junit.Assert._
import test._
import testutil._
class HListTests {
import nat._
import poly._
import syntax.std.traversable._
import syntax.singleton._
import syntax.typeable._
import ops.hlist._
import ops.record.SelectAll
  // ---- Expected-type aliases used with assertTypedEquals throughout ----
  type SI = Set[Int] :: HNil
  type OI = Option[Int] :: HNil
  type III = Int :: Int :: Int :: HNil
  type SISS = Set[Int] :: Set[String] :: HNil
  type OIOS = Option[Int] :: Option[String] :: HNil
  type ISII = Int :: String :: Int :: Int :: HNil
  type IIII = Int :: Int :: Int :: Int :: HNil
  type IYII = Int :: Any :: Int :: Int :: HNil
  type OIOSOIOI = Option[Int] :: Option[String] :: Option[Int] :: Option[Int] :: HNil
  type SISSSISI = Set[Int] :: Set[String] :: Set[Int] :: Set[Int] :: HNil
  type BBBB = Boolean :: Boolean :: Boolean :: Boolean :: HNil
  // ---- Small closed subtype hierarchy for subtyping/unification tests ----
  trait Fruit
  case class Apple() extends Fruit
  case class Pear() extends Fruit
  case class Banana() extends Fruit
  // The LUB the compiler infers for a mix of distinct Fruit case classes.
  type PWS = Product with Serializable with Fruit
  type YYYY = Any :: Any :: Any :: Any :: HNil
  type FF = Fruit :: Fruit :: HNil
  type AP = Apple :: Pear :: HNil
  type BP = Banana :: Pear :: HNil
  type AF = Apple :: Fruit :: HNil
  type FFFF = Fruit :: Fruit :: Fruit :: Fruit :: HNil
  type APAP = Apple :: Pear :: Apple :: Pear :: HNil
  type APBP = Apple :: Pear :: Banana :: Pear :: HNil
  type APB = Apple :: Pear :: Banana :: HNil
  type PBPA = Pear :: Banana :: Pear :: Apple :: HNil
  type PABP = Pear :: Apple :: Banana :: Pear :: HNil
  // Coproduct counterparts used by the toCoproduct tests.
  type APc = Apple :+: Pear :+: CNil
  type ABPc = Apple :+: Banana :+: Pear :+: CNil
  // ---- Shared fixture values (HLists plus their List/Array counterparts) ----
  val a : Apple = Apple()
  val p : Pear = Pear()
  val b : Banana = Banana()
  val f : Fruit = new Fruit {}
  val ap : AP = a :: p :: HNil
  val bp : BP = b :: p :: HNil
  val apap : APAP = a :: p :: a :: p :: HNil
  val apbp : APBP = a :: p :: b :: p :: HNil
  val apapList = a :: p :: a :: p :: Nil
  val apbpList = a :: p :: b :: p :: Nil
  val apapArray = Array(a, p, a, p)
  val apbpArray = Array(a, p, b, p)
  // Contravariant type constructor, for variance-sensitive conversions.
  trait Ctv[-T]
  type CICSCICICD = Ctv[Int] :: Ctv[String] :: Ctv[Int] :: Ctv[Int] :: Ctv[Double] :: HNil
  val ci: Ctv[Int] = new Ctv[Int] {}
  val cs: Ctv[String] = new Ctv[String] {}
  val cd: Ctv[Double] = new Ctv[Double] {}
  val cicscicicdList = ci :: cs :: ci :: ci :: cd :: Nil
  val cicscicicdArray = Array(ci, cs, ci, ci, cd)
  val cicscicicd: CICSCICICD = ci :: cs :: ci :: ci :: cd :: HNil
  // Invariant unary type constructor.
  trait M[T]
  type MIMSMIMIMD = M[Int] :: M[String] :: M[Int] :: M[Int] :: M[Double] :: HNil
  val mi: M[Int] = new M[Int] {}
  val ms: M[String] = new M[String] {}
  val md: M[Double] = new M[Double] {}
  val mimsmimimdList = mi :: ms :: mi :: mi :: md :: Nil
  val mimsmimimdArray = Array(mi, ms, mi, mi, md)
  val mimsmimimd: MIMSMIMIMD = mi :: ms :: mi :: mi :: md :: HNil
  import language.existentials
  // Variants with an existentially-typed element mixed in.
  val mExist: M[_] = new M[Double] {}
  type MIMSMIMEMD = M[Int] :: M[String] :: M[Int] :: M[_] :: M[Double] :: HNil
  val mimsmimemdList = mi :: ms :: mi :: mExist :: md :: Nil
  val mimsmimemdArray = Array[M[_]](mi, ms, mi, mExist, md)
  val mimsmimemd: MIMSMIMEMD = mi :: ms :: mi :: mExist :: md :: HNil
  // Invariant binary type constructor, with and without an existential second argument.
  trait M2[A,B]
  type M2IM2SM2IM2IM2D = M2[Int, Unit] :: M2[String, Unit] :: M2[Int, Unit] :: M2[Int, Unit] :: M2[Double, Unit] :: HNil
  val m2i: M2[Int, Unit] = new M2[Int, Unit] {}
  val m2s: M2[String, Unit] = new M2[String, Unit] {}
  val m2d: M2[Double, Unit] = new M2[Double, Unit] {}
  val m2im2sm2im2im2dList = m2i :: m2s :: m2i :: m2i :: m2d :: Nil
  val m2im2sm2im2im2dArray = Array(m2i, m2s, m2i, m2i, m2d)
  val m2im2sm2im2im2d: M2IM2SM2IM2IM2D = m2i :: m2s :: m2i :: m2i :: m2d :: HNil
  val m2iExist: M2[Int, _] = new M2[Int, Unit] {}
  val m2sExist: M2[String, _] = new M2[String, Unit] {}
  val m2dExist: M2[Double, _] = new M2[Double, Unit] {}
  type M2EIM2ESM2EIM2EEM2ED = M2[Int, _] :: M2[String, _] :: M2[Int, _] :: M2[Int, _] :: M2[Double, _] :: HNil
  val m2eim2esm2eim2eem2edList = m2iExist :: m2sExist :: m2iExist :: m2iExist :: m2dExist :: Nil
  val m2eim2esm2eim2eem2edArray = Array(m2iExist, m2sExist, m2iExist, m2iExist, m2dExist)
  val m2eim2esm2eim2eem2ed: M2EIM2ESM2EIM2EEM2ED = m2iExist :: m2sExist :: m2iExist :: m2iExist :: m2dExist :: HNil
  // ---- Polymorphic function values used as map/flatMap arguments ----
  object mkString extends (Any -> String)(_.toString)
  object fruit extends (Fruit -> Fruit)(f => f)
  object incInt extends (Int >-> Int)(_ + 1)
  object extendedChoose extends LiftU(choose)
  // head/tail access yields statically-typed elements; access past the end of
  // an HList is a compile error (checked with illTyped).
  @Test
  def testBasics {
    val l = 1 :: "foo" :: 2.0 :: HNil
    val r1 = l.head
    assertTypedEquals[Int](1, r1)
    val r2 = l.tail.head
    assertTypedEquals[String]("foo", r2)
    // Double comparison uses the smallest representable tolerance.
    assertEquals(2.0, l.tail.tail.head, Double.MinPositiveValue)
    illTyped("""
    HNil.head
    """)
    illTyped("""
    HNil.tail
    """)
    illTyped("""
    l.tail.tail.tail.head
    """)
  }
  // `map` over an HList with polymorphic function values: each element type is
  // transformed independently and the result type tracks the transformation.
  @Test
  def testMap {
    // Implicit Mapper instances must resolve for the nil and one-element cases.
    implicitly[Mapper.Aux[choose.type, HNil, HNil]]
    implicitly[choose.Case[Set[Int]]]
    implicitly[Mapper.Aux[choose.type, Set[Int] :: HNil, Option[Int] :: HNil]]
    val s1 = Set(1) :: HNil
    val o1 = s1 map choose
    assertTypedEquals[OI](Option(1) :: HNil, o1)
    val s2 = Set(1) :: Set("foo") :: HNil
    val o2 = s2 map choose
    assertTypedEquals[OIOS](Option(1) :: Option("foo") :: HNil, o2)
    val l1 = 1 :: "foo" :: 2 :: 3 :: HNil
    val l2 = l1 map singleton
    assertTypedEquals[SISSSISI](Set(1) :: Set("foo") :: Set(2) :: Set(3) :: HNil, l2)
    val l3 = l1 map option
    assertTypedEquals[OIOSOIOI](Option(1) :: Option("foo") :: Option(2) :: Option(3) :: HNil, l3)
    val l4 = Option(1) :: Option("foo") :: Option(2) :: Option(3) :: HNil
    // `get` unwraps each Option, recovering the element types.
    val l5 = l4 map get
    assertTypedEquals[ISII](1 :: "foo" :: 2 :: 3 :: HNil, l5)
    typed[Int](l5.head)
    typed[String](l5.tail.head)
    typed[Int](l5.tail.tail.head)
    typed[Int](l5.tail.tail.tail.head)
    // Mapping poly identity preserves both values and types.
    val l6 = l1 map identity
    assertTypedEquals[ISII](1 :: "foo" :: 2 :: 3 :: HNil, l6)
    val l7 = l4 map isDefined
    assertTypedEquals[BBBB](true :: true :: true :: true :: HNil, l7)
    val l8 = 23 :: "foo" :: true :: HNil
    val l9 = l8 map mkString
    assertTypedEquals[String :: String :: String :: HNil]("23" :: "foo" :: "true" :: HNil, l9)
    // `fruit` widens each element to Fruit; `mkString` stringifies via toString.
    val l10 = apbp map fruit
    assertTypedEquals[Fruit :: Fruit :: Fruit :: Fruit :: HNil](apbp, l10)
    val l11 = apbp map mkString
    assertTypedEquals[String :: String :: String :: String :: HNil]("Apple()" :: "Pear()" :: "Banana()" :: "Pear()" :: HNil, l11)
  }
  // Poly1 that maps any element t to the two-element HList t :: t :: HNil;
  // used by testFlatMap to duplicate every element.
  object dup extends Poly1 {
    implicit def default[T] = at[T](t => t :: t :: HNil)
  }
  // `flatMap` over an HList: each element maps to an HList and the results are
  // concatenated at both the value and type level.
  @Test
  def testFlatMap {
    val l1 = 1 :: "foo" :: true :: HNil
    // dup doubles every element.
    val l2 = l1 flatMap dup
    assertTypedEquals[Int :: Int :: String :: String :: Boolean :: Boolean :: HNil](
      1 :: 1 :: "foo" :: "foo" :: true :: true :: HNil, l2)
    // flatMap identity flattens one level of nested HLists (incl. an empty one).
    val l3 = (1 :: "foo" :: HNil) :: (HNil : HNil) :: (2.0 :: true :: HNil) :: ("bar" :: HNil) :: HNil
    val l4 = l3 flatMap identity
    assertTypedEquals[Int :: String :: Double :: Boolean :: String :: HNil](
      1 :: "foo" :: 2.0 :: true :: "bar" :: HNil, l4)
    // incInt is defined only at Int, so per the assertion below the
    // non-Int elements are dropped from the result.
    val l5 = 23 :: "foo" :: 7 :: true :: 0 :: HNil
    val l6 = l5 flatMap incInt
    assertTypedEquals[Int :: Int :: Int :: HNil](24 :: 8 :: 1 :: HNil, l6)
    val l7 = Set(23) :: "foo" :: Set(true) :: 23 :: HNil
    val l8 = l7 flatMap extendedChoose
    assertTypedEquals[Option[Int] :: Option[Boolean] :: HNil](Option(23) :: Option(true) :: HNil, l8)
  }
  // HLists conform element-wise to HList types with supertype elements
  // (e.g. APAP conforms to FFFF), i.e. the HList cons cell is covariant.
  @Test
  def testConformance {
    val l1 = 1 :: "foo" :: 2 :: 3 :: HNil
    assertTypedEquals[Any :: AnyRef :: Any :: Any :: HNil](1 :: "foo" :: 2 :: 3 :: HNil, l1)
    val ap = a :: p :: HNil
    typed[AP](ap)
    val bp = b :: p :: HNil
    typed[BP](bp)
    val apap = a :: p :: a :: p :: HNil
    typed[APAP](apap)
    val apbp = a :: p :: b :: p :: HNil
    typed[APBP](apbp)
    // Upcast: a concrete APAP value is assignable to the widened FFFF type.
    val ffff : FFFF = apap
    typed[FFFF](ffff)
  }
  // `length` is a type-level Nat; Nat.toInt recovers the runtime value.
  @Test
  def testLength {
    val l0 = HNil
    typed[Nat._0](l0.length)
    assertEquals(0, Nat toInt l0.length)
    val l1 = 1 :: "foo" :: 2 :: 3 :: HNil
    typed[Nat._4](l1.length)
    assertEquals(4, Nat toInt l1.length)
    val ap = a :: p :: HNil
    typed[Nat._2](ap.length)
    assertEquals(2, Nat toInt ap.length)
    val bp = b :: p :: HNil
    typed[Nat._2](bp.length)
    assertEquals(2, Nat toInt bp.length)
    val apap = a :: p :: a :: p :: HNil
    typed[Nat._4](apap.length)
    assertEquals(4, Nat toInt apap.length)
    val apbp = a :: p :: b :: p :: HNil
    typed[Nat._4](apbp.length)
    assertEquals(4, Nat toInt apbp.length)
    // Length is preserved under widening to FFFF.
    val ffff : FFFF = apap
    typed[Nat._4](ffff.length)
    assertEquals(4, Nat toInt ffff.length)
  }
  // `runtimeLength` computes the length by traversal, without type-level Nat.
  @Test
  def testRuntimeLength {
    assertEquals(0, HNil.runtimeLength)
    assertEquals(1, (123 :: HNil).runtimeLength)
    assertEquals(2, ("abc" :: 123 :: HNil).runtimeLength)
  }
  // `runtimeList` converts to an untyped List[Any], preserving element order.
  @Test
  def testRuntimeList {
    assertEquals(Nil, HNil.runtimeList)
    assertEquals(123 :: Nil, (123 :: HNil).runtimeList)
    assertEquals("abc" :: 123 :: Nil, ("abc" :: 123 :: HNil).runtimeList)
  }
  // `last` returns the final element with its precise type; `init` drops it.
  @Test
  def testInitLast {
    val lp = apbp.last
    assertTypedEquals[Pear](p, lp)
    val iapb = apbp.init
    assertTypedEquals[APB](a :: p :: b :: HNil, iapb)
  }
  // `align` permutes an HList to match the element order of another HList
  // (value form) or of a target type (type-argument form). Alignment against a
  // type with mismatched length or element types must not compile.
  @Test
  def testAlign {
    type M0 = Int :: String :: Boolean :: HNil
    type M1 = Int :: Boolean :: String :: HNil
    type M2 = String :: Int :: Boolean :: HNil
    type M3 = String :: Boolean :: Int :: HNil
    type M4 = Boolean :: Int :: String :: HNil
    type M5 = Boolean :: String :: Int :: HNil
    val m0 = 13 :: "bar" :: false :: HNil
    val m1 = 13 :: false :: "bar" :: HNil
    val m2 = "bar" :: 13 :: false :: HNil
    val m3 = "bar" :: false :: 13 :: HNil
    val m4 = false :: 13 :: "bar" :: HNil
    val m5 = false :: "bar" :: 13 :: HNil
    val l = 23 :: "foo" :: true :: HNil
    // Align against a value: result order follows the argument's types.
    val a0 = l.align(m0)
    assertTypedEquals[M0](23 :: "foo" :: true :: HNil, a0)
    val a1 = l.align(m1)
    assertTypedEquals[M1](23 :: true :: "foo" :: HNil, a1)
    val a2 = l.align(m2)
    assertTypedEquals[M2]("foo" :: 23 :: true :: HNil, a2)
    val a3 = l.align(m3)
    assertTypedEquals[M3]("foo" :: true :: 23 :: HNil, a3)
    val a4 = l.align(m4)
    assertTypedEquals[M4](true :: 23 :: "foo" :: HNil, a4)
    val a5 = l.align(m5)
    assertTypedEquals[M5](true :: "foo" :: 23 :: HNil, a5)
    // Align against a type argument: same permutations, no value needed.
    val b0 = l.align[M0]
    assertTypedEquals[M0](23 :: "foo" :: true :: HNil, b0)
    val b1 = l.align[M1]
    assertTypedEquals[M1](23 :: true :: "foo" :: HNil, b1)
    val b2 = l.align[M2]
    assertTypedEquals[M2]("foo" :: 23 :: true :: HNil, b2)
    val b3 = l.align[M3]
    assertTypedEquals[M3]("foo" :: true :: 23 :: HNil, b3)
    val b4 = l.align[M4]
    assertTypedEquals[M4](true :: 23 :: "foo" :: HNil, b4)
    val b5 = l.align[M5]
    assertTypedEquals[M5](true :: "foo" :: 23 :: HNil, b5)
    // Degenerate and small cases.
    val c0 = (HNil: HNil).align[HNil]
    typed[HNil](c0)
    val c1 = (23 :: HNil).align[Int :: HNil]
    typed[Int :: HNil](c1)
    val c2 = (23 :: "foo" :: HNil).align[String :: Int :: HNil]
    typed[String :: Int :: HNil](c2)
    // Length or element-type mismatches are compile errors.
    illTyped("""
    (HNil: HNil).align[Int :: HNil]
    """)
    illTyped("""
    (23 :: HNil).align[String :: HNil]
    """)
    illTyped("""
    (23 :: "foo" :: HNil).align[String :: String :: HNil]
    """)
  }
  // `reverse` reverses both element values and the type-level element order.
  @Test
  def testReverse {
    val pbpa = apbp.reverse
    assertTypedEquals[PBPA](p :: b :: p :: a :: HNil, pbpa)
    // Single-element reversal is the identity.
    val al = a :: HNil
    val ral = al.reverse
    assertTypedEquals[Apple :: HNil](a :: HNil, ral)
  }
  // `:::` (prepend) and `reverse_:::` (reverse-prepend), including the special
  // cases where one operand is HNil — those must resolve without needing a
  // full Prepend/ReversePrepend instance.
  @Test
  def testPrepend {
    val apbp2 = ap ::: bp
    assertTypedEquals[APBP](a :: p :: b :: p :: HNil, apbp2)
    typed[Apple](apbp2.head)
    typed[Pear](apbp2.tail.head)
    typed[Banana](apbp2.tail.tail.head)
    typed[Pear](apbp2.tail.tail.tail.head)
    // reverse_::: reverses the left operand before prepending it.
    val pabp = ap reverse_::: bp
    assertTypedEquals[PABP](p :: a :: b :: p :: HNil, pabp)
    {
      // must compile without requiring an implicit Prepend
      def prependWithHNil[L <: HList](list: L) = HNil ::: list
      def prependToHNil[L <: HList](list: L) = list ::: HNil
      val r1 = prependWithHNil(ap)
      assertTypedSame[AP](ap, r1)
      val r2 = prependToHNil(ap)
      assertTypedSame[AP](ap, r2)
      val r3 = HNil ::: HNil
      assertTypedSame[HNil](HNil, r3)
      val r4 = prependWithHNil(pabp)
      assertTypedSame[PABP](pabp, r4)
      val r5 = prependToHNil(pabp)
      assertTypedSame[PABP](pabp, r5)
    }
    {
      // must also pass with the default implicit
      val r1 = HNil ::: ap
      assertTypedSame[AP](ap, r1)
      val r2 = ap ::: HNil
      assertTypedSame[AP](ap, r2)
      val r4 = HNil ::: pabp
      assertTypedSame[PABP](pabp, r4)
      val r5 = pabp ::: HNil
      assertTypedSame[PABP](pabp, r5)
    }
    {
      // must compile without requiring an implicit ReversePrepend
      def reversePrependWithHNil[L <: HList](list: L) = HNil reverse_::: list
      def reversePrependToHNil[L <: HList : Reverse](list: L) = list reverse_::: HNil
      val r4 = reversePrependWithHNil(ap)
      assertTypedSame[AP](ap, r4)
      val r5 = reversePrependToHNil(ap)
      assertTypedEquals[Pear :: Apple :: HNil](ap.reverse, r5)
      val r6 = HNil reverse_::: HNil
      assertTypedSame[HNil](HNil, r6)
    }
  }
  // `toSized[List]`: conversion to a length-indexed Sized[List, N], checking
  // that lengths carry over and that `unsized` infers the expected element LUB
  // (incl. contravariant, invariant and existential type-constructor cases).
  @Test
  def testToSizedList {
    // Compile-time-only check that two expressions have the same inferred type.
    def equalInferredTypes[A,B](a: A, b: B)(implicit eq: A =:= B) {}
    val hnil = HNil
    val snil = hnil.toSized[List]
    assertEquals(Nat toInt hnil.length, snil.length)
    val expectedUnsized = List.empty[Nothing]
    equalInferredTypes(expectedUnsized, snil.unsized)
    assertEquals(expectedUnsized, snil.unsized)
    implicitly[ToSized.Aux[HNil, List, Nothing, _0]]
    implicitly[ToSized.Aux[HNil, List, Int, _0]]
    {
      // A one-element HList may be sized at either the precise or the existential element type.
      implicitly[ToSized.Aux[M[Int] :: HNil, List, M[Int], _1]]
      implicitly[ToSized.Aux[M[Int] :: HNil, List, M[_], _1]]
    }
    val sizedApap = apap.toSized[List]
    assertEquals(Nat toInt apap.length, sizedApap.length)
    equalInferredTypes(apapList, sizedApap.unsized)
    assertEquals(apapList, sizedApap.unsized)
    val sizedApbp = apbp.toSized[List]
    assertEquals(Nat toInt apbp.length, sizedApbp.length)
    equalInferredTypes(apbpList, sizedApbp.unsized)
    assertEquals(apbpList, sizedApbp.unsized)
    val sizedCicscicicd = cicscicicd.toSized[List]
    assertEquals(Nat toInt cicscicicd.length, sizedCicscicicd.length)
    equalInferredTypes(cicscicicdList, sizedCicscicicd.unsized)
    assertEquals(cicscicicdList, sizedCicscicicd.unsized)
    val sizedMimsmimimd = mimsmimimd.toSized[List]
    assertEquals(Nat toInt mimsmimimd.length, sizedMimsmimimd.length)
    equalInferredTypes(mimsmimimdList, sizedMimsmimimd.unsized)
    assertEquals(mimsmimimdList, sizedMimsmimimd.unsized)
    val sizedMimsmimemd = mimsmimemd.toSized[List]
    assertEquals(Nat toInt mimsmimemd.length, sizedMimsmimemd.length)
    // With an existential element the inferred types differ, so only the
    // weaker typed/equality checks are performed.
    // equalInferredTypes(mimsmimemdList, sizedMimsmimemd.unsized)
    typed[List[M[_]]](sizedMimsmimemd.unsized)
    assertEquals(mimsmimemdList, sizedMimsmimemd.unsized)
    val sizedM2im2sm2im2im2d = m2im2sm2im2im2d.toSized[List]
    assertEquals(Nat toInt m2im2sm2im2im2d.length, sizedM2im2sm2im2im2d.length)
    equalInferredTypes(m2im2sm2im2im2dList, sizedM2im2sm2im2im2d.unsized)
    assertEquals(m2im2sm2im2im2dList, sizedM2im2sm2im2im2d.unsized)
    val sizedM2eim2esm2eim2eem2ed = m2eim2esm2eim2eem2ed.toSized[List]
    assertEquals(Nat toInt m2eim2esm2eim2eem2ed.length, sizedM2eim2esm2eim2eem2ed.length)
    // equalInferredTypes(m2eim2esm2eim2eem2edList, sizedM2eim2esm2eim2eem2ed.unsized)
    assertTypedEquals[List[M2[_ >: Double with Int with String, _]]](
      m2eim2esm2eim2eem2edList, sizedM2eim2esm2eim2eem2ed.unsized)
  }
  // `toSized[Array]`: same coverage as testToSizedList but for Array, which is
  // invariant — hence the element-LUB inference checks matter here too.
  @Test
  def testToSizedArray {
    // Arrays use reference equality, so compare via the Object[] overload.
    def assertArrayEquals2[T](arr1 : Array[T], arr2 : Array[T]) =
      assertArrayEquals(arr1.asInstanceOf[Array[Object]], arr2.asInstanceOf[Array[Object]])
    // Compile-time-only check that two expressions have the same inferred type.
    def equalInferredTypes[A,B](a: A, b: B)(implicit eq: A =:= B) {}
    val hnil = HNil
    val snil = hnil.toSized[Array]
    assertEquals(Nat toInt hnil.length, snil.length)
    val expectedUnsized = Array.empty[Nothing]
    equalInferredTypes(expectedUnsized, snil.unsized)
    assertArrayEquals2(expectedUnsized, snil.unsized)
    implicitly[ToSized.Aux[HNil, Array, Nothing, _0]]
    implicitly[ToSized.Aux[HNil, Array, Int, _0]]
    val sizedApap = apap.toSized[Array]
    assertEquals(Nat toInt apap.length, sizedApap.length)
    equalInferredTypes(apapArray, sizedApap.unsized)
    assertArrayEquals2(apapArray, sizedApap.unsized)
    val sizedApbp = apbp.toSized[Array]
    assertEquals(Nat toInt apbp.length, sizedApbp.length)
    equalInferredTypes(apbpArray, sizedApbp.unsized)
    assertArrayEquals2(apbpArray, sizedApbp.unsized)
    val sizedCicscicicd = cicscicicd.toSized[Array]
    assertEquals(Nat toInt cicscicicd.length, sizedCicscicicd.length)
    equalInferredTypes(cicscicicdArray, sizedCicscicicd.unsized)
    assertArrayEquals2(cicscicicdArray, sizedCicscicicd.unsized)
    val sizedMimsmimimd = mimsmimimd.toSized[Array]
    assertEquals(Nat toInt mimsmimimd.length, sizedMimsmimimd.length)
    equalInferredTypes(mimsmimimdArray, sizedMimsmimimd.unsized)
    assertArrayEquals2(mimsmimimdArray, sizedMimsmimimd.unsized)
    val sizedMimsmimemd = mimsmimemd.toSized[Array]
    assertEquals(Nat toInt mimsmimemd.length, sizedMimsmimemd.length)
    // Existential element: inferred types differ, so only weaker checks apply.
    // equalInferredTypes(mimsmimemdArray, sizedMimsmimemd.unsized)
    typed[Array[M[_]]](sizedMimsmimemd.unsized)
    assertArrayEquals2(mimsmimemdArray, sizedMimsmimemd.unsized)
    val sizedM2im2sm2im2im2d = m2im2sm2im2im2d.toSized[Array]
    assertEquals(Nat toInt m2im2sm2im2im2d.length, sizedM2im2sm2im2im2d.length)
    equalInferredTypes(m2im2sm2im2im2dArray, sizedM2im2sm2im2im2d.unsized)
    assertArrayEquals2(m2im2sm2im2im2dArray, sizedM2im2sm2im2im2d.unsized)
    val sizedM2eim2esm2eim2eem2ed = m2eim2esm2eim2eem2ed.toSized[Array]
    assertEquals(Nat toInt m2eim2esm2eim2eem2ed.length, sizedM2eim2esm2eim2eem2ed.length)
    // equalInferredTypes(m2eim2esm2eim2eem2edArray, sizedM2eim2esm2eim2eem2ed.unsized)
    typed[Array[M2[_ >: Double with Int with String, _]]](sizedM2eim2esm2eim2eem2ed.unsized)
    assertArrayEquals2(m2eim2esm2eim2eem2edArray.map(x => x: Any), sizedM2eim2esm2eim2eem2ed.unsized.map(x => x: Any))
  }
  // Lub (pairwise least-upper-bound evidence) and Unifier (widening every
  // element of an HList to the elements' LUB), plus round-tripping a unified
  // list back to its precise type via Typeable `cast`.
  @Test
  def testUnifier {
    // Uses Lub evidence to coerce both arguments to their least upper bound.
    def lub[X, Y, L](x : X, y : Y)(implicit lb : Lub[X, Y, L]) : (L, L) = (lb.left(x), lb.right(y))
    val u21 = lub(a, a)
    typed[(Apple, Apple)](u21)
    val u22 = lub(a, p)
    typed[(Fruit, Fruit)](u22)
    val u23 = lub(a, f)
    typed[(Fruit, Fruit)](u23)
    val u24 = lub(p, a)
    typed[(Fruit, Fruit)](u24)
    val u25 = lub(p, p)
    typed[(Pear, Pear)](u25)
    val u26 = lub(f, f)
    typed[(Fruit, Fruit)](u26)
    val u27 = lub(f, a)
    typed[(Fruit, Fruit)](u27)
    val u28 = lub(f, p)
    typed[(Fruit, Fruit)](u28)
    val u29 = lub(f, f)
    typed[(Fruit, Fruit)](u29)
    // Lub lifts pointwise to HLists.
    implicitly[Lub[HNil, HNil, HNil]]
    implicitly[Lub[Apple :: HNil, Apple :: HNil, Apple :: HNil]]
    implicitly[Lub[Fruit :: Pear :: HNil, Fruit :: Fruit :: HNil, Fruit :: Fruit :: HNil]]
    implicitly[Lub[Apple :: Pear :: HNil, Pear :: Apple :: HNil, Fruit :: Fruit :: HNil]]
    implicitly[Lub[ISII, IIII, IYII]]
    val u31 = lub(HNil, HNil)
    typed[(HNil, HNil)](u31)
    val u32 = lub(a :: HNil, a :: HNil)
    typed[(Apple :: HNil, Apple :: HNil)](u32)
    val u33 = lub(f :: p :: HNil, f :: f :: HNil)
    typed[(Fruit :: Fruit :: HNil, Fruit :: Fruit :: HNil)](u33)
    val u34 = lub(a :: p :: HNil, p :: a :: HNil)
    typed[(Fruit :: Fruit :: HNil, Fruit :: Fruit :: HNil)](u34)
    val u35 = lub(1 :: "two" :: 3 :: 4 :: HNil, 1 :: 2 :: 3 :: 4 :: HNil)
    typed[(Int :: Any :: Int :: Int :: HNil, Int :: Any :: Int :: Int :: HNil)](u35)
    // Unifier widens all elements of a single HList to their common LUB.
    implicitly[Unifier.Aux[Apple :: HNil, Apple :: HNil]]
    implicitly[Unifier.Aux[Fruit :: Pear :: HNil, Fruit :: Fruit :: HNil]]
    implicitly[Unifier.Aux[Apple :: Pear :: HNil, Fruit :: Fruit :: HNil]]
    implicitly[Unifier.Aux[Int :: String :: Int :: Int :: HNil, YYYY]]
    val uapap = implicitly[Unifier.Aux[Apple :: Pear :: Apple :: Pear :: HNil, FFFF]]
    val unified1 = uapap(apap)
    typed[FFFF](unified1)
    val unified2 = apap.unify
    typed[FFFF](unified2)
    // cast recovers the precise type only when the runtime values match it.
    val ununified1 = unified2.cast[APAP]
    assertTrue(ununified1.isDefined)
    typed[APAP](ununified1.get)
    val ununified2 = unified2.cast[APBP]
    assertFalse(ununified2.isDefined)
    typed[Option[APBP]](ununified2)
    def getUnifier[L <: HList, Out <: HList](l : L)(implicit u : Unifier.Aux[L, Out]) = u
    val u2 = getUnifier(a :: HNil)
    typed[Unifier.Aux[Apple :: HNil, Apple :: HNil]](u2)
    val u3 = getUnifier(a :: a :: HNil)
    typed[Unifier.Aux[Apple :: Apple :: HNil, Apple :: Apple :: HNil]](u3)
    val u4 = getUnifier(a :: a :: a :: HNil)
    typed[Unifier.Aux[Apple :: Apple :: Apple :: HNil, Apple :: Apple :: Apple :: HNil]](u4)
    val u5 = getUnifier(a :: a :: a :: a :: HNil)
    typed[Unifier.Aux[Apple :: Apple :: Apple :: Apple :: HNil, Apple :: Apple :: Apple :: Apple :: HNil]](u5)
    val u6 = getUnifier(a :: p :: HNil)
    //typed[Unifier.Aux[Apple :: Pear :: HNil, Fruit :: Fruit :: HNil]](u6)
    val u7 = getUnifier(a :: f :: HNil)
    typed[Unifier.Aux[Apple :: Fruit :: HNil, Fruit :: Fruit :: HNil]](u7)
    val u8 = getUnifier(f :: a :: HNil)
    typed[Unifier.Aux[Fruit :: Apple :: HNil, Fruit :: Fruit :: HNil]](u8)
    val u9a = getUnifier(a :: f :: HNil)
    typed[Unifier.Aux[Apple :: Fruit :: HNil, FF]](u9a)
    // When only case classes are involved, the LUB picks up
    // Product with Serializable (PWS).
    val u9b = getUnifier(a :: p :: HNil)
    typed[Unifier.Aux[Apple :: Pear :: HNil, PWS :: PWS :: HNil]](u9b)
    val u10 = getUnifier(apap)
    typed[Unifier.Aux[APAP, PWS :: PWS :: PWS :: PWS :: HNil]](u10)
    val u11 = getUnifier(apbp)
    typed[Unifier.Aux[APBP, PWS :: PWS :: PWS :: PWS :: HNil]](u11)
    val invar1 = Set(23) :: Set("foo") :: HNil
    val uinvar1 = invar1.unify
    typed[Set[_ >: Int with String] :: Set[_ >: Int with String] :: HNil](uinvar1)
    // Unifying three or more elements which have an invariant outer type constructor and differing type
    // arguments fails, presumably due to a failure to compute a sensible LUB.
    //val invar2 = Set(23) :: Set("foo") :: Set(true) :: HNil
    //val uinvar2 = invar.unify
  }
  // `unifySubtypes[B]` widens only those elements that are subtypes of B,
  // leaving unrelated elements (e.g. String) untouched; useful before filter.
  @Test
  def testSubtypeUnifier {
    val fruits : Apple :: Pear :: Fruit :: HNil = a :: p :: f :: HNil
    typed[Fruit :: Fruit :: Fruit :: HNil](fruits.unifySubtypes[Fruit])
    // Widening to Apple leaves non-Apple elements at their original types.
    typed[Apple :: Pear :: Fruit :: HNil](fruits.unifySubtypes[Apple])
    assertEquals(a :: p :: f :: HNil, fruits.unifySubtypes[Fruit].filter[Fruit])
    val stuff : Apple :: String :: Pear :: HNil = a :: "foo" :: p :: HNil
    typed[Fruit :: String :: Fruit :: HNil](stuff.unifySubtypes[Fruit])
    // filter matches exact element types, so it finds Fruit only after unification.
    assertEquals(HNil, stuff.filter[Fruit])
    assertEquals(a :: p :: HNil, stuff.unifySubtypes[Fruit].filter[Fruit])
  }
  // `to[List]` (ToTraversable): conversion to List at the elements' LUB, and
  // `toHList` recovering the precise HList type when runtime values match.
  @Test
  def testToTraversableList {
    val r1 = HNil.to[List]
    assertTypedEquals[List[Nothing]](Nil, r1)
    ToList[HNil, Nothing]
    ToList[HNil, Int]
    {
      // A one-element HList admits both the precise and the existential element type.
      implicitly[ToTraversable.Aux[M[Int] :: HNil, List, M[Int]]]
      implicitly[ToTraversable.Aux[M[Int] :: HNil, List, M[_]]]
    }
    val r2 = apap.to[List]
    assertTypedEquals[List[Fruit]](List(a, p, a, p), r2)
    val fruits2 = apbp.to[List]
    assertTypedEquals[List[Fruit]](List(a, p, b, p), fruits2)
    // Round-trip: List back to the precise HList via Typeable.
    val fruits3 = fruits2.toHList[APBP]
    assertTrue(fruits3.isDefined)
    assertTypedEquals[APBP](apbp, fruits3.get)
    val stuff = (1 :: "foo" :: 2 :: 3 :: HNil).to[List]
    assertTypedEquals[List[Any]](List(1, "foo", 2, 3), stuff)
    val stuff2 = stuff.toHList[ISII]
    assertTrue(stuff2.isDefined)
    assertTypedEquals[ISII](1 :: "foo" :: 2 :: 3 :: HNil, stuff2.get)
    val l4 = Option(1) :: Option("foo") :: Option(2) :: Option(3) :: HNil
    val l7 = l4 map isDefined
    assertTypedEquals[BBBB](true :: true :: true :: true :: HNil, l7)
    val ll2 = l7.to[List]
    typed[Boolean](ll2.head)
    val moreStuff = (a :: "foo" :: p :: HNil).to[List]
    typed[List[Any]](moreStuff)
    // Compile-time-only check that two expressions have the same inferred type.
    def equalInferredTypes[A,B](a: A, b: B)(implicit eq: A =:= B) {}
    val ctv = cicscicicd.to[List]
    equalInferredTypes(cicscicicdList, ctv)
    assertTypedEquals[List[Ctv[Int with String with Double]]](cicscicicdList, ctv)
    val m = mimsmimimd.to[List]
    equalInferredTypes(mimsmimimdList, m)
    assertTypedEquals[List[M[_ >: Int with String with Double]]](mimsmimimdList, m)
    // Existential element: inferred types differ, so only the weaker check applies.
    val mWithEx = mimsmimemd.to[List]
    // equalType(mimsmimemdList, mWithEx)
    assertTypedEquals[List[M[_]]](mimsmimemdList, mWithEx)
    val m2 = m2im2sm2im2im2d.to[List]
    equalInferredTypes(m2im2sm2im2im2dList, m2)
    assertTypedEquals[List[M2[_ >: Int with String with Double, Unit]]](m2im2sm2im2im2dList, m2)
    val m2e = m2eim2esm2eim2eem2ed.to[List]
    // equalType(m2eim2esm2eim2eem2edList, m2e)
    assertTypedEquals[List[M2[_ >: Int with String with Double, _]]](m2eim2esm2eim2eem2edList, m2e)
  }
  // `toCoproduct[F]`: conversion to a collection of a Coproduct covering
  // exactly the HList's distinct element types (a "precise" element type,
  // rather than the LUB used by to[F]).
  @Test
  def testToPreciseList {
    val r1 = HNil.toCoproduct[List]
    assertTypedEquals[List[CNil]](Nil, r1)
    val r2 = ap.toCoproduct[List]
    assertTypedEquals[List[APc]](List(Coproduct[APc](a), Coproduct[APc](p)), r2)
    // Repeated element types collapse into one coproduct alternative.
    val r3 = apap.toCoproduct[List]
    assertTypedEquals[List[APc]](List(Coproduct[APc](a), Coproduct[APc](p), Coproduct[APc](a), Coproduct[APc](p)), r3)
    val r4 = apbp.toCoproduct[Vector]
    assertTypedEquals[Vector[ABPc]](Vector[ABPc](Coproduct[ABPc](a), Coproduct[ABPc](p), Coproduct[ABPc](b), Coproduct[ABPc](p)), r4)
    // Compile-time-only check that two coproduct types cover the same alternatives.
    def equalInferedCoproducts[A <: Coproduct, B <: Coproduct](a: A, b: B)(implicit bInA: ops.coproduct.Basis[A, B], aInB: ops.coproduct.Basis[B, A]){}
    val abpc = Coproduct[ABPc](a)
    // Element order in the HList does not affect the inferred coproduct's coverage.
    val r5 = (a :: b :: a :: p :: b :: a :: HNil).toCoproduct[Set]
    equalInferedCoproducts(abpc, r5.head)
    val r6 = (p :: a :: a :: p :: p :: b :: HNil).toCoproduct[Set]
    equalInferedCoproducts(abpc, r6.head)
    val r7 = (a :: b :: p :: HNil).toCoproduct[Seq]
    equalInferedCoproducts(abpc, r7.head)
    // A two-type HList yields a smaller coproduct: not equal to ABPc.
    val r8 = (a :: b :: HNil).toCoproduct[Seq]
    illTyped{
      """equalInferedCoproducts(abpc, r8.head)"""
    }
    illTyped{
      """(1 :: "foo" :: HNil).toPrecise[Array]"""
    }
  }
  // `toList`: the dedicated List conversion (mirror of testToTraversableList),
  // including the existential and higher-kinded corner cases with the
  // compiler's own error messages preserved in comments.
  @Test
  def testToList {
    val r1 = HNil.toList
    assertTypedEquals[List[Nothing]](Nil, r1)
    implicitly[ToTraversable.Aux[HNil, List, Nothing]]
    implicitly[ToTraversable.Aux[HNil, List, Int]]
    {
      // A one-element HList admits both the precise and the existential element type.
      val l1 = (mi :: HNil).toList[M[Int]]
      val l2 = (mi :: HNil).toList[M[_]]
      assertTypedEquals[List[M[Int]]](List(mi), l1)
      assertTypedEquals[List[M[_]]](List(mi), l2)
    }
    val fruits1 = apap.toList
    assertTypedEquals[List[Fruit]](List(a, p, a, p), fruits1)
    val fruits2 = apbp.toList
    assertTypedEquals[List[Fruit]](List(a, p, b, p), fruits2)
    // Round-trip: List back to the precise HList via Typeable.
    val fruits3 = fruits2.toHList[APBP]
    assertTrue(fruits3.isDefined)
    assertTypedEquals[APBP](apbp, fruits3.get)
    val l1 = 1 :: "foo" :: 2 :: 3 :: HNil
    val stuff = l1.toList
    assertTypedEquals[List[Any]](List(1, "foo", 2, 3), stuff)
    val stuff2 = stuff.toHList[ISII]
    assertTrue(stuff2.isDefined)
    assertTypedEquals[ISII](1 :: "foo" :: 2 :: 3 :: HNil, stuff2.get)
    val l4 = Option(1) :: Option("foo") :: Option(2) :: Option(3) :: HNil
    val l7 = l4 map isDefined
    assertTypedEquals[BBBB](true :: true :: true :: true :: HNil, l7)
    val ll2 = l7.toList
    typed[Boolean](ll2.head)
    val moreStuff = (a :: "foo" :: p :: HNil).toList
    typed[List[Any]](moreStuff)
    // Compile-time-only check that two expressions have the same inferred type.
    def equalInferredTypes[A,B](a: A, b: B)(implicit eq: A =:= B) {}
    val ctv = cicscicicd.toList
    equalInferredTypes(cicscicicdList, ctv)
    assertTypedEquals[List[Ctv[Int with String with Double]]](cicscicicdList, ctv)
    val m = mimsmimimd.toList
    equalInferredTypes(mimsmimimdList, m)
    assertTypedEquals[List[M[_ >: Int with String with Double]]](mimsmimimdList, m)
    // With existentials, it gets more tricky
    val mWithEx = mimsmimemd.toList
    // Compiler fails complaining that it
    // Cannot prove that List[HListTests.this.M[_ >: Double with _$1 with Int with String]] =:= List[HListTests.this.M[_]]
    // equalType(mimsmimemdList, mWithEx)
    assertTypedEquals[List[M[_]]](mimsmimemdList, mWithEx)
    // Second order higher kinded types are ok...
    val m2 = m2im2sm2im2im2d.toList
    equalInferredTypes(m2im2sm2im2im2dList, m2)
    assertTypedEquals[List[M2[_ >: Int with String with Double, Unit]]](m2im2sm2im2im2dList, m2)
    // ...as long as existentials are not involved.
    val m2e = m2eim2esm2eim2eem2ed.toList
    // Compiler complains that it
    // Cannot prove that List[HListTests.this.M2[_ >: Double with Int with Int with String with Int, _ >: _$5 with _$3 with _$3 with _$4 with _$3]] =:= List[HListTests.this.M2[_35,_36] forSome { type _$10; type _$9; type _34 >: _$10 with _$9; type _$8; type _$7; type _32 >: _$8 with _$7; type _35 >: Double with Int with Int with String; type _36 >: _34 with _32 }]
    // equalType(m2eim2esm2eim2eem2edList, m2e)
    assertTypedEquals[List[M2[_ >: Int with String with Double, _]]](m2eim2esm2eim2eem2edList, m2e)
  }
  // `to[Array]` (ToTraversable for the invariant Array): conversions plus
  // explicit widening where the inferred element type would be too precise.
  @Test
  def testToTraversableArray {
    // Arrays use reference equality, so compare via the Object[] overload.
    def assertArrayEquals2[T](arr1 : Array[T], arr2 : Array[T]) =
      assertArrayEquals(arr1.asInstanceOf[Array[Object]], arr2.asInstanceOf[Array[Object]])
    val empty = HNil.to[Array]
    typed[Array[Nothing]](empty)
    assertArrayEquals2(Array[Nothing](), empty)
    implicitly[ToTraversable.Aux[HNil, Array, Nothing]]
    implicitly[ToTraversable.Aux[HNil, Array, Int]]
    {
      // A one-element HList admits both the precise and the existential element type.
      implicitly[ToTraversable.Aux[M[Int] :: HNil, Array, M[Int]]]
      implicitly[ToTraversable.Aux[M[Int] :: HNil, Array, M[_]]]
    }
    val fruits1 = apap.to[Array].map(x => x : Fruit) // Default inferred type is too precise
                                                     // (Product with Serializable with Fruit)
    typed[Array[Fruit]](fruits1)
    assertArrayEquals2(Array[Fruit](a, p, a, p), fruits1)
    val fruits2 = apbp.to[Array].map(x => x : Fruit)
    typed[Array[Fruit]](fruits2)
    assertArrayEquals2(Array[Fruit](a, p, b, p), fruits2)
    // Round-trip: Array back to the precise HList via Typeable.
    val fruits3 = fruits2.toHList[APBP]
    assertTrue(fruits3.isDefined)
    assertTypedEquals[APBP](apbp, fruits3.get)
    val l1 = 1 :: "foo" :: 2 :: 3 :: HNil
    val stuff = l1.to[Array]
    typed[Array[Any]](stuff)
    assertArrayEquals2(Array(1, "foo", 2, 3), stuff)
    val stuff2 = stuff.toHList[ISII]
    assertTrue(stuff2.isDefined)
    assertTypedEquals[ISII](1 :: "foo" :: 2 :: 3 :: HNil, stuff2.get)
    val l4 = Option(1) :: Option("foo") :: Option(2) :: Option(3) :: HNil
    val l7 = l4 map isDefined
    assertTypedEquals[BBBB](true :: true :: true :: true :: HNil, l7)
    val ll2 = l7.to[Array]
    typed[Boolean](ll2(0))
    val moreStuff = (a :: "foo" :: p :: HNil).to[Array].map(x => x : AnyRef)
    typed[Array[AnyRef]](moreStuff)
    assertArrayEquals2(Array[AnyRef](a, "foo", p), moreStuff)
    // Compile-time-only check that two expressions have the same inferred type.
    def equalInferredTypes[A,B](a: A, b: B)(implicit eq: A =:= B) {}
    val ctv = cicscicicd.to[Array]
    equalInferredTypes(cicscicicdArray, ctv)
    typed[Array[Ctv[Int with String with Double]]](ctv)
    assertArrayEquals2(cicscicicdArray, ctv)
    val m = mimsmimimd.to[Array]
    equalInferredTypes(mimsmimimdArray, m)
    typed[Array[M[_ >: Int with String with Double]]](m)
    assertArrayEquals2(mimsmimimdArray, m)
    // Existential element: inferred types differ, so only the weaker check applies.
    val mWithEx = mimsmimemd.to[Array]
    // equalType(mimsmimemdArray, mWithEx)
    typed[Array[M[_]]](mWithEx)
    assertArrayEquals2(mimsmimemdArray, mWithEx)
    val m2 = m2im2sm2im2im2d.to[Array]
    equalInferredTypes(m2im2sm2im2im2dArray, m2)
    typed[Array[M2[_ >: Int with String with Double, Unit]]](m2)
    assertArrayEquals2(m2im2sm2im2im2dArray, m2)
    val m2e = m2eim2esm2eim2eem2ed.to[Array]
    // equalInferredTypes(m2eim2esm2eim2eem2edArray, m2e)
    typed[Array[M2[_ >: Int with String with Double, _]]](m2e)
    assertArrayEquals2(m2eim2esm2eim2eem2edArray.map(x => x : Any), m2e.map(x => x : Any))
  }
@Test
def testToArray: Unit = {
  // Verifies HList#toArray: element-type selection (explicit vs inferred),
  // round-tripping back via toHList, and inferred array element types for
  // variant/invariant type constructors (Ctv, M, M2).
  def assertArrayEquals2[T](arr1: Array[T], arr2: Array[T]) =
    assertArrayEquals(arr1.asInstanceOf[Array[Object]], arr2.asInstanceOf[Array[Object]])

  val empty = HNil.toArray
  typed[Array[Nothing]](empty)
  assertArrayEquals2(Array[Nothing](), empty)

  // Resolution-only checks: the ToArray instance must exist for these type pairs.
  ToArray[HNil, Nothing]
  ToArray[HNil, Int]

  // `locally` makes the scoping block explicit; a bare `{ ... }` directly after the
  // expression above could be parsed as an argument block applied to it.
  locally {
    val a1 = (mi :: HNil).toArray[M[Int]]
    val a2 = (mi :: HNil).toArray[M[_]]
    typed[Array[M[Int]]](a1)
    typed[Array[M[_]]](a2)
    assertArrayEquals2(Array[M[Int]](mi), a1)
    assertArrayEquals2(Array[M[_]](mi), a2)
  }

  val fruits1 = apap.toArray[Fruit]
  typed[Array[Fruit]](fruits1)
  assertArrayEquals2(Array[Fruit](a, p, a, p), fruits1)

  val fruits2 = apbp.toArray[Fruit]
  typed[Array[Fruit]](fruits2)
  assertArrayEquals2(Array[Fruit](a, p, b, p), fruits2)

  val fruits3 = fruits2.toHList[APBP]
  assertTrue(fruits3.isDefined)
  assertTypedEquals[APBP](apbp, fruits3.get)

  val l1 = 1 :: "foo" :: 2 :: 3 :: HNil
  val stuff = l1.toArray
  typed[Array[Any]](stuff)
  assertArrayEquals2(Array(1, "foo", 2, 3), stuff)

  val ssl = "foo" :: "bar" :: 1L :: HNil
  val ssla = ssl.toArray
  typed[Array[Any]](ssla)
  assertArrayEquals2(Array("foo", "bar", 1L), ssla)

  val stuff2 = stuff.toHList[ISII]
  assertTrue(stuff2.isDefined)
  assertTypedEquals[ISII](1 :: "foo" :: 2 :: 3 :: HNil, stuff2.get)

  val l4 = Option(1) :: Option("foo") :: Option(2) :: Option(3) :: HNil
  val l7 = l4 map isDefined
  assertTypedEquals[BBBB](true :: true :: true :: true :: HNil, l7)

  val ll2 = l7.toArray
  typed[Boolean](ll2(0))

  val moreStuff = (a :: "foo" :: p :: HNil).toArray[AnyRef]
  typed[Array[AnyRef]](moreStuff)
  assertArrayEquals2(Array[AnyRef](a, "foo", p), moreStuff)

  // Compile-time check that two expressions were inferred at the same type.
  def equalInferredTypes[A, B](a: A, b: B)(implicit eq: A =:= B): Unit = ()

  val ctv = cicscicicd.toArray
  equalInferredTypes(cicscicicdArray, ctv)
  typed[Array[Ctv[Int with String with Double]]](ctv)
  assertArrayEquals2(cicscicicdArray, ctv)

  val m = mimsmimimd.toArray
  equalInferredTypes(mimsmimimdArray, m)
  typed[Array[M[_ >: Int with String with Double]]](m)
  assertArrayEquals2(mimsmimimdArray, m)

  val mWithEx = mimsmimemd.toArray[M[_]]
  // equalType(mimsmimemdArray, mWithEx)
  typed[Array[M[_]]](mWithEx)
  assertArrayEquals2(mimsmimemdArray, mWithEx)

  val m2 = m2im2sm2im2im2d.toArray
  equalInferredTypes(m2im2sm2im2im2dArray, m2)
  typed[Array[M2[_ >: Int with String with Double, Unit]]](m2)
  assertArrayEquals2(m2im2sm2im2im2dArray, m2)

  val m2e = m2eim2esm2eim2eem2ed.toArray
  // equalInferredTypes(m2eim2esm2eim2eem2edArray, m2e)
  typed[Array[M2[_ >: Int with String with Double, _]]](m2e)
  assertArrayEquals2(m2eim2esm2eim2eem2edArray.map(x => x: Any), m2e.map(x => x: Any))
}
@Test
def testFoldMap: Unit = {
  // foldMap(z)(f)(op) must agree with the two-step `map f` then fold over toList.
  implicitly[Mapper.Aux[isDefined.type, HNil, HNil]]
  implicitly[Mapper.Aux[isDefined.type, Option[Int] :: HNil, Boolean :: HNil]]

  val tl1 = Option(1) :: Option("foo") :: Option(2) :: Option(3) :: HNil
  val tl2 = Option(1) :: Option("foo") :: (None: Option[Int]) :: Option(3) :: HNil

  // Reference results computed the long way round.
  val mlfl1 = (tl1 map isDefined).toList.foldLeft(true)(_ && _)
  assertTrue(mlfl1)
  val mlfl2 = (tl2 map isDefined).toList.foldLeft(true)(_ && _)
  assertFalse(mlfl2)

  // foldMap fuses the map and the fold into a single traversal.
  val fl1 = tl1.foldMap(true)(isDefined)(_ && _)
  assertTrue(fl1)
  val fl2 = tl2.foldMap(true)(isDefined)(_ && _)
  assertFalse(fl2)
}
@Test
def testAt: Unit = {
  // Indexed access via Nat singletons (_0 .. _22) preserves each element's static type.
  val sn1 = 23 :: 3.0 :: "foo" :: () :: "bar" :: true :: 5L :: HNil

  val at0 = sn1(_0)
  assertTypedEquals[Int](23, at0)
  val at1 = sn1(_1)
  typed[Double](at1)
  assertEquals(3.0, at1, Double.MinPositiveValue)
  val at2 = sn1(_2)
  assertTypedEquals[String]("foo", at2)
  val at3 = sn1(_3)
  assertTypedEquals[Unit]((), at3)
  val at4 = sn1(_4)
  assertTypedEquals[String]("bar", at4)
  val at5 = sn1(_5)
  assertTypedEquals[Boolean](true, at5)
  val at6 = sn1(_6)
  assertTypedEquals[Long](5L, at6)

  // A 23-element list exercises access beyond the small-Nat range.
  val sn2 =
    0 :: 1 :: 2 :: 3 :: 4 :: 5 :: 6 :: 7 :: 8 :: 9 ::
    10 :: 11 :: 12 :: 13 :: 14 :: 15 :: 16 :: 17 :: 18 :: 19 ::
    20 :: 21 :: 22 :: HNil
  val at22 = sn2(_22)
  assertTypedEquals[Int](22, at22)
}
@Test
def testAtLiteral: Unit = {
  // Same as testAt but indexing with Int literals instead of Nat singletons.
  val sn1 = 23 :: 3.0 :: "foo" :: () :: "bar" :: true :: 5L :: HNil

  val at0 = sn1(0)
  assertTypedEquals[Int](23, at0)
  val at1 = sn1(1)
  typed[Double](at1)
  assertEquals(3.0, at1, Double.MinPositiveValue)
  val at2 = sn1(2)
  assertTypedEquals[String]("foo", at2)
  val at3 = sn1(3)
  assertTypedEquals[Unit]((), at3)
  val at4 = sn1(4)
  assertTypedEquals[String]("bar", at4)
  val at5 = sn1(5)
  assertTypedEquals[Boolean](true, at5)
  val at6 = sn1(6)
  assertTypedEquals[Long](5L, at6)

  val sn2 =
    0 :: 1 :: 2 :: 3 :: 4 :: 5 :: 6 :: 7 :: 8 :: 9 ::
    10 :: 11 :: 12 :: 13 :: 14 :: 15 :: 16 :: 17 :: 18 :: 19 ::
    20 :: 21 :: 22 :: HNil
  val at22 = sn2(22)
  assertTypedEquals[Int](22, at22)
}
@Test
def testTakeDrop: Unit = {
  // take/drop with Nat arguments: 0, a middle index, and the full length.
  val sn1 = 23 :: 3.0 :: "foo" :: () :: "bar" :: true :: 5L :: HNil

  val r1 = sn1.take(_0)
  assertTypedEquals[HNil](HNil, r1)
  val r2 = sn1.drop(_0)
  assertTypedEquals[Int :: Double :: String :: Unit :: String :: Boolean :: Long :: HNil](
    23 :: 3.0 :: "foo" :: () :: "bar" :: true :: 5L :: HNil, r2)

  val r3 = sn1.take(_2)
  assertTypedEquals[Int :: Double :: HNil](23 :: 3.0 :: HNil, r3)
  val r4 = sn1.drop(_2)
  assertTypedEquals[String :: Unit :: String :: Boolean :: Long :: HNil](
    "foo" :: () :: "bar" :: true :: 5L :: HNil, r4)

  val r5 = sn1.take(_7)
  assertTypedEquals[Int :: Double :: String :: Unit :: String :: Boolean :: Long :: HNil](
    23 :: 3.0 :: "foo" :: () :: "bar" :: true :: 5L :: HNil, r5)
  val r6 = sn1.drop(_7)
  assertTypedEquals[HNil](HNil, r6)
}
@Test
def testTakeDropLiteral: Unit = {
  // Same as testTakeDrop but with Int literal arguments.
  val sn1 = 23 :: 3.0 :: "foo" :: () :: "bar" :: true :: 5L :: HNil

  val r1 = sn1.take(0)
  assertTypedEquals[HNil](HNil, r1)
  val r2 = sn1.drop(0)
  assertTypedEquals[Int :: Double :: String :: Unit :: String :: Boolean :: Long :: HNil](
    23 :: 3.0 :: "foo" :: () :: "bar" :: true :: 5L :: HNil, r2)

  val r3 = sn1.take(2)
  assertTypedEquals[Int :: Double :: HNil](23 :: 3.0 :: HNil, r3)
  val r4 = sn1.drop(2)
  assertTypedEquals[String :: Unit :: String :: Boolean :: Long :: HNil](
    "foo" :: () :: "bar" :: true :: 5L :: HNil, r4)

  val r5 = sn1.take(7)
  assertTypedEquals[Int :: Double :: String :: Unit :: String :: Boolean :: Long :: HNil](
    23 :: 3.0 :: "foo" :: () :: "bar" :: true :: 5L :: HNil, r5)
  val r6 = sn1.drop(7)
  assertTypedEquals[HNil](HNil, r6)
}
@Test
def testSplit: Unit = {
  // split(n) yields a (prefix, suffix) tuple at every index 0..7;
  // reverse_split additionally reverses the prefix.
  val sn1 = 23 :: 3.0 :: "foo" :: () :: "bar" :: true :: 5L :: HNil

  val sni0 = sn1.split(_0)
  typed[(HNil, (Int :: Double :: String :: Unit :: String :: Boolean :: Long :: HNil))](sni0)
  val sni1 = sn1.split(_1)
  typed[((Int :: HNil), (Double :: String :: Unit :: String :: Boolean :: Long :: HNil))](sni1)
  val sni2 = sn1.split(_2)
  typed[((Int :: Double :: HNil), (String :: Unit :: String :: Boolean :: Long :: HNil))](sni2)
  val sni3 = sn1.split(_3)
  typed[((Int :: Double :: String :: HNil), (Unit :: String :: Boolean :: Long :: HNil))](sni3)
  val sni4 = sn1.split(_4)
  typed[((Int :: Double :: String :: Unit :: HNil), (String :: Boolean :: Long :: HNil))](sni4)
  val sni5 = sn1.split(_5)
  typed[((Int :: Double :: String :: Unit :: String :: HNil), (Boolean :: Long :: HNil))](sni5)
  val sni6 = sn1.split(_6)
  typed[((Int :: Double :: String :: Unit :: String :: Boolean :: HNil), (Long :: HNil))](sni6)
  val sni7 = sn1.split(_7)
  typed[((Int :: Double :: String :: Unit :: String :: Boolean :: Long :: HNil), HNil)](sni7)

  val snri0 = sn1.reverse_split(_0)
  typed[(HNil, (Int :: Double :: String :: Unit :: String :: Boolean :: Long :: HNil))](snri0)
  val snri1 = sn1.reverse_split(_1)
  typed[((Int :: HNil), (Double :: String :: Unit :: String :: Boolean :: Long :: HNil))](snri1)
  val snri2 = sn1.reverse_split(_2)
  typed[((Double :: Int :: HNil), (String :: Unit :: String :: Boolean :: Long :: HNil))](snri2)
  val snri3 = sn1.reverse_split(_3)
  typed[((String :: Double :: Int :: HNil), (Unit :: String :: Boolean :: Long :: HNil))](snri3)
  val snri4 = sn1.reverse_split(_4)
  typed[((Unit :: String :: Double :: Int :: HNil), (String :: Boolean :: Long :: HNil))](snri4)
  val snri5 = sn1.reverse_split(_5)
  typed[((String :: Unit :: String :: Double :: Int :: HNil), (Boolean :: Long :: HNil))](snri5)
  val snri6 = sn1.reverse_split(_6)
  typed[((Boolean :: String :: Unit :: String :: Double :: Int :: HNil), (Long :: HNil))](snri6)
  val snri7 = sn1.reverse_split(_7)
  typed[((Long :: Boolean :: String :: Unit :: String :: Double :: Int :: HNil), HNil)](snri7)
}
@Test
def testSplitLiteral: Unit = {
  // Same as testSplit but with Int literal arguments.
  val sn1 = 23 :: 3.0 :: "foo" :: () :: "bar" :: true :: 5L :: HNil

  val sni0 = sn1.split(0)
  typed[(HNil, (Int :: Double :: String :: Unit :: String :: Boolean :: Long :: HNil))](sni0)
  val sni1 = sn1.split(1)
  typed[((Int :: HNil), (Double :: String :: Unit :: String :: Boolean :: Long :: HNil))](sni1)
  val sni2 = sn1.split(2)
  typed[((Int :: Double :: HNil), (String :: Unit :: String :: Boolean :: Long :: HNil))](sni2)
  val sni3 = sn1.split(3)
  typed[((Int :: Double :: String :: HNil), (Unit :: String :: Boolean :: Long :: HNil))](sni3)
  val sni4 = sn1.split(4)
  typed[((Int :: Double :: String :: Unit :: HNil), (String :: Boolean :: Long :: HNil))](sni4)
  val sni5 = sn1.split(5)
  typed[((Int :: Double :: String :: Unit :: String :: HNil), (Boolean :: Long :: HNil))](sni5)
  val sni6 = sn1.split(6)
  typed[((Int :: Double :: String :: Unit :: String :: Boolean :: HNil), (Long :: HNil))](sni6)
  val sni7 = sn1.split(7)
  typed[((Int :: Double :: String :: Unit :: String :: Boolean :: Long :: HNil), HNil)](sni7)

  val snri0 = sn1.reverse_split(0)
  typed[(HNil, (Int :: Double :: String :: Unit :: String :: Boolean :: Long :: HNil))](snri0)
  val snri1 = sn1.reverse_split(1)
  typed[((Int :: HNil), (Double :: String :: Unit :: String :: Boolean :: Long :: HNil))](snri1)
  val snri2 = sn1.reverse_split(2)
  typed[((Double :: Int :: HNil), (String :: Unit :: String :: Boolean :: Long :: HNil))](snri2)
  val snri3 = sn1.reverse_split(3)
  typed[((String :: Double :: Int :: HNil), (Unit :: String :: Boolean :: Long :: HNil))](snri3)
  val snri4 = sn1.reverse_split(4)
  typed[((Unit :: String :: Double :: Int :: HNil), (String :: Boolean :: Long :: HNil))](snri4)
  val snri5 = sn1.reverse_split(5)
  typed[((String :: Unit :: String :: Double :: Int :: HNil), (Boolean :: Long :: HNil))](snri5)
  val snri6 = sn1.reverse_split(6)
  typed[((Boolean :: String :: Unit :: String :: Double :: Int :: HNil), (Long :: HNil))](snri6)
  val snri7 = sn1.reverse_split(7)
  typed[((Long :: Boolean :: String :: Unit :: String :: Double :: Int :: HNil), HNil)](snri7)
}
@Test
def testSplitP: Unit = {
  // splitP is the HList-of-parts variant of split: it yields prefix :: suffix :: HNil.
  val sn1 = 23 :: 3.0 :: "foo" :: () :: "bar" :: true :: 5L :: HNil

  val sni0 = sn1.splitP(_0)
  typed[(HNil) :: (Int :: Double :: String :: Unit :: String :: Boolean :: Long :: HNil) :: HNil](sni0)
  val sni1 = sn1.splitP(_1)
  typed[(Int :: HNil) :: (Double :: String :: Unit :: String :: Boolean :: Long :: HNil) :: HNil](sni1)
  val sni2 = sn1.splitP(_2)
  typed[(Int :: Double :: HNil) :: (String :: Unit :: String :: Boolean :: Long :: HNil) :: HNil](sni2)
  val sni3 = sn1.splitP(_3)
  typed[(Int :: Double :: String :: HNil) :: (Unit :: String :: Boolean :: Long :: HNil) :: HNil](sni3)
  val sni4 = sn1.splitP(_4)
  typed[(Int :: Double :: String :: Unit :: HNil) :: (String :: Boolean :: Long :: HNil) :: HNil](sni4)
  val sni5 = sn1.splitP(_5)
  typed[(Int :: Double :: String :: Unit :: String :: HNil) :: (Boolean :: Long :: HNil) :: HNil](sni5)
  val sni6 = sn1.splitP(_6)
  typed[(Int :: Double :: String :: Unit :: String :: Boolean :: HNil) :: (Long :: HNil) :: HNil](sni6)
  val sni7 = sn1.splitP(_7)
  typed[(Int :: Double :: String :: Unit :: String :: Boolean :: Long :: HNil) :: (HNil) :: HNil](sni7)

  val snri0 = sn1.reverse_splitP(_0)
  typed[(HNil) :: (Int :: Double :: String :: Unit :: String :: Boolean :: Long :: HNil) :: HNil](snri0)
  val snri1 = sn1.reverse_splitP(_1)
  typed[(Int :: HNil) :: (Double :: String :: Unit :: String :: Boolean :: Long :: HNil) :: HNil](snri1)
  val snri2 = sn1.reverse_splitP(_2)
  typed[(Double :: Int :: HNil) :: (String :: Unit :: String :: Boolean :: Long :: HNil) :: HNil](snri2)
  val snri3 = sn1.reverse_splitP(_3)
  typed[(String :: Double :: Int :: HNil) :: (Unit :: String :: Boolean :: Long :: HNil) :: HNil](snri3)
  val snri4 = sn1.reverse_splitP(_4)
  typed[(Unit :: String :: Double :: Int :: HNil) :: (String :: Boolean :: Long :: HNil) :: HNil](snri4)
  val snri5 = sn1.reverse_splitP(_5)
  typed[(String :: Unit :: String :: Double :: Int :: HNil) :: (Boolean :: Long :: HNil) :: HNil](snri5)
  val snri6 = sn1.reverse_splitP(_6)
  typed[(Boolean :: String :: Unit :: String :: Double :: Int :: HNil) :: (Long :: HNil) :: HNil](snri6)
  val snri7 = sn1.reverse_splitP(_7)
  typed[(Long :: Boolean :: String :: Unit :: String :: Double :: Int :: HNil) :: (HNil) :: HNil](snri7)
}
@Test
def testSplitPLiteral: Unit = {
  // Same as testSplitP but with Int literal arguments.
  val sn1 = 23 :: 3.0 :: "foo" :: () :: "bar" :: true :: 5L :: HNil

  val sni0 = sn1.splitP(0)
  typed[(HNil) :: (Int :: Double :: String :: Unit :: String :: Boolean :: Long :: HNil) :: HNil](sni0)
  val sni1 = sn1.splitP(1)
  typed[(Int :: HNil) :: (Double :: String :: Unit :: String :: Boolean :: Long :: HNil) :: HNil](sni1)
  val sni2 = sn1.splitP(2)
  typed[(Int :: Double :: HNil) :: (String :: Unit :: String :: Boolean :: Long :: HNil) :: HNil](sni2)
  val sni3 = sn1.splitP(3)
  typed[(Int :: Double :: String :: HNil) :: (Unit :: String :: Boolean :: Long :: HNil) :: HNil](sni3)
  val sni4 = sn1.splitP(4)
  typed[(Int :: Double :: String :: Unit :: HNil) :: (String :: Boolean :: Long :: HNil) :: HNil](sni4)
  val sni5 = sn1.splitP(5)
  typed[(Int :: Double :: String :: Unit :: String :: HNil) :: (Boolean :: Long :: HNil) :: HNil](sni5)
  val sni6 = sn1.splitP(6)
  typed[(Int :: Double :: String :: Unit :: String :: Boolean :: HNil) :: (Long :: HNil) :: HNil](sni6)
  val sni7 = sn1.splitP(7)
  typed[(Int :: Double :: String :: Unit :: String :: Boolean :: Long :: HNil) :: (HNil) :: HNil](sni7)

  val snri0 = sn1.reverse_splitP(0)
  typed[(HNil) :: (Int :: Double :: String :: Unit :: String :: Boolean :: Long :: HNil) :: HNil](snri0)
  val snri1 = sn1.reverse_splitP(1)
  typed[(Int :: HNil) :: (Double :: String :: Unit :: String :: Boolean :: Long :: HNil) :: HNil](snri1)
  val snri2 = sn1.reverse_splitP(2)
  typed[(Double :: Int :: HNil) :: (String :: Unit :: String :: Boolean :: Long :: HNil) :: HNil](snri2)
  val snri3 = sn1.reverse_splitP(3)
  typed[(String :: Double :: Int :: HNil) :: (Unit :: String :: Boolean :: Long :: HNil) :: HNil](snri3)
  val snri4 = sn1.reverse_splitP(4)
  typed[(Unit :: String :: Double :: Int :: HNil) :: (String :: Boolean :: Long :: HNil) :: HNil](snri4)
  val snri5 = sn1.reverse_splitP(5)
  typed[(String :: Unit :: String :: Double :: Int :: HNil) :: (Boolean :: Long :: HNil) :: HNil](snri5)
  val snri6 = sn1.reverse_splitP(6)
  typed[(Boolean :: String :: Unit :: String :: Double :: Int :: HNil) :: (Long :: HNil) :: HNil](snri6)
  val snri7 = sn1.reverse_splitP(7)
  typed[(Long :: Boolean :: String :: Unit :: String :: Double :: Int :: HNil) :: (HNil) :: HNil](snri7)
}
@Test
def testSelect: Unit = {
  // select[T] picks out the (first) element statically typed as T.
  val sl = 1 :: true :: "foo" :: 2.0 :: HNil

  val si = sl.select[Int]
  assertTypedEquals[Int](1, si)
  val sb = sl.select[Boolean]
  assertTypedEquals[Boolean](true, sb)
  val ss = sl.select[String]
  assertTypedEquals[String]("foo", ss)
  val sd = sl.select[Double]
  assertEquals(2.0, sd, Double.MinPositiveValue)
}
@Test
def testSelectMany: Unit = {
  // selectManyType takes indices as an HList of Nats; selectMany takes Int varargs.
  val si = 1 :: true :: "foo" :: 2.0 :: HNil

  val si1 = si.selectManyType[HNil]
  assertTypedEquals[HNil](HNil, si1)
  val si2 = si.selectManyType[_0 :: HNil]
  assertTypedEquals[Int :: HNil](1 :: HNil, si2)
  val si3 = si.selectManyType[_2 :: HNil]
  assertTypedEquals[String :: HNil]("foo" :: HNil, si3)
  val si4 = si.selectManyType[_0 :: _1 :: _2 :: _3 :: HNil]
  assertTypedEquals[Int :: Boolean :: String :: Double :: HNil](1 :: true :: "foo" :: 2.0 :: HNil, si4)

  val si5 = si.selectMany(0)
  assertTypedEquals[Int :: HNil](1 :: HNil, si5)
  val si6 = si.selectMany(2)
  assertTypedEquals[String :: HNil]("foo" :: HNil, si6)
  val si7 = si.selectMany(0, 1, 2, 3)
  assertTypedEquals[Int :: Boolean :: String :: Double :: HNil](1 :: true :: "foo" :: 2.0 :: HNil, si7)
}
@Test
def testSelectRange: Unit = {
  // selectRange with Nat type arguments and with Int literal arguments must agree.
  val src = 1 :: true :: "foo" :: 2.0 :: HNil

  // Empty ranges, at the start and in the middle.
  val r00  = src.selectRange[_0, _0]
  val r00i = src.selectRange(0, 0)
  assertTypedEquals[HNil](HNil, r00)
  assertTypedEquals[HNil](HNil, r00i)

  val r11  = src.selectRange[_1, _1]
  val r11i = src.selectRange(1, 1)
  assertTypedEquals[HNil](HNil, r11)
  assertTypedEquals[HNil](HNil, r11i)

  // Proper sub-ranges: a leading slice and a trailing slice.
  val r02  = src.selectRange[_0, _2]
  val r02i = src.selectRange(0, 2)
  assertTypedEquals[Int :: Boolean :: HNil](1 :: true :: HNil, r02)
  assertTypedEquals[Int :: Boolean :: HNil](1 :: true :: HNil, r02i)

  val r24  = src.selectRange[_2, _4]
  val r24i = src.selectRange(2, 4)
  assertTypedEquals[String :: Double :: HNil]("foo" :: 2.0 :: HNil, r24)
  assertTypedEquals[String :: Double :: HNil]("foo" :: 2.0 :: HNil, r24i)

  // The full range reproduces the original list.
  val r04  = src.selectRange[_0, _4]
  val r04i = src.selectRange(0, 4)
  assertTypedEquals[Int :: Boolean :: String :: Double :: HNil](1 :: true :: "foo" :: 2.0 :: HNil, r04)
  assertTypedEquals[Int :: Boolean :: String :: Double :: HNil](1 :: true :: "foo" :: 2.0 :: HNil, r04i)
}
@Test
def testFilter: Unit = {
  // filter[T] keeps exactly the elements statically typed as T.
  val l1 = 1 :: 2 :: HNil
  val f1 = l1.filter[Int]
  assertTypedEquals[Int :: Int :: HNil](1 :: 2 :: HNil, f1)

  val l2 = 1 :: true :: "foo" :: 2 :: HNil
  val f2 = l2.filter[Int]
  assertTypedEquals[Int :: Int :: HNil](1 :: 2 :: HNil, f2)
  // Filtering on a type absent from the list yields HNil.
  typed[HNil](l2.filter[Double])
}
@Test
def testFilterNot: Unit = {
  // filterNot[T] removes exactly the elements statically typed as T.
  val l1 = 1 :: 2 :: HNil
  val f1 = l1.filterNot[String]
  assertTypedEquals[Int :: Int :: HNil](1 :: 2 :: HNil, f1)

  val l2 = 1 :: true :: "foo" :: 2 :: HNil
  val f2 = l2.filterNot[String]
  assertTypedEquals[Int :: Boolean :: Int :: HNil](1 :: true :: 2 :: HNil, f2)
  // Was `typed[HNil](l2.filter[Double])` — an apparent copy-paste from testFilter
  // that re-tested Filter here. filterNot on an absent type keeps the whole list.
  typed[Int :: Boolean :: String :: Int :: HNil](l2.filterNot[Double])
}
@Test
def testPartition: Unit = {
  // partition[T] returns a (matching, rest) tuple; partitionP the HList-of-parts form.
  val l1 = 1 :: 2 :: HNil
  val l2 = 1 :: true :: "foo" :: 2 :: HNil

  val r1 = l1.partition[Int]
  assertTypedEquals[(Int :: Int :: HNil, HNil)]((1 :: 2 :: HNil, HNil), r1)
  val r2 = l1.partitionP[Int]
  assertTypedEquals[(Int :: Int :: HNil) :: HNil :: HNil]((1 :: 2 :: HNil) :: HNil :: HNil, r2)

  val r3 = l2.partition[Int]
  assertTypedEquals[(Int :: Int :: HNil, Boolean :: String :: HNil)]((1 :: 2 :: HNil, true :: "foo" :: HNil), r3)
  val r4 = l2.partitionP[Int]
  assertTypedEquals[(Int :: Int :: HNil) :: (Boolean :: String :: HNil) :: HNil](
    (1 :: 2 :: HNil) :: (true :: "foo" :: HNil) :: HNil, r4
  )
}
@Test
def testReplace: Unit = {
  // replace (same-type, element-driven) and replaceType (possibly type-changing,
  // index-driven by type) both return (replaced element, updated list).
  val sl = 1 :: true :: "foo" :: 2.0 :: HNil

  val (i, r1) = sl.replace(23)
  assertTypedEquals[Int](1, i)
  assertTypedEquals[Int :: Boolean :: String :: Double :: HNil](23 :: true :: "foo" :: 2.0 :: HNil, r1)

  val (b, r2) = sl.replace(false)
  assertTypedEquals[Boolean](true, b)
  assertTypedEquals[Int :: Boolean :: String :: Double :: HNil](1 :: false :: "foo" :: 2.0 :: HNil, r2)

  val (s, r3) = sl.replace("bar")
  assertTypedEquals[String]("foo", s)
  assertTypedEquals[Int :: Boolean :: String :: Double :: HNil](1 :: true :: "bar" :: 2.0 :: HNil, r3)

  val (d, r4) = sl.replace(3.0)
  typed[Double](d)
  assertEquals(2.0, d, Double.MinPositiveValue)
  assertTypedEquals[Int :: Boolean :: String :: Double :: HNil](1 :: true :: "foo" :: 3.0 :: HNil, r4)

  // replaceType substitutes a Char in place of the selected slot, changing the list type.
  val (i2, r5) = sl.replaceType[Int]('*')
  typed[Char](r5(0))
  assertTypedEquals[Int](1, i2)
  assertTypedEquals[Char :: Boolean :: String :: Double :: HNil]('*' :: true :: "foo" :: 2.0 :: HNil, r5)

  val (b2, r6) = sl.replaceType[Boolean]('*')
  typed[Char](r6(1))
  assertTypedEquals[Boolean](true, b2)
  assertTypedEquals[Int :: Char :: String :: Double :: HNil](1 :: '*' :: "foo" :: 2.0 :: HNil, r6)

  val (s2, r7) = sl.replaceType[String]('*')
  typed[Char](r7(2))
  assertTypedEquals[String]("foo", s2)
  assertTypedEquals[Int :: Boolean :: Char :: Double :: HNil](1 :: true :: '*' :: 2.0 :: HNil, r7)

  val (d2, r8) = sl.replaceType[Double]('*')
  typed[Double](d2)
  typed[Char](r8(3))
  assertEquals(2.0, d2, Double.MinPositiveValue)
  assertTypedEquals[Int :: Boolean :: String :: Char :: HNil](1 :: true :: "foo" :: '*' :: HNil, r8)

  // Subtype/supertype interplay: replacing via a supertype slot vs a subtype value.
  val fruits = a :: p :: a :: f :: HNil

  val (x1, rr1) = fruits.replaceType[Pear](a)
  typed[Pear](x1)
  typed[Apple :: Apple :: Apple :: Fruit :: HNil](rr1)

  val (x2, rr2) = fruits.replaceType[Pear](f)
  typed[Pear](x2)
  typed[Apple :: Fruit :: Apple :: Fruit :: HNil](rr2)

  val (x3, rr3) = fruits.replaceType[Fruit](p)
  typed[Fruit](x3)
  typed[Apple :: Pear :: Apple :: Pear :: HNil](rr3)

  val (x4, rr4) = fruits.replace(p)
  typed[Pear](x4)
  typed[Apple :: Pear :: Apple :: Fruit :: HNil](rr4)

  val (x5, rr5) = fruits.replace(f)
  typed[Fruit](x5)
  typed[Apple :: Pear :: Apple :: Fruit :: HNil](rr5)
}
@Test
def testUpdate: Unit = {
  // updatedElem / updatedType / updateWith: update without returning the old element;
  // updateWith may change the element's type via the supplied function.
  type SL = Int :: Boolean :: String :: Double :: HNil
  val sl: SL = 1 :: true :: "foo" :: 2.0 :: HNil

  val r1 = sl.updatedElem(23)
  assertTypedEquals[SL](23 :: true :: "foo" :: 2.0 :: HNil, r1)

  val r2 = sl.updatedElem(false)
  assertTypedEquals[SL](1 :: false :: "foo" :: 2.0 :: HNil, r2)

  val r3 = sl.updatedElem("bar")
  assertTypedEquals[SL](1 :: true :: "bar" :: 2.0 :: HNil, r3)

  val r4 = sl.updatedElem(3.0)
  assertTypedEquals[SL](1 :: true :: "foo" :: 3.0 :: HNil, r4)

  val r5 = sl.updatedType[Int]('*')
  assertTypedEquals[Char :: Boolean :: String :: Double :: HNil]('*' :: true :: "foo" :: 2.0 :: HNil, r5)

  val r6 = sl.updatedType[Boolean]('*')
  assertTypedEquals[Int :: Char :: String :: Double :: HNil](1 :: '*' :: "foo" :: 2.0 :: HNil, r6)

  val r7 = sl.updatedType[String]('*')
  assertTypedEquals[Int :: Boolean :: Char :: Double :: HNil](1 :: true :: '*' :: 2.0 :: HNil, r7)

  val r8 = sl.updatedType[Double]('*')
  // Explicit type argument added for consistency with the r5-r7 assertions above.
  assertTypedEquals[Int :: Boolean :: String :: Char :: HNil](1 :: true :: "foo" :: '*' :: HNil, r8)

  val r9 = sl.updateWith((i: Int) => i * 2)
  assertTypedEquals[Int :: Boolean :: String :: Double :: HNil](2 :: true :: "foo" :: 2.0 :: HNil, r9)

  val r10 = sl.updateWith((b: Boolean) => !b)
  assertTypedEquals[Int :: Boolean :: String :: Double :: HNil](1 :: false :: "foo" :: 2.0 :: HNil, r10)

  val r11 = sl.updateWith((s: String) => s.toUpperCase)
  assertTypedEquals[Int :: Boolean :: String :: Double :: HNil](1 :: true :: "FOO" :: 2.0 :: HNil, r11)

  val r12 = sl.updateWith((d: Double) => d / 2.0)
  assertTypedEquals[Int :: Boolean :: String :: Double :: HNil](1 :: true :: "foo" :: 1.0 :: HNil, r12)

  // Type-changing updateWith: the function's result type replaces the slot's type.
  val r13 = sl.updateWith((i: Int) => i.toString)
  assertTypedEquals[String :: Boolean :: String :: Double :: HNil]("1" :: true :: "foo" :: 2.0 :: HNil, r13)

  val r14 = sl.updateWith((b: Boolean) => b.toString)
  assertTypedEquals[Int :: String :: String :: Double :: HNil](1 :: "true" :: "foo" :: 2.0 :: HNil, r14)

  val r15 = sl.updateWith((_: String) => 0xF00)
  assertTypedEquals[Int :: Boolean :: Int :: Double :: HNil](1 :: true :: 0xF00 :: 2.0 :: HNil, r15)

  val r16 = sl.updateWith((d: Double) => d.toString)
  assertTypedEquals[Int :: Boolean :: String :: String :: HNil](1 :: true :: "foo" :: 2.0.toString :: HNil, r16)

  // Subtype/supertype interplay, mirroring testReplace.
  val fruits = a :: p :: a :: f :: HNil

  val rr1 = fruits.updatedType[Pear](a)
  typed[Apple :: Apple :: Apple :: Fruit :: HNil](rr1)

  val rr2 = fruits.updatedType[Pear](f)
  typed[Apple :: Fruit :: Apple :: Fruit :: HNil](rr2)

  val rr3 = fruits.updatedType[Fruit](p)
  typed[Apple :: Pear :: Apple :: Pear :: HNil](rr3)

  val rr4 = fruits.updatedElem(p)
  typed[Apple :: Pear :: Apple :: Fruit :: HNil](rr4)

  val rr5 = fruits.updatedElem(f)
  typed[Apple :: Pear :: Apple :: Fruit :: HNil](rr5)
}
@Test
def testSplitLeft: Unit = {
  // splitLeft[T] splits before the first element of type T; concatenating the
  // parts (:::, or reverse_::: for the reversed variant) must restore the input.
  type SL = Int :: Boolean :: String :: Double :: HNil
  type SL2 = Int :: Double :: String :: Unit :: String :: Boolean :: Long :: HNil
  val sl: SL = 1 :: true :: "foo" :: 2.0 :: HNil
  val sl2: SL2 = 23 :: 3.0 :: "foo" :: () :: "bar" :: true :: 5L :: HNil

  val (sp1, sp2) = sl.splitLeft[String]
  typed[String :: Double :: HNil](sp2)
  typed[Int :: Boolean :: HNil](sp1)
  assertTypedEquals[SL]((sp1 ::: sp2), sl)

  val (sli1, sli2) = sl2.splitLeft[String]
  typed[Int :: Double :: HNil](sli1)
  typed[String :: Unit :: String :: Boolean :: Long :: HNil](sli2)
  assertTypedEquals[SL2]((sli1 ::: sli2), sl2)

  val (rsp1, rsp2) = sl.reverse_splitLeft[String]
  typed[Boolean :: Int :: HNil](rsp1)
  typed[String :: Double :: HNil](rsp2)
  assertTypedEquals[SL]((rsp1 reverse_::: rsp2), sl)

  val (rsli1, rsli2) = sl2.reverse_splitLeft[String]
  typed[Double :: Int :: HNil](rsli1)
  typed[String :: Unit :: String :: Boolean :: Long :: HNil](rsli2)
  assertTypedEquals[SL2]((rsli1 reverse_::: rsli2), sl2)
}
@Test
def testSplitLeftP: Unit = {
  // HList-of-parts variant of splitLeft, destructured with a :: pattern.
  type SL = Int :: Boolean :: String :: Double :: HNil
  type SL2 = Int :: Double :: String :: Unit :: String :: Boolean :: Long :: HNil
  val sl: SL = 1 :: true :: "foo" :: 2.0 :: HNil
  val sl2: SL2 = 23 :: 3.0 :: "foo" :: () :: "bar" :: true :: 5L :: HNil

  val sp1 :: sp2 :: HNil = sl.splitLeftP[String]
  typed[String :: Double :: HNil](sp2)
  typed[Int :: Boolean :: HNil](sp1)
  assertTypedEquals[SL]((sp1 ::: sp2), sl)

  val sli1 :: sli2 :: HNil = sl2.splitLeftP[String]
  typed[Int :: Double :: HNil](sli1)
  typed[String :: Unit :: String :: Boolean :: Long :: HNil](sli2)
  assertTypedEquals[SL2]((sli1 ::: sli2), sl2)

  val rsp1 :: rsp2 :: HNil = sl.reverse_splitLeftP[String]
  typed[Boolean :: Int :: HNil](rsp1)
  typed[String :: Double :: HNil](rsp2)
  assertTypedEquals[SL]((rsp1 reverse_::: rsp2), sl)

  val rsli1 :: rsli2 :: HNil = sl2.reverse_splitLeftP[String]
  typed[Double :: Int :: HNil](rsli1)
  typed[String :: Unit :: String :: Boolean :: Long :: HNil](rsli2)
  assertTypedEquals[SL2]((rsli1 reverse_::: rsli2), sl2)
}
@Test
def testSplitRight: Unit = {
  // splitRight[T] splits after the last element of type T; concatenating the
  // parts must restore the input.
  type SL = Int :: Boolean :: String :: Double :: HNil
  type SL2 = Int :: Double :: String :: Unit :: String :: Boolean :: Long :: HNil
  val sl: SL = 1 :: true :: "foo" :: 2.0 :: HNil
  val sl2: SL2 = 23 :: 3.0 :: "foo" :: () :: "bar" :: true :: 5L :: HNil

  val (srp1, srp2) = sl.splitRight[String]
  typed[Int :: Boolean :: String :: HNil](srp1)
  typed[Double :: HNil](srp2)
  assertTypedEquals[SL]((srp1 ::: srp2), sl)

  val (srli1, srli2) = sl2.splitRight[String]
  typed[Int :: Double :: String :: Unit :: String :: HNil](srli1)
  typed[Boolean :: Long :: HNil](srli2)
  assertTypedEquals[SL2](sl2, srli1 ::: srli2)

  val (rsrp1, rsrp2) = sl.reverse_splitRight[String]
  typed[String :: Boolean :: Int :: HNil](rsrp1)
  typed[Double :: HNil](rsrp2)
  assertTypedEquals[SL]((rsrp1 reverse_::: rsrp2), sl)

  val (rsrli1, rsrli2) = sl2.reverse_splitRight[String]
  typed[String :: Unit :: String :: Double :: Int :: HNil](rsrli1)
  typed[Boolean :: Long :: HNil](rsrli2)
  assertTypedEquals[SL2]((rsrli1 reverse_::: rsrli2), sl2)
}
@Test
def testSplitRightP: Unit = {
  // HList-of-parts variant of splitRight, destructured with a :: pattern.
  type SL = Int :: Boolean :: String :: Double :: HNil
  type SL2 = Int :: Double :: String :: Unit :: String :: Boolean :: Long :: HNil
  val sl: SL = 1 :: true :: "foo" :: 2.0 :: HNil
  val sl2: SL2 = 23 :: 3.0 :: "foo" :: () :: "bar" :: true :: 5L :: HNil

  val srp1 :: srp2 :: HNil = sl.splitRightP[String]
  typed[Int :: Boolean :: String :: HNil](srp1)
  typed[Double :: HNil](srp2)
  assertTypedEquals[SL]((srp1 ::: srp2), sl)

  val srli1 :: srli2 :: HNil = sl2.splitRightP[String]
  typed[Int :: Double :: String :: Unit :: String :: HNil](srli1)
  typed[Boolean :: Long :: HNil](srli2)
  assertTypedEquals[SL2](sl2, srli1 ::: srli2)

  val rsrp1 :: rsrp2 :: HNil = sl.reverse_splitRightP[String]
  typed[String :: Boolean :: Int :: HNil](rsrp1)
  typed[Double :: HNil](rsrp2)
  assertTypedEquals[SL]((rsrp1 reverse_::: rsrp2), sl)

  val rsrli1 :: rsrli2 :: HNil = sl2.reverse_splitRightP[String]
  typed[String :: Unit :: String :: Double :: Int :: HNil](rsrli1)
  typed[Boolean :: Long :: HNil](rsrli2)
  assertTypedEquals[SL2]((rsrli1 reverse_::: rsrli2), sl2)
}
@Test
def testTranspose: Unit = {
  // zipOne/mapConst building blocks, then transpose, including the
  // double-transpose identity.
  val l1 = 1 :: HNil
  val l2 = ("a" :: HNil) :: HNil

  val r1 = l1.zipOne(l2)
  assertTypedEquals[(Int :: String :: HNil) :: HNil]((1 :: "a" :: HNil) :: HNil, r1)
  val r2 = l1.mapConst(HNil)
  assertTypedEquals[HNil :: HNil](HNil :: HNil, r2)
  val r3 = (l1 :: HNil).transpose
  assertTypedEquals[(Int :: HNil) :: HNil]((1 :: HNil) :: HNil, r3)

  val l3 = 1 :: 2 :: 3 :: HNil
  val l4 = ("a" :: 1.0 :: HNil) :: ("b" :: 2.0 :: HNil) :: ("c" :: 3.0 :: HNil) :: HNil
  type ISD = Int :: String :: Double :: HNil

  val z2 = l3.zipOne(l4)
  assertTypedEquals[ISD :: ISD :: ISD :: HNil](
    (1 :: "a" :: 1.0 :: HNil) :: (2 :: "b" :: 2.0 :: HNil) :: (3 :: "c" :: 3.0 :: HNil) :: HNil, z2
  )

  val r5 = l3.mapConst(HNil)
  assertTypedEquals[HNil :: HNil :: HNil :: HNil](HNil :: HNil :: HNil :: HNil, r5)

  val t2 = l4.transpose
  assertTypedEquals[
    (String :: String :: String :: HNil) ::
    (Double :: Double :: Double :: HNil) :: HNil
  ](("a" :: "b" :: "c" :: HNil) :: (1.0 :: 2.0 :: 3.0 :: HNil) :: HNil, t2)

  val t3 = z2.transpose
  assertTypedEquals[
    (Int :: Int :: Int :: HNil) ::
    (String :: String :: String :: HNil) ::
    (Double :: Double :: Double :: HNil) :: HNil
  ](
    (1 :: 2 :: 3 :: HNil) ::
    ("a" :: "b" :: "c" :: HNil) ::
    (1.0 :: 2.0 :: 3.0 :: HNil) :: HNil,
    t3
  )

  // transpose is an involution: transposing twice restores the original.
  val r8 = t3.transpose
  assertTypedEquals[ISD :: ISD :: ISD :: HNil](z2, r8)
}
@Test
def testZipUnzip: Unit = {
  // zip/unzip expressed via transpose + tupled/productElements, the built-in
  // zip/unzip ops, and zipApply for an HList of functions.
  val l1 = 1 :: "a" :: 1.0 :: HNil
  val l2 = 2 :: "b" :: 2.0 :: HNil

  val t1 = (l1 :: l2 :: HNil).transpose
  val z1 = t1.map(tupled)
  assertTypedEquals[(Int, Int) :: (String, String) :: (Double, Double) :: HNil](
    (1, 2) :: ("a", "b") :: (1.0, 2.0) :: HNil, z1)

  // zip as a generic method, driven by Transposer/Mapper evidence.
  def zip[L <: HList, OutT <: HList](l: L)
    (implicit
      transposer: Transposer.Aux[L, OutT],
      mapper: Mapper[tupled.type, OutT]) = l.transpose.map(tupled)

  val z2 = zip(l1 :: l2 :: HNil)
  assertTypedEquals[(Int, Int) :: (String, String) :: (Double, Double) :: HNil](
    (1, 2) :: ("a", "b") :: (1.0, 2.0) :: HNil, z2)

  val z3 = (l1 :: l2 :: HNil).zip
  assertTypedEquals[(Int, Int) :: (String, String) :: (Double, Double) :: HNil](
    (1, 2) :: ("a", "b") :: (1.0, 2.0) :: HNil, z3)

  val t2 = z1.map(productElements).transpose
  val u1 = t2.tupled
  assertTypedEquals[(Int :: String :: Double :: HNil, Int :: String :: Double :: HNil)](
    (1 :: "a" :: 1.0 :: HNil, 2 :: "b" :: 2.0 :: HNil), u1)

  // unzip as a generic method, the inverse construction of zip above.
  def unzip[L <: HList, OutM <: HList, OutT <: HList](l: L)
    (implicit
      mapper: Mapper.Aux[productElements.type, L, OutM],
      transposer: Transposer.Aux[OutM, OutT],
      tupler: Tupler[OutT]) = l.map(productElements).transpose.tupled

  val u2 = unzip(z1)
  assertTypedEquals[(Int :: String :: Double :: HNil, Int :: String :: Double :: HNil)](
    (1 :: "a" :: 1.0 :: HNil, 2 :: "b" :: 2.0 :: HNil), u2)

  val r1 = z1.unzip
  assertTypedEquals[(Int :: String :: Double :: HNil, Int :: String :: Double :: HNil)](
    (1 :: "a" :: 1.0 :: HNil, 2 :: "b" :: 2.0 :: HNil), r1)

  val r2 = l1 zip l2
  assertTypedEquals[(Int, Int) :: (String, String) :: (Double, Double) :: HNil](
    (1, 2) :: ("a", "b") :: (1.0, 2.0) :: HNil, r2)

  // zipApply: apply an HList of functions pointwise to an HList of arguments.
  val intInc: Int => Int = _ + 1
  val stringInc: String => String = _ + "*"
  val doubleInc: Double => Int = _.toInt + 1

  val l3 = intInc :: stringInc :: doubleInc :: HNil
  val z5 = l3 zipApply l1
  assertTypedEquals[Int :: String :: Int :: HNil](2 :: "a*" :: 2 :: HNil, z5)
}
@Test
def testUnapply: Unit = {
  // Pattern matching on HLists: the :: extractor, the #: ListCompat extractor,
  // matching through Any, and mixing HList and List patterns.
  val l = 1 :: true :: "foo" :: 2.0 :: HNil
  val l2 = 23 :: 3.0 :: "foo" :: () :: "bar" :: true :: 5L :: HNil

  val is = l match {
    case i :: true :: s :: 2.0 :: HNil => (i, s)
  }
  assertTypedEquals[Int](1, is._1)
  assertTypedEquals[String]("foo", is._2)

  // Matching through Any requires explicit element type patterns.
  val is2 = (l: Any) match {
    case (i: Int) :: true :: (s: String) :: 2.0 :: HNil => (i, s)
    case _ => sys.error("Not matched")
  }
  assertTypedEquals[Int](1, is2._1)
  assertTypedEquals[String]("foo", is2._2)

  import HList.ListCompat._

  val tl = l2 match {
    case 23 #: 3.0 #: s #: xs => (s, xs)
    case _ => sys.error("Not matched")
  }
  assertTypedEquals[String]("foo", tl._1)
  assertTypedEquals[Unit :: String :: Boolean :: Long :: HNil](() :: "bar" :: true :: 5L :: HNil, tl._2)

  // Through Any the tail is only known to be an HList.
  val tl2 = (l2: Any) match {
    case 23 #: 3.0 #: (s: String) #: xs => (s, xs)
    case _ => sys.error("Not matched")
  }
  assertTypedEquals[String]("foo", tl2._1)
  assertTypedEquals[HList](() :: "bar" :: true :: 5L :: HNil, tl2._2)

  // Plain List patterns still work alongside the HList extractors.
  val ll = List(1, 2, 3, 4)
  val tll = ll match {
    case 1 :: 2 :: x :: y :: Nil => (x, y)
    case _ => sys.error("Not matched")
  }
  assertTypedEquals[Int](3, tll._1)
  assertTypedEquals[Int](4, tll._2)

  val tll2 = ll match {
    case 1 :: xs => xs
    case _ => sys.error("Not matched")
  }
  assertTypedEquals[List[Int]](List(2, 3, 4), tll2)

  // HList pattern (#:) with a nested List pattern (::) inside it.
  val mixed = 23 :: "foo" :: (1 :: 2 :: 3 :: 4 :: 5 :: Nil) :: false :: () :: HNil
  val tmixed = mixed match {
    case _ #: _ #: (_ :: 2 :: x :: tl1) #: tl2 => (x, tl1, tl2)
    case _ => sys.error("Not matched")
  }
  assertTypedEquals[Int](3, tmixed._1)
  assertTypedEquals[List[Int]](4 :: 5 :: Nil, tmixed._2)
  assertTypedEquals[Boolean :: Unit :: HNil](false :: () :: HNil, tmixed._3)
}
@Test
def testRemove: Unit = {
  // removeElem[T] removes the (first) element of type T, returning (element, rest).
  val l = 1 :: true :: "foo" :: HNil

  val li = l.removeElem[Int]
  assertTypedEquals[(Int, Boolean :: String :: HNil)]((1, true :: "foo" :: HNil), li)
  val lb = l.removeElem[Boolean]
  assertTypedEquals[(Boolean, Int :: String :: HNil)]((true, 1 :: "foo" :: HNil), lb)
  val ls = l.removeElem[String]
  assertTypedEquals[(String, Int :: Boolean :: HNil)](("foo", 1 :: true :: HNil), ls)

  // With duplicate element types, Remove takes the first occurrence.
  val withDuplicates = 1 :: 'a' :: 'b' :: HNil
  val remover = implicitly[Remove.Aux[Int :: Char :: Char :: HNil, Char, (Char, Int :: Char :: HNil)]]
  assertTypedEquals[(Char, Int :: Char :: HNil)](('a', 1 :: 'b' :: HNil), remover(withDuplicates))
}
@Test
def testRemoveAll {
// removeAll[S] extracts a sublist whose element types are given by S (in
// S's order), paired with whatever remains of the original list.
val input = 1 :: true :: "foo" :: HNil
val removedNone = input.removeAll[HNil]
assertTypedEquals[(HNil, Int :: Boolean :: String :: HNil)]((HNil, 1 :: true :: "foo" :: HNil), removedNone)
val removedInt = input.removeAll[Int :: HNil]
assertTypedEquals[(Int :: HNil, Boolean :: String :: HNil)]((1 :: HNil, true :: "foo" :: HNil), removedInt)
val removedBool = input.removeAll[Boolean :: HNil]
assertTypedEquals[(Boolean :: HNil, Int :: String :: HNil)]((true :: HNil, 1 :: "foo" :: HNil), removedBool)
// The requested order (Boolean before Int) is honoured in the extracted sublist.
val removedBoolInt = input.removeAll[Boolean :: Int :: HNil]
assertTypedEquals[(Boolean :: Int :: HNil, String :: HNil)]((true :: 1 :: HNil, "foo" :: HNil), removedBoolInt)
}
@Test
def testUnion {
// union keeps the left operand intact and appends only those elements of the
// right operand whose types are not already present on the left.
type L1 = String :: Long :: HNil
val l1: L1 = "foo" :: 3L :: HNil
type L2 = Int :: String :: Boolean :: HNil
val l2: L2 = 2 :: "bar" :: true :: HNil
type L3 = Int :: Int :: HNil
val l3: L3 = 1 :: 2 :: HNil
type L4 = Int :: Int :: Int :: HNil
val l4: L4 = 4 :: 5 :: 6 :: HNil
// Union with HNil is the identity.
val lnil = l1.union[HNil](HNil)
assertTypedEquals[L1](l1, lnil)
// Union with itself is also the identity.
val lself = l1.union(l1)
assertTypedEquals[L1](l1, lself)
// Shared types (String here) take the LEFT operand's value.
val l12 = l1.union(l2)
assertTypedEquals[String :: Long :: Int :: Boolean :: HNil]("foo" :: 3L :: 2 :: true :: HNil, l12)
// Union is not commutative: order and values differ when flipped.
val l21 = l2.union(l1)
assertTypedEquals[Int :: String :: Boolean :: Long :: HNil](2 :: "bar" :: true :: 3L :: HNil, l21)
// A duplicate-producing Union instance must not exist.
illTyped { """implicitly[Union.Aux[Int :: HNil, Int :: HNil, Int :: Int :: HNil]]"""}
// Duplicate types are matched positionally: only the unmatched surplus from
// the right (the extra Int, 6) is appended.
val ldup1 = (l3).union(l4)
assertTypedEquals[Int :: Int :: Int :: HNil](1 :: 2 :: 6 :: HNil, ldup1)
val ldup2 = (l4).union(l3)
assertTypedEquals[Int :: Int :: Int :: HNil](4 :: 5 :: 6 :: HNil, ldup2)
}
@Test
def testIntersection {
// intersect[M] keeps the elements of the receiver whose types occur in M,
// preserving the receiver's order and values.
type L1 = String :: Long :: Int :: HNil
val l1: L1 = "foo" :: 1L :: 3 :: HNil
type L2 = Int :: String :: Boolean :: HNil
val l2: L2 = 2 :: "bar" :: true :: HNil
type L3 = Int :: String :: Int :: HNil
val l3: L3 = 4 :: "foo" :: 5 :: HNil
// Intersection with HNil is empty.
val lnil = l1.intersect[HNil]
assertTypedEquals[HNil](HNil, lnil)
// Intersection with the list's own type is the identity.
val lself = l1.intersect[L1]
assertTypedEquals[L1](l1, lself)
// Values always come from the receiver, in the receiver's order.
val l12 = l1.intersect[L2]
assertTypedEquals[String :: Int :: HNil]("foo" :: 3 :: HNil, l12)
val l21 = l2.intersect[L1]
assertTypedEquals[Int :: String :: HNil](2 :: "bar" :: HNil, l21)
// An instance that drops a shared element must not exist.
illTyped { """implicitly[Intersection.Aux[Int :: HNil, Int :: HNil, HNil]]"""}
// Duplicate types are counted: asking for one Int yields one, two yields both.
val ldup1 = (l3).intersect[Int :: HNil]
assertTypedEquals[Int :: HNil](4 :: HNil, ldup1)
val ldup2 = (l3).intersect[Int :: Int :: HNil]
assertTypedEquals[Int :: Int :: HNil](4 :: 5 :: HNil, ldup2)
val ldup3 = (l3).intersect[String :: HNil]
assertTypedEquals[String :: HNil]("foo" :: HNil, ldup3)
}
@Test
def testDiff {
// diff[M] removes from the receiver one element per occurrence of each type
// in M, keeping the rest in the receiver's order.
type L1 = String :: Long :: Int :: HNil
val l1: L1 = "foo" :: 1L :: 3 :: HNil
type L2 = Int :: String :: Boolean :: HNil
val l2: L2 = 2 :: "bar" :: true :: HNil
type L3 = Int :: Boolean :: Int :: HNil
val l3: L3 = 4 :: false :: 5 :: HNil
// Removing nothing is the identity.
val lnil = l1.diff[HNil]
assertTypedEquals[L1](l1, lnil)
// Removing the list's own type leaves nothing.
val lself = l1.diff[L1]
assertTypedEquals[HNil](HNil, lself)
val l12 = l1.diff[L2]
assertTypedEquals[Long :: HNil](1L :: HNil, l12)
val l21 = l2.diff[L1]
assertTypedEquals[Boolean :: HNil](true :: HNil, l21)
// Duplicate types are removed one occurrence per request, front-first.
val ldup1 = (l3).diff[Int :: HNil]
assertTypedEquals[Boolean :: Int :: HNil](false :: 5 :: HNil, ldup1)
val ldup2 = (l3).diff[Int :: Int :: HNil]
assertTypedEquals[Boolean :: HNil](false :: HNil, ldup2)
val ldup3 = (l3).diff[Boolean :: HNil]
assertTypedEquals[Int :: Int :: HNil](4 :: 5 :: HNil, ldup3)
}
@Test
def testReinsert {
// reinsert is the inverse of removeElem: putting the removed element back
// reconstructs the original list, at the type level as well as the value level.
type L = Int :: Boolean :: String :: HNil
val original: L = 1 :: true :: "foo" :: HNil
val (intElem, sansInt) = original.removeElem[Int]
assertTypedEquals[L](sansInt.reinsert[L](intElem), original)
val (boolElem, sansBool) = original.removeElem[Boolean]
assertTypedEquals[L](sansBool.reinsert[L](boolElem), original)
val (strElem, sansStr) = original.removeElem[String]
assertTypedEquals[L](sansStr.reinsert[L](strElem), original)
}
@Test
def testReinsertAll {
// reinsertAll is the inverse of removeAll: putting the removed sublist back
// reconstructs the original list exactly.
type L = Int :: Boolean :: String :: HNil
val original = 1 :: true :: "foo" :: HNil
val (noneRemoved, allKept) = original.removeAll[HNil]
assertTypedEquals[L](allKept.reinsertAll[L](noneRemoved), original)
val (justInt, sansInt) = original.removeAll[Int :: HNil]
assertTypedEquals[L](sansInt.reinsertAll[L](justInt), original)
val (justBool, sansBool) = original.removeAll[Boolean :: HNil]
assertTypedEquals[L](sansBool.reinsertAll[L](justBool), original)
val (boolAndInt, sansBoolInt) = original.removeAll[Boolean :: Int :: HNil]
assertTypedEquals[L](sansBoolInt.reinsertAll[L](boolAndInt), original)
}
// Binary Poly used by the foldLeft tests: (Char, String) => index of the char
// in the string, and (Int, Boolean) => "pass"/"fail" depending on whether the
// sign of the Int agrees with the Boolean.
object combine extends Poly {
implicit def caseCharString = use((c : Char, s : String) => s.indexOf(c))
implicit def caseIntBoolean = use((i : Int, b : Boolean) => if ((i >= 0) == b) "pass" else "fail")
}
@Test
def testFoldLeft {
// First check the individual combine cases behave as expected when applied
// directly, mirroring the two steps of the fold below.
val c1a = combine('o', "foo")
val c1b = combine(c1a, true)
assertTypedEquals[String]("pass", c1b)
// The LeftFolder instances needed by the folds below must resolve, with the
// expected intermediate and final accumulator types.
implicitly[LeftFolder.Aux[HNil, String, combine.type, String]]
implicitly[LeftFolder.Aux[Boolean :: HNil, Int, combine.type, String]]
implicitly[LeftFolder.Aux[String :: Boolean :: HNil, Char, combine.type, String]]
val tf1 = implicitly[LeftFolder[HNil, String, combine.type]]
val tf2 = implicitly[LeftFolder[Boolean :: HNil, Int, combine.type]]
val tf3 = implicitly[LeftFolder[String :: Boolean :: HNil, Char, combine.type]]
// 'o' -> indexOf in "foo" (1, >= 0) -> agrees with true -> "pass".
val l1 = "foo" :: true :: HNil
val f1 = l1.foldLeft('o')(combine)
assertTypedEquals[String]("pass", f1)
val c2a = combine('o', "bar")
val c2b = combine(c2a, false)
assertTypedEquals[String]("pass", c2b)
// 'o' -> indexOf in "bar" (-1, < 0) -> agrees with false -> "pass".
val l2 = "bar" :: false :: HNil
val f2 = l2.foldLeft('o')(combine)
assertTypedEquals[String]("pass", f2)
}
@Test
def testUpdatedAt {
// updatedAt[N] replaces the element at type-level index N; here each
// replacement value has the same type, so the list type is unchanged.
type IBS = Int :: Boolean :: String :: HNil
val input = 1 :: true :: "foo" :: HNil
val replacedHead = input.updatedAt[_0](2)
assertTypedEquals[IBS](2 :: true :: "foo" :: HNil, replacedHead)
val replacedMid = input.updatedAt[_1](false)
assertTypedEquals[IBS](1 :: false :: "foo" :: HNil, replacedMid)
val replacedLast = input.updatedAt[_2]("bar")
assertTypedEquals[IBS](1 :: true :: "bar" :: HNil, replacedLast)
}
@Test
def testUpdatedAtLiteral {
// Same as testUpdatedAt but the index is given as an Int literal rather
// than as a Nat type argument.
type IBS = Int :: Boolean :: String :: HNil
val input = 1 :: true :: "foo" :: HNil
val replacedHead = input.updatedAt(0, 2)
assertTypedEquals[IBS](2 :: true :: "foo" :: HNil, replacedHead)
val replacedMid = input.updatedAt(1, false)
assertTypedEquals[IBS](1 :: false :: "foo" :: HNil, replacedMid)
val replacedLast = input.updatedAt(2, "bar")
assertTypedEquals[IBS](1 :: true :: "bar" :: HNil, replacedLast)
}
@Test
def testNatTRel {
// NatTRel[L1, F, L2, G] witnesses that L2 is L1 with every F[t] element type
// replaced by G[t]; these implicits must all resolve.
type L1 = Int :: String :: Boolean :: HNil
type L2 = List[Int] :: List[String] :: List[Boolean] :: HNil
type L3 = Option[Int] :: Option[String] :: Option[Boolean] :: HNil
type L4 = Int :: Int :: Int :: HNil
type L5 = String :: String :: String :: HNil
implicitly[NatTRel[L1, Id, L2, List]]
implicitly[NatTRel[L2, List, L1, Id]]
implicitly[NatTRel[L2, List, L3, Option]]
// Const[Int]#λ maps every element to Int, relating arbitrary lists to L4.
implicitly[NatTRel[L1, Id, L4, Const[Int]#λ]]
implicitly[NatTRel[L2, List, L4, Const[Int]#λ]]
}
// Natural transformation used by testNatTRelMap: expands an Option into a
// List holding its value three times (None becomes the empty List).
object optionToList extends (Option ~> List) {
def apply[A](fa: Option[A]): List[A] = fa.toList.flatMap(a => List.fill(3)(a))
}
@Test
def testNatTRelMap {
// NatTRel.map applies a natural transformation (Option ~> List) uniformly
// across the elements, relating the Option-list to the List-list.
type L1 = Option[Int] :: Option[Boolean] :: Option[String] :: Option[Nothing] :: HNil
type L2 = List[Int] :: List[Boolean] :: List[String] :: List[Nothing] :: HNil
val nattrel = implicitly[NatTRel[L1, Option, L2, List]]
val l1: L1 = Option(1) :: Option(true) :: Option("three") :: None :: HNil
val l2 = nattrel.map(optionToList, l1)
// optionToList triples each defined value; None maps to the empty List.
assertTypedEquals[L2](l2,
List(1, 1, 1) :: List(true, true, true) :: List("three", "three", "three") :: List() :: HNil)
}
@Test
def testZipConst {
// zipConst pairs every element of the list with a single constant value.
type IBS = Int :: Boolean :: String :: HNil
val c = 5
type WithConst = (Int, Int) :: (Boolean, Int) :: (String, Int) :: HNil
val l = 1 :: true :: "a" :: HNil
typed[IBS](l)
val expected = (1, c) :: (true, c) :: ("a", c) :: HNil
// Summon the type class explicitly via its apply...
val zcIntIbs = ZipConst[Int, IBS]
val zipped1 = zcIntIbs(c, l)
assertTypedEquals[WithConst](expected, zipped1)
// ...via implicitly with the Aux-encoded result type...
val zcaIntIbs = implicitly[ZipConst.Aux[Int, IBS, WithConst]]
assertTypedEquals[WithConst](expected, zcaIntIbs(c, l))
// ...and through the HList syntax extension method.
val x = l.zipConst(c)
assertTypedEquals[WithConst](expected, x)
}
@Test
def testZipWith {
// zipWith combines two HLists element-wise through a Poly2; the result is
// truncated to the shorter operand.
import poly._
object empty extends Poly2
object add extends Poly2 {
implicit val caseIntInt = at[Int, Int](_ + _)
}
// HNil zipWith HNil (emptyFn)
val r1 = (HNil: HNil).zipWith(HNil: HNil)(empty)
assertTypedEquals[HNil](HNil, r1)
// HNil zipWith nonEmpty (emptyFn)
val r2 = (HNil: HNil).zipWith(1 :: HNil)(empty)
assertTypedEquals[HNil](HNil, r2)
// nonEmpty zipWith HNil (emptyFn)
val r3 = (1 :: HNil).zipWith(HNil: HNil)(empty)
assertTypedEquals[HNil](HNil, r3)
// singleton zipWith singleton
val r4 = (1 :: HNil).zipWith(2 :: HNil)(add)
assertTypedEquals[Int :: HNil](3 :: HNil, r4)
{ // longList zipWith longerList: extra elements of the longer list are dropped
type Left = Int :: String :: Double :: HNil
type Right = Int :: Double :: String :: Boolean :: HNil
val left: Left = 1 :: "foo" :: 1.2 :: HNil
val right: Right = 2 :: 2.3 :: "3.4" :: true :: HNil
object zipFn extends Poly2 {
implicit val caseIntInt = at[Int, Int](_ + _)
implicit val caseStringDouble = at[String, Double](_ + " -> " + _.toString)
implicit val caseDoubleString = at[Double, String](_ + _.toDouble)
}
val r5 = left.zipWith(right)(zipFn)
assertTypedEquals[Int :: String :: Double :: HNil](3 :: "foo -> 2.3" :: 4.6 :: HNil, r5)
}
def testZipWithIndex: Unit = {
// HNil zipWithIndex
val r1 = (HNil: HNil).zipWithIndex
assertTypedEquals[HNil](HNil, r1)
// One element HList zipWithIndex
val r2 = (0::HNil).zipWithIndex
assertTypedEquals[(Int,_0)::HNil]((0,_0)::HNil, r2)
// HList zipWithIndex: each element is paired with its type-level index
val r3 = (0::1::2::3::HNil).zipWithIndex
assertTypedEquals[(Int,_0)::(Int,_1)::(Int,_2)::(Int,_3)::HNil]((0,_0)::(1,_1)::(2,_2)::(3,_3)::HNil, r3)
}
// BUG FIX: testZipWithIndex was defined but never invoked, so its assertions
// never ran. Call it so zipWithIndex is actually exercised.
testZipWithIndex
{ // invalid polys: zipWith must not compile when the Poly2 lacks a matching case
illTyped("""
(1 :: HNil).zipWith(2 :: HNil)(empty)
""")
object noIntFn extends Poly2 {
implicit val caseDoubleDouble = at[Double, Double](_ + _)
}
illTyped("""
(1 :: HNil).zipWith(2 :: HNil)(noIntFn)
""")
illTyped("""
(1.0 :: 2 :: HNil).zipWith(2.0 :: 3 :: HNil)(noIntFn)
""")
}
}
@Test
def testWithKeys {
// zipWithKeys rebuilds a record by re-attaching keys to a plain HList of
// values; the result must be identical (in type and value) to the original.
import record._
import syntax.singleton._
val orig =
("intField" ->> 1) ::
("boolField" ->> true) ::
HNil
val result = orig.values.zipWithKeys(orig.keys)
sameTyped(orig)(result)
assertEquals(orig, result)
val int = result.get("intField")
assertTypedEquals[Int](1, int)
val bool = result.get("boolField")
assertTypedEquals[Boolean](true, bool)
// A key that was never present must not resolve.
illTyped("""result.get("otherField")""")
// key/value lengths must match up
illTyped("orig.tail.values.zipWithKeys(orig.keys)")
illTyped("orig.values.zipWithKeys(orig.keys.tail)")
// Explicit type argument: keys supplied as a singleton-typed HList type.
{
val result = orig.values.zipWithKeys[HList.`"intField", "boolField"`.T]
sameTyped(orig)(result)
assertEquals(orig, result)
val int = result.get("intField")
assertTypedEquals[Int](1, int)
val bool = result.get("boolField")
assertTypedEquals[Boolean](true, bool)
illTyped("""result.get("otherField")""")
// key/value lengths must match up
illTyped(""" orig.tail.values.zipWithKeys[HList.`"intField", "boolField"`.T] """)
illTyped(""" orig.values.zipWithKeys[HList.`"boolField"`.T] """)
}
}
@Test
def testCollect {
// collect keeps only the elements for which the Poly1 has a case, mapping
// them through it — the HList analogue of List.collect.
import poly._
object empty extends Poly1
object complex extends Poly1 {
implicit val caseInt = at[Int](_.toDouble)
implicit val caseString = at[String](_ => 1)
}
val in: Int :: String :: Double :: HNil = 1 :: "foo" :: 2.2 :: HNil
// HNil collect p: always HNil regardless of the poly.
val r1 = (HNil: HNil).collect(empty)
assertTypedEquals[HNil](HNil, r1)
val r2 = (HNil: HNil).collect(poly.identity)
assertTypedEquals[HNil](HNil, r2)
val r3 = (HNil: HNil).collect(complex)
assertTypedEquals[HNil](HNil, r3)
// non-HNil collect empty: no cases match, everything is dropped.
val r4 = in.collect(empty)
assertTypedEquals[HNil](HNil, r4)
// non-HNil collect identity: every element matches and is unchanged.
val r5 = in.collect(identity)
assertTypedEquals[Int :: String :: Double :: HNil](in, r5)
// non-HNil collect complex: Int and String are mapped, Double is dropped.
val r6 = in.collect(complex)
assertTypedEquals[Double :: Int :: HNil](1.0 :: 1 :: HNil, r6)
}
@Test
def testOrdering {
// HLists of ordered element types are themselves ordered, lexicographically
// from head to tail.
val nils = List(HNil: HNil, HNil)
assertEquals(nils, nils.sorted)
assertEquals(List(1 :: HNil, 2 :: HNil, 3 :: HNil), List(2 :: HNil, 1 :: HNil, 3 :: HNil).sorted)
val expected = List(1 :: "abc" :: HNil, 1 :: "def" :: HNil, 2 :: "abc" :: HNil, 2 :: "def" :: HNil)
val shuffled = List(2 :: "abc" :: HNil, 1 :: "def" :: HNil, 2 :: "def" :: HNil, 1 :: "abc" :: HNil)
assertEquals(expected, shuffled.sorted)
}
@Test
def testMapCons {
// mapCons prepends a value onto every element of an HList of HLists.
type C = Char; type S = String; type I = Int; type D = Double
val r1 = (HNil: HNil).mapCons('a')
assertTypedEquals[HNil](HNil, r1)
val r2 = (HNil :: HNil).mapCons('a')
assertTypedEquals[(Char :: HNil) :: HNil]((('a' :: HNil) :: HNil), r2)
// Each inner list gains 'a' as its new head.
val r3 = ((1 :: HNil) :: ("foo" :: HNil) :: (2.0 :: HNil) :: HNil).mapCons('a')
assertTypedEquals[(C::I::HNil) :: (C::S::HNil) :: (C::D::HNil) :: HNil](
('a' :: 1 :: HNil) :: ('a' :: "foo" :: HNil) :: ('a' :: 2.0 :: HNil) :: HNil,
r3
)
}
@Test
def testInterleave {
// Interleave produces every list obtainable by inserting the element at
// each possible position of the input — n+1 results for an n-element list.
type C = Char; type S = String; type I = Int; type D = Double
def interleave[I, L <: HList](i: I, l: L)(implicit interleave: Interleave[I, L]): interleave.Out = interleave(i, l)
val r1 = interleave('i', HNil)
assertTypedEquals[(Char :: HNil) :: HNil](('i' :: HNil) :: HNil, r1)
val r2 = interleave('i', 1 :: HNil)
assertTypedEquals[(C::I::HNil) :: (I::C::HNil) :: HNil](('i' :: 1 :: HNil) :: (1 :: 'i' :: HNil) :: HNil,
r2
)
val r3 = interleave('i', 1 :: "foo" :: HNil)
assertTypedEquals[(C::I::S::HNil) :: (I::C::S::HNil) :: (I::S::C::HNil) :: HNil](
('i' :: 1 :: "foo" :: HNil) ::
(1 :: 'i' :: "foo" :: HNil) ::
(1 :: "foo" :: 'i' :: HNil) :: HNil,
r3
)
val r4 = interleave('i', 1 :: "foo" :: 2.0 :: HNil)
assertTypedEquals[(C::I::S::D::HNil) :: (I::C::S::D::HNil) :: (I::S::C::D::HNil) :: (I::S::D::C::HNil) :: HNil](
('i' :: 1 :: "foo" :: 2.0 :: HNil) ::
(1 :: 'i' :: "foo" :: 2.0 :: HNil) ::
(1 :: "foo" :: 'i' :: 2.0 :: HNil) ::
(1 :: "foo" :: 2.0 :: 'i' :: HNil) :: HNil,
r4
)
}
@Test
def testFlatMapInterleave {
// FlatMapInterleave interleaves the element into each inner list of an
// HList of HLists and concatenates all the resulting alternatives.
type C = Char; type I = Int
def flatMapInterleave[I, L <: HList](i: I, l: L)(implicit flatMapInterleave: FlatMapInterleave[I, L]) =
flatMapInterleave(i, l)
val r1 = flatMapInterleave('i', HNil)
assertTypedEquals[HNil](HNil, r1)
val r2 = flatMapInterleave('i', HNil :: HNil)
assertTypedEquals[(Char :: HNil) :: HNil](('i' :: HNil) :: HNil, r2)
// Two singleton inner lists each yield two interleavings, four results total.
val r3 = flatMapInterleave('i', (1 :: HNil) :: (2 :: HNil) :: HNil)
assertTypedEquals[(C::I::HNil) :: (I::C::HNil) :: (C::I::HNil) :: (I::C::HNil) :: HNil](
('i' :: 1 :: HNil) ::
(1 :: 'i' :: HNil) ::
('i' :: 2 :: HNil) ::
(2 :: 'i' :: HNil) :: HNil,
r3
)
}
@Test
def testPermutations {
// permutations yields all n! orderings of the list as an HList of HLists
// (one permutation — the empty one — for HNil).
type S = String; type I = Int; type D = Double
val r1 = HNil.permutations
assertTypedEquals[HNil :: HNil](HNil :: HNil, r1)
val r2 = (1 :: HNil).permutations
assertTypedEquals[(Int :: HNil) :: HNil]((1 :: HNil) :: HNil, r2)
val r3 = (1 :: "foo" :: HNil).permutations
assertTypedEquals[(I::S::HNil) :: (S::I::HNil) :: HNil](
(1 :: "foo" :: HNil) ::
("foo" :: 1 :: HNil) :: HNil,
r3
)
// 3 elements => 6 permutations, in the order produced by the Permutations op.
val r4 = (1 :: "foo" :: 2.0 :: HNil).permutations
assertTypedEquals[
(I::S::D::HNil) :: (S::I::D::HNil) :: (S::D::I::HNil) ::
(I::D::S::HNil) :: (D::I::S::HNil) :: (D::S::I::HNil) :: HNil
](
(1 :: "foo" :: 2.0 :: HNil) ::
("foo" :: 1 :: 2.0 :: HNil) ::
("foo" :: 2.0 :: 1 :: HNil) ::
(1 :: 2.0 :: "foo" :: HNil) ::
(2.0 :: 1 :: "foo" :: HNil) ::
(2.0 :: "foo" :: 1 :: HNil) :: HNil,
r4
)
}
@Test
def testMkString {
// mkString renders the elements via toString between the given start,
// separator and end strings (the interpolation pins Double's formatting).
val hl = 1 :: "foo" :: 2.0 :: HNil
val rendered = hl.mkString("⸨", ", ", "⸩")
assertEquals(s"⸨1, foo, ${2.0}⸩", rendered)
}
@Test
def testRotateLeft {
// rotateLeft(n) moves the first n elements (mod size) to the back. Each
// behaviour is checked with both the Int-literal and the Nat type argument
// form. assertTypedSame additionally checks reference identity where a
// rotation is expected to be a no-op.
val in0 = HNil
val in1 = 1 :: HNil
val in2 = 1 :: "foo" :: HNil
val in3 = 1 :: "foo" :: 2.0 :: HNil
val in4 = 1 :: "foo" :: 2.0 :: 'a' :: HNil
type S = String; type I = Int; type D = Double; type C = Char
{ // rotateLeft(0): always the identity
val r1 = in0.rotateLeft(0)
assertTypedSame[HNil](HNil, r1)
val r2 = in1.rotateLeft(0)
assertTypedSame[I :: HNil](in1, r2)
val r3 = in2.rotateLeft(0)
assertTypedSame[I :: S :: HNil](in2, r3)
val r4 = in3.rotateLeft(0)
assertTypedSame[I :: S :: D :: HNil](in3, r4)
val r5 = in4.rotateLeft(0)
assertTypedSame[I :: S :: D :: C :: HNil](in4, r5)
}
{ // rotateLeft[_0]: type-level zero, also the identity
val r1 = in0.rotateLeft[_0]
assertTypedSame[HNil](HNil, r1)
val r2 = in1.rotateLeft[_0]
assertTypedSame[I :: HNil](in1, r2)
val r3 = in2.rotateLeft[_0]
assertTypedSame[I :: S :: HNil](in2, r3)
val r4 = in3.rotateLeft[_0]
assertTypedSame[I :: S :: D :: HNil](in3, r4)
val r5 = in4.rotateLeft[_0]
assertTypedSame[I :: S :: D :: C :: HNil](in4, r5)
}
{ // rotateLeft(n % size == 0): whole-multiple rotations are the identity
val r1 = in1.rotateLeft(1)
assertTypedSame[I :: HNil](in1, r1)
val r2 = in1.rotateLeft(2)
assertTypedSame[I :: HNil](in1, r2)
val r3 = in2.rotateLeft(2)
assertTypedSame[I :: S :: HNil](in2, r3)
val r4 = in2.rotateLeft(4)
assertTypedSame[I :: S :: HNil](in2, r4)
val r5 = in3.rotateLeft(3)
assertTypedSame[I :: S :: D :: HNil](in3, r5)
val r6 = in3.rotateLeft(6)
assertTypedSame[I :: S :: D :: HNil](in3, r6)
val r7 = in4.rotateLeft(4)
assertTypedSame[I :: S :: D :: C :: HNil](in4, r7)
val r8 = in4.rotateLeft(8)
assertTypedSame[I :: S :: D :: C :: HNil](in4, r8)
}
{ // rotateLeft[N % Size == 0]: same identities at the type level
val r1 = in1.rotateLeft[_1]
assertTypedSame[I :: HNil](in1, r1)
val r2 = in1.rotateLeft[_2]
assertTypedSame[I :: HNil](in1, r2)
val r3 = in2.rotateLeft[_2]
assertTypedSame[I :: S :: HNil](in2, r3)
val r4 = in2.rotateLeft[_4]
assertTypedSame[I :: S :: HNil](in2, r4)
val r5 = in3.rotateLeft[_3]
assertTypedSame[I :: S :: D :: HNil](in3, r5)
val r6 = in3.rotateLeft[_6]
assertTypedSame[I :: S :: D :: HNil](in3, r6)
val r7 = in4.rotateLeft[_4]
assertTypedSame[I :: S :: D :: C :: HNil](in4, r7)
val r8 = in4.rotateLeft[_8]
assertTypedSame[I :: S :: D :: C :: HNil](in4, r8)
}
{ // other(n): genuine rotations; n > size wraps around (5 ≡ 1, 6 ≡ 2 mod 4)
val r1 = in2.rotateLeft(1)
assertTypedEquals[S :: I :: HNil]("foo" :: 1 :: HNil, r1)
val r2 = in3.rotateLeft(1)
assertTypedEquals[S :: D :: I :: HNil]("foo" :: 2.0 :: 1 :: HNil, r2)
val r3 = in4.rotateLeft(1)
assertTypedEquals[S :: D :: C :: I :: HNil]("foo" :: 2.0 :: 'a' :: 1 :: HNil, r3)
val r4 = in4.rotateLeft(2)
assertTypedEquals[D :: C :: I :: S :: HNil](2.0 :: 'a' :: 1 :: "foo" :: HNil, r4)
val r5 = in4.rotateLeft(3)
assertTypedEquals[C :: I :: S :: D :: HNil]('a' :: 1 :: "foo" :: 2.0 :: HNil, r5)
val r6 = in4.rotateLeft(5)
assertTypedEquals[S :: D :: C :: I :: HNil]("foo" :: 2.0 :: 'a' :: 1 :: HNil, r6)
val r7 = in4.rotateLeft(6)
assertTypedEquals[D :: C :: I :: S :: HNil](2.0 :: 'a' :: 1 :: "foo" :: HNil, r7)
}
{ // other[N]: same rotations with Nat type arguments
val r1 = in2.rotateLeft[_1]
assertTypedEquals[S :: I :: HNil]("foo" :: 1 :: HNil, r1)
val r2 = in3.rotateLeft[_1]
assertTypedEquals[S :: D :: I :: HNil]("foo" :: 2.0 :: 1 :: HNil, r2)
val r3 = in4.rotateLeft[_1]
assertTypedEquals[S :: D :: C :: I :: HNil]("foo" :: 2.0 :: 'a' :: 1 :: HNil, r3)
val r4 = in4.rotateLeft[_2]
assertTypedEquals[D :: C :: I :: S :: HNil](2.0 :: 'a' :: 1 :: "foo" :: HNil, r4)
val r5 = in4.rotateLeft[_3]
assertTypedEquals[C :: I :: S :: D :: HNil]('a' :: 1 :: "foo" :: 2.0 :: HNil, r5)
val r6 = in4.rotateLeft[_5]
assertTypedEquals[S :: D :: C :: I :: HNil]("foo" :: 2.0 :: 'a' :: 1 :: HNil, r6)
val r7 = in4.rotateLeft[_6]
assertTypedEquals[D :: C :: I :: S :: HNil](2.0 :: 'a' :: 1 :: "foo" :: HNil, r7)
}
}
@Test
def testRotateRight {
// Mirror of testRotateLeft: rotateRight(n) moves the last n elements
// (mod size) to the front, checked in both Int-literal and Nat forms.
val in0 = HNil
val in1 = 1 :: HNil
val in2 = 1 :: "foo" :: HNil
val in3 = 1 :: "foo" :: 2.0 :: HNil
val in4 = 1 :: "foo" :: 2.0 :: 'a' :: HNil
type S = String; type I = Int; type D = Double; type C = Char
{ // rotateRight(0): always the identity
val r1 = in0.rotateRight(0)
assertTypedSame[HNil](HNil, r1)
val r2 = in1.rotateRight(0)
assertTypedSame[I :: HNil](in1, r2)
val r3 = in2.rotateRight(0)
assertTypedSame[I :: S :: HNil](in2, r3)
val r4 = in3.rotateRight(0)
assertTypedSame[I :: S :: D :: HNil](in3, r4)
val r5 = in4.rotateRight(0)
assertTypedSame[I :: S :: D :: C :: HNil](in4, r5)
}
{ // rotateRight[_0]: type-level zero, also the identity
val r1 = in0.rotateRight[_0]
assertTypedSame[HNil](HNil, r1)
val r2 = in1.rotateRight[_0]
assertTypedSame[I :: HNil](in1, r2)
val r3 = in2.rotateRight[_0]
assertTypedSame[I :: S :: HNil](in2, r3)
val r4 = in3.rotateRight[_0]
assertTypedSame[I :: S :: D :: HNil](in3, r4)
val r5 = in4.rotateRight[_0]
assertTypedSame[I :: S :: D :: C :: HNil](in4, r5)
}
{ // rotateRight(n % size == 0): whole-multiple rotations are the identity
val r1 = in1.rotateRight(1)
assertTypedSame[I :: HNil](in1, r1)
val r2 = in1.rotateRight(2)
assertTypedSame[I :: HNil](in1, r2)
val r3 = in2.rotateRight(2)
assertTypedSame[I :: S :: HNil](in2, r3)
val r4 = in2.rotateRight(4)
assertTypedSame[I :: S :: HNil](in2, r4)
val r5 = in3.rotateRight(3)
assertTypedSame[I :: S :: D :: HNil](in3, r5)
val r6 = in3.rotateRight(6)
assertTypedSame[I :: S :: D :: HNil](in3, r6)
val r7 = in4.rotateRight(4)
assertTypedSame[I :: S :: D :: C :: HNil](in4, r7)
val r8 = in4.rotateRight(8)
assertTypedSame[I :: S :: D :: C :: HNil](in4, r8)
}
{ // rotateRight[N % Size == 0]: same identities at the type level
val r1 = in1.rotateRight[_1]
assertTypedSame[I :: HNil](in1, r1)
val r2 = in1.rotateRight[_2]
assertTypedSame[I :: HNil](in1, r2)
val r3 = in2.rotateRight[_2]
assertTypedSame[I :: S :: HNil](in2, r3)
val r4 = in2.rotateRight[_4]
assertTypedSame[I :: S :: HNil](in2, r4)
val r5 = in3.rotateRight[_3]
assertTypedSame[I :: S :: D :: HNil](in3, r5)
val r6 = in3.rotateRight[_6]
assertTypedSame[I :: S :: D :: HNil](in3, r6)
val r7 = in4.rotateRight[_4]
assertTypedSame[I :: S :: D :: C :: HNil](in4, r7)
val r8 = in4.rotateRight[_8]
assertTypedSame[I :: S :: D :: C :: HNil](in4, r8)
}
{ // others(n): genuine rotations; n > size wraps around (5 ≡ 1, 6 ≡ 2 mod 4)
val r1 = in2.rotateRight(1)
assertTypedEquals[S :: I :: HNil]("foo" :: 1 :: HNil, r1)
val r2 = in3.rotateRight(1)
assertTypedEquals[D :: I :: S :: HNil](2.0 :: 1 :: "foo" :: HNil, r2)
val r3 = in4.rotateRight(1)
assertTypedEquals[C :: I :: S :: D :: HNil]('a' :: 1 :: "foo" :: 2.0 :: HNil, r3)
val r4 = in4.rotateRight(2)
assertTypedEquals[D :: C :: I :: S :: HNil](2.0 :: 'a' :: 1 :: "foo" :: HNil, r4)
val r5 = in4.rotateRight(3)
assertTypedEquals[S :: D :: C :: I :: HNil]("foo" :: 2.0 :: 'a' :: 1 :: HNil, r5)
val r6 = in4.rotateRight(5)
assertTypedEquals[C :: I :: S :: D :: HNil]('a' :: 1 :: "foo" :: 2.0 :: HNil, r6)
val r7 = in4.rotateRight(6)
assertTypedEquals[D :: C :: I :: S :: HNil](2.0 :: 'a' :: 1 :: "foo" :: HNil, r7)
}
{ // others[N]: same rotations with Nat type arguments
val r1 = in2.rotateRight[_1]
assertTypedEquals[S :: I :: HNil]("foo" :: 1 :: HNil, r1)
val r2 = in3.rotateRight[_1]
assertTypedEquals[D :: I :: S :: HNil](2.0 :: 1 :: "foo" :: HNil, r2)
val r3 = in4.rotateRight[_1]
assertTypedEquals[C :: I :: S :: D :: HNil]('a' :: 1 :: "foo" :: 2.0 :: HNil, r3)
val r4 = in4.rotateRight[_2]
assertTypedEquals[D :: C :: I :: S :: HNil](2.0 :: 'a' :: 1 :: "foo" :: HNil, r4)
val r5 = in4.rotateRight[_3]
assertTypedEquals[S :: D :: C :: I :: HNil]("foo" :: 2.0 :: 'a' :: 1 :: HNil, r5)
val r6 = in4.rotateRight[_5]
assertTypedEquals[C :: I :: S :: D :: HNil]('a' :: 1 :: "foo" :: 2.0 :: HNil, r6)
val r7 = in4.rotateRight[_6]
assertTypedEquals[D :: C :: I :: S :: HNil](2.0 :: 'a' :: 1 :: "foo" :: HNil, r7)
}
}
// Poly used by the scan tests: adds Int/Int, and mixed Int/String cases by
// parsing the String with toInt before adding.
object smear extends Poly {
implicit val caseIntInt = use((x: Int, y: Int) => x + y)
implicit val caseStringInt = use((x: String, y: Int) => x.toInt + y)
implicit val caseIntString = use((x: Int, y: String) => x + y.toInt)
}
@Test
def testScanLeft {
// scanLeft over 1 :: "2" :: HNil with seed 1 via smear:
// accumulators are 1, then 1+1 = 2, then 2 + "2".toInt = 4.
val input = 1 :: "2" :: HNil
val result = input.scanLeft(1)(smear)
typed[Int :: Int :: Int :: HNil](result)
assertEquals(1 :: 2 :: 4 :: HNil, result)
}
@Test
def testScanRight{
// scanRight over 1 :: "2" :: HNil with seed 1 via smear:
// accumulators from the right are 1, then "2".toInt+1 = 3, then 1+3 = 4.
val input = 1 :: "2" :: HNil
val result = input.scanRight(1)(smear)
typed[Int :: Int :: Int :: HNil](result)
assertEquals(4 :: 3 :: 1 :: HNil, result)
}
@Test
def testFill {
// HList.fill(n)(elem) builds an n-element HList of a single element type;
// the two-argument form fill(n, m)(elem) builds an n x m HList of HLists.
// Each case is checked both with the inferred and an explicit element type.
{
val empty = HList.fill(0)(true)
typed[_0](empty.length)
}
{
val empty = HList.fill[Boolean](0)(true)
typed[_0](empty.length)
}
{
val single = HList.fill(1)(None)
typed[_1](single.length)
typed[None.type](single.head)
assertEquals(None, single.head)
}
{
val single = HList.fill[None.type](1)(None)
typed[_1](single.length)
typed[None.type](single.head)
assertEquals(None, single.head)
}
{
val three = HList.fill(3)(m2i)
typed[_3](three.length)
typed[M2[Int, Unit]](three(_0))
typed[M2[Int, Unit]](three(_1))
typed[M2[Int, Unit]](three(_2))
assertEquals(m2i, three(_0))
assertEquals(m2i, three(_1))
assertEquals(m2i, three(_2))
}
{
val three = HList.fill[M2[Int, Unit]](3)(m2i)
typed[_3](three.length)
typed[M2[Int, Unit]](three(_0))
typed[M2[Int, Unit]](three(_1))
typed[M2[Int, Unit]](three(_2))
assertEquals(m2i, three(_0))
assertEquals(m2i, three(_1))
assertEquals(m2i, three(_2))
}
// Two-dimensional fills: a zero in either dimension gives empty structure.
{
val empty = HList.fill(0, 0)(true)
typed[_0](empty.length)
}
{
val empty = HList.fill[Boolean](0, 0)(true)
typed[_0](empty.length)
}
{
// 2 rows of 0 columns: outer list has two empty inner lists.
val empty = HList.fill(2, 0)(true)
typed[_2](empty.length)
typed[_0](empty(_0).length)
typed[_0](empty(_1).length)
}
{
val empty = HList.fill[Boolean](2, 0)(true)
typed[_2](empty.length)
typed[_0](empty(_0).length)
typed[_0](empty(_1).length)
}
{
val empty = HList.fill(0, 2)(true)
typed[_0](empty.length)
}
{
val empty = HList.fill[Boolean](0, 2)(true)
typed[_0](empty.length)
}
{
val oneByTwo = HList.fill(1, 2)(None)
typed[_1](oneByTwo.length)
typed[_2](oneByTwo.head.length)
typed[None.type](oneByTwo.head(_0))
typed[None.type](oneByTwo.head(_1))
assertEquals(None, oneByTwo.head(_0))
assertEquals(None, oneByTwo.head(_1))
}
{
val oneByTwo = HList.fill[None.type](1, 2)(None)
typed[_1](oneByTwo.length)
typed[_2](oneByTwo.head.length)
typed[None.type](oneByTwo.head(_0))
typed[None.type](oneByTwo.head(_1))
assertEquals(None, oneByTwo.head(_0))
assertEquals(None, oneByTwo.head(_1))
}
{
val twoByThree = HList.fill(2, 3)(None)
typed[_2](twoByThree.length)
typed[_3](twoByThree(_0).length)
typed[_3](twoByThree(_1).length)
typed[None.type](twoByThree.at[_0].at[_0])
typed[None.type](twoByThree.at[_0].at[_1])
typed[None.type](twoByThree.at[_0].at[_2])
typed[None.type](twoByThree.at[_1].at[_0])
typed[None.type](twoByThree.at[_1].at[_1])
typed[None.type](twoByThree.at[_1].at[_2])
assertEquals(None, twoByThree.at[_0].at[_0])
assertEquals(None, twoByThree.at[_0].at[_1])
assertEquals(None, twoByThree.at[_0].at[_2])
assertEquals(None, twoByThree.at[_1].at[_0])
assertEquals(None, twoByThree.at[_1].at[_1])
assertEquals(None, twoByThree.at[_1].at[_2])
}
{
val twoByThree = HList.fill[None.type](2, 3)(None)
typed[_2](twoByThree.length)
typed[_3](twoByThree(_0).length)
typed[_3](twoByThree(_1).length)
typed[None.type](twoByThree.at[_0].at[_0])
typed[None.type](twoByThree.at[_0].at[_1])
typed[None.type](twoByThree.at[_0].at[_2])
typed[None.type](twoByThree.at[_1].at[_0])
typed[None.type](twoByThree.at[_1].at[_1])
typed[None.type](twoByThree.at[_1].at[_2])
assertEquals(None, twoByThree.at[_0].at[_0])
assertEquals(None, twoByThree.at[_0].at[_1])
assertEquals(None, twoByThree.at[_0].at[_2])
assertEquals(None, twoByThree.at[_1].at[_0])
assertEquals(None, twoByThree.at[_1].at[_1])
assertEquals(None, twoByThree.at[_1].at[_2])
}
}
@Test
def testPatch {
// patch(pos, sub, replaced) splices `sub` in at `pos`, dropping `replaced`
// elements — the HList analogue of Seq.patch. Each case exercises both the
// value-argument and the Nat type-argument forms.
val basehl = 1 :: 2 :: "three" :: HNil
{ //patch an empty hlist
val out = HNil.patch(0, basehl, 0)
val out2 = HNil.patch[_0,_0](basehl)
typed[Int :: Int :: String :: HNil](out)
assertEquals(out, basehl)
assertTypedEquals[Int :: Int :: String :: HNil](out, out2)
}
{ //single patch w/ nothing removed
val out = basehl.patch(1, 4 :: HNil, 0)
val out2 = basehl.patch[_1,_0](4 :: HNil)
typed[Int :: Int :: Int :: String :: HNil](out)
assertEquals(1 :: 4 :: 2 :: "three" :: HNil, out)
assertTypedEquals[Int :: Int :: Int :: String :: HNil](out, out2)
}
{ //single patch w/ 2 elements removed
val out = basehl.patch(1, 3 :: HNil, 2)
val out2 = basehl.patch[_1,_2](3 :: HNil)
typed[Int :: Int :: HNil](out)
assertEquals(1 :: 3 :: HNil, out)
assertTypedEquals[Int :: Int :: HNil](out, out2)
}
{ //essentially append
val p = 4 :: 5 :: "six" :: HNil
val out = basehl.patch(3, p, 0)
val out2 = basehl.patch[_3,_0](p)
typed[Int :: Int :: String :: Int :: Int :: String :: HNil](out)
assertEquals(1 :: 2 :: "three" :: 4 :: 5 :: "six" :: HNil, out)
assertTypedEquals[Int :: Int :: String :: Int :: Int :: String :: HNil](out, out2)
}
{ //several patched w/ everything from original removed
val sub = 4 :: "five" :: "six" :: HNil
val out = basehl.patch(0, sub, 3)
val out2 = basehl.patch[_0,_3](sub)
typed[Int :: String :: String :: HNil](out)
assertEquals(sub, out)
assertTypedEquals[Int :: String :: String :: HNil](out, out2)
}
}
@Test
def testToCoproduct {
// ToCoproduct turns a product type (HList) into the corresponding
// coproduct type, element by element.
type PISB = Int :: String :: Boolean :: HNil
type CISBa = Int :+: String :+: Boolean :+: CNil
type CISBb = the.`ToCoproduct[PISB]`.Out
implicitly[CISBa =:= CISBb]
}
@Test
def testToSum {
// ToSum is like ToCoproduct but deduplicates element types, so a product
// with repeated types still yields the same coproduct.
type PISB = Int :: String :: Boolean :: HNil
type CISBa = Int :+: String :+: Boolean :+: CNil
type SISBa = the.`ToSum[PISB]`.Out
implicitly[CISBa =:= SISBa]
// Duplicated Int and String collapse to single coproduct alternatives.
type PIISSB = Int :: Int :: String :: String :: Boolean :: HNil
type SISBb = the.`ToSum[PIISSB]`.Out
implicitly[CISBa =:= SISBb]
}
@Test
def testHListTypeSelector {
// The HList.`...`.T selector macro builds HList types from a comma-separated
// type (or literal-type) list embedded in a backquoted name.
import syntax.singleton._
typed[HList.` `.T](HNil)
typed[HList.`Int`.T](23 :: HNil)
typed[HList.`Int, String`.T](23 :: "foo" :: HNil)
typed[HList.`Int, String, Boolean`.T](23 :: "foo" :: true :: HNil)
// Literal types: narrow keeps the singleton type of the literal.
typed[HList.`2`.T](2.narrow :: HNil)
typed[HList.`2, "a", true`.T](2.narrow :: "a".narrow :: true.narrow :: HNil)
// A different literal value must not conform to the literal type.
illTyped(""" typed[HList.`2`.T](3.narrow :: HNil) """)
// Mix of standard and literal types
typed[HList.`2, String, true`.T](2.narrow :: "a" :: true.narrow :: HNil)
}
// ProductArgs rewrites Foo(a, b, c) into applyProduct(a :: b :: c :: HNil);
// applyProduct is the identity here, exposing the argument HList directly.
object Foo extends ProductArgs {
def applyProduct[L <: HList](args: L): L = args
}
@Test
def testProductArgs {
// Foo(...) is macro-expanded by ProductArgs into applyProduct over an HList
// of the (widened) argument types.
val l = Foo(23, "foo", true)
typed[Int :: String :: Boolean :: HNil](l)
val v1 = l.head
typed[Int](v1)
assertEquals(23, v1)
val v2 = l.tail.head
typed[String](v2)
assertEquals("foo", v2)
val v3 = l.tail.tail.head
typed[Boolean](v3)
assertEquals(true, v3)
val v4 = l.tail.tail.tail
typed[HNil](v4)
// BUG FIX: this illTyped fragment previously referenced an undefined `r`,
// so it failed to compile for the wrong reason (unknown identifier) and the
// check was vacuous. Reference `l` so it genuinely verifies that `head`
// cannot be taken past the end of the HList.
illTyped("""
l.tail.tail.tail.head
""")
}
// Like Foo above, but SingletonProductArgs preserves the singleton types of
// literal arguments instead of widening them.
object SFoo extends SingletonProductArgs {
def applyProduct[L <: HList](args: L): L = args
}
// Sample product type used by the selectAll tests below.
case class Quux(i: Int, s: String, b: Boolean)
// selectAll('k1, 'k2, ...).from(t) projects the named fields out of the
// LabelledGeneric representation of t and tuples them; the symbol arguments
// keep their singleton types via SingletonProductArgs.
object selectAll extends SingletonProductArgs {
class Apply[K <: HList] {
def from[T, R <: HList, S <: HList, Out](t: T)
(implicit
gen: LabelledGeneric.Aux[T, R],
sel: SelectAll.Aux[R, K, S],
tp: Tupler.Aux[S, Out]
): Out =
tp(sel(gen.to(t)))
}
def applyProduct[K <: HList](keys: K) = new Apply[K]
}
// Type class with an instance only for the plain HNil type (not HNil.type);
// used below to verify that tails are inferred as HNil rather than the
// narrower HNil.type singleton.
trait NonSingletonHNilTC[T]
object NonSingletonHNilTC {
def apply[T](t: T)(implicit i: NonSingletonHNilTC[T]): NonSingletonHNilTC[T] = i
implicit val nsHNilTC: NonSingletonHNilTC[HNil] = new NonSingletonHNilTC[HNil] {}
}
@Test
def testSingletonProductArgs {
// SingletonProductArgs keeps the singleton type of each literal argument.
object Obj
val l = SFoo(23, "foo", 'bar, Obj, true)
typed[Witness.`23`.T :: Witness.`"foo"`.T :: Witness.`'bar`.T :: Obj.type :: Witness.`true`.T :: HNil](l)
// Annotations on the LHS here and subsequently, otherwise scalac will
// widen the RHS to a non-singleton type.
val v1: Witness.`23`.T = l.head
assertEquals(23, v1)
val v2: Witness.`"foo"`.T = l.tail.head
assertEquals("foo", v2)
val v3: Witness.`'bar`.T = l.tail.tail.head
assertEquals('bar, v3)
val v4: Obj.type = l.tail.tail.tail.head
assertEquals(Obj, v4)
val v5: Witness.`true`.T = l.tail.tail.tail.tail.head
assertEquals(true, v5)
val v6 = l.tail.tail.tail.tail.tail
typed[HNil](v6)
// BUG FIX: this illTyped fragment previously referenced an undefined `r`,
// so it failed to compile for the wrong reason and the check was vacuous.
// Reference `l` so it genuinely verifies there is no head past the end.
illTyped("""
l.tail.tail.tail.tail.tail.head
""")
// Verify that we infer HNil rather than HNil.type at the end
NonSingletonHNilTC(SFoo(23).tail)
NonSingletonHNilTC(SFoo())
val quux = Quux(23, "foo", true)
val ib = selectAll('i, 'b).from(quux)
typed[(Int, Boolean)](ib)
assertEquals((23, true), ib)
}
@Test
def selectAllTest: Unit ={
import shapeless._, record._ , ops.hlist.SelectAll
//is there any way to do it without runtime overhead?
// Captures a value together with its precise type so the type can be
// referred to later as a stable path (tc._type).
class TypeCaptured[T](val value: T) {
type _type = T
}
// Projects out of `l` the elements whose types form Super.
def getFieldsByTypesOfSuper[Sub <: HList, Super <: HList](l: Sub)(implicit sa: SelectAll[Sub, Super]) = sa(l)
val hsuper = new TypeCaptured("2":: true :: HNil)
val hsub = new TypeCaptured(1 :: "2":: true :: HNil)
//testing with plain HList
assertTypedEquals[hsuper._type](hsuper.value, getFieldsByTypesOfSuper[hsub._type, hsuper._type](hsub.value))
val rsuper = new TypeCaptured(Record(b = true, c = "blah"))
val rsub = new TypeCaptured(Record(a = 1, b = true, c = "blah"))
//testing with Record
assertTypedEquals[rsuper._type](rsuper.value, getFieldsByTypesOfSuper[rsub._type, rsuper._type](rsub.value))
}
// NatProductArgs fixture: FooNat(1, 2, 3) materialises the Int literals as an
// HList of Nat types (_1 :: _2 :: _3 :: HNil) and returns it unchanged.
object FooNat extends NatProductArgs {
def applyNatProduct[L <: HList](args: L): L = args
}
// Variant that only uses the args' type: returns the Nat length of the
// argument list instead of the list itself.
object FooNatTypeParams extends NatProductArgs {
def applyNatProduct[L <: HList](implicit len: Length[L]) = len()
}
// Checks that NatProductArgs converts Int literal arguments into Nat-typed
// HList elements, and that the implicit-only variant computes the arity.
@Test
def testNatProductArgs {
val l = FooNat(1, 2, 3)
typed[_1 :: _2 :: _3 :: HNil](l)
val v1 = l.head
typed[_1](v1)
assertEquals(_1, v1)
val v2 = l.tail.head
typed[_2](v2)
assertEquals(_2, v2)
val v3 = l.tail.tail.head
typed[_3](v3)
assertEquals(_3, v3)
val v4 = l.tail.tail.tail
typed[HNil](v4)
// NOTE(review): references undefined `r` (not `l`); still fails to compile,
// but for the wrong reason — TODO confirm intent.
illTyped("""
r.tail.tail.tail.head
""")
val res = FooNatTypeParams(1,2,3,4)
assertEquals(_4,res)
}
// String-interpolator fixture: `hlist"... $a ... $b"` collects the interpolated
// values into an HList via ProductArgs (the literal parts are discarded).
implicit class Interpolator(val sc: StringContext) {
class Args extends ProductArgs {
def applyProduct[L <: HList](l: L): L = l
}
val hlist: Args = new Args
}
// Checks the `hlist` string interpolator above: the interpolated values become
// an Int :: String :: Boolean :: HNil preserving order and static types.
@Test
def testStringInterpolator {
val (i, s, b) = (23, "foo", true)
val l = hlist"Int: $i, String: $s, Boolean: $b"
typed[Int :: String :: Boolean :: HNil](l)
val v1 = l.head
typed[Int](v1)
assertEquals(23, v1)
val v2 = l.tail.head
typed[String](v2)
assertEquals("foo", v2)
val v3 = l.tail.tail.head
typed[Boolean](v3)
assertEquals(true, v3)
val v4 = l.tail.tail.tail
typed[HNil](v4)
// NOTE(review): references undefined `r` (not `l`); still fails to compile,
// but for the wrong reason — TODO confirm intent.
illTyped("""
r.tail.tail.tail.head
""")
}
// Checks `collectFirst` with a Poly1: applies the poly to the first element it
// is defined at (the Int), and fails to compile when no element matches.
@Test
def testCollectFirst {
object Foo extends Poly1{
implicit def iinst = at[Int]{ _ + 1 }
}
val hlist1 = "foo" :: 2.0 :: 1 :: HNil
assertTypedEquals[Int](hlist1.collectFirst(Foo), 2)
val hlist2 = "foo" :: 2.0 :: HNil
illTyped("""hlist2.collectFirst(Foo)""")
}
// Checks `group(size, step[, pad])`: sliding/partitioned tuples over an HList
// built from a Nat range mapped to Ints.
@Test
def testGrouper {
// Maps each Nat element of an HList to its runtime Int value.
object toInt extends Poly1 {
implicit def default[N <: Nat](implicit toi: ops.nat.ToInt[N]) = at[N](_ => toi())
}
// Builds the HList a, a+1, ..., b-1 as Ints.
def range[R <: HList](a: Nat, b: Nat)(implicit
range: ops.nat.Range.Aux[a.N, b.N, R],
mapper: ops.hlist.Mapper[toInt.type, R]
) = mapper(range())
// group HNil
assertEquals(HNil: HNil, (HNil: HNil) group(2, 1))
// group a HList of 4 items into 2 (4/2) tuples of 2 items
assertEquals(
(0, 1) ::(2, 3) :: HNil,
range(0, 4) group(2, 2)
)
// group a HList of 5 items into 2 (5/2) tuples of 2 items
// the last item does not make a complete partition and is dropped.
assertEquals(
(0, 1) ::(2, 3) :: HNil,
range(0, 5) group(2, 2)
)
// uses the step to select the starting point for each partition
assertEquals(
(0, 1) ::(4, 5) :: HNil,
range(0, 6) group(2, 4)
)
// if the step is smaller than the partition size, items will be reused
assertEquals(
(0, 1) ::(1, 2) ::(2, 3) :: HNil,
range(0, 4) group(2, 1)
)
// when there are not enough items to fill the last partition, a pad can be supplied.
assertEquals(
(0, 1) ::(2, 3) ::(4, 'a') :: HNil,
range(0, 5) group(2, 2, 'a' :: HNil)
)
// but only as many pad elements are used as necessary to fill the final partition.
assertEquals(
(0, 1) ::(2, 3) ::(4, 'a') :: HNil,
range(0, 5) group(2, 2, 'a' :: 'b' :: 'c' :: HNil)
)
}
// Checks LiftAll: summons a type-class instance for every element type of an
// HList, failing to compile when any instance (here F[Long]) is missing.
@Test
def testLiftAll {
trait F[A]
implicit object FInt extends F[Int]
implicit object FString extends F[String]
assertEquals(HNil, implicitly[LiftAll[F, HNil]].instances)
assertEquals(FInt :: HNil, implicitly[LiftAll[F, Int :: HNil]].instances)
assertEquals(FString :: FInt :: HNil, implicitly[LiftAll[F, String :: Int :: HNil]].instances)
illTyped("implicitly[LiftAll[F, Long :: String :: Int :: HNil]]")
// Value-based summoning infers the element types from the argument.
assertEquals(FInt :: HNil, LiftAll[F](1 :: HNil).instances)
}
// Checks `padTo(n, elem)`: extends an HList to length n with copies of elem;
// a no-op at exact length, and a compile error when n is below current length.
@Test
def testPadTo {
val p1 = (1 :: "a" :: HNil).padTo(3, 0)
assertTypedEquals[Int :: String :: Int :: HNil](1 :: "a" :: 0 :: HNil, p1)
val p2 = (1 :: "a" :: HNil).padTo(2, 0)
assertTypedEquals[Int :: String :: HNil](1 :: "a" :: HNil, p2)
val p3 = (HNil: HNil).padTo(2, "a")
assertTypedEquals[String :: String :: HNil]("a" :: "a" :: HNil, p3)
val p4 = (HNil: HNil).padTo(0, "a")
assertTypedEquals[HNil](HNil, p4)
illTyped(""" (1 :: "a" :: HNil).padTo(1, 0) """)
}
// Checks `slice(from, until)`: half-open index range over an HList, with
// compile errors for out-of-bounds or inverted ranges.
@Test
def testSlice {
val r1 = (1 :: "a" :: 3 :: HNil).slice(0, 2)
assertTypedEquals[Int :: String :: HNil](1 :: "a" :: HNil, r1)
val r2 = (1 :: "a" :: 3 :: HNil).slice(1, 2)
assertTypedEquals[String :: HNil]("a" :: HNil, r2)
val r3 = (1 :: "a" :: 3 :: HNil).slice(2, 3)
assertTypedEquals[Int :: HNil](3 :: HNil, r3)
val r4 = (HNil: HNil).slice(0, 0)
assertTypedEquals[HNil](HNil, r4)
illTyped(""" (1 :: "a" :: 3 :: HNil).slice(0, 4) """)
illTyped(""" (1 :: "a" :: 3 :: HNil).slice(1, 0) """)
}
// Checks `toSizedHList(n)`: Some(hlist) when the collection has exactly n
// elements, None otherwise. III/IIII are type aliases defined elsewhere in
// this file for 3- and 4-element Int HLists — presumably; verify there.
@Test
def testToSizedHList {
val ns = List(1,2,3,4)
assertTypedEquals[Option[III]](None, ns.toSizedHList(3))
assertTypedEquals[Option[IIII]](Some(1 :: 2 :: 3 :: 4 :: HNil), ns.toSizedHList(4))
}
// Checks `updateAtWith(i)(f)`: returns (previous element, updated HList); the
// replacement may have a different type than the original element.
@Test
def testModifierAt {
// first element
assertEquals((1, 42 :: 2 :: 3 :: HNil), (1 :: 2 :: 3 :: HNil).updateAtWith(0)(_ => 42))
//last element
assertEquals((3, 1 :: 2 :: 42 :: HNil), (1 :: 2 :: 3 :: HNil).updateAtWith(2)(_ => 42))
//different type
assertEquals((3, 1 :: 2 :: 42.0 :: HNil), (1 :: 2 :: 3 :: HNil).updateAtWith(2)(_ => 42.0))
}
// Checks Reify: materialises an HList of literal singleton types back into
// values; fails to compile for element types that are not singletons.
@Test
def testReify {
import syntax.singleton._
assertTypedEquals(HNil, Reify[HNil].apply)
val s1 = HList.`'a`
assertTypedEquals('a.narrow :: HNil, Reify[s1.T].apply)
val s2 = HList.`'a, 1, "b", true`
assertTypedEquals('a.narrow :: 1.narrow :: "b".narrow :: true.narrow :: HNil, Reify[s2.T].apply)
illTyped(""" Reify[String :: Int :: HNil] """)
illTyped(""" Reify[String :: HList.`'a, 1, "b", true`.T] """)
}
}
| liff/shapeless | core/src/test/scala/shapeless/hlist.scala | Scala | apache-2.0 | 109,979 |
package alexsmirnov.pbconsole
import org.scalatest.FlatSpec
import org.scalatest.Matchers
class MacroTest extends FlatSpec with Matchers {
// An empty macro body must expand to no commands at all.
// (Removed an unused local `val m = Macro("foo","","")` that was never read.)
"Empty string" should "prepared as empty iterator" in {
  Macro.prepare("", new Settings) shouldBe empty
}
// A single-line body expands to exactly one command.
"Single line string" should "prepared as single value" in {
Macro.prepare("foo", new Settings).toStream should contain only("foo")
}
// A multi-line body is split per line; blank lines and ';' comment lines are
// preserved as-is and order is kept.
"Multiline string" should "prepared as single value" in {
Macro.prepare("foo command\\n;bar comment\\nbaz\\n\\nend line", new Settings).toStream should contain inOrderOnly("foo command",";bar comment","baz","","end line")
}
// ${name} placeholders are replaced with the corresponding Settings values.
"variable substitution" should "substitute bed width and depth and height" in {
val s = new Settings
s.bedWidth.update(12.0)
s.bedDepth.update(110.0)
s.height.update(22.00)
s.zOffset.update(15.6)
Macro.prepare("G1 X${bedWidth} Y${bedDepth} Z${height} E${zOffset}", s).toStream should contain only("G1 X12.0 Y110.0 Z22.0 E15.6")
}
} | alexsmirnov/printrbot-g2-console | src/test/scala/alexsmirnov/pbconsole/MacroTest.scala | Scala | bsd-3-clause | 1,013 |
// Copyright (C) 2015, codejitsu.
package net.codejitsu.tasks.dsl
/** Base type for deployment stages; `toString` renders the stage name.
  * NOTE(review): could be `sealed` if no stages are defined outside this
  * file — TODO confirm before changing. */
abstract class Stage {
def name: String
override def toString: String = name
}
/** Development stage. */
final class Dev extends Stage {
override val name: String = "Development"
}
/** Test stage. */
final class Test extends Stage {
override val name: String = "Test"
}
/** QA stage. */
final class QA extends Stage {
override val name: String = "QA"
}
/** Production stage. */
final class Production extends Stage {
override val name: String = "Production"
}
object Stage {
// Implicit fallback stage used when the caller does not supply one.
implicit val defaultStage: Stage = new Stage {
override def name: String = "Unstaged"
}
}
| codejitsu/tasks | tasks-dsl/src/main/scala/net/codejitsu/tasks/dsl/Stage.scala | Scala | apache-2.0 | 571 |
package com.nrinaudo.fetch
import java.io._
import java.nio.charset.Charset
/** Base trait for immutable request entities; `Self` is the concrete subtype
  * returned by the fluent setters (F-bounded so `gzip`, `mediaType(...)` etc.
  * keep the caller's type). */
trait RequestEntityLike[+Self <: RequestEntityLike[Self]] {
this: Self =>
/** Length, in bytes, of the request entity.
*
* Note that this is different from the [[contentLength content length]], which represents the number of bytes that
* will actually be transferred. This can be less than the entity's length if, for example, the [[gzip]] encoding
* is used.
*/
def length: Option[Long]
/** Media type of the request entity. */
def mediaType: MediaType
/** Encoding in which the request entity should be transferred.
*
* Some but not all servers will accept encodings other than [[Encoding.Identity]]. When such is known to be the
* case, using [[Encoding.Gzip]], for example, can yield significant performance gains.
*/
def encoding: Encoding
/** Creates a new instance of this class with the specified values. */
protected def build(mediaType: MediaType, encoding: Encoding): Self
/** Writes this request entity to the specified output stream. */
protected def write(out: OutputStream): Unit
/** Number of bytes that will be transferred when this request entity is sent to a remote host. */
// For any non-identity encoding the transfer size is unknown up-front.
def contentLength: Option[Long] =
if(encoding == Encoding.Identity) length
else None
/** Sets this request entity's media type. */
def mediaType(value: MediaType): Self = build(value, encoding)
/** Sets this request entity's transfer encoding to [[Encoding.Gzip]]. */
def gzip: Self = encoding(Encoding.Gzip)
/** Sets this request entity's transfer encoding to [[Encoding.Deflate]]. */
def deflate: Self = encoding(Encoding.Deflate)
/** Sets this request entity's transfer encoding to the specified value. */
def encoding(value: Encoding): Self = build(mediaType, value)
/** Writes this request entity to the specified output stream, applying its transfer encoding if applicable. */
// NOTE(review): closing the encoding wrapper will typically close the
// underlying `out` as well — confirm callers do not reuse the stream after.
def apply(out: OutputStream): Unit = {
val stream = encoding.encode(out)
try {write(stream)}
finally {stream.close()}
}
}
/** Factory methods for building request entities from streams, readers,
  * strings and files. */
object RequestEntity {
// - Stream helpers --------------------------------------------------------------------------------------------------
// -------------------------------------------------------------------------------------------------------------------
// Entity backed by an arbitrary byte-producing callback; length is unknown.
private class StreamRequestEntity(private val f: OutputStream => Unit,
override val mediaType: MediaType = MediaType.OctetStream,
override val encoding: Encoding = Encoding.Identity) extends RequestEntity {
override def length: Option[Long] = None
override protected def build(mediaType: MediaType, encoding: Encoding): StreamRequestEntity =
new StreamRequestEntity(f, mediaType, encoding)
override protected def write(out: OutputStream): Unit = f(out)
}
def bytes(f: OutputStream => Unit): RequestEntity = new StreamRequestEntity(f)
def apply(in: InputStream): RequestEntity = bytes(writeBytes(in, _))
// - Writer helpers --------------------------------------------------------------------------------------------------
// -------------------------------------------------------------------------------------------------------------------
// Entity backed by a character-producing callback; length is unknown.
private class WriterRequestEntity(private val f: Writer => Unit,
override val mediaType: MediaType = MediaType.PlainText,
override val encoding: Encoding = Encoding.Identity) extends TextRequestEntity {
override def length: Option[Long] = None
override protected def build(mediaType: MediaType, encoding: Encoding): WriterRequestEntity =
new WriterRequestEntity(f, mediaType, encoding)
override protected def write(out: Writer): Unit = f(out)
}
def chars(f: Writer => Unit): TextRequestEntity = new WriterRequestEntity(f)
def apply(in: Reader): TextRequestEntity = chars(writeChars(in, _))
// - String helper ---------------------------------------------------------------------------------------------------
// -------------------------------------------------------------------------------------------------------------------
// Entity backed by an in-memory string; length is the encoded byte count.
private class StringEntity(val content: String, override val mediaType: MediaType, override val encoding: Encoding)
extends TextRequestEntity with RequestEntityLike[StringEntity] {
override lazy val length: Option[Long] = Some(content.getBytes(charset).length.toLong)
override protected def write(out: Writer): Unit = out.write(content)
override protected def build(mediaType: MediaType, encoding: Encoding): StringEntity = new StringEntity(content, mediaType, encoding)
override def toString = "String(%s)" format content
}
def apply(str: String): TextRequestEntity =
new StringEntity(str, MediaType.PlainText, Encoding.Identity)
// - File helper -----------------------------------------------------------------------------------------------------
// -------------------------------------------------------------------------------------------------------------------
// Entity backed by a file on disk; the file is opened lazily on each write.
private class FileEntity(val file: File, override val mediaType: MediaType, override val encoding: Encoding)
extends RequestEntity with RequestEntityLike[FileEntity] {
override def write(out: OutputStream): Unit = {
val in = new BufferedInputStream(new FileInputStream(file))
try {writeBytes(in, out)}
finally {in.close()}
}
override lazy val length: Option[Long] = Some(file.length())
override protected def build(mediaType: MediaType, encoding: Encoding): FileEntity =
new FileEntity(file, mediaType, encoding)
override def toString = "File(%s)" format file
}
def apply(file: File): RequestEntity = new FileEntity(file, MediaType.OctetStream, Encoding.Identity)
}
/** A binary request entity. */
trait RequestEntity extends RequestEntityLike[RequestEntity]
/** A character-based request entity; bytes are produced by encoding the
  * characters with `charset`. */
trait TextRequestEntity extends RequestEntity with RequestEntityLike[TextRequestEntity] {
// Charset from the media type, falling back to the package-level default.
def charset: Charset = mediaType.charset.getOrElse(DefaultCharset)
protected def write(out: Writer): Unit
override protected def write(out: OutputStream): Unit = {
val writer = new OutputStreamWriter(out, charset)
write(writer)
writer.flush()
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.sql.Date
import org.apache.commons.lang3.StringUtils
import org.apache.spark.sql.catalyst.plans.logical.{EventTimeTimeout, ProcessingTimeTimeout}
import org.apache.spark.sql.execution.streaming.GroupStateImpl._
import org.apache.spark.sql.streaming.{GroupState, GroupStateTimeout}
import org.apache.spark.unsafe.types.CalendarInterval
/**
* Internal implementation of the [[GroupState]] interface. Methods are not thread-safe.
*
* @param optionalValue Optional value of the state
* @param batchProcessingTimeMs Processing time of current batch, used to calculate timestamp
* for processing time timeouts
* @param timeoutConf Type of timeout configured. Based on this, different operations will
* be supported.
* @param hasTimedOut Whether the key for which this state wrapped is being created is
* getting timed out or not.
*/
private[sql] class GroupStateImpl[S](
optionalValue: Option[S],
batchProcessingTimeMs: Long,
eventTimeWatermarkMs: Long,
timeoutConf: GroupStateTimeout,
override val hasTimedOut: Boolean) extends GroupState[S] {
// Constructor to create dummy state when using mapGroupsWithState in a batch query
def this(optionalValue: Option[S]) = this(
optionalValue,
batchProcessingTimeMs = NO_TIMESTAMP,
eventTimeWatermarkMs = NO_TIMESTAMP,
timeoutConf = GroupStateTimeout.NoTimeout,
hasTimedOut = false)
// Mutable view of the optional state value plus flags recording what the
// user function did to it during this invocation.
private var value: S = optionalValue.getOrElse(null.asInstanceOf[S])
private var defined: Boolean = optionalValue.isDefined
private var updated: Boolean = false // whether value has been updated (but not removed)
private var removed: Boolean = false // whether value has been removed
private var timeoutTimestamp: Long = NO_TIMESTAMP
// ========= Public API =========
override def exists: Boolean = defined
override def get: S = {
if (defined) {
value
} else {
throw new NoSuchElementException("State is either not defined or has already been removed")
}
}
override def getOption: Option[S] = {
if (defined) {
Some(value)
} else {
None
}
}
// Updating clears any prior removal; null is rejected because absence is
// represented by remove()/getOption, not by a null value.
override def update(newValue: S): Unit = {
if (newValue == null) {
throw new IllegalArgumentException("'null' is not a valid state value")
}
value = newValue
defined = true
updated = true
removed = false
}
override def remove(): Unit = {
defined = false
updated = false
removed = true
}
// Processing-time timeout: the deadline is relative to the current batch's
// processing time.
override def setTimeoutDuration(durationMs: Long): Unit = {
if (timeoutConf != ProcessingTimeTimeout) {
throw new UnsupportedOperationException(
"Cannot set timeout duration without enabling processing time timeout in " +
"map/flatMapGroupsWithState")
}
if (durationMs <= 0) {
throw new IllegalArgumentException("Timeout duration must be positive")
}
if (batchProcessingTimeMs != NO_TIMESTAMP) {
timeoutTimestamp = durationMs + batchProcessingTimeMs
} else {
// This is being called in a batch query, hence no processing timestamp.
// Just ignore any attempts to set timeout.
}
}
override def setTimeoutDuration(duration: String): Unit = {
setTimeoutDuration(parseDuration(duration))
}
@throws[IllegalArgumentException]("if 'timestampMs' is not positive")
@throws[IllegalStateException]("when state is either not initialized, or already removed")
@throws[UnsupportedOperationException](
"if 'timeout' has not been enabled in [map|flatMap]GroupsWithState in a streaming query")
// Event-time timeout: the deadline is an absolute timestamp and must not be
// behind the current watermark (it would fire immediately).
override def setTimeoutTimestamp(timestampMs: Long): Unit = {
checkTimeoutTimestampAllowed()
if (timestampMs <= 0) {
throw new IllegalArgumentException("Timeout timestamp must be positive")
}
if (eventTimeWatermarkMs != NO_TIMESTAMP && timestampMs < eventTimeWatermarkMs) {
throw new IllegalArgumentException(
s"Timeout timestamp ($timestampMs) cannot be earlier than the " +
s"current watermark ($eventTimeWatermarkMs)")
}
if (batchProcessingTimeMs != NO_TIMESTAMP) {
timeoutTimestamp = timestampMs
} else {
// This is being called in a batch query, hence no processing timestamp.
// Just ignore any attempts to set timeout.
}
}
@throws[IllegalArgumentException]("if 'additionalDuration' is invalid")
@throws[IllegalStateException]("when state is either not initialized, or already removed")
@throws[UnsupportedOperationException](
"if 'timeout' has not been enabled in [map|flatMap]GroupsWithState in a streaming query")
override def setTimeoutTimestamp(timestampMs: Long, additionalDuration: String): Unit = {
checkTimeoutTimestampAllowed()
setTimeoutTimestamp(parseDuration(additionalDuration) + timestampMs)
}
@throws[IllegalStateException]("when state is either not initialized, or already removed")
@throws[UnsupportedOperationException](
"if 'timeout' has not been enabled in [map|flatMap]GroupsWithState in a streaming query")
override def setTimeoutTimestamp(timestamp: Date): Unit = {
checkTimeoutTimestampAllowed()
setTimeoutTimestamp(timestamp.getTime)
}
@throws[IllegalArgumentException]("if 'additionalDuration' is invalid")
@throws[IllegalStateException]("when state is either not initialized, or already removed")
@throws[UnsupportedOperationException](
"if 'timeout' has not been enabled in [map|flatMap]GroupsWithState in a streaming query")
override def setTimeoutTimestamp(timestamp: Date, additionalDuration: String): Unit = {
checkTimeoutTimestampAllowed()
setTimeoutTimestamp(timestamp.getTime + parseDuration(additionalDuration))
}
override def toString: String = {
s"GroupState(${getOption.map(_.toString).getOrElse("<undefined>")})"
}
// ========= Internal API =========
/** Whether the state has been marked for removing */
def hasRemoved: Boolean = removed
/** Whether the state has been updated */
def hasUpdated: Boolean = updated
/** Return timeout timestamp or `TIMEOUT_TIMESTAMP_NOT_SET` if not set */
def getTimeoutTimestamp: Long = timeoutTimestamp
// Parses "interval ..." (or a bare duration, which gets the prefix added)
// into milliseconds; months are approximated as 31 days.
private def parseDuration(duration: String): Long = {
if (StringUtils.isBlank(duration)) {
throw new IllegalArgumentException(
"Provided duration is null or blank.")
}
val intervalString = if (duration.startsWith("interval")) {
duration
} else {
"interval " + duration
}
val cal = CalendarInterval.fromString(intervalString)
if (cal == null) {
throw new IllegalArgumentException(
s"Provided duration ($duration) is not valid.")
}
if (cal.milliseconds < 0 || cal.months < 0) {
throw new IllegalArgumentException(s"Provided duration ($duration) is not positive")
}
val millisPerMonth = CalendarInterval.MICROS_PER_DAY / 1000 * 31
cal.milliseconds + cal.months * millisPerMonth
}
private def checkTimeoutTimestampAllowed(): Unit = {
if (timeoutConf != EventTimeTimeout) {
throw new UnsupportedOperationException(
"Cannot set timeout timestamp without enabling event time timeout in " +
"map/flatMapGroupsWithState")
}
}
}
private[sql] object GroupStateImpl {
// Sentinel representing the absence of a valid timestamp.
val NO_TIMESTAMP = -1L
}
| jrshust/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/GroupStateImpl.scala | Scala | apache-2.0 | 8,205 |
import sbt._
import Keys._
import org.scalatra.sbt._
import play.twirl.sbt.SbtTwirl
import play.twirl.sbt.Import.TwirlKeys._
import sbtassembly._
import sbtassembly.AssemblyKeys._
/** sbt build definition for GitBucket: a single Scalatra project assembled
  * into a fat jar (sbt-assembly) with Twirl templates. */
object MyBuild extends Build {
val Organization = "gitbucket"
val Name = "gitbucket"
val Version = "3.9.0"
val ScalaVersion = "2.11.6"
val ScalatraVersion = "2.3.1"
lazy val project = Project (
"gitbucket",
file(".")
)
.settings(ScalatraPlugin.scalatraWithJRebel: _*)
.settings(
// Skip tests during `assembly`; drop META-INF entries and resolve any
// remaining duplicate files by taking the first occurrence.
test in assembly := {},
assemblyMergeStrategy in assembly := {
case PathList("META-INF", xs @ _*) =>
(xs map {_.toLowerCase}) match {
case ("manifest.mf" :: Nil) => MergeStrategy.discard
case _ => MergeStrategy.discard
}
case x => MergeStrategy.first
}
)
.settings(
sourcesInBase := false,
organization := Organization,
name := Name,
version := Version,
scalaVersion := ScalaVersion,
resolvers ++= Seq(
Classpaths.typesafeReleases,
"amateras-repo" at "http://amateras.sourceforge.jp/mvn/",
"amateras-snapshot-repo" at "http://amateras.sourceforge.jp/mvn-snapshot/"
),
scalacOptions := Seq("-deprecation", "-language:postfixOps"),
libraryDependencies ++= Seq(
"org.eclipse.jgit" % "org.eclipse.jgit.http.server" % "3.4.2.201412180340-r",
"org.eclipse.jgit" % "org.eclipse.jgit.archive" % "3.4.2.201412180340-r",
"org.scalatra" %% "scalatra" % ScalatraVersion,
"org.scalatra" %% "scalatra-specs2" % ScalatraVersion % "test",
"org.scalatra" %% "scalatra-json" % ScalatraVersion,
"org.json4s" %% "json4s-jackson" % "3.2.11",
"jp.sf.amateras" %% "scalatra-forms" % "0.2.0",
"commons-io" % "commons-io" % "2.4",
"io.github.gitbucket" % "markedj" % "1.0.6-SNAPSHOT",
"org.apache.commons" % "commons-compress" % "1.9",
"org.apache.commons" % "commons-email" % "1.3.3",
"org.apache.httpcomponents" % "httpclient" % "4.3.6",
"org.apache.sshd" % "apache-sshd" % "0.11.0",
"org.apache.tika" % "tika-core" % "1.10",
"com.typesafe.slick" %% "slick" % "2.1.0",
"com.novell.ldap" % "jldap" % "2009-10-07",
"com.h2database" % "h2" % "1.4.180",
"ch.qos.logback" % "logback-classic" % "1.1.1" % "container",
"org.eclipse.jetty" % "jetty-webapp" % "8.1.16.v20140903" % "container;provided",
"org.eclipse.jetty.orbit" % "javax.servlet" % "3.0.0.v201112011016" % "container;provided;test" artifacts Artifact("javax.servlet", "jar", "jar"),
"junit" % "junit" % "4.12" % "test",
"com.mchange" % "c3p0" % "0.9.5",
"com.typesafe" % "config" % "1.2.1",
"com.typesafe.akka" %% "akka-actor" % "2.3.10",
"com.enragedginger" %% "akka-quartz-scheduler" % "1.3.0-akka-2.3.x" exclude("c3p0","c3p0")
),
play.twirl.sbt.Import.TwirlKeys.templateImports += "gitbucket.core._",
javacOptions in compile ++= Seq("-target", "7", "-source", "7"),
testOptions in Test += Tests.Argument(TestFrameworks.Specs2, "junitxml", "console"),
// Tests run forked with an isolated gitbucket home directory.
javaOptions in Test += "-Dgitbucket.home=target/gitbucket_home_for_test",
testOptions in Test += Tests.Setup( () => new java.io.File("target/gitbucket_home_for_test").mkdir() ),
fork in Test := true,
packageOptions += Package.MainClass("JettyLauncher")
).enablePlugins(SbtTwirl)
}
| swaldman/gitbucket | project/build.scala | Scala | apache-2.0 | 3,358 |
package com.sg.pnx.math
import Ordering.Implicits._
/**
* Created by bodie on 7/19/14.
*/
/** Small float math utilities: clamping, linear range remapping, linear
  * interpolation and a table-based fast sine/cosine approximation sampled
  * every 15 degrees.
  *
  * Requires `Ordering.Implicits._` in scope (imported at the top of this
  * file) for the comparison operators used by `defaultClamp`. */
object Math {
  val PI = 3.141592f
  val TWO_PI = PI*2.0f
  val HALF_PI = PI*0.5f
  val QUARTER_PI = PI*0.25f

  // Sine samples every 15 degrees (pi/12 radians): 25 entries covering
  // [0, 2*pi] inclusive, so index 24 wraps back to sin(2*pi) = 0.
  val sinValues = List( 0.0f, 0.258819f, 0.5f, 0.707107f, 0.866025f, 0.965926f,
    1.0f, 0.965926f, 0.866025f, 0.707107f, 0.5f, 0.258819f,
    -0.0f, -0.258819f, -0.5f, -0.707107f, -0.866025f, -0.965926f,
    -1.0f, -0.965926f, -0.866025f, -0.707107f, -0.5f, -0.258819f, 0f )

  /** Clamps `v` into the closed interval [lower, upper] using the implicit
    * ordering for `T`. */
  def clamp[T : Ordering]( v: T, lower: T, upper: T ): T = {
    clampWith(v, lower, upper)( defaultClamp )
  }

  /** Clamps `v` with a caller-supplied clamping strategy `f0`. */
  def clampWith[T]( v: T, lower: T, upper: T )( f0: (T, T, T) => T ): T = {
    f0( v, lower, upper )
  }

  /** Default clamping strategy: returns the nearest bound when `v` falls
    * outside [lower, upper], otherwise `v` itself. */
  def defaultClamp[T : Ordering]( v: T, lower: T, upper: T ): T = {
    if( v > upper ) upper
    else if( v < lower ) lower
    else v
  }

  // Presumably a scratch/demo helper; kept for source compatibility.
  def test(x: Float): Float = 2f * x

  /** Linearly remaps `v` from [inputLower, inputUpper] onto
    * [mappedLower, mappedUpper] (defaults to [0, 1]).
    *
    * Fixed: the previous formula `t*mappedUpper + mappedLower` ignored
    * `mappedLower` in the scale factor, producing out-of-range results for
    * any non-zero target lower bound (e.g. remapping 5 from [0,10] to
    * [10,20] yielded 20 instead of 15). */
  def remap( v: Float, inputLower: Float, inputUpper: Float, mappedLower: Float = 0f, mappedUpper: Float = 1f): Float = {
    val domain = inputUpper - inputLower
    mappedLower + ((v - inputLower) / domain) * (mappedUpper - mappedLower)
  }

  /** Linear interpolation: returns `a` at blend = 0 and `b` at blend = 1. */
  def lerp( a: Float, b: Float, blend: Float = 0.5f ): Float = {
    b*blend + a*(1-blend)
  }

  /** Table-based sine approximation: wraps the angle into [0, 2*pi] and
    * linearly interpolates between the two nearest 15-degree samples.
    *
    * Fixed: the blend factor was inverted (`ceil(x) - x`, i.e. 1 - frac,
    * instead of the fractional part), which interpolated towards the wrong
    * neighbouring sample. */
  def fastSin( a: Float ): Float = {
    val rad = modulo(a, TWO_PI) // wrap the angle into [0, 2*pi]
    val scaled = (rad / TWO_PI) * 24
    val lo = math.floor(scaled).toInt
    val hi = math.ceil(scaled).toInt
    lerp( sinValues(lo), sinValues(hi), scaled - lo )
  }

  /** Table-based cosine approximation via cos(a) = sin(a + pi/2).
    *
    * Fixed: previously computed `fastSin(a - HALF_PI)`, which is
    * sin(a - pi/2) = -cos(a), i.e. the negated cosine. */
  def fastCos( a: Float ): Float = {
    fastSin( a + HALF_PI )
  }

  /** Floating-point modulo by repeated subtraction; result lies in [0, b].
    * Note: when the input is an exact multiple of `b` the result may be `b`
    * rather than 0, which the sine table tolerates (index 24 is sin(2*pi)). */
  def modulo( a: Float, b: Float ): Float = {
    var c = a
    while( c > b ) c -= b
    while( c < 0f ) c += b
    c
  }

  /** Degrees to radians (multiplies by pi/180). */
  def toRad( degrees: Float ): Float = {
    degrees*0.01745329251994329576923690768489f
  }

  /** Radians to degrees (multiplies by 180/pi). */
  def toDeg( rads: Float ): Float = {
    rads*57.295779513082320876798154814105f
  }
}
| synapse-garden/phoenix | src/main/scala/com/sg/pnx/math/Math.scala | Scala | mit | 2,253 |
/*
* Copyright 2016 Actian Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.actian.spark_vector.sql
import com.actian.spark_vector.vector.{VectorConnectionProperties, JDBCPort}
import scala.annotation.meta.param
/** A reference to a `Vector` table */
// Immutable reference to a Vector table plus the connection details needed to
// reach it; `cols` optionally restricts which columns are loaded (empty = all).
case class TableRef(host: String, port: JDBCPort, database: String, user: Option[String], password: Option[String], table: String, cols: Seq[String]) {
// Extracts just the connection-level fields (everything but table/cols).
def toConnectionProps: VectorConnectionProperties = VectorConnectionProperties(host, port, database, user, password)
}
object TableRef {
// Builds a TableRef from datasource options. "host", "database" and "table"
// are required (Map.apply throws NoSuchElementException when absent); the
// rest are optional. "cols" is a comma-separated, whitespace-trimmed list.
def apply(parameters: Map[String, String]): TableRef = {
val host = parameters("host")
val instance = parameters.get("instance")
val database = parameters("database")
val table = parameters("table")
val instanceOffset = parameters.get("instanceOffset")
val port = parameters.get("port")
val user = parameters.get("user");
val password = parameters.get("password")
val colsToLoad = parameters.get("cols").map(_.split(",").map(_.trim).toSeq).getOrElse(Nil)
TableRef(host, JDBCPort(instance, instanceOffset, port), database, user, password, table, colsToLoad)
}
// Builds a TableRef from existing connection properties; loads all columns.
def apply(connectionProps: VectorConnectionProperties, table: String): TableRef = TableRef(connectionProps.host,
connectionProps.port, connectionProps.database, connectionProps.user, connectionProps.password, table, Nil)
}
| ActianCorp/spark-vector | src/main/scala/com/actian/spark_vector/sql/TableRef.scala | Scala | apache-2.0 | 1,915 |
package webapp
import org.scalajs.dom
import org.scalajs.dom.raw.HTMLImageElement
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.scalajs.js.JSApp
import ApiController._
import org.scalajs.dom.html.{Div, Title}
import scala.scalajs.js.annotation.JSExportTopLevel
/** Scala.js front-end for the FoodMatch "pick one of two dishes" page and the
  * leaderboard page. Runs single-threaded in the browser, so the mutable
  * `voted`/`deal` fields are not subject to data races. */
object FoodMatchApp extends JSApp {
// Guards against double-voting on the same deal; reset when a new deal loads.
var voted: Boolean = false
// The deal currently displayed; assigned by changeDeal().
var deal: Deal = _
// Entry point for the match page: wires keyboard shortcuts
// (a = left, l = right, r = reload) and loads the first deal.
@JSExportTopLevel("initMatch")
def initMatch(): Unit = {
dom.document.addEventListener(
"keypress",
(e0: dom.Event) => {
val e = e0.asInstanceOf[dom.KeyboardEvent]
e.key match {
case "a" => chooseLeft()
case "l" => chooseRight()
case "r" => changeDeal()
case _ =>
}
},
useCapture = false)
changeDeal()
}
// Entry point for the leaderboard page: fetches the ranking and renders one
// cloned template node per dish.
@JSExportTopLevel("initLeaderboard")
def initLeaderboard(): Unit = {
val container = dom.document.getElementById("leaderboard-container")
val dishExample = dom.document.getElementById("leaderboard-example")
fetchLeaderboard().map(leaderboard => {
var i = 1
leaderboard.foreach(dish => {
val div = dishExample.cloneNode(true).asInstanceOf[Div]
div.id = s"leaderboard-$i"
//div.childNodes(0).asInstanceOf[Div].childNodes(0).asInstanceOf[HTMLImageElement].src = dish.imageUrl
div.getElementsByTagName("img")(0).asInstanceOf[HTMLImageElement].src = dish.imageUrl
div.getElementsByTagName("h5")(0).textContent = dish.title
div.getElementsByTagName("p")(0).textContent = dish.keywords
container.appendChild(div)
i += 1
})
})
}
// Clears the current pair, fetches a fresh deal and fills in both sides;
// re-enables voting once the new deal is displayed.
def changeDeal(): Unit = {
val leftTitle = dom.document.getElementById("left-dish-title")
val rightTitle = dom.document.getElementById("right-dish-title")
val leftKeywords = dom.document.getElementById("left-dish-keywords")
val rightKeywords = dom.document.getElementById("right-dish-keywords")
val leftImage = dom.document.getElementById("left-image").asInstanceOf[HTMLImageElement]
val rightImage = dom.document.getElementById("right-image").asInstanceOf[HTMLImageElement]
leftTitle.textContent = ""
rightTitle.textContent = ""
leftKeywords.textContent = ""
rightKeywords.textContent = ""
leftImage.src = ""
rightImage.src = ""
fetchDeal().map(deal => {
this.deal = deal
leftTitle.textContent = deal.left.title
rightTitle.textContent = deal.right.title
leftImage.src = deal.left.imageUrl
rightImage.src = deal.right.imageUrl
leftKeywords.textContent = deal.left.keywords
rightKeywords.textContent = deal.right.keywords
turnGrayOff("left-image")
turnGrayOff("right-image")
voted = false
})
}
// Adds/removes the CSS class that greys out the non-chosen image.
def turnGrayOn(imageId: String): Unit = {
dom.document.getElementById(imageId).classList.add("grayscale")
}
def turnGrayOff(imageId: String): Unit = {
dom.document.getElementById(imageId).classList.remove("grayscale")
}
// Records a vote (at most once per deal), greys out the losing side and then
// loads the next deal once the vote has been posted.
def choose(choice: String, grayout: String): Unit = {
if (!voted) {
voted = true
turnGrayOn(grayout)
postChoice(choice, deal.uid).map(_ => changeDeal())
}
}
@JSExportTopLevel("chooseLeft")
def chooseLeft(): Unit = choose("left", "right-image")
@JSExportTopLevel("chooseRight")
def chooseRight(): Unit = choose("right", "left-image")
// All initialisation happens via the exported init* entry points.
override def main(): Unit = {}
}
| pikkle/FoodMatch | client/src/main/scala/webapp/FoodMatchApp.scala | Scala | apache-2.0 | 3,217 |
package com.github.opengrabeso.mixtio
import java.time.temporal.ChronoUnit
import com.garmin.fit
import com.garmin.fit.{Event => FitEvent, _}
import Main.ActivityEvents
import common.Util._
import common.model._
import java.time.ZonedDateTime
object FitExport {
// An encoder is anything that accepts FIT message and message-definition
// callbacks (satisfied by BufferEncoder below).
type Encoder = MesgListener with MesgDefinitionListener
private def createEncoder: BufferEncoder = {
new BufferEncoder
}
// Emits the mandatory FileId message marking the FIT file as an activity.
def encodeHeader(encoder: Encoder): Unit = {
//Generate FileIdMessage
val fileIdMesg = new FileIdMesg
fileIdMesg.setType(fit.File.ACTIVITY)
encoder.onMesg(fileIdMesg)
}
// Converts a ZonedDateTime to a FIT DateTime by shifting the Unix epoch by
// the FIT epoch offset (DateTime.OFFSET, in ms).
// NOTE(review): `toEpochMilli / 1000` is integer division, so sub-second
// precision is dropped before the fractional OFFSET term is subtracted —
// confirm whether second resolution is intended here.
def toTimestamp(zonedTime: ZonedDateTime): DateTime = {
val instant = zonedTime.toInstant
val timestamp = instant.toEpochMilli / 1000 - DateTime.OFFSET / 1000.0
val dateTime = new DateTime(0, timestamp)
dateTime
}
def export(events: ActivityEvents): Array[Byte] = {
val encoder = createEncoder
abstract class FitEvent {
def time: ZonedDateTime
def encode(encoder: Encoder)
}
abstract class DataEvent(time: ZonedDateTime, set: RecordMesg => Unit) extends FitEvent {
override def encode(encoder: Encoder): Unit = {
val myMsg = new RecordMesg()
myMsg.setTimestamp(toTimestamp(time))
set(myMsg)
encoder.onMesg(myMsg)
}
}
def encodeGPS(msg: RecordMesg, gps: GPSPoint) = {
val longLatScale = (1L << 31).toDouble / 180
msg.setPositionLong((gps.longitude * longLatScale).toInt)
msg.setPositionLat((gps.latitude * longLatScale).toInt)
gps.elevation.foreach(e => msg.setAltitude(e.toFloat))
}
class GPSEvent(val time: ZonedDateTime, val gps: GPSPoint) extends DataEvent(time, encodeGPS(_, gps))
class AttribEvent(val time: ZonedDateTime, data: Int, set: (RecordMesg, Int) => Unit) extends DataEvent(time, set(_, data))
val gpsAsEvents = events.gps.stream map { case (t, gps) =>
new GPSEvent(t, gps)
}
val attributesWithLast = events.attributes.map { attr =>
attr.stream
}
val attributesAsEvents = events.attributes.flatMap { attrib =>
val createAttribEvent: (RecordMesg, Int) => Unit = (msg, value) =>
attrib match {
case x: DataStreamHR => msg.setHeartRate(value.toShort)
case x: DataStreamAttrib =>
x.attribName match {
case "watts" => msg.setPower(value)
case "cadence" => msg.setCadence(value.toShort)
case "temp" => msg.setTemperature(value.toByte)
case _ => // unsupported attribute
}
case _ => ???
}
val attribStream = if (false) {
// attempt to fix Strava not showing temperature: make sure each attribute is present for the last GPS value
val lastGPSTime = events.gps.stream.lastKey
if (attrib.stream contains lastGPSTime) {
attrib.stream
} else {
attrib.stream ++ attrib.stream.until(lastGPSTime).lastOption.map(lastGPSTime -> _._2)
}
} else attrib.stream
attribStream.map { case (t, data) =>
new AttribEvent(t, data.asInstanceOf[Int], createAttribEvent)
}
}
trait AutoClose {
def emitMsg(time: ZonedDateTime, endTime: ZonedDateTime)
private var isOpen = false
private var counter = 0
private var lastStart = events.id.startTime
def count: Int = counter
def openLap(time: ZonedDateTime): Unit = {
lastStart = time
isOpen = true
}
def closeLap(time: ZonedDateTime): Unit = {
if (isOpen && time > lastStart) {
emitMsg(lastStart, time)
counter += 1
}
openLap(time)
}
}
object LapAutoClose extends AutoClose {
def emitMsg(startTime: ZonedDateTime, endTime: ZonedDateTime): Unit = {
val myMsg = new LapMesg()
myMsg.setEvent(FitEvent.LAP)
myMsg.setEventType(EventType.STOP)
myMsg.setStartTime(toTimestamp(startTime))
myMsg.setTimestamp(toTimestamp(endTime))
myMsg.setMessageIndex(count)
val lapDurationSec = ChronoUnit.SECONDS.between(startTime, endTime).toFloat
myMsg.setTotalElapsedTime(lapDurationSec)
myMsg.setTotalTimerTime(lapDurationSec)
encoder.onMesg(myMsg)
}
}
def closeActivity(timeEnd: ZonedDateTime): Unit = {
val myMsg = new ActivityMesg()
myMsg.setTimestamp(toTimestamp(timeEnd))
myMsg.setNumSessions(1)
myMsg.setType(Activity.MANUAL)
myMsg.setEvent(FitEvent.ACTIVITY)
myMsg.setEventType(EventType.STOP)
encoder.onMesg(myMsg)
}
class LapFitEvent(val time: ZonedDateTime) extends FitEvent {
override def encode(encoder: Encoder): Unit = {
LapAutoClose.closeLap(time)
}
}
val lapsAsEvents = events.events.collect {
case LapEvent(time) =>
new LapFitEvent(time)
}
val allEvents = (gpsAsEvents ++ attributesAsEvents ++ lapsAsEvents).toVector.sortBy(_.time)
val timeBeg = allEvents.head.time
val timeEnd = allEvents.last.time
def encodeHeader(encoder: Encoder): Unit = {
//Generate FileIdMessage
val fileIdMesg = new FileIdMesg
fileIdMesg.setManufacturer(Manufacturer.SUUNTO)
fileIdMesg.setType(fit.File.ACTIVITY)
fileIdMesg.setProduct(1) // TODO: detect for real
encoder.onMesg(fileIdMesg)
}
encodeHeader(encoder)
LapAutoClose.openLap(timeBeg)
allEvents.foreach(_.encode(encoder))
val durationSec = ChronoUnit.SECONDS.between(timeBeg, timeEnd)
LapAutoClose.closeLap(timeEnd)
val (sport, subsport) = events.id.sportName match {
// TODO: handle other sports
case Event.Sport.Run => (Sport.RUNNING, SubSport.STREET)
case Event.Sport.Ride => (Sport.CYCLING, SubSport.ROAD)
case Event.Sport.Swim => (Sport.SWIMMING, SubSport.GENERIC)
case Event.Sport.Hike => (Sport.HIKING, SubSport.GENERIC)
case Event.Sport.Walk => (Sport.WALKING, SubSport.GENERIC)
case Event.Sport.NordicSki => (Sport.CROSS_COUNTRY_SKIING, SubSport.GENERIC)
case Event.Sport.AlpineSki => (Sport.ALPINE_SKIING, SubSport.GENERIC)
case Event.Sport.Canoeing => (Sport.PADDLING, SubSport.GENERIC)
case Event.Sport.Rowing => (Sport.ROWING, SubSport.GENERIC)
case Event.Sport.Surfing => (Sport.SURFING, SubSport.GENERIC)
case Event.Sport.IceSkate => (Sport.ICE_SKATING, SubSport.GENERIC)
case Event.Sport.InlineSkate => (Sport.INLINE_SKATING, SubSport.GENERIC)
case Event.Sport.Kayaking => (Sport.KAYAKING, SubSport.GENERIC)
case Event.Sport.WindSurf => (Sport.WINDSURFING, SubSport.GENERIC)
case Event.Sport.KiteSurf => (Sport.KITESURFING, SubSport.GENERIC)
case Event.Sport.Snowshoe => (Sport.SNOWSHOEING, SubSport.GENERIC)
case Event.Sport.EbikeRide => (Sport.E_BIKING, SubSport.GENERIC)
//case Event.Sport.WindSurfing => (Sport.SAILING, SubSport.GENERIC)
case _ => (Sport.GENERIC, SubSport.GENERIC)
}
{
val myMsg = new SessionMesg()
myMsg.setStartTime(toTimestamp(timeBeg))
myMsg.setTimestamp(toTimestamp(timeEnd))
myMsg.setSport(sport)
myMsg.setSubSport(subsport)
myMsg.setTotalElapsedTime(durationSec.toFloat)
myMsg.setTotalTimerTime(durationSec.toFloat)
myMsg.setMessageIndex(0)
myMsg.setFirstLapIndex(0)
myMsg.setNumLaps(LapAutoClose.count + 1)
myMsg.setEvent(FitEvent.SESSION)
myMsg.setEventType(EventType.STOP)
encoder.onMesg(myMsg)
}
closeActivity(timeEnd)
encoder.close
}
}
| OndrejSpanel/Stravamat | backend/src/main/scala/com/github/opengrabeso/mixtio/FitExport.scala | Scala | gpl-2.0 | 7,592 |
/*
* Copyright 2009-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package http
package provider
package servlet
import javax.servlet._
import javax.servlet.http._
import net.liftweb.common._
import net.liftweb.util._
import net.liftweb.http._
import Helpers._
/**
 * Exposes Lift as a standard servlet [[Filter]], bridging the servlet
 * container lifecycle to Lift's boot/terminate and request handling.
 */
trait ServletFilterProvider extends Filter with HTTPProvider {
  var ctx: HTTPContext = _

  //We need to capture the ServletContext on init
  /** Container lifecycle hook: captures the context and boots Lift. */
  def init(config: FilterConfig): Unit = {
    ctx = new HTTPServletContext(config.getServletContext)
    LiftRules.setContext(ctx)
    bootLift(Box.legacyNullTest(config.getInitParameter("bootloader")))
  }

  //And throw it away on destruction
  /** Container lifecycle hook: drops the context and shuts Lift down. */
  def destroy(): Unit = {
    ctx = null
    terminate
  }

  def context: HTTPContext = ctx

  /**
   * Executes the Lift filter component.
   *
   * While Lift is shutting down (`LiftRules.ending`), requests bypass Lift
   * and go straight down the filter chain. Otherwise the request/response
   * pair is wrapped and handed to `service`, which may itself fall through
   * to the chain; the in-flight request counter is maintained around the
   * whole call for graceful shutdown.
   */
  def doFilter(req: ServletRequest, res: ServletResponse, chain: FilterChain): Unit = {
    if (LiftRules.ending) chain.doFilter(req, res)
    else {
      LiftRules.reqCnt.incrementAndGet()
      try {
        TransientRequestVarHandler(Empty,
          RequestVarHandler(Empty,
            (req, res) match {
              case (httpReq: HttpServletRequest, httpRes: HttpServletResponse) =>
                val httpRequest = new HTTPRequestServlet(httpReq, this)
                val httpResponse = new HTTPResponseServlet(httpRes)

                service(httpRequest, httpResponse) {
                  chain.doFilter(req, res)
                }
              // Non-HTTP requests are passed through untouched.
              case _ => chain.doFilter(req, res)
            }))
      } finally {LiftRules.reqCnt.decrementAndGet()}
    }
  }

}
| wsaccaco/lift | framework/lift-base/lift-webkit/src/main/scala/net/liftweb/http/provider/servlet/ServletFilterProvider.scala | Scala | apache-2.0 | 2,196 |
/* Copyright 2009-2018 EPFL, Lausanne */
package inox
package solvers
package unrolling
/** Solver tests for (in)equalities between first-class function values. */
class FunctionEqualitySuite extends SolvingTestSuite with DatastructureUtils {
  import inox.trees._
  import dsl._

  val f = FreshIdentifier("f")

  // MMap[A, B]: a single-constructor sort wrapping a total function A => Option[B].
  val mmapID = FreshIdentifier("MMap")
  val mmapConsID = mmapID.freshen
  val mmap = mkSort(mmapID)("A","B") {
    case Seq(aT, bT) => Seq(
      (mmapConsID, Seq(ValDef(f, aT =>: T(optionID)(bT))))
    )
  }

  // contains(m, k) holds iff m's underlying function maps k to Some(_).
  val containsID = FreshIdentifier("contains")
  val contains = mkFunDef(containsID)("A", "B") { case Seq(aT, bT) => (
    Seq("m" :: T(mmapID)(aT, bT), "k" :: aT), BooleanType(), { case Seq(m, k) =>
      m.getField(f)(k) is someID
    })
  }

  val symbols = baseSymbols
    .withFunctions(Seq(contains))
    .withSorts(Seq(mmap))

  val program = InoxProgram(symbols)

  test("simple theorem") { implicit ctx =>
    // A map whose function always returns Some(const 0) does contain key 0,
    // so the conjunction with `false` is unsatisfiable; its negation is valid (SAT).
    val clause = let(
      "states" :: T(mmapID)(IntegerType(), IntegerType() =>: IntegerType()),
      C(mmapConsID)(IntegerType(), IntegerType() =>: IntegerType())(\("i" :: IntegerType())(i => C(someID)(IntegerType() =>: IntegerType())(\("x" :: IntegerType())(x => IntegerLiteral(0)))))
    )(states => contains(IntegerType(), IntegerType() =>: IntegerType())(states, IntegerLiteral(0)) && E(false))

    assert(SimpleSolverAPI(program.getSolver).solveSAT(Not(clause)).isSAT)
  }

  test("possible equality 1") { implicit ctx =>
    // f may equal an eta-expansion of g (g is unconstrained).
    val f = ("f" :: (IntegerType() =>: IntegerType())).toVariable
    val g = ("g" :: (IntegerType() =>: IntegerType())).toVariable
    val clause = f === (\("x" :: IntegerType())(x => g(x)))
    assert(SimpleSolverAPI(program.getSolver).solveSAT(clause).isSAT)
  }

  test("possible equality 2") { implicit ctx =>
    // Symmetric variant of the previous test.
    val f = ("f" :: (IntegerType() =>: IntegerType())).toVariable
    val g = ("g" :: (IntegerType() =>: IntegerType())).toVariable
    val clause = g === (\("x" :: IntegerType())(x => f(x)))
    assert(SimpleSolverAPI(program.getSolver).solveSAT(clause).isSAT)
  }

  test("impossible equality 1") { implicit ctx =>
    // A lambda literal is never equal to a free function variable it wraps.
    val f = ("f" :: (IntegerType() =>: IntegerType())).toVariable
    val clause = f === (\("x" :: IntegerType())(x => f(x)))
    assert(SimpleSolverAPI(program.getSolver).solveSAT(clause).isUNSAT)
  }

  test("impossible equality 2") { implicit ctx =>
    // Mutually-wrapping equalities are jointly unsatisfiable.
    val f = ("f" :: (IntegerType() =>: IntegerType())).toVariable
    val g = ("g" :: (IntegerType() =>: IntegerType())).toVariable
    val clause = f === (\("x" :: IntegerType())(x => g(x))) && g === (\("x" :: IntegerType())(x => f(x)))
    assert(SimpleSolverAPI(program.getSolver).solveSAT(clause).isUNSAT)
  }
}
| romac/inox | src/it/scala/inox/solvers/unrolling/FunctionEqualitySuite.scala | Scala | apache-2.0 | 2,601 |
package eu.inn.binders
import eu.inn.binders.value.internal.DynamicMacro
import scala.language.experimental.macros
package object value {

  /** Adds macro-generated `fromValue` deserialization to any [[Value]]. */
  implicit class ValueReader(val value: Value) {
    def fromValue[O]: O = macro DynamicMacro.fromValue[O]
  }

  /** Adds macro-generated `toValue` serialization to any object. */
  implicit class ValueGenerator[O](val obj: O) {
    def toValue: Value = macro DynamicMacro.toValue[O]
  }

  // Implicit lifts from plain Scala values into the Value AST nodes.
  implicit def int2number(i: Int): Number = Number(i)
  implicit def long2number(i: Long): Number = Number(i)
  implicit def bigdecimal2number(i: BigDecimal): Number = Number(i)
  implicit def double2number(i: Double): Number = Number(i)
  implicit def string2text(s: String): Text = Text(s)
  implicit def boolean2bool(b: Boolean): Bool = Bool(b)
  implicit def seq2lst(seq: Seq[Value]): Lst = Lst(seq)
  implicit def map2obj(map: Map[String, Value]): Obj = Obj(map)
}
| InnovaCo/binders | src/main/scala/eu/inn/binders/value/package.scala | Scala | bsd-3-clause | 834 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
/** An interned wrapper around a string: because instances are cached by the
 *  companion's uniqueness cache, two symbols with equal names are the very
 *  same object, so comparisons use reference equality.
 *
 *  Scala's built-in quote syntax creates instances of this class: the term
 *  `'mysym` desugars to `Symbol("mysym")`.
 */
final class Symbol private (val name: String) extends Serializable {
  /** Renders this symbol in its quoted source form. */
  override def toString(): String = s"'$name"

  /** Re-intern on deserialization so the reference-equality invariant holds. */
  @throws(classOf[java.io.ObjectStreamException])
  private def readResolve(): Any = Symbol.apply(name)

  override def hashCode = name.hashCode()
  override def equals(other: Any) = this eq other.asInstanceOf[AnyRef]
}
/** Interning factory for [[Symbol]], backed by a weak-reference cache. */
object Symbol extends UniquenessCache[String, Symbol] {
  /** Returns the unique symbol for `name`, creating and caching it if absent. */
  override def apply(name: String): Symbol = super.apply(name)
  protected def valueFromKey(name: String): Symbol = new Symbol(name)
  protected def keyFromValue(sym: Symbol): Option[String] = Some(sym.name)
}
/** This is private so it won't appear in the library API, but
 *  abstracted to offer some hope of reusability.
 *
 *  A key -> value interning cache with weak keys and weak value references,
 *  guarded by a read/write lock: lookups take the read lock, insertions the
 *  write lock (re-checking under it, double-checked-locking style).
 */
private[scala] abstract class UniquenessCache[K, V >: Null]
{
  import java.lang.ref.WeakReference
  import java.util.WeakHashMap
  import java.util.concurrent.locks.ReentrantReadWriteLock

  private[this] val rwl = new ReentrantReadWriteLock()
  private[this] val rlock = rwl.readLock
  private[this] val wlock = rwl.writeLock
  // Weak keys AND weak value references: entries vanish once the cached
  // value is no longer strongly reachable elsewhere.
  private[this] val map = new WeakHashMap[K, WeakReference[V]]

  /** Creates the canonical value for a key (called at most once per live key). */
  protected def valueFromKey(k: K): V
  /** Recovers the key from a value; used by `unapply`. */
  protected def keyFromValue(v: V): Option[K]

  def apply(name: K): V = {
    // Fast path: read-locked lookup; null when absent or already collected.
    def cached(): V = {
      rlock.lock
      try {
        val reference = map get name
        if (reference == null) null
        else reference.get  // will be null if we were gc-ed
      }
      finally rlock.unlock
    }
    // Slow path: write-locked, re-checks the cache before inserting.
    def updateCache(): V = {
      wlock.lock
      try {
        val res = cached()
        if (res != null) res
        else {
          // If we don't remove the old String key from the map, we can
          // wind up with one String as the key and a different String as
          // the name field in the Symbol, which can lead to surprising GC
          // behavior and duplicate Symbols. See scala/bug#6706.
          map remove name
          val sym = valueFromKey(name)
          map.put(name, new WeakReference(sym))
          sym
        }
      }
      finally wlock.unlock
    }

    val res = cached()
    if (res == null) updateCache()
    else res
  }
  def unapply(other: V): Option[K] = keyFromValue(other)
}
| martijnhoekstra/scala | src/library/scala/Symbol.scala | Scala | apache-2.0 | 2,929 |
import scala.compiletime._

// works: summon resolves the local given inside a plain (non-inline) block
val a = {
  given Int = 0
  summon[Int]
}

// doesn't: regression case — summonInline inside an inline def should also
// resolve the locally defined given once the call is inlined
inline def summonInt = {
  given Int = 0
  summonInline[Int]
}

val b = summonInt
| lampepfl/dotty | tests/pos/i12997.scala | Scala | apache-2.0 | 181 |
package eu.semberal.dbstress.config
import java.io.{BufferedReader, InputStreamReader}
import better.files._
import org.scalatest.flatspec.AnyFlatSpec
/** Verifies the YAML configuration parser's unit-name validation. */
class ConfigParserTest extends AnyFlatSpec {

  "ConfigParser" should "correctly reject an unit with non-alphanumeric characters in the name" in {
    // Fixture with a unit named "Foo Bar" (contains a space).
    val configStream =
      this.getClass.getClassLoader.getResourceAsStream("test_config2.yaml")
    val reader = new BufferedReader(new InputStreamReader(configStream))
    val parsed = ConfigParser.parseConfigurationYaml(reader.autoClosed, None)
    val expectedError =
      s"""Invalid value "Foo Bar" for configuration entry: "unit_name""""
    assert(parsed === Left(expectedError))
  }
}
| semberal/dbstress | src/test/scala/eu/semberal/dbstress/config/ConfigParserTest.scala | Scala | apache-2.0 | 664 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.lewuathe.dllib.example
import org.apache.log4j.Level
import org.apache.log4j.Logger
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, SparkSession}
import com.lewuathe.dllib.graph.Graph
import com.lewuathe.dllib.layer.{AffineLayer, ReLULayer, SoftmaxLayer}
import com.lewuathe.dllib.model.{InMemoryModel, Model}
import com.lewuathe.dllib.network.Network
import com.lewuathe.dllib.solver.MultiLayerPerceptron
/** Trains a 784-100-10 multi-layer perceptron on a sample of MNIST and
 *  reports the fraction of correctly predicted samples.
 */
class MNISTApp(miniBatchFraction: Double, numIter: Int, learningRate: Double) {
  // Number of MNIST rows used for both training and evaluation.
  var numSamples = 5000

  /** Loads MNIST from `path` and converts `numSamples` rows to a DataFrame. */
  def createMNISTDataset(path: String, sc: SparkContext): DataFrame = {
    val dataset = MNIST(path)
    MNIST.asDF(dataset, sc, numSamples)
  }

  /** Builds and trains the network, returning the accuracy on the training
   *  data (the same DataFrame is reused for fitting and evaluation).
   */
  def submit(spark: SparkSession): Double = {
    // NOTE(review): sqlContext is never used below.
    val sqlContext = spark.sqlContext

    val df = createMNISTDataset("/tmp/", spark.sparkContext)

    // Layer stack: 784 -> affine 100 -> ReLU -> affine 10 -> softmax.
    val nn3Graph = new Graph(
      Array(
        new AffineLayer(100, 784),
        new ReLULayer(100, 100),
        new AffineLayer(10, 100),
        new SoftmaxLayer(10, 10)
      ))

    val nn3Model = InMemoryModel(nn3Graph)

    val nn3 = Network(nn3Model, nn3Graph)

    val multilayerPerceptron = new MultiLayerPerceptron("MNIST", nn3)
    multilayerPerceptron.setNumIterations(numIter)
    multilayerPerceptron.miniBatchFraction = miniBatchFraction
    multilayerPerceptron.learningRate = learningRate
    val model = multilayerPerceptron.fit(df)

    val result = model.transform(df)
    result.filter("label = prediction").count() / numSamples.toDouble
  }
}
/** Convenience entry points for running [[MNISTApp]] with various configs. */
object MNISTApp {
  /** Runs with defaults: 3% mini-batches, 10 iterations, learning rate 0.5. */
  def submit(spark: SparkSession): Double =
    new MNISTApp(0.03, 10, 0.5).submit(spark)

  /** Runs with the given hyper-parameters on an existing session
   *  (silencing Spark's logging first).
   */
  def apply(spark: SparkSession,
            miniBatchFraction: Double,
            numIterations: Int,
            learningRate: Double): Double = {
    Logger.getLogger("org.apache.spark").setLevel(Level.OFF)
    new MNISTApp(miniBatchFraction, numIterations, learningRate).submit(spark)
  }

  /** Same as above, but builds (or reuses) a session from the given config. */
  def apply(sparkConf: SparkConf,
            miniBatchFraction: Double,
            numIterations: Int,
            learningRate: Double): Double = {
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()
    Logger.getLogger("org.apache.spark").setLevel(Level.OFF)
    new MNISTApp(miniBatchFraction, numIterations, learningRate).submit(spark)
  }
}
| Lewuathe/neurallib | src/main/scala/com/lewuathe/dllib/example/MNISTApp.scala | Scala | mit | 3,126 |
/*
Copyright 2014 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.algebird.statistics
/**
* used to keep track of stats and time spent processing iterators passed to the methods
* @author Julien Le Dem
*/
/**
 * used to keep track of stats and time spent processing iterators passed to the methods
 * @author Julien Le Dem
 */
private class IterCallStatistics(threadSafe: Boolean) {

  /**
   * internal collection of a distribution of values on a log2 scale
   */
  private class Statistics(threadSafe: Boolean) {
    import scala.math.min
    import java.lang.Long.numberOfLeadingZeros

    val maxBucket = 10
    // distribution(i) counts values whose log2(v + 1) is i; the last bucket
    // absorbs everything >= 2^maxBucket.
    val distribution = IndexedSeq.fill(maxBucket + 1) { Counter(threadSafe) }
    val total = Counter(threadSafe)

    /** Records one observed value. */
    def put(v: Long): Unit = {
      total.add(v)
      // log2(v + 1) for v up to 2^maxBucket
      val bucket = min(64 - numberOfLeadingZeros(v), maxBucket)
      distribution(bucket).increment
    }

    /** Number of recorded values (sum over all buckets). */
    def count: Long = distribution.foldLeft(0L) { _ + _.get } // sum

    def pow2(i: Int): Int = 1 << i

    override def toString =
      distribution.zipWithIndex
        .map {
          case (v, i) =>
            (if (i == maxBucket) ">" else "<" + pow2(i)) + ": " + v
        }
        .mkString(", ") + ", avg=" + total.toDouble / count + " count=" + count
  }

  private[this] final val countStats = new Statistics(threadSafe)
  private[this] final val totalCallTime = Counter(threadSafe)

  /** used to count how many values are pulled from the Iterator without iterating twice */
  private class CountingIterator[T](val i: Iterator[T]) extends Iterator[T] {
    private[this] final var nextCount: Long = 0
    override def hasNext: Boolean = i.hasNext
    override def next(): T = {
      val n = i.next()
      nextCount += 1
      n
    }
    def getNextCount: Long = nextCount
  }

  /** measures the time spent calling f on iter and the size of iter */
  def measure[T, O](iter: TraversableOnce[T])(f: (TraversableOnce[T]) => O): O = {
    val ci = new CountingIterator(iter.toIterator)
    val t0 = System.currentTimeMillis()
    val r = f(ci)
    val t1 = System.currentTimeMillis()
    // Record how many elements f actually consumed, and the wall-clock cost.
    countStats.put(ci.getNextCount)
    totalCallTime.add(t1 - t0)
    r
  }

  def getCallCount: Long = countStats.count
  def getTotalCallTime = totalCallTime.get

  override def toString =
    countStats.toString + ", " +
      "total time: " + totalCallTime + "ms, " +
      "avg time: " + (totalCallTime.toDouble / countStats.count)
}
| nevillelyh/algebird | algebird-core/src/main/scala/com/twitter/algebird/statistics/IterCallStatistics.scala | Scala | apache-2.0 | 2,821 |
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.util
import com.spotify.scio.values.WindowedValue
import org.apache.beam.sdk.transforms.DoFn
import org.apache.beam.sdk.transforms.DoFn.ProcessElement
import org.apache.beam.sdk.transforms.windowing.BoundedWindow
import com.twitter.chill.ClosureCleaner
/** Factories for Beam [[DoFn]]s whose user functions see elements together
 *  with their window, timestamp and pane ([[WindowedValue]]).
 */
private[scio] object FunctionsWithWindowedValue {
  /** DoFn that keeps an element iff `f` holds for its windowed view. */
  def filterFn[T, U](f: WindowedValue[T] => Boolean): DoFn[T, T] =
    new NamedDoFn[T, T] {
      // Clean the closure so the DoFn stays serializable across workers.
      val g = ClosureCleaner.clean(f) // defeat closure
      @ProcessElement
      private[scio] def processElement(
        c: DoFn[T, T]#ProcessContext,
        window: BoundedWindow
      ): Unit = {
        val wv = WindowedValue(c.element(), c.timestamp(), window, c.pane())
        if (g(wv)) c.output(c.element())
      }
    }

  /** DoFn that expands each element into zero or more windowed outputs,
   *  emitting each with its own timestamp.
   */
  def flatMapFn[T, U](f: WindowedValue[T] => TraversableOnce[WindowedValue[U]]): DoFn[T, U] =
    new NamedDoFn[T, U] {
      // Clean the closure so the DoFn stays serializable across workers.
      val g = ClosureCleaner.clean(f) // defeat closure
      @ProcessElement
      private[scio] def processElement(
        c: DoFn[T, U]#ProcessContext,
        window: BoundedWindow
      ): Unit = {
        val wv = WindowedValue(c.element(), c.timestamp(), window, c.pane())
        val i = g(wv).toIterator
        while (i.hasNext) {
          val v = i.next()
          c.outputWithTimestamp(v.value, v.timestamp)
        }
      }
    }

  /** DoFn that maps each element to one windowed output with its own timestamp. */
  def mapFn[T, U](f: WindowedValue[T] => WindowedValue[U]): DoFn[T, U] =
    new NamedDoFn[T, U] {
      // Clean the closure so the DoFn stays serializable across workers.
      val g = ClosureCleaner.clean(f) // defeat closure
      @ProcessElement
      private[scio] def processElement(
        c: DoFn[T, U]#ProcessContext,
        window: BoundedWindow
      ): Unit = {
        val wv = g(WindowedValue(c.element(), c.timestamp(), window, c.pane()))
        c.outputWithTimestamp(wv.value, wv.timestamp)
      }
    }
}
| spotify/scio | scio-core/src/main/scala/com/spotify/scio/util/FunctionsWithWindowedValue.scala | Scala | apache-2.0 | 2,381 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.spark.sql
import java.sql.Date
import java.sql.Timestamp
import java.util.{Map => JMap}
import scala.collection.JavaConverters.mapAsScalaMapConverter
import scala.collection.{Map => SMap}
import scala.collection.Seq
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{ArrayType, DataType, DataTypes, MapType, StructType}
import org.apache.spark.sql.types.DataTypes.BinaryType
import org.apache.spark.sql.types.DataTypes.BooleanType
import org.apache.spark.sql.types.DataTypes.ByteType
import org.apache.spark.sql.types.DataTypes.DateType
import org.apache.spark.sql.types.DataTypes.DoubleType
import org.apache.spark.sql.types.DataTypes.FloatType
import org.apache.spark.sql.types.DataTypes.IntegerType
import org.apache.spark.sql.types.DataTypes.LongType
import org.apache.spark.sql.types.DataTypes.ShortType
import org.apache.spark.sql.types.DataTypes.StringType
import org.apache.spark.sql.types.DataTypes.TimestampType
import org.elasticsearch.hadoop.cfg.ConfigurationOptions.ES_SPARK_DATAFRAME_WRITE_NULL_VALUES_DEFAULT
import org.elasticsearch.hadoop.cfg.Settings
import org.elasticsearch.hadoop.serialization.EsHadoopSerializationException
import org.elasticsearch.hadoop.serialization.Generator
import org.elasticsearch.hadoop.serialization.SettingsAware
import org.elasticsearch.hadoop.serialization.builder.FilteringValueWriter
import org.elasticsearch.hadoop.serialization.builder.ValueWriter.Result
import org.elasticsearch.hadoop.util.unit.Booleans
/** Serializes Spark SQL values (Rows with schemas, maps, sequences) into
 *  Elasticsearch JSON via the given [[Generator]]. Returns [[Result]] values
 *  rather than throwing so callers can surface the offending value.
 */
class DataFrameValueWriter(writeUnknownTypes: Boolean = false) extends FilteringValueWriter[Any] with SettingsAware {

  def this() {
    this(false)
  }

  // Whether null fields are emitted as JSON nulls (vs. skipped entirely).
  private var writeNullValues: Boolean = Booleans.parseBoolean(ES_SPARK_DATAFRAME_WRITE_NULL_VALUES_DEFAULT)

  override def setSettings(settings: Settings): Unit = {
    super.setSettings(settings)
    writeNullValues = settings.getDataFrameWriteNullValues
  }

  /** Entry point: dispatches on the top-level value shape.
   *  NOTE(review): the match is not exhaustive — any other value type
   *  raises a MatchError here.
   */
  override def write(value: Any, generator: Generator): Result = {
    value match {
      case Tuple2(row, schema: StructType) =>
        writeStruct(schema, row, generator)
      case map: Map[_, _] =>
        writeMapWithInferredSchema(map, generator)
      // NOTE(review): Seq[Row] is type-erased; this case matches any Seq.
      case seq: Seq[Row] =>
        writeArray(seq, generator)
    }
  }

  /** Writes a sequence of Rows, inferring the element schema from the head. */
  private[spark] def writeArray(value: Seq[Row], generator: Generator): Result = {
    if (value.nonEmpty) {
      val schema = value.head.schema
      val result = write(DataTypes.createArrayType(schema), value, generator)
      if (!result.isSuccesful) {
        return handleUnknown(value, generator)
      }
    } else {
      generator.writeBeginArray().writeEndArray()
    }
    Result.SUCCESFUL()
  }

  /** Writes a Row as a JSON object, honoring field filtering and the
   *  null-handling setting. NOTE(review): only Row values are handled —
   *  other inputs raise a MatchError.
   */
  private[spark] def writeStruct(schema: StructType, value: Any, generator: Generator): Result = {
    value match {
      case r: Row =>
        generator.writeBeginObject()

        schema.fields.view.zipWithIndex foreach {
          case (field, index) =>
            if (shouldKeep(generator.getParentPath,field.name)) {
              if (!r.isNullAt(index)) {
                generator.writeFieldName(field.name)
                val result = write(field.dataType, r(index), generator)
                if (!result.isSuccesful) {
                  return handleUnknown(value, generator)
                }
              } else if (writeNullValues) {
                generator.writeFieldName(field.name)
                generator.writeNull()
              }
            }
        }
        generator.writeEndObject()

        Result.SUCCESFUL()
    }
  }

  /** Schema-directed dispatch for nested values. */
  private[spark] def write(schema: DataType, value: Any, generator: Generator): Result = {
    schema match {
      case s @ StructType(_) => writeStruct(s, value, generator)
      case a @ ArrayType(_, _) => writeArray(a, value, generator)
      case m @ MapType(_, _, _) => writeMap(m, value, generator)
      case _ => writePrimitive(schema, value, generator)
    }
  }

  private[spark] def writeArray(schema: ArrayType, value: Any, generator: Generator): Result = {
    value match {
      case a: Array[_] => doWriteSeq(schema.elementType, a, generator)
      case s: Seq[_] => doWriteSeq(schema.elementType, s, generator)
      // unknown array type
      case _ => handleUnknown(value, generator)
    }
  }

  private def doWriteSeq(schema: DataType, value: Seq[_], generator: Generator): Result = {
    generator.writeBeginArray()
    if (value != null) {
      value.foreach { v =>
        val result = write(schema, v, generator)
        if (!result.isSuccesful()) {
          return handleUnknown(value, generator)
        }
      }
    }
    generator.writeEndArray()
    Result.SUCCESFUL()
  }

  private[spark] def writeMap(schema: MapType, value: Any, generator: Generator): Result = {
    value match {
      case sm: SMap[_, _] => doWriteMap(schema, sm, generator)
      case jm: JMap[_, _] => doWriteMap(schema, jm.asScala, generator)
      // unknown map type
      case _ => handleUnknown(value, generator)
    }
  }

  /** Writes map entries as object fields; keys are stringified and filtered. */
  private def doWriteMap(schema: MapType, value: SMap[_, _], generator: Generator): Result = {
    generator.writeBeginObject()

    if (value != null) {
      for ((k, v) <- value) {
        if (shouldKeep(generator.getParentPath(), k.toString())) {
          generator.writeFieldName(k.toString)
          val result = write(schema.valueType, v, generator)
          if (!result.isSuccesful()) {
            return handleUnknown(v, generator)
          }
        }
      }
    }

    generator.writeEndObject()
    Result.SUCCESFUL()
  }

  /** Writes a schemaless map by inferring the value type from a sample value. */
  private def writeMapWithInferredSchema(value: Any, generator: Generator): Result = {
    value match {
      case sm: SMap[_, _] => doWriteMapWithInferredSchema(sm, generator)
      case jm: JMap[_, _] => doWriteMapWithInferredSchema(jm.asScala, generator)
      // unknown map type
      case _ => handleUnknown(value, generator)
    }
  }

  private def doWriteMapWithInferredSchema(map: SMap[_, _], generator: Generator): Result = {
    if (map != null && map.valuesIterator.hasNext) {
      // Infer the value type from the first non-null value; assumes the map
      // is homogeneous.
      val sampleValueOption = getFirstNotNullElement(map.valuesIterator)
      val schema = inferMapSchema(sampleValueOption)
      doWriteMap(schema, map, generator)
    } else {
      writeEmptyMap(generator)
    }
  }

  private def writeEmptyMap(generator: Generator): Result = {
    generator.writeBeginObject().writeEndObject()
    Result.SUCCESFUL()
  }

  private def inferMapSchema(valueOption: Option[Any]): MapType = {
    if(valueOption.isDefined) {
      val valueType = inferType(valueOption.get)
      MapType(StringType, valueType) //The key type is never read
    } else {
      MapType(StringType, StringType) //Does not matter if the map is empty or has no values
    }
  }

  /** Infers the element type of an array from its first non-null element. */
  def inferArraySchema(array: Array[_]): DataType = {
    val EMPTY_ARRAY_TYPE = StringType //Makes no difference for an empty array
    if (array.isEmpty) {
      EMPTY_ARRAY_TYPE
    } else {
      val sampleValueOption = getFirstNotNullElement(array.iterator)
      if (sampleValueOption.isDefined) {
        inferType(sampleValueOption.get)
      }
      else {
        EMPTY_ARRAY_TYPE
      }
    }
  }

  def getFirstNotNullElement(iterator: Iterator[_]): Option[Any] = {
    iterator.find(value => Option(value).isDefined)
  }

  /** Maps a runtime value to its Spark SQL DataType.
   *  NOTE(review): not exhaustive — unsupported value types raise MatchError.
   */
  private def inferType(value: Any): DataType = {
    value match {
      case _: String => StringType
      case _: Int => IntegerType
      case _: Integer => IntegerType
      case _: Boolean => BooleanType
      case _: java.lang.Boolean => BooleanType
      case _: Short => ShortType
      case _: java.lang.Short => ShortType
      case _: Long => LongType
      case _: java.lang.Long => LongType
      case _: Double => DoubleType
      case _: java.lang.Double => DoubleType
      case _: Float => FloatType
      case _: java.lang.Float => FloatType
      case _: Timestamp => TimestampType
      case _: Date => DateType
      case _: Array[Byte] => BinaryType
      case array: Array[_] => ArrayType(inferArraySchema(array))
      case map: Map[_, _] => inferMapSchema(getFirstNotNullElement(map.valuesIterator))
    }
  }

  /** Writes a leaf value; temporal types are encoded as epoch milliseconds. */
  private[spark] def writePrimitive(schema: DataType, value: Any, generator: Generator): Result = {
    if (value == null) {
      generator.writeNull()
    }
    else schema match {
      case BinaryType => generator.writeBinary(value.asInstanceOf[Array[Byte]])
      case BooleanType => generator.writeBoolean(value.asInstanceOf[Boolean])
      case ByteType => generator.writeNumber(value.asInstanceOf[Byte])
      case ShortType => generator.writeNumber(value.asInstanceOf[Short])
      case IntegerType => generator.writeNumber(value.asInstanceOf[Int])
      case LongType => generator.writeNumber(value.asInstanceOf[Long])
      case DoubleType => generator.writeNumber(value.asInstanceOf[Double])
      case FloatType => generator.writeNumber(value.asInstanceOf[Float])
      case TimestampType => generator.writeNumber(value.asInstanceOf[Timestamp].getTime())
      case DateType => generator.writeNumber(value.asInstanceOf[Date].getTime())
      case StringType => generator.writeString(value.toString)
      case _ => {
        // DecimalType is matched by class name to stay compatible with both
        // old (catalyst) and new (sql.types) package locations.
        val className = schema.getClass().getName()
        if ("org.apache.spark.sql.types.DecimalType".equals(className) || "org.apache.spark.sql.catalyst.types.DecimalType".equals(className)) {
          throw new EsHadoopSerializationException("Decimal types are not supported by Elasticsearch - consider using a different type (such as string)")
        }
        return handleUnknown(value, generator)
      }
    }

    Result.SUCCESFUL()
  }

  /** Fallback for unencodable values: fail, or stringify when configured to. */
  protected def handleUnknown(value: Any, generator: Generator): Result = {
    if (!writeUnknownTypes) {
      // NOTE(review): library code printing to stdout — consider a logger.
      println("can't handle type " + value);
      Result.FAILED(value)
    } else {
      generator.writeString(value.toString())
      Result.SUCCESFUL()
    }
  }
}
package org.helianto.ingress.controller
import org.springframework.security.oauth2.provider.OAuth2Authentication
/**
 * Mix-in to implicitly extract entity, user or identity ids from the OAuth2
 * principal's granted authorities (each encoded as a PREFIX + id string).
 */
trait AuthorityExtractor {

  /** Id carried by the "ENTITY_ID_" authority, or "" when absent. */
  def _entityId(implicit principal: OAuth2Authentication) = id(principal, "ENTITY_ID_")

  /** Id carried by the "USER_ID_" authority, or "" when absent. */
  def _userId(implicit principal: OAuth2Authentication) = id(principal, "USER_ID_")

  /** Id carried by the "SELF_ID_" authority, or "" when absent. */
  def _identityId(implicit principal: OAuth2Authentication) = id(principal, "SELF_ID_")

  /** Strips `prefix` from the first matching authority; "" when none matches. */
  private def id(principal: OAuth2Authentication, prefix: String) = {
    // Explicit JavaConverters (.asScala) instead of the deprecated implicit
    // JavaConversions wildcard import.
    import scala.collection.JavaConverters._
    principal
      .getAuthorities
      .asScala
      .iterator
      .map(_.toString)
      .collectFirst { case a if a.startsWith(prefix) => a.substring(prefix.length) }
      .getOrElse("")
  }
}
| iservport/helianto-spring | src/main/scala/org/helianto/ingress/controller/AuthorityExtractor.scala | Scala | apache-2.0 | 765 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.