code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.orc
import java.io.File
import scala.reflect.ClassTag
import scala.reflect.runtime.universe.TypeTag
import org.apache.commons.io.FileUtils
import org.scalatest.BeforeAndAfterAll
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.expressions.{Attribute, Predicate}
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.execution.datasources.FileBasedDataSourceTest
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2ScanRelation
import org.apache.spark.sql.execution.datasources.v2.orc.OrcScan
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.ORC_IMPLEMENTATION
/**
* OrcTest
* -> OrcSuite
* -> OrcSourceSuite
* -> HiveOrcSourceSuite
* -> OrcQueryTests
* -> OrcQuerySuite
* -> HiveOrcQuerySuite
* -> OrcPartitionDiscoveryTest
* -> OrcPartitionDiscoverySuite
* -> HiveOrcPartitionDiscoverySuite
* -> OrcFilterSuite
* -> HiveOrcFilterSuite
*/
abstract class OrcTest extends QueryTest with FileBasedDataSourceTest with BeforeAndAfterAll {

  // The ORC implementation under test; subclasses override with "hive" where needed.
  val orcImp: String = "native"

  // Saved so afterAll() can restore whatever implementation was active before the suite.
  private var originalConfORCImplementation = "native"

  override protected val dataSourceName: String = "orc"
  override protected val vectorizedReaderEnabledKey: String =
    SQLConf.ORC_VECTORIZED_READER_ENABLED.key

  /** Remembers the current ORC implementation and switches to [[orcImp]] for this suite. */
  protected override def beforeAll(): Unit = {
    super.beforeAll()
    originalConfORCImplementation = spark.conf.get(ORC_IMPLEMENTATION)
    spark.conf.set(ORC_IMPLEMENTATION.key, orcImp)
  }

  /** Restores the ORC implementation that was active before this suite ran. */
  protected override def afterAll(): Unit = {
    spark.conf.set(ORC_IMPLEMENTATION.key, originalConfORCImplementation)
    super.afterAll()
  }

  /**
   * Writes `data` to an ORC file, which is then passed to `f` and will be deleted after `f`
   * returns.
   */
  protected def withOrcFile[T <: Product: ClassTag: TypeTag]
      (data: Seq[T])
      (f: String => Unit): Unit = withDataSourceFile(data)(f)

  /**
   * Writes `data` to an ORC file and reads it back as a `DataFrame`,
   * which is then passed to `f`. The ORC file will be deleted after `f` returns.
   */
  protected def withOrcDataFrame[T <: Product: ClassTag: TypeTag]
      (data: Seq[T], testVectorized: Boolean = true)
      (f: DataFrame => Unit): Unit = withDataSourceDataFrame(data, testVectorized)(f)

  /**
   * Writes `data` to an ORC file, reads it back as a `DataFrame` and registers it as a
   * temporary table named `tableName`, then calls `f`. The temporary table together with the
   * ORC file will be dropped/deleted after `f` returns.
   */
  protected def withOrcTable[T <: Product: ClassTag: TypeTag]
      (data: Seq[T], tableName: String, testVectorized: Boolean = true)
      (f: => Unit): Unit = withDataSourceTable(data, tableName, testVectorized)(f)

  /** Materializes `data` as an ORC file at `path`. */
  protected def makeOrcFile[T <: Product: ClassTag: TypeTag](
      data: Seq[T], path: File): Unit = makeDataSourceFile(data, path)

  /** Materializes `df` as an ORC file at `path`. */
  protected def makeOrcFile[T <: Product: ClassTag: TypeTag](
      df: DataFrame, path: File): Unit = makeDataSourceFile(df, path)

  /**
   * Asserts that `predicate` lets the ORC reader skip at least one stripe: after writing `df`
   * into `numRows` partitions (so that several ORC files/stripes exist), the row count that
   * survives the data-source filter must be strictly smaller than `numRows`.
   */
  protected def checkPredicatePushDown(df: DataFrame, numRows: Int, predicate: String): Unit = {
    withTempPath { file =>
      // It needs to repartition data so that we can have several ORC files
      // in order to skip stripes in ORC.
      df.repartition(numRows).write.orc(file.getCanonicalPath)
      val actual = stripSparkFilter(spark.read.orc(file.getCanonicalPath).where(predicate)).count()
      assert(actual < numRows)
    }
  }

  /**
   * Asserts that `predicate` can NOT be converted into an ORC `SearchArgument`.
   *
   * @param predicate the Catalyst predicate to check
   * @param noneSupported when `true`, additionally requires that nothing at all gets
   *                      pushed down into the scan (for fully-unsupported filters)
   * @param df the source DataFrame the query is built from
   */
  protected def checkNoFilterPredicate
      (predicate: Predicate, noneSupported: Boolean = false)
      (implicit df: DataFrame): Unit = {
    val output = predicate.collect { case a: Attribute => a }.distinct
    val query = df
      .select(output.map(e => Column(e)): _*)
      .where(Column(predicate))

    query.queryExecution.optimizedPlan match {
      case PhysicalOperation(_, filters,
          DataSourceV2ScanRelation(_, o: OrcScan, _)) =>
        assert(filters.nonEmpty, "No filter is analyzed from the given query")
        if (noneSupported) {
          assert(o.pushedFilters.isEmpty, "Unsupported filters should not show in pushed filters")
        } else {
          assert(o.pushedFilters.nonEmpty, "No filter is pushed down")
          val maybeFilter = OrcFilters.createFilter(query.schema, o.pushedFilters)
          // This suite verifies NON-convertible predicates, so the conversion must fail.
          // (Original message read "Couldn't generate filter predicate", contradicting
          // the `isEmpty` condition being asserted.)
          assert(maybeFilter.isEmpty,
            s"Unexpectedly generated a filter predicate for ${o.pushedFilters}")
        }
      case _ =>
        throw new AnalysisException("Can not match OrcTable in the query.")
    }
  }

  /**
   * Loads an ORC file from the test-resource classpath as a `DataFrame`.
   * The resource is first copied into a temp file to avoid `URISyntaxException`
   * when `sql/hive` accesses the resources in `sql/core`.
   */
  protected def readResourceOrcFile(name: String): DataFrame = {
    val url = Thread.currentThread().getContextClassLoader.getResource(name)
    val file = File.createTempFile("orc-test", ".orc")
    file.deleteOnExit()
    FileUtils.copyURLToFile(url, file)
    spark.read.orc(file.getAbsolutePath)
  }
}
| kevinyu98/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcTest.scala | Scala | apache-2.0 | 5,819 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.tail
package batches
import monix.execution.internal.Platform.recommendedBatchSize
import scala.collection.mutable.{ArrayBuffer, ListBuffer}
import scala.reflect.ClassTag
/** Similar to Java's and Scala's `Iterator`, the `BatchCursor` type
 * can be used to iterate over the data in a collection, but it cannot
* be used to modify the underlying collection.
*
* Inspired by the standard `Iterator`, provides a way to efficiently
* apply operations such as `map`, `filter`, `collect` on the underlying
* collection without such operations having necessarily lazy behavior.
* So in other words, when wrapping a standard `Array`, an application of `map`
* will copy the data to a new `Array` instance with its elements
* modified, immediately and is thus having strict (eager) behavior.
* In other cases, when wrapping potentially infinite collections, like
* `Iterable` or `Stream`, that's when lazy behavior happens.
*
* Sample:
* {{{
* try while (cursor.hasNext()) {
* println(cursor.next())
* }
* catch {
* case NonFatal(ex) => report(ex)
* }
* }}}
*
* This class is provided as an alternative to Scala's
* [[scala.collection.Iterator Iterator]] because:
*
* - the list of supported operations is smaller
* - implementations specialized for primitives are provided
* to avoid boxing
* - depending on the implementation, the behaviour of operators
* can be eager (e.g. `map`, `filter`), but only in case the
* source cursor doesn't need to be consumed (if the cursor is
* backed by an array, then a new array gets created, etc.)
 * - the `recommendedBatchSize` can signal how many items
 * can be processed in one batch
*
* Used in the [[Iterant]] implementation.
*
* @define strictOrLazyNote NOTE: application of this function can be
* either strict or lazy (depending on the underlying cursor type),
* but it does not modify the original collection.
*/
abstract class BatchCursor[+A] extends Serializable {
  /** Tests whether another element is available.
    *
    * Can be side-effecting and can even throw, because for certain
    * sources the only way to learn whether more data exists involves
    * triggering dangerous side effects.
    *
    * Idempotent until [[next]] is invoked: repeated `hasNext` calls
    * are allowed and implementations are advised to memoize the
    * answer.
    *
    * @return `true` if a subsequent call to [[next]] will yield an
    *         element, `false` otherwise
    */
  def hasNext(): Boolean

  /** Produces the next element, mutating the cursor's internal state
    * (and possibly throwing exceptions).
    *
    * @return the next element when [[hasNext]] is `true`; otherwise
    *         the behavior is undefined (exceptions may be thrown)
    */
  def next(): A

  /** The batch size recommended when this cursor gets processed
    * eagerly, in batches.
    *
    * Guideline — adjust according to how expensive processing is:
    *
    *  - a strict, finite collection can use something generous
    *    like `1024`
    *  - a cheap infinite iterator (e.g. `Iterator.range`) could
    *    use `128`
    *  - anything doing I/O or blocking threads should use `1`,
    *    i.e. one item at a time
    */
  def recommendedBatchSize: Int

  /** `true` when the cursor has no more elements to process.
    *
    * Alias for `!cursor.hasNext()`.
    */
  def isEmpty: Boolean = !hasNext()

  /** `true` when the cursor still has elements to process.
    *
    * Alias for [[hasNext]].
    */
  def nonEmpty: Boolean = hasNext()

  /** Creates a cursor emitting only the first `n` values of this one.
    *
    * @param n is the number of values to take
    * @return a cursor producing at most the first `n` values, or the
    *         whole sequence if it has fewer than `n` values
    */
  def take(n: Int): BatchCursor[A]

  /** Creates a cursor that skips the first `n` elements of this one
    * (or everything, if the cursor is shorter than `n`).
    *
    * @param n the number of elements to drop
    * @return a cursor producing all values of this cursor except the
    *         first `n`
    */
  def drop(n: Int): BatchCursor[A]

  /** Creates a cursor over an interval of this cursor's values.
    *
    * @param from the index of the first element included in the slice
    * @param until the index of the first element following the slice
    * @return a cursor equivalent to `drop(from).take(until - from)`
    */
  def slice(from: Int, until: Int): BatchCursor[A]

  /** Creates a cursor whose values are this cursor's values
    * transformed by `f`.
    *
    * NOTE: application can be either strict or lazy (depending on the
    * underlying cursor type), but the original collection is never
    * modified.
    *
    * @param f is the transformation function
    * @return a cursor applying `f` to every produced value
    */
  def map[B](f: A => B): BatchCursor[B]

  /** Creates a cursor over the elements of this one that satisfy the
    * predicate `p`, preserving order.
    *
    * NOTE: application can be either strict or lazy (depending on the
    * underlying cursor type), but the original collection is never
    * modified.
    *
    * @param p the predicate used to test values
    * @return a cursor producing only values for which `p` holds
    */
  def filter(p: A => Boolean): BatchCursor[A]

  /** Creates a cursor by applying a partial function to every value,
    * dropping those for which it is not defined.
    *
    * NOTE: application can be either strict or lazy (depending on the
    * underlying cursor type), but the original collection is never
    * modified.
    *
    * @param pf the partial function that filters and maps the cursor
    * @return a cursor yielding `pf(x)` for each value `x` where `pf`
    *         is defined
    */
  def collect[B](pf: PartialFunction[A,B]): BatchCursor[B]

  /** Applies a binary operator to a start value and all elements of
    * this cursor, going left to right.
    *
    * NOTE: this consumes the cursor completely.
    *
    * @param initial is the start value
    * @param op the binary operator to apply
    * @tparam R is the result type of the binary operator
    * @return the left fold of this cursor with `op`, starting from
    *         `initial`; just `initial` when the cursor is empty
    */
  def foldLeft[R](initial: R)(op: (R,A) => R): R = {
    var acc = initial
    while (hasNext()) {
      acc = op(acc, next())
    }
    acc
  }

  /** Drains this cursor into an immutable Scala `List`. */
  def toList: List[A] = {
    // Prepend (O(1)) then reverse once, preserving encounter order.
    var acc = List.empty[A]
    while (hasNext()) acc = next() :: acc
    acc.reverse
  }

  /** Drains this cursor into an `Array`. */
  def toArray[B >: A : ClassTag]: Array[B] = {
    val builder = Array.newBuilder[B]
    while (hasNext()) builder += next()
    builder.result()
  }

  /** Drains this cursor into a reusable, array-backed [[Batch]]. */
  def toBatch: Batch[A] = {
    val boxed = asInstanceOf[BatchCursor[AnyRef]].toArray
    Batch.fromArray(boxed).asInstanceOf[Batch[A]]
  }

  /** Converts this cursor into a Scala `Iterator`. */
  def toIterator: Iterator[A]
}
/** [[BatchCursor]] builders.
*
* @define fromAnyArrayDesc Builds an [[ArrayCursor]] instance
* from any array of boxed values.
*
* This will have lower performance than working with
* [[BatchCursor.fromArray[A](array:Array[A])* BatchCursor.fromArray]],
* since the values are boxed, however there is no requirement for a
* `ClassTag` and thus it can be used in any generic context.
*
* @define paramArray is the underlying reference to use for traversing
* and transformations
*
* @define paramArrayOffset is the offset to start from, which would have
* been zero by default
*
* @define paramArrayLength is the length of created cursor, which would
* have been `array.length` by default
*/
object BatchCursor {
  /** Builds an array-backed [[BatchCursor]] out of the given elements. */
  def apply[A](elems: A*): BatchCursor[A] =
    fromArray(elems.asInstanceOf[Seq[AnyRef]].toArray).asInstanceOf[BatchCursor[A]]

  /** Wraps a Scala [[scala.collection.Iterator]] into a [[BatchCursor]].
    *
    * The recommended batch size is picked automatically: the platform
    * default for iterators of known finite size, `1` otherwise.
    *
    * @param iter is the [[scala.collection.Iterator Iterator]] to wrap
    */
  def fromIterator[A](iter: Iterator[A]): BatchCursor[A] =
    fromIterator(iter, if (iter.hasDefiniteSize) recommendedBatchSize else 1)

  /** Wraps a Scala [[scala.collection.Iterator]] into a [[BatchCursor]].
    *
    * @param iter is the [[scala.collection.Iterator Iterator]] to wrap
    * @param recommendedBatchSize becomes the
    *        [[BatchCursor.recommendedBatchSize]] of the result, i.e. the
    *        batch size used when doing eager processing
    */
  def fromIterator[A](iter: Iterator[A], recommendedBatchSize: Int): BatchCursor[A] =
    new IteratorCursor[A](iter, recommendedBatchSize)

  /** Builds a strict (array-backed) [[BatchCursor]] over the whole `array`. */
  def fromArray[A](array: Array[A]): ArrayCursor[A] =
    fromArray(array, 0, array.length)

  /** Builds a strict (array-backed) [[BatchCursor]] over `array`,
    * starting at `offset` and spanning `length` elements.
    */
  def fromArray[A](array: Array[A], offset: Int, length: Int): ArrayCursor[A] = {
    // Recover the element type at runtime so the cursor can allocate
    // arrays with the same component type on transformation.
    val elementTag = ClassTag[A](array.getClass.getComponentType)
    new ArrayCursor[A](array, offset, length)(elementTag)
  }

  /** Builds an [[ArrayCursor]] from any array of boxed values.
    *
    * Slower than `fromArray` (values are boxed), but needs no
    * `ClassTag` and can thus be used in any generic context.
    *
    * @param array the underlying array reference
    * @param offset the index to start from (zero for the whole array)
    * @param length the number of elements covered
    */
  def fromAnyArray[A](array: Array[_], offset: Int, length: Int): ArrayCursor[A] =
    fromArray(array, offset, length).asInstanceOf[ArrayCursor[A]]

  /** Like the three-argument `fromAnyArray`, covering the entire array. */
  def fromAnyArray[A](array: Array[_]): ArrayCursor[A] =
    fromAnyArray(array, 0, array.length)

  /** Builds a [[BatchCursor]] from a Scala `Seq`, with lazy
    * semantics on transformations.
    */
  def fromSeq[A](seq: Seq[A]): BatchCursor[A] =
    fromSeq(seq, if (seq.hasDefiniteSize) recommendedBatchSize else 1)

  /** Builds a [[BatchCursor]] from a Scala `Seq` with an explicit
    * recommended batch size, with lazy semantics on transformations.
    */
  def fromSeq[A](seq: Seq[A], recommendedBatchSize: Int): BatchCursor[A] =
    fromIterator(seq.iterator, recommendedBatchSize)

  /** Builds a strict (array-backed) [[BatchCursor]] from an `IndexedSeq`. */
  def fromIndexedSeq[A](seq: IndexedSeq[A]): BatchCursor[A] = {
    val boxed = seq.asInstanceOf[IndexedSeq[AnyRef]].toArray
    fromArray(boxed).asInstanceOf[BatchCursor[A]]
  }

  /** Returns a generic, empty cursor instance. */
  def empty[A]: BatchCursor[A] = EmptyCursor

  /** Returns a cursor specialized for `Boolean`, over the whole array. */
  def booleans(array: Array[Boolean]): BooleansCursor =
    new BooleansCursor(array, 0, array.length)

  /** Returns a cursor specialized for `Boolean`, over the
    * `[offset, offset + length)` slice of `array`.
    */
  def booleans(array: Array[Boolean], offset: Int, length: Int): BooleansCursor =
    new BooleansCursor(array, offset, length)

  /** Returns a cursor specialized for `Byte`, over the whole array. */
  def bytes(array: Array[Byte]): BytesCursor =
    new BytesCursor(array, 0, array.length)

  /** Returns a cursor specialized for `Byte`, over the
    * `[offset, offset + length)` slice of `array`.
    */
  def bytes(array: Array[Byte], offset: Int, length: Int): BytesCursor =
    new BytesCursor(array, offset, length)

  /** Returns a cursor specialized for `Char`, over the whole array. */
  def chars(array: Array[Char]): CharsCursor =
    new CharsCursor(array, 0, array.length)

  /** Returns a cursor specialized for `Char`, over the
    * `[offset, offset + length)` slice of `array`.
    */
  def chars(array: Array[Char], offset: Int, length: Int): CharsCursor =
    new CharsCursor(array, offset, length)

  /** Returns a cursor specialized for `Int`, over the whole array. */
  def integers(array: Array[Int]): IntegersCursor =
    new IntegersCursor(array, 0, array.length)

  /** Returns a cursor specialized for `Int`, over the
    * `[offset, offset + length)` slice of `array`.
    */
  def integers(array: Array[Int], offset: Int, length: Int): IntegersCursor =
    new IntegersCursor(array, offset, length)

  /** Returns a cursor specialized for `Long`, over the whole array. */
  def longs(array: Array[Long]): LongsCursor =
    new LongsCursor(array, 0, array.length)

  /** Returns a cursor specialized for `Long`, over the
    * `[offset, offset + length)` slice of `array`.
    */
  def longs(array: Array[Long], offset: Int, length: Int): LongsCursor =
    new LongsCursor(array, offset, length)

  /** Returns a cursor specialized for `Double`, over the whole array. */
  def doubles(array: Array[Double]): DoublesCursor =
    new DoublesCursor(array, 0, array.length)

  /** Returns a cursor specialized for `Double`, over the
    * `[offset, offset + length)` slice of `array`.
    */
  def doubles(array: Array[Double], offset: Int, length: Int): DoublesCursor =
    new DoublesCursor(array, offset, length)

  /** A cursor producing equally spaced values in an integer interval.
    *
    * @param from the start value of the cursor
    * @param until the end value (the first value NOT returned)
    * @param step the increment (must be positive or negative)
    * @return a cursor over `from, from + step, ...` up to, but
    *         excluding, `until`
    */
  def range(from: Int, until: Int, step: Int = 1): BatchCursor[Int] =
    fromIterator(Iterator.range(from, until, step), recommendedBatchSize)

  /** Creates an infinite-length cursor that re-evaluates the
    * expression `f` for every element it produces.
    *
    * @param f the computation to repeatedly evaluate
    * @return a cursor with an infinite number of results of `f`
    */
  def continually[A](f: => A): BatchCursor[A] =
    fromIterator(Iterator.continually(f), 1)
}
| Wogan/monix | monix-tail/shared/src/main/scala/monix/tail/batches/BatchCursor.scala | Scala | apache-2.0 | 16,987 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.metric
import org.apache.spark.{Accumulable, AccumulableParam, SparkContext}
/**
 * Create a layer for specialized metric. We cannot add `@specialized` to
 * `Accumulable/AccumulableParam` because it will break Java source compatibility.
 *
 * An implementation of SQLMetric should override `+=` and `add` to avoid boxing.
 */
private[sql] abstract class SQLMetric[R <: SQLMetricValue[T], T](
    name: String, val param: SQLMetricParam[R, T])
  extends Accumulable[R, T](param.zero, param, Some(name), true)
/**
 * Parameter object for a [[SQLMetric]]: supplies the zero value plus the accumulation
 * functions inherited from `AccumulableParam`. Kept as a separate layer because
 * `@specialized` cannot be added to `Accumulable/AccumulableParam` without breaking
 * Java source compatibility.
 */
private[sql] trait SQLMetricParam[R <: SQLMetricValue[T], T] extends AccumulableParam[R, T] {

  // The initial (empty) metric value used for a freshly created accumulator.
  def zero: R
}
/**
 * Base trait for mutable metric values carried by a [[SQLMetric]]. Kept as a separate
 * layer because `@specialized` cannot be added to `Accumulable/AccumulableParam`
 * without breaking Java source compatibility.
 */
private[sql] trait SQLMetricValue[T] extends Serializable {

  // The current value of the metric (may box when T is a primitive).
  def value: T

  override def toString: String = value.toString
}
/**
 * A wrapper of Long to avoid boxing and unboxing when using Accumulator.
 */
private[sql] class LongSQLMetricValue(private var _value : Long) extends SQLMetricValue[Long] {

  // Mutates in place and returns `this` so additions can be chained without allocating.
  def add(incr: Long): LongSQLMetricValue = {
    _value += incr
    this
  }

  // Although there is a boxing here, it's fine because it's only called in SQLListener
  override def value: Long = _value
}
/**
 * A wrapper of Int to avoid boxing and unboxing when using Accumulator.
 */
private[sql] class IntSQLMetricValue(private var _value: Int) extends SQLMetricValue[Int] {

  // Mutates in place and returns `this` so additions can be chained without allocating.
  def add(term: Int): IntSQLMetricValue = {
    _value += term
    this
  }

  // Although there is a boxing here, it's fine because it's only called in SQLListener
  override def value: Int = _value
}
/**
 * A specialized long Accumulable to avoid boxing and unboxing when using Accumulator's
 * `+=` and `add`.
 */
private[sql] class LongSQLMetric private[metric](name: String)
  extends SQLMetric[LongSQLMetricValue, Long](name, LongSQLMetricParam) {

  // Adds directly into the local value, bypassing the boxing the generic
  // Accumulable.+= would perform.
  override def +=(term: Long): Unit = {
    localValue.add(term)
  }

  override def add(term: Long): Unit = {
    localValue.add(term)
  }
}
// Accumulation functions and zero value backing every LongSQLMetric.
private object LongSQLMetricParam extends SQLMetricParam[LongSQLMetricValue, Long] {

  override def addAccumulator(r: LongSQLMetricValue, t: Long): LongSQLMetricValue = r.add(t)

  // Merges r2 into r1 in place and returns r1.
  override def addInPlace(r1: LongSQLMetricValue, r2: LongSQLMetricValue): LongSQLMetricValue =
    r1.add(r2.value)

  // The initial value argument is ignored; a fresh zero is always returned.
  override def zero(initialValue: LongSQLMetricValue): LongSQLMetricValue = zero

  override def zero: LongSQLMetricValue = new LongSQLMetricValue(0L)
}
private[sql] object SQLMetrics {

  // Creates a LongSQLMetric and registers it with the context cleaner (when one
  // exists) so the accumulator is cleaned up with the SparkContext.
  def createLongMetric(sc: SparkContext, name: String): LongSQLMetric = {
    val acc = new LongSQLMetric(name)
    sc.cleaner.foreach(_.registerAccumulatorForCleanup(acc))
    acc
  }

  /**
   * A metric whose value will be ignored. Use this one when we need a metric
   * parameter but don't care about the value.
   */
  val nullLongMetric = new LongSQLMetric("null")
}
| tophua/spark1.52 | sql/core/src/main/scala/org/apache/spark/sql/execution/metric/SQLMetrics.scala | Scala | apache-2.0 | 4,305 |
// code-examples/TypeLessDoMore/count-to-script.scala
/** Prints the integers 1 through `n`, one per line; prints nothing for n < 1. */
def countTo(n: Int): Unit =
  for (i <- 1 to n) println(i)
countTo(5)
| XClouded/t4f-core | scala/src/tmp/TypeLessDoMore/count-to-script.scala | Scala | apache-2.0 | 203 |
package recipestore.db.tinkerpop.models
/** Enumeration of the DSE graph index kinds supported by the store. */
object DSEIndexType extends Enumeration {

  /** Alias so callers can refer to the value type as `DSEIndexType`. */
  type DSEIndexType = Value

  // Declared in order, so ids are assigned 0 through 3.
  val Secondary: Value = Value
  val Materialized: Value = Value
  val TextSearch: Value = Value
  val StringSearch: Value = Value
}
/**
* Copyright (C) 2012 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.resources
import org.orbeon.oxf.test.ResourceManagerTestBase
import org.scalatest.junit.AssertionsForJUnit
import org.junit.Test
class URLFactoryTest extends ResourceManagerTestBase with AssertionsForJUnit {

  @Test def basic(): Unit = {
    // Each entry pairs the expected (protocol, host, path, query) components
    // with the URL string that URLFactory must parse into them.
    val expectations = Seq(
      ("oxf", null, "/foo/bar.txt", null) -> "oxf:/foo/bar.txt?a=42",
      ("system", null, "out", null) -> "system:out?a=42",
      ("http", "example.org", "/foo/bar.txt", "a=42") -> "http://example.org/foo/bar.txt?a=42",
      ("https", "example.org", "/foo/bar.txt", "a=42") -> "https://example.org/foo/bar.txt?a=42",
      ("file", "", "/foo/bar.txt", null) -> "file:/foo/bar.txt?a=42"
    )

    expectations foreach { case (expectedParts, urlString) =>
      val parsed = URLFactory.createURL(urlString)
      assert(expectedParts === ((parsed.getProtocol, parsed.getHost, parsed.getPath, parsed.getQuery)))
    }
  }
}
| ajw625/orbeon-forms | src/test/scala/org/orbeon/oxf/resources/URLFactoryTest.scala | Scala | lgpl-2.1 | 1,640 |
package org.seanpquig.mini.search.core
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import org.seanpquig.mini.search.ml.ImagenetPrediction
import spray.json._
/**
 * This file contains classes that support the different interfaces of the MiniSearch API.
 * Including requests and responses and support proper JSON (un)marshalling.
 */

// spray-json (un)marshalling support that can be mixed into route definitions.
trait JsonSupport extends SprayJsonSupport with DefaultJsonProtocol {

  implicit val searchRequestFormat: RootJsonFormat[SearchRequest] = jsonFormat2(SearchRequest)

  // Hand-written format for TextDoc (instead of jsonFormatN) so the optional
  // title can be serialized as a plain string.
  implicit object TextDocJsonFormat extends RootJsonFormat[TextDoc] {

    // Serializes id, text, and title (missing title becomes "").
    def write(d: TextDoc) = JsObject(
      "id" -> JsString(d.id),
      "text" -> JsString(d.text),
      "title" -> JsString(d.title.getOrElse(""))
    )

    // NOTE(review): only "text" and "title" are read back; the "id" emitted by
    // `write` is dropped, so deserialization presumably regenerates it via
    // TextDoc's defaults — confirm against the TextDoc definition.
    def read(value: JsValue): TextDoc = {
      value.asJsObject.getFields("text", "title") match {
        case Seq(JsString(text), JsString(title)) => TextDoc(text = text, title = Option(title))
        case _ => deserializationError("Document object expected")
      }
    }
  }

  implicit val searchResponseFormat: RootJsonFormat[SearchResponse] = jsonFormat2(SearchResponse)
  implicit val indexRequestFormat: RootJsonFormat[IndexRequest] = jsonFormat1(IndexRequest)
  implicit val indexInfoFormat: RootJsonFormat[IndexInfo] = jsonFormat2(IndexInfo)
  implicit val indicesResponseFormat: RootJsonFormat[IndicesResponse] = jsonFormat1(IndicesResponse)
  implicit val imagenetPredFormat: RootJsonFormat[ImagenetPrediction] = jsonFormat3(ImagenetPrediction)
}
// Search request: free-text query plus the maximum number of hits to return.
case class SearchRequest(query: String, limit: Int)

// Search response: a status message and the matching documents.
case class SearchResponse(message: String, docs: Iterable[TextDoc])

// Indexing request: the documents to add to an index.
case class IndexRequest(docs: Iterable[TextDoc])

// Indexing response: a status message.
// NOTE(review): no implicit format is declared for this type in JsonSupport — confirm intended.
case class IndexResponse(message: String)

// Summary of a single index: its name and how many documents it holds.
case class IndexInfo(name: String, docCount: Long)

// Response listing all known indices.
case class IndicesResponse(indices: Iterable[IndexInfo])
| seanpquig/mini-search | src/main/scala/org/seanpquig/mini/search/core/ApiJsonObjects.scala | Scala | mit | 1,888 |
/*
* Copyright (c) 2016 SnappyData, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package io.snappydata.impl
import java.lang
import java.lang.reflect.{Constructor, Method}
import java.sql.SQLException
import java.util.Properties
import java.util.concurrent.atomic.AtomicReference
import scala.collection.JavaConverters._
import akka.actor.ActorSystem
import com.gemstone.gemfire.distributed.internal.DistributionConfig
import com.gemstone.gemfire.distributed.internal.locks.{DLockService, DistributedMemberLock}
import com.gemstone.gemfire.internal.cache.GemFireCacheImpl
import com.pivotal.gemfirexd.FabricService.State
import com.pivotal.gemfirexd.internal.engine.distributed.utils.GemFireXDUtils
import com.pivotal.gemfirexd.internal.engine.store.ServerGroupUtils
import com.pivotal.gemfirexd.{FabricService, NetworkInterface}
import com.typesafe.config.{Config, ConfigFactory}
import io.snappydata._
import io.snappydata.util.ServiceUtils
import org.apache.thrift.transport.TTransportException
import spark.jobserver.JobServer
import org.apache.spark.sql.SnappyContext
import org.apache.spark.sql.collection.Utils
import org.apache.spark.{Logging, SparkConf, SparkContext, SparkException}
/**
* Lead node implementation for a SnappyData cluster. The lead hosts the Spark
* driver, optionally the embedded spark-jobserver and a Zeppelin interpreter,
* and uses a distributed member lock to elect a single primary lead among
* candidate lead nodes.
*/
class LeadImpl extends ServerImpl with Lead
with ProtocolOverrides with Logging {
self =>
// Name of the distributed lock service used for primary-lead election.
private val LOCK_SERVICE_NAME = "__PRIMARY_LEADER_LS"
// Configuration captured during start; also consulted by add-on services.
private val bootProperties = new Properties()
// Distributed lock service; requires an initialized GemFire cache.
private lazy val dls = {
val gfCache = GemFireCacheImpl.getInstance
if (gfCache == null || gfCache.isClosed) {
throw new Exception("GemFire Cache not initialized")
}
val dSys = gfCache.getDistributedSystem
DLockService.create(LOCK_SERVICE_NAME, dSys, true, true, true)
}
private var sparkContext: SparkContext = _
// Optional callback notified on FabricService.State transitions (may be null).
private var notifyStatusChange: ((FabricService.State) => Unit) = _
// Non-expiring member lock: whoever holds it is the primary lead (Spark driver).
private lazy val primaryLeaderLock = new DistributedMemberLock(dls,
LOCK_SERVICE_NAME, DistributedMemberLock.NON_EXPIRING_LEASE,
DistributedMemberLock.LockReentryPolicy.PREVENT_SILENTLY)
// Set of all recognized snappydata/spark property names; used below to decide
// which prefix to apply to user-supplied keys that contain no '.'.
private[snappydata] val snappyProperties = Utils.getFields(Property).collect {
case (_, SparkProperty(prop)) => prop
case (_, SnappySparkProperty(prop)) => prop
case (_, SparkSQLProperty(prop)) => prop
case (_, SnappySparkSQLProperty(prop)) => prop
case _ => ""
}.toSet
var _directApiInvoked: Boolean = false
var isTestSetup = false
def directApiInvoked: Boolean = _directApiInvoked
// The Zeppelin interpreter server is loaded reflectively since it is an
// optional runtime dependency.
private var remoteInterpreterServerClass: Class[_] = _
private var remoteInterpreterServerObj: Any = _
/**
* Starts the lead: builds a SparkConf from the supplied boot properties
* (normalizing un-prefixed keys), optionally initializes the Zeppelin
* interpreter, then creates the SparkContext.
*/
@throws[SQLException]
override def start(bootProperties: Properties, ignoreIfStarted: Boolean): Unit = {
_directApiInvoked = true
isTestSetup = bootProperties.getProperty("isTest", "false").toBoolean
bootProperties.remove("isTest")
try {
val locator = {
bootProperties.getProperty(DistributionConfig.LOCATORS_NAME) match {
case v if v != null => v
case _ => Property.Locators.getProperty(bootProperties)
}
}
val conf = new SparkConf()
conf.setMaster(Constant.SNAPPY_URL_PREFIX + s"$locator").
setAppName("leaderLauncher").
set(Property.JobServerEnabled.name, "true").
set("spark.scheduler.mode", "FAIR")
Utils.setDefaultSerializerAndCodec(conf)
// inspect user input and add appropriate prefixes
// if property doesn't contain '.'
// if input prop key is found in io.snappydata.Property,
// its prefixed with 'snappydata.' otherwise its assumed
// to be snappydata.store.
bootProperties.asScala.foreach({ case (k, v) =>
val key = if (k.indexOf(".") < 0) {
if (snappyProperties(k)) {
Constant.PROPERTY_PREFIX + k
} else {
Constant.STORE_PROPERTY_PREFIX + k
}
}
else {
k
}
conf.set(key, v)
})
// set spark ui port to 5050 that is snappy's default
conf.set("spark.ui.port",
bootProperties.getProperty("spark.ui.port", LeadImpl.SPARKUI_PORT.toString))
if (bootProperties.getProperty(Constant.ENABLE_ZEPPELIN_INTERPRETER,
"false").equalsIgnoreCase("true")) {
try {
val zeppelinIntpUtilClass = Utils.classForName(
"org.apache.zeppelin.interpreter.ZeppelinIntpUtil")
/**
* This will initialize the zeppelin repl interpreter.
* This should be done before spark context is created as zeppelin
* interpreter will set some properties for classloader for repl
* which needs to be specified while creating sparkcontext in lead
*/
logInfo("About to initialize SparkContext with SparkConf")
val method: Method = zeppelinIntpUtilClass.getMethod(
"initializeZeppelinReplAndGetConfig")
val obj: Object = method.invoke(null)
val props: Properties = obj.asInstanceOf[Properties]
props.asScala.foreach(kv => conf.set(kv._1, kv._2))
} catch {
/* [Sachin] So we need to log warning that
interpreter not started or do we need to exit? */
case e: Throwable => logWarning("Cannot find zeppelin interpreter in the classpath")
throw e;
}
}
sparkContext = new SparkContext(conf)
checkAndStartZeppelinInterpreter(bootProperties)
} catch {
case ie: InterruptedException =>
logInfo(s"Thread interrupted, aborting.")
case e: Throwable =>
logWarning("Exception while starting lead node", e)
throw e
}
}
/**
* Connects this lead to the store and runs primary-lead election. If another
* primary already holds the lock, either fails fast (embedded API) or parks
* in STANDBY until the lock can be acquired (lockInterruptibly blocks).
*/
@throws[SparkException]
private[snappydata] def internalStart(sc: SparkContext): Unit = {
val conf = sc.getConf // this will get you a cloned copy
initStartupArgs(conf, sc)
logInfo("cluster configuration after overriding certain properties \\n"
+ conf.toDebugString)
val confProps = conf.getAll
val storeProps = ServiceUtils.getStoreProperties(confProps)
logInfo("passing store properties as " + storeProps)
super.start(storeProps, false)
status() match {
case State.RUNNING =>
bootProperties.putAll(confProps.toMap.asJava)
logInfo("ds connected. About to check for primary lead lock.")
// check for leader's primary election
val startStatus = primaryLeaderLock.tryLock()
startStatus match {
case true =>
logInfo("Primary lead lock acquired.")
// let go.
case false =>
if (!_directApiInvoked) {
// cleanup before throwing exception
internalStop(bootProperties)
throw new SparkException("Primary Lead node (Spark Driver) is " +
"already running in the system. You may use split cluster " +
"mode to connect to SnappyData cluster.")
}
serverstatus = State.STANDBY
val callback = notifyStatusChange
if (callback != null) {
logInfo("Notifying standby status ...")
callback(serverstatus)
}
logInfo("Primary Lead node (Spark Driver) is already running in the system." +
"Standing by as secondary.")
primaryLeaderLock.lockInterruptibly()
// TODO: check cancelInProgress and other shutdown possibilities.
logInfo("Resuming startup sequence from STANDBY ...")
serverstatus = State.STARTING
if (callback != null) {
callback(serverstatus)
}
}
case _ =>
logWarning(LocalizedMessages.res.getTextMessage("SD_LEADER_NOT_READY", status()))
}
}
/**
* Public stop: flushes sample tables when data stores exist, stops the
* SparkContext and shuts down a running Zeppelin interpreter server.
* NOTE(review): unlike internalStop, this does not call super.stop and the
* interpreter-shutdown code is duplicated between the two — confirm intent.
*/
@throws[SQLException]
override def stop(shutdownCredentials: Properties): Unit = {
val servers = GemFireXDUtils.getGfxdAdvisor.adviseDataStores(null)
if (servers.size() > 0) {
SnappyContext.flushSampleTables()
}
assert(sparkContext != null, "Mix and match of LeadService api " +
"and SparkContext is unsupported.")
if (!sparkContext.isStopped) {
sparkContext.stop()
sparkContext = null
}
Utils.clearDefaultSerializerAndCodec()
if (null != remoteInterpreterServerObj) {
val method: Method = remoteInterpreterServerClass.getMethod("isAlive")
val isAlive: lang.Boolean = method.invoke(remoteInterpreterServerObj)
.asInstanceOf[lang.Boolean]
val shutdown: Method = remoteInterpreterServerClass.getMethod("shutdown",
classOf[lang.Boolean])
if (isAlive) {
shutdown.invoke(remoteInterpreterServerObj, true.asInstanceOf[AnyRef])
}
}
}
// Internal stop: clears boot properties, stops the global SparkContext,
// shuts down the interpreter server (if alive) and stops the base service.
private[snappydata] def internalStop(shutdownCredentials: Properties): Unit = {
bootProperties.clear()
val sc = SnappyContext.globalSparkContext
if (sc != null) sc.stop()
// TODO: [soubhik] find a way to stop jobserver.
sparkContext = null
if (null != remoteInterpreterServerObj) {
val method: Method = remoteInterpreterServerClass.getMethod("isAlive")
val isAlive: lang.Boolean = method.invoke(remoteInterpreterServerObj)
.asInstanceOf[lang.Boolean]
val shutdown: Method = remoteInterpreterServerClass.getMethod("shutdown",
classOf[lang.Boolean])
if (isAlive) {
shutdown.invoke(remoteInterpreterServerObj, true.asInstanceOf[AnyRef])
}
}
super.stop(shutdownCredentials)
}
// Normalizes startup arguments on the (cloned) SparkConf: forces the lead
// server group, validates locator/mcast settings and sets host-data flags.
private[snappydata] def initStartupArgs(conf: SparkConf, sc: SparkContext = null) = {
// Sets attr when absent (also probing the spark.-prefixed variant),
// overwrites when requested, skips when ignoreIfPresent, otherwise
// appends the value comma-separated to the existing one.
def changeOrAppend(attr: String, value: String,
overwrite: Boolean = false, ignoreIfPresent: Boolean = false,
sparkPrefix: String = null): Unit = {
val attrKey = if (sparkPrefix == null) attr else sparkPrefix + attr
conf.getOption(attrKey) match {
case None => if (sparkPrefix == null) {
changeOrAppend(attr, value, overwrite, ignoreIfPresent,
sparkPrefix = Constant.SPARK_PREFIX)
} else conf.set(attr, value)
case v if ignoreIfPresent => // skip setting property
case v if overwrite => conf.set(attr, value)
case Some(x) => conf.set(attr, x ++ s""",$value""")
}
}
changeOrAppend(Constant.STORE_PROPERTY_PREFIX +
com.pivotal.gemfirexd.Attribute.SERVER_GROUPS, LeadImpl.LEADER_SERVERGROUP)
assert(Property.Locators.getOption(conf).orElse(
Property.McastPort.getOption(conf)).isDefined,
s"Either ${Property.Locators} or ${Property.McastPort} " +
s"must be defined for SnappyData cluster to start")
import org.apache.spark.sql.collection.Utils
// skip overriding host-data if loner VM.
if (sc != null && Utils.isLoner(sc)) {
changeOrAppend(Constant.STORE_PROPERTY_PREFIX +
com.pivotal.gemfirexd.Attribute.GFXD_HOST_DATA,
"true", overwrite = true)
} else {
changeOrAppend(Constant.STORE_PROPERTY_PREFIX +
com.pivotal.gemfirexd.Attribute.GFXD_HOST_DATA,
"false", overwrite = true)
changeOrAppend(Constant.STORE_PROPERTY_PREFIX +
com.pivotal.gemfirexd.Attribute.GFXD_PERSIST_DD,
"false", overwrite = true)
}
changeOrAppend(Property.JobServerEnabled.name, "false",
ignoreIfPresent = true)
conf
}
// Registers a callback invoked on service status changes.
protected[snappydata] def notifyOnStatusChange(f: (FabricService.State) => Unit): Unit =
this.notifyStatusChange = f
/**
* Connects to the cluster if not already connected, starts the jobserver
* when enabled, then broadcasts the updated member profile so other members
* learn the (now valid) driver URL.
*/
@throws[Exception]
private[snappydata] def startAddOnServices(sc: SparkContext): Unit = this.synchronized {
LeadImpl.setInitializingSparkContext(sc)
if (status() == State.UNINITIALIZED || status() == State.STOPPED) {
// for SparkContext.setMaster("local[xx]"), ds.connect won't happen
// until now.
logInfo("Connecting to snappydata cluster now...")
internalStart(sc)
}
val jobServerEnabled = Property.JobServerEnabled.getProperty(
bootProperties).toBoolean
if (_directApiInvoked && !isTestSetup) {
assert(jobServerEnabled,
"JobServer must have been enabled with lead.start(..) invocation")
}
if (jobServerEnabled) {
logInfo("Starting job server...")
val confFile = bootProperties.getProperty("jobserver.configFile") match {
case null => Array[String]()
case c => Array(c)
}
JobServer.start(confFile, getConfig, createActorSystem)
}
// This will use GfxdDistributionAdvisor#distributeProfileUpdate
// which inturn will create a new profile object via #instantiateProfile
// whereby ClusterCallbacks#getDriverURL should be now returning
// the correct URL given SparkContext is fully initialized.
logInfo("About to send profile update after initialization completed.")
ServerGroupUtils.sendUpdateProfile()
LeadImpl.clearInitializingSparkContext()
}
// Builds the jobserver Typesafe Config. Precedence (highest first):
// dynamic overrides, jobserver-overrides.conf, boot properties,
// jobserver-defaults.conf, built-in reference configuration.
def getConfig(args: Array[String]): Config = {
System.setProperty("config.trace", "substitutions")
val notConfigurable = ConfigFactory.parseProperties(getDynamicOverrides).
withFallback(ConfigFactory.parseResources("jobserver-overrides.conf"))
val bootConfig = notConfigurable.withFallback(ConfigFactory.parseProperties(bootProperties))
val snappyDefaults = bootConfig.withFallback(
ConfigFactory.parseResources("jobserver-defaults.conf"))
val builtIn = ConfigFactory.load()
val finalConf = snappyDefaults.withFallback(builtIn).resolve()
logDebug("Passing JobServer with config " + finalConf.root.render())
finalConf
}
// Rewrites the jobserver DAO root directories, substituting the
// "<basedir>" placeholder with the system working directory.
def getDynamicOverrides: Properties = {
val dynamicOverrides = new Properties()
val replaceString = "<basedir>"
def replace(key: String, value: String, newValue: String) = {
assert (value.indexOf(replaceString) >= 0)
dynamicOverrides.setProperty(key, value.replace(replaceString, newValue))
}
val workingDir = System.getProperty(
com.pivotal.gemfirexd.internal.iapi.reference.Property.SYSTEM_HOME_PROPERTY, ".")
val defaultConf = ConfigFactory.parseResources("jobserver-defaults.conf")
var key = "spark.jobserver.filedao.rootdir"
replace(key, defaultConf.getString(key), workingDir)
key = "spark.jobserver.datadao.rootdir"
replace(key, defaultConf.getString(key), workingDir)
val overrideConf = ConfigFactory.parseResources("jobserver-overrides.conf")
key = "spark.jobserver.sqldao.rootdir"
replace(key, overrideConf.getString(key), workingDir)
dynamicOverrides
}
// Actor system hosting the embedded spark-jobserver.
def createActorSystem(conf: Config): ActorSystem = {
ActorSystem("SnappyLeadJobServer", conf)
}
@throws[SparkException]
override def startNetworkServer(bindAddress: String,
port: Int,
networkProperties: Properties): NetworkInterface = {
throw new SparkException("Network server cannot be started on lead node.")
}
@throws[SparkException]
override def startThriftServer(bindAddress: String,
port: Int,
networkProperties: Properties): NetworkInterface = {
throw new SparkException("Thrift server cannot be started on lead node.")
}
@throws[SparkException]
override def startDRDAServer(bindAddress: String,
port: Int,
networkProperties: Properties): NetworkInterface = {
throw new SparkException("DRDA server cannot be started on lead node.")
}
override def stopAllNetworkServers(): Unit = {
// nothing to do as none of the net servers are allowed to start.
}
/**
* This method is used to start the zeppelin interpreter thread.
* As discussed by default zeppelin interpreter will be enabled.User can disable it by
* setting "zeppelin.interpreter.enable" to false in leads conf file.User can also specify
* the port on which intrepreter should listen using property zeppelin.interpreter.port
*/
private def checkAndStartZeppelinInterpreter(bootProperties: Properties): Unit = {
// As discussed ZeppelinRemoteInterpreter Server will be enabled by default.
// [sumedh] Our startup times are already very high and we are looking to
// cut that down and not increase further with these external utilities.
if (bootProperties.getProperty(Constant.ENABLE_ZEPPELIN_INTERPRETER,
"false").equalsIgnoreCase("true")) {
val port = bootProperties.getProperty(Constant.ZEPPELIN_INTERPRETER_PORT,
"3768").toInt
try {
remoteInterpreterServerClass = Utils.classForName(
"org.apache.zeppelin.interpreter.SnappyInterpreterServer")
val constructor: Constructor[_] = remoteInterpreterServerClass
.getConstructor(classOf[Integer])
remoteInterpreterServerObj = constructor.newInstance(port.asInstanceOf[AnyRef])
// start() is declared two levels up the class hierarchy — reflective
// call mirrors that; confirm against the interpreter's class layout.
remoteInterpreterServerClass.getSuperclass.getSuperclass
.getDeclaredMethod("start").invoke(remoteInterpreterServerObj)
logInfo(s"Starting Zeppelin RemoteInterpreter at port " + port)
} catch {
case tTransportException: TTransportException =>
logWarning("Error while starting zeppelin interpreter.Actual exception : " +
tTransportException.getMessage)
}
// Add memory listener for zeppelin will need it for zeppelin
// val listener = new LeadNodeMemoryListener();
// GemFireCacheImpl.getInstance().getResourceManager().
// addResourceListener(InternalResourceManager.ResourceType.ALL, listener)
}
}
}
object LeadImpl {

  // Default Spark UI port for a snappy lead node.
  val SPARKUI_PORT = 5050
  val LEADER_SERVERGROUP = "IMPLICIT_LEADER_SERVERGROUP"

  // Holds the SparkContext while the lead is still initializing; cleared once
  // initialization completes.
  private[this] val startingContext: AtomicReference[SparkContext] =
    new AtomicReference[SparkContext](null)

  /** Resolves the registered lead service instance. */
  private[this] def leadInstance: LeadImpl =
    ServiceManager.getLeadInstance.asInstanceOf[LeadImpl]

  /** Starts the lead service using the given SparkContext. */
  def invokeLeadStart(sc: SparkContext): Unit =
    leadInstance.internalStart(sc)

  /** Starts add-on services (e.g. the job server) on the lead. */
  def invokeLeadStartAddonService(sc: SparkContext): Unit =
    leadInstance.startAddOnServices(sc)

  /** Stops the lead service. */
  def invokeLeadStop(shutdownCredentials: Properties): Unit =
    leadInstance.internalStop(shutdownCredentials)

  /** Records the SparkContext that is currently being initialized. */
  def setInitializingSparkContext(sc: SparkContext): Unit = {
    assert(sc != null)
    startingContext.set(sc)
  }

  /**
   * Returns the fully-initialized global SparkContext when available,
   * otherwise the one recorded as still initializing.
   */
  def getInitializingSparkContext: SparkContext =
    SnappyContext.globalSparkContext match {
      case null =>
        val initSC = startingContext.get()
        assert(initSC != null)
        initSC
      case sc => sc
    }

  /** Clears the recorded initializing SparkContext. */
  def clearInitializingSparkContext(): Unit =
    startingContext.set(null)
}
| vjr/snappydata | cluster/src/main/scala/io/snappydata/impl/LeadImpl.scala | Scala | apache-2.0 | 19,008 |
package com.twitter.finagle.mysql
import com.twitter.finagle.stats.{NullStatsReceiver, StatsReceiver}
import com.twitter.finagle.{ClientConnection, ServiceFactory, ServiceProxy}
import com.twitter.util._
object Client {

  /**
   * Creates a new Client based on a ServiceFactory.
   *
   * @param factory the connection pool used to execute requests
   * @param statsReceiver receiver for client metrics (defaults to a no-op)
   */
  def apply(
    factory: ServiceFactory[Request, Result],
    statsReceiver: StatsReceiver = NullStatsReceiver
  ): Client with Transactions with Cursors = {
    val client = new StdClient(factory, statsReceiver)
    client
  }
}
/**
* A MySQL client interface: plain queries, row-mapping selects, prepared
* statements and server pings, all expressed as asynchronous Futures.
*/
trait Client extends Closable {
/**
* Returns the result of executing the `sql` query on the server.
*/
def query(sql: String): Future[Result]
/**
* Sends the given `sql` to the server and maps each resulting row to
* `f`, a function from Row => T. If no ResultSet is returned, the function
* returns an empty Seq.
*/
def select[T](sql: String)(f: Row => T): Future[Seq[T]]
/**
* Returns a new PreparedStatement instance based on the given sql query.
* The returned prepared statement can be reused and applied with varying
* parameters.
*
* @note Mysql prepared statements are stateful, that is, they allocate
* resources on the mysql server. The allocations are managed by a
* finagle-mysql connection. Closing the client implicitly closes all
* outstanding PreparedStatements.
*/
def prepare(sql: String): PreparedStatement
/**
* Returns the result of pinging the server.
*/
def ping(): Future[Result]
}
trait Transactions {
/**
* Execute `f` in a transaction.
*
* If `f` throws an exception, the transaction is rolled back. Otherwise, the transaction is
* committed.
*
* @example {{{
* client.transaction[Foo] { c =>
* for {
* r0 <- c.query(q0)
* r1 <- c.query(q1)
* response: Foo <- buildResponse(r0, r1)
* } yield response
* }
* }}}
* @note we use a ServiceFactory that returns the same Service repeatedly to the client. This is
* to assure that a new MySQL connection (i.e. Service) from the connection pool (i.e.,
* ServiceFactory) will be used for each new transaction. Only upon completion of the transaction
* is the connection returned to the pool for re-use.
*/
def transaction[T](f: Client => Future[T]): Future[T]
}
/**
* Support for cursored statements, i.e. incremental server-side fetching of
* large result sets.
*/
trait Cursors {
/**
* Create a CursoredStatement with the given parameterized sql query.
* The returned cursored statement can be reused and applied with varying
* parameters.
*
* @note The cursored statements are built on a prepare -> execute -> fetch flow
* that requires state tracking. It is important to either fully consume the resulting
* stream, or explicitly call `close()` on the returned result.
*/
def cursor(sql: String): CursoredStatement
}
/**
* Standard [[Client]] implementation backed by a finagle ServiceFactory
* (connection pool). Also implements transactions — which pin a single
* checked-out connection for their duration — and cursored statements.
*/
private class StdClient(factory: ServiceFactory[Request, Result], statsReceiver: StatsReceiver)
extends Client with Transactions with Cursors {
private[this] val service = factory.toService
private[this] val cursorStats = new CursorStats(statsReceiver)
def query(sql: String): Future[Result] = service(QueryRequest(sql))
def ping(): Future[Result] = service(PingRequest)
def select[T](sql: String)(f: Row => T): Future[Seq[T]] =
query(sql) map {
case rs: ResultSet => rs.rows.map(f)
case _ => Nil
}
// Each apply() checks out one connection, prepares, executes and always
// releases the connection back to the pool (ensure).
def prepare(sql: String): PreparedStatement = new PreparedStatement {
def apply(ps: Parameter*): Future[Result] = factory() flatMap { svc =>
svc(PrepareRequest(sql)).flatMap {
case ok: PrepareOK => svc(ExecuteRequest(ok.id, ps.toIndexedSeq))
case r => Future.exception(new Exception("Unexpected result %s when preparing %s"
.format(r, sql)))
} ensure {
svc.close()
}
}
}
// The returned CursorResult owns the checked-out connection; the caller must
// consume or close it to release the connection.
def cursor(sql: String): CursoredStatement = {
new CursoredStatement {
override def apply[T](rowsPerFetch: Int, params: Parameter*)(f: (Row) => T): Future[CursorResult[T]] = {
assert(rowsPerFetch > 0, "rowsPerFetch must be positive")
factory().map { svc =>
new StdCursorResult[T](cursorStats, svc, sql, rowsPerFetch, params, f)
}
}
}
}
def transaction[T](f: Client => Future[T]): Future[T] = {
// Single-connection factory: hands out the same checked-out Service so all
// statements of the transaction run on one MySQL connection.
val singleton = new ServiceFactory[Request, Result] {
val svc = factory()
// Because the `singleton` is used in the context of a `FactoryToService` we override
// `Service#close` to ensure that we can control the checkout lifetime of the `Service`.
val proxiedService = svc map { service =>
new ServiceProxy(service) {
override def close(deadline: Time) = Future.Done
}
}
def apply(conn: ClientConnection) = proxiedService
def close(deadline: Time): Future[Unit] = svc.flatMap(_.close(deadline))
}
val client = Client(singleton, statsReceiver)
val transaction = for {
_ <- client.query("START TRANSACTION")
result <- f(client)
_ <- client.query("COMMIT")
} yield {
result
}
// handle failures and put connection back in the pool
transaction transform {
case Return(r) =>
singleton.close()
Future.value(r)
case Throw(e) =>
client.query("ROLLBACK") transform { _ =>
singleton.close()
Future.exception(e)
}
}
}
def close(deadline: Time): Future[Unit] = factory.close(deadline)
}
| spockz/finagle | finagle-mysql/src/main/scala/com/twitter/finagle/mysql/Client.scala | Scala | apache-2.0 | 5,344 |
/*
* @author Stefan Schurgast
*/
package ch.uzh.ifi.ddis.markovlogicinference
/**
* Abstract class TruthValue represents the truth value that can be taken by a
* grounded predicate or a grounded formula. It is either an Option of a boolean
* value: Some(true) (object TRUE) or Some(false) (object FALSE), or unknown
* (object UNKNOWN, where the Option is None).
*
* @param value Option of Boolean representing the truth value.
*/
abstract class TruthValue(val value: Option[Boolean])
/** TruthValue singleton for the known truth value Some(true). */
object TRUE extends TruthValue(Some(true)) {
/**
* Returns string representation of option Some(true) as defined in XML schema.
* @return string representation of true
*/
override val toString = "true"
// Typed-literal RDF form using the XML Schema boolean datatype.
val rdfString = "\\"true\\"^^<http://www.w3.org/2001/XMLSchema#boolean>"
}
/** TruthValue singleton for the known truth value Some(false). */
object FALSE extends TruthValue(Some(false)) {
/**
* Returns string representation of option Some(false) as defined in XML schema.
* @return string representation of false
*/
override val toString = "false"
// Typed-literal RDF form using the XML Schema boolean datatype.
val rdfString = "\\"false\\"^^<http://www.w3.org/2001/XMLSchema#boolean>"
}
/** TruthValue singleton for an unknown truth value (None). */
object UNKNOWN extends TruthValue(None) {
/**
* Returns string representation of option None.
* @return string representation of unknown
*/
override val toString = "unknown"
// No typed literal exists for an unknown value; plain marker string.
val rdfString = "unknown"
}
| schurgast/LISy | src/ch/uzh/ifi/ddis/markovlogicinference/logic/TruthValue.scala | Scala | apache-2.0 | 1,398 |
package org.psliwa.idea.composerJson.intellij.codeAssist.file
import com.intellij.psi.{PsiElement, PsiReference, PsiReferenceProvider}
import com.intellij.util.ProcessingContext
import org.psliwa.idea.composerJson.json.{EmailFormat, UriFormat}
/**
* Provides web references for quoted string literals: e-mail addresses become
* mailto: links, URIs become plain URL references, anything else yields no
* references.
*/
private object UrlReferenceProvider extends PsiReferenceProvider {
  override def getReferencesByElement(element: PsiElement, context: ProcessingContext): Array[PsiReference] = {
    // Strip the surrounding quote characters from the literal's text.
    val text = element.getText.substring(1, element.getText.length - 1)
    if (EmailFormat.isValid(text)) {
      Array(new UrlPsiReference(element) {
        // Fix: the original wrote Some("mailto:" + super.url), concatenating
        // the Option itself and producing "mailto:Some(...)". Prefix the
        // contained value instead.
        override protected def url: Option[String] = super.url.map("mailto:" + _)
      })
    } else if (UriFormat.isValid(text)) {
      Array(new UrlPsiReference(element))
    } else {
      Array()
    }
  }
}
| psliwa/idea-composer-plugin | src/main/scala/org/psliwa/idea/composerJson/intellij/codeAssist/file/UrlReferenceProvider.scala | Scala | mit | 791 |
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package com.ksmpartners.ernie.engine.report
import org.testng.annotations.Test
import com.ksmpartners.ernie.model.{ ReportType, ParameterEntity, DefinitionEntity }
import org.joda.time.DateTime
import java.util
import org.testng.Assert
/**
* TestNG tests for Definition wrapping a DefinitionEntity. The tests share
* mutable fields and are ordered via dependsOnMethods, so they must run in
* the declared dependency order.
*/
class DefinitionTest {
// Shared state populated by canCreateDefinition and read by later tests.
var definition: Definition = null
var definitionEntity: DefinitionEntity = null
// Builds a fully-populated DefinitionEntity and wraps it in a Definition.
@Test
def canCreateDefinition() {
definitionEntity = new DefinitionEntity()
definitionEntity.setCreatedDate(DateTime.now)
definitionEntity.setCreatedUser("default")
definitionEntity.setDefDescription("test desc")
definitionEntity.setDefId("test_def")
val lst = new util.ArrayList[String]()
lst.add("val1")
definitionEntity.setParamNames(lst)
val paramEnt = new ParameterEntity("val1", "integer", false, "10")
val params = new util.ArrayList[ParameterEntity]()
params.add(paramEnt)
definitionEntity.setParams(params)
val unsTypes = new util.ArrayList[ReportType]()
unsTypes.add(ReportType.CSV)
definitionEntity.setUnsupportedReportTypes(unsTypes)
definition = new Definition(definitionEntity)
}
// Accessors must delegate to the entity, and getEntity must return a copy
// rather than the original instance.
@Test(dependsOnMethods = Array("canCreateDefinition"))
def getDefinitionEntityReturnsCopy() {
Assert.assertEquals(definition.getCreatedDate, definitionEntity.getCreatedDate)
Assert.assertEquals(definition.getCreatedUser, definitionEntity.getCreatedUser)
Assert.assertEquals(definition.getDefDescription, definitionEntity.getDefDescription)
Assert.assertEquals(definition.getDefId, definitionEntity.getDefId)
Assert.assertNotSame(definition.getEntity, definitionEntity)
}
// Unsupported report types come back as a typed, single-element array.
@Test(dependsOnMethods = Array("canCreateDefinition"))
def canGetUnsupportedTypes() {
Assert.assertEquals(definition.getUnsupportedReportTypes.getClass, classOf[Array[ReportType]])
Assert.assertEquals(definition.getUnsupportedReportTypes.size, 1)
}
// Parameter names come back as a typed, single-element array.
@Test(dependsOnMethods = Array("canCreateDefinition"))
def canGetParamNames() {
Assert.assertEquals(definition.getParamNames.getClass, classOf[Array[String]])
Assert.assertEquals(definition.getParamNames.size, 1)
}
// Unset (null) lists in the entity must surface as empty arrays, not null.
@Test
def nullListsReturnEmptyListsInstead() {
val defi = new Definition(new DefinitionEntity())
Assert.assertNotNull(defi.getParamNames)
Assert.assertNotNull(defi.getUnsupportedReportTypes)
}
}
| ksmpartners/ernie | ernie-engine/src/test/scala/com/ksmpartners/ernie/engine/report/DefinitionTest.scala | Scala | apache-2.0 | 2,888 |
// This is a sample code that was shamelessly stolen from
// http://boldradius.com/blog-post/VS0NpTAAADAACs_E/introduction-to-akka-streams
package extra
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Sink, Source}
import scala.collection.immutable
import scala.util.Random
object InputCustomer {
  /** Builds a customer whose two-part name is randomly generated. */
  def random(): InputCustomer = {
    val firstName = s"FirstName${Random.nextInt(1000)}"
    val lastName = s"LastName${Random.nextInt(1000)}"
    InputCustomer(s"$firstName $lastName")
  }
}
// Raw input record: the customer's full name as one "First Last" string.
case class InputCustomer(name: String)
// Normalized record: the name split into first and last components.
case class OutputCustomer(firstName: String, lastName: String)
// Demo pipeline: generate 100 random input customers, split each full name
// into first/last parts, print the normalized customers, then shut down the
// actor system once the stream completes.
object CustomersExample extends App {
implicit val actorSystem = ActorSystem()
import actorSystem.dispatcher
implicit val flowMaterializer = ActorMaterializer()
// Source of 100 randomly-named input customers.
val inputCustomers = Source((1 to 100).map(_ => InputCustomer.random()))
// Splits "First Last" into an OutputCustomer; names that do not split into
// exactly two words are dropped (empty Seq from mapConcat).
val normalize = Flow[InputCustomer].mapConcat { input =>
input.name.split(" ").toList match {
case firstName::lastName::Nil => immutable.Seq(OutputCustomer(firstName, lastName))
case _ => immutable.Seq[OutputCustomer]()
}
}
// Sink that prints each normalized customer.
val writeCustomers = Sink.foreach[OutputCustomer] { customer =>
println(customer)
}
// Run the stream; on completion (success or failure) stop the actor system.
inputCustomers.via(normalize).runWith(writeCustomers).andThen {
case _ =>
actorSystem.shutdown()
actorSystem.awaitTermination()
}
} | dotta/akka-streams-demo | scala-demo/src/main/scala/extra/CustomersExample.scala | Scala | apache-2.0 | 1320
package com.twitter.util.routing
import java.lang.{Iterable => JIterable}
import java.util.function.Function
import scala.jdk.CollectionConverters._
object Validator {

  /** [[Validator]] that does no validation and always returns empty error results. */
  val None: Validator[Any] = new Validator[Any] {
    def apply(routes: Iterable[Any]): Iterable[ValidationError] = Iterable.empty
  }

  /** Java-friendly creation of a [[Validator]]. */
  def create[T](fn: Function[JIterable[T], JIterable[ValidationError]]): Validator[T] =
    new Validator[T] {
      def apply(routes: Iterable[T]): Iterable[ValidationError] = {
        // Bridge between the Scala and Java collection views on both sides
        // of the user-supplied function.
        val javaErrors = fn(routes.asJava)
        javaErrors.asScala
      }
    }
}
/**
* Functional alias for determining whether all defined results are valid for a specific
* [[Router router]] type. An empty result means all routes are valid; each
* [[ValidationError]] describes one detected problem.
*/
abstract class Validator[-Route] extends (Iterable[Route] => Iterable[ValidationError])
| twitter/util | util-routing/src/main/scala/com/twitter/util/routing/Validator.scala | Scala | apache-2.0 | 877 |
package org.novetta.zoo.types
/**
* Create() case class. Used between the ConsumerActor and the WorkGroup.
* @param key: Long => The message key associated with this work.
* @param primaryURI: String => The primary URL for downloading the target resource.
* @param secondaryURI: String => The secondary (fallback) URL for downloading the target resource.
* @param value: WorkState => The state of the Job, and component work, at time of creation.
*
* @constructor Generate a Create message. This is used to initiate the creation of a WorkActor.
*
*/
case class Create(key: Long, primaryURI: String, secondaryURI: String, value: WorkState)
/**
* Result case class. Used between the WorkActor and the ProducerActor.
* @param filename: String => The filename representing the target of this Job.
* @param result: WorkResult => The WorkResult representing the end state of the Job.
*
* @constructor Generate a Result message. This is used to transmit results to the Producer and from there, to the queueing backbone.
*
*/
case class Result(filename: String, result: WorkResult)
/**
* ResultPackage case class. A package of results for transmission. The various file hashes are used for secondary aggregation against
* the target processing file; that information is otherwise lost due to the UUID usage in the temporary filestore.
* @param filename: String => The filename representing the target of this Job.
* @param results: Iterable[WorkResult] => An Iterable representing the WorkResults to be transmitted.
* @param MD5: String => MD5 hash of the target file.
* @param SHA1: String => SHA1 hash of the target file.
* @param SHA256: String => SHA256 hash of the target file.
*
* @constructor Generate a ResultPackage message. This is for multiple WorkResult transfers.
*
*/
case class ResultPackage(filename: String, results: Iterable[WorkResult], MD5: String, SHA1: String, SHA256: String)
object WorkState {
  /** Convenience factory: builds a WorkState with zeroed timestamps. */
  def create(filename: String, hashfilename: String, workToDo: List[TaskedWork], results: List[WorkResult] = List[WorkResult](), attempts: Int): WorkState = {
    WorkState(
      filename = filename,
      hashfilename = hashfilename,
      workToDo = workToDo,
      created = 0,
      lastEdited = 0,
      results = results,
      attempts = attempts)
  }
}
/**
* WorkState case class. A representation of the current state of a given Job. This can be used to merge Jobs, or transfer overall Job state.
* @param filename: String => The filename representing the target of this Job.
* @param hashfilename: String => The hashed filename representing the target of the Job.
* @param workToDo: List[TaskedWork] => A list of all TaskedWork elements, which are the component Work elements.
* @param created: Int => The time this work was created (always 0 via the companion factory).
* @param lastEdited: Int => The last time this work had an altered state.
* @param results: List[WorkResult] => A list of the WorkResults that have been generated.
* @param attempts: Int => The number of times this Job has been attempted across all executors.
* @constructor Generate a WorkState message.
*
*/
case class WorkState(
filename: String,
hashfilename: String,
workToDo: List[TaskedWork],
created: Int = 0,
lastEdited: Int = 0,
results: List[WorkResult] = List[WorkResult](),
attempts: Int = 0
) {
// A job is complete once every tasked work item has produced a result.
def isComplete: Boolean = {
workToDo.size == results.size
}
// Returns a copy with the given result appended. NOTE(review): lastEdited is
// set to the constant 1, presumably a placeholder for a real timestamp —
// confirm intended semantics.
def +(that: WorkResult): WorkState = {
new WorkState(
filename = this.filename,
hashfilename = this.hashfilename,
workToDo = this.workToDo,
created = this.created,
lastEdited = 1,
results = this.results :+ that,
attempts = this.attempts
)
}
}
| Novetta/totem | src/main/scala/org/novetta/zoo/types/CommandTypes.scala | Scala | bsd-3-clause | 3,691 |
package com.datawizards.splot.examples.charts.histogram
import com.datawizards.splot.api.implicits._
import com.datawizards.splot.examples.people
// Example: plots a categorical histogram of the sample people's education levels.
object HistogramForStrings extends App {
people.plotHistogramForCategories(_.education)
}
| piotr-kalanski/SPlot | src/main/scala/com/datawizards/splot/examples/charts/histogram/HistogramForStrings.scala | Scala | apache-2.0 | 240 |
package com.kifi.franz
import com.amazonaws.services.sqs.AmazonSQSAsync
import play.api.libs.json.{Format, Json}
import scala.language.implicitConversions
/**
* An [[SQSQueue]] whose messages are (de)serialized as JSON using the
* supplied play-json [[Format]].
*/
class FormattedSQSQueue[T](
  protected val sqs: AmazonSQSAsync,
  val queue: QueueName,
  protected val createIfNotExists: Boolean = false,
  format: Format[T]
) extends SQSQueue[T] {

  // Serialize a message body to its JSON string form.
  protected implicit def asString(obj: T): String =
    Json.stringify(Json.toJson(obj)(format))

  // Parse a JSON string back into the message type.
  protected implicit def fromString(s: String): T =
    Json.parse(s).as[T](format)
}
| stkem/franz | src/main/scala/com/kifi/franz/FormattedSQSQueue.scala | Scala | mit | 522 |
package pw.anisimov.adverto.api
import akka.actor.{Props, Actor, ActorLogging, ActorRef}
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import akka.util.Timeout
import pw.anisimov.adverto.api.ApiManagerActor.GetBinding
import scala.concurrent.duration._
/**
* Actor owning the HTTP API server lifecycle: binds the adverts route on
* preStart, records the resulting binding, answers GetBinding with
* Option[Http.ServerBinding], and unbinds on postStop.
*/
class ApiManagerActor(host: String, port: Int, val dataActor: ActorRef) extends Actor with ActorLogging with AdvertsRoute {
// None until the HTTP server has successfully bound.
var binding: Option[Http.ServerBinding] = None
implicit val system = context.system
implicit val materializer = ActorMaterializer()
implicit val timeout: Timeout = 5.seconds
implicit val dispatcher = context.dispatcher
override def preStart(): Unit = {
// Capture the ActorRef before the future callback so the completed binding
// is delivered back to this actor as a message.
val selfRef = self
Http().bindAndHandle(advertsRoute, host, port).foreach(bound => selfRef ! bound)
}
override def postStop(): Unit = {
// Release the listening socket if we ever bound.
binding foreach (_.unbind())
}
override def receive: Receive = {
case boundEvent: Http.ServerBinding =>
log.info(s"Adverto API Started at: ${boundEvent.localAddress.toString}")
binding = Some(boundEvent)
case GetBinding =>
sender() ! binding
}
}
object ApiManagerActor {
// Message requesting the current server binding; the reply is an
// Option[Http.ServerBinding].
case object GetBinding
// Props factory for creating an ApiManagerActor.
def props(host: String, port: Int, dataActor: ActorRef): Props = Props(classOf[ApiManagerActor], host, port, dataActor)
}
| yoks/adverto | src/main/scala/pw/anisimov/adverto/api/ApiManagerActor.scala | Scala | apache-2.0 | 1,276 |
/*
* Copyright 2009-2010 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ccf.transport.http
import java.io.IOException
import java.net.URL
import ccf.transport.json.{JsonEncoder, JsonDecoder}
import org.apache.http.conn.scheme.Scheme
import ccf.transport._
/** Factory for [[HttpConnection]] instances backed by a Dispatch-based HTTP client. */
object HttpConnection {
  /**
   * Creates a connection that encodes/decodes payloads as JSON.
   *
   * @param timeoutMillis socket/connection timeout for the underlying client
   */
  def create(url: URL, scheme: Option[Scheme] = None, headerContributor: Option[HttpTransportHeaderContributor] = None, timeoutMillis: Int = 3000) =
    new HttpConnection(url, new DispatchHttpClient(timeoutMillis, scheme), JsonDecoder, JsonEncoder, scheme, headerContributor)
}
/**
 * HTTP-backed [[Connection]]: posts encoded requests to `url` resolved against
 * the request's "type" header, optionally enriching headers via `headerContributor`.
 */
class HttpConnection(url: URL, val client: HttpClient, decoder: Decoder, encoder: Encoder, scheme: Option[Scheme], headerContributor: Option[HttpTransportHeaderContributor]) extends Connection {
  @throws(classOf[ConnectionException])
  def send(originalRequest: TransportRequest): Option[TransportResponse] =
    try {
      // Merge contributor headers (if any) into the original request's headers.
      val extraHeaders = headerContributor.map(_.getHeaders).getOrElse(Map())
      val enrichedRequest = TransportRequest(originalRequest.headers ++ extraHeaders, originalRequest.content)
      decoder.decodeResponse(post(enrichedRequest))
    } catch {
      // Network-level failures surface as ConnectionException to callers.
      case e: IOException => throw new ConnectionException(e.toString)
    }
  private def post(request: TransportRequest) =
    client.post(requestUrl(request), encoder.encodeRequest(request))
  // The request "type" header names the endpoint path relative to the base URL.
  private def requestUrl(request: TransportRequest) =
    new URL(url, request.header("type").getOrElse(requestTypeMissing))
  private def requestTypeMissing =
    throw new InvalidRequestException("Request header \"type\" missing")
}
| akisaarinen/ccf | ccf/src/main/scala/ccf/transport/http/HttpConnection.scala | Scala | apache-2.0 | 2,161 |
package com.github.j5ik2o.spetstore.adaptor.eventbus
import akka.actor.{ ActorRef, ActorSystem }
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.{ Publish, Subscribe, Unsubscribe }
import akka.event.ActorEventBus
import org.slf4j.LoggerFactory
/** Envelope pairing an event with the actor that published it. */
case class EventWithSender(event: Any, sender: ActorRef)
/**
 * Event bus abstraction: events are [[EventWithSender]] envelopes and
 * subscriptions are classified by the event's runtime class.
 */
trait EventBus extends ActorEventBus {
  type Event = EventWithSender
  type Classifier = Class[_]
}
/**
 * Factories for the two [[EventBus]] implementations: a cluster-wide bus backed
 * by Akka Distributed Pub/Sub and a JVM-local bus backed by the actor system's
 * event stream.
 */
object EventBus {
  /** Cluster-wide bus: topics are named after `Class.toString` of the classifier. */
  class EventBusOnRemote(system: ActorSystem) extends EventBus {
    val logger = LoggerFactory.getLogger(getClass)
    // Distributed Pub/Sub mediator actor for this node.
    val mediator = DistributedPubSub(system).mediator
    // Tracks one classifier per subscriber so unsubscribe(subscriber) can find it.
    // NOTE(review): a second subscribe() from the same actor overwrites the first
    // entry here (the mediator still holds both subscriptions), so the one-arg
    // unsubscribe will only remove the most recent topic — confirm this is intended.
    val topics = scala.collection.mutable.Map.empty[ActorRef, Class[_]]
    override def subscribe(subscriber: ActorRef, to: Class[_]): Boolean = {
      topics += (subscriber -> to)
      mediator ! Subscribe(to.toString, subscriber)
      // Fire-and-forget: always reports success, the mediator's ack is not awaited.
      true
    }
    override def publish(eventWithSender: EventWithSender): Unit = {
      // Topic derived from the wrapped event's runtime class, matching subscribe.
      mediator ! Publish(eventWithSender.event.getClass.toString, eventWithSender)
    }
    override def unsubscribe(subscriber: ActorRef, from: Class[_]): Boolean = {
      if (topics.contains(subscriber)) {
        mediator ! Unsubscribe(from.toString, subscriber)
        topics -= subscriber
        true
      } else false
    }
    override def unsubscribe(subscriber: ActorRef): Unit = {
      // Remove every tracked subscription belonging to this subscriber.
      topics.filter {
        case (key, _) =>
          key == subscriber
      }.foreach {
        case (key, value) =>
          unsubscribe(key, value)
      }
    }
  }
  /** JVM-local bus that simply delegates to the actor system's event stream. */
  class EventBusOnLocal(val system: ActorSystem) extends EventBus {
    private val eventStream = system.eventStream
    override def subscribe(subscriber: ActorRef, to: Classifier): Boolean = {
      eventStream.subscribe(subscriber, to)
    }
    override def publish(eventWithSender: EventWithSender): Unit = {
      eventStream.publish(eventWithSender)
    }
    override def unsubscribe(subscriber: ActorRef, from: Classifier): Boolean = {
      eventStream.unsubscribe(subscriber, from)
    }
    override def unsubscribe(subscriber: ActorRef): Unit = {
      eventStream.unsubscribe(subscriber)
    }
  }
  /** Creates the JVM-local implementation. */
  def ofLocal(system: ActorSystem): EventBus = new EventBusOnLocal(system)
  /** Creates the cluster-wide implementation. */
  def ofRemote(system: ActorSystem): EventBus = new EventBusOnRemote(system)
}
/**
* Copyright (C) 2012 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.fr
import java.{util ⇒ ju}
import org.orbeon.oxf.util.StringUtils._
import org.orbeon.oxf.util.URLFinder
import org.orbeon.saxon.function.{PropertiesStartsWith, Property}
import org.orbeon.saxon.om.NodeInfo
import org.orbeon.scaxon.SimplePath._
import org.orbeon.xforms.XFormsId
import scala.collection.JavaConverters._
/**
 * XPath-callable helpers used when producing the PDF rendition of a Form Runner
 * form: PDF format lookup from properties, format-expression resolution,
 * PDF field-name construction, and URL hyperlinking.
 */
trait FormRunnerPDF {
  // Return mappings (formatName → expression) for all PDF formats in the properties
  //@XPathFunction
  def getPDFFormats: ju.Map[String, String] = {
    def propertiesStartingWithIt(prefix: String) =
      PropertiesStartsWith.propertiesStartsWith(prefix).iterator map (_.getStringValue)
    val formatPairsIt =
      for {
        formatPropertyName ← propertiesStartingWithIt("oxf.fr.pdf.format")
        expression ← Property.propertyAsString(formatPropertyName)
        // Format name is the last token of the property name ("….format.date" → "date")
        formatName = formatPropertyName split '.' last
      } yield
        formatName → expression
    formatPairsIt.toMap.asJava
  }
  // Return the PDF formatting expression for the given parameters
  //@XPathFunction
  def getPDFFormatExpression(
    pdfFormats : ju.Map[String, String],
    app : String,
    form : String,
    name : String,
    dataType : String
  ): String = {
    // Property name: oxf.fr.pdf.map.<app>.<form>.<name>[.<dataType>] (dataType may be null)
    val propertyName = List("oxf.fr.pdf.map", app, form, name) ::: Option(dataType).toList mkString "."
    val expressionOpt =
      for {
        format ← Property.propertyAsString(propertyName)
        expression ← Option(pdfFormats.get(format))
      } yield
        expression
    // Returns null (not an empty string) when no mapping exists, for XPath callers.
    expressionOpt.orNull
  }
  // Build a PDF control id from the given HTML control
  //@XPathFunction
  def buildPDFFieldNameFromHTML(control: NodeInfo): String = {
    def isContainer(e: NodeInfo) = {
      val classes = e.attClasses
      classes("xbl-fr-section") || (classes("xbl-fr-grid") && (e descendant "table" exists (_.attClasses("fr-repeat"))))
    }
    def findControlName(e: NodeInfo) =
      XFormsId.getStaticIdFromId(e.id).trimAllToOpt flatMap FormRunner.controlNameFromIdOpt
    // NOTE(review): parameter `e` is unused — the body always walks up from
    // `control`. Harmless as currently called (only with `control`), but confirm.
    def ancestorContainers(e: NodeInfo) =
      control ancestor * filter isContainer reverse
    def suffixAsList(id: String) =
      XFormsId.getEffectiveIdSuffix(id).trimAllToOpt.toList
    // This only makes sense if we are passed a control with a name
    findControlName(control) map { controlName ⇒
      ((ancestorContainers(control) flatMap findControlName) :+ controlName) ++ suffixAsList(control.id) mkString "$"
    } orNull
  }
  import URLFinder._
  // Add http/https/mailto hyperlinks to a plain string
  //@XPathFunction
  def hyperlinkURLs(s: String, hyperlinks: Boolean): String =
    replaceURLs(s, if (hyperlinks) replaceWithHyperlink else replaceWithPlaceholder)
}
object FormRunnerPDF extends FormRunnerPDF | brunobuzzi/orbeon-forms | form-runner/jvm/src/main/scala/org/orbeon/oxf/fr/FormRunnerPDF.scala | Scala | lgpl-2.1 | 3,453 |
package effectful.examples.pure.dao.sql
import effectful.examples.effects.sql._
import effectful.examples.effects.sql.SqlDriver.SqlRow
import effectful.examples.pure.dao.DocDao.RecordMetadata
/** Bidirectional mapping between a Scala value and a single SQL value. */
trait SqlValFormat[A] {
  def toSqlVal(a: A) : SqlVal
  def fromSqlVal(v: SqlVal) : A
}
/** Bidirectional mapping between a Scala value and a whole SQL row. */
trait SqlRowFormat[A] {
  def toSqlRow(a: A) : SqlRow
  def fromSqlRow(row: SqlRow) : A
}
/** Combined format: the ID maps to a SQL value, the record A maps to a SQL row. */
trait SqlRecordFormat[ID,A] extends SqlValFormat[ID] with SqlRowFormat[A]
object SqlRecordFormat {
  /**
   * Builds a [[SqlRecordFormat]] by delegating to an id format and a row format.
   *
   * Generalized: the type parameter `A` was previously declared but unused — the
   * row side was hard-wired to [[RecordMetadata]]. Existing call sites that pass
   * a `SqlRowFormat[RecordMetadata]` still infer `A = RecordMetadata`.
   */
  def apply[ID,A](
    idFormat: SqlValFormat[ID],
    rowFormat: SqlRowFormat[A]
  ) : SqlRecordFormat[ID,A] =
    new SqlRecordFormat[ID,A] {
      def toSqlVal(a: ID) = idFormat.toSqlVal(a)
      def fromSqlVal(v: SqlVal) = idFormat.fromSqlVal(v)
      def fromSqlRow(row: SqlRow) = rowFormat.fromSqlRow(row)
      def toSqlRow(a: A) = rowFormat.toSqlRow(a)
    }
}
/** Bidirectional mapping between a Scala value and SQL character (CLOB-like) data. */
trait CharDataFormat[A] {
  def toCharData(a: A) : CharData
  def fromCharData(data: CharData) : A
}
object CharDataFormat {
  // Explicit types on implicit vals: avoids widening to the anonymous subclass,
  // silences compiler warnings, and is required for implicits in Scala 3.
  /** Materializes character data fully as a String. */
  implicit val charDataFormat_String: CharDataFormat[String] = new CharDataFormat[String] {
    def toCharData(a: String) = CharData(a)
    def fromCharData(data: CharData) = data.toCharString()
  }
  /** Streams character data through a java.io.Reader. */
  implicit val charDataFormat_Reader: CharDataFormat[java.io.Reader] = new CharDataFormat[java.io.Reader] {
    def toCharData(a: java.io.Reader) = CharData(a)
    def fromCharData(data: CharData) = data.toCharStream()
  }
}
/** Bidirectional mapping between a Scala value and SQL binary (BLOB-like) data. */
trait BinDataFormat[A] {
  def toBinData(a: A) : BinData
  def fromBinData(data: BinData) : A
}
/**
* Copyright (C) 2014 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.fr
import org.orbeon.oxf.test.DocumentTestBase
import org.orbeon.oxf.xforms.state.XFormsStaticStateCache
import org.scalatest.junit.AssertionsForJUnit
import org.junit.Test
/**
 * Verifies Form Runner's static-state cache: for each form/mode combination the
 * first run may or may not hit the cache, but a second run with a *different*
 * document id must always hit, and the cached static state must (or must not)
 * hold the XForms template depending on the form's configuration.
 */
class CacheTest extends DocumentTestBase with FormRunnerSupport with AssertionsForJUnit {
  @Test def formRunnerStaticCache(): Unit = {
    // Two distinct document ids so the second run exercises the cache populated by the first.
    val Id1 = "6578e2e0e7911fd9ba284aefaea671cbfb814851"
    val Id2 = "15c4a18428496faa1212d86f58c62d9d3c51cf0d"
    def runAndAssert(form: String, mode: String, noscript: Boolean = false)(expectedInitialHit: Boolean, staticStateHoldsTemplate: Boolean) = {
      // Whether a StaticState cache event reported a hit.
      def staticStateFoundOpt(events: List[CacheEvent]) =
        events collectFirst { case StaticState(found, _) ⇒ found }
      // Whether the cached document, looked up by its digest, retains a template.
      def staticStateHasTemplateOpt(events: List[CacheEvent]) = (
        events
        collectFirst { case StaticState(_, digest) ⇒ digest}
        flatMap XFormsStaticStateCache.findDocument
        map (_.template.isDefined)
      )
      // First time may or may not pass
      val (_, events1) = runFormRunner("cache-test", form, mode, document = Id1, noscript = noscript, initialize = false)
      assert(Some(expectedInitialHit) === staticStateFoundOpt(events1))
      // Second time with different document must always pass
      val (_, events2) = runFormRunner("cache-test", form, mode, document = Id2, noscript = noscript, initialize = false)
      assert(Some(true) === staticStateFoundOpt(events2))
      assert(Some(staticStateHoldsTemplate) === staticStateHasTemplateOpt(events2))
    }
    locally {
      val Form = "noscript-true-pdf-auto-wizard-false"
      val staticStateHoldsTemplate = true
      runAndAssert(Form, "new")(expectedInitialHit = false, staticStateHoldsTemplate)
      for (mode ← Seq("edit", "view", "pdf"))
        runAndAssert(Form, mode)(expectedInitialHit = true, staticStateHoldsTemplate)
      // Once #1712 is fixed, should return true
      // See https://github.com/orbeon/orbeon-forms/issues/1712
      runAndAssert(Form, "edit", noscript = true)(expectedInitialHit = false, staticStateHoldsTemplate)
      // NOTE: Need to run schema.xpl or FR PFC for this to work
      // See https://github.com/orbeon/orbeon-forms/issues/1731
      // runAndAssert(Form, "schema")(expectedFound = false)
    }
    locally {
      val Form = "noscript-false-pdf-template-wizard-true"
      val staticStateHoldsTemplate = false
      runAndAssert(Form, "new" )(expectedInitialHit = false, staticStateHoldsTemplate)
      runAndAssert(Form, "edit")(expectedInitialHit = true, staticStateHoldsTemplate)
      runAndAssert(Form, "view")(expectedInitialHit = false, staticStateHoldsTemplate)
      runAndAssert(Form, "pdf" )(expectedInitialHit = true, staticStateHoldsTemplate)
      runAndAssert(Form, "edit", noscript = true)(expectedInitialHit = true, staticStateHoldsTemplate)
      // NOTE: Need to run schema.xpl or FR PFC for this to work
      // See https://github.com/orbeon/orbeon-forms/issues/1731
      // runAndAssert(Form, "schema")(expectedFound = false)
    }
  }
}
| ajw625/orbeon-forms | src/test/scala/org/orbeon/oxf/fr/CacheTest.scala | Scala | lgpl-2.1 | 3,977 |
/*
* Copyright 2016 Actian Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.actian.spark_vector.loader.command
import org.apache.spark.sql.SQLContext
import com.actian.spark_vector.loader.options.UserOptions
import com.actian.spark_vector.sql.{ sparkQuote, colsSelectStatement }
object ParquetRead {
  /**
   * Based on `options`, register a temporary view as the source of the Parquet input.
   *
   * @return a SELECT statement that can be used to subsequently consume data
   *         from the temporary view
   * @note the view is named "parquet_<vectorTargetTable>_<timestamp>" to avoid collisions
   */
  def registerTempTable(options: UserOptions, sqlContext: SQLContext): String = {
    val viewName = s"parquet_${options.vector.targetTable}_${System.currentTimeMillis}"
    val parquetFrame = sqlContext.read.parquet(options.general.sourceFile)
    parquetFrame.createOrReplaceTempView(viewName)
    val selectedCols = colsSelectStatement(options.general.colsToLoad)
    s"select $selectedCols from ${sparkQuote(viewName)}"
  }
}
| ActianCorp/spark-vector | loader/src/main/scala/com/actian/spark_vector/loader/command/ParquetRead.scala | Scala | apache-2.0 | 1,499 |
package slamdata.engine.fs
import scalaz.\\/
import scalaz.concurrent._
import scalaz.stream._
import argonaut._
/** A JSON document kept in its raw serialized form. */
case class RenderedJson(value: String) {
  // Left side carries the parse error message on malformed input.
  def toJson: String \/ Json = JsonParser.parse(value)
  override def toString = value
}
/** Minimal filesystem abstraction over streams of rendered JSON documents. */
trait FileSystem {
  /** Streams documents at `path`, optionally skipping `offset` items and capping at `limit`. */
  def scan(path: Path, offset: Option[Long], limit: Option[Long]): Process[Task, RenderedJson]
  final def scanAll(path: Path) = scan(path, None, None)
  final def scanTo(path: Path, limit: Long) = scan(path, None, Some(limit))
  final def scanFrom(path: Path, offset: Long) = scan(path, Some(offset), None)
  def delete(path: Path): Task[Unit]
  def ls: Task[List[Path]]
}
object FileSystem {
  /** No-op filesystem: empty scans, always-successful deletes, empty listing. */
  val Null = new FileSystem {
    def scan(path: Path, offset: Option[Long], limit: Option[Long]): Process[Task, RenderedJson] = Process.halt
    def delete(path: Path): Task[Unit] = Task.now(())
    def ls: Task[List[Path]] = Task.now(Nil)
  }
}
package com.twitter.finatra.http.exceptions
import com.google.common.net.MediaType
import com.twitter.finagle.http.{Response, Status}
import com.twitter.finatra.http.response.{ErrorsResponse, ResponseBuilder}
import org.jboss.netty.handler.codec.http.HttpResponseStatus
/* HTTP Exceptions */
// TODO: Redesign to avoid boilerplate below (@see ResponseBuilder)
/**
* HttpException which will be rendered as an HTTP response.
*/
/** Factory methods for the generic [[HttpException]]. */
object HttpException {
  /** Builds an exception rendered as plain text with a single body line. */
  def plainText(status: HttpResponseStatus, body: String) =
    new HttpException(status, MediaType.PLAIN_TEXT_UTF_8, Seq(body))
  /** Builds an exception rendered as JSON carrying the given error messages. */
  def apply(status: HttpResponseStatus, errors: String*) =
    new HttpException(status, MediaType.JSON_UTF_8, errors)
}
/**
 * Exception carrying an HTTP status, response media type and error messages,
 * rendered directly as the HTTP response.
 */
class HttpException(
  val statusCode: HttpResponseStatus,
  val mediaType: MediaType,
  val errors: Seq[String] = Seq())
  extends Exception {

  /* Public */

  override def getMessage: String =
    s"HttpException($statusCode:$mediaType) with errors: ${errors.mkString(",")}"

  /* Generated Equals/Hashcode */

  // Standard canEqual-based equality over statusCode, mediaType and errors.
  override def equals(other: Any): Boolean = other match {
    case that: HttpException =>
      (that canEqual this) &&
        statusCode == that.statusCode &&
        mediaType == that.mediaType &&
        errors == that.errors
    case _ =>
      false
  }

  override def hashCode(): Int =
    Seq(statusCode, mediaType, errors).foldLeft(0) { (acc, field) =>
      31 * acc + field.hashCode()
    }

  def canEqual(other: Any): Boolean = other.isInstanceOf[HttpException]
}
/* Specific Status Exceptions */
/** Factories for 404 Not Found responses. */
object NotFoundException {
  /** 404 with a plain-text body. */
  def plainText(body: String) =
    new NotFoundException(MediaType.PLAIN_TEXT_UTF_8, Seq(body))
  /** 404 with JSON-rendered error messages. */
  def apply(errors: String*) =
    new NotFoundException(MediaType.JSON_UTF_8, errors)
}
/** Exception rendered as an HTTP 404 Not Found response. */
case class NotFoundException(
  override val mediaType: MediaType,
  override val errors: Seq[String])
  extends HttpException(Status.NotFound, mediaType, errors) {
  /** Single JSON error message. */
  def this(error: String) = this(MediaType.JSON_UTF_8, Seq(error))
}
/** Factories for 409 Conflict responses. */
object ConflictException {
  /** 409 with a plain-text body. */
  def plainText(body: String) =
    new ConflictException(MediaType.PLAIN_TEXT_UTF_8, Seq(body))
  /** 409 with JSON-rendered error messages. */
  def apply(errors: String*) =
    new ConflictException(MediaType.JSON_UTF_8, errors)
}
/** Exception rendered as an HTTP 409 Conflict response. */
case class ConflictException(
  override val mediaType: MediaType,
  override val errors: Seq[String])
  extends HttpException(Status.Conflict, mediaType, errors) {
  /** Single JSON error message. */
  def this(error: String) = this(MediaType.JSON_UTF_8, Seq(error))
}
/** Factories for 500 Internal Server Error responses. */
object InternalServerErrorException {
  /** 500 with a plain-text body. */
  def plainText(body: String) =
    new InternalServerErrorException(MediaType.PLAIN_TEXT_UTF_8, Seq(body))
  /** 500 with JSON-rendered error messages. */
  def apply(errors: String*) =
    new InternalServerErrorException(MediaType.JSON_UTF_8, errors)
}
/** Exception rendered as an HTTP 500 Internal Server Error response. */
case class InternalServerErrorException(
  override val mediaType: MediaType,
  override val errors: Seq[String])
  extends HttpException(Status.InternalServerError, mediaType, errors)
/** Factories for 503 Service Unavailable responses. */
object ServiceUnavailableException {
  /** 503 with a plain-text body. */
  def plainText(body: String) =
    new ServiceUnavailableException(MediaType.PLAIN_TEXT_UTF_8, Seq(body))
  /** 503 with JSON-rendered error messages. */
  def apply(errors: String*) =
    new ServiceUnavailableException(MediaType.JSON_UTF_8, errors)
}
/** Exception rendered as an HTTP 503 Service Unavailable response. */
case class ServiceUnavailableException(
  override val mediaType: MediaType,
  override val errors: Seq[String])
  extends HttpException(Status.ServiceUnavailable, mediaType, errors)
/** Factories for 400 Bad Request responses. */
object BadRequestException {
  /** 400 with a plain-text body. */
  def plainText(body: String) =
    new BadRequestException(MediaType.PLAIN_TEXT_UTF_8, Seq(body))
  /** 400 with JSON-rendered error messages. */
  def apply(errors: String*) =
    new BadRequestException(MediaType.JSON_UTF_8, errors)
}
/** Exception rendered as an HTTP 400 Bad Request response. */
case class BadRequestException(
  override val mediaType: MediaType,
  override val errors: Seq[String])
  extends HttpException(Status.BadRequest, mediaType, errors) {
  /** Single JSON error message. */
  def this(error: String) = this(MediaType.JSON_UTF_8, Seq(error))
}
/** Factories for 403 Forbidden responses. */
object ForbiddenException {
  /** 403 with a plain-text body. */
  def plainText(body: String) =
    new ForbiddenException(MediaType.PLAIN_TEXT_UTF_8, Seq(body))
  /** 403 with JSON-rendered error messages. */
  def apply(errors: String*) =
    new ForbiddenException(MediaType.JSON_UTF_8, errors)
}
/** Exception rendered as an HTTP 403 Forbidden response. */
case class ForbiddenException(
  override val mediaType: MediaType,
  override val errors: Seq[String])
  extends HttpException(Status.Forbidden, mediaType, errors)
/** Factories for 406 Not Acceptable responses. */
object NotAcceptableException {
  /** 406 with a plain-text body. */
  def plainText(body: String) =
    new NotAcceptableException(MediaType.PLAIN_TEXT_UTF_8, Seq(body))
  /** 406 with JSON-rendered error messages. */
  def apply(errors: String*) =
    new NotAcceptableException(MediaType.JSON_UTF_8, errors)
}
/** Exception rendered as an HTTP 406 Not Acceptable response. */
case class NotAcceptableException(
  override val mediaType: MediaType,
  override val errors: Seq[String])
  extends HttpException(Status.NotAcceptable, mediaType, errors)
| nkhuyu/finatra | http/src/main/scala/com/twitter/finatra/http/exceptions/exceptions.scala | Scala | apache-2.0 | 4,706 |
package utils
// spark-core
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.SparkConf
import org.apache.spark.rdd._
// spark-sql
import org.apache.spark.sql.SparkSession
// mllib
import org.apache.spark.mllib.linalg.{Vector, Vectors, Matrix, Matrices}
// others
import scala.math
/** Spark helpers for loading, normalizing and parsing libsvm-format data. */
object Utils {
  /**
   * Load data from libsvm format file.
   *
   * @param spark SparkSession
   * @param filename a string of file name
   * @param numSplits if it is specified (> 0), spark will change the number of partitions
   * @param isCoalesce if true, use coalesce(); otherwise use repartition()
   * @return persisted RDD of (label, dense feature array) pairs
   */
  def loadLibsvmData(spark: SparkSession, filename: String, numSplits: Int = -1, isCoalesce: Boolean = true): RDD[(Double, Array[Double])] = {
    // Loads data
    var rawdata = spark.read.format("libsvm")
      .load(filename)
      .rdd
    if (numSplits > 0) {
      // coalesce() avoids a full shuffle, so it is usually faster than repartition().
      // However, coalesce() may not return the desired number of partitions;
      // if the data is small, the resulting #partition can be smaller than numSplits.
      if (isCoalesce) {
        var t1 = System.nanoTime()
        rawdata = rawdata.coalesce(numSplits)
        var t2 = System.nanoTime()
        var tdiff = (t2 - t1) * 1E-9
        println("Time cost of coalesce is " + tdiff.toString)
      }
      // repartition() is slower, but it is guaranteed to return exactly numSplits partitions.
      else {
        var t1 = System.nanoTime()
        rawdata = rawdata.repartition(numSplits)
        var t2 = System.nanoTime()
        var tdiff = (t2 - t1) * 1E-9
        println("Time cost of repartition is " + tdiff.toString)
      }
      // note: coalesce can result in data being sent over the network. avoid this for large datasets
    }
    // Convert each Row of (label, ml Vector) into (Double, Array[Double]) and cache it.
    val labelVectorRdd: RDD[(Double, Array[Double])] = rawdata
      .map(pair => (pair(0).toString.toDouble, Vectors.parse(pair(1).toString).toArray))
      .persist()
    labelVectorRdd
  }
  /**
   * Computes the mean of the labels and the element-wise maximum of the
   * absolute feature values (inputs to `normalize` below).
   */
  def meanAndMax(labelVectorRdd: RDD[(Double, Array[Double])]): (Double, Array[Double]) = {
    val maxFeatures: Array[Double] = labelVectorRdd.map(pair => pair._2.map(math.abs))
      .reduce((a, b) => (a zip b).map(pair => math.max(pair._1, pair._2)) )
    val meanLabel: Double = labelVectorRdd.map(pair => pair._1)
      .mean
    (meanLabel, maxFeatures)
  }
  /**
   * Centers each label by meanLabel and scales each feature by its max absolute value.
   * The scalars/arrays are broadcast so they are not re-shipped with every task.
   * NOTE(review): a feature column whose max is 0 divides by zero — confirm inputs.
   */
  def normalize(sc: SparkContext, labelVectorRdd: RDD[(Double, Array[Double])], meanLabel: Double, maxFeatures: Array[Double]): RDD[(Double, Array[Double])] = {
    val maxFeaturesBc = sc.broadcast(maxFeatures)
    val meanLabelBc = sc.broadcast(meanLabel)
    val normalizedLabelVectorRdd: RDD[(Double, Array[Double])] = labelVectorRdd
      .map(pair => (pair._1-meanLabelBc.value, (pair._2 zip maxFeaturesBc.value).map(a => a._1 / a._2)))
    normalizedLabelVectorRdd
  }
  /**
   * Parses one libsvm-format line ("label idx:val idx:val ...") into a sparse vector of size d.
   * NOTE(review): indices are used as-is; classic libsvm files are 1-based while
   * Vectors.sparse expects 0-based indices — confirm the expected file convention.
   */
  def parseLibsvm(str: String, d: Int): (Double, Vector) = {
    val strArray: Array[String] = str.split(" ")
    val label: Double = strArray(0).toDouble
    val elements: Array[(Int, Double)] = strArray.drop(1)
      .map(s => s.split(":"))
      .map(pair => (pair(0).toDouble.toInt, pair(1).toDouble))
    val feature: Vector = Vectors.sparse(d, elements)
    (label, feature)
  }
}
/**
* (c) Copyright 2013 WibiData, Inc.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kiji.schema.shell.ddl
import org.kiji.annotations.ApiAudience
import org.kiji.delegation.Lookups
import org.kiji.delegation.NamedLookup
import org.kiji.delegation.NoSuchProviderException
import org.kiji.schema.shell.DDLException
import org.kiji.schema.shell.Environment
import org.kiji.schema.shell.spi.EnvironmentPlugin
import org.kiji.schema.shell.spi.ParserPluginFactory
/**
* Returns a modified environment that uses the specified module.
*
* @param env the environment in which this command executes.
* @param moduleName the name of the module to load.
*/
@ApiAudience.Private
final class UseModuleCommand(val env: Environment, val moduleName: String) extends DDLCommand {
override def exec(): Environment = {
val lookup = Lookups.getNamed(classOf[ParserPluginFactory])
try {
val module = lookup.lookup(moduleName)
echo("Loading module \\"" + moduleName + "\\"")
val envWithModule: Environment = env.withModule(module)
// Take this environment and add any necessary extension data.
return addExtensionData(envWithModule, module)
} catch { case _: NoSuchProviderException =>
throw new DDLException("No such module: '" + moduleName + "'.")
}
}
/**
* Further process the environment after loading the module, by adding any initial
* environment extension data specified by the plugin. This is only applicable to
* ParserPluginFactory instances that also extend EnvironmentPlugin.
*
* <p>This has been called out into its own method as a means to introduce the 'T' type
* parameter, required for matching the type of envPlugin with the result of calling its
* createExtensionState() method.</p>
*
* @param envWithModule the initial environment 'env', with the specified module loaded.
* @param module the ParserPluginFactory module being loaded.
* @tparam T the free type parameter of the EnvironmentPlugin that the ParserPluginFactory
* is cast to.
* @return the provided environment, augmented with any initial environment extension data
* supplied by the plugin.
*/
private def addExtensionData[T](envWithModule: Environment, module: ParserPluginFactory):
Environment = {
if (module.isInstanceOf[EnvironmentPlugin[_]]) {
// This module extends the environment with additional data to track.
// Load its default data in here.
val envPlugin: EnvironmentPlugin[T] = module.asInstanceOf[EnvironmentPlugin[T]]
return envWithModule.updateExtension(envPlugin, envPlugin.createExtensionState())
} else {
return envWithModule // Just loading the module was sufficient.
}
}
}
| kijiproject/kiji-schema-shell | src/main/scala/org/kiji/schema/shell/ddl/UseModuleCommand.scala | Scala | apache-2.0 | 3,371 |
import sbt._
import Keys._
import Tests._
import Defaults._
import java.io.{ CharArrayWriter, PrintWriter }
import Import._
/**
 * sbt scripted-test build for ticket 543: runs a forked ScalaTest suite and,
 * via a test listener, creates a `marker` file whenever a failed test's stack
 * trace references "Test.scala:". The `check` task then asserts the marker
 * exists, proving a real (non-null, location-bearing) error was reported.
 */
object Ticket543Test extends Build {
  // Side-effect channel between the test listener and the `check` task.
  val marker = new File("marker")
  val check = TaskKey[Unit]("check", "Check correct error has been returned.")
  lazy val root = Project("root", file("."), settings = defaultSettings ++ Seq(
    libraryDependencies += "org.scalatest" %% "scalatest" % "1.8" % "test",
    scalaVersion := "2.9.2",
    fork := true,
    testListeners += new TestReportListener {
      def testEvent(event: TestEvent): Unit = {
        // Only failed tests with a present throwable are of interest.
        for (e <- event.detail.filter(_.status == sbt.testing.Status.Failure)) {
          if (e.throwable != null && e.throwable.isDefined) {
            val caw = new CharArrayWriter
            e.throwable.get.printStackTrace(new PrintWriter(caw))
            // Create the marker only when the trace points into Test.scala.
            if (caw.toString.contains("Test.scala:"))
              marker.createNewFile()
          }
        }
      }
      def startGroup(name: String): Unit = ()
      def endGroup(name: String, t: Throwable): Unit = ()
      def endGroup(name: String, result: TestResult.Value): Unit = ()
    },
    check := {
      // Consume the marker so repeated runs start from a clean state.
      val exists = marker.exists
      marker.delete()
      if (!exists) sys.error("Null or invalid error had been returned previously")
    }
  ))
}
| dansanduleac/sbt | sbt/src/sbt-test/tests/t543/project/Ticket543Test.scala | Scala | bsd-3-clause | 1,213 |
package org.jetbrains.plugins.scala
package lang
package parameterInfo
package typeParameterInfo
import _root_.scala.util.Sorting
import lexer.ScalaTokenTypes
import collection.mutable.ArrayBuffer
import com.intellij.codeInsight.hint.{HintUtil, ShowParameterInfoContext}
import com.intellij.psi.PsiElement
import java.awt.Color
import com.intellij.lang.parameterInfo.ParameterInfoUIContext
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.openapi.fileEditor.{OpenFileDescriptor, FileEditorManager}
import com.intellij.openapi.vfs.{CharsetToolkit, LocalFileSystem}
import psi.api.ScalaFile
import java.io.File
import com.intellij.openapi.util.text.StringUtil
import com.intellij.openapi.util.io.FileUtil
import base.ScalaLightPlatformCodeInsightTestCaseAdapter
/**
* @author Aleksander Podkhalyuzin
* @since 26.04.2009
*/
/**
 * Base class for type-parameter-info tests: loads a fixture .scala file
 * containing a /*caret*/ marker, invokes the parameter-info handler at that
 * offset, collects the rendered hint texts, and compares them (sorted,
 * newline-joined) against the expected output stored in the file's last comment.
 */
abstract class TypeParameterInfoTestBase extends ScalaLightPlatformCodeInsightTestCaseAdapter {
  // Marker in fixture files indicating where parameter info is requested.
  val caretMarker = "/*caret*/"
  protected def folderPath = baseRootPath() + "parameterInfo/typeParameterInfo/"
  protected def doTest() {
    import _root_.junit.framework.Assert._
    // Locate and load the fixture file named after the test.
    val filePath = folderPath + getTestName(false) + ".scala"
    val file = LocalFileSystem.getInstance.findFileByPath(filePath.replace(File.separatorChar, '/'))
    assert(file != null, "file " + filePath + " not found")
    val fileText = StringUtil.convertLineSeparators(FileUtil.loadFile(new File(file.getCanonicalPath), CharsetToolkit.UTF8))
    configureFromFileTextAdapter(getTestName(false) + ".scala", fileText)
    val scalaFile = getFileAdapter.asInstanceOf[ScalaFile]
    val offset = fileText.indexOf(caretMarker)
    assert(offset != -1, "Not specified caret marker in test case. Use /*caret*/ in scala file for this.")
    // Open an editor at the caret and drive the parameter-info handler there.
    val fileEditorManager = FileEditorManager.getInstance(getProjectAdapter)
    val editor = fileEditorManager.openTextEditor(new OpenFileDescriptor(getProjectAdapter, file, offset), false)
    val context = new ShowParameterInfoContext(editor, getProjectAdapter, scalaFile, offset, -1)
    val handler = new ScalaTypeParameterInfoHandler
    val leafElement = scalaFile.findElementAt(offset)
    val element = PsiTreeUtil.getParentOfType(leafElement, handler.getArgumentListClass)
    handler.findElementForParameterInfo(context)
    val items = new ArrayBuffer[String]
    for (item <- context.getItemsToShow) {
      // Stub UI context that just records the rendered hint text.
      val uiContext = new ParameterInfoUIContext {
        def getDefaultParameterColor: Color = HintUtil.INFORMATION_COLOR
        def setupUIComponentPresentation(text: String, highlightStartOffset: Int, highlightEndOffset: Int,
                                        isDisabled: Boolean, strikeout: Boolean, isDisabledBeforeHighlight: Boolean,
                                        background: Color): String = {
          items.append(text)
          text
        }
        def isUIComponentEnabled: Boolean = false
        def getCurrentParameterIndex: Int = 0
        def getParameterOwner: PsiElement = element
        def setUIComponentEnabled(enabled: Boolean) {}
      }
      handler.updateUI(item, uiContext)
    }
    // Sort for a deterministic comparison, then join with newlines (no trailing one).
    val itemsArray = items.toArray
    Sorting.quickSort[String](itemsArray)
    val res = new StringBuilder("")
    for (item <- itemsArray) res.append(item).append("\n")
    if (res.length > 0) res.replace(res.length - 1, res.length, "")
    // Expected output lives in the last comment of the fixture file.
    val lastPsi = scalaFile.findElementAt(scalaFile.getText.length - 1)
    val text = lastPsi.getText
    val output = lastPsi.getNode.getElementType match {
      case ScalaTokenTypes.tLINE_COMMENT => text.substring(2).trim
      case ScalaTokenTypes.tBLOCK_COMMENT | ScalaTokenTypes.tDOC_COMMENT =>
        text.substring(2, text.length - 2).trim
      case _ => assertTrue("Test result must be in last comment statement.", false)
    }
    assertEquals(output, res.toString())
  }
}
package com.wavesplatform.it.sync.transactions
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory.parseString
import com.wavesplatform.account.Address
import com.wavesplatform.api.http.ApiError.CustomValidationError
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.it.Node
import com.wavesplatform.it.NodeConfigs._
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.sync._
import com.wavesplatform.it.transactions.{BaseTransactionSuite, NodesFromDocker}
import com.wavesplatform.transaction.Asset.Waves
import com.wavesplatform.transaction.transfer.TransferTransaction
class RebroadcastTransactionSuite extends BaseTransactionSuite with NodesFromDocker {
  import RebroadcastTransactionSuite._

  // Two-node network: one miner and one non-miner, both starting with tx rebroadcasting enabled.
  override protected def nodeConfigs: Seq[Config] =
    Seq(configWithRebroadcastAllowed.withFallback(Miners.head), configWithRebroadcastAllowed.withFallback(NotMiner))

  // Convenience accessors; node ordering is fixed by nodeConfigs above.
  private def nodeAIsMiner: Node = nodes.head
  private def nodeBIsNotMiner: Node = nodes.last

  test("should rebroadcast a transaction if that's allowed in config") {
    val tx = TransferTransaction.selfSigned(2.toByte, nodeAIsMiner.keyPair, Address.fromString(nodeBIsNotMiner.address).explicitGet(), Waves, transferAmount, Waves, minFee, ByteStr.empty, System.currentTimeMillis())
      .explicitGet()
      .json()

    // Stop the miner before broadcasting so only node B has seen the transaction.
    val dockerNodeAId = docker.stopContainer(dockerNodes().head)
    val txId = nodeBIsNotMiner.signedBroadcast(tx).id
    docker.startContainer(dockerNodeAId)
    nodeBIsNotMiner.waitForPeers(1)
    // Precondition: the miner never received the tx while it was down.
    nodeAIsMiner.ensureTxDoesntExist(txId)
    // Re-broadcasting through B should now be relayed to A, growing A's UTX pool.
    nodeBIsNotMiner.signedBroadcast(tx)
    nodeAIsMiner.waitForUtxIncreased(0)
    nodeAIsMiner.utxSize shouldBe 1
  }

  test("should not rebroadcast a transaction if that's not allowed in config") {
    // Restart both nodes with rebroadcasting disabled before repeating the scenario.
    dockerNodes().foreach(docker.restartNode(_, configWithRebroadcastNotAllowed))
    val tx = TransferTransaction
      .selfSigned(2.toByte, nodeAIsMiner.keyPair, Address.fromString(nodeBIsNotMiner.address).explicitGet(), Waves, transferAmount, Waves, minFee, ByteStr.empty, System.currentTimeMillis())
      .explicitGet()
      .json()

    // Same choreography as the positive test: broadcast to B while A is down.
    val dockerNodeAId = docker.stopContainer(dockerNodes().head)
    val txId = nodeBIsNotMiner.signedBroadcast(tx).id
    docker.startContainer(dockerNodeAId)
    nodeBIsNotMiner.waitForPeers(1)
    nodeAIsMiner.ensureTxDoesntExist(txId)
    nodeBIsNotMiner.signedBroadcast(tx)
    // With rebroadcast disabled the second broadcast must NOT reach the miner.
    nodes.waitForHeightArise()
    nodeAIsMiner.utxSize shouldBe 0
    nodeAIsMiner.ensureTxDoesntExist(txId)
  }

  test("should not broadcast a transaction if there are not enough peers") {
    val tx = TransferTransaction
      .selfSigned(2.toByte, nodeAIsMiner.keyPair, Address.fromString(nodeBIsNotMiner.address).explicitGet(), Waves, transferAmount, Waves, minFee, ByteStr.empty, System.currentTimeMillis())
      .explicitGet()
      .json()

    val testNode = dockerNodes().last
    try {
      // Require an unreachable peer count so the REST API rejects the broadcast.
      docker.restartNode(testNode, configWithMinimumPeers(999))
      // NOTE(review): the quadruple backslashes produce literal "\\(" / "\\d" in the
      // regex source; presumably "\\(" (escaped once) was intended — TODO confirm
      // against how assertiveRegex interprets the pattern.
      assertApiError(testNode.signedBroadcast(tx), CustomValidationError("There are not enough connections with peers \\\\(\\\\d+\\\\) to accept transaction").assertiveRegex)
    } finally {
      // Always restore the node so subsequent suites get a usable network.
      docker.restartNode(testNode, configWithMinimumPeers(0))
    }
  }
}
object RebroadcastTransactionSuite {

  /** Builds the node config toggling whether received transactions may be rebroadcast. */
  private def rebroadcastConfig(allowed: Boolean) =
    parseString(s"waves.synchronization.utx-synchronizer.allow-tx-rebroadcasting = $allowed")

  private val configWithRebroadcastAllowed = rebroadcastConfig(allowed = true)

  private val configWithRebroadcastNotAllowed = rebroadcastConfig(allowed = false)

  /** Config requiring at least `n` peer connections before the REST API accepts transactions. */
  private def configWithMinimumPeers(n: Int) =
    parseString(s"waves.rest-api.minimum-peers = $n")
}
| wavesplatform/Waves | node-it/src/test/scala/com/wavesplatform/it/sync/transactions/RebroadcastTransactionSuite.scala | Scala | mit | 3,728 |
/*
* Copyright (C) 2014 Ivan Cukic <ivan at mi.sanu.ac.rs>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package core
import akka.io.IO
import akka.actor.Props
import dataapi.{RoutedHttpService, ResourceService, DataspaceService, UserService, SessionService, DataspaceRoleService, StatusService}
import spray.can.Http
import spray.routing._
import spray.http.HttpHeaders.RawHeader
object Rest extends App
    with RouteConcatenation
    with Directives
{
    // Use the actor system's dispatcher as the ExecutionContext for route handling.
    // Named explicitly: `implicit val _` is deprecated (Scala 2.13+) and yields an
    // implicit that cannot be referred to by name.
    private implicit val executionContext = Core.system.dispatcher

    // Defining the routes for the service.
    // All responses allow cross-origin access; everything is versioned under /v1.
    val routes =
        respondWithHeader(RawHeader("Access-Control-Allow-Origin", "*")) {
            pathPrefix("v1") {
                new StatusService().route ~
                new SessionService().route ~
                new UserService().route ~
                new DataspaceRoleService().route ~
                new ResourceService().route ~
                new DataspaceService().route
            }
        }

    // Creating the service actor that dispatches HTTP requests to the routes above
    val rootService = Core.system.actorOf(Props(new RoutedHttpService(routes)))

    // Touching the lazy member forces the collector actor to start with the service
    Core.collectorActor

    // Binding port 42042 on all interfaces to our server
    // (previous comment incorrectly said 8080)
    IO(Http)(Core.system) ! Http.Bind(rootService, "0.0.0.0", port = 42042)
}
| ivan-cukic/litef-conductor | src/main/scala/core/Rest.scala | Scala | apache-2.0 | 1,873 |
/*
* Copyright 2014 - 2015 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package slamdata.engine.std
import slamdata.Predef._
import scalaz._
import slamdata.engine._
import slamdata.engine.fp._
trait IdentityLib extends Library {
  import Type._
  import Validation.{success, failure}

  /** Operation that discards all dimensional information from its single argument. */
  val Squash = Squashing(
    "SQUASH",
    "Squashes all dimensional information",
    List(Top),
    noSimplification,
    partialTyper { case x :: Nil => x },
    tpe => success(List(tpe)))

  /** Converts a string into a backend-specific object identifier; constant-folds string literals. */
  val ToId = Mapping(
    "oid",
    "Converts a string to a (backend-specific) object identifier.",
    List(Type.Str),
    noSimplification,
    partialTyper {
      case Type.Const(Data.Str(str)) :: Nil => Type.Const(Data.Id(str))
      case Type.Str :: Nil                  => Type.Id
    },
    Type.typecheck(_, Type.Id) map κ(List(Type.Str)))

  /** Every function exported by this library. */
  val functions = List(Squash, ToId)
}
/** Default concrete instance of [[IdentityLib]] for direct use. */
object IdentityLib extends IdentityLib
| wemrysi/quasar | core/src/main/scala/slamdata/engine/std/identity.scala | Scala | apache-2.0 | 1,441 |
package models
import akka.actor.ActorRef
/** Trait representing an order for a particular security.
*
* The OrderLike trait should be mixed in with each specific type of order
* (i.e., ask orders, bid orders, limit orders, market orders, etc).
*
*/
trait OrderLike {
  /** Whether the order is buy (true) or sell (false). */
  def buy: Boolean
  /** The unique identifier of the security. */
  def instrument: SecurityLike
  /** The quantity being bought or sold. */
  def quantity: Double
  /** Orders will often need to be split during the matching process.
    * Presumably returns a copy of this order carrying `newQuantity` — confirm
    * with concrete implementations. */
  def split(newQuantity: Double): OrderLike
  /** String representation of an order.
    * (Redeclared abstract here to document that implementations should override it.) */
  def toString: String
  /** The trading party for the order. */
  def tradingPartyRef: ActorRef
}
| davidrpugh/play-securities-exchange | app/models/OrderLike.scala | Scala | apache-2.0 | 771 |
/*
* Copyright 2016 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala.gridfs
import java.nio.ByteBuffer
import com.mongodb.async.SingleResultCallback
import com.mongodb.async.client.gridfs.{GridFSDownloadStream => JGridFSDownloadStream}
import org.mongodb.scala.internal.ObservableHelper.{observe, observeCompleted, observeInt}
import org.mongodb.scala.{Completed, Observable}
/**
* A GridFS InputStream for downloading data from GridFS
*
* Provides the `GridFSFile` for the file to being downloaded as well as the `read` methods of a `AsyncInputStream`
*
* @since 1.2
*/
case class GridFSDownloadStream(private val wrapped: JGridFSDownloadStream) extends AsyncInputStream {

  /**
   * Gets the corresponding GridFSFile for the file being downloaded
   *
   * @return a Observable with a single element containing the corresponding GridFSFile for the file being downloaded
   */
  def gridFSFile(): Observable[GridFSFile] =
    observe((callback: SingleResultCallback[GridFSFile]) => wrapped.getGridFSFile(callback))

  /**
   * Sets the number of chunks to return per batch.
   *
   * Can be used to control the memory consumption of this InputStream: the smaller the batch size,
   * the lower the memory consumption and the higher the latency.
   *
   * @param batchSize the batch size
   * @return this
   * @see [[http://http://docs.mongodb.org/manual/reference/method/cursor.batchSize/#cursor.batchSize Batch Size]]
   */
  def batchSize(batchSize: Int): GridFSDownloadStream = {
    wrapped.batchSize(batchSize)
    this
  }

  /**
   * Reads a sequence of bytes from this stream into the given buffer.
   *
   * @param dst the destination buffer
   * @return an Observable with a single element indicating total number of bytes read into the buffer, or
   *         `-1` if there is no more data because the end of the stream has been reached.
   */
  override def read(dst: ByteBuffer): Observable[Int] =
    observeInt((callback: SingleResultCallback[java.lang.Integer]) => wrapped.read(dst, callback))

  /**
   * Closes the input stream
   *
   * @return a Observable with a single element indicating when the operation has completed
   */
  override def close(): Observable[Completed] =
    observeCompleted((callback: SingleResultCallback[Void]) => wrapped.close(callback))
}
| jCalamari/mongo-scala-driver | driver/src/main/scala/org/mongodb/scala/gridfs/GridFSDownloadStream.scala | Scala | apache-2.0 | 2,768 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.algebird
import java.lang.{ Integer => JInt, Short => JShort, Long => JLong, Float => JFloat, Double => JDouble, Boolean => JBool }
import java.util.{ List => JList, Map => JMap }
import scala.annotation.implicitNotFound
import collection.GenTraversable
/**
* Simple implementation of a Monad type-class.
* Subclasses only need to override apply and flatMap, but they should override map,
* join, joinWith, and sequence if there are better implementations.
*
* Laws Monads must follow:
* identities:
* flatMap(apply(x))(fn) == fn(x)
* flatMap(m)(apply _) == m
* associativity on flatMap (you can either flatMap f first, or f to g:
* flatMap(flatMap(m)(f))(g) == flatMap(m) { x => flatMap(f(x))(g) }
*/
@implicitNotFound(msg = "Cannot find Monad type class for ${M}")
trait Monad[M[_]] extends Applicative[M] {
  def flatMap[T, U](m: M[T])(fn: (T) => M[U]): M[U]

  // Default map in terms of flatMap: lift the mapped value back into M via apply.
  override def map[T, U](m: M[T])(fn: (T) => U): M[U] =
    flatMap(m) { t => apply(fn(t)) }

  // Default join in terms of flatMap + map: pair each value of mt with each of mu.
  override def join[T, U](mt: M[T], mu: M[U]): M[(T, U)] =
    flatMap(mt) { t => map(mu) { u => (t, u) } }
}
/**
 * For use from Java/minimizing code bloat in scala:
 * a concrete abstract class that Java code can extend directly,
 * since Java cannot extend a Scala trait with implemented members.
 */
abstract class AbstractMonad[M[_]] extends Monad[M]
/**
* Follows the type-class pattern for the Monad trait
*/
object Monad {
  /** Get the Monad for a type, e.g: Monad[List] */
  def apply[M[_]](implicit monad: Monad[M]): Monad[M] = monad

  /** Static forwarders so callers can write Monad.flatMap(m)(fn) without an instance in hand. */
  def flatMap[M[_], T, U](m: M[T])(fn: (T) => M[U])(implicit monad: Monad[M]) = monad.flatMap(m)(fn)
  def map[M[_], T, U](m: M[T])(fn: (T) => U)(implicit monad: Monad[M]) = monad.map(m)(fn)

  /**
   * Monadic left fold: threads `acc` through `xs`, where each step produces its
   * result inside M. Note: NOT tail-recursive (each step goes through flatMap),
   * so very long inputs may overflow the stack for strict monads.
   */
  def foldM[M[_], T, U](acc: T, xs: GenTraversable[U])(fn: (T, U) => M[T])(implicit monad: Monad[M]): M[T] =
    if (xs.isEmpty)
      monad.apply(acc)
    else
      monad.flatMap(fn(acc, xs.head)){ t: T => foldM(t, xs.tail)(fn) }

  // Some instances of the Monad typeclass (case for a macro):
  implicit val list: Monad[List] = new Monad[List] {
    def apply[T](v: T) = List(v);
    def flatMap[T, U](m: List[T])(fn: (T) => List[U]) = m.flatMap(fn)
  }
  implicit val option: Monad[Option] = new Monad[Option] {
    def apply[T](v: T) = Option(v);
    def flatMap[T, U](m: Option[T])(fn: (T) => Option[U]) = m.flatMap(fn)
  }
  implicit val some: Monad[Some] = new Monad[Some] {
    def apply[T](v: T) = Some(v);
    def flatMap[T, U](m: Some[T])(fn: (T) => Some[U]) = fn(m.get)
  }
  implicit val vector: Monad[Vector] = new Monad[Vector] {
    def apply[T](v: T) = Vector(v);
    def flatMap[T, U](m: Vector[T])(fn: (T) => Vector[U]) = m.flatMap(fn)
  }
  implicit val set: Monad[Set] = new Monad[Set] {
    def apply[T](v: T) = Set(v);
    def flatMap[T, U](m: Set[T])(fn: (T) => Set[U]) = m.flatMap(fn)
  }
  implicit val seq: Monad[Seq] = new Monad[Seq] {
    def apply[T](v: T) = Seq(v);
    def flatMap[T, U](m: Seq[T])(fn: (T) => Seq[U]) = m.flatMap(fn)
  }
  implicit val indexedseq: Monad[IndexedSeq] = new Monad[IndexedSeq] {
    def apply[T](v: T) = IndexedSeq(v);
    def flatMap[T, U](m: IndexedSeq[T])(fn: (T) => IndexedSeq[U]) = m.flatMap(fn)
  }

  // Set up the syntax magic (allow .pure[Int] syntax and flatMap in for):
  // import Monad.{pureOp, operators} to get
  // Explicit result types added: implicit defs without them are fragile to
  // inference changes and are disallowed by the Scala style guide (and Scala 3).
  implicit def pureOp[A](a: A): PureOp[A] = new PureOp(a)
  implicit def operators[A, M[_]](m: M[A])(implicit monad: Monad[M]): MonadOperators[A, M] =
    new MonadOperators(m)(monad)
}
/**
 * This enrichment allows us to use our Monad instances in for expressions:
 * if (import Monad._) has been done
 */
class MonadOperators[A, M[_]](m: M[A])(implicit monad: Monad[M]) extends ApplicativeOperators[A, M](m) {
  // flatMap is what enables multi-generator for-comprehensions; map comes from ApplicativeOperators.
  def flatMap[U](fn: (A) => M[U]): M[U] = monad.flatMap(m)(fn)
}
// This is a Semigroup, for all Monads.
class MonadSemigroup[T, M[_]](implicit monad: Monad[M], sg: Semigroup[T])
  extends Semigroup[M[T]] {
  import Monad.operators
  // Explicitly desugared form of: for (lv <- l; rv <- r) yield sg.plus(lv, rv)
  def plus(l: M[T], r: M[T]) = l.flatMap { lv => r.map { rv => sg.plus(lv, rv) } }
}
// This is a Monoid, for all Monads.
class MonadMonoid[T, M[_]](implicit monad: Monad[M], mon: Monoid[T])
  extends MonadSemigroup[T, M] with Monoid[M[T]] {
  // Lazily lift the underlying monoid's identity into the monad.
  lazy val zero: M[T] = monad.apply(mon.zero)
}
// Group, Ring, and Field ARE NOT AUTOMATIC. You have to check that the laws hold for your Monad.
class MonadGroup[T, M[_]](implicit monad: Monad[M], grp: Group[T])
  extends MonadMonoid[T, M] with Group[M[T]] {
  import Monad.operators
  // Negate pointwise inside the monad.
  override def negate(v: M[T]) = v.map(grp.negate)
  // Desugared form of: for (lv <- l; rv <- r) yield grp.minus(lv, rv)
  override def minus(l: M[T], r: M[T]) = l.flatMap { lv => r.map { rv => grp.minus(lv, rv) } }
}
class MonadRing[T, M[_]](implicit monad: Monad[M], ring: Ring[T])
  extends MonadGroup[T, M] with Ring[M[T]] {
  import Monad.operators
  // Lazily lift the underlying ring's multiplicative identity into the monad.
  lazy val one: M[T] = monad.apply(ring.one)
  // Desugared form of: for (lv <- l; rv <- r) yield ring.times(lv, rv)
  def times(l: M[T], r: M[T]) = l.flatMap { lv => r.map { rv => ring.times(lv, rv) } }
}
class MonadField[T, M[_]](implicit monad: Monad[M], fld: Field[T])
  extends MonadRing[T, M] with Field[M[T]] {
  import Monad.operators
  // Invert pointwise inside the monad.
  override def inverse(v: M[T]) = v.map(fld.inverse)
  // Desugared form of: for (lv <- l; rv <- r) yield fld.div(lv, rv)
  override def div(l: M[T], r: M[T]) = l.flatMap { lv => r.map { rv => fld.div(lv, rv) } }
}
| jinlee/algebird | algebird-core/src/main/scala/com/twitter/algebird/Monad.scala | Scala | apache-2.0 | 5,567 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst
import java.lang.{Iterable => JavaIterable}
import java.math.{BigDecimal => JavaBigDecimal}
import java.math.{BigInteger => JavaBigInteger}
import java.sql.{Date, Timestamp}
import java.time.{Instant, LocalDate}
import java.util.{Map => JavaMap}
import javax.annotation.Nullable
import scala.language.existentials
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
/**
* Functions to convert Scala types to Catalyst types and vice versa.
*/
object CatalystTypeConverters {
// The Predef.Map is scala.collection.immutable.Map.
// Since the map values can be mutable, we explicitly import scala.collection.Map at here.
import scala.collection.Map
private[sql] def isPrimitive(dataType: DataType): Boolean = {
dataType match {
case BooleanType => true
case ByteType => true
case ShortType => true
case IntegerType => true
case LongType => true
case FloatType => true
case DoubleType => true
case _ => false
}
}
private def getConverterForType(dataType: DataType): CatalystTypeConverter[Any, Any, Any] = {
val converter = dataType match {
case udt: UserDefinedType[_] => UDTConverter(udt)
case arrayType: ArrayType => ArrayConverter(arrayType.elementType)
case mapType: MapType => MapConverter(mapType.keyType, mapType.valueType)
case structType: StructType => StructConverter(structType)
case StringType => StringConverter
case DateType if SQLConf.get.datetimeJava8ApiEnabled => LocalDateConverter
case DateType => DateConverter
case TimestampType if SQLConf.get.datetimeJava8ApiEnabled => InstantConverter
case TimestampType => TimestampConverter
case dt: DecimalType => new DecimalConverter(dt)
case BooleanType => BooleanConverter
case ByteType => ByteConverter
case ShortType => ShortConverter
case IntegerType => IntConverter
case LongType => LongConverter
case FloatType => FloatConverter
case DoubleType => DoubleConverter
case dataType: DataType => IdentityConverter(dataType)
}
converter.asInstanceOf[CatalystTypeConverter[Any, Any, Any]]
}
/**
* Converts a Scala type to its Catalyst equivalent (and vice versa).
*
* @tparam ScalaInputType The type of Scala values that can be converted to Catalyst.
* @tparam ScalaOutputType The type of Scala values returned when converting Catalyst to Scala.
* @tparam CatalystType The internal Catalyst type used to represent values of this Scala type.
*/
private abstract class CatalystTypeConverter[ScalaInputType, ScalaOutputType, CatalystType]
extends Serializable {
/**
* Converts a Scala type to its Catalyst equivalent while automatically handling nulls
* and Options.
*/
final def toCatalyst(@Nullable maybeScalaValue: Any): CatalystType = {
if (maybeScalaValue == null) {
null.asInstanceOf[CatalystType]
} else if (maybeScalaValue.isInstanceOf[Option[ScalaInputType]]) {
val opt = maybeScalaValue.asInstanceOf[Option[ScalaInputType]]
if (opt.isDefined) {
toCatalystImpl(opt.get)
} else {
null.asInstanceOf[CatalystType]
}
} else {
toCatalystImpl(maybeScalaValue.asInstanceOf[ScalaInputType])
}
}
/**
* Given a Catalyst row, convert the value at column `column` to its Scala equivalent.
*/
final def toScala(row: InternalRow, column: Int): ScalaOutputType = {
if (row.isNullAt(column)) null.asInstanceOf[ScalaOutputType] else toScalaImpl(row, column)
}
/**
* Convert a Catalyst value to its Scala equivalent.
*/
def toScala(@Nullable catalystValue: CatalystType): ScalaOutputType
/**
* Converts a Scala value to its Catalyst equivalent.
* @param scalaValue the Scala value, guaranteed not to be null.
* @return the Catalyst value.
*/
protected def toCatalystImpl(scalaValue: ScalaInputType): CatalystType
/**
* Given a Catalyst row, convert the value at column `column` to its Scala equivalent.
* This method will only be called on non-null columns.
*/
protected def toScalaImpl(row: InternalRow, column: Int): ScalaOutputType
}
private case class IdentityConverter(dataType: DataType)
extends CatalystTypeConverter[Any, Any, Any] {
override def toCatalystImpl(scalaValue: Any): Any = scalaValue
override def toScala(catalystValue: Any): Any = catalystValue
override def toScalaImpl(row: InternalRow, column: Int): Any = row.get(column, dataType)
}
private case class UDTConverter[A >: Null](
udt: UserDefinedType[A]) extends CatalystTypeConverter[A, A, Any] {
// toCatalyst (it calls toCatalystImpl) will do null check.
override def toCatalystImpl(scalaValue: A): Any = udt.serialize(scalaValue)
override def toScala(catalystValue: Any): A = {
if (catalystValue == null) null else udt.deserialize(catalystValue)
}
override def toScalaImpl(row: InternalRow, column: Int): A =
toScala(row.get(column, udt.sqlType))
}
/** Converter for arrays, sequences, and Java iterables. */
private case class ArrayConverter(
elementType: DataType) extends CatalystTypeConverter[Any, Seq[Any], ArrayData] {
private[this] val elementConverter = getConverterForType(elementType)
override def toCatalystImpl(scalaValue: Any): ArrayData = {
scalaValue match {
case a: Array[_] =>
new GenericArrayData(a.map(elementConverter.toCatalyst))
case s: scala.collection.Seq[_] =>
new GenericArrayData(s.map(elementConverter.toCatalyst).toArray)
case i: JavaIterable[_] =>
val iter = i.iterator
val convertedIterable = scala.collection.mutable.ArrayBuffer.empty[Any]
while (iter.hasNext) {
val item = iter.next()
convertedIterable += elementConverter.toCatalyst(item)
}
new GenericArrayData(convertedIterable.toArray)
case other => throw new IllegalArgumentException(
s"The value (${other.toString}) of the type (${other.getClass.getCanonicalName}) "
+ s"cannot be converted to an array of ${elementType.catalogString}")
}
}
override def toScala(catalystValue: ArrayData): Seq[Any] = {
if (catalystValue == null) {
null
} else if (isPrimitive(elementType)) {
catalystValue.toArray[Any](elementType)
} else {
val result = new Array[Any](catalystValue.numElements())
catalystValue.foreach(elementType, (i, e) => {
result(i) = elementConverter.toScala(e)
})
result
}
}
override def toScalaImpl(row: InternalRow, column: Int): Seq[Any] =
toScala(row.getArray(column))
}
private case class MapConverter(
keyType: DataType,
valueType: DataType)
extends CatalystTypeConverter[Any, Map[Any, Any], MapData] {
private[this] val keyConverter = getConverterForType(keyType)
private[this] val valueConverter = getConverterForType(valueType)
override def toCatalystImpl(scalaValue: Any): MapData = {
val keyFunction = (k: Any) => keyConverter.toCatalyst(k)
val valueFunction = (k: Any) => valueConverter.toCatalyst(k)
scalaValue match {
case map: Map[_, _] => ArrayBasedMapData(map, keyFunction, valueFunction)
case javaMap: JavaMap[_, _] => ArrayBasedMapData(javaMap, keyFunction, valueFunction)
case other => throw new IllegalArgumentException(
s"The value (${other.toString}) of the type (${other.getClass.getCanonicalName}) "
+ "cannot be converted to a map type with "
+ s"key type (${keyType.catalogString}) and value type (${valueType.catalogString})")
}
}
override def toScala(catalystValue: MapData): Map[Any, Any] = {
if (catalystValue == null) {
null
} else {
val keys = catalystValue.keyArray().toArray[Any](keyType)
val values = catalystValue.valueArray().toArray[Any](valueType)
val convertedKeys =
if (isPrimitive(keyType)) keys else keys.map(keyConverter.toScala)
val convertedValues =
if (isPrimitive(valueType)) values else values.map(valueConverter.toScala)
convertedKeys.zip(convertedValues).toMap
}
}
override def toScalaImpl(row: InternalRow, column: Int): Map[Any, Any] =
toScala(row.getMap(column))
}
private case class StructConverter(
structType: StructType) extends CatalystTypeConverter[Any, Row, InternalRow] {
private[this] val converters = structType.fields.map { f => getConverterForType(f.dataType) }
override def toCatalystImpl(scalaValue: Any): InternalRow = scalaValue match {
case row: Row =>
val ar = new Array[Any](row.size)
var idx = 0
while (idx < row.size) {
ar(idx) = converters(idx).toCatalyst(row(idx))
idx += 1
}
new GenericInternalRow(ar)
case p: Product =>
val ar = new Array[Any](structType.size)
val iter = p.productIterator
var idx = 0
while (idx < structType.size) {
ar(idx) = converters(idx).toCatalyst(iter.next())
idx += 1
}
new GenericInternalRow(ar)
case other => throw new IllegalArgumentException(
s"The value (${other.toString}) of the type (${other.getClass.getCanonicalName}) "
+ s"cannot be converted to ${structType.catalogString}")
}
override def toScala(row: InternalRow): Row = {
if (row == null) {
null
} else {
val ar = new Array[Any](row.numFields)
var idx = 0
while (idx < row.numFields) {
ar(idx) = converters(idx).toScala(row, idx)
idx += 1
}
new GenericRowWithSchema(ar, structType)
}
}
override def toScalaImpl(row: InternalRow, column: Int): Row =
toScala(row.getStruct(column, structType.size))
}
private object StringConverter extends CatalystTypeConverter[Any, String, UTF8String] {
override def toCatalystImpl(scalaValue: Any): UTF8String = scalaValue match {
case str: String => UTF8String.fromString(str)
case utf8: UTF8String => utf8
case chr: Char => UTF8String.fromString(chr.toString)
case ac: Array[Char] => UTF8String.fromString(String.valueOf(ac))
case other => throw new IllegalArgumentException(
s"The value (${other.toString}) of the type (${other.getClass.getCanonicalName}) "
+ s"cannot be converted to the string type")
}
override def toScala(catalystValue: UTF8String): String =
if (catalystValue == null) null else catalystValue.toString
override def toScalaImpl(row: InternalRow, column: Int): String =
row.getUTF8String(column).toString
}
private object DateConverter extends CatalystTypeConverter[Date, Date, Any] {
override def toCatalystImpl(scalaValue: Date): Int = DateTimeUtils.fromJavaDate(scalaValue)
override def toScala(catalystValue: Any): Date =
if (catalystValue == null) null else DateTimeUtils.toJavaDate(catalystValue.asInstanceOf[Int])
override def toScalaImpl(row: InternalRow, column: Int): Date =
DateTimeUtils.toJavaDate(row.getInt(column))
}
private object LocalDateConverter extends CatalystTypeConverter[LocalDate, LocalDate, Any] {
override def toCatalystImpl(scalaValue: LocalDate): Int = {
DateTimeUtils.localDateToDays(scalaValue)
}
override def toScala(catalystValue: Any): LocalDate = {
if (catalystValue == null) null
else DateTimeUtils.daysToLocalDate(catalystValue.asInstanceOf[Int])
}
override def toScalaImpl(row: InternalRow, column: Int): LocalDate =
DateTimeUtils.daysToLocalDate(row.getInt(column))
}
private object TimestampConverter extends CatalystTypeConverter[Timestamp, Timestamp, Any] {
override def toCatalystImpl(scalaValue: Timestamp): Long =
DateTimeUtils.fromJavaTimestamp(scalaValue)
override def toScala(catalystValue: Any): Timestamp =
if (catalystValue == null) null
else DateTimeUtils.toJavaTimestamp(catalystValue.asInstanceOf[Long])
override def toScalaImpl(row: InternalRow, column: Int): Timestamp =
DateTimeUtils.toJavaTimestamp(row.getLong(column))
}
private object InstantConverter extends CatalystTypeConverter[Instant, Instant, Any] {
override def toCatalystImpl(scalaValue: Instant): Long =
DateTimeUtils.instantToMicros(scalaValue)
override def toScala(catalystValue: Any): Instant =
if (catalystValue == null) null
else DateTimeUtils.microsToInstant(catalystValue.asInstanceOf[Long])
override def toScalaImpl(row: InternalRow, column: Int): Instant =
DateTimeUtils.microsToInstant(row.getLong(column))
}
private class DecimalConverter(dataType: DecimalType)
extends CatalystTypeConverter[Any, JavaBigDecimal, Decimal] {
private val nullOnOverflow = !SQLConf.get.ansiEnabled
override def toCatalystImpl(scalaValue: Any): Decimal = {
val decimal = scalaValue match {
case d: BigDecimal => Decimal(d)
case d: JavaBigDecimal => Decimal(d)
case d: JavaBigInteger => Decimal(d)
case d: Decimal => d
case other => throw new IllegalArgumentException(
s"The value (${other.toString}) of the type (${other.getClass.getCanonicalName}) "
+ s"cannot be converted to ${dataType.catalogString}")
}
decimal.toPrecision(dataType.precision, dataType.scale, Decimal.ROUND_HALF_UP, nullOnOverflow)
}
override def toScala(catalystValue: Decimal): JavaBigDecimal = {
if (catalystValue == null) null
else catalystValue.toJavaBigDecimal
}
override def toScalaImpl(row: InternalRow, column: Int): JavaBigDecimal =
row.getDecimal(column, dataType.precision, dataType.scale).toJavaBigDecimal
}
private abstract class PrimitiveConverter[T] extends CatalystTypeConverter[T, Any, Any] {
final override def toScala(catalystValue: Any): Any = catalystValue
final override def toCatalystImpl(scalaValue: T): Any = scalaValue
}
private object BooleanConverter extends PrimitiveConverter[Boolean] {
override def toScalaImpl(row: InternalRow, column: Int): Boolean = row.getBoolean(column)
}
private object ByteConverter extends PrimitiveConverter[Byte] {
override def toScalaImpl(row: InternalRow, column: Int): Byte = row.getByte(column)
}
private object ShortConverter extends PrimitiveConverter[Short] {
override def toScalaImpl(row: InternalRow, column: Int): Short = row.getShort(column)
}
private object IntConverter extends PrimitiveConverter[Int] {
override def toScalaImpl(row: InternalRow, column: Int): Int = row.getInt(column)
}
private object LongConverter extends PrimitiveConverter[Long] {
override def toScalaImpl(row: InternalRow, column: Int): Long = row.getLong(column)
}
private object FloatConverter extends PrimitiveConverter[Float] {
override def toScalaImpl(row: InternalRow, column: Int): Float = row.getFloat(column)
}
private object DoubleConverter extends PrimitiveConverter[Double] {
override def toScalaImpl(row: InternalRow, column: Int): Double = row.getDouble(column)
}
/**
* Creates a converter function that will convert Scala objects to the specified Catalyst type.
* Typical use case would be converting a collection of rows that have the same schema. You will
* call this function once to get a converter, and apply it to every row.
*/
def createToCatalystConverter(dataType: DataType): Any => Any = {
if (isPrimitive(dataType)) {
// Although the `else` branch here is capable of handling inbound conversion of primitives,
// we add some special-case handling for those types here. The motivation for this relates to
// Java method invocation costs: if we have rows that consist entirely of primitive columns,
// then returning the same conversion function for all of the columns means that the call site
// will be monomorphic instead of polymorphic. In microbenchmarks, this actually resulted in
// a measurable performance impact. Note that this optimization will be unnecessary if we
// use code generation to construct Scala Row -> Catalyst Row converters.
def convert(maybeScalaValue: Any): Any = {
if (maybeScalaValue.isInstanceOf[Option[Any]]) {
maybeScalaValue.asInstanceOf[Option[Any]].orNull
} else {
maybeScalaValue
}
}
convert
} else {
getConverterForType(dataType).toCatalyst
}
}
/**
* Creates a converter function that will convert Catalyst types to Scala type.
* Typical use case would be converting a collection of rows that have the same schema. You will
* call this function once to get a converter, and apply it to every row.
*/
def createToScalaConverter(dataType: DataType): Any => Any = {
if (isPrimitive(dataType)) {
identity
} else {
getConverterForType(dataType).toScala
}
}
/**
 * Converts Scala objects to Catalyst rows / types.
 *
 * Note: This should be called before doing evaluation on a Row
 * (It does not support UDT)
 * This is used to create an RDD or test results with correct types for Catalyst.
 */
def convertToCatalyst(a: Any): Any = a match {
  case s: String => StringConverter.toCatalyst(s)
  case d: Date => DateConverter.toCatalyst(d)
  case ld: LocalDate => LocalDateConverter.toCatalyst(ld)
  case t: Timestamp => TimestampConverter.toCatalyst(t)
  case i: Instant => InstantConverter.toCatalyst(i)
  case d: BigDecimal => new DecimalConverter(DecimalType(d.precision, d.scale)).toCatalyst(d)
  case d: JavaBigDecimal => new DecimalConverter(DecimalType(d.precision, d.scale)).toCatalyst(d)
  case seq: Seq[Any] => new GenericArrayData(seq.map(convertToCatalyst).toArray)
  case r: Row => InternalRow(r.toSeq.map(convertToCatalyst): _*)
  // Byte arrays are already Catalyst's binary representation, so they pass
  // through unchanged; this case must stay before the generic Array[_] case.
  case arr: Array[Byte] => arr
  case arr: Array[Char] => StringConverter.toCatalyst(arr)
  case arr: Array[_] => new GenericArrayData(arr.map(convertToCatalyst))
  case map: Map[_, _] =>
    ArrayBasedMapData(
      map,
      (key: Any) => convertToCatalyst(key),
      (value: Any) => convertToCatalyst(value))
  // Primitives and anything already in Catalyst form are returned as-is.
  case other => other
}
/**
 * Converts Catalyst types used internally in rows to standard Scala types
 * This method is slow, and for batch conversion you should be using converter
 * produced by createToScalaConverter.
 */
def convertToScala(catalystValue: Any, dataType: DataType): Any = {
  val toScala = createToScalaConverter(dataType)
  toScala(catalystValue)
}
}
| witgo/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystTypeConverters.scala | Scala | apache-2.0 | 19,811 |
package spatial.tests
import org.scalatest.{FlatSpec, Matchers}
import spatial.dsl._
import virtualized._
// Regression app: a single controller wrapped in a data-dependent `if`, forcing
// the compiler to emit an outer switch around exactly one inner controller.
object OuterSwitchTest extends SpatialTest {
  @virtualize def main(): Unit = {
    val in = ArgIn[Int]
    setArg(in, 20)
    val dram = DRAM[Int](32)
    Accel {
      val data = SRAM[Int](32)
      // Data-dependent branch: with in = 20 the condition holds, so the
      // Foreach writes indices 0 until (in + 4) into the SRAM.
      if (in.value <= 28) {
        Sequential.Foreach((in.value+4) by 1){ i => data(i) = i }
      }
      // NOTE(review): all 32 words are stored even though only in+4 may have
      // been written; the tail entries hold unwritten SRAM contents — assumed
      // intentional for this switch-structure test.
      dram(0::32) store data
    }
    printArray(getMem(dram), "dram")
  }
}
// ScalaTest entry point: compiles and runs the app above through the Spatial
// toolchain via runTest().
class SwitchTests extends FlatSpec with Matchers {
  "OuterSwitchTest" should "have one controller in the SwitchCase" in { OuterSwitchTest.runTest() }
}
| stanford-ppl/spatial-lang | spatial/core/test/spatial/tests/SwitchTests.scala | Scala | mit | 640 |
package monocle.std
import monocle.MonocleSuite
import monocle.law.discipline.{IsoTests, PrismTests}
import monocle.law.discipline.function.{EachTests, PossibleTests}
import cats.data.Validated
import scala.annotation.nowarn
// Discipline law-checks for the optics defined in monocle.std.validated,
// targeting cats.data.Validated.
class ValidatedSpec extends MonocleSuite {
  import cats.laws.discipline.arbitrary._

  // Validated[E, A] round-trips with Either[E, A], so the Iso laws must hold.
  checkAll(
    "Validated is isomorphic to Disjunction",
    IsoTests(monocle.std.validated.validationToDisjunction[String, Int])
  )

  // Prisms into the Valid / Invalid constructors.
  checkAll("success", PrismTests(monocle.std.validated.success[String, Int]))
  checkAll("failure", PrismTests(monocle.std.validated.failure[String, Int]))

  checkAll("each Validated", EachTests[Validated[Unit, Int], Int])
  checkAll("possible Validated", PossibleTests[Validated[Unit, Int], Int]): @nowarn
}
| julien-truffaut/Monocle | test/shared/src/test/scala/monocle/std/ValidatedSpec.scala | Scala | mit | 758 |
package spgui.widgets
import sp.domain._
import Logic._
import spgui.communication.BackendCommunication
import scala.util.Try
import sp.erica._
object EricaLogic {
  // Hard-coded sample patient for widget development/testing; the field values
  // mirror the shape of real API_Patient payloads (timestamps are ISO-8601).
  val dummyPatient = API_Patient.Patient(
    "4502085",
    API_Patient.Priority("NotTriaged", "2017-02-01T15:49:19Z"),
    API_Patient.Attended(true, "sarli29", "2017-02-01T15:58:33Z"),
    API_Patient.Location("52", "2017-02-01T15:58:33Z"),
    API_Patient.Team("GUL", "NAKME", "B", "2017-02-01T15:58:33Z"),
    API_Patient.Examination(false, "2017-02-01T15:58:33Z"),
    API_Patient.LatestEvent("OmsKoord", -1, false, "2017-02-01T15:58:33Z"),
    API_Patient.Plan(false, "2017-02-01T15:58:33Z"),
    API_Patient.ArrivalTime("", "2017-02-01T10:01:38Z"),
    API_Patient.Debugging("NAKKK","B","B23"),
    API_Patient.Finished(false, false, "2017-02-01T10:01:38Z")
  )
}
// Helpers for (de)serialising patient events to/from SPMessage envelopes.
object ToAndFrom {
  // Extracts the event payload, if the message body parses as one.
  def eventBody(mess: SPMessage): Option[API_PatientEvent.Event] = mess.getBodyAs[API_PatientEvent.Event]
  // Wraps a header and an event payload into a complete SPMessage.
  def make(h: SPHeader, b: API_PatientEvent.Event): SPMessage = SPMessage.make(h, b)
}
| kristoferB/SP | sperica/frontend/src/main/scala/spgui/widgets/WidgetComm.scala | Scala | mit | 1,059 |
/*
* Accio is a platform to launch computer science experiments.
* Copyright (C) 2016-2018 Vincent Primault <v.primault@ucl.ac.uk>
*
* Accio is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Accio is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Accio. If not, see <http://www.gnu.org/licenses/>.
*/
package fr.cnrs.liris.locapriv.ops
import fr.cnrs.liris.lumos.domain.RemoteFile
import fr.cnrs.liris.accio.sdk._
import fr.cnrs.liris.locapriv.domain.Event
import scala.util.Random
@Op(
  category = "transform",
  help = "Uniformly sample events inside traces.",
  description = "Perform a uniform sampling on traces, keeping each event with a given probability.",
  unstable = true)
case class UniformSamplingOp(
  @Arg(help = "Probability to keep each event")
  probability: Double,
  @Arg(help = "Input dataset")
  data: RemoteFile)
  extends TransformOp[Event] {

  require(probability >= 0 && probability <= 1, s"Probability must be in [0, 1] (got $probability)")

  /**
   * Keeps every event of `trace` independently with probability `probability`.
   * The RNG is seeded per key, so sampling is deterministic for a given trace.
   */
  override protected def transform(key: String, trace: Iterable[Event]): Iterable[Event] = {
    if (probability == 0) {
      // Nothing can survive a zero keep-probability.
      Seq.empty
    } else if (probability == 1) {
      // Everything survives; skip the RNG entirely.
      trace
    } else {
      val random = new Random(seeds(key))
      trace.filter(_ => random.nextDouble() <= probability)
    }
  }
}
} | privamov/accio | accio/java/fr/cnrs/liris/locapriv/ops/UniformSamplingOp.scala | Scala | gpl-3.0 | 1,743 |
package build.unstable.sonicd.source
import akka.actor._
import akka.stream.ActorMaterializer
import akka.stream.actor.ActorPublisher
import akka.stream.actor.ActorPublisherMessage.{Cancel, Request, SubscriptionTimeoutExceeded}
import akka.stream.scaladsl._
import build.unstable.sonic.JsonProtocol._
import build.unstable.sonic.model._
import build.unstable.sonicd.SonicdLogging
import build.unstable.sonicd.source.Composer.{ComposeStrategy, ComposedQuery, ConcatStrategy}
import build.unstable.sonicd.system.actor.SonicdController
import build.unstable.sonicd.system.actor.SonicdController.{SonicdQuery, UnauthorizedException}
import build.unstable.tylog.Variation
import org.slf4j.event.Level
import spray.json._
import scala.collection.mutable
import scala.util.matching.Regex
/**
 * Source that runs several configured sub-queries ("queries" config entry) and
 * republishes their combined output as one stream, either merged or
 * concatenated depending on the configured strategy.
 */
class Composer(query: Query, actorContext: ActorContext, context: RequestContext)
  extends SonicdSource(query, actorContext, context) with SonicdLogging {

  import Composer._

  // Optional token: each occurrence of it in a sub-query's text is replaced
  // with the outer query's text when the config is parsed.
  val injectQueryPlaceholder = getOption[String]("inject-query-placeholder")

  implicit val combinedQueryJsonFormat: JsonFormat[ComposedQuery] =
    Composer.getComposedQueryJsonFormat(injectQueryPlaceholder, query.query, context)

  val queries = getConfig[List[ComposedQuery]]("queries")

  assert(queries.nonEmpty, "expected at least one query in `queries` property")
  assert(queries.forall(_.priority >= 0), "'priority' field in query config must be an unsigned integer")

  // Max number of messages buffered from not-yet-allowed priorities before the
  // stream is failed.
  val bufferSize = getOption[Int]("buffer-size").getOrElse(2048)
  val strategy = getOption[ComposeStrategy]("strategy").getOrElse(MergeStrategy)
  // When true, the first sub-stream error terminates the whole combined stream.
  val failFast = getOption[Boolean]("fail-fast").getOrElse(true)

  val actorMaterializer = ActorMaterializer.create(actorContext)

  val publisher: Props = {
    Props(classOf[ComposerPublisher], queries, bufferSize, strategy, failFast,
      context, actorMaterializer)
  }
}
object Composer {

  /** A sub-query together with its scheduling priority and optional actor name. */
  case class ComposedQuery(query: SonicdQuery, priority: Int, name: Option[String] = None)

  /** How the sub-streams are combined: interleaved (merge) or back-to-back (concat). */
  sealed trait ComposeStrategy

  case object MergeStrategy extends ComposeStrategy

  case object ConcatStrategy extends ComposeStrategy

  // JSON codec for the "strategy" config value ("merge" | "concat").
  implicit val strategyJsonFormat: RootJsonFormat[ComposeStrategy] = new RootJsonFormat[ComposeStrategy] {
    override def read(json: JsValue): ComposeStrategy = json match {
      case JsString("concat") ⇒ ConcatStrategy
      case JsString("merge") ⇒ MergeStrategy
      case JsString(a) ⇒ throw new Exception(
        s"possible values for strategy are `merge` and `concat` found: $a")
      case e ⇒ throw new Exception(s"expected JsString found: $e")
    }

    override def write(obj: ComposeStrategy): JsValue = {
      obj match {
        case ConcatStrategy ⇒ JsString("concat")
        case MergeStrategy ⇒ JsString("merge")
      }
    }
  }

  /**
   * JSON codec for [[ComposedQuery]]. When `placeholder` is defined, every
   * occurrence of it (interpreted as a regex) in a sub-query's text is replaced
   * with the outer `query` text.
   */
  def getComposedQueryJsonFormat(placeholder: Option[String], query: String, context: RequestContext) = {
    new RootJsonFormat[ComposedQuery] {
      override def read(json: JsValue): ComposedQuery = {
        // Parse the JSON object once instead of re-parsing it per field
        // (the original computed `json.asJsObject().fields` twice).
        val fields = json.asJsObject().fields
        val priority = fields.get("priority").flatMap(_.convertTo[Option[Int]]).getOrElse(0)
        val name = fields.get("name").flatMap(_.convertTo[Option[String]])
        val q =
          fields.getOrElse("query", throw getException("query")).convertTo[String]
        // Quote the injected query so that `$` and `\` in it are taken
        // literally instead of as regex replacement references.
        val replaced: String = placeholder.map { place ⇒
          val rgx = new Regex(place)
          rgx.replaceAllIn(q, Regex.quoteReplacement(query))
        }.getOrElse(q)
        val config: JsValue = fields.getOrElse("config", throw getException("config"))
        ComposedQuery(SonicdQuery(new Query(None, Some(context.traceId), None, replaced, config)), priority, name)
      }

      override def write(obj: ComposedQuery): JsValue = {
        JsObject(Map(
          "priority" → JsNumber(obj.priority),
          "query" → JsString(obj.query.query.query),
          "name" → obj.name.map(JsString.apply).getOrElse(JsNull),
          "config" → obj.query.query.config
        ))
      }
    }
  }

  // Error for a required JSON field that is absent from the config object.
  private def getException(s: String): Exception = {
    new Exception(s"expected field `$s` but no field with that name found")
  }
}
/**
 * Actor publisher that materializes all configured sub-queries, combines their
 * SonicMessage streams with the configured strategy and republishes the result
 * downstream. Messages from priorities lower than the currently allowed one are
 * deferred (up to `bufferSize`) until all higher-priority streams complete.
 */
class ComposerPublisher(queries: List[ComposedQuery], bufferSize: Int, strategy: ComposeStrategy,
                        failFast: Boolean)(implicit ctx: RequestContext, materializer: ActorMaterializer)
  extends ActorPublisher[SonicMessage] with SonicdLogging with SonicdPublisher {

  // Protocol of the actor-ref sink feeding this publisher (back-pressure handshake).
  case object Ack

  case object Started

  case object Completed

  /* OVERRIDES */

  @throws[Exception](classOf[Exception])
  override def postStop(): Unit = {
    log.debug("stopping combinator publisher of '{}'", ctx.traceId)
  }

  @throws[Exception](classOf[Exception])
  override def preStart(): Unit = {
    log.debug("starting combinator publisher of '{}'", ctx.traceId)
  }

  override def unhandled(message: Any): Unit = {
    log.warning("recv undhandled message {}", message)
  }

  /* HELPERS */

  // Drains the internal buffer while the subscriber has outstanding demand.
  def tryPushDownstream() {
    while (isActive && totalDemand > 0 && buffer.nonEmpty) {
      onNext(buffer.dequeue())
    }
  }

  // Sorts (source, priority) pairs so that higher priorities come first.
  implicit val priorityMessageOrdering =
    new Ordering[(Source[(build.unstable.sonic.model.SonicMessage, Int), akka.NotUsed], Int)] {
      override def compare(x: (Source[(build.unstable.sonic.model.SonicMessage, Int), akka.NotUsed], Int),
                           y: (Source[(build.unstable.sonic.model.SonicMessage, Int), akka.NotUsed], Int)): Int = {
        if (x._2 < y._2) 1
        else if (x._2 > y._2) -1
        else 0
      }
    }

  // Folds a sub-stream's progress into the combined progress; returns true iff
  // the sub-stream is Running and the computed percentage is a finite number.
  def updateProgress(subStreamProgress: QueryProgress): Boolean =
    subStreamProgress.status == QueryProgress.Running && {
      val prog = 1.0 * subStreamProgress.progress / streamsLeft / subStreamProgress.total.getOrElse(100d) * 100
      !prog.isNaN && !prog.isInfinite && {
        progress = QueryProgress(QueryProgress.Running, prog, Some(100d), Some("%"))
        true
      }
    }

  /* STATE */

  // Outgoing buffer; pre-seeded with the StreamStarted marker.
  val buffer: mutable.Queue[SonicMessage] = mutable.Queue(StreamStarted(ctx.traceId))
  // Messages from priorities that are not yet allowed to emit.
  val deferred = mutable.Queue.empty[(SonicMessage, Int)]
  // True when upstream is owed an Ack that must wait for downstream demand.
  var pendingAck: Boolean = false
  var progress: QueryProgress = _
  var streamsLeft: Int = _
  // priority -> number of still-running streams at that priority.
  var streamsByPriority: mutable.Map[Int, Int] = _
  var allowedPriority: Int = _

  /* BEHAVIOUR */

  def commonReceive: Receive = {
    case Completed ⇒ context.become(terminating(StreamCompleted.success))
    case Status.Failure(e) ⇒ context.become(terminating(StreamCompleted.error(ctx.traceId, e)))
    case Cancel ⇒
      log.debug("client canceled")
      context.stop(self)
  }

  def terminating(done: StreamCompleted): Receive = {
    tryPushDownstream()
    if (buffer.isEmpty && isActive && totalDemand > 0) {
      onNext(done)
      onCompleteThenStop()
    }

    {
      // Each new Request re-enters terminating, retrying the flush/complete above.
      case r: Request ⇒ terminating(done)
    }
  }

  // Acks upstream immediately when there is demand; otherwise records that an
  // ack is owed once demand arrives.
  def sendAckMaybe(upstream: ActorRef) {
    if (totalDemand > 0) {
      upstream ! Ack
    } else {
      pendingAck = true
    }
  }

  def materialized(upstream: ActorRef): Receive = commonReceive orElse {
    case Request(n) ⇒
      tryPushDownstream()
      if (totalDemand > 0 && pendingAck) {
        upstream ! Ack
        pendingAck = false
      }

    // Message from a stream whose priority is currently allowed to emit.
    case (m: SonicMessage, priority: Int) if priority >= allowedPriority ⇒
      m match {
        case c: StreamCompleted if failFast && c.error.isDefined ⇒
          context.become(terminating(StreamCompleted.error(c.error.get)))
        case c: StreamCompleted ⇒
          try {
            streamsLeft -= 1
            val left = streamsByPriority(priority)
            if (left == 1) {
              if (streamsLeft > 0) {
                streamsByPriority.remove(priority)
                log.debug("changing allowed priority: previous allowed priority {}", allowedPriority)
                allowedPriority = streamsByPriority.maxBy(_._1)._1
                // Replay messages that were deferred while their priority was blocked.
                deferred.dequeueAll(_._2 == allowedPriority).foreach(materialized(upstream)(_))
                log.debug("changing allowed priority: next allowed priority {}", allowedPriority)
              } else context.become(terminating(StreamCompleted.success))
            } else streamsByPriority.update(priority, left - 1)
          } catch {
            case e: Exception ⇒
              context.become(terminating(StreamCompleted.error(e)))
          } finally tryPushDownstream()
        case p: QueryProgress ⇒
          if (updateProgress(p)) {
            buffer.enqueue(progress)
            tryPushDownstream()
          }
        case t: TypeMetadata ⇒
          if (updateMeta(t)) {
            buffer.enqueue(meta.asInstanceOf[SonicMessage])
            tryPushDownstream()
          }
        case o: OutputChunk ⇒
          buffer.enqueue(m)
          tryPushDownstream()
        case _: StreamStarted ⇒ //ignore
      }
      sendAckMaybe(upstream)

    // Message from a lower priority: park it until its priority is unblocked.
    case (m: SonicMessage, p: Int) ⇒
      sendAckMaybe(upstream)
      deferred.enqueue(m → p)
      if (deferred.size > bufferSize) {
        val e = new Exception(s"reached deferred buffer limit of $bufferSize")
        context.become(terminating(StreamCompleted.error(e)))
      }
  }

  // Graph has been built; waiting for the sink to signal the stream start.
  def waiting: Receive = commonReceive orElse {
    case Request(n) ⇒ tryPushDownstream()
    case Started ⇒
      tryPushDownstream()
      log.debug("materialized combined sources for {}", ctx.traceId)
      sender() ! Ack
      context.become(materialized(sender()))
  }

  override def receive: Receive = commonReceive orElse {

    case SubscriptionTimeoutExceeded ⇒
      log.info("no subscriber in within subs timeout {}", subscriptionTimeout)
      onCompleteThenStop()

    // First demand triggers authorization, materialization and graph assembly.
    case Request(n) ⇒
      log.tylog(Level.DEBUG, ctx.traceId, CombineSources, Variation.Attempt, "client requested first element")
      tryPushDownstream()

      try {
        val ps = queries.map { query ⇒
          if (!SonicdController.isAuthorized(ctx.user, query.query.query.sourceSecurity, ctx.clientAddress))
            throw new UnauthorizedException(ctx.user, ctx.clientAddress)
          val source = SonicdController.getDataSource(
            query.query.query, context, ctx.user, ctx.clientAddress)
          val ref = query.name
            .map(n ⇒ context.actorOf(source.publisher, n))
            .getOrElse(context.actorOf(source.publisher))
          // Tag every message with its query's priority for the routing above.
          Source.fromPublisher[SonicMessage](ActorPublisher.apply(ref))
            .map((_, query.priority)) → query.priority
        }.sorted

        log.debug("sorted queries by priority {}", ps)

        // assign max priority as current(ly streaming)
        val (sources, _current) = ps.unzip
        allowedPriority = _current.head
        streamsLeft = ps.length
        streamsByPriority = _current.foldLeft(mutable.Map.empty[Int, Int]) { (acc, el) ⇒
          acc.updated(el, acc.getOrElse(el, 0) + 1)
        }

        log.trace("streams sources {}", sources)
        log.trace("streams priorities {}", streamsByPriority)

        val merged = if (sources.length > 1) {
          val second = sources(1)
          val tail = sources.slice(2, streamsLeft)
          log.debug("merging {}:{}:{}", sources.head, second, tail)
          Source.combine(sources.head, second, tail: _*)(if (strategy == ConcatStrategy) Concat(_) else Merge(_))
        } else sources.head

        log.debug("combined graphs: {}", merged)
        merged.to(Sink.actorRefWithAck(self, Started, Ack, Completed)).run()
        context.become(waiting)
      } catch {
        case e: Exception ⇒
          context.become(terminating(StreamCompleted.error(e)))
          log.tylog(Level.DEBUG, ctx.traceId, CombineSources, Variation.Failure(e),
            "failed to build {} graph", strategy)
      }
      // NOTE(review): this Success trace is emitted even when the catch branch
      // ran; consider moving it inside the try block.
      log.tylog(Level.DEBUG, ctx.traceId, CombineSources, Variation.Success,
        "successfully built combined {} graph", strategy)
  }
}
| ernestrc/sonicd | server/src/main/scala/build/unstable/sonicd/source/Composer.scala | Scala | mit | 11,826 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import org.apache.spark.sql.catalyst.expressions.{Alias, And, ArrayTransform, CaseWhen, Coalesce, CreateArray, CreateMap, CreateNamedStruct, EqualTo, ExpectsInputTypes, Expression, GetStructField, If, IsNull, KnownFloatingPointNormalized, LambdaFunction, Literal, NamedLambdaVariable, UnaryExpression}
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode}
import org.apache.spark.sql.catalyst.planning.ExtractEquiJoinKeys
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Window}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.types._
/**
* We need to take care of special floating numbers (NaN and -0.0) in several places:
* 1. When compare values, different NaNs should be treated as same, `-0.0` and `0.0` should be
* treated as same.
* 2. In aggregate grouping keys, different NaNs should belong to the same group, -0.0 and 0.0
* should belong to the same group.
* 3. In join keys, different NaNs should be treated as same, `-0.0` and `0.0` should be
* treated as same.
* 4. In window partition keys, different NaNs should belong to the same partition, -0.0 and 0.0
* should belong to the same partition.
*
* Case 1 is fine, as we handle NaN and -0.0 well during comparison. For complex types, we
* recursively compare the fields/elements, so it's also fine.
*
* Case 2, 3 and 4 are problematic, as Spark SQL turns grouping/join/window partition keys into
* binary `UnsafeRow` and compare the binary data directly. Different NaNs have different binary
* representation, and the same thing happens for -0.0 and 0.0.
*
* This rule normalizes NaN and -0.0 in window partition keys, join keys and aggregate grouping
* keys.
*
* Ideally we should do the normalization in the physical operators that compare the
* binary `UnsafeRow` directly. We don't need this normalization if the Spark SQL execution engine
* is not optimized to run on binary data. This rule is created to simplify the implementation, so
* that we have a single place to do normalization, which is more maintainable.
*
* Note that, this rule must be executed at the end of optimizer, because the optimizer may create
* new joins(the subquery rewrite) and new join conditions(the join reorder).
*/
object NormalizeFloatingNumbers extends Rule[LogicalPlan] {

  def apply(plan: LogicalPlan): LogicalPlan = plan match {
    // Single catch-all case: the rule is applied unconditionally to the plan.
    case _ => plan transform {
      case w: Window if w.partitionSpec.exists(p => needNormalize(p)) =>
        // Although the `windowExpressions` may refer to `partitionSpec` expressions, we don't need
        // to normalize the `windowExpressions`, as they are executed per input row and should take
        // the input row as it is.
        w.copy(partitionSpec = w.partitionSpec.map(normalize))

      // Only hash join and sort merge join need the normalization. Here we catch all Joins with
      // join keys, assuming Joins with join keys are always planned as hash join or sort merge
      // join. It's very unlikely that we will break this assumption in the near future.
      case j @ ExtractEquiJoinKeys(_, leftKeys, rightKeys, condition, _, _, _)
        // The analyzer guarantees left and right joins keys are of the same data type. Here we
        // only need to check join keys of one side.
        if leftKeys.exists(k => needNormalize(k)) =>
        val newLeftJoinKeys = leftKeys.map(normalize)
        val newRightJoinKeys = rightKeys.map(normalize)
        // Rebuild the equi-join condition from the normalized key pairs and
        // append the remaining non-equi condition, if any.
        val newConditions = newLeftJoinKeys.zip(newRightJoinKeys).map {
          case (l, r) => EqualTo(l, r)
        } ++ condition
        j.copy(condition = Some(newConditions.reduce(And)))

      // TODO: ideally Aggregate should also be handled here, but its grouping expressions are
      // mixed in its aggregate expressions. It's unreliable to change the grouping expressions
      // here. For now we normalize grouping expressions in `AggUtils` during planning.
    }
  }

  /**
   * Short circuit if the underlying expression is already normalized
   */
  private def needNormalize(expr: Expression): Boolean = expr match {
    case KnownFloatingPointNormalized(_) => false
    case _ => needNormalize(expr.dataType)
  }

  // True when the data type contains a float/double anywhere in its structure.
  private def needNormalize(dt: DataType): Boolean = dt match {
    case FloatType | DoubleType => true
    case StructType(fields) => fields.exists(f => needNormalize(f.dataType))
    case ArrayType(et, _) => needNormalize(et)
    // Currently MapType is not comparable and analyzer should fail earlier if this case happens.
    case _: MapType =>
      throw new IllegalStateException("grouping/join/window partition keys cannot be map type.")
    case _ => false
  }

  // Wraps `expr` so that NaN and -0.0 are canonicalized, recursing through
  // structs and arrays; results are tagged with KnownFloatingPointNormalized
  // so that repeated application is a no-op.
  private[sql] def normalize(expr: Expression): Expression = expr match {
    case _ if !needNormalize(expr) => expr

    case a: Alias =>
      a.withNewChildren(Seq(normalize(a.child)))

    case CreateNamedStruct(children) =>
      CreateNamedStruct(children.map(normalize))

    case CreateArray(children, useStringTypeWhenEmpty) =>
      CreateArray(children.map(normalize), useStringTypeWhenEmpty)

    case CreateMap(children, useStringTypeWhenEmpty) =>
      CreateMap(children.map(normalize), useStringTypeWhenEmpty)

    case _ if expr.dataType == FloatType || expr.dataType == DoubleType =>
      KnownFloatingPointNormalized(NormalizeNaNAndZero(expr))

    case If(cond, trueValue, falseValue) =>
      If(cond, normalize(trueValue), normalize(falseValue))

    case CaseWhen(branches, elseVale) =>
      CaseWhen(branches.map(br => (br._1, normalize(br._2))), elseVale.map(normalize))

    case Coalesce(children) =>
      Coalesce(children.map(normalize))

    case _ if expr.dataType.isInstanceOf[StructType] =>
      // Rebuild the struct field by field, normalizing each one, while
      // preserving top-level nullness of the struct itself.
      val fields = expr.dataType.asInstanceOf[StructType].fieldNames.zipWithIndex.map {
        case (name, i) => Seq(Literal(name), normalize(GetStructField(expr, i)))
      }
      val struct = CreateNamedStruct(fields.flatten.toSeq)
      KnownFloatingPointNormalized(If(IsNull(expr), Literal(null, struct.dataType), struct))

    case _ if expr.dataType.isInstanceOf[ArrayType] =>
      // Normalize every element via a higher-order ArrayTransform.
      val ArrayType(et, containsNull) = expr.dataType
      val lv = NamedLambdaVariable("arg", et, containsNull)
      val function = normalize(lv)
      KnownFloatingPointNormalized(ArrayTransform(expr, LambdaFunction(function, Seq(lv))))

    case _ => throw new IllegalStateException(s"fail to normalize $expr")
  }

  // Canonicalizes a boxed Float: any NaN bit pattern -> Float.NaN, -0.0f -> 0.0f.
  val FLOAT_NORMALIZER: Any => Any = (input: Any) => {
    val f = input.asInstanceOf[Float]
    if (f.isNaN) {
      Float.NaN
    } else if (f == -0.0f) {
      0.0f
    } else {
      f
    }
  }

  // Canonicalizes a boxed Double: any NaN bit pattern -> Double.NaN, -0.0d -> 0.0d.
  val DOUBLE_NORMALIZER: Any => Any = (input: Any) => {
    val d = input.asInstanceOf[Double]
    if (d.isNaN) {
      Double.NaN
    } else if (d == -0.0d) {
      0.0d
    } else {
      d
    }
  }
}
/**
 * Expression that canonicalizes NaN (to the standard NaN bit pattern) and -0.0
 * (to 0.0) for float/double children, with both interpreted and codegen paths.
 */
case class NormalizeNaNAndZero(child: Expression) extends UnaryExpression with ExpectsInputTypes {

  override def dataType: DataType = child.dataType

  override def inputTypes: Seq[AbstractDataType] = Seq(TypeCollection(FloatType, DoubleType))

  // Pick the matching boxed normalizer once, based on the child's type.
  private lazy val normalizer: Any => Any = child.dataType match {
    case FloatType => NormalizeFloatingNumbers.FLOAT_NORMALIZER
    case DoubleType => NormalizeFloatingNumbers.DOUBLE_NORMALIZER
  }

  override def nullSafeEval(input: Any): Any = {
    normalizer(input)
  }

  // Emits the same NaN/-0.0 canonicalization as the interpreted path, inlined
  // as Java source for whole-stage codegen.
  override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val codeToNormalize = child.dataType match {
      case FloatType => (f: String) => {
        s"""
           |if (Float.isNaN($f)) {
           |  ${ev.value} = Float.NaN;
           |} else if ($f == -0.0f) {
           |  ${ev.value} = 0.0f;
           |} else {
           |  ${ev.value} = $f;
           |}
         """.stripMargin
      }

      case DoubleType => (d: String) => {
        s"""
           |if (Double.isNaN($d)) {
           |  ${ev.value} = Double.NaN;
           |} else if ($d == -0.0d) {
           |  ${ev.value} = 0.0d;
           |} else {
           |  ${ev.value} = $d;
           |}
         """.stripMargin
      }
    }

    nullSafeCodeGen(ctx, ev, codeToNormalize)
  }
}
| witgo/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/NormalizeFloatingNumbers.scala | Scala | apache-2.0 | 9,014 |
package org.jetbrains.plugins.scala
package codeInsight
package daemon
import com.intellij.openapi.util.text.StringUtil
/**
 * Checks semantic ("rainbow") highlighting for Scala fixtures: locals,
 * parameters and pattern variables are expected to be wrapped in the rainbow
 * tags imported below (S_1..S_4 carry distinct colors, S is color-agnostic).
 */
class ScalaRainbowVisitorTest extends base.ScalaLightCodeInsightFixtureTestAdapter {

  import ScalaRainbowVisitorTest.{END_TAG => E, START_TAG => S, START_TAG_1 => S_1, START_TAG_2 => S_2, START_TAG_3 => S_3, START_TAG_4 => S_4}

  // With rainbow highlighting disabled no tags are expected at all.
  def testRainbowOff(): Unit = doTest(
    s"def foo(p1: Int): Unit = {}",
    isRainbowOn = false
  )

  def testVariables(): Unit = doTest(
    s"""def foo(): Unit = {
       |  var ${S_1}v1$E, ${S_2}v2$E = 42
       |  ${S_1}v1$E = 42
       |
       |  var ${S_3}v3$E = ${S_1}v1$E + ${S_2}v2$E
       |}
      """.stripMargin
  )

  def testValues(): Unit = doTest(
    s"""def foo(): Unit = {
       |  val ${S_1}v1$E, ${S_2}v2$E = 42
       |  val ${S_3}v3$E = ${S_1}v1$E + ${S_2}v2$E
       |}
      """.stripMargin
  )

  // Class members are not rainbow-highlighted, hence no tags here.
  def testProperties(): Unit = doTest(
    s"""class Foo {
       |  val foo: Int = 42
       |  var bar: Int = 42
       |}
      """.stripMargin
  )

  def testParameters(): Unit = doTest(
    s"""def foo(${S_1}p1$E: Int, ${S_2}p2$E: Int): Unit = {
       |  val ${S_3}v3$E = ${S_1}p1$E + ${S_2}p2$E
       |}
      """.stripMargin
  )

  // Constructor parameters get tags but no color (withColor = false).
  def testClassParameters(): Unit = doTest(
    s"""case class Foo(p1: Int) {
       |  def foo(${S}p2$E: Int) = Foo(p1 = ${S}p2$E)
       |}
      """.stripMargin,
    withColor = false
  )

  def testLambdaParameters(): Unit = doTest(
    s"""def foo(${S_1}p1$E: Any => String = ${S_1}p2$E => ${S_1}p2$E.toString): Unit = {
       |  (${S_4}p3$E: String) => ${S_4}p3$E
       |}
      """.stripMargin
  )

  def testLambdaCaseParameters(): Unit = doTest(
    s"""def foo: String => String = {
       |  case ${S_1}p1$E: String if ${S_1}p1$E.isEmpty => ${S_1}p1$E
       |}
      """.stripMargin
  )

  // Shadowing: the inner `p1` reuses color 1 while the outer `p2` keeps color 2.
  def testNestedMethods(): Unit = doTest(
    s"""def foo(${S_1}p1$E: Int, ${S_2}p2$E: Int): Unit = {
       |  def bar(${S_1}p1$E: Int): Unit = {
       |    ${S_1}p1$E + ${S_2}p2$E
       |  }
       |}
      """.stripMargin
  )

  // @param references in Scaladoc share the parameter's color.
  def testScalaDoc(): Unit = doTest(
    s"""/**
       | * @param ${S_1}p1$E first parameter
       | * @param ${S_2}p2$E second parameter
       | */
       | def foo(${S_1}p1$E: Int, ${S_2}p2$E: Int): Unit = {}
      """.stripMargin
  )

  def testPatterns(): Unit = doTest(
    s"""case class Pair(p1: String = "", p2: String = "")
       |
       |def foo(${S_1}p1$E: Pair): Unit = {
       |  val ${S_2}v2$E = Pair()
       |  Pair() match {
       |    case Pair(${S_1}p1$E, ${S_2}p2$E) => ${S_1}p1$E + ${S_2}p2$E
       |    case $S_1`p1`$E =>
       |    case $S_2`v2`$E =>
       |  }
       |}
      """.stripMargin
  )

  def testForComprehensions(): Unit = doTest(
    s"""
       |for {
       |  ${S_1}p1$E <- Some(42)
       |  ${S_2}p2$E = 42
       |} yield (${S_1}p1$E, ${S_2}p2$E)
      """.stripMargin
  )

  // Runs the fixture through the platform's rainbow-highlighting test support.
  private def doTest(text: String,
                     isRainbowOn: Boolean = true,
                     withColor: Boolean = true): Unit =
    getFixture.testRainbow(
      "dummy.scala",
      StringUtil.convertLineSeparators(text),
      isRainbowOn,
      withColor
    )
}
object ScalaRainbowVisitorTest {

  // Expected markup in fixtures: numbered start tags carry the distinct color
  // assigned to each identifier; the plain tag is used when color is ignored.
  private val START_TAG = "<rainbow>"
  private val START_TAG_1 = "<rainbow color='ff000001'>"
  private val START_TAG_2 = "<rainbow color='ff000002'>"
  private val START_TAG_3 = "<rainbow color='ff000003'>"
  private val START_TAG_4 = "<rainbow color='ff000004'>"

  private val END_TAG = "</rainbow>"
}
| JetBrains/intellij-scala | scala/codeInsight/test/org/jetbrains/plugins/scala/codeInsight/daemon/ScalaRainbowVisitorTest.scala | Scala | apache-2.0 | 3,515 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
/**
* Adapted from:
*
* stream-lib
* Copyright 2016 AddThis
*
* This product includes software developed by AddThis.
*
* This product also includes code adapted from:
*
* Apache Solr (http://lucene.apache.org/solr/)
* Copyright 2014 The Apache Software Foundation
*
* Apache Mahout (http://mahout.apache.org/)
* Copyright 2014 The Apache Software Foundation
*
*/
package org.locationtech.geomesa.utils.clearspring
import com.clearspring.analytics.stream.frequency.IFrequency
import com.clearspring.analytics.stream.membership.Filter
/**
* Count-Min Sketch datastructure.
* An Improved Data Stream Summary: The Count-Min Sketch and its Applications
* https://web.archive.org/web/20060907232042/http://www.eecs.harvard.edu/~michaelm/CS222/countmin.pdf
*/
class CountMinSketch private (val eps: Double,
                              val confidence: Double,
                              private [utils] val table: Array[Array[Long]],
                              private [utils] var _size: Long,
                              private val depth: Int,
                              private val width: Int,
                              private val hashA: Array[Long]) extends IFrequency {

  def getRelativeError: Double = eps

  def getConfidence: Double = confidence

  // Hashes a long item into a bucket of row i using the linear hash (a*x) mod p
  // with p = 2^31 - 1, then reduces into [0, width).
  private def hash(item: Long, i: Int): Int = {
    var hash = hashA(i) * item
    // A super fast way of computing x mod 2^p-1
    // See http://www.cs.princeton.edu/courses/archive/fall09/cos521/Handouts/universalclasses.pdf
    // page 149, right after Proposition 7.
    hash += hash >> 32
    hash &= CountMinSketch.PrimeModulus
    // Doing "%" after (int) conversion is ~2x faster than %'ing longs.
    hash.toInt % width
  }

  // Increments the counter of `item` in every row; only non-negative counts
  // are supported (negative increments would require median-based estimation).
  override def add(item: Long, count: Long): Unit = {
    if (count < 0) {
      // Actually for negative increments we'll need to use the median
      // instead of minimum, and accuracy will suffer somewhat.
      // Probably makes sense to add an "allow negative increments"
      // parameter to constructor.
      throw new IllegalArgumentException("Negative increments not implemented")
    }
    var i = 0
    while (i < depth) {
      table(i)(hash(item, i)) += count
      i += 1
    }
    _size += count
  }

  // String variant: buckets come from Filter.getHashBuckets instead of the
  // linear hash above.
  override def add(item: String, count: Long): Unit = {
    if (count < 0) {
      // Actually for negative increments we'll need to use the median
      // instead of minimum, and accuracy will suffer somewhat.
      // Probably makes sense to add an "allow negative increments"
      // parameter to constructor.
      throw new IllegalArgumentException("Negative increments not implemented")
    }
    val buckets = Filter.getHashBuckets(item, depth, width)
    var i = 0
    while (i < depth) {
      table(i)(buckets(i)) += count
      i += 1
    }
    _size += count
  }

  /**
   * The estimate is correct within 'epsilon' * (total item count),
   * with probability 'confidence'.
   */
  override def estimateCount(item: Long): Long = {
    // The estimate is the minimum counter across all rows (classic CMS query).
    var res = Long.MaxValue
    var i = 0
    while (i < depth) {
      val row = table(i)
      res = math.min(res, row(hash(item, i)))
      i += 1
    }
    res
  }

  override def estimateCount(item: String): Long = {
    var res = Long.MaxValue
    val buckets = Filter.getHashBuckets(item, depth, width)
    var i = 0
    while (i < depth) {
      res = Math.min(res, table(i)(buckets(i)))
      i += 1
    }
    res
  }

  // In-place merge: adds the other sketch's counters cell by cell.
  def +=(other: CountMinSketch): Unit = {
    // note: we assume that seed is equal
    // NOTE(review): hash coefficients (hashA) are assumed identical but not
    // verified here; merging sketches built with different seeds would produce
    // meaningless counts.
    if (depth != other.depth || width != other.width) {
      throw new IllegalArgumentException("Can't merge CountMinSketch of different sizes")
    }
    var i, j = 0
    while (i < table.length) {
      val row = table(i)
      val otherRow = other.table(i)
      while (j < row.length) {
        row(j) += otherRow(j)
        j += 1
      }
      i += 1
      j = 0
    }
    _size += other.size
  }

  // Resets every counter and the total size to zero.
  def clear(): Unit = {
    var i, j = 0
    while (i < depth) {
      val row = table(i)
      while (j < width) {
        row(j) = 0L
        j += 1
      }
      i += 1
      j = 0
    }
    _size = 0L
  }

  // Deep structural equality: same dimensions, hash coefficients and counters.
  def isEquivalent(other: CountMinSketch): Boolean = {
    if (size != other.size || depth != other.depth || width != other.width) {
      return false
    }
    var i, j = 0
    while (i < depth) {
      if (hashA(i) != other.hashA(i)) {
        return false
      }
      val row = table(i)
      val otherRow = other.table(i)
      while (j < width) {
        if (row(j) != otherRow(j)) {
          return false
        }
        j += 1
      }
      i += 1
      j = 0
    }
    true
  }

  override def size: Long = _size
}
object CountMinSketch {

  // 2^31 - 1, a Mersenne prime used by the row hashes for fast modular reduction.
  private val PrimeModulus: Long = (1L << 31) - 1

  /**
   * Builds an empty sketch whose frequency estimates are within
   * `eps` * (total count) of the truth with probability `confidence`.
   */
  def apply(eps: Double, confidence: Double, seed: Int): CountMinSketch = {
    // 2/w = eps  =>  w = 2/eps ; 1/2^d <= 1-confidence  =>  d >= -log2(1-confidence)
    val width = math.ceil(2 / eps).toInt
    val depth = math.ceil(-math.log(1 - confidence) / math.log(2)).toInt
    // Array.ofDim zero-fills, so the sketch starts out empty.
    val counts = Array.ofDim[Long](depth, width)
    // One linear hash of the form (a*x) mod p per row; a,b are chosen
    // independently per hash, but b can be fixed at 0 since it only shifts
    // buckets without hurting uniformity or independence of the hashes.
    val rng = new java.util.Random(seed)
    val coefficients = new Array[Long](depth)
    var row = 0
    while (row < depth) {
      coefficients(row) = rng.nextInt(Int.MaxValue)
      row += 1
    }
    new CountMinSketch(eps, confidence, counts, 0L, depth, width, coefficients)
  }
}
| aheyne/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/clearspring/CountMinSketch.scala | Scala | apache-2.0 | 6,074 |
package net.sansa_stack.inference.flink
import java.io.{File, FileInputStream}
import java.net.URI
import java.util.Properties
import scala.io.Source
import com.typesafe.config.ConfigFactory
import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.api.scala.ExecutionEnvironment
import org.apache.flink.configuration.Configuration
import org.apache.flink.runtime.webmonitor.WebMonitorUtils
import net.sansa_stack.inference.flink.data.{RDFGraphLoader, RDFGraphWriter}
import net.sansa_stack.inference.flink.forwardchaining.{
ForwardRuleReasonerOWLHorst,
ForwardRuleReasonerRDFS
}
import net.sansa_stack.inference.rules.ReasoningProfile._
import net.sansa_stack.inference.rules.{RDFSLevel, ReasoningProfile}
/**
* A class to compute the materialization of a given RDF graph for a given reasoning profile.
* Basically, used as the main class for inference.
*
* @author Lorenz Buehmann
*
*/
object RDFGraphMaterializer {

  /** CLI entry point: parses the arguments and, on success, delegates to [[run]]. */
  def main(args: Array[String]) {
    parser.parse(args, Config()) match {
      case Some(config) =>
        run(args,
            config.in,
            config.out,
            config.profile,
            config.writeToSingleFile,
            config.sortedOutput,
            config.propertiesFile,
            config.jobName)
      case None =>
        println(parser.usage)
    }
  }

  /**
   * Loads the input RDF graph, applies the forward-chaining reasoner for the
   * given profile and writes the materialized graph to `output`.
   *
   * @param args              raw CLI arguments, re-exposed to the Flink Web-UI
   * @param input             files/directories containing N-Triples input
   * @param output            target directory for the inferred triples
   * @param profile           reasoning profile (RDFS, simple RDFS, OWL Horst)
   * @param writeToSingleFile whether to coalesce the output into one file
   * @param sortedOutput      whether to sort the triples per output file
   * @param propertiesFile    optional reasoner tuning properties; falls back
   *                          to the bundled "reasoner" config when null
   * @param jobName           Flink job name; derived from the profile if empty
   */
  def run(args: Array[String],
          input: Seq[URI],
          output: URI,
          profile: ReasoningProfile,
          writeToSingleFile: Boolean,
          sortedOutput: Boolean,
          propertiesFile: File,
          jobName: String): Unit = {
    // read reasoner optimization properties
    val reasonerConf =
      if (propertiesFile != null) ConfigFactory.parseFile(propertiesFile)
      else ConfigFactory.load("reasoner")
    // get params and make them available in the web interface later on
    val params: ParameterTool = ParameterTool.fromArgs(args)
    val conf = new Configuration()
    conf.setInteger("taskmanager.network.numberOfBuffers", 3000)
    // set up the execution environment
    val env = ExecutionEnvironment.getExecutionEnvironment
    // and disable logging to standard out
    env.getConfig.disableSysoutLogging()
    // env.setParallelism(4)
    // make parameters available in the web interface
    env.getConfig.setGlobalJobParameters(params)
    // load triples from disk
    val graph = RDFGraphLoader.loadFromDisk(input, env)
    // create reasoner for the requested profile
    val reasoner = profile match {
      case RDFS | RDFS_SIMPLE =>
        val r = new ForwardRuleReasonerRDFS(env)
        r.useSchemaBroadCasting = reasonerConf.getBoolean("reasoner.rdfs.schema.broadcast")
        r.extractSchemaTriplesInAdvance =
          reasonerConf.getBoolean("reasoner.rdfs.schema.extractTriplesInAdvance")
        if (profile == RDFS_SIMPLE) r.level = RDFSLevel.SIMPLE
        r
      case OWL_HORST => new ForwardRuleReasonerOWLHorst(env)
    }
    // compute inferred graph
    val inferredGraph = reasoner.apply(graph)
    // write triples to disk
    RDFGraphWriter.writeToDisk(inferredGraph, output, writeToSingleFile, sortedOutput)
    val jn = if (jobName.isEmpty) s"$profile Reasoning" else jobName
    // run the program
    env.execute(jn)
  }

  // the config object
  case class Config(
      in: Seq[URI] = Seq(),
      out: URI = new URI("."),
      profile: ReasoningProfile = ReasoningProfile.RDFS,
      writeToSingleFile: Boolean = false,
      sortedOutput: Boolean = false,
      propertiesFile: File = null,
      jobName: String = "") // new File(getClass.getResource("reasoner.properties").toURI)

  // read ReasoningProfile enum
  implicit val profilesRead: scopt.Read[ReasoningProfile.Value] =
    scopt.Read.reads(ReasoningProfile forName _.toLowerCase())

  // the CLI parser
  val parser = new scopt.OptionParser[Config]("RDFGraphMaterializer") {
    head("RDFGraphMaterializer", "0.6.0")
    opt[Seq[URI]]('i', "input")
      .required()
      .valueName("<path>")
      .action((x, c) => c.copy(in = x))
      .text("path to file or directory that contains the input files (in N-Triples format)")
    opt[URI]('o', "out")
      .required()
      .valueName("<directory>")
      .action((x, c) => c.copy(out = x))
      .text("the output directory")
    opt[Unit]("single-file")
      .optional()
      .action((_, c) => c.copy(writeToSingleFile = true))
      .text("write the output to a single file in the output directory")
    opt[Unit]("sorted")
      .optional()
      .action((_, c) => c.copy(sortedOutput = true))
      .text("sorted output of the triples (per file)")
    opt[ReasoningProfile]('p', "profile")
      .required()
      .valueName("{rdfs | rdfs-simple | owl-horst}")
      .action((x, c) => c.copy(profile = x))
      .text("the reasoning profile")
    // BUG FIX: this option previously also declared the short flag 'p',
    // which collided with the required "--profile" option above; only the
    // unambiguous long form "--prop" is supported now.
    opt[File]("prop")
      .optional()
      .valueName("<path_to_properties_file>")
      .action((x, c) => c.copy(propertiesFile = x))
      .text("the (optional) properties file which allows some more advanced options")
    opt[String]('j', "jobName")
      .optional()
      .valueName("<name_of_the_Flink_job>")
      .action((x, c) => c.copy(jobName = x))
      .text("the name of the Flink job that occurs also in the Web-UI")
    help("help").text("prints this usage text")
  }

  parser.showUsageOnError
}
| SANSA-Stack/SANSA-RDF | sansa-inference/sansa-inference-flink/src/main/scala/net/sansa_stack/inference/flink/RDFGraphMaterializer.scala | Scala | apache-2.0 | 5,609 |
import _root_.io.gatling.core.scenario.Simulation
import ch.qos.logback.classic.{Level, LoggerContext}
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
/**
* Performance test for the MultipleChoiceQuestion entity.
*/
class MultipleChoiceQuestionGatlingTest extends Simulation {

    val context: LoggerContext = LoggerFactory.getILoggerFactory.asInstanceOf[LoggerContext]
    // Log all HTTP requests
    //context.getLogger("io.gatling.http").setLevel(Level.valueOf("TRACE"))
    // Log failed HTTP requests
    //context.getLogger("io.gatling.http").setLevel(Level.valueOf("DEBUG"))

    // Target base URL; override with -DbaseURL=... on the command line.
    val baseURL = Option(System.getProperty("baseURL")) getOrElse """http://127.0.0.1:8080"""

    val httpConf = http
        .baseURL(baseURL)
        .inferHtmlResources()
        .acceptHeader("*/*")
        .acceptEncodingHeader("gzip, deflate")
        .acceptLanguageHeader("fr,fr-fr;q=0.8,en-us;q=0.5,en;q=0.3")
        .connectionHeader("keep-alive")
        .userAgentHeader("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:33.0) Gecko/20100101 Firefox/33.0")

    val headers_http = Map(
        "Accept" -> """application/json"""
    )

    // Authenticated requests replay the XSRF token captured at login.
    val headers_http_authenticated = Map(
        "Accept" -> """application/json""",
        "X-XSRF-TOKEN" -> "${xsrf_token}"
    )

    // Scenario: capture an XSRF token, authenticate, then exercise the
    // entity's REST endpoints (list, create, read, delete) with pauses.
    val scn = scenario("Test the MultipleChoiceQuestion entity")
        .exec(http("First unauthenticated request")
        .get("/api/account")
        .headers(headers_http)
        .check(status.is(401))
        .check(headerRegex("Set-Cookie", "XSRF-TOKEN=(.*);[\\\\s]").saveAs("xsrf_token"))).exitHereIfFailed
        .pause(10)
        .exec(http("Authentication")
        .post("/api/authentication")
        .headers(headers_http_authenticated)
        .formParam("j_username", "admin")
        .formParam("j_password", "admin")
        .formParam("remember-me", "true")
        .formParam("submit", "Login")
        .check(headerRegex("Set-Cookie", "XSRF-TOKEN=(.*);[\\\\s]").saveAs("xsrf_token"))).exitHereIfFailed
        .pause(1)
        .exec(http("Authenticated request")
        .get("/api/account")
        .headers(headers_http_authenticated)
        .check(status.is(200)))
        .pause(10)
        .repeat(2) {
            exec(http("Get all multipleChoiceQuestions")
            .get("/api/multiple-choice-questions")
            .headers(headers_http_authenticated)
            .check(status.is(200)))
            .pause(10 seconds, 20 seconds)
            .exec(http("Create new multipleChoiceQuestion")
            .post("/api/multiple-choice-questions")
            .headers(headers_http_authenticated)
            .body(StringBody("""{"id":null}""")).asJSON
            .check(status.is(201))
            .check(headerRegex("Location", "(.*)").saveAs("new_multipleChoiceQuestion_url"))).exitHereIfFailed
            .pause(10)
            .repeat(5) {
                exec(http("Get created multipleChoiceQuestion")
                .get("${new_multipleChoiceQuestion_url}")
                .headers(headers_http_authenticated))
                .pause(10)
            }
            .exec(http("Delete created multipleChoiceQuestion")
            .delete("${new_multipleChoiceQuestion_url}")
            .headers(headers_http_authenticated))
            .pause(10)
        }

    val users = scenario("Users").exec(scn)

    // Ramp the requested number of virtual users over the requested minutes
    // (-Dusers=..., -Dramp=...).
    setUp(
        users.inject(rampUsers(Integer.getInteger("users", 100)) over (Integer.getInteger("ramp", 1) minutes))
    ).protocols(httpConf)
}
| LisLo/ArTEMiS | src/test/gatling/user-files/simulations/MultipleChoiceQuestionGatlingTest.scala | Scala | mit | 3,550 |
/*
* Copyright 2016 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.xpath.scalaz
/** Aggregates the `Arbitrary` instances required by the scalaz law tests. */
object arbitrary
    extends kantan.codecs.scalaz.laws.discipline.ArbitraryInstances with kantan.xpath.laws.discipline.ArbitraryInstances
| nrinaudo/kantan.xpath | scalaz/src/test/scala/kantan/xpath/scalaz/arbitrary.scala | Scala | apache-2.0 | 767 |
import org.specs2.mutable._
import play.api.libs.iteratee._
import scala.concurrent._
import reactivemongo.bson.BSONObjectID
/** Sample user document persisted and read back by the collection tests. */
case class User(_id: Option[BSONObjectID] = None, username: String)
class JSONCollectionSpec extends Specification {
  import Common._
  import reactivemongo.bson._
  import reactivemongo.api.FailoverStrategy
  import play.modules.reactivemongo.json.BSONFormats._
  import play.modules.reactivemongo.json.collection.JSONCollection
  import play.modules.reactivemongo.json.collection.JSONQueryBuilder
  import play.api.libs.json._
  import play.api.libs.functional.syntax._
  implicit val userReads = Json.reads[User]
  implicit val userWrites = Json.writes[User]
  // The examples mutate a shared MongoDB collection and build on each
  // other's state, so they must run in declaration order.
  sequential
  lazy val collectionName = "reactivemongo_test_users"
  // Raw BSON view of the collection, used to verify what was persisted.
  lazy val bsonCollection = db(collectionName)
  // JSON view of the same collection, the object under test.
  lazy val collection = new JSONCollection(db, collectionName, new FailoverStrategy())
  "JSONCollection.save" should {
    "add object if there does not exist in database" in {
      // Check current document does not exist
      val query = BSONDocument("username" -> BSONString("John Doe"))
      val fetched1 = Await.result(bsonCollection.find(query).one, timeout)
      fetched1 must beNone
      // Add document..
      val user = User(username = "John Doe")
      val result = Await.result(collection.save(user), timeout)
      result.ok must beTrue
      // Check data in mongodb..
      val fetched2 = Await.result(bsonCollection.find(query).one, timeout)
      fetched2 must beSome.like {
        case d: BSONDocument => {
          // save() should have generated an _id for the new document
          d.get("_id").isDefined must beTrue
          d.get("username") must beSome(BSONString("John Doe"))
        }
      }
    }
    "update object there already exists in database" in {
      // Find saved object
      val fetched1 = Await.result(collection.find(Json.obj("username" -> "John Doe")).one[User], timeout)
      fetched1 must beSome.like {
        case u: User => {
          u._id.isDefined must beTrue
          u.username must beEqualTo("John Doe")
        }
      }
      // Update object..
      val newUser = fetched1.get.copy(username = "Jane Doe")
      val result = Await.result(collection.save(newUser), timeout)
      result.ok must beTrue
      // Check data in mongodb: the old name is gone, the new one is stored
      // under the same _id (i.e. save() updated rather than inserted).
      val fetched2 = Await.result(bsonCollection.find(BSONDocument("username" -> BSONString("John Doe"))).one, timeout)
      fetched2 must beNone
      val fetched3 = Await.result(bsonCollection.find(BSONDocument("username" -> BSONString("Jane Doe"))).one, timeout)
      fetched3 must beSome.like {
        case d: BSONDocument => {
          d.get("_id") must beSome(fetched1.get._id.get)
          d.get("username") must beSome(BSONString("Jane Doe"))
        }
      }
    }
    "add object if there does not exist but its field `_id` is setted" in {
      // Check current document does not exist
      val query = BSONDocument("username" -> BSONString("Robert Roe"))
      val fetched1 = Await.result(bsonCollection.find(query).one, timeout)
      fetched1 must beNone
      // Add document with a caller-supplied _id..
      val id = BSONObjectID.generate
      val user = User(_id = Option(id), username = "Robert Roe")
      val result = Await.result(collection.save(user), timeout)
      result.ok must beTrue
      // Check data in mongodb: the supplied _id must be preserved
      val fetched2 = Await.result(bsonCollection.find(query).one, timeout)
      fetched2 must beSome.like {
        case d: BSONDocument => {
          d.get("_id") must beSome(id)
          d.get("username") must beSome(BSONString("Robert Roe"))
        }
      }
    }
  }
  "JSONQueryBuilder.merge" should {
    "write an JsObject with mongo query only if there are not options defined" in {
      val builder = JSONQueryBuilder(
        collection = collection,
        failover = new FailoverStrategy(),
        queryOption = Option(Json.obj("username" -> "John Doe")))
      builder.merge.toString must beEqualTo("{\\"username\\":\\"John Doe\\"}")
    }
    "write an JsObject with only defined options" in {
      // With extra options the query is wrapped under $query/$orderby/...
      val builder1 = JSONQueryBuilder(
        collection = collection,
        failover = new FailoverStrategy(),
        queryOption = Option(Json.obj("username" -> "John Doe")),
        sortOption = Option(Json.obj("age" -> 1)))
      builder1.merge.toString must beEqualTo("{\\"$query\\":{\\"username\\":\\"John Doe\\"},\\"$orderby\\":{\\"age\\":1}}")
      val builder2 = builder1.copy(commentString = Option("get john doe users sorted by age"))
      builder2.merge.toString must beEqualTo("{\\"$query\\":{\\"username\\":\\"John Doe\\"},\\"$orderby\\":{\\"age\\":1},\\"$comment\\":\\"get john doe users sorted by age\\"}")
    }
  }
}
| xnejp03/Play-ReactiveMongo | src/test/scala/jsoncollection.scala | Scala | apache-2.0 | 4,625 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples.ml
import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkContext, SparkConf}
// $example on$
import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.classification.DecisionTreeClassifier
import org.apache.spark.ml.classification.DecisionTreeClassificationModel
import org.apache.spark.ml.feature.{StringIndexer, IndexToString, VectorIndexer}
import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
// $example off$
/**
 * Example: trains a decision-tree classifier inside an ML Pipeline
 * (label indexing -> feature indexing -> tree -> label de-indexing) and
 * reports the test error. The `$example on$/$example off$` markers delimit
 * the snippet included in the Spark documentation.
 */
object DecisionTreeClassificationExample {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("DecisionTreeClassificationExample")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)
    // $example on$
    // Load the data stored in LIBSVM format as a DataFrame.
    val data = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
    // Index labels, adding metadata to the label column.
    // Fit on whole dataset to include all labels in index.
    val labelIndexer = new StringIndexer()
      .setInputCol("label")
      .setOutputCol("indexedLabel")
      .fit(data)
    // Automatically identify categorical features, and index them.
    val featureIndexer = new VectorIndexer()
      .setInputCol("features")
      .setOutputCol("indexedFeatures")
      .setMaxCategories(4) // features with > 4 distinct values are treated as continuous
      .fit(data)
    // Split the data into training and test sets (30% held out for testing)
    val Array(trainingData, testData) = data.randomSplit(Array(0.7, 0.3))
    // Train a DecisionTree model.
    val dt = new DecisionTreeClassifier()
      .setLabelCol("indexedLabel")
      .setFeaturesCol("indexedFeatures")
    // Convert indexed labels back to original labels.
    val labelConverter = new IndexToString()
      .setInputCol("prediction")
      .setOutputCol("predictedLabel")
      .setLabels(labelIndexer.labels)
    // Chain indexers and tree in a Pipeline
    val pipeline = new Pipeline()
      .setStages(Array(labelIndexer, featureIndexer, dt, labelConverter))
    // Train model. This also runs the indexers.
    val model = pipeline.fit(trainingData)
    // Make predictions.
    val predictions = model.transform(testData)
    // Select example rows to display.
    predictions.select("predictedLabel", "label", "features").show(5)
    // Select (prediction, true label) and compute test error
    // ("precision" is this Spark version's name for the multiclass
    // accuracy-style metric).
    val evaluator = new MulticlassClassificationEvaluator()
      .setLabelCol("indexedLabel")
      .setPredictionCol("prediction")
      .setMetricName("precision")
    val accuracy = evaluator.evaluate(predictions)
    println("Test Error = " + (1.0 - accuracy))
    // Stage 2 of the fitted pipeline is the trained tree model.
    val treeModel = model.stages(2).asInstanceOf[DecisionTreeClassificationModel]
    println("Learned classification tree model:\\n" + treeModel.toDebugString)
    // $example off$
  }
}
| chenc10/Spark-PAF | examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeClassificationExample.scala | Scala | apache-2.0 | 3,751 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ml.dmlc.mxnetexamples.rnn
import ml.dmlc.mxnet.Callback.Speedometer
import ml.dmlc.mxnet._
import BucketIo.BucketSentenceIter
import ml.dmlc.mxnet.optimizer.SGD
import org.kohsuke.args4j.{CmdLineParser, Option}
import org.slf4j.{Logger, LoggerFactory}
import scala.collection.JavaConverters._
import ml.dmlc.mxnet.module.BucketingModule
import ml.dmlc.mxnet.module.FitParams
/**
* Bucketing LSTM examples
* @author Yizhi Liu
*/
/**
 * Holder for the example's command-line arguments; the fields are
 * populated reflectively by args4j's [[CmdLineParser]].
 */
class LstmBucketing {
  @Option(name = "--data-train", usage = "training set")
  private val dataTrain: String = "example/rnn/ptb.train.txt"
  @Option(name = "--data-val", usage = "validation set")
  private val dataVal: String = "example/rnn/ptb.valid.txt"
  @Option(name = "--num-epoch", usage = "the number of training epoch")
  private val numEpoch: Int = 5
  @Option(name = "--gpus", usage = "the gpus will be used, e.g. '0,1,2,3'")
  private val gpus: String = null
  @Option(name = "--cpus", usage = "the cpus will be used, e.g. '0,1,2,3'")
  private val cpus: String = null
  @Option(name = "--save-model-path", usage = "the model saving path")
  private val saveModelPath: String = "model/lstm"
}
object LstmBucketing {
  private val logger: Logger = LoggerFactory.getLogger(classOf[LstmBucketing])

  /**
   * Evaluation metric: perplexity = exp(mean negative log-likelihood).
   * NOTE(review): assumes `label` holds one class index per prediction row
   * after transposition — confirm against BucketSentenceIter's layout.
   */
  def perplexity(label: NDArray, pred: NDArray): Float = {
    pred.waitToRead()
    val labelArr = label.T.toArray.map(_.toInt)
    var loss = .0
    (0 until pred.shape(0)).foreach(i =>
      // clamp the probability at 1e-10 to avoid log(0)
      loss -= Math.log(Math.max(1e-10f, pred.slice(i).toArray(labelArr(i))))
    )
    Math.exp(loss / labelArr.length).toFloat
  }

  def main(args: Array[String]): Unit = {
    val inst = new LstmBucketing
    val parser: CmdLineParser = new CmdLineParser(inst)
    try {
      parser.parseArgument(args.toList.asJava)
      // Device selection: GPUs if given, else CPUs if given, else one CPU.
      val contexts =
        if (inst.gpus != null) inst.gpus.split(',').map(id => Context.gpu(id.trim.toInt))
        else if (inst.cpus != null) inst.cpus.split(',').map(id => Context.cpu(id.trim.toInt))
        else Array(Context.cpu(0))
      val batchSize = 32
      // Sentence-length buckets; each bucket gets its own unrolled network.
      val buckets = Array(10, 20, 30, 40, 50, 60)
      val numHidden = 200
      val numEmbed = 200
      val numLstmLayer = 2
      logger.info("Building vocab ...")
      val vocab = BucketIo.defaultBuildVocab(inst.dataTrain)
      // Builds the unrolled LSTM symbol for a bucket key (= sequence length),
      // plus the names of its data and label inputs.
      def BucketSymGen(key: AnyRef):
        (Symbol, IndexedSeq[String], IndexedSeq[String]) = {
        val seqLen = key.asInstanceOf[Int]
        val sym = Lstm.lstmUnroll(numLstmLayer, seqLen, vocab.size,
          numHidden = numHidden, numEmbed = numEmbed, numLabel = vocab.size)
        (sym, IndexedSeq("data"), IndexedSeq("softmax_label"))
      }
      // Initial cell (c) and hidden (h) state shapes, one pair per layer.
      val initC = (0 until numLstmLayer).map(l =>
        (s"l${l}_init_c_beta", (batchSize, numHidden))
      )
      val initH = (0 until numLstmLayer).map(l =>
        (s"l${l}_init_h_beta", (batchSize, numHidden))
      )
      val initStates = initC ++ initH
      val dataTrain = new BucketSentenceIter(inst.dataTrain, vocab,
        buckets, batchSize, initStates)
      val dataVal = new BucketSentenceIter(inst.dataVal, vocab,
        buckets, batchSize, initStates)
      val model = new BucketingModule(
        symGen = BucketSymGen,
        defaultBucketKey = dataTrain.defaultBucketKey,
        contexts = contexts)
      val fitParams = new FitParams()
      fitParams.setEvalMetric(
        new CustomMetric(perplexity, name = "perplexity"))
      fitParams.setKVStore("device")
      fitParams.setOptimizer(
        new SGD(learningRate = 0.01f, momentum = 0f, wd = 0.00001f))
      fitParams.setInitializer(new Xavier(factorType = "in", magnitude = 2.34f))
      fitParams.setBatchEndCallback(new Speedometer(batchSize, 50))
      logger.info("Start training ...")
      model.fit(
        trainData = dataTrain,
        evalData = Some(dataVal),
        numEpoch = inst.numEpoch, fitParams)
      logger.info("Finished training...")
    } catch {
      case ex: Exception =>
        logger.error(ex.getMessage, ex)
        parser.printUsage(System.err)
        sys.exit(1)
    }
  }
}
| kkk669/mxnet | scala-package/examples/src/main/scala/ml/dmlc/mxnetexamples/rnn/LstmBucketing.scala | Scala | apache-2.0 | 4,828 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import org.mockito.Mockito._
import org.scalatestplus.mockito.MockitoSugar
import org.scalatest.{Matchers, WordSpec}
import uk.gov.hmrc.ct.accounts.frs102.retriever.FullAccountsBoxRetriever
import uk.gov.hmrc.ct.box.CtValidation
class AC154Spec extends WordSpec with Matchers with MockitoSugar {

  // Builds a box retriever whose AC58/AC59 answers are stubbed as given.
  private def retrieverWith(ac58: Option[Int], ac59: Option[Int] = Some(50)): FullAccountsBoxRetriever = {
    val boxRetriever = mock[FullAccountsBoxRetriever]
    when(boxRetriever.ac58()).thenReturn(AC58(ac58))
    when(boxRetriever.ac59()).thenReturn(AC59(ac59))
    boxRetriever
  }

  // Expected validation failure when AC154 disagrees with AC58.
  private val mismatchError =
    Set(CtValidation(None, "error.creditors.within.one.year.note.current.total.not.equal.balance.sheet"))

  "AC154" should {
    "have no errors if AC58 has the same value" in {
      AC154(Some(50)).validate(retrieverWith(Some(50))) shouldBe empty
    }

    "have no errors if AC58 and AC154 are both None" in {
      AC154(None).validate(retrieverWith(None)) shouldBe empty
    }

    "have no errors if AC58 and AC154 are both 0" in {
      AC154(Some(0)).validate(retrieverWith(Some(0))) shouldBe empty
    }

    "return error if AC58 is 0 and AC154 is None" in {
      AC154(None).validate(retrieverWith(Some(0))) shouldBe mismatchError
    }

    "return error if AC58 is 0 and AC154 is 50" in {
      AC154(Some(50)).validate(retrieverWith(Some(0))) shouldBe mismatchError
    }
  }
}
| hmrc/ct-calculations | src/test/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC154Spec.scala | Scala | apache-2.0 | 2,637 |
package io.iteratee.monix
import cats.MonadError
import io.iteratee.{ EnumerateeModule, EnumeratorErrorModule, IterateeErrorModule, Module }
import io.iteratee.files.SuspendableFileModule
import monix.eval.Task
/**
 * Iteratee module for Monix's `Task`, with errors modeled as `Throwable`.
 * Mixes in enumeratee, enumerator, iteratee and suspendable-file support.
 */
trait TaskModule extends Module[Task]
  with EnumerateeModule[Task]
  with EnumeratorErrorModule[Task, Throwable] with IterateeErrorModule[Task, Throwable]
  with SuspendableFileModule[Task] {
  // Monad evidence required by the mixed-in modules.
  final type M[f[_]] = MonadError[f, Throwable]
  // Task.delay defers evaluation, so effects are captured lazily.
  final protected def captureEffect[A](a: => A): Task[A] = Task.delay(a)
}
/**
 * Provides [[TaskModule]] instances. Note: the redundant `final` modifier
 * was dropped — top-level objects are implicitly final, and the explicit
 * modifier triggers a compiler warning under Scala 2.13 lint.
 */
object TaskModule {
  /** Creates a module backed by the supplied `MonadError[Task, Throwable]`. */
  def instance(implicit taskMonadError: MonadError[Task, Throwable]): TaskModule = new TaskModule {
    final protected val F: MonadError[Task, Throwable] = taskMonadError
  }
}
| flyingwalrusllc/iteratee | monix/jvm/src/main/scala/io/iteratee/monix/TaskModule.scala | Scala | apache-2.0 | 741 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils
import java.io.IOException
import org.apache.kafka.clients.{ClientRequest, ClientResponse, NetworkClient}
import org.apache.kafka.common.Node
import scala.annotation.tailrec
import scala.collection.JavaConverters._
import org.apache.kafka.common.utils.{Time => JTime}
object NetworkClientBlockingOps {
  // Import this to enrich NetworkClient with the blocking helper methods.
  implicit def networkClientBlockingOps(client: NetworkClient): NetworkClientBlockingOps =
    new NetworkClientBlockingOps(client)
}
/**
* Provides extension methods for `NetworkClient` that are useful for implementing blocking behaviour. Use with care.
*
* Example usage:
*
* {{{
* val networkClient: NetworkClient = ...
* import NetworkClientBlockingOps._
* networkClient.blockingReady(...)
* }}}
*/
class NetworkClientBlockingOps(val client: NetworkClient) extends AnyVal {

  /**
   * Invokes `client.ready` followed by 0 or more `client.poll` invocations until the connection to `node` is ready,
   * the timeout expires or the connection fails.
   *
   * It returns `true` if the call completes normally or `false` if the timeout expires. If the connection fails,
   * an `IOException` is thrown instead.
   *
   * This method is useful for implementing blocking behaviour on top of the non-blocking `NetworkClient`, use it with
   * care.
   */
  def blockingReady(node: Node, timeout: Long)(implicit time: JTime): Boolean = {
    // Short-circuits if the connection is already ready; otherwise polls.
    client.ready(node, time.milliseconds()) || pollUntil(timeout) { (_, now) =>
      if (client.isReady(node, now))
        true
      else if (client.connectionFailed(node))
        throw new IOException(s"Connection to $node failed")
      else false
    }
  }

  /**
   * Invokes `client.send` followed by 1 or more `client.poll` invocations until a response is received,
   * the timeout expires or a disconnection happens.
   *
   * It returns `true` if the call completes normally or `false` if the timeout expires. In the case of a disconnection,
   * an `IOException` is thrown instead.
   *
   * This method is useful for implementing blocking behaviour on top of the non-blocking `NetworkClient`, use it with
   * care.
   */
  def blockingSendAndReceive(request: ClientRequest, timeout: Long)(implicit time: JTime): Option[ClientResponse] = {
    client.send(request, time.milliseconds())

    pollUntilFound(timeout) { case (responses, _) =>
      // Match the response to our request by correlation id.
      val response = responses.find { response =>
        response.request.request.header.correlationId == request.request.header.correlationId
      }
      response.foreach { r =>
        if (r.wasDisconnected) {
          val destination = request.request.destination
          throw new IOException(s"Connection to $destination was disconnected before the response was read")
        }
      }
      response
    }
  }

  /**
   * Invokes `client.poll` until `predicate` returns `true` or the timeout expires.
   *
   * It returns `true` if the call completes normally or `false` if the timeout expires. Exceptions thrown via
   * `predicate` are not handled and will bubble up.
   *
   * This method is useful for implementing blocking behaviour on top of the non-blocking `NetworkClient`, use it with
   * care.
   */
  private def pollUntil(timeout: Long)(predicate: (Seq[ClientResponse], Long) => Boolean)(implicit time: JTime): Boolean = {
    pollUntilFound(timeout) { (responses, now) =>
      if (predicate(responses, now)) Some(true)
      else None
    }.fold(false)(_ => true)
  }

  /**
   * Invokes `client.poll` until `collect` returns `Some` or the timeout expires.
   *
   * It returns the result of `collect` if the call completes normally or `None` if the timeout expires. Exceptions
   * thrown via `collect` are not handled and will bubble up.
   *
   * A negative `timeout` means "no deadline": polling continues until `collect`
   * yields a result.
   *
   * This method is useful for implementing blocking behaviour on top of the non-blocking `NetworkClient`, use it with
   * care.
   */
  private def pollUntilFound[T](timeout: Long)(collect: (Seq[ClientResponse], Long) => Option[T])(implicit time: JTime): Option[T] = {

    val methodStartTime = time.milliseconds()
    val timeoutExpiryTime = methodStartTime + timeout

    @tailrec
    def recurse(iterationStartTime: Long): Option[T] = {
      // Remaining budget for this poll; passed through unchanged if negative.
      val pollTimeout = if (timeout < 0) timeout else timeoutExpiryTime - iterationStartTime
      val responses = client.poll(pollTimeout, iterationStartTime).asScala
      val result = collect(responses, iterationStartTime)
      if (result.isDefined) result
      else {
        val afterPollTime = time.milliseconds()
        if (timeout < 0 || afterPollTime < timeoutExpiryTime)
          recurse(afterPollTime)
        else None
      }
    }

    recurse(methodStartTime)
  }

}
| eljefe6a/kafka | core/src/main/scala/kafka/utils/NetworkClientBlockingOps.scala | Scala | apache-2.0 | 5,433 |
package japgolly.scalajs.react.extra.router2
import japgolly.scalajs.react._, vdom.prefix_<^._
import japgolly.scalajs.react.test._
import org.scalajs.dom._
import scalaz._
import scalaz.effect.IO
import utest._
import TestUtil._
object RouterTest extends TestSuite {
  // Routing pages: a sealed ADT so the router config covers every page.
  sealed trait MyPage
  object MyPage {
    case object Root extends MyPage
    case object Hello extends MyPage
    case class Greet(name: String) extends MyPage
    case class Person(id: Long) extends MyPage

    // Root page: renders links to the other routes via the RouterCtl.
    val RootComponent = ReactComponentB[RouterCtl[MyPage]]("Root")
      .render(r =>
        <.div(
          <.h2("Router Demonstration"),
          <.p("This is the root page. Click on a link below to view routes within this page."),
          <.div(r.link(Hello)("The 'hello' route", ^.cls := "hello")),
          <.div(r.link(Greet("bob"))("Greet('bob')", ^.cls := "n1")),
          <.div(r.link(Greet("crap"))("Greet('crap')", ^.cls := "n2")))
      ).build

    val HelloComponent =
      ReactComponentB.static("Hello", <.h3("Hello there!")).buildU

    val NameComponent = ReactComponentB[String]("Name")
      .render(name => <.h3(s"I believe your name is '$name'."))
      .build

    val PersonComponent = ReactComponentB[Person]("Person by ID")
      .render(p => <.h3(s"Person #${p.id} Details..."))
      .build

    // Route table: static routes, a redirect, and two dynamic routes with
    // typed path extraction; unknown paths redirect back to Root.
    val config = RouterConfigDsl[MyPage].buildConfig { dsl =>
      import dsl._

      (removeTrailingSlashes
      | staticRoute(root, Root) ~> renderR(RootComponent(_))
      | staticRoute("/hello", Hello) ~> render (HelloComponent())
      | staticRedirect("/hey") ~> redirectToPage(Hello)(Redirect.Replace)
      | dynamicRouteCT("/name" / string("[a-z]+").caseclass1(Greet)(Greet.unapply)) ~> dynRender(g => NameComponent(g.name))
      | dynamicRouteCT("/person" / long.caseclass1(Person)(Person.unapply)) ~> dynRender(PersonComponent(_))
      )
        .notFound(redirectToPage(Root)(Redirect.Replace))
        .renderWith((ctl, res) =>
          // Non-root pages get a "Back" link wrapped around their content.
          if (res.page == Root)
            res.render()
          else
            <.div(
              <.div(ctl.link(Root)("Back", ^.cls := "back")),
              res.render()))
  }
// -------------------------------------------------------------------------------------------------------------------
// object MyOtherPage extends RoutingRules {
// override val notFound = render(<.h1("404!!"))
// val thebuns = register(location(".buns", <.h1("The Buns!")))
// }
// -------------------------------------------------------------------------------------------------------------------
/** Two groups of tests: a DOM click-through simulation and pure routing-logic checks. */
override val tests = TestSuite {

  // Renders the router into a real (test) document and simulates navigation clicks.
  'sim {
    import MyPage.{Root, Hello, Greet, Person}
    val base = BaseUrl("file:///routerDemo")
    val router = Router(base, MyPage.config.logToConsole)
    val c = ReactTestUtils.renderIntoDocument(router())

    def html = c.getDOMNode().outerHTML

    // Asserts both the browser URL and the rendered HTML match the expected page.
    def testView(routeSuffix: String, p: MyPage): Unit = {
      window.location.href mustEqual base.+(routeSuffix).value
      val h = html
      assertContains(h, "Router Demo", p == Root)
      assertContains(h, """>Back<""", p != Root)
      assertContains(h, "Hello there", p == Hello)
      assertContains(h, "your name is", p match {case Greet(_) => true; case _ => false})
    }
    def assertRoot() = testView("", Root)
    def assertRouteHello() = testView("/hello", Hello)
    def assertRouteNameBob() = testView("/name/bob", Greet("bob"))

    // Simulated clicks target css classes assigned in the components above.
    def click(css: String): Unit = Simulation.click run Sel(css).findIn(c)
    def clickBack() = click("a.back")
    def clickHello() = click("a.hello")
    def clickNameBob() = click("a.n1")

    try {
      assertRoot()
      clickHello(); assertRouteHello()
      clickBack(); assertRoot()
      clickNameBob(); assertRouteNameBob()
      clickBack(); assertRoot()
    } finally {
      // Always unmount so later tests start from a clean document.
      React.unmountComponentAtNode(c.getDOMNode())
    }
  }

  // Exercises RouterLogic directly, without touching the DOM.
  'pure {
    implicit val base = BaseUrl("http://www.yaya.com/blah")
    val r = new RouterLogic(base, MyPage.config.logToConsole)

    'urlParsing {
      'root { r.parseUrl(base.abs) mustEqual Some(Path("")) }
      'tslash { r.parseUrl(base / "" abs) mustEqual Some(Path("/")) }
      'path { r.parseUrl(base / "hehe" abs) mustEqual Some(Path("/hehe")) }
    }

    'syncToUrl {
      // Runs syncToUrl against a simulated history; returns (history, resolution).
      def runh[P](r: RouterLogic[P], start: AbsUrl) = {
        val s = SimHistory(start)
        val a = s.run(r.syncToUrl(s.startUrl))
        s.broadcasts mustEqual Vector.empty // this is sync(), not set()
        (s, a)
      }
      // Checks the resulting history stack, resolved page, and final path.
      def testh[P](r: RouterLogic[P], start: AbsUrl)(expectPrevHistory: AbsUrl => List[AbsUrl], expectPage: P, expectPath: String): Unit = {
        val (s, res) = runh(r, start)
        s.history.mustEqual(Path(expectPath).abs :: expectPrevHistory(start))
        res.page mustEqual expectPage
      }

      // 'match_root - r.syncToUrl(base.abs) .mustEqual(\/-(MyPage.root))
      // 'match_path - r.syncToUrl(base / "hello" abs).mustEqual(\/-(MyPage.hello))
      'notFound_redirect - testh(r, base / "what" abs)(_ => Nil, MyPage.Root, "")
      // 'notFound_render {
      //   val abs = base / "what" abs
      //   val r2 = MyOtherPage.routingEngine(base)
      //   val (s, a) = runh(r2, abs)
      //   s.history mustEqual List(abs)
      //   a.path.value mustEqual "/what"
      //   React.renderToStaticMarkup(a render r2) mustEqual "<h1>404!!</h1>"
      // }
      'badbase - testh(r, AbsUrl("https://www.google.com"))(List(_), MyPage.Root, "")
      'tslash_root - testh(r, base / "" abs) (_ => Nil, MyPage.Root, "")
      'tslash_path - testh(r, base / "hello/" abs)(_ => Nil, MyPage.Hello, "/hello")
    }
  }
}
} | elacin/scalajs-react | test/src/test/scala/japgolly/scalajs/react/extra/router2/RouterTest.scala | Scala | apache-2.0 | 5,967 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.io.File
import java.net.URI
import scala.collection.mutable
import scala.language.reflectiveCalls
import org.apache.hadoop.fs.{BlockLocation, FileStatus, LocatedFileStatus, Path, RawLocalFileSystem}
import org.apache.spark.metrics.source.HiveCatalogMetrics
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.util.{KnownSizeEstimation, SizeEstimator}
/**
 * Tests for file-index implementations, chiefly `InMemoryFileIndex`: path
 * qualification, partition discovery and validation, parallel-listing
 * thresholds, hidden-file filtering, status caching and block-location
 * normalization.
 */
class FileIndexSuite extends SharedSQLContext {

  test("InMemoryFileIndex: leaf files are qualified paths") {
    withTempDir { dir =>
      val file = new File(dir, "text.txt")
      stringToFile(file, "text")

      val path = new Path(file.getCanonicalPath)
      val catalog = new InMemoryFileIndex(spark, Seq(path), Map.empty, None) {
        def leafFilePaths: Seq[Path] = leafFiles.keys.toSeq
        def leafDirPaths: Seq[Path] = leafDirToChildrenFiles.keys.toSeq
      }
      // Every listed path must carry the "file:" scheme, i.e. be fully qualified.
      assert(catalog.leafFilePaths.forall(p => p.toString.startsWith("file:/")))
      assert(catalog.leafDirPaths.forall(p => p.toString.startsWith("file:/")))
    }
  }

  test("SPARK-26188: don't infer data types of partition columns if user specifies schema") {
    withTempDir { dir =>
      val partitionDirectory = new File(dir, "a=4d")
      partitionDirectory.mkdir()
      val file = new File(partitionDirectory, "text.txt")
      stringToFile(file, "text")
      val path = new Path(dir.getCanonicalPath)
      val schema = StructType(Seq(StructField("a", StringType, false)))
      val fileIndex = new InMemoryFileIndex(spark, Seq(path), Map.empty, Some(schema))
      val partitionValues = fileIndex.partitionSpec().partitions.map(_.values)
      // With a user-provided schema, "4d" must remain a string rather than be
      // inferred as a numeric value.
      assert(partitionValues.length == 1 && partitionValues(0).numFields == 1 &&
        partitionValues(0).getString(0) == "4d")
    }
  }

  test("SPARK-26230: if case sensitive, validate partitions with original column names") {
    withTempDir { dir =>
      val partitionDirectory = new File(dir, "a=1")
      partitionDirectory.mkdir()
      val file = new File(partitionDirectory, "text.txt")
      stringToFile(file, "text")
      val partitionDirectory2 = new File(dir, "A=2")
      partitionDirectory2.mkdir()
      val file2 = new File(partitionDirectory2, "text.txt")
      stringToFile(file2, "text")
      val path = new Path(dir.getCanonicalPath)

      // Case-insensitive: "a" and "A" are treated as the same column.
      withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
        val fileIndex = new InMemoryFileIndex(spark, Seq(path), Map.empty, None)
        val partitionValues = fileIndex.partitionSpec().partitions.map(_.values)
        assert(partitionValues.length == 2)
      }

      // Case-sensitive: conflicting column names must be reported with both spellings.
      withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
        val msg = intercept[AssertionError] {
          val fileIndex = new InMemoryFileIndex(spark, Seq(path), Map.empty, None)
          fileIndex.partitionSpec()
        }.getMessage
        assert(msg.contains("Conflicting partition column names detected"))
        assert("Partition column name list #[0-1]: A".r.findFirstIn(msg).isDefined)
        assert("Partition column name list #[0-1]: a".r.findFirstIn(msg).isDefined)
      }
    }
  }

  test("SPARK-26263: Throw exception when partition value can't be casted to user-specified type") {
    withTempDir { dir =>
      val partitionDirectory = new File(dir, "a=foo")
      partitionDirectory.mkdir()
      val file = new File(partitionDirectory, "text.txt")
      stringToFile(file, "text")
      val path = new Path(dir.getCanonicalPath)
      val schema = StructType(Seq(StructField("a", IntegerType, false)))

      // Validation enabled: the bad value must fail fast with a clear message.
      withSQLConf(SQLConf.VALIDATE_PARTITION_COLUMNS.key -> "true") {
        val fileIndex = new InMemoryFileIndex(spark, Seq(path), Map.empty, Some(schema))
        val msg = intercept[RuntimeException] {
          fileIndex.partitionSpec()
        }.getMessage
        assert(msg == "Failed to cast value `foo` to `IntegerType` for partition column `a`")
      }

      // Validation disabled: the uncastable value silently becomes null.
      withSQLConf(SQLConf.VALIDATE_PARTITION_COLUMNS.key -> "false") {
        val fileIndex = new InMemoryFileIndex(spark, Seq(path), Map.empty, Some(schema))
        val partitionValues = fileIndex.partitionSpec().partitions.map(_.values)
        assert(partitionValues.length == 1 && partitionValues(0).numFields == 1 &&
          partitionValues(0).isNullAt(0))
      }
    }
  }

  test("InMemoryFileIndex: input paths are converted to qualified paths") {
    withTempDir { dir =>
      val file = new File(dir, "text.txt")
      stringToFile(file, "text")

      val unqualifiedDirPath = new Path(dir.getCanonicalPath)
      val unqualifiedFilePath = new Path(file.getCanonicalPath)
      require(!unqualifiedDirPath.toString.contains("file:"))
      require(!unqualifiedFilePath.toString.contains("file:"))

      val fs = unqualifiedDirPath.getFileSystem(spark.sessionState.newHadoopConf())
      val qualifiedFilePath = fs.makeQualified(new Path(file.getCanonicalPath))
      require(qualifiedFilePath.toString.startsWith("file:"))

      // Both directory roots and file roots must be qualified during listing.
      val catalog1 = new InMemoryFileIndex(
        spark, Seq(unqualifiedDirPath), Map.empty, None)
      assert(catalog1.allFiles.map(_.getPath) === Seq(qualifiedFilePath))

      val catalog2 = new InMemoryFileIndex(
        spark, Seq(unqualifiedFilePath), Map.empty, None)
      assert(catalog2.allFiles.map(_.getPath) === Seq(qualifiedFilePath))
    }
  }

  test("InMemoryFileIndex: folders that don't exist don't throw exceptions") {
    withTempDir { dir =>
      val deletedFolder = new File(dir, "deleted")
      assert(!deletedFolder.exists())
      val catalog1 = new InMemoryFileIndex(
        spark, Seq(new Path(deletedFolder.getCanonicalPath)), Map.empty, None)
      // doesn't throw an exception
      assert(catalog1.listLeafFiles(catalog1.rootPaths).isEmpty)
    }
  }

  test("PartitioningAwareFileIndex listing parallelized with many top level dirs") {
    // Below the parallel-discovery threshold no Spark job runs; above it, one does.
    for ((scale, expectedNumPar) <- Seq((10, 0), (50, 1))) {
      withTempDir { dir =>
        val topLevelDirs = (1 to scale).map { i =>
          val tmp = new File(dir, s"foo=$i.txt")
          tmp.mkdir()
          new Path(tmp.getCanonicalPath)
        }
        HiveCatalogMetrics.reset()
        assert(HiveCatalogMetrics.METRIC_PARALLEL_LISTING_JOB_COUNT.getCount() == 0)
        new InMemoryFileIndex(spark, topLevelDirs, Map.empty, None)
        assert(HiveCatalogMetrics.METRIC_PARALLEL_LISTING_JOB_COUNT.getCount() == expectedNumPar)
      }
    }
  }

  test("PartitioningAwareFileIndex listing parallelized with large child dirs") {
    for ((scale, expectedNumPar) <- Seq((10, 0), (50, 1))) {
      withTempDir { dir =>
        for (i <- 1 to scale) {
          new File(dir, s"foo=$i.txt").mkdir()
        }
        HiveCatalogMetrics.reset()
        assert(HiveCatalogMetrics.METRIC_PARALLEL_LISTING_JOB_COUNT.getCount() == 0)
        new InMemoryFileIndex(spark, Seq(new Path(dir.getCanonicalPath)), Map.empty, None)
        assert(HiveCatalogMetrics.METRIC_PARALLEL_LISTING_JOB_COUNT.getCount() == expectedNumPar)
      }
    }
  }

  test("PartitioningAwareFileIndex listing parallelized with large, deeply nested child dirs") {
    // 2 x 2 x scale leaf dirs; with scale=50 each of the 4 subtrees triggers a job.
    for ((scale, expectedNumPar) <- Seq((10, 0), (50, 4))) {
      withTempDir { dir =>
        for (i <- 1 to 2) {
          val subdirA = new File(dir, s"a=$i")
          subdirA.mkdir()
          for (j <- 1 to 2) {
            val subdirB = new File(subdirA, s"b=$j")
            subdirB.mkdir()
            for (k <- 1 to scale) {
              new File(subdirB, s"foo=$k.txt").mkdir()
            }
          }
        }
        HiveCatalogMetrics.reset()
        assert(HiveCatalogMetrics.METRIC_PARALLEL_LISTING_JOB_COUNT.getCount() == 0)
        new InMemoryFileIndex(spark, Seq(new Path(dir.getCanonicalPath)), Map.empty, None)
        assert(HiveCatalogMetrics.METRIC_PARALLEL_LISTING_JOB_COUNT.getCount() == expectedNumPar)
      }
    }
  }

  test("InMemoryFileIndex - file filtering") {
    // Dot/underscore-prefixed names are hidden, except the Parquet metadata files.
    assert(!InMemoryFileIndex.shouldFilterOut("abcd"))
    assert(InMemoryFileIndex.shouldFilterOut(".ab"))
    assert(InMemoryFileIndex.shouldFilterOut("_cd"))
    assert(!InMemoryFileIndex.shouldFilterOut("_metadata"))
    assert(!InMemoryFileIndex.shouldFilterOut("_common_metadata"))
    assert(InMemoryFileIndex.shouldFilterOut("_ab_metadata"))
    assert(InMemoryFileIndex.shouldFilterOut("_cd_common_metadata"))
    assert(InMemoryFileIndex.shouldFilterOut("a._COPYING_"))
  }

  test("SPARK-17613 - PartitioningAwareFileIndex: base path w/o '/' at end") {
    class MockCatalog(
      override val rootPaths: Seq[Path])
      extends PartitioningAwareFileIndex(spark, Map.empty, None) {

      override def refresh(): Unit = {}

      override def leafFiles: mutable.LinkedHashMap[Path, FileStatus] = mutable.LinkedHashMap(
        new Path("mockFs://some-bucket/file1.json") -> new FileStatus()
      )

      override def leafDirToChildrenFiles: Map[Path, Array[FileStatus]] = Map(
        new Path("mockFs://some-bucket/") -> Array(new FileStatus())
      )

      override def partitionSpec(): PartitionSpec = {
        PartitionSpec.emptySpec
      }
    }

    withSQLConf(
        "fs.mockFs.impl" -> classOf[FakeParentPathFileSystem].getName,
        "fs.mockFs.impl.disable.cache" -> "true") {
      // Both spellings of the bucket root are parentless and must list the same files.
      val pathWithSlash = new Path("mockFs://some-bucket/")
      assert(pathWithSlash.getParent === null)
      val pathWithoutSlash = new Path("mockFs://some-bucket")
      assert(pathWithoutSlash.getParent === null)
      val catalog1 = new MockCatalog(Seq(pathWithSlash))
      val catalog2 = new MockCatalog(Seq(pathWithoutSlash))
      assert(catalog1.allFiles().nonEmpty)
      assert(catalog2.allFiles().nonEmpty)
    }
  }

  // Note: the concatenated test name previously read "...THRESHOLDis a nonpositive
  // number"; a separating space has been added.
  test("InMemoryFileIndex with empty rootPaths when PARALLEL_PARTITION_DISCOVERY_THRESHOLD" +
    " is a nonpositive number") {
    withSQLConf(SQLConf.PARALLEL_PARTITION_DISCOVERY_THRESHOLD.key -> "0") {
      new InMemoryFileIndex(spark, Seq.empty, Map.empty, None)
    }

    val e = intercept[IllegalArgumentException] {
      withSQLConf(SQLConf.PARALLEL_PARTITION_DISCOVERY_THRESHOLD.key -> "-1") {
        new InMemoryFileIndex(spark, Seq.empty, Map.empty, None)
      }
    }.getMessage
    assert(e.contains("The maximum number of paths allowed for listing files at " +
      "driver side must not be negative"))
  }

  test("refresh for InMemoryFileIndex with FileStatusCache") {
    withTempDir { dir =>
      val fileStatusCache = FileStatusCache.getOrCreate(spark)
      val dirPath = new Path(dir.getAbsolutePath)
      val fs = dirPath.getFileSystem(spark.sessionState.newHadoopConf())
      val catalog =
        new InMemoryFileIndex(spark, Seq(dirPath), Map.empty, None, fileStatusCache) {
          def leafFilePaths: Seq[Path] = leafFiles.keys.toSeq
          def leafDirPaths: Seq[Path] = leafDirToChildrenFiles.keys.toSeq
        }

      // The file is created after the index, so it is invisible until refresh().
      val file = new File(dir, "text.txt")
      stringToFile(file, "text")

      assert(catalog.leafDirPaths.isEmpty)
      assert(catalog.leafFilePaths.isEmpty)

      catalog.refresh()

      assert(catalog.leafFilePaths.size == 1)
      assert(catalog.leafFilePaths.head == fs.makeQualified(new Path(file.getAbsolutePath)))

      assert(catalog.leafDirPaths.size == 1)
      assert(catalog.leafDirPaths.head == fs.makeQualified(dirPath))
    }
  }

  test("SPARK-20280 - FileStatusCache with a partition with very many files") {
    /* fake the size, otherwise we need to allocate 2GB of data to trigger this bug */
    class MyFileStatus extends FileStatus with KnownSizeEstimation {
      override def estimatedSize: Long = 1000 * 1000 * 1000
    }
    /* files * MyFileStatus.estimatedSize should overflow to negative integer
     * so, make it between 2bn and 4bn
     */
    val files = (1 to 3).map { _ =>
      new MyFileStatus()
    }
    val fileStatusCache = FileStatusCache.getOrCreate(spark)
    // Must not throw despite the (simulated) Int overflow of the total size.
    fileStatusCache.putLeafFiles(new Path("/tmp", "abc"), files.toArray)
  }

  test("SPARK-20367 - properly unescape column names in inferPartitioning") {
    withTempPath { path =>
      // Column name containing characters that get URL-escaped in directory names.
      val colToUnescape = "Column/#%'?"
      spark
        .range(1)
        .select(col("id").as(colToUnescape), col("id"))
        .write.partitionBy(colToUnescape).parquet(path.getAbsolutePath)
      assert(spark.read.parquet(path.getAbsolutePath).schema.exists(_.name == colToUnescape))
    }
  }

  test("SPARK-25062 - InMemoryFileIndex stores BlockLocation objects no matter what subclass " +
    "the FS returns") {
    withSQLConf("fs.file.impl" -> classOf[SpecialBlockLocationFileSystem].getName) {
      withTempDir { dir =>
        val file = new File(dir, "text.txt")
        stringToFile(file, "text")
        val inMemoryFileIndex = new InMemoryFileIndex(
          spark, Seq(new Path(file.getCanonicalPath)), Map.empty, None) {
          def leafFileStatuses = leafFiles.values
        }
        val blockLocations = inMemoryFileIndex.leafFileStatuses.flatMap(
          _.asInstanceOf[LocatedFileStatus].getBlockLocations)

        // Subclass instances returned by the FS must be normalized to plain BlockLocation.
        assert(blockLocations.forall(_.getClass == classOf[BlockLocation]))
      }
    }
  }
}
/**
 * Local file system stub that reports the custom "mockFs" scheme/URI, used by
 * the SPARK-17613 test to create paths whose root has no parent.
 */
class FakeParentPathFileSystem extends RawLocalFileSystem {
  override def getScheme: String = "mockFs"

  override def getUri: URI = {
    URI.create("mockFs://some-bucket")
  }
}
/**
 * Local file system that returns a BlockLocation *subclass* from
 * getFileBlockLocations, used by the SPARK-25062 test to verify that
 * InMemoryFileIndex normalizes such instances back to plain BlockLocation.
 */
class SpecialBlockLocationFileSystem extends RawLocalFileSystem {

  // Behaves exactly like BlockLocation; only its runtime class differs.
  class SpecialBlockLocation(
      names: Array[String],
      hosts: Array[String],
      offset: Long,
      length: Long)
    extends BlockLocation(names, hosts, offset, length)

  override def getFileBlockLocations(
      file: FileStatus,
      start: Long,
      len: Long): Array[BlockLocation] = {
    Array(new SpecialBlockLocation(Array("dummy"), Array("dummy"), 0L, file.getLen))
  }
}
| WindCanDie/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileIndexSuite.scala | Scala | apache-2.0 | 14,679 |
/* Copyright (C) 2012-2013 Treode, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.treode.cps.example
import java.net.SocketAddress
import java.nio.ByteBuffer
import java.nio.channels.ClosedChannelException
import java.nio.charset.StandardCharsets.UTF_8
import com.treode.cps.{CpsKit, CpsSocketKit, cut, thunk}
import com.treode.cps.buffer.{Buffer, InputBuffer}
import com.treode.cps.io.{ServerSocket, Socket}
import com.treode.cps.scheduler.Scheduler
import com.treode.cps.CpsSocketKit
/**
 * Echo server/client example built on the CPS socket kit: the server echoes
 * length-prefixed UTF-8 strings back to the client, and the client measures
 * round-trip throughput.
 */
trait ExampleKit {
  // The Example provides the server and client, but it requires a runtime system to provide a
  // scheduler and sockets. The test specifies a stub runtime system to facilitate testing, and
  // the scripts provide the live runtime system for actual operation.
  this: CpsKit with CpsSocketKit =>

  /** Reads one length-prefixed (4-byte int) UTF-8 string from the buffer;
   *  throws ClosedChannelException if the stream ends mid-message. */
  private [example] def readString (buf: InputBuffer) = {
    if (buf.ensure (4) < 0) throw new ClosedChannelException
    val len = buf.readInt
    if (buf.ensure (len) < 0) throw new ClosedChannelException
    val bytes = ByteBuffer.allocate (len)
    buf.readBytes (bytes)
    bytes.flip()
    UTF_8 .decode (bytes) .toString
  }

  /** Writes a string as a 4-byte length prefix followed by UTF-8 bytes,
   *  looping until every buffer is fully drained. */
  private [example] def writeString (sock: Socket, str: String) = {
    val enc = UTF_8.encode (str)
    val buf = Buffer()
    buf.writeInt (enc.limit)
    buf.writeBytes (enc)
    val bytes = buf.readableByteBuffers
    sock.write (bytes)
    while (bytes .map (_.remaining) .sum > 0)
      sock.write (bytes)
  }

  // The CPS plugin has some trouble with try/catch. To work around it when it happens, which is
  // not always, pull the try/catch block into its own method.

  /** One echo round for a connected client; a closed channel ends the
   *  continuation via cut(). */
  def clientCatcher (client: Socket, input: InputBuffer) =
    try {
      val string = readString (input)
      writeString (client, string)
      // Drop the bytes already consumed from the input buffer.
      input.discard (input.readAt)
    } catch {
      case _ :ClosedChannelException => cut()
    }

  /** Spawns a fiber that echoes messages until the client socket closes. */
  def serveClient (client: Socket) {
    scheduler.spawn {
      val input = InputBuffer (client)
      while (client.isOpen) {
        clientCatcher (client, input)
      }}}

  /** Spawns the accept loop: binds to addr and serves each connection. */
  def launchServer (addr: SocketAddress) {
    scheduler.spawn {
      val server = newServerSocket
      server.bind (addr)
      println ("Listening on " + server.localAddress.get)
      while (true) {
        serveClient (server.accept())
      }}}

  /** Runs `f` warmup times unmeasured, then `trials` times, printing mean
   *  latency and throughput every 1000 iterations. */
  private def time (s: String, warmup: Int, trials: Int) (f: => Any @thunk) = {
    println ("Starting " + s + " warmup")
    var i = 0
    while (i < warmup) {
      f
      i += 1
    }
    println ("Starting " + s + " timing")
    var start = System.currentTimeMillis
    i = 0
    while (i < trials) {
      f
      i += 1
      if (i % 1000 == 0 || i == trials) {
        // n is the size of the batch just finished (the last one may be partial).
        val n = if (i % 1000 == 0) 1000 else i % 1000
        val end = System.currentTimeMillis
        val ms = (end - start) .toDouble / n.toDouble
        val qps = n.toDouble / (end - start) .toDouble * 1000.0
        println ("%20s (%d trials): %10.3f ms, %10.0f qps" format (s, n, ms, qps))
        start = System.currentTimeMillis
      }}}

  /** Spawns the benchmark client: connects, times echo round-trips, then
   *  shuts the runtime down. */
  def launchClient (addr: SocketAddress) {
    scheduler.spawn {
      val client = newSocket
      client.connect (addr)
      val input = InputBuffer (client)
      time ("echo", 1000, 100000) {
        writeString (client, "Hello")
        val string = readString (input)
        assert ("Hello" == string)
      }
      shutdown()
    }}}
| Treode/cps-example | src/main/scala/com/treode/cps/example/ExampleKit.scala | Scala | apache-2.0 | 3,891 |
package com.twitter.finagle.stats
import com.twitter.app.GlobalFlag
import scala.collection.Map
import scala.collection.mutable
/**
 * Global flag selecting the style used to render metric names; defaults to
 * "commonsmetrics".
 */
object format extends GlobalFlag[String](
  "commonsmetrics",
  "Format style for metric names (ostrich|commonsmetrics)"
) {
  // Recognized flag values, matched in StatsFormatter.default.
  private[stats] val Ostrich = "ostrich"
  private[stats] val CommonsMetrics = "commonsmetrics"
}
/**
* Allows for customization of how stat names get formatted.
*/
/**
 * Flattens sampled stats into a name -> value map. Gauges and counters pass
 * through untouched; histograms are expanded into count/sum/avg/min/max and
 * per-percentile entries, with subclasses deciding how each component name is
 * rendered.
 */
private[stats] sealed trait StatsFormatter {
  def apply(values: SampledValues): Map[String, Number] = {
    val flattened = new mutable.HashMap[String, Number]()
    flattened ++= values.gauges
    flattened ++= values.counters
    for ((name, snapshot) <- values.histograms) {
      flattened += histoName(name, "count") -> snapshot.count
      flattened += histoName(name, "sum") -> snapshot.sum
      flattened += histoName(name, labelAverage) -> snapshot.avg
      flattened += histoName(name, labelMin) -> snapshot.min
      flattened += histoName(name, labelMax) -> snapshot.max
      snapshot.percentiles.foreach { p =>
        flattened += histoName(name, labelPercentile(p.getQuantile)) -> p.getValue
      }
    }
    flattened
  }

  /**
   * Builds the complete formatted name for one histogram component.
   *
   * @param name the histogram's base name
   * @param component a single piece of the summary (count, sum, average,
   *                  min, max, or a percentile label)
   */
  protected def histoName(name: String, component: String): String

  /** Label used for percentile `p` of a histogram. */
  protected def labelPercentile(p: Double): String

  /** Label used for the histogram minimum. */
  protected def labelMin: String

  /** Label used for the histogram maximum. */
  protected def labelMax: String

  /** Label used for the histogram average. */
  protected def labelAverage: String
}
private[stats] object StatsFormatter {

  /**
   * Selects the formatter named by the global [[format]] flag.
   */
  def default: StatsFormatter =
    format() match {
      case format.CommonsMetrics => CommonsMetrics
      case format.Ostrich => Ostrich
    }

  /**
   * Default formatting, mirroring Commons Metrics' `Metrics.sample()`.
   */
  object CommonsMetrics extends StatsFormatter {
    protected def histoName(name: String, component: String): String =
      name + "." + component

    protected def labelPercentile(p: Double): String = {
      // Quirk carried over from Commons Metrics: p999 is rendered as p9990.
      // Only a label whose digits after the first one are exactly "00" is
      // shortened (e.g. p5000 -> p50).
      val raw = "p" + (p * 10000).toInt
      if (raw.length > 3 && raw.substring(3) == "00") raw.substring(0, 3)
      else raw
    }

    protected def labelMin: String = "min"

    protected def labelMax: String = "max"

    protected def labelAverage: String = "avg"
  }

  /**
   * Formatting compatible with Ostrich's `Distribution.toMap`.
   */
  object Ostrich extends StatsFormatter {
    protected def histoName(name: String, component: String): String =
      name + "." + component

    protected def labelPercentile(p: Double): String =
      p match {
        case 0.5d => "p50"
        case 0.9d => "p90"
        case 0.95d => "p95"
        case 0.99d => "p99"
        case 0.999d => "p999"
        case 0.9999d => "p9999"
        // Any other quantile falls back to the scaled-by-10000 form.
        case other => "p" + (other * 10000).toInt
      }

    protected def labelMin: String = "minimum"

    protected def labelMax: String = "maximum"

    protected def labelAverage: String = "average"
  }
}
| jay-johnson/finagle | finagle-stats/src/main/scala/com/twitter/finagle/stats/StatsFormatter.scala | Scala | apache-2.0 | 3,646 |
/*
* Copyright 2013 Akiyoshi Sugiki, University of Tsukuba
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kumoi.impl.oflow.floodlight
import org.openflow.protocol._
import org.openflow.protocol.action._
import org.openflow.protocol.statistics._
import net.floodlightcontroller.core._
import net.floodlightcontroller.core.module._
import net.floodlightcontroller.counter._
import net.floodlightcontroller.packet._
import net.floodlightcontroller.core.IOFMessageListener._
import org.slf4j._
import scala.collection.JavaConverters._
import java.util.ArrayList
import java.net.{URL, URLClassLoader, InetAddress}
import java.rmi.RMISecurityManager
import scala.actors.remote._
import net.floodlightcontroller.core.IListener.Command
import kumoi.shell.aaa._
import kumoi.shell.event._
import kumoi.core.Config
import java.rmi.server._
import java.rmi.registry._
import kumoi.shell.oflow._
import kumoi.core.classloader._
import kumoi.impl.oflow._
import kumoi.impl.oflow.OFlowCommon._
/**
* Openflow Controller.
*
* @author Akiyoshi Sugiki
*/
class FloodlightController extends OFlowObject[OFlowController] with OFlowController
with IOFSwitchListener with IOFMessageListener with IFloodlightModule {
// SLF4J logger shared by all controller operations.
private val log = LoggerFactory.getLogger(classOf[OFlowController])
// Floodlight services; injected in init(), null until then.
private var provider: IFloodlightProviderService = null
private var cstore: ICounterStoreService = null
// Class loader able to fetch classes from remote URLs registered via loadFrom().
private var cloader = new RemoteClassLoader(Array(), getClass.getClassLoader)

// constructor: install the remote class loader immediately.
changeClassLoader()
/** Installs the remote class loader as both the remote-actor class loader and the
 *  current thread's context loader. Called at the start of every public operation
 *  so remotely loaded handler classes resolve on whichever thread runs them. */
private def changeClassLoader() {
  //System.setSecurityManager(new RMISecurityManager)
  log.info("***** changeClassLoader() *****")
  RemoteActor.classLoader = cloader
  Thread.currentThread.setContextClassLoader(cloader)
  log.info("***SWAP=" + Thread.currentThread.getContextClassLoader + "***")
}
// Fallback handler: logs any event no other handler matched and returns true.
private val defaultHandler: PartialFunction[OFlowEvent, Boolean] = {
  case event =>
    log.info("********** UNMATCH **********")
    log.info("event=" + event)
    true
}

// Registered handlers, most-recently-added first; defaultHandler is always last.
private var handlers: List[PartialFunction[OFlowEvent, Boolean]] = List(defaultHandler)
// Pre-combined handler chain (orElse over handlers); rebuilt on add/remove.
private var singletonHandler: PartialFunction[OFlowEvent, Boolean] = handlers.reduceLeft(_ orElse _)
/** Controller address. NOTE(review): placeholder implementation — returns the
 *  local host, not the actual listen address (see the logged warning). */
def addr(implicit auth: AAA) = {
  log.warn("addr() was not implemented.")
  changeClassLoader()
  InetAddress.getLocalHost
}
/** Controller port. NOTE(review): placeholder implementation — always returns
 *  the hard-coded value 6633 (see the logged warning). */
def port(implicit auth: AAA) = {
  log.warn("port() was not implemented.")
  changeClassLoader()
  6633
}
/**
 * Registers http://host:port/ with the remote class loader so handler class
 * files can subsequently be fetched from that location.
 */
def loadFrom(host: InetAddress, port: Int)(implicit auth: AAA) {
  changeClassLoader()
  val url = new URL("http://" + host.getHostName + ":" + port + "/")
  cloader.addURL(url)
  log.info("loadFrom " + url)
}
/**
 * Removes http://host:port/ from the remote class loader's URL list so class
 * files are no longer fetched from that location.
 */
def unloadFrom(host: InetAddress, port: Int)(implicit auth: AAA) {
  changeClassLoader()
  val url = new URL("http://" + host.getHostName + ":" + port + "/")
  cloader.removeURL(url)
  log.info("unloadFrom " + url)
}
/**
 * Prepends an OpenFlow event handler. Handlers are tried most-recently-added
 * first, with the logging default handler as the final fallback.
 */
def add(f: PartialFunction[OFlowEvent, Boolean])(implicit auth: AAA) {
  changeClassLoader()
  log.info("actor=" + RemoteActor.classLoader)
  log.info("context=" + Thread.currentThread.getContextClassLoader)
  log.info("urls=" + cloader.getURLs)
  handlers = f :: handlers
  // Rebuild the combined chain so dispatch stays a single PartialFunction.
  singletonHandler = handlers.reduceLeft(_ orElse _)
}
/**
 * Removes a previously registered event handler (matched by reference
 * equality) and rebuilds the combined handler chain.
 */
def remove(f: PartialFunction[OFlowEvent, Boolean])(implicit auth: AAA) {
  changeClassLoader()
  handlers = handlers.filter(_ != f)
  singletonHandler = handlers.reduceLeft(_ orElse _)
}
/**
 * Controller name exposed to the shell; delegates to getName().
 */
override def name(implicit auth: AAA) = {
  changeClassLoader()
  getName
}
/**
 * Fixed module name reported to Floodlight's listener framework.
 */
def getName() = "OFlowController"
/**
 * Listener id. NOTE(review): hard-coded to 0.
 */
def getId() = {
  changeClassLoader()
  0
}
/** Returns controller metadata: id, cluster node IPs, and system start time. */
override def info(implicit auth: AAA) = {
  changeClassLoader()
  //log.info("controller id=" + provider.getControllerId())
  //log.info(provider.getControllerInfo(""))
  //log.info("node ips=" + provider.getControllerNodeIPs())
  //log.info("switches=" + provider.getSwitches())
  //log.info("startTime=" + provider.getSystemStartTime())
  List(ControllerId(provider.getControllerId),
    NodeIPs(provider.getControllerNodeIPs.asScala.toMap),
    StartTime(new java.util.Date(provider.getSystemStartTime)))
}
/** Controller statistics. NOTE(review): not implemented — always empty. */
override def stats(implicit auth: AAA) = {
  changeClassLoader()
  List()
}
/** All switches currently known to the provider, wrapped for the shell API. */
def switches(implicit auth: AAA) = {
  changeClassLoader()
  provider.getSwitches.values.asScala.map(new FloodlightSwitch(provider, _, this)).toList
}
protected def genEvent(e: Exception) = NetControllerError(e)
/**
 * Floodlight services this module depends on: the provider service and the
 * counter store.
 */
def getModuleDependencies() = {
  changeClassLoader()
  val l = new ArrayList[Class[_ <: IFloodlightService]]()
  l.add(classOf[IFloodlightProviderService])
  l.add(classOf[ICounterStoreService])
  l
}
/**
 * Module initialization: exports this controller over RMI (creating a local
 * registry on ofcPort and binding it as ofcName) and captures the Floodlight
 * provider/counter-store services for later use.
 */
def init(context: FloodlightModuleContext) {
  changeClassLoader()
  log.info("********** INIT **********")
  val registry = LocateRegistry.createRegistry(ofcPort)
  registry.bind(ofcName, this)
  provider = context.getServiceImpl(classOf[IFloodlightProviderService])
  cstore = context.getServiceImpl(classOf[ICounterStoreService])
}
/**
 * Module start-up: registers this object as a switch listener and subscribes
 * to the OpenFlow message types handled by receive(). The commented-out
 * registrations document message types that are intentionally not listened to.
 */
def startUp(context: FloodlightModuleContext) {
  changeClassLoader()
  provider.addOFSwitchListener(this) // -> OK
  //OFlowKernel.start()
  // Asynchronous messages
  provider.addOFMessageListener(OFType.PACKET_IN, this) // -> OK
  provider.addOFMessageListener(OFType.FLOW_REMOVED, this) // -> OK
  provider.addOFMessageListener(OFType.PORT_STATUS, this) // -> OK
  // Immutable messages
  provider.addOFMessageListener(OFType.ERROR, this) // XX: never sent
  // Barrier messages
  provider.addOFMessageListener(OFType.BARRIER_REPLY, this) // -> OK
  // Misc. messages
  provider.addOFMessageListener(OFType.STATS_REPLY, this) // -> OK
  provider.addOFMessageListener(OFType.GET_CONFIG_REPLY, this) // XX: never sent
  provider.addOFMessageListener(OFType.VENDOR, this) // -> ???
  /*
  provider.addOFMessageListener(OFType.HELLO, this) // x
  provider.addOFMessageListener(OFType.ECHO_REQUEST, this) // x
  provider.addOFMessageListener(OFType.ECHO_REPLY, this) // x
  // Switch configuration messages
  provider.addOFMessageListener(OFType.FEATURES_REQUEST, this) // x
  provider.addOFMessageListener(OFType.FEATURES_REPLY, this) // x
  provider.addOFMessageListener(OFType.GET_CONFIG_REQUEST, this)
  provider.addOFMessageListener(OFType.SET_CONFIG, this) // todo
  // Controller command messages
  provider.addOFMessageListener(OFType.PACKET_OUT, this) // DONE
  provider.addOFMessageListener(OFType.FLOW_MOD, this) // DONE
  provider.addOFMessageListener(OFType.PORT_MOD, this) // todo
  // Barrier messages
  provider.addOFMessageListener(OFType.BARRIER_REQUEST, this) // todo
  provider.addOFMessageListener(OFType.STATS_REQUEST, this) // DONE
  //
  // Queue configuration messages
  //
  */
}
/**
 * Switch-connected callback: dispatches a DatapathJoin event through the
 * handler chain.
 */
def addedSwitch(sw: IOFSwitch) { // Data path join.
  changeClassLoader()
  log.info("***** switch attributes=" + sw.getAttributes)
  //handleDatapathJoin(DatapathJoin(new FloodlightSwitch(provider, sw)))
  singletonHandler(DatapathJoin(new FloodlightSwitch(provider, sw, this)))
}
/**
 * Switch-disconnected callback: dispatches a DatapathLeave event through the
 * handler chain.
 */
def removedSwitch(sw: IOFSwitch) { // Data path leave.
  changeClassLoader()
  //handleDatapathLeave(DatapathLeave(new FloodlightSwitch(provider, sw)))
  singletonHandler(DatapathLeave(new FloodlightSwitch(provider, sw, this)))
}
/**
* receive()
*/
/**
 * Floodlight callback: dispatches an incoming OpenFlow message to the
 * matching process* handler based on its message type. Unhandled types are
 * logged and the message is allowed to continue down the listener chain.
 */
def receive(sw: IOFSwitch, msg: OFMessage, context: net.floodlightcontroller.core.FloodlightContext) = {
  changeClassLoader()
  msg.getType match {
    case OFType.PACKET_IN     => processPacketIn(sw, msg.asInstanceOf[OFPacketIn], context)
    case OFType.FLOW_REMOVED  => processFlowRemoved(sw, msg.asInstanceOf[OFFlowRemoved], context)
    case OFType.PORT_STATUS   => processPortStatus(sw, msg.asInstanceOf[OFPortStatus], context)
    case OFType.STATS_REPLY   => processStatsReply(sw, msg.asInstanceOf[OFStatisticsReply], context)
    case OFType.BARRIER_REPLY => processBarrierReply(sw, msg.asInstanceOf[OFBarrierReply], context)
    case OFType.ERROR         => processError(sw, msg.asInstanceOf[OFError], context)
    case unhandled => // binds the OFType (the match scrutinee), not the message
      log.warn("unhandled message " + sw + " -> " + unhandled)
      Command.CONTINUE // error
  }
}
/** Wraps one decoded statistics entry and hands it to the singleton handler. */
private def statsReply(sw: IOFSwitch, reply: OFlowStats.Reply, context: net.floodlightcontroller.core.FloodlightContext) {
  val switch = new FloodlightSwitch(provider, sw, this)
  singletonHandler(StatsReply(switch, reply, new FloodlightContext(context)))
}
/**
 * Decodes an OFStatisticsReply into the typed OFlowStats case classes and
 * forwards one StatsReply event per statistics entry contained in the reply.
 * Always returns Command.CONTINUE so other listeners still see the message.
 */
private def processStatsReply(sw: IOFSwitch, reply: OFStatisticsReply, context: net.floodlightcontroller.core.FloodlightContext) = {
  import OFlowStats._
  //log.info("****** DEBUG *****")
  reply.getStatisticType match {
    case OFStatisticsType.DESC =>
      //log.info("DESC stats reply=" + reply)
      for (s <- reply.getStatistics.asScala) {
        val desc = s.asInstanceOf[OFDescriptionStatistics]
        statsReply(sw,
          DescStats(reply.getXid, desc.getManufacturerDescription,
            desc.getHardwareDescription,
            desc.getSoftwareDescription,
            desc.getSerialNumber,
            desc.getDatapathDescription), context)
      }
    case OFStatisticsType.TABLE =>
      //log.info("TABLE stats reply=" + reply)
      for (s <- reply.getStatistics.asScala) {
        val table = s.asInstanceOf[OFTableStatistics]
        statsReply(sw,
          TableStats(reply.getXid, table.getTableId, table.getName,
            FloodlightPattern.wildcards(table.getWildcards), table.getMaximumEntries,
            table.getActiveCount, table.getLookupCount, table.getMatchedCount),
          context)
      }
    case OFStatisticsType.FLOW =>
      //log.info("FLOW stats reply=" + reply)
      for (s <- reply.getStatistics.asScala) {
        val flow = s.asInstanceOf[OFFlowStatisticsReply]
        statsReply(sw,
          FlowStats(reply.getXid, flow.getTableId,
            FloodlightMatch.patternFrom(new FloodlightMatch(flow.getMatch)),
            FlowRemoved.Durations(flow.getDurationSeconds, flow.getDurationNanoseconds),
            flow.getPriority,
            FlowMod.Timeouts(flow.getIdleTimeout, flow.getHardTimeout),
            flow.getCookie,
            FlowRemoved.Counts(flow.getPacketCount, flow.getByteCount),
            FloodlightActions.translate(flow.getActions.asScala.toList)),
          context)
      }
    case OFStatisticsType.AGGREGATE =>
      //log.info("AGGREGATE stats reply=" + reply)
      for (s <- reply.getStatistics.asScala) {
        val agg = s.asInstanceOf[OFAggregateStatisticsReply]
        statsReply(sw,
          AggregateStats(reply.getXid, agg.getPacketCount, agg.getByteCount, agg.getFlowCount),
          context)
      }
    case OFStatisticsType.PORT =>
      //log.info("PORT stats reply=" + reply)
      for (s <- reply.getStatistics.asScala) {
        val port = s.asInstanceOf[OFPortStatisticsReply]
        // NOTE(review): the lowercase `getreceivePackets`/`getreceiveErrors`
        // casings presumably mirror the upstream openflowj getter names —
        // confirm against the OFPortStatisticsReply API before "fixing" them.
        statsReply(sw,
          PortStats(reply.getXid, port.getPortNumber,
            port.getreceivePackets, port.getTransmitPackets,
            port.getReceiveBytes, port.getTransmitBytes,
            port.getReceiveDropped, port.getTransmitDropped,
            port.getreceiveErrors, port.getTransmitErrors,
            port.getReceiveFrameErrors, port.getReceiveOverrunErrors,
            port.getReceiveCRCErrors, port.getCollisions),
          context)
      }
    case OFStatisticsType.QUEUE =>
      //log.info("QUEUE stats reply=" + reply)
      for (s <- reply.getStatistics.asScala) {
        val queue = s.asInstanceOf[OFQueueStatisticsReply]
        statsReply(sw,
          QueueStats(reply.getXid, queue.getPortNumber, queue.getQueueId,
            queue.getTransmitBytes, queue.getTransmitPackets,
            queue.getTransmitErrors),
          context)
      }
    case OFStatisticsType.VENDOR =>
      //log.info("VENDOR stats reply=" + reply)
      for (s <- reply.getStatistics.asScala) {
        // NOTE(review): vendor statistics are not decoded — `vendor` is unused
        // and a null payload is forwarded. Looks like a stub; confirm intent.
        val vendor = s.asInstanceOf[OFVendorStatistics]
        statsReply(sw, null, context)
      }
    case _ => log.warn("unknown stats query reply=" + reply)
  }
  //log.info("******************")
  Command.CONTINUE
}
/**
 * Converts an OFPacketIn into a typed PacketIn event and forwards it.
 * NOTE(review): the reason match has no default case (see the commented-out
 * TODO); an unexpected OFPacketInReason value would raise a MatchError.
 * Returns Command.CONTINUE so downstream listeners also see the packet.
 */
private def processPacketIn(sw: IOFSwitch, pi: OFPacketIn, context: net.floodlightcontroller.core.FloodlightContext) = {
  val reason = pi.getReason match {
    case OFPacketIn.OFPacketInReason.ACTION => PacketIn.Reason.Action
    case OFPacketIn.OFPacketInReason.NO_MATCH => PacketIn.Reason.NoMatch
    //case _ => PacketIn.NoMatch // TODO:
  }
  singletonHandler(PacketIn(new FloodlightSwitch(provider, sw, this), pi.getBufferId,
    FloodlightPort.convert(pi.getInPort), pi.getTotalLength,
    reason, pi.getPacketData, new FloodlightContext(context)))
  Command.CONTINUE // Command.STOP
}
/**
 * Converts an OFFlowRemoved into a typed FlowRemoved event and forwards it.
 * NOTE(review): the reason match covers only the three listed removal
 * reasons (see commented-out default); any other value raises a MatchError.
 */
private def processFlowRemoved(sw: IOFSwitch, fr: OFFlowRemoved, context: net.floodlightcontroller.core.FloodlightContext) = {
  import OFFlowRemoved.OFFlowRemovedReason._
  val reason = fr.getReason match {
    case OFPRR_IDLE_TIMEOUT => FlowRemoved.Reason.IdleTimeout
    case OFPRR_HARD_TIMEOUT => FlowRemoved.Reason.HardTimeout
    case OFPRR_DELETE => FlowRemoved.Reason.Delete
    //case _ => FlowRemoved.Delete // TODO:
  }
  val mtch = new FloodlightMatch(fr.getMatch)
  singletonHandler(FlowRemoved(new FloodlightSwitch(provider, sw, this), mtch, fr.getCookie,
    fr.getPriority, reason,
    FlowRemoved.Durations(fr.getDurationSeconds, fr.getDurationNanoseconds),
    fr.getIdleTimeout, FlowRemoved.Counts(fr.getPacketCount, fr.getByteCount),
    new FloodlightContext(context)))
  Command.CONTINUE // Command.STOP
}
/** Forwards an OpenFlow barrier reply (keyed by its transaction id) to the handler. */
private def processBarrierReply(sw: IOFSwitch, br: OFBarrierReply, context: net.floodlightcontroller.core.FloodlightContext) = {
  val switch = new FloodlightSwitch(provider, sw, this)
  val ctx = new FloodlightContext(context)
  singletonHandler(BarrierReply(switch, br.getXid, ctx))
  Command.CONTINUE
}
// Translates the wire-level OFPortReason to the typed PortStatus.Reason.
// Keyed by ordinal; processPortStatus looks entries up via ps.getReason
// (which presumably yields the same ordinal encoding — confirm upstream).
private val portStatusMap = Map(
  OFPortStatus.OFPortReason.OFPPR_ADD.ordinal -> PortStatus.Reason.Add,
  OFPortStatus.OFPortReason.OFPPR_DELETE.ordinal -> PortStatus.Reason.Delete,
  OFPortStatus.OFPortReason.OFPPR_MODIFY.ordinal -> PortStatus.Reason.Modify
)
/**
 * Forwards a port-status change (add/delete/modify) to the handler.
 * An unknown reason code fails the portStatusMap lookup (NoSuchElementException).
 */
private def processPortStatus(sw: IOFSwitch, ps: OFPortStatus, context: net.floodlightcontroller.core.FloodlightContext) = {
  val reason = portStatusMap(ps.getReason)
  val switch = new FloodlightSwitch(provider, sw, this)
  val port = new FloodlightPort(ps.getDesc)
  singletonHandler(PortStatus(switch, port, reason, new FloodlightContext(context)))
  Command.CONTINUE // Command.STOP
}
// Lookup tables translating raw OFError type/code ordinals into the typed
// error ADTs. All code maps are keyed by ordinal; errMap is keyed by the
// error-type wire value and pairs each type with its code map.
private val helloFailedMap = Map(
  OFError.OFHelloFailedCode.OFPHFC_INCOMPATIBLE.ordinal -> HelloFailed.Incompatible,
  OFError.OFHelloFailedCode.OFPHFC_EPERM.ordinal -> HelloFailed.Eperm
)
private val badRequestMap = Map(
  // NOTE(review): OFPBRC_BAD_VERSION maps to BadRequest.BadVendor — this
  // looks like a copy-paste slip (expected something like BadRequest.BadVersion,
  // if the ADT defines it). Confirm against BadRequest before changing.
  OFError.OFBadRequestCode.OFPBRC_BAD_VERSION.ordinal -> BadRequest.BadVendor,
  OFError.OFBadRequestCode.OFPBRC_BAD_TYPE.ordinal -> BadRequest.BadType,
  OFError.OFBadRequestCode.OFPBRC_BAD_STAT.ordinal -> BadRequest.BadStat,
  OFError.OFBadRequestCode.OFPBRC_BAD_VENDOR.ordinal -> BadRequest.BadVendor,
  OFError.OFBadRequestCode.OFPBRC_BAD_SUBTYPE.ordinal -> BadRequest.BadSubtype,
  OFError.OFBadRequestCode.OFPBRC_EPERM.ordinal -> BadRequest.Eperm,
  OFError.OFBadRequestCode.OFPBRC_BAD_LEN.ordinal -> BadRequest.BadLen,
  OFError.OFBadRequestCode.OFPBRC_BUFFER_EMPTY.ordinal -> BadRequest.BufferEmpty,
  OFError.OFBadRequestCode.OFPBRC_BUFFER_UNKNOWN.ordinal -> BadRequest.BufferUnknown
)
private val badActionMap = Map(
  OFError.OFBadActionCode.OFPBAC_BAD_TYPE.ordinal -> BadAction.BadType,
  OFError.OFBadActionCode.OFPBAC_BAD_LEN.ordinal -> BadAction.BadLen,
  OFError.OFBadActionCode.OFPBAC_BAD_VENDOR.ordinal -> BadAction.BadVendor,
  OFError.OFBadActionCode.OFPBAC_BAD_VENDOR_TYPE.ordinal -> BadAction.BadVendorType,
  OFError.OFBadActionCode.OFPBAC_BAD_ARGUMENT.ordinal -> BadAction.BadArgument,
  OFError.OFBadActionCode.OFPBAC_EPERM.ordinal -> BadAction.Eperm,
  OFError.OFBadActionCode.OFPBAC_TOO_MANY.ordinal -> BadAction.TooMany,
  OFError.OFBadActionCode.OFPBAC_BAD_QUEUE.ordinal -> BadAction.BadQueue
)
private val flowModFailedMap = Map(
  OFError.OFFlowModFailedCode.OFPFMFC_ALL_TABLES_FULL.ordinal -> FlowModFailed.AllTablesFull,
  OFError.OFFlowModFailedCode.OFPFMFC_OVERLAP.ordinal -> FlowModFailed.Overlap,
  OFError.OFFlowModFailedCode.OFPFMFC_EPERM.ordinal -> FlowModFailed.Eperm,
  OFError.OFFlowModFailedCode.OFPFMFC_BAD_EMERG_TIMEOUT.ordinal -> FlowModFailed.BadEmergTimeout,
  OFError.OFFlowModFailedCode.OFPFMFC_BAD_COMMAND.ordinal -> FlowModFailed.BadCommand,
  OFError.OFFlowModFailedCode.OFPFMFC_UNSUPPORTED.ordinal -> FlowModFailed.Unsupported
)
private val portModFailedMap = Map(
  OFError.OFPortModFailedCode.OFPPMFC_BAD_PORT.ordinal -> PortModFailed.BadPort,
  OFError.OFPortModFailedCode.OFPPMFC_BAD_HW_ADDR.ordinal -> PortModFailed.BadHwAddr
)
private val queueOpFailedMap = Map(
  OFError.OFQueueOpFailedCode.OFPQOFC_BAD_PORT.ordinal -> QueueOpFailed.BadPort,
  OFError.OFQueueOpFailedCode.OFPQOFC_BAD_QUEUE.ordinal -> QueueOpFailed.BadQueue,
  OFError.OFQueueOpFailedCode.OFPQOFC_EPERM.ordinal -> QueueOpFailed.Eperm
)
private val errMap = Map(
  OFError.OFErrorType.OFPET_HELLO_FAILED.getValue -> (HelloFailed, helloFailedMap),
  OFError.OFErrorType.OFPET_BAD_REQUEST.getValue -> (BadRequest, badRequestMap),
  OFError.OFErrorType.OFPET_BAD_ACTION.getValue -> (BadAction, badActionMap),
  OFError.OFErrorType.OFPET_FLOW_MOD_FAILED.getValue -> (FlowModFailed, flowModFailedMap),
  OFError.OFErrorType.OFPET_PORT_MOD_FAILED.getValue -> (PortModFailed, portModFailedMap),
  OFError.OFErrorType.OFPET_QUEUE_OP_FAILED.getValue -> (QueueOpFailed, queueOpFailedMap)
)
/**
 * Translates an OFError into a typed Error event.
 * NOTE(review): errMap(...) / ecode(...) throw NoSuchElementException for
 * error types or codes not present in the tables (e.g. vendor extensions),
 * and unlike the other handlers the Error event carries neither the switch
 * nor the context — confirm that is intentional.
 */
private def processError(sw: IOFSwitch, err: OFError, context: net.floodlightcontroller.core.FloodlightContext) = {
  val (code, ecode) = errMap(err.getErrorType)
  singletonHandler(Error(code, ecode(err.getErrorCode)))
  Command.CONTINUE // Command.STOP
}
/**
 * getModuleServices/getServiceImpls
 */
// Floodlight module API: this module exports no services.
def getModuleServices() = {
  changeClassLoader()
  null
}
// Floodlight module API: no service implementations to register.
def getServiceImpls() = {
  changeClassLoader()
  null
}
/**
* isCallbackOrderingPrereq/Postreq
*/
// No ordering prerequisites relative to other OpenFlow listeners.
def isCallbackOrderingPrereq(otype: OFType, name: String) = {
  changeClassLoader()
  false
}
// No ordering postrequisites relative to other OpenFlow listeners.
def isCallbackOrderingPostreq(otype: OFType, name: String) = {
  changeClassLoader()
  false
}
}
| axi-sugiki/kumoi | src/kumoi/impl/oflow/floodlight/FloodlightController.scala | Scala | apache-2.0 | 19,215 |
package io.continuum.bokeh
class Traversal {

  /**
   * Collects every [[Model]] reachable from `objs` (including the roots
   * themselves), in the order they are first completed by the traversal.
   */
  def collect(objs: List[Model]): List[Model] = {
    val refs = collection.mutable.ListBuffer[Model]()
    traverse(objs, { case ref: Model => refs += ref })
    refs.toList
  }

  /**
   * Walks the object graph rooted at `objs`, applying `fn` to every
   * [[Model]] encountered. Each model is visited at most once (deduplicated
   * by id), and its children are descended into before `fn` is applied.
   */
  def traverse(objs: List[Model], fn: PartialFunction[Model, Unit]) {
    val visited = collection.mutable.HashSet[String]()

    // Recurse into every defined field value of a HasFields object.
    def descendFields(obj: HasFields) {
      obj.fields.map(_.field.valueOpt).foreach(_.foreach(descend _))
    }

    def descend(obj: Any) {
      obj match {
        case obj: Model =>
          if (!visited.contains(obj.id)) {
            visited += obj.id
            descendFields(obj)
            fn(obj)
          }
        case obj: HasFields =>
          descendFields(obj)
        case obj: List[_] =>
          obj.foreach(descend)
        case obj: Map[_, _] =>
          // Visit both keys and values. (The previous
          // `descend(key) -> descend(value)` built a pointless (Unit, Unit)
          // tuple; behavior is unchanged, both sides were always evaluated.)
          obj.foreach { case (key, value) => descend(key); descend(value) }
        case obj: Product =>
          // Tuples, case classes, Options, etc.
          obj.productIterator.foreach(descend)
        case _ =>
      }
    }

    descend(objs)
  }
}
| bokeh/bokeh-scala | bokeh/src/main/scala/Traversal.scala | Scala | mit | 1,245 |
package ch.wsl.box.client.views.components.widget
import java.util.UUID
import ch.wsl.box.client.services.{Labels, REST}
import ch.wsl.box.client.styles.GlobalStyles
import ch.wsl.box.model.shared.{JSONField, JSONFieldLookup, JSONID, JSONLookup, JSONMetadata}
import io.circe._
import io.circe.syntax._
import ch.wsl.box.shared.utils.JSONUtils._
import scribe.{Logger, Logging}
import scala.concurrent.{ExecutionContext, Future}
import scalatags.JsDom.all._
import scalatags.JsDom
import io.udash._
import io.udash.bindings.Bindings
import io.udash.bindings.modifiers.Binding
import io.udash.bootstrap.tooltip.UdashTooltip
import io.udash.properties.single.Property
import org.scalajs.dom.{Element, window}
import org.scalajs.dom
import scala.concurrent.duration._
/**
 * Base contract for a form widget bound to a single JSON field: rendering
 * (read-only and editable), value conversion helpers, save hooks, and
 * lifecycle management for Udash bindings/registrations.
 */
trait Widget extends Logging {

  /** Field definition this widget renders/edits. */
  def field: JSONField

  // Renders a JSON value as display text (via JSONUtils' `string` —
  // presumably empty for nulls; confirm in JSONUtils).
  def jsonToString(json: Json): String = json.string

  /** Hook to clear any pending "unsaved changes" state; no-op by default. */
  def resetChangeAlert(): Unit = {}

  // Converts raw input text to JSON; an empty string becomes Json.Null only
  // when the field is nullable, otherwise it stays as the empty JSON string.
  def strToJson(nullable: Boolean = false)(str: String): Json = (str, nullable) match {
    case ("", true) => Json.Null
    case _ => str.asJson
  }

  // Converts input text to a JSON number; empty becomes null.
  // NOTE(review): `str.toDouble` throws NumberFormatException for
  // non-numeric, non-empty input — callers presumably validate first.
  def strToNumericJson(str: String): Json = str match {
    case "" => Json.Null
    case _ => str.toDouble.asJson
  }

  // Converts a JSON-array-of-strings text into an array of JSON numbers;
  // empty input or unparsable arrays become Json.Null.
  def strToNumericArrayJson(str: String): Json = str match {
    case "" => Json.Null
    case _ => str.asJson.asArray.map(_.map(s => strToNumericJson(s.string))).map(_.asJson).getOrElse(Json.Null)
  }

  /** Read-only rendering of the widget. */
  protected def show(): Modifier
  /** Editable rendering of the widget. */
  protected def edit(): Modifier

  // Table-cell variants; subclasses override where table display is supported.
  def showOnTable(): Modifier = frag("Not implemented")
  def text(): ReadableProperty[String] = Property("Not implemented")
  def editOnTable(): Modifier = frag("Not implemented")

  // Renders edit() only when writing is requested AND the field is not
  // read-only; wrapped in showIf so visibility tracks `conditional`.
  def render(write: Boolean, conditional: ReadableProperty[Boolean]): Modifier = showIf(conditional) {
    if (write && !field.readOnly) {
      div(edit()).render
    } else {
      div(show()).render
    }
  }

  // Save hooks: transform the form data before/after persisting. Defaults
  // pass the data through unchanged.
  def beforeSave(data: Json, metadata: JSONMetadata): Future[Json] = Future.successful(data)
  def afterSave(data: Json, metadata: JSONMetadata): Future[Json] = Future.successful(data)

  /** Hook invoked after the widget's DOM is attached; true by default. */
  def afterRender(): Future[Boolean] = Future.successful(true)

  def reload() = {} //recover autoreleased resources

  // Releases every auto-registered binding and registration, then clears
  // both lists so the widget can be garbage-collected.
  def killWidget() = {
    bindings.foreach(_.kill())
    registrations.foreach(_.cancel())
    bindings = List()
    registrations = List()
  }

  // Resources tracked for release in killWidget().
  private var bindings: List[Binding] = List()
  private var registrations: List[Registration] = List()

  /** Tracks a Udash binding for automatic release; returns it for chaining. */
  def autoRelease(b: Binding): Binding = {
    bindings = b :: bindings
    b
  }

  /** Tracks a property registration for automatic release; returns it for chaining. */
  def autoRelease(r: Registration): Registration = {
    registrations = r :: registrations
    r
  }
}
object Widget{
  /** A constant widget that renders the given string in both show and edit modes. */
  def forString(_field:JSONField,str:String):Widget = new Widget {
    override def field: JSONField = _field
    override protected def show(): JsDom.all.Modifier = str
    override protected def edit(): JsDom.all.Modifier = str
  }
}
/** Widget backed by a JSON data property; table/text rendering derive from it. */
trait HasData extends Widget {
  /** The widget's current value. */
  def data: Property[Json]
  override def showOnTable(): JsDom.all.Modifier = autoRelease(bind(data.transform(_.string)))
  override def text(): ReadableProperty[String] = data.transform(_.string)
}
/** Widget whose JSON value is a boolean rendered as a check/cross mark. */
trait IsCheckBoxWithData extends Widget {

  /** The widget's current (boolean) value. */
  def data: Property[Json]

  // Renders a JSON boolean as a mark; anything undecodable becomes "-".
  private def renderMark(value: Json): JsDom.all.Modifier =
    value.as[Boolean].right.toOption match {
      case Some(true)  => raw("✓")
      case Some(false) => raw("✕")
      case None        => "-"
    }

  override protected def show(): JsDom.all.Modifier = WidgetUtils.showNotNull(data) { value =>
    div(renderMark(value), " ", field.title).render
  }

  override def showOnTable(): JsDom.all.Modifier = WidgetUtils.showNotNull(data) { value =>
    div(renderMark(value)).render
  }

  override def text(): ReadableProperty[String] = data.transform(_.string)
}
/** Callbacks a widget can invoke on its hosting form: save, then run a continuation on the saved data. */
case class WidgetCallbackActions(saveAndThen: (Json => Unit) => Unit)

object WidgetCallbackActions {
  /** Actions for contexts without a hosting form: every request is ignored. */
  def noAction = WidgetCallbackActions(_ => ())
}
/**
 * Everything a widget needs at construction time: the record id, the
 * field's own value property, field/form metadata, the whole form's data,
 * child metadata, callback actions, and the public/private flag.
 */
case class WidgetParams(
  id: ReadableProperty[Option[String]],
  prop: Property[Json],
  field: JSONField,
  metadata: JSONMetadata,
  _allData: Property[Json],
  children: Seq[JSONMetadata],
  actions: WidgetCallbackActions,
  public: Boolean
) extends Logging {

  /** Read-only view of the whole form's data. */
  def allData: ReadableProperty[Json] = _allData

  // Read-write view of a single top-level key of the form data: reads
  // extract the key (JSONUtils `js`), writes deep-merge the new value back.
  def otherField(str: String): Property[Json] = {
    _allData.bitransform(_.js(str))((fd: Json) => _allData.get.deepMerge(Json.obj((str, fd))))
  }
}
object WidgetParams{
  // Minimal params for a standalone widget: no record id, no children, no
  // callback actions, and the field's property doubles as the "whole form".
  def simple(prop:Property[Json],field:JSONField,metadata:JSONMetadata,public:Boolean):WidgetParams = WidgetParams(
    Property(None),
    prop = prop,
    field = field,
    metadata = metadata,
    _allData = prop,
    children = Seq(),
    actions = WidgetCallbackActions.noAction,
    public
  )
}
/** Factory registered under a unique name that builds widgets from their params. */
trait ComponentWidgetFactory{
  /** Registration name used to select this factory. */
  def name:String
  def create(params:WidgetParams):Widget
}
object ChildWidget {
  // Sentinel tag identifying child (sub-form) elements in field definitions.
  final val childTag = "$child-element"
}
trait ChildWidget extends Widget with HasData
| Insubric/box | client/src/main/scala/ch/wsl/box/client/views/components/widget/Widget.scala | Scala | apache-2.0 | 5,070 |
import scala.quoted.*
object PowerMacro {

  /** Builds an expression computing x^n via square-and-multiply at compile time. */
  def powerCode(x: Expr[Double], n: Long)(using Quotes): Expr[Double] =
    if (n == 0) '{1.0}
    else if (n % 2 == 0) '{ val y = $x * $x; ${powerCode('y, n / 2)} }
    else '{ $x * ${powerCode(x, n - 1)} }

  // Negative compiler test: splicing inside a quoted `inline def` is rejected.
  // The trailing `// error` marker is checked by the test harness — keep it.
  def power2(x: Expr[Double])(using Quotes) = '{
    inline def power(x: Double): Double = ${powerCode('x, 2)} // error
    power($x)
  }
}
| dotty-staging/dotty | tests/neg-macros/i4803f.scala | Scala | apache-2.0 | 392 |
package edu.uchicago.cs.encsel.dataset.column
import java.io.File
import org.junit.Assert.assertTrue
import org.junit.Test
import edu.uchicago.cs.encsel.dataset.column.csv.CSVColumnReader2
import edu.uchicago.cs.encsel.dataset.column.json.JsonColumnReader
import edu.uchicago.cs.encsel.dataset.column.tsv.TSVColumnReader
class ColumnReaderFactoryTest {

  /** The factory must pick the reader implementation matching the file extension. */
  @Test
  def testGetColumnReader(): Unit = {
    def readerFor(path: String) =
      ColumnReaderFactory.getColumnReader(new File(path).toURI)

    assertTrue(readerFor("src/test/resource/test_columner.csv").isInstanceOf[CSVColumnReader2])
    assertTrue(readerFor("src/test/resource/test_columner.tsv").isInstanceOf[TSVColumnReader])
    assertTrue(readerFor("src/test/resource/test_json_parser.json").isInstanceOf[JsonColumnReader])
  }
}
} | harperjiang/enc-selector | src/test/scala/edu/uchicago/cs/encsel/dataset/column/ColumnReaderFactoryTest.scala | Scala | apache-2.0 | 871 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Generator for RDDs for testing with ScalaCheck
*/
package com.holdenkarau.spark.testing
import scala.reflect.ClassTag
import org.apache.spark.rdd.RDD
import org.apache.spark.SparkContext
import org.apache.spark.annotation.Experimental
import org.apache.spark.mllib.random._
import org.scalacheck._
@Experimental
object RDDGenerator {

  /**
   * Generate an RDD of the desired type. Attempt to try different numbers of
   * partitions so as to catch problems with empty partitions, etc.
   * minPartitions defaults to 1, but when generating data too large for a
   * single machine choose a larger value.
   *
   * @param sc Spark Context
   * @param minPartitions minimum number of partitions (defaults to 1)
   * @param generator used to create the generator. This function will be used
   *                  to create the generator as many times as required.
   * @tparam T The required type for the RDD
   * @return a generator of RDDs of varying sizes and partition counts
   */
  def genRDD[T: ClassTag](sc: SparkContext, minPartitions: Int = 1)(generator: => Gen[T]): Gen[RDD[T]] = {
    arbitraryRDD(sc, minPartitions)(generator).arbitrary
  }

  /**
   * Arbitrary RDDs of the desired type, varying both element count and
   * partition count.
   *
   * @param sc Spark Context
   * @param minPartitions minimum number of partitions (defaults to 1)
   * @param generator used to create the generator. This function will be used
   *                  to create the generator as many times as required.
   * @tparam T The required type for the RDD
   */
  def arbitraryRDD[T: ClassTag](sc: SparkContext, minPartitions: Int = 1)(generator: => Gen[T]): Arbitrary[RDD[T]] = {
    Arbitrary {
      Gen.sized(sz =>
        sz match {
          case 0 => sc.emptyRDD[T]
          case size => {
            // Bias generation towards "interesting" partition counts: the
            // size itself, half of it, and values at/near the minimum.
            val mp = minPartitions
            val specialPartitionSizes = List(size, (size / 2), mp, mp + 1, mp + 3).filter(_ > mp)
            val partitionsGen = for {
              // Honour minPartitions as the lower bound (was hard-coded to 1,
              // per the old TODO); clamp the upper bound so the range is
              // never empty even when minPartitions exceeds 2 * size.
              partitionCount <- Gen.chooseNum(mp, math.max(2 * size, mp), specialPartitionSizes: _*)
            } yield partitionCount
            // Wrap the scalacheck generator in a Spark generator
            val sparkElemGenerator = new WrappedGenerator(generator)
            val rdds = partitionsGen.map { numPartitions =>
              RandomRDDs.randomRDD(sc, sparkElemGenerator, size, numPartitions)
            }
            rdds
          }
        })
    }
  }
}
/**
* A WrappedGenerator wraps a ScalaCheck generator to allow Spark's RandomRDD to use it
*/
private[testing] class WrappedGenerator[T](getGenerator: => Gen[T]) extends RandomDataGenerator[T] {
  // Lazily created so a setSeed call is honoured before first use.
  lazy val random = new scala.util.Random()
  lazy val params = Gen.Parameters.default.withRng(random)
  lazy val generator: Gen[T] = getGenerator

  // NOTE(review): generator(params).get throws if the generator fails to
  // produce a value (e.g. a heavily filtered Gen) — confirm acceptable here.
  def nextValue(): T = {
    generator(params).get
  }

  // Fresh instance with its own RNG, as the RandomDataGenerator contract
  // requires so each partition gets an independent generator.
  def copy() = {
    new WrappedGenerator(getGenerator)
  }

  override def setSeed(seed: Long): Unit = random.setSeed(seed)
}
| mahmoudhanafy/spark-testing-base | src/main/1.3/scala/com/holdenkarau/spark/testing/RDDGenerator.scala | Scala | apache-2.0 | 3,956 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.features.kryo.impl
import java.io.OutputStream
import java.util.{Date, UUID}
import com.esotericsoftware.kryo.io.Output
import com.vividsolutions.jts.geom.Geometry
import org.locationtech.geomesa.features.SimpleFeatureSerializer
import org.locationtech.geomesa.features.kryo.KryoFeatureSerializer.{NON_NULL_BYTE, NULL_BYTE, VERSION}
import org.locationtech.geomesa.features.kryo.json.KryoJsonSerialization
import org.locationtech.geomesa.features.kryo.serialization.{KryoGeometrySerialization, KryoUserDataSerialization}
import org.locationtech.geomesa.features.serialization.ObjectType
import org.locationtech.geomesa.features.serialization.ObjectType.ObjectType
import org.locationtech.geomesa.utils.cache.{CacheKeyGenerator, SoftThreadLocal, SoftThreadLocalCache}
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
/**
 * Kryo-based SimpleFeature serializer. Binary layout per feature:
 * [version varint][4-byte pointer to the offset table][optional feature id]
 * [attribute values][offset table as varints][optional user data].
 * The offset-table pointer is back-patched after the attributes are written,
 * since attribute widths are variable.
 */
trait KryoFeatureSerialization extends SimpleFeatureSerializer {

  /** The feature type whose attribute layout drives serialization. */
  private [kryo] def serializeSft: SimpleFeatureType

  // Attribute writers are resolved once per serializer via a per-type cache key.
  private val cacheKey = CacheKeyGenerator.cacheKey(serializeSft)
  private val writers = KryoFeatureSerialization.getWriters(cacheKey, serializeSft)
  private val withId = !options.withoutId
  private val withUserData = options.withUserData

  /** Serializes the feature to a new byte array. */
  override def serialize(sf: SimpleFeature): Array[Byte] = {
    val output = KryoFeatureSerialization.getOutput(null)
    writeFeature(sf, output)
    output.toBytes
  }

  /** Serializes the feature directly to the given output stream. */
  override def serialize(sf: SimpleFeature, out: OutputStream): Unit = {
    val output = KryoFeatureSerialization.getOutput(out)
    writeFeature(sf, output)
    output.flush()
  }

  private def writeFeature(sf: SimpleFeature, output: Output): Unit = {
    val offsets = KryoFeatureSerialization.getOffsets(cacheKey, writers.length)
    val offset = output.position()
    output.writeInt(VERSION, true)
    output.setPosition(offset + 5) // leave 4 bytes to write the offsets
    if (withId) {
      // TODO optimize for uuids?
      output.writeString(sf.getID)
    }
    // write attributes and keep track off offset into byte array
    var i = 0
    while (i < writers.length) {
      offsets(i) = output.position() - offset
      writers(i)(output, sf.getAttribute(i))
      i += 1
    }
    // write the offsets - variable width
    i = 0
    val offsetStart = output.position() - offset
    while (i < writers.length) {
      output.writeInt(offsets(i), true)
      i += 1
    }
    // got back and write the start position for the offsets
    val end = output.position()
    output.setPosition(offset + 1)
    output.writeInt(offsetStart)
    // reset the position back to the end of the buffer so the bytes aren't lost, and we can keep writing user data
    output.setPosition(end)
    if (withUserData) {
      KryoUserDataSerialization.serialize(output, sf.getUserData)
    }
  }
}
object KryoFeatureSerialization {

  // Reusable per-thread kryo Output plus per-thread caches of attribute
  // writers and offset scratch arrays, keyed by feature-type cache key.
  private [this] val outputs = new SoftThreadLocal[Output]()
  private [this] val writers = new SoftThreadLocalCache[String, Array[(Output, AnyRef) => Unit]]()
  private [this] val offsets = new SoftThreadLocalCache[String, Array[Int]]()

  /** Returns this thread's Output, (re)bound to the given stream (stream may be null for in-memory use). */
  def getOutput(stream: OutputStream): Output = {
    val out = outputs.getOrElseUpdate(new Output(1024, -1))
    out.setOutputStream(stream)
    out
  }

  /** Per-thread scratch array for attribute offsets, reused across features of the same type. */
  private [kryo] def getOffsets(sft: String, size: Int): Array[Int] =
    offsets.getOrElseUpdate(sft, Array.ofDim[Int](size))

  // noinspection UnitInMap
  /** One writer function per attribute, derived from the descriptor bindings; cached per thread. */
  def getWriters(key: String, sft: SimpleFeatureType): Array[(Output, AnyRef) => Unit] = {
    import scala.collection.JavaConversions._
    writers.getOrElseUpdate(key, sft.getAttributeDescriptors.map { ad =>
      val (otype, bindings) = ObjectType.selectType(ad.getType.getBinding, ad.getUserData)
      matchWriter(otype, bindings)
    }.toArray)
  }

  // Builds the serializer for one attribute type. Fixed-width primitives are
  // prefixed with a null-flag byte; strings/geometries/json handle nulls in
  // their own serialization; collections and byte arrays encode null as a
  // -1 length varint.
  private def matchWriter(otype: ObjectType, bindings: Seq[ObjectType] = Seq.empty): (Output, AnyRef) => Unit = {
    otype match {
      case ObjectType.STRING =>
        (o: Output, v: AnyRef) => o.writeString(v.asInstanceOf[String]) // write string supports nulls
      case ObjectType.INT =>
        val w = (o: Output, v: AnyRef) => o.writeInt(v.asInstanceOf[Int])
        writeNullable(w)
      case ObjectType.LONG =>
        val w = (o: Output, v: AnyRef) => o.writeLong(v.asInstanceOf[Long])
        writeNullable(w)
      case ObjectType.FLOAT =>
        val w = (o: Output, v: AnyRef) => o.writeFloat(v.asInstanceOf[Float])
        writeNullable(w)
      case ObjectType.DOUBLE =>
        val w = (o: Output, v: AnyRef) => o.writeDouble(v.asInstanceOf[Double])
        writeNullable(w)
      case ObjectType.BOOLEAN =>
        val w = (o: Output, v: AnyRef) => o.writeBoolean(v.asInstanceOf[Boolean])
        writeNullable(w)
      case ObjectType.DATE =>
        // Dates are stored as epoch millis.
        val w = (o: Output, v: AnyRef) => o.writeLong(v.asInstanceOf[Date].getTime)
        writeNullable(w)
      case ObjectType.UUID =>
        // UUIDs are stored as two longs (most/least significant bits).
        val w = (o: Output, v: AnyRef) => {
          val uuid = v.asInstanceOf[UUID]
          o.writeLong(uuid.getMostSignificantBits)
          o.writeLong(uuid.getLeastSignificantBits)
        }
        writeNullable(w)
      case ObjectType.GEOMETRY =>
        // null checks are handled by geometry serializer
        (o: Output, v: AnyRef) => KryoGeometrySerialization.serialize(o, v.asInstanceOf[Geometry])
      case ObjectType.JSON =>
        (o: Output, v: AnyRef) => KryoJsonSerialization.serialize(o, v.asInstanceOf[String])
      case ObjectType.LIST =>
        // Length-prefixed (-1 for null), then each element via its own writer.
        val valueWriter = matchWriter(bindings.head)
        (o: Output, v: AnyRef) => {
          val list = v.asInstanceOf[java.util.List[AnyRef]]
          if (list == null) {
            o.writeInt(-1, true)
          } else {
            o.writeInt(list.size(), true)
            val iter = list.iterator()
            while (iter.hasNext) {
              valueWriter(o, iter.next())
            }
          }
        }
      case ObjectType.MAP =>
        // Size-prefixed (-1 for null), then alternating key/value entries.
        val keyWriter = matchWriter(bindings.head)
        val valueWriter = matchWriter(bindings(1))
        (o: Output, v: AnyRef) => {
          val map = v.asInstanceOf[java.util.Map[AnyRef, AnyRef]]
          if (map == null) {
            o.writeInt(-1, true)
          } else {
            o.writeInt(map.size(), true)
            val iter = map.entrySet.iterator()
            while (iter.hasNext) {
              val entry = iter.next()
              keyWriter(o, entry.getKey)
              valueWriter(o, entry.getValue)
            }
          }
        }
      case ObjectType.BYTES =>
        // Length-prefixed raw bytes (-1 for null).
        (o: Output, v: AnyRef) => {
          val arr = v.asInstanceOf[Array[Byte]]
          if (arr == null) {
            o.writeInt(-1, true)
          } else {
            o.writeInt(arr.length, true)
            o.writeBytes(arr)
          }
        }
    }
  }

  // Wraps a writer with a one-byte null flag (NULL_BYTE / NON_NULL_BYTE).
  private def writeNullable(wrapped: (Output, AnyRef) => Unit): (Output, AnyRef) => Unit = {
    (o: Output, v: AnyRef) => {
      if (v == null) {
        o.write(NULL_BYTE)
      } else {
        o.write(NON_NULL_BYTE)
        wrapped(o, v)
      }
    }
  }
}
} | ronq/geomesa | geomesa-features/geomesa-feature-kryo/src/main/scala/org/locationtech/geomesa/features/kryo/impl/KryoFeatureSerialization.scala | Scala | apache-2.0 | 7,468 |
/**
* This file is part of the TA Buddy project.
* Copyright (c) 2014 Alexey Aksenov ezh@ezh.msk.ru
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Affero General Global License version 3
* as published by the Free Software Foundation with the addition of the
* following permission added to Section 15 as permitted in Section 7(a):
* FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED
* BY Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS»,
* Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS» DISCLAIMS
* THE WARRANTY OF NON INFRINGEMENT OF THIRD PARTY RIGHTS.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Affero General Global License for more details.
* You should have received a copy of the GNU Affero General Global License
* along with this program; if not, see http://www.gnu.org/licenses or write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA, 02110-1301 USA, or download the license from the following URL:
* http://www.gnu.org/licenses/agpl.html
*
* The interactive user interfaces in modified source and object code versions
* of this program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU Affero General Global License.
*
* In accordance with Section 7(b) of the GNU Affero General Global License,
* you must retain the producer line in every report, form or document
* that is created or manipulated using TA Buddy.
*
* You can be released from the requirements of the license by purchasing
* a commercial license. Buying such a license is mandatory as soon as you
* develop commercial activities involving the TA Buddy software without
* disclosing the source code of your own applications.
* These activities include: offering paid services to customers,
* serving files in a web or/and network application,
* shipping TA Buddy with a closed source product.
*
* For more information, please contact Digimead Team at this
* address: ezh@ezh.msk.ru
*/
package org.digimead.tabuddy.desktop.logic.operation.graph.api
import java.io.File
import org.digimead.tabuddy.desktop.core.definition.api.XOperation
import org.digimead.tabuddy.model.Model
import org.digimead.tabuddy.model.graph.Graph
import org.digimead.tabuddy.model.serialization.Serialization
/**
* OperationGraphSaveAs base trait.
*/
trait XOperationGraphSaveAs {
  // Guard executed at mix-in time: forbids direct subclassing of the API trait.
  checkSubclass()
  /**
   * Save graph as ...
   *
   * @param graph graph to save
   * @param name name of the graph
   * @param path directory of the graph container
   * @return copy of the graph
   */
  def apply(graph: Graph[_ <: Model.Like], name: String, path: File): Graph[_ <: Model.Like]
  /**
   * Create 'Save graph as ...' operation.
   *
   * @param graph graph to save
   * @param name name of the graph
   * @param path directory of the graph container
   * @return 'Save graph as ...' operation
   */
  def operation(graph: Graph[_ <: Model.Like], name: String, path: File): XOperation[Graph[_ <: Model.Like]]
  /**
   * Checks that this class can be subclassed.
   * <p>
   * The API class is intended to be subclassed only at specific,
   * controlled point. This method enforces this rule
   * unless it is overridden.
   * </p><p>
   * <em>IMPORTANT:</em> By providing an implementation of this
   * method that allows a subclass of a class which does not
   * normally allow subclassing to be created, the implementer
   * agrees to be fully responsible for the fact that any such
   * subclass will likely fail.
   * </p>
   */
  protected def checkSubclass(): Unit =
    throw new IllegalAccessException("Please, use org.digimead.tabuddy.desktop.logic.operation.graph.OperationGraphSaveAs instead.")
}
| digimead/digi-TABuddy-desktop | part-logic/src/main/scala/org/digimead/tabuddy/desktop/logic/operation/graph/api/XOperationGraphSaveAs.scala | Scala | agpl-3.0 | 3,944 |
// Equites, a Scala chess playground
// Copyright © 2013 Frank S. Thomas <frank@timepit.eu>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eu.timepit.equites
package util
import org.specs2.ScalaCheck
import org.specs2.mutable._
import PieceUtil._
import ArbitraryInstances._
/**
 * Property-based specs2 tests for the notation readers/writers in PieceUtil.
 *
 * Arbitrary pieces come from ArbitraryInstances; each reader is checked to be
 * the left inverse of the corresponding writer, and to reject characters that
 * are not part of the notation.
 */
class NotationSpec extends Specification with ScalaCheck {
  "pieceFromLetter" should {
    // Round-trip: render the piece as a letter, parse it back, expect the same
    // piece. Assumes showLetter yields at least one character — TODO confirm.
    "be the inverse of RichPiece.toLetter" in prop {
      (piece: AnyPiece) =>
        readLetter(showLetter(piece).charAt(0)) must beSome(piece)
    }
    // '0' is not a piece letter, so parsing must fail with None.
    "yield None on invalid input" in {
      readLetter('0') must beNone
    }
  }
  "pieceFromFigurine" should {
    // Same round-trip property for the figurine notation.
    "be the inverse of RichPiece.toFigurine" in prop {
      (piece: AnyPiece) =>
        readFigurine(showFigurine(piece).charAt(0)) must beSome(piece)
    }
    // '0' is not a figurine either.
    "yield None on invalid input" in {
      readFigurine('0') must beNone
    }
  }
}
| equites-chess/equites-core | src/test/scala/eu/timepit/equites/util/NotationSpec.scala | Scala | gpl-3.0 | 1,493 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations.calculations
import org.scalatest.{Matchers, WordSpec}
import uk.gov.hmrc.ct.computations.{CP117, CP283, CP284, CP291}
/**
 * WordSpec tests for NetTradingProfitCalculator.
 *
 * Net trading profit (CP284) is CP117 less any CP283 losses brought forward,
 * and CP291 only carries a value when losses were actually used (CP283 > 0).
 */
class NetTradingProfitCalculatorSpec extends WordSpec with Matchers {

  "NetTradingProfitCalculator" should {

    "return correct value" in new NetTradingProfitCalculator {
      // 1000 profit less 800 of brought-forward losses.
      netTradingProfitCalculation(cp117 = CP117(1000), cp283 = CP283(Some(800))) shouldBe CP284(Some(200))
    }

    "return correct value when losses has no value" in new NetTradingProfitCalculator {
      // No losses recorded: the profit passes through unchanged.
      netTradingProfitCalculation(cp117 = CP117(1000), cp283 = CP283(None)) shouldBe CP284(Some(1000))
    }
  }

  "NetTradingProfitCalculator for profits chargeable" should {

    "return value of CP284 if CP283 > 0" in new NetTradingProfitCalculator {
      netTradingProfitForProfitsChargeable(
        netTradingProfit = CP284(Some(1000)),
        lossesBroughtForwardUsedAgainstTradingProfit = CP283(Some(500))
      ) shouldBe CP291(Some(1000))
    }

    "return None if CP283 = 0" in new NetTradingProfitCalculator {
      netTradingProfitForProfitsChargeable(
        netTradingProfit = CP284(Some(1000)),
        lossesBroughtForwardUsedAgainstTradingProfit = CP283(Some(0))
      ) shouldBe CP291(None)
    }

    "return None if CP283 < 0" in new NetTradingProfitCalculator {
      netTradingProfitForProfitsChargeable(
        netTradingProfit = CP284(Some(1000)),
        lossesBroughtForwardUsedAgainstTradingProfit = CP283(Some(-1))
      ) shouldBe CP291(None)
    }
  }
}
| liquidarmour/ct-calculations | src/test/scala/uk/gov/hmrc/ct/computations/calculations/NetTradingProfitCalculatorSpec.scala | Scala | apache-2.0 | 2,228 |
package com.es.scala.more
/**
* Created by mark on 12/4/14.
*/
/**
 * Created by mark on 12/4/14.
 *
 * Small pedagogical class contrasting null-based and Nil-based styles.
 */
class UseOfNull {

  /**
   * Null-propagating lookup, kept as a deliberate example of the style to
   * avoid: yields the head of `x`, or null when `x` itself is null.
   * (A non-null empty list still throws on `head`, as in the original.)
   *
   * @param x possibly-null list
   * @return the first element, or null
   */
  def exampleOfNull(x: List[String]): String =
    if (x == null) null
    else x.head

  /**
   * The preferable, null-free variant: an empty list maps to Nil, a
   * non-empty list to a one-element list holding its head.
   *
   * @param x input list
   * @return List(head) for a non-empty list, Nil for an empty one
   */
  def exampleOfNil(x: List[String]): List[String] =
    if (x == Nil) Nil
    else List(x.head)

  /**
   * Demonstrates `???` as the "not implemented yet" marker; calling this
   * throws scala.NotImplementedError.
   *
   * @param a unused
   * @return never returns normally
   */
  def notImplemented(a: Int): Int = ???
}
| elephantscale/learning-scala | scala-with-sujee/src/main/scala/com/es/scala/more/UseOfNull.scala | Scala | apache-2.0 | 646 |
package net.chwthewke.chcalc
import org.parboiled2._
import scala.collection.immutable.SortedMap
import scala.util.Try
/**
 * parboiled2 parser for a single '|'-separated hero definition line:
 * name | big number | big number | six upgrade percentages | mark codes.
 *
 * NOTE(review): what the two big-number fields mean is fixed by the HeroDef
 * constructor, which is defined elsewhere — confirm the field order there.
 */
private class Parsers( val input : ParserInput ) extends Parser {
  import Parsers._
  // Entry rule: the entire input must be consumed (EOI); the captured values
  // are folded into a HeroDef by `hero`.
  def HeroDefRule = rule {
    Name ~ Sep ~
      BigNumberAndSep ~
      BigNumberAndSep ~
      6.times( UpgradePercent ~ Sep ) ~
      Marks ~ EOI ~>
      hero
  }
  // Any run of characters up to the next '|' separator.
  def Name = rule { capture( oneOrMore( noneOf( "|" ) ) ) }
  // A decimal such as "12.5", stored as an integer number of hundredths (1250).
  def UpgradePercent = rule { capture( oneOrMore( CharPredicate.Digit | "." ) ) ~> toUpgradePercent }
  // A large number, suffixed form tried first, then scientific notation.
  def BigNumberAndSep = rule { SuffixedNumber ~ Sep | SciNumber ~ Sep }
  // "12.34e7"-style number rebuilt into an exact BigInt by `sci`.
  def SciNumber = rule { PosInteger ~ '.' ~ capture( PosInteger ) ~ 'e' ~ PosInteger ~> sci }
  // An integer scaled by the power of ten its (optional) suffix denotes.
  def SuffixedNumber = rule { PosInteger ~ Suffix ~> ( ( n, s ) => n * s ) }
  def PosInteger = rule { capture( oneOrMore( CharPredicate.Digit ) ) ~> ( _.toInt ) }
  // A missing suffix means 10^0, i.e. multiply by 1.
  def Suffix = rule { optional( SuffixMap ) ~> pow10D }
  def Marks = rule { zeroOrMore( MarkCodes ) ~> ( _.toList ) }
  // Assembles the parsed fields; the six upgrade percentages are keyed by the
  // fixed upgrade levels defined in the companion object.
  private def hero : ( String, BigInt, BigInt, Seq[Int], List[Mark] ) => HeroDef =
    ( n, c, d, u, m ) => {
      val upgrades = SortedMap( upgradeLevels.zip( u.toList ) : _* )
      HeroDef( n, c, d, m, upgrades )
    }
  private val toUpgradePercent : String => Int = s => ( s.toDouble * 100 ).toInt
  // Rebuilds i.f x 10^e exactly: shift the integer part left by the number of
  // fraction digits, add the fraction, then apply the remaining exponent.
  // NOTE(review): pow10( e - d ) throws for e < d (more fraction digits than
  // the exponent) — presumably well-formed inputs never hit this; confirm.
  private val sci : ( Int, Int, String, Int ) => BigInt = ( i, f, fs, e ) => {
    val d = fs.length
    ( i * pow10( d ) + f ) * pow10( e - d )
  }
  private val pow10D : Option[Int] => BigInt = n => pow10( n.getOrElse( 0 ) )
  private def pow10( n : Int ) : BigInt = BigInt( 10 ) pow n
}
object Parsers {
  /** Parses one hero definition line; parse failures surface via the Try. */
  def heroDef( s : String ) : Try[HeroDef] =
    new Parsers( s ).HeroDefRule.run()
  // Recognized trailing mark codes and their Mark values ('*' = Knight).
  private val MarkCodes : Map[String, Mark] = Map( "*" -> Knight )
  private val Sep = '|'
  // Maps each magnitude suffix to its power of ten: first suffix -> 10^3,
  // second -> 10^6, and so on.
  // NOTE(review): `Suffixes` is defined elsewhere — confirm its ordering.
  private val SuffixMap : Map[String, Int] =
    Suffixes.zipWithIndex.map { case ( s, i ) => s -> ( i + 1 ) * 3 }.toMap
  // The upgrade levels the six parsed percentages correspond to, in order.
  private val upgradeLevels : List[Int] = List( 10, 25, 50, 75, 100, 125 )
}
| chwthewke/ch-calc | src/main/scala/net/chwthewke/chcalc/Parsers.scala | Scala | mit | 1,988 |
package spire.algebra
import annotation.tailrec
import scala.{specialized => spec}
/**
* Semiring is a ring without identities or an inverse. Thus, it has no
* negation, zero, or one.
*
* A Semiring with an additive inverse (-) is a Rng.
* A Semiring with additive and multiplicative identities (0 and 1) is a Rig.
* A Semiring with all of the above is a Ring.
*/
trait Semiring[@spec(Byte, Short, Int, Long, Float, Double) A] extends AdditiveMonoid[A] with MultiplicativeSemigroup[A] {

  /**
   * Repeated multiplication: `pow(a, 3) === a * a * a`.
   *
   * A semiring has no multiplicative identity, so `a pow 0` is undefined
   * here and the exponent must be strictly positive.
   */
  def pow(a: A, n: Int): A =
    if (n <= 0) throw new IllegalArgumentException(s"Illegal non-positive exponent $n to Semiring#pow")
    else multiplicative.sumn(a, n)
}
object Semiring {
  /** Summoner: `Semiring[A]` is shorthand for `implicitly[Semiring[A]]`. */
  @inline final def apply[A](implicit r:Semiring[A]):Semiring[A] = r
}
| lrytz/spire | core/src/main/scala/spire/algebra/Semiring.scala | Scala | mit | 978 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.runtime.batch.sql
import org.apache.flink.api.common.typeinfo.BasicTypeInfo.{INT_TYPE_INFO, LONG_TYPE_INFO, STRING_TYPE_INFO}
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.streaming.runtime.io.MultipleInputSelectionHandler
import org.apache.flink.table.api.config.{ExecutionConfigOptions, OptimizerConfigOptions}
import org.apache.flink.table.planner.runtime.utils.BatchTestBase
import org.apache.flink.types.Row
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import org.junit.runners.Parameterized.Parameters
import org.junit.{Before, Test}
import scala.collection.JavaConversions._
import scala.util.Random
/**
* IT cases for multiple input.
*
* <p>This test class works by comparing the results with and without multiple input.
* The following IT cases are picked from
* [[org.apache.flink.table.planner.plan.batch.sql.MultipleInputCreationTest]].
*/
@RunWith(classOf[Parameterized])
class MultipleInputITCase(shuffleMode: String) extends BatchTestBase {
  // Registers four random tables with the same 4-column schema (x, y, z, t)
  // and pins the parameterized shuffle mode before every case.
  @Before
  override def before(): Unit = {
    super.before()
    registerCollection(
      "x",
      MultipleInputITCase.dataX,
      MultipleInputITCase.rowType,
      "a, b, c, nx",
      MultipleInputITCase.nullables)
    registerCollection(
      "y",
      MultipleInputITCase.dataY,
      MultipleInputITCase.rowType,
      "d, e, f, ny",
      MultipleInputITCase.nullables)
    registerCollection(
      "z",
      MultipleInputITCase.dataZ,
      MultipleInputITCase.rowType,
      "g, h, i, nz",
      MultipleInputITCase.nullables)
    registerCollection(
      "t",
      MultipleInputITCase.dataT,
      MultipleInputITCase.rowType,
      "a, b, c, nt",
      MultipleInputITCase.nullables)
    tEnv.getConfig.getConfiguration.setString(
      ExecutionConfigOptions.TABLE_EXEC_SHUFFLE_MODE, shuffleMode)
  }
  // Simplest case: two join subtrees that share table y.
  @Test
  def testBasicMultipleInput(): Unit = {
    checkMultipleInputResult(
      """
        |SELECT * FROM
        |  (SELECT a FROM x INNER JOIN y ON x.a = y.d) T1
        |  INNER JOIN
        |  (SELECT d FROM y INNER JOIN t ON y.d = t.a) T2
        |  ON T1.a = T2.d
        |""".stripMargin)
  }
  // Deep plan with CTEs, mixed join types, aggregations and a UNION ALL.
  @Test
  def testManyMultipleInputs(): Unit = {
    checkMultipleInputResult(
      """
        |WITH
        |  T1 AS (
        |    SELECT a, ny, nz FROM x
        |      LEFT JOIN y ON x.a = y.ny
        |      LEFT JOIN z ON x.a = z.nz),
        |  T2 AS (
        |    SELECT T1.a AS a, t.b AS b, d, T1.ny AS ny, nz FROM T1
        |      LEFT JOIN t ON T1.a = t.a
        |      INNER JOIN y ON T1.a = y.d),
        |  T3 AS (
        |    SELECT T1.a AS a, t.b AS b, d, T1.ny AS ny, nz FROM T1
        |      LEFT JOIN y ON T1.a = y.d
        |      INNER JOIN t ON T1.a = t.a),
        |  T4 AS (SELECT b, SUM(d) AS sd, SUM(ny) AS sy, SUM(nz) AS sz FROM T2 GROUP BY b),
        |  T5 AS (SELECT b, SUM(d) AS sd, SUM(ny) AS sy, SUM(nz) AS sz FROM T3 GROUP BY b)
        |SELECT * FROM
        |  (SELECT t.b, sd, sy, sz FROM T4 LEFT JOIN t ON T4.b = t.b)
        |  UNION ALL
        |  (SELECT y.e, sd, sy, sz FROM T5 LEFT JOIN y ON T5.b = y.e)
        |""".stripMargin)
  }
  // Aggregates over a shared CTE feeding both sides of the final join.
  @Test
  def testJoinWithAggAsProbe(): Unit = {
    checkMultipleInputResult(
      """
        |WITH T AS (SELECT a, d FROM x INNER JOIN y ON x.a = y.d)
        |SELECT * FROM
        |  (SELECT a, COUNT(*) AS cnt FROM T GROUP BY a) T1
        |  LEFT JOIN
        |  (SELECT d, SUM(a) AS sm FROM T GROUP BY d) T2
        |  ON T1.a = T2.d
        |""".stripMargin
    )
  }
  // Plain join chain with no ordering constraint between the inputs.
  @Test
  def testNoPriorityConstraint(): Unit = {
    checkMultipleInputResult(
      """
        |SELECT * FROM x
        |  INNER JOIN y ON x.a = y.d
        |  INNER JOIN t ON x.a = t.a
        |""".stripMargin
    )
  }
  // UNION ALL whose branches both derive from related inputs.
  @Test
  def testRelatedInputs(): Unit = {
    checkMultipleInputResult(
      """
        |WITH
        |  T1 AS (SELECT x.a AS a, y.d AS b FROM y LEFT JOIN x ON y.d = x.a),
        |  T2 AS (
        |    SELECT a, b FROM
        |      (SELECT a, b FROM T1)
        |      UNION ALL
        |      (SELECT x.a AS a, x.b AS b FROM x))
        |SELECT * FROM T2 LEFT JOIN t ON T2.a = t.a
        |""".stripMargin
    )
  }
  // Same shape as testRelatedInputs, but one UNION branch aggregates.
  @Test
  def testRelatedInputsWithAgg(): Unit = {
    checkMultipleInputResult(
      """
        |WITH
        |  T1 AS (SELECT x.a AS a, y.d AS b FROM y LEFT JOIN x ON y.d = x.a),
        |  T2 AS (
        |    SELECT a, b FROM
        |      (SELECT a, b FROM T1)
        |      UNION ALL
        |      (SELECT COUNT(x.a) AS a, x.b AS b FROM x GROUP BY x.b))
        |SELECT * FROM T2 LEFT JOIN t ON T2.a = t.a
        |""".stripMargin
    )
  }
  // Self-join shape that can deadlock when an exchange sits above the shared
  // ancestor input.
  @Test
  def testDeadlockCausedByExchangeInAncestor(): Unit = {
    checkMultipleInputResult(
      """
        |WITH T1 AS (
        |  SELECT x1.*, x2.a AS k, (x1.b + x2.b) AS v
        |  FROM x x1 LEFT JOIN x x2 ON x1.a = x2.a WHERE x2.a > 0)
        |SELECT x.a, x.b, T1.* FROM x LEFT JOIN T1 ON x.a = T1.k WHERE x.a > 0 AND T1.v = 0
        |""".stripMargin
    )
  }
  // Builds a left-join chain with the maximum input count the selection
  // handler supports (MAX_SUPPORTED_INPUT_COUNT - 1 joins).
  @Test
  def testMaxSupportedInputs(): Unit = {
    val rowType = new RowTypeInfo(INT_TYPE_INFO, STRING_TYPE_INFO)
    val data = Seq(BatchTestBase.row(1, "test"))
    val nullables: Array[Boolean] = Array(true, true)
    registerCollection("left_table", data, rowType, "a, b", nullables)
    registerCollection("right_table", data, rowType, "c, d", nullables)
    val numJoins = MultipleInputSelectionHandler.MAX_SUPPORTED_INPUT_COUNT - 1
    val sql = new StringBuilder("SELECT t0.a, t0.b")
    for (i <- 1 to numJoins) {
      sql.append(s", t$i.c, t$i.d")
    }
    sql.append(" from left_table as t0")
    for (i <- 1 to numJoins) {
      sql.append(s" left join right_table as t$i on t0.a = t$i.c and t$i.c = 1")
    }
    checkMultipleInputResult(sql.toString())
  }
  // Runs `sql` with the multiple-input optimization disabled to obtain the
  // expected rows, then re-runs with it enabled and compares the results.
  def checkMultipleInputResult(sql: String): Unit = {
    tEnv.getConfig.getConfiguration.setBoolean(
      OptimizerConfigOptions.TABLE_OPTIMIZER_MULTIPLE_INPUT_ENABLED, false)
    val expected = executeQuery(sql)
    tEnv.getConfig.getConfiguration.setBoolean(
      OptimizerConfigOptions.TABLE_OPTIMIZER_MULTIPLE_INPUT_ENABLED, true)
    checkResult(sql, expected)
  }
}
object MultipleInputITCase {
  // Every test runs once per shuffle mode.
  @Parameters(name = "shuffleMode: {0}")
  def parameters: Array[String] = Array("ALL_EDGES_BLOCKING", "ALL_EDGES_PIPELINED")
  // Up to 30 random rows of (int, long, string, int); the tiny value domain
  // (0..2) guarantees plenty of join matches.
  def generateRandomData(): Seq[Row] = {
    val data = new java.util.ArrayList[Row]()
    val numRows = Random.nextInt(30)
    lazy val strs = Seq("multiple", "input", "itcase")
    for (_ <- 0 until numRows) {
      data.add(BatchTestBase.row(
        Random.nextInt(3),
        Random.nextInt(3).longValue(),
        strs(Random.nextInt(3)),
        Random.nextInt(3)))
    }
    data
  }
  lazy val rowType = new RowTypeInfo(INT_TYPE_INFO, LONG_TYPE_INFO, STRING_TYPE_INFO, INT_TYPE_INFO)
  lazy val nullables: Array[Boolean] = Array(true, true, true, true)
  // lazy + cached: the expected run (optimization off) and the actual run
  // (optimization on) must observe identical random data.
  lazy val dataX: Seq[Row] = generateRandomData()
  lazy val dataY: Seq[Row] = generateRandomData()
  lazy val dataZ: Seq[Row] = generateRandomData()
  lazy val dataT: Seq[Row] = generateRandomData()
}
| tillrohrmann/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/runtime/batch/sql/MultipleInputITCase.scala | Scala | apache-2.0 | 7,928 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import java.io.File
import java.util.Properties
import javax.security.auth.login.Configuration
import scala.collection.Seq
import kafka.admin.ConfigCommand
import kafka.security.minikdc.MiniKdc
import kafka.server.KafkaConfig
import kafka.utils.JaasTestUtils.{JaasSection, Krb5LoginModule, ZkDigestModule}
import kafka.utils.{JaasTestUtils, TestUtils}
import org.apache.kafka.common.config.SaslConfigs
import org.apache.kafka.common.config.internals.BrokerSecurityConfigs
import org.apache.kafka.common.security.JaasUtils
import org.apache.kafka.common.security.authenticator.LoginManager
import org.apache.kafka.common.security.scram.internals.ScramMechanism
/*
* Implements an enumeration for the modes enabled here:
* zk only, kafka only, both, custom KafkaServer.
*/
sealed trait SaslSetupMode
// SASL configured for ZooKeeper only.
case object ZkSasl extends SaslSetupMode
// SASL configured for Kafka brokers/clients only.
case object KafkaSasl extends SaslSetupMode
// SASL configured for both ZooKeeper and Kafka.
case object Both extends SaslSetupMode
/*
* Trait used in SaslTestHarness and EndToEndAuthorizationTest to setup keytab and jaas files.
*/
trait SaslSetup {
  // Scratch directory for MiniKdc state; one per harness instance.
  private val workDir = TestUtils.tempDir()
  private val kdcConf = MiniKdc.createConfig
  // Started lazily by initializeKerberos(); stays null unless Kerberos is used.
  private var kdc: MiniKdc = null
  // Created once and reused so principals from several calls share keytabs.
  private var serverKeytabFile: Option[File] = None
  private var clientKeytabFile: Option[File] = None
  // Writes the JAAS configuration (starting an embedded KDC first if any
  // section uses Kerberos) and, when a ZK digest section is present, enables
  // ZooKeeper's SASL auth provider via a JVM-global system property.
  def startSasl(jaasSections: Seq[JaasSection]): Unit = {
    // Important if tests leak consumers, producers or brokers
    LoginManager.closeAll()
    val hasKerberos = jaasSections.exists(_.modules.exists {
      case _: Krb5LoginModule => true
      case _ => false
    })
    if (hasKerberos) {
      initializeKerberos()
    }
    writeJaasConfigurationToFile(jaasSections)
    val hasZk = jaasSections.exists(_.modules.exists {
      case _: ZkDigestModule => true
      case _ => false
    })
    if (hasZk)
      System.setProperty("zookeeper.authProvider.1", "org.apache.zookeeper.server.auth.SASLAuthenticationProvider")
  }
  // Boots the embedded KDC and provisions the server and client principals
  // into their keytab files.
  protected def initializeKerberos(): Unit = {
    val (serverKeytabFile, clientKeytabFile) = maybeCreateEmptyKeytabFiles()
    kdc = new MiniKdc(kdcConf, workDir)
    kdc.start()
    kdc.createPrincipal(serverKeytabFile, JaasTestUtils.KafkaServerPrincipalUnqualifiedName + "/localhost")
    kdc.createPrincipal(clientKeytabFile,
      JaasTestUtils.KafkaClientPrincipalUnqualifiedName, JaasTestUtils.KafkaClientPrincipalUnqualifiedName2)
  }
  /** Return a tuple with the path to the server keytab file and client keytab file */
  protected def maybeCreateEmptyKeytabFiles(): (File, File) = {
    if (serverKeytabFile.isEmpty)
      serverKeytabFile = Some(TestUtils.tempFile())
    if (clientKeytabFile.isEmpty)
      clientKeytabFile = Some(TestUtils.tempFile())
    (serverKeytabFile.get, clientKeytabFile.get)
  }
  // Builds the JAAS sections for the requested mode; keytab files are created
  // up front when any requested mechanism is GSSAPI (Kerberos).
  protected def jaasSections(kafkaServerSaslMechanisms: Seq[String],
                             kafkaClientSaslMechanism: Option[String],
                             mode: SaslSetupMode = Both,
                             kafkaServerEntryName: String = JaasTestUtils.KafkaServerContextName): Seq[JaasSection] = {
    val hasKerberos = mode != ZkSasl &&
      (kafkaServerSaslMechanisms.contains("GSSAPI") || kafkaClientSaslMechanism.contains("GSSAPI"))
    if (hasKerberos)
      maybeCreateEmptyKeytabFiles()
    mode match {
      case ZkSasl => JaasTestUtils.zkSections
      case KafkaSasl =>
        Seq(JaasTestUtils.kafkaServerSection(kafkaServerEntryName, kafkaServerSaslMechanisms, serverKeytabFile),
          JaasTestUtils.kafkaClientSection(kafkaClientSaslMechanism, clientKeytabFile))
      case Both => Seq(JaasTestUtils.kafkaServerSection(kafkaServerEntryName, kafkaServerSaslMechanisms, serverKeytabFile),
        JaasTestUtils.kafkaClientSection(kafkaClientSaslMechanism, clientKeytabFile)) ++ JaasTestUtils.zkSections
    }
  }
  // Writes the sections to a temp JAAS file and points the JVM at it.
  private def writeJaasConfigurationToFile(jaasSections: Seq[JaasSection]): Unit = {
    val file = JaasTestUtils.writeJaasContextsToFile(jaasSections)
    System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM, file.getAbsolutePath)
    // This will cause a reload of the Configuration singleton when `getConfiguration` is called
    Configuration.setConfiguration(null)
  }
  // Tears down the KDC and clears every JVM-global property set by startSasl.
  def closeSasl(): Unit = {
    if (kdc != null)
      kdc.stop()
    // Important if tests leak consumers, producers or brokers
    LoginManager.closeAll()
    System.clearProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM)
    System.clearProperty("zookeeper.authProvider.1")
    Configuration.setConfiguration(null)
  }
  // Broker-side SASL properties: enabled mechanisms plus the inter-broker one.
  def kafkaServerSaslProperties(serverSaslMechanisms: Seq[String], interBrokerSaslMechanism: String): Properties = {
    val props = new Properties
    props.put(KafkaConfig.SaslMechanismInterBrokerProtocolProp, interBrokerSaslMechanism)
    props.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, serverSaslMechanisms.mkString(","))
    props
  }
  // Client-side SASL properties; optionally inlines the JAAS config so the
  // client does not depend on the JVM-global login file.
  def kafkaClientSaslProperties(clientSaslMechanism: String, dynamicJaasConfig: Boolean = false): Properties = {
    val props = new Properties
    props.put(SaslConfigs.SASL_MECHANISM, clientSaslMechanism)
    if (dynamicJaasConfig)
      props.put(SaslConfigs.SASL_JAAS_CONFIG, jaasClientLoginModule(clientSaslMechanism))
    props
  }
  // Renders the client login module text, optionally overriding the service name.
  def jaasClientLoginModule(clientSaslMechanism: String, serviceName: Option[String] = None): String = {
    if (serviceName.isDefined)
      JaasTestUtils.clientLoginModule(clientSaslMechanism, clientKeytabFile, serviceName.get)
    else
      JaasTestUtils.clientLoginModule(clientSaslMechanism, clientKeytabFile)
  }
  // Registers SCRAM credentials for every SCRAM mechanism via the config tool.
  def createScramCredentials(zkConnect: String, userName: String, password: String): Unit = {
    val credentials = ScramMechanism.values.map(m => s"${m.mechanismName}=[iterations=4096,password=$password]")
    val args = Array("--zookeeper", zkConnect,
      "--alter", "--add-config", credentials.mkString(","),
      "--entity-type", "users",
      "--entity-name", userName)
    ConfigCommand.main(args)
  }
}
| noslowerdna/kafka | core/src/test/scala/integration/kafka/api/SaslSetup.scala | Scala | apache-2.0 | 6,731 |
package org.talkingpuffin.ui
import _root_.scala.swing.UIElement
import java.awt.Cursor
import SwingInvoke._
/**
* Whenever a long running operation is performed the UI thread should be released and a spinner should be shown. This object has
* functions for simplifying these tasks. Every function spins off a thread, and calls a callback function when job is finished.
*/
object LongRunningSpinner {

  /**
   * Runs `functions` sequentially on a background (SwingWorker) thread while
   * showing a wait cursor on `frame`.
   *
   * Each function returns true to allow the next one to run and false to
   * stop the chain early (an early stop still completes as [[Successful]]).
   * When the chain finishes or a function throws, `callback` is invoked with
   * the resulting [[Status]]. A null `callback` is tolerated and skipped.
   *
   * @param frame     UI element whose cursor is toggled busy/default
   * @param callback  invoked with the final status; may be null
   * @param functions steps to run in order; each returns whether to continue
   */
  def run(frame: UIElement, callback: (Status) => Unit, functions: () => Boolean*) {
    execSwingWorker({
      try {
        // Show the busy cursor on the EDT while the work runs on this thread.
        later(frame.cursor = Cursor.getPredefinedCursor(Cursor.WAIT_CURSOR))
        // Execute until a function vetoes the rest of the chain.
        functions.find(f => !f())
        later(frame.cursor = Cursor.getPredefinedCursor(Cursor.DEFAULT_CURSOR))
        Successful
      }
      catch {
        case e: Exception =>
          // Bug fix: the original left the WAIT cursor in place on failure;
          // restore the default cursor before reporting the error.
          later(frame.cursor = Cursor.getPredefinedCursor(Cursor.DEFAULT_CURSOR))
          e.printStackTrace()
          Failure(e)
      }
    }, (status: Status) =>
      if (callback != null) callback(status)
    )
  }

  /**
   * Outcome of a [[run]] invocation. Previously declared as
   * `abstract sealed case class Status()` with case subclasses — case-to-case
   * inheritance is rejected by modern Scala compilers, so Status is now a
   * plain sealed abstract class (Successful/Failure keep their shapes).
   */
  sealed abstract class Status
  case object Successful extends Status
  final case class Failure(e: Exception) extends Status
}
| dcbriccetti/talking-puffin | desktop/src/main/scala/org/talkingpuffin/ui/LongRunningSpinner.scala | Scala | mit | 1,333 |
package org.jetbrains.plugins.scala.lang.overrideImplement
import org.jetbrains.plugins.scala.base.ScalaLightPlatformCodeInsightTestCaseAdapter
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.overrideImplement.ScalaOIUtil
import org.jetbrains.plugins.scala.settings.ScalaApplicationSettings
import org.jetbrains.plugins.scala.util.TypeAnnotationSettings
/**
* @author Alefas
* @since 14.05.12
*/
class ScalaOverrideImplementTest extends ScalaLightPlatformCodeInsightTestCaseAdapter {
  /**
   * Runs one override/implement scenario: loads `fileText` into an in-memory
   * "dummy.scala", invokes the Override/Implement action for the member named
   * `methodName`, and compares the result against `expectedText`.
   *
   * Code-style settings are swapped in for the duration of the action and
   * restored afterwards so tests do not leak settings into each other.
   *
   * @param methodName   name of the member to override/implement
   * @param fileText     input text (margin-stripped; contains a <caret> marker)
   * @param expectedText expected text (contains <selection> markers)
   * @param isImplement  true to implement an abstract member, false to override
   * @param settings     code-style settings; defaults to "always add type annotations"
   * @param copyScalaDoc whether generated members copy ScalaDoc from the parent
   */
  def runTest(methodName: String, fileText: String, expectedText: String, isImplement: Boolean,
              settings: ScalaCodeStyleSettings = TypeAnnotationSettings.alwaysAddType(ScalaCodeStyleSettings.getInstance(getProjectAdapter)),
              copyScalaDoc: Boolean = false) {
    configureFromFileTextAdapter("dummy.scala", fileText.replace("\\r", "").stripMargin.trim)
    // Remember the current settings so they can be restored after the action.
    val oldSettings = ScalaCodeStyleSettings.getInstance(getProjectAdapter).clone()
    TypeAnnotationSettings.set(getProjectAdapter, settings)
    ScalaApplicationSettings.getInstance().COPY_SCALADOC = copyScalaDoc
    ScalaOIUtil.invokeOverrideImplement(getProjectAdapter, getEditorAdapter, getFileAdapter, isImplement, methodName)
    TypeAnnotationSettings.set(getProjectAdapter, oldSettings.asInstanceOf[ScalaCodeStyleSettings])
    checkResultByText(expectedText.replace("\\r", "").stripMargin.trim)
  }
  // Implementing an abstract method inserts a `???`-bodied stub at the caret.
  def testFoo() {
    val fileText =
      """
        |package test
        |
        |class Foo extends b {
        |  <caret>
        |}
        |abstract class b {
        |  def foo(x: b): b
        |}
      """
    val expectedText =
      """
        |package test
        |
        |class Foo extends b {
        |  def foo(x: b): b = <selection>???</selection>
        |}
        |abstract class b {
        |  def foo(x: b): b
        |}
      """
    val methodName: String = "foo"
    val isImplement = true
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // Surrounding empty lines collapse to a single blank line around the stub.
  def testEmptyLinePos() {
    val fileText =
      """
        |package test
        |class Empty extends b {
        |  def foo(): Int = 3
        |
        |
        |  <caret>
        |
        |
        |}
        |abstract class b {
        |  def too: b
        |}
      """
    val expectedText =
      """
        |package test
        |class Empty extends b {
        |  def foo(): Int = 3
        |
        |  def too: b = <selection>???</selection>
        |}
        |abstract class b {
        |  def too: b
        |}
      """
    val methodName: String = "too"
    val isImplement = true
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // A blank line is inserted between the existing method and the new stub.
  def testNewLineBetweenMethods() {
    val fileText =
      """
        |package test
        |
        |class MethodsNewLine extends b {
        |  def foo(): Int = 3<caret>
        |}
        |abstract class b {
        |  def too: b
        |}
      """
    val expectedText =
      """
        |package test
        |
        |class MethodsNewLine extends b {
        |  def foo(): Int = 3
        |
        |  def too: b = <selection>???</selection>
        |}
        |abstract class b {
        |  def too: b
        |}
      """
    val methodName: String = "too"
    val isImplement = true
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // Blank lines also surround a stub inserted above an existing member.
  def testNewLineUpper() {
    val fileText =
      """
        |package test
        |
        |class UpperNewLine extends b {
        |  <caret>
        |  def foo(): Int = 3
        |}
        |abstract class b {
        |  def too: b
        |}
      """
    val expectedText =
      """
        |package test
        |
        |class UpperNewLine extends b {
        |
        |  def too: b = <selection>???</selection>
        |
        |  def foo(): Int = 3
        |}
        |abstract class b {
        |  def too: b
        |}
      """
    val methodName: String = "too"
    val isImplement = true
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // Overriding a concrete method generates a `super.foo()` delegating body.
  def testOverrideFunction() {
    val fileText =
      """
        |package test
        |
        |class A {
        |  def foo(): A = null
        |}
        |class FunctionOverride extends A {
        |  val t = foo()
        |
        |
        |  <caret>
        |}
      """
    val expectedText =
      """
        |package test
        |
        |class A {
        |  def foo(): A = null
        |}
        |class FunctionOverride extends A {
        |  val t = foo()
        |
        |  override def foo(): A = <selection>super.foo()</selection>
        |}
      """
    val methodName: String = "foo"
    val isImplement = false
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // Implementing an abstract type member aliases it to `this.type`.
  def testImplementTypeAlias() {
    val fileText =
      """
        |package Y
        |trait Aa {
        |  type K
        |}
        |class TypeAlias extends Aa {
        |  val t = foo()
        |  <caret>
        |  def y(): Int = 3
        |}
      """
    val expectedText =
      """
        |package Y
        |trait Aa {
        |  type K
        |}
        |class TypeAlias extends Aa {
        |  val t = foo()
        |
        |  type K = <selection>this.type</selection>
        |
        |  def y(): Int = 3
        |}
      """
    val methodName: String = "K"
    val isImplement = true
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // Overriding a val produces an `override val` with `_` placeholder body.
  def testOverrideValue() {
    val fileText =
      """
        |package test
        |
        |class A {
        |  val foo: A = new A
        |}
        |class OverrideValue extends A {
        |  val t = foo()
        |  <caret>
        |}
      """
    val expectedText =
      """
        |package test
        |
        |class A {
        |  val foo: A = new A
        |}
        |class OverrideValue extends A {
        |  val t = foo()
        |  override val foo: A = <selection>_</selection>
        |}
      """
    val methodName: String = "foo"
    val isImplement = false
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // Implementing an abstract var yields a `var ... = _` default initializer.
  def testImplementVar() {
    val fileText =
      """
        |package test
        |
        |trait A {
        |  var foo: A
        |}
        |class VarImplement extends A {
        |  val t = foo()
        |  <caret>
        |  def y(): Int = 3
        |}
      """
    val expectedText =
      """
        |package test
        |
        |trait A {
        |  var foo: A
        |}
        |class VarImplement extends A {
        |  val t = foo()
        |
        |  var foo: A = <selection>_</selection>
        |
        |  def y(): Int = 3
        |}
      """
    val methodName: String = "foo"
    val isImplement = true
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // Members visible through a self-type annotation can be implemented.
  def testImplementFromSelfType() {
    val fileText =
      """
        |package test
        |
        |trait A {
        |  def foo: Int
        |}
        |trait B {
        |  self: A =>
        |  <caret>
        |}
      """
    val expectedText =
      """
        |package test
        |
        |trait A {
        |  def foo: Int
        |}
        |trait B {
        |  self: A =>
        |  def foo: Int = <selection>???</selection>
        |}
      """
    val methodName: String = "foo"
    val isImplement = true
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // Overriding via a self-type delegates to `self.foo`; with the
  // "no type annotation for public members" setting the result type is omitted.
  def testOverrideFromSelfType() {
    val fileText =
      """
        |package test
        |
        |trait A {
        |  def foo: Int = 1
        |}
        |trait B {
        |  self: A =>
        |  <caret>
        |}
      """
    val expectedText =
      """
        |package test
        |
        |trait A {
        |  def foo: Int = 1
        |}
        |trait B {
        |  self: A =>
        |  override def foo = <selection>self.foo</selection>
        |}
      """
    val methodName: String = "foo"
    val isImplement = false
    val settings = TypeAnnotationSettings.alwaysAddType(ScalaCodeStyleSettings.getInstance(getProjectAdapter))
    runTest(methodName, fileText, expectedText, isImplement, settings = TypeAnnotationSettings.noTypeAnnotationForPublic(settings))
  }
  // Abstract type member in a base class is implemented as `this.type`.
  def testTypeAlias() {
    val fileText =
      """
        |class ImplementTypeAlias extends b {
        |  <caret>
        |}
        |abstract class b {
        |  type L
        |}
      """
    val expectedText =
      """
        |class ImplementTypeAlias extends b {
        |  type L = <selection>this.type</selection>
        |}
        |abstract class b {
        |  type L
        |}
      """
    val methodName: String = "L"
    val isImplement = true
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // Implementing an abstract val yields `val ... = _`.
  def testVal() {
    val fileText =
      """
        |package test
        |
        |class Val extends b {
        |  <caret>
        |}
        |abstract class b {
        |  val too: b
        |}
      """
    val expectedText =
      """
        |package test
        |
        |class Val extends b {
        |  val too: b = <selection>_</selection>
        |}
        |abstract class b {
        |  val too: b
        |}
      """
    val methodName: String = "too"
    val isImplement = true
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // Implementing an abstract var yields `var ... = _`.
  def testVar() {
    val fileText =
      """
        |package test
        |
        |class Var extends b {
        |  <caret>
        |}
        |abstract class b {
        |  var too: b
        |}
      """
    val expectedText =
      """
        |package test
        |
        |class Var extends b {
        |  var too: b = <selection>_</selection>
        |}
        |abstract class b {
        |  var too: b
        |}
      """
    val methodName: String = "too"
    val isImplement = true
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // The class type parameter is substituted (T -> Int) in the override.
  def testClassTypeParam() {
    val fileText =
      """
        |class A[T] {
        |  def foo: T = new T
        |}
        |
        |class ClassTypeParam extends A[Int] {
        |  <caret>
        |}
      """
    val expectedText =
      """
        |class A[T] {
        |  def foo: T = new T
        |}
        |
        |class ClassTypeParam extends A[Int] {
        |  override def foo: Int = <selection>super.foo</selection>
        |}
      """
    val methodName: String = "foo"
    val isImplement = false
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // Substitution inside function-typed parameters (T -> Float).
  def testHardSubstituting() {
    val fileText =
      """
        |class A[T] {
        |  def foo(x: (T) => T, y: (T, Int) => T): Double = 1.0
        |}
        |
        |class Substituting extends A[Float] {
        |  <caret>
        |}
      """
    val expectedText =
      """
        |class A[T] {
        |  def foo(x: (T) => T, y: (T, Int) => T): Double = 1.0
        |}
        |
        |class Substituting extends A[Float] {
        |  override def foo(x: Float => Float, y: (Float, Int) => Float): Double = <selection>super.foo(x, y)</selection>
        |}
      """
    val methodName: String = "foo"
    val isImplement = false
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // Method-level type parameters are carried over verbatim.
  def testSimpleTypeParam() {
    val fileText =
      """
        |abstract class A {
        |  def foo[T](x: T): T
        |}
        |class SimpleTypeParam extends A {
        |  <caret>
        |}
      """
    val expectedText =
      """
        |abstract class A {
        |  def foo[T](x: T): T
        |}
        |class SimpleTypeParam extends A {
        |  def foo[T](x: T): T = <selection>???</selection>
        |}
      """
    val methodName: String = "foo"
    val isImplement = true
    runTest(methodName, fileText, expectedText, isImplement)
  }
def testSCL1997() {
val fileText =
"""
|package test
|
|trait Foo {
| def foo(a: Any*): Any
|}
|
|trait Sub extends Foo {
| <caret>
|}
"""
val expectedText =
"""
|package test
|
|trait Foo {
| def foo(a: Any*): Any
|}
|
|trait Sub extends Foo {
| def foo(a: Any*): Any = <selection>???</selection>
|}
"""
val methodName: String = "foo"
val isImplement = true
runTest(methodName, fileText, expectedText, isImplement)
}
def testSCL1999() {
val fileText =
"""
|package test
|
|trait Parent {
| def m(p: T forSome {type T <: Number})
|}
|
|class Child extends Parent {
| <caret>
|}
"""
val expectedText =
"""
|package test
|
|trait Parent {
| def m(p: T forSome {type T <: Number})
|}
|
|class Child extends Parent {
| def m(p: (T) forSome {type T <: Number}): Unit = <selection>???</selection>
|}
"""
val methodName: String = "m"
val isImplement = true
runTest(methodName, fileText, expectedText, isImplement)
}
def testSCL2540() {
val fileText =
"""
|class A {
| def foo(x_ : Int) = 1
|}
|
|class B extends A {
| <caret>
|}
"""
val expectedText =
"""
|class A {
| def foo(x_ : Int) = 1
|}
|
|class B extends A {
| override def foo(x_ : Int): Int = <selection>super.foo(x_)</selection>
|}
"""
val methodName: String = "foo"
val isImplement = false
runTest(methodName, fileText, expectedText, isImplement)
}
def testSCL2010() {
val fileText =
"""
|package test
|
|class Parent {
| def doSmth(smth: => String) {}
|}
|
|class Child extends Parent {
| <caret>
|}
"""
val expectedText =
"""
|package test
|
|class Parent {
| def doSmth(smth: => String) {}
|}
|
|class Child extends Parent {
| override def doSmth(smth: => String): Unit = <selection>super.doSmth(smth)</selection>
|}
"""
val methodName: String = "doSmth"
val isImplement = false
runTest(methodName, fileText, expectedText, isImplement)
}
def testSCL2052A() {
val fileText =
"""
|class A {
| type ID[X] = X
| def foo(in: ID[String]): ID[Int] = null
|}
|
|class B extends A {
| <caret>
|}
"""
val expectedText =
"""
|class A {
| type ID[X] = X
| def foo(in: ID[String]): ID[Int] = null
|}
|
|class B extends A {
| override def foo(in: ID[String]): ID[Int] = <selection>super.foo(in)</selection>
|}
"""
val methodName: String = "foo"
val isImplement = false
runTest(methodName, fileText, expectedText, isImplement)
}
  // SCL-2052 (val case): overriding a `val` typed via an alias; the stub
  // body is a placeholder `_` selection.
  def testSCL2052B() {
    val fileText =
      """
        |class A {
        |  type ID[X] = X
        |  val foo: ID[Int] = null
        |}
        |
        |class B extends A {
        |  <caret>
        |}
      """
    val expectedText =
      """
        |class A {
        |  type ID[X] = X
        |  val foo: ID[Int] = null
        |}
        |
        |class B extends A {
        |  override val foo: ID[Int] = <selection>_</selection>
        |}
      """
    val methodName: String = "foo"
    val isImplement = false
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // SCL-2052 (object case): when overriding inside an `object`, the alias is
  // qualified with the object's own path (`B.F`).
  def testSCL2052C() {
    val fileText =
      """
        |class A {
        |  type F = (Int => String)
        |  def foo(f: F): Any = null
        |}
        |
        |object B extends A {
        |  <caret>
        |}
      """
    val expectedText =
      """
        |class A {
        |  type F = (Int => String)
        |  def foo(f: F): Any = null
        |}
        |
        |object B extends A {
        |  override def foo(f: B.F): Any = <selection>super.foo(f)</selection>
        |}
      """
    val methodName: String = "foo"
    val isImplement = false
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // SCL-3808: higher-kinded type parameters with a context bound
  // (`N[X[_]]: TC`) — note the generated stub renders the bound with spaces
  // (`N[X[_]] : TC`).
  def testSCL3808() {
    val fileText =
      """
        |trait TC[_]
        |
        |class A {
        |  def foo[M[X], N[X[_]]: TC]: String = ""
        |}
        |
        |object B extends A {
        |  <caret>
        |}
      """
    val expectedText =
      """
        |trait TC[_]
        |
        |class A {
        |  def foo[M[X], N[X[_]]: TC]: String = ""
        |}
        |
        |object B extends A {
        |  override def foo[M[X], N[X[_]] : TC]: String = <selection>super.foo</selection>
        |}
      """
    val methodName: String = "foo"
    val isImplement = false
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // SCL-3305: implementing a method whose parameter type lives in a nested
  // object; the action must add `import test.A.Nested` and shorten the
  // reference to `Nested.Nested2`.
  def testSCL3305() {
    val fileText =
      """
        |package test
        |
        |object A {
        |
        |  object Nested {
        |
        |    class Nested2
        |
        |  }
        |
        |}
        |
        |abstract class B {
        |  def foo(v: A.Nested.Nested2)
        |}
        |
        |class C extends B {
        |  <caret>
        |}
      """
    val expectedText =
      """
        |package test
        |
        |import test.A.Nested
        |
        |object A {
        |
        |  object Nested {
        |
        |    class Nested2
        |
        |  }
        |
        |}
        |
        |abstract class B {
        |  def foo(v: A.Nested.Nested2)
        |}
        |
        |class C extends B {
        |  def foo(v: Nested.Nested2): Unit = <selection>???</selection>
        |}
      """
    val methodName: String = "foo"
    val isImplement = true
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // Implementing an abstract method with an explicit `Unit` return type:
  // the stub keeps `: Unit` and uses `???`.
  def testUnitReturn() {
    val fileText =
      """
        |package test
        |
        |class Foo extends b {
        |  <caret>
        |}
        |abstract class b {
        |  def foo(x: b): Unit
        |}
      """
    val expectedText =
      """
        |package test
        |
        |class Foo extends b {
        |  def foo(x: b): Unit = <selection>???</selection>
        |}
        |abstract class b {
        |  def foo(x: b): Unit
        |}
      """
    val methodName: String = "foo"
    val isImplement = true
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // Overriding a method whose `Unit` result type is inferred from `= ()`;
  // the override still gets an explicit `: Unit`.
  def testUnitInferredReturn() {
    val fileText =
      """
        |package test
        |
        |class Foo extends b {
        |  <caret>
        |}
        |abstract class b {
        |  def foo(x: b) = ()
        |}
      """
    val expectedText =
      """
        |package test
        |
        |class Foo extends b {
        |  override def foo(x: b): Unit = <selection>super.foo(x)</selection>
        |}
        |abstract class b {
        |  def foo(x: b) = ()
        |}
      """
    val methodName: String = "foo"
    val isImplement = false
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // Overriding a method whose result type (`Int`) is inferred from the body;
  // the generated override makes the inferred type explicit.
  def testInferredReturn() {
    val fileText =
      """
        |package test
        |
        |class Foo extends b {
        |  <caret>
        |}
        |abstract class b {
        |  def foo(x: b) = 1
        |}
      """
    val expectedText =
      """
        |package test
        |
        |class Foo extends b {
        |  override def foo(x: b): Int = <selection>super.foo(x)</selection>
        |}
        |abstract class b {
        |  def foo(x: b) = 1
        |}
      """
    val methodName: String = "foo"
    val isImplement = false
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // With "always add type annotation" enabled, the override carries an
  // explicit `: Int` even though the base already declares it.
  def testNoExplicitReturn() {
    val fileText =
      """
        |class A {
        |  def foo(x : Int): Int = 1
        |}
        |
        |class B extends A {
        |  <caret>
        |}
      """
    val expectedText =
      """
        |class A {
        |  def foo(x : Int): Int = 1
        |}
        |
        |class B extends A {
        |  override def foo(x: Int): Int = <selection>super.foo(x)</selection>
        |}
      """
    val methodName: String = "foo"
    val isImplement = false
    val settings = TypeAnnotationSettings.alwaysAddType(ScalaCodeStyleSettings.getInstance(getProjectAdapter))
    runTest(methodName, fileText, expectedText, isImplement, settings)
  }
  // The implicit parameter list is reproduced in the override signature, but
  // the generated `super.foo(x)` call passes only the explicit arguments
  // (the implicit one is resolved by the compiler).
  def testImplicitParams() {
    val fileText =
      """
        |trait A {
        |  def foo(x : Int)(implicit name: String): Int = name + x
        |}
        |
        |class B extends A {
        |  <caret>
        |}
      """
    val expectedText =
      """
        |trait A {
        |  def foo(x : Int)(implicit name: String): Int = name + x
        |}
        |
        |class B extends A {
        |  override def foo(x: Int)(implicit name: String): Int = <selection>super.foo(x)</selection>
        |}
      """
    val methodName: String = "foo"
    val isImplement = false
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // don't add return type for protected: with the "no type annotation for
  // protected members" setting, the implemented stub omits `: Unit`.
  def testProtectedMethod() {
    val fileText =
      """
        |abstract class A {
        |  protected def foo(): Unit
        |}
        |
        |class B extends A {
        |  <caret>
        |}
      """
    val expectedText =
      """
        |abstract class A {
        |  protected def foo(): Unit
        |}
        |
        |class B extends A {
        |  protected def foo() = <selection>???</selection>
        |}
      """
    val methodName: String = "foo"
    val isImplement = true
    val settings = TypeAnnotationSettings.alwaysAddType(ScalaCodeStyleSettings.getInstance(getProjectAdapter))
    runTest(methodName, fileText, expectedText, isImplement, settings = TypeAnnotationSettings.noTypeAnnotationForProtected(settings))
  }
  // Implementing into a class that has no body yet (`class B extends A`):
  // the action must create the braces as well as the stub.
  def testProtectedMethodNoBody() {
    val fileText =
      """
        |abstract class A {
        |  protected def foo(): Unit
        |}
        |
        |class B<caret> extends A
      """
    val expectedText =
      """
        |abstract class A {
        |  protected def foo(): Unit
        |}
        |
        |class B extends A {
        |  protected def foo(): Unit = <selection>???</selection>
        |}
      """
    val methodName: String = "foo"
    val isImplement = true
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // Same as above but overriding a concrete protected method: the stub gets
  // `override protected` and delegates to `super.foo()`.
  def testOverrideProtectedMethodNoBody() {
    val fileText =
      """
        |abstract class A {
        |  protected def foo(): Unit = {}
        |}
        |
        |class B<caret> extends A
      """
    val expectedText =
      """
        |abstract class A {
        |  protected def foo(): Unit = {}
        |}
        |
        |class B extends A {
        |  override protected def foo(): Unit = <selection>super.foo()</selection>
        |}
      """
    val methodName: String = "foo"
    val isImplement = false
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // With `copyScalaDoc = true`, the base method's Scaladoc is copied onto
  // the generated override verbatim.
  def testCopyScalaDoc() = {
    val fileText =
      """
        |abstract class A {
        |
        |  /**
        |   * qwerty
        |   *
        |   * @return
        |   */
        |  protected def foo(): Unit = {}
        |}
        |
        |class B<caret> extends A
      """
    val expectedText =
      """
        |abstract class A {
        |
        |  /**
        |   * qwerty
        |   *
        |   * @return
        |   */
        |  protected def foo(): Unit = {}
        |}
        |
        |class B extends A {
        |  /**
        |   * qwerty
        |   *
        |   * @return
        |   */
        |  override protected def foo(): Unit = <selection>super.foo()</selection>
        |}
      """
    val methodName: String = "foo"
    val isImplement = false
    val copyScalaDoc = true
    runTest(methodName, fileText, expectedText, isImplement, copyScalaDoc = copyScalaDoc)
  }
  // An existing `scala.collection.Seq` import must not be duplicated or
  // re-qualified when the override mentions `Seq`. (No <selection> markers
  // in this fixture — only the text is compared.)
  def testNoImportScalaSeq(): Unit = {
    val fileText =
      """
        |import scala.collection.Seq
        |
        |class Test {
        |  def foo: Seq[Int] = Seq(1)
        |}
        |
        |class Test2 extends Test {
        |<caret>
        |}
      """
    val expectedText =
      """
        |import scala.collection.Seq
        |
        |class Test {
        |  def foo: Seq[Int] = Seq(1)
        |}
        |
        |class Test2 extends Test {
        |  override def foo: Seq[Int] = super.foo
        |}
      """
    val methodName: String = "foo"
    val isImplement = false
    runTest(methodName, fileText, expectedText, isImplement)
  }
  // Overriding a `val` constructor parameter of the parent class; the stub
  // body is the `_` placeholder.
  def testOverrideClassParam(): Unit = {
    val fileText =
      """
        |class Parent(val param1: Int, var param2: String)
        |
        |class Child extends Parent(4, "") {
        |  <caret>
        |}
      """
    val expectedText =
      """
        |class Parent(val param1: Int, var param2: String)
        |
        |class Child extends Parent(4, "") {
        |  override val param1: Int = _
        |}
      """
    runTest("param1", fileText, expectedText, isImplement = false)
  }
  // Annotations on the base method (@throws, @deprecated) must NOT be copied
  // onto the generated override.
  def testDoNotSaveAnnotations(): Unit ={
    val fileText =
      """
        |trait Base {
        |  @throws(classOf[Exception])
        |  @deprecated
        |  def annotFoo(int: Int): Int = 45
        |}
        |
        |class Inheritor extends Base {
        |  <caret>
        |}
      """
    val expectedText =
      """
        |trait Base {
        |  @throws(classOf[Exception])
        |  @deprecated
        |  def annotFoo(int: Int): Int = 45
        |}
        |
        |class Inheritor extends Base {
        |  override def annotFoo(int: Int): Int = super.annotFoo(int)
        |}
      """
    val methodName: String = "annotFoo"
    val isImplement = false
    runTest(methodName, fileText, expectedText, isImplement)
  }
}
| triplequote/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/lang/overrideImplement/ScalaOverrideImplementTest.scala | Scala | apache-2.0 | 25,824 |
// In run, rather than pos, to check for problems like SI-4283
object O1 {
  // Base is visible only inside O1; subclasses outside O1 can still inherit
  // its members through the public Mediator. This layout is exactly what
  // SI-5162/SI-4283 mis-compiled (super-accessor to an inaccessible class).
  private[O1] class Base {
    def foo: Int = 0
  }
  // Public bridge that re-exports Base's members outside O1.
  class Mediator extends Base
}
object O2 {
  // Overrides foo and calls super.foo: the super-accessor must target the
  // public Mediator, not the O1-private Base, or the JVM rejects the call.
  class Derived extends O1.Mediator {
    override def foo: Int = super.foo
  }
}
object Test {
  /** Entry point: instantiating Derived and invoking foo exercises the
    * super-accessor path that this regression test exists to check;
    * success is simply the absence of a linkage error at runtime. */
  def main(args: Array[String]): Unit = {
    val derived = new O2.Derived()
    derived.foo
  }
}
| yusuke2255/dotty | tests/run/t5162.scala | Scala | bsd-3-clause | 342 |
package breeze.linalg
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import breeze.linalg.Counter2.Curried
import breeze.linalg.operators.Counter2Ops
import breeze.storage.Zero
import collection.mutable.HashMap
import breeze.math.Semiring
import breeze.linalg.support._
import scala.collection.Set
import scala.collection.parallel.mutable
import scala.reflect.ClassTag
import CanTraverseValues.ValuesVisitor
/**
*
* @author dlwh
*/
/**
* A map-like tensor that acts like a collection of key-value pairs where
* the set of values may grow arbitrarily.
*
* @author dlwh
*/
/** Shared implementation for two-key counters backed by a map of maps:
  * outer key K1 selects an inner Counter over K2. Missing entries read as
  * `default`; writes materialise the inner counter on demand. */
trait Counter2Like
[K1, K2, V,
 +M1[VV] <: Curried[scala.collection.mutable.Map,K1]#Result[VV],
 +T <: Counter[K2,V],
 +This<:Counter2[K1,K2,V]] extends TensorLike[(K1,K2),V,This] { self =>
  // Backing storage: mutable map K1 -> inner counter.
  def data : M1[_<:T]
  // Value returned for absent (k1,k2) pairs.
  def default: V
  // Total number of stored (k1,k2) entries, summed over inner counters.
  override def size = {
    var s = 0
    for (m <- data.valuesIterator) {
      s += m.size
    }
    s
  }
  def apply(i: (K1, K2)) = apply(i._1, i._2)
  // Read without materialising a missing inner counter.
  def apply(k : K1, k2: K2) = data.get(k).map(t => t(k2)) getOrElse default
  def contains(k: K1) = data.contains(k)
  def contains(k1: K1, k2: K2) = data.contains(k1) && data(k1).contains(k2)
  def update(i: (K1, K2), v: V) {update(i._1, i._2, v)}
  // Write: creates the inner counter (via the map's default) if absent.
  def update(k1 : K1, k2: K2, v : V) =
    innerGetOrElseUpdate(k1,data)(k2) = v
  private[linalg] def innerGetOrElseUpdate[M](k:K1, m: scala.collection.mutable.Map[K1,M]): M = {
    m.getOrElseUpdate(k,m.default(k))
  }
  override def keysIterator = for ((k1,m) <- data.iterator; k2 <- m.keysIterator) yield (k1,k2)
  override def valuesIterator = for (m <- data.valuesIterator; v <- m.valuesIterator) yield v
  override def iterator = for ((k1,m) <- data.iterator; (k2,v) <- m.iterator) yield (k1,k2)->v
  // Counters are always "dense" over their stored keys: active == all.
  def activeSize = size
  def activeIterator = iterator
  def activeKeysIterator = keysIterator
  def activeValuesIterator = valuesIterator
  def repr = this.asInstanceOf[This]
  override def toString: String = {
    data.iterator.map { case (k1, c) => k1 + " -> " + c.toString}.mkString("Counter2(",",\\n",")")
  }
  // NOTE(review): equality compares active entries as sets, so two counters
  // with different defaults but identical stored entries compare equal;
  // also equals is defined without a matching hashCode override here —
  // confirm hashCode comes from elsewhere in the hierarchy.
  override def equals(p1: Any): Boolean = p1 match {
    case x:Counter2[_, _, _] =>
      x.activeIterator.toSet == activeIterator.toSet
    case _ => false
  }
}
/** A two-key counter: a Tensor keyed by (K1,K2) pairs, implemented as a
  * mutable map from K1 to Counter[K2,V]. */
trait Counter2[K1, K2, V]
  extends Tensor[(K1,K2),V] with Counter2Like[K1,K2,V,Curried[scala.collection.mutable.Map,K1]#Result,Counter[K2,V],Counter2[K1,K2,V]]
/** Companion: constructors for Counter2 plus the typeclass instances
  * (mapping, traversal, slicing, axis collapsing) that the breeze
  * linalg machinery resolves implicitly. Instances here take priority
  * over those in LowPriorityCounter2. */
object Counter2 extends LowPriorityCounter2 with Counter2Ops {
  @SerialVersionUID(1L)
  class Impl[K1, K2, V]
  (override val data : scala.collection.mutable.Map[K1,Counter[K2,V]])
  (implicit scalar : Zero[V])
    extends Counter2[K1,K2,V] with Serializable {
    // Absent entries read as the Zero of V.
    def default = scalar.zero
    // A view of all stored (k1,k2) pairs; + and - copy into a fresh Set.
    def keySet: Set[(K1, K2)] = new Set[(K1, K2)] {
      def contains(k: (K1, K2)): Boolean = data.contains(k._1) && data(k._1).contains(k._2)
      def +(elem: (K1, K2)): Set[(K1, K2)] = Set.empty ++ iterator + elem
      def -(elem: (K1, K2)): Set[(K1, K2)] = Set.empty ++ iterator - elem
      def iterator: Iterator[(K1, K2)] = for( (k1,m) <- data.iterator; k2 <- m.keysIterator) yield (k1, k2)
    }
  }
  /** Returns a new empty counter. */
  def apply[K1,K2,V:Zero](): Counter2[K1,K2,V] = {
    new Impl(new CounterHashMap)
  }
  // HashMap whose default conjures an empty inner Counter, so that
  // innerGetOrElseUpdate can materialise rows lazily.
  @SerialVersionUID(1L)
  private class CounterHashMap[K1, K2, V:Zero] extends HashMap[K1, Counter[K2, V]] with Serializable {
    override def default(k: K1) = Counter[K2,V]()
  }
  /** Aggregates the counts in the given items. */
  def apply[K1,K2,V:Semiring:Zero](values : (K1,K2,V)*) : Counter2[K1,K2,V] =
    apply(values.iterator)
  /** Aggregates the counts in the given items. */
  def apply[K1,K2,V:Semiring:Zero](values : TraversableOnce[(K1,K2,V)]) : Counter2[K1,K2,V] = {
    val rv = apply[K1,K2,V]()
    values.foreach({ case (k1,k2,v) => rv(k1,k2) = implicitly[Semiring[V]].+(rv(k1,k2), v) })
    rv
  }
  /** Counts the given elements. */
  def count[K1,K2](values : TraversableOnce[(K1,K2)]) : Counter2[K1,K2,Int] = {
    val rv = apply[K1,K2,Int]()
    values.foreach({ case (k1,k2) => rv(k1,k2) += 1; })
    rv
  }
  // Value mapping over ALL entries (a Counter2's active set == full set).
  implicit def CanMapValuesCounter[K1, K2, V, RV:Semiring:Zero]: CanMapValues[Counter2[K1, K2, V], V, RV, Counter2[K1, K2, RV]] = {
    new CanMapValues[Counter2[K1, K2, V], V, RV, Counter2[K1, K2, RV]] {
      override def apply (from: Counter2[K1, K2, V], fn: (V => RV) ) = {
        val rv = Counter2[K1, K2, RV] ()
        for ((k, v) <- from.iterator) {
          rv (k) = fn (v)
        }
        rv
      }
    }
  }
  implicit def CanMapActiveValuesCounter[K1, K2, V, RV:Semiring:Zero]: CanMapActiveValues[Counter2[K1, K2, V], V, RV, Counter2[K1, K2, RV]] = {
    new CanMapActiveValues[Counter2[K1, K2, V],V,RV,Counter2[K1, K2, RV]] {
      override def apply(from: Counter2[K1, K2, V], fn: (V => RV)) = {
        val rv = Counter2[K1,K2, RV]()
        for( (k,v) <- from.activeIterator) {
          rv(k) = fn(v)
        }
        rv
      }
    }
  }
  implicit def canIterateValues[K1, K2, V]: CanTraverseValues[Counter2[K1, K2,V], V] = new CanTraverseValues[Counter2[K1, K2, V], V] {
    def isTraversableAgain(from: Counter2[K1, K2, V]): Boolean = true
    def traverse(from: Counter2[K1, K2, V], fn: ValuesVisitor[V]): Unit = {
      for( v <- from.valuesIterator) {
        fn.visit(v)
      }
    }
  }
  // slicing
  // Row slice c(k1, ::) — returns the LIVE inner counter (mutations write
  // through to the Counter2); materialises the row if absent.
  implicit def canSliceRow[K1,K2,V] : CanSlice2[Counter2[K1,K2,V],K1,::.type, Counter[K2,V]]
  = new CanSlice2[Counter2[K1,K2,V],K1, ::.type, Counter[K2,V]] {
    override def apply(from : Counter2[K1,K2,V], row : K1, unused: ::.type) = from.innerGetOrElseUpdate(row, from.data)
  }
  // Column slice c(::, k2) — a mutable VIEW over the column; reads and
  // writes delegate to the underlying Counter2.
  implicit def canSliceCol[K1,K2,V]: CanSlice2[Counter2[K1,K2,V], ::.type, K2,Counter[K1,V]]
  = new CanSlice2[Counter2[K1,K2,V],::.type, K2,Counter[K1,V]] {
    def apply(from: Counter2[K1, K2, V], x: ::.type, col: K2) = new Counter[K1,V] {
      def default = from.default
      override val data = new scala.collection.mutable.Map[K1,V] {
        override def apply(k1 : K1) =
          from(k1,col)
        override def update(k1 : K1, v : V) =
          from(k1,col) = v
        // "Removal" writes the default back rather than deleting the key.
        override def -=(k1 : K1) = {
          from.data(k1)(col) = from.default
          this
        }
        override def +=(tup : (K1,V)) = {
          from.data(tup._1)(col) = (tup._2)
          this
        }
        override def iterator =
          for ((k1,map) <- from.data.iterator; v <- map.get(col)) yield (k1,v)
        override def get(k1 : K1) =
          from.data.get(k1).map(_(col))
        override def keySet = from.data.keySet
        override def size = from.data.size
      }
    }
  }
  /**
   * Returns a Counter[K2, V]
   * @tparam V
   * @return
   */
  implicit def canMapRows[K1, K2, V, R:Zero:Semiring]: CanCollapseAxis[Counter2[K1, K2,V], Axis._0.type, Counter[K1, V], Counter[K1, R], Counter2[K1, K2, R]] = {
    new CanCollapseAxis[Counter2[K1, K2, V], Axis._0.type, Counter[K1, V], Counter[K1, R], Counter2[K1, K2, R]] {
      def apply(from: Counter2[K1, K2, V], axis: Axis._0.type)(f: (Counter[K1, V]) => Counter[K1, R]): Counter2[K1, K2, R] = {
        val result = Counter2[K1, K2, R]()
        for (dom <- from.keySet.map(_._2)) {
          result(::, dom) := f(from(::, dom))
        }
        result
      }
    }
  }
  implicit def handholdCanMapRows[K1, K2, V]: CanCollapseAxis.HandHold[Counter2[K1, K2, V], Axis._0.type, Counter[K1, V]] = new CanCollapseAxis.HandHold[Counter2[K1, K2, V], Axis._0.type, Counter[K1, V]]()
  /**
   * Returns a Counter[K1, V]
   * @tparam V
   * @tparam R
   * @return
   */
  implicit def canMapCols[K1, K2, V, R:ClassTag:Zero:Semiring]: CanCollapseAxis[Counter2[K1, K2,V], Axis._1.type, Counter[K2, V], Counter[K2, R], Counter2[K1, K2, R]] = new CanCollapseAxis[Counter2[K1, K2,V], Axis._1.type, Counter[K2, V], Counter[K2, R], Counter2[K1,K2,R]] {
    def apply(from: Counter2[K1, K2,V], axis: Axis._1.type)(f: (Counter[K2, V]) => Counter[K2, R]): Counter2[K1, K2, R] = {
      val result = Counter2[K1, K2, R]()
      for( (dom,c) <- from.data) {
        result(dom, ::) := f(c)
      }
      result
    }
  }
  implicit def handholdCanMapCols[K1, K2, V]: CanCollapseAxis.HandHold[Counter2[K1, K2, V], Axis._1.type, Counter[K2, V]] = new CanCollapseAxis.HandHold[Counter2[K1, K2, V], Axis._1.type, Counter[K2, V]]()
  /**
   * This is just a curried version of scala.collection.Map.
   * Used to get around Scala's lack of partially applied types.
   *
   * @author dlwh
   */
  trait Curried[M[_,_],K] {
    type Result[V] = M[K,V]
  }
  implicit def scalarOf[K1, K2, V]: ScalarOf[Counter2[K1, K2, V], V] = ScalarOf.dummy
}
/** Lower-priority axis-collapse instances (scalar result per row/column).
  * Lives in a parent trait so the Counter-returning instances in the
  * Counter2 companion win implicit resolution when both apply. */
trait LowPriorityCounter2 {
  /**
   * Collapses along axis 0: applies f to each column view and returns a
   * Counter[K2, R] of the scalar results.
   * @tparam V
   * @tparam R
   * @return
   */
  implicit def canCollapseRows[K1, K2, V, R:ClassTag:Zero:Semiring]: CanCollapseAxis[Counter2[K1, K2, V], Axis._0.type, Counter[K1, V], R, Counter[K2, R]] = new CanCollapseAxis[Counter2[K1, K2,V], Axis._0.type, Counter[K1, V], R, Counter[K2,R]] {
    def apply(from: Counter2[K1, K2,V], axis: Axis._0.type)(f: (Counter[K1, V]) => R): Counter[K2, R] = {
      val result = Counter[K2, R]()
      for( dom <- from.keySet.map(_._2)) {
        result(dom) = f(from(::, dom))
      }
      result
    }
  }
  /**
   * Collapses along axis 1: applies f to each stored row counter and returns
   * a Counter[K1, R] of the scalar results.
   * @tparam V
   * @tparam R
   * @return
   */
  implicit def canCollapseCols[K1, K2, V, R:ClassTag:Zero:Semiring]: CanCollapseAxis[Counter2[K1, K2, V], Axis._1.type, Counter[K2, V], R, Counter[K1, R]] = new CanCollapseAxis[Counter2[K1, K2,V], Axis._1.type, Counter[K2, V], R, Counter[K1,R]] {
    def apply(from: Counter2[K1, K2,V], axis: Axis._1.type)(f: (Counter[K2, V]) => R): Counter[K1, R] = {
      val result = Counter[K1, R]()
      for( (dom,c) <- from.data) {
        result(dom) = f(c)
      }
      result
    }
  }
}
| chen0031/breeze | math/src/main/scala/breeze/linalg/Counter2.scala | Scala | apache-2.0 | 10,293 |
package fi.pyppe.subtitler
import java.io.File
import org.specs2.mutable.Specification
// Verifies the OpenSubtitles hash (64KiB head + 64KiB tail + file size)
// against a known fixture shipped on the test classpath.
class OpenSubtitlesHasherSpec extends Specification {
  val breakdanceAvi = new File(getClass.getResource("/breakdance.avi").getPath)
  "OpenSubtitlesHasher" should {
    s"compute hash for ${breakdanceAvi.getName}" in {
      // NOTE(review): in specs2 only the last expression of an example is
      // the example's result — the size check below looks like it is
      // evaluated but its outcome discarded; confirm whether it should be
      // combined with `and`.
      breakdanceAvi.length === 12909756L
      OpenSubtitlesHasher.computeHash(breakdanceAvi) === "8e245d9679d31e12"
    }
  }
}
| Pyppe/subtitler | src/test/scala/fi/pyppe/subtitler/OpenSubtitlesHasherSpec.scala | Scala | mit | 443 |
/*
* Copyright 2013 Steve Vickers
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package reactivemongo.extensions.dsl.criteria
import scala.language.dynamics
/**
* The '''Untyped''' type defines the behaviour expected of queries where the
* MongoDB document may not correspond to a Scala type known to the system
* using this abstraction.
*/
/**
 * The '''Untyped''' type defines the behaviour expected of queries where the
 * MongoDB document may not correspond to a Scala type known to the system
 * using this abstraction. Any field access (`criteria.someField`) is routed
 * through [[scala.Dynamic]] and becomes a [[Term]] over that field name.
 */
sealed trait Untyped
  extends Dynamic {
  // Invoked by the compiler for any member selection on an Untyped value.
  def selectDynamic(field: String): Term[Any] =
    Term[Any](field);
}
/** Companion: the `criteria` factory plus `where` overloads for lambdas of
  * 1 to 22 Untyped parameters (22 is Scala's FunctionN ceiling). All
  * overloads pass the same stateless `criteria` singleton for every slot;
  * multiple parameters exist purely for caller-side naming convenience. */
object Untyped {
  /**
   * The criteria property is a ''factory'' of '''Untyped''' instances.
   */
  val criteria = new Untyped {};
  def where(block: (Untyped) => Expression): Expression =
    block(criteria);
  def where(block: (Untyped, Untyped) => Expression): Expression =
    block(criteria, criteria);
  def where(block: (Untyped, Untyped, Untyped) => Expression): Expression =
    block(criteria, criteria, criteria);
  def where(block: (Untyped, Untyped, Untyped, Untyped) => Expression): Expression =
    block(criteria, criteria, criteria, criteria);
  def where(block: (Untyped, Untyped, Untyped, Untyped, Untyped) => Expression): Expression =
    block(criteria, criteria, criteria, criteria, criteria);
  def where(block: (Untyped, Untyped, Untyped, Untyped, Untyped, Untyped) => Expression): Expression =
    block(criteria, criteria, criteria, criteria, criteria, criteria);
  def where(block: (Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped) => Expression): Expression =
    block(criteria, criteria, criteria, criteria, criteria, criteria, criteria);
  def where(block: (Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped) => Expression): Expression =
    block(criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria);
  def where(block: (Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped) => Expression): Expression =
    block(criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria);
  def where(block: (Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped) => Expression): Expression =
    block(criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria);
  def where(block: (Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped) => Expression): Expression =
    block(criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria);
  def where(block: (Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped) => Expression): Expression =
    block(criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria);
  def where(block: (Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped) => Expression): Expression =
    block(criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria);
  def where(block: (Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped) => Expression): Expression =
    block(criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria);
  def where(block: (Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped) => Expression): Expression =
    block(criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria);
  def where(block: (Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped) => Expression): Expression =
    block(criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria);
  def where(block: (Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped) => Expression): Expression =
    block(criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria);
  def where(block: (Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped) => Expression): Expression =
    block(criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria);
  def where(block: (Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped) => Expression): Expression =
    block(criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria);
  def where(block: (Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped) => Expression): Expression =
    block(criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria);
  def where(block: (Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped) => Expression): Expression =
    block(criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria);
  def where(block: (Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped, Untyped) => Expression): Expression =
    block(criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria, criteria);
}
| ReactiveMongo/ReactiveMongo-Extensions | bson/src/main/scala/dsl/criteria/Untyped.scala | Scala | apache-2.0 | 7,264 |
/*
* Copyright 2017-2020 Aleksey Fomkin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package korolev.effect
import korolev.effect.Effect.Fiber
import scala.annotation.implicitNotFound
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.util.{Failure, Success, Try}
/**
* Korolev's internal presentation of effect (such as Future, cats.effect.IO, Monix or ZIO tasks).
* Contains enough functionality to make Korolev works.
*/
@implicitNotFound("Instance of Effect for ${F} is not found.")
trait Effect[F[_]] {
  // Cached F[None] shared by every call to `none` to avoid re-allocation.
  // NOTE(review): initialised via the abstract `pure` during trait
  // construction — relies on the implementing class's `pure` being safe to
  // call before its own constructor completes; confirm for new instances.
  private val noneVal: F[None.type] = pure(None)
  // Cast is safe: None.type <: Option[A] for every A and F is unchanged.
  def none[A]: F[Option[A]] = noneVal.asInstanceOf[F[Option[A]]]
  // Lift an already-computed value into F.
  def pure[A](value: A): F[A]
  // Suspend a by-name computation; exceptions become a failed F.
  def delay[A](value: => A): F[A]
  def delayAsync[A](value: => F[A]): F[A] = flatMap(delay(value))(identity)
  def fail[A](e: Throwable): F[A]
  def unit: F[Unit]
  // An effect that never completes.
  def never[T]: F[T]
  def fromTry[A](value: => Try[A]): F[A]
  // Build F from a callback-style API; `cb` receives the completion function.
  def promise[A](cb: (Either[Throwable, A] => Unit) => Unit): F[A]
  // Like `promise`, but the registration step itself is effectful.
  def promiseF[A](cb: (Either[Throwable, A] => Unit) => F[Unit]): F[A]
  def flatMap[A, B](m: F[A])(f: A => F[B]): F[B]
  def map[A, B](m: F[A])(f: A => B): F[B]
  def recover[A, AA >: A](m: F[A])(f: PartialFunction[Throwable, AA]): F[AA]
  def recoverF[A, AA >: A](m: F[A])(f: PartialFunction[Throwable, F[AA]]): F[AA]
//  def onError[A](m: F[A])(f: Throwable => Unit): F[A]
//  def onErrorF[A](m: F[A])(f: Throwable => F[Unit]): F[A]
  /** Keep in mind that when [[F]] has strict semantic, effect should
    * created inside 'start()' brackets. */
  def start[A](create: => F[A])(implicit ec: ExecutionContext): F[Fiber[F, A]]
  /** Keep in mind that when [[F]] has strict semantic, effect should
    * created inside 'fork()' brackets. */
  def fork[A](m: => F[A])(implicit ec: ExecutionContext): F[A]
  def sequence[A](in: List[F[A]]): F[List[A]]
  // Bridge F to callback / synchronous / Future worlds.
  def runAsync[A](m: F[A])(callback: Either[Throwable, A] => Unit): Unit
  def run[A](m: F[A]): Either[Throwable, A]
  def toFuture[A](m: F[A]): Future[A]
}
object Effect {
  // A completion callback for an asynchronous result.
  type Promise[A] = Either[Throwable, A] => Unit
  // Handle to a started computation; `join` waits for its result.
  trait Fiber[F[_], A] {
    def join(): F[A]
  }
  def apply[F[_]: Effect]: Effect[F] = implicitly[Effect[F]]
  /** Effect instance for scala.concurrent.Future. Note that Futures are
    * eager: values passed to combinators are already running. */
  class FutureEffect extends Effect[Future] {
    // Same-thread executor for the instance's own map/flatMap plumbing so
    // continuations don't hop threads.
    private implicit val immediateEc: ExecutionContext = new ExecutionContext {
      // Run on the same thread
      def execute(runnable: Runnable): Unit = runnable.run()
      def reportFailure(cause: Throwable): Unit = cause.printStackTrace()
    }
    val unit: Future[Unit] = Future.unit
    def never[T]: Future[T] = Future.never
    def toFuture[A](m: Future[A]): Future[A] = m
    def fail[A](e: Throwable): Future[A] = Future.failed(e)
    def pure[A](value: A): Future[A] = Future.successful(value)
    // NOTE(review): catches Throwable, not just NonFatal — fatal errors
    // (OOM etc.) are converted into a failed Future here.
    def delay[A](value: => A): Future[A] =
      try {
        Future.successful(value)
      } catch {
        case error: Throwable =>
          Future.failed(error)
      }
    def fork[A](m: => Future[A])(implicit ec: ExecutionContext): Future[A] =
      Future(m)(ec).flatten
    def fromTry[A](value: => Try[A]): Future[A] = Future.fromTry(value)
    def flatMap[A, B](m: Future[A])(f: A => Future[B]): Future[B] = m.flatMap(f)
    def map[A, B](m: Future[A])(f: A => B): Future[B] = m.map(f)
    def runAsync[A](m: Future[A])(f: Either[Throwable, A] => Unit): Unit =
      m.onComplete(x => f(x.toEither))
    // Blocks the calling thread indefinitely; only for program edges/tests.
    def run[A](m: Future[A]): Either[Throwable, A] =
      Try(Await.result(m, Duration.Inf)).toEither
    def recover[A, AA >: A](m: Future[A])(f: PartialFunction[Throwable, AA]): Future[AA] = m.recover(f)
    def recoverF[A, AA >: A](m: Future[A])(f: PartialFunction[Throwable, Future[AA]]): Future[AA] = m.recoverWith(f)
//    def onError[A](m: Future[A])(f: Throwable => Unit): Future[A] = {
//      m.onComplete {
//        case Success(value) => ()
//        case Failure(exception) =>
//          f(exception)
//      }
//      m
//    }
//    def onErrorF[A](m: Future[A])(f: Throwable => Future[Unit]): Future[A] = {
//      m.onComplete {
//        case Success(value) => ()
//        case Failure(exception) =>
//          f(exception)
//      }
//      m
//    }
    /** Keep in mind that when [[F]] has strict semantic, effect should
      * created inside 'start()' brackets. */
    def sequence[A](in: List[Future[A]]): Future[List[A]] =
      Future.sequence(in)
    // The future starts running immediately; the Fiber merely exposes it.
    def start[A](create: => Future[A])(implicit ec: ExecutionContext): Future[Fiber[Future, A]] = {
      val f = Future(create)(ec).flatten
      Future.successful {
        new Fiber[Future, A] {
          override def join(): Future[A] = f
        }
      }
    }
    def promise[A](cb: (Either[Throwable, A] => Unit) => Unit): Future[A] = {
      val promise = scala.concurrent.Promise[A]()
      try {
        cb(or => promise.complete(or.toTry))
        promise.future
      } catch {
        case e: Throwable =>
          Future.failed(e)
      }
    }
    def promiseF[A](cb: (Either[Throwable, A] => Unit) => Future[Unit]): Future[A] = {
      val promise = scala.concurrent.Promise[A]()
      // FIXME handle error
      // NOTE(review): if cb's Future fails after it has registered the
      // callback, the failure propagates via flatMap, but a cb that fails
      // without completing the promise leaves it pending — confirm intended.
      cb(or => promise.complete(or.toTry)).flatMap { _ =>
        promise.future
      }
    }
  }
  implicit val futureEffect: Effect[Future] =
    new FutureEffect()
}
| fomkin/korolev | modules/effect/src/main/scala/korolev/effect/Effect.scala | Scala | apache-2.0 | 5,768 |
package org.bizzle.astar
import
scala.annotation.tailrec
import
org.bizzle.pathfinding.{ coordinate, pathingmap, PathingStatus },
coordinate.{ BadCoordinate2D, Coordinate2D },
pathingmap.PathingMapString,
PathingStatus._
import
base.{ AStarBase, AStarStepData, HeuristicLib }
/**
* Created by IntelliJ IDEA.
* User: Jason
* Date: 11/17/11
* Time: 3:33 PM
*/
// As A* traverses the graph, it follows a path of the lowest known cost, keeping a sorted priority queue of alternate path segments along the way.
// If, at any point, a segment of the path being traversed has a higher cost than another encountered path segment, it abandons the higher-cost path
// segment and traverses the lower-cost path segment instead. This process continues until the destination is reached.
// Already-spent cost (from origin to current) g(x)
// Heuristic estimate (from current to destination) h(x)
// Total combined cost (from origin to destination) f(x) = g(x) + h(x)
/** Concrete A* implementation: branch factor 1.0 and Manhattan-distance
  * heuristic, stepping AStarStepData until the goal is reached, failure is
  * declared, or the iteration cap is hit. */
object AStar extends AStarBase[AStarStepData](1.0, HeuristicLib.manhattanDistance) {
  // Parse the map string, prime the initial step data, and run the search;
  // the iteration cap is derived from the map's dimensions.
  override def apply(mapString: PathingMapString) : PathingStatus[AStarStepData] = {
    val stepData = AStarStepData(mapString)
    execute(primeStepData(stepData), calculateMaxIters(stepData.pathingMap.colCount, stepData.pathingMap.rowCount))
  }
  // Main loop: `decide` classifies the current state; Continue recurses
  // (tail call) with the next step, Success/Failure terminate.
  override protected def execute(stepData: AStarStepData, maxIters: Int) : PathingStatus[AStarStepData] = {
    @tailrec def executeHelper(stepData: AStarStepData, maxIters: Int) : PathingStatus[AStarStepData] = {
      val decision = decide(stepData, maxIters)
      decision match {
        case Continue(x: AStarStepData) => executeHelper(step(x)._1, maxIters)
        case Success(_)                 => decision
        case Failure(_)                 => decision
      }
    }
    executeHelper(step(stepData)._1, maxIters)
  }
  // True when the freshly-expanded location overlaps the goal coordinate.
  protected def goalIsFound (stepData: AStarStepData, freshLoc: Coordinate2D) = freshLoc overlaps stepData.goal
  protected def makeNewStepData(stepData: AStarStepData, freshLoc: Coordinate2D = BadCoordinate2D, isIncingIters: Boolean = false) = AStarStepData(freshLoc, stepData, isIncingIters)
}
| TheBizzle/PathFinding | AStar/src/main/org/bizzle/astar/AStar.scala | Scala | bsd-3-clause | 2,221 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import org.mockito.Mockito._
import uk.gov.hmrc.ct.accounts.AccountsMoneyValidationFixture
import uk.gov.hmrc.ct.accounts.frs102.retriever.Frs102AccountsBoxRetriever
import uk.gov.hmrc.ct.accounts.MockAbridgedAccountsRetriever
import uk.gov.hmrc.ct.box.CtValidation
/** Validation behaviour of box AC115 (abridged accounts): value range checks
  * plus the cross-box rules of the intangible-assets note.
  */
class AC115AbridgedSpec extends AccountsMoneyValidationFixture[Frs102AccountsBoxRetriever] with MockAbridgedAccountsRetriever {

  // Baseline fixture: balance-sheet boxes AC42/AC43 carry values, every other
  // intangible-assets note box is empty, and the free-text box AC5123 is
  // populated so the "at least one note field entered" rule passes by default.
  override def setUpMocks() = {
    super.setUpMocks()
    import boxRetriever._
    when(ac42()).thenReturn(AC42(Some(100)))
    when(ac43()).thenReturn(AC43(Some(100)))
    when(ac114()).thenReturn(AC114(None))
    when(ac115()).thenReturn(AC115(None))
    when(ac116()).thenReturn(AC116(None))
    when(ac209()).thenReturn(AC209(None))
    when(ac210()).thenReturn(AC210(None))
    when(ac118()).thenReturn(AC118(None))
    when(ac119()).thenReturn(AC119(None))
    when(ac120()).thenReturn(AC120(None))
    when(ac211()).thenReturn(AC211(None))
    when(ac5123()).thenReturn(AC5123(Some("test text")))
  }

  // Shared money-box checks: AC115 must be >= 0; an empty value is permitted.
  testAccountsMoneyValidationWithMin("AC115", 0, AC115.apply, testEmpty = false)

  "AC115" should {
    "throw global error when none of the fields for the note is entered" in {
      setUpMocks()
      // Remove the only other populated note field so the note is effectively empty.
      when(boxRetriever.ac5123()).thenReturn(AC5123(None))
      AC115(Some(10)).validate(boxRetriever) shouldBe Set(CtValidation(None, "error.balanceSheet.intangibleAssets.atLeastOneEntered"))
    }

    "throw global error when one field was entered but not cannot be set" in {
      setUpMocks()
      // Without AC42/AC43 the balance sheet has no intangible assets, so the note may not exist.
      when(boxRetriever.ac42()).thenReturn(AC42(None))
      when(boxRetriever.ac43()).thenReturn(AC43(None))
      AC115(Some(10)).validate(boxRetriever) shouldBe Set(CtValidation(None, "error.balanceSheet.intangibleAssetsNote.cannot.exist"))
    }

    "validate successfully if nothing is wrong" in {
      setUpMocks()
      AC115(Some(10)).validate(boxRetriever) shouldBe Set.empty
    }
  }
}
| hmrc/ct-calculations | src/test/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC115AbridgedSpec.scala | Scala | apache-2.0 | 2,559 |
package chana.xpath
import chana.Clear
import chana.Delete
import chana.Entity
import chana.Insert
import chana.InsertAll
import chana.InsertAllJson
import chana.InsertJson
import chana.Select
import chana.SelectAvro
import chana.SelectJson
import chana.Update
import chana.UpdateJson
import chana.xpath
import scala.util.Failure
import scala.util.Success
import scala.util.Try
/** Adds the XPath query/update protocol to an [[Entity]] actor.
  *
  * Every handled message follows the same pattern: refresh the idle timeout,
  * evaluate the XPath operation against the entity's current `record`, then
  * either commit the produced actions (mutating operations), reply with the
  * (optionally encoded) selection result, or log the failure and send the
  * `Failure` back to the requester. The shared failure/commit plumbing lives
  * in the private helpers below, removing the former per-case duplication.
  */
trait XPathBehavior extends Entity {

  def xpathBehavior: Receive = {
    case Select(_, path) =>
      resetIdleTimeout()
      val commander = sender()
      withResult(xpath.select(record, path), commander ! _) { ctxs =>
        commander ! Success(ctxs)
      }

    case SelectAvro(_, path) =>
      resetIdleTimeout()
      val commander = sender()
      withResult(xpath.select(record, path), commander ! _) { ctxs =>
        // Avro-encode every selected value; a single failed encoding fails the
        // whole reply. On success the sender receives Success(List[Array[Byte]]).
        replyResult(commander ! _, Try {
          ctxs.map { ctx => encoderDecoder.avroEncode(ctx.value, ctx.schema).get }
        })
      }

    case SelectJson(_, path) =>
      resetIdleTimeout()
      val commander = sender()
      withResult(xpath.select(record, path), commander ! _) { ctxs =>
        // JSON-encode every selected value; sender receives Success(List[Array[Byte]]).
        replyResult(commander ! _, Try {
          ctxs.map { ctx => encoderDecoder.jsonEncode(ctx.value, ctx.schema).get }
        })
      }

    case Update(_, path, value) =>
      resetIdleTimeout()
      val commander = sender()
      withResult(xpath.update(record, path, value), commander ! _) { actions =>
        commit(id, actions, commander)
      }

    case UpdateJson(_, path, value) =>
      resetIdleTimeout()
      val commander = sender()
      withResult(xpath.updateJson(record, path, value), commander ! _) { actions =>
        commit(id, actions, commander)
      }

    case Insert(_, path, value) =>
      resetIdleTimeout()
      val commander = sender()
      withResult(xpath.insert(record, path, value), commander ! _) { actions =>
        commit(id, actions, commander)
      }

    case InsertJson(_, path, value) =>
      resetIdleTimeout()
      val commander = sender()
      withResult(xpath.insertJson(record, path, value), commander ! _) { actions =>
        commit(id, actions, commander)
      }

    case InsertAll(_, path, values) =>
      resetIdleTimeout()
      val commander = sender()
      withResult(xpath.insertAll(record, path, values), commander ! _) { actions =>
        commit(id, actions, commander)
      }

    case InsertAllJson(_, path, values) =>
      resetIdleTimeout()
      val commander = sender()
      withResult(xpath.insertAllJson(record, path, values), commander ! _) { actions =>
        commit(id, actions, commander)
      }

    case Delete(_, path) =>
      resetIdleTimeout()
      val commander = sender()
      withResult(xpath.delete(record, path), commander ! _) { actions =>
        commit(id, actions, commander)
      }

    case Clear(_, path) =>
      resetIdleTimeout()
      val commander = sender()
      withResult(xpath.clear(record, path), commander ! _) { actions =>
        commit(id, actions, commander)
      }
  }

  /** Runs `onSuccess` with the unwrapped value; on failure, logs the error and
    * sends the `Failure` itself back through `reply`.
    */
  private def withResult[A](result: Try[A], reply: Any => Unit)(onSuccess: A => Unit): Unit =
    result match {
      case Success(value) => onSuccess(value)
      case x @ Failure(ex) =>
        log.error(ex, ex.getMessage)
        reply(x)
    }

  /** Sends the `Try` back as-is on success; on failure, logs the error and
    * sends the `Failure` — so the requester always receives a `Try` wrapper.
    */
  private def replyResult(reply: Any => Unit, result: Try[_]): Unit =
    result match {
      case xs: Success[_] => reply(xs)
      case x @ Failure(ex) =>
        log.error(ex, ex.getMessage)
        reply(x)
    }
}
| hustnn/chana | src/main/scala/chana/xpath/XPathBehavior.scala | Scala | apache-2.0 | 4,494 |
package controllers
import play.api._
import play.api.mvc._
import models.Task
import repositories.SprintRepo
import play.api.libs.json.Json
import util.JsonConversions._
/** HTTP endpoints for tasks. */
object TaskController extends Controller {

  /** Placeholder listing endpoint; currently replies 200 with an empty body. */
  def list = Action {
    Ok
  }

  /** Looks up the sprint with the given id and renders it as JSON. */
  def json(id: Long) = Action {
    Ok(Json.toJson(SprintRepo.findById(id)))
  }
}
} | soupytwist/knit | app/controllers/TaskController.scala | Scala | gpl-3.0 | 357 |
/*
* Copyright (c) 2015-2017 Lymia Alusyia <lymia@lymiahugs.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package moe.lymia.mppatch.ui
import java.io.InputStreamReader
import java.nio.charset.StandardCharsets
import java.text.MessageFormat
import java.util.{Locale, Properties}
import moe.lymia.mppatch.util.io.IOUtils
import scala.collection.JavaConverters._
/** Locale-aware message lookup backed by a key -> pattern map.
  *
  * Message patterns are compiled with [[java.text.MessageFormat]] lazily and
  * memoised per key — including negative lookups (missing keys cache as None).
  */
case class I18N(locale: Locale, map: Map[String, String]) {
  // Memoises compiled formats; a cached None records that the key is unknown.
  private val messageFormatCache = new collection.mutable.HashMap[String, Option[MessageFormat]]

  /** Returns the (cached) compiled format for `key`, if a translation exists. */
  def getFormat(key: String) =
    messageFormatCache.getOrElseUpdate(key, map.get(key).map(new MessageFormat(_, locale)))

  /** True when a translation exists for `key`. */
  def hasKey(key: String) = map.contains(key)

  /** Formats the translation for `key` with `args`; unknown keys render as
    * "<key>" so missing translations are visible rather than silent.
    */
  def apply(key: String, args: Any*) = getFormat(key) match {
    case Some(format) => format.format(args.toArray)
    case None         => s"<$key>"
  }
}
object I18N {

  /** Loads the given properties resource (UTF-8) into a key -> value map.
    *
    * A special `includes` key may list comma-separated resource paths that are
    * loaded first (recursively), so the including file's own entries win on
    * key collisions. Keys are trimmed; the `includes` entry itself is dropped.
    */
  def loadI18NData(sourceFile: String): Map[String, String] = {
    val prop = new Properties()
    val reader = new InputStreamReader(IOUtils.getResource(sourceFile), StandardCharsets.UTF_8)
    // Close the reader even if parsing fails (it previously leaked on error).
    try prop.load(reader)
    finally reader.close()

    val includes = prop.getProperty("includes")
    val includeData = if(includes != null && includes.trim.nonEmpty) {
      includes.trim.split(",").map(x => loadI18NData(x.trim)).reduce(_ ++ _)
    } else Map()
    includeData ++ prop.asScala.filter(_._1 != "includes").map(x => x.copy(_1 = x._1.trim, _2 = x._2))
  }

  private def defaultLocale = Locale.US

  // Resource naming scheme: text/i18n_<language>_<COUNTRY|generic>.properties
  private def sourceFile(locale: Locale, generic: Boolean) =
    s"text/i18n_${locale.getLanguage}_${if(generic) "generic" else locale.getCountry}.properties"

  /** Strings for one locale variant, or an empty map when the resource is absent. */
  private def getSingleLocaleStrings(locale: Locale, generic: Boolean): Map[String, String] = {
    val file = sourceFile(locale, generic)
    if(IOUtils.resourceExists(file)) loadI18NData(file) else Map()
  }

  // Layered lookup, most specific last so it wins on key collisions:
  // default-generic < default-country < target-generic < target-country.
  private def getLocaleStrings(locale: Locale): Map[String, String] =
    getSingleLocaleStrings(defaultLocale, true) ++ getSingleLocaleStrings(defaultLocale, false) ++
    getSingleLocaleStrings(locale      , true) ++ getSingleLocaleStrings(locale      , false)

  def apply(locale: Locale) = new I18N(locale, getLocaleStrings(locale))
}
} | Lymia/CivV_Mod2DLC | src/main/scala/moe/lymia/mppatch/ui/I18N.scala | Scala | mit | 3,184 |
package com.github.dtaniwaki.akka_pusher
import com.github.dtaniwaki.akka_pusher.PusherModels.ChannelData
import com.github.dtaniwaki.akka_pusher.attributes.{ PusherChannelsAttributes, PusherChannelAttributes }
import spray.json.JsValue
/** Message protocol of the Pusher actor: one case class per REST operation. */
object PusherMessages {
  // Trigger a single event on a channel; `socketId` excludes that socket from delivery.
  case class TriggerMessage(
    channel: String,
    event: String,
    message: JsValue,
    socketId: Option[String] = None)

  @deprecated("TriggerMessage will be used for BatchTriggerMessage. It will be removed in v0.3", "0.2.3")
  case class BatchTriggerMessage(
    channel: String,
    event: String,
    message: JsValue,
    socketId: Option[String] = None)

  // Query a single channel, optionally requesting extra attributes.
  case class ChannelMessage(
    channelName: String,
    attributes: Seq[PusherChannelAttributes.Value] = Seq())

  object ChannelMessage {
    @deprecated("Set the attributes without option and make it PusherChannelAttributes enumeration sequence instead. It will be removed in v0.3", "0.2.3")
    def apply(channel: String, attributes: Option[Seq[String]]): ChannelMessage = {
      // Legacy shim: translate optional attribute names into enumeration values.
      new ChannelMessage(channel, attributes.getOrElse(Seq()).map(PusherChannelAttributes.withName(_)))
    }
  }

  // Query all channels whose names start with `prefixFilter`.
  case class ChannelsMessage(
    prefixFilter: String,
    attributes: Seq[PusherChannelsAttributes.Value] = Seq())

  object ChannelsMessage {
    @deprecated("Set the attributes without option and make it PusherChannelsAttributes enumeration sequence instead. It will be removed in v0.3", "0.2.3")
    def apply(prefixFilter: String, attributes: Option[Seq[String]]): ChannelsMessage = {
      // Legacy shim: translate optional attribute names into enumeration values.
      new ChannelsMessage(prefixFilter, attributes.getOrElse(Seq()).map(PusherChannelsAttributes.withName(_)))
    }
  }

  // List users currently subscribed to a (presence) channel.
  case class UsersMessage(
    channel: String)

  // Authenticate a socket's subscription; `data` carries presence-channel user info.
  case class AuthenticateMessage(
    socketId: String,
    channel: String,
    data: Option[ChannelData[JsValue]] = None)

  // Validate an incoming webhook signature against the request body.
  case class ValidateSignatureMessage(
    key: String,
    signature: String,
    body: String)

  // Internal timer tick that flushes the accumulated batch-trigger buffer.
  case class BatchTriggerTick()
}
| dtaniwaki/akka-pusher | src/main/scala/com/github/dtaniwaki/akka_pusher/PusherMessages.scala | Scala | mit | 1,937 |
package com.airbnb.common.ml.strategy.params
import com.airbnb.common.ml.strategy.config.TrainingOptions
import com.airbnb.common.ml.strategy.data.BaseBinarySample
// use tanh function and assume BinaryScoringSample.x is between 0~1
// Strategy parameterised as score = (1 + a * tanh(b * x + c)) * pivot, with
// params = Array(a, b, c) and BinaryScoringSample.x assumed to lie in 0..1.
case class BaseParam(params: Array[Double] = Array()) extends StrategyParams[BaseBinarySample] {

  /** Creates a copy of this strategy carrying the updated parameter vector. */
  override def apply(update: Array[Double]): StrategyParams[BaseBinarySample] = {
    BaseParam(update)
  }

  /** Uses the configured defaults when a full (a, b, c) triple is provided,
    * otherwise falls back to the library defaults.
    */
  override def getDefaultParams(trainingOptions: TrainingOptions): StrategyParams[BaseBinarySample] = {
    if (trainingOptions.default.length == 3) {
      BaseParam(trainingOptions.default.toArray)
    } else {
      BaseParam.getDefault
    }
  }

  /** score(x) = (1 + a * tanh(b*x + c)) * pivot */
  override def score(example: BaseBinarySample): Double = {
    (1 + params(0) * math.tanh(probOffset(example))) * example.scoringPivot
  }

  // The tanh argument: b * x + c.
  private def probOffset(example: BaseBinarySample): Double = {
    example.x * params(1) + params(2)
  }

  /** Gradient of the score w.r.t. (a, b, c), scaled by the incoming `grad`.
    *
    * With p = b*x + c:
    *   d/da = tanh(p)
    *   d/dc = a * sech^2(p)
    *   d/db = a * sech^2(p) * x  =  gradC * x
    */
  override def computeGradient(grad: Double, example: BaseBinarySample): Array[Double] = {
    val prob = probOffset(example)
    val gradA = grad * math.tanh(prob)
    // sech(x) = 1 / cosh(x), so sech^2(p) = 1 / cosh(p)^2
    val cosh = math.cosh(prob)
    val gradC = grad * params(0) / (cosh * cosh)
    // Chain rule: d(p)/d(b) = x. The previous code multiplied by params(1)
    // (the parameter b itself), which is not the derivative of p w.r.t. b.
    val gradB = gradC * example.x
    Array(gradA, gradB, gradC)
  }
}
object BaseParam {
  /** Library default (a, b, c) = (0.2, 20, -12): a +/-20% band whose tanh
    * transition is centred where b*x + c = 0, i.e. around x = 0.6.
    * A fresh array is allocated on every call so instances never share state.
    */
  def getDefault: BaseParam = BaseParam(Array(0.2, 20.0, -12.0))
}
| airbnb/aerosolve | airlearner/airlearner-strategy/src/main/scala/com/airbnb/common/ml/strategy/params/BaseParam.scala | Scala | apache-2.0 | 1,381 |
package com.github.luzhuomi.regex.pderiv
/** The external AST
*
*/
/** The external AST of a parsed regular expression. */
object ExtPattern {
  // Source span of a node: (start, end), w.r.t. the right-most character.
  type Loc = (Int, Int)

  sealed trait EPat
  case class EEmpty(loc:Loc) extends EPat // empty sequence
  case class EGroupNonMarking(p:EPat,loc:Loc) extends EPat // non marking group (:?r)
  case class EGroup(p:EPat,loc:Loc) extends EPat // the marking group (r)
  case class EOr(ps:List[EPat],loc:Loc) extends EPat // choice r|r
  case class EConcat(ps:List[EPat],loc:Loc) extends EPat // concatenation rr
  case class EOpt(p:EPat, greed:Boolean, loc:Loc) extends EPat // option r?
  case class EPlus(p:EPat, greed:Boolean, loc:Loc) extends EPat // one or more r+
  case class EStar(p:EPat, greed:Boolean, loc:Loc) extends EPat // zero or more r*
  case class EBound(p:EPat, lb:Int, ub:Option[Int], greed:Boolean, loc:Loc) extends EPat // repeat r{1:10}
  case class ECarat(loc:Loc) extends EPat // the ^
  case class EDollar(loc:Loc) extends EPat // the $
  case class EDot(loc:Loc) extends EPat // the any char .
  case class EAny(cs:List[Char],loc:Loc) extends EPat // the character class [ a-z ]
  case class ENoneOf(cs:List[Char],loc:Loc) extends EPat // the negative character class [^a-z]
  case class EEscape(c:Char,loc:Loc) extends EPat // the backslash character
  case class EChar(c:Char,loc:Loc) extends EPat // the non-escaped character

  /** True when the pattern contains at least one marking group `(r)` anywhere
    * in its subtree; non-marking groups only count via their contents.
    */
  def hasGroup(p: EPat): Boolean = p match {
    case EGroup(_, _)               => true
    case EGroupNonMarking(inner, _) => hasGroup(inner)
    case EOr(inners, _)             => inners exists hasGroup
    case EConcat(inners, _)         => inners exists hasGroup
    case EOpt(inner, _, _)          => hasGroup(inner)
    case EPlus(inner, _, _)         => hasGroup(inner)
    case EStar(inner, _, _)         => hasGroup(inner)
    case EBound(inner, _, _, _, _)  => hasGroup(inner)
    // Leaves carry no groups; listed explicitly to keep the match exhaustive.
    case EEmpty(_) | ECarat(_) | EDollar(_) | EDot(_)
       | EAny(_, _) | ENoneOf(_, _) | EEscape(_, _) | EChar(_, _) => false
  }
}
} | luzhuomi/scala-pderiv | src/main/scala/com/github/luzhuomi/regex/pderiv/ExtPattern.scala | Scala | apache-2.0 | 2,066 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.selfservicetimetopay.models
import timetopaytaxpayer.cor.model.Debit
/** Wraps the debits a taxpayer owes and exposes their combined amount. */
final case class CalculatorAmountsDue(amountsDue: Seq[Debit]) {
  /** Sum of all individual debit amounts; zero for an empty sequence. */
  def total: BigDecimal = amountsDue.foldLeft(BigDecimal(0))(_ + _.amount)
}
| hmrc/self-service-time-to-pay-frontend | app/uk/gov/hmrc/selfservicetimetopay/models/CalculatorAmountsDue.scala | Scala | apache-2.0 | 818 |
package blended.streams.dispatcher.internal.builder
import akka.NotUsed
import akka.stream._
import akka.stream.scaladsl.GraphDSL.Implicits._
import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, Merge}
import blended.container.context.api.ContainerContext
import blended.streams.FlowProcessor
import blended.streams.dispatcher.internal._
import blended.streams.jms.{JmsDeliveryMode, JmsEnvelopeHeader}
import blended.streams.message.FlowMessage.FlowMessageProps
import blended.streams.message.{FlowEnvelope, FlowEnvelopeLogger, FlowMessage}
import blended.streams.transaction._
import blended.streams.worklist._
import blended.util.logging.{LogLevel, Logger}
import blended.util.RichTry._
// Dispatcher-specific failure types; each carries its context in the message
// so the envelope's exception alone explains what went wrong.
class MismatchedEnvelopeException(id : String)
  extends Exception(s"Worklist event [$id] couldn't find the corresponding envelope")

class MissingResourceType(msg : FlowMessage)
  extends Exception(s"Missing ResourceType in [$msg] ")

class IllegalResourceType(msg : FlowMessage, rt : String)
  extends Exception(s"Illegal ResourceType [$rt] in [$msg]")

class MissingOutboundRouting(rt : String)
  extends Exception(s"At least one Outbound route must be configured for ResourceType [$rt]")

class MissingContextObject(key : String, clazz : String)
  extends Exception(s"Missing context object [$key], expected type [$clazz]")

class JmsDestinationMissing(env : FlowEnvelope, outboundId : String)
  extends Exception(s"Unable to resolve JMS Destination for [${env.id}] in [$outboundId]")
/** Assembles the dispatcher's akka-streams blueprint: inbound processing and
  * fanout ([[core]]), outbound sending ([[outbound]]), worklist tracking
  * ([[worklistEventHandler]]), error routing ([[errorHandler]]) and the
  * combined [[dispatcher]] shape that emits [[FlowTransactionEvent]]s.
  */
case class DispatcherBuilder(
  ctCtxt : ContainerContext,
  dispatcherCfg: ResourceTypeRouterConfig,
  sendFlow : Flow[FlowEnvelope, FlowEnvelope, NotUsed],
  envLogger : FlowEnvelopeLogger
)(implicit val bs : DispatcherBuilderSupport) extends JmsEnvelopeHeader {

  private[this] val classLogger : Logger = Logger[DispatcherBuilder]

  /** Inbound + fanout stage.
    *
    * Outlets: 0 = fanned-out envelopes ready for sending,
    * 1 = the WorklistStarted event, 2 = envelopes carrying an exception.
    */
  def core(): Graph[FanOutShape3[FlowEnvelope, FlowEnvelope, WorklistEvent, FlowEnvelope], NotUsed] = {
    GraphDSL.create() { implicit builder =>

      // This is where we pick up the messages from the source, populate the headers
      // and perform initial checks if the message can be processed
      val processInbound = builder.add(DispatcherInbound(dispatcherCfg, ctCtxt, envLogger))

      // The fanout step will produce one envelope per outbound config of the resource type
      // sent in the message. The envelope context will contain the config for the outbound
      // branch and the overall resource type config.
      // Each envelope will also contain the destination routing.
      // The step will also emit a WorklistStarted event if the calculation of the
      // fanout steps was successfull.
      // The step will emit an error envelope if an exception was thrown.
      val processFanout = builder.add(DispatcherFanout(dispatcherCfg, ctCtxt, envLogger).build())

      // The error splitter pushes all envelopes that have an exception defined to the error sink
      // and all messages without an exception defined to the normal sink
      val errorSplitter = builder.add(FlowProcessor.partition[FlowEnvelope](_.exception.isEmpty))

      // wire up the steps
      processInbound ~> processFanout.in
      processFanout.out0 ~> errorSplitter.in

      new FanOutShape3(
        processInbound.in,
        errorSplitter.out0, // Normal outcome
        processFanout.out1, // WorklistStarted event
        errorSplitter.out1 // Outcome with Exception
      )
    }
  }

  // We try to process the outbound message using the injected outbound flow
  // and try to trans to resulting FlowEnvelope into a worklist.
  // If the worklist creation yields an exception, a FlowEnvelope with
  // exception will be passed downstream.
  // If after the oubound flow the envelope is marked with an exception,
  // We will generate a worklist failed event, otherwise we will generate
  // a Worklist completed event with the in bound envelope.
  def outbound() : Graph[FanOutShape2[FlowEnvelope, WorklistEvent, FlowEnvelope], NotUsed] = {

    GraphDSL.create() { implicit b =>
      val outbound = b.add(sendFlow)

      // Translate each sent envelope into a worklist step event; an exception
      // on the envelope marks the step as Failed rather than Completed.
      val toWorklist = b.add(Flow.fromFunction[FlowEnvelope, Either[FlowEnvelope, WorklistEvent]] { env =>
        try {
          val worklist = bs.worklist(env).unwrap
          val event : WorklistEvent = env.exception match {
            case None => WorklistStepCompleted(worklist = worklist, state = WorklistStateCompleted)
            case Some(_) => WorklistStepCompleted(worklist = worklist, state = WorklistStateFailed)
          }
          Right(event)
        } catch {
          case t : Throwable => Left(env.withException(t))
        }
      })

      // This error happens if for some reason we cannot create the worklist for the worklist event
      val transformError = b.add(FlowProcessor.splitEither[FlowEnvelope, WorklistEvent]())

      outbound ~> toWorklist ~> transformError.in

      new FanOutShape2(
        outbound.in,
        transformError.out1, // A WorklistEvent signalling either Completed or Failed
        transformError.out0 // An exceptional outcome while creating the worklist
      )
    }
  }

  /** Extracts the envelopes carried by the event's worklist items; items
    * without an envelope yield an error envelope (defensive — should not happen).
    */
  private[builder] def eventEnvelopes(worklistEvent : WorklistEvent) : Seq[FlowEnvelope] = {
    val result = worklistEvent.worklist.items match {
      // Should not happen
      case Seq() =>
        Seq(
          FlowEnvelope(FlowMessage.noProps, worklistEvent.worklist.id)
            .withException(new MismatchedEnvelopeException(worklistEvent.worklist.id))
        )
      case s =>
        s.map {
          case flowItem : FlowWorklistItem => flowItem.env
          // Should not happen
          case _ => FlowEnvelope(FlowMessage.noProps, worklistEvent.worklist.id)
            .withException(new MismatchedEnvelopeException(worklistEvent.worklist.id))
        }
    }

    envLogger.underlying.debug(s"Found worklist envelopes : [$result]")
    result
  }

  /** Collects the branch ids of the given envelopes (envelopes without the
    * branch header are skipped).
    */
  private[builder] def branchIds(envelopes : Seq[FlowEnvelope]) : Seq[String] =
    envelopes.map(_.header[String](bs.headerConfig.headerBranch)).filter(_.isDefined).map(_.get)

  /** Maps a worklist event onto the transaction event (if any) that must be
    * published for it; the original event is passed through unchanged.
    */
  private[builder] def transactionUpdate(event : WorklistEvent) : (WorklistEvent, Option[FlowTransactionEvent]) = {

    // Reuse the first item's message headers as the transaction properties.
    val props : FlowMessageProps = event.worklist.items match {
      case Seq() => FlowMessage.noProps
      case h :: _ => h match {
        case flowItem : FlowWorklistItem => flowItem.env.flowMessage.header
        case _ => FlowMessage.noProps
      }
    }

    val transEvent : Option[FlowTransactionEvent] = event match {
      // The started event will just update the FlowTransaction with a new worklist
      case started : WorklistStarted =>
        Some(FlowTransactionUpdate(
          transactionId = started.worklist.id,
          properties = props,
          updatedState = WorklistStateStarted,
          branchIds = branchIds(eventEnvelopes(event)):_*
        ))

      // Worklist Termination does nothing for completed worklists,
      // for failed worklists it produces a Transaction failed update
      case term : WorklistTerminated =>
        if (term.state == WorklistStateCompleted) {
          val envelopes = eventEnvelopes(term)
            // We only send transaction updates for a completed worklist, if auto completion is set to true ...
            .filter { _.header[Boolean](bs.headerAutoComplete).getOrElse(true) }
            // ... AND the bridge outbound destination lies within the internal JMS provider
            // (if the message goes to external, the final bridge send will complete the transaction
            .filter { env =>
              (env.header[String](bs.headerBridgeVendor), env.header[String](bs.headerBridgeProvider)) match {
                case (Some(v), Some(p)) => dispatcherCfg.providerRegistry.jmsProvider(v, p).exists(_.internal)
                case (_, _) => false
              }
            }

          if (envelopes.isEmpty) {
            envLogger.underlying.debug(s"No item envelopes found for [${event.worklist.id}]")
            None
          } else {
            Some(FlowTransactionUpdate(term.worklist.id, props, WorklistStateCompleted, branchIds(envelopes):_*))
          }
        } else {
          Some(FlowTransactionFailed(event.worklist.id, props, term.reason.map(_.getMessage())))
        }

      // Completed worklist steps do nothing
      case step : WorklistStepCompleted =>
        envLogger.underlying.debug(s"No transaction event for completed worklist [${event.worklist.id}]")
        None
    }

    envLogger.underlying.debug(s"Transaction update for worklist event [${event.worklist.id}] is [$transEvent]")
    (event, transEvent)
  }

  /** Acknowledges the first envelope of the event's worklist (when it requires
    * an acknowledgement) and returns it; an empty worklist yields an error
    * envelope carrying a [[MismatchedEnvelopeException]].
    */
  private[builder] def acknowledge(event : WorklistEvent) : FlowEnvelope = {
    eventEnvelopes(event) match {
      case Seq() =>
        FlowEnvelope(FlowMessage.noProps, event.worklist.id).withException(new MismatchedEnvelopeException(event.worklist.id))
      case h :: _ =>
        if (h.requiresAcknowledge) {
          classLogger.debug(s"Acknowledging envelope [${h.id}]")
          h.acknowledge()
        }
        h
    }
  }

  /** Worklist tracking stage.
    *
    * Inlet: worklist events. Outlets: 0 = derived transaction events,
    * 1 = envelopes whose acknowledgement surfaced an exception.
    */
  def worklistEventHandler() : Graph[FanOutShape2[WorklistEvent, FlowTransactionEvent, FlowEnvelope], NotUsed] = {
    GraphDSL.create() { implicit b =>
      // The worklist Manager will track currently open worklist events and emit accumulated Worklist Events
      // The worklist manager will produce Worklist started and Worklist Terminated events
      val wlManager = b.add(WorklistManager.flow("worklistMgr", envLogger).named("worklistMgr"))

      // We process the outcome of the worklist manager and transform it into Transaction events
      val processEvent = b.add(Flow.fromFunction[WorklistEvent, (WorklistEvent, Option[FlowTransactionEvent])](transactionUpdate))

      // if processEvent did come back with a TransactionEvent, this will be passed downstream
      val branches = b.add(
        Broadcast[(WorklistEvent, Option[FlowTransactionEvent])](2).named("wlBranch")
      )

      // This will generate a FlowTransactionEvent if necessary
      val processTrans = b.add(Flow[(WorklistEvent, Option[FlowTransactionEvent])]
        .map(_._2).named("selectTrans")
        .filter(_.isDefined).named("hasUpdate")
        .map(_.get).named("getTrans"))

      // For completed worklist we will acknowledge the envelope and capture exceptions
      val processWorklist = b.add(
        Flow[(WorklistEvent, Option[FlowTransactionEvent])]
          .map(_._1)
          .filter(_.state == WorklistStateCompleted)
          .map(acknowledge)
          .filter(_.exception.isDefined)
      )

      wlManager ~> processEvent ~> branches
      branches.out(0) ~> processTrans
      branches.out(1) ~> processWorklist

      new FanOutShape2(wlManager.in, processTrans.out, processWorklist.out)
    }
  }

  /** Routes error envelopes to the error destination of their source JMS
    * provider (persistently), acknowledges them and emits a transaction-failed
    * event for each; unroutable errors are logged and still acknowledged.
    */
  def errorHandler() : Flow[FlowEnvelope, FlowTransactionEvent, NotUsed] = {

    val g : Graph[FlowShape[FlowEnvelope, FlowEnvelope], NotUsed] = GraphDSL.create() { implicit b =>
      import GraphDSL.Implicits._

      // Resolve the source provider's error destination and set the bridge
      // headers; on any resolution failure the envelope is passed on as-is.
      val routeError = b.add(Flow.fromFunction[FlowEnvelope, FlowEnvelope] { env =>
        try {
          val vendor = env.header[String](srcVendorHeader(bs.headerConfig.prefix)).get
          val provider = env.header[String](srcProviderHeader(bs.headerConfig.prefix)).get
          val errProvider = dispatcherCfg.providerRegistry.jmsProvider(vendor, provider).get
          val dest = errProvider.errors.asString

          envLogger.logEnv(env, LogLevel.Debug, s"Routing error envelope [${env.id}] to [$vendor:$provider:$dest]")

          env
            .withHeader(deliveryModeHeader(bs.headerConfig.prefix), JmsDeliveryMode.Persistent.asString).get
            .withHeader(bs.headerBridgeVendor, vendor).get
            .withHeader(bs.headerBridgeProvider, provider).get
            .withHeader(bs.headerBridgeDest, dest).get
            .withHeader(bs.headerConfig.headerState, FlowTransactionStateFailed.toString).get
        } catch {
          case t : Throwable =>
            envLogger.logEnv(env, LogLevel.Warn, s"Failed to resolve error routing for envelope [${env.id}] : [${t.getMessage()}]")
            env
        }
      })

      val sendError = b.add(sendFlow)

      val ackError = b.add(Flow.fromFunction[FlowEnvelope, FlowEnvelope] { env =>
        envLogger.logEnv(env, LogLevel.Debug, s"Acknowledging error envelope [${env.id}]")
        env.acknowledge()
        env
      })

      routeError ~> sendError ~> ackError

      FlowShape(routeError.in, ackError.out)
    }

    Flow.fromGraph(g)
      .via(Flow.fromFunction[FlowEnvelope, FlowTransactionEvent] { env =>
        val event = FlowTransactionFailed(env.id, env.flowMessage.header, env.exception.map(_.getMessage()))
        envLogger.logEnv(env, LogLevel.Debug, s"Transaction event : [$event]")
        event
      })
  }

  // The dispatcher processes a stream of inbound FlowEnvelopes and generates TransactionEvents to update
  // a monitored FlowTransaction.
  //
  // This happens in the core graph :
  // 1. Generate a Transaction Started Event
  // 2. Perform the default inbound processing and fanout
  //    after this step we may end up with multiple FlowEnvelopes per inbound message
  // 3. The total of all generated fanout messages is called the worklist
  // 4. Update the open transaction with the worklist, so that it can track the individual branches.
  // 5. Any exceptions are available on the error outlet

  // This happens in the out graph:
  // 5. For each fanout message
  //    a. process the message via the outbound flow
  //    b. if processed successfully mark the worklist item as completed
  //    c. if processed with exception, mark the worklist item as failed
  //    d. for an internal error, generate mark the envelope as exceptional

  // This happens in the dispatcher :
  // 6. The worklist manager collects all worklist events, updates the worklist state and pushes the resulting state downstream
  // 7. The error handler collects all envelopes with exceptions and generates a transaction event from here
  // 8. The worklist eventhandler generates a transaction failed event for each failed worklist
  // 9. The combined transaction events are passed down stream
  def dispatcher() : Graph[FlowShape[FlowEnvelope, FlowTransactionEvent], NotUsed] = {

    GraphDSL.create() { implicit b =>

      // of course we start with the core
      val callCore = b.add(core())

      // we do need a send processor
      val callSend = b.add(outbound())

      // We will collect the errors here
      val error = b.add(Merge[FlowEnvelope](3))

      // We will collect Worklist Events here
      val event = b.add(Merge[WorklistEvent](2))

      // we will collect transaction events here
      val trans = b.add(Merge[FlowTransactionEvent](2))

      // the normal outcome of core goes to send
      callCore.out0 ~> callSend.in

      // The worklist started event goes to the event channel
      callCore.out1 ~> event

      // errors go the error channel
      callCore.out2 ~> error

      // the normal output of the sendflow are worklist events, so they go to the event channel
      val evtHandler = b.add(worklistEventHandler())
      callSend.out0 ~> event // Worklist events

      // All worklist events will go to the worklist manager, so that we can track worklist completions or failures
      event ~> evtHandler.in
      evtHandler.out0 ~> trans.in(0) // worklist events to FlowTransactions
      evtHandler.out1 ~> error // Error channel - only occurs when envelope matching worklist branch can't be resolved

      // if envelopes marked with an exception again got to the error channel
      callSend.out1 ~> error

      val errHandler = b.add(errorHandler())

      // generate and collect transaction failures for exceptions
      error.out ~> errHandler ~> trans.in(1)

      FlowShape(callCore.in, trans.out)
    }
  }
}
| woq-blended/blended | blended.streams.dispatcher/src/main/scala/blended/streams/dispatcher/internal/builder/DispatcherBuilder.scala | Scala | apache-2.0 | 15,879 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.cluster.test
import akka.actor.{ActorSelection, ActorSystem, PoisonPill, Terminated}
import akka.testkit.TestKit
import com.typesafe.config.{Config, ConfigFactory}
import com.typesafe.scalalogging.LazyLogging
import org.apache.commons.io.FileUtils
import org.apache.curator.test.TestingServer
import org.squbs.cluster.ZkCluster
import org.squbs.cluster.test.ZkClusterMultiActorSystemTestKit._
import java.io.File
import java.net.{InetAddress, ServerSocket}
import scala.annotation.tailrec
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.{implicitConversions, postfixOps}
import scala.util.{Failure, Random, Success, Try}
/**
 * Multi-`ActorSystem` test harness for `ZkCluster`: boots `clusterSize` remote
 * actor systems against a shared in-process ZooKeeper (see the companion
 * object) and lets tests kill, restart and randomly pick cluster members.
 *
 * Concrete suites must supply `timeout` (wait bound for member termination)
 * and `clusterSize` (number of member systems).
 */
abstract class ZkClusterMultiActorSystemTestKit(systemName: String)
  extends TestKit(ActorSystem(systemName, akkaRemoteConfig)) with LazyLogging {

  /** Maximum time to wait for a member system's cluster actor to terminate. */
  val timeout: FiniteDuration

  /** Number of member actor systems making up the test cluster. */
  val clusterSize: Int

  // Live member systems, keyed by their generated system name.
  private var actorSystems = Map.empty[String, ActorSystem]

  /** ZkCluster extension of every live member system, keyed by system name. */
  def zkClusterExts: Map[String, ZkCluster] = actorSystems map { case (name, sys) => name -> ZkCluster(sys) }

  /**
   * Starts `clusterSize` member systems and watches each member's cluster
   * actor so `killSystem` can observe its `Terminated` message.
   */
  def startCluster(): Unit = {
    val startTime = System.currentTimeMillis
    logger.info("Starting cluster of size {}", clusterSize)
    Random.setSeed(System.nanoTime)
    actorSystems = (0 until clusterSize) map { num =>
      val sysName: String = systemName(num)
      logger.info("Starting actor system {}", sysName)
      sysName -> ActorSystem(sysName, akkaRemoteConfig withFallback zkConfig)
    } toMap

    // Touch each (lazily created) cluster actor and register a death watch.
    zkClusterExts foreach { case (_, ext) =>
      watch(ext.zkClusterActor)
    }
    Thread.sleep(500) // give the members a moment to connect to ZooKeeper
    logger.info("Finished starting cluster in {} ms", System.currentTimeMillis - startTime)
  }

  // Per-run ZooKeeper namespace keeps repeated/concurrent runs isolated.
  protected lazy val zkConfig = ConfigFactory.parseString(
    s"""
       |zkCluster {
       | connectionString = "127.0.0.1:$ZOOKEEPER_DEFAULT_PORT"
       | namespace = "zkclustersystest-${System.currentTimeMillis()}"
       | segments = 1
       |}
    """.stripMargin)

  /** Kills every member system, then terminates the test-kit's own system. */
  def shutdownCluster(): Unit = {
    logger.info("Shutting down cluster")
    zkClusterExts.foreach(ext => killSystem(ext._1))
    Await.ready(system.terminate(), timeout)
  }

  /** Deterministic name of the `num`-th member system. */
  protected def systemName(num: Int): String = s"member-$num"

  /** Resolves a ZkCluster's cluster actor through this test-kit's own system. */
  implicit protected def zkCluster2Selection(zkCluster: ZkCluster): ActorSelection =
    system.actorSelection(zkCluster.zkClusterActor.path.toStringWithAddress(zkCluster.zkAddress))

  /** Stops the named member and blocks until its cluster actor terminates. */
  def killSystem(sysName: String): Unit = {
    // Terminate the actor system only once ZkCluster finished its own shutdown.
    zkClusterExts(sysName).addShutdownListener(_ => actorSystems(sysName).terminate())
    zkClusterExts(sysName).zkClusterActor ! PoisonPill
    expectMsgType[Terminated](timeout)
    actorSystems -= sysName
    logger.info("system {} got killed", sysName)
  }

  /** Restarts a previously killed member system under the same name. */
  def bringUpSystem(sysName: String): Unit = {
    actorSystems += sysName -> ActorSystem(sysName, akkaRemoteConfig withFallback zkConfig)
    watch(zkClusterExts(sysName).zkClusterActor)
    logger.info("system {} is up", sysName)
    Thread.sleep(timeout.toMillis / 5) // allow the rejoined member to settle
  }

  /**
   * Picks a live member system uniformly at random, optionally excluding one
   * name. Recurses until a live, non-excluded candidate is found, so callers
   * must not exclude the only remaining system.
   */
  @tailrec final def pickASystemRandomly(exclude: Option[String] = None): String = {
    // Random.nextInt(bound) is uniform and avoids the Math.abs(Int.MinValue)
    // corner case (Math.abs(Int.MinValue) is negative) of the previous
    // abs-then-modulo formulation.
    val candidate: String = systemName(Random.nextInt(clusterSize))
    actorSystems get candidate match {
      case Some(_) if !exclude.contains(candidate) => candidate
      case _ => pickASystemRandomly(exclude)
    }
  }
}
/**
 * Companion holding shared test infrastructure.
 *
 * NOTE: initializing this object has side effects — it wipes the local
 * `zookeeper` directory, starts an in-process ZooKeeper `TestingServer` on
 * `ZOOKEEPER_DEFAULT_PORT` and sleeps `ZOOKEEPER_STARTUP_TIME` ms to let it
 * come up. This happens once, on first reference from any suite.
 */
object ZkClusterMultiActorSystemTestKit {

  /** Milliseconds to wait after launching the embedded ZooKeeper. */
  val ZOOKEEPER_STARTUP_TIME = 5000

  /** Fixed port of the embedded ZooKeeper used by all member systems. */
  val ZOOKEEPER_DEFAULT_PORT = 8085

  val zookeeperDir = new File("zookeeper")
  FileUtils.deleteQuietly(zookeeperDir)
  new TestingServer(ZOOKEEPER_DEFAULT_PORT, zookeeperDir, true)
  Thread.sleep(ZOOKEEPER_STARTUP_TIME)

  /**
   * Asks the OS for a currently free ephemeral port by binding to port 0.
   * The probe socket is always closed, even if reading the port fails
   * (the previous Try/match variant leaked the socket on failure).
   */
  private def nextPort: Int = {
    val s = new ServerSocket(0)
    try s.getLocalPort
    finally s.close()
  }

  /** Remote/artery config for one member system; each call binds a fresh port. */
  def akkaRemoteConfig: Config = ConfigFactory.parseString(
    s"""
       |akka {
       | actor {
       | provider = "akka.remote.RemoteActorRefProvider"
       | serializers {
       | kryo = "io.altoo.akka.serialization.kryo.KryoSerializer"
       | }
       | serialization-bindings {
       | "org.squbs.cluster.ZkMessages" = kryo
       | }
       | }
       | remote {
       | enabled-transports = ["akka.remote.netty.tcp"]
       | artery {
       | transport = tcp # See Selecting a transport below
       | canonical.hostname = ${InetAddress.getLocalHost.getHostAddress}
       | canonical.port = $nextPort
       | }
       | log-received-messages = on
       | log-sent-messages = on
       | command-ack-timeout = 3 s
       | retry-window = 1s
       | gate-invalid-addresses-for = 1s
       | transport-failure-detector {
       | heartbeat-interval = 2s
       | acceptable-heartbeat-pause = 5s
       | }
       | watch-failure-detector {
       | heartbeat-interval = 2s
       | acceptable-heartbeat-pause = 5s
       | threshold = 10.0
       | }
       | }
       |}
    """.stripMargin)
}
| akara/squbs | squbs-zkcluster/src/test/scala/org/squbs/cluster/test/ZkClusterMultiActorSystemTestKit.scala | Scala | apache-2.0 | 5,700 |
package io.vertx.asyncsql.test.mysql
import io.vertx.asyncsql.test.{BaseSqlTests, SqlTestVerticle}
import org.junit.Test
import org.vertx.scala.core.json._
import org.vertx.testtools.VertxAssert._
/** MySQL-specific integration tests layered on the shared [[BaseSqlTests]] suite. */
class MySqlTest extends SqlTestVerticle with BaseSqlTests {
  // Drop leftovers from a previous run before each test.
  override def doBefore() = expectOk(raw("DROP TABLE IF EXISTS `some_test`"))
  // Select the MySQL connection profile for the async SQL module.
  override def getConfig() = baseConf.putString("connection", "MySQL")
  // DDL for the date-handling tests; the column type is parameterized so the
  // same table serves DATE and DATETIME cases.
  override def createDateTable(dateDataType: String) = s"""CREATE TABLE date_test (
  | id INT NOT NULL AUTO_INCREMENT,
  | test_date $dateDataType,
  | PRIMARY KEY(id)
  |);""".stripMargin
  // DDL for the generic CRUD tests driven by BaseSqlTests.
  override def createTableStatement(tableName: String) = s"""CREATE TABLE $tableName (
  | id INT NOT NULL AUTO_INCREMENT,
  | name VARCHAR(255),
  | email VARCHAR(255) UNIQUE,
  | is_male BOOLEAN,
  | age INT,
  | money FLOAT,
  | wedding_date DATE,
  | PRIMARY KEY (id)
  |);""".stripMargin
  // Secondary table used by join tests in the shared suite.
  override def createTableTestTwo: String = """CREATE TABLE test_two (
  | id SERIAL,
  | name VARCHAR(255),
  | one_id BIGINT UNSIGNED NOT NULL,
  | PRIMARY KEY (id)
  |);""".stripMargin
  // Inserted DATETIME values round-trip with a midnight time component.
  @Test
  def datetimeTest(): Unit =
    (for {
      (m, r) <- sendOk(raw("DROP TABLE IF EXISTS date_test"))
      (msg, r2) <- sendOk(raw(createDateTable("datetime")))
      (msg, insertReply) <- sendOk(raw("INSERT INTO date_test (test_date) VALUES ('2015-04-04');"))
      (msg, reply) <- sendOk(raw("SELECT test_date FROM date_test"))
    } yield {
      val receivedFields = reply.getArray("fields")
      assertEquals(Json.arr("test_date"), receivedFields)
      logger.info("date is: " + reply.getArray("results").get[JsonArray](0).get[String](0));
      assertEquals("2015-04-04T00:00:00.000", reply.getArray("results").get[JsonArray](0).get[String](0))
      testComplete()
    }) recover failedTest
  // MySQL's special zero date ('0000-00-00') must come back as SQL NULL.
  @Test
  def zeroDateTest(): Unit = (for {
    _ <- setupTableTest()
    (msg, insertReply) <- sendOk(raw("INSERT INTO some_test (name, wedding_date) VALUES ('tester', '0000-00-00');"))
    (msg, reply) <- sendOk(prepared("SELECT wedding_date FROM some_test WHERE name=?", Json.arr("tester")))
  } yield {
    val receivedFields = reply.getArray("fields")
    logger.info(reply.getArray("results").get[JsonArray](0).get[String](0))
    assertEquals(Json.arr("wedding_date"), receivedFields)
    assertEquals(null, reply.getArray("results").get[JsonArray](0).get[String](0))
    testComplete()
  }) recover failedTest
}
package connectionpools
/**
 * JDBC datasource, driver and Hibernate dialect class names for the databases
 * covered by the connection-pool benchmarks. Values are fully-qualified class
 * names passed to pool/ORM configuration.
 *
 * @author Sunghyouk Bae sunghyouk.bae@gmail.com
 */
object JdbcDrivers {
  // H2 (embedded)
  val DATASOURCE_CLASS_H2: String = "org.h2.jdbcx.JdbcDataSource"
  val DRIVER_CLASS_H2: String = "org.h2.Driver"
  val DIALECT_H2: String = "org.hibernate.dialect.H2Dialect"
  // HSQLDB (embedded)
  val DATASOURCE_CLASS_HSQL: String = "org.hsqldb.jdbc.JDBCDataSource"
  val DRIVER_CLASS_HSQL: String = "org.hsqldb.jdbcDriver"
  val DIALECT_HSQL: String = "org.hibernate.dialect.HSQLDialect"
  // MySQL. NOTE(review): these are the Connector/J 5.x class names; 8.x
  // renamed the driver to com.mysql.cj.jdbc.Driver — confirm driver version
  // before upgrading.
  val DATASOURCE_CLASS_MYSQL: String = "com.mysql.jdbc.jdbc2.optional.MysqlDataSource"
  val DRIVER_CLASS_MYSQL: String = "com.mysql.jdbc.Driver"
  val DIALECT_MYSQL: String = "org.hibernate.dialect.MySQL5InnoDBDialect"
  // MariaDB (MySQL-compatible; shares the MySQL dialect)
  val DRIVER_CLASS_MARIADB: String = "org.mariadb.jdbc.Driver"
  // PostgreSQL
  val DATASOURCE_CLASS_POSTGRESQL: String = "org.postgresql.ds.PGSimpleDataSource"
  val DRIVER_CLASS_POSTGRESQL: String = "org.postgresql.Driver"
  val DIALECT_POSTGRESQL: String = "org.hibernate.dialect.PostgreSQL82Dialect"
}
| debop/connectionpool-benchmark | src/main/scala/connectionpools/JdbcDrivers.scala | Scala | apache-2.0 | 1,004 |
package com.github.mdr.mash.repl.browser
import com.github.mdr.mash.view.model.ValueModel
import com.github.mdr.mash.runtime.MashValue
/**
 * Browser state for viewing a single (non-tabular) value.
 *
 * @param model              display model wrapping the value being browsed
 * @param path               expression path that produced the value
 * @param expressionStateOpt in-progress expression edit, if the user is typing one
 */
case class ValueBrowserState(model: ValueModel,
                             path: String,
                             expressionStateOpt: Option[ExpressionState] = None) extends BrowserState {
  // The underlying runtime value comes straight from the display model.
  override def rawValue: MashValue = model.rawValue
  override def withPath(newPath: String): ValueBrowserState = copy(path = newPath)
  // A plain value has no selectable sub-item, so there is never a selection.
  override def selectionInfoOpt: Option[SelectionInfo] = None
  def withExpressionState(expressionStateOpt: Option[ExpressionState]): BrowserState =
    copy(expressionStateOpt = expressionStateOpt)
}
| mdr/mash | src/main/scala/com/github/mdr/mash/repl/browser/ValueBrowserState.scala | Scala | mit | 672 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogx.cogmath.hypergraph
import scala.collection.mutable.ArrayBuffer
import cogx.cogmath.collection.{IdentityHashSet, IdentityHashMap}
/** A Hypergraph is similar to a directed graph in that it is composed of
* hypernodes interconnected with hyperedges. There are two main differences:
*
* 1. A Hyperedge has a single source but one or more sinks. This is very much
* like a signal in an electronic circuit.
*
* 2. A Hypernode can be driven by multiple input hyperedges and may drive
* multiple hyperedges, also very much like a component in an electronic
* circuit. The input and output hyperedges are ordered, in the same order
* that they are connected to the hypernode.
*
* @author Greg Snider
*/
/** A directed hypergraph: hypernodes connected by hyperedges, where each
 *  hyperedge has a single source and one or more sinks (like a signal in a
 *  circuit) and each hypernode carries ordered input and output edges.
 */
private[cogx] class Hypergraph extends Iterable[Hypernode] {
  // Identity-based membership: structurally equal nodes remain distinct.
  private val hypernodes = new IdentityHashSet[Hypernode]

  /** Add `node` to the hypergraph. */
  private[hypergraph] def +=(node: Hypernode): Unit = {
    hypernodes += node
  }

  /** Remove `node` from the hypergraph. */
  private[hypergraph] def -=(node: Hypernode): Unit = {
    hypernodes -= node
  }

  /** Get all nodes in the hypergraph. */
  def nodes: Seq[Hypernode] = hypernodes.toSeq

  /** Get nodes which are inputs to the hypergraph (no incoming hyperedges). */
  def inputs: Seq[Hypernode] = hypernodes.toSeq.filter(_.inputs.isEmpty)

  /** Get nodes which are outputs from the hypergraph (no outgoing hyperedges). */
  def outputs: Seq[Hypernode] = hypernodes.toSeq.filter(_.outputs.isEmpty)

  /** Get an iterator over all nodes in the hypergraph. */
  def iterator: Iterator[Hypernode] = hypernodes.iterator

  /** Get all distinct edges in the hypergraph. An identity set deduplicates
   *  edges that touch several nodes.
   */
  def edges: Seq[Hyperedge] = {
    val collected = new IdentityHashSet[Hyperedge]
    for (node <- this; edge <- node.inputs ++ node.outputs)
      collected += edge
    collected.toSeq
  }

  /** Check if `node` is owned by this hypergraph. */
  def contains(node: Hypernode): Boolean =
    hypernodes contains node

  /** Number of hypernodes in the hypergraph. */
  override def size: Int = hypernodes.size
}
| hpe-cct/cct-core | src/main/scala/cogx/cogmath/hypergraph/Hypergraph.scala | Scala | apache-2.0 | 2,704 |
/*
* # Trove
*
* This file is part of Trove - A FREE desktop budgeting application that
* helps you track your finances, FREES you from complex budgeting, and
* enables you to build your TROVE of savings!
*
* Copyright © 2016-2021 Eric John Fredericks.
*
* Trove is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Trove is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Trove. If not, see <http://www.gnu.org/licenses/>.
*/
package trove.core.infrastructure.persist.lock
import java.io.{File, IOException, RandomAccessFile}
import java.nio.channels.{FileChannel, FileLock}
import grizzled.slf4j.Logging
import trove.constants._
import trove.core.infrastructure.persist.lock.ProjectLock.EnvironmentOps
import trove.exceptional.{SystemError, SystemException}
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}
// Only useful where the underlying database can be opened by different programs concurrently.
// NOTE that this class does not clean up after itself if the JVM receives a SIGTERM.
// Cleanup should be handled externally.
private[persist] object ProjectLock {
  // Extension appended to the project name to form the lock file name.
  val LockfileSuffix: String = ".lck"
  // Channel + acquired OS file lock held while the project is open.
  private case class Resources(channel: LockableChannel, lock: FileLock)
  /** Lock file name for the given project. */
  def constructLockfileName(projectName: String): String = s"$projectName$LockfileSuffix"
  /** Thin wrapper over a file's channel so tests can substitute lock behavior. */
  class LockableChannel(raf: RandomAccessFile) {
    val channel: FileChannel = raf.getChannel
    @throws(clazz = classOf[IOException])
    def tryLock(): FileLock = channel.tryLock()
    def close(): Unit = channel.close()
  }
  /** Seam for file/channel creation; overridden in tests to inject failures. */
  trait EnvironmentOps extends LockResourceReleaseErrorHandling {
    def newFile(directory: File, filename: String): File
    def newRandomAccessFile(file: File): RandomAccessFile
    def newChannel(raf: RandomAccessFile): LockableChannel
  }
  /** Production factory wiring real java.io file and channel operations. */
  def apply(projectsHomeDir: File, projectName: String): ProjectLock = new ProjectLock(projectsHomeDir, projectName) with EnvironmentOps {
    override def newFile(directory: File, filename: String): File = new File(directory, filename)
    override def newRandomAccessFile(file: File): RandomAccessFile = new RandomAccessFile(file: File, "rw")
    override def newChannel(raf: RandomAccessFile): LockableChannel = new LockableChannel(raf)
  }
}
/**
 * Single-instance lock for a project, backed by an OS file lock on
 * `<projectsHomeDir>/<projectName>.lck`. Acquire with [[lock]], release with
 * [[release]]. Not cleaned up on SIGTERM — external cleanup is expected.
 */
private[persist] class ProjectLock(projectsHomeDir: File, projectName: String) extends LockResourceReleaseErrorHandling with Logging {
  self: EnvironmentOps =>
  import ProjectLock._
  val lockfileName: String = constructLockfileName(projectName)
  private[this] val file = newFile(projectsHomeDir, lockfileName)
  // Held channel+lock while locked; volatile so isLocked is safe across threads.
  @volatile private[this] var resources: Option[Resources] = None
  /**
   * Attempts to acquire the lock. Returns Failure with a SystemException if
   * another process holds it, or with the underlying error otherwise; any
   * partially acquired resources are released on failure.
   */
  def lock(): Try[Unit] = Try {
    logger.debug(s"Trying to acquire single application instance lock: .${file.getAbsolutePath}")
    val raf = newRandomAccessFile(file)
    val channel = newChannel(raf)
    var tryLockSuccess = false
    var tryLockResult: FileLock = null
    try {
      tryLockResult = channel.tryLock()
      tryLockSuccess = true
    } finally {
      // Close the channel if tryLock threw or returned null (lock held elsewhere).
      if(!tryLockSuccess || tryLockResult == null) {
        close(channel = channel)
      }
    }
    // tryLock returns null when another process holds the lock.
    Option(tryLockResult).fold[Try[Resources]] {
      logger.warn(s"Failed to acquire project lock for $projectName (${file.getAbsolutePath})")
      SystemError(s"""Another instance of $ApplicationName currently has project "$projectName" open.""")
    } {
      lock =>
        logger.debug(s"Acquired single application instance lock for project $projectName.")
        val res = Resources(channel, lock)
        resources = Some(res)
        Success(res)
    }
  }.flatten.map(_ =>
    ()).recoverWith {
    // Pass domain errors through; wrap anything else as a system error.
    case e: SystemException =>
      Failure(e)
    case NonFatal(e) =>
      SystemError("Error acquiring project lock", e)
  }.recoverWith {
    // On any failure, release whatever was acquired before propagating.
    case e =>
      release()
      Failure(e)
  }
  /** True while the lock is held by this instance. */
  def isLocked: Boolean = resources.nonEmpty
  /** Releases the lock, closes the channel and deletes the lock file (best effort). */
  def release(): Unit =
    resources.map{ res =>
      val closeResult = close(res.channel, Some(res.lock), Some(file))
      handleLockResourceReleaseError(closeResult)
      closeResult.map { _ =>
        resources = None
      }
    }
  // Releases the given lock (if any), closes the channel, deletes the file
  // (if any). Close/delete errors are reported via the error handler; only
  // the lock-release outcome is returned.
  private[this] def close(channel: LockableChannel, lock: Option[FileLock] = None, file: Option[File] = None):
  Try[Unit] = {
    val result = lock.fold[Try[Unit]](Success(())) { lck: FileLock =>
      Try(lck.release())
    }
    handleLockResourceReleaseError(Try(channel.close()))
    file.foreach(f =>
      handleLockResourceReleaseError(Try(f.delete())))
    result
  }
}
| emanchgo/trove | src/main/scala/trove/core/infrastructure/persist/lock/ProjectLock.scala | Scala | gpl-3.0 | 4,936 |
package workflow
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.scalatest.FunSuite
import pipelines.Logging
/** Checks that an Estimator fitted on training data applies the learned
 *  transform to test data, both for a raw RDD and for pipeline-produced data.
 */
class EstimatorSuite extends FunSuite with PipelineContext with Logging {
  test("Estimator fit RDD") {
    sc = new SparkContext("local", "test")

    // Learns "add the first training element" as the fitted transform.
    val addFirstEstimator = new Estimator[Int, Int] {
      def fit(data: RDD[Int]): Transformer[Int, Int] = {
        val offset = data.first()
        Transformer(value => value + offset)
      }
    }

    val training = sc.parallelize(Seq(32, 94, 12))
    val evalData = sc.parallelize(Seq(42, 58, 61))

    val fitted = addFirstEstimator.withData(training)
    val produced = fitted.apply(evalData).get().collect().toSeq
    assert(produced === Seq(42 + 32, 58 + 32, 61 + 32))
  }

  test("Estimator fit Pipeline Data") {
    sc = new SparkContext("local", "test")

    // Training data first flows through a doubling transformer, so the
    // learned offset is twice the first raw element.
    val doubler = Transformer[Int, Int](_ * 2)
    val addFirstEstimator = new Estimator[Int, Int] {
      def fit(data: RDD[Int]): Transformer[Int, Int] = {
        val offset = data.first()
        Transformer(value => value + offset)
      }
    }

    val training = sc.parallelize(Seq(32, 94, 12))
    val evalData = sc.parallelize(Seq(42, 58, 61))

    val fitted = addFirstEstimator.withData(doubler(training))
    val produced = fitted.apply(evalData).get().collect().toSeq
    assert(produced === Seq(42 + 64, 58 + 64, 61 + 64))
  }
}
| tomerk/keystone | src/test/scala/workflow/EstimatorSuite.scala | Scala | apache-2.0 | 1,332 |
package coursier
import java.util.concurrent.ConcurrentHashMap
import coursier.cache.{Cache, CacheLogger}
import coursier.core.{Activation, DependencySet, Exclusions, Reconciliation}
import coursier.error.ResolutionError
import coursier.error.conflict.UnsatisfiedRule
import coursier.graph.ReverseModuleTree
import coursier.internal.Typelevel
import coursier.params.{Mirror, MirrorConfFile, ResolutionParams}
import coursier.params.rule.{Rule, RuleResolution}
import coursier.util._
import coursier.util.Monad.ops._
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.language.higherKinds
import dataclass.{data, since}
/**
 * Builder-style entry point for dependency resolution: configure dependencies,
 * repositories, mirrors and [[ResolutionParams]], then run via [[io]] /
 * [[ioWithConflicts]]. Immutable; `with*` methods return updated copies
 * (generated by the `@data` macro).
 */
@data class Resolve[F[_]](
  cache: Cache[F],
  dependencies: Seq[Dependency] = Nil,
  repositories: Seq[Repository] = Resolve.defaultRepositories,
  mirrorConfFiles: Seq[MirrorConfFile] = Resolve.defaultMirrorConfFiles,
  mirrors: Seq[Mirror] = Nil,
  resolutionParams: ResolutionParams = ResolutionParams(),
  throughOpt: Option[F[Resolution] => F[Resolution]] = None,
  transformFetcherOpt: Option[ResolutionProcess.Fetch[F] => ResolutionProcess.Fetch[F]] = None,
  @since
  initialResolution: Option[Resolution] = None
)(implicit
  sync: Sync[F]
) {
  private def S = sync
  // Optional hook applied to each resolution step; identity when unset.
  private def through: F[Resolution] => F[Resolution] =
    throughOpt.getOrElse(identity[F[Resolution]])
  // Optional wrapper around the metadata fetcher; identity when unset.
  private def transformFetcher: ResolutionProcess.Fetch[F] => ResolutionProcess.Fetch[F] =
    transformFetcherOpt.getOrElse(identity[ResolutionProcess.Fetch[F]])
  /** Root dependencies with global exclusions applied and per-dependency
   *  exclusion sets merged and minimized.
   */
  def finalDependencies: Seq[Dependency] = {
    val filter = Exclusions(resolutionParams.exclusions)
    dependencies
      .filter { dep =>
        filter(dep.module.organization, dep.module.name)
      }
      .map { dep =>
        dep.withExclusions(
          Exclusions.minimize(dep.exclusions ++ resolutionParams.exclusions)
        )
      }
  }
  /** Repositories after mirror substitution. */
  def finalRepositories: F[Seq[Repository]] =
    allMirrors.map(Mirror.replace(repositories, _))
  def addDependencies(dependencies: Dependency*): Resolve[F] =
    withDependencies(this.dependencies ++ dependencies)
  def addRepositories(repositories: Repository*): Resolve[F] =
    withRepositories(this.repositories ++ repositories)
  /** Drops all mirrors and mirror config files. */
  def noMirrors: Resolve[F] =
    withMirrors(Nil).withMirrorConfFiles(Nil)
  def addMirrors(mirrors: Mirror*): Resolve[F] =
    withMirrors(this.mirrors ++ mirrors)
  def addMirrorConfFiles(mirrorConfFiles: MirrorConfFile*): Resolve[F] =
    withMirrorConfFiles(this.mirrorConfFiles ++ mirrorConfFiles)
  def mapResolutionParams(f: ResolutionParams => ResolutionParams): Resolve[F] =
    withResolutionParams(f(resolutionParams))
  /** Composes `f` after any previously installed resolution hook. */
  def transformResolution(f: F[Resolution] => F[Resolution]): Resolve[F] =
    withThroughOpt(Some(throughOpt.fold(f)(_ andThen f)))
  def noTransformResolution(): Resolve[F] =
    withThroughOpt(None)
  def withTransformResolution(fOpt: Option[F[Resolution] => F[Resolution]]): Resolve[F] =
    withThroughOpt(fOpt)
  /** Composes `f` after any previously installed fetcher wrapper. */
  def transformFetcher(f: ResolutionProcess.Fetch[F] => ResolutionProcess.Fetch[F]): Resolve[F] =
    withTransformFetcherOpt(Some(transformFetcherOpt.fold(f)(_ andThen f)))
  def noTransformFetcher(): Resolve[F] =
    withTransformFetcherOpt(None)
  def withTransformFetcher(fOpt: Option[ResolutionProcess.Fetch[F] => ResolutionProcess.Fetch[F]])
  : Resolve[F] =
    withTransformFetcherOpt(fOpt)
  // Explicit mirrors plus those parsed from mirror config files.
  private def allMirrors0 =
    mirrors ++ mirrorConfFiles.flatMap(_.mirrors())
  /** All mirrors, evaluated lazily in `F` (config files are read on demand). */
  def allMirrors: F[Seq[Mirror]] =
    S.delay(allMirrors0)
  // Builds the metadata fetcher from the cache and final repositories.
  private def fetchVia: F[ResolutionProcess.Fetch[F]] = {
    val fetchs = cache.fetchs
    finalRepositories.map(r => ResolutionProcess.fetch(r, fetchs.head, fetchs.tail)(S))
  }
  // Runs resolution, validates it, then applies the configured rules:
  // each rule may veto (error), record an unsatisfied conflict, or rewrite
  // the resolution (which is then re-run and re-validated). A final pass
  // checks every rule still holds.
  private def ioWithConflicts0(fetch: ResolutionProcess.Fetch[F])
  : F[(Resolution, Seq[UnsatisfiedRule])] = {
    val initialRes =
      Resolve.initialResolution(finalDependencies, resolutionParams, initialResolution)
    def run(res: Resolution): F[Resolution] = {
      val t = Resolve.runProcess(res, fetch, resolutionParams.maxIterations, cache.loggerOpt)(S)
      through(t)
    }
    // Fails fast on unfinished resolutions, download errors, or version conflicts.
    def validate0(res: Resolution): F[Resolution] =
      Resolve.validate(res).either match {
        case Left(errors) =>
          val err = ResolutionError.from(errors.head, errors.tail: _*)
          S.fromAttempt(Left(err))
        case Right(()) =>
          S.point(res)
      }
    def recurseOnRules(
      res: Resolution,
      rules: Seq[(Rule, RuleResolution)]
    ): F[(Resolution, List[UnsatisfiedRule])] =
      rules match {
        case Seq() =>
          S.point((res, Nil))
        case Seq((rule, ruleRes), t @ _*) =>
          rule.enforce(res, ruleRes) match {
            case Left(c) =>
              S.fromAttempt(Left(c))
            case Right(Left(c)) =>
              recurseOnRules(res, t).map {
                case (res0, conflicts) =>
                  (res0, c :: conflicts)
              }
            case Right(Right(None)) =>
              recurseOnRules(res, t)
            case Right(Right(Some(newRes))) =>
              run(newRes.withDependencySet(DependencySet.empty)).flatMap(validate0).flatMap {
                res0 =>
                  // FIXME check that the rule passes after it tried to address itself
                  recurseOnRules(res0, t)
              }
          }
      }
    def validateAllRules(res: Resolution, rules: Seq[(Rule, RuleResolution)]): F[Resolution] =
      rules match {
        case Seq() =>
          S.point(res)
        case Seq((rule, _), t @ _*) =>
          rule.check(res) match {
            case Some(c) =>
              S.fromAttempt(Left(c))
            case None =>
              validateAllRules(res, t)
          }
      }
    for {
      res0 <- run(initialRes)
      res1 <- validate0(res0)
      t <- recurseOnRules(res1, resolutionParams.actualRules)
      (res2, conflicts) = t
      _ <- validateAllRules(res2, resolutionParams.actualRules)
    } yield (res2, conflicts)
  }
  /** Runs resolution, also returning rules that were left unsatisfied. */
  def ioWithConflicts: F[(Resolution, Seq[UnsatisfiedRule])] =
    fetchVia.flatMap { f =>
      val fetchVia0 = transformFetcher(f)
      ioWithConflicts0(fetchVia0)
    }
  /** Runs resolution, discarding unsatisfied-rule information. */
  def io: F[Resolution] =
    ioWithConflicts.map(_._1)
}
object Resolve extends PlatformResolve {
  /** Resolve using the default (Task-based) cache. */
  def apply(): Resolve[Task] =
    Resolve(Cache.default)
  /** Convenience runners for `Resolve[Task]`. */
  implicit class ResolveTaskOps(private val resolve: Resolve[Task]) extends AnyVal {
    /** Starts resolution and returns the running Future. */
    def future()(implicit ec: ExecutionContext = resolve.cache.ec): Future[Resolution] =
      resolve.io.future()
    /** Blocks until done, capturing resolution errors as Left. */
    def either()(implicit
      ec: ExecutionContext = resolve.cache.ec
    ): Either[ResolutionError, Resolution] = {
      val f = resolve
        .io
        .map(Right(_))
        .handle { case ex: ResolutionError => Left(ex) }
        .future()
      Await.result(f, Duration.Inf)
    }
    /** Blocks until done, throwing on resolution errors. */
    def run()(implicit ec: ExecutionContext = resolve.cache.ec): Resolution = {
      val f = future()(ec)
      Await.result(f, Duration.Inf)
    }
  }
  // Builds the starting Resolution from the root dependencies and params:
  // forced Scala versions, typelevel/org overrides, per-module reconciliation,
  // OS/JDK activation and profile/property settings.
  private[coursier] def initialResolution(
    dependencies: Seq[Dependency],
    params: ResolutionParams = ResolutionParams(),
    initialResolutionOpt: Option[Resolution] = None
  ): Resolution = {
    import coursier.core.{Resolution => CoreResolution}
    val scalaOrg =
      if (params.typelevel) Organization("org.typelevel")
      else Organization("org.scala-lang")
    // Pin the core Scala modules to the selected Scala version, if requested.
    // Scala 3 ships different module names than Scala 2.
    val forceScalaVersions =
      if (params.doForceScalaVersion)
        if (params.selectedScalaVersion.startsWith("3"))
          Seq(
            Module(scalaOrg, ModuleName("scala3-library")) -> params.selectedScalaVersion,
            Module(scalaOrg, ModuleName("scala3-compiler")) -> params.selectedScalaVersion
          )
        else
          Seq(
            Module(scalaOrg, ModuleName("scala-library")) -> params.selectedScalaVersion,
            Module(scalaOrg, ModuleName("scala-compiler")) -> params.selectedScalaVersion,
            Module(scalaOrg, ModuleName("scala-reflect")) -> params.selectedScalaVersion,
            Module(scalaOrg, ModuleName("scalap")) -> params.selectedScalaVersion
          )
      else
        Nil
    // Compose the optional per-dependency rewrites into a single function.
    val mapDependencies = {
      val l = (if (params.typelevel) Seq(Typelevel.swap) else Nil) ++
        (if (params.doForceScalaVersion)
          Seq(CoreResolution.overrideScalaModule(params.selectedScalaVersion, scalaOrg))
        else Nil) ++
        (if (params.doOverrideFullSuffix)
          Seq(CoreResolution.overrideFullSuffix(params.selectedScalaVersion))
        else Nil)
      l.reduceOption((f, g) => dep => f(g(dep)))
    }
    // Module -> reconciliation strategy lookup, memoized per module since the
    // matcher scan can be called many times during resolution.
    val reconciliation: Option[Module => Reconciliation] = {
      val actualReconciliation = params.actualReconciliation
      if (actualReconciliation.isEmpty) None
      else
        Some {
          val cache = new ConcurrentHashMap[Module, Reconciliation]
          m =>
            val reconciliation = cache.get(m)
            if (reconciliation == null) {
              val rec = actualReconciliation.find(_._1.matches(m)) match {
                case Some((_, r)) => r
                case None => Reconciliation.Default
              }
              val prev = cache.putIfAbsent(m, rec)
              if (prev == null)
                rec
              else
                prev
            }
            else
              reconciliation
        }
    }
    val baseRes = initialResolutionOpt.getOrElse(Resolution())
    baseRes
      .withRootDependencies(dependencies)
      .withDependencySet(DependencySet.empty)
      .withForceVersions(params.forceVersion ++ forceScalaVersions)
      .withConflicts(Set.empty)
      .withFilter(Some((dep: Dependency) => params.keepOptionalDependencies || !dep.optional))
      .withReconciliation(reconciliation)
      .withOsInfo(
        params.osInfoOpt.getOrElse {
          if (params.useSystemOsInfo)
            // call from Sync[F].delay?
            Activation.Os.fromProperties(sys.props.toMap)
          else
            Activation.Os.empty
        }
      )
      .withJdkVersion(
        params.jdkVersionOpt.orElse {
          if (params.useSystemJdkVersion)
            // call from Sync[F].delay?
            sys.props.get("java.version").flatMap(coursier.core.Parse.version)
          else
            None
        }
      )
      .withUserActivations(
        if (params.profiles.isEmpty) None
        else Some(params.profiles.iterator.map(p =>
          if (p.startsWith("!")) p.drop(1) -> false else p -> true
        ).toMap)
      )
      .withMapDependencies(mapDependencies)
      .withExtraProperties(params.properties)
      .withForceProperties(params.forcedProperties)
      .withDefaultConfiguration(params.defaultConfiguration)
  }
  // Runs the resolution fixed-point process, optionally inside the cache
  // logger's lifecycle (so download progress is reported).
  private[coursier] def runProcess[F[_]](
    initialResolution: Resolution,
    fetch: ResolutionProcess.Fetch[F],
    maxIterations: Int = 200,
    loggerOpt: Option[CacheLogger] = None
  )(implicit S: Sync[F]): F[Resolution] = {
    val task = initialResolution
      .process
      .run(fetch, maxIterations)
    loggerOpt match {
      case None => task
      case Some(logger) => logger.using(task)
    }
  }
  /** Accumulates all validation failures: unfinished resolution, metadata
   *  download errors, and conflicting dependencies.
   */
  def validate(res: Resolution): ValidationNel[ResolutionError, Unit] = {
    val checkDone: ValidationNel[ResolutionError, Unit] =
      if (res.isDone)
        ValidationNel.success(())
      else
        ValidationNel.failure(new ResolutionError.MaximumIterationReached(res))
    val checkErrors: ValidationNel[ResolutionError, Unit] = res
      .errors
      .map {
        case ((module, version), errors) =>
          new ResolutionError.CantDownloadModule(res, module, version, errors)
      } match {
        case Seq() =>
          ValidationNel.success(())
        case Seq(h, t @ _*) =>
          ValidationNel.failures(h, t: _*)
      }
    val checkConflicts: ValidationNel[ResolutionError, Unit] =
      if (res.conflicts.isEmpty)
        ValidationNel.success(())
      else
        ValidationNel.failure(
          new ResolutionError.ConflictingDependencies(
            res,
            res.conflicts
          )
        )
    checkDone.zip(checkErrors, checkConflicts).map {
      case ((), (), ()) =>
    }
  }
}
| coursier/coursier | modules/coursier/shared/src/main/scala/coursier/Resolve.scala | Scala | apache-2.0 | 12,137 |
package use.basic.defmacro
import org.scalatest.FunSuite
/** Smoke test: invoking the macro-using method must simply not blow up. */
class DefMacroSample01UseTest extends FunSuite {
  test("DefMacroSample01Use#use1を呼ぶ") {
    val sample = new DefMacroSample01Use
    sample.use1()
  }
}
| thachi/scala-macro-sample | use/src/test/scala/use/basic/defmacro/DefMacroSample01UseTest.scala | Scala | apache-2.0 | 212 |
package io.transwarp.midas.constant.midas.params.data
/**
 * Parameter keys for the order-by operator's configuration.
 *
 * Created by linchen on 16-10-10.
 */
object OrderByParams {
  // Column to sort on.
  val orderByCol = "orderByCol"
  // Whether to sort in descending order.
  val descending = "descending"
}
| transwarpio/rapidminer | api-driver/src/main/scala/io/transwarp/midas/constant/midas/params/data/OrderByParams.scala | Scala | gpl-3.0 | 189 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.