code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
package com.azavea.gtfs
import com.github.nscala_time.time.Imports._
/**
 * Represents a stop time in a sequence of stops in a trip.
 *
 * @param stopId id of the stop being visited (GTFS `stop_id`)
 * @param tripId id of the trip this stop time belongs to (GTFS `trip_id`)
 * @param sequence position of this stop within the trip's stop sequence
 * @param arrivalTime arrival time as offset from midnight on a given day
 * @param departureTime departure time as offset from midnight on a given day
 * @param distanceTraveled how much of the trip LineString has been traveled, if known
 */
case class StopTimeRecord(
  stopId: String,
  tripId: String,
  sequence: Int,
  arrivalTime: Period,
  departureTime: Period,
  distanceTraveled: Option[Double] = None
)
| flibbertigibbet/open-transit-indicators | scala/gtfs/src/main/scala/com/azavea/gtfs/StopTimeRecord.scala | Scala | gpl-3.0 | 544 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.tools
import org.scalatest._
import javax.swing.Icon
import javax.swing.JOptionPane
/**
* An attempt to get a decent looking OptionPane.
*
* @author Bill Venners
*/
private[scalatest] class NarrowJOptionPane(message: Object, messageType: Int) extends JOptionPane(message, messageType) {
  /** Caps the characters per line so long messages wrap instead of producing one very wide dialog. */
  override def getMaxCharactersPerLineCount(): Int = {
    val maxCharactersPerLine = 60
    maxCharactersPerLine
  }
}
| travisbrown/scalatest | src/main/scala/org/scalatest/tools/NarrowJOptionPane.scala | Scala | apache-2.0 | 974 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import java.math.{BigDecimal, BigInteger}
import java.nio.ByteOrder
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import org.apache.parquet.column.Dictionary
import org.apache.parquet.io.api.{Binary, Converter, GroupConverter, PrimitiveConverter}
import org.apache.parquet.schema.{GroupType, MessageType, Type}
import org.apache.parquet.schema.OriginalType.{INT_32, LIST, UTF8}
import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.{BINARY, DOUBLE, FIXED_LEN_BYTE_ARRAY, INT32, INT64}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, DateTimeUtils, GenericArrayData}
import org.apache.spark.sql.catalyst.util.DateTimeUtils.SQLTimestamp
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
/**
* A [[ParentContainerUpdater]] is used by a Parquet converter to set converted values to some
* corresponding parent container. For example, a converter for a `StructType` field may set
* converted values to a [[InternalRow]]; or a converter for array elements may append converted
* values to an [[ArrayBuffer]].
*/
private[parquet] trait ParentContainerUpdater {
  /** Called before a record field is being converted */
  def start(): Unit = ()

  /** Called after a record field has been converted */
  def end(): Unit = ()

  /** Generic (boxing) setter; all the typed setters below delegate to it by default. */
  def set(value: Any): Unit = ()
  def setBoolean(value: Boolean): Unit = set(value)
  def setByte(value: Byte): Unit = set(value)
  def setShort(value: Short): Unit = set(value)
  def setInt(value: Int): Unit = set(value)
  def setLong(value: Long): Unit = set(value)
  def setFloat(value: Float): Unit = set(value)
  def setDouble(value: Double): Unit = set(value)
}
/** A no-op updater used for the root converter, which doesn't have a parent container. */
private[parquet] object NoopUpdater extends ParentContainerUpdater
/** Mixin for converters that expose the [[ParentContainerUpdater]] they feed converted values to. */
private[parquet] trait HasParentContainerUpdater {
  def updater: ParentContainerUpdater
}
/**
 * A convenient converter class for Parquet group types with a [[HasParentContainerUpdater]].
 * Concrete subclasses in this file supply the per-field converters and start/end behavior.
 */
private[parquet] abstract class ParquetGroupConverter(val updater: ParentContainerUpdater)
  extends GroupConverter with HasParentContainerUpdater
/**
 * Parquet converter for Parquet primitive types. Note that not all Spark SQL atomic types
 * are handled by this converter. Parquet primitive types are only a subset of those of Spark
 * SQL. For example, BYTE, SHORT, and INT in Spark SQL are all covered by INT32 in Parquet.
 *
 * Each `addX` callback simply forwards the decoded value to `updater`.
 */
private[parquet] class ParquetPrimitiveConverter(val updater: ParentContainerUpdater)
  extends PrimitiveConverter with HasParentContainerUpdater {

  override def addBoolean(value: Boolean): Unit = updater.setBoolean(value)
  override def addInt(value: Int): Unit = updater.setInt(value)
  override def addLong(value: Long): Unit = updater.setLong(value)
  override def addFloat(value: Float): Unit = updater.setFloat(value)
  override def addDouble(value: Double): Unit = updater.setDouble(value)
  // BINARY values are materialized as raw byte arrays.
  override def addBinary(value: Binary): Unit = updater.set(value.getBytes)
}
/**
* A [[ParquetRowConverter]] is used to convert Parquet records into Catalyst [[InternalRow]]s.
* Since Catalyst `StructType` is also a Parquet record, this converter can be used as root
* converter. Take the following Parquet type as an example:
* {{{
* message root {
* required int32 f1;
* optional group f2 {
* required double f21;
* optional binary f22 (utf8);
* }
* }
* }}}
* 5 converters will be created:
*
* - a root [[ParquetRowConverter]] for [[MessageType]] `root`, which contains:
* - a [[ParquetPrimitiveConverter]] for required [[INT_32]] field `f1`, and
* - a nested [[ParquetRowConverter]] for optional [[GroupType]] `f2`, which contains:
* - a [[ParquetPrimitiveConverter]] for required [[DOUBLE]] field `f21`, and
* - a [[ParquetStringConverter]] for optional [[UTF8]] string field `f22`
*
* When used as a root converter, [[NoopUpdater]] should be used since root converters don't have
* any "parent" container.
*
* @param schemaConverter A utility converter used to convert Parquet types to Catalyst types.
* @param parquetType Parquet schema of Parquet records
* @param catalystType Spark SQL schema that corresponds to the Parquet record type. User-defined
* types should have been expanded.
* @param updater An updater which propagates converted field values to the parent container
*/
private[parquet] class ParquetRowConverter(
    schemaConverter: ParquetSchemaConverter,
    parquetType: GroupType,
    catalystType: StructType,
    updater: ParentContainerUpdater)
  extends ParquetGroupConverter(updater) with Logging {

  assert(
    parquetType.getFieldCount == catalystType.length,
    s"""Field counts of the Parquet schema and the Catalyst schema don't match:
       |
       |Parquet schema:
       |$parquetType
       |Catalyst schema:
       |${catalystType.prettyJson}
     """.stripMargin)

  assert(
    !catalystType.existsRecursively(_.isInstanceOf[UserDefinedType[_]]),
    s"""User-defined types in Catalyst schema should have already been expanded:
       |${catalystType.prettyJson}
     """.stripMargin)

  logDebug(
    s"""Building row converter for the following schema:
       |
       |Parquet form:
       |$parquetType
       |Catalyst form:
       |${catalystType.prettyJson}
     """.stripMargin)

  /**
   * Updater used together with field converters within a [[ParquetRowConverter]]. It propagates
   * converted field values to the `ordinal`-th cell in `currentRow`.
   */
  private final class RowUpdater(row: InternalRow, ordinal: Int) extends ParentContainerUpdater {
    override def set(value: Any): Unit = row(ordinal) = value
    override def setBoolean(value: Boolean): Unit = row.setBoolean(ordinal, value)
    override def setByte(value: Byte): Unit = row.setByte(ordinal, value)
    override def setShort(value: Short): Unit = row.setShort(ordinal, value)
    override def setInt(value: Int): Unit = row.setInt(ordinal, value)
    override def setLong(value: Long): Unit = row.setLong(ordinal, value)
    override def setDouble(value: Double): Unit = row.setDouble(ordinal, value)
    override def setFloat(value: Float): Unit = row.setFloat(ordinal, value)
  }

  // Mutable row reused across records; `start()` resets it before each record is converted.
  private val currentRow = new SpecificInternalRow(catalystType.map(_.dataType))

  private val unsafeProjection = UnsafeProjection.create(catalystType)

  /**
   * The [[UnsafeRow]] converted from an entire Parquet record.
   */
  def currentRecord: UnsafeRow = unsafeProjection(currentRow)

  // Converters for each field.
  private val fieldConverters: Array[Converter with HasParentContainerUpdater] = {
    parquetType.getFields.asScala.zip(catalystType).zipWithIndex.map {
      case ((parquetFieldType, catalystField), ordinal) =>
        // Converted field value should be set to the `ordinal`-th cell of `currentRow`
        newConverter(parquetFieldType, catalystField.dataType, new RowUpdater(currentRow, ordinal))
    }.toArray
  }

  override def getConverter(fieldIndex: Int): Converter = fieldConverters(fieldIndex)

  override def end(): Unit = {
    // Notify every child updater first so composite values (arrays, maps, nested rows) are
    // finalized before the row is published to the parent updater.
    var i = 0
    while (i < currentRow.numFields) {
      fieldConverters(i).updater.end()
      i += 1
    }
    updater.set(currentRow)
  }

  override def start(): Unit = {
    // Null out every cell so fields absent from the Parquet record read back as null.
    var i = 0
    while (i < currentRow.numFields) {
      fieldConverters(i).updater.start()
      currentRow.setNullAt(i)
      i += 1
    }
  }

  /**
   * Creates a converter for the given Parquet type `parquetType` and Spark SQL data type
   * `catalystType`. Converted values are handled by `updater`.
   */
  private def newConverter(
      parquetType: Type,
      catalystType: DataType,
      updater: ParentContainerUpdater): Converter with HasParentContainerUpdater = {
    catalystType match {
      case BooleanType | IntegerType | LongType | FloatType | DoubleType | BinaryType =>
        new ParquetPrimitiveConverter(updater)

      case ByteType =>
        new ParquetPrimitiveConverter(updater) {
          override def addInt(value: Int): Unit =
            updater.setByte(value.asInstanceOf[ByteType#InternalType])
        }

      case ShortType =>
        new ParquetPrimitiveConverter(updater) {
          override def addInt(value: Int): Unit =
            updater.setShort(value.asInstanceOf[ShortType#InternalType])
        }

      // For INT32 backed decimals
      case t: DecimalType if parquetType.asPrimitiveType().getPrimitiveTypeName == INT32 =>
        new ParquetIntDictionaryAwareDecimalConverter(t.precision, t.scale, updater)

      // For INT64 backed decimals
      case t: DecimalType if parquetType.asPrimitiveType().getPrimitiveTypeName == INT64 =>
        new ParquetLongDictionaryAwareDecimalConverter(t.precision, t.scale, updater)

      // For BINARY and FIXED_LEN_BYTE_ARRAY backed decimals
      case t: DecimalType
        if parquetType.asPrimitiveType().getPrimitiveTypeName == FIXED_LEN_BYTE_ARRAY ||
           parquetType.asPrimitiveType().getPrimitiveTypeName == BINARY =>
        new ParquetBinaryDictionaryAwareDecimalConverter(t.precision, t.scale, updater)

      case t: DecimalType =>
        throw new RuntimeException(
          s"Unable to create Parquet converter for decimal type ${t.json} whose Parquet type is " +
            s"$parquetType. Parquet DECIMAL type can only be backed by INT32, INT64, " +
            "FIXED_LEN_BYTE_ARRAY, or BINARY.")

      case StringType =>
        new ParquetStringConverter(updater)

      case TimestampType =>
        // TODO Implements `TIMESTAMP_MICROS` once parquet-mr has that.
        new ParquetPrimitiveConverter(updater) {
          // Converts nanosecond timestamps stored as INT96
          override def addBinary(value: Binary): Unit = {
            assert(
              value.length() == 12,
              "Timestamps (with nanoseconds) are expected to be stored in 12-byte long binaries, " +
                s"but got a ${value.length()}-byte binary.")
            // INT96 layout: 8 bytes nanos-of-day followed by 4 bytes Julian day, little-endian.
            val buf = value.toByteBuffer.order(ByteOrder.LITTLE_ENDIAN)
            val timeOfDayNanos = buf.getLong
            val julianDay = buf.getInt
            updater.setLong(DateTimeUtils.fromJulianDay(julianDay, timeOfDayNanos))
          }
        }

      case DateType =>
        new ParquetPrimitiveConverter(updater) {
          override def addInt(value: Int): Unit = {
            // DateType is not specialized in `SpecificMutableRow`, have to box it here.
            updater.set(value.asInstanceOf[DateType#InternalType])
          }
        }

      // A repeated field that is neither contained by a `LIST`- or `MAP`-annotated group nor
      // annotated by `LIST` or `MAP` should be interpreted as a required list of required
      // elements where the element type is the type of the field.
      case t: ArrayType if parquetType.getOriginalType != LIST =>
        if (parquetType.isPrimitive) {
          new RepeatedPrimitiveConverter(parquetType, t.elementType, updater)
        } else {
          new RepeatedGroupConverter(parquetType, t.elementType, updater)
        }

      case t: ArrayType =>
        new ParquetArrayConverter(parquetType.asGroupType(), t, updater)

      case t: MapType =>
        new ParquetMapConverter(parquetType.asGroupType(), t, updater)

      case t: StructType =>
        new ParquetRowConverter(
          schemaConverter, parquetType.asGroupType(), t, new ParentContainerUpdater {
            // The nested converter reuses its `currentRow`, so copy before handing it upward.
            override def set(value: Any): Unit = updater.set(value.asInstanceOf[InternalRow].copy())
          })

      case t =>
        throw new RuntimeException(
          s"Unable to create Parquet converter for data type ${t.json} " +
            s"whose Parquet type is $parquetType")
    }
  }

  /**
   * Parquet converter for strings. A dictionary is used to minimize string decoding cost.
   */
  private final class ParquetStringConverter(updater: ParentContainerUpdater)
    extends ParquetPrimitiveConverter(updater) {

    // Pre-decoded dictionary entries, indexed by dictionary id; null until a dictionary is set.
    private var expandedDictionary: Array[UTF8String] = null

    override def hasDictionarySupport: Boolean = true

    override def setDictionary(dictionary: Dictionary): Unit = {
      this.expandedDictionary = Array.tabulate(dictionary.getMaxId + 1) { i =>
        UTF8String.fromBytes(dictionary.decodeToBinary(i).getBytes)
      }
    }

    override def addValueFromDictionary(dictionaryId: Int): Unit = {
      updater.set(expandedDictionary(dictionaryId))
    }

    override def addBinary(value: Binary): Unit = {
      // The underlying `ByteBuffer` implementation is guaranteed to be `HeapByteBuffer`, so here we
      // are using `Binary.toByteBuffer.array()` to steal the underlying byte array without copying
      // it.
      val buffer = value.toByteBuffer
      val offset = buffer.arrayOffset() + buffer.position()
      val numBytes = buffer.remaining()
      updater.set(UTF8String.fromBytes(buffer.array(), offset, numBytes))
    }
  }

  /**
   * Parquet converter for fixed-precision decimals.
   */
  private abstract class ParquetDecimalConverter(
      precision: Int, scale: Int, updater: ParentContainerUpdater)
    extends ParquetPrimitiveConverter(updater) {

    protected var expandedDictionary: Array[Decimal] = _

    override def hasDictionarySupport: Boolean = true

    override def addValueFromDictionary(dictionaryId: Int): Unit = {
      updater.set(expandedDictionary(dictionaryId))
    }

    // Converts decimals stored as INT32
    override def addInt(value: Int): Unit = {
      // The `: Long` ascription widens the Int so the INT64 path below is reused.
      addLong(value: Long)
    }

    // Converts decimals stored as INT64
    override def addLong(value: Long): Unit = {
      updater.set(decimalFromLong(value))
    }

    // Converts decimals stored as either FIXED_LENGTH_BYTE_ARRAY or BINARY
    override def addBinary(value: Binary): Unit = {
      updater.set(decimalFromBinary(value))
    }

    protected def decimalFromLong(value: Long): Decimal = {
      Decimal(value, precision, scale)
    }

    protected def decimalFromBinary(value: Binary): Decimal = {
      if (precision <= Decimal.MAX_LONG_DIGITS) {
        // Constructs a `Decimal` with an unscaled `Long` value if possible.
        val unscaled = ParquetRowConverter.binaryToUnscaledLong(value)
        Decimal(unscaled, precision, scale)
      } else {
        // Otherwise, resorts to an unscaled `BigInteger` instead.
        Decimal(new BigDecimal(new BigInteger(value.getBytes), scale), precision, scale)
      }
    }
  }

  private class ParquetIntDictionaryAwareDecimalConverter(
      precision: Int, scale: Int, updater: ParentContainerUpdater)
    extends ParquetDecimalConverter(precision, scale, updater) {

    override def setDictionary(dictionary: Dictionary): Unit = {
      this.expandedDictionary = Array.tabulate(dictionary.getMaxId + 1) { id =>
        decimalFromLong(dictionary.decodeToInt(id).toLong)
      }
    }
  }

  private class ParquetLongDictionaryAwareDecimalConverter(
      precision: Int, scale: Int, updater: ParentContainerUpdater)
    extends ParquetDecimalConverter(precision, scale, updater) {

    override def setDictionary(dictionary: Dictionary): Unit = {
      this.expandedDictionary = Array.tabulate(dictionary.getMaxId + 1) { id =>
        decimalFromLong(dictionary.decodeToLong(id))
      }
    }
  }

  private class ParquetBinaryDictionaryAwareDecimalConverter(
      precision: Int, scale: Int, updater: ParentContainerUpdater)
    extends ParquetDecimalConverter(precision, scale, updater) {

    override def setDictionary(dictionary: Dictionary): Unit = {
      this.expandedDictionary = Array.tabulate(dictionary.getMaxId + 1) { id =>
        decimalFromBinary(dictionary.decodeToBinary(id))
      }
    }
  }

  /**
   * Parquet converter for arrays. Spark SQL arrays are represented as Parquet lists. Standard
   * Parquet lists are represented as a 3-level group annotated by `LIST`:
   * {{{
   *   <list-repetition> group <name> (LIST) {            <-- parquetSchema points here
   *     repeated group list {
   *       <element-repetition> <element-type> element;
   *     }
   *   }
   * }}}
   * The `parquetSchema` constructor argument points to the outermost group.
   *
   * However, before this representation is standardized, some Parquet libraries/tools also use some
   * non-standard formats to represent list-like structures.  Backwards-compatibility rules for
   * handling these cases are described in Parquet format spec.
   *
   * @see https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#lists
   */
  private final class ParquetArrayConverter(
      parquetSchema: GroupType,
      catalystSchema: ArrayType,
      updater: ParentContainerUpdater)
    extends ParquetGroupConverter(updater) {

    private var currentArray: ArrayBuffer[Any] = _

    private val elementConverter: Converter = {
      val repeatedType = parquetSchema.getType(0)
      val elementType = catalystSchema.elementType

      // At this stage, we're not sure whether the repeated field maps to the element type or is
      // just the syntactic repeated group of the 3-level standard LIST layout. Take the following
      // Parquet LIST-annotated group type as an example:
      //
      //    optional group f (LIST) {
      //      repeated group list {
      //        optional group element {
      //          optional int32 element;
      //        }
      //      }
      //    }
      //
      // This type is ambiguous:
      //
      // 1. When interpreted as a standard 3-level layout, the `list` field is just the syntactic
      //    group, and the entire type should be translated to:
      //
      //      ARRAY<STRUCT<element: INT>>
      //
      // 2. On the other hand, when interpreted as a non-standard 2-level layout, the `list` field
      //    represents the element type, and the entire type should be translated to:
      //
      //      ARRAY<STRUCT<element: STRUCT<element: INT>>>
      //
      // Here we try to convert field `list` into a Catalyst type to see whether the converted type
      // matches the Catalyst array element type. If it doesn't match, then it's case 1; otherwise,
      // it's case 2.
      val guessedElementType = schemaConverter.convertField(repeatedType)

      if (DataType.equalsIgnoreCompatibleNullability(guessedElementType, elementType)) {
        // If the repeated field corresponds to the element type, creates a new converter using the
        // type of the repeated field.
        newConverter(repeatedType, elementType, new ParentContainerUpdater {
          override def set(value: Any): Unit = currentArray += value
        })
      } else {
        // If the repeated field corresponds to the syntactic group in the standard 3-level Parquet
        // LIST layout, creates a new converter using the only child field of the repeated field.
        assert(!repeatedType.isPrimitive && repeatedType.asGroupType().getFieldCount == 1)
        new ElementConverter(repeatedType.asGroupType().getType(0), elementType)
      }
    }

    override def getConverter(fieldIndex: Int): Converter = elementConverter

    override def end(): Unit = updater.set(new GenericArrayData(currentArray.toArray))

    // NOTE: We can't reuse the mutable `ArrayBuffer` here and must instantiate a new buffer for the
    // next value.  `Row.copy()` only copies row cells, it doesn't do deep copy to objects stored
    // in row cells.
    override def start(): Unit = currentArray = ArrayBuffer.empty[Any]

    /** Array element converter */
    private final class ElementConverter(parquetType: Type, catalystType: DataType)
      extends GroupConverter {

      private var currentElement: Any = _

      private val converter = newConverter(parquetType, catalystType, new ParentContainerUpdater {
        override def set(value: Any): Unit = currentElement = value
      })

      override def getConverter(fieldIndex: Int): Converter = converter

      override def end(): Unit = currentArray += currentElement

      override def start(): Unit = currentElement = null
    }
  }

  /** Parquet converter for maps */
  private final class ParquetMapConverter(
      parquetType: GroupType,
      catalystType: MapType,
      updater: ParentContainerUpdater)
    extends ParquetGroupConverter(updater) {

    // Keys and values are accumulated in parallel; entry i of each buffer forms one map entry.
    private var currentKeys: ArrayBuffer[Any] = _
    private var currentValues: ArrayBuffer[Any] = _

    private val keyValueConverter = {
      val repeatedType = parquetType.getType(0).asGroupType()
      new KeyValueConverter(
        repeatedType.getType(0),
        repeatedType.getType(1),
        catalystType.keyType,
        catalystType.valueType)
    }

    override def getConverter(fieldIndex: Int): Converter = keyValueConverter

    override def end(): Unit =
      updater.set(ArrayBasedMapData(currentKeys.toArray, currentValues.toArray))

    // NOTE: We can't reuse the mutable Map here and must instantiate a new `Map` for the next
    // value.  `Row.copy()` only copies row cells, it doesn't do deep copy to objects stored in row
    // cells.
    override def start(): Unit = {
      currentKeys = ArrayBuffer.empty[Any]
      currentValues = ArrayBuffer.empty[Any]
    }

    /** Parquet converter for key-value pairs within the map. */
    private final class KeyValueConverter(
        parquetKeyType: Type,
        parquetValueType: Type,
        catalystKeyType: DataType,
        catalystValueType: DataType)
      extends GroupConverter {

      private var currentKey: Any = _

      private var currentValue: Any = _

      private val converters = Array(
        // Converter for keys
        newConverter(parquetKeyType, catalystKeyType, new ParentContainerUpdater {
          override def set(value: Any): Unit = currentKey = value
        }),

        // Converter for values
        newConverter(parquetValueType, catalystValueType, new ParentContainerUpdater {
          override def set(value: Any): Unit = currentValue = value
        }))

      override def getConverter(fieldIndex: Int): Converter = converters(fieldIndex)

      override def end(): Unit = {
        currentKeys += currentKey
        currentValues += currentValue
      }

      override def start(): Unit = {
        currentKey = null
        currentValue = null
      }
    }
  }

  // Shares the "accumulate repeated values into an array" updater logic between the primitive and
  // group flavors of unannotated-repeated-field converters below.
  private trait RepeatedConverter {
    private var currentArray: ArrayBuffer[Any] = _

    protected def newArrayUpdater(updater: ParentContainerUpdater) = new ParentContainerUpdater {
      override def start(): Unit = currentArray = ArrayBuffer.empty[Any]
      override def end(): Unit = updater.set(new GenericArrayData(currentArray.toArray))
      override def set(value: Any): Unit = currentArray += value
    }
  }

  /**
   * A primitive converter for converting unannotated repeated primitive values to required arrays
   * of required primitives values.
   */
  private final class RepeatedPrimitiveConverter(
      parquetType: Type,
      catalystType: DataType,
      parentUpdater: ParentContainerUpdater)
    extends PrimitiveConverter with RepeatedConverter with HasParentContainerUpdater {

    val updater: ParentContainerUpdater = newArrayUpdater(parentUpdater)

    private val elementConverter: PrimitiveConverter =
      newConverter(parquetType, catalystType, updater).asPrimitiveConverter()

    // Delegate every primitive callback to the per-element converter.
    override def addBoolean(value: Boolean): Unit = elementConverter.addBoolean(value)
    override def addInt(value: Int): Unit = elementConverter.addInt(value)
    override def addLong(value: Long): Unit = elementConverter.addLong(value)
    override def addFloat(value: Float): Unit = elementConverter.addFloat(value)
    override def addDouble(value: Double): Unit = elementConverter.addDouble(value)
    override def addBinary(value: Binary): Unit = elementConverter.addBinary(value)

    override def setDictionary(dict: Dictionary): Unit = elementConverter.setDictionary(dict)
    override def hasDictionarySupport: Boolean = elementConverter.hasDictionarySupport
    override def addValueFromDictionary(id: Int): Unit = elementConverter.addValueFromDictionary(id)
  }

  /**
   * A group converter for converting unannotated repeated group values to required arrays of
   * required struct values.
   */
  private final class RepeatedGroupConverter(
      parquetType: Type,
      catalystType: DataType,
      parentUpdater: ParentContainerUpdater)
    extends GroupConverter with HasParentContainerUpdater with RepeatedConverter {

    val updater: ParentContainerUpdater = newArrayUpdater(parentUpdater)

    private val elementConverter: GroupConverter =
      newConverter(parquetType, catalystType, updater).asGroupConverter()

    override def getConverter(field: Int): Converter = elementConverter.getConverter(field)

    override def end(): Unit = elementConverter.end()

    override def start(): Unit = elementConverter.start()
  }
}
private[parquet] object ParquetRowConverter {
  /**
   * Interprets the bytes of `binary` as a big-endian two's-complement integer and returns it as
   * an unscaled `Long`.
   */
  def binaryToUnscaledLong(binary: Binary): Long = {
    // The underlying `ByteBuffer` implementation is guaranteed to be `HeapByteBuffer`, so here
    // we are using `Binary.toByteBuffer.array()` to steal the underlying byte array without
    // copying it.
    val buffer = binary.toByteBuffer
    val bytes = buffer.array()
    val first = buffer.arrayOffset() + buffer.position()
    val bound = buffer.arrayOffset() + buffer.limit()

    // Fold the big-endian bytes into the low end of a Long accumulator.
    val raw = (first until bound).foldLeft(0L) { (acc, idx) =>
      (acc << 8) | (bytes(idx) & 0xff)
    }

    // Sign-extend from the number of bits that were actually present in the binary.
    val unusedBits = 64 - 8 * (bound - first)
    (raw << unusedBits) >> unusedBits
  }

  /** Decodes a 12-byte INT96 binary (nanos-of-day + Julian day, little-endian) to a timestamp. */
  def binaryToSQLTimestamp(binary: Binary): SQLTimestamp = {
    assert(binary.length() == 12, s"Timestamps (with nanoseconds) are expected to be stored in" +
      s" 12-byte long binaries. Found a ${binary.length()}-byte binary instead.")
    val buf = binary.toByteBuffer.order(ByteOrder.LITTLE_ENDIAN)
    val nanosOfDay = buf.getLong
    val julianDayNumber = buf.getInt
    DateTimeUtils.fromJulianDay(julianDayNumber, nanosOfDay)
  }
}
| spark0001/spark2.1.1 | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetRowConverter.scala | Scala | apache-2.0 | 26,994 |
package concrete.constraint.extension
import concrete.{IntDomain, Variable}
import org.scalacheck.Gen
import org.scalatest.{FlatSpec, Matchers, OptionValues}
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks
class MDDRelationTest extends FlatSpec with Matchers with ScalaCheckPropertyChecks with OptionValues {

  // Shared relation under test: four tuples over a scope of three variables.
  val mdd = MDDRelation(Seq(Array(1, 2, 3), Array(1, 3, 4), Array(1, 2, 5), Array(2, 3, 5)))

  it should "have correct sizes" in {
    mdd should have size 4
    mdd.edges shouldBe 9
    mdd.lambda shouldBe BigInt(4)
  }

  it should "find supports" in {
    val v0 = new Variable("V0", IntDomain(0 to 2))
    val v1 = new Variable("V1", IntDomain(0 to 3))
    val v2 = new Variable("V2", IntDomain(0 to 5))
    val scope = Array(v0.initDomain, v1.initDomain, v2.initDomain)

    // findSupport(scope, pos, value): a tuple of the relation consistent with `scope` whose
    // `pos`-th component equals `value`, or None when no such tuple exists.
    mdd.findSupport(scope, 0, 0) shouldBe None
    mdd.findSupport(scope, 0, 1) should contain oneOf(Array(1, 2, 3), Array(1, 3, 4), Array(1, 2, 5))
    mdd.findSupport(scope, 0, 2).value shouldBe Array(2, 3, 5)
    mdd.findSupport(scope, 1, 0) shouldBe None
    mdd.findSupport(scope, 1, 1) shouldBe None
    mdd.findSupport(scope, 1, 2) should contain oneOf(Array(1, 2, 3), Array(1, 2, 5))
    mdd.findSupport(scope, 1, 3) should contain oneOf(Array(1, 3, 4), Array(2, 3, 5))
    mdd.findSupport(scope, 2, 0) shouldBe None
    mdd.findSupport(scope, 2, 1) shouldBe None
    mdd.findSupport(scope, 2, 2) shouldBe None
    mdd.findSupport(scope, 2, 3).value shouldBe Array(1, 2, 3)
    mdd.findSupport(scope, 2, 4).value shouldBe Array(1, 3, 4)
    mdd.findSupport(scope, 2, 5) should contain oneOf(Array(1, 2, 5), Array(2, 3, 5))

    // Remove value 2 from V1's domain: tuples (1,2,3) and (1,2,5) are no longer supported,
    // so the remaining assertions are order-sensitive relative to this mutation.
    scope(1) -= 2

    mdd.findSupport(scope, 0, 0) shouldBe None
    mdd.findSupport(scope, 0, 1).value shouldBe Array(1, 3, 4)
    mdd.findSupport(scope, 0, 2).value shouldBe Array(2, 3, 5)
    mdd.findSupport(scope, 1, 0) shouldBe None
    mdd.findSupport(scope, 1, 1) shouldBe None
    mdd.findSupport(scope, 1, 3) should contain oneOf(Array(1, 3, 4), Array(2, 3, 5))
    mdd.findSupport(scope, 2, 0) shouldBe None
    mdd.findSupport(scope, 2, 1) shouldBe None
    mdd.findSupport(scope, 2, 2) shouldBe None
    mdd.findSupport(scope, 2, 3) shouldBe None
    mdd.findSupport(scope, 2, 4).value shouldBe Array(1, 3, 4)
    mdd.findSupport(scope, 2, 5).value shouldBe Array(2, 3, 5)
  }

  it should "find single support" in {
    // Degenerate relation with a single tuple; V59 and V60 are singletons.
    val v58 = new Variable("V58", IntDomain(0 to 160))
    val v59 = new Variable("V59", IntDomain.ofSeq(160))
    val v60 = new Variable("V60", IntDomain.ofSeq(0))
    val vars = Array(v58, v59, v60)
    val scope = vars.map(_.initDomain)
    val mdd = MDDRelation(Seq(Array(160, 160, 0)))

    // No value of V58 below 160 has a support.
    forAll(Gen.choose(0, 159)) { i =>
      mdd.findSupport(scope, 0, i) shouldBe None
    }
    mdd.findSupport(scope, 0, 158) shouldBe None
    mdd.findSupport(scope, 0, 160).value shouldBe Array(160, 160, 0)
  }
}
| concrete-cp/concrete | src/test/scala/concrete/constraint/extension/MDDRelationTest.scala | Scala | lgpl-2.1 | 2,879 |
package intro
object Hello {
  /** Application entry point: prints a greeting to standard output. */
  def main(args: Array[String]): Unit =
    println("Hello, world!")
}
| acanda/scala-intro-dojo | Scala Intro/src/main/scala/intro/Hello.scala | Scala | apache-2.0 | 109 |
/*
* Copyright 2013 Michał Rus <https://michalrus.com/>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.agh.gst.consumer
import scala.reflect.io.{File, Directory}
import scala.collection.immutable.TreeMap
object CsvExporter {
  /** Matches any character that is unsafe to use in a file name. */
  val NonPathCharacter = """[^A-Za-z0-9_\\-.]""".r

  val Extension = ".csv"

  /** Sanitizes `title` into a file-system-safe [[File]] by replacing unsafe characters with `_`. */
  def cleanUp(title: String) =
    File(NonPathCharacter.replaceAllIn(title, "_"))

  /**
   * Renders the per-year data as CSV with a `Year,Articles,Citations` header row.
   *
   * NOTE(review): the separator literal appears as `\\n` (escaped backslash + n); confirm whether
   * a real newline was intended — it is reproduced here exactly as in the original.
   */
  def csvFrom(data: TreeMap[Int, YearData]): String = {
    val rows = data.map {
      case (year, YearData(articles, citations)) => s"$year,$articles,$citations"
    }
    "Year,Articles,Citations\\n" + rows.mkString("\\n") + "\\n"
  }
}
class CsvExporter(directory: Option[Directory], engine: String) extends Consumer {
  import CsvExporter._

  /**
   * Writes the CSV rendering of `data` to `<directory>/<sanitized title>.<engine>.csv`.
   * Does nothing when no output directory was configured.
   */
  def refresh(title: String, data: TreeMap[Int, YearData]) =
    directory.foreach { dir =>
      val target = dir / cleanUp(title) addExtension (engine + Extension)
      target.writeAll(csvFrom(data))
    }
}
| michalrus/agh-google-scholar-trends | src/main/scala/edu/agh/gst/consumer/CsvExporter.scala | Scala | apache-2.0 | 1,472 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import scala.reflect.ClassTag
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.rdd.RDD
import org.apache.spark.serializer.Serializer
import org.apache.spark.shuffle.{ShuffleHandle, ShuffleWriteProcessor}
import org.apache.spark.storage.BlockManagerId
/**
* :: DeveloperApi ::
* Base class for dependencies.
*/
@DeveloperApi
abstract class Dependency[T] extends Serializable {
  /** The parent RDD this dependency points to. */
  def rdd: RDD[T]
}
/**
* :: DeveloperApi ::
* Base class for dependencies where each partition of the child RDD depends on a small number
* of partitions of the parent RDD. Narrow dependencies allow for pipelined execution.
*/
@DeveloperApi
abstract class NarrowDependency[T](_rdd: RDD[T]) extends Dependency[T] {
  /**
   * Get the parent partitions for a child partition.
   * @param partitionId a partition of the child RDD
   * @return the partitions of the parent RDD that the child partition depends upon
   */
  def getParents(partitionId: Int): Seq[Int]

  // The parent RDD captured at construction time.
  override def rdd: RDD[T] = _rdd
}
/**
* :: DeveloperApi ::
* Represents a dependency on the output of a shuffle stage. Note that in the case of shuffle,
* the RDD is transient since we don't need it on the executor side.
*
* @param _rdd the parent RDD
* @param partitioner partitioner used to partition the shuffle output
* @param serializer [[org.apache.spark.serializer.Serializer Serializer]] to use. If not set
* explicitly then the default serializer, as specified by `spark.serializer`
* config option, will be used.
* @param keyOrdering key ordering for RDD's shuffles
* @param aggregator map/reduce-side aggregator for RDD's shuffle
* @param mapSideCombine whether to perform partial aggregation (also known as map-side combine)
* @param shuffleWriterProcessor the processor to control the write behavior in ShuffleMapTask
*/
@DeveloperApi
class ShuffleDependency[K: ClassTag, V: ClassTag, C: ClassTag](
    @transient private val _rdd: RDD[_ <: Product2[K, V]],
    val partitioner: Partitioner,
    val serializer: Serializer = SparkEnv.get.serializer,
    val keyOrdering: Option[Ordering[K]] = None,
    val aggregator: Option[Aggregator[K, V, C]] = None,
    val mapSideCombine: Boolean = false,
    val shuffleWriterProcessor: ShuffleWriteProcessor = new ShuffleWriteProcessor)
  extends Dependency[Product2[K, V]] {

  // Map-side combining is only meaningful with an aggregator; fail fast otherwise.
  if (mapSideCombine) {
    require(aggregator.isDefined, "Map-side combine without Aggregator specified!")
  }

  override def rdd: RDD[Product2[K, V]] = _rdd.asInstanceOf[RDD[Product2[K, V]]]

  // Runtime class names are kept (rather than ClassTags) so they can be serialized
  // as plain strings.
  private[spark] val keyClassName: String = reflect.classTag[K].runtimeClass.getName
  private[spark] val valueClassName: String = reflect.classTag[V].runtimeClass.getName
  // Note: It's possible that the combiner class tag is null, if the combineByKey
  // methods in PairRDDFunctions are used instead of combineByKeyWithClassTag.
  private[spark] val combinerClassName: Option[String] =
    Option(reflect.classTag[C]).map(_.runtimeClass.getName)

  // Side effect at construction: a fresh shuffle id is allocated from the SparkContext.
  val shuffleId: Int = _rdd.context.newShuffleId()

  // Side effect at construction: the shuffle is registered with the shuffle manager,
  // which returns the handle used by readers and writers.
  val shuffleHandle: ShuffleHandle = _rdd.context.env.shuffleManager.registerShuffle(
    shuffleId, this)

  /**
   * Stores the location of the list of chosen external shuffle services for handling the
   * shuffle merge requests from mappers in this shuffle map stage.
   */
  private[spark] var mergerLocs: Seq[BlockManagerId] = Nil

  // A null argument is silently ignored and leaves the current merger locations intact.
  def setMergerLocs(mergerLocs: Seq[BlockManagerId]): Unit = {
    if (mergerLocs != null) {
      this.mergerLocs = mergerLocs
    }
  }

  def getMergerLocs: Seq[BlockManagerId] = mergerLocs

  // Construction-time registrations: hook into the context cleaner (if enabled) for
  // eventual cleanup, and notify the driver-side shuffle components.
  _rdd.sparkContext.cleaner.foreach(_.registerShuffleForCleanup(this))
  _rdd.sparkContext.shuffleDriverComponents.registerShuffle(shuffleId)
}
/**
* :: DeveloperApi ::
* Represents a one-to-one dependency between partitions of the parent and child RDDs.
*/
@DeveloperApi
class OneToOneDependency[T](rdd: RDD[T]) extends NarrowDependency[T](rdd) {
  // Each child partition depends on exactly the parent partition with the same index.
  override def getParents(partitionId: Int): List[Int] = partitionId :: Nil
}
/**
* :: DeveloperApi ::
* Represents a one-to-one dependency between ranges of partitions in the parent and child RDDs.
* @param rdd the parent RDD
* @param inStart the start of the range in the parent RDD
* @param outStart the start of the range in the child RDD
* @param length the length of the range
*/
@DeveloperApi
class RangeDependency[T](rdd: RDD[T], inStart: Int, outStart: Int, length: Int)
  extends NarrowDependency[T](rdd) {

  /**
   * Maps a child partition back to its single parent partition by shifting the
   * index from the child range into the parent range; partitions outside the
   * child range have no parents.
   */
  override def getParents(partitionId: Int): List[Int] = {
    val offset = partitionId - outStart
    if (offset >= 0 && offset < length) List(inStart + offset) else Nil
  }
}
| witgo/spark | core/src/main/scala/org/apache/spark/Dependency.scala | Scala | apache-2.0 | 5,505 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.jdbc.connection
import java.sql.{Connection, Driver}
import java.util.Properties
import scala.collection.JavaConverters._
import org.apache.spark.internal.Logging
import org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions
import org.apache.spark.sql.jdbc.JdbcConnectionProvider
private[jdbc] class BasicConnectionProvider extends JdbcConnectionProvider with Logging {
  /**
   * Additional properties for data connection (Data source property takes precedence).
   */
  def getAdditionalProperties(options: JDBCOptions): Properties = new Properties()

  override val name: String = "basic"

  /**
   * This provider handles any connection that does not request Kerberos
   * authentication, i.e. when keytab or principal is absent.
   */
  override def canHandle(driver: Driver, options: Map[String, String]): Boolean = {
    val opts = new JDBCOptions(options)
    opts.keytab == null || opts.principal == null
  }

  /**
   * Opens a JDBC connection using the given driver. Data-source connection
   * properties are layered on top of any provider-specific defaults, so the
   * data-source values win on key collisions.
   */
  override def getConnection(driver: Driver, options: Map[String, String]): Connection = {
    val opts = new JDBCOptions(options)
    val connProperties = getAdditionalProperties(opts)
    for ((key, value) <- opts.asConnectionProperties.asScala) {
      connProperties.put(key, value)
    }
    logDebug(s"JDBC connection initiated with URL: ${opts.url} and properties: $connProperties")
    driver.connect(opts.url, connProperties)
  }
}
| chuckchen/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/BasicConnectionProvider.scala | Scala | apache-2.0 | 2,110 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.zookeeper
import java.nio.charset.StandardCharsets
import java.util.UUID
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}
import java.util.concurrent.{ArrayBlockingQueue, ConcurrentLinkedQueue, CountDownLatch, Executors, Semaphore, TimeUnit}
import scala.collection.Seq
import com.yammer.metrics.core.{Gauge, Meter, MetricName}
import kafka.server.KafkaConfig
import kafka.metrics.KafkaYammerMetrics
import kafka.utils.TestUtils
import kafka.server.QuorumTestHarness
import org.apache.kafka.common.security.JaasUtils
import org.apache.kafka.common.utils.Time
import org.apache.zookeeper.KeeperException.{Code, NoNodeException}
import org.apache.zookeeper.Watcher.Event.{EventType, KeeperState}
import org.apache.zookeeper.ZooKeeper.States
import org.apache.zookeeper.client.ZKClientConfig
import org.apache.zookeeper.{CreateMode, WatchedEvent, ZooDefs}
import org.junit.jupiter.api.Assertions.{assertArrayEquals, assertEquals, assertFalse, assertThrows, assertTrue, fail}
import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo}
import scala.jdk.CollectionConverters._
class ZooKeeperClientTest extends QuorumTestHarness {
  // Path used by most tests as the znode under test.
  private val mockPath = "/foo"
  private val time = Time.SYSTEM
  // Recreated before each test in setUp; individual tests may close and replace it.
  private var zooKeeperClient: ZooKeeperClient = _

  @BeforeEach
  override def setUp(testInfo: TestInfo): Unit = {
    // Fail early if a previous test leaked threads, before starting ZooKeeper.
    TestUtils.verifyNoUnexpectedThreads("@BeforeEach")
    cleanMetricsRegistry()
    super.setUp(testInfo)
    zooKeeperClient = newZooKeeperClient()
  }

  @AfterEach
  override def tearDown(): Unit = {
    if (zooKeeperClient != null)
      zooKeeperClient.close()
    super.tearDown()
    // Some tests point the JAAS login config at a bogus file; always undo that.
    System.clearProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM)
    TestUtils.verifyNoUnexpectedThreads("@AfterEach")
  }
@Test
def testUnresolvableConnectString(): Unit = {
try {
newZooKeeperClient("some.invalid.hostname.foo.bar.local", connectionTimeoutMs = 10)
} catch {
case e: ZooKeeperClientTimeoutException =>
assertEquals(Set.empty, runningZkSendThreads, "ZooKeeper client threads still running")
}
}
  // Names of live threads that look like leaked ZooKeeper client send threads.
  // NOTE(review): the filter matches the literal "SendThread()" — presumably the
  // unresolved address renders as empty parentheses in the thread name; confirm
  // against the ZooKeeper ClientCnxn thread-naming scheme.
  private def runningZkSendThreads: collection.Set[String] = Thread.getAllStackTraces.keySet.asScala
    .filter(_.isAlive)
    .map(_.getName)
    .filter(t => t.contains("SendThread()"))
  // Connecting to a stopped ZooKeeper must time out rather than hang.
  @Test
  def testConnectionTimeout(): Unit = {
    zookeeper.shutdown()
    assertThrows(classOf[ZooKeeperClientTimeoutException], () => newZooKeeperClient(
      connectionTimeoutMs = 10).close())
  }

  // A successful connection must spawn the client event thread whose name
  // QuorumTestHarness uses to detect unclosed clients.
  @Test
  def testConnection(): Unit = {
    val client = newZooKeeperClient()
    try {
      // Verify ZooKeeper event thread name. This is used in QuorumTestHarness to verify that tests have closed ZK clients
      val threads = Thread.getAllStackTraces.keySet.asScala.map(_.getName)
      assertTrue(threads.exists(_.contains(QuorumTestHarness.ZkClientEventThreadSuffix)),
        s"ZooKeeperClient event thread not found, threads=$threads")
    } finally {
      client.close()
    }
  }

  // The Netty client socket class must be configurable (a prerequisite for TLS),
  // and a bogus socket class name must fail client construction.
  @Test
  def testConnectionViaNettyClient(): Unit = {
    // Confirm that we can explicitly set client connection configuration, which is necessary for TLS.
    // TLS connectivity itself is tested in system tests rather than here to avoid having to add TLS support
    // to kafka.zk.EmbeddedZookeeper
    val clientConfig = new ZKClientConfig()
    val propKey = KafkaConfig.ZkClientCnxnSocketProp
    val propVal = "org.apache.zookeeper.ClientCnxnSocketNetty"
    KafkaConfig.setZooKeeperClientProperty(clientConfig, propKey, propVal)
    val client = newZooKeeperClient(clientConfig = clientConfig)
    try {
      assertEquals(Some(propVal), KafkaConfig.zooKeeperClientProperty(client.clientConfig, propKey))
      // For a sanity check, make sure a bad client connection socket class name generates an exception
      val badClientConfig = new ZKClientConfig()
      KafkaConfig.setZooKeeperClientProperty(badClientConfig, propKey, propVal + "BadClassName")
      assertThrows(classOf[Exception], () => newZooKeeperClient(clientConfig = badClientConfig))
    } finally {
      client.close()
    }
  }
@Test
def testDeleteNonExistentZNode(): Unit = {
val deleteResponse = zooKeeperClient.handleRequest(DeleteRequest(mockPath, -1))
assertEquals(Code.NONODE, deleteResponse.resultCode, "Response code should be NONODE")
assertThrows(classOf[NoNodeException], () => deleteResponse.maybeThrow())
}
  // Create-then-delete on the same path must both succeed.
  @Test
  def testDeleteExistingZNode(): Unit = {
    val createResponse = zooKeeperClient.handleRequest(CreateRequest(mockPath, Array.empty[Byte],
      ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala, CreateMode.PERSISTENT))
    assertEquals(Code.OK, createResponse.resultCode, "Response code for create should be OK")
    val deleteResponse = zooKeeperClient.handleRequest(DeleteRequest(mockPath, -1))
    assertEquals(Code.OK, deleteResponse.resultCode, "Response code for delete should be OK")
  }

  // Exists on a missing path must yield NONODE.
  @Test
  def testExistsNonExistentZNode(): Unit = {
    existsResponse = zooKeeperClient.handleRequest(ExistsRequest(mockPath))
    assertEquals(Code.NONODE, existsResponse.resultCode, "Response code should be NONODE")
  }

  // Exists on a created path must yield OK.
  @Test
  def testExistsExistingZNode(): Unit = {
    val createResponse = zooKeeperClient.handleRequest(CreateRequest(mockPath, Array.empty[Byte],
      ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala, CreateMode.PERSISTENT))
    assertEquals(Code.OK, createResponse.resultCode, "Response code for create should be OK")
    val existsResponse = zooKeeperClient.handleRequest(ExistsRequest(mockPath))
    assertEquals(Code.OK, existsResponse.resultCode, "Response code for exists should be OK")
  }

  // GetData on a missing path must yield NONODE.
  @Test
  def testGetDataNonExistentZNode(): Unit = {
    val getDataResponse = zooKeeperClient.handleRequest(GetDataRequest(mockPath))
    assertEquals(Code.NONODE, getDataResponse.resultCode, "Response code should be NONODE")
  }

  // Data written at creation must be read back unchanged.
  // NOTE(review): `bytes` is a helper defined elsewhere in this file.
  @Test
  def testGetDataExistingZNode(): Unit = {
    val data = bytes
    val createResponse = zooKeeperClient.handleRequest(CreateRequest(mockPath, data, ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala,
      CreateMode.PERSISTENT))
    assertEquals(Code.OK, createResponse.resultCode, "Response code for create should be OK")
    val getDataResponse = zooKeeperClient.handleRequest(GetDataRequest(mockPath))
    assertEquals(Code.OK, getDataResponse.resultCode, "Response code for getData should be OK")
    assertArrayEquals(data, getDataResponse.data, "Data for getData should match created znode data")
  }

  // SetData on a missing path must yield NONODE.
  @Test
  def testSetDataNonExistentZNode(): Unit = {
    val setDataResponse = zooKeeperClient.handleRequest(SetDataRequest(mockPath, Array.empty[Byte], -1))
    assertEquals(Code.NONODE, setDataResponse.resultCode, "Response code should be NONODE")
  }

  // SetData on an existing (empty) znode must be visible to a subsequent getData.
  @Test
  def testSetDataExistingZNode(): Unit = {
    val data = bytes
    val createResponse = zooKeeperClient.handleRequest(CreateRequest(mockPath, Array.empty[Byte],
      ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala, CreateMode.PERSISTENT))
    assertEquals(Code.OK, createResponse.resultCode, "Response code for create should be OK")
    val setDataResponse = zooKeeperClient.handleRequest(SetDataRequest(mockPath, data, -1))
    assertEquals(Code.OK, setDataResponse.resultCode, "Response code for setData should be OK")
    val getDataResponse = zooKeeperClient.handleRequest(GetDataRequest(mockPath))
    assertEquals(Code.OK, getDataResponse.resultCode, "Response code for getData should be OK")
    assertArrayEquals(data, getDataResponse.data, "Data for getData should match setData's data")
  }
  // GetAcl on a missing path must yield NONODE.
  @Test
  def testGetAclNonExistentZNode(): Unit = {
    val getAclResponse = zooKeeperClient.handleRequest(GetAclRequest(mockPath))
    assertEquals(Code.NONODE, getAclResponse.resultCode, "Response code should be NONODE")
  }

  // A znode created with OPEN_ACL_UNSAFE must report exactly that ACL.
  @Test
  def testGetAclExistingZNode(): Unit = {
    val createResponse = zooKeeperClient.handleRequest(CreateRequest(mockPath, Array.empty[Byte], ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala, CreateMode.PERSISTENT))
    assertEquals(Code.OK, createResponse.resultCode, "Response code for create should be OK")
    val getAclResponse = zooKeeperClient.handleRequest(GetAclRequest(mockPath))
    assertEquals(Code.OK, getAclResponse.resultCode, "Response code for getAcl should be OK")
    assertEquals(ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala, getAclResponse.acl, "ACL should be " + ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala)
  }

  // SetAcl on a missing path must yield NONODE.
  @Test
  def testSetAclNonExistentZNode(): Unit = {
    val setAclResponse = zooKeeperClient.handleRequest(SetAclRequest(mockPath, ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala, -1))
    assertEquals(Code.NONODE, setAclResponse.resultCode, "Response code should be NONODE")
  }

  // GetChildren on a missing path must yield NONODE.
  @Test
  def testGetChildrenNonExistentZNode(): Unit = {
    val getChildrenResponse = zooKeeperClient.handleRequest(GetChildrenRequest(mockPath, registerWatch = true))
    assertEquals(Code.NONODE, getChildrenResponse.resultCode, "Response code should be NONODE")
  }

  // A freshly created znode has no children.
  @Test
  def testGetChildrenExistingZNode(): Unit = {
    val createResponse = zooKeeperClient.handleRequest(CreateRequest(mockPath, Array.empty[Byte],
      ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala, CreateMode.PERSISTENT))
    assertEquals(Code.OK, createResponse.resultCode, "Response code for create should be OK")
    val getChildrenResponse = zooKeeperClient.handleRequest(GetChildrenRequest(mockPath, registerWatch = true))
    assertEquals(Code.OK, getChildrenResponse.resultCode, "Response code for getChildren should be OK")
    assertEquals(Seq.empty[String], getChildrenResponse.children, "getChildren should return no children")
  }

  // Both created children must be listed (sorted, since ZK ordering is unspecified).
  @Test
  def testGetChildrenExistingZNodeWithChildren(): Unit = {
    val child1 = "child1"
    val child2 = "child2"
    val child1Path = mockPath + "/" + child1
    val child2Path = mockPath + "/" + child2
    val createResponse = zooKeeperClient.handleRequest(CreateRequest(mockPath, Array.empty[Byte],
      ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala, CreateMode.PERSISTENT))
    assertEquals(Code.OK, createResponse.resultCode, "Response code for create should be OK")
    val createResponseChild1 = zooKeeperClient.handleRequest(CreateRequest(child1Path, Array.empty[Byte],
      ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala, CreateMode.PERSISTENT))
    assertEquals(Code.OK, createResponseChild1.resultCode, "Response code for create child1 should be OK")
    val createResponseChild2 = zooKeeperClient.handleRequest(CreateRequest(child2Path, Array.empty[Byte],
      ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala, CreateMode.PERSISTENT))
    assertEquals(Code.OK, createResponseChild2.resultCode, "Response code for create child2 should be OK")
    val getChildrenResponse = zooKeeperClient.handleRequest(GetChildrenRequest(mockPath, registerWatch = true))
    assertEquals(Code.OK, getChildrenResponse.resultCode, "Response code for getChildren should be OK")
    assertEquals(Seq(child1, child2), getChildrenResponse.children.sorted, "getChildren should return two children")
  }
  // handleRequests must pipeline a batch of getData requests and return responses
  // in request order, each carrying the data written at creation.
  @Test
  def testPipelinedGetData(): Unit = {
    val createRequests = (1 to 3).map(x => CreateRequest("/" + x, (x * 2).toString.getBytes, ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala, CreateMode.PERSISTENT))
    val createResponses = createRequests.map(zooKeeperClient.handleRequest)
    createResponses.foreach(createResponse => assertEquals(Code.OK, createResponse.resultCode, "Response code for create should be OK"))
    val getDataRequests = (1 to 3).map(x => GetDataRequest("/" + x))
    val getDataResponses = zooKeeperClient.handleRequests(getDataRequests)
    getDataResponses.foreach(getDataResponse => assertEquals(Code.OK, getDataResponse.resultCode,
      "Response code for getData should be OK"))
    // Response order must match request order: znode "/i" holds the string of i*2.
    getDataResponses.zipWithIndex.foreach { case (getDataResponse, i) =>
      assertEquals(Code.OK, getDataResponse.resultCode, "Response code for getData should be OK")
      assertEquals(((i + 1) * 2), Integer.valueOf(new String(getDataResponse.data)), "Data for getData should match")
    }
  }

  // A mixed batch keeps per-request results independent: the successful getData
  // and the failing setData each report their own code.
  @Test
  def testMixedPipeline(): Unit = {
    val createResponse = zooKeeperClient.handleRequest(CreateRequest(mockPath, Array.empty[Byte],
      ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala, CreateMode.PERSISTENT))
    assertEquals(Code.OK, createResponse.resultCode, "Response code for create should be OK")
    val getDataRequest = GetDataRequest(mockPath)
    val setDataRequest = SetDataRequest("/nonexistent", Array.empty[Byte], -1)
    val responses = zooKeeperClient.handleRequests(Seq(getDataRequest, setDataRequest))
    assertEquals(Code.OK, responses.head.resultCode, "Response code for getData should be OK")
    assertArrayEquals(Array.empty[Byte], responses.head.asInstanceOf[GetDataResponse].data, "Data for getData should be empty")
    assertEquals(Code.NONODE, responses.last.resultCode, "Response code for setData should be NONODE")
  }
  // A registered ZNodeChangeHandler must be notified when the watched path is
  // created. The exists request (sent before create in the same batch) registers
  // the watch.
  @Test
  def testZNodeChangeHandlerForCreation(): Unit = {
    val znodeChangeHandlerCountDownLatch = new CountDownLatch(1)
    val zNodeChangeHandler = new ZNodeChangeHandler {
      override def handleCreation(): Unit = {
        znodeChangeHandlerCountDownLatch.countDown()
      }
      override val path: String = mockPath
    }
    zooKeeperClient.registerZNodeChangeHandler(zNodeChangeHandler)
    val existsRequest = ExistsRequest(mockPath)
    val createRequest = CreateRequest(mockPath, Array.empty[Byte], ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala, CreateMode.PERSISTENT)
    val responses = zooKeeperClient.handleRequests(Seq(existsRequest, createRequest))
    assertEquals(Code.NONODE, responses.head.resultCode, "Response code for exists should be NONODE")
    assertEquals(Code.OK, responses.last.resultCode, "Response code for create should be OK")
    assertTrue(znodeChangeHandlerCountDownLatch.await(5, TimeUnit.SECONDS), "Failed to receive create notification")
  }

  // Deleting a watched znode must fire the deletion callback. Here the create
  // precedes the exists request so the watch is set on an existing node.
  @Test
  def testZNodeChangeHandlerForDeletion(): Unit = {
    val znodeChangeHandlerCountDownLatch = new CountDownLatch(1)
    val zNodeChangeHandler = new ZNodeChangeHandler {
      override def handleDeletion(): Unit = {
        znodeChangeHandlerCountDownLatch.countDown()
      }
      override val path: String = mockPath
    }
    zooKeeperClient.registerZNodeChangeHandler(zNodeChangeHandler)
    val existsRequest = ExistsRequest(mockPath)
    val createRequest = CreateRequest(mockPath, Array.empty[Byte], ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala, CreateMode.PERSISTENT)
    val responses = zooKeeperClient.handleRequests(Seq(createRequest, existsRequest))
    assertEquals(Code.OK, responses.last.resultCode, "Response code for create should be OK")
    assertEquals(Code.OK, responses.head.resultCode, "Response code for exists should be OK")
    val deleteResponse = zooKeeperClient.handleRequest(DeleteRequest(mockPath, -1))
    assertEquals(Code.OK, deleteResponse.resultCode, "Response code for delete should be OK")
    assertTrue(znodeChangeHandlerCountDownLatch.await(5, TimeUnit.SECONDS), "Failed to receive delete notification")
  }

  // Overwriting the data of a watched znode must fire the data-change callback.
  @Test
  def testZNodeChangeHandlerForDataChange(): Unit = {
    val znodeChangeHandlerCountDownLatch = new CountDownLatch(1)
    val zNodeChangeHandler = new ZNodeChangeHandler {
      override def handleDataChange(): Unit = {
        znodeChangeHandlerCountDownLatch.countDown()
      }
      override val path: String = mockPath
    }
    zooKeeperClient.registerZNodeChangeHandler(zNodeChangeHandler)
    val existsRequest = ExistsRequest(mockPath)
    val createRequest = CreateRequest(mockPath, Array.empty[Byte], ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala, CreateMode.PERSISTENT)
    val responses = zooKeeperClient.handleRequests(Seq(createRequest, existsRequest))
    assertEquals(Code.OK, responses.last.resultCode, "Response code for create should be OK")
    assertEquals(Code.OK, responses.head.resultCode, "Response code for exists should be OK")
    val setDataResponse = zooKeeperClient.handleRequest(SetDataRequest(mockPath, Array.empty[Byte], -1))
    assertEquals(Code.OK, setDataResponse.resultCode, "Response code for setData should be OK")
    assertTrue(znodeChangeHandlerCountDownLatch.await(5, TimeUnit.SECONDS), "Failed to receive data change notification")
  }
  // Regression test for KAFKA-6879: a state-change callback that blocks on a
  // request handled by another thread must not deadlock reinitialization.
  @Test
  def testBlockOnRequestCompletionFromStateChangeHandler(): Unit = {
    // This tests the scenario exposed by KAFKA-6879 in which the expiration callback awaits
    // completion of a request which is handled by another thread
    val latch = new CountDownLatch(1)
    val stateChangeHandler = new StateChangeHandler {
      override val name = this.getClass.getName
      override def beforeInitializingSession(): Unit = {
        // Block session reinitialization until the request thread completes.
        latch.await()
      }
    }
    zooKeeperClient.close()
    zooKeeperClient = newZooKeeperClient()
    zooKeeperClient.registerStateChangeHandler(stateChangeHandler)
    val requestThread = new Thread() {
      override def run(): Unit = {
        try
          zooKeeperClient.handleRequest(CreateRequest(mockPath, Array.empty[Byte],
            ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala, CreateMode.PERSISTENT))
        finally
          latch.countDown()
      }
    }
    val reinitializeThread = new Thread() {
      override def run(): Unit = {
        zooKeeperClient.forceReinitialize()
      }
    }
    reinitializeThread.start()
    // sleep briefly before starting the request thread so that the initialization
    // thread is blocking on the latch
    Thread.sleep(100)
    requestThread.start()
    reinitializeThread.join()
    requestThread.join()
  }
  // A state-change handler that throws must not prevent other handlers from
  // running nor leave the client unusable.
  @Test
  def testExceptionInBeforeInitializingSession(): Unit = {
    val faultyHandler = new StateChangeHandler {
      override val name = this.getClass.getName
      override def beforeInitializingSession(): Unit = {
        throw new RuntimeException()
      }
    }
    val goodCalls = new AtomicInteger(0)
    val goodHandler = new StateChangeHandler {
      override val name = this.getClass.getName
      override def beforeInitializingSession(): Unit = {
        goodCalls.incrementAndGet()
      }
    }
    zooKeeperClient.close()
    zooKeeperClient = newZooKeeperClient()
    zooKeeperClient.registerStateChangeHandler(faultyHandler)
    zooKeeperClient.registerStateChangeHandler(goodHandler)
    zooKeeperClient.forceReinitialize()
    assertEquals(1, goodCalls.get)
    // Client should be usable even if the callback throws an error
    val createResponse = zooKeeperClient.handleRequest(CreateRequest(mockPath, Array.empty[Byte],
      ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala, CreateMode.PERSISTENT))
    assertEquals(Code.OK, createResponse.resultCode, "Response code for create should be OK")
  }
  // A child-change handler must fire when a child is created, provided the
  // preceding getChildren registered a watch.
  @Test
  def testZNodeChildChangeHandlerForChildChange(): Unit = {
    val zNodeChildChangeHandlerCountDownLatch = new CountDownLatch(1)
    val zNodeChildChangeHandler = new ZNodeChildChangeHandler {
      override def handleChildChange(): Unit = {
        zNodeChildChangeHandlerCountDownLatch.countDown()
      }
      override val path: String = mockPath
    }
    val child1 = "child1"
    val child1Path = mockPath + "/" + child1
    val createResponse = zooKeeperClient.handleRequest(
      CreateRequest(mockPath, Array.empty[Byte], ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala, CreateMode.PERSISTENT))
    assertEquals(Code.OK, createResponse.resultCode, "Response code for create should be OK")
    zooKeeperClient.registerZNodeChildChangeHandler(zNodeChildChangeHandler)
    val getChildrenResponse = zooKeeperClient.handleRequest(GetChildrenRequest(mockPath, registerWatch = true))
    assertEquals(Code.OK, getChildrenResponse.resultCode, "Response code for getChildren should be OK")
    val createResponseChild1 = zooKeeperClient.handleRequest(
      CreateRequest(child1Path, Array.empty[Byte], ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala, CreateMode.PERSISTENT))
    assertEquals(Code.OK, createResponseChild1.resultCode, "Response code for create child1 should be OK")
    assertTrue(zNodeChildChangeHandlerCountDownLatch.await(5, TimeUnit.SECONDS),
      "Failed to receive child change notification")
  }

  // Conversely, with registerWatch = false no child-change notification may be
  // delivered (the latch must still be up after a short wait).
  @Test
  def testZNodeChildChangeHandlerForChildChangeNotTriggered(): Unit = {
    val zNodeChildChangeHandlerCountDownLatch = new CountDownLatch(1)
    val zNodeChildChangeHandler = new ZNodeChildChangeHandler {
      override def handleChildChange(): Unit = {
        zNodeChildChangeHandlerCountDownLatch.countDown()
      }
      override val path: String = mockPath
    }
    val child1 = "child1"
    val child1Path = mockPath + "/" + child1
    val createResponse = zooKeeperClient.handleRequest(
      CreateRequest(mockPath, Array.empty[Byte], ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala, CreateMode.PERSISTENT))
    assertEquals(Code.OK, createResponse.resultCode, "Response code for create should be OK")
    zooKeeperClient.registerZNodeChildChangeHandler(zNodeChildChangeHandler)
    val getChildrenResponse = zooKeeperClient.handleRequest(GetChildrenRequest(mockPath, registerWatch = false))
    assertEquals(Code.OK, getChildrenResponse.resultCode, "Response code for getChildren should be OK")
    val createResponseChild1 = zooKeeperClient.handleRequest(
      CreateRequest(child1Path, Array.empty[Byte], ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala, CreateMode.PERSISTENT))
    assertEquals(Code.OK, createResponseChild1.resultCode, "Response code for create child1 should be OK")
    assertFalse(zNodeChildChangeHandlerCountDownLatch.await(100, TimeUnit.MILLISECONDS),
      "Child change notification received")
  }
  // Pointing JAAS at a non-existent login config must surface as an auth-failure
  // notification on a registered state-change handler. The bogus system property
  // is cleared again in tearDown.
  @Test
  def testStateChangeHandlerForAuthFailure(): Unit = {
    System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM, "no-such-file-exists.conf")
    val stateChangeHandlerCountDownLatch = new CountDownLatch(1)
    val stateChangeHandler = new StateChangeHandler {
      override val name: String = this.getClass.getName
      override def onAuthFailure(): Unit = {
        stateChangeHandlerCountDownLatch.countDown()
      }
    }
    // Deliberately shadows the fixture client; this one is closed locally.
    val zooKeeperClient = newZooKeeperClient()
    try {
      zooKeeperClient.registerStateChangeHandler(stateChangeHandler)
      zooKeeperClient.forceReinitialize()
      assertTrue(stateChangeHandlerCountDownLatch.await(5, TimeUnit.SECONDS), "Failed to receive auth failed notification")
    } finally zooKeeperClient.close()
  }
  // After ZooKeeper shuts down, once one request in a batch fails with
  // CONNECTIONLOSS every subsequent response in the same batch must also be
  // CONNECTIONLOSS (no spurious successes after the loss).
  @Test
  def testConnectionLossRequestTermination(): Unit = {
    val batchSize = 10
    val zooKeeperClient = newZooKeeperClient(maxInFlight = 2)
    zookeeper.shutdown()
    try {
      val requests = (1 to batchSize).map(i => GetDataRequest(s"/$i"))
      val countDownLatch = new CountDownLatch(1)
      val running = new AtomicBoolean(true)
      val unexpectedResponses = new ArrayBlockingQueue[GetDataResponse](batchSize)
      val requestThread = new Thread {
        override def run(): Unit = {
          while (running.get()) {
            val responses = zooKeeperClient.handleRequests(requests)
            // Everything from the first CONNECTIONLOSS onward must be CONNECTIONLOSS.
            val suffix = responses.dropWhile(response => response.resultCode != Code.CONNECTIONLOSS)
            if (!suffix.forall(response => response.resultCode == Code.CONNECTIONLOSS))
              responses.foreach(unexpectedResponses.add)
            if (!unexpectedResponses.isEmpty || suffix.nonEmpty)
              running.set(false)
          }
          countDownLatch.countDown()
        }
      }
      requestThread.start()
      val requestThreadTerminated = countDownLatch.await(30, TimeUnit.SECONDS)
      if (!requestThreadTerminated) {
        running.set(false)
        requestThread.join(5000)
        fail("Failed to receive a CONNECTIONLOSS response code after zookeeper has shutdown.")
      } else if (!unexpectedResponses.isEmpty) {
        fail(s"Received an unexpected non-CONNECTIONLOSS response code after a CONNECTIONLOSS response code from a single batch: $unexpectedResponses")
      }
    } finally zooKeeperClient.close()
  }
  /**
   * Tests that if session expiry notification is received while a thread is processing requests,
   * session expiry is handled and the request thread completes with responses to all requests,
   * even though some requests may fail due to session expiry or disconnection.
   *
   * Sequence of events on different threads:
   *   Request thread:
   *       - Sends `maxInflightRequests` requests (these may complete before session is expired)
   *   Main thread:
   *       - Waits for at least one request to be processed (this should succeed)
   *       - Expires session by creating new client with same session id
   *       - Unblocks another `maxInflightRequests` requests before and after new client is closed (these may fail)
   *   ZooKeeperClient Event thread:
   *       - Delivers responses and session expiry (no ordering guarantee between these, both are processed asynchronously)
   *   Response executor thread:
   *       - Blocks subsequent sends by delaying response until session expiry is processed
   *   ZooKeeperClient Session Expiry Handler:
   *       - Unblocks subsequent sends
   *   Main thread:
   *       - Waits for all sends to complete. The requests sent after session expiry processing should succeed.
   */
  @Test
  def testSessionExpiry(): Unit = {
    val maxInflightRequests = 2
    val responseExecutor = Executors.newSingleThreadExecutor
    // sendSemaphore gates response processing; sendCompleteSemaphore signals that
    // at least one response has reached the executor.
    val sendSemaphore = new Semaphore(0)
    val sendCompleteSemaphore = new Semaphore(0)
    val sendSize = maxInflightRequests * 5
    @volatile var resultCodes: Seq[Code] = null
    val stateChanges = new ConcurrentLinkedQueue[String]()
    // Subclass intercepts send() so each response is delayed until the semaphore
    // is released, letting the test control when in-flight requests complete.
    val zooKeeperClient = new ZooKeeperClient(zkConnect, zkSessionTimeout, zkConnectionTimeout, maxInflightRequests,
      time, "testGroupType", "testGroupName", new ZKClientConfig, "ZooKeeperClientTest") {
      override def send[Req <: AsyncRequest](request: Req)(processResponse: Req#Response => Unit): Unit = {
        super.send(request)( response => {
          responseExecutor.submit(new Runnable {
            override def run(): Unit = {
              sendCompleteSemaphore.release()
              sendSemaphore.acquire()
              processResponse(response)
            }
          })
        })
      }
    }
    try {
      zooKeeperClient.registerStateChangeHandler(new StateChangeHandler {
        override val name: String ="test-state-change-handler"
        override def afterInitializingSession(): Unit = {
          verifyHandlerThread()
          stateChanges.add("afterInitializingSession")
        }
        override def beforeInitializingSession(): Unit = {
          verifyHandlerThread()
          stateChanges.add("beforeInitializingSession")
          sendSemaphore.release(sendSize) // Resume remaining sends
        }
        private def verifyHandlerThread(): Unit = {
          val threadName = Thread.currentThread.getName
          assertTrue(threadName.startsWith(zooKeeperClient.reinitializeScheduler.threadNamePrefix), s"Unexpected thread + $threadName")
        }
      })
      val requestThread = new Thread {
        override def run(): Unit = {
          val requests = (1 to sendSize).map(i => GetDataRequest(s"/$i"))
          resultCodes = zooKeeperClient.handleRequests(requests).map(_.resultCode)
        }
      }
      requestThread.start()
      sendCompleteSemaphore.acquire() // Wait for request thread to start processing requests
      val anotherZkClient = createZooKeeperClientToTriggerSessionExpiry(zooKeeperClient.currentZooKeeper)
      sendSemaphore.release(maxInflightRequests) // Resume a few more sends which may fail
      anotherZkClient.close()
      sendSemaphore.release(maxInflightRequests) // Resume a few more sends which may fail
      requestThread.join(10000)
      if (requestThread.isAlive) {
        requestThread.interrupt()
        fail("Request thread did not complete")
      }
      assertEquals(Seq("beforeInitializingSession", "afterInitializingSession"), stateChanges.asScala.toSeq)
      // Every request got a response; failures are bounded by the in-flight limit,
      // and requests sent after reinitialization succeed (NONODE on missing paths).
      assertEquals(resultCodes.size, sendSize)
      connectionLostCount = resultCodes.count(_ == Code.CONNECTIONLOSS)
      assertTrue(connectionLostCount <= maxInflightRequests, s"Unexpected connection lost requests $resultCodes")
      val expiredCount = resultCodes.count(_ == Code.SESSIONEXPIRED)
      assertTrue(expiredCount <= maxInflightRequests, s"Unexpected session expired requests $resultCodes")
      assertTrue(connectionLostCount + expiredCount > 0, s"No connection lost or expired requests $resultCodes")
      assertEquals(Code.NONODE, resultCodes.head)
      assertEquals(Code.NONODE, resultCodes.last)
      assertTrue(resultCodes.forall(Set(Code.NONODE, Code.SESSIONEXPIRED, Code.CONNECTIONLOSS).contains),
        s"Unexpected result code $resultCodes")
    } finally {
      zooKeeperClient.close()
      responseExecutor.shutdownNow()
    }
    assertFalse(zooKeeperClient.reinitializeScheduler.isStarted, "Expiry executor not shutdown")
  }
  @Test
  def testSessionExpiryDuringClose(): Unit = {
    // Semaphore blocks the reinitialize-scheduler thread so that an expiry is
    // "in flight" while close() runs; close() must wait for it to drain.
    val semaphore = new Semaphore(0)
    val closeExecutor = Executors.newSingleThreadExecutor
    try {
      // Park the scheduler thread on the semaphore, then queue a session-expiry
      // reinitialization behind it.
      zooKeeperClient.reinitializeScheduler.schedule("test", () => semaphore.acquireUninterruptibly(),
        delay = 0, period = -1, TimeUnit.SECONDS)
      zooKeeperClient.scheduleReinitialize("session-expired", "Session expired.", delayMs = 0L)
      // Run close() on its own thread: it should block until the scheduler is released.
      val closeFuture = closeExecutor.submit(new Runnable {
        override def run(): Unit = {
          zooKeeperClient.close()
        }
      })
      assertFalse(closeFuture.isDone, "Close completed without shutting down expiry scheduler gracefully")
      assertTrue(zooKeeperClient.currentZooKeeper.getState.isAlive) // Client should be closed after expiry handler
      // Unblock the scheduler; close() can now complete and shut the scheduler down.
      semaphore.release()
      closeFuture.get(10, TimeUnit.SECONDS)
      assertFalse(zooKeeperClient.reinitializeScheduler.isStarted, "Expiry executor not shutdown")
    } finally {
      closeExecutor.shutdownNow()
    }
  }
  @Test
  def testReinitializeAfterAuthFailure(): Unit = {
    // Latch trips when the client begins re-establishing its session.
    val sessionInitializedCountDownLatch = new CountDownLatch(1)
    val changeHandler = new StateChangeHandler {
      override val name = this.getClass.getName
      override def beforeInitializingSession(): Unit = {
        sessionInitializedCountDownLatch.countDown()
      }
    }

    // Replace the shared client with one whose reported connection state we can
    // override, so AuthFailed events can be simulated against chosen states.
    zooKeeperClient.close()
    @volatile var connectionStateOverride: Option[States] = None
    zooKeeperClient = new ZooKeeperClient(zkConnect, zkSessionTimeout, zkConnectionTimeout,
      zkMaxInFlightRequests, time, "testMetricGroup", "testMetricType", new ZKClientConfig, "ZooKeeperClientTest") {
      override def connectionState: States = connectionStateOverride.getOrElse(super.connectionState)
    }
    zooKeeperClient.registerStateChangeHandler(changeHandler)

    // An AuthFailed event while the connection is still CONNECTED must NOT
    // trigger session reinitialization.
    connectionStateOverride = Some(States.CONNECTED)
    zooKeeperClient.ZooKeeperClientWatcher.process(new WatchedEvent(EventType.None, KeeperState.AuthFailed, null))
    assertFalse(sessionInitializedCountDownLatch.await(10, TimeUnit.MILLISECONDS), "Unexpected session initialization when connection is alive")

    // Once the state is AUTH_FAILED, the same event should schedule a reinitialize.
    connectionStateOverride = Some(States.AUTH_FAILED)
    zooKeeperClient.ZooKeeperClientWatcher.process(new WatchedEvent(EventType.None, KeeperState.AuthFailed, null))
    assertTrue(sessionInitializedCountDownLatch.await(5, TimeUnit.SECONDS), "Failed to receive session initializing notification")
  }
def isExpectedMetricName(metricName: MetricName, name: String): Boolean =
metricName.getName == name && metricName.getGroup == "testMetricGroup" && metricName.getType == "testMetricType"
@Test
def testZooKeeperStateChangeRateMetrics(): Unit = {
def checkMeterCount(name: String, expected: Long): Unit = {
val meter = KafkaYammerMetrics.defaultRegistry.allMetrics.asScala.collectFirst {
case (metricName, meter: Meter) if isExpectedMetricName(metricName, name) => meter
}.getOrElse(sys.error(s"Unable to find meter with name $name"))
assertEquals(expected, meter.count, s"Unexpected meter count for $name")
}
val expiresPerSecName = "ZooKeeperExpiresPerSec"
val disconnectsPerSecName = "ZooKeeperDisconnectsPerSec"
checkMeterCount(expiresPerSecName, 0)
checkMeterCount(disconnectsPerSecName, 0)
zooKeeperClient.ZooKeeperClientWatcher.process(new WatchedEvent(EventType.None, KeeperState.Expired, null))
checkMeterCount(expiresPerSecName, 1)
checkMeterCount(disconnectsPerSecName, 0)
zooKeeperClient.ZooKeeperClientWatcher.process(new WatchedEvent(EventType.None, KeeperState.Disconnected, null))
checkMeterCount(expiresPerSecName, 1)
checkMeterCount(disconnectsPerSecName, 1)
}
@Test
def testZooKeeperSessionStateMetric(): Unit = {
def gaugeValue(name: String): Option[String] = {
KafkaYammerMetrics.defaultRegistry.allMetrics.asScala.collectFirst {
case (metricName, gauge: Gauge[_]) if isExpectedMetricName(metricName, name) => gauge.value.asInstanceOf[String]
}
}
assertEquals(Some(States.CONNECTED.toString), gaugeValue("SessionState"))
assertEquals(States.CONNECTED, zooKeeperClient.connectionState)
zooKeeperClient.close()
assertEquals(None, gaugeValue("SessionState"))
assertEquals(States.CLOSED, zooKeeperClient.connectionState)
}
private def newZooKeeperClient(connectionString: String = zkConnect,
connectionTimeoutMs: Int = zkConnectionTimeout,
maxInFlight: Int = zkMaxInFlightRequests,
clientConfig: ZKClientConfig = new ZKClientConfig) =
new ZooKeeperClient(connectionString, zkSessionTimeout, connectionTimeoutMs, maxInFlight, time,
"testMetricGroup", "testMetricType", clientConfig, "ZooKeeperClientTest")
private def cleanMetricsRegistry(): Unit = {
val metrics = KafkaYammerMetrics.defaultRegistry
metrics.allMetrics.keySet.forEach(metrics.removeMetric)
}
private def bytes = UUID.randomUUID().toString.getBytes(StandardCharsets.UTF_8)
}
| TiVo/kafka | core/src/test/scala/unit/kafka/zookeeper/ZooKeeperClientTest.scala | Scala | apache-2.0 | 34,556 |
/*
* Copyright © 2015 Reactific Software LLC. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package rxmongo.messages
import rxmongo.bson.{ BSONBuilder, BSONProvider }
/** Represents A MongoDB Query Projection
  *
  * Instances of this class may be passed to the [[rxmongo.client.Collection]].find method to limit the
  * fields returned by a query. Beyond plain inclusion/exclusion, MongoDB's array slicing, element
  * matching, positional and $meta projections are supported. All methods mutate the underlying
  * builder and return `this`, so calls can be chained fluently.
  */
class Projection extends BSONProvider {
  // Accumulates the projection document; also read by the companion object's factories.
  private val builder = BSONBuilder()

  def wrapAndTerminate = builder.wrapAndTerminate

  /** Include named fields in the query results.
    *
    * @param names The names of the fields to include
    * @return This projection, with the fields marked for inclusion
    */
  def include(names : String*) : Projection = {
    names.foreach { field => builder.integer(field, 1) }
    this
  }

  /** Exclude named fields from the query results.
    *
    * @param names The names of the fields to exclude
    * @return This projection, with the fields marked for exclusion
    */
  def exclude(names : String*) : Projection = {
    names.foreach { field => builder.integer(field, 0) }
    this
  }

  /** Project a slice of an array, from the start.
    *
    * Returns the first `count` elements of the array named `name`.
    *
    * @see [[http://docs.mongodb.org/master/reference/operator/projection/slice/]]
    * @param name The name of the array field to slice
    * @param count The number of elements of the array to return, from the start
    * @return This projection, with the slice added
    */
  def slice(name : String, count : Int) : Projection = {
    val sliceDoc = BSONBuilder().integer("$slice", count)
    builder.obj(name, sliceDoc)
    this
  }

  /** Project a slice of an array, from the start.
    *
    * Synonym for the two-argument `slice` method.
    *
    * @see [[http://docs.mongodb.org/master/reference/operator/projection/slice/]]
    * @param name The name of the array field to slice
    * @param count The number of elements of the array to return, from the start
    * @return This projection, returning the first `count` elements of the array named `name`
    */
  def sliceFromStart(name : String, count : Int) : Projection = slice(name, count)

  /** Project a slice of an array, from the end.
    *
    * @see [[http://docs.mongodb.org/master/reference/operator/projection/slice/]]
    * @param name The name of the array field to slice
    * @param count The number of elements of the array to return, from the end
    * @return This projection, returning the last `count` elements of the array named `name`
    */
  def sliceFromEnd(name : String, count : Int) : Projection = {
    // A negative $slice argument tells MongoDB to count from the tail of the array.
    val sliceDoc = BSONBuilder().integer("$slice", -count)
    builder.obj(name, sliceDoc)
    this
  }

  /** Project a slice of an array, skipping from the start.
    *
    * @see [[http://docs.mongodb.org/master/reference/operator/projection/slice/]]
    * @param name The name of the array field to slice
    * @param skip The number of elements of the array to skip, from the start
    * @param count The number of elements of the array to return, from the skip point
    * @return This projection, returning `count` elements of the array named `name` starting at
    *         `skip` elements from the start
    */
  def slice(name : String, skip : Int, count : Int) : Projection = {
    val sliceDoc = BSONBuilder().array("$slice", skip, count)
    builder.obj(name, sliceDoc)
    this
  }

  /** Project a slice of an array, skipping from the start.
    *
    * Synonym for the three-argument `slice` method.
    *
    * @see [[http://docs.mongodb.org/master/reference/operator/projection/slice/]]
    * @param name The name of the array field to slice
    * @param skip The number of elements of the array to skip, from the start
    * @param count The number of elements of the array to return, from the skip point
    * @return This projection, returning `count` elements of the array named `name` starting at
    *         `skip` elements from the start
    */
  def sliceFromStart(name : String, skip : Int, count : Int) : Projection = slice(name, skip, count)

  /** Project a slice of an array, skipping from the end.
    *
    * @see [[http://docs.mongodb.org/master/reference/operator/projection/slice/]]
    * @param name The name of the array field to slice
    * @param skip The number of elements of the array to skip, from the end
    * @param count The number of elements of the array to return, from the skip point
    * @return This projection, returning `count` elements of the array named `name` starting from
    *         `skip` elements before the end
    */
  def sliceFromEnd(name : String, skip : Int, count : Int) : Projection = {
    // Negating `skip` anchors the skip offset at the end of the array.
    val sliceDoc = BSONBuilder().array("$slice", -skip, count)
    builder.obj(name, sliceDoc)
    this
  }

  /** Include the meta textScore field.
    *
    * @see [[http://docs.mongodb.org/master/reference/operator/projection/meta/]]
    * @param name The name of the field in which to return the textScore meta value
    * @return This projection, with the $meta textScore field added
    */
  def metaTextScore(name : String) : Projection = {
    val metaDoc = BSONBuilder().string("$meta", "textScore")
    builder.obj(name, metaDoc)
    this
  }

  /** Return the first array element that matches a query.
    *
    * @see [[http://docs.mongodb.org/master/reference/operator/projection/elemMatch/]]
    * @param name The name of the array field to match
    * @param query The query that selects which elements of the array to return
    * @return This projection, with the element match added
    */
  def elemMatch(name : String, query : Query) : Projection = {
    val matchDoc = BSONBuilder().obj("$elemMatch", query.result)
    builder.obj(name, matchDoc)
    this
  }

  /** Do a positional slicing of an array.
    *
    * @see [[http://docs.mongodb.org/master/reference/operator/projection/positional/]]
    * @param name The name of the array field
    * @return This projection, with the positional ($) operator applied to `name`
    */
  def positional(name : String) : Projection = {
    builder.integer(name + ".$", 1)
    this
  }
}
object Projection {

  def apply() : Projection = new Projection

  /** Projection constructor for excluding fields.
    *
    * Builds a projection that excludes the named fields from the query result. Unless further
    * restricted, all other fields will still be included.
    *
    * @see [[http://docs.mongodb.org/master/core/read-operations-introduction/]]
    * @param names The names of the fields to exclude
    * @return A Projection excluding the named fields
    */
  def except(names : String*) : Projection = {
    val projection = Projection()
    names.foreach { field => projection.builder.integer(field, 0) }
    projection
  }

  /** Projection constructor for including only some fields.
    *
    * Builds a projection that returns only the named fields. The _id field, which MongoDB
    * returns by default, is explicitly suppressed.
    *
    * @see [[http://docs.mongodb.org/master/core/read-operations-introduction/]]
    * @param names The names of the fields to include
    * @return A Projection that causes the query to return only the named fields
    */
  def only(names : String*) : Projection = {
    val projection = Projection()
    names.foreach { field => projection.builder.integer(field, 1) }
    projection.builder.integer("_id", 0)
    projection
  }

  /** Projection constructor for mixing included and excluded fields.
    *
    * Each (name, flag) pair controls one field: `true` includes it, `false` excludes it.
    *
    * @see [[http://docs.mongodb.org/master/core/read-operations-introduction/]]
    * @param names Pairs of field name and include/exclude flag
    * @return A Projection with the requested field dispositions
    */
  def specific(names : (String, Boolean)*) : Projection = {
    val projection = Projection()
    names.foreach {
      case (field, keep) => projection.builder.integer(field, if (keep) 1 else 0)
    }
    projection
  }
}
| reactific/RxMongo | messages/src/main/scala/rxmongo/messages/Projection.scala | Scala | mit | 8,776 |
package com.geeksville.andropilot.gui
import android.os.Bundle
import android.preference.PreferenceFragment
import android.app.Activity
import com.geeksville.andropilot.FlurryActivity
import com.geeksville.andropilot.R
import android.preference.PreferenceActivity.Header
import android.preference.PreferenceActivity
import com.ridemission.scandroid.AndroidLogger
import scala.collection.JavaConverters._
import android.content.Context
import android.content.Intent
import scala.collection.mutable.Buffer
object SettingsActivity {
  /** Build an Intent that opens this settings activity directly on the
    * network-sharing panel (handled in SettingsActivity.onResume).
    */
  def sharingSettingsIntent(context: Context) = {
    val intent = new Intent(context, classOf[SettingsActivity])
    intent.setAction(Intent.ACTION_MANAGE_NETWORK_USAGE)
    intent
  }
}
class SettingsActivity extends PreferenceActivity with FlurryActivity with AndroidLogger {
  implicit def acontext: Context = this

  // Bug in android - we have to track pref headers on our own so that startPanel
  // can find a header by id later.
  // https://code.google.com/p/android/issues/detail?id=22430
  private var headers: Seq[Header] = Seq()

  override def onResume() {
    super.onResume()

    // If we were launched with a recognized action (e.g. from the system's
    // "manage network usage" screen), jump straight to the matching panel.
    for {
      i <- Option(getIntent);
      act <- Option(i.getAction)
    } yield {
      warn("Action: " + act)
      act match {
        case Intent.ACTION_MANAGE_NETWORK_USAGE =>
          startPanel(R.id.pref_share)
        case x @ _ =>
          error("Unknown action: " + x)
      }
    }
  }

  // Switch to the preference header whose id matches; no-op if not found.
  private def startPanel(id: Int) {
    /*
    val args = new Bundle
    args.putString("settings", name)
    startPreferencePanel("com.geeksville.andropilot.gui.SettingsFragment", args, 0, null, null, 0)
    */
    val found = headers.find { h =>
      warn("considering " + h + " " + h.id)
      id == h.id
    }
    found.foreach { h =>
      switchToHeader(h)
    }
  }

  /**
   * Populate the activity with the top-level headers.
   */
  override def onBuildHeaders(target: java.util.List[Header]) {
    // warn("Building prefs headers")
    // Remember the headers list (see note on `headers` above) before loading it.
    headers = target.asScala
    this.loadHeadersFromResource(R.xml.preferences, target);
  }
}
/** Fragment that inflates one of the preference XML panels, chosen by the
  * "settings" string passed in its arguments bundle.
  */
class SettingsFragment extends PreferenceFragment {

  // Maps a header's "settings" argument to the preference resource to inflate.
  val mapping = Map(
    "flight" -> R.xml.preferences_flight,
    "mavlink" -> R.xml.preferences_mavlink,
    "network" -> R.xml.preferences_network,
    "serial" -> R.xml.preferences_serial,
    "share" -> R.xml.preferences_share)

  override def onCreate(savedInstanceState: Bundle) {
    super.onCreate(savedInstanceState)

    // Option guards against a missing arguments bundle or key, and mapping.get
    // avoids the NoSuchElementException the old mapping(n) lookup would throw
    // for an unrecognized panel name (previously a crash on bad input).
    for {
      args <- Option(getArguments)
      name <- Option(args.getString("settings"))
      resource <- mapping.get(name)
    } addPreferencesFromResource(resource) // Show the requested screen, if any
  }
}
} | geeksville/arduleader | andropilot/src/main/scala/com/geeksville/andropilot/gui/SettingsActivity.scala | Scala | gpl-3.0 | 2,711 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.ui
import java.util.Properties
import org.json4s.jackson.JsonMethods._
import org.mockito.Mockito.mock
import org.apache.spark._
import org.apache.spark.LocalSparkContext._
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.internal.config
import org.apache.spark.rdd.RDD
import org.apache.spark.scheduler._
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.LocalRelation
import org.apache.spark.sql.catalyst.util.quietly
import org.apache.spark.sql.execution.{LeafExecNode, QueryExecution, SparkPlanInfo, SQLExecution}
import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics}
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.ui.SparkUI
import org.apache.spark.util.{AccumulatorMetadata, JsonProtocol, LongAccumulator}
class SQLListenerSuite extends SparkFunSuite with SharedSQLContext with JsonTestUtils {
  import testImplicits._
  import org.apache.spark.AccumulatorSuite.makeInfo

  // A two-row DataFrame with a filter, giving the listener a small plan with metrics.
  private def createTestDataFrame: DataFrame = {
    Seq(
      (1, 1),
      (2, 2)
    ).toDF().filter("_1 > 1")
  }

  // Properties carrying the SQL execution id, as attached to jobs by SQLExecution.
  private def createProperties(executionId: Long): Properties = {
    val properties = new Properties()
    properties.setProperty(SQLExecution.EXECUTION_ID_KEY, executionId.toString)
    properties
  }

  private def createStageInfo(stageId: Int, attemptId: Int): StageInfo = new StageInfo(
    stageId = stageId,
    attemptId = attemptId,
    // The following fields are not used in tests
    name = "",
    numTasks = 0,
    rddInfos = Nil,
    parentIds = Nil,
    details = ""
  )

  private def createTaskInfo(taskId: Int, attemptNumber: Int): TaskInfo = new TaskInfo(
    taskId = taskId,
    attemptNumber = attemptNumber,
    // The following fields are not used in tests
    index = 0,
    launchTime = 0,
    executorId = "",
    host = "",
    taskLocality = null,
    speculative = false
  )

  // Builds TaskMetrics containing one long accumulator per (id -> value) entry.
  private def createTaskMetrics(accumulatorUpdates: Map[Long, Long]): TaskMetrics = {
    val metrics = TaskMetrics.empty
    accumulatorUpdates.foreach { case (id, update) =>
      val acc = new LongAccumulator
      acc.metadata = AccumulatorMetadata(id, Some(""), true)
      acc.add(update)
      metrics.registerAccumulator(acc)
    }
    metrics
  }

  // End-to-end walk through the listener's metric aggregation: executor heartbeats,
  // stage retries (which reset metrics), task ends, and job/execution completion.
  test("basic") {
    def checkAnswer(actual: Map[Long, String], expected: Map[Long, Long]): Unit = {
      assert(actual.size == expected.size)
      expected.foreach { e =>
        // The values in actual can be SQL metrics meaning that they contain additional formatting
        // when converted to string. Verify that they start with the expected value.
        // TODO: this is brittle. There is no requirement that the actual string needs to start
        // with the accumulator value.
        assert(actual.contains(e._1))
        val v = actual.get(e._1).get.trim
        assert(v.startsWith(e._2.toString))
      }
    }

    val listener = new SQLListener(spark.sparkContext.conf)
    val executionId = 0
    val df = createTestDataFrame
    val accumulatorIds =
      SparkPlanGraph(SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan))
        .allNodes.flatMap(_.metrics.map(_.accumulatorId))
    // Assume all accumulators are long
    var accumulatorValue = 0L
    val accumulatorUpdates = accumulatorIds.map { id =>
      accumulatorValue += 1L
      (id, accumulatorValue)
    }.toMap

    listener.onOtherEvent(SparkListenerSQLExecutionStart(
      executionId,
      "test",
      "test",
      df.queryExecution.toString,
      SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
      System.currentTimeMillis()))

    val executionUIData = listener.executionIdToData(0)

    listener.onJobStart(SparkListenerJobStart(
      jobId = 0,
      time = System.currentTimeMillis(),
      stageInfos = Seq(
        createStageInfo(0, 0),
        createStageInfo(1, 0)
      ),
      createProperties(executionId)))
    listener.onStageSubmitted(SparkListenerStageSubmitted(createStageInfo(0, 0)))

    assert(listener.getExecutionMetrics(0).isEmpty)

    // Two running tasks each report the base updates -> totals are doubled.
    listener.onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate("", Seq(
      // (task id, stage id, stage attempt, accum updates)
      (0L, 0, 0, createTaskMetrics(accumulatorUpdates).accumulators().map(makeInfo)),
      (1L, 0, 0, createTaskMetrics(accumulatorUpdates).accumulators().map(makeInfo))
    )))

    checkAnswer(listener.getExecutionMetrics(0), accumulatorUpdates.mapValues(_ * 2))

    // Driver accumulator updates don't belong to this execution should be filtered and no
    // exception will be thrown.
    listener.onOtherEvent(SparkListenerDriverAccumUpdates(0, Seq((999L, 2L))))

    checkAnswer(listener.getExecutionMetrics(0), accumulatorUpdates.mapValues(_ * 2))

    // A later heartbeat replaces each task's running totals (task 1 now reports 2x).
    listener.onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate("", Seq(
      // (task id, stage id, stage attempt, accum updates)
      (0L, 0, 0, createTaskMetrics(accumulatorUpdates).accumulators().map(makeInfo)),
      (1L, 0, 0,
        createTaskMetrics(accumulatorUpdates.mapValues(_ * 2)).accumulators().map(makeInfo))
    )))

    checkAnswer(listener.getExecutionMetrics(0), accumulatorUpdates.mapValues(_ * 3))

    // Retrying a stage should reset the metrics
    listener.onStageSubmitted(SparkListenerStageSubmitted(createStageInfo(0, 1)))

    listener.onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate("", Seq(
      // (task id, stage id, stage attempt, accum updates)
      (0L, 0, 1, createTaskMetrics(accumulatorUpdates).accumulators().map(makeInfo)),
      (1L, 0, 1, createTaskMetrics(accumulatorUpdates).accumulators().map(makeInfo))
    )))

    checkAnswer(listener.getExecutionMetrics(0), accumulatorUpdates.mapValues(_ * 2))

    // Ignore the task end for the first attempt
    listener.onTaskEnd(SparkListenerTaskEnd(
      stageId = 0,
      stageAttemptId = 0,
      taskType = "",
      reason = null,
      createTaskInfo(0, 0),
      createTaskMetrics(accumulatorUpdates.mapValues(_ * 100))))

    checkAnswer(listener.getExecutionMetrics(0), accumulatorUpdates.mapValues(_ * 2))

    // Finish two tasks
    listener.onTaskEnd(SparkListenerTaskEnd(
      stageId = 0,
      stageAttemptId = 1,
      taskType = "",
      reason = null,
      createTaskInfo(0, 0),
      createTaskMetrics(accumulatorUpdates.mapValues(_ * 2))))
    listener.onTaskEnd(SparkListenerTaskEnd(
      stageId = 0,
      stageAttemptId = 1,
      taskType = "",
      reason = null,
      createTaskInfo(1, 0),
      createTaskMetrics(accumulatorUpdates.mapValues(_ * 3))))

    checkAnswer(listener.getExecutionMetrics(0), accumulatorUpdates.mapValues(_ * 5))

    // Submit a new stage
    listener.onStageSubmitted(SparkListenerStageSubmitted(createStageInfo(1, 0)))

    listener.onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate("", Seq(
      // (task id, stage id, stage attempt, accum updates)
      (0L, 1, 0, createTaskMetrics(accumulatorUpdates).accumulators().map(makeInfo)),
      (1L, 1, 0, createTaskMetrics(accumulatorUpdates).accumulators().map(makeInfo))
    )))

    checkAnswer(listener.getExecutionMetrics(0), accumulatorUpdates.mapValues(_ * 7))

    // Finish two tasks
    listener.onTaskEnd(SparkListenerTaskEnd(
      stageId = 1,
      stageAttemptId = 0,
      taskType = "",
      reason = null,
      createTaskInfo(0, 0),
      createTaskMetrics(accumulatorUpdates.mapValues(_ * 3))))
    listener.onTaskEnd(SparkListenerTaskEnd(
      stageId = 1,
      stageAttemptId = 0,
      taskType = "",
      reason = null,
      createTaskInfo(1, 0),
      createTaskMetrics(accumulatorUpdates.mapValues(_ * 3))))

    checkAnswer(listener.getExecutionMetrics(0), accumulatorUpdates.mapValues(_ * 11))

    assert(executionUIData.runningJobs === Seq(0))
    assert(executionUIData.succeededJobs.isEmpty)
    assert(executionUIData.failedJobs.isEmpty)

    listener.onJobEnd(SparkListenerJobEnd(
      jobId = 0,
      time = System.currentTimeMillis(),
      JobSucceeded
    ))
    listener.onOtherEvent(SparkListenerSQLExecutionEnd(
      executionId, System.currentTimeMillis()))

    assert(executionUIData.runningJobs.isEmpty)
    assert(executionUIData.succeededJobs === Seq(0))
    assert(executionUIData.failedJobs.isEmpty)

    checkAnswer(listener.getExecutionMetrics(0), accumulatorUpdates.mapValues(_ * 11))
  }

  // Execution-end arriving before the job-end event must still mark the job succeeded.
  test("onExecutionEnd happens before onJobEnd(JobSucceeded)") {
    val listener = new SQLListener(spark.sparkContext.conf)
    val executionId = 0
    val df = createTestDataFrame
    listener.onOtherEvent(SparkListenerSQLExecutionStart(
      executionId,
      "test",
      "test",
      df.queryExecution.toString,
      SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
      System.currentTimeMillis()))
    listener.onJobStart(SparkListenerJobStart(
      jobId = 0,
      time = System.currentTimeMillis(),
      stageInfos = Nil,
      createProperties(executionId)))
    listener.onOtherEvent(SparkListenerSQLExecutionEnd(
      executionId, System.currentTimeMillis()))
    listener.onJobEnd(SparkListenerJobEnd(
      jobId = 0,
      time = System.currentTimeMillis(),
      JobSucceeded
    ))

    val executionUIData = listener.executionIdToData(0)
    assert(executionUIData.runningJobs.isEmpty)
    assert(executionUIData.succeededJobs === Seq(0))
    assert(executionUIData.failedJobs.isEmpty)
  }

  // Same as above but with a second job that starts before, and ends after, the
  // execution-end event; both jobs must be recorded as succeeded.
  test("onExecutionEnd happens before multiple onJobEnd(JobSucceeded)s") {
    val listener = new SQLListener(spark.sparkContext.conf)
    val executionId = 0
    val df = createTestDataFrame
    listener.onOtherEvent(SparkListenerSQLExecutionStart(
      executionId,
      "test",
      "test",
      df.queryExecution.toString,
      SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
      System.currentTimeMillis()))
    listener.onJobStart(SparkListenerJobStart(
      jobId = 0,
      time = System.currentTimeMillis(),
      stageInfos = Nil,
      createProperties(executionId)))
    listener.onJobEnd(SparkListenerJobEnd(
        jobId = 0,
        time = System.currentTimeMillis(),
        JobSucceeded
    ))

    listener.onJobStart(SparkListenerJobStart(
      jobId = 1,
      time = System.currentTimeMillis(),
      stageInfos = Nil,
      createProperties(executionId)))
    listener.onOtherEvent(SparkListenerSQLExecutionEnd(
      executionId, System.currentTimeMillis()))
    listener.onJobEnd(SparkListenerJobEnd(
      jobId = 1,
      time = System.currentTimeMillis(),
      JobSucceeded
    ))

    val executionUIData = listener.executionIdToData(0)
    assert(executionUIData.runningJobs.isEmpty)
    assert(executionUIData.succeededJobs.sorted === Seq(0, 1))
    assert(executionUIData.failedJobs.isEmpty)
  }

  // Execution-end arriving before a job failure must still record the failed job.
  test("onExecutionEnd happens before onJobEnd(JobFailed)") {
    val listener = new SQLListener(spark.sparkContext.conf)
    val executionId = 0
    val df = createTestDataFrame
    listener.onOtherEvent(SparkListenerSQLExecutionStart(
      executionId,
      "test",
      "test",
      df.queryExecution.toString,
      SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
      System.currentTimeMillis()))
    listener.onJobStart(SparkListenerJobStart(
      jobId = 0,
      time = System.currentTimeMillis(),
      stageInfos = Seq.empty,
      createProperties(executionId)))
    listener.onOtherEvent(SparkListenerSQLExecutionEnd(
      executionId, System.currentTimeMillis()))
    listener.onJobEnd(SparkListenerJobEnd(
      jobId = 0,
      time = System.currentTimeMillis(),
      JobFailed(new RuntimeException("Oops"))
    ))

    val executionUIData = listener.executionIdToData(0)
    assert(executionUIData.runningJobs.isEmpty)
    assert(executionUIData.succeededJobs.isEmpty)
    assert(executionUIData.failedJobs === Seq(0))
  }

  // Non-SQL jobs must not accumulate per-stage state in the listener.
  test("SPARK-11126: no memory leak when running non SQL jobs") {
    val previousStageNumber = spark.sharedState.listener.stageIdToStageMetrics.size
    spark.sparkContext.parallelize(1 to 10).foreach(i => ())
    spark.sparkContext.listenerBus.waitUntilEmpty(10000)
    // listener should ignore the non SQL stage
    assert(spark.sharedState.listener.stageIdToStageMetrics.size == previousStageNumber)

    spark.sparkContext.parallelize(1 to 10).toDF().foreach(i => ())
    spark.sparkContext.listenerBus.waitUntilEmpty(10000)
    // listener should save the SQL stage
    assert(spark.sharedState.listener.stageIdToStageMetrics.size == previousStageNumber + 1)
  }

  test("SPARK-13055: history listener only tracks SQL metrics") {
    val listener = new SQLHistoryListener(sparkContext.conf, mock(classOf[SparkUI]))
    // We need to post other events for the listener to track our accumulators.
    // These are largely just boilerplate unrelated to what we're trying to test.
    val df = createTestDataFrame
    val executionStart = SparkListenerSQLExecutionStart(
      0, "", "", "", SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan), 0)
    val stageInfo = createStageInfo(0, 0)
    val jobStart = SparkListenerJobStart(0, 0, Seq(stageInfo), createProperties(0))
    val stageSubmitted = SparkListenerStageSubmitted(stageInfo)

    // This task has both accumulators that are SQL metrics and accumulators that are not.
    // The listener should only track the ones that are actually SQL metrics.
    val sqlMetric = SQLMetrics.createMetric(sparkContext, "beach umbrella")
    val nonSqlMetric = sparkContext.longAccumulator("baseball")
    val sqlMetricInfo = sqlMetric.toInfo(Some(sqlMetric.value), None)
    val nonSqlMetricInfo = nonSqlMetric.toInfo(Some(nonSqlMetric.value), None)
    val taskInfo = createTaskInfo(0, 0)
    taskInfo.setAccumulables(List(sqlMetricInfo, nonSqlMetricInfo))
    val taskEnd = SparkListenerTaskEnd(0, 0, "just-a-task", null, taskInfo, null)
    listener.onOtherEvent(executionStart)
    listener.onJobStart(jobStart)
    listener.onStageSubmitted(stageSubmitted)

    // Before SPARK-13055, this throws ClassCastException because the history listener would
    // assume that the accumulator value is of type Long, but this may not be true for
    // accumulators that are not SQL metrics.
    listener.onTaskEnd(taskEnd)
    val trackedAccums = listener.stageIdToStageMetrics.values.flatMap { stageMetrics =>
      stageMetrics.taskIdToMetricUpdates.values.flatMap(_.accumulatorUpdates)
    }
    // Listener tracks only SQL metrics, not other accumulators
    assert(trackedAccums.size === 1)
    assert(trackedAccums.head === ((sqlMetricInfo.id, sqlMetricInfo.update.get)))
  }

  // Metrics updated on the driver (not via task events) must reach the listener too.
  test("driver side SQL metrics") {
    val listener = new SQLListener(spark.sparkContext.conf)
    val expectedAccumValue = 12345
    val physicalPlan = MyPlan(sqlContext.sparkContext, expectedAccumValue)
    sqlContext.sparkContext.addSparkListener(listener)
    val dummyQueryExecution = new QueryExecution(spark, LocalRelation()) {
      override lazy val sparkPlan = physicalPlan
      override lazy val executedPlan = physicalPlan
    }

    SQLExecution.withNewExecutionId(spark, dummyQueryExecution) {
      physicalPlan.execute().collect()
    }

    // Listener events are delivered asynchronously; poll until the execution completes.
    def waitTillExecutionFinished(): Unit = {
      while (listener.getCompletedExecutions.isEmpty) {
        Thread.sleep(100)
      }
    }
    waitTillExecutionFinished()

    val driverUpdates = listener.getCompletedExecutions.head.driverAccumUpdates
    assert(driverUpdates.size == 1)
    assert(driverUpdates(physicalPlan.longMetric("dummy").id) == expectedAccumValue)
  }

  test("roundtripping SparkListenerDriverAccumUpdates through JsonProtocol (SPARK-18462)") {
    val event = SparkListenerDriverAccumUpdates(1L, Seq((2L, 3L)))
    val json = JsonProtocol.sparkEventToJson(event)
    assertValidDataInJson(json,
      parse("""
        |{
        |  "Event": "org.apache.spark.sql.execution.ui.SparkListenerDriverAccumUpdates",
        |  "executionId": 1,
        |  "accumUpdates": [[2,3]]
        |}
      """.stripMargin))
    JsonProtocol.sparkEventFromJson(json) match {
      case SparkListenerDriverAccumUpdates(executionId, accums) =>
        assert(executionId == 1L)
        accums.foreach { case (a, b) =>
          assert(a == 2L)
          assert(b == 3L)
        }
    }

    // Test a case where the numbers in the JSON can only fit in longs:
    val longJson = parse(
      """
        |{
        |  "Event": "org.apache.spark.sql.execution.ui.SparkListenerDriverAccumUpdates",
        |  "executionId": 4294967294,
        |  "accumUpdates": [[4294967294,3]]
        |}
      """.stripMargin)
    JsonProtocol.sparkEventFromJson(longJson) match {
      case SparkListenerDriverAccumUpdates(executionId, accums) =>
        assert(executionId == 4294967294L)
        accums.foreach { case (a, b) =>
          assert(a == 4294967294L)
          assert(b == 3L)
        }
    }
  }
}
/**
 * A dummy [[org.apache.spark.sql.execution.SparkPlan]] that updates a [[SQLMetrics]]
 * on the driver.
 */
private case class MyPlan(sc: SparkContext, expectedValue: Long) extends LeafExecNode {
  override def sparkContext: SparkContext = sc
  override def output: Seq[Attribute] = Seq()

  override val metrics: Map[String, SQLMetric] = Map(
    "dummy" -> SQLMetrics.createMetric(sc, "dummy"))

  override def doExecute(): RDD[InternalRow] = {
    // Bump the metric on the driver, then post the update so the listener can pick
    // it up even though no tasks report it. Returns an empty RDD.
    longMetric("dummy") += expectedValue

    SQLMetrics.postDriverMetricUpdates(
      sc,
      sc.getLocalProperty(SQLExecution.EXECUTION_ID_KEY),
      metrics.values.toSeq)
    sc.emptyRDD
  }
}
class SQLListenerMemoryLeakSuite extends SparkFunSuite {

  test("no memory leak") {
    quietly {
      // Fail tasks immediately and cap retained executions so the test runs quickly.
      val conf = new SparkConf()
        .setMaster("local")
        .setAppName("test")
        .set(config.MAX_TASK_FAILURES, 1)
        .set("spark.sql.ui.retainedExecutions", "50")
      withSpark(new SparkContext(conf)) { sc =>
        SparkSession.sqlListener.set(null)
        val spark = new SparkSession(sc)
        import spark.implicits._
        // Run 100 successful executions and 100 failed executions.
        // Each execution only has one job and one stage.
        (0 until 100).foreach { _ =>
          val df = Seq((1, 1), (2, 2)).toDF()
          df.collect()
          try {
            df.foreach(_ => throw new RuntimeException("Oops"))
          } catch {
            case _: SparkException => // This is expected for a failed job
          }
        }
        sc.listenerBus.waitUntilEmpty(10000)
        val listener = spark.sharedState.listener
        assert(listener.getCompletedExecutions.size <= 50)
        assert(listener.getFailedExecutions.size <= 50)
        // 50 for successful executions and 50 for failed executions
        assert(listener.executionIdToData.size <= 100)
        assert(listener.jobIdToExecutionId.size <= 100)
        assert(listener.stageIdToStageMetrics.size <= 100)
      }
    }
  }
}
| minixalpha/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLListenerSuite.scala | Scala | apache-2.0 | 19,915 |
package weightedroundrobin
import cats.data.State
import scala.concurrent.{ExecutionContext, Future}
import scala.language.higherKinds
import scala.util.control.NonFatal
import scala.reflect.runtime.universe._
/**
* Created by Tharindu Galappaththi on 9/29/17.
*/
trait RoundRobin[R <: Resource, F[B] <: Seq[B]] {
  /** Executes the given resource consumer function `fn` and returns the result of `fn` (or the
    * exception it threw) along with the updated resource pool. Causes side effects by invoking
    * `syncGlobalWithUpdated` on the consumed pool.
    * Note: consuming the resource pool is not thread safe; concurrent calls
    * could result in lost updates for resources.
    *
    * @param fn Resource Consumer Function
    * @param rewarder Function to reward the resource when it is successfully consumed
    * @param penalizer Function to penalize the resource when its consumption failed
    * @param resourcePool Global Resources pool to be consumed
    * @param syncGlobalWithUpdated Function to execute after the resource has been consumed by the `fn`
    * @param tag Implicit Type tag to identify Resource
    * @tparam S Return type of the resource consumer function `fn`
    * @return a `Tuple2` of updated `resourcePool` with an `Either` of result or the occurred exception
    */
  def forResource[S](fn: (R) => S)
                    (rewarder: Int => Int, penalizer: Int => Int, resourcePool: F[R])(syncGlobalWithUpdated: F[R] => F[R])
                    (implicit tag: TypeTag[R]): (F[R], Either[Throwable, S]) = {
    // `modify` runs after the consumer: the locally-updated pool is pushed out
    // through the caller-supplied synchronisation hook.
    val state: State[F[R], Either[Throwable, S]] = resourceConsumer(fn)(rewarder, penalizer).modify(s => {
      syncGlobalWithUpdated(s)
    })
    state.run(resourcePool).value
  }
  /** Executes the given asynchronous resource consumer function `fn` and returns the result of
    * `fn` (or the exception it produced) along with the updated resource pool. Causes side
    * effects by invoking `syncGlobalWithUpdated` on the consumed pool.
    * Note: consuming the resource pool is not thread safe; concurrent calls
    * could result in lost updates for resources.
    *
    * @param fn Resource Consumer Function that returns a `Future`
    * @param rewarder Function to reward the resource when it is successfully consumed
    * @param penalizer Function to penalize the resource when its consumption failed
    * @param resourcePool Global Resources pool to be consumed
    * @param syncGlobalWithUpdated Function to execute after the resource has been consumed by the `fn`
    * @param tag Implicit Type tag to identify Resource
    * @tparam S type of the `Future` that the resource consumer function `fn` will return
    * @return a `Tuple2` of updated `resourcePool` with an `Either` of result or the occurred exception
    */
  def forResourceAsync[S](fn: (R) => Future[S])
                         (rewarder: Int => Int, penalizer: Int => Int, resourcePool: F[R])(syncGlobalWithUpdated: F[R] => F[R])
                         (implicit executionContext: ExecutionContext, tag: TypeTag[R]): (Future[F[R]], Future[Either[Throwable, S]]) = {
    // Same shape as forResource, but the pool is threaded through a Future so the
    // synchronisation hook runs once the asynchronous consumption completes.
    val state = resourceConsumerAsync(fn)(rewarder, penalizer).modify(f => {
      f.map(t => {
        syncGlobalWithUpdated(t)
      })
    })
    state.run(Future.successful[F[R]](resourcePool)).value
  }
  /** Pure function which produces a `State` monad to consume any `Resource` pool using the
    * given function `fn`. Nothing is executed until the returned state is run.
    *
    * @param fn Function to consume a resource of type `R`
    * @param rewarder Function to reward the resource when it is successfully consumed
    * @param penalizer Function to penalize the resource when its consumption failed
    * @param tag Implicit Type tag to identify Resource
    * @tparam S type of the result that the resource consumer function `fn` will return
    * @return a `cats.data.State` monad that produces a `Tuple2` of updated resource pool and the result/exception
    */
  def resourceConsumer[S](fn: (R) => S)(rewarder: Int => Int, penalizer: Int => Int)
                         (implicit tag: TypeTag[R]): State[F[R], Either[Throwable, S]] = {
    val state = State[F[R], Either[Throwable, S]] {
      pool => consumeResourceCandidate(fn)(pool, rewarder, penalizer)
    }
    state
  }
  /** Pure function which produces a `State` monad to consume any `Resource` pool using the
    * given asynchronous function `fn`. Nothing is executed until the returned state is run.
    *
    * @param fn Function to consume a resource of type `R`
    * @param rewarder Function to reward the resource when it is successfully consumed
    * @param penalizer Function to penalize the resource when its consumption failed
    * @param tag Implicit Type tag to identify Resource
    * @tparam S type of the `Future` that the resource consumer function `fn` will return
    * @return a `cats.data.State` monad that produces a `Tuple2` of updated resource pool and the result/exception
    */
  def resourceConsumerAsync[S](fn: (R) => Future[S])(rewarder: Int => Int, penalizer: Int => Int)
                              (implicit executionContext: ExecutionContext, tag: TypeTag[R]): State[Future[F[R]], Future[Either[Throwable, S]]] = {
    val state = State[Future[F[R]], Future[Either[Throwable, S]]] {
      poolF => {
        // Split the combined (pool, result) future into the two components the
        // State's shape requires.
        val consumedPoolAndExecution = poolF.flatMap(p => consumeResourceCandidateAsync(fn)(p, rewarder, penalizer))
        val consumedPool = consumedPoolAndExecution.flatMap(_._1)
        val execution = consumedPoolAndExecution.map(_._2)
        (consumedPool, execution)
      }
    }
    state
  }
  /** Actual modifier logic for the `State` monad: picks a candidate, applies the rewarder
    * optimistically, then runs `fn` on the candidate.
    *
    * @param fn Function to consume a resource of type `R`
    * @param resourcePool collection of resources to be consumed
    * @param rewarder Function to reward the resource when it is successfully consumed
    * @param penalizer Function to penalize the resource when its consumption failed
    * @param tag Implicit Type tag to identify Resource
    * @tparam S type of the result that the resource consumer function `fn` will return
    * @return Tuple of consumed pool and exception/result
    */
  private def consumeResourceCandidate[S](fn: (R) => S)(resourcePool: F[R], rewarder: Int => Int, penalizer: Int => Int)
                                         (implicit tag: TypeTag[R]): (F[R], Either[Throwable, S]) = {
    val (candidate, rewardedPool) = updateResource(resourcePool, rewarder)
    try {
      val result = fn(candidate)
      (rewardedPool, Right(result))
    } catch {
      case NonFatal(ex) =>
        // On failure the optimistically-rewarded pool is discarded; the penalizer is
        // re-applied to the ORIGINAL pool, which selects the same candidate again.
        val (_, penalizedPool) = updateResource(resourcePool, penalizer)
        (penalizedPool, Left(ex))
    }
  }
  /** Actual modifier logic for the `State` monad, asynchronous variant: picks a candidate,
    * applies the rewarder optimistically, then runs `fn` and reconciles the pool once the
    * returned `Future` completes.
    *
    * @param fn Function to consume a resource of type `R`
    * @param resourcePool collection of resources to be consumed
    * @param rewarder Function to reward the resource when it is successfully consumed
    * @param penalizer Function to penalize the resource when its consumption failed
    * @param tag Implicit Type tag to identify Resource
    * @tparam S type of the result that the resource consumer function `fn` will return
    * @return Tuple of consumed pool and exception/result
    */
  private def consumeResourceCandidateAsync[S](fn: (R) => Future[S])(resourcePool: F[R], rewarder: Int => Int, penalizer: Int => Int)
                                              (implicit executionContext: ExecutionContext, tag: TypeTag[R]): Future[(Future[F[R]], Either[Throwable, S])] = {
    val (candidate, rewardedPool) = updateResource(resourcePool, rewarder)
    // Note: fn starts executing here, before the result is folded below.
    val triggeredFn = fn(candidate)
    val triggeredTask: Future[Either[(Throwable, F[R]), (S, F[R])]] = triggeredFn.map(v => {
      Right((v, rewardedPool))
    }).recoverWith({
      case NonFatal(ex) =>
        // Mirror of the synchronous path: the rewarded pool is dropped and the
        // penalizer is applied to the original pool instead.
        val (_, penalizedPool) = updateResource(resourcePool, penalizer)
        Future.successful(Left((ex, penalizedPool)))
    })
    triggeredTask.map({
      case Left((ex, pool)) => (Future.successful(pool), Left(ex))
      case Right((v, pool)) => (Future.successful(pool), Right(v))
    })
  }
  /** Update the resource pool based on the resource type and weight, selecting the next
    * consumable candidate. Assumes a non-empty pool (`head` would throw otherwise).
    *
    * @param resourcePool collection of resources to be consumed
    * @param weightManipulator Function to update the weight of the selected resource
    * @param tag Implicit Type tag to identify Resource
    * @tparam A Type of the Resource
    * @return tuple of consumable resource with the resultant pool
    */
  private def updateResource[A](resourcePool: F[A], weightManipulator: Int => Int)(implicit tag: TypeTag[R]): (A, F[A]) = {
    typeOf(tag) match {
      case t if t =:= typeOf[NeutralResource] =>
        // Plain rotation: the head is consumed and moved to the back of the queue.
        val head = resourcePool.head
        val alteredPool = resourcePool.tail :+ head
        (head, alteredPool.asInstanceOf[F[A]])
      case t if t =:= typeOf[WeightedResource] =>
        // Pick the resource with the highest current weight, apply the manipulator
        // (capped at maxWeight) and put the adjusted copy back at the front.
        // The ORIGINAL (unadjusted) candidate is what gets returned for consumption.
        val casted: Seq[WeightedResource] = resourcePool.asInstanceOf[Seq[WeightedResource]]
        val sortedResources = casted.sorted(Ordering.by[WeightedResource, Int](_.currentWeight).reverse)
        val candidate = sortedResources.head
        val updatedCandidate = candidate.copy(currentWeight = math.min(weightManipulator(candidate.currentWeight), candidate.maxWeight))
        val alteredList = updatedCandidate +: sortedResources.tail
        (candidate.asInstanceOf[A], alteredList.asInstanceOf[F[A]])
      // NOTE(review): no case for other Resource subtypes — any other R fails at
      // runtime with a MatchError.
    }
  }
} | TharinduDG/weightedroundrobin | src/main/scala/weightedroundrobin/RoundRobin.scala | Scala | apache-2.0 | 9,572 |
/*
* This software is licensed under the GNU Affero General Public License, quoted below.
*
* This file is a part of BitWatts.
*
* Copyright (C) 2011-2015 Inria, University of Lille 1,
* University of Neuchâtel.
*
* BitWatts is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* BitWatts is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with BitWatts.
*
* If not, please consult http://www.gnu.org/licenses/agpl-3.0.html.
*/
package org.powerapi.bitwatts.module.virtio
import org.powerapi.core.{ConfigValue, Configuration}
/**
 * Main configuration for the Virtio sensor.
 *
 * Exposes the virtio port read from the `powerapi.virtio.port` configuration
 * key (under the optional prefix), defaulting to 0 when the key is absent or
 * cannot be read as an integer.
 *
 * @author <a href="mailto:maxime.colmant@gmail.com">Maxime Colmant</a>
 * @author <a href="mailto:mascha.kurpicz@unine.ch">Mascha Kurpicz</a>
 */
class VirtioSensorConfiguration(prefix: Option[String]) extends Configuration(prefix) {
  lazy val port = {
    val key = s"${configurationPath}powerapi.virtio.port"
    load { _.getInt(key) } match {
      case ConfigValue(value) => value
      case _ => 0
    }
  }
}
| Spirals-Team/bitwatts | bitwatts-core/src/main/scala/org/powerapi/bitwatts/module/virtio/VirtioSensorConfiguration.scala | Scala | agpl-3.0 | 1,412 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.rdd
import org.bdgenomics.adam.models.ReferenceRegion
import org.bdgenomics.adam.rdd.variant.VariantArray
import org.bdgenomics.adam.util.ADAMFunSuite
import org.bdgenomics.formats.avro.{ AlignmentRecord, Variant }
import org.bdgenomics.utils.interval.array.IntervalArray
import scala.reflect.ClassTag
class TreeRegionJoinSuite extends ADAMFunSuite {

  sparkTest("run a join between data on a single contig") {
    // Right side: five variants keyed by region; each variant's start position
    // doubles as its index so we can identify it after the join.
    val rightRdd = sc.parallelize(Seq(
      (ReferenceRegion("chr1", 10L, 20L), 0),
      (ReferenceRegion("chr1", 15L, 25L), 1),
      (ReferenceRegion("chr1", 30L, 50L), 2),
      (ReferenceRegion("chr1", 60L, 70L), 3),
      (ReferenceRegion("chr1", 90L, 100L), 4)))
      .map { case (region, idx) =>
        (region, Variant.newBuilder
          .setStart(idx.toLong)
          .build)
      }
    val tree = IntervalArray[ReferenceRegion, Variant](rightRdd,
      VariantArray.apply(_, _))

    // Left side: five reads keyed by region, indexed the same way.
    val leftRdd = sc.parallelize(Seq(
      (ReferenceRegion("chr1", 12L, 22L), 0),
      (ReferenceRegion("chr1", 20L, 35L), 1),
      (ReferenceRegion("chr1", 40L, 55L), 2),
      (ReferenceRegion("chr1", 75L, 85L), 3),
      (ReferenceRegion("chr1", 95L, 105L), 4)))
      .map { case (region, idx) =>
        (region, AlignmentRecord.newBuilder
          .setStart(idx.toLong)
          .build)
      }

    // Join reads against the variant tree, reducing both sides back to indices.
    val joinData = InnerTreeRegionJoin().runJoinAndGroupByRightWithTree(tree,
      leftRdd)
      .map { case (variants, read) =>
        (variants.map(_.getStart.toInt), read.getStart.toInt)
      }.collect

    assert(joinData.size === 5)

    // Keep only reads that overlapped at least one variant, keyed by read index.
    val joinMap = joinData.filter(_._1.nonEmpty)
      .map(_.swap)
      .toMap
      .mapValues(_.toSet)

    assert(joinMap.size === 4)
    assert(joinMap(0) === Set(0, 1))
    assert(joinMap(1) === Set(1, 2))
    assert(joinMap(2) === Set(2))
    assert(joinMap(4) === Set(4))
  }
}
| laserson/adam | adam-core/src/test/scala/org/bdgenomics/adam/rdd/TreeRegionJoinSuite.scala | Scala | apache-2.0 | 2,919 |
package io.transwarp.midas.constant.midas.params
/**
* Created by tianming on 4/20/16.
*/
/**
 * String keys for parameters shared across multiple ML pipeline stages.
 * Each constant is the canonical parameter name used when configuring a stage.
 */
object SharedParams {
  // shared params
  val RegParam = "regParam" // regularization parameter (>= 0)
  val MaxIter = "maxIter" // maximum number of iterations (>= 0)
  val FeaturesCol = "featuresCol" // features column name
  val LabelCol = "labelCol" // label column name
  val PredictionCol = "predictionCol" // prediction column name
  val RawPredictionCol = "rawPredictionCol" // raw prediction (a.k.a. confidence) column name
  val ProbCol = "probabilityCol" // column name for predicted class conditional probabilities
  val Threshold = "threshold" // threshold in binary classification prediction, in range [0, 1]
  val Thresholds = "thresholds" // thresholds in multi-class classification
  val InputCol = "inputCol" // input column name
  val InputCols = "inputCols" // input column names
  val OutputCol = "outputCol" // output column name
  val OutputCols = "outputCols" // output column names
  val CheckpointInterval = "checkpointInterval" // checkpoint interval (>= 1)
  val FitIntercept = "fitIntercept" // whether to fit an intercept term
  // whether to standardize the training features before fitting the model
  val Standardization = "standardization"
  val Seed = "seed" // random seed
  val ElasticNetParam = "elasticNetParam" // the ElasticNet mixing parameter, in range [0, 1]
  val Tol = "tol" // the convergence tolerance for iterative algorithms
  val StepSize = "stepSize" // step size to be used for each iteration of optimization
  val WeightCol = "weightCol" // weight column name
  val MinDocFreq = "minDocFreq" // minimum document frequency (TF-IDF)
  val QuantilesCol = "quantilesCol"
}
| transwarpio/rapidminer | api-driver/src/main/scala/io/transwarp/midas/constant/midas/params/SharedParams.scala | Scala | gpl-3.0 | 1,754 |
package com.sretsnom.mangareader.server
import akka.actor.{ActorSystem, Props}
import akka.io.IO
import com.sretsnom.mangareader.server.ServiceConfig.HttpConfig._
import com.typesafe.scalalogging.LazyLogging
import spray.can.Http
// Application entry point: boots an actor system, logs the startup banner and
// binds the spray-can HTTP layer to the configured interface/port, routing
// incoming connections to the HttpServerActor.
object ServerApp extends App with LazyLogging {
  implicit val system = ActorSystem("mr-server")
  val httpServer = system.actorOf(Props[HttpServerActor], "httpserver")
  logger.info(banner)
  IO(Http) ! Http.Bind(httpServer, interface, port)
  // ASCII-art startup banner. NOTE(review): stripMargin is a no-op here since no
  // line starts with '|'; leading whitespace in the art is kept as-is.
  private def banner =
    """
      =============================================================
           `-:-                          .:-.
      `-+shmNNNo                        :NNNmdy+:`
     -hmNNNNNNNNo                       :NNNNNNNNNd/
    `hNNNNNNNNNo                        :NNNNNNNNNm.
     `omNNNNNNNs                        /NNNNNNNNy.
      `+hNNNNNN+:/+oo+////+ooo+//mNNNNNdo.
        .sNNNNNNmo.            `/dNNNNNNh:
      `+hNNNNNN+:/+oo+////+ooo+//mNNNNNdo.
     `omNNNNNNNs                        /NNNNNNNNy.
    `hNNNNNNNNNo                        :NNNNNNNNNm.
     -hmNNNNNNNNo                       :NNNNNNNNNd/
      `-+sdmNNNo                        :NNNmdy+:`
           `-:-                          .:-.
      =============================================================
                    IDM SELF SERVICE TOOLING
    """.stripMargin
}
| sretsnom/mangareader | serverMR/src/main/scala/com/sretsnom/mangareader/server/ServerApp.scala | Scala | gpl-3.0 | 1,471 |
package com.cloudray.scalapress.plugin.ecommerce
/** @author Stephen Samuel */
class EmailReportService(dao: OrderDao) {
  // All e-mail addresses reported by the order DAO.
  def emails: Seq[String] = dao.emails
  // The addresses rendered one per line (a single-column CSV export).
  def csv: String = emails.mkString("\n")
}
| vidyacraghav/scalapress | src/main/scala/com/cloudray/scalapress/plugin/ecommerce/EmailReportService.scala | Scala | apache-2.0 | 205 |
import org.scalatest.{FlatSpec, Matchers}
/**
* Created by Chongguang on 2016/12/18.
*/
import december2016.Day18._
class Day18Spec extends FlatSpec with Matchers {

  // Fixed: the original description ("isOpen: should reture ture for b, c, d, e, or f")
  // was misspelled and described a different function than the one exercised here.
  "newRow" should "compute the next row of tiles from the previous row" in {
    newRow("..^^.") shouldBe ".^^^^"
    newRow(".^^^..^.^^") shouldBe "^^.^^^..^^"
  }

  "constructRows" should "work" in {
    // Small grid: 3 rows grown from a 5-tile seed.
    val rows1 = constructRows("..^^.", 3)
    rows1(0) shouldBe "..^^."
    rows1(1) shouldBe ".^^^^"
    rows1(2) shouldBe "^^..^"
    // Larger grid: first 5 of 10 rows grown from a 10-tile seed.
    val rows2 = constructRows(".^^.^.^^^^", 10)
    rows2(0) shouldBe ".^^.^.^^^^"
    rows2(1) shouldBe "^^^...^..^"
    rows2(2) shouldBe "^.^^.^.^^."
    rows2(3) shouldBe "..^^...^^^"
    rows2(4) shouldBe ".^^^^.^^.^"
  }

  "countSafe" should "work" in {
    val rows1 = constructRows("..^^.", 3)
    countSafe(rows1) shouldBe 6
    val rows2 = constructRows(".^^.^.^^^^", 3)
    countSafe(rows2) shouldBe 12
  }

  // Should agree with countSafe on the same inputs.
  "countSafeOptMem" should "work" in {
    countSafeOptMem("..^^.", 3) shouldBe 6
    countSafeOptMem(".^^.^.^^^^", 3) shouldBe 12
  }
}
/*
..^^.
.^^^^
^^..^
*/
/*
.^^.^.^^^^
^^^...^..^
^.^^.^.^^.
..^^...^^^
.^^^^.^^.^
^^..^.^^..
^^^^..^^^.
^..^^^^.^^
.^^^..^.^^
^^.^^^..^^
*/ | chongguang/adventofcode | src/test/scala/Day18Spec.scala | Scala | mit | 1,218 |
package com.monkeynuthead.keys
import org.monkeynuthead.scalajs.tone.Tone
import org.scalajs.dom
import scalacss.ScalatagsCss._
import scalatags.JsDom.all._
import scalatags.JsDom.svgTags.{polygon, svg}
import scalatags.JsDom.svgAttrs.points
// Scala.js entry point: renders a two-octave SVG piano keyboard into the page
// and wires mouse events to a Tone.js synth so keys play notes.
object KeysApp {
  // Static description of one note: display name, an id-safe prefix, its visual
  // style, and a function mapping an x-offset to (polygon points, next offset).
  private case class NoteConfig(name: String, idPrefix: String, style: Modifier, points: (Int) => (String, Int)) {
    // DOM/note id for this note in a given octave, e.g. "C4".
    def id(octave: Integer): String = name + octave
  }
  // The twelve notes of one octave, in keyboard order.
  private val Notes = List(
    NoteConfig("C", "C", KeyStyles.WhiteNoteAfterWhite, KeyStyles.WhiteNoteAfterWhitePoints),
    NoteConfig("C#", "CSharp", KeyStyles.BlackNote, KeyStyles.BlackNotePoints),
    NoteConfig("D", "D", KeyStyles.WhiteNoteAfterBlack, KeyStyles.WhiteNoteAfterBlackPoints),
    NoteConfig("Eb", "EFlat", KeyStyles.BlackNote, KeyStyles.BlackNotePoints),
    NoteConfig("E", "E", KeyStyles.WhiteNoteAfterBlack, KeyStyles.WhiteNoteAfterBlackPoints),
    NoteConfig("F", "F", KeyStyles.WhiteNoteAfterWhite, KeyStyles.WhiteNoteAfterWhitePoints),
    NoteConfig("F#", "FSharp", KeyStyles.BlackNote, KeyStyles.BlackNotePoints),
    NoteConfig("G", "G", KeyStyles.WhiteNoteAfterBlack, KeyStyles.WhiteNoteAfterBlackPoints),
    NoteConfig("Ab", "AFlat", KeyStyles.BlackNote, KeyStyles.BlackNotePoints),
    NoteConfig("A", "A", KeyStyles.WhiteNoteAfterBlack, KeyStyles.WhiteNoteAfterBlackPoints),
    NoteConfig("Bb", "BFlat", KeyStyles.BlackNote, KeyStyles.BlackNotePoints),
    NoteConfig("B", "B", KeyStyles.WhiteNoteAfterBlack, KeyStyles.WhiteNoteAfterBlackPoints)
  )
  // A concrete key on the rendered keyboard (note + octave).
  private case class Note(id: String, octave: Integer, config: NoteConfig)
  // Mouse handlers: press starts the note; release or leaving the key stops it.
  private def noteMouseDown(playNote: String => Unit, note: Note)(e: dom.MouseEvent): Unit = {
    println(s"MouseDown ${note.id}")
    playNote(note.id)
  }
  private def noteMouseUp(releaseNote: String => Unit, note: Note)(e: dom.MouseEvent): Unit = {
    println(s"MouseUp ${note.id}")
    releaseNote(note.id)
  }
  private def noteMouseOut(releaseNote: String => Unit, note: Note)(e: dom.MouseEvent): Unit = {
    println(s"MouseOut ${note.id}")
    releaseNote(note.id)
  }
  // Builds the SVG polygon for one key at the given x-offset; returns the polygon
  // together with the offset where the next key starts.
  private def createNote(playNote: String => Unit, releaseNote: String => Unit,
                         offset: Int, octave: Int, noteConfig: NoteConfig): (Modifier, Int) = {
    val (calculatedPoints, newOffset) = noteConfig.points(offset)
    val note = Note(noteConfig.id(octave), octave, noteConfig)
    val modifier = polygon(
      id := note.id,
      onmousedown := noteMouseDown(playNote, note) _,
      onmouseup := noteMouseUp(releaseNote, note) _,
      onmouseout := noteMouseOut(releaseNote, note) _,
      note.config.style,
      points := calculatedPoints,
      note.id
    )
    (modifier, newOffset)
  }
  // Lays out octaves 4 and 5, threading the x-offset through a fold.
  private def createKeys(playNote: String => Unit, releaseNote: String => Unit): Seq[Modifier] = {
    val octaveNotes = for (
      octave <- 4 to 5;
      note <- Notes
    ) yield (octave, note)
    octaveNotes.foldLeft((0, Seq.newBuilder[Modifier])) {
      case ((offset, builder), (octave, note)) =>
        val (modifier, newOffset) = createNote(playNote, releaseNote, offset, octave, note)
        (newOffset, builder += modifier)
    }._2.result()
  }
  // Injects styles, creates the synth and renders the keyboard into the body.
  private[keys] def setupUI(): Unit = {
    dom.document.body.appendChild(KeyStyles.renderToHtmlElement.render)
    val instrument = new Tone.Synth().toMaster()
    // Tracks which notes are currently held, so a stray mouse-out/up for a note
    // that was never pressed does not trigger a release.
    var playingNotes: Set[String] = Set.empty
    val playNote = (note: String) => {
      playingNotes = playingNotes + note
      instrument.triggerAttack(note)
    }
    // NOTE(review): triggerRelease takes no note argument here — presumably the
    // synth is monophonic so releasing "the" note suffices; confirm against Tone.js.
    val releaseNote = (note: String) => if (playingNotes.contains(note)) {
      playingNotes = playingNotes - note
      instrument.triggerRelease()
    }
    val keyboard = svg(
      createKeys(playNote, releaseNote):_*,
    )(KeyStyles.Keyboard)
    dom.document.body.appendChild(keyboard.render)
  }
  def main(args: Array[String]): Unit = {
    dom.window.onload = (e: dom.Event) => setupUI()
  }
}
| georgenicoll/keys | src/main/scala/com/monkeynuthead/keys/KeysApp.scala | Scala | gpl-3.0 | 4,013 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.feats
import io.truthencode.ddo.activation.{OnMeleeSpecialAttack, OnToggleEvent}
import io.truthencode.ddo.model.effect.ActiveEvent
import io.truthencode.ddo.model.misc.{CoolDown, DefaultCoolDown, DefaultSpellCoolDown}
/**
* Created by adarr on 1/29/2017.
* @note
* these should be moved out of the Feat package and renamed as they can and should be used for
* stances / toggles etc that are granted via enhancements etc
*
* Also, perhaps clickies?
*/
/** Root marker for the kinds of behaviour a feat (or stance / toggle) can have. */
sealed trait FeatType
/**
 * This effect can be turned on or off via a toggle.
 */
trait Toggle extends FeatType
/**
 * This effect is a stance, which means it can be on or off, but may also have some stacking
 * restrictions.
 * i.e. only one offensive combat stance may be active at any given time, and automatically toggles
 * off a defensive combat stance, however it may be combined with a non-conflicting stance, such as
 * a wizards undead shroud or the iconic feat Amauntor's Flames.
 */
trait Stance extends FeatType with Toggle with ActiveFeat with OnToggleEvent with DefaultCoolDown
/**
 * This effect must be explicitly activated and is generally a short-term effect such as uncanny
 * dodge and may be subject to a cool-down.
 */
trait ActiveFeat extends FeatType with CoolDown {
  self: ActiveEvent =>
}
/**
 * This stance has effects that increase your defense by increasing your dodge, armor class or other
 * defensive measures.
 *
 * It is exclusive of Offense combat stances.
 */
trait DefensiveCombatStance extends FeatType with Stance
/**
 * This effect increases your offensive capabilities by increasing damage, critical multiplier,
 * spell dc / penetration etc.
 *
 * It is exclusive of Defensive Combat stances.
 */
trait OffensiveCombatStance extends FeatType with Stance
/**
 * This effect benefits ranged combat such as missile weapons.
 */
trait RangedCombatStance extends FeatType with Stance
/**
 * Listed on Improved Precise Shot; need to see what Archer's Focus and any other ranged stance such
 * as Inquisitive uses.
 * @todo
 *   is this a duplicate of RangedCombatStance?
 */
trait OffensiveRangedStance extends FeatType with Stance
/**
 * This provides beneficial effects to spells and spell like abilities such as extending length,
 * range or power.
 */
trait MetaMagic extends FeatType with Stance
/**
 * This effect is considered permanent and always on. A given feat or ability may provide both
 * active and passive effects.
 */
trait Passive extends FeatType
/** Marker for feats that grant a special attack. */
trait SpecialAttack extends FeatType
/** Marker for feats that grant an ability. */
trait Ability extends FeatType
/**
 * This is a tactical Feat such as Trip and Stunning Blow which can generally be affected by things
 * such as Combat Mastery
 */
trait Tactical extends FeatType with OnMeleeSpecialAttack
/**
 * Basic trait to categorize epic feats. Currently used only for better readability and testing.
 */
sealed trait EpicFeatCategory extends FeatType
/** Epic feat category: general passive benefit. */
protected[feats] trait GeneralPassive extends EpicFeatCategory with Passive
/** Epic feat category: passive benefit to ranged combat. */
protected[feats] trait RangedCombatPassive extends EpicFeatCategory with Passive
/** Epic feat category: active spell-granting feat with the default spell cool-down. */
protected[feats] trait SpellFeats
  extends EpicFeatCategory with ActiveFeat with DefaultSpellCoolDown {
  self: ActiveEvent =>
}
/** Epic feat category: passive benefit to spell casting. */
protected[feats] trait SpellCastingPassive extends EpicFeatCategory with Passive
/** Epic feat category: active spell-casting benefit. */
protected[feats] trait SpellCastingActive extends EpicFeatCategory with ActiveFeat {
  self: ActiveEvent =>
}
/** Epic feat category: metamagic-style benefit. */
protected[feats] trait EpicMetaMagic extends EpicFeatCategory
/** Epic feat category: restricted to particular classes. */
trait ClassRestricted extends EpicFeatCategory
/** Epic feat category: passive feat available without prerequisites. */
protected[feats] trait FreePassive extends EpicFeatCategory with Passive
| adarro/ddo-calc | subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/feats/FeatType.scala | Scala | apache-2.0 | 4,357 |
package org.hotsextra.matchmaking
package entries
import java.time.Instant
import rating.Rating
// Entry in the Hero League matchmaking queue: player id, rating and join time.
case class HeroLeagueEntry(playerid: Int, rating: Rating, jointime: Instant)
case class QuickMatchEntry(playerid: Int, rating: Rating, jointime: Instant, hero: Option[Hero]) | martijnhoekstra/hotsextra-matchmaker | src/main/scala/org/hotsextra/matchmaking/entries/Entry.scala | Scala | agpl-3.0 | 271 |
package is.hail.types.physical
import is.hail.annotations.{Region, UnsafeOrdering, _}
import is.hail.asm4s.{Code, coerce, const, _}
import is.hail.expr.ir.EmitMethodBuilder
import is.hail.types.virtual.TInt32
// The two interned variants of the physical 32-bit integer type: one that may
// be missing and one that is required (non-missing).
case object PInt32Optional extends PInt32(false)
case object PInt32Required extends PInt32(true)
// Physical type for 32-bit signed integers, stored off-heap as 4 bytes.
class PInt32(override val required: Boolean) extends PNumeric with PPrimitive {
  lazy val virtualType: TInt32.type = TInt32
  def _asIdent = "int32"
  override def _pretty(sb: StringBuilder, indent: Int, compact: Boolean): Unit = sb.append("PInt32")
  override type NType = PInt32
  // Compares two values by loading the 4-byte int at each region offset.
  override def unsafeOrdering(): UnsafeOrdering = new UnsafeOrdering {
    def compare(o1: Long, o2: Long): Int = {
      Integer.compare(Region.loadInt(o1), Region.loadInt(o2))
    }
  }
  // Staged ordering over Int codes, consistent with unsafeOrdering above.
  def codeOrdering(mb: EmitMethodBuilder[_], other: PType): CodeOrdering = {
    assert(other isOfType this)
    new CodeOrderingCompareConsistentWithOthers {
      type T = Int
      def compareNonnull(x: Code[T], y: Code[T]): Code[Int] =
        Code.invokeStatic2[java.lang.Integer, Int, Int, Int]("compare", x, y)
      override def ltNonnull(x: Code[T], y: Code[T]): Code[Boolean] = x < y
      override def lteqNonnull(x: Code[T], y: Code[T]): Code[Boolean] = x <= y
      override def gtNonnull(x: Code[T], y: Code[T]): Code[Boolean] = x > y
      override def gteqNonnull(x: Code[T], y: Code[T]): Code[Boolean] = x >= y
      override def equivNonnull(x: Code[T], y: Code[T]): Code[Boolean] = x.ceq(y)
    }
  }
  override def byteSize: Long = 4
  override def zero = coerce[PInt32](const(0))
  override def add(a: Code[_], b: Code[_]): Code[PInt32] = {
    coerce[PInt32](coerce[Int](a) + coerce[Int](b))
  }
  override def multiply(a: Code[_], b: Code[_]): Code[PInt32] = {
    coerce[PInt32](coerce[Int](a) * coerce[Int](b))
  }
  def storePrimitiveAtAddress(addr: Code[Long], srcPType: PType, value: Code[_]): Code[Unit] =
    Region.storeInt(addr, coerce[Int](value))
}
// Companion: `apply` returns the interned required/optional singleton.
object PInt32 {
  def apply(required: Boolean = false) = if (required) PInt32Required else PInt32Optional
  def unapply(t: PInt32): Option[Boolean] = Option(t.required)
}
| cseed/hail | hail/src/main/scala/is/hail/types/physical/PInt32.scala | Scala | mit | 2,148 |
package com.sksamuel.elastic4s
import io.circe._
import io.circe.jawn._
import scala.annotation.implicitNotFound
/**
* Automatic HitAs and Indexable derivation
*
* == Usage ==
*
* {{{
* import io.circe.generic.auto._
* import com.sksamuel.elastic4s.circe._
*
* case class City(id: Int, name: String)
*
* // index
* index into "places" / "cities" id cityId source City(1, "munich")
*
* // search and parse
* val resp = client.execute {
* search in "places" / "cities"
* }.await
*
* val cities = resp.as[City]
*
* }}}
*/
package object circe {
  /** Derives a [[HitReader]] from any circe `Decoder`, parsing the hit's source JSON. */
  @implicitNotFound(
    "No Decoder for type ${T} found. Use 'import io.circe.generic.auto._' or provide an implicit Decoder instance "
  )
  implicit def hitReaderWithCirce[T](implicit decoder: Decoder[T]): HitReader[T] = new HitReader[T] {
    override def read(hit: Hit): Either[Throwable, T] = decode[T](hit.sourceAsString)
  }
  /**
   * Derives an [[Indexable]] from any circe `Encoder`; the implicit `printer`
   * (compact `noSpaces` by default) controls how the JSON is rendered.
   */
  @implicitNotFound(
    "No Encoder for type ${T} found. Use 'import io.circe.generic.auto._' or provide an implicit Encoder instance "
  )
  implicit def indexableWithCirce[T](implicit encoder: Encoder[T],
                                     printer: Json => String = Printer.noSpaces.pretty): Indexable[T] =
    new Indexable[T] {
      override def json(t: T): String = printer(encoder(t))
    }
  /** Derives an [[AggReader]] from any circe `Decoder`, parsing the aggregation JSON. */
  @implicitNotFound(
    "No Decoder for type ${T} found. Use 'import io.circe.generic.auto._' or provide an implicit Decoder instance "
  )
  implicit def aggReaderWithCirce[T](implicit encoder: Decoder[T]): AggReader[T] = new AggReader[T] {
    override def read(json: String): Either[Throwable, T] = decode[T](json)
  }
}
| Tecsisa/elastic4s | elastic4s-circe/src/main/scala/com/sksamuel/elastic4s/circe/package.scala | Scala | apache-2.0 | 1,663 |
/*
 *  Copyright 2019 TWO SIGMA OPEN SOURCE, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twosigma.beakerx.scala.table
import com.twosigma.beakerx.KernelTest
import com.twosigma.beakerx.chart.Color
import com.twosigma.beakerx.scala.TestScalaEvaluator
import com.twosigma.beakerx.scala.fileloader.CSV
import com.twosigma.beakerx.table.highlight.{HighlightStyle, ThreeColorHeatmapHighlighter}
import org.junit.{Before, Test}
import org.scalatest.Matchers._
class TableDisplayTest {
  @Before
  @throws[Exception]
  def setUp(): Unit = {
    // NOTE(review): both locals are unused, but the calls are kept for any
    // side effects of initialising the evaluator / kernel test setup — confirm.
    val scalaEvaluator = TestScalaEvaluator.evaluator
    val importSeo = KernelTest.createSeo("")
  }
  @Test
  def removeAllCellHighlighters(): Unit = {
    // Adding a highlighter then removing all of them should leave none registered.
    val tableDisplay = new TableDisplay(generateKeyValueMap())
    tableDisplay.addCellHighlighter(threeColorHeatmap)
    tableDisplay.getCellHighlighters().size() shouldEqual 1
    tableDisplay.removeAllCellHighlighters()
    tableDisplay.getCellHighlighters().size() shouldEqual 0
  }
  // A single-column three-colour heatmap over "y1" with mid/low/high pivots 4/6/8.
  private def threeColorHeatmap = {
    new ThreeColorHeatmapHighlighter(
      "y1",
      HighlightStyle.SINGLE_COLUMN,
      4, 6, 8,
      new Color(247, 106, 106), new Color(239, 218, 82), new Color(100, 189, 122)
    )
  }
  // Minimal key/value fixture for constructing a TableDisplay.
  def generateKeyValueMap(): Map[String, Integer] = {
    Map("str1" -> 1, "str2" -> 2, "str3" -> 2)
  }
  @Test
  def creatingTableDisplayFromCSVShouldPreserveOrder(): Unit = {
    // The first data row of the CSV must come back as the first row of values.
    val tableDisplay = new TableDisplay(new CSV().readFile("src/test/resources/interest-rates-small.csv"))
    val list = tableDisplay.getValues()
    list.head(0) shouldEqual 8.17
    list.head(1) shouldEqual 8.5632
    list.head(10) shouldEqual 0.4186
  }
}
| twosigma/beaker-notebook | kernel/scala/src/test/scala/com/twosigma/beakerx/scala/table/TableDisplayTest.scala | Scala | apache-2.0 | 2,199 |
package sampler.cluster.abc.actor
import akka.actor.ActorLogging
import akka.actor.Actor
import akka.actor.actorRef2Scala
/**
 * Actor that runs an optional user-supplied reporting callback on every
 * incoming [[Report]] and acknowledges the sender with [[ReportCompleted]].
 *
 * @param action callback invoked with each report, if present
 */
class ReportingActor[P](action: Option[Report[P] => Unit]) extends Actor with ActorLogging {
  def receive = {
    case report: Report[P] =>
      // Run the callback only when one was supplied, then acknowledge.
      action.foreach(a => a(report))
      sender ! ReportCompleted(report)
    case msg =>
      // Fixed typo and stray quote in the original message ("Unexepected message '").
      log.error("Unexpected message: {}", msg.getClass)
  }
}
| tsaratoon/Sampler | sampler-cluster/src/main/scala/sampler/cluster/abc/actor/ReportingActor.scala | Scala | apache-2.0 | 405 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.net.URI
import java.util.concurrent.TimeUnit._
import org.apache.hadoop.fs.{FileStatus, Path}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import org.apache.spark.sql.execution.datasources.{DataSource, InMemoryFileIndex, LogicalRelation}
import org.apache.spark.sql.types.StructType
/**
 * A very simple source that reads files from the given directory as they appear.
 *
 * Files already emitted are tracked both in a persistent [[FileStreamSourceLog]]
 * (under `metadataPath`) and in an in-memory [[FileStreamSource.SeenFilesMap]],
 * so that each file is delivered in exactly one batch.
 */
class FileStreamSource(
    sparkSession: SparkSession,
    path: String,
    fileFormatClassName: String,
    override val schema: StructType,
    partitionColumns: Seq[String],
    metadataPath: String,
    options: Map[String, String]) extends Source with Logging {
  import FileStreamSource._
  private val sourceOptions = new FileStreamOptions(options)
  private val hadoopConf = sparkSession.sessionState.newHadoopConf()
  @transient private val fs = new Path(path).getFileSystem(hadoopConf)
  private val qualifiedBasePath: Path = {
    fs.makeQualified(new Path(path)) // can contain glob patterns
  }
  // Propagate a "basePath" option for non-glob paths so partition discovery in
  // the file relation resolves partition columns relative to the source root.
  private val optionsWithPartitionBasePath = sourceOptions.optionMapWithoutPath ++ {
    if (!SparkHadoopUtil.get.isGlobPath(new Path(path)) && options.contains("path")) {
      Map("basePath" -> path)
    } else {
      Map()
    }}
  private val metadataLog =
    new FileStreamSourceLog(FileStreamSourceLog.VERSION, sparkSession, metadataPath)
  // Offset of the latest batch recorded in the metadata log, or -1 when none exists yet.
  private var metadataLogCurrentOffset = metadataLog.getLatest().map(_._1).getOrElse(-1L)
  /** Maximum number of new files to be considered in each batch */
  private val maxFilesPerBatch = sourceOptions.maxFilesPerTrigger
  private val fileSortOrder = if (sourceOptions.latestFirst) {
      logWarning(
        """'latestFirst' is true. New files will be processed first, which may affect the watermark
          |value. In addition, 'maxFileAge' will be ignored.""".stripMargin)
      implicitly[Ordering[Long]].reverse
    } else {
      implicitly[Ordering[Long]]
    }
  // With latestFirst + a per-batch file cap, age-based expiry is disabled so that
  // older files can still be picked up by later batches.
  private val maxFileAgeMs: Long = if (sourceOptions.latestFirst && maxFilesPerBatch.isDefined) {
    Long.MaxValue
  } else {
    sourceOptions.maxFileAgeMs
  }
  private val fileNameOnly = sourceOptions.fileNameOnly
  if (fileNameOnly) {
    logWarning("'fileNameOnly' is enabled. Make sure your file names are unique (e.g. using " +
      "UUID), otherwise, files with the same name but under different paths will be considered " +
      "the same and causes data lost.")
  }
  /** A mapping from a file that we have processed to some timestamp it was last modified. */
  // Visible for testing and debugging in production.
  val seenFiles = new SeenFilesMap(maxFileAgeMs, fileNameOnly)
  // Rebuild the in-memory seen-files state from the persistent log on startup.
  metadataLog.allFiles().foreach { entry =>
    seenFiles.add(entry.path, entry.timestamp)
  }
  seenFiles.purge()
  logInfo(s"maxFilesPerBatch = $maxFilesPerBatch, maxFileAgeMs = $maxFileAgeMs")
  /**
   * Returns the maximum offset that can be retrieved from the source.
   *
   * `synchronized` on this method is for solving race conditions in tests. In the normal usage,
   * there is no race here, so the cost of `synchronized` should be rare.
   */
  private def fetchMaxOffset(): FileStreamSourceOffset = synchronized {
    // All the new files found - ignore aged files and files that we have seen.
    val newFiles = fetchAllFiles().filter {
      case (path, timestamp) => seenFiles.isNewFile(path, timestamp)
    }
    // Obey user's setting to limit the number of files in this batch trigger.
    val batchFiles =
      if (maxFilesPerBatch.nonEmpty) newFiles.take(maxFilesPerBatch.get) else newFiles
    batchFiles.foreach { file =>
      seenFiles.add(file._1, file._2)
      logDebug(s"New file: $file")
    }
    val numPurged = seenFiles.purge()
    logTrace(
      s"""
         |Number of new files = ${newFiles.size}
         |Number of files selected for batch = ${batchFiles.size}
         |Number of seen files = ${seenFiles.size}
         |Number of files purged from tracking map = $numPurged
       """.stripMargin)
    // Only advance the offset (and write a log entry) when the batch is non-empty.
    if (batchFiles.nonEmpty) {
      metadataLogCurrentOffset += 1
      metadataLog.add(metadataLogCurrentOffset, batchFiles.map { case (p, timestamp) =>
        FileEntry(path = p, timestamp = timestamp, batchId = metadataLogCurrentOffset)
      }.toArray)
      logInfo(s"Log offset set to $metadataLogCurrentOffset with ${batchFiles.size} new files")
    }
    FileStreamSourceOffset(metadataLogCurrentOffset)
  }
  /**
   * For test only. Run `func` with the internal lock to make sure when `func` is running,
   * the current offset won't be changed and no new batch will be emitted.
   */
  def withBatchingLocked[T](func: => T): T = synchronized {
    func
  }
  /** Return the latest offset in the [[FileStreamSourceLog]] */
  def currentLogOffset: Long = synchronized { metadataLogCurrentOffset }
  /**
   * Returns the data that is between the offsets (`start`, `end`].
   */
  override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
    val startOffset = start.map(FileStreamSourceOffset(_).logOffset).getOrElse(-1L)
    val endOffset = FileStreamSourceOffset(end).logOffset
    assert(startOffset <= endOffset)
    val files = metadataLog.get(Some(startOffset + 1), Some(endOffset)).flatMap(_._2)
    logInfo(s"Processing ${files.length} files from ${startOffset + 1}:$endOffset")
    logTrace(s"Files are:\\n\\t" + files.mkString("\\n\\t"))
    val newDataSource =
      DataSource(
        sparkSession,
        paths = files.map(f => new Path(new URI(f.path)).toString),
        userSpecifiedSchema = Some(schema),
        partitionColumns = partitionColumns,
        className = fileFormatClassName,
        options = optionsWithPartitionBasePath)
    // checkFilesExist = false: listed files may have been deleted since logging.
    Dataset.ofRows(sparkSession, LogicalRelation(newDataSource.resolveRelation(
      checkFilesExist = false), isStreaming = true))
  }
  /**
   * If the source has a metadata log indicating which files should be read, then we should use it.
   * Only when user gives a non-glob path that will we figure out whether the source has some
   * metadata log
   *
   * None means we don't know at the moment
   * Some(true) means we know for sure the source DOES have metadata
   * Some(false) means we know for sure the source DOES NOT have metadata
   */
  @volatile private[sql] var sourceHasMetadata: Option[Boolean] =
    if (SparkHadoopUtil.get.isGlobPath(new Path(path))) Some(false) else None
  private def allFilesUsingInMemoryFileIndex() = {
    val globbedPaths = SparkHadoopUtil.get.globPathIfNecessary(fs, qualifiedBasePath)
    val fileIndex = new InMemoryFileIndex(sparkSession, globbedPaths, options, Some(new StructType))
    fileIndex.allFiles()
  }
  private def allFilesUsingMetadataLogFileIndex() = {
    // Note if `sourceHasMetadata` holds, then `qualifiedBasePath` is guaranteed to be a
    // non-glob path
    new MetadataLogFileIndex(sparkSession, qualifiedBasePath, None).allFiles()
  }
  /**
   * Returns a list of files found, sorted by their timestamp.
   */
  private def fetchAllFiles(): Seq[(String, Long)] = {
    val startTime = System.nanoTime
    var allFiles: Seq[FileStatus] = null
    sourceHasMetadata match {
      case None =>
        if (FileStreamSink.hasMetadata(Seq(path), hadoopConf, sparkSession.sessionState.conf)) {
          sourceHasMetadata = Some(true)
          allFiles = allFilesUsingMetadataLogFileIndex()
        } else {
          allFiles = allFilesUsingInMemoryFileIndex()
          if (allFiles.isEmpty) {
            // we still cannot decide
          } else {
            // decide what to use for future rounds
            // double check whether source has metadata, preventing the extreme corner case that
            // metadata log and data files are only generated after the previous
            // `FileStreamSink.hasMetadata` check
            if (FileStreamSink.hasMetadata(Seq(path), hadoopConf, sparkSession.sessionState.conf)) {
              sourceHasMetadata = Some(true)
              allFiles = allFilesUsingMetadataLogFileIndex()
            } else {
              sourceHasMetadata = Some(false)
              // `allFiles` have already been fetched using InMemoryFileIndex in this round
            }
          }
        }
      case Some(true) => allFiles = allFilesUsingMetadataLogFileIndex()
      case Some(false) => allFiles = allFilesUsingInMemoryFileIndex()
    }
    val files = allFiles.sortBy(_.getModificationTime)(fileSortOrder).map { status =>
      (status.getPath.toUri.toString, status.getModificationTime)
    }
    val endTime = System.nanoTime
    val listingTimeMs = NANOSECONDS.toMillis(endTime - startTime)
    if (listingTimeMs > 2000) {
      // Output a warning when listing files uses more than 2 seconds.
      logWarning(s"Listed ${files.size} file(s) in $listingTimeMs ms")
    } else {
      logTrace(s"Listed ${files.size} file(s) in $listingTimeMs ms")
    }
    logTrace(s"Files are:\\n\\t" + files.mkString("\\n\\t"))
    files
  }
  override def getOffset: Option[Offset] = Some(fetchMaxOffset()).filterNot(_.logOffset == -1)
  override def toString: String = s"FileStreamSource[$qualifiedBasePath]"
  /**
   * Informs the source that Spark has completed processing all data for offsets less than or
   * equal to `end` and will only request offsets greater than `end` in the future.
   */
  override def commit(end: Offset): Unit = {
    // No-op for now; FileStreamSource currently garbage-collects files based on timestamp
    // and the value of the maxFileAge parameter.
  }
  override def stop() {}
}
object FileStreamSource {
  /** Timestamp for file modification time, in ms since January 1, 1970 UTC. */
  type Timestamp = Long
  /** One entry in the file-source metadata log: a file path, its mod time, and its batch. */
  case class FileEntry(path: String, timestamp: Timestamp, batchId: Long) extends Serializable
  /**
   * A custom hash map used to track the list of files seen. This map is not thread-safe.
   *
   * To prevent the hash map from growing indefinitely, a purge function is available to
   * remove files "maxAgeMs" older than the latest file.
   */
  class SeenFilesMap(maxAgeMs: Long, fileNameOnly: Boolean) {
    require(maxAgeMs >= 0)
    /** Mapping from file to its timestamp. */
    private val map = new java.util.HashMap[String, Timestamp]
    /** Timestamp of the latest file. */
    private var latestTimestamp: Timestamp = 0L
    /** Timestamp for the last purge operation. */
    private var lastPurgeTimestamp: Timestamp = 0L
    // When fileNameOnly is set, files are keyed by bare file name rather than full path.
    @inline private def stripPathIfNecessary(path: String) = {
      if (fileNameOnly) new Path(new URI(path)).getName else path
    }
    /** Add a new file to the map. */
    def add(path: String, timestamp: Timestamp): Unit = {
      map.put(stripPathIfNecessary(path), timestamp)
      if (timestamp > latestTimestamp) {
        latestTimestamp = timestamp
      }
    }
    /**
     * Returns true if we should consider this file a new file. The file is only considered "new"
     * if it is new enough that we are still tracking, and we have not seen it before.
     */
    def isNewFile(path: String, timestamp: Timestamp): Boolean = {
      // Note that we are testing against lastPurgeTimestamp here so we'd never miss a file that
      // is older than (latestTimestamp - maxAgeMs) but has not been purged yet.
      timestamp >= lastPurgeTimestamp && !map.containsKey(stripPathIfNecessary(path))
    }
    /** Removes aged entries and returns the number of files removed. */
    def purge(): Int = {
      lastPurgeTimestamp = latestTimestamp - maxAgeMs
      val iter = map.entrySet().iterator()
      var count = 0
      while (iter.hasNext) {
        val entry = iter.next()
        if (entry.getValue < lastPurgeTimestamp) {
          count += 1
          iter.remove()
        }
      }
      count
    }
    def size: Int = map.size()
  }
}
| yanboliang/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSource.scala | Scala | apache-2.0 | 12,691 |
package com.eevolution.context.dictionary.domain.api.service
import com.eevolution.context.dictionary.api
import com.eevolution.context.dictionary.domain.model.HouseKeeping
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: emeris.hernandez@e-evolution.com, http://www.e-evolution.com , http://github.com/EmerisScala
* Created by emeris.hernandez@e-evolution.com , www.e-evolution.com on 10/11/17.
*/
/**
 * House Keeping Service: CRUD-style service interface for [[HouseKeeping]]
 * dictionary entities, keyed by their integer id.
 */
trait HouseKeepingService extends api.Service[HouseKeeping, Int] {
  //Definition
}
| adempiere/ADReactiveSystem | dictionary-api/src/main/scala/com/eevolution/context/dictionary/domain/api/service/HouseKeepingService.scala | Scala | gpl-3.0 | 1,229 |
package io.github.interestinglab.waterdrop.output.batch
import io.github.interestinglab.waterdrop.config.{Config, ConfigFactory}
import io.github.interestinglab.waterdrop.apis.BaseOutput
import org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions
import org.apache.spark.sql.{Dataset, Row, SaveMode, SparkSession}
import scala.collection.JavaConversions._
/**
 * Waterdrop batch output that writes a Dataset to TiDB over the MySQL JDBC
 * protocol. Required options: url, table, user, password.
 */
class Tidb extends BaseOutput {

  // Whether process() has run before; in a streaming job "overwrite" should
  // only truncate the table on the very first micro-batch.
  var firstProcess = true

  var config: Config = ConfigFactory.empty()

  /**
   * Set Config.
   * */
  override def setConfig(config: Config): Unit = {
    this.config = config
  }

  /**
   * Get Config.
   * */
  override def getConfig(): Config = {
    this.config
  }

  /**
   * Validates that all required options are present and that save_mode, if
   * given, is one of the allowed values.
   */
  override def checkConfig(): (Boolean, String) = {
    val requiredOptions = List("url", "table", "user", "password")
    // Required options the user did not supply.
    val missingOptions = requiredOptions.filterNot(optionName => config.hasPath(optionName))
    if (missingOptions.isEmpty) {
      val saveModeAllowedValues = List("overwrite", "append", "ignore", "error")
      if (!config.hasPath("save_mode") || saveModeAllowedValues.contains(config.getString("save_mode"))) {
        (true, "")
      } else {
        (false, "wrong value of [save_mode], allowed values: " + saveModeAllowedValues.mkString(", "))
      }
    } else {
      // Bug fix: the original mapped over (name, exists) tuples, so the message
      // printed "[(url,false)]" instead of "[url]".
      (
        false,
        "please specify " + missingOptions
          .map(option => "[" + option + "]")
          .mkString(", ") + " as non-empty string")
    }
  }

  override def prepare(spark: SparkSession): Unit = {
    super.prepare(spark)
    // Fill in defaults for every optional setting.
    val defaultConfig = ConfigFactory.parseMap(
      Map(
        "save_mode" -> "append", // allowed values: overwrite, append, ignore, error
        "useSSL" -> "false",
        "isolationLevel" -> "NONE",
        "batchsize" -> 150
      )
    )
    config = config.withFallback(defaultConfig)
  }

  /**
   * Writes the dataset via JDBC. After the first batch, "overwrite" degrades
   * to append so later streaming batches do not truncate the table.
   */
  override def process(df: Dataset[Row]): Unit = {
    val prop = new java.util.Properties
    prop.setProperty("driver", "com.mysql.jdbc.Driver")
    prop.setProperty("useSSL", config.getString("useSSL"))
    prop.setProperty("isolationLevel", config.getString("isolationLevel"))
    prop.setProperty("user", config.getString("user"))
    prop.setProperty("password", config.getString("password"))
    prop.setProperty(JDBCOptions.JDBC_BATCH_INSERT_SIZE, config.getString("batchsize"))
    val saveMode = config.getString("save_mode")
    if (firstProcess) {
      df.write.mode(saveMode).jdbc(config.getString("url"), config.getString("table"), prop)
      firstProcess = false
    } else if (saveMode == "overwrite") {
      // The user only wants the first batch to overwrite in streaming
      // (which generates multiple dataframes); subsequent batches append.
      df.write.mode(SaveMode.Append).jdbc(config.getString("url"), config.getString("table"), prop)
    } else {
      df.write.mode(saveMode).jdbc(config.getString("url"), config.getString("table"), prop)
    }
  }
}
| InterestingLab/waterdrop | waterdrop-core/src/main/scala/io/github/interestinglab/waterdrop/output/batch/Tidb.scala | Scala | apache-2.0 | 3,067 |
package controllers
import play.api.mvc.{Action, Controller}
import views.html.accessOk
import security.{MyAlternativeDynamicResourceHandler, MyDeadboltHandler}
import be.objectify.deadbolt.scala.DeadboltActions
/**
*
* @author Steve Chaloner (steve@objectify.be)
*/
object DynamicRestrictionsController extends Controller with DeadboltActions
{
  // Guarded by the "pureLuck" dynamic permission, resolved via the default
  // dynamic resource handler inside MyDeadboltHandler.
  def pureLuck = Dynamic("pureLuck", "", new MyDeadboltHandler) {
    Action {
      Ok(accessOk())
    }
  }

  // Same "pureLuck" permission but with an alternative dynamic resource
  // handler supplied — presumably one that denies access, given the action
  // name (NOTE(review): confirm against MyAlternativeDynamicResourceHandler).
  def noWayJose = Dynamic("pureLuck", "", new MyDeadboltHandler(Some(MyAlternativeDynamicResourceHandler))) {
    Action {
      Ok(accessOk())
    }
  }
}
| play2-maven-plugin/play2-maven-test-projects | play23/external-modules/deadbolt/scala/app/controllers/DynamicRestrictionsController.scala | Scala | apache-2.0 | 742 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.spark
import java.util
import java.util.Comparator
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.Partitioner
/**
* A Partitioner implementation that will separate records to different
* HBase Regions based on region splits
*
* @param startKeys The start keys for the given table
*/
class BulkLoadPartitioner(startKeys:Array[Array[Byte]])
  extends Partitioner {

  // A Partitioner must expose at least one partition, even when the table has
  // no computed split points (empty start-key array).
  override def numPartitions: Int = if (startKeys.isEmpty) 1 else startKeys.length

  override def getPartition(key: Any): Int = {

    val comparator: Comparator[Array[Byte]] = new Comparator[Array[Byte]] {
      override def compare(o1: Array[Byte], o2: Array[Byte]): Int = {
        Bytes.compareTo(o1, o2)
      }
    }

    // Unwrap the supported key encodings down to the raw row-key bytes.
    val rowKey:Array[Byte] =
      key match {
        case qualifier: KeyFamilyQualifier =>
          qualifier.rowKey
        case wrapper: ByteArrayWrapper =>
          wrapper.value
        case _ =>
          key.asInstanceOf[Array[Byte]]
      }

    // binarySearch returns the index of an exact match, or
    // (-(insertionPoint) - 1) when the row key is not itself a start key.
    val partition = util.Arrays.binarySearch(startKeys, rowKey, comparator)
    if (partition >= 0) {
      partition
    } else {
      // The row belongs to the region whose start key immediately precedes
      // the insertion point: -partition - 2 == insertionPoint - 1.
      val region = -partition - 2
      // Bug fix: rows sorting before the first start key previously yielded
      // -1, which is not a valid Spark partition id; clamp them to region 0.
      math.max(region, 0)
    }
  }
}
| tmalaska/SparkOnHBase | src/main/scala/org/apache/hadoop/hbase/spark/BulkLoadPartitioner.scala | Scala | apache-2.0 | 1,918 |
package amailp.intellij.robot.psi
import com.intellij.extapi.psi.ASTWrapperPsiElement
import com.intellij.lang.ASTNode
import com.intellij.psi.tree.TokenSet
import amailp.intellij.robot.lexer.RobotIElementType
import com.intellij.psi.PsiElement
import scala.collection.JavaConversions._
import amailp.intellij.robot.psi.utils.RobotPsiUtils
/**
 * Common base class for Robot Framework PSI elements; mixes in the shared
 * helpers from [[RobotPsiUtils]].
 */
abstract class RobotPsiElement(node: ASTNode) extends ASTWrapperPsiElement(node) with RobotPsiUtils {
  // Convenience overload: wraps the given element types in a TokenSet and
  // delegates to the TokenSet-based findChildrenByType.
  def findChildrenByType[T <: PsiElement](tokenTypes: List[RobotIElementType]): Iterable[T] = {
    findChildrenByType[T](TokenSet.create(tokenTypes: _*))
  }
}
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.features.avro
import org.geotools.factory.CommonFactoryFinder
import org.geotools.feature.simple.SimpleFeatureBuilder
import org.geotools.geometry.GeometryBuilder
import org.geotools.referencing.crs.DefaultGeographicCRS
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class AvroSimpleFeatureFactoryTest extends Specification {
  "GeoTools must use AvroSimpleFeatureFactory when hint is set" in {
    // init registers the factory hint so CommonFactoryFinder resolves to it.
    AvroSimpleFeatureFactory.init
    val featureFactory = CommonFactoryFinder.getFeatureFactory(null)
    featureFactory.getClass mustEqual classOf[AvroSimpleFeatureFactory]
  }
  "SimpleFeatureBuilder should return an AvroSimpleFeature when using an AvroSimpleFeatureFactory" in {
    AvroSimpleFeatureFactory.init
    val geomBuilder = new GeometryBuilder(DefaultGeographicCRS.WGS84)
    val featureFactory = CommonFactoryFinder.getFeatureFactory(null)
    val sft = SimpleFeatureTypes.createType("testavro", "name:String,geom:Point:srid=4326")
    // Pass the factory explicitly so the builder constructs Avro-backed features.
    val builder = new SimpleFeatureBuilder(sft, featureFactory)
    builder.reset()
    builder.add("Hello")
    builder.add(geomBuilder.createPoint(1,1))
    val feature = builder.buildFeature("id")
    feature.getClass mustEqual classOf[AvroSimpleFeature]
  }
}
| ddseapy/geomesa | geomesa-features/geomesa-feature-avro/src/test/scala/org/locationtech/geomesa/features/avro/AvroSimpleFeatureFactoryTest.scala | Scala | apache-2.0 | 1,872 |
package scutil.lang.extension
import scutil.lang._
object PFunctionImplicits extends PFunctionImplicits

/** Enrichment methods for partial functions encoded as `S => Option[T]`. */
trait PFunctionImplicits {
	implicit final class PFunctionExt[S,T](peer:S=>Option[T]) {
		/** Apply `peer`, substituting `default` wherever it yields no value. */
		def applyOrElse(it:S, default:T):T =
			peer(it).getOrElse(default)

		/** Total function that falls back to a lazily evaluated constant. */
		def orDefault(default: =>T):Function1[S,T] =
			orAlways(constant(default))

		/** Total function that falls back to another total function. */
		def orAlways(that:Function[S,T]):Function1[S,T] =
			s => peer(s).getOrElse(that(s))

		/** First defined result of `peer`, else of `that`. */
		def orElse(that:S=>Option[T]):S=>Option[T] =
			s => peer(s).orElse(that(s))

		/** symbolic alias for andThenFixed */
		def >=>[U](that:T=>Option[U]):S=>Option[U] =
			andThenFixed(that)

		/** symbolic alias for composeFixed */
		def <=<[R](that:R=>Option[S]):R=>Option[T] =
			composeFixed(that)

		/** Kleisli-style left-to-right composition in the Option monad. */
		def andThenFixed[U](that:T=>Option[U]):S=>Option[U] =
			s => peer(s).flatMap(that)

		/** Kleisli-style right-to-left composition in the Option monad. */
		def composeFixed[R](that:R=>Option[S]):R=>Option[T] =
			r => that(r).flatMap(peer)

		/** Wrap into an Extractor for use in pattern matches. */
		def toExtractor:Extractor[S,T] =
			Extractor(peer)
	}
}
| ritschwumm/scutil | modules/core/src/main/scala/scutil/lang/extension/PFunctionImplicits.scala | Scala | bsd-2-clause | 981 |
import leon.annotation._
import leon.lang._
import leon.lang.synthesis._
// Sorting lists is a fundamental problem in CS.
//
// This is a Leon synthesis/verification benchmark: `choose { ... }` marks a
// hole the synthesizer must fill, and `... holds` marks a verification
// condition the solver must prove.
object Sorting {
  // Data types
  sealed abstract class List
  case class Cons(head : Int, tail : List) extends List
  case class Nil() extends List
  // Lists of lists, used by the merge-sort reduction below.
  sealed abstract class LList
  case class LCons(head : List, tail : LList) extends LList
  case class LNil() extends LList
  // Abstraction functions
  def content(list : List) : Set[Int] = list match {
    case Nil() => Set.empty[Int]
    case Cons(x, xs) => Set(x) ++ content(xs)
  }
  def lContent(llist : LList) : Set[Int] = llist match {
    case LNil() => Set.empty[Int]
    case LCons(x, xs) => content(x) ++ lContent(xs)
  }
  def size(list : List) : Int = (list match {
    case Nil() => 0
    case Cons(_, xs) => 1 + size(xs)
  }) ensuring(_ >= 0)
  // Non-decreasing order over adjacent elements.
  def isSorted(list : List) : Boolean = list match {
    case Nil() => true
    case Cons(_, Nil()) => true
    case Cons(x1, Cons(x2, _)) if(x1 > x2) => false
    case Cons(_, xs) => isSorted(xs)
  }
  def lIsSorted(llist : LList) : Boolean = llist match {
    case LNil() => true
    case LCons(x, xs) => isSorted(x) && lIsSorted(xs)
  }
  // Lemma: every element of the tail of a sorted list is >= its head.
  @induct
  def sortedLemma(a: Int, x: Int, b: List) = {
    !(isSorted(Cons(a,b)) && (content(b) contains x)) || (x >= a)
  } holds
  def abs(i : Int) : Int = {
    if(i < 0) -i else i
  } ensuring(_ >= 0)
  /***************************
   * *
   * I N S E R T I O N *
   * *
   ***************************/
  def insertSpec(elem : Int, list : List, res : List) : Boolean = {
    // isSorted(list) && // Part of precondition, really.
    isSorted(res) && content(res) == content(list) ++ Set(elem)
  }
  def insert(elem : Int, list : List) : List = {
    require(isSorted(list))
    choose { (res : List) => insertSpec(elem, list, res) }
  }
  //require(head < elem && isSorted(Cons(head, tail)) && r == insertImpl(elem, tail))
  //require(head < elem && isSorted(Cons(head, tail)) && isSorted(r) && content(r) == content(tail) ++ Set(elem))
  // head3;tail3;r4;elem4, ((head3 < elem4) ∧ isSorted(Cons(head3, tail3)) ∧ insertSpec(elem4, tail3, r4)) ≺ ⟨ insertSpec(elem4, Cons(head3, tail3), res) ⟩ res
  def insertV(head: Int, tail: List, r: List, rh: Int, rt: List, elem: Int) = {
    require(head < elem && isSorted(Cons(head, tail)) && content(tail) == content(r) && isSorted(r) && r == Cons(rh, rt) && sortedLemma(head, rh, r))
    //insertSpec(elem, Cons(head, tail), Cons(head, r))
    //rh >= head
    isSorted(Cons(head, r))
  } holds
  //require(head < elem && isSorted(Cons(head, tail)) && isSorted(r) && content(r) == content(tail) ++ Set(elem))
  // head3;tail3;r4;elem4, ((head3 < elem4) ∧ isSorted(Cons(head3, tail3)) ∧ insertSpec(elem4, tail3, r4)) ≺ ⟨ insertSpec(elem4, Cons(head3, tail3), res) ⟩ res
  def insertV2(head: Int, tail: List, r: List, rh: Int, rt: List, elem: Int) = {
    require(head < elem && isSorted(Cons(head, tail)) && r == insertImpl(elem, tail) && r == Cons(rh, rt))
    //insertSpec(elem, Cons(head, tail), Cons(head, r))
    rh >= head
  } holds
  // Insert with a synthesis hole in the elem > h branch.
  def insert2(elem : Int, list : List) : List = {
    require(isSorted(list))
    list match {
      case Cons(h, t) =>
        val r = insert2(elem, t)
        if (elem > h) {
          choose { (res : List) => insertSpec(elem, Cons(h,t), res) }
        } else if (elem < h) {
          Cons(elem, Cons(h, t))
        } else {
          Cons(h, t)
        }
      case Nil() =>
        Cons(elem, Nil())
    }
  } ensuring { res => insertSpec(elem, list, res) }
  // Reference implementation of sorted insertion.
  def insertImpl(elem : Int, list : List) : List = {
    require(isSorted(list))
    list match {
      case Nil() => Cons(elem, Nil())
      case c @ Cons(x, _) if(elem <= x) => Cons(elem, c)
      case Cons(x, xs) => Cons(x, insertImpl(elem, xs))
    }
  } ensuring(insertSpec(elem, list, _))
  /**********************************
   * *
   * M E R G I N G (slow+fast) *
   * *
   **********************************/
  def mergeSpec(list1 : List, list2 : List, res : List) : Boolean = {
    // isSorted(list1) && isSorted(list2) && // Part of precondition, really.
    isSorted(res) && content(res) == content(list1) ++ content(list2)
  }
  // The goal is that the synthesizer figures out that it should use insert.
  // Currently, CEGIS with functions is too slow to handle that benchmark,
  // even when insertImpl is the only function in the scope.
  // (Note that I already propagated the path condition.)
  // If you put mergeImpl in the scope, it solves it (how surprising).
  def merge(list1 : List, list2 : List) : List = {
    require(isSorted(list1) && isSorted(list2))
    choose { (res : List) => mergeSpec(list1, list2, res) }
  }
  // Quadratic merge: repeatedly inserts into the merged tail.
  def mergeImpl(list1 : List, list2 : List) : List = {
    require(isSorted(list1) && isSorted(list2))
    list1 match {
      case Nil() => list2
      case Cons(x, xs) => insertImpl(x, mergeImpl(xs, list2))
    }
  } ensuring(res => mergeSpec(list1, list2, res))
  // Linear merge of two sorted lists.
  def mergeFastImpl(list1 : List, list2 : List) : List = {
    require(isSorted(list1) && isSorted(list2))
    (list1, list2) match {
      case (_, Nil()) => list1
      case (Nil(), _) => list2
      case (Cons(x, xs), Cons(y, ys)) =>
        if(x <= y) {
          Cons(x, mergeFastImpl(xs, list2))
        } else {
          Cons(y, mergeFastImpl(list1, ys))
        }
    }
  } ensuring(res => mergeSpec(list1, list2, res))
  /***************************
   * *
   * S P L I T T I N G *
   * *
   ***************************/
  // A valid split: sizes differ by at most one and contents are preserved.
  def splitSpec(list : List, res : (List,List)) : Boolean = {
    val s1 = size(res._1)
    val s2 = size(res._2)
    abs(s1 - s2) <= 1 && s1 + s2 == size(list) &&
    content(res._1) ++ content(res._2) == content(list)
  }
  // I think this one is really close to working. I suspect it would work
  // if the generators in CEGIS had a way to read from tuples. E.g. if
  // t of type (Int,Int) is in context, t._1 and t._2 should be candidates
  // for integers.
  //
  // (Note that if you weaken splitSpec in any way, you get valid and
  // useless answers).
  def split(list : List) : (List,List) = {
    choose { (res : (List,List)) => splitSpec(list, res) }
  }
  // Reference implementation: deal elements alternately into two lists.
  def splitImpl(list : List) : (List,List) = (list match {
    case Nil() => (Nil(), Nil())
    case Cons(x, Nil()) => (Cons(x, Nil()), Nil())
    case Cons(x1, Cons(x2, xs)) =>
      val (s1,s2) = splitImpl(xs)
      (Cons(x1, s1), Cons(x2, s2))
  }) ensuring(res => splitSpec(list, res))
  /***********************
   * *
   * S O R T I N G *
   * *
   ***********************/
  def sortSpec(in : List, out : List) : Boolean = {
    content(out) == content(in) && isSorted(out)
  }
  def insertSorted(in: List, v: Int): List = {
    require(isSorted(in));
    in match {
      case Cons(h, t) =>
        val r = insertSorted(t, v)
        if (h < v) {
          Cons(h, r)
        } else if (h > v) {
          Cons(v, Cons(h, t))
        } else {
          Cons(h, t)
        }
      case _ =>
        Cons(v, Nil())
    }
  } ensuring { res => isSorted(res) && content(res) == content(in) ++ Set(v) }
  // Same as insertSorted but with a synthesis hole in the h < v branch.
  def insertSorted1(in: List, v: Int): List = {
    require(isSorted(in));
    in match {
      case Cons(h, t) =>
        val r = insertSorted(t, v)
        if (h < v) {
          choose { (res: List) => isSorted(res) && content(res) == content(in) ++ Set(v) }
        } else if (h > v) {
          Cons(v, Cons(h, t))
        } else {
          Cons(h, t)
        }
      case _ =>
        Cons(v, Nil())
    }
  } ensuring { res => isSorted(res) && content(res) == content(in) ++ Set(v) }
  def insertionSortImpl(in : List) : List = (in match {
    case Nil() => Nil()
    case Cons(x, xs) => insertImpl(x, insertionSortImpl(xs))
  }) ensuring(res => sortSpec(in, res))
  // Not really quicksort, neither mergesort.
  def weirdSortImpl(in : List) : List = (in match {
    case Nil() => Nil()
    case Cons(x, Nil()) => Cons(x, Nil())
    case _ =>
      val (s1,s2) = splitImpl(in)
      mergeFastImpl(weirdSortImpl(s1), weirdSortImpl(s2))
  }) ensuring(res => sortSpec(in, res))
  // Lift each element into a singleton sorted list (start of bottom-up merge sort).
  def toLList(list : List) : LList = (list match {
    case Nil() => LNil()
    case Cons(x, xs) => LCons(Cons(x, Nil()), toLList(xs))
  }) ensuring(res => lContent(res) == content(list) && lIsSorted(res))
  // One pass of pairwise merging, halving the number of lists.
  def mergeMap(llist : LList) : LList = {
    require(lIsSorted(llist))
    llist match {
      case LNil() => LNil()
      case o @ LCons(x, LNil()) => o
      case LCons(x, LCons(y, ys)) => LCons(mergeFastImpl(x, y), mergeMap(ys))
    }
  } ensuring(res => lContent(res) == lContent(llist) && lIsSorted(res))
  // Repeat pairwise merging until a single sorted list remains.
  def mergeReduce(llist : LList) : List = {
    require(lIsSorted(llist))
    llist match {
      case LNil() => Nil()
      case LCons(x, LNil()) => x
      case _ => mergeReduce(mergeMap(llist))
    }
  } ensuring(res => content(res) == lContent(llist) && isSorted(res))
  // Bottom-up merge sort built from toLList + mergeReduce.
  def mergeSortImpl(in : List) : List = {
    mergeReduce(toLList(in))
  } ensuring(res => sortSpec(in, res))
}
| epfl-lara/leon | testcases/synthesis/cav2013/Sorting.scala | Scala | gpl-3.0 | 9,218 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.examples.wordspec.getfixture
import org.scalatest.WordSpec
import collection.mutable.ListBuffer
class ExampleSpec extends WordSpec {

  /**
   * Get-fixture method: every invocation builds a brand-new builder and
   * buffer, so each test mutates its own private copy and tests cannot
   * interfere with one another through shared state.
   */
  def fixture =
    new {
      val builder = new StringBuilder("ScalaTest is ")
      val buffer = new ListBuffer[String]
    }

  "Testing" should {
    "be easy" in {
      val fix = fixture
      fix.builder.append("easy!")
      assert(fix.builder.toString === "ScalaTest is easy!")
      assert(fix.buffer.isEmpty)
      // Safe to mutate: the next test obtains a fresh fixture of its own.
      fix.buffer += "sweet"
    }
    "be fun" in {
      val fix = fixture
      fix.builder.append("fun!")
      assert(fix.builder.toString === "ScalaTest is fun!")
      assert(fix.buffer.isEmpty)
    }
  }
}
| cheeseng/scalatest | examples/src/main/scala/org/scalatest/examples/wordspec/getfixture/ExampleSpec.scala | Scala | apache-2.0 | 1,281 |
/*
* This file is part of the "silex" library of helpers for Apache Spark.
*
* Copyright (c) 2015 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.redhat.et.silex.feature.extractor
/** Provides conversions from Breeze vectors to [[FeatureSeq]], and vice versa.
  * {{{
  * import com.redhat.et.silex.feature.extractor.{ FeatureSeq, Extractor }
  * import com.redhat.et.silex.feature.extractor.breeze
  * import com.redhat.et.silex.feature.extractor.breeze.implicits._
  * import _root_.breeze.linalg.DenseVector
  *
  * val bv = new DenseVector(Array(1.0, 2.0))
  * val featureSeq = FeatureSeq(bv)
  * val bv2 = featureSeq.toBreeze
  * }}}
  */
package object breeze {} // intentionally empty: exists only to carry the package-level Scaladoc above
| erikerlandson/silex | src/main/scala/com/redhat/et/silex/feature/extractor/breeze/package.scala | Scala | apache-2.0 | 1,218 |
/*
* Copyright 2016 rdbc contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.rdbc.sapi
import scala.annotation.implicitNotFound
import scala.concurrent.duration.Duration
import scala.concurrent.duration._
/** Represents a timeout.
  *
  * A value-class (`AnyVal`) wrapper around a
  * [[scala.concurrent.duration.Duration Duration]]; the duration may be
  * finite or infinite (see `Timeout.Inf` in the companion).
  */
@implicitNotFound(
  """Cannot find an implicit Timeout. You might pass
    an (implicit timeout: Timeout) parameter to your method""")
final case class Timeout(value: Duration) extends AnyVal
/** Companion of [[Timeout]]: predefined instances and implicit syntax. */
object Timeout {

  /** An unbounded (infinite) timeout. */
  val Inf: Timeout = Timeout(Duration.Inf)

  /** A one-day timeout — the largest finite timeout predefined here. */
  val MaxFiniteTimeout: Timeout = Timeout(1.day)

  private[sapi] trait ImplicitsTrait {

    /** Adds a `.timeout` conversion from [[scala.concurrent.duration.Duration Duration]]. */
    implicit class Duration2Timeout(duration: Duration) {
      def timeout: Timeout = Timeout(duration)
    }
  }
}
| rdbc-io/rdbc | rdbc-api-scala/src/main/scala/io/rdbc/sapi/Timeout.scala | Scala | apache-2.0 | 1,324 |
package org.joda.time.chrono
import java.io.ObjectInputStream
import org.joda.time.Chronology
import org.joda.time.DateTimeField
import org.joda.time.DateTimeZone
import org.joda.time.DurationField
import org.joda.time.chrono.AssembledChronology.Fields
object AssembledChronology {

  object Fields {

    // A null field reference counts as unsupported.
    private def isSupported(field: DurationField): Boolean =
      field != null && field.isSupported

    private def isSupported(field: DateTimeField): Boolean =
      field != null && field.isSupported
  }

  /**
   * Mutable container holding every duration and date-time field of a
   * chronology. [[AssembledChronology]] pre-populates it from the base
   * chronology and then lets subclasses override entries in `assemble`.
   */
  class Fields() {
    var millis: DurationField = null
    var seconds: DurationField = null
    var minutes: DurationField = null
    var hours: DurationField = null
    var halfdays: DurationField = null
    var days: DurationField = null
    var weeks: DurationField = null
    var weekyears: DurationField = null
    var months: DurationField = null
    var years: DurationField = null
    var centuries: DurationField = null
    var eras: DurationField = null
    var millisOfSecond: DateTimeField = null
    var millisOfDay: DateTimeField = null
    var secondOfMinute: DateTimeField = null
    var secondOfDay: DateTimeField = null
    var minuteOfHour: DateTimeField = null
    var minuteOfDay: DateTimeField = null
    var hourOfDay: DateTimeField = null
    var clockhourOfDay: DateTimeField = null
    var hourOfHalfday: DateTimeField = null
    var clockhourOfHalfday: DateTimeField = null
    var halfdayOfDay: DateTimeField = null
    var dayOfWeek: DateTimeField = null
    var dayOfMonth: DateTimeField = null
    var dayOfYear: DateTimeField = null
    var weekOfWeekyear: DateTimeField = null
    var weekyear: DateTimeField = null
    var weekyearOfCentury: DateTimeField = null
    var monthOfYear: DateTimeField = null
    var year: DateTimeField = null
    var yearOfEra: DateTimeField = null
    var yearOfCentury: DateTimeField = null
    var centuryOfEra: DateTimeField = null
    var era: DateTimeField = null

    /**
     * Copies every supported field of `chrono` into this container.
     * Fields that `chrono` does not support (or returns as null) leave the
     * currently held value untouched.
     */
    def copyFieldsFrom(chrono: Chronology) {
      def pickDuration(candidate: DurationField, current: DurationField): DurationField =
        if (Fields.isSupported(candidate)) candidate else current

      def pickDateTime(candidate: DateTimeField, current: DateTimeField): DateTimeField =
        if (Fields.isSupported(candidate)) candidate else current

      millis = pickDuration(chrono.millis(), millis)
      seconds = pickDuration(chrono.seconds(), seconds)
      minutes = pickDuration(chrono.minutes(), minutes)
      hours = pickDuration(chrono.hours(), hours)
      halfdays = pickDuration(chrono.halfdays(), halfdays)
      days = pickDuration(chrono.days(), days)
      weeks = pickDuration(chrono.weeks(), weeks)
      weekyears = pickDuration(chrono.weekyears(), weekyears)
      months = pickDuration(chrono.months(), months)
      years = pickDuration(chrono.years(), years)
      centuries = pickDuration(chrono.centuries(), centuries)
      eras = pickDuration(chrono.eras(), eras)

      millisOfSecond = pickDateTime(chrono.millisOfSecond(), millisOfSecond)
      millisOfDay = pickDateTime(chrono.millisOfDay(), millisOfDay)
      secondOfMinute = pickDateTime(chrono.secondOfMinute(), secondOfMinute)
      secondOfDay = pickDateTime(chrono.secondOfDay(), secondOfDay)
      minuteOfHour = pickDateTime(chrono.minuteOfHour(), minuteOfHour)
      minuteOfDay = pickDateTime(chrono.minuteOfDay(), minuteOfDay)
      hourOfDay = pickDateTime(chrono.hourOfDay(), hourOfDay)
      clockhourOfDay = pickDateTime(chrono.clockhourOfDay(), clockhourOfDay)
      hourOfHalfday = pickDateTime(chrono.hourOfHalfday(), hourOfHalfday)
      clockhourOfHalfday = pickDateTime(chrono.clockhourOfHalfday(), clockhourOfHalfday)
      halfdayOfDay = pickDateTime(chrono.halfdayOfDay(), halfdayOfDay)
      dayOfWeek = pickDateTime(chrono.dayOfWeek(), dayOfWeek)
      dayOfMonth = pickDateTime(chrono.dayOfMonth(), dayOfMonth)
      dayOfYear = pickDateTime(chrono.dayOfYear(), dayOfYear)
      weekOfWeekyear = pickDateTime(chrono.weekOfWeekyear(), weekOfWeekyear)
      weekyear = pickDateTime(chrono.weekyear(), weekyear)
      weekyearOfCentury = pickDateTime(chrono.weekyearOfCentury(), weekyearOfCentury)
      monthOfYear = pickDateTime(chrono.monthOfYear(), monthOfYear)
      year = pickDateTime(chrono.year(), year)
      yearOfEra = pickDateTime(chrono.yearOfEra(), yearOfEra)
      yearOfCentury = pickDateTime(chrono.yearOfCentury(), yearOfCentury)
      centuryOfEra = pickDateTime(chrono.centuryOfEra(), centuryOfEra)
      era = pickDateTime(chrono.era(), era)
    }
  }
}
/**
 * Abstract chronology that assembles its fields from an optional base
 * chronology plus whatever a subclass installs in [[assemble]].  All fields
 * are cached in transient members, which are rebuilt by `setFields()` —
 * both at construction time and after deserialization (see `readObject`).
 */
@SerialVersionUID(-6728465968995518215L)
abstract class AssembledChronology extends BaseChronology {

  // Base chronology the assembled fields start from (may be null), plus an
  // opaque subclass-specific parameter; both survive serialization.
  private var iBase: Chronology = null

  private var iParam: AnyRef = null

  // Transient caches of the assembled fields; recomputed by setFields().
  @transient private var iMillis: DurationField = null

  @transient private var iSeconds: DurationField = null

  @transient private var iMinutes: DurationField = null

  @transient private var iHours: DurationField = null

  @transient private var iHalfdays: DurationField = null

  @transient private var iDays: DurationField = null

  @transient private var iWeeks: DurationField = null

  @transient private var iWeekyears: DurationField = null

  @transient private var iMonths: DurationField = null

  @transient private var iYears: DurationField = null

  @transient private var iCenturies: DurationField = null

  @transient private var iEras: DurationField = null

  @transient private var iMillisOfSecond: DateTimeField = null

  @transient private var iMillisOfDay: DateTimeField = null

  @transient private var iSecondOfMinute: DateTimeField = null

  @transient private var iSecondOfDay: DateTimeField = null

  @transient private var iMinuteOfHour: DateTimeField = null

  @transient private var iMinuteOfDay: DateTimeField = null

  @transient private var iHourOfDay: DateTimeField = null

  @transient private var iClockhourOfDay: DateTimeField = null

  @transient private var iHourOfHalfday: DateTimeField = null

  @transient private var iClockhourOfHalfday: DateTimeField = null

  @transient private var iHalfdayOfDay: DateTimeField = null

  @transient private var iDayOfWeek: DateTimeField = null

  @transient private var iDayOfMonth: DateTimeField = null

  @transient private var iDayOfYear: DateTimeField = null

  @transient private var iWeekOfWeekyear: DateTimeField = null

  @transient private var iWeekyear: DateTimeField = null

  @transient private var iWeekyearOfCentury: DateTimeField = null

  @transient private var iMonthOfYear: DateTimeField = null

  @transient private var iYear: DateTimeField = null

  @transient private var iYearOfEra: DateTimeField = null

  @transient private var iYearOfCentury: DateTimeField = null

  @transient private var iCenturyOfEra: DateTimeField = null

  @transient private var iEra: DateTimeField = null

  // Bit mask computed at the end of setFields(): bit 1 = hour/minute/second/
  // millisOfSecond fields are identical to the base's, bit 2 = millisOfDay is
  // identical, bit 4 = year/month/day fields are identical.  Used below to
  // fast-path getDateTimeMillis by delegating straight to the base chronology.
  @transient private var iBaseFlags: Int = _

  protected def this(base: Chronology, param: AnyRef) {
    this()
    iBase = base
    iParam = param
    setFields()
  }

  // Performs the same initialization as the auxiliary constructor above —
  // presumably for subclasses that cannot invoke it directly; confirm callers.
  protected def auxConstructor(base: Chronology, param: AnyRef): Unit = {
    iBase = base
    iParam = param
    setFields()
  }

  def getZone(): DateTimeZone = {
    var base: Chronology = null
    if ({
      base = iBase; base
    } != null) {
      return base.getZone
    }
    null
  }

  // Delegates when both the millisOfDay field (bit 2) and the date fields
  // (bit 4) match the base chronology.
  override def getDateTimeMillis(year: Int,
                                 monthOfYear: Int,
                                 dayOfMonth: Int,
                                 millisOfDay: Int): Long = {
    var base: Chronology = null
    if ({
      base = iBase; base
    } != null && (iBaseFlags & 6) == 6) {
      return base.getDateTimeMillis(year, monthOfYear, dayOfMonth, millisOfDay)
    }
    super.getDateTimeMillis(year, monthOfYear, dayOfMonth, millisOfDay)
  }

  // Delegates when both the time-of-day fields (bit 1) and the date fields
  // (bit 4) match the base chronology.
  override def getDateTimeMillis(year: Int,
                                 monthOfYear: Int,
                                 dayOfMonth: Int,
                                 hourOfDay: Int,
                                 minuteOfHour: Int,
                                 secondOfMinute: Int,
                                 millisOfSecond: Int): Long = {
    var base: Chronology = null
    if ({
      base = iBase; base
    } != null && (iBaseFlags & 5) == 5) {
      return base.getDateTimeMillis(year,
                                    monthOfYear,
                                    dayOfMonth,
                                    hourOfDay,
                                    minuteOfHour,
                                    secondOfMinute,
                                    millisOfSecond)
    }
    super.getDateTimeMillis(year,
                            monthOfYear,
                            dayOfMonth,
                            hourOfDay,
                            minuteOfHour,
                            secondOfMinute,
                            millisOfSecond)
  }

  // Delegates when the time-of-day fields (bit 1) match the base chronology.
  override def getDateTimeMillis(instant: Long,
                                 hourOfDay: Int,
                                 minuteOfHour: Int,
                                 secondOfMinute: Int,
                                 millisOfSecond: Int): Long = {
    var base: Chronology = null
    if ({
      base = iBase; base
    } != null && (iBaseFlags & 1) == 1) {
      return base.getDateTimeMillis(instant,
                                    hourOfDay,
                                    minuteOfHour,
                                    secondOfMinute,
                                    millisOfSecond)
    }
    super.getDateTimeMillis(instant,
                            hourOfDay,
                            minuteOfHour,
                            secondOfMinute,
                            millisOfSecond)
  }

  // Accessors for the assembled/cached fields.
  override def millis(): DurationField = iMillis

  override def millisOfSecond(): DateTimeField = iMillisOfSecond

  override def millisOfDay(): DateTimeField = iMillisOfDay

  override def seconds(): DurationField = iSeconds

  override def secondOfMinute(): DateTimeField = iSecondOfMinute

  override def secondOfDay(): DateTimeField = iSecondOfDay

  override def minutes(): DurationField = iMinutes

  override def minuteOfHour(): DateTimeField = iMinuteOfHour

  override def minuteOfDay(): DateTimeField = iMinuteOfDay

  override def hours(): DurationField = iHours

  override def hourOfDay(): DateTimeField = iHourOfDay

  override def clockhourOfDay(): DateTimeField = iClockhourOfDay

  override def halfdays(): DurationField = iHalfdays

  override def hourOfHalfday(): DateTimeField = iHourOfHalfday

  override def clockhourOfHalfday(): DateTimeField = iClockhourOfHalfday

  override def halfdayOfDay(): DateTimeField = iHalfdayOfDay

  override def days(): DurationField = iDays

  override def dayOfWeek(): DateTimeField = iDayOfWeek

  override def dayOfMonth(): DateTimeField = iDayOfMonth

  override def dayOfYear(): DateTimeField = iDayOfYear

  override def weeks(): DurationField = iWeeks

  override def weekOfWeekyear(): DateTimeField = iWeekOfWeekyear

  override def weekyears(): DurationField = iWeekyears

  override def weekyear(): DateTimeField = iWeekyear

  override def weekyearOfCentury(): DateTimeField = iWeekyearOfCentury

  override def months(): DurationField = iMonths

  override def monthOfYear(): DateTimeField = iMonthOfYear

  override def years(): DurationField = iYears

  override def year(): DateTimeField = iYear

  override def yearOfEra(): DateTimeField = iYearOfEra

  override def yearOfCentury(): DateTimeField = iYearOfCentury

  override def centuries(): DurationField = iCenturies

  override def centuryOfEra(): DateTimeField = iCenturyOfEra

  override def eras(): DurationField = iEras

  override def era(): DateTimeField = iEra

  // Subclass hook: override entries of `fields` to customize the chronology.
  protected def assemble(fields: Fields): Unit

  protected def getBase(): Chronology = iBase

  protected def getParam(): AnyRef = iParam

  /**
   * Rebuilds every cached field: starts from the base chronology's fields,
   * lets the subclass override them via assemble(), falls back to the
   * (unsupported) defaults of BaseChronology for anything still missing, and
   * finally derives iBaseFlags for the getDateTimeMillis fast paths.
   */
  private def setFields() {
    val fields = new Fields()
    if (iBase != null) {
      fields.copyFieldsFrom(iBase)
    }
    assemble(fields)
    {
      var f: DurationField = null
      iMillis = if ({ f = fields.millis; f } != null) f else super.millis()
      iSeconds = if ({ f = fields.seconds; f } != null) f else super.seconds()
      iMinutes = if ({ f = fields.minutes; f } != null) f else super.minutes()
      iHours = if ({ f = fields.hours; f } != null) f else super.hours()
      iHalfdays =
        if ({ f = fields.halfdays; f } != null) f else super.halfdays()
      iDays = if ({ f = fields.days; f } != null) f else super.days()
      iWeeks = if ({ f = fields.weeks; f } != null) f else super.weeks()
      iWeekyears =
        if ({ f = fields.weekyears; f } != null) f else super.weekyears()
      iMonths = if ({ f = fields.months; f } != null) f else super.months()
      iYears = if ({ f = fields.years; f } != null) f else super.years()
      iCenturies =
        if ({ f = fields.centuries; f } != null) f else super.centuries()
      iEras = if ({ f = fields.eras; f } != null) f else super.eras()
    }
    {
      var f: DateTimeField = null
      iMillisOfSecond = if ({ f = fields.millisOfSecond; f } != null) f
      else super.millisOfSecond()
      iMillisOfDay =
        if ({ f = fields.millisOfDay; f } != null) f else super.millisOfDay()
      iSecondOfMinute = if ({ f = fields.secondOfMinute; f } != null) f
      else super.secondOfMinute()
      iSecondOfDay =
        if ({ f = fields.secondOfDay; f } != null) f else super.secondOfDay()
      iMinuteOfHour =
        if ({ f = fields.minuteOfHour; f } != null) f else super.minuteOfHour()
      iMinuteOfDay =
        if ({ f = fields.minuteOfDay; f } != null) f else super.minuteOfDay()
      iHourOfDay =
        if ({ f = fields.hourOfDay; f } != null) f else super.hourOfDay()
      iClockhourOfDay = if ({ f = fields.clockhourOfDay; f } != null) f
      else super.clockhourOfDay()
      iHourOfHalfday = if ({ f = fields.hourOfHalfday; f } != null) f
      else super.hourOfHalfday()
      iClockhourOfHalfday = if ({ f = fields.clockhourOfHalfday; f } != null) f
      else super.clockhourOfHalfday()
      iHalfdayOfDay =
        if ({ f = fields.halfdayOfDay; f } != null) f else super.halfdayOfDay()
      iDayOfWeek =
        if ({ f = fields.dayOfWeek; f } != null) f else super.dayOfWeek()
      iDayOfMonth =
        if ({ f = fields.dayOfMonth; f } != null) f else super.dayOfMonth()
      iDayOfYear =
        if ({ f = fields.dayOfYear; f } != null) f else super.dayOfYear()
      iWeekOfWeekyear = if ({ f = fields.weekOfWeekyear; f } != null) f
      else super.weekOfWeekyear()
      iWeekyear =
        if ({ f = fields.weekyear; f } != null) f else super.weekyear()
      iWeekyearOfCentury = if ({ f = fields.weekyearOfCentury; f } != null) f
      else super.weekyearOfCentury()
      iMonthOfYear =
        if ({ f = fields.monthOfYear; f } != null) f else super.monthOfYear()
      iYear = if ({ f = fields.year; f } != null) f else super.year()
      iYearOfEra =
        if ({ f = fields.yearOfEra; f } != null) f else super.yearOfEra()
      iYearOfCentury = if ({ f = fields.yearOfCentury; f } != null) f
      else super.yearOfCentury()
      iCenturyOfEra =
        if ({ f = fields.centuryOfEra; f } != null) f else super.centuryOfEra()
      iEra = if ({ f = fields.era; f } != null) f else super.era()
      var flags: Int = 0
      // See iBaseFlags above: bit 1 = time fields, bit 2 = millisOfDay,
      // bit 4 = date fields equal to the base's.
      flags =
        if (iBase == null) 0
        else
          (if (iHourOfDay == iBase.hourOfDay() && iMinuteOfHour == iBase
             .minuteOfHour() &&
               iSecondOfMinute == iBase.secondOfMinute() &&
               iMillisOfSecond == iBase.millisOfSecond()) 1
           else 0) |
            (if (iMillisOfDay == iBase.millisOfDay()) 2 else 0) |
            (if (iYear == iBase.year() && iMonthOfYear == iBase
               .monthOfYear() &&
               iDayOfMonth == iBase.dayOfMonth()) 4
             else 0)
      iBaseFlags = flags
    }
  }

  // Restores the transient field caches after Java deserialization.
  private def readObject(in: ObjectInputStream) {
    in.defaultReadObject()
    setFields()
  }
}
| mdedetrich/soda-time | shared/src/main/scala/org/joda/time/chrono/AssembledChronology.scala | Scala | bsd-2-clause | 17,058 |
package org.pico.statsd
import org.pico.statsd.datapoint.{AlertType, EventData, Priority}
import org.scalacheck.Arbitrary.arbitrary
import org.scalacheck.{Arbitrary, Gen}
package object arb {

  /** Value-class wrapper around a generated identifier string. */
  case class Identifier(value: String) extends AnyVal

  implicit val arbitraryPriority: Arbitrary[Priority.Value] =
    Arbitrary(Gen.oneOf(Priority.values.toSeq))

  implicit val arbitraryAlert: Arbitrary[AlertType.Value] =
    Arbitrary(Gen.oneOf(AlertType.values.toSeq))

  /** Builds an [[EventData]] from independently generated components. */
  implicit val arbitraryEventData: Arbitrary[EventData] =
    Arbitrary(
      arbitrary[String].flatMap { txt =>
        arbitrary[Priority.Value].flatMap { pri =>
          arbitrary[AlertType.Value].map { alt =>
            EventData(txt, pri, alt)
          }
        }
      })

  implicit val arbitraryId: Arbitrary[Identifier] =
    Arbitrary(Gen.identifier.map(Identifier(_)))
}
| pico-works/pico-statsd | pico-statsd/src/test/scala/org/pico/statsd/arb/package.scala | Scala | mit | 758 |
package scala.meta
package internal.hosts.scalac
package converters
import org.scalameta.invariants._
import org.scalameta.unreachable
import scala.{Seq => _}
import scala.collection.immutable.Seq
import scala.tools.nsc.{Global => ScalaGlobal}
import scala.reflect.internal.Flags._
import scala.meta.internal.{ast => m}
import scala.meta.internal.{semantic => s}
import scala.meta.internal.ast.Helpers.XtensionTermOps
import scala.meta.internal.hosts.scalac.reflect._
// This module exposes a method to convert from scala.meta types to scala.reflect types.
// The logic is mostly straightforward except for when we need to create symbols for compound and existential types.
trait ToGtype extends GlobalToolkit with MetaToolkit {
self: Api =>
  /**
   * Converts a scala.meta type (`m.Type.Arg`) into the corresponding
   * scala.reflect type (`g.Type`).  Symbols needed for compound-type
   * refinements and existential quantifiers are created on demand and
   * registered in `symbolTable`.
   */
  protected implicit class XtensionMtypeToGtype(mtpe: m.Type.Arg) {
    // Converts a single scala.meta annotation into a scala.reflect
    // AnnotationInfo.  Classfile (Java) annotations are not implemented (???).
    private def gannotinfo(mannot: m.Mod.Annot): g.AnnotationInfo = {
      val gtpe = mannot.body.tpe.require[m.Type].toGtype
      val gargss = mannot.body.ctorArgss.map(_.map(_.toGtree))
      if (gargss.length > 1) throw new ConvertException(mannot, s"implementation restriction: annotations with multiple argument lists are not supported by scalac")
      if (gtpe <:< g.definitions.StaticAnnotationClass.tpe) {
        g.AnnotationInfo(gtpe, gargss.head.toList, Nil)
      } else {
        require(gtpe <:< g.definitions.ClassfileAnnotationClass.tpe)
        ???
      }
    }
    private def gannotinfos(mannots: Seq[m.Mod.Annot]): List[g.AnnotationInfo] = {
      mannots.map(gannotinfo).toList
    }
    // Missing bounds default to Nothing (lower) and Any (upper).
    private def gtypeBounds(mbounds: m.Type.Bounds): g.TypeBounds = {
      val glo = mbounds.lo.map(_.toGtype).getOrElse(g.definitions.NothingClass.tpe)
      val ghi = mbounds.hi.map(_.toGtype).getOrElse(g.definitions.AnyClass.tpe)
      g.TypeBounds(glo, ghi)
    }
    // Factories that create fresh logical symbols owned by `gsym`, with the
    // scalac flag combinations appropriate to each declaration form.
    private implicit class RichGlobalSymbol(gsym: g.Symbol) {
      private def mkGterm(name: String, flags: Long) = gsym.newTermSymbol(g.TermName(name), newFlags = flags)
      private def mkGtype(name: String, flags: Long) = gsym.newTypeSymbol(g.TypeName(name), newFlags = flags)
      def mkLabstractVal(name: String) = l.AbstractVal(mkGterm(name, DEFERRED | METHOD | STABLE | ACCESSOR))
      def mkLexistentialVal(name: String) = l.AbstractType(mkGterm(name + g.nme.SINGLETON_SUFFIX, DEFERRED | EXISTENTIAL))
      def mkLabstractVar(name: String) = l.AbstractVar(mkGterm(name, DEFERRED | METHOD | ACCESSOR), mkGterm(name + g.nme.SETTER_SUFFIX, DEFERRED | METHOD | ACCESSOR))
      def mkLabstractDef(name: String) = l.AbstractDef(mkGterm(name, METHOD))
      def mkLabstractType(name: String) = l.AbstractType(mkGtype(name, DEFERRED))
      def mkLexistentialType(name: String) = l.AbstractType(mkGtype(name, DEFERRED | EXISTENTIAL))
      def mkLtype(name: String) = l.Type(mkGtype(name, 0))
      def mkLtermParameter(name: String) = l.TermParameter(mkGterm(name, PARAM))
      def mkLtypeParameter(name: String) = l.TypeParameter(mkGtype(name, PARAM | DEFERRED))
    }
    // Mirrors a scala.meta declaration onto an already-created logical symbol:
    // flags/annotations first (mimicMods), then the type signature (mimicInfo).
    private implicit class RichLogicalSymbol(lsym: l.Symbol) {
      private def mimicMods(mmods: Seq[m.Mod], mtree: m.Tree): Unit = {
        val gsym = lsym.gsymbol // TODO: check that this is correct
        mmods.foreach({
          case mmod: m.Mod.Annot => gsym.withAnnotations(List(gannotinfo(mmod)))
          case m.Mod.Private(m.Name.Anonymous()) => gsym.setFlag(PRIVATE)
          case m.Mod.Private(m.Term.This(_)) => gsym.setFlag(PRIVATE | LOCAL)
          case m.Mod.Private(_) => ???
          case m.Mod.Protected(m.Name.Anonymous()) => gsym.setFlag(PROTECTED)
          case m.Mod.Protected(m.Term.This(_)) => gsym.setFlag(PROTECTED | LOCAL)
          case m.Mod.Protected(_) => ???
          case mmod: m.Mod.Implicit => gsym.setFlag(IMPLICIT)
          case mmod: m.Mod.Final => gsym.setFlag(FINAL)
          case mmod: m.Mod.Sealed => gsym.setFlag(SEALED)
          case mmod: m.Mod.Override => gsym.setFlag(OVERRIDE)
          case mmod: m.Mod.Case => gsym.setFlag(CASE)
          case mmod: m.Mod.Abstract => gsym.setFlag(ABSTRACT)
          case mmod: m.Mod.Covariant => gsym.setFlag(COVARIANT)
          case mmod: m.Mod.Contravariant => gsym.setFlag(CONTRAVARIANT)
          case mmod: m.Mod.Lazy => gsym.setFlag(LAZY)
          case mmod: m.Mod.ValParam => // do nothing
          case mmod: m.Mod.VarParam => // do nothing
        })
        // TODO: INTERFACE, MUTABLE, STATIC, PRESUPER, INCONSTRUCTOR, STABLE, *ACCESSOR, EXISTENTIAL
        mtree match { case m.Term.Param(_, _, Some(tpe), _) if tpe.toGtype.typeSymbol == g.definitions.ByNameParamClass => gsym.setFlag(BYNAMEPARAM); case _ => }
        if (gsym.hasFlag(ABSTRACT) && gsym.hasFlag(OVERRIDE)) { gsym.resetFlag(ABSTRACT | OVERRIDE); gsym.setFlag(ABSOVERRIDE) }
        if (mtree.isInstanceOf[m.Defn.Trait]) gsym.setFlag(TRAIT)
        mtree match { case mtree: m.Term.Param if mtree.default.nonEmpty => gsym.setFlag(DEFAULTPARAM); case _ => }
        mtree match { case mtree: m.Defn.Var if mtree.rhs.isEmpty => gsym.setFlag(DEFAULTINIT); case _ => }
      }
      // Looks up (or creates) the symbols for a list of type parameters and
      // mirrors each declaration onto them.
      private def gtparams(mtparams: Seq[m.Type.Param]): List[g.Symbol] = {
        mtparams.map(mtparam => {
          val htparam = mtparam.name.require[m.Name].denot.requireSymbol
          val gowner = { require(lsym.gsymbols.length == 1); lsym.gsymbol }
          val ltparam = symbolTable.lookupOrElseUpdate(htparam, gowner.mkLtypeParameter(mtparam.name.toString))
          ltparam.mimic(mtparam).gsymbol
        }).toList
      }
      // Same as gtparams, but for term (value) parameters.
      private def gparams(mparams: Seq[m.Term.Param]): List[g.Symbol] = {
        mparams.map(mparam => {
          val hparam = mparam.name.require[m.Name].denot.requireSymbol
          val gowner = { require(lsym.gsymbols.length == 1); lsym.gsymbol }
          val lparam = symbolTable.lookupOrElseUpdate(hparam, gowner.mkLtermParameter(mparam.name.toString))
          lparam.mimic(mparam).gsymbol
        }).toList
      }
      // Sets the scala.reflect info (signature) corresponding to a scala.meta
      // declaration; only the declaration forms listed here are supported.
      private def mimicInfo(mtree: m.Tree): Unit = {
        (mtree, lsym) match {
          case (m.Decl.Val(_, _, mtpe), l.AbstractVal(gsym)) =>
            if (gsym.hasFlag(EXISTENTIAL)) {
              val upperBound = g.intersectionType(List(mtpe.toGtype, g.definitions.SingletonClass.tpe), gsym.owner)
              gsym.setInfo(g.TypeBounds.upper(upperBound))
            } else {
              gsym.setInfo(g.NullaryMethodType(mtpe.toGtype))
            }
          case (m.Decl.Var(_, _, mtpe), l.AbstractVar(ggetter, gsetter)) =>
            ggetter.setInfo(g.NullaryMethodType(mtpe.toGtype))
            val gsetterParams = List(gsetter.newTermSymbol(g.TermName("x$1"), newFlags = PARAM).setInfo(mtpe.toGtype))
            val gsetterRet = g.definitions.UnitClass.tpe
            gsetter.setInfo(g.MethodType(gsetterParams, gsetterRet))
          case (m.Decl.Def(_, _, mtparams, mparamss, mtpe), l.AbstractDef(gsym)) =>
            val gprecachedTparams = gtparams(mtparams)
            val gprecachedParamss = mparamss.map(gparams)
            val gmethodType = gprecachedParamss.foldLeft(mtpe.toGtype)((curr, gparams) => g.MethodType(gparams, curr))
            gsym.setInfo(g.genPolyType(gprecachedTparams, gmethodType))
          case (m.Decl.Type(_, _, mtparams, mtypebounds), l.AbstractType(gsym)) =>
            gsym.setInfo(g.genPolyType(gtparams(mtparams), gtypeBounds(mtypebounds)))
          case (m.Defn.Type(_, _, mtparams, mtpe), l.Type(gsym)) =>
            gsym.setInfo(g.genPolyType(gtparams(mtparams), mtpe.toGtype))
          case (m.Type.Param(_, _, mtparams, mtypebounds, mviewbounds, mcontextbounds), l.TypeParameter(gsym)) =>
            require(mcontextbounds.isEmpty && mviewbounds.isEmpty)
            gsym.setInfo(g.genPolyType(gtparams(mtparams), gtypeBounds(mtypebounds)))
          case (m.Term.Param(_, _, mdecltpe, mdefault), l.TermParameter(gsym)) =>
            require(mdecltpe.nonEmpty && mdefault.isEmpty)
            gsym.setInfo(mdecltpe.get.toGtype)
          case _ =>
            ???
        }
      }
      // Entry point: populates the symbol's flags and info once (idempotent —
      // skipped when the underlying g.Symbol already has a raw info).
      def mimic(mtree: m.Tree): l.Symbol = {
        if (!lsym.gsymbol.hasRawInfo) {
          import scala.language.reflectiveCalls
          mimicMods(mtree.require[{ def mods: Seq[m.Mod] }].mods, mtree)
          mimicInfo(mtree)
        }
        lsym
      }
    }
    private def gowner(mtree: m.Tree): g.Symbol = {
      // TODO: we probably need something other than NoSymbol for RefinedType.decls and ExistentialType.quants
      // I always had no idea about how this works in scala. I guess, it's time for find out :)
      g.NoSymbol
    }
    // Converts a scala.meta denotation prefix into a scala.reflect prefix type.
    private def gprefix(hprefix: s.Prefix): g.Type = {
      hprefix match {
        case s.Prefix.Zero => g.NoPrefix
        case s.Prefix.Type(mtpe) => mtpe.require[m.Type].toGtype
      }
    }
    /** The actual conversion, memoized in `tpeCache` keyed by the meta type. */
    def toGtype: g.Type = tpeCache.getOrElseUpdate(mtpe, {
      def loop(mtpe: m.Type.Arg): g.Type = mtpe match {
        case mname: m.Type.Name =>
          g.TypeRef(gprefix(mname.denot.prefix), symbolTable.convert(mname.denot.requireSymbol).gsymbol, Nil)
        case m.Type.Select(mqual, mname) =>
          g.TypeRef(loop(m.Type.Singleton(mqual)), symbolTable.convert(mname.denot.requireSymbol).gsymbol, Nil)
        case m.Type.Project(mqual, mname) =>
          g.TypeRef(loop(mqual), symbolTable.convert(mname.denot.requireSymbol).gsymbol, Nil)
        case m.Type.Singleton(mref) =>
          def singleType(mname: m.Term.Name): g.Type = {
            val gsym = symbolTable.convert(mname.denot.requireSymbol).gsymbol
            if (gsym.isModuleClass) g.ThisType(gsym)
            else g.SingleType(gprefix(mname.denot.prefix), gsym)
          }
          def superType(msuper: m.Term.Super): g.Type = {
            val gpre = gprefix(msuper.thisp.require[m.Name].denot.prefix)
            val gmixsym = msuper.superp.require[m.Name].denot.requireSymbol match {
              case s.Symbol.Zero => g.intersectionType(gpre.typeSymbol.info.parents)
              case ssym => gpre.typeSymbol.info.baseType(symbolTable.convert(ssym).gsymbol)
            }
            g.SuperType(gpre, gmixsym)
          }
          mref match {
            case mname: m.Term.Name => singleType(mname)
            case m.Term.Select(_, mname) => singleType(mname)
            case mref: m.Term.This => g.ThisType(symbolTable.convert(mref.qual.require[m.Name].denot.requireSymbol).gsymbol)
            case mref: m.Term.Super => superType(mref)
          }
        case m.Type.Apply(mtpe, margs) =>
          g.appliedType(loop(mtpe), margs.map(loop).toList)
        case m.Type.ApplyInfix(mlhs, mop, mrhs) =>
          g.appliedType(loop(mop), List(loop(mlhs), loop(mrhs)))
        case m.Type.Function(mparams, mres) =>
          g.appliedType(g.definitions.FunctionClass(mparams.length + 1), (mparams :+ mres).map(loop).toList)
        case m.Type.Tuple(melements) =>
          g.appliedType(g.definitions.TupleClass(melements.length), melements.map(loop).toList)
        case m.Type.Compound(mtpes, mrefinement) =>
          // Refinement members need real scala.reflect symbols: create them,
          // enter them into a fresh scope, then mimic their declarations.
          val gscope = g.newScope
          val mexplodedRefinement = mrefinement.flatMap(mrefine => mrefine.binders.map(mname => mrefine -> mname))
          val refinement = mexplodedRefinement.map({ case (mrefine, mname) =>
            val lrefine = symbolTable.lookupOrElseUpdate(mname.denot.requireSymbol, mrefine match {
              case _: m.Decl.Val => gowner(mrefine).mkLabstractVal(mname.toString)
              case _: m.Decl.Var => gowner(mrefine).mkLabstractVar(mname.toString)
              case _: m.Decl.Def => gowner(mrefine).mkLabstractDef(mname.toString)
              case _: m.Decl.Type => gowner(mrefine).mkLabstractType(mname.toString)
              case _: m.Defn.Type => gowner(mrefine).mkLtype(mname.toString)
              case _ => unreachable(debug(mrefine, mrefine.show[Structure]))
            })
            lrefine.gsymbols.foreach(gscope.enter)
            mrefine -> lrefine
          })
          refinement.foreach({ case (mrefine, lrefine) => lrefine.mimic(mrefine) })
          g.refinedType(mtpes.map(loop).toList, gowner(mtpe), gscope, g.NoPosition)
        case m.Type.Existential(mtpe, mquants) =>
          // Same idea as compound types, but for existential quantifiers.
          val mexplodedQuants = mquants.flatMap(mquant => mquant.binders.map(mname => mquant -> mname))
          val quants = mexplodedQuants.map({ case (mquant, mname) =>
            val lquant = symbolTable.lookupOrElseUpdate(mname.denot.requireSymbol, mquant match {
              case _: m.Decl.Val => gowner(mquant).mkLexistentialVal(mname.toString)
              case _: m.Decl.Type => gowner(mquant).mkLexistentialType(mname.toString)
              case _ => unreachable(debug(mquant, mquant.show[Structure]))
            })
            mquant -> lquant
          })
          quants.foreach({ case (mquant, lquant) => lquant.mimic(mquant) })
          g.ExistentialType(quants.flatMap(_._2.gsymbols).toList, loop(mtpe))
        case m.Type.Annotate(mtpe, mannots) =>
          g.AnnotatedType(gannotinfos(mannots), loop(mtpe))
        case m.Type.Placeholder(mbounds) =>
          ???
        case m.Type.Arg.ByName(mtpe) =>
          g.appliedType(g.definitions.ByNameParamClass, List(loop(mtpe)))
        case m.Type.Arg.Repeated(mtpe) =>
          g.appliedType(g.definitions.RepeatedParamClass, List(loop(mtpe)))
        case mlit: m.Lit =>
          // Literal types become scala.reflect constant types.
          mlit match {
            case m.Lit.Bool(value) => g.ConstantType(g.Constant(value))
            case m.Lit.Int(value) => g.ConstantType(g.Constant(value))
            case m.Lit.Long(value) => g.ConstantType(g.Constant(value))
            case m.Lit.Float(value) => g.ConstantType(g.Constant(value))
            case m.Lit.Double(value) => g.ConstantType(g.Constant(value))
            case m.Lit.Char(value) => g.ConstantType(g.Constant(value))
            case m.Lit.String(value) => g.ConstantType(g.Constant(value))
            case m.Lit.Symbol(value) => unreachable
            case m.Lit.Null() => g.ConstantType(g.Constant(null))
            case m.Lit.Unit() => g.ConstantType(g.Constant(()))
          }
      }
      loop(mtpe)
    })
  }
} | beni55/scalameta | scalahost/src/main/scala/scala/meta/internal/hosts/scalac/converters/ToGtype.scala | Scala | bsd-3-clause | 13,965 |
/*
* Copyright (c) 2018. Fengguo Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License v2.0
* which accompanies this distribution, and is available at
* https://www.apache.org/licenses/LICENSE-2.0
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.jawa.core.io
/**
 * A half-open span of positions: `[offset, offset + length)`.
 *
 * @param offset first position covered by the range
 * @param length number of positions covered
 */
case class Range(offset: Int, length: Int) {

  /** @return true iff `other` lies entirely within this range. */
  def contains(other: Range): Boolean = other.offset >= offset && other.offset + other.length <= offset + length

  /** @return true iff `other` lies within this range and is strictly shorter. */
  def strictlyContains(other: Range): Boolean = (this contains other) && this.length > other.length

  /**
   * @return the smallest range that contains both this and other
   */
  def mergeWith(other: Range): Range = {
    // Bug fix: the previous implementation sorted the two ranges by offset and
    // assumed the later-starting one also ended last, so when one range
    // contained the other it returned a too-short result
    // (e.g. Range(0, 10) mergeWith Range(2, 3) yielded Range(0, 5)).
    val start = math.min(offset, other.offset)
    val end = math.max(offset + length, other.offset + other.length)
    Range(start, end - start)
  }

  /** @return true iff the two ranges share at least one position. */
  def intersects(other: Range): Boolean =
    !(other.offset >= offset + length || other.offset + other.length - 1 < offset)

  /** @return this range extended by `n` positions to the left. */
  def expandLeft(n: Int): Range = Range(offset - n, length + n)
}
| arguslab/Argus-SAF | jawa/src/main/scala/org/argus/jawa/core/io/Range.scala | Scala | apache-2.0 | 1,111 |
/*
* Copyright 2014 Michael Krolikowski
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.mkroli.dns4s.section.resource
import org.scalatest.FunSpec
import com.github.mkroli.dns4s.MessageBuffer
import com.github.mkroli.dns4s.bytes
import com.github.mkroli.dns4s.section.ResourceRecord
/** Spec for the DNS PTR resource record: encode/decode round-trips and wire format. */
class PTRResourceSpec extends FunSpec {
  describe("PTRResource") {
    describe("encoding/decoding") {
      it("decode(encode(resource)) should be the same as resource") {
        // Round-trip: encode into a MessageBuffer, flip it for reading, decode again.
        def testEncodeDecode(pr: PTRResource) {
          assert(pr === PTRResource(pr(MessageBuffer()).flipped))
        }
        testEncodeDecode(PTRResource(""))
        testEncodeDecode(PTRResource("test.test.test"))
      }
      it("should be decoded wrapped in ResourceRecord") {
        val rr = ResourceRecord("test", ResourceRecord.typePTR, 0, 0, PTRResource("test.test"))
        val a = rr(MessageBuffer()).flipped
        // Expected wire bytes: name "test", type PTR (0x000C), class/ttl/rdlength,
        // then label "test" followed by a compression pointer (C000) back to offset 0.
        val b = bytes("04 74 65 73 74 00 000C 0000 00000000 0007 04 74 65 73 74 C000")
        assert(b === a.getBytes(a.remaining))
        assert(rr === ResourceRecord(MessageBuffer().put(b.toArray).flipped))
      }
    }
  }
}
| mesosphere/dns4s | core/src/test/scala/com/github/mkroli/dns4s/section/resource/PTRResourceSpec.scala | Scala | apache-2.0 | 1,655 |
package scalart
import org.scalatest.{Matchers, FunSuite}
/** Exercises how implicit resolution interacts with implicit default arguments. */
class ImplicitTest extends FunSuite with Matchers {

  test("Test how implicit default arguments work") {
    // With no implicit Int in scope, the default argument (0) is used.
    def pick()(implicit chosen: Int = 0): Int = chosen

    pick() should be(0)

    // Once an implicit Int is visible, it takes precedence over the default.
    val resolved = {
      implicit val candidate = 10
      pick()
    }
    resolved should be(10)
  }
}
| vidyacraghav/scalart | core/src/test/scala/scalart/ImplicitTest.scala | Scala | apache-2.0 | 304 |
/*
* Copyright (C) 2012 The Regents of The University California.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shark.memstore2
import java.util.{ArrayList => JArrayList, List => JList}
import org.apache.hadoop.hive.serde2.`lazy`.LazyFactory
import org.apache.hadoop.hive.serde2.`lazy`.LazySimpleSerDe.SerDeParameters
import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspector, ObjectInspectorUtils,
StructField, StructObjectInspector}
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo
import shark.{SharkConfVars, SharkEnvSlave}
/**
 * A Hive StructObjectInspector over Shark's in-memory columnar rows
 * (ColumnarStruct). Fields are resolved positionally via IDStructField.fieldID.
 */
class ColumnarStructObjectInspector(fields: JList[StructField]) extends StructObjectInspector {

  // Every row is a struct of the table's columns.
  override def getCategory: Category = Category.STRUCT

  override def getTypeName: String = ObjectInspectorUtils.getStandardStructTypeName(this)

  override def getStructFieldRef(fieldName: String): StructField =
    ObjectInspectorUtils.getStandardStructFieldRef(fieldName, fields)

  override def getAllStructFieldRefs: JList[_ <: StructField] = fields

  // Look up the field by its column ordinal rather than by name; the fieldRef
  // is expected to be the IDStructField created by the companion's apply().
  override def getStructFieldData(data: Object, fieldRef: StructField): Object =
    data.asInstanceOf[ColumnarStruct].getField(
      fieldRef.asInstanceOf[ColumnarStructObjectInspector.IDStructField].fieldID)

  override def getStructFieldsDataAsList(data: Object): JList[Object] =
    if (data == null) null else data.asInstanceOf[ColumnarStruct].getFieldsAsList()
}
object ColumnarStructObjectInspector {

  /**
   * Builds an inspector from the SerDe's column names/types: primitive columns
   * get writable primitive inspectors, while complex types fall back to lazy
   * inspectors built from the SerDe's separators/escaping settings.
   */
  def apply(serDeParams: SerDeParameters): ColumnarStructObjectInspector = {
    val columnNames = serDeParams.getColumnNames()
    val columnTypes = serDeParams.getColumnTypes()
    val fields = new JArrayList[StructField]()
    for (i <- 0 until columnNames.size) {
      val typeInfo = columnTypes.get(i)
      // NOTE(review): inspector creation is serialized on a global lock,
      // presumably because the underlying factories are not thread-safe — confirm.
      val fieldOI = typeInfo.getCategory match {
        case Category.PRIMITIVE => SharkEnvSlave.objectInspectorLock.synchronized {
          PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(
            typeInfo.asInstanceOf[PrimitiveTypeInfo].getPrimitiveCategory)
        }
        case _ => SharkEnvSlave.objectInspectorLock.synchronized {
          LazyFactory.createLazyObjectInspector(
            typeInfo, serDeParams.getSeparators(), 1, serDeParams.getNullSequence(),
            serDeParams.isEscaped(), serDeParams.getEscapeChar())
        }
      }
      fields.add(new IDStructField(i, columnNames.get(i), fieldOI))
    }
    new ColumnarStructObjectInspector(fields)
  }

  /** A StructField that additionally records the column's ordinal position. */
  class IDStructField(
    val fieldID: Int,
    val fieldName: String,
    val fieldObjectInspector: ObjectInspector,
    val fieldComment: String)
    extends StructField {

    // Convenience constructor for fields without a comment.
    def this(fieldID: Int, fieldName: String, fieldObjectInspector: ObjectInspector) =
      this(fieldID, fieldName, fieldObjectInspector, null)

    override def getFieldName: String = fieldName
    override def getFieldObjectInspector: ObjectInspector = fieldObjectInspector
    override def toString(): String = "" + fieldID + ":" + fieldName
    override def getFieldComment() : String = fieldComment
  }
}
| sameeragarwal/blinkdb_dev | src/main/scala/shark/memstore2/ColumnarStructObjectInspector.scala | Scala | apache-2.0 | 3,767 |
/*
* Author: Pablo Lalloni <plalloni@gmail.com>
* Created: 24/05/2011 13:48:43
*/
package org.retistruen.instrument
import org.joda.time.Instant
import org.retistruen.Datum
import org.scalatest.FunSpec
import org.scalatest.matchers.ShouldMatchers
/** Spec for the Identity instrument: it should forward data unchanged and cache the last datum. */
class IdentitySpec extends FunSpec with ShouldMatchers {

  val i = new Instant

  describe("An Identity functor") {

    describe("when just created") {
      val identity = new Identity[Int]("x")
      it("should have no cached result") {
        identity.last should equal(None)
      }
    }

    describe("when received a datum") {
      // Wire emitter -> identity -> recorder, then push a single datum through.
      val emitter = new SourceEmitter[Int]("emitter")
      val identity = new Identity[Int]("var")
      val rec = new RecordingReceiver[Int]("rec")
      emitter >> identity >> rec
      emitter << Datum(1, i)
      it("should emit the same datum") {
        rec.data.head.value should equal(1)
      }
      it("should return the same datum as last emitted") {
        identity.last.map(_.value) should equal(Some(1))
      }
    }
  }
}
| plalloni/retistruen | src/test/scala/org/retistruen/instrument/IdentitySpec.scala | Scala | mit | 1,041 |
package uk.gov.gds.ier.langs
import play.api.mvc._
import com.google.inject.Singleton
/** Serves the JavaScript i18n message bundles used by the register-to-vote frontend. */
@Singleton
class MessagesController extends Controller {

  // Client-side namespace the generated JS assigns the messages to.
  private val namespace = Some("GOVUK.registerToVote.messages")

  /** All messages, across every configured language. */
  def all = Action {
    Ok(Messages.jsMessages.all(namespace))
  }

  /** Messages for a single language, selected by its code. */
  def forLang(langCode: String) = Action {
    implicit val lang = Language.Lang(langCode)
    Ok(Messages.jsMessages(namespace))
  }
}
| michaeldfallen/ier-frontend | app/uk/gov/gds/ier/langs/MessagesController.scala | Scala | mit | 406 |
/*
* JBoss, Home of Professional Open Source
* Copyright 2010 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.server.core
import org.infinispan.lifecycle.AbstractModuleLifecycle
import org.infinispan.server.core.ExternalizerIds._
import org.infinispan.factories.{ComponentRegistry, GlobalComponentRegistry}
import org.infinispan.configuration.global.GlobalConfiguration
import org.infinispan.configuration.cache.Configuration
import org.infinispan.factories.components.ComponentMetadataRepo
/**
* Module lifecycle callbacks implementation that enables module specific
* {@link org.infinispan.marshall.AdvancedExternalizer} implementations to be registered.
*
* @author Galder Zamarreño
* @since 5.0
*/
class LifecycleCallbacks extends AbstractModuleLifecycle {

  override def cacheManagerStarting(gcr: GlobalComponentRegistry, globalCfg: GlobalConfiguration) {
    // Stash the metadata repo for module-wide access, then register this
    // module's externalizer with the global serialization config.
    LifecycleCallbacks.componentMetadataRepo = gcr.getComponentMetadataRepo
    addExternalizer(globalCfg)
  }

  // NOTE(review): unconditionally disables storeAsBinary for every starting
  // cache — confirm this is intended for all server-hosted caches.
  override def cacheStarting(cr: ComponentRegistry, cfg: Configuration, cacheName: String) =
    cfg.storeAsBinary().enabled(false)

  // Registers the CacheValue externalizer under the server-core externalizer id.
  private[core] def addExternalizer(globalCfg : GlobalConfiguration) =
    globalCfg.serialization().advancedExternalizers().put(
      SERVER_CACHE_VALUE, new CacheValue.Externalizer)
}
object LifecycleCallbacks {
var componentMetadataRepo: ComponentMetadataRepo = _
} | nmldiegues/stibt | infinispan/server/core/src/main/scala/org/infinispan/server/core/LifecycleCallbacks.scala | Scala | apache-2.0 | 2,376 |
package com.github.chaabaj.openid.apis.slack
import akka.http.scaladsl.model.StatusCodes
import com.github.chaabaj.openid.exceptions.WebServiceException
import spray.json.JsValue
object SlackResponseHandler {

  /**
   * Inspects a Slack API response body: if the JSON object carries an "error"
   * member, it is raised as a WebServiceException (status OK, since Slack
   * reports failures inside a 200 response); otherwise the payload is
   * returned untouched.
   */
  def handle(res: JsValue): JsValue = {
    val errorFields = res.asJsObject.getFields("error")
    errorFields.headOption match {
      case Some(error) => throw WebServiceException(StatusCodes.OK, error)
      case None        => res
    }
  }
}
| chaabaj/openid-scala | src/main/scala/com/github/chaabaj/openid/apis/slack/SlackResponseHandler.scala | Scala | mit | 428 |
/*
* Copyright (c) 2015-2017 EpiData, Inc.
*/
package util
import play.api.mvc.QueryStringBindable
import java.util.Date
/** Helper classes for parsing custom data types passed as url query parameters. */
/** Helper classes for parsing custom data types passed as url query parameters. */
object QueryStringBinders {

  // Binds a Date as epoch milliseconds, delegating parsing to the Long binder.
  implicit def bindableDate(implicit longBinder: QueryStringBindable[Long]) =
    new QueryStringBindable[Date] {
      override def bind(key: String, params: Map[String, Seq[String]]): Option[Either[String, Date]] =
        longBinder.bind(key, params) match {
          case Some(Right(value)) => Some(Right(new Date(value)))
          // None means the key was absent; any other outcome is a parse failure.
          case None => None
          case _ => Some(Left("Unable to parse Date."))
        }
      def unbind(key: String, value: Date): String = longBinder.unbind(key, value.getTime)
    }

  // Binds the Ordering enumeration from the literal strings "ascending"/"descending".
  implicit def bindableOrdering(implicit stringBinder: QueryStringBindable[String]) =
    new QueryStringBindable[Ordering.Value] {
      override def bind(key: String, params: Map[String, Seq[String]]): Option[Either[String, Ordering.Value]] =
        stringBinder.bind(key, params) match {
          case Some(Right("ascending")) => Some(Right(Ordering.Ascending))
          case Some(Right("descending")) => Some(Right(Ordering.Descending))
          case None => None
          case _ => Some(Left("Ordering must be either 'ascending' or 'descending'."))
        }
      def unbind(key: String, value: Ordering.Value): String =
        stringBinder.unbind(key, value match {
          case Ordering.Ascending => "ascending"
          case Ordering.Descending => "descending"
          // NOTE(review): unbinding any other Ordering value yields "unspecified",
          // which bind() would reject — round-trip is lossy for such values.
          case _ => "unspecified"
        })
    }
}
| epidataio/epidata-community | play/app/util/QueryStringBinders.scala | Scala | apache-2.0 | 1,594 |
// Base logging trait: the default log is a no-op so mixers can opt in selectively.
trait Logged {
  def log(msg: String) { }
}

// Mixin that sends log messages to stdout.
trait ConsoleLogger extends Logged {
  override def log(msg: String) { println(msg) }
}

// An exception that can log its own message through whatever Logged mixin it gets.
trait LoggedException extends Exception with Logged {
  def log() { log(getMessage()) }
}

class UnhappyException extends IllegalStateException
    with LoggedException { // This class extends a trait
  override def getMessage() = "arggh!"
}

// Account with a mutable balance visible to subclasses.
class Account {
  protected var balance = 0.0
}

class SavingsAccount extends Account {
  // Throws on overdraft; the ConsoleLogger is mixed in at construction time,
  // so the catch block below ends up logging to the console.
  def withdraw(amount: Double) {
    if (amount > balance) throw new UnhappyException with ConsoleLogger
    else balance -= amount
  }

  // More methods ...
}

object Main extends App {
  try {
    val acct = new SavingsAccount
    acct.withdraw(100)
  } catch {
    case e: LoggedException => e.log()
  }
}
| P7h/ScalaPlayground | Scala for the Impatient/examples/ch10/sec12/Logger.scala | Scala | apache-2.0 | 774 |
package com.nerdintheherd.config
/**
 * Mutable holder for the release tool's configuration state. All fields start
 * out empty/false; nothing in this class exposes or mutates them yet.
 *
 * @author Karen Davis
 */
class Config {
  // URIs of the projects to operate on.
  private var projectURIs: List[String] = Nil
  // Maven group ids to include.
  private var includedGroupIds: List[String] = Nil
  // Explicitly listed dependencies.
  private var theseDependencies: List[String] = Nil
  // Whether to process every dependency rather than a selected subset.
  private var allDependencies: Boolean = false
}
| karlroberts/release-o-matic | src/main/scala/com/nerdintheherd/config/Config.scala | Scala | bsd-3-clause | 277 |
package io.getquill.context.jdbc.oracle
import java.util.concurrent.ConcurrentLinkedQueue
import io.getquill.context.sql._
import scala.collection.JavaConverters._
/** JDBC (Oracle) run of the shared query-result-type spec, backed by real inserts. */
class QueryResultTypeJdbcSpec extends QueryResultTypeSpec {

  override val context = testContext

  import context._

  // Queries run synchronously here, so "await" is the identity.
  def await[T](r: T) = r

  // Thread-safe store for the fixture rows, with their DB-generated ids.
  val insertedProducts = new ConcurrentLinkedQueue[Product]

  override def beforeAll = {
    // Reset the table, insert the fixtures, and pair each generated id back
    // with the entry it belongs to.
    context.run(deleteAll)
    val ids = context.run(liftQuery(productEntries).foreach(p => productInsert(p)))
    val inserted = (ids zip productEntries).map {
      case (id, prod) => prod.copy(id = id)
    }
    insertedProducts.addAll(inserted.asJava)
    ()
  }

  def products = insertedProducts.asScala.toList

  "return list" - {
    "select" in {
      await(context.run(selectAll)) must contain theSameElementsAs (products)
    }
    "map" in {
      await(context.run(map)) must contain theSameElementsAs (products.map(_.id))
    }
    "sortBy" in {
      await(context.run(sortBy)) must contain theSameElementsInOrderAs (products)
    }
    "take" in {
      await(context.run(take)) must contain theSameElementsAs (products)
    }
    "drop" in {
      await(context.run(drop)) must contain theSameElementsAs (products.drop(1))
    }
    "++" in {
      await(context.run(`++`)) must contain theSameElementsAs (products ++ products)
    }
    "unionAll" in {
      await(context.run(unionAll)) must contain theSameElementsAs (products ++ products)
    }
    "union" in {
      await(context.run(union)) must contain theSameElementsAs (products)
    }
    "join" in {
      await(context.run(join)) must contain theSameElementsAs (products zip products)
    }
    "distinct" in {
      await(context.run(distinct)) must contain theSameElementsAs (products.map(_.id).distinct)
    }
  }

  "return single result" - {
    "min" - {
      "some" in {
        await(context.run(minExists)) mustEqual Some(products.map(_.sku).min)
      }
      "none" in {
        await(context.run(minNonExists)) mustBe None
      }
    }
    "max" - {
      "some" in {
        await(context.run(maxExists)) mustBe Some(products.map(_.sku).max)
      }
      "none" in {
        await(context.run(maxNonExists)) mustBe None
      }
    }
    "avg" - {
      "some" in {
        await(context.run(avgExists)) mustBe Some(BigDecimal(products.map(_.sku).sum) / products.size)
      }
      "none" in {
        await(context.run(avgNonExists)) mustBe None
      }
    }
    "size" in {
      await(context.run(productSize)) mustEqual products.size
    }
    "parametrized size" in {
      await(context.run(parametrizedSize(lift(10000)))) mustEqual 0
    }
  }
}
| mentegy/quill | quill-jdbc/src/test/scala/io/getquill/context/jdbc/oracle/QueryResultTypeJdbcSpec.scala | Scala | apache-2.0 | 2,654 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.impl.external
import slamdata.Predef.List
import java.lang.ClassLoader
import java.net.URLClassLoader
import java.nio.file.Path
import scala.AnyVal
import cats.effect.Sync
/** Value-class wrapper around the list of filesystem paths forming a classpath. */
final case class ClassPath(value: List[Path]) extends AnyVal

object ClassPath {

  /** Builds a URLClassLoader over the classpath entries, suspended in `F`. */
  def classLoader[F[_]: Sync](parent: ClassLoader, classPath: ClassPath): F[ClassLoader] =
    Sync[F].delay {
      val urls = classPath.value.map(_.toUri.toURL).toArray
      new URLClassLoader(urls, parent)
    }
}
| slamdata/slamengine | impl/src/main/scala/quasar/impl/external/ClassPath.scala | Scala | apache-2.0 | 1,060 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.tools.accumulo.ingest
import java.io._
import java.util.concurrent.Executors
import java.util.concurrent.atomic.AtomicLong
import com.typesafe.scalalogging.LazyLogging
import org.apache.commons.io.IOUtils
import org.geotools.data.{DataStoreFinder, DataUtilities, FeatureWriter, Transaction}
import org.geotools.factory.Hints
import org.geotools.filter.identity.FeatureIdImpl
import org.joda.time.Period
import org.joda.time.format.PeriodFormatterBuilder
import org.locationtech.geomesa.accumulo.data.AccumuloDataStoreParams
import org.locationtech.geomesa.utils.classpath.PathUtils
import org.locationtech.geomesa.utils.stats.CountingInputStream
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import scala.collection.JavaConversions._
/**
* Base class for handling ingestion of local or distributed files
*
* @param dsParams data store parameters
* @param typeName simple feature type name to ingest
* @param inputs files to ingest
* @param numLocalThreads for local ingest, how many threads to use
*/
abstract class AbstractIngest(val dsParams: Map[String, String],
                              typeName: String,
                              inputs: Seq[String],
                              numLocalThreads: Int) extends Runnable with LazyLogging {

  import org.locationtech.geomesa.tools.accumulo.ingest.AbstractIngest._

  /**
   * Setup hook - called before run method is executed
   */
  def beforeRunTasks(): Unit

  /**
   * Create a local ingestion converter
   *
   * @param file file being operated on
   * @param failures used to tracks failures
   * @return local converter
   */
  def createLocalConverter(file: File, failures: AtomicLong): LocalIngestConverter

  /**
   * Run a distributed ingestion
   *
   * @param statusCallback for reporting status
   * @return (success, failures) counts
   */
  def runDistributedJob(statusCallback: (Float, Long, Long, Boolean) => Unit): (Long, Long)

  val ds = DataStoreFinder.getDataStore(dsParams)

  // (progress, start time, pass, fail, done)
  // For mock stores an alternating spinner glyph is used; otherwise a plain
  // progress bar is printed to stderr.
  val statusCallback: (Float, Long, Long, Long, Boolean) => Unit =
    if (dsParams.get(AccumuloDataStoreParams.mockParam.getName).exists(_.toBoolean)) {
      val progress = printProgress(System.err, buildString('\\u26AC', 60), ' ', _: Char) _
      var state = false
      (p, s, pass, fail, d) => {
        state = !state
        if (state) progress('\\u15e7')(p, s, pass, fail, d) else progress('\\u2b58')(p, s, pass, fail, d)
      }
    } else {
      printProgress(System.err, buildString(' ', 60), '\\u003d', '\\u003e')
    }

  /**
   * Main method to kick off ingestion
   */
  override def run(): Unit = {
    beforeRunTasks()
    // NOTE(review): inputs.head will throw on an empty inputs list — confirm
    // callers always pass at least one input.
    val distPrefixes = Seq("hdfs://", "s3n://", "s3a://")
    if (distPrefixes.exists(inputs.head.toLowerCase.startsWith)) {
      logger.info("Running ingestion in distributed mode")
      runDistributed()
    } else {
      logger.info("Running ingestion in local mode")
      runLocal()
    }
    ds.dispose()
  }

  private def runLocal(): Unit = {

    // Global failure shared between threads
    val (written, failed) = (new AtomicLong(0), new AtomicLong(0))
    val bytesRead = new AtomicLong(0L)

    // One worker per input file; workers share the counters above.
    class LocalIngestWorker(file: File) extends Runnable {
      override def run(): Unit = {
        try {
          // only create the feature writer after the converter runs
          // so that we can create the schema based off the input file
          var fw: FeatureWriter[SimpleFeatureType, SimpleFeature] = null
          val converter = createLocalConverter(file, failed)
          // count the raw bytes read from the file, as that's what we based our total on
          val countingStream = new CountingInputStream(new FileInputStream(file))
          val is = PathUtils.handleCompression(countingStream, file.getPath)
          try {
            val (sft, features) = converter.convert(is)
            if (features.hasNext) {
              ds.createSchema(sft)
              fw = ds.getFeatureWriterAppend(typeName, Transaction.AUTO_COMMIT)
            }
            features.foreach { sf =>
              val toWrite = fw.next()
              toWrite.setAttributes(sf.getAttributes)
              toWrite.getIdentifier.asInstanceOf[FeatureIdImpl].setID(sf.getID)
              toWrite.getUserData.putAll(sf.getUserData)
              toWrite.getUserData.put(Hints.USE_PROVIDED_FID, java.lang.Boolean.TRUE)
              try {
                fw.write()
                written.incrementAndGet()
              } catch {
                case e: Exception =>
                  logger.error(s"Failed to write '${DataUtilities.encodeFeature(toWrite)}'", e)
                  failed.incrementAndGet()
              }
              bytesRead.addAndGet(countingStream.getCount)
              countingStream.resetCount()
            }
          } finally {
            // closeQuietly tolerates nulls, so an unopened writer is safe here
            IOUtils.closeQuietly(converter)
            IOUtils.closeQuietly(is)
            IOUtils.closeQuietly(fw)
          }
        } catch {
          case e: Exception =>
            // Don't kill the entire program bc this thread was bad! use outer try/catch
            logger.error(s"Fatal error running local ingest worker on file ${file.getPath}", e)
        }
      }
    }

    val files = inputs.flatMap(PathUtils.interpretPath)
    val numFiles = files.length
    val totalLength = files.map(_.length).sum.toFloat
    def progress(): Float = bytesRead.get() / totalLength

    logger.info(s"Ingesting ${getPlural(numFiles, "file")} with ${getPlural(numLocalThreads, "thread")}")
    val start = System.currentTimeMillis()
    val es = Executors.newFixedThreadPool(numLocalThreads)
    files.foreach(f => es.submit(new LocalIngestWorker(f)))
    es.shutdown()

    // Poll once a second until all workers finish, reporting progress as we go.
    while (!es.isTerminated) {
      Thread.sleep(1000)
      statusCallback(progress(), start, written.get(), failed.get(), false)
    }
    statusCallback(progress(), start, written.get(), failed.get(), true)

    logger.info(s"Local ingestion complete in ${getTime(start)}")
    logger.info(getStatInfo(written.get, failed.get))
  }

  private def runDistributed(): Unit = {
    val start = System.currentTimeMillis()
    // Partially apply the callback so the job only supplies progress/pass/fail/done.
    val status = statusCallback(_: Float, start, _: Long, _: Long, _: Boolean)
    val (success, failed) = runDistributedJob(status)
    logger.info(s"Distributed ingestion complete in ${getTime(start)}")
    logger.info(getStatInfo(success, failed))
  }
}
object AbstractIngest {

  // Formats elapsed time as HH:MM:SS with zero-padded fields.
  val PeriodFormatter =
    new PeriodFormatterBuilder().minimumPrintedDigits(2).printZeroAlways()
      .appendHours().appendSeparator(":").appendMinutes().appendSeparator(":").appendSeconds().toFormatter

  /**
   * Prints progress using the provided output stream. Progress will be overwritten using '\\r', and will only
   * include a line feed if done == true
   */
  def printProgress(out: PrintStream,
                    emptyBar: String,
                    replacement: Char,
                    indicator: Char)(progress: Float, start: Long, pass: Long, fail: Long, done: Boolean): Unit = {
    val numFilled = (emptyBar.length * progress).toInt
    // Bar is: filled portion, a single indicator char, then the remaining empty portion.
    val bar = if (numFilled < 1) {
      emptyBar
    } else if (numFilled >= emptyBar.length) {
      buildString(replacement, numFilled)
    } else {
      s"${buildString(replacement, numFilled - 1)}$indicator${emptyBar.substring(numFilled)}"
    }
    val percent = f"${(progress * 100).toInt}%3d"
    // use \\r to replace current line
    // trailing space separates cursor
    out.print(s"\\r[$bar] $percent% complete $pass ingested $fail failed in ${getTime(start)} ")
    if (done) {
      out.println()
    }
  }

  // Builds a string of `length` repetitions of `c`.
  private def buildString(c: Char, length: Int) = {
    val sb = new StringBuilder(length)
    (0 until length).foreach(_ => sb.append(c))
    sb.toString()
  }

  /**
   * Gets elapsed time as a string
   */
  def getTime(start: Long): String = PeriodFormatter.print(new Period(System.currentTimeMillis() - start))

  /**
   * Gets status as a string
   */
  def getStatInfo(successes: Long, failures: Long): String = {
    val failureString = if (failures == 0) {
      "with no failures"
    } else {
      s"and failed to ingest ${getPlural(failures, "feature")}"
    }
    s"Ingested ${getPlural(successes, "feature")} $failureString."
  }

  // Naive English pluralization: appends "s" unless the count is exactly 1.
  private def getPlural(i: Long, base: String): String = if (i == 1) s"$i $base" else s"$i ${base}s"
}
trait LocalIngestConverter extends Closeable {
def convert(is: InputStream): (SimpleFeatureType, Iterator[SimpleFeature])
} | mdzimmerman/geomesa | geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/accumulo/ingest/AbstractIngest.scala | Scala | apache-2.0 | 8,974 |
package deburnat.transade
import deburnat.transade.core.loaders.{XmlFileLoader, ScalaFileLoader}
import deburnat.transade.core.admins.CoreAdmin
/**
* An algorithm for data transfer.
* Project name: deburnat
* Date: 10/2/13
* Time: 1:00 AM
* @author Patrick Meppe (tapmeppe@gmail.com)
*
*/
final class FileLoader(admin: CoreAdmin) {
  // Convenience constructors: build the CoreAdmin from raw settings, defaulting
  // to a no-op output callback when none is given.
  def this(dirPath: String, language: String, output: String => Unit) = this(new CoreAdmin(dirPath, language, output))
  def this(dirPath: String, language: String) = this(dirPath, language, _=>{})

  // One loader per supported input format, both sharing the same admin.
  val (xml, scala) = (new XmlFileLoader(admin), new ScalaFileLoader(admin))
}
| deburnatshazem/deburnat | core/src/main/scala/deburnat/transade/FileLoader.scala | Scala | apache-2.0 | 618 |
package x7c1.wheat.macros.database
import android.content.ContentValues
import scala.language.dynamics
import scala.language.experimental.macros
import scala.reflect.macros.blackbox
/** Macro bundle implementations for building Android ContentValues from typed fields. */
private object TypedContentValues {

  // Materializes an anonymous instance of A whose no-arg members are all stubbed
  // with ??? (the instance only exists to name fields at compile time).
  def extract[A: c.WeakTypeTag](c: blackbox.Context): c.Tree = {
    import c.universe._

    val field = weakTypeOf[A]
    // Keep only user-declared, parameterless, non-constructor methods.
    val methods = field.members filter { symbol =>
      !symbol.fullName.startsWith("java.") &&
        !symbol.fullName.startsWith("scala.") &&
        !symbol.isConstructor && symbol.isMethod
    } map (_.asMethod) filter (_.paramLists.isEmpty)

    val overrides = methods map { method =>
      val key = method.name.toString
      q"override def ${TermName(key)} = ???"
    }
    val tree = q"""
      new $field {
        ..$overrides
      }"""

    // println(tree)
    tree
  }

  // Expands (field -> value) pairs into a ContentValues construction, with
  // Option unwrapping and FieldConvertible-based conversions as needed.
  def unwrap[A: c.WeakTypeTag](c: blackbox.Context)(pairs: c.Tree*): c.Tree = {
    import c.universe._

    val parser = new TypedFieldsParser {
      override val context: c.type = c
    }
    // Direct puts: Option values become their content or null; other values are
    // cast to the matching boxed Java type when one exists.
    def forPrimitive(values: Tree)(left: Tree, right: Tree) = {
      val value = parser.getJavaType(left.tpe) match {
        case Some(tpe) if right.tpe <:< typeOf[Option[_]] =>
          val x = TermName(c freshName "x")
          q"""$right match {
            case Some($x) => $x : $tpe
            case None => null : $tpe
          }"""
        case Some(tpe) => q"$right: $tpe"
        case None => q"$right"
      }
      Seq(q"$values.put(${parser.getColumn(left)}, $value)")
    }
    // Converted puts: summon the implicit FieldConvertible[from, to] and unwrap
    // the value through it before putting.
    def forConvertible(values: Tree)(left: Tree, right: Tree) = left.tpe.typeArgs match {
      case Seq(from, to) if to =:= right.tpe =>
        val convertible = appliedType(
          typeOf[FieldConvertible[_, _]].typeConstructor,
          from,
          to
        )
        val x = TermName(c freshName "x")
        val value = parser.getJavaType(from) match {
          case Some(tpe) => q"$x: $tpe"
          case None => q"$x"
        }
        Seq(
          q"val $x = implicitly[$convertible].unwrap($right)",
          q"$values.put(${parser.getColumn(left)}, $value)"
        )
      case Seq(from, to) =>
        throw new IllegalArgumentException(
          s"type inconsistent: ${parser.getColumn(left)}:[$to] != $right:[${right.tpe}]")
      case typeArgs =>
        throw new IllegalArgumentException(s"invalid typeArgs: $typeArgs")
    }
    // Dispatch: exact type match -> primitive put; FieldTransform -> conversion;
    // anything else is a compile-time type mismatch.
    def toSetters(values: Tree): ((Tree, Tree)) => Seq[Tree] = {
      case (left, right) if left.tpe.widen =:= right.tpe.widen =>
        forPrimitive(values)(left, right)
      case (left, right) if left.tpe <:< typeOf[FieldTransform[_, _]] =>
        forConvertible(values)(left, right)
      case (left, right) =>
        throw new IllegalArgumentException(
          s"type inconsistent: ${parser.getColumn(left)}:[${left.tpe}] != $right:[${right.tpe}]")
    }
    val values = q"${TermName(c.freshName("values"))}"
    val setters = pairs map parser.extractArgs flatMap {
      case Some(pair) => toSetters(values)(pair)
      case None => throw new IllegalArgumentException("invalid form of expression")
    }
    val tree = q"""
      val $values = new ${typeOf[ContentValues]}()
      ..$setters
      $values
    """
    // println(tree)
    tree
  }
}
| x7c1/Linen | wheat-macros/src/main/scala/x7c1/wheat/macros/database/TypedContentValues.scala | Scala | mit | 3,234 |
package edu.depauw.scales.graphics
import Base._
import org.scalajs.dom
/**
 * A Graphic backed by an off-screen canvas; `bounds` is the rectangle the
 * canvas is drawn into when rendered.
 */
case class Bitmap(canvas: dom.HTMLCanvasElement, bounds: Bounds) extends Graphic {

  def render(ctx: GraphicsContext): Unit = ctx.drawImage(canvas, bounds.left, bounds.top, bounds.width, bounds.height)

  /**
   * Samples this bitmap as a color field over normalized [0,1]x[0,1]
   * coordinates, clamping out-of-range samples to the nearest edge pixel.
   */
  def toFunction: (Double, Double) => Color = {
    val ctx = canvas.getContext("2d").asInstanceOf[dom.CanvasRenderingContext2D]
    val imagedata = ctx.getImageData(0, 0, canvas.width, canvas.height)
    (x: Double, y: Double) => {
      // clamp the sample point into the pixel grid
      val col = (x * imagedata.width).toInt max 0 min (imagedata.width - 1)
      val row = (y * imagedata.height).toInt max 0 min (imagedata.height - 1)
      // 4 bytes per pixel, RGBA order
      val index = (row * imagedata.width + col) * 4
      val r = imagedata.data(index)
      val g = imagedata.data(index + 1)
      val b = imagedata.data(index + 2)
      val a = imagedata.data(index + 3)
      RGBA(r, g, b, a)
    }
  }
}
object Bitmap {

  /**
   * Rasterizes `fn` — a color field over normalized [0,1]x[0,1] coordinates —
   * into a bitmap graphic with the given logical bounds.
   */
  def apply(width: Double, height: Double)(fn: (Double, Double) => Color): Graphic = {
    val canvas = dom.document.createElement("canvas").asInstanceOf[dom.HTMLCanvasElement]
    val bounds: Bounds = RectBounds(0, width, 0, height)
    // TODO this is arbitrary...
    canvas.width = 500
    canvas.height = 500
    val ctx = canvas.getContext("2d").asInstanceOf[dom.CanvasRenderingContext2D]
    val imagedata = ctx.createImageData(canvas.width, canvas.height)
    for (row <- 0 until imagedata.height; col <- 0 until imagedata.width) {
      // sample the field at the pixel's normalized coordinates
      val c = fn(col.toDouble / imagedata.width, row.toDouble / imagedata.height)
      // 4 bytes per pixel, RGBA order; alpha scaled from [0,1] to [0,255]
      val index = (row * imagedata.width + col) * 4
      imagedata.data(index) = c.red
      imagedata.data(index + 1) = c.green
      imagedata.data(index + 2) = c.blue
      imagedata.data(index + 3) = math.round(c.alpha * 255).toInt
    }
    ctx.putImageData(imagedata, 0, 0)
    Bitmap(canvas, bounds)
  }
}
object Image {
  // TODO this might not be useful, because of single-origin restrictions
  /**
   * Loads an image from `url` into a width x height bitmap. The draw happens
   * asynchronously on the image's "load" event, so the returned graphic may be
   * blank until the image has actually arrived.
   */
  def apply(url: String, width: Int, height: Int): Graphic = {
    val canvas = dom.document.createElement("canvas").asInstanceOf[dom.HTMLCanvasElement]
    val bounds = RectBounds(0, width, 0, height)
    canvas.width = width
    canvas.height = height
    val ctx = canvas.getContext("2d").asInstanceOf[dom.CanvasRenderingContext2D]
    val image = dom.document.createElement("img").asInstanceOf[dom.HTMLImageElement]
    image.addEventListener("load", (_: dom.Event) => ctx.drawImage(image, 0, 0, width, height))
    image.src = url
    Bitmap(canvas, bounds)
  }
}
object Freeze {
def apply(graphic: Graphic): Graphic = {
val canvas = dom.document.createElement("canvas").asInstanceOf[dom.HTMLCanvasElement]
val bounds = graphic.bounds
// Choose size for canvas so each dimension is at least 500, and the proportions are correct
// TODO this is arbitrary...
if (bounds.width < bounds.height) {
canvas.width = 500
canvas.height = (canvas.width * bounds.height / bounds.width).toInt
} else {
canvas.height = 500
canvas.width = (canvas.height * bounds.width / bounds.height).toInt
}
graphic.displayOn(canvas)
Bitmap(canvas, bounds)
}
} | DePauwREU2013/sjs-scales | src/main/scala/edu/depauw/scales/graphics/Bitmap.scala | Scala | mit | 3,177 |
package org.jetbrains.plugins.scala
package lang.psi.impl.statements
import com.intellij.psi._
import impl.light.LightElement
import com.intellij.openapi.util.TextRange
import lang.psi.api.expr.{ScBlockExpr, ScExpression}
/**
 * A lightweight PsiCodeBlock facade over a Scala expression, presenting a
 * Scala method body to Java-oriented IntelliJ APIs that expect a code block.
 */
final class FakePsiCodeBlock(body: ScExpression) extends LightElement(body.getManager, body.getLanguage) with PsiCodeBlock {
  def shouldChangeModificationCount(place: PsiElement): Boolean = false
  // A Scala expression has no Java braces or body markers; these accessors are undefined here.
  def getRBrace: PsiJavaToken = null
  def getLBrace: PsiJavaToken = null
  def getLastBodyElement: PsiElement = null
  def getFirstBodyElement: PsiElement = null
  // A block expression yields one fake statement per contained statement;
  // any other expression is surfaced as a single statement.
  def getStatements: Array[PsiStatement] = body match {
    case x: ScBlockExpr => x.statements.map(new FakePsiStatement(_)).toArray
    case _ => Array(new FakePsiStatement(body))
  }
  override def getTextRange: TextRange = body.getTextRange
  override def getTextOffset: Int = body.getTextOffset
  override def getContainingFile = body.getContainingFile
  // Fixed: the original interpolation emitted an unbalanced trailing '}'.
  override def toString = s"FakePsiCodeBlock(${body.toString})"
}
final class FakePsiStatement(elem: PsiElement) extends LightElement(elem.getManager, elem.getLanguage) with PsiStatement {
override def getTextRange: TextRange = elem.getTextRange
override def getTextOffset: Int = elem.getTextOffset
override def getContainingFile = elem.getContainingFile
override def toString = s"FakePsiStatement(${elem.toString})"
} | consulo/consulo-scala | src/org/jetbrains/plugins/scala/lang/psi/impl/statements/FakePsiCodeBlock.scala | Scala | apache-2.0 | 1,381 |
package fpinscala.streamingio
import fpinscala.iomonad.{Task,IO,Monad}
object ImperativeAndLazyIO {

  /*
  We are going to consider various approaches to the simple task of
  checking whether a file contains more than 40,000 lines.

  Our first implementation is an imperative implementation, embedded
  into `IO`.
  */

  type IO[A] = fpinscala.iomonad.IO[Task,A] // see `Task.scala` for details

  import java.io._

  /**
   * Returns an `IO` action that, when run, reports whether `filename`
   * has strictly more than 40,000 lines. Reads at most 40,001 lines
   * and always closes the file handle (via `finally`).
   */
  def linesGt40k(filename: String): IO[Boolean] = IO {
    // There are a number of convenience functions in scala.io.Source
    // for reading from external sources such as files.
    val src = io.Source.fromFile(filename)
    try {
      var count = 0
      // Obtain a stateful iterator from the Source
      val lines: Iterator[String] = src.getLines
      while (count <= 40000 && lines.hasNext) {
        lines.next // has side effect of advancing to next element
        count += 1
      }
      count > 40000
    }
    finally src.close
  }

  /*
  The above code is rather low-level, and it's not compositional,
  either. Consider the following scenarios:

  * Check whether the number of _nonempty_ lines in the file exceeds
    40,000
  * Find a line index before 40,000 where the first letter of
    consecutive lines spells out `"abracadabra"`.

  We cannot just compose our existing implementation with some
  other combinator(s) to implement these tasks. Our implementation is
  a monolithic loop, and we must modify this loop directly if we want
  to change its behavior.

  Now imagine if we had a `Stream[String]` for the lines of the file
  and we could assemble functionality using all the `Stream` functions
  we know and love.
  */

  // Hypothetical usage, assuming a `Stream[String]` of file lines existed.
  object Examples {
    val lines: Stream[String] = sys.error("defined elsewhere")

    val ex1 = lines.zipWithIndex.exists(_._2 + 1 >= 40000)
    val ex2 = lines.filter(!_.trim.isEmpty).zipWithIndex.exists(_._2 + 1 >= 40000)
    val ex3 = lines.take(40000).map(_.head).indexOfSlice("abracadabra".toList)
  }

  /*
  Could we actually write the above? Not quite. We could 'cheat' and
  return an `IO[Stream[String]]` representing the lines of a file:
  */

  /**
   * Lazily produces the lines of `filename` as a `Stream[String]`.
   * NOTE: this is _lazy I/O_ — the file is closed only when the
   * stream is forced past its last element, which is unsafe in
   * general (see the discussion below).
   */
  def lines(filename: String): IO[Stream[String]] = IO {
    val src = io.Source.fromFile(filename)
    src.getLines.toStream append { src.close; Stream.empty }
  }

  /*
  This is called _lazy I/O_, and it's problematic for a number of
  reasons, discussed in the book text. However, it would be nice to
  recover the same high-level, compositional style we are used to
  from our use of `List` and `Stream`.
  */
}
object SimpleStreamTransducers {
// `IO` in this object runs on `Task`; see `Task.scala` in the iomonad package.
type IO[A] = fpinscala.iomonad.IO[Task,A]
/*
We now introduce a type, `Process`, representing pure, single-input
stream transducers. It can be in of three states - it can be
emitting a value to the output (`Emit`), reading a value from its
input (`Await`) or signaling termination via `Halt`.
*/
sealed trait Process[I,O] {
  import Process._

  /*
   * A `Process[I,O]` can be used to transform a `Stream[I]` to a
   * `Stream[O]`.
   */
  def apply(s: Stream[I]): Stream[O] = this match {
    case Halt() => Stream()
    case Await(recv, fallback) => s match {
      case h #:: t => recv(h)(t)
      case _ => fallback(s) // Stream is empty
    }
    case Emit(h,t) => h.toStream append t(s)
  }

  /*
   * `Process` can be thought of as a sequence of values of type `O`
   * and many of the operations that would be defined for `List[O]`
   * can be defined for `Process[I,O]`, for instance `map`, `++` and
   * `flatMap`. The definitions are analogous.
   */
  def map[O2](f: O => O2): Process[I,O2] = this match {
    case Halt() => Halt()
    case Emit(h, t) => Emit(h map f, t map f)
    case Await(recv,fb) => Await(recv andThen (_ map f), fb map f)
  }

  // Run `this` until it halts, then continue with `p`.
  def ++(p: => Process[I,O]): Process[I,O] = this match {
    case Halt() => p
    case Emit(h, t) => emitAll(h, t ++ p)
    case Await(recv,fb) => Await(recv andThen (_ ++ p), fb ++ p)
  }

  def flatMap[O2](f: O => Process[I,O2]): Process[I,O2] = this match {
    case Halt() => Halt()
    case Emit(h, t) =>
      if (h.isEmpty) t flatMap f
      else f(h.head) ++ emitAll(h.tail, t).flatMap(f)
    case Await(recv,fb) =>
      Await(recv andThen (_ flatMap f), fb flatMap f)
  }

  /*
   * Exercise 1: Implement `|>`. Let the types guide your implementation.
   */
  // Pipe: the output of `this` becomes the input of `p2`. Driven by the
  // state of `p2`; when `p2` awaits, we step `this` to produce a value.
  def |>[O2](p2: Process[O,O2]): Process[I,O2] = {
    p2 match {
      case Halt() => Halt()
      case Emit(h,t) => Emit(h, this |> t)
      case Await(f,fb) => this match {
        case Emit(h,t) => t |> p2.feed(h)
        case Halt() => Halt() |> fb
        case Await(g,gb) => Await((i: I) => g(i) |> p2, gb |> fb)
      }
    }
  }

  /*
   * Feed `in` to this `Process`. Uses a tail recursive loop as long
   * as `this` is in the `Await` state.
   */
  def feed(in: Seq[I]): Process[I,O] = {
    @annotation.tailrec
    def go(in: Seq[I], cur: Process[I,O]): Process[I,O] =
      cur match {
        case Halt() => Halt()
        case Await(recv,fb) =>
          if (in.nonEmpty) go(in.tail, recv(in.head))
          else cur
        case Emit(h, t) => Emit(h, t.feed(in))
      }
    go(in, this)
  }

  /*
   * See `Process.lift` for a typical repeating `Process`
   * definition expressed with explicit recursion.
   */

  /*
   * `Process` definitions can often be expressed without explicit
   * recursion, by repeating some simpler `Process` forever.
   */
  def repeat: Process[I,O] = {
    def go(p: Process[I,O]): Process[I,O] = p match {
      case Halt() => go(this) // restart from the beginning on normal halt
      case Await(recv,fb) => Await(recv andThen go, fb)
      case Emit(h, t) => Emit(h, go(t))
    }
    go(this)
  }

  /** Like `repeat`, but restarts at most `n` times. */
  def repeatN(n: Int): Process[I,O] = {
    def go(n: Int, p: Process[I,O]): Process[I,O] = p match {
      case Halt() => if (n > 0) go(n-1, this) else Halt()
      case Await(recv,fb) => Await(recv andThen (go(n,_)), fb)
      case Emit(h, t) => Emit(h, go(n,t))
    }
    go(n, this)
  }

  /*
   * As an example of `repeat`, see `Process.filter`. We define
   * a convenience function here for composing this `Process`
   * with a `Process` that filters the output type `O`.
   */
  def filter(f: O => Boolean): Process[I,O] =
    this |> Process.filter(f)

  def zip[O2](p: Process[I,O2]): Process[I,(O,O2)] =
    Process.zip(this, p)

  /*
   * Exercise 7: Implement `zipWithIndex`.
   */
  def zipWithIndex: Process[I,(O,Int)] =
    this zip (count map (_ - 1))

  /* Add `p` to the `fallback` of this `Process`. */
  def orElse(p: Process[I,O]): Process[I,O] = this match {
    case Halt() => p
    case Await(recv,fb) => Await(recv, fb ++ p)
    case _ => this
  }

  /*
   * Convenience function for switching to the fallback branch
   * a `Process`.
   */
  def disconnectIn: Process[I,O] = this match {
    case Await(recv,fb) => fb
    case Halt() => Halt()
    case Emit(h, t) => emitAll(h, t.disconnectIn)
  }
}
object Process {

  // Emit `head` values, then continue as `tail`.
  case class Emit[I,O](
      head: Seq[O],
      tail: Process[I,O] = Halt[I,O]())
    extends Process[I,O]

  // Wait for one input; `fallback` is consulted if the input is exhausted.
  case class Await[I,O](
      recv: I => Process[I,O],
      fallback: Process[I,O] = Halt[I,O]())
    extends Process[I,O]

  case class Halt[I,O]() extends Process[I,O]

  def emit[I,O](head: O,
                tail: Process[I,O] = Halt[I,O]()): Process[I,O] =
    emitAll(Vector(head), tail)

  // Smart constructor: coalesces adjacent `Emit` states.
  def emitAll[I,O](head: Seq[O],
                   tail: Process[I,O] = Halt[I,O]()): Process[I,O] =
    tail match {
      case Emit(h2, tl) => Emit(head ++ h2, tl)
      case _ => Emit(head, tail)
    }

  // Process forms a monad, and we provide monad syntax for it
  import fpinscala.iomonad.Monad

  def monad[I]: Monad[({ type f[x] = Process[I,x]})#f] =
    new Monad[({ type f[x] = Process[I,x]})#f] {
      def unit[O](o: => O): Process[I,O] = emit(o)
      def flatMap[O,O2](p: Process[I,O])(f: O => Process[I,O2]): Process[I,O2] =
        p flatMap f
    }

  // enable monadic syntax for `Process` type
  implicit def toMonadic[I,O](a: Process[I,O]) = monad[I].toMonadic(a)

  /*
   * We can convert any function `f: I => O` to a `Process[I,O]`. We
   * simply `Await`, then `Emit` the value received, transformed by
   * `f`.
   */
  def lift[I,O](f: I => O): Process[I,O] =
    Await((i: I) => emit(f(i), lift(f)))

  /*
   * As an example of `repeat`, here's a definition of `filter` that
   * uses `repeat`.
   */
  def filter[I](f: I => Boolean): Process[I,I] =
    Await[I,I](i => if (f(i)) emit(i) else Halt()) repeat

  /*
   * Here's a typical `Process` definition that requires tracking some
   * piece of state (in this case, the running total):
   */
  def sum: Process[Double,Double] = {
    def go(acc: Double): Process[Double,Double] =
      Await((d: Double) => emit(d+acc, go(d+acc)))
    go(0.0)
  }

  /*
   * Exercise 2: Implement `take`, `drop`, `takeWhile`, and `dropWhile`.
   */
  def take[I](n: Int): Process[I,I] =
    if (n <= 0) Halt()
    else Await(i => emit(i, take[I](n-1)))

  def drop[I](n: Int): Process[I,I] =
    if (n <= 0) id
    else Await(i => drop[I](n-1))

  def takeWhile[I](f: I => Boolean): Process[I,I] =
    Await(i =>
      if (f(i)) emit(i, takeWhile(f))
      else Halt())

  def dropWhile[I](f: I => Boolean): Process[I,I] =
    Await(i =>
      if (f(i)) dropWhile(f)
      else id)

  /* The identity `Process`, just repeatedly echos its input. */
  def id[I]: Process[I,I] = lift(identity)

  /*
   * Exercise 3: Implement `count`.
   *
   * Here's one implementation, with three stages - we map all inputs
   * to 1.0, compute a running sum, then finally convert the output
   * back to `Int`. The three stages will be interleaved - as soon
   * as the first element is examined, it will be converted to 1.0,
   * then added to the running total, and then this running total
   * will be converted back to `Int`, then the `Process` will examine
   * the next element, and so on.
   */
  def count[I]: Process[I,Int] =
    lift((i: I) => 1.0) |> sum |> lift(_.toInt)

  /* For comparison, here is an explicit recursive implementation. */
  def count2[I]: Process[I,Int] = {
    def go(n: Int): Process[I,Int] =
      Await((i: I) => emit(n+1, go(n+1)))
    go(0)
  }

  /*
   * Exercise 4: Implement `mean`.
   *
   * This is an explicit recursive definition. We'll factor out a
   * generic combinator shortly.
   */
  def mean: Process[Double,Double] = {
    def go(sum: Double, count: Double): Process[Double,Double] =
      Await((d: Double) => emit((sum+d) / (count+1), go(sum+d,count+1)))
    go(0.0, 0.0)
  }

  // Generic stateful loop: thread state `S` through, emitting one output per input.
  def loop[S,I,O](z: S)(f: (I,S) => (O,S)): Process[I,O] =
    Await((i: I) => f(i,z) match {
      case (o,s2) => emit(o, loop(s2)(f))
    })

  /* Exercise 5: Implement `sum` and `count` in terms of `loop` */
  def sum2: Process[Double,Double] =
    loop(0.0)((d:Double, acc) => (acc+d,acc+d))

  def count3[I]: Process[I,Int] =
    loop(0)((_:I,n) => (n+1,n+1))

  /*
   * Exercise 6: Can you think of a generic combinator that would
   * allow for the definition of `mean` in terms of `sum` and
   * `count`?
   *
   * Yes, it is `zip`, which feeds the same input to two processes.
   */
  def zip[A,B,C](p1: Process[A,B], p2: Process[A,C]): Process[A,(B,C)] =
    (p1, p2) match {
      // if both are emitting, we zip together corresponding values,
      // taking care to handle case where the two processes are
      // emitting different numbers of values
      case (Emit(bs, t1), Emit(cs, t2)) =>
        // we zip together `bs` and `cs`, and get the leftover from each side
        val (z, rema, remb) = (bs.zip(cs), bs.drop(cs.length), cs.drop(bs.length))
        Emit(z, zip(
          if (rema.isEmpty) t1 else Emit(rema, t1),
          if (remb.isEmpty) t2 else Emit(remb, t2)
        ))
      case (Halt(), _) => Halt()
      case (_, Halt()) => Halt()
      // if either side is Await-ing, we do the Await, then zip once
      // we have the result
      case (Await(recv1, fb1), _) =>
        Await(recv1 andThen (p1Next => zip(p1Next, p2)), zip (fb1, p2))
      case (_, Await(recv2, fb2)) =>
        Await(recv2 andThen (p2Next => zip(p1, p2Next)), zip (p1, fb2))
    }

  /*
   * Using zip, we can then define `mean`. Again, this definition
   * operates in a single pass.
   */
  val mean2 = (sum zip count) |> lift { case (s,n) => s / n }

  /*
   * Exercise 7: Implement `zipWithIndex`.
   *
   * See definition on `Process` above.
   */

  /*
   * Exercise 8: Implement `exists`
   *
   * We choose to emit all intermediate values, and not halt.
   * See `existsResult` below for a trimmed version.
   */
  def exists[I](f: I => Boolean): Process[I,Boolean] =
    lift(f) |> any

  /* Emits whether a `true` input has ever been received. */
  def any: Process[Boolean,Boolean] =
    loop(false)((b:Boolean,s) => (s || b, s || b))

  /* A trimmed `exists`, containing just the final result. */
  def existsResult[I](f: I => Boolean) =
    exists(f) |> takeThrough(!_) |> dropWhile(!_) |> echo.orElse(emit(false))

  /*
   * Like `takeWhile`, but includes the first element that tests
   * false.
   */
  def takeThrough[I](f: I => Boolean): Process[I,I] =
    takeWhile(f) ++ echo

  /* Awaits then emits a single value, then halts. */
  def echo[I]: Process[I,I] = Await(i => emit(i))

  // Consumes one input, emits nothing, then halts.
  def skip[I,O]: Process[I,O] = Await(i => Halt())

  // Consumes all input, emitting nothing.
  def ignore[I,O]: Process[I,O] = skip.repeat

  // Wraps each input in `Some`, then emits a final `None` on exhaustion.
  def terminated[I]: Process[I,Option[I]] =
    Await((i: I) => emit(Some(i), terminated[I]), emit(None))
}
/*
To implement our task of checking whether a file contains more than
40k, we need some way of binding a `Process` to talk to external
streams. Our first attempt is to define a separate type, `Source`,
which combines a `Process` with some effectful external source. We'll
see later that this approach doesn't scale very well.
*/
/**
 * A stream of `O` values tied to some effectful external source.
 * Transformation combinators are all defined in terms of `|>`, which
 * pipes this source through a pure `Process`.
 */
trait Source[O] {
  def |>[O2](p: Process[O,O2]): Source[O2]
  def filter(f: O => Boolean) = this |> Process.filter(f)
  def map[O2](f: O => O2) = this |> Process.lift(f)
  // can implement other functions on `Source` just using `|>`
  def take(n: Int) = this |> Process.take(n)
  def takeWhile(f: O => Boolean) = this |> Process.takeWhile(f)
  // etc

  // Run the source to completion, gathering all emitted values.
  def collect: IO[IndexedSeq[O]]
}
object Source {
  import Process._

  case class ResourceR[R,I,O]( // A resource from which we can read values
      acquire: IO[R],
      release: R => IO[Unit],
      step: R => IO[Option[I]],
      trans: Process[I,O]) extends Source[O] {

    def |>[O2](p: Process[O,O2]) =
      ResourceR(acquire, release, step, trans |> p)

    /*
     * Notice we are guaranteed to run the `release` action, whether
     * we terminate normally or if an exception occurs during
     * processing.
     */
    def collect: IO[IndexedSeq[O]] = {
      // Evaluate `a`, and run `cleanup` if this throws an exception
      def tryOr[A](a: => A)(cleanup: IO[Unit]) =
        try a catch { case e: Throwable => cleanup.run; throw e }
      @annotation.tailrec
      def go(acc: IndexedSeq[O],
             step: IO[Option[I]],
             p: Process[I,O],
             release: IO[Unit]): IndexedSeq[O] =
        p match {
          // Normal termination: release the resource, return the output.
          case Halt() => release.run; acc
          case Emit(h, t) =>
            go(acc ++ h, step, t, release)
          case Await(recv, fb) => tryOr(step.run)(release) match {
            // Source exhausted: switch to the fallback branch.
            case None => go(acc, IO(None), fb, release)
            case Some(i) => go(acc, step, tryOr(recv(i))(release), release)
          }
        }
      acquire map (res =>
        go(IndexedSeq(), step(res), trans, release(res)))
    }

    // Run purely for effects, discarding all output.
    def run: IO[Unit] = (this |> ignore[O,Unit]).collect as ()
  }

  /** A `Source` of the lines of `filename`, released via `src.close`. */
  def lines(filename: String): Source[String] =
    ResourceR(
      IO(io.Source.fromFile(filename)),
      (src: io.Source) => IO(src.close),
      (src: io.Source) => {
        lazy val iter = src.getLines
        IO { if (iter.hasNext) Some(iter.next) else None }
      },
      Process.id[String])
}
/*
So far, our library cannot express programs that must incrementally
write to some _sink_, like a file. For instance, consider the
following task:
Transform `fahrenheit.txt`, a file containing temperatures in
degrees fahrenheit, to `celsius.txt`, a file containing the same
temperatures in degrees celsius.
We can produce the source, but no way to write the output to another
file!
*/
import fpinscala.iomonad.IO0.fahrenheitToCelsius
// Temperatures in degrees celsius parsed from `fahrenheit.txt`,
// skipping comment lines that begin with `#`.
val tempsC: Source[Double] =
  Source.lines("fahrenheit.txt").
  filter(!_.startsWith("#")).
  map(s => fahrenheitToCelsius(s.toDouble))
/*
When we encounter limitations like this, we _can_ add more special
cases to our `Source` type. Here, we define a notion of a `Sink`,
and incorporate it into the `Source` type.
*/
/**
 * A consumer of values of type `I`. `<|` pre-composes a pure `Process`
 * stage in front of the sink's input.
 */
trait Sink[I] {
  def <|[I0](p: Process[I0,I]): Sink[I0]
  def filter(f: I => Boolean) = this <| Process.filter(f)
  // other combinators are similar, defined in terms of `<|`
}
object Sink {

  // A `Sink` backed by a writable resource `R` (e.g. a `FileWriter`).
  case class ResourceW[R,I,I2](
      acquire: IO[R],
      release: R => IO[Unit],
      recv: R => (I2 => IO[Unit]),
      trans: Process[I,I2]) extends Sink[I] {

    def <|[I0](p: Process[I0,I]) =
      ResourceW(acquire, release, recv, p |> trans)
  }

  import java.io.FileWriter

  /* An example `Sink`, for writing to a file. */
  def file(filename: String, append: Boolean = false): Sink[String] =
    ResourceW(
      IO(new FileWriter(filename, append)),
      (w: FileWriter) => IO(w.close),
      (w: FileWriter) => (s: String) => IO(w.write(s)),
      Process.id[String]
    )
}
/*
To integrate this into our `Source` API, let's imagine a
new combinator, `observe`:
def observe(snk: Sink[O]): Source[O]
Implementing this combinator will likely require an additional
`Source` constructor and updates to our `collect` implementation.
Assuming we can do this, the complete scenario then looks something
like:
val convert: IO[Unit] =
Source.lines("fahrenheit.txt").
filter(!_.startsWith("#")).
map(s => fahrenheitToCelsius(s.toDouble)).
map(d => d.toString + "\\n"). // add line separators back in
observe(Sink.file("celsius.txt")).
run
Ultimately, this approach of adding special cases to `Source`
doesn't scale. See the chapter text for details. Next, we develop a
generalized `Process` type that lets us express sources, sinks,
multiple inputs, and more complex usage scenarios than the ones
we've considered so far.
*/
}
object GeneralizedStreamTransducers {
/*
Our generalized process type is parameterized on the protocol used for
communicating with the driver. This works similarly to the `IO` type
we defined in chapter 13. The `Await` constructor emits a request of
type `F[A]`, and receives a response of type `A`:
trait Process[F,A]
case class Await[F[_],A,O](
req: F[A], recv: A => Process[F,O],
fallback: Process[F,O],
cleanup: Process[F,O]) extends Process[F,O]
case class Halt[F[_],O](err: Throwable) extends Process[F,O]
case class Emit[F[_],O](head: Seq[O], tail: Process[F,O]) extends Process[F,O]
The `Await` constructor now includes a `cleanup` process, which will be
consulted if the `Await` fails with an error.
The `Halt` constructor now has a _reason_ for termination, which may be
either normal termination, the special exception `End`, or some other
error.
We'll use the improved `Await` and `Halt` cases together to ensure
that all resources get released, even in the event of exceptions.
*/
trait Process[F[_],O] {
  import Process._

  /*
   * Many of the same operations can be defined for this generalized
   * `Process` type, regardless of the choice of `F`.
   */
  def map[O2](f: O => O2): Process[F,O2] = this match {
    case Await(req,recv,fb,c) =>
      Await(req, recv andThen (_ map f), fb map f, c map f)
    case Emit(h, t) => Try { Emit(h map f, t map f) }
    case Halt(err) => Halt(err)
  }

  def ++(p: => Process[F,O]): Process[F,O] = this match {
    case Halt(End) => Try(p) // we only consult `p` on normal termination
    case Halt(err) => Halt(err) // otherwise, we keep the current error
    case Emit(h, t) => emitAll(h, t ++ p)
    case Await(req,recv,fb,c) =>
      Await(req, recv andThen (_ ++ p), fb ++ p, c)
  }

  /*
   * Like `++`, but _always_ runs `p`, even if `this` halts with an error.
   */
  def onComplete(p: => Process[F,O]): Process[F,O] = this match {
    case Halt(End) => Try(p)
    case Halt(err) => Try(p) ++ Halt(err) // we always run p, but preserve any errors that occurred
    case Emit(h, t) => emitAll(h, t onComplete p)
    case Await(req,recv,fb,c) =>
      Await(req, recv andThen (_ onComplete p), fb onComplete p, c onComplete p)
  }

  /*
   * Anywhere we _call_ `f`, we catch exceptions and convert them to `Halt`.
   * See the helper function `Try` defined below.
   */
  def flatMap[O2](f: O => Process[F,O2]): Process[F,O2] =
    this match {
      case Halt(err) => Halt(err)
      case Emit(Seq(o), t) => Try(f(o)) ++ t.flatMap(f) // optimization
      case Emit(o, t) =>
        if (o.isEmpty) t.flatMap(f)
        else Try { f(o.head) } ++ emitAll(o.tail, t).flatMap(f)
      case Await(req,recv,fb,c) =>
        Await(req, recv andThen (_ flatMap f), fb flatMap f, c flatMap f)
    }

  def repeat: Process[F,O] = {
    def go(p: Process[F,O]): Process[F,O] = p match {
      case Halt(End) => go(this) // restart on normal termination only
      case Halt(err) => Halt(err)
      case Await(req,recv,fb,c) => Await(req, recv andThen go, fb, c)
      case Emit(h, t) => emitAll(h, go(t))
    }
    this match {
      case Halt(e) => this // an already-halted process stays halted
      case _ => go(this)
    }
  }

  /*
   * This function is defined only if given a `Monad[F]` and a
   * `Partial[F]`. Unlike the simple `collect` interpreter defined
   * in the companion object below, this is not tail recursive and
   * responsibility for stack safety is placed on the `Monad`
   * instance.
   *
   * This uses the helper function `TryOr`, defined below.
   */
  def collect(implicit F: Monad[F], P: Partial[F]): F[IndexedSeq[O]] = {
    def go(cur: Process[F,O], acc: IndexedSeq[O]): F[IndexedSeq[O]] =
      cur match {
        case Emit(h,t) => go(t, acc ++ h)
        case Halt(End) => F.unit(acc)
        case Halt(err) => P.fail(err)
        case Await(req,recv,fb,c) =>
          F.flatMap (P.attempt(req)) {
            case Left(End) => go(fb, acc)
            case Left(err) =>
              // run cleanup, then re-raise the error
              go(c ++ await[F,Nothing,O](P.fail(err))(), acc)
            case Right(o) => go(TryAwait(recv(o))(fb,c), acc) // We catch and handle any exceptions in `recv`.
          }
      }
    go(this, IndexedSeq())
  }

  /*
   * We define `Process1` as a type alias - see the companion object
   * for `Process` below. Using that, we can then define `|>` once
   * more. The definition is extremely similar to our previous
   * definition. We again use the helper function, `feed`, to take
   * care of the case where `this` is emitting values while `p2`
   * is awaiting these values.
   *
   * The one subtlety is we make sure that if `p2` halts, we
   * `kill` this process, giving it a chance to run any cleanup
   * actions (like closing file handles, etc).
   */
  def |>[O2](p2: Process1[O,O2]): Process[F,O2] = {
    p2 match {
      case Halt(e) => this.kill ++ Halt(e)
      case Emit(h, t) => emitAll(h, this |> t)
      case Await(req,recv,fb,c) => this match {
        case Emit(h,t) => t |> feed(h)(p2)
        case Halt(End) => Halt(End) |> fb
        case Halt(err) => Halt(err) |> c
        case Await(req0,recv0,fb0,c0) =>
          await(req0)(i => recv0(i) |> p2,
                      fb0 |> fb,
                      c0 |> c)
      }
    }
  }

  // Terminate this process early, running its cleanup branch first.
  @annotation.tailrec
  final def kill[O2]: Process[F,O2] = this match {
    case Await(req,recv,fb,c) => c.drain
    case Halt(e) => Halt(e)
    case Emit(h, t) => t.kill
  }

  // Discard all output, keeping only the effects/termination behavior.
  final def drain[O2]: Process[F,O2] = this match {
    case Halt(e) => Halt(e)
    case Emit(h, t) => t.drain
    case Await(req,recv,fb,c) => Await(
      req, recv andThen (_.drain),
      fb.drain, c.drain)
  }

  def filter(f: O => Boolean): Process[F,O] =
    this |> Process.filter(f)

  def take(n: Int): Process[F,O] =
    this |> Process.take(n)

  def once: Process[F,O] = take(1)

  /*
   * Use a `Tee` to interleave or combine the outputs of `this` and
   * `p2`. This can be used for zipping, interleaving, and so forth.
   * Nothing requires that the `Tee` read elements from each
   * `Process` in lockstep. It could read fifty elements from one
   * side, then two elements from the other, then combine or
   * interleave these values in some way, etc.
   *
   * This definition uses two helper functions, `feedL` and `feedR`,
   * which feed the `Tee` in a tail-recursive loop as long as
   * it is awaiting input.
   */
  def tee[O2,O3](p2: Process[F,O2])(t: Tee[O,O2,O3]): Process[F,O3] = {
    t match {
      case Halt(e) => this.kill ++ p2.kill ++ Halt(e)
      case Emit(h,t) => Emit(h, (this tee p2)(t))
      case Await(side, recv, fb, c) => side.get match {
        case Left(isO) => this match {
          case Halt(e) => p2.kill ++ Halt(e)
          case Emit(o,ot) => (ot tee p2)(feedL(o)(t))
          case Await(reqL, recvL, fbL, cL) =>
            Await(reqL, recvL andThen (this2 => (this2 tee p2)(t)),
              (fbL tee p2)(t), (cL tee p2)(t))
        }
        case Right(isO2) => p2 match {
          case Halt(e) => this.kill ++ Halt(e)
          case Emit(o,ot) => (this tee ot)(feedR(o)(t))
          case Await(reqR, recvR, fbR, cR) =>
            Await(reqR, recvR andThen (p3 => (this tee p3)(t)),
              (this tee fbR)(t), (this tee cR)(t))
        }
      }
    }
  }

  def zipWith[O2,O3](p2: Process[F,O2])(f: (O,O2) => O3): Process[F,O3] =
    (this tee p2)(Process.zipWith(f))

  def zip[O2](p2: Process[F,O2]): Process[F,(O,O2)] =
    zipWith(p2)((_,_))

  // Feed this process's output to a `Sink` of effectful write functions.
  def to[O2](sink: Sink[F,O]): Process[F,Unit] =
    join { (this zipWith sink)((o,f) => f(o)) }

  // Transform this process's output through an effectful `Channel`.
  def through[O2](p2: Channel[F, O, O2]): Process[F,O2] =
    join { (this zipWith p2)((o,f) => f(o)) }

  // Switch the outermost `Await` to its fallback branch.
  def disconnectIn: Process[F,O] = this match {
    case Await(req,recv,fb,c) => fb
    case Halt(e) => Halt(e)
    case Emit(h, t) => Emit(h, t.disconnectIn)
  }

  def disconnect[O2]: Process[F,O2] =
    this.disconnectIn.drain
}
object Process {
// Issue a request `req: F[A]`; on success continue with `recv`, on
// normal input exhaustion (`End`) run `fallback`, on error run `cleanup`.
case class Await[F[_],A,O](
    req: F[A], recv: A => Process[F,O],
    fallback: Process[F,O],
    cleanup: Process[F,O]) extends Process[F,O]

case class Emit[F[_],O](
    head: Seq[O],
    tail: Process[F,O]) extends Process[F,O]

// Termination, carrying a reason: `End` for normal halt, otherwise an error.
case class Halt[F[_],O](err: Throwable) extends Process[F,O]

// Smart constructor: coalesces adjacent `Emit` states.
def emitAll[F[_],O](
    head: Seq[O],
    tail: Process[F,O] = Halt[F,O](End)): Process[F,O] =
  tail match {
    case Emit(h2,t) => Emit(head ++ h2, t)
    case _ => Emit(head, tail)
  }

def emit[F[_],O](
    head: O,
    tail: Process[F,O] = Halt[F,O](End)): Process[F,O] =
  emitAll(Vector(head), tail)

// Curried `Await` constructor with sensible defaults for all branches.
def await[F[_],A,O](req: F[A])(
    recv: A => Process[F,O] = (a: A) => Halt[F,O](End),
    fallback: Process[F,O] = Halt[F,O](End),
    cleanup: Process[F,O] = Halt[F,O](End)): Process[F,O] =
  Await(req, recv, fallback, cleanup)
/**
 * Helper function to safely produce `p`, or gracefully halt
 * with an error if an exception is thrown.
 */
def Try[F[_],O](p: => Process[F,O]): Process[F,O] =
  try p
  catch { case e: Throwable => Halt(e) }

/*
 * Safely produce `p`, or run `cleanup` and halt gracefully with the
 * exception thrown while evaluating `p`.
 */
def TryOr[F[_],O](p: => Process[F,O])(cleanup: Process[F,O]): Process[F,O] =
  try p
  catch { case e: Throwable => cleanup ++ Halt(e) }

/*
 * Safely produce `p`, or run `cleanup` or `fallback` if an exception
 * occurs while evaluating `p`.
 */
def TryAwait[F[_],O](p: => Process[F,O])(fallback: Process[F,O], cleanup: Process[F,O]): Process[F,O] =
  try p
  catch {
    // `End` signals normal termination, so switch to the fallback branch.
    case End => fallback
    case e: Throwable => cleanup ++ Halt(e)
  }
/**
 * Feed `in` to this `Process`. Uses a tail recursive loop as long
 * as `p` is in the `Await` state.
 */
def feed[I,O](in: Seq[I])(p: Process1[I,O]): Process1[I,O] = {
  @annotation.tailrec
  def go(in: Seq[I], cur: Process1[I,O]): Process1[I,O] =
    cur match {
      case Halt(e) => Halt(e)
      case Await(req,recv,fb,c) =>
        if (in.nonEmpty)
          // `req.is.to` converts the input via the `Eq` type witness.
          go(in.tail, TryAwait(recv(req.is.to(in.head)))(fb, c))
        else
          cur
      case Emit(h, t) => Emit(h, feed(in)(t))
    }
  go(in, p)
}

/*
 * Feed the left side of a `Tee`. Uses a tail recursive loop as
 * `p` is awaiting on the left.
 */
def feedL[I,I2,O](in: Seq[I])(t: Tee[I,I2,O]): Tee[I,I2,O] = {
  @annotation.tailrec
  def go(in: Seq[I], cur: Tee[I,I2,O]): Tee[I,I2,O] =
    cur match {
      case Halt(e) => Halt(e)
      case Await(side,recv,fb,c) =>
        if (in.nonEmpty) side.get match {
          case Left(isI) =>
            go(in.tail, Try { recv(isI.to(in.head)) })
          case Right(isI2) =>
            // Tee wants the right side first; defer the remaining
            // left-side input until after that value arrives.
            Await(side, recv andThen (feedL(in)), feedL(in)(fb), c)
        }
        else cur
      case Emit(h, t) => Emit(h, feedL(in)(t))
    }
  go(in, t)
}

/*
 * Feed the right side of a `Tee`. Uses a tail recursive loop as
 * `p` is awaiting on the right.
 */
def feedR[I,I2,O](in: Seq[I2])(t: Tee[I,I2,O]): Tee[I,I2,O] = {
  @annotation.tailrec
  def go(in: Seq[I2], cur: Tee[I,I2,O]): Tee[I,I2,O] =
    cur match {
      case Halt(e) => Halt(e)
      case Await(side,recv,fb,c) =>
        if (in.nonEmpty) side.get match {
          case Left(isI) =>
            // Symmetric to `feedL`: defer until the left side is fed.
            Await(side, recv andThen (feedR(in)), feedR(in)(fb), c)
          case Right(isI2) =>
            go(in.tail, Try { recv(isI2.to(in.head)) })
        }
        else cur
      case Emit(h, t) => Emit(h, feedR(in)(t))
    }
  go(in, t)
}
/* Our generalized `Process` type can represent sources! */
type IO[A] = fpinscala.iomonad.IO[Task,A]

/* Special exception indicating normal termination */
case object End extends Exception

/*
 * Here is a simple tail recursive function to collect all the
 * output of a `Process[IO,O]`. Notice we are using the fact
 * that `IO` can be `run` to produce either a result or an
 * exception.
 */
def collect[O](src: Process[IO,O]): IndexedSeq[O] = {
  @annotation.tailrec
  def go(cur: Process[IO,O], acc: IndexedSeq[O]): IndexedSeq[O] =
    cur match {
      case Emit(h,t) => go(t, acc ++ h)
      case Halt(End) => acc
      case Halt(err) => throw err
      case Await(req,recv,fb,c) =>
        // Run the request; route `End` to the fallback branch and
        // any other error through the cleanup branch before halting.
        val next =
          try recv(req.run)
          catch {
            case End => fb // Normal termination
            case err: Throwable => c ++ Halt(err)
          }
        go(next, acc)
    }
  go(src, IndexedSeq())
}
/*
* We can write a version of collect that works for any `Monad`.
* See the definition in the body of `Process`.
*/
/*
 * Generic combinator for producing a `Process[IO,O]` from some
 * effectful `O` source. The source is tied to some resource,
 * `R` (like a file handle) that we want to ensure is released.
 * See `lines` below for an example use.
 */
def resource[R,O](acquire: IO[R])(
    release: R => Process[IO,O])(
    use: R => Process[IO,O]): Process[IO,O] =
  // `onComplete` guarantees `release` runs on both normal and error exit.
  await[IO,R,O](acquire)(r => use(r).onComplete(release(r)))

/*
 * Like `resource`, but `release` is a single `IO` action.
 */
def resource_[R,O](acquire: IO[R])(
    release: R => IO[Unit])(
    use: R => Process[IO,O]): Process[IO,O] =
  resource(acquire)(release andThen (eval_[IO,Unit,O]))(use)

/*
 * Create a `Process[IO,O]` from the lines of a file, using
 * the `resource` combinator above to ensure the file is closed
 * when processing the stream of lines is finished.
 */
def lines(filename: String): Process[IO,String] =
  resource
    { IO(io.Source.fromFile(filename)) }
    { src => await[IO,Unit,String](IO(src.close))() }
    { src =>
      lazy val iter = src.getLines // a stateful iterator
      // Throwing `End` is how this source signals normal exhaustion.
      def step = if (iter.hasNext) iter.next else throw End
      await[IO,String,String](IO(step))(emit(_)).repeat
    }
/* Exercise 9: Implement `eval`, `eval_`, and use these to implement `lines`. */
// Evaluate the action once and emit its result.
def eval[F[_],A](a: F[A]): Process[F,A] =
  await[F,A,A](a)(a => Emit(Seq(a), Halt(End)))

/* Evaluate the action purely for its effects. */
def eval_[F[_],A,B](a: F[A]): Process[F,B] =
  eval[F,A](a).drain[B]

/* Also handy - evaluate the action repeatedly to produce a (possibly) infinite stream. */
def repeatEval[F[_],A](a: F[A]): Process[F,A] =
  eval[F,A](a).repeat

// Same as `lines`, but expressed with `eval`/`eval_` instead of raw `await`.
def lines2(filename: String): Process[IO,String] =
  resource
    { IO(io.Source.fromFile(filename)) }
    { src => eval_[IO,Unit,String](IO(src.close)) }
    { src =>
      lazy val iter = src.getLines // a stateful iterator
      def step = if (iter.hasNext) iter.next else throw End
      eval[IO,String](IO(step)).repeat
    }

/* Helper function with better type inference. */
def evalIO[A](a: IO[A]): Process[IO,A] =
  eval[IO,A](a)

def repeatIOEval[A](a: IO[A]): Process[IO,A] =
  repeatEval[IO,A](a)
/*
* We now have nice, resource safe effectful sources, but we don't
* have any way to transform them or filter them. Luckily we can
* still represent the single-input `Process` type we introduced
* earlier, which we'll now call `Process1`.
*/
// Type witness: the only request a `Process1` can make is for a value
// of its single input type `I`.
case class Is[I]() {
  sealed trait f[X] { def is: Eq[X,I] } // see definition in `Eq.scala`
  val Get = new f[I] { def is = Eq.refl }
}
def Get[I] = Is[I]().Get

// A single-input `Process`, recovered as a special case of the general type.
type Process1[I,O] = Process[Is[I]#f, O]

/* Some helper functions to improve type inference. */
def halt1[I,O]: Process1[I,O] = Halt[Is[I]#f, O](End)

def await1[I,O](
    recv: I => Process1[I,O],
    fallback: Process1[I,O] = halt1[I,O]): Process1[I, O] =
  Await(Get[I], recv, fallback, halt1)

def emit1[I,O](h: O, tl: Process1[I,O] = halt1[I,O]): Process1[I,O] =
  emit(h, tl)

def emitAll1[I,O](h: Seq[O], tl: Process1[I,O] = halt1[I,O]): Process1[I,O] =
  emitAll(h, tl)

def lift[I,O](f: I => O): Process1[I,O] =
  await1[I,O]((i:I) => emit(f(i))) repeat

def filter[I](f: I => Boolean): Process1[I,I] =
  await1[I,I](i => if (f(i)) emit(i) else halt1) repeat

// we can define take, takeWhile, and so on as before
def take[I](n: Int): Process1[I,I] =
  if (n <= 0) halt1
  else await1[I,I](i => emit(i, take(n-1)))
/*
We sometimes need to construct a `Process` that will pull values
from multiple input sources. For instance, suppose we want to
'zip' together two files, `f1.txt` and `f2.txt`, combining
corresponding lines in some way. Using the same trick we used for
`Process1`, we can create a two-input `Process` which can request
values from either the 'left' stream or the 'right' stream. We'll
call this a `Tee`, after the letter 'T', which looks like a
little diagram of two inputs being combined into one output.
*/
case class T[I,I2]() {
sealed trait f[X] { def get: Either[Eq[X,I], Eq[X,I2]] }
val L = new f[I] { def get = Left(Eq.refl) }
val R = new f[I2] { def get = Right(Eq.refl) }
}
def L[I,I2] = T[I,I2]().L
def R[I,I2] = T[I,I2]().R
type Tee[I,I2,O] = Process[T[I,I2]#f, O]
/* Again some helper functions to improve type inference. */
def awaitL[I,I2,O](
recv: I => Tee[I,I2,O],
fallback: Tee[I,I2,O] = haltT[I,I2,O]): Tee[I,I2,O] =
await[T[I,I2]#f,I,O](L)(recv, fallback)
def awaitR[I,I2,O](
recv: I2 => Tee[I,I2,O],
fallback: Tee[I,I2,O] = haltT[I,I2,O]): Tee[I,I2,O] =
await[T[I,I2]#f,I2,O](R)(recv, fallback)
def haltT[I,I2,O]: Tee[I,I2,O] =
Halt[T[I,I2]#f,O](End)
def emitT[I,I2,O](h: O, tl: Tee[I,I2,O] = haltT[I,I2,O]): Tee[I,I2,O] =
emit(h, tl)
def emitAllT[I,I2,O](h: Seq[O], tl: Tee[I,I2,O] = haltT[I,I2,O]): Tee[I,I2,O] =
emitAll(h, tl)
def zipWith[I,I2,O](f: (I,I2) => O): Tee[I,I2,O] =
awaitL[I,I2,O](i =>
awaitR (i2 => emitT(f(i,i2)))) repeat
def zip[I,I2]: Tee[I,I2,(I,I2)] = zipWith((_,_))
/*
* Like `zip` on lists, the above version halts as soon as either
* input is exhausted. Here is a version that pads the shorter
* stream with values.
*/
/**
 * Like `zipWith`, but when one input is exhausted the remaining values of the
 * other input are paired with the corresponding pad value, so nothing is dropped.
 */
def zipWithAll[I,I2,O](padI: I, padI2: I2)(
  f: (I,I2) => O): Tee[I,I2,O] = {
  // Fallbacks: once one side runs dry, keep draining the other side,
  // pairing each remaining value with the pad for the exhausted side.
  val fbR = passR[I,I2] map (f(padI, _ ))
  val fbL = passL[I,I2] map (f(_ , padI2))
  awaitLOr(fbR)(i =>
  awaitROr(fbL)(i2 => emitT(f(i,i2)))) repeat
}

/** `zip` that pads the shorter input instead of halting. */
def zipAll[I,I2](padI: I, padI2: I2): Tee[I,I2,(I,I2)] =
  zipWithAll(padI, padI2)((_,_))

/** `awaitL` with the fallback supplied first, for nicer call sites. */
def awaitLOr[I,I2,O](fallback: Tee[I,I2,O])(
  recvL: I => Tee[I,I2,O]): Tee[I,I2,O] =
  awaitL(recvL, fallback)

/** `awaitR` with the fallback supplied first, for nicer call sites. */
def awaitROr[I,I2,O](fallback: Tee[I,I2,O])(
  recvR: I2 => Tee[I,I2,O]): Tee[I,I2,O] =
  awaitR(recvR, fallback)

/* Ignores all input from left. */
def passR[I,I2]: Tee[I,I2,I2] = awaitR(emitT(_, passR))

/* Ignores input from the right. */
def passL[I,I2]: Tee[I,I2,I] = awaitL(emitT(_, passL))

/* Alternate pulling values from the left and the right inputs. */
def interleaveT[I]: Tee[I,I,I] =
  awaitL[I,I,I](i =>
  awaitR (i2 => emitAllT(Seq(i,i2)))) repeat
/*
Our `Process` type can also represent effectful sinks (like a file).
A `Sink` is simply a source of effectful functions! See the
definition of `to` in `Process` for an example of how to feed a
`Process` to a `Sink`.
*/
/** A sink is a stream of effectful functions, each consuming a single `O`. */
type Sink[F[_],O] = Process[F, O => Process[F,Unit]]

import java.io.FileWriter

/* A `Sink` which writes input strings to the given file. */
// The writer is acquired/released via `resource`, so it is closed by the
// release action when the stream terminates.
def fileW(file: String, append: Boolean = false): Sink[IO,String] =
  resource[FileWriter, String => Process[IO,Unit]]
    { IO { new FileWriter(file, append) }}
    { w => eval[IO,Unit](IO(w.close)).drain }
    { w => constant { (s: String) => eval[IO,Unit](IO(w.write(s))) }}

/* The infinite, constant stream. */
def constant[A](a: A): Process[IO,A] =
  eval[IO,A](IO(a)).repeat
/* Exercise 10: Implement `join`. Notice this is the standard monadic combinator! */
/** Flatten a stream of streams into a single stream — the standard monadic `join`. */
def join[F[_],A](p: Process[F,Process[F,A]]): Process[F,A] =
  p.flatMap(identity)
/*
* An example use of the combinators we have so far: incrementally
* convert the lines of a file from fahrenheit to celsius.
*/
import fpinscala.iomonad.IO0.fahrenheitToCelsius

// Example pipeline: read `fahrenheit.txt`, skip lines starting with '#',
// convert each reading to celsius, write the results to `celsius.txt`.
// `drain` discards the emitted values, leaving only the effects.
val converter: Process[IO,Unit] =
  lines("fahrenheit.txt").
  filter(!_.startsWith("#")).
  map(line => fahrenheitToCelsius(line.toDouble).toString).
  to(fileW("celsius.txt")).
  drain
/*
More generally, we can feed a `Process` through an effectful
channel which returns a value other than `Unit`.
*/
/** Like `Sink`, but each effectful function may produce a stream of `O` results. */
type Channel[F[_],I,O] = Process[F, I => Process[F,O]]

/*
 * Here is an example, a JDBC query runner which returns the
 * stream of rows from the result set of the query. We have
 * the channel take a `Connection => PreparedStatement` as
 * input, so code that uses this channel does not need to be
 * responsible for knowing how to obtain a `Connection`.
 */
import java.sql.{Connection, PreparedStatement, ResultSet}

// Outer `resource_` manages the Connection; the inner one manages each
// ResultSet, so both are released when their streams terminate.
def query(conn: IO[Connection]):
  Channel[IO, Connection => PreparedStatement, Map[String,Any]] =
  resource_
    { conn }
    { c => IO(c.close) }
    { conn => constant { (q: Connection => PreparedStatement) =>
      resource_
        { IO {
          val rs = q(conn).executeQuery
          val ncols = rs.getMetaData.getColumnCount
          // Column names are fetched once up front and reused for every row.
          val cols = (1 to ncols).map(rs.getMetaData.getColumnName)
          (rs, cols)
        }}
        { p => IO { p._1.close } } // close the ResultSet
        { case (rs, cols) => repeatEval[IO,Map[String,Any]] { IO {
          // Throwing `End` signals normal termination once the rows run out.
          if (!rs.next) throw End
          else cols.map(c => (c, rs.getObject(c): Any)).toMap
        }}}
    }}
/*
* We can allocate resources dynamically when defining a `Process`.
* As an example, this program reads a list of filenames to process
* _from another file_, opening each file, processing it and closing
* it promptly.
*/
// Reads the list of files to convert from `fahrenheits.txt` and appends
// every converted reading to a single `celsius.txt` sink.
// NOTE(review): `fileW(...).once` — presumably restricts the sink stream to a
// single writer value that is then reused for all files; confirm `once` semantics.
val convertAll: Process[IO,Unit] = (for {
  out <- fileW("celsius.txt").once
  file <- lines("fahrenheits.txt")
  _ <- join {
    lines(file).
    map(line => fahrenheitToCelsius(line.toDouble)).
    map(celsius => out(celsius.toString))
  }
} yield ()) drain
/*
* Just by switching the order of the `flatMap` calls, we can output
* to multiple files.
*/
// Writes each input file to its own `<name>.celsius` output file: the sink is
// constructed inside the loop over file names, once per input file.
val convertMultisink: Process[IO,Unit] = (for {
  file <- lines("fahrenheits.txt")
  _ <- lines(file).
       map(line => fahrenheitToCelsius(line.toDouble)).
       map(_ toString).
       to(fileW(file + ".celsius"))
} yield ()) drain

/*
 * We can attach filters or other transformations at any point in the
 * program, for example:
 */
val convertMultisink2: Process[IO,Unit] = (for {
  file <- lines("fahrenheits.txt")
  _ <- lines(file).
       filter(!_.startsWith("#")).
       map(line => fahrenheitToCelsius(line.toDouble)).
       filter(_ > 0). // ignore below zero temperatures
       map(_ toString).
       to(fileW(file + ".celsius"))
} yield ()) drain
}
}
/** Entry point that runs the example file-conversion streams. */
object ProcessTest extends App {
  import GeneralizedStreamTransducers._
  import Process.{IO, lines}

  // `collect` runs the stream's effects; the printed value is its result.
  println { Process.collect(Process.converter) }
  println { Process.collect(Process.convertAll) }
}
| willcodejavaforfood/fpinscala | answers/src/main/scala/fpinscala/streamingio/StreamingIO.scala | Scala | mit | 45,067 |
package org.jetbrains.sbt
package project.structure
import com.intellij.execution.process.{ProcessOutputTypes, ProcessEvent, ProcessAdapter}
import com.intellij.openapi.util.Key
/**
* @author Pavel Fatin
*/
/**
 * Adapts IntelliJ's `ProcessAdapter` callback to a simple
 * `(OutputType, String)` listener function.
 *
 * @param listener invoked with the classified output type and the emitted text
 */
class ListenerAdapter(listener: (OutputType, String) => Unit) extends ProcessAdapter {
  override def onTextAvailable(event: ProcessEvent, outputType: Key[_]) {
    // The original match was non-exhaustive: IntelliJ also reports output under
    // other keys (e.g. ProcessOutputTypes.SYSTEM), which would have thrown a
    // MatchError here. Unrecognized output types are now silently ignored.
    val textType = outputType match {
      case ProcessOutputTypes.STDOUT => Some(OutputType.StdOut)
      case ProcessOutputTypes.STDERR => Some(OutputType.StdErr)
      case _ => None
    }
    textType.foreach(t => listener(t, event.getText))
  }
}
| consulo/consulo-scala | SBT/src/main/scala/org/jetbrains/sbt/project/structure/ListenerAdapter.scala | Scala | apache-2.0 | 576 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.scala.dsl
import java.util.concurrent.ExecutorService
import org.apache.camel.model.MulticastDefinition
import org.apache.camel.scala.dsl.builder.RouteBuilder
import org.apache.camel.Exchange
import org.apache.camel.processor.aggregate.AggregationStrategy
/**
 * Scala DSL wrapper around Camel's [[MulticastDefinition]].
 *
 * Every combinator configures the underlying `target` definition and returns
 * this wrapper (via `wrap`) so calls can be chained fluently.
 */
case class SMulticastDefinition(override val target: MulticastDefinition)(implicit val builder: RouteBuilder) extends SAbstractDefinition[MulticastDefinition] {

  /** Aggregate multicast replies with a plain Scala function. */
  def strategy(function: (Exchange, Exchange) => Exchange) = wrap(
    // Consistency fix: previously this combinator returned bare `this` instead
    // of going through `wrap` like every other combinator in this class.
    target.setAggregationStrategy(
      new AggregationStrategy() {
        // Adapt the Scala function to Camel's AggregationStrategy SPI.
        def aggregate(oldExchange: Exchange, newExchange: Exchange) = function(oldExchange, newExchange)
      }
    )
  )

  /** Aggregate multicast replies with an existing [[AggregationStrategy]]. */
  def strategy(strategy: AggregationStrategy) = wrap(target.setAggregationStrategy(strategy))

  /** Enable parallel processing of the multicast recipients. */
  def parallel = wrap(target.parallelProcessing)

  /** Enable streaming (aggregate replies as they arrive). */
  def streaming = wrap(target.streaming)

  /** Enable Camel's stop-on-exception behaviour for this multicast. */
  def stopOnException = wrap(target.stopOnException())

  /** Use the given executor service for parallel processing. */
  def executorService(executorService: ExecutorService) = wrap(target.setExecutorService(executorService))

  /** Look up the executor service to use by registry reference. */
  def executorServiceRef(ref: String) = wrap(target.setExecutorServiceRef(ref))

  /** Set the multicast aggregation timeout (delegates to Camel's `timeout`). */
  def timeout(timeout: Long) = wrap(target.timeout(timeout))

  // Narrow the return type of the inherited wrap so chaining keeps this type.
  override def wrap(block: => Unit) = super.wrap(block).asInstanceOf[SMulticastDefinition]
}
| shuliangtao/apache-camel-2.13.0-src | components/camel-scala/src/main/scala/org/apache/camel/scala/dsl/SMulticastDefinition.scala | Scala | apache-2.0 | 2,112 |
package example
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
/**
* This class implements a ScalaTest test suite for the methods in object
* `Lists` that need to be implemented as part of this assignment. A test
* suite is simply a collection of individual tests for some specific
* component of a program.
*
* A test suite is created by defining a class which extends the type
* `org.scalatest.FunSuite`. When running ScalaTest, it will automatically
* find this class and execute all of its tests.
*
* Adding the `@RunWith` annotation enables the test suite to be executed
* inside eclipse using the built-in JUnit test runner.
*
* You have two options for running this test suite:
*
* - Start the sbt console and run the "test" command
* - Right-click this file in eclipse and chose "Run As" - "JUnit Test"
*/
@RunWith(classOf[JUnitRunner])
class ListsSuite extends FunSuite {

  /**
   * Tests are written using the `test` operator which takes two arguments:
   *
   * - A description of the test. This description has to be unique, no two
   *   tests can have the same description.
   * - The test body, a piece of Scala code that implements the test
   *
   * The most common way to implement a test body is using the method `assert`
   * which tests that its argument evaluates to `true`. So one of the simplest
   * successful tests is the following:
   */
  test("one plus one is two")(assert(1 + 1 == 2))

  /**
   * In Scala, it is allowed to pass an argument to a method using the block
   * syntax, i.e. `{ argument }` instead of parentheses `(argument)`.
   *
   * This allows tests to be written in a more readable manner:
   */
  test("one plus one is three?") {
    assert(1 + 1 != 3) // Fixed: one plus one is not three, so this assertion passes.
  }

  /**
   * One problem with a plain `==` assertion is that ScalaTest will
   * only tell you that a test failed, but it will not tell you what was
   * the reason for the failure. The output looks like this:
   *
   * {{{
   *   [info] - one plus one is three? *** FAILED ***
   * }}}
   *
   * This situation can be improved by using a special equality operator
   * `===` instead of `==` (this is only possible in ScalaTest). So if such a
   * test fails, ScalaTest will show the following output:
   *
   * {{{
   *   [info] - details why one plus one is not three *** FAILED ***
   *   [info]   2 did not equal 3 (ListsSuite.scala:67)
   * }}}
   *
   * We recommend to always use the `===` equality operator when writing tests.
   */
  test("details why one plus one is not three") {
    // Use `!==` (the detailed-message variant of `!=`) as recommended above.
    assert(1 + 1 !== 3)
  }

  /**
   * In order to test the exceptional behavior of a methods, ScalaTest offers
   * the `intercept` operation.
   *
   * In the following example, we test the fact that the method `intNotZero`
   * throws an `IllegalArgumentException` if its argument is `0`.
   */
  test("intNotZero throws an exception if its argument is 0") {
    intercept[IllegalArgumentException] {
      intNotZero(0)
    }
  }

  /** Returns `x` unchanged, rejecting zero with an IllegalArgumentException. */
  def intNotZero(x: Int): Int = {
    if (x == 0) throw new IllegalArgumentException("zero is not allowed")
    else x
  }

  /**
   * Now we finally write some tests for the list functions that have to be
   * implemented for this assignment. We fist import all members of the
   * `List` object.
   */
  import Lists._

  /**
   * We only provide two very basic tests for you. Write more tests to make
   * sure your `sum` and `max` methods work as expected.
   *
   * In particular, write tests for corner cases: negative numbers, zeros,
   * empty lists, lists with repeated elements, etc.
   *
   * It is allowed to have multiple `assert` statements inside one test,
   * however it is recommended to write an individual `test` statement for
   * every tested aspect of a method.
   */
  test("sum of a few numbers") {
    assert(sum(List(1, 2, 0)) === 3)
  }

  test("max of a few numbers") {
    assert(max(List(3, 7, 2)) === 7)
  }

  // Additional corner cases, as requested by the comment above.
  test("sum of negative numbers") {
    assert(sum(List(-3, -5)) === -8)
  }

  test("max of a list containing only negative numbers") {
    assert(max(List(-7, -2, -10)) === -2)
  }
} | relyah/CourseraFunctionalProgramming | resource/progfun-master/example/src/test/scala/example/ListsSuite.scala | Scala | gpl-2.0 | 4,025 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.kafka010
import java.{util => ju}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.connector.write.{DataWriter, WriterCommitMessage}
import org.apache.spark.sql.kafka010.producer.{CachedKafkaProducer, InternalKafkaProducerPool}
/**
* Dummy commit message. The DataSourceV2 framework requires a commit message implementation but we
* don't need to really send one.
*/
private case object KafkaDataWriterCommitMessage extends WriterCommitMessage
/**
* A [[DataWriter]] for Kafka writing. One data writer will be created in each partition to
* process incoming rows.
*
* @param targetTopic The topic that this data writer is targeting. If None, topic will be inferred
* from a `topic` field in the incoming data.
* @param producerParams Parameters to use for the Kafka producer.
* @param inputSchema The attributes in the input data.
*/
private[kafka010] class KafkaDataWriter(
targetTopic: Option[String],
producerParams: ju.Map[String, Object],
inputSchema: Seq[Attribute])
extends KafkaRowWriter(inputSchema, targetTopic) with DataWriter[InternalRow] {
private var producer: Option[CachedKafkaProducer] = None
def write(row: InternalRow): Unit = {
checkForErrors()
if (producer.isEmpty) {
producer = Some(InternalKafkaProducerPool.acquire(producerParams))
}
producer.foreach { p => sendRow(row, p.producer) }
}
def commit(): WriterCommitMessage = {
// Send is asynchronous, but we can't commit until all rows are actually in Kafka.
// This requires flushing and then checking that no callbacks produced errors.
// We also check for errors before to fail as soon as possible - the check is cheap.
checkForErrors()
producer.foreach(_.producer.flush())
checkForErrors()
KafkaDataWriterCommitMessage
}
def abort(): Unit = {}
def close(): Unit = {
producer.foreach(InternalKafkaProducerPool.release)
producer = None
}
}
| maropu/spark | external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaDataWriter.scala | Scala | apache-2.0 | 2,860 |
package models.services
import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.api.services.IdentityService
import models.CvsUser
/**
* Handles actions to users.
*/
trait CvsUserService extends IdentityService[CvsUser] {
/**
* Saves a user.
*
* @param user the user to save
* @param loginInfo the user's login info consisting of the way they authenticated (e.g., via credentials) and the their unique identifier (e.g., their email address)
*/
def save(user: CvsUser, loginInfo: LoginInfo)
}
| mb720/cvs | app/models/services/CvsUserService.scala | Scala | bsd-2-clause | 554 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.joins
import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spark.sql.catalyst.expressions.{And, Expression, LessThan}
import org.apache.spark.sql.catalyst.optimizer.{BuildLeft, BuildRight}
import org.apache.spark.sql.catalyst.planning.ExtractEquiJoinKeys
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical.{Join, JoinHint}
import org.apache.spark.sql.execution.{SparkPlan, SparkPlanTest}
import org.apache.spark.sql.execution.exchange.EnsureRequirements
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.{DoubleType, IntegerType, StructType}
class OuterJoinSuite extends SparkPlanTest with SharedSparkSession {
  private val EnsureRequirements = new EnsureRequirements()

  // Left input: includes a null key and duplicated rows so the join operators
  // are exercised with multiple buffered matches.
  private lazy val left = spark.createDataFrame(
    sparkContext.parallelize(Seq(
      Row(1, 2.0),
      Row(2, 100.0),
      Row(2, 1.0), // This row is duplicated to ensure that we will have multiple buffered matches
      Row(2, 1.0),
      Row(3, 3.0),
      Row(5, 1.0),
      Row(6, 6.0),
      Row(null, null)
    )), new StructType().add("a", IntegerType).add("b", DoubleType))

  // Right input: likewise includes nulls and duplicated key values.
  private lazy val right = spark.createDataFrame(
    sparkContext.parallelize(Seq(
      Row(0, 0.0),
      Row(2, 3.0), // This row is duplicated to ensure that we will have multiple buffered matches
      Row(2, -1.0), // This row is duplicated to ensure that we will have multiple buffered matches
      Row(2, -1.0),
      Row(2, 3.0),
      Row(3, 2.0),
      Row(4, 1.0),
      Row(5, 3.0),
      Row(7, 7.0),
      Row(null, null)
    )), new StructType().add("c", IntegerType).add("d", DoubleType))

  // Join condition: equi-join on a = c plus a non-equi range predicate b < d.
  private lazy val condition = {
    And((left.col("a") === right.col("c")).expr,
      LessThan(left.col("b").expr, right.col("d").expr))
  }

  // Variants of the inputs whose join keys are unique (no duplicated matches).
  private lazy val uniqueLeft = spark.createDataFrame(
    sparkContext.parallelize(Seq(
      Row(1, 2.0),
      Row(2, 1.0),
      Row(3, 3.0),
      Row(5, 1.0),
      Row(6, 6.0),
      Row(null, null)
    )), new StructType().add("a", IntegerType).add("b", DoubleType))

  private lazy val uniqueRight = spark.createDataFrame(
    sparkContext.parallelize(Seq(
      Row(0, 0.0),
      Row(2, 3.0),
      Row(3, 2.0),
      Row(4, 1.0),
      Row(5, 3.0),
      Row(7, 7.0),
      Row(null, null)
    )), new StructType().add("c", IntegerType).add("d", DoubleType))

  private lazy val uniqueCondition = {
    And((uniqueLeft.col("a") === uniqueRight.col("c")).expr,
      LessThan(uniqueLeft.col("b").expr, uniqueRight.col("d").expr))
  }

  // Note: the input dataframes and expression must be evaluated lazily because
  // the SQLContext should be used only within a test to keep SQL tests stable

  /**
   * Registers one test per applicable physical join operator (shuffled hash,
   * broadcast hash, sort-merge, and broadcast nested loop in both build
   * directions), each checking the same join against `expectedAnswer`.
   */
  private def testOuterJoin(
      testName: String,
      leftRows: => DataFrame,
      rightRows: => DataFrame,
      joinType: JoinType,
      condition: => Expression,
      expectedAnswer: Seq[Product]): Unit = {

    // Build a logical Inner join solely so ExtractEquiJoinKeys can pull out
    // the equi-join keys plus the residual (non-equi) condition.
    def extractJoinParts(): Option[ExtractEquiJoinKeys.ReturnType] = {
      val join = Join(leftRows.logicalPlan, rightRows.logicalPlan,
        Inner, Some(condition), JoinHint.NONE)
      ExtractEquiJoinKeys.unapply(join)
    }

    testWithWholeStageCodegenOnAndOff(s"$testName using ShuffledHashJoin") { _ =>
      extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _, _) =>
        withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
          // Build the hash table on the side that is preserved by the join.
          val buildSide = if (joinType == LeftOuter) BuildRight else BuildLeft
          checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
            EnsureRequirements.apply(
              ShuffledHashJoinExec(
                leftKeys, rightKeys, joinType, buildSide, boundCondition, left, right)),
            expectedAnswer.map(Row.fromTuple),
            sortAnswers = true)
        }
      }
    }

    // BroadcastHashJoin does not support full outer joins.
    if (joinType != FullOuter) {
      testWithWholeStageCodegenOnAndOff(s"$testName using BroadcastHashJoin") { _ =>
        val buildSide = joinType match {
          case LeftOuter => BuildRight
          case RightOuter => BuildLeft
          case _ => fail(s"Unsupported join type $joinType")
        }
        extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _, _) =>
          withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
            checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
              BroadcastHashJoinExec(
                leftKeys, rightKeys, joinType, buildSide, boundCondition, left, right),
              expectedAnswer.map(Row.fromTuple),
              sortAnswers = true)
          }
        }
      }
    }

    testWithWholeStageCodegenOnAndOff(s"$testName using SortMergeJoin") { _ =>
      extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _, _) =>
        withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
          checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
            EnsureRequirements.apply(
              SortMergeJoinExec(leftKeys, rightKeys, joinType, boundCondition, left, right)),
            expectedAnswer.map(Row.fromTuple),
            sortAnswers = true)
        }
      }
    }

    // The nested-loop variants take the full original condition (no key extraction).
    testWithWholeStageCodegenOnAndOff(s"$testName using BroadcastNestedLoopJoin build left") { _ =>
      withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
        checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
          BroadcastNestedLoopJoinExec(left, right, BuildLeft, joinType, Some(condition)),
          expectedAnswer.map(Row.fromTuple),
          sortAnswers = true)
      }
    }

    testWithWholeStageCodegenOnAndOff(s"$testName using BroadcastNestedLoopJoin build right") { _ =>
      withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
        checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
          BroadcastNestedLoopJoinExec(left, right, BuildRight, joinType, Some(condition)),
          expectedAnswer.map(Row.fromTuple),
          sortAnswers = true)
      }
    }
  }

  // --- Basic outer joins ------------------------------------------------------------------------

  testOuterJoin(
    "basic left outer join",
    left,
    right,
    LeftOuter,
    condition,
    Seq(
      (null, null, null, null),
      (1, 2.0, null, null),
      (2, 100.0, null, null),
      (2, 1.0, 2, 3.0),
      (2, 1.0, 2, 3.0),
      (2, 1.0, 2, 3.0),
      (2, 1.0, 2, 3.0),
      (3, 3.0, null, null),
      (5, 1.0, 5, 3.0),
      (6, 6.0, null, null)
    )
  )

  testOuterJoin(
    "basic right outer join",
    left,
    right,
    RightOuter,
    condition,
    Seq(
      (null, null, null, null),
      (null, null, 0, 0.0),
      (2, 1.0, 2, 3.0),
      (2, 1.0, 2, 3.0),
      (null, null, 2, -1.0),
      (null, null, 2, -1.0),
      (2, 1.0, 2, 3.0),
      (2, 1.0, 2, 3.0),
      (null, null, 3, 2.0),
      (null, null, 4, 1.0),
      (5, 1.0, 5, 3.0),
      (null, null, 7, 7.0)
    )
  )

  testOuterJoin(
    "basic full outer join",
    left,
    right,
    FullOuter,
    condition,
    Seq(
      (1, 2.0, null, null),
      (null, null, 2, -1.0),
      (null, null, 2, -1.0),
      (2, 100.0, null, null),
      (2, 1.0, 2, 3.0),
      (2, 1.0, 2, 3.0),
      (2, 1.0, 2, 3.0),
      (2, 1.0, 2, 3.0),
      (3, 3.0, null, null),
      (5, 1.0, 5, 3.0),
      (6, 6.0, null, null),
      (null, null, 0, 0.0),
      (null, null, 3, 2.0),
      (null, null, 4, 1.0),
      (null, null, 7, 7.0),
      (null, null, null, null),
      (null, null, null, null)
    )
  )

  // --- Both inputs empty ------------------------------------------------------------------------

  testOuterJoin(
    "left outer join with both inputs empty",
    left.filter("false"),
    right.filter("false"),
    LeftOuter,
    condition,
    Seq.empty
  )

  testOuterJoin(
    "right outer join with both inputs empty",
    left.filter("false"),
    right.filter("false"),
    RightOuter,
    condition,
    Seq.empty
  )

  testOuterJoin(
    "full outer join with both inputs empty",
    left.filter("false"),
    right.filter("false"),
    FullOuter,
    condition,
    Seq.empty
  )

  // --- Join keys are unique ---------------------------------------------------------------------

  testOuterJoin(
    "left outer join with unique keys",
    uniqueLeft,
    uniqueRight,
    LeftOuter,
    uniqueCondition,
    Seq(
      (null, null, null, null),
      (1, 2.0, null, null),
      (2, 1.0, 2, 3.0),
      (3, 3.0, null, null),
      (5, 1.0, 5, 3.0),
      (6, 6.0, null, null)
    )
  )

  testOuterJoin(
    "right outer join with unique keys",
    uniqueLeft,
    uniqueRight,
    RightOuter,
    uniqueCondition,
    Seq(
      (null, null, null, null),
      (null, null, 0, 0.0),
      (2, 1.0, 2, 3.0),
      (null, null, 3, 2.0),
      (null, null, 4, 1.0),
      (5, 1.0, 5, 3.0),
      (null, null, 7, 7.0)
    )
  )
}
| jiangxb1987/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/joins/OuterJoinSuite.scala | Scala | apache-2.0 | 9,765 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2.orc
import org.apache.spark.sql.connector.catalog.Table
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.execution.datasources.orc.OrcFileFormat
import org.apache.spark.sql.execution.datasources.v2._
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap
/**
 * DataSource V2 implementation for ORC, with a V1 fallback format.
 */
class OrcDataSourceV2 extends FileDataSourceV2 {

  override def fallbackFileFormat: Class[_ <: FileFormat] = classOf[OrcFileFormat]

  override def shortName(): String = "orc"

  override def getTable(options: CaseInsensitiveStringMap): Table =
    makeTable(options, None)

  override def getTable(options: CaseInsensitiveStringMap, schema: StructType): Table =
    makeTable(options, Some(schema))

  // Shared by both getTable overloads: resolve paths and table name once,
  // then build the OrcTable with an optional user-specified schema.
  private def makeTable(options: CaseInsensitiveStringMap, schema: Option[StructType]): Table = {
    val paths = getPaths(options)
    val tableName = getTableName(options, paths)
    OrcTable(tableName, sparkSession, options, paths, schema, fallbackFileFormat)
  }
}
| dbtsai/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/orc/OrcDataSourceV2.scala | Scala | apache-2.0 | 1,875 |
package com.twitter.finagle.redis.integration
import collection.mutable
import com.twitter.finagle.redis.Client
import com.twitter.finagle.redis.util.{CBToString, StringToChannelBuffer}
import java.util.UUID
import org.jboss.netty.buffer.ChannelBuffer
import org.specs.SpecificationWithJUnit
class BtreeClientSpec extends SpecificationWithJUnit {
  {
    "redis client" should {
      // The examples share a single client and one generated data set, so
      // they must run in declaration order.
      setSequential()

      var client: Client = null
      var dict: mutable.HashMap[String, mutable.HashMap[String, String]] = null

      "connect the client" in {
        // Until the Redis Server Btree changes are checked in and merged into master
        // The redis server needs to be started and shutdown externally
        // And the client connects it via the port 6379
        // After the changes become a part of the installed redis server
        // This will use RedisCluster to start and manage the external redis server
        val hostAddress = "127.0.0.1:6379"
        client = Client(hostAddress)
        dict = generateTestCases()
        require(client != null)
      }
      "test adding of outerkey, innerkey and value tuples using BADD command" in {
        testBadd(client, dict)
      }
      "test cardinality function for outerkey using BCARD command" in {
        testBcard(client, dict)
      }
      "test value for outerkey, innerkey pair using BGET command" in {
        testBget(client, dict)
      }
      "test BRANGE from start to end for outerkey" in {
        testBrange(client, dict)
      }
      "test BRANGE from a start key that exists to the end for outerkey" in {
        testBrangeInclusiveStart(client, dict)
      }
      "test BRANGE from start to end key that exists for outerkey" in {
        testBrangeInclusiveEnd(client, dict)
      }
      "test BRANGE from start key to end key where both exist for outerkey" in {
        testBrangeInclusiveStartEnd(client, dict)
      }
      "test BRANGE from start key that doesn't exist to end for outerkey" in {
        testBrangeExclusiveStart(client, dict)
      }
      "test BRANGE from start to end key that doesn't exist for outerkey" in {
        testBrangeExclusiveEnd(client, dict)
      }
      "test BRANGE from start key to end key where both don't exist for outerkey" in {
        testBrangeExclusiveStartEnd(client, dict)
      }
      "test removal of innerkey value pairs for outerkey using BREM command" in {
        testBrem(client, dict)
      }
      // NOTE(review): this description duplicates the earlier BCARD example;
      // specs may require unique example names — confirm against specs version.
      "test cardinality function for outerkey using BCARD command" in {
        testBcard(client, dict)
      }
      // Cleanup runs as plain statements after the examples are declared.
      println("Closing client...")
      client.flushDB()
      client.release()
      println("Done!")
    }
  }
/** Smoke test: SET a known key/value pair, GET it back, and print the result. */
def defaultTest(client: Client) {
  val key = "megatron"
  val value = "optimus"
  println("Setting " + key + "->" + value)
  client.set(StringToChannelBuffer(key), StringToChannelBuffer(value))
  println("Getting value for key " + key)
  val fetched = client.get(StringToChannelBuffer(key))()
  // Report either the retrieved bytes (decoded) or the absence of a value.
  fetched.fold(println("Didn't get the value!")) { buf =>
    println("Got result: " + new String(buf.array))
  }
}
/**
 * Builds a random fixture: 100 outer keys, each mapped to an inner map of
 * 100 key/value pairs. All keys and values are fresh UUID strings.
 */
def generateTestCases(): mutable.HashMap[String, mutable.HashMap[String, String]] = {
  val numSets = 100
  val setSize = 100
  val dict = new mutable.HashMap[String, mutable.HashMap[String, String]]
  (0 until numSets).foreach { _ =>
    val inner = new mutable.HashMap[String, String]
    (0 until setSize).foreach { _ =>
      inner.put(UUID.randomUUID().toString, UUID.randomUUID().toString)
    }
    dict.put(UUID.randomUUID().toString, inner)
  }
  dict
}
/** Inserts every (outer, inner, value) tuple via BADD; each insert must report 1. */
def testBadd(client: Client, dict: mutable.HashMap[String, mutable.HashMap[String, String]]) {
  for ((outerKey, inner) <- dict) {
    for ((innerKey, value) <- inner) {
      val target = client.bAdd(StringToChannelBuffer(outerKey), StringToChannelBuffer(innerKey), StringToChannelBuffer(value))
      require(target.get() == 1, "BADD failed for " + outerKey + " " + innerKey)
    }
  }
  println("Test BADD succeeded")
}

/** Checks BCARD reports the size of the expected inner map for every outer key. */
def testBcard(client: Client, dict: mutable.HashMap[String, mutable.HashMap[String, String]]) {
  for ((outerKey, inner) <- dict) {
    val target = client.bCard(StringToChannelBuffer(outerKey))
    require(inner.size == target.get,
      "BCARD failed for " + outerKey + " expected " + inner.size + " got " + target.get)
  }
  println("Test BCARD succeeded")
}

/** Checks BGET returns the stored value for every (outer, inner) pair. */
def testBget(client: Client, dict: mutable.HashMap[String, mutable.HashMap[String, String]]) {
  for ((outerKey, inner) <- dict) {
    for ((innerKey, value) <- inner) {
      val target = client.bGet(StringToChannelBuffer(outerKey), StringToChannelBuffer(innerKey))
      val targetVal = CBToString(target.get().get)
      require(value == targetVal, "BGET failed for " + outerKey + " expected " + value + " got " + targetVal)
    }
  }
  println("Test BGET succeeded")
}
/**
 * Removes every inner key via BREM (each removal must report 1) and mirrors
 * the removal in the local fixture so later BCARD checks expect size 0.
 */
def testBrem(client: Client, dict: mutable.HashMap[String, mutable.HashMap[String, String]]) {
  for ((outerKey, inner) <- dict) {
    // Iterate over a snapshot of the entries: the original code removed
    // entries from `inner` while iterating the live mutable.HashMap, which
    // is undefined behaviour and can skip or repeat elements.
    for ((innerKey, _) <- inner.toList) {
      val target = client.bRem(StringToChannelBuffer(outerKey), Seq(StringToChannelBuffer(innerKey)))
      require(target.get() == 1, "BREM failed for " + outerKey + " " + innerKey)
      inner.remove(innerKey)
    }
  }
  println("Test BREM succeeded")
}
/** BRANGE with no bounds must return every entry in sorted inner-key order. */
def testBrange(client: Client, dict: mutable.HashMap[String, mutable.HashMap[String, String]]) {
  for ((outerKey, inner) <- dict) {
    val innerKeys = inner.toList.sortBy(_._1)
    val target = client.bRange(StringToChannelBuffer(outerKey), None, None).get()
    validate(outerKey, innerKeys, target)
  }
  println("Test BRANGE succeeded")
}

/** BRANGE starting at an existing key returns that key and everything after it. */
def testBrangeInclusiveStart(client: Client, dict: mutable.HashMap[String, mutable.HashMap[String, String]]) {
  val rand = new scala.util.Random()
  for ((outerKey, inner) <- dict) {
    var innerKeys = inner.toList.sortBy(_._1)
    val start = rand.nextInt(innerKeys.size)
    innerKeys = innerKeys.drop(start)
    val target = client.bRange(StringToChannelBuffer(outerKey), Option(StringToChannelBuffer(innerKeys.head._1)), None).get()
    validate(outerKey, innerKeys, target)
  }
  println("Test BRANGE Inclusive Start succeeded")
}

/** BRANGE ending at an existing key returns that key and everything before it. */
def testBrangeInclusiveEnd(client: Client, dict: mutable.HashMap[String, mutable.HashMap[String, String]]) {
  val rand = new scala.util.Random()
  for ((outerKey, inner) <- dict) {
    var innerKeys = inner.toList.sortBy(_._1)
    val end = rand.nextInt(innerKeys.size)
    innerKeys = innerKeys.dropRight(end)
    val target = client.bRange(StringToChannelBuffer(outerKey), None, Option(StringToChannelBuffer(innerKeys.last._1))).get()
    validate(outerKey, innerKeys, target)
  }
  println("Test BRANGE Inclusive End succeeded")
}

/**
 * BRANGE between two existing keys: returns the inclusive slice when
 * start <= end, and must fail (throw) when start > end.
 */
def testBrangeInclusiveStartEnd(client: Client, dict: mutable.HashMap[String, mutable.HashMap[String, String]]) {
  val rand = new scala.util.Random()
  for ((outerKey, inner) <- dict) {
    var innerKeys = inner.toList.sortBy(_._1)
    val start = rand.nextInt(innerKeys.size)
    val end = rand.nextInt(innerKeys.size)
    val target = client.bRange(
      StringToChannelBuffer(outerKey),
      Option(StringToChannelBuffer(innerKeys(start)._1)),
      Option(StringToChannelBuffer(innerKeys(end)._1)))
    if (start > end) {
      require(target.isThrow, "BRANGE failed for " + outerKey + " return should be a throw")
    }
    else {
      innerKeys = innerKeys.slice(start, end + 1)
      validate(outerKey, innerKeys, target.get())
    }
  }
  println("Test BRANGE Inclusive Start End succeeded")
}

/** BRANGE from a random (almost certainly absent) start key: lexicographic filter applies. */
def testBrangeExclusiveStart(client: Client, dict: mutable.HashMap[String, mutable.HashMap[String, String]]) {
  for ((outerKey, inner) <- dict) {
    var innerKeys = inner.toList.sortBy(_._1)
    val start = UUID.randomUUID().toString
    innerKeys = innerKeys.filter(p => (start <= p._1))
    val target = client.bRange(StringToChannelBuffer(outerKey), Option(StringToChannelBuffer(start)), None).get()
    validate(outerKey, innerKeys, target)
  }
  println("Test BRANGE Exclusive Start succeeded")
}

/** BRANGE up to a random (almost certainly absent) end key. */
def testBrangeExclusiveEnd(client: Client, dict: mutable.HashMap[String, mutable.HashMap[String, String]]) {
  for ((outerKey, inner) <- dict) {
    var innerKeys = inner.toList.sortBy(_._1)
    val end = UUID.randomUUID().toString
    innerKeys = innerKeys.filter(p => (p._1 <= end))
    val target = client.bRange(StringToChannelBuffer(outerKey), None, Option(StringToChannelBuffer(end))).get()
    validate(outerKey, innerKeys, target)
  }
  println("Test BRANGE Exclusive End succeeded")
}

/**
 * BRANGE between two random absent keys: expects the lexicographic slice
 * when start <= end, and a thrown error when start > end.
 */
def testBrangeExclusiveStartEnd(client: Client, dict: mutable.HashMap[String, mutable.HashMap[String, String]]) {
  for ((outerKey, inner) <- dict) {
    var innerKeys = inner.toList.sortBy(_._1)
    val start = UUID.randomUUID().toString
    val end = UUID.randomUUID().toString
    innerKeys = innerKeys.filter(p => (start <= p._1 && p._1 <= end))
    val target = client.bRange(
      StringToChannelBuffer(outerKey),
      Option(StringToChannelBuffer(start)),
      Option(StringToChannelBuffer(end)))
    if (start > end) {
      require(target.isThrow, "BRANGE failed for " + outerKey + " return should be a throw")
    }
    else {
      validate(outerKey, innerKeys, target.get())
    }
  }
  println("Test BRANGE Exclusive Start End succeeded")
}
def validate(outerKey: String, exp: List[(String, String)], got: Seq[(ChannelBuffer, ChannelBuffer)]) {
require(got.size == exp.size,
"BRANGE failed for " + outerKey + " expected size " + exp.size + " got size " + got.size)
for (i <- 0 until exp.size) {
val expKey = exp(i)._1
val gotKey = CBToString(got(i)._1)
val expVal = exp(i)._2
val gotVal = CBToString(got(i)._2)
require(exp(i)._1 == CBToString(got(i)._1),
"Key mismatch for outerKey " + outerKey + " expected " + expKey + "got " + gotKey)
require(exp(i)._2 == CBToString(got(i)._2),
"Value mismatch for outerKey " + outerKey + " expected " + expVal + "got " + gotVal)
}
}
}
| joshbedo/finagle | finagle-redis/src/test/scala/com/twitter/finagle/redis/integration/BtreeClientSpec.scala | Scala | apache-2.0 | 10,464 |
/**
* Copyright (c) 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.trustedanalytics.sparktk.frame.internal.constructors
import _root_.org.apache.spark.rdd.RDD
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Result
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.trustedanalytics.sparktk.frame.DataTypes.DataType
import org.trustedanalytics.sparktk.frame.DataTypes
import org.apache.spark._
/**
 * Helper object for creating a Spark RDD from an HBase table.
 */
object HbaseHelper extends Serializable {
  /**
   * Reads `tableName` through `TableInputFormat` and converts every row into
   * an `Array[Any]` with one entry per column of `schema`, in schema order.
   *
   * @param sc default spark context
   * @param tableName hBase table to read from
   * @param schema hBase schema for the table above
   * @param startTag optional start row key for the scan (TableInputFormat.SCAN_ROW_START)
   * @param endTag optional stop row key for the scan (TableInputFormat.SCAN_ROW_STOP)
   * @return an RDD of converted hBase values
   */
  def createRdd(sc: SparkContext, tableName: String, schema: Vector[HBaseSchemaArgs], startTag: Option[String], endTag: Option[String]): RDD[Array[Any]] = {
    val hBaseRDD = sc.newAPIHadoopRDD(createConfig(tableName, startTag, endTag),
      classOf[TableInputFormat],
      classOf[ImmutableBytesWritable],
      classOf[Result])
    hbaseRddToRdd(hBaseRDD, schema)
  }

  /**
   * Converts each (rowKey, Result) pair into an array holding one value per
   * schema column; the row key itself is not part of the output.
   */
  def hbaseRddToRdd(hbaseRDD: RDD[(ImmutableBytesWritable, Result)], schema: Vector[HBaseSchemaArgs]): RDD[Array[Any]] =
    hbaseRDD.map {
      case (key, row) => {
        val values = for { element <- schema }
          yield getValue(row, element.columnFamily, element.columnName, element.dataType)
        values.toArray
      }
    }

  /**
   * Get value for cell
   *
   * @param row hBase data
   * @param columnFamily hBase column family
   * @param columnName hBase column name
   * @param dataType internal data type of the cell
   * @return the cell decoded as a string
   */
  private def getValue(row: Result, columnFamily: String, columnName: String, dataType: DataType): Any = {
    // NOTE(review): `dataType` is currently unused -- every cell is decoded
    // with Bytes.toString regardless of the declared type. Confirm whether
    // non-string columns should be converted here instead of downstream.
    val value = row.getValue(Bytes.toBytes(columnFamily), Bytes.toBytes(columnName))
    Bytes.toString(value)
  }

  /**
   * Create initial configuration for hBase reader
   *
   * @param tableName name of hBase table
   * @param startTag optional scan start row; only set when defined
   * @param endTag optional scan stop row; only set when defined
   * @return hBase configuration
   */
  private def createConfig(tableName: String, startTag: Option[String], endTag: Option[String]): Configuration = {
    val conf = HBaseConfiguration.create()
    conf.set(TableInputFormat.INPUT_TABLE, tableName)
    if (startTag.isDefined) {
      conf.set(TableInputFormat.SCAN_ROW_START, startTag.get)
    }
    if (endTag.isDefined) {
      conf.set(TableInputFormat.SCAN_ROW_STOP, endTag.get)
    }
    conf
  }
}
/**
 * Arguments for the schema.
 *
 * Describes one column to extract from an HBase row: which column family and
 * qualifier to read, and the data type the caller declares for the cell.
 *
 * @param columnFamily hbase column family
 * @param columnName hbase column name (qualifier)
 * @param dataType data type for the cell
 */
case class HBaseSchemaArgs(columnFamily: String, columnName: String, dataType: DataType)
| aayushidwivedi01/spark-tk | sparktk-core/src/main/scala/org/trustedanalytics/sparktk/frame/internal/constructors/HbaseHelper.scala | Scala | apache-2.0 | 3,661 |
package stainless
package proof
import stainless.lang._
import stainless.annotation._
import stainless.lang.StaticChecks._
import scala.language.postfixOps
@library
object BoundedQuantifiers {
  // Bounded quantifiers over integer ranges, with Stainless-verified lemmas
  // (`require`/`ensuring`/`holds`). The `decreases` clauses are termination
  // measures and the recursion structure is part of the proofs -- do not
  // restructure without re-running verification.

  /** `p(i)` holds for every `i` in `[0, n)`. */
  def intForall(n: BigInt, p: BigInt => Boolean): Boolean = {
    require(n >= 0)
    decreases(n)

    if (n <= 0) true
    else p(n-1) && intForall(n-1,p)
  }

  /** `p(i)` holds for some `i` in `[0, n)`. */
  def intExists(n: BigInt, p: BigInt => Boolean): Boolean = {
    require(n >= 0)
    decreases(n)

    if (n <= 0) false
    else p(n-1) || intExists(n-1,p)
  }

  /** `p(i, j)` holds for every `i` in `[0, n)` and `j` in `[0, m)`. */
  def intForall2(n: BigInt, m: BigInt, p: (BigInt, BigInt) => Boolean): Boolean = {
    require(n >= 0 && m >= 0)
    decreases(n + m)

    if (n <= 0 || m <= 0) true
    else p(n-1,m-1) && intForall2(n-1, m, p) && intForall2(n, m-1, p)
  }

  /** Instantiation lemma: from `intForall(n, p)` and `0 <= i < n`, conclude `p(i)`. */
  @opaque
  def elimForall(n: BigInt, p: BigInt => Boolean, i: BigInt): Unit = {
    require(0 <= i && i < n && intForall(n, p))
    decreases(n)

    if (n > 0 && i < n-1)
      elimForall(n-1, p, i)

  } ensuring(_ => p(i))

  /** Two-dimensional instantiation lemma for `intForall2`. */
  def elimForall2(n: BigInt, m: BigInt, p: (BigInt, BigInt) => Boolean, i: BigInt, j: BigInt): Boolean = {
    require(0 <= i && i < n && 0 <= j && j < m && intForall2(n, m, p))
    decreases(n + m)

    if (i == n-1 && j == m-1) p(i,j)
    else if (i < n-1) elimForall2(n-1, m, p, i, j) && p(i,j)
    else elimForall2(n, m-1, p, i, j) && p(i,j)
  } holds

  /** Extracts a concrete witness `i` with `p(i)` from `intExists(n, p)`. */
  def elimExists(n: BigInt, p: BigInt => Boolean): BigInt = {
    require(n >= 0 && intExists(n, p))
    decreases(n)

    if (p(n-1)) n-1
    else elimExists(n-1, p)
  } ensuring(res => p(res))

  /** If no `i < n` satisfies `p`, then every `i < n` satisfies `!p`. */
  def notExistsImpliesForall(n: BigInt, p: BigInt => Boolean): Boolean = {
    require(n >= 0 && !intExists(n,p))
    decreases(n)

    if (n <= 0)
      intForall(n,(i: BigInt) => !p(i))
    else
      notExistsImpliesForall(n-1, p) &&
      intForall(n,(i: BigInt) => !p(i))
  } holds

  /** If not every `i < n` satisfies `p`, some `i < n` satisfies `!p`. */
  def notForallImpliesExists(n: BigInt, p: BigInt => Boolean): Boolean = {
    require(n >= 0 && !intForall(n,p))
    decreases(n)

    if (n <= 0) false
    else if (!p(n-1))
      intExists(n, (i: BigInt) => !p(i))
    else
      notForallImpliesExists(n-1,p) &&
      intExists(n, (i: BigInt) => !p(i))
  } holds

  /** A single witness `i` with `p(i)` establishes `intExists(n, p)`. */
  def witnessImpliesExists(n: BigInt, p: BigInt => Boolean, i: BigInt): Boolean = {
    require(0 <= i && i < n && p(i))
    decreases(n)

    if (i == n-1)
      intExists(n,p)
    else
      witnessImpliesExists(n-1, p, i) &&
      intExists(n, p)
  } holds

  /** Successor modulo `n`: wraps `n-1` back to `0`. */
  def increment(i: BigInt, n: BigInt): BigInt = {
    require(0 <= i && i < n)

    if (i < n-1) i+1
    else BigInt(0)
  } ensuring(res => 0 <= res && res < n)

  /** Predecessor modulo `n`: wraps `0` back to `n-1`. */
  def decrement(i: BigInt, n: BigInt): BigInt = {
    require(0 <= i && i < n)

    if (i == 0) n-1
    else i-1
  } ensuring(res => 0 <= res && res < n)
}
| epfl-lara/stainless | frontends/library/stainless/proof/BoundedQuantifiers.scala | Scala | apache-2.0 | 2,729 |
package dotty.tools.languageserver
import org.junit.Test
import org.eclipse.lsp4j.SymbolKind
import dotty.tools.languageserver.util.Code._
/** Tests for the language server's `textDocument/documentSymbol` handling.
  *
  * Each test builds one or more sources containing position markers (m1, m2,
  * ...) and asserts that the symbols reported for the document match the
  * expected name, kind, and (optionally) container between marker pairs.
  */
class DocumentSymbolTest {

  // Symbols should still be reported when the tree contains parse errors.
  @Test def withErroneousTree: Unit =
    code"${m1}class Foo { def }$m2"
      .withSource.documentSymbol(m1, (m1 to m2).symInfo("Foo", SymbolKind.Class))

  @Test def documentSymbol0: Unit =
    code"${m1}class Foo$m2".withSource.documentSymbol(m1, (m1 to m2).symInfo("Foo", SymbolKind.Class))

  // Two classes in one source: both symbols are reported.
  @Test def documentSymbol1: Unit =
    code"${m1}class Foo$m2; ${m3}class Bar$m4".withSource
      .documentSymbol(m1, (m1 to m2).symInfo("Foo", SymbolKind.Class), (m3 to m4).symInfo("Bar", SymbolKind.Class))

  // Symbols are scoped per document: each source only reports its own class.
  @Test def documentSymbol3: Unit = {
    withSources(
      code"${m1}class Foo$m2",
      code"${m3}class Bar$m4"
    ) .documentSymbol(m1, (m1 to m2).symInfo("Foo", SymbolKind.Class))
      .documentSymbol(m3, (m3 to m4).symInfo("Bar", SymbolKind.Class))
  }

  @Test def documentSymbolShowModule: Unit = {
    code"""${m1}object Foo${m2}""".withSource
      .documentSymbol(m1, (m1 to m2).symInfo("Foo", SymbolKind.Module))
  }

  // A class and its companion object are reported as separate symbols.
  @Test def documentSymbolShowClassAndCompanion: Unit = {
    code"""${m1}object Foo${m2}
          |${m3}class Foo${m4}""".withSource
      .documentSymbol(m1, (m1 to m2).symInfo("Foo", SymbolKind.Module),
        (m3 to m4).symInfo("Foo", SymbolKind.Class))
  }

  // Case-class fields appear as Field symbols contained in the class.
  @Test def documentSymbolSynthetic: Unit = {
    code"""${m1}case class Foo(${m3}x: Int${m4})${m2}""".withSource
      .documentSymbol(m1, (m1 to m2).symInfo("Foo", SymbolKind.Class),
        (m3 to m4).symInfo("x", SymbolKind.Field, "Foo"))
  }

  // Enum with parameterized cases: type params, cases, and case fields.
  @Test def documentSymbolEnumADT: Unit = {
    code"""${m1}enum Option[${m3}+T${m4}] {
           ${m5}case Some(${m7}x: T${m8})${m6}
           ${m9}case None${m10}
          }${m2}""".withSource
      .documentSymbol(m1, (m1 to m2).symInfo("Option", SymbolKind.Enum),
        (m3 to m4).symInfo("T", SymbolKind.TypeParameter, "Option"),
        (m5 to m6).symInfo("Some", SymbolKind.EnumMember, "Option"),
        (m7 to m8).symInfo("x", SymbolKind.Field, "Some"),
        (m9 to m10).symInfo("None", SymbolKind.EnumMember, "Option"))
  }

  // Enum with simple cases extending the enum class.
  @Test def documentSymbolEnum: Unit = {
    code"""${m1}enum Color(${m3}val rgb: Int${m4}) {
           ${m5}case Red extends Color(0xFF0000)${m6}
           ${m7}case Green extends Color(0x00FF00)${m8}
           ${m9}case Blue extends Color(0x0000FF)${m10}
          }${m2}""".withSource
      .documentSymbol(m1, (m1 to m2).symInfo("Color", SymbolKind.Enum),
        (m3 to m4).symInfo("rgb", SymbolKind.Field, "Color"),
        (m5 to m6).symInfo("Red", SymbolKind.EnumMember, "Color"),
        (m7 to m8).symInfo("Green", SymbolKind.EnumMember, "Color"),
        (m9 to m10).symInfo("Blue", SymbolKind.EnumMember, "Color"))
  }

  @Test def documentSymbolTopLevelDef: Unit =
    code"${m1}def foo(): Unit = { }${m2}".withSource.documentSymbol(m1, (m1 to m2).symInfo("foo", SymbolKind.Method))

  @Test def documentSymbolTrait: Unit =
    code"${m1}trait Foo(${m3}val x: Int${m4})${m2}".withSource.documentSymbol(m1, (m1 to m2).symInfo("Foo", SymbolKind.Interface),
      (m3 to m4).symInfo("x", SymbolKind.Field, "Foo"))

  // Definitions nested inside a method report the method as their container.
  @Test def documentSymbolLocalDef: Unit =
    code"""${m1}def foo(): Unit = {
            ${m3}def bar(): Unit = { }${m4}
            ${m5}val x: Int = 0${m6}
          }${m2}""".withSource.documentSymbol(m1, (m1 to m2).symInfo("foo", SymbolKind.Method),
      (m3 to m4).symInfo("bar", SymbolKind.Method, "foo"),
      (m5 to m6).symInfo("x", SymbolKind.Field, "foo") )

  @Test def documentSymbolTypeFields: Unit =
    code"""${m1}class Foo {
            ${m3}type T${m4}
          }${m2}""".withSource.documentSymbol(m1, (m1 to m2).symInfo("Foo", SymbolKind.Class),
      (m3 to m4).symInfo("T", SymbolKind.TypeParameter, "Foo"))

}
| som-snytt/dotty | language-server/test/dotty/tools/languageserver/DocumentSymbolTest.scala | Scala | apache-2.0 | 4,139 |
package blended.streams.dispatcher.internal.builder
import akka.NotUsed
import akka.actor.{ActorRef, ActorSystem}
import akka.stream.scaladsl.{Flow, Source}
import akka.stream.{ActorMaterializer, Materializer}
import blended.jms.utils.IdAwareConnectionFactory
import blended.streams.dispatcher.internal.ResourceTypeRouterConfig
import blended.streams.jms._
import blended.streams.message.FlowEnvelope
import blended.streams.transaction.{FlowTransactionStream, _}
import blended.streams.{StreamController, StreamControllerConfig}
import blended.util.logging.Logger
import scala.util.Try
/** Consumes transaction events from the internal JMS provider's transaction
  * destination, drives them through a [[FlowTransactionStream]] (which updates
  * the transaction manager) and emits CBEs via [[CbeSendFlow]] when enabled.
  *
  * @param headerConfig  names of the flow-control headers
  * @param tMgr          transaction manager actor fed by the stream
  * @param dispatcherCfg dispatcher configuration (provider registry, raw config)
  * @param internalCf    connection factory of the internal JMS provider
  * @param log           logger shared by all stages of this stream
  */
class TransactionOutbound(
  headerConfig : FlowHeaderConfig,
  tMgr : ActorRef,
  dispatcherCfg : ResourceTypeRouterConfig,
  internalCf: IdAwareConnectionFactory,
  log: Logger
)(implicit system : ActorSystem, bs: DispatcherBuilderSupport) extends JmsStreamSupport {

  private implicit val materializer : Materializer = ActorMaterializer()

  // Provider entry for the internal connection factory; `mandatoryProvider`
  // is expected to fail fast if vendor/provider are not configured.
  private val config = dispatcherCfg.providerRegistry.mandatoryProvider(internalCf.vendor, internalCf.provider)

  // JMS consumer on the provider's transaction destination; wrapped in Try so
  // a missing/invalid configuration surfaces when the source is first used.
  private [builder] val jmsSource : Try[Source[FlowEnvelope, NotUsed]] = Try {

    val srcSettings = JMSConsumerSettings(
      log = log,
      connectionFactory = internalCf,
    )
      .withSessionCount(3)
      .withDestination(Some(config.get.transactions))
      .withAcknowledgeMode(AcknowledgeMode.ClientAcknowledge)

    jmsConsumer(
      name = "transactionOutbound",
      settings = srcSettings,
      headerConfig = headerConfig
    )
  }

  /** Wires source -> transaction stream -> CBE send flow and materializes the
    * whole stream under a [[StreamController]] actor, which is returned.
    */
  def build() : ActorRef = {

    val sendFlow = new CbeSendFlow(
      headerConfig = headerConfig,
      dispatcherCfg = dispatcherCfg,
      internalCf = internalCf,
      log = log
    ).build()

    val transactionStream : Flow[FlowEnvelope, FlowEnvelope, NotUsed] =
      new FlowTransactionStream(
        cfg = headerConfig,
        tMgr = tMgr,
        log = log,
        // The default for CBE is false here
        // all messages that have run through the dispatcher will have the correct CBE setting
        performSend = { env =>
          env.header[Boolean](bs.headerCbeEnabled).getOrElse(false) &&
          FlowTransactionState.withName(
            env.header[String](bs.headerConfig.headerState).getOrElse(FlowTransactionState.Updated.toString())
          ) != FlowTransactionState.Updated
        },
        sendFlow
      ).build()

    val src : Source[FlowEnvelope, NotUsed] = jmsSource.get.via(transactionStream)

    val streamCfg = StreamControllerConfig.fromConfig(dispatcherCfg.rawConfig).get
      .copy(
        name = "transactionOut",
        source = src
      )

    system.actorOf(StreamController.props(streamCfg))
  }
}
| lefou/blended | blended.streams.dispatcher/src/main/scala/blended/streams/dispatcher/internal/builder/TransactionOutbound.scala | Scala | apache-2.0 | 2,660 |
/* Code Pulse: a real-time code coverage tool, for more information, see <http://code-pulse.com/>
*
* Copyright (C) 2014-2017 Code Dx, Inc. <https://codedx.com/>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.codedx.codepulse.hq.protocol
import java.io.DataOutputStream
import com.codedx.codepulse.agent.common.message.{MessageProtocol, MessageProtocolV1}
import com.codedx.codepulse.hq.protocol.ControlMessage.Configuration
/** A convenient singleton instance of the `ControlMessageSenderV1` class.
 * Using this object will help avoid creating new instances of the class
 * that would otherwise be needed.
 */
object ControlMessageSenderV1 extends ControlMessageSenderV1

/** A [[ControlMessageSender]] implementation that uses MessageProtocol version 1
 * to send messages.
 */
class ControlMessageSenderV1 extends ControlMessageSenderBase {
  // NOTE(review): `protocol` is a publicly mutable var; nothing in this class
  // reassigns it, so it could likely be a val -- confirm no external code
  // swaps the protocol before tightening.
  var protocol: MessageProtocol = new MessageProtocolV1

  /** Serializes `cfg` to its byte representation and writes it to `out`
   * using the v1 wire protocol.
   */
  def writeConfigurationMessage(out: DataOutputStream, cfg: Configuration[_]) { protocol.writeConfiguration(out, cfg.toByteArray) }
}
/*
* The MIT License (MIT)
*
* Copyright (c) 2016 Algolia
* http://www.algolia.com/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package algolia.definitions
import algolia.http._
import algolia.objects.{ApiKey, RequestOptions}
import org.json4s.Formats
import org.json4s.native.Serialization._
/** Definition of the "get API key" call.
  *
  * @param keyName        identifier of the API key to fetch
  * @param indexName      deprecated per-index scoping of the key
  * @param requestOptions extra options forwarded with the HTTP request
  */
case class GetKeyDefinition(
    keyName: String,
    indexName: Option[String] = None,
    requestOptions: Option[RequestOptions] = None
) extends Definition {

  type T = GetKeyDefinition

  @deprecated("use without index", "1.27.0")
  def from(indexName: String): GetKeyDefinition =
    copy(indexName = Some(indexName))

  override def options(requestOptions: RequestOptions): GetKeyDefinition =
    copy(requestOptions = Some(requestOptions))

  override private[algolia] def build(): HttpPayload = {
    // Index-scoped keys live under /1/indexes/<index>/keys, global keys under /1/keys.
    val path = indexName match {
      case Some(index) => Seq("1", "indexes", index, "keys", keyName)
      case None        => Seq("1", "keys", keyName)
    }

    HttpPayload(
      GET,
      path,
      isSearch = false,
      requestOptions = requestOptions
    )
  }
}
/** Definition of the "add API key" call.
  *
  * @param key            the key (ACLs, validity, ...) to create
  * @param indexName      deprecated per-index scoping of the key
  * @param requestOptions extra options forwarded with the HTTP request
  */
case class AddKeyDefinition(
    key: ApiKey,
    indexName: Option[String] = None,
    requestOptions: Option[RequestOptions] = None
)(implicit val formats: Formats)
    extends Definition {

  type T = AddKeyDefinition

  @deprecated("use without index", "1.27.0")
  def to(indexName: String): AddKeyDefinition =
    copy(indexName = Some(indexName))

  override def options(requestOptions: RequestOptions): AddKeyDefinition =
    copy(requestOptions = Some(requestOptions))

  override private[algolia] def build(): HttpPayload = {
    // Index-scoped keys live under /1/indexes/<index>/keys, global keys under /1/keys.
    val path = indexName match {
      case Some(index) => Seq("1", "indexes", index, "keys")
      case None        => Seq("1", "keys")
    }

    HttpPayload(
      POST,
      path,
      body = Some(write(key)),
      isSearch = false,
      requestOptions = requestOptions
    )
  }
}
/** Definition of the "restore API key" call: re-enables a previously
  * deleted key identified by its value.
  *
  * @param key            value of the key to restore
  * @param requestOptions extra options forwarded with the HTTP request
  */
case class RestoreKeyDefinition(
    key: String,
    requestOptions: Option[RequestOptions] = None
)(implicit val formats: Formats)
    extends Definition {

  type T = RestoreKeyDefinition

  override def options(requestOptions: RequestOptions): RestoreKeyDefinition =
    copy(requestOptions = Some(requestOptions))

  override private[algolia] def build(): HttpPayload =
    HttpPayload(
      POST,
      Seq("1", "keys", key, "restore"),
      body = None,
      isSearch = false,
      requestOptions = requestOptions
    )
}
/** Definition of the "delete API key" call.
  *
  * @param keyName        identifier of the API key to delete
  * @param indexName      deprecated per-index scoping of the key
  * @param requestOptions extra options forwarded with the HTTP request
  */
case class DeleteKeyDefinition(
    keyName: String,
    indexName: Option[String] = None,
    requestOptions: Option[RequestOptions] = None
) extends Definition {

  type T = DeleteKeyDefinition

  @deprecated("use without index", "1.27.0")
  def from(indexName: String): DeleteKeyDefinition =
    copy(indexName = Some(indexName))

  override def options(requestOptions: RequestOptions): DeleteKeyDefinition =
    copy(requestOptions = Some(requestOptions))

  override private[algolia] def build(): HttpPayload = {
    // Index-scoped keys live under /1/indexes/<index>/keys, global keys under /1/keys.
    val path = indexName match {
      case Some(index) => Seq("1", "indexes", index, "keys", keyName)
      case None        => Seq("1", "keys", keyName)
    }

    HttpPayload(
      DELETE,
      path,
      isSearch = false,
      requestOptions = requestOptions
    )
  }
}
/** Definition of the "update API key" call.
  *
  * @param keyName        identifier of the API key to update
  * @param key            the new key contents, set via [[`with`]]
  * @param indexName      deprecated per-index scoping of the key
  * @param requestOptions extra options forwarded with the HTTP request
  */
case class UpdateKeyDefinition(
    keyName: String,
    key: Option[ApiKey] = None,
    indexName: Option[String] = None,
    requestOptions: Option[RequestOptions] = None
)(implicit val formats: Formats)
    extends Definition {

  type T = UpdateKeyDefinition

  /** Sets the new value of the key. */
  def `with`(key: ApiKey): UpdateKeyDefinition = copy(key = Some(key))

  @deprecated("use without index", "1.27.0")
  def from(indexName: String): UpdateKeyDefinition =
    copy(indexName = Some(indexName))

  override def options(requestOptions: RequestOptions): UpdateKeyDefinition =
    copy(requestOptions = Some(requestOptions))

  override private[algolia] def build(): HttpPayload = {
    val path = if (indexName.isEmpty) {
      Seq("1", "keys", keyName)
    } else {
      Seq("1", "indexes", indexName.get, "keys", keyName)
    }

    HttpPayload(
      PUT,
      path,
      // Serialize the ApiKey itself, not the Option wrapper: the previous
      // `Some(write(key))` serialized `key: Option[ApiKey]`, which produced
      // an empty body when no key had been set via `with`. An absent key now
      // yields no request body at all (consistent with the other definitions).
      body = key.map(k => write(k)),
      isSearch = false,
      requestOptions = requestOptions
    )
  }
}
/** Definition of the "list API keys" call.
  *
  * @param indexName      deprecated per-index scoping of the listing
  * @param requestOptions extra options forwarded with the HTTP request
  */
case class ListKeysDefinition(
    indexName: Option[String] = None,
    requestOptions: Option[RequestOptions] = None
) extends Definition {

  type T = ListKeysDefinition

  override def options(requestOptions: RequestOptions): ListKeysDefinition =
    copy(requestOptions = Some(requestOptions))

  override private[algolia] def build(): HttpPayload = {
    // Index-scoped keys live under /1/indexes/<index>/keys, global keys under /1/keys.
    val path = indexName match {
      case Some(index) => Seq("1", "indexes", index, "keys")
      case None        => Seq("1", "keys")
    }

    HttpPayload(
      GET,
      path,
      isSearch = false,
      requestOptions = requestOptions
    )
  }
}
| algolia/algoliasearch-client-scala | src/main/scala/algolia/definitions/KeyDefinition.scala | Scala | mit | 5,800 |
/*
* Copyright 2014 Kevin Herron
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.digitalpetri.modbus.master
import java.util
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{ConcurrentHashMap, TimeUnit}
import com.codahale.metrics._
import com.digitalpetri.modbus.layers.TcpPayload
import com.digitalpetri.modbus.{ExceptionResponse, ModbusRequest, ModbusResponse, ModbusResponseException}
import io.netty.channel._
import io.netty.util.{Timeout, TimerTask}
import org.slf4j.LoggerFactory
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.{Failure, Success}
/** Modbus/TCP master that correlates requests and responses via the TCP
  * transaction id, tracks per-request timeouts on a wheel timer, and exposes
  * Dropwizard metrics for request/response/timeout accounting.
  */
class ModbusTcpMaster(config: ModbusTcpMasterConfig) extends TcpServiceResponseHandler {

  private implicit val executionContext = ExecutionContext.fromExecutor(config.executor)

  // Per-instance logger name when an instanceId is configured, so multiple
  // masters can be told apart in the logs.
  private val logger = config.instanceId match {
    case Some(instanceId) => LoggerFactory.getLogger(s"${getClass.getName}.$instanceId")
    case None => LoggerFactory.getLogger(getClass)
  }

  // Incremented by the decoding layers, hence the wider private[master] scope.
  private[master] val decodingErrorCount = new Counter()
  private[master] val unsupportedPduCount = new Counter()

  private val requestCount = new Counter()
  private val responseCount = new Counter()
  private val lateResponseCount = new Counter()
  private val timeoutCount = new Counter()
  private val responseTime = new Timer()

  private val metrics = Map[String, Metric](
    metricName("request-count") -> requestCount,
    metricName("response-count") -> responseCount,
    metricName("late-response-count") -> lateResponseCount,
    metricName("timeout-count") -> timeoutCount,
    metricName("response-time") -> responseTime,
    metricName("decoding-error-count") -> decodingErrorCount,
    metricName("unsupported-pdu-count") -> unsupportedPduCount)

  // Outstanding requests keyed by transaction id. Each entry carries the
  // caller's promise, its timeout handle, and the running response timer.
  // NOTE(review): ids wrap at Short range, so more than 65536 concurrently
  // outstanding requests would collide -- presumably impossible in practice.
  private val promises = new ConcurrentHashMap[Short, (Promise[ModbusResponse], Timeout, Timer.Context)]()

  private val channelManager = new ModbusChannelManager(this, config)
  private val transactionId = new AtomicInteger(0)

  /** Sends `request` to the given unit and returns a future completed with the
    * typed response, a [[ModbusResponseException]] on a Modbus exception
    * response, or a timeout failure after `config.timeout`.
    */
  def sendRequest[T <: ModbusResponse](request: ModbusRequest, unitId: Short = 0): Future[T] = {
    val promise = Promise[ModbusResponse]()

    // The channel manager either hands back a live channel or a future one
    // (while a connection is still being established).
    channelManager.getChannel match {
      case Left(fch) => fch.onComplete {
        case Success(ch) => writeToChannel(ch, promise, request, unitId)
        case Failure(ex) => promise.failure(ex)
      }
      case Right(ch) => writeToChannel(ch, promise, request, unitId)
    }

    promise.future.transform(r => r.asInstanceOf[T], ex => ex)
  }

  /** Tears down the connection and forgets all pending requests.
    * NOTE(review): pending promises are dropped without being failed, so their
    * callers only ever see the previously scheduled timeout -- confirm intended.
    */
  def disconnect(): Unit = {
    channelManager.disconnect()
    promises.clear()
  }

  /** Writes a request to the channel and flushes it. */
  private def writeToChannel(channel: Channel,
                             promise: Promise[ModbusResponse],
                             request: ModbusRequest,
                             unitId: Short): Unit = {

    val txId = transactionId.getAndIncrement.toShort

    // Register the timeout before writing so a very fast failure still finds
    // the entry in `promises`.
    val timeout = config.wheelTimer.newTimeout(
      new TimeoutTask(txId),
      config.timeout.toMillis,
      TimeUnit.MILLISECONDS)

    promises.put(txId, (promise, timeout, responseTime.time()))

    channel.writeAndFlush(TcpPayload(txId, unitId, request))
    requestCount.inc()
  }

  /** Completes the matching pending promise for an incoming response.
    * `ConcurrentHashMap.remove` returns null when nothing was pending for the
    * id (already timed out or unknown), which the `case null` branch counts
    * as a late response.
    */
  def onServiceResponse(service: TcpServiceResponse): Unit = {
    promises.remove(service.transactionId) match {
      case (p, t, c) =>
        responseCount.inc()
        c.stop()
        t.cancel()

        service.response match {
          case ex: ExceptionResponse => p.failure(new ModbusResponseException(ex))
          case response: ModbusResponse => p.success(response)
        }

      case null =>
        lateResponseCount.inc()
        logger.debug(s"Received response for unknown transactionId: $service")
    }
  }

  /** All metrics of this master as a Dropwizard [[MetricSet]]. */
  def getMetricSet: MetricSet = new MetricSet {
    import scala.collection.JavaConversions._
    // JavaConversions implicitly converts the Scala map to java.util.Map.
    def getMetrics: util.Map[String, Metric] = metrics
  }

  /** JMX-style accessor object over the same metric instances. */
  def getMetricBean: MetricBean = {
    new MetricBean(
      requestCount,
      responseCount,
      lateResponseCount,
      timeoutCount,
      responseTime,
      decodingErrorCount,
      unsupportedPduCount)
  }

  // Qualifies metric names with the class and optional instance id.
  private def metricName(name: String) = {
    MetricRegistry.name(classOf[ModbusTcpMaster], config.instanceId.getOrElse(""), name)
  }

  /** Fails the pending promise for `txId` when its timeout fires; a missing
    * entry means the response arrived just before the timer.
    */
  private class TimeoutTask(txId: Short) extends TimerTask {
    def run(timeout: Timeout): Unit = {
      promises.remove(txId) match {
        case (p, t, c) =>
          timeoutCount.inc()
          p.failure(new Exception(s"request timed out after ${config.timeout.toMillis}ms"))

        case null => // Just made it...
      }
    }
  }

}
/** Bean-style accessor over the master's metric instances, suitable for JMX
  * or other reflective consumers.
  */
class MetricBean(requestCount: Counter,
                 responseCount: Counter,
                 lateResponseCount: Counter,
                 timeoutCount: Counter,
                 responseTime: Timer,
                 decodingErrorCount: Counter,
                 unsupportedPduCount: Counter) {

  def getRequestCount: Counter = requestCount
  def getResponseCount: Counter = responseCount
  /** Responses that arrived after their request was no longer pending.
    * (Added: the constructor accepted this counter but previously exposed no
    * getter for it, making the value unreachable.)
    */
  def getLateResponseCount: Counter = lateResponseCount
  def getTimeoutCount: Counter = timeoutCount
  def getResponseTime: Timer = responseTime
  // NOTE(review): name kept as "...Counter" (vs. the "...Count" pattern of the
  // other getters) to avoid breaking existing callers.
  def getDecodingErrorCounter: Counter = decodingErrorCount
  def getUnsupportedPduCount: Counter = unsupportedPduCount
}
| digitalpetri/scala-modbus-tcp | modbus-master/src/main/scala/com/digitalpetri/modbus/master/ModbusTcpMaster.scala | Scala | apache-2.0 | 5,785 |
package gapt.formats.tip.transformation
import gapt.formats.tip.parser.TipSmtAnd
import gapt.formats.tip.parser.TipSmtExpression
import gapt.formats.tip.parser.TipSmtForall
import gapt.formats.tip.parser.TipSmtFunctionDefinition
import gapt.formats.tip.parser.TipSmtIte
import gapt.formats.tip.parser.TipSmtMutualRecursiveFunctionDefinition
import gapt.formats.tip.parser.TipSmtProblem
object moveUniversalQuantifiersInwards extends TipSmtProblemTransformation {
  /** Rewrites all function definitions of `problem`; see
    * [[MoveUniversalQuantifiersInwardsTransformation]] for details.
    */
  override def transform( problem: TipSmtProblem ): TipSmtProblem =
    new MoveUniversalQuantifiersInwardsTransformation( problem )()
}
/**
 * Moves outermost universal quantifiers in function definitions inwards.
 *
 * Universal quantifiers are distributed over conjuncts. This transformation
 * may result in redundant universal quantifiers which can be eliminated in
 * a next step.
 *
 * @param problem The problem whose function definitions are subject to the
 *                transformation described above.
 */
class MoveUniversalQuantifiersInwardsTransformation( problem: TipSmtProblem ) {

  /** Applies the transformation to every (mutually) recursive function
    * definition of the problem; all other definitions are left untouched.
    */
  def apply(): TipSmtProblem = {
    problem.copy( definitions = problem.definitions map {
      case fun @ TipSmtFunctionDefinition( _, _, _, _, _ ) =>
        transformFunction( fun )
      case funDefs @ TipSmtMutualRecursiveFunctionDefinition( _ ) =>
        funDefs.copy( functions = funDefs.functions map transformFunction )
      case definition => definition
    } )
  }

  // Rewrites the body of a single function definition.
  private def transformFunction(
    fun: TipSmtFunctionDefinition ): TipSmtFunctionDefinition =
    fun.copy( body = pushForallInwards( fun.body ) )

  // Distributes an outermost universal quantifier over a conjunction and
  // recurses into conjunctions and if-then-else branches; every other
  // expression is returned unchanged.
  private def pushForallInwards(
    expression: TipSmtExpression ): TipSmtExpression =
    expression match {
      case TipSmtAnd( conjuncts ) =>
        TipSmtAnd( conjuncts map pushForallInwards )
      case TipSmtForall( variables, TipSmtAnd( conjuncts ) ) =>
        TipSmtAnd( conjuncts
          .map { TipSmtForall( variables, _ ) }
          .map { pushForallInwards } )
      case forall @ TipSmtForall( _, _ ) =>
        forall
      case TipSmtIte( cond, ifTrue, ifFalse ) =>
        TipSmtIte(
          cond,
          pushForallInwards( ifTrue ),
          pushForallInwards( ifFalse ) )
      case other => other
    }
}
| gapt/gapt | core/src/main/scala/gapt/formats/tip/transformation/moveUniversalQuantifiersInwards.scala | Scala | gpl-3.0 | 2,824 |
/**
 * See <a href="https://www.codeeval.com/open_challenges/51/">Closest Pair</a>
 *
 * Reads blocks of 2-D points ("x y" per line, each block preceded by a count
 * line containing no space) and prints the minimal Euclidean distance between
 * any two points of a block, or "INFINITY" if no pair exists.
 */
object ClosestPair extends Challenge {
  val lines = scala.io.Source.fromFile(args(0)).getLines().filter(_.length > 0)

  import scala.collection.mutable.ListBuffer

  // Count lines (no space) start a fresh buffer; coordinate lines are added
  // to the most recently started buffer. Assuming the file starts with a
  // count line, the buffer it opens collects nothing, hence the .tail.
  val input = lines.foldRight(List.empty[ListBuffer[(Int, Int)]]) {
    (line, acc) =>
      val n = line.indexOf(' ')
      if (n >= 0) {
        acc.head += line.take(n).toInt -> line.drop(n + 1).toInt
        acc
      }
      else ListBuffer.empty[(Int, Int)] :: acc
  }.tail.map(_.toIndexedSeq)

  input.map {
    case points => combine(points).foldLeft(Long.MaxValue) {
      case (min, (p1, p2)) =>
        val x = p1._1 - p2._1
        val y = p1._2 - p2._2
        // Compare squared distances in Long (avoids Int overflow); take the
        // square root only once, when formatting the result.
        val distance = x.toLong * x + y.toLong * y
        if (distance < min) distance else min
    }
  }.map {
    case Long.MaxValue => "INFINITY"
    case min => "%1.4f" format Math.sqrt(min)
  } foreach println

  /** All unordered index pairs of points.
    * Bug fix: the outer loop previously started at index 1, so the point at
    * index 0 was never paired with any other point.
    */
  def combine(points: Seq[(Int, Int)]) =
    for (p1 <- 0 until points.size; p2 <- p1 + 1 until points.size) yield (points(p1), points(p2))
}
package models.db
import models.other.MissionWithDeckId
import scalikejdbc._
import com.ponkotuy.data
import util.scalikejdbc.BulkInsert._
/**
 * A deck (fleet) belonging to a user.
 *
 * @param id       deck id within the user's fleet list
 * @param memberId id of the owning user
 * @param name     display name of the deck
 * @param created  creation time in epoch milliseconds (set in DeckPort.create)
 *
 * @author ponkotuy
 * Date: 2014/03/04.
 */
case class DeckPort(id: Int, memberId: Long, name: String, created: Long)
object DeckPort extends SQLSyntaxSupport[DeckPort] {
  // Row mappers for scalikejdbc's syntax providers.
  def apply(x: SyntaxProvider[DeckPort])(rs: WrappedResultSet): DeckPort = apply(x.resultName)(rs)
  def apply(x: ResultName[DeckPort])(rs: WrappedResultSet): DeckPort = autoConstruct(rs, x)

  val dp = DeckPort.syntax("dp")

  /** Looks up a single deck by owner and deck id. */
  def find(memberId: Long, deckId: Int)(implicit session: DBSession = autoSession): Option[DeckPort] = {
    withSQL {
      select.from(DeckPort as dp)
        .where.eq(dp.memberId, memberId).and.eq(dp.id, deckId)
    }.map(DeckPort(dp)).single().apply()
  }

  /** All decks matching an arbitrary where-clause. */
  def findAllBy(where: SQLSyntax)(implicit session: DBSession = autoSession): List[DeckPort] = withSQL {
    select.from(DeckPort as dp).where(where)
  }.map(DeckPort(dp)).list().apply()

  /** All decks of one user. */
  def findAllByUser(memberId: Long)(implicit session: DBSession = autoSession): List[DeckPort] =
    findAllBy(sqls.eq(dp.memberId, memberId))

  /** Inserts one deck: creates or clears its mission depending on the incoming
    * data, bulk-inserts the deck's ships, then inserts the deck row itself.
    */
  def create(dp: data.DeckPort, memberId: Long)(
      implicit session: DBSession = DeckPort.autoSession): Unit = {
    val created = System.currentTimeMillis()
    dp.mission match {
      case Some(mission) => Mission.create(mission, memberId, dp.id, created)
      case None => Mission.deleteByDeck(memberId, dp.id)
    }
    DeckShip.bulkInsert(dp.id, memberId, dp.ships)
    applyUpdate {
      insert.into(DeckPort).namedValues(
        column.id -> dp.id, column.memberId -> memberId,
        column.name -> dp.name, column.created -> created
      )
    }
  }

  /** Bulk-inserts a full fleet snapshot: missions (plus their history), deck
    * ships, and finally all deck rows in one multi-value insert.
    */
  def bulkInsertEntire(dps: Seq[data.DeckPort], memberId: Long)(
      implicit session: DBSession = DeckPort.autoSession): Unit = {
    if(dps.nonEmpty) {
      val created = System.currentTimeMillis()
      // Mission
      val missions = dps.flatMap(d => d.mission.map(MissionWithDeckId(_, d.id, d.ships)))
      Mission.bulkInsert(missions, memberId, created)
      missions.foreach { mission => MissionHistory.createFromWithDeckId(mission, memberId, created) }
      // DeckShip
      dps.foreach { d => DeckShip.bulkInsert(d.id, memberId, d.ships) }
      // Main
      applyUpdate {
        insert.into(DeckPort)
          .columns(column.id, column.memberId, column.name, column.created)
          .multiValues(dps.map(_.id), Seq.fill(dps.size)(memberId), dps.map(_.name), Seq.fill(dps.size)(created))
      }
    }
  }

  /** Deletes the user's decks together with all dependent tables
    * (missions and deck ships) in one go.
    */
  def deleteByUser(memberId: Long)(
      implicit session: DBSession = DeckPort.autoSession): Unit = {
    Mission.deleteByUser(memberId)
    DeckShip.deleteByUser(memberId)
    applyUpdate { delete.from(DeckPort).where.eq(DeckPort.column.memberId, memberId) }
  }
}
| b-wind/MyFleetGirls | server/app/models/db/DeckPort.scala | Scala | mit | 2,862 |
/*
* Copyright (c) 2012 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless
/**
 * Representation of an isomorphism between a type (typically a case class) and an `HList`.
 *
 * `to` and `from` are expected to be mutual inverses: `from(to(t)) == t` and `to(from(u)) == u`.
 */
trait Iso[T, U] { self =>
  /** Converts a value of `T` to its `U`-side representation. */
  def to(t : T) : U
  /** Converts back from the `U`-side representation to `T`. */
  def from(u : U) : T
  /** The inverse isomorphism, obtained by swapping `to` and `from`. */
  def reverse : Iso[U, T] = new Iso[U, T] {
    def to(u : U) : T = self.from(u)
    def from(t : T) : U = self.to(t)
    // Reversing twice yields the original instance rather than another wrapper.
    override def reverse = self
  }
}
trait LowPriorityIso {
  import Functions._
  // Trivial isomorphism between a type and itself. Lives in this low-priority trait so
  // the more specific instances in the companion object win implicit resolution.
  implicit def identityIso[T] = new Iso[T, T] {
    def to(t : T) : T = t
    def from(t : T) : T = t
  }
  // General case-class support: builds an Iso from a companion's apply/unapply pair.
  // NOTE(review): `unapply(cc).get` assumes unapply always succeeds for values of CC.
  def hlist[CC, C, T <: Product, L <: HList](apply : C, unapply : CC => Option[T])
    (implicit fhl : FnHListerAux[C, L => CC], hl : HListerAux[T, L]) =
      new Iso[CC, L] {
        val ctor = apply.hlisted
        val dtor = (cc : CC) => hl(unapply(cc).get)
        def to(t : CC) : L = dtor(t)
        def from(l : L) : CC = ctor(l)
      }
}
object Iso extends LowPriorityIso {
  import Functions._
  import Tuples._
  // Special case for one-element cases classes because their unapply result types
  // are Option[T] rather than Option[Tuple1[T]] which would be required to fit
  // the general case.
  def hlist[CC, T](apply : T => CC, unapply : CC => Option[T]) =
    new Iso[CC, T :: HNil] {
      val ctor = apply.hlisted
      // NOTE(review): `.get` assumes unapply always succeeds for values of CC.
      val dtor = (cc : CC) => unapply(cc).get :: HNil
      def to(t : CC) : T :: HNil = dtor(t)
      def from(l : T :: HNil) : CC = ctor(l)
    }
  // Isomorphism between a tuple and the HList with the same element types.
  implicit def tupleHListIso[T <: Product, L <: HList](implicit hl : HListerAux[T, L], uhl : TuplerAux[L, T]) =
    new Iso[T, L] {
      val ctor = uhl.apply _
      val dtor = hl.apply _
      def to(t : T) : L = dtor(t)
      def from(l : L) : T = ctor(l)
    }
  // Isomorphism between an ordinary function type F and its `L => R` (HList-argument) form.
  implicit def fnHListFnIso[F, L <: HList, R](implicit hl : FnHListerAux[F, L => R], unhl : FnUnHListerAux[L => R, F]) =
    new Iso[F, L => R] {
      def to(f : F) : L => R = hl(f)
      def from(l : L => R) = unhl(l)
    }
}
// vim: expandtab:ts=2:sw=2
| mpilquist/shapeless | core/src/main/scala/shapeless/iso.scala | Scala | apache-2.0 | 2,517 |
package com.twitter.zipkin.collector.builder
import com.twitter.finagle.Filter
import com.twitter.finagle.builder.Server
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.tracing.Tracer
import com.twitter.zipkin.builder.Builder
import com.twitter.zipkin.collector.WriteQueue
import com.twitter.zipkin.common.Span
import com.twitter.zipkin.storage.Store
import java.net.InetSocketAddress
/**
 * Specifies a builder for the input interface of a Zipkin collector
 * @tparam T the server's input type; converted to a [[Span]] by `filter`
 */
trait CollectorInterface[T]
  extends Builder[(WriteQueue[T], Seq[Store], InetSocketAddress, StatsReceiver, Tracer) => Server] {
  /**
   * Finagle Filter that converts the server's input type to a Span
   */
  val filter: Filter[T, Unit, Span, Unit]
}
| srijs/zipkin | zipkin-collector/src/main/scala/com/twitter/zipkin/collector/builder/CollectorInterface.scala | Scala | apache-2.0 | 757 |
/*
* Copyright 2014 Xored Software, Inc.
* Copyright 2015 Gleb Kanterov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.kanterov.scalajs.react
import scala.xml.Elem
import scala.language.experimental.macros
import scala.annotation.StaticAnnotation
import com.kanterov.scalajs.react.internal.ScalaxImpl
object scalax {
  // Macro entry point: turns an XML literal into a ReactDOM value at compile time.
  def apply(elem: Elem): ReactDOM = macro ScalaxImpl.apply
}
// Macro-annotation variant of `scalax.apply`. NOTE(review): `verbose` presumably
// enables debug output during macro expansion — confirm in ScalaxImpl.
class scalax(verbose: Boolean = false) extends StaticAnnotation {
  def macroTransform(annottees: Any*): ReactDOM = macro ScalaxImpl.macroTransform
}
| kanterov/scala-js-react | scalajs-react/src/main/scala/com/kanterov/scalajs/react/scalax.scala | Scala | apache-2.0 | 1,060 |
// -----------------------------------------------
//
// File: hw.scala (hw: hello world)
//
// Running -
//
// $ scala hw.scala // hw stands for hello world
//
// -----------------------------------------------
// hw stands for hello world
object hw {
  /** Program entry point: prints a fixed greeting to stdout. */
  def main(args: Array[String]): Unit = {
    val greeting = "Hello World!"
    println(greeting)
  }
}
| nvijayap/scala | hw.scala | Scala | apache-2.0 | 327 |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.model
import java.math.BigInteger
import com.netflix.atlas.core.util.Strings
import scala.util.hashing.MurmurHash3
/**
  * Represents an identifier for a tagged item.
  *
  * @param data
  *     Bytes for the id. This is usually the results of computing a SHA1 hash
  *     over a normalized representation of the tags.
  * @param hc
  *     Precomputed hash code for the bytes.
  */
class ItemId private (private val data: Array[Byte], private val hc: Int)
    extends Comparable[ItemId] {

  override def hashCode(): Int = hc

  override def equals(obj: Any): Boolean = {
    obj match {
      // Compare the cached hashes first so unequal ids usually skip the byte scan.
      case other: ItemId => hc == other.hc && java.util.Arrays.equals(data, other.data)
      case _             => false
    }
  }

  /**
    * Compares the ids byte-wise, treating bytes as unsigned values. If one id is a
    * strict prefix of the other, the shorter id sorts first. Previously that case
    * returned 0, making the ordering inconsistent with `equals` for ids of
    * differing lengths.
    */
  override def compareTo(other: ItemId): Int = {
    val length = math.min(data.length, other.data.length)
    var i = 0
    while (i < length) {
      val b1 = java.lang.Byte.toUnsignedInt(data(i))
      val b2 = java.lang.Byte.toUnsignedInt(other.data(i))
      val cmp = b1 - b2
      if (cmp != 0) return cmp
      i += 1
    }
    // Tie-break on length so only ids with identical bytes compare as 0.
    data.length - other.data.length
  }

  /** Encodes the id bytes as a string, two characters per byte via the companion's table. */
  override def toString: String = {
    val buffer = new StringBuilder
    var i = 0
    while (i < data.length) {
      val unsigned = java.lang.Byte.toUnsignedInt(data(i))
      buffer.append(ItemId.hexValueForByte(unsigned))
      i += 1
    }
    buffer.toString()
  }

  /** The id bytes interpreted as a non-negative big integer. */
  def toBigInteger: BigInteger = new BigInteger(1, data)

  /** Int formed from the last (up to) four bytes of the id, most significant byte first. */
  def intValue: Int = {
    var result = 0
    val end = math.max(0, data.length - 4)
    var i = data.length - 1
    var shift = 0
    while (i >= end) {
      result |= (data(i) & 0xFF) << shift
      i -= 1
      shift += 8
    }
    result
  }

  /**
    * Returns the byte array representing the id. This accessor is only provided to allow
    * for serialization without additional allocations. The returned array should not be
    * modified.
    */
  def byteArrayUnsafe: Array[Byte] = data
}
object ItemId {

  // Precomputed two-character encodings for each possible unsigned byte value.
  private val hexValueForByte = Array.tabulate(256)(i => Strings.zeroPad(i, 2))

  /**
    * Create a new id from an array of bytes. The pre-computed hash code will be generated
    * using MurmurHash3.
    */
  def apply(data: Array[Byte]): ItemId = new ItemId(data, MurmurHash3.bytesHash(data))

  /**
    * Create a new id from a hex string. The string should match the `toString` output of
    * an `ItemId`.
    */
  def apply(data: String): ItemId = {
    require(data.length % 2 == 0, s"invalid item id string: $data")
    val bytes = new Array[Byte](data.length / 2)
    var pos = 0
    while (pos < bytes.length) {
      // Each byte is encoded as two hex digits, high nibble first.
      val hi = hexToInt(data.charAt(2 * pos))
      val lo = hexToInt(data.charAt(2 * pos + 1))
      bytes(pos) = ((hi << 4) | lo).toByte
      pos += 1
    }
    ItemId(bytes)
  }

  // Converts a single hex digit to its value; rejects anything outside [0-9a-fA-F].
  private def hexToInt(c: Char): Int = {
    if (c >= '0' && c <= '9') c - '0'
    else if (c >= 'a' && c <= 'f') c - 'a' + 10
    else if (c >= 'A' && c <= 'F') c - 'A' + 10
    else throw new IllegalArgumentException(s"invalid hex digit: $c")
  }
}
| brharrington/atlas | atlas-core/src/main/scala/com/netflix/atlas/core/model/ItemId.scala | Scala | apache-2.0 | 3,710 |
package model
import skinny.orm._, feature._
import scalikejdbc._
import org.joda.time._
/**
 * Row model for the `remind_mail_schedule` table.
 *
 * NOTE(review): status codes and the `delayMinuite` format are not visible here —
 * confirm against the schema. The field name spelling is kept as-is.
 */
case class RemindMailSchedule(
  id: Long,
  lessonId: Long,
  lessonDetailId: Long,
  mailId: Option[Long] = None,
  status: Option[Int] = None,
  startDatetime: Option[DateTime] = None,
  endDatetime: Option[DateTime] = None,
  delayMinuite: Option[String] = None,
  deleteFlg: Boolean,
  createdBy: Option[String] = None,
  createdAt: Option[DateTime] = None,
  updatedBy: Option[String] = None,
  updatedAt: Option[DateTime] = None
)
object RemindMailSchedule extends SkinnyCRUDMapper[RemindMailSchedule] {
  override lazy val tableName = "remind_mail_schedule"
  override lazy val defaultAlias = createAlias("rms")
  // Builds the entity from a result set using Skinny ORM's name-based auto construction.
  override def extract(rs: WrappedResultSet, rn: ResultName[RemindMailSchedule]): RemindMailSchedule = {
    autoConstruct(rs, rn)
  }
}
| yoshitakes/skinny-task-example | src/main/scala/model/RemindMailSchedule.scala | Scala | mit | 850 |
package coursier.cache
import java.net.URLConnection
import coursier.core.Authentication
/**
 * A [[java.net.URLConnection]] that can have authentication parameters applied to it.
 */
trait AuthenticatedURLConnection extends URLConnection {
  /** Applies the supplied credentials to this connection. */
  def authenticate(authentication: Authentication): Unit
}
| alexarchambault/coursier | modules/cache/jvm/src/main/scala/coursier/cache/AuthenticatedURLConnection.scala | Scala | apache-2.0 | 208 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package services.businessmatching
import cats.implicits._
import connectors.DataCacheConnector
import javax.inject.Inject
import models.businessmatching.BusinessActivity
import uk.gov.hmrc.http.HeaderCarrier
import scala.concurrent.{ExecutionContext, Future}
case class NextService(url: String, activity: BusinessActivity)
class ServiceFlow @Inject()(businessMatchingService: BusinessMatchingService, cacheConnector: DataCacheConnector) {
  /**
   * Yields true when `activity` appears among the additional business activities stored
   * for `cacheId`; false when it does not, or when no activities can be retrieved
   * (`getOrElse false` supplies the fallback).
   *
   * NOTE(review): despite the name, this reports *containment* in the additional
   * activities set — confirm the intended semantics against callers.
   */
  def isNewActivity(cacheId: String, activity: BusinessActivity)(implicit hc: HeaderCarrier, ec: ExecutionContext): Future[Boolean] =
    businessMatchingService.getAdditionalBusinessActivities(cacheId)
      .map {
        _.contains(activity)
      } getOrElse false
}
| hmrc/amls-frontend | app/services/businessmatching/ServiceFlow.scala | Scala | apache-2.0 | 1,319 |
package mesosphere.chaos.http
import com.google.inject.Injector
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer
import net.liftweb.markdown.ActuariusTransformer
import scala.collection.JavaConverters._
import scala.collection.{ SortedSet, mutable }
import scala.io.Source
import scala.language.existentials
import javax.inject.{ Named, Inject }
import java.lang.reflect.Method
import java.net.URLDecoder
import javax.servlet.http.{ HttpServletResponse, HttpServletRequest, HttpServlet }
import javax.ws.rs._
class HelpServlet @Inject() (@Named("helpPathPrefix") pathPrefix: String,
                             injector: Injector,
                             container: GuiceContainer) extends HttpServlet {

  // Matches the bare help root, e.g. "/help" or "/help/".
  val basePathPattern = s"^$pathPrefix/?$$".r
  // Matches "<prefix>/<HTTP-METHOD>/<resource path>".
  val pathPattern = s"^$pathPrefix/([A-Z]+)(/.+)".r
  // (path, http method) -> implementing resource method, built once from Guice bindings.
  lazy val pathMap = makePathMap()

  val htmlHeader =
    """
      <!DOCTYPE html>
      <html lang="en-us">
      <head>
        <meta charset="utf-8">
        <title>Logger Config</title>
        <link rel="stylesheet" href="/css/chaos.css">
      </head>
      <body>
    """

  val htmlFooter =
    """
      </body>
    </html>
    """

  val contentType = "Content-Type"
  val textHtml = "text/html; charset=utf-8"

  // Dispatches on the request path: index page, per-resource help, or 404.
  override def doGet(req: HttpServletRequest, resp: HttpServletResponse) = {
    req.getRequestURI match {
      case basePathPattern() => all(req, resp)
      case pathPattern(method, path) => handleMethod(method, path, req, resp)
      case _ => resp.setStatus(HttpServletResponse.SC_NOT_FOUND)
    }
  }

  // Renders the index: a sorted table of every documented (path, method) pair linking
  // to its help page and naming the implementing resource method.
  private def all(req: HttpServletRequest, resp: HttpServletResponse) {
    resp.setStatus(HttpServletResponse.SC_OK)
    resp.addHeader(contentType, textHtml)
    val writer = resp.getWriter
    try {
      writer.print(htmlHeader)
      writer.println("<h1>Help</h1>")
      writer.println("<table><thead><tr><th>Resource</th><th>Description</th></tr></thead><tbody>")
      for (key <- pathMap.keySet.to[SortedSet]) {
        val method = pathMap(key)
        writer.println(s"""
          <tr>
            <td><a href="$pathPrefix/${key._2}${key._1}">${key._2} ${key._1}</a></td>
            <td><code>${method.getDeclaringClass.getName}#${method.getName}()</code></td>
          </tr>""")
      }
      writer.println("</tbody></table>")
      writer.print(htmlFooter)
    }
    finally {
      writer.close()
    }
  }

  // Serves the markdown documentation for one (method, path) pair. The markdown is
  // looked up on the classpath as <ResourceClass>_<method>.md next to the resource class
  // and rendered to HTML on every request.
  private def handleMethod(httpMethod: String, path: String, req: HttpServletRequest, resp: HttpServletResponse) = {
    val decodedPath = URLDecoder.decode(path, Option(req.getCharacterEncoding).getOrElse("UTF8"))
    val writer = resp.getWriter
    try {
      pathMap.get((decodedPath, httpMethod)) match {
        case Some(methodHandle) => {
          val klass = methodHandle.getDeclaringClass
          val resourceName = s"${klass.getSimpleName}_${methodHandle.getName}.md"
          Option(klass.getResource(resourceName)) match {
            case Some(url) => {
              resp.setStatus(HttpServletResponse.SC_OK)
              resp.addHeader(contentType, textHtml)
              val transformer = new ActuariusTransformer
              val markdown = transformer(Source.fromURL(url).mkString)
              writer.print(htmlHeader)
              writer.print(markdown)
              writer.print(htmlFooter)
            }
            case None => {
              resp.setStatus(HttpServletResponse.SC_NOT_FOUND)
              writer.println(s"No documentation found. Create a file named $resourceName in the resources folder " +
                s"for class ${klass.getSimpleName} to add it.")
            }
          }
        }
        case None => {
          resp.setStatus(HttpServletResponse.SC_NOT_FOUND)
          writer.println(s"No resource defined for $httpMethod $path")
        }
      }
    }
    finally {
      writer.close()
    }
  }

  // Scans all Guice bindings for JAX-RS annotated classes and records every
  // (full path, HTTP method) -> java.lang.reflect.Method pair, recursing into
  // sub-resources (methods carrying @Path but no HTTP-method annotation).
  private def makePathMap(): Map[(String, String), Method] = {
    val pathMap = new mutable.HashMap[(String, String), Method]()
    def handleClass(pathPrefix: String, klass: Class[_]) {
      for (method <- klass.getDeclaredMethods) {
        var httpMethod: Option[String] = None
        var methodPath = ""
        for (ann <- method.getAnnotations) {
          ann match {
            case m: GET => httpMethod = Some("GET")
            case m: POST => httpMethod = Some("POST")
            case m: PUT => httpMethod = Some("PUT")
            case m: DELETE => httpMethod = Some("DELETE")
            case m: HEAD => httpMethod = Some("HEAD")
            case m: OPTIONS => httpMethod = Some("OPTIONS")
            case pathAnn: Path => methodPath = s"/${pathAnn.value}"
            case _ =>
          }
        }
        val path = Option(klass.getAnnotation(classOf[Path])) match {
          case Some(ann) => s"$pathPrefix/${ann.value}$methodPath"
          case None => s"$pathPrefix$methodPath"
        }
        if (httpMethod.isDefined) {
          pathMap((path, httpMethod.get)) = method
        }
        else if (methodPath.nonEmpty) {
          // Sub-resources have a Path annotation but no HTTP method
          handleClass(path, method.getReturnType)
        }
      }
    }
    for (key <- injector.getAllBindings.keySet.asScala) {
      val klass = key.getTypeLiteral.getRawType
      if (klass.isAnnotationPresent(classOf[Path])) {
        handleClass(getServletContext.getContextPath, klass)
      }
    }
    pathMap.toMap
  }
}
| spacejam/marathon | src/main/scala/chaos/http/HelpServlet.scala | Scala | apache-2.0 | 5,435 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.time
import org.joda.time.{DateTime, DateTimeZone, Interval}
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.time.Time._
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class RichIntervalTest extends Specification {
  "RichIntervals" should {
    // Five ascending UTC instants; interval names encode their endpoints,
    // e.g. int13 spans dt1..dt3.
    val dt1 = new DateTime("2012-01-01T01:00:00", DateTimeZone.UTC)
    val dt2 = new DateTime("2012-02-02T02:00:00", DateTimeZone.UTC)
    val dt3 = new DateTime("2012-03-03T03:00:00", DateTimeZone.UTC)
    val dt4 = new DateTime("2012-04-04T04:00:00", DateTimeZone.UTC)
    val dt5 = new DateTime("2012-05-05T05:00:00", DateTimeZone.UTC)
    val int12 = new Interval(dt1, dt2)
    val int13 = new Interval(dt1, dt3)
    val int23 = new Interval(dt2, dt3)
    val int25 = new Interval(dt2, dt5)
    val int34 = new Interval(dt3, dt4)
    val int35 = new Interval(dt3, dt5)
    val int45 = new Interval(dt4, dt5)
    "support unions and intersections" >> {
      // Union of adjacent intervals covers the combined span; unioning with a
      // containing/contained interval is a no-op.
      val u1 = int12.getSafeUnion(int23)
      u1 must be equalTo int13
      val u2 = u1.getSafeUnion(int13)
      u2 must be equalTo int13
      u2.getSafeUnion(int12) must be equalTo u2
      // Test intersections
      int13.getSafeIntersection(int12) must be equalTo int12
      int13.getSafeIntersection(int23) must be equalTo int23
    }
    // Disjoint intervals should intersect to null; currently known to fail.
    "support empty intersections" >> {
      int12.getSafeIntersection(int34) must beNull
    }.pendingUntilFixed
    "handle expansions" >> {
      // Expanding by a date/interval grows the interval just enough to cover it.
      int12.expandByDate(dt3.toDate) must be equalTo int13
      int45.expandByDate(dt3.toDate) must be equalTo int35
      int23.expandByInterval(int35) must be equalTo int25
      int45.expandByInterval(int35) must be equalTo int35
    }
  }
} | ronq/geomesa | geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/time/RichIntervalTest.scala | Scala | apache-2.0 | 2,233 |
package com.enkidu.lignum.parsers
import _root_.java.io.File
import _root_.java.nio.charset.CodingErrorAction
import com.enkidu.lignum.parsers.commons.AbstractParser
import org.parboiled2.ParseError
import org.scalatest.prop.{Checkers, PropertyChecks}
import org.scalatest.{FreeSpec, Matchers}
import scala.io.{Codec, Source}
import scala.util.{Failure, Success, Try}
abstract class ParserTest extends FreeSpec with PropertyChecks with Matchers with Checkers with ParserImplicits {
  // Unwraps a parse result, rendering parboiled2's ParseError with full traces on failure.
  protected def get[A](a: Try[A])(implicit parser: AbstractParser): A = a match {
    case Success(res) => res
    case Failure(error: ParseError) => throw new Exception(parser.formatError(error, showTraces = true))
    case r => r.get
  }
  // Evaluates `f`, printing how many milliseconds it took, labelled with `str`.
  protected def measureTime[A](str: String)(f: => A): A = {
    val start = System.currentTimeMillis
    val result: A = f
    val time = System.currentTimeMillis - start
    println(s"executed $str in $time")
    result
  }
  // Lazily walks `file` recursively, yielding (file, contents) pairs for every regular
  // file accepted by `predicate`; undecodable characters are silently dropped.
  protected def traverseDirectory(file: File)(predicate: File => Boolean)(implicit codec: Codec): Stream[(File, String)] = {
    def recursiveListFiles(file: File): Stream[File] = {
      val these = file.listFiles.toStream
      these #::: these.filter(_.isDirectory).flatMap(recursiveListFiles)
    }
    codec.onMalformedInput(CodingErrorAction.IGNORE)
    codec.onUnmappableCharacter(CodingErrorAction.IGNORE)
    recursiveListFiles(file).filter(_.isFile).filter(predicate).map { f => (f, Source.fromFile(f)(codec).mkString)}
  }
  // True for files with a .java extension.
  protected def isJavaSource(file: File): Boolean = file.getName.endsWith(".java")
}
| marek1840/java-parser | src/test/scala/com/enkidu/lignum/parsers/ParserTest.scala | Scala | mit | 1,574 |
package scaffvis
/**
 * The task runner - runs tasks specified as arguments one by one.
 */
object Application {

  /**
   * For each argument, finds the class `scaffvis.tasks.<arg>` and instantiates it;
   * the task's work is performed by its constructor. Exits with status 1 when a
   * task class cannot be found.
   */
  def main(args: Array[String]): Unit = {
    if (args.isEmpty)
      println("Specify tasks to run.")
    for (task <- args) {
      try {
        val c = Class.forName(s"scaffvis.tasks.$task") //find the task using reflection
        println(s"Executing task: $task.")
        // Class#newInstance is deprecated (it rethrows checked constructor exceptions
        // unchecked); invoke the no-arg constructor explicitly instead.
        c.getDeclaredConstructor().newInstance() //execute the task
      } catch {
        case e: ClassNotFoundException =>
          println(s"Task not found: $task.")
          System.exit(1)
      }
    }
  }
}
| velkoborsky/scaffvis | generator/src/main/scala/scaffvis/Application.scala | Scala | gpl-3.0 | 593 |
import scala.quoted._
import scala.quoted.staging._
// Quote built inside a generic class: T is captured through the Type context bound.
class Foo[T: Type] {
  def q(using QuoteContext) = '{(null: Any).asInstanceOf[T]}
}
object Test {
  given Toolbox = Toolbox.make(getClass.getClassLoader)
  // Prints the source of the quoted expression for two instantiations of T;
  // presumably a staging regression test checked against expected output.
  def main(args: Array[String]): Unit = withQuoteContext {
    println((new Foo[Object]).q.show)
    println((new Foo[String]).q.show)
  }
}
| som-snytt/dotty | tests/run-staging/i4350.scala | Scala | apache-2.0 | 350 |
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package expressions
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
/**
 * @author Alexander Podkhalyuzin
 * Date: 06.03.2008
 */
/*
 * ArgumentExprs ::= '(' [Exprs [',']] ')'
 * | [nl] BlockExpr
 */
object ArgumentExprs {
  /**
   * Parses one argument list at the current position: either a parenthesized,
   * comma-separated expression list or a block-expression argument. Returns true
   * when an argument list was consumed (the marker is completed as ARG_EXPRS).
   */
  def parse(builder: ScalaPsiBuilder): Boolean = {
    val argMarker = builder.mark
    builder.getTokenType match {
      case ScalaTokenTypes.tLPARENTHESIS =>
        builder.advanceLexer() //Ate (
        // Newlines are not significant inside the parentheses.
        builder.disableNewlines()
        Expr parse builder
        // Consume ',' separated expressions; a trailing comma before ')' is tolerated.
        while (builder.getTokenType == ScalaTokenTypes.tCOMMA && !builder.consumeTrailingComma(ScalaTokenTypes.tRPARENTHESIS)) {
          builder.advanceLexer()
          if (!Expr.parse(builder)) builder error ErrMsg("wrong.expression")
        }
        builder.getTokenType match {
          case ScalaTokenTypes.tRPARENTHESIS =>
            builder.advanceLexer() //Ate )
          case _ =>
            builder error ScalaBundle.message("rparenthesis.expected")
        }
        builder.restoreNewlinesState()
        argMarker.done(ScalaElementTypes.ARG_EXPRS)
        true
      case ScalaTokenTypes.tLBRACE =>
        // A block argument may not be separated from the callee by a blank line.
        if (builder.twoNewlinesBeforeCurrentToken) {
          argMarker.rollbackTo()
          return false
        }
        BlockExpr parse builder
        argMarker.done(ScalaElementTypes.ARG_EXPRS)
        true
      case _ =>
        argMarker.drop()
        false
    }
  }
} | jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/parser/parsing/expressions/ArgumentExprs.scala | Scala | apache-2.0 | 1,583 |
package com.seanshubin.hello.domain
import java.nio.file.{Files, Path}
object FilesFromOperatingSystem extends FilesContract {
  /** Reads the entire file at `path`, delegating to `java.nio.file.Files`. */
  override def readAllBytes(path: Path): Array[Byte] = {
    Files.readAllBytes(path)
  }
}
| SeanShubin/hello | domain/src/main/scala/com/seanshubin/hello/domain/FilesFromOperatingSystem.scala | Scala | unlicense | 211 |
package com.evojam.documents
import collection.JavaConversions._
import org.bson.Document
import reactivemongo.bson.{BSONString, BSONDocument, BSONDocumentReader, BSONDocumentWriter}
import com.evojam.driver.DocumentFormat
object StringMapFormatter {
  // ReactiveMongo handler: a Map[String, String] is stored as a flat BSON document
  // whose keys are the map keys and whose values are all BSON strings.
  implicit object Handler extends BSONDocumentReader[Map[String, String]] with BSONDocumentWriter[Map[String, String]] {
    // NOTE(review): the cast assumes every element is a BSONString; a non-string
    // value would throw a ClassCastException here.
    override def read(bson: BSONDocument): Map[String, String] =
      bson.elements.map({case (key, value) => (key, value.asInstanceOf[BSONString].as[String])}).toMap
    override def write(t: Map[String, String]): BSONDocument =
      BSONDocument(t.toStream.map({case (k,v) => (k, BSONString(v))}))
  }
  // Equivalent format for the plain Java driver's org.bson.Document.
  implicit object javaFmt extends DocumentFormat[Map[String, String]] {
    override def writes(a: Map[String, String]) = {
      val doc = new Document()
      a foreach {
        case (k, v) => doc.append(k, v)
      }
      doc
    }
    override def reads(doc: Document): Map[String, String] =
      doc.keySet().map(key => (key, doc.getString(key))).toMap
  }
}
| evojam/mongo-drivers-benchmarks | src/com/evojam/documents/StringMapFormatter.scala | Scala | apache-2.0 | 1,052 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.optimization
import scala.collection.mutable.ArrayBuffer
import breeze.linalg.{DenseVector => BDV, norm}
import org.apache.spark.annotation.{Experimental, DeveloperApi}
import org.apache.spark.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.linalg.{Vectors, Vector}
/**
 * Class used to solve an optimization problem using Gradient Descent.
 * @param gradient Gradient function to be used.
 * @param updater Updater to be used to update weights after every iteration.
 */
class GradientDescent private[spark] (private var gradient: Gradient, private var updater: Updater)
  extends Optimizer with Logging {

  private var stepSize: Double = 1.0
  private var numIterations: Int = 100
  private var regParam: Double = 0.0
  private var miniBatchFraction: Double = 1.0
  private var convergenceTol: Double = 0.001

  /**
   * Set the initial step size of SGD for the first step. Default 1.0.
   * In subsequent steps, the step size will decrease with stepSize/sqrt(t)
   */
  def setStepSize(step: Double): this.type = {
    // Consistency fix: validate like setConvergenceTol instead of silently accepting
    // values that would make the algorithm diverge or stand still.
    require(step > 0, s"Initial step size must be positive but got ${step}")
    this.stepSize = step
    this
  }

  /**
   * :: Experimental ::
   * Set fraction of data to be used for each SGD iteration.
   * Default 1.0 (corresponding to deterministic/classical gradient descent)
   */
  @Experimental
  def setMiniBatchFraction(fraction: Double): this.type = {
    require(fraction > 0 && fraction <= 1.0,
      s"Fraction for mini-batch SGD must be in range (0, 1] but got ${fraction}")
    this.miniBatchFraction = fraction
    this
  }

  /**
   * Set the number of iterations for SGD. Default 100.
   */
  def setNumIterations(iters: Int): this.type = {
    require(iters >= 0, s"Number of iterations must be nonnegative but got ${iters}")
    this.numIterations = iters
    this
  }

  /**
   * Set the regularization parameter. Default 0.0.
   */
  def setRegParam(regParam: Double): this.type = {
    require(regParam >= 0, s"Regularization parameter must be nonnegative but got ${regParam}")
    this.regParam = regParam
    this
  }

  /**
   * Set the convergence tolerance. Default 0.001
   * convergenceTol is a condition which decides iteration termination.
   * The end of iteration is decided based on below logic.
   *
   *  - If the norm of the new solution vector is >1, the diff of solution vectors
   *    is compared to relative tolerance which means normalizing by the norm of
   *    the new solution vector.
   *  - If the norm of the new solution vector is <=1, the diff of solution vectors
   *    is compared to absolute tolerance which is not normalizing.
   *
   * Must be between 0.0 and 1.0 inclusively.
   */
  def setConvergenceTol(tolerance: Double): this.type = {
    require(0.0 <= tolerance && tolerance <= 1.0,
      s"Convergence tolerance must be in range [0, 1] but got ${tolerance}")
    this.convergenceTol = tolerance
    this
  }

  /**
   * Set the gradient function (of the loss function of one single data example)
   * to be used for SGD.
   */
  def setGradient(gradient: Gradient): this.type = {
    this.gradient = gradient
    this
  }

  /**
   * Set the updater function to actually perform a gradient step in a given direction.
   * The updater is responsible to perform the update from the regularization term as well,
   * and therefore determines what kind or regularization is used, if any.
   */
  def setUpdater(updater: Updater): this.type = {
    this.updater = updater
    this
  }

  /**
   * :: DeveloperApi ::
   * Runs gradient descent on the given training data.
   * @param data training data
   * @param initialWeights initial weights
   * @return solution vector
   */
  @DeveloperApi
  def optimize(data: RDD[(Double, Vector)], initialWeights: Vector): Vector = {
    val (weights, _) = GradientDescent.runMiniBatchSGD(
      data,
      gradient,
      updater,
      stepSize,
      numIterations,
      regParam,
      miniBatchFraction,
      initialWeights,
      convergenceTol)
    weights
  }

}
/**
 * :: DeveloperApi ::
 * Top-level method to run gradient descent.
 */
@DeveloperApi
object GradientDescent extends Logging {
  /**
   * Run stochastic gradient descent (SGD) in parallel using mini batches.
   * In each iteration, we sample a subset (fraction miniBatchFraction) of the total data
   * in order to compute a gradient estimate.
   * Sampling, and averaging the subgradients over this subset is performed using one standard
   * spark map-reduce in each iteration.
   *
   * @param data Input data for SGD. RDD of the set of data examples, each of
   *             the form (label, [feature values]).
   * @param gradient Gradient object (used to compute the gradient of the loss function of
   *                 one single data example)
   * @param updater Updater function to actually perform a gradient step in a given direction.
   * @param stepSize initial step size for the first step
   * @param numIterations number of iterations that SGD should be run.
   * @param regParam regularization parameter
   * @param miniBatchFraction fraction of the input data set that should be used for
   *                          one iteration of SGD. Default value 1.0.
   * @param convergenceTol Minibatch iteration will end before numIterations if the relative
   *                       difference between the current weight and the previous weight is less
   *                       than this value. In measuring convergence, L2 norm is calculated.
   *                       Default value 0.001. Must be between 0.0 and 1.0 inclusively.
   * @return A tuple containing two elements. The first element is a column matrix containing
   *         weights for every feature, and the second element is an array containing the
   *         stochastic loss computed for every iteration.
   */
  def runMiniBatchSGD(
      data: RDD[(Double, Vector)],
      gradient: Gradient,
      updater: Updater,
      stepSize: Double,
      numIterations: Int,
      regParam: Double,
      miniBatchFraction: Double,
      initialWeights: Vector,
      convergenceTol: Double): (Vector, Array[Double]) = {

    // convergenceTol should be set with non minibatch settings
    if (miniBatchFraction < 1.0 && convergenceTol > 0.0) {
      logWarning("Testing against a convergenceTol when using miniBatchFraction " +
        "< 1.0 can be unstable because of the stochasticity in sampling.")
    }

    val stochasticLossHistory = new ArrayBuffer[Double](numIterations)
    // Record previous weight and current one to calculate solution vector difference

    var previousWeights: Option[Vector] = None
    var currentWeights: Option[Vector] = None

    val numExamples = data.count()

    // if no data, return initial weights to avoid NaNs
    if (numExamples == 0) {
      logWarning("GradientDescent.runMiniBatchSGD returning initial weights, no data found")
      return (initialWeights, stochasticLossHistory.toArray)
    }

    if (numExamples * miniBatchFraction < 1) {
      logWarning("The miniBatchFraction is too small")
    }

    // Initialize weights as a column vector
    var weights = Vectors.dense(initialWeights.toArray)
    val n = weights.size

    /**
     * For the first iteration, the regVal will be initialized as sum of weight squares
     * if it's L2 updater; for L1 updater, the same logic is followed.
     */
    var regVal = updater.compute(
      weights, Vectors.zeros(weights.size), 0, 1, regParam)._2

    var converged = false // indicates whether converged based on convergenceTol
    var i = 1
    while (!converged && i <= numIterations) {
      // NOTE(review): the per-iteration broadcast is never explicitly destroyed;
      // presumably reclaimed by Spark's context cleaner.
      val bcWeights = data.context.broadcast(weights)
      // Sample a subset (fraction miniBatchFraction) of the total data
      // compute and sum up the subgradients on this subset (this is one map-reduce)
      // The fixed seed (42 + i) makes each iteration's sample reproducible across runs.
      val (gradientSum, lossSum, miniBatchSize) = data.sample(false, miniBatchFraction, 42 + i)
        .treeAggregate((BDV.zeros[Double](n), 0.0, 0L))(
          seqOp = (c, v) => {
            // c: (grad, loss, count), v: (label, features)
            val l = gradient.compute(v._2, v._1, bcWeights.value, Vectors.fromBreeze(c._1))
            (c._1, c._2 + l, c._3 + 1)
          },
          combOp = (c1, c2) => {
            // c: (grad, loss, count)
            (c1._1 += c2._1, c1._2 + c2._2, c1._3 + c2._3)
          })

      if (miniBatchSize > 0) {
        /**
         * lossSum is computed using the weights from the previous iteration
         * and regVal is the regularization value computed in the previous iteration as well.
         */
        stochasticLossHistory.append(lossSum / miniBatchSize + regVal)
        // Average the summed gradient over the batch before taking the step.
        val update = updater.compute(
          weights, Vectors.fromBreeze(gradientSum / miniBatchSize.toDouble),
          stepSize, i, regParam)
        weights = update._1
        regVal = update._2

        previousWeights = currentWeights
        currentWeights = Some(weights)
        if (previousWeights != None && currentWeights != None) {
          converged = isConverged(previousWeights.get,
            currentWeights.get, convergenceTol)
        }
      } else {
        logWarning(s"Iteration ($i/$numIterations). The size of sampled batch is zero")
      }
      i += 1
    }

    logInfo("GradientDescent.runMiniBatchSGD finished. Last 10 stochastic losses %s".format(
      stochasticLossHistory.takeRight(10).mkString(", ")))

    (weights, stochasticLossHistory.toArray)

  }

  /**
   * Alias of [[runMiniBatchSGD]] with convergenceTol set to default value of 0.001.
   */
  def runMiniBatchSGD(
      data: RDD[(Double, Vector)],
      gradient: Gradient,
      updater: Updater,
      stepSize: Double,
      numIterations: Int,
      regParam: Double,
      miniBatchFraction: Double,
      initialWeights: Vector): (Vector, Array[Double]) =
    GradientDescent.runMiniBatchSGD(data, gradient, updater, stepSize, numIterations,
      regParam, miniBatchFraction, initialWeights, 0.001)

  // True when the L2 distance between consecutive solutions is below the tolerance,
  // relative to the new solution's norm when that norm exceeds 1, absolute otherwise.
  private def isConverged(
      previousWeights: Vector,
      currentWeights: Vector,
      convergenceTol: Double): Boolean = {
    // To compare with convergence tolerance.
    val previousBDV = previousWeights.toBreeze.toDenseVector
    val currentBDV = currentWeights.toBreeze.toDenseVector

    // This represents the difference of updated weights in the iteration.
    val solutionVecDiff: Double = norm(previousBDV - currentBDV)

    solutionVecDiff < convergenceTol * Math.max(norm(currentBDV), 1.0)
  }

}
| pronix/spark | mllib/src/main/scala/org/apache/spark/mllib/optimization/GradientDescent.scala | Scala | apache-2.0 | 10,865 |
package com.seanshubin.server
/**
 * Minimal lifecycle contract for an HTTP server implementation.
 *
 * NOTE(review): method semantics are inferred from the names (Jetty-style
 * start/join/stop); confirm against the concrete implementations.
 */
trait HttpServer {
  /** Starts the server. */
  def start(): Unit

  /** Presumably blocks the calling thread until the server terminates. */
  def join(): Unit

  /** Stops the server. */
  def stop(): Unit
}
| SeanShubin/schulze | server/src/main/scala/com/seanshubin/server/HttpServer.scala | Scala | unlicense | 94 |
package org.jmotor.sbt.plugin
/**
 * Enumerates the orderings available for sorting dependency components.
 *
 * @author AI
 *         2019-04-26
 */
object ComponentSorter extends Enumeration {
  type ComponentSorter = Value

  // Ids are assigned in declaration order: ByLength = 0, ByAlphabetically = 1.
  val ByLength, ByAlphabetically = Value
}
| aiyanbo/sbt-dependency-updates | src/main/scala/org/jmotor/sbt/plugin/ComponentSorter.scala | Scala | apache-2.0 | 234 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iht.viewmodels.application.overview
import iht.models.application.assets._
import iht.models.application.basicElements.ShareableBasicEstateElement
import iht.testhelpers.CommonBuilder
import iht.testhelpers.TestHelper._
import iht.{FakeIhtApp, TestUtils}
import org.scalatest.BeforeAndAfter
import org.scalatestplus.mockito.MockitoSugar
import iht.config.AppConfig
import play.api.i18n.{Lang, Messages, MessagesApi}
import play.api.mvc.MessagesControllerComponents
/**
 * Unit tests for AssetsAndGiftsSectionViewModel, the estate-overview section that
 * summarises the assets row, the gifts row and their combined total.
 *
 * Each test builds an ApplicationDetails fixture via CommonBuilder, constructs the
 * view model, and asserts the id, caption, displayed value, link text or link URL
 * of the relevant overview row.
 */
class AssetsAndGiftsSectionViewModelTest extends FakeIhtApp with MockitoSugar with TestUtils with BeforeAndAfter {

  // Play i18n plumbing needed to resolve message keys used by the view model under test.
  val mockControllerComponents: MessagesControllerComponents = app.injector.instanceOf[MessagesControllerComponents]
  implicit val messagesApi: MessagesApi = mockControllerComponents.messagesApi
  implicit val lang = Lang.defaultLang
  implicit val messages: Messages = messagesApi.preferred(Seq(lang)).messages
  implicit val mockAppConfig: AppConfig = app.injector.instanceOf[AppConfig]
  val appConfig = mockAppConfig

  // Shared fixtures: both start from the same CommonBuilder baseline; individual
  // tests layer assets/gifts data on top via copy(...).
  val applicationDetails = CommonBuilder.buildApplicationDetails
  val emptyApplicationDetails = CommonBuilder.buildApplicationDetails
  val ukAddress = Some(CommonBuilder.DefaultUkAddress)

  // Wraps a property value in Some for fixture construction.
  private def propertyValue(value: BigDecimal) = Some(value)

  "Assets and Gifts view model" must {

    //region Assets overview row tests
    "have an id of 'assets' for the assets row" in {
      val viewModel = AssetsAndGiftsSectionViewModel(applicationDetails, false)
      viewModel.assetRow.id mustBe EstateAssetsID
    }

    "have the correct caption for the assets row" in {
      val viewModel = AssetsAndGiftsSectionViewModel(applicationDetails, false)
      viewModel.assetRow.label mustBe messagesApi("iht.estateReport.assets.inEstate")
    }

    "have a blank value for assets when there are no assets" in {
      val viewModel = AssetsAndGiftsSectionViewModel(applicationDetails, false)
      viewModel.assetRow.value mustBe ""
    }

    "have a blank value for assets when there are assets but no values have been given" in {
      val appDetails = applicationDetails copy
        (allAssets = Some(AllAssets(money = Some(ShareableBasicEstateElement(None, None, None)))))
      val viewModel = AssetsAndGiftsSectionViewModel(appDetails, false)
      viewModel.assetRow.value mustBe ""
    }

    "have the correct value with a pound sign for assets where there are some assets" in {
      val appDetails = CommonBuilder.buildApplicationDetailsWithAllAssets
      val viewModel = AssetsAndGiftsSectionViewModel(appDetails, false)
      viewModel.assetRow.value mustBe "£54,345.00"
    }

    "have the correct text when all answers to assets questions are 'No'" in {
      val appDetails = applicationDetails.copy(allAssets = Some(CommonBuilder.buildAllAssetsAnsweredNo))
      val viewModel = AssetsAndGiftsSectionViewModel(appDetails, false)
      viewModel.assetRow.value mustBe messagesApi("site.noAssets")
    }

    "show View or Change when all assets are completed" in {
      val appDetails = CommonBuilder.buildApplicationDetailsWithAllAssets
      val viewModel = AssetsAndGiftsSectionViewModel(appDetails, false)
      viewModel.assetRow.linkText mustBe messagesApi("iht.viewOrChange")
    }

    "show Start when no assets questions have been answered" in {
      val viewModel = AssetsAndGiftsSectionViewModel(applicationDetails, false)
      viewModel.assetRow.linkText mustBe messagesApi("iht.start")
    }

    "show Give more details when some assets questions have been answered" in {
      // Partially-completed assets: only the stocks-and-shares section is filled in.
      val appDetails = applicationDetails copy (allAssets = Some(AllAssets(stockAndShare = Some(CommonBuilder.buildStockAndShare.copy(
        valueNotListed = Some(BigDecimal(100)),
        valueListed = Some(BigDecimal(100)),
        value = Some(BigDecimal(100)),
        isNotListed = Some(true),
        isListed = Some(true))))))
      val viewModel = AssetsAndGiftsSectionViewModel(appDetails, false)
      viewModel.assetRow.linkText mustBe messagesApi("iht.giveMoreDetails")
    }

    "have the correct URL for the assets link" in {
      val viewModel = AssetsAndGiftsSectionViewModel(applicationDetails, false)
      viewModel.assetRow.linkUrl mustBe iht.controllers.application.assets.routes.AssetsOverviewController.onPageLoad()
    }
    //endregion

    //region Gifts overview row tests
    "have an id of 'gifts' for the gifts row" in {
      val viewModel = AssetsAndGiftsSectionViewModel(applicationDetails, false)
      viewModel.giftRow.id mustBe EstateGiftsID
    }

    "have the correct caption for the gifts row" in {
      val viewModel = AssetsAndGiftsSectionViewModel(applicationDetails, false)
      viewModel.giftRow.label mustBe messagesApi("iht.estateReport.gifts.givenAway.title")
    }

    "have a blank value for gifts when there are no gifts" in {
      val viewModel = AssetsAndGiftsSectionViewModel(applicationDetails, false)
      viewModel.giftRow.value mustBe ""
    }

    "have a blank value for gifts when there are gifts but no values have been given" in {
      val appDetails = applicationDetails copy(allGifts = Some(CommonBuilder.buildAllGiftsWithValues),
        giftsList = Some(Seq(CommonBuilder.buildPreviousYearsGifts.copy(None, None, None, None, None))))
      val viewModel = AssetsAndGiftsSectionViewModel(appDetails, false)
      viewModel.giftRow.value mustBe ""
    }

    "have the correct value with a pound sign for gifts where there are some gifts" in {
      val appDetails = CommonBuilder.buildSomeGifts(applicationDetails)
      val viewModel = AssetsAndGiftsSectionViewModel(appDetails, false)
      viewModel.giftRow.value mustBe "£3,000.00"
    }

    "have the correct text when all answers to gifts questions are 'No'" in {
      val allGifts = CommonBuilder.buildAllGifts.copy(isGivenAway = Some(false),
        isReservation = Some(false),
        isToTrust = Some(false),
        isGivenInLast7Years = Some(false),
        action = None)
      val appDetails = applicationDetails copy (allGifts = Some(allGifts))
      val viewModel = AssetsAndGiftsSectionViewModel(appDetails, false)
      viewModel.giftRow.value mustBe messagesApi("page.iht.application.overview.gifts.nonGiven")
    }

    "show View or Change when all gifts are completed" in {
      val appDetails = CommonBuilder.buildSomeGifts(applicationDetails)
      val viewModel = AssetsAndGiftsSectionViewModel(appDetails, false)
      viewModel.giftRow.linkText mustBe messagesApi("iht.viewOrChange")
    }

    "show Start when no gifts questions have been answered" in {
      val viewModel = AssetsAndGiftsSectionViewModel(applicationDetails, false)
      viewModel.giftRow.linkText mustBe messagesApi("iht.start")
    }

    "show Give more details when some gifts questions have been answered" in {
      val allGifts = CommonBuilder.buildAllGifts copy (isReservation = Some(true))
      val appDetails = applicationDetails copy (allGifts = Some(allGifts))
      val viewModel = AssetsAndGiftsSectionViewModel(appDetails, false)
      viewModel.giftRow.linkText mustBe messagesApi("iht.giveMoreDetails")
    }

    "show Give more details when only gift with reservation and gifts given away in 7 years have been answered" in {
      val giftsValues = Seq(CommonBuilder.buildPreviousYearsGifts)
      val allGifts = CommonBuilder.buildAllGifts copy (isGivenAway = Some(true))
      val appDetails = applicationDetails copy(allGifts = Some(allGifts), giftsList = Some(giftsValues))
      val viewModel = AssetsAndGiftsSectionViewModel(appDetails, false)
      viewModel.giftRow.linkText mustBe messagesApi("iht.giveMoreDetails")
    }

    "have the correct URL for the gifts link when the user has not answered any gifts questions" in {
      val viewModel = AssetsAndGiftsSectionViewModel(applicationDetails, false)
      viewModel.giftRow.linkUrl mustBe iht.controllers.application.gifts.routes.GiftsOverviewController.onPageLoad()
    }

    "have the correct URL for the gifts link when the user has answered some gifts questions" in {
      val allGifts = CommonBuilder.buildAllGifts copy (isGivenAway = Some(true))
      val appDetails = applicationDetails copy (allGifts = Some(allGifts))
      val viewModel = AssetsAndGiftsSectionViewModel(appDetails, false)
      viewModel.giftRow.linkUrl mustBe iht.controllers.application.gifts.routes.GiftsOverviewController.onPageLoad()
    }
    //endregion

    //region Assets and gifts total row
    "have an id of 'AssetsGiftsRow' for the assets and gifts total" in {
      val viewModel = AssetsAndGiftsSectionViewModel(applicationDetails, false)
      viewModel.totalRow.id mustBe "assetsGiftsTotal"
    }

    "have the correct caption for the assets and gifts total" in {
      val viewModel = AssetsAndGiftsSectionViewModel(applicationDetails, false)
      viewModel.totalRow.label mustBe messagesApi("page.iht.application.estateOverview.valueOfAssetsAndGifts")
    }

    "have a blank value for assets and gifts total when there are no assets or debts" in {
      val viewModel = AssetsAndGiftsSectionViewModel(applicationDetails, false)
      viewModel.totalRow.value mustBe "£0.00"
    }

    "have the correct value with a pound sign for assets and gifts total where there are some assets and gifts" in {
      // 54,345 of assets + 3,000 of gifts.
      val appDetails = CommonBuilder.buildSomeGifts(CommonBuilder.buildApplicationDetailsWithAllAssets)
      val viewModel = AssetsAndGiftsSectionViewModel(appDetails, false)
      viewModel.totalRow.value mustBe "£57,345.00"
    }
    //endregion
  }

  // Tests for the helper that decides whether any asset value has been entered
  // (a zero value still counts as "entered").
  "isValueEnteredForAssets" must {
    "return false if applicationDetails is empty" in {
      AssetsAndGiftsSectionViewModel.isValueEnteredForAssets(emptyApplicationDetails) mustBe false
    }

    "return true if applicationDetails has a money with 0 value" in {
      val appDetails = emptyApplicationDetails.copy(allAssets = Some(AllAssets(
        money = Some(CommonBuilder.buildShareableBasicElementExtended.copy(
          value = Some(BigDecimal(0)),
          shareValue = None)
        ))))
      AssetsAndGiftsSectionViewModel.isValueEnteredForAssets(appDetails) mustBe true
    }

    "return true if applicationDetails has a money with value other than 0" in {
      val appDetails = emptyApplicationDetails.copy(allAssets = Some(CommonBuilder.buildAllAssets.copy(
        money = Some(CommonBuilder.buildShareableBasicElementExtended.copy(
          value = Some(BigDecimal(100)),
          shareValue = None)
        ))))
      AssetsAndGiftsSectionViewModel.isValueEnteredForAssets(appDetails) mustBe true
    }

    "return true if applicationDetails has a property with 0 value" in {
      val appDetails = emptyApplicationDetails.copy(propertyList = List(CommonBuilder.buildProperty.copy(
        id = Some("2"),
        address = ukAddress,
        propertyType = None,
        typeOfOwnership = None,
        tenure = None,
        value = propertyValue(0)
      )))
      AssetsAndGiftsSectionViewModel.isValueEnteredForAssets(appDetails) mustBe true
    }

    "return true if applicationDetails has a property with value other than 0" in {
      val appDetails = emptyApplicationDetails.copy(propertyList = List(CommonBuilder.buildProperty.copy(
        id = Some("2"),
        address = ukAddress,
        propertyType = None,
        typeOfOwnership = None,
        tenure = None,
        value = propertyValue(7500)
      )))
      AssetsAndGiftsSectionViewModel.isValueEnteredForAssets(appDetails) mustBe true
    }
  }
}
| hmrc/iht-frontend | test/iht/viewmodels/application/overview/AssetsAndGiftsSectionViewModelTest.scala | Scala | apache-2.0 | 12,101 |
/*
* ******************************************************************************
* * Copyright (C) 2013 Christopher Harris (Itszuvalex)
* * Itszuvalex@gmail.com
* *
* * This program is free software; you can redistribute it and/or
* * modify it under the terms of the GNU General Public License
* * as published by the Free Software Foundation; either version 2
* * of the License, or (at your option) any later version.
* *
* * This program is distributed in the hope that it will be useful,
* * but WITHOUT ANY WARRANTY; without even the implied warranty of
* * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* * GNU General Public License for more details.
* *
* * You should have received a copy of the GNU General Public License
* * along with this program; if not, write to the Free Software
* * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* *****************************************************************************
*/
package com.itszuvalex.femtocraft.industry.containers
import com.itszuvalex.femtocraft.industry.tiles.TileEntityNanoInnervator
import net.minecraft.entity.player.{EntityPlayer, InventoryPlayer}
/**
 * GUI container for the Nano Innervator machine. Delegates entirely to the shared
 * furnace-style container, parameterized with the innervator's tile entity; all
 * slot layout and interaction behavior is inherited from ContainerFurnace.
 */
class ContainerNanoInnervator(player: EntityPlayer, par1InventoryPlayer: InventoryPlayer, par2TileEntityFurnace: TileEntityNanoInnervator) extends ContainerFurnace[TileEntityNanoInnervator](player, par1InventoryPlayer, par2TileEntityFurnace)
| Itszuvalex/Femtocraft-alpha-1 | src/main/java/com/itszuvalex/femtocraft/industry/containers/ContainerNanoInnervator.scala | Scala | gpl-2.0 | 1,458 |
package com.twitter.finagle.memcached.integration
import com.twitter.common.application.ShutdownRegistry.ShutdownRegistryImpl
import com.twitter.common.zookeeper.ServerSet.EndpointStatus
import com.twitter.common.zookeeper.testing.ZooKeeperTestServer
import com.twitter.common.zookeeper.{CompoundServerSet, ZooKeeperUtils, ServerSets, ZooKeeperClient}
import com.twitter.conversions.time._
import com.twitter.finagle.Group
import com.twitter.finagle.cacheresolver.{CacheNode, CachePoolConfig, ZookeeperCacheNodeGroup}
import com.twitter.util.{Duration, Stopwatch, TimeoutException}
import java.io.ByteArrayOutputStream
import java.net.InetSocketAddress
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{BeforeAndAfterEach, FunSuite}
import scala.collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
/**
 * Integration tests for ZookeeperCacheNodeGroup.
 *
 * Each test spins up an in-process ZooKeeper server plus a set of real memcached
 * shards, registers the shards in a serverset under `zkPath`, and verifies that
 * the cache node group only observes membership changes when the cache-pool
 * config node is updated (or, when enabled, when a shard key is remapped).
 */
class CacheNodeGroupTest extends FunSuite with BeforeAndAfterEach {

  /**
   * Note: This integration test requires a real Memcached server to run.
   */
  var shutdownRegistry: ShutdownRegistryImpl = null
  // Each started memcached server paired with its serverset membership handle.
  var testServers: List[(TestMemcachedServer, EndpointStatus)] = List()
  var serverSet: CompoundServerSet = null
  var zookeeperClient: ZooKeeperClient = null
  val zkPath = "/cache/test/silly-cache"
  var zookeeperServer: ZooKeeperTestServer = null

  override def beforeEach() {
    // start zookeeper server and create zookeeper client
    shutdownRegistry = new ShutdownRegistryImpl
    zookeeperServer = new ZooKeeperTestServer(0, shutdownRegistry)
    zookeeperServer.startNetwork()

    // connect to zookeeper server
    zookeeperClient = zookeeperServer.createClient(ZooKeeperClient.digestCredentials("user", "pass"))

    // create serverset
    serverSet = new CompoundServerSet(List(
      ServerSets.create(zookeeperClient, ZooKeeperUtils.EVERYONE_READ_CREATOR_ALL, zkPath)))

    // start five memcached servers and join the cluster
    addShards(List(0, 1, 2, 3, 4))

    // set cache pool config node data
    val cachePoolConfig: CachePoolConfig = new CachePoolConfig(cachePoolSize = 5)
    val output: ByteArrayOutputStream = new ByteArrayOutputStream
    CachePoolConfig.jsonCodec.serialize(cachePoolConfig, output)
    zookeeperClient.get().setData(zkPath, output.toByteArray, -1)

    // a separate client which only does zk discovery for integration test
    zookeeperClient = zookeeperServer.createClient(ZooKeeperClient.digestCredentials("user", "pass"))
  }

  override def afterEach() {
    // shutdown zookeeper server and client
    shutdownRegistry.execute()

    // shutdown memcached servers
    testServers foreach { case (s, _) => s.stop() }
    testServers = List()
  }

  if (!sys.props.contains("SKIP_FLAKY")) // CSL-1735
  test("doesn't blow up") {
    val myPool = new ZookeeperCacheNodeGroup(zkPath, zookeeperClient)
    assert(waitForMemberSize(myPool, 0, 5))
    assert(myPool.members forall (_.key.isDefined))
  }

  if (!sys.props.contains("SKIP_FLAKY")) test("add and remove") {
    // the cluster initially must have 5 members
    val myPool = new ZookeeperCacheNodeGroup(zkPath, zookeeperClient)
    assert(waitForMemberSize(myPool, 0, 5))
    var currentMembers = myPool.members

    /***** start 5 more memcached servers and join the cluster ******/
    // cache pool should remain the same size at this moment
    addShards(List(5, 6, 7, 8, 9))
    assert(waitForMemberSize(myPool, 5, 5))
    assert(myPool.members == currentMembers)

    // update config data node, which triggers the pool update
    // cache pool cluster should be updated
    updateCachePoolConfigData(10)
    assert(waitForMemberSize(myPool, 5, 10))
    assert(myPool.members != currentMembers)
    currentMembers = myPool.members

    /***** remove 2 servers from the zk serverset ******/
    // cache pool should remain the same size at this moment
    testServers(0)._2.leave()
    testServers(1)._2.leave()
    assert(waitForMemberSize(myPool, 10, 10))
    assert(myPool.members == currentMembers)

    // update config data node, which triggers the pool update
    // cache pool should be updated
    updateCachePoolConfigData(8)
    assert(waitForMemberSize(myPool, 10, 8))
    assert(myPool.members != currentMembers)
    currentMembers = myPool.members

    /***** remove 2 more then add 3 ******/
    // cache pool should remain the same size at this moment
    testServers(2)._2.leave()
    testServers(3)._2.leave()
    addShards(List(10, 11, 12))
    assert(waitForMemberSize(myPool, 8, 8))
    assert(myPool.members == currentMembers)

    // update config data node, which triggers the pool update
    // cache pool should be updated
    updateCachePoolConfigData(9)
    assert(waitForMemberSize(myPool, 8, 9))
    assert(myPool.members != currentMembers)
    currentMembers = myPool.members
  }

  if (!sys.props.contains("SKIP_FLAKY")) test("node key remap") {
    // turn on detecting key remapping
    val output: ByteArrayOutputStream = new ByteArrayOutputStream
    CachePoolConfig.jsonCodec.serialize(CachePoolConfig(5, detectKeyRemapping = true), output)
    zookeeperClient.get().setData(zkPath, output.toByteArray, -1)

    // the cluster initially must have 5 members
    val myPool = new ZookeeperCacheNodeGroup(zkPath, zookeeperClient)
    assert(waitForMemberSize(myPool, 0, 5))
    var currentMembers = myPool.members

    /***** only remap shard key should immediately take effect ******/
    testServers(2)._2.leave()
    testServers(3)._2.leave()
    addShards(List(2, 3))
    assert(waitForMemberSize(myPool, 5, 5))
    assert(myPool.members != currentMembers, myPool.members + " should NOT equal to " + currentMembers)
    currentMembers = myPool.members

    // turn off detecting key remapping
    CachePoolConfig.jsonCodec.serialize(CachePoolConfig(5, detectKeyRemapping = false), output)
    zookeeperClient.get().setData(zkPath, output.toByteArray, -1)
    assert(waitForMemberSize(myPool, 5, 5))
    assert(myPool.members == currentMembers, myPool.members + " should equal to " + currentMembers)
    testServers(4)._2.leave()
    addShards(List(4))
    assert(waitForMemberSize(myPool, 5, 5))
    assert(myPool.members == currentMembers, myPool.members + " should equal to " + currentMembers)

    /***** remap shard key while adding keys should not take effect ******/
    CachePoolConfig.jsonCodec.serialize(CachePoolConfig(5, detectKeyRemapping = true), output)
    zookeeperClient.get().setData(zkPath, output.toByteArray, -1)
    assert(waitForMemberSize(myPool, 5, 5))
    testServers(0)._2.leave()
    testServers(1)._2.leave()
    addShards(List(5, 0, 1))
    assert(waitForMemberSize(myPool, 5, 5))
    assert(myPool.members == currentMembers, myPool.members + " should equal to " + currentMembers)
  }

  if (!sys.props.contains("SKIP_FLAKY")) test("zk failures test") {
    // the cluster initially must have 5 members
    val myPool = new ZookeeperCacheNodeGroup(zkPath, zookeeperClient)
    assert(waitForMemberSize(myPool, 0, 5))
    var currentMembers = myPool.members

    /***** fail the server here to verify the pool manager will re-establish ******/
    // cache pool cluster should remain the same
    zookeeperServer.expireClientSession(zookeeperClient)
    zookeeperServer.shutdownNetwork()
    assert(waitForMemberSize(myPool, 5, 5))
    assert(myPool.members == currentMembers)

    /***** start the server now ******/
    // cache pool cluster should remain the same
    zookeeperServer.startNetwork
    assert(waitForMemberSize(myPool, 5, 5, 15.seconds))
    assert(myPool.members == currentMembers)

    /***** start 5 more memcached servers and join the cluster ******/
    // update config data node, which triggers the pool update
    // cache pool cluster should still be able to see underlying pool changes
    addShards(List(5, 6, 7, 8, 9))
    updateCachePoolConfigData(10)
    assert(waitForMemberSize(myPool, 5, 10, 5.seconds))
    assert(myPool.members != currentMembers)
    currentMembers = myPool.members
  }

  /**
   * Polls the pool until its size moves from `current` to `expect`, or until
   * `timeout` elapses. Returns true when the expected transition is observed
   * (or, when `current == expect`, when the size stays unchanged for the whole
   * timeout); returns false if the size changes when no change was expected;
   * throws TimeoutException when an expected change never happens.
   */
  private def waitForMemberSize(pool: Group[CacheNode], current: Int, expect: Int, timeout: Duration = 15.seconds): Boolean = {
    val elapsed = Stopwatch.start()
    def loop(): Boolean = {
      if (current != expect && pool.members.size == expect)
        true // expect pool size changes
      else if (current == expect && pool.members.size != expect)
        false // expect pool size remains
      else if (timeout < elapsed()) {
        if (current != expect) throw new TimeoutException("timed out waiting for CacheNode pool to reach the expected size")
        else true
      }
      else {
        Thread.sleep(100)
        loop()
      }
    }
    loop()
  }

  /** Serializes a CachePoolConfig of the given size into the zk config node, which triggers a pool update. */
  private def updateCachePoolConfigData(size: Int) {
    val cachePoolConfig: CachePoolConfig = new CachePoolConfig(cachePoolSize = size)
    val output: ByteArrayOutputStream = new ByteArrayOutputStream
    CachePoolConfig.jsonCodec.serialize(cachePoolConfig, output)
    zookeeperClient.get().setData(zkPath, output.toByteArray, -1)
  }

  /** Starts one memcached server per shard id and joins each to the serverset; fails the test if memcached cannot start. */
  private def addShards(shardIds: List[Int]): Unit = {
    shardIds.foreach { shardId =>
      TestMemcachedServer.start() match {
        case Some(server) =>
          testServers :+= ((server, serverSet.join(server.address, Map[String, InetSocketAddress](), shardId)))
        case None => fail("Cannot start memcached. Skipping...")
      }
    }
  }
}
| liamstewart/finagle | finagle-memcached/src/test/scala/com/twitter/finagle/memcached/integration/CacheNodeGroupTest.scala | Scala | apache-2.0 | 9,477 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters the dataset and returns a sample of code snippets matching specific criteria, giving a basic overview of its contents without surfacing deeper insights.