code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.logical
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.planner.plan.optimize.program.FlinkBatchProgram
import org.apache.flink.table.planner.utils.TableTestBase
import org.junit.Test
/**
* Test for [[FlinkLogicalRankRuleForConstantRange]].
*/
class FlinkLogicalRankRuleForConstantRangeTest extends TableTestBase {

  // Shared batch test harness: register a three-column source table and run
  // the optimizer up to (and including) the physical program.
  private val util = batchTestUtil()
  util.addTableSource[(Int, String, Long)]("MyTable", 'a, 'b, 'c)
  util.buildBatchProgram(FlinkBatchProgram.PHYSICAL)

  @Test
  def testRowNumberFunc(): Unit = {
    // ROW_NUMBER is not handled by this rule: cannot be converted to Rank.
    util.verifyPlan(
      """
        |SELECT * FROM (
        | SELECT a, b, ROW_NUMBER() OVER (PARTITION BY b ORDER BY a) rn FROM MyTable) t
        |WHERE rn <= 2
      """.stripMargin)
  }

  @Test
  def testWithoutFilter(): Unit = {
    // Without a rank-value filter there is no constant range: no Rank node.
    util.verifyPlan("SELECT a, b, RANK() OVER (PARTITION BY b ORDER BY a) rk FROM MyTable")
  }

  @Test
  def testRankValueFilterWithUpperValue(): Unit = {
    // Upper-bounded rank plus an unrelated predicate converts to Rank.
    util.verifyPlan(
      """
        |SELECT * FROM (
        | SELECT a, b, RANK() OVER (PARTITION BY b ORDER BY a) rk FROM MyTable) t
        |WHERE rk <= 2 AND a > 10
      """.stripMargin)
  }

  @Test
  def testRankValueFilterWithRange(): Unit = {
    // Both bounds constant: the range [rk > -2, rk <= 2] converts to Rank.
    util.verifyPlan(
      """
        |SELECT * FROM (
        | SELECT a, b, RANK() OVER (PARTITION BY b, c ORDER BY a) rk FROM MyTable) t
        |WHERE rk <= 2 AND rk > -2
      """.stripMargin)
  }

  @Test
  def testRankValueFilterWithLowerValue(): Unit = {
    // Only a lower bound: cannot be converted to Rank.
    util.verifyPlan(
      """
        |SELECT * FROM (
        | SELECT a, b, RANK() OVER (PARTITION BY b ORDER BY a, c) rk FROM MyTable) t
        |WHERE rk > 2
      """.stripMargin)
  }

  @Test
  def testRankValueFilterWithEquals(): Unit = {
    // Equality on the rank value is a degenerate constant range.
    util.verifyPlan(
      """
        |SELECT * FROM (
        | SELECT a, b, RANK() OVER (PARTITION BY b ORDER BY a, c) rk FROM MyTable) t
        |WHERE rk = 2
      """.stripMargin)
  }

  @Test
  def testRankValueFilterWithVariableField1(): Unit = {
    // Bound is a column, not a constant: cannot be converted to Rank.
    util.verifyPlan(
      """
        |SELECT * FROM (
        | SELECT a, b, RANK() OVER (PARTITION BY b ORDER BY c) rk FROM MyTable) t
        |WHERE rk < a
      """.stripMargin)
  }

  @Test
  def testRankValueFilterWithVariableField2(): Unit = {
    // Bound is a column, not a constant: cannot be converted to Rank.
    util.verifyPlan(
      """
        |SELECT * FROM (
        | SELECT a, b, RANK() OVER (PARTITION BY b ORDER BY c) rk FROM MyTable) t
        |WHERE rk > a
      """.stripMargin)
  }

  @Test
  def testRankValueFilterWithVariableField3(): Unit = {
    // Variable bound plus an extra predicate: still cannot be converted.
    util.verifyPlan(
      """
        |SELECT * FROM (
        | SELECT a, b, RANK() OVER (PARTITION BY b ORDER BY c) rk FROM MyTable) t
        |WHERE rk < a and b > 5
      """.stripMargin)
  }

  @Test
  def testRankValueFilterWithVariableField4(): Unit = {
    // Equality against a column: cannot be converted to Rank.
    util.verifyPlan(
      """
        |SELECT * FROM (
        | SELECT a, b, RANK() OVER (PARTITION BY a ORDER BY c) rk FROM MyTable) t
        |WHERE rk = b
      """.stripMargin)
  }

  @Test
  def testWithoutPartitionBy(): Unit = {
    // An empty PARTITION BY clause is allowed for the conversion.
    util.verifyPlan(
      """
        |SELECT * FROM (
        | SELECT a, b, RANK() OVER (ORDER BY a) rk FROM MyTable) t
        |WHERE rk < 10
      """.stripMargin)
  }

  @Test
  def testMultiSameRankFunctionsWithSameGroup(): Unit = {
    // Identical rank functions over the same window share one group.
    util.verifyPlan(
      """
        |SELECT * FROM (
        | SELECT a, b,
        | RANK() OVER (PARTITION BY b ORDER BY a) rk1,
        | RANK() OVER (PARTITION BY b ORDER BY a) rk2 FROM MyTable) t
        |WHERE rk1 < 10
      """.stripMargin)
  }

  @Test
  def testMultiSameRankFunctionsWithDiffGroup(): Unit = {
    // Same function over different windows: cannot be converted to Rank.
    util.verifyPlan(
      """
        |SELECT * FROM (
        | SELECT a, b,
        | RANK() OVER (PARTITION BY b ORDER BY a) rk1,
        | RANK() OVER (PARTITION BY c ORDER BY a) rk2 FROM MyTable) t
        |WHERE rk1 < 10
      """.stripMargin)
  }

  @Test
  def testMultiDiffRankFunctions(): Unit = {
    // Mixed RANK and ROW_NUMBER: cannot be converted to Rank.
    util.verifyPlan(
      """
        |SELECT * FROM (
        | SELECT a, b,
        | RANK() OVER (PARTITION BY b ORDER BY a) rk,
        | ROW_NUMBER() OVER (PARTITION BY b ORDER BY a) rn FROM MyTable) t
        |WHERE rk < 10
      """.stripMargin)
  }

  @Test
  def testDuplicateRankFunctionColumnName(): Unit = {
    // The source already has a column named 'rk'; the generated rank column
    // must not collide with it.
    util.addTableSource[(Int, Long, String)]("MyTable2", 'a, 'b, 'rk)
    util.verifyPlan(
      """
        |SELECT * FROM (
        | SELECT a, b, RANK() OVER (PARTITION BY b ORDER BY a) rk FROM MyTable2) t
        |WHERE rk < 10
      """.stripMargin)
  }
}
| GJL/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/rules/logical/FlinkLogicalRankRuleForConstantRangeTest.scala | Scala | apache-2.0 | 6,052 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.util
import java.io._
import org.junit.Assert._
import org.junit.Test
import org.apache.samza.config.MapConfig
import org.apache.samza.serializers._
import org.apache.samza.SamzaException
/**
 * Tests for the [[Util]] helper object: checksummed file round-trips,
 * local-host lookup, serde-factory resolution by name, and saturating
 * (clamped) long addition.
 */
class TestUtil {
  // Fixture shared by the file read/write tests: a payload, its checksum,
  // and a temp-dir file that both tests write to / read from.
  val data = "100"
  val checksum = Util.getChecksumValue(data)
  val file = new File(System.getProperty("java.io.tmpdir"), "test")

  @Test
  def testWriteDataToFile() {
    // Invoke test
    Util.writeDataToFile(file, data)
    // Check that file exists
    assertTrue("File was not created!", file.exists())
    val fis = new FileInputStream(file)
    val ois = new ObjectInputStream(fis)
    try {
      // Expected on-disk layout: <checksum: Long> followed by <data: UTF>.
      assertEquals(checksum, ois.readLong())
      assertEquals(data, ois.readUTF())
    } finally {
      // Close even when an assertion throws, so a failing run does not leak
      // the open streams on the shared temp file.
      ois.close()
      fis.close()
    }
  }

  @Test
  def testReadDataFromFile() {
    // Setup: write the layout that readDataFromFile expects.
    val fos = new FileOutputStream(file)
    val oos = new ObjectOutputStream(fos)
    try {
      oos.writeLong(checksum)
      oos.writeUTF(data)
    } finally {
      oos.close()
      fos.close()
    }
    // Invoke test and check data returned.
    val result = Util.readDataFromFile(file)
    assertEquals(data, result)
  }

  @Test
  def testGetLocalHost(): Unit = {
    assertNotNull(Util.getLocalHost)
  }

  @Test
  def testDefaultSerdeFactoryFromSerdeName {
    import Util._
    assertEquals(classOf[ByteSerdeFactory].getName, defaultSerdeFactoryFromSerdeName("byte"))
    assertEquals(classOf[IntegerSerdeFactory].getName, defaultSerdeFactoryFromSerdeName("integer"))
    assertEquals(classOf[JsonSerdeFactory].getName, defaultSerdeFactoryFromSerdeName("json"))
    assertEquals(classOf[LongSerdeFactory].getName, defaultSerdeFactoryFromSerdeName("long"))
    assertEquals(classOf[SerializableSerdeFactory[java.io.Serializable@unchecked]].getName, defaultSerdeFactoryFromSerdeName("serializable"))
    assertEquals(classOf[StringSerdeFactory].getName, defaultSerdeFactoryFromSerdeName("string"))
    assertEquals(classOf[DoubleSerdeFactory].getName, defaultSerdeFactoryFromSerdeName("double"))
    // throw SamzaException if can not find the correct serde
    var throwSamzaException = false
    try {
      defaultSerdeFactoryFromSerdeName("otherName")
    } catch {
      case _: SamzaException => throwSamzaException = true
      case _: Exception => // any other exception leaves the flag false
    }
    assertTrue(throwSamzaException)
  }

  @Test
  def testClampAdd() {
    // Ordinary cases are plain addition ...
    assertEquals(0, Util.clampAdd(0, 0))
    assertEquals(2, Util.clampAdd(1, 1))
    assertEquals(-2, Util.clampAdd(-1, -1))
    // ... while overflow saturates at the Long bounds instead of wrapping.
    assertEquals(Long.MaxValue, Util.clampAdd(Long.MaxValue, 0))
    assertEquals(Long.MaxValue - 1, Util.clampAdd(Long.MaxValue, -1))
    assertEquals(Long.MaxValue, Util.clampAdd(Long.MaxValue, 1))
    assertEquals(Long.MaxValue, Util.clampAdd(Long.MaxValue, Long.MaxValue))
    assertEquals(Long.MinValue, Util.clampAdd(Long.MinValue, 0))
    assertEquals(Long.MinValue, Util.clampAdd(Long.MinValue, -1))
    assertEquals(Long.MinValue + 1, Util.clampAdd(Long.MinValue, 1))
    assertEquals(Long.MinValue, Util.clampAdd(Long.MinValue, Long.MinValue))
    assertEquals(-1, Util.clampAdd(Long.MaxValue, Long.MinValue))
  }
}
| nickpan47/samza | samza-core/src/test/scala/org/apache/samza/util/TestUtil.scala | Scala | apache-2.0 | 3,949 |
package teksol.mybank.domain.events
import teksol.domain.FamilyId
import teksol.infrastructure.Event
import teksol.mybank.domain.models.AccountId
/** Domain event raised when a new account is created within a family. */
case class AccountCreated(familyId: FamilyId, accountId: AccountId) extends Event {
  /** Renders this event as a JSON object with snake_case keys. */
  override def toJson: String =
    "{\"family_id\":" + familyId.toJson + ",\"account_id\":" + accountId.toJson + "}"
}
| francois/family | src/main/scala/teksol/mybank/domain/events/AccountCreated.scala | Scala | mit | 341 |
/*
* Copyright 2021 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.extra.rollup.syntax
import com.spotify.scio.coders.BeamCoders
import com.spotify.scio.values.SCollection
import com.twitter.algebird.Group
trait SCollectionSyntax {
  implicit final class RollupOps[U, D, R, M](self: SCollection[(U, D, R, M)]) {

    /**
     * Takes an [[SCollection]] with elements consisting of three sets of dimensions and one
     * measure, and returns an [[SCollection]] keyed by dimensions whose value is the summed
     * measure combined with a distinct count.
     *
     * This is to be used when doing a count distinct of one key over a set of dimensions, when
     * that key can be present in multiple elements of the final dataset, so that rollups over
     * the non-unique dimensions remain correct even though distinct counts are not summable.
     *
     * Type parameters:
     *  - U: unique key — what we want to count distinct occurrences of
     *  - D: dimensions that should NOT be rolled up (either unique per U, or not summable
     *    over, e.g. a metric per date)
     *  - R: dimensions that SHOULD be rolled up
     *  - M: an additional measure that is summable over all dimensions
     *
     * @param rollupFunction
     *   Given one element's rollup dimensions R, returns the set of R values (one per rollup
     *   combination) the element should contribute to.
     */
    def rollupAndCount(
      rollupFunction: R => Set[R]
    )(implicit g: Group[M]): SCollection[((D, R), (M, Long))] = {
      // Recover the four element coders from the input SCollection so the
      // intermediate keyed collections below can be encoded.
      implicit val (coderU, coderD, coderR, coderM) = BeamCoders.getTuple4Coders(self)
      // Pass 1: sum measures and count elements per (D, R), then fan each
      // result out to every rollup combination. A unique key U appearing in
      // several elements is counted once per element here — i.e. over-counted.
      val doubleCounting = self
        .withName("RollupAndCountDuplicates")
        .transform {
          _.map { case (_, dims, rollupDims, measure) =>
            ((dims, rollupDims), (measure, 1L))
          }.sumByKey
            .flatMap { case (dims @ (_, rollupDims), measure) =>
              rollupFunction(rollupDims)
                .map((x: R) => dims.copy(_2 = x) -> measure)
            }
        }
      // Pass 2: for each (U, D) that occurs with more than one R, emit
      // negative corrections so each unique key counts once per rollup cell.
      val correctingCounts = self
        .withName("RollupAndCountCorrection")
        .transform {
          _.map { case (uniqueKey, dims, rollupDims, _) =>
            ((uniqueKey, dims), rollupDims)
          }.groupByKey
            .filterValues(_.size > 1)
            .flatMapValues { values =>
              val rollupMap = collection.mutable.Map.empty[R, Long]
              for (r <- values) {
                for (newDim <- rollupFunction(r)) {
                  // Seed with 1 so the first occurrence of a rollup cell nets
                  // to 0 (it is legitimately counted once); each further
                  // occurrence subtracts 1, giving -(k - 1) for k duplicates.
                  rollupMap(newDim) = rollupMap.getOrElse(newDim, 1L) - 1L
                }
              }
              // We only care about correcting cases where we actually double-count
              rollupMap.iterator.filter(_._2 < 0L)
            }
            // Corrections carry a zero measure: they adjust counts only.
            .map { case ((_, dims), (rollupDims, count)) => ((dims, rollupDims), (g.zero, count)) }
        }
      // Union the over-counted totals with the corrections and sum per key.
      SCollection
        .unionAll(List(doubleCounting, correctingCounts))
        .withName("RollupAndCountCorrected")
        .sumByKey
    }
  }
}
| spotify/scio | scio-extra/src/main/scala/com/spotify/scio/extra/rollup/syntax/SCollectionSyntax.scala | Scala | apache-2.0 | 3,742 |
package java.lang
/* This is a hijacked class. Its instances are primitive numbers.
* Constructors are not emitted.
*/
final class Short private () extends Number with Comparable[Short] {
  // Constructors exist only for the Java API surface; they are never emitted
  // (instances of this hijacked class ARE primitive shorts at runtime).
  def this(value: scala.Short) = this()
  def this(s: String) = this()

  // The instance itself is the primitive value, so conversion is a cast.
  @inline override def shortValue(): scala.Short =
    this.asInstanceOf[scala.Short]

  @inline override def byteValue(): scala.Byte = shortValue.toByte
  @inline def intValue(): scala.Int = shortValue.toInt
  @inline def longValue(): scala.Long = shortValue.toLong
  @inline def floatValue(): scala.Float = shortValue.toFloat
  @inline def doubleValue(): scala.Double = shortValue.toDouble

  // Reference equality suffices: equal primitive values are the same value.
  @inline override def equals(that: Any): scala.Boolean =
    this eq that.asInstanceOf[AnyRef]

  // Matches the JDK contract: the hash of a Short is its int value.
  @inline override def hashCode(): Int =
    shortValue

  @inline override def compareTo(that: Short): Int =
    Short.compare(shortValue, that.shortValue)

  @inline override def toString(): String =
    Short.toString(shortValue)
}
object Short {
  final val TYPE = classOf[scala.Short]
  final val SIZE = 16
  final val BYTES = 2

  /* MIN_VALUE and MAX_VALUE should be 'final val's. But it is impossible to
   * write a proper Short literal in Scala, that would both considered a Short
   * and a constant expression (optimized as final val).
   * Since vals and defs are binary-compatible (although they're not strictly
   * speaking source-compatible, because of stability), we implement them as
   * defs. Source-compatibility is not an issue because user code is compiled
   * against the JDK .class files anyway.
   */
  def MIN_VALUE: scala.Short = -32768
  def MAX_VALUE: scala.Short = 32767

  // No boxing cache is needed: instances are primitives (see class comment),
  // so "new Short(...)" carries no allocation at runtime.
  @inline def valueOf(shortValue: scala.Short): Short = new Short(shortValue)
  @inline def valueOf(s: String): Short = valueOf(parseShort(s))
  @inline def valueOf(s: String, radix: Int): Short =
    valueOf(parseShort(s, radix))

  @inline def parseShort(s: String): scala.Short = parseShort(s, 10)

  /** Parses `s` in the given radix via `Integer.parseInt`, then range-checks
   *  the result against the Short bounds, throwing `NumberFormatException`
   *  when it does not fit.
   */
  def parseShort(s: String, radix: Int): scala.Short = {
    val r = Integer.parseInt(s, radix)
    if (r < MIN_VALUE || r > MAX_VALUE)
      throw new NumberFormatException(s"""For input string: "$s"""")
    else
      r.toShort
  }

  @inline def toString(s: scala.Short): String =
    "" + s

  // Subtraction cannot overflow an Int for 16-bit inputs, so the difference
  // is a valid comparison result.
  @inline def compare(x: scala.Short, y: scala.Short): scala.Int =
    x - y

  // Swaps the low and high bytes of the 16-bit value.
  def reverseBytes(i: scala.Short): scala.Short =
    (((i >>> 8) & 0xff) + ((i & 0xff) << 8)).toShort
}
| xuwei-k/scala-js | javalanglib/src/main/scala/java/lang/Short.scala | Scala | bsd-3-clause | 2,443 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.plans.logical
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.{CatalystTypeConverters, InternalRow}
import org.apache.spark.sql.catalyst.analysis
import org.apache.spark.sql.catalyst.expressions.{Attribute, Literal}
import org.apache.spark.sql.catalyst.plans.logical.statsEstimation.EstimationUtils
import org.apache.spark.sql.types.{StructField, StructType}
object LocalRelation {
  /** Creates an empty relation with the given output attributes. */
  def apply(output: Attribute*): LocalRelation = new LocalRelation(output)

  /** Creates an empty relation from one or more struct fields. */
  def apply(output1: StructField, output: StructField*): LocalRelation =
    new LocalRelation(StructType(output1 +: output).toAttributes)

  /** Builds a relation from external [[Row]]s, converting them to the
   *  internal representation. */
  def fromExternalRows(output: Seq[Attribute], data: Seq[Row]): LocalRelation =
    converted(output, data)

  /** Builds a relation from Scala [[Product]]s (e.g. case classes / tuples),
   *  converting them to the internal representation. */
  def fromProduct(output: Seq[Attribute], data: Seq[Product]): LocalRelation =
    converted(output, data)

  // Shared conversion path: derive the schema from the attributes and run
  // every element through the Catalyst converter.
  private def converted(output: Seq[Attribute], data: Seq[Any]): LocalRelation = {
    val schema = StructType.fromAttributes(output)
    val toCatalyst = CatalystTypeConverters.createToCatalystConverter(schema)
    LocalRelation(output, data.map(toCatalyst(_).asInstanceOf[InternalRow]))
  }
}
/**
* Logical plan node for scanning data from a local collection.
*
* @param data The local collection holding the data. It doesn't need to be sent to executors
* and then doesn't need to be serializable.
*/
case class LocalRelation(
    output: Seq[Attribute],
    data: Seq[InternalRow] = Nil,
    // Indicates whether this relation has data from a streaming source.
    override val isStreaming: Boolean = false)
  extends LeafNode with analysis.MultiInstanceRelation {

  // A local relation must have resolved output.
  require(output.forall(_.resolved), "Unresolved attributes found when constructing LocalRelation.")

  /**
   * Returns an identical copy of this relation with new exprIds for all attributes. Different
   * attributes are required when a relation is going to be included multiple times in the same
   * query.
   */
  override final def newInstance(): this.type = {
    LocalRelation(output.map(_.newInstance()), data, isStreaming).asInstanceOf[this.type]
  }

  // Plan-string arguments: show an explicit "<empty>" marker when the
  // relation carries no rows, otherwise just the output attributes.
  override protected def stringArgs: Iterator[Any] = {
    if (data.isEmpty) {
      Iterator("<empty>", output)
    } else {
      Iterator(output)
    }
  }

  // Size estimate: estimated bytes per row times the number of rows held.
  override def computeStats(): Statistics =
    Statistics(sizeInBytes = EstimationUtils.getSizePerRow(output) * data.length)

  /** Renders this relation as an inline-table SQL fragment:
   *  `VALUES (...), (...) AS name(col1, col2, ...)`. Requires at least one
   *  row, since SQL VALUES cannot be empty.
   */
  def toSQL(inlineTableName: String): String = {
    require(data.nonEmpty)
    val types = output.map(_.dataType)
    // Render each row as a parenthesized list of SQL literals.
    val rows = data.map { row =>
      val cells = row.toSeq(types).zip(types).map { case (v, tpe) => Literal(v, tpe).sql }
      cells.mkString("(", ", ", ")")
    }
    "VALUES " + rows.mkString(", ") +
      " AS " + inlineTableName +
      output.map(_.name).mkString("(", ", ", ")")
  }
}
| lvdongr/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LocalRelation.scala | Scala | apache-2.0 | 3,750 |
/*
* This file is part of the "silex" library of helpers for Apache Spark.
*
* Copyright (c) 2016 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package com.redhat.et.silex.rdd.multiplex
import scala.reflect.ClassTag
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkContext, Partition, TaskContext,
Dependency, NarrowDependency, OneToOneDependency}
import org.apache.spark.storage.StorageLevel
/**
* Enhance RDDs with methods for generating multiplexed RDDs
* @tparam T the element type of the RDD
* {{{
* // enable multiplexing methods
* import com.redhat.et.silex.rdd.multiplex.implicits._
*
* // A boolean predicate on data elements
* val pred: Int => Boolean = ....
*
* // pos will contain data elements for which 'pred' was true.
* // neg will contain elements for which 'pred' was false.
* val (pos, neg) = data.flatMuxPartitions((data: Iterator[Int]) => {
* val pT = scala.collection.mutable.ArrayBuffer.empty[Int]
* val pF = scala.collection.mutable.ArrayBuffer.empty[Int]
* data.foreach { e => (if (pred(e)) pT else pF) += e }
* (pT, pF)
* })
* }}}
*/
class MuxRDDFunctions[T :ClassTag](self: RDD[T]) extends Serializable {
import MuxRDDFunctions.defaultSL
/** Obtain a sequence of (n) RDDs where the jth RDD is obtained from the jth element
* returned by applying (f) to each input partition.
* @param n The length of sequence returned from (f)
* @param f Function maps data from a partition into a sequence of (n) objects of type U
* @param persist The storage level to apply to the intermediate result returned by (f)
* @return The sequence of RDDs, as described above
*/
def muxPartitions[U :ClassTag](
    n: Int, f: Iterator[T] => Seq[U],
    persist: StorageLevel = defaultSL):
    Seq[RDD[U]] = {
  // Delegate to the index-aware variant; the partition id is simply ignored.
  val ignoringId = (_: Int, data: Iterator[T]) => f(data)
  muxPartitionsWithIndex(n, ignoringId, persist)
}
/** Obtain a sequence of (n) RDDs where the jth RDD is obtained from the jth element
* returned by applying (f) to each input partition and its id.
* @param n The length of sequence returned from (f)
* @param f Function maps data from a partition, along with that partition's (id) value,
* into a sequence of (n) objects of type U
* @param persist The storage level to apply to the intermediate result returned by (f)
* @return The sequence of RDDs, as described above
*/
def muxPartitionsWithIndex[U :ClassTag](
  n: Int, f: (Int, Iterator[T]) => Seq[U],
  persist: StorageLevel = defaultSL):
  Seq[RDD[U]] = {
  require(n >= 0, "expected sequence length must be >= 0")
  // Run f exactly once per partition, storing its n-element result as the
  // partition's single element; persisting lets all n projections share it.
  val mux = self.mapPartitionsWithIndex { case (id, itr) =>
    val r = f(id, itr)
    require(r.length == n, s"multiplexed sequence did not have expected length $n")
    Iterator.single(r)
  }.persist(persist)
  // Output j projects out the j-th element of each partition's sequence.
  Vector.tabulate(n) { j => mux.mapPartitions { itr => Iterator.single(itr.next()(j)) } }
}
/** Obtain a tuple of RDDs where jth component is obtained from the corresponding component
* returned by applying (f) to each partition
* @param f Function maps data from a partition into a tuple of objects
* @param persist The storage level to apply to the intermediate result returned by (f)
* @return The tuple of RDDs, as described above
*/
def mux2Partitions[U1 :ClassTag, U2 :ClassTag](
    f: Iterator[T] => (U1, U2),
    persist: StorageLevel = defaultSL):
    (RDD[U1], RDD[U2]) = {
  // Delegate to the index-aware variant; the partition id is simply ignored.
  mux2PartitionsWithIndex((_: Int, data: Iterator[T]) => f(data), persist)
}
/** Obtain a tuple of RDDs where jth component is obtained from the corresponding component
* returned by applying (f) to each partition and its id
* @param f Function maps data from a partition and its id into a tuple of objects
* @param persist The storage level to apply to the intermediate result returned by (f)
* @return The tuple of RDDs, as described above
*/
def mux2PartitionsWithIndex[U1 :ClassTag, U2 :ClassTag](
  f: (Int, Iterator[T]) => (U1, U2),
  persist: StorageLevel = defaultSL):
  (RDD[U1], RDD[U2]) = {
  // Evaluate f once per partition; each partition of 'mux' holds exactly one
  // tuple, and persisting it lets both projections below share the result.
  val mux = self.mapPartitionsWithIndex { case (id, itr) =>
    Iterator.single(f(id, itr))
  }.persist(persist)
  val mux1 = mux.mapPartitions(itr => Iterator.single(itr.next._1))
  val mux2 = mux.mapPartitions(itr => Iterator.single(itr.next._2))
  (mux1, mux2)
}
/** Obtain a tuple of RDDs where jth component is obtained from the corresponding component
* returned by applying (f) to each partition
* @param f Function maps data from a partition into a tuple of objects
* @param persist The storage level to apply to the intermediate result returned by (f)
* @return The tuple of RDDs, as described above
*/
def mux3Partitions[U1 :ClassTag, U2 :ClassTag, U3: ClassTag](
    f: Iterator[T] => (U1, U2, U3),
    persist: StorageLevel = defaultSL):
    (RDD[U1], RDD[U2], RDD[U3]) = {
  // Delegate to the index-aware variant; the partition id is simply ignored.
  mux3PartitionsWithIndex((_: Int, data: Iterator[T]) => f(data), persist)
}
/** Obtain a tuple of RDDs where jth component is obtained from the corresponding component
* returned by applying (f) to each partition and its id
* @param f Function maps data from a partition and its id into a tuple of objects
* @param persist The storage level to apply to the intermediate result returned by (f)
* @return The tuple of RDDs, as described above
*/
def mux3PartitionsWithIndex[U1 :ClassTag, U2 :ClassTag, U3: ClassTag](
  f: (Int, Iterator[T]) => (U1, U2, U3),
  persist: StorageLevel = defaultSL):
  (RDD[U1], RDD[U2], RDD[U3]) = {
  // Evaluate f once per partition; the persisted singleton tuple is shared
  // by the three component projections below.
  val mux = self.mapPartitionsWithIndex { case (id, itr) =>
    Iterator.single(f(id, itr))
  }.persist(persist)
  val mux1 = mux.mapPartitions(itr => Iterator.single(itr.next._1))
  val mux2 = mux.mapPartitions(itr => Iterator.single(itr.next._2))
  val mux3 = mux.mapPartitions(itr => Iterator.single(itr.next._3))
  (mux1, mux2, mux3)
}
/** Obtain a tuple of RDDs where jth component is obtained from the corresponding component
* returned by applying (f) to each partition
* @param f Function maps data from a partition into a tuple of objects
* @param persist The storage level to apply to the intermediate result returned by (f)
* @return The tuple of RDDs, as described above
*/
def mux4Partitions[U1 :ClassTag, U2 :ClassTag, U3: ClassTag, U4 :ClassTag](
    f: Iterator[T] => (U1, U2, U3, U4),
    persist: StorageLevel = defaultSL):
    (RDD[U1], RDD[U2], RDD[U3], RDD[U4]) = {
  // Delegate to the index-aware variant; the partition id is simply ignored.
  mux4PartitionsWithIndex((_: Int, data: Iterator[T]) => f(data), persist)
}
/** Obtain a tuple of RDDs where jth component is obtained from the corresponding component
* returned by applying (f) to each partition and its id
* @param f Function maps data from a partition and its id into a tuple of objects
* @param persist The storage level to apply to the intermediate result returned by (f)
* @return The tuple of RDDs, as described above
*/
def mux4PartitionsWithIndex[U1 :ClassTag, U2 :ClassTag, U3: ClassTag, U4 :ClassTag](
  f: (Int, Iterator[T]) => (U1, U2, U3, U4),
  persist: StorageLevel = defaultSL):
  (RDD[U1], RDD[U2], RDD[U3], RDD[U4]) = {
  // Evaluate f once per partition; the persisted singleton tuple is shared
  // by the four component projections below.
  val mux = self.mapPartitionsWithIndex { case (id, itr) =>
    Iterator.single(f(id, itr))
  }.persist(persist)
  val mux1 = mux.mapPartitions(itr => Iterator.single(itr.next._1))
  val mux2 = mux.mapPartitions(itr => Iterator.single(itr.next._2))
  val mux3 = mux.mapPartitions(itr => Iterator.single(itr.next._3))
  val mux4 = mux.mapPartitions(itr => Iterator.single(itr.next._4))
  (mux1, mux2, mux3, mux4)
}
/** Obtain a tuple of RDDs where jth component is obtained from the corresponding component
* returned by applying (f) to each partition
* @param f Function maps data from a partition into a tuple of objects
* @param persist The storage level to apply to the intermediate result returned by (f)
* @return The tuple of RDDs, as described above
*/
def mux5Partitions[U1 :ClassTag, U2 :ClassTag, U3: ClassTag, U4 :ClassTag, U5 :ClassTag](
    f: Iterator[T] => (U1, U2, U3, U4, U5),
    persist: StorageLevel = defaultSL):
    (RDD[U1], RDD[U2], RDD[U3], RDD[U4], RDD[U5]) = {
  // Delegate to the index-aware variant; the partition id is simply ignored.
  mux5PartitionsWithIndex((_: Int, data: Iterator[T]) => f(data), persist)
}
/** Obtain a tuple of RDDs where jth component is obtained from the corresponding component
* returned by applying (f) to each partition and its id
* @param f Function maps data from a partition and its id into a tuple of objects
* @param persist The storage level to apply to the intermediate result returned by (f)
* @return The tuple of RDDs, as described above
*/
def mux5PartitionsWithIndex[U1 :ClassTag, U2 :ClassTag, U3: ClassTag, U4 :ClassTag, U5 :ClassTag](
  f: (Int, Iterator[T]) => (U1, U2, U3, U4, U5),
  persist: StorageLevel = defaultSL):
  (RDD[U1], RDD[U2], RDD[U3], RDD[U4], RDD[U5]) = {
  // Evaluate f once per partition; the persisted singleton tuple is shared
  // by the five component projections below.
  val mux = self.mapPartitionsWithIndex { case (id, itr) =>
    Iterator.single(f(id, itr))
  }.persist(persist)
  val mux1 = mux.mapPartitions(itr => Iterator.single(itr.next._1))
  val mux2 = mux.mapPartitions(itr => Iterator.single(itr.next._2))
  val mux3 = mux.mapPartitions(itr => Iterator.single(itr.next._3))
  val mux4 = mux.mapPartitions(itr => Iterator.single(itr.next._4))
  val mux5 = mux.mapPartitions(itr => Iterator.single(itr.next._5))
  (mux1, mux2, mux3, mux4, mux5)
}
/** Obtain a sequence of (n) RDDs where the jth RDD is obtained from flattening the jth elements
* returned by applying (f) to each input partition.
* @param n The length of sequence returned from (f)
* @param f Function maps data from a partition into a sequence of (n) sequences of type U
* @param persist The storage level to apply to the intermediate result returned by (f)
* @return The sequence of RDDs, as described above
*/
def flatMuxPartitions[U :ClassTag](
    n: Int, f: Iterator[T] => Seq[TraversableOnce[U]],
    persist: StorageLevel = defaultSL):
    Seq[RDD[U]] = {
  // Delegate to the index-aware variant; the partition id is simply ignored.
  flatMuxPartitionsWithIndex(n, (_: Int, data: Iterator[T]) => f(data), persist)
}
/** Obtain a sequence of (n) RDDs where the jth RDD is obtained from flattening the jth elements
* returned by applying (f) to each input partition and its id
* @param n The length of sequence returned from (f)
* @param f Function maps data from a partition and its id into a sequence of (n)
* sequences of type U
* @param persist The storage level to apply to the intermediate result returned by (f)
* @return The sequence of RDDs, as described above
*/
def flatMuxPartitionsWithIndex[U :ClassTag](
  n: Int, f: (Int, Iterator[T]) => Seq[TraversableOnce[U]],
  persist: StorageLevel = defaultSL):
  Seq[RDD[U]] = {
  require(n >= 0, "expected sequence length must be >= 0")
  // Run f exactly once per partition; the persisted n-element result is
  // shared by the n flattened projections below.
  val mux = self.mapPartitionsWithIndex { case (id, itr) =>
    val r = f(id, itr)
    require(r.length == n, s"multiplexed sequence was not expected length $n")
    Iterator.single(r)
  }.persist(persist)
  // Output j flattens the j-th sub-sequence of each partition's result.
  Vector.tabulate(n) { j => mux.mapPartitions { itr => itr.next()(j).toIterator } }
}
/** Obtain a tuple of RDDs where the jth component is obtained from flattening the corresponding
* components returned by applying (f) to each input partition.
* @param f Function maps data from a partition into a tuple of sequences
* @param persist The storage level to apply to the intermediate result returned by (f)
* @return The tuple of RDDs, as described above
*/
def flatMux2Partitions[U1 :ClassTag, U2 :ClassTag](
    f: Iterator[T] => (TraversableOnce[U1], TraversableOnce[U2]),
    persist: StorageLevel = defaultSL):
    (RDD[U1], RDD[U2]) = {
  // Delegate to the index-aware variant; the partition id is simply ignored.
  flatMux2PartitionsWithIndex((_: Int, data: Iterator[T]) => f(data), persist)
}
/** Obtain a tuple of RDDs where the jth component is obtained from flattening the corresponding
* components returned by applying (f) to each input partition and its id.
* @param f Function maps data from a partition and its id into a tuple of sequences
* @param persist The storage level to apply to the intermediate result returned by (f)
* @return The tuple of RDDs, as described above
*/
def flatMux2PartitionsWithIndex[U1 :ClassTag, U2 :ClassTag](
  f: (Int, Iterator[T]) => (TraversableOnce[U1], TraversableOnce[U2]),
  persist: StorageLevel = defaultSL):
  (RDD[U1], RDD[U2]) = {
  // Evaluate f once per partition; the persisted singleton tuple is shared
  // by the two flattened projections below.
  val mux = self.mapPartitionsWithIndex { case (id, itr) =>
    Iterator.single(f(id, itr))
  }.persist(persist)
  val mux1 = mux.mapPartitions(itr => itr.next._1.toIterator)
  val mux2 = mux.mapPartitions(itr => itr.next._2.toIterator)
  (mux1, mux2)
}
/** Split each partition's data into three sequences via (f), returning a triple of
 * RDDs whose jth member is the flattening of the jth sequences.
 * @param f Function mapping a partition's data to a triple of sequences
 * @param persist Storage level applied to the cached intermediate result of (f)
 * @return The triple of RDDs, as described above
 */
def flatMux3Partitions[U1 :ClassTag, U2 :ClassTag, U3 :ClassTag](
  f: Iterator[T] => (TraversableOnce[U1], TraversableOnce[U2], TraversableOnce[U3]),
  persist: StorageLevel = defaultSL):
  (RDD[U1], RDD[U2], RDD[U3]) = {
  // Delegate to the indexed variant, discarding the partition id.
  val dropId = (ignored: Int, data: Iterator[T]) => f(data)
  flatMux3PartitionsWithIndex(dropId, persist)
}
/** Split each partition's data into three sequences via (f) applied to the data and
 * its partition id, returning a triple of RDDs whose jth member is the flattening
 * of the jth sequences.
 * @param f Function mapping a partition id and its data to a triple of sequences
 * @param persist Storage level applied to the cached intermediate result of (f)
 * @return The triple of RDDs, as described above
 */
def flatMux3PartitionsWithIndex[U1 :ClassTag, U2 :ClassTag, U3 :ClassTag](
  f: (Int, Iterator[T]) => (TraversableOnce[U1], TraversableOnce[U2], TraversableOnce[U3]),
  persist: StorageLevel = defaultSL):
  (RDD[U1], RDD[U2], RDD[U3]) = {
  // Cache one tuple of sequences per partition so all output RDDs can
  // re-read the result of (f) without recomputing it.
  val muxed = self.mapPartitionsWithIndex((id, data) => Iterator.single(f(id, data))).persist(persist)
  val out1 = muxed.mapPartitions(_.next._1.toIterator)
  val out2 = muxed.mapPartitions(_.next._2.toIterator)
  val out3 = muxed.mapPartitions(_.next._3.toIterator)
  (out1, out2, out3)
}
/** Split each partition's data into four sequences via (f), returning a 4-tuple of
 * RDDs whose jth member is the flattening of the jth sequences.
 * @param f Function mapping a partition's data to a 4-tuple of sequences
 * @param persist Storage level applied to the cached intermediate result of (f)
 * @return The 4-tuple of RDDs, as described above
 */
def flatMux4Partitions[U1 :ClassTag, U2 :ClassTag, U3 :ClassTag, U4 :ClassTag](
  f: Iterator[T] => (TraversableOnce[U1], TraversableOnce[U2], TraversableOnce[U3], TraversableOnce[U4]),
  persist: StorageLevel = defaultSL):
  (RDD[U1], RDD[U2], RDD[U3], RDD[U4]) = {
  // Delegate to the indexed variant, discarding the partition id.
  val dropId = (ignored: Int, data: Iterator[T]) => f(data)
  flatMux4PartitionsWithIndex(dropId, persist)
}
/** Split each partition's data into four sequences via (f) applied to the data and
 * its partition id, returning a 4-tuple of RDDs whose jth member is the flattening
 * of the jth sequences.
 * @param f Function mapping a partition id and its data to a 4-tuple of sequences
 * @param persist Storage level applied to the cached intermediate result of (f)
 * @return The 4-tuple of RDDs, as described above
 */
def flatMux4PartitionsWithIndex[U1 :ClassTag, U2 :ClassTag, U3 :ClassTag, U4 :ClassTag](
  f: (Int, Iterator[T]) => (TraversableOnce[U1], TraversableOnce[U2], TraversableOnce[U3], TraversableOnce[U4]),
  persist: StorageLevel = defaultSL):
  (RDD[U1], RDD[U2], RDD[U3], RDD[U4]) = {
  // Cache one tuple of sequences per partition so all output RDDs can
  // re-read the result of (f) without recomputing it.
  val muxed = self.mapPartitionsWithIndex((id, data) => Iterator.single(f(id, data))).persist(persist)
  val out1 = muxed.mapPartitions(_.next._1.toIterator)
  val out2 = muxed.mapPartitions(_.next._2.toIterator)
  val out3 = muxed.mapPartitions(_.next._3.toIterator)
  val out4 = muxed.mapPartitions(_.next._4.toIterator)
  (out1, out2, out3, out4)
}
/** Split each partition's data into five sequences via (f), returning a 5-tuple of
 * RDDs whose jth member is the flattening of the jth sequences.
 * @param f Function mapping a partition's data to a 5-tuple of sequences
 * @param persist Storage level applied to the cached intermediate result of (f)
 * @return The 5-tuple of RDDs, as described above
 */
def flatMux5Partitions[U1 :ClassTag, U2 :ClassTag, U3 :ClassTag, U4 :ClassTag, U5 :ClassTag](
  f: Iterator[T] => (TraversableOnce[U1], TraversableOnce[U2], TraversableOnce[U3], TraversableOnce[U4], TraversableOnce[U5]),
  persist: StorageLevel = defaultSL):
  (RDD[U1], RDD[U2], RDD[U3], RDD[U4], RDD[U5]) = {
  // Delegate to the indexed variant, discarding the partition id.
  val dropId = (ignored: Int, data: Iterator[T]) => f(data)
  flatMux5PartitionsWithIndex(dropId, persist)
}
/** Split each partition's data into five sequences via (f) applied to the data and
 * its partition id, returning a 5-tuple of RDDs whose jth member is the flattening
 * of the jth sequences.
 * @param f Function mapping a partition id and its data to a 5-tuple of sequences
 * @param persist Storage level applied to the cached intermediate result of (f)
 * @return The 5-tuple of RDDs, as described above
 */
def flatMux5PartitionsWithIndex[U1 :ClassTag, U2 :ClassTag, U3 :ClassTag, U4 :ClassTag, U5 :ClassTag](
  f: (Int, Iterator[T]) => (TraversableOnce[U1], TraversableOnce[U2], TraversableOnce[U3], TraversableOnce[U4], TraversableOnce[U5]),
  persist: StorageLevel = defaultSL):
  (RDD[U1], RDD[U2], RDD[U3], RDD[U4], RDD[U5]) = {
  // Cache one tuple of sequences per partition so all output RDDs can
  // re-read the result of (f) without recomputing it.
  val muxed = self.mapPartitionsWithIndex((id, data) => Iterator.single(f(id, data))).persist(persist)
  val out1 = muxed.mapPartitions(_.next._1.toIterator)
  val out2 = muxed.mapPartitions(_.next._2.toIterator)
  val out3 = muxed.mapPartitions(_.next._3.toIterator)
  val out4 = muxed.mapPartitions(_.next._4.toIterator)
  val out5 = muxed.mapPartitions(_.next._5.toIterator)
  (out1, out2, out3, out4, out5)
}
}
/** Definitions used by MuxRDDFunctions instances */
object MuxRDDFunctions {
// Default storage level for the cached per-partition multiplex result.
val defaultSL = StorageLevel.MEMORY_ONLY
}
/** Implicit conversions to enhance RDDs with multiplexing methods */
object implicits {
import scala.language.implicitConversions
// Wraps any RDD so the muxPartitions / flatMuxPartitions family becomes available on it.
implicit def toMuxRDDFunctions[T :ClassTag](rdd: RDD[T]): MuxRDDFunctions[T] =
new MuxRDDFunctions(rdd)
}
| willb/silex | src/main/scala/com/redhat/et/silex/rdd/multiplex.scala | Scala | apache-2.0 | 19,095 |
/*
* Copyright (c) 2011-14 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless
/**
 * JUnit suite for shapeless `Typeable`/`TypeCase`: type-safe runtime casts
 * (`cast`, `narrowTo`) that also verify type arguments where the runtime
 * representation allows (collections, tuples, products, HLists, coproducts).
 */
class TypeableTests {
import java.{ lang => jl }
import org.junit.Test
import org.junit.Assert._
import syntax.typeable._
import test._
// Casting Any to each primitive value type succeeds when the runtime value matches.
@Test
def testPrimitives {
val b: Any = 23.toByte
val cb = b.cast[Byte]
assertTrue(cb.isDefined)
val s: Any = 23.toShort
val cs = s.cast[Short]
assertTrue(cs.isDefined)
val c: Any = 'c'
val cc = c.cast[Char]
assertTrue(cc.isDefined)
val i: Any = 23
val ci = i.cast[Int]
assertTrue(ci.isDefined)
val l: Any = 23L
val cl = l.cast[Long]
assertTrue(cl.isDefined)
val f: Any = 23.0F
val cf = f.cast[Float]
assertTrue(cf.isDefined)
val d: Any = 23.0
val cd = d.cast[Double]
assertTrue(cd.isDefined)
val bl: Any = true
val cbl = bl.cast[Boolean]
assertTrue(cbl.isDefined)
val u: Any = ()
val cu = u.cast[Unit]
assertTrue(cu.isDefined)
}
// Casts also succeed when targeting the boxed java.lang wrapper classes.
@Test
def testBoxedPrimitives {
val b: Any = 23.toByte
val cb = b.cast[jl.Byte]
assertTrue(cb.isDefined)
val s: Any = 23.toShort
val cs = s.cast[jl.Short]
assertTrue(cs.isDefined)
val c: Any = 'c'
val cc = c.cast[jl.Character]
assertTrue(cc.isDefined)
val i: Any = 23
val ci = i.cast[jl.Integer]
assertTrue(ci.isDefined)
val l: Any = 23L
val cl = l.cast[jl.Long]
assertTrue(cl.isDefined)
val f: Any = 23.0F
val cf = f.cast[jl.Float]
assertTrue(cf.isDefined)
val d: Any = 23.0
val cd = d.cast[jl.Double]
assertTrue(cd.isDefined)
val bl: Any = true
val cbl = bl.cast[jl.Boolean]
assertTrue(cbl.isDefined)
}
// Container casts compensate for erasure by inspecting elements; note that an
// empty container (Nil) successfully casts to any element type.
@Test
def testUnerased {
val li: Any = List(1, 2, 3, 4)
val cli = li.cast[List[Int]]
assertTrue(cli.isDefined)
val cli2 = li.cast[List[String]]
assertTrue(cli2.isEmpty)
val ls: Any = List("foo", "bar", "baz")
val cls = ls.cast[List[String]]
assertTrue(cls.isDefined)
val cls2 = ls.cast[List[Int]]
assertTrue(cls2.isEmpty)
val lvs: Any = List(Vector("foo", "bar", "baz"), Vector("wibble"))
val clvs = lvs.cast[List[Vector[String]]]
assertTrue(clvs.isDefined)
val clvs2 = lvs.cast[List[Vector[Int]]]
assertTrue(clvs2.isEmpty)
val clvs3 = lvs.cast[List[List[String]]]
assertTrue(clvs3.isEmpty)
val ln: Any = Nil
val cln = ln.cast[List[Int]]
assert(cln.isDefined)
val cln2 = ln.cast[List[String]]
assert(cln2.isDefined)
val si: Any = Set(1, 2, 3, 4)
val csi = si.cast[Set[Int]]
assertTrue(csi.isDefined)
val csi2 = si.cast[Set[String]]
assertTrue(csi2.isEmpty)
}
trait Poly[T]
// No Typeable instance may be derived for types whose arguments cannot be
// verified at runtime (functions, unconstrained type constructors).
@Test
def testErased {
illTyped("""
Typeable[Int => String]
""")
illTyped("""
Typeable[Poly[Int]]
""")
}
// HList casts verify each component's type.
@Test
def testHList {
val lisdb: Any = 23 :: "foo" :: false :: HNil
val clisdb = lisdb.cast[Int :: String :: Boolean :: HNil]
assertTrue(clisdb.isDefined)
val clisdb2 = lisdb.cast[Int :: String :: Double :: HNil]
assertTrue(clisdb2.isEmpty)
}
// Coproduct casts succeed only when the injected value's type occurs in the
// target coproduct.
// NOTE(review): method name has a trailing-t typo ("Coproductt"); renaming
// would change the test API, so it is only flagged here.
@Test
def testCoproductt {
type CP = Int :+: String :+: Double :+: Boolean :+: CNil
type CP2 = Char :+: Long :+: Unit :+: CNil
val cpi: Any = Coproduct[CP](23)
val ccpi = cpi.cast[CP]
assertTrue(ccpi.isDefined)
val cps: Any = Coproduct[CP]("foo")
val ccps = cps.cast[CP]
assertTrue(ccps.isDefined)
val cpd: Any = Coproduct[CP](2.0)
val ccpd = cpd.cast[CP]
assertTrue(ccpd.isDefined)
val cpb: Any = Coproduct[CP](true)
val ccpb = cpb.cast[CP]
assertTrue(ccpb.isDefined)
val cpc: Any = Coproduct[CP2]('c')
val ccpc = cpc.cast[CP]
assertTrue(ccpc.isEmpty)
val cpl: Any = Coproduct[CP2](13L)
val ccpl = cpl.cast[CP]
assertTrue(ccpl.isEmpty)
val cpu: Any = Coproduct[CP2](())
val ccpu = cpu.cast[CP]
assertTrue(ccpu.isEmpty)
}
// AnyVal and AnyRef are mutually exclusive targets.
@Test
def testAnys {
val v: Any = 23
val cv = v.cast[AnyVal]
assertTrue(cv.isDefined)
val cv2 = v.cast[AnyRef]
assertTrue(cv2.isEmpty)
val r: Any = "foo"
val cr = r.cast[AnyRef]
assertTrue(cr.isDefined)
val cr2 = r.cast[AnyVal]
assertTrue(cr2.isEmpty)
}
// null never casts successfully, regardless of target type.
@Test
def testNull {
val n: Any = null
val cn = n.cast[AnyVal]
assertTrue(!cn.isDefined)
val cn1 = n.cast[AnyRef]
assertTrue(!cn1.isDefined)
val cn2 = n.cast[Int]
assertTrue(!cn2.isDefined)
val cn3 = n.cast[String]
assertTrue(!cn3.isDefined)
val cn4 = n.cast[List[Int]]
assertTrue(!cn4.isDefined)
val cn5 = n.cast[HNil]
assertTrue(!cn5.isDefined)
val cn6 = n.cast[Int :: String :: Boolean :: HNil]
assertTrue(!cn6.isDefined)
val cn7 = n.cast[(Int, String)]
assertTrue(!cn7.isDefined)
}
// Existential targets only check the container type.
@Test
def testExistentials {
val l: Any = List(1, 2, 3, 4)
val cl = l.cast[List[_]]
assertTrue(cl.isDefined)
val cl2 = l.cast[Vector[_]]
assertTrue(cl2.isEmpty)
}
// Casting to each trait a value's class mixes in succeeds; unrelated traits fail.
@Test
def testTraits {
trait A
trait B
trait C
class D extends A with B
val d: Any = new D
val cd = d.cast[A]
assertTrue(cd.isDefined)
val cd2 = d.cast[B]
assertTrue(cd2.isDefined)
val cd3 = d.cast[C]
assertTrue(cd3.isEmpty)
}
// Intersection targets require the value to conform to every member, in any order.
@Test
def testIntersections {
trait A
trait B
trait C
class D extends A with B
val d: Any = new D
val cd = d.cast[A with B]
assertTrue(cd.isDefined)
val cd2 = d.cast[B with A]
assertTrue(cd2.isDefined)
val cd3 = d.cast[A with C]
assertTrue(cd3.isEmpty)
val cd4 = d.cast[C with A]
assertTrue(cd4.isEmpty)
}
// narrowTo is only available for genuine narrowing; widening casts must not compile.
@Test
def testNarrowTo {
trait A
trait B
class C extends A with B
val c: C = new C
val a: A = c
val cc1 = a.narrowTo[C]
assertTrue(cc1.isDefined)
val b: B = c
val cc2 = b.narrowTo[C]
assertTrue(cc2.isDefined)
illTyped("""
val ca = b.narrowTo[A]
""")
illTyped("""
val cb = a.narrowTo[B]
""")
}
// Tuple and Map casts verify component / key-value types element-wise.
@Test
def testTuples {
val p: Any = (23, "foo")
val cp = p.cast[(Int, String)]
assertTrue(cp.isDefined)
val cp2 = p.cast[(Boolean, String)]
assertTrue(cp2.isEmpty)
val cp3 = p.cast[(Int, List[String])]
assertTrue(cp3.isEmpty)
val m: Any = Map(1 -> "1", 2 -> "2", 3 -> "3")
val cm = m.cast[Map[Int, String]]
assertTrue(cm.isDefined)
val cm2 = m.cast[Map[Boolean, String]]
assertTrue(cm2.isEmpty)
val cm3 = m.cast[Map[Int, List[String]]]
assertTrue(cm3.isEmpty)
}
// Option casts inspect the contained element when present.
@Test
def testOption {
val o: Any = Option(23)
val co = o.cast[Option[Int]]
assertTrue(co.isDefined)
val co2 = o.cast[Option[String]]
assertTrue(co2.isEmpty)
val co3 = o.cast[Option[Any]]
assertTrue(co3.isDefined)
val co4 = o.cast[Option[_]]
assertTrue(co4.isDefined)
}
// Either casts check the populated side; the unpopulated side is unconstrained.
@Test
def testEither {
val ei: Any = Left[Int, String](23)
val cei = ei.cast[Either[Int, String]]
assertTrue(cei.isDefined)
val cei2 = ei.cast[Left[Int, String]]
assertTrue(cei2.isDefined)
val cei3 = ei.cast[Either[Int, _]]
assertTrue(cei3.isDefined)
val cei4 = ei.cast[Either[Boolean, String]]
assertTrue(cei4.isEmpty)
val es: Any = Right[Int, String]("foo")
val ces = es.cast[Either[Int, String]]
assertTrue(ces.isDefined)
val ces2 = es.cast[Right[Int, String]]
assertTrue(ces2.isDefined)
val ces3 = es.cast[Either[_, String]]
assertTrue(ces3.isDefined)
val ces4 = es.cast[Either[Int, Unit]]
assertTrue(ces4.isEmpty)
}
// Fixture case classes for product-type casts.
case class Foo(i: Int, s: String, b: Boolean)
case class Bar[T](t: T)
case class Baz[A, B](a: A, b: B, i: Int)
// Case-class casts verify field types, including generic fields.
@Test
def testProducts {
val foo: Any = Foo(23, "foo", true)
val iBar: Any = Bar(23)
val sBar: Any = Bar("bar")
val baz: Any = Baz("s", 0.5, 9)
val cfoo1 = foo.cast[Foo]
assertTrue(cfoo1.isDefined)
val cfoo2 = iBar.cast[Foo]
assertTrue(cfoo2.isEmpty)
val cbar1 = iBar.cast[Bar[Int]]
assertTrue(cbar1.isDefined)
val cbar2 = sBar.cast[Bar[String]]
assertTrue(cbar2.isDefined)
val cbar3 = iBar.cast[Bar[String]]
assertTrue(cbar3.isEmpty)
val cbar4 = sBar.cast[Bar[Int]]
assertTrue(cbar4.isEmpty)
val cbaz1 = baz.cast[Baz[String, Double]]
assertTrue(cbaz1.isDefined)
assertEquals(cbaz1.get.a, "s")
val cbaz2 = baz.cast[Baz[Double, String]]
assertTrue(cbaz2.isEmpty)
}
// Typeable.caseClassTypeable is unsafe
// for these, so we should refuse to
// generate an instance for them:
case class Gen1[A](i: Int)(a: A)
trait Tc[A]
case class Gen2[A: Tc](i: Int)
case class Gen3[A](i: Int) {
var a: A = _
}
abstract class Abs[A](a: A) {
val x: A = a
}
case class Gen4[A](i: Int)(a: A) extends Abs[A](a)
// Case classes whose type parameter escapes the first parameter list (extra
// parameter lists, context bounds, vars, inherited fields) must not get instances.
@Test
def testIllegalProducts {
illTyped("""Typeable[Gen1[Int]]""")
illTyped("""Typeable[Gen2[Int]]""")
illTyped("""Typeable[Gen3[Int]]""")
illTyped("""Typeable[Gen4[Int]]""")
}
// TypeCase turns a Typeable into an extractor usable in pattern matches,
// including nested/structured targets.
@Test
def testTypeCase {
import HList.ListCompat._
def typeCase[T: Typeable](t: Any): Option[T] = {
val T = TypeCase[T]
val `List[T]` = TypeCase[List[T]]
val `(String, T)` = TypeCase[(String, T)]
val `List[(String, T)]` = TypeCase[List[(String, T)]]
t match {
case T(t) => Some(t)
case `List[T]`(lt) => lt.headOption
case `(String, T)`(s, t) => typed[String](s) ; Some(t)
case `List[(String, T)]`((s, t) :: _) => typed[String](s); Some(t)
case `List[(String, T)]`(lst) => assertTrue(lst.isEmpty) ; None
case _ => None
}
}
assertEquals(Some(23), typeCase[Int](23: Any))
assertEquals(None, typeCase[String](23: Any))
assertEquals(Some(23), typeCase[Int](List(23): Any))
assertEquals(None, typeCase[String](List(23): Any))
assertEquals(Some(23), typeCase[Int](("foo", 23): Any))
assertEquals(None, typeCase[String](("foo", 23): Any))
assertEquals(Some(23), typeCase[Int](List(("foo", 23)): Any))
assertEquals(None, typeCase[String](List(("foo", 23)): Any))
}
// Singleton-typed targets (literal types via Witness, object types) match only
// the exact value.
@Test
def testSingletons {
val wOne = Witness(1)
type One = wOne.T
val wTrue = Witness(true)
type True = wTrue.T
val wFoo = Witness("foo")
type Foo = wFoo.T
val wSym = Witness('Foo)
type Sym = wSym.T
object ObjA
object ObjB
val c1 = (1: Any).cast[One]
typed[Option[One]](c1)
assertEquals(Some(1), c1)
val c2 = (0: Any).cast[One]
typed[Option[One]](c2)
assertEquals(None, c2)
val c3 = (true: Any).cast[True]
typed[Option[True]](c3)
assertEquals(Some(true), c3)
val c4 = (false: Any).cast[True]
typed[Option[True]](c4)
assertEquals(None, c4)
val c5 = ("foo": Any).cast[Foo]
typed[Option[Foo]](c5)
assertEquals(Some("foo"), c5)
val c6 = ("bar": Any).cast[Foo]
typed[Option[Foo]](c6)
assertEquals(None, c6)
val c7 = ('Foo: Any).cast[Sym]
typed[Option[Sym]](c7)
assertEquals(Some('Foo), c7)
val c8 = ('Bar: Any).cast[Sym]
typed[Option[Sym]](c8)
assertEquals(None, c8)
val c9 = (ObjA: Any).cast[ObjA.type]
typed[Option[ObjA.type]](c9)
assertEquals(Some(ObjA), c9)
val c10 = (ObjB: Any).cast[ObjA.type]
typed[Option[ObjA.type]](c10)
assertEquals(None, c10)
}
// Fixtures for the toString tests below.
trait A
trait B
class C extends A with B
// Every Typeable instance renders a human-readable description of its target type.
@Test
def testToString {
def typeableString[T](t: T)(implicit tp: Typeable[T]) = tp.toString
val i: Int = 7
assertEquals("Typeable[Int]", typeableString(i))
val u: Unit = ()
assertEquals("Typeable[Unit]", typeableString(u))
val a: Any = ()
assertEquals("Typeable[Any]", typeableString(a))
val av: AnyVal = 7
assertEquals("Typeable[AnyVal]", typeableString(av))
val ar: AnyRef = ""
assertEquals("Typeable[AnyRef]", typeableString(ar))
val f: Foo = Foo(0, "", true)
assertEquals("Typeable[Foo]", typeableString(f))
val bi: Bar[Int] = Bar(23)
assertEquals("Typeable[Bar[Int]]", typeableString(bi))
val i1: A with B = new C
assertEquals("Typeable[A with B]", typeableString(i1))
assertEquals("Typeable[A]", typeableString(new A{}))
val o: Option[Long] = Some(4l)
assertEquals("Typeable[Option[Long]]", typeableString(o))
val e: Either[Long, String] = Right("")
assertEquals("Typeable[Either[Long, String]]", typeableString(e))
assertEquals("Typeable[Right[Long]]", typeableString(Right(3l)))
val l: List[Int] = List(1,2)
assertEquals("Typeable[List[Int]]", typeableString(l))
val m: Map[Int, String] = Map(1 -> "one", 2 -> "two")
assertEquals("Typeable[Map[Int, String]]", typeableString(m))
assertEquals("Typeable[HNil.type]", typeableString(HNil))
val hl = 1 :: "" :: HNil
assertEquals("Typeable[Int :: String :: HNil]", typeableString(hl))
type CP = Double :+: Boolean :+: CNil
val cpd: CP = Coproduct[CP](2.0)
assertEquals("Typeable[Double :+: Boolean :+: CNil]", typeableString(cpd))
val wOne = Witness(1)
type One = wOne.T
val one: One = 1
assertEquals("Typeable[Int(1)]", typeableString(one))
object FooBar
val wFB = Witness(FooBar)
type FooBarT = wFB.T
val foobar: FooBarT = FooBar
assertEquals("Typeable[FooBar.type]", typeableString(foobar))
val tc = TypeCase[List[Int]]
assertEquals("TypeCase[List[Int]]", tc.toString)
}
}
| lukasz-golebiewski/shapeless | core/src/test/scala/shapeless/typeable.scala | Scala | apache-2.0 | 13,964 |
package eventstore
package core
package operations
import scala.util.{ Failure, Success }
import org.specs2.mutable.Specification
import Inspection.Decision.{Retry, Stop, Fail}
import OperationError._
import TestData._
// Maps each server reply for a TransactionCommit command onto a
// retry / stop / fail decision.
class TransactionCommitInspectionSpec extends Specification {
// the partial function under test
val inspection = TransactionCommitInspection(transactionCommit).pf
"TransactionCommitInspection" should {
"handle TransactionCommitCompleted" in {
// success: the operation is finished
inspection(Success(transactionCommitCompleted)) mustEqual Stop
}
// the three timeout variants are transient, so the operation is retried
"handle PrepareTimeout" in {
inspection(Failure(PrepareTimeout)) mustEqual Retry
}
"handle CommitTimeout" in {
inspection(Failure(CommitTimeout)) mustEqual Retry
}
"handle ForwardTimeout" in {
inspection(Failure(ForwardTimeout)) mustEqual Retry
}
// the remaining errors are permanent and fail the operation with a typed exception
"handle WrongExpectedVersion" in {
inspection(Failure(WrongExpectedVersion)) must beLike {
case Fail(_: WrongExpectedVersionException) => ok
}
}
"handle StreamDeleted" in {
inspection(Failure(StreamDeleted)) must beLike {
case Fail(_: StreamDeletedException) => ok
}
}
"handle InvalidTransaction" in {
inspection(Failure(InvalidTransaction)) mustEqual Fail(InvalidTransactionException)
}
"handle AccessDenied" in {
inspection(Failure(AccessDenied)) must beLike {
case Fail(_: AccessDeniedException) => ok
}
}
}
}
| EventStore/EventStore.JVM | core/src/test/scala/eventstore/core/operations/TransactionCommitInspectionSpec.scala | Scala | bsd-3-clause | 1,424 |
package ml.combust.mleap.core.feature
import ml.combust.mleap.core.types._
import org.scalatest.FunSpec
/** Checks that WordLengthFilterModel declares string-list ports on both sides. */
class WordLengthFilterModelSpec extends FunSpec {
  describe("word length filter model") {
    val model = new WordLengthFilterModel(5)
    // Both ports carry a list of strings; only the field name differs.
    def stringListField(name: String) = StructField(name, ListType(BasicType.String))
    it("has the right input schema") {
      assert(model.inputSchema.fields == Seq(stringListField("input")))
    }
    it("has the right output schema") {
      assert(model.outputSchema.fields == Seq(stringListField("output")))
    }
  }
}
| combust/mleap | mleap-core/src/test/scala/ml/combust/mleap/core/feature/WordLengthFilterModelSpec.scala | Scala | apache-2.0 | 553 |
package com.datastax.spark.connector.rdd.partitioner
import org.scalatest.{Matchers, FlatSpec}
import com.datastax.spark.connector.cql.CassandraConnector
import com.datastax.spark.connector.embedded.EmbeddedCassandra
import com.datastax.spark.connector.rdd.partitioner.dht.LongToken
import com.datastax.spark.connector.testkit.SharedEmbeddedCassandra
// Integration spec for DataSizeEstimates, which derives partition-count and
// data-size guesses for a table from Cassandra's size-estimates data.
class DataSizeEstimatesSpec extends FlatSpec with Matchers with SharedEmbeddedCassandra {
useCassandraConfig(Seq("cassandra-default.yaml.template"))
val conn = CassandraConnector(hosts = Set(EmbeddedCassandra.getHost(0)))
val keyspaceName = "data_size_estimates"
// fixture: create the test keyspace once on the embedded cluster
conn.withSessionDo { session =>
session.execute(
s"CREATE KEYSPACE IF NOT EXISTS $keyspaceName " +
s"WITH REPLICATION = { 'class': 'SimpleStrategy', 'replication_factor': 1 }")
}
// TODO: enable this test once we upgrade to 2.1.5, which populates the size estimates table
// We cannot enable this test till we have a way of forcing the size estimates table to update
// https://issues.apache.org/jira/browse/CASSANDRA-9579 should provide us with the hook neccessary
// to trigger this event
"DataSizeEstimates" should "fetch data size estimates for a known table" ignore {
val tableName = "table1"
conn.withSessionDo { session =>
session.execute(
s"CREATE TABLE IF NOT EXISTS $keyspaceName.$tableName(key int PRIMARY KEY, value VARCHAR)")
for (i <- 1 to 10000)
session.execute(
s"INSERT INTO $keyspaceName.$tableName(key, value) VALUES (?, ?)",
i.asInstanceOf[AnyRef],
"value" + i)
}
val estimates = new DataSizeEstimates[Long, LongToken](conn, keyspaceName, tableName)
// loose bounds: size estimates are approximate, so only the magnitude is checked
estimates.partitionCount should be > 5000L
estimates.partitionCount should be < 20000L
estimates.dataSizeInBytes should be > 0L
}
it should "should return zeroes for an empty table" in {
val tableName = "table2"
conn.withSessionDo { session =>
session.execute(
s"CREATE TABLE IF NOT EXISTS $keyspaceName.$tableName(key int PRIMARY KEY, value VARCHAR)")
}
val estimates = new DataSizeEstimates[Long, LongToken](conn, keyspaceName, tableName)
estimates.partitionCount shouldBe 0L
estimates.dataSizeInBytes shouldBe 0L
}
// a missing table must degrade gracefully rather than throw
it should "return zeroes for a non-existing table" in {
val tableName = "table3"
val estimates = new DataSizeEstimates[Long, LongToken](conn, keyspaceName, tableName)
estimates.partitionCount shouldBe 0L
estimates.dataSizeInBytes shouldBe 0L
}
}
| rafaelbarreto87/spark-cassandra-connector | spark-cassandra-connector/src/it/scala/com/datastax/spark/connector/rdd/partitioner/DataSizeEstimatesSpec.scala | Scala | apache-2.0 | 2,547 |
/*
* Tables.scala
*
* Copyright 2017 wayfarerx <x@wayfarerx.net> (@thewayfarerx)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.wayfarerx.dreamsleeve.data
package binary_data
import scodec.Codec
import scodec.codecs._
/**
* Binary support for the table factory object.
*/
trait Tables {
/** The implicit fragment discriminator for tables. */
// Delegates to the single shared instance held by the Tables object below.
@inline
final implicit def binaryAsFragment: Discriminator[Fragment, Table, Int] = Tables.AsFragment
/** The implicit table codec. */
@inline
final implicit def binaryCodec: Codec[Table] = Tables.Codec
}
/**
* Support for binary table codecs.
*/
object Tables {
/** The fragment discriminator for tables. */
// Tag value 3 distinguishes tables from the other fragment variants in the binary form.
val AsFragment: Discriminator[Fragment, Table, Int] = Discriminator(3)
/** The table codec. */
// Encodes entries as a uint16 length prefix followed by value/fragment pairs;
// `lazily` breaks the recursive dependency on Fragments.Codec.
val Codec: Codec[Table] =
lazily(vectorOfN(uint16, Values.Codec ~ Fragments.Codec).xmap(Table(_: _*), _.entries.toVector))
} | wayfarerx/dreamsleeve | shared/data/src/main/scala/net/wayfarerx/dreamsleeve/data/binary_data/Tables.scala | Scala | apache-2.0 | 1,437 |
package com.twitter.finatra.thrift.tests.doeverything.filters
import com.twitter.finagle.{TimeoutException, Service}
import com.twitter.finatra.thrift.thriftscala.ServerErrorCause.InternalServerError
import com.twitter.finatra.thrift.thriftscala.{ServerError, NoClientIdError, UnknownClientIdError, ClientError}
import com.twitter.finatra.thrift.thriftscala.ClientErrorCause.RequestTimeout
import com.twitter.finatra.thrift.{ThriftRequest, ThriftFilter}
import com.twitter.inject.Logging
import com.twitter.util.{NonFatal, Future}
/**
 * Translates exceptions escaping downstream Thrift services into the error
 * types declared by the IDL: timeouts become request-timeout `ClientError`s,
 * already-translated errors pass through untouched, and any other non-fatal
 * failure is logged and surfaced as an internal `ServerError`.
 */
class ExceptionTranslationFilter
  extends ThriftFilter
  with Logging {

  override def apply[T, U](request: ThriftRequest[T], service: Service[ThriftRequest[T], U]): Future[U] =
    service(request).rescue {
      case timeout: TimeoutException =>
        Future.exception(ClientError(RequestTimeout, timeout.getMessage))
      // errors already in the wire-level vocabulary are forwarded as-is
      case passThrough @ (_: ClientError | _: UnknownClientIdError | _: NoClientIdError) =>
        Future.exception(passThrough)
      case NonFatal(other) =>
        error("Unhandled exception", other)
        Future.exception(ServerError(InternalServerError, other.getMessage))
    }
}
| syamantm/finatra | thrift/src/test/scala/com/twitter/finatra/thrift/tests/doeverything/filters/ExceptionTranslationFilter.scala | Scala | apache-2.0 | 1,204 |
package views
/**
*
* @author ponkotuy
* Date: 15/02/09.
*/
object URL {
  // Absolute site root; every other URL is derived from it.
  val Host = "https://myfleet.moe"
  // The landing page is simply the host root.
  def Index = Host
  val Entire = Host + "/entire"
  val Statistics = Entire + "/activities"
  val Login = Entire + "/login"
  val Favicon = Host + "/favicon.ico"
  val Assets = Host + "/assets"
}
| b-wind/myfleet_web | app/views/URL.scala | Scala | mit | 315 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package base
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScAnnotationsHolder
import org.jetbrains.plugins.scala.lang.psi.api.statements.params._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypeParametersOwner
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.light.ScPrimaryConstructorWrapper
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.api.designator._
import org.jetbrains.plugins.scala.lang.psi.types.api.{TypeParameter, TypeParameterType}
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.{ScMethodType, ScTypePolymorphicType}
import org.jetbrains.plugins.scala.macroAnnotations.{Cached, CachedInsidePsiElement, ModCount}
import scala.collection.mutable.ArrayBuffer
/**
* @author Alexander Podkhalyuzin
* Date: 07.03.2008
*/
/**
 * PSI node for a Scala class's primary constructor: exposes its parameter
 * clauses (including compiler-implied ones), its constructor type, and the
 * Java-facing light wrappers generated for it.
 */
trait ScPrimaryConstructor extends ScMember with ScMethodLike with ScAnnotationsHolder {
// True when some clause contains a repeated (vararg) parameter in a non-final
// position, which is not a legal signature.
def hasMalformedSignature = parameterList.clauses.exists {
_.parameters.dropRight(1).exists(_.isRepeatedParameter)
}
/**
* @return has access modifier
*/
def hasModifier: Boolean
def getClassNameText: String
def parameterList: ScParameters
def parameters : Seq[ScClassParameter] = parameterList.clauses.flatMap(_.unsafeClassParameters)
/**
* return only parameters, which are additionally members.
*/
def valueParameters: Seq[ScClassParameter] = parameters.filter((p: ScClassParameter) => p.isVal || p.isVar)
/**
* All classes must have one non-implicit parameter list. If this is not declared in in the code,
* it is assumed by the compiler.
*
* In addition, view and context bounds generate an additional implicit parameter section.
*/
@CachedInsidePsiElement(this, ModCount.getBlockModificationCount)
def effectiveParameterClauses: Seq[ScParameterClause] = {
def emptyParameterList: ScParameterClause =
ScalaPsiElementFactory.createEmptyClassParamClauseWithContext(getManager, parameterList)
// prepend the implied empty non-implicit clause when the source omits it
val clausesWithInitialEmpty = parameterList.clauses match {
case Seq() => Seq(emptyParameterList)
case Seq(clause) if clause.isImplicit => Seq(emptyParameterList, clause)
case clauses => clauses
}
clausesWithInitialEmpty ++ syntheticParamClause
}
def effectiveFirstParameterSection: Seq[ScClassParameter] = effectiveParameterClauses.head.unsafeClassParameters
// Implicit clause synthesized from view/context bounds; suppressed when an
// explicit implicit clause already exists.
private def syntheticParamClause: Option[ScParameterClause] = {
val hasImplicit = parameterList.clauses.exists(_.isImplicit)
if (hasImplicit) None else ScalaPsiUtil.syntheticParamClause(containingClass.asInstanceOf[ScTypeParametersOwner], parameterList, classParam = true)
}
/**
 * Constructor type built by folding the parameter clauses (right to left) onto
 * the constructed type.
 * @param result Explicit result type; when absent it is derived from the owning
 *               type definition (parameterized with its own type parameters)
 */
def methodType(result: Option[ScType]): ScType = {
val parameters: ScParameters = parameterList
val clauses = parameters.clauses
val returnType: ScType = result.getOrElse({
val clazz = getParent.asInstanceOf[ScTypeDefinition]
val typeParameters = clazz.typeParameters
val parentClazz = ScalaPsiUtil.getPlaceTd(clazz)
// project through the enclosing template when the class is nested
val designatorType: ScType =
if (parentClazz != null)
ScProjectionType(ScThisType(parentClazz), clazz, superReference = false)
else ScDesignatorType(clazz)
if (typeParameters.isEmpty) designatorType
else {
ScParameterizedType(designatorType, typeParameters.map(TypeParameterType(_)))
}
})
if (clauses.isEmpty) return new ScMethodType(returnType, Seq.empty, false)(getProject, getResolveScope)
val res = clauses.foldRight[ScType](returnType){(clause: ScParameterClause, tp: ScType) =>
new ScMethodType(tp, clause.getSmartParameters, clause.isImplicit)(getProject, getResolveScope)
}
res.asInstanceOf[ScMethodType]
}
// methodType wrapped in the owning class's type parameters, when it has any.
def polymorphicType: ScType = {
val typeParameters = getParent.asInstanceOf[ScTypeDefinition].typeParameters
if (typeParameters.isEmpty) methodType
else ScTypePolymorphicType(methodType, typeParameters.map(TypeParameter(_)))
}
/**
 * Find a constructor parameter by name.
 * @param name parameter name to look up
 * @param clausePosition index of the effective clause to search, or -1 to
 *                       search every declared parameter
 */
def getParamByName(name: String, clausePosition: Int = -1): Option[ScParameter] = {
clausePosition match {
case -1 =>
for (param <- parameters if ScalaPsiUtil.memberNamesEquals(param.name, name)) return Some(param)
None
case i if i < 0 => None
case i if i >= effectiveParameterClauses.length => None
case i =>
val clause: ScParameterClause = effectiveParameterClauses.apply(i)
for (param <- clause.parameters if ScalaPsiUtil.memberNamesEquals(param.name, name)) return Some(param)
None
}
}
// Light wrappers presented to Java code: the plain constructor, a varargs
// bridge when @varargs applies to a leading repeated parameter, and one
// overload per default-valued parameter.
@Cached(synchronized = false, ModCount.getBlockModificationCount, this)
def getFunctionWrappers: Seq[ScPrimaryConstructorWrapper] = {
val buffer = new ArrayBuffer[ScPrimaryConstructorWrapper]()
buffer += new ScPrimaryConstructorWrapper(this)
for {
first <- parameterList.clauses.headOption
if first.hasRepeatedParam
if hasAnnotation("scala.annotation.varargs").isDefined
} {
buffer += new ScPrimaryConstructorWrapper(this, isJavaVarargs = true)
}
val params = parameters
for (i <- params.indices if params(i).baseDefaultParam) {
buffer += new ScPrimaryConstructorWrapper(this, forDefault = Some(i + 1))
}
buffer
}
}
object ScPrimaryConstructor {
/** Extractor yielding the owning `ScClass` of a primary constructor, when its containing class is one. */
object ofClass {
  def unapply(pc: ScPrimaryConstructor): Option[ScClass] =
    Option(pc.containingClass).collect { case c: ScClass => c }
}
} | whorbowicz/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/api/base/ScPrimaryConstructor.scala | Scala | apache-2.0 | 5,653 |
/*
* OpenURP, Open University Resouce Planning
*
* Copyright (c) 2013-2014, OpenURP Software.
*
* OpenURP is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* OpenURP is distributed in the hope that it will be useful.
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Beangle. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openurp.ws.services.teach.attendance.impl
import java.sql.Date
import java.util.{ Calendar, Timer, TimerTask }
import org.beangle.commons.bean.Initializing
import org.beangle.commons.io.IOs
import org.beangle.commons.lang.ClassLoaders
import org.beangle.commons.lang.Dates.toDate
import org.beangle.commons.lang.Numbers.toInt
import org.beangle.commons.lang.Strings
import org.beangle.commons.lang.Strings.{ isNotBlank, replace, split }
import org.beangle.commons.logging.Logging
import org.beangle.data.jdbc.query.JdbcExecutor
import org.openurp.ws.services.teach.attendance.domain.ShardPolicy._
/**
* 数据分区和导入守护线程
*
* @author chaostone
* @version 1.0, 2014/03/22
* @since 0.0.1
*/
/** Daemon task that creates date-sharded attendance tables and imports their data. */
class ShardDaemon extends TimerTask with Logging with Initializing {
// injected collaborators (set by the container before init())
var executor: JdbcExecutor = _
var importer: DataImporter = _
/** Time of day ("HHmm"-style, see init) of the first sync; when empty, the first run happens immediately at startup. */
var firstTime = ""
/** Period between table checks in milliseconds (once per day). */
var interval = 24 * (60 * 60 * 1000)
/** Ensures all shard tables (activity, detail, log) for the given date exist. */
def checkTable(date: Date) {
checkAndCreate(activityPolicy(date))
checkAndCreate(detailPolicy(date))
checkAndCreate(logPolicy(date))
}
/**
 * Creates the shard table described by `policy` (base table name, shard postfix) when it
 * does not exist yet, executing the DDL template ddl/create/<table>.sql with ${postfix}
 * substituted.
 */
private def checkAndCreate(policy: Tuple2[String, String]) {
val table = policy._1
val postfix = policy._2
val tableName = table + postfix
// 1. check existence via Oracle's user_tables catalog view.
// NOTE(review): the table name is concatenated into the SQL string; safe only because it
// comes from internal shard policies — confirm it can never carry external input.
val count = executor.queryForInt("select count(*) from user_tables where table_name='" + tableName.toUpperCase() + "'")
// 2. create the table when missing
if (count == 0) {
val url = ClassLoaders.getResource("ddl/create/" + table + ".sql", this.getClass)
val sqls = split(IOs.readString(url.openStream()), ";")
sqls foreach { sql =>
if (isNotBlank(sql)) {
val statment = replace(sql.trim, "${postfix}", postfix)
executor.update(statment)
logger.info("execute {}", statment)
}
}
}
}
/** One daemon tick: make sure current/next month's tables exist, then import today's and tomorrow's data. */
def run() {
try {
val tabCal = Calendar.getInstance()
val year = tabCal.get(Calendar.YEAR)
// check this month's tables
checkTable(toDate(tabCal))
// check next month's tables (so the month rollover never races table creation)
tabCal.add(Calendar.MONTH, 1)
checkTable(toDate(tabCal))
// import today's data
val dataCal = Calendar.getInstance()
importer.importData(toDate(dataCal))
// import tomorrow's data
dataCal.add(Calendar.DAY_OF_YEAR, 1)
importer.importData(toDate(dataCal))
} catch {
case e: Exception => logger.error("Cannot check and create attend table", e)
}
}
/** Schedules this task on a daemon Timer, starting at `firstTime` (or now when unset), repeating every `interval` ms. */
def init() {
val cal = Calendar.getInstance
if (Strings.isNotEmpty(firstTime)) {
// assumes firstTime is at least 4 chars, hour in [0,2), minute from index 3 on — TODO confirm expected format ("HH:mm"?)
cal.set(Calendar.HOUR_OF_DAY, toInt(firstTime.substring(0, 2)))
cal.set(Calendar.MINUTE, toInt(firstTime.substring(3)))
cal.set(Calendar.SECOND, 0)
}
// NOTE(review): thread name contains typos ("Attenance", "Deamon") — runtime string, left unchanged here
new Timer("Attenance Shard Deamon", true).schedule(this, cal.getTime(), interval)
}
} | openurp/edu-attendance-core | attendance/src/main/scala/org/openurp/ws/services/teach/attendance/impl/ShardDaemon.scala | Scala | gpl-3.0 | 3,600 |
package guide
// Demonstrates that the same symbol's info answers differently depending on the compiler
// phase at which it is queried ("info transformer hell").
object _12_InfoTransformerHell extends App {
// presumably newGlobal/compile come from the guide package's helpers — defined outside this file
val g = newGlobal("-Ystop-after:delambdafy -Ydelambdafy:method")
import g._
// compile a value class plus a lambda over it; -Ydelambdafy:method lifts the lambda into $anonfun$foo
val tree = compile("class VC(val a: Any) extends AnyVal; class D { def foo = (x: VC) => x }", g).assertNoErrors().tree
println(show(tree))
// locate the lifted lambda method's symbol in the compiled tree
val sym = tree.collect {
case dd: DefDef if dd.name.string_==("$anonfun$foo") =>
dd.symbol
}.head
// same symbol, two phases: before erasure the parameter is typed VC, after erasure Object
println(exitingUncurry(sym.info.params.map(_.defString))) // List(x: VC)
println(exitingPostErasure(exitingUncurry(sym.info.params).map(_.defString))) // List(x: Object)
}
| retronym/scalac-survival-guide | src/main/scala/guide/_12_InfoTransformerHell.scala | Scala | bsd-3-clause | 573 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import org.apache.spark.sql.{AnalysisException, Row, SaveMode, SparkSession}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, CatalogTable, CatalogTableType}
import org.apache.spark.sql.execution.SQLViewSuite
import org.apache.spark.sql.hive.test.{TestHive, TestHiveSingleton}
import org.apache.spark.sql.types.{NullType, StructType}
/**
* A test suite for Hive view related functionality.
*/
class HiveSQLViewSuite extends SQLViewSuite with TestHiveSingleton {
import testImplicits._
// Views must be able to reference permanent user functions, built-ins (in any case)
// and Hive UDAFs, both from permanent and from temporary views.
test("create a permanent/temp view using a hive, built-in, and permanent user function") {
val permanentFuncName = "myUpper"
val permanentFuncClass =
classOf[org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper].getCanonicalName
val builtInFuncNameInLowerCase = "abs"
val builtInFuncNameInMixedCase = "aBs"
val hiveFuncName = "histogram_numeric"
withUserDefinedFunction(permanentFuncName -> false) {
sql(s"CREATE FUNCTION $permanentFuncName AS '$permanentFuncClass'")
withTable("tab1") {
(1 to 10).map(i => (s"$i", i)).toDF("str", "id").write.saveAsTable("tab1")
// exercise both view flavors against the same definition
Seq("VIEW", "TEMPORARY VIEW").foreach { viewMode =>
withView("view1") {
sql(
s"""
|CREATE $viewMode view1
|AS SELECT
|$permanentFuncName(str),
|$builtInFuncNameInLowerCase(id),
|$builtInFuncNameInMixedCase(id) as aBs,
|$hiveFuncName(id, 5) over()
|FROM tab1
""".stripMargin)
checkAnswer(sql("select count(*) FROM view1"), Row(10))
}
}
}
}
}
// Temporary functions are usable from temporary views, but referencing one from a
// permanent view must be rejected (the function would not exist at view resolution time).
test("create a permanent/temp view using a temporary function") {
val tempFunctionName = "temp"
val functionClass =
classOf[org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper].getCanonicalName
withUserDefinedFunction(tempFunctionName -> true) {
sql(s"CREATE TEMPORARY FUNCTION $tempFunctionName AS '$functionClass'")
withView("view1") {
withTempView("tempView1") {
withTable("tab1") {
(1 to 10).map(i => s"$i").toDF("id").write.saveAsTable("tab1")
// temporary view
sql(s"CREATE TEMPORARY VIEW tempView1 AS SELECT $tempFunctionName(id) from tab1")
checkAnswer(sql("select count(*) FROM tempView1"), Row(10))
// permanent view
val e = intercept[AnalysisException] {
sql(s"CREATE VIEW view1 AS SELECT $tempFunctionName(id) from tab1")
}.getMessage
assert(e.contains("Not allowed to create a permanent view `default`.`view1` by " +
s"referencing a temporary function `$tempFunctionName`"))
}
}
}
}
}
// Regression test: a view over a Hive parquet table must return the same rows as the table.
test("SPARK-14933 - create view from hive parquet table") {
withTable("t_part") {
withView("v_part") {
spark.sql("create table t_part stored as parquet as select 1 as a, 2 as b")
spark.sql("create view v_part as select * from t_part")
checkAnswer(
sql("select * from t_part"),
sql("select * from v_part"))
}
}
}
// Regression test: same as above for a Hive ORC table.
test("SPARK-14933 - create view from hive orc table") {
withTable("t_orc") {
withView("v_orc") {
spark.sql("create table t_orc stored as orc as select 1 as a, 2 as b")
spark.sql("create view v_orc as select * from t_orc")
checkAnswer(
sql("select * from t_orc"),
sql("select * from v_orc"))
}
}
}
test("make sure we can resolve view created by old version of Spark") {
withTable("hive_table") {
withView("old_view") {
spark.sql("CREATE TABLE hive_table AS SELECT 1 AS a, 2 AS b")
// The views defined by older versions of Spark(before 2.2) will have empty view default
// database name, and all the relations referenced in the viewText will have database part
// defined.
val view = CatalogTable(
identifier = TableIdentifier("old_view"),
tableType = CatalogTableType.VIEW,
storage = CatalogStorageFormat.empty,
schema = new StructType().add("a", "int").add("b", "int"),
viewText = Some("SELECT `gen_attr_0` AS `a`, `gen_attr_1` AS `b` FROM (SELECT " +
"`gen_attr_0`, `gen_attr_1` FROM (SELECT `a` AS `gen_attr_0`, `b` AS " +
"`gen_attr_1` FROM hive_table) AS gen_subquery_0) AS hive_table")
)
hiveContext.sessionState.catalog.createTable(view, ignoreIfExists = false)
val df = sql("SELECT * FROM old_view")
// Check the output rows.
checkAnswer(df, Row(1, 2))
// Check the output schema.
assert(df.schema.sameType(view.schema))
}
}
}
// NULL-typed view columns must survive both CREATE VIEW and ALTER VIEW round-trips.
test("SPARK-20680: Add HiveVoidType to compatible with Hive void type") {
withView("v1") {
sql("create view v1 as select null as c")
val df = sql("select * from v1")
assert(df.schema.fields.head.dataType == NullType)
checkAnswer(
df,
Row(null)
)
sql("alter view v1 as select null as c1, 1 as c2")
val df2 = sql("select * from v1")
assert(df2.schema.fields.head.dataType == NullType)
checkAnswer(
df2,
Row(null, 1)
)
}
}
}
| rednaxelafx/apache-spark | sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSQLViewSuite.scala | Scala | apache-2.0 | 6,203 |
package pages.vrm_retention
import org.openqa.selenium.WebDriver
import org.scalatest.selenium.WebBrowser.{find, id}
import uk.gov.dvla.vehicles.presentation.common.helpers.webbrowser.{Page, WebDriverFactory}
import views.vrm_retention.RetainFailure.ExitId
/** Page object for the VRM-retention "retention failure" screen, used by acceptance tests. */
object RetainFailurePage extends Page {
// application-relative address of the page
def address = buildAppUrl("retention-failure")
// substring(1) drops the leading "/" — assumes testUrl ends with "/" and address starts with one; TODO confirm
override lazy val url = WebDriverFactory.testUrl + address.substring(1)
final override val title: String = "Transaction not successful"
// NOTE(review): .get throws NoSuchElementException when the exit element is absent — intentional fail-fast in tests
def exit(implicit driver: WebDriver) = find(id(ExitId)).get
}
| dvla/vrm-retention-online | test/pages/vrm_retention/RetainFailurePage.scala | Scala | mit | 555 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.utils
import org.apache.flink.table.api.TableException
import org.apache.flink.table.functions.{AggregateFunction, UserDefinedFunction}
import org.apache.flink.table.planner.CalcitePair
import org.apache.flink.table.planner.expressions.PlannerNamedWindowProperty
import org.apache.flink.table.planner.functions.aggfunctions.DeclarativeAggregateFunction
import org.apache.flink.table.planner.plan.utils.ExpressionFormat.ExpressionFormat
import com.google.common.collect.ImmutableMap
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.Window.Group
import org.apache.calcite.rel.core.{AggregateCall, Window}
import org.apache.calcite.rel.hint.RelHint
import org.apache.calcite.rel.{RelCollation, RelWriter}
import org.apache.calcite.rex._
import org.apache.calcite.sql.SqlKind
import org.apache.calcite.sql.SqlMatchRecognize.AfterOption
import java.util
import java.util.{SortedSet => JSortedSet}
import scala.collection.JavaConversions._
import scala.collection.mutable
/**
* Explain rel utility methods.
*/
object RelExplainUtil {
/**
 * Returns the preferred [[ExpressionFormat]] for the given [[RelWriter]]. Prefix is used for
 * traditional writers, while Infix is used for [[RelDescriptionWriterImpl]], which produces
 * the human-readable text of
 * [[org.apache.flink.table.planner.plan.nodes.FlinkRelNode#getRelDetailedDescription()]].
 */
def preferExpressionFormat(pw: RelWriter): ExpressionFormat = pw match {
// infix format is more readable for displaying
case _: RelDescriptionWriterImpl => ExpressionFormat.Infix
// traditional writer prefers prefix expression format, e.g. +(x, y)
case _ => ExpressionFormat.Prefix
}
/**
 * Renders the field names at the given indices of `inputType` as a comma-separated string.
 *
 * @param fieldIndices indices into the row type's field list
 * @param inputType    the row type supplying field names
 */
def fieldToString(fieldIndices: Array[Int], inputType: RelDataType): String = {
  val names = inputType.getFieldNames
  fieldIndices.iterator.map(names(_)).mkString(", ")
}
/**
 * Returns the Java string representation of this literal, with the literal's type excluded
 * from the digest.
 */
def literalToString(literal: RexLiteral): String = {
literal.computeDigest(RexDigestIncludeType.NO_TYPE)
}
/**
 * Renders a [[RelCollation]] using field names from the given input row type,
 * e.g. "a ASC, b DESC".
 */
def collationToString(
    collation: RelCollation,
    inputRowType: RelDataType): String = {
  val names = inputRowType.getFieldNames
  val rendered = for (fc <- collation.getFieldCollations)
    yield s"${names(fc.getFieldIndex)} ${fc.direction.shortString}"
  rendered.mkString(", ")
}
/**
 * Renders a [[RelCollation]] using field indices, e.g. "$0 ASC, $1 DESC".
 */
def collationToString(collation: RelCollation): String = {
  val rendered = for (fc <- collation.getFieldCollations)
    yield s"$$${fc.getFieldIndex} ${fc.direction.shortString}"
  rendered.mkString(", ")
}
/**
 * Renders a [[RexNode]] using the supplied expression stringifier; a null expression
 * renders as the empty string.
 */
def expressionToString(
    expr: RexNode,
    inputType: RelDataType,
    expressionFunc: (RexNode, List[String], Option[List[RexNode]]) => String): String =
  if (expr == null) {
    ""
  } else {
    expressionFunc(expr, inputType.getFieldNames.toList, None)
  }
/**
 * Renders a sort fetch (LIMIT) expression; a null fetch means "unlimited".
 */
def fetchToString(fetch: RexNode): String =
  Option(fetch).map(f => RexLiteral.intValue(f).toString).getOrElse("unlimited")
/**
 * Converts group aggregate attributes to String: every grouping key, aggregate call and
 * distinct key is rendered as "input AS output" (or just the name when both sides match),
 * joined with ", ". Used for plan explain output.
 */
def groupAggregationToString(
inputRowType: RelDataType,
outputRowType: RelDataType,
grouping: Array[Int],
auxGrouping: Array[Int],
aggCallToAggFunction: Seq[(AggregateCall, UserDefinedFunction)],
isMerge: Boolean,
isGlobal: Boolean,
distinctInfos: Seq[DistinctInfo] = Seq()): String = {
// phase marker of a two-phase aggregation: Final_ (merge), Partial_ (local), "" (complete)
val prefix = if (isMerge) {
"Final_"
} else if (!isGlobal) {
"Partial_"
} else {
""
}
val inputFieldNames = inputRowType.getFieldNames
val outputFieldNames = outputRowType.getFieldNames
val fullGrouping = grouping ++ auxGrouping
val distinctFieldNames = distinctInfos.indices.map(index => s"distinct$$$index")
val distinctStrings = if (isMerge) {
// not output distinct fields in global merge
Seq()
} else {
distinctInfos.map { distinct =>
val argListNames = distinct.argIndexes.map(inputFieldNames).mkString(",")
// TODO Refactor local&global aggregate name
// NOTE(review): `_ > 0` drops a filter that references input field 0; other call sites
// treat filterArg >= 0 as valid — confirm whether this should be `_ >= 0`.
val filterNames = distinct.filterArgs.filter(_ > 0).map(inputFieldNames).mkString(", ")
if (filterNames.nonEmpty) {
s"DISTINCT($argListNames) FILTER ($filterNames)"
} else {
s"DISTINCT($argListNames)"
}
}
}
// aggIndex -> distinct field name, so distinct aggregates print the shared distinct key
val aggToDistinctMapping = mutable.HashMap.empty[Int, String]
distinctInfos.zipWithIndex.foreach {
case (distinct, index) =>
distinct.aggIndexes.foreach {
aggIndex =>
aggToDistinctMapping += (aggIndex -> distinctFieldNames(index))
}
}
// agg
// when merging, the arguments are accumulator fields following the grouping keys;
// `offset` walks through them, consuming one slot per imperative aggregate and one per
// buffer attribute of a declarative aggregate
var offset = fullGrouping.length
val aggStrings = aggCallToAggFunction.zipWithIndex.map {
case ((aggCall, udf), index) =>
val distinct = if (aggCall.isDistinct) {
if (aggCall.getArgList.size() == 0) {
"DISTINCT"
} else {
"DISTINCT "
}
} else {
if (isMerge && aggToDistinctMapping.contains(index)) {
"DISTINCT "
} else {
""
}
}
var newArgList = aggCall.getArgList.map(_.toInt).toList
if (isMerge) {
newArgList = udf match {
case _: AggregateFunction[_, _] =>
val argList = List(offset)
offset = offset + 1
argList
case daf: DeclarativeAggregateFunction =>
val aggBufferTypes = daf.getAggBufferTypes.map(_.getLogicalType)
val argList = aggBufferTypes.indices.map(offset + _).toList
offset = offset + aggBufferTypes.length
argList
case _ =>
throw new TableException(s"Unsupported function: $udf")
}
}
val argListNames = if (aggToDistinctMapping.contains(index)) {
aggToDistinctMapping(index)
} else if (newArgList.nonEmpty) {
newArgList.map(inputFieldNames(_)).mkString(", ")
} else {
"*"
}
if (aggCall.filterArg >= 0 && aggCall.filterArg < inputFieldNames.size) {
s"${aggCall.getAggregation}($distinct$argListNames) FILTER " +
s"${inputFieldNames(aggCall.filterArg)}"
} else {
s"${aggCall.getAggregation}($distinct$argListNames)"
}
}
// output for agg
// local output exposes accumulator fields (possibly several per declarative aggregate,
// grouped in parentheses); global output exposes one result field per aggregate
val aggFunctions = aggCallToAggFunction.map(_._2)
offset = fullGrouping.length
val outFieldNames = aggFunctions.map { udf =>
val outFieldName = if (isGlobal) {
val name = outputFieldNames(offset)
offset = offset + 1
name
} else {
udf match {
case _: AggregateFunction[_, _] =>
val name = outputFieldNames(offset)
offset = offset + 1
name
case daf: DeclarativeAggregateFunction =>
val aggBufferTypes = daf.getAggBufferTypes.map(_.getLogicalType)
val name = aggBufferTypes.indices
.map(i => outputFieldNames(offset + i))
.mkString(", ")
offset = offset + aggBufferTypes.length
if (aggBufferTypes.length > 1) s"($name)" else name
case _ =>
throw new TableException(s"Unsupported function: $udf")
}
}
outFieldName
}
// pair every rendered input-side entry with its output field name
(fullGrouping.map(inputFieldNames(_)) ++ aggStrings ++ distinctStrings).zip(
fullGrouping.indices.map(outputFieldNames(_)) ++ outFieldNames ++ distinctFieldNames).map {
case (f, o) => if (f == o) {
f
} else {
s"$prefix$f AS $o"
}
}.mkString(", ")
}
/**
 * Converts stream window-aggregate attributes to String; delegates to
 * [[stringifyStreamAggregationToString]] with the given window properties and no shuffle key.
 */
def streamWindowAggregationToString(
inputRowType: RelDataType,
outputRowType: RelDataType,
aggInfoList: AggregateInfoList,
grouping: Array[Int],
windowProperties: Seq[PlannerNamedWindowProperty],
isLocal: Boolean = false,
isGlobal: Boolean = false): String = {
stringifyStreamAggregationToString(
inputRowType,
outputRowType,
aggInfoList,
grouping,
shuffleKey = None,
windowProperties,
isLocal,
isGlobal)
}
/**
 * Converts stream group-aggregate attributes to String; delegates to
 * [[stringifyStreamAggregationToString]] with no window properties. A defined `shuffleKey`
 * marks an incremental aggregate.
 */
def streamGroupAggregationToString(
inputRowType: RelDataType,
outputRowType: RelDataType,
aggInfoList: AggregateInfoList,
grouping: Array[Int],
shuffleKey: Option[Array[Int]] = None,
isLocal: Boolean = false,
isGlobal: Boolean = false): String = {
stringifyStreamAggregationToString(
inputRowType,
outputRowType,
aggInfoList,
grouping,
shuffleKey,
windowProperties = Seq(),
isLocal,
isGlobal)
}
/**
 * Shared renderer for stream (window) aggregations: produces "group keys, agg calls,
 * window properties", each entry rendered as "input AS output" when the names differ.
 *
 * @param shuffleKey distribution keys of an incremental aggregate; when defined, the input
 *                   rows carry accumulator fields after the shuffle key
 */
private def stringifyStreamAggregationToString(
inputRowType: RelDataType,
outputRowType: RelDataType,
aggInfoList: AggregateInfoList,
grouping: Array[Int],
shuffleKey: Option[Array[Int]],
windowProperties: Seq[PlannerNamedWindowProperty],
isLocal: Boolean,
isGlobal: Boolean): String = {
val aggInfos = aggInfoList.aggInfos
val actualAggInfos = aggInfoList.getActualAggregateInfos
val distinctInfos = aggInfoList.distinctInfos
val distinctFieldNames = distinctInfos.indices.map(index => s"distinct$$$index")
// aggIndex -> distinctFieldName
val distinctAggs = distinctInfos.zip(distinctFieldNames)
.flatMap(f => f._1.aggIndexes.map(i => (i, f._2)))
.toMap
// aggIndex -> filterArg, merging per-distinct filters over plain aggregate filters
val aggFilters = {
val distinctAggFilters = distinctInfos
.flatMap(d => d.aggIndexes.zip(d.filterArgs))
.toMap
val otherAggFilters = aggInfos
.map(info => (info.aggIndex, info.agg.filterArg))
.toMap
otherAggFilters ++ distinctAggFilters
}
val inFieldNames = inputRowType.getFieldNames.toList.toArray
val outFieldNames = outputRowType.getFieldNames.toList.toArray
val groupingNames = grouping.map(inFieldNames(_))
// accumulator fields start after the grouping keys (or the shuffle key when incremental)
val aggOffset = shuffleKey match {
case None => grouping.length
case Some(k) => k.length
}
val isIncremental: Boolean = shuffleKey.isDefined
// TODO output local/global agg call names like Partial_XXX, Final_XXX
val aggStrings = if (isLocal) {
stringifyLocalAggregates(aggInfos, distinctInfos, distinctAggs, aggFilters, inFieldNames)
} else if (isGlobal || isIncremental) {
val accFieldNames = inputRowType.getFieldNames.toList.toArray
val aggOutputFieldNames = localAggOutputFieldNames(aggOffset, aggInfos, accFieldNames)
stringifyGlobalAggregates(aggInfos, distinctAggs, aggOutputFieldNames)
} else {
stringifyAggregates(actualAggInfos, distinctAggs, aggFilters, inFieldNames)
}
val isTableAggregate =
AggregateUtil.isTableAggregate(aggInfoList.getActualAggregateCalls.toList)
// table aggregates group all agg output columns into one parenthesized entry
val outputFieldNames = if (isLocal) {
grouping.map(inFieldNames(_)) ++ localAggOutputFieldNames(aggOffset, aggInfos, outFieldNames)
} else if (isIncremental) {
val accFieldNames = inputRowType.getFieldNames.toList.toArray
grouping.map(inFieldNames(_)) ++ localAggOutputFieldNames(aggOffset, aggInfos, accFieldNames)
} else if (isTableAggregate) {
val groupingOutNames = outFieldNames.slice(0, grouping.length)
val aggOutNames = List(s"(${outFieldNames.drop(grouping.length)
.dropRight(windowProperties.length).mkString(", ")})")
val propertyOutNames = outFieldNames.slice(
outFieldNames.length - windowProperties.length,
outFieldNames.length)
groupingOutNames ++ aggOutNames ++ propertyOutNames
} else {
outFieldNames
}
val propStrings = windowProperties.map(_.getProperty.toString)
(groupingNames ++ aggStrings ++ propStrings).zip(outputFieldNames).map {
case (f, o) if f == o => f
case (f, o) => s"$f AS $o"
}.mkString(", ")
}
/**
 * Renders the global phase of a two-phase aggregation, e.g. "COUNT(count$0)", one string
 * per aggregate, arguments taken from the local phase's accumulator field names.
 *
 * @param aggInfos      aggregate metadata, one entry per aggregate call
 * @param distinctAggs  aggIndex -> distinct field name for distinct aggregates
 * @param accFieldNames accumulator field names produced by the local phase; expected to
 *                      contain at least one entry per aggregate
 */
private def stringifyGlobalAggregates(
    aggInfos: Array[AggregateInfo],
    distinctAggs: Map[Int, String],
    accFieldNames: Seq[String]): Array[String] = {
  aggInfos.zipWithIndex.map { case (aggInfo, index) =>
    val buf = new mutable.StringBuilder
    buf.append(aggInfo.agg.getAggregation)
    if (aggInfo.consumeRetraction) {
      buf.append("_RETRACT")
    }
    buf.append("(")
    // BUGFIX: removed leftover debug code `if (index >= accFieldNames.length) println()`;
    // an out-of-range index still fails fast below via IndexOutOfBoundsException.
    val argNames = accFieldNames(index)
    if (distinctAggs.contains(index)) {
      buf.append(s"${distinctAggs(index)} ")
    }
    buf.append(argNames).append(")")
    buf.toString
  }
}
/**
 * Renders the local phase of a two-phase aggregation: one string per aggregate call
 * (with optional DISTINCT prefix and FILTER suffix) followed by one "DISTINCT(...)" entry
 * per distinct key.
 */
private def stringifyLocalAggregates(
aggInfos: Array[AggregateInfo],
distincts: Array[DistinctInfo],
distinctAggs: Map[Int, String],
aggFilters: Map[Int, Int],
inFieldNames: Array[String]): Array[String] = {
val aggStrs = aggInfos.zipWithIndex.map { case (aggInfo, index) =>
val buf = new mutable.StringBuilder
buf.append(aggInfo.agg.getAggregation)
if (aggInfo.consumeRetraction) {
buf.append("_RETRACT")
}
buf.append("(")
val argNames = aggInfo.agg.getArgList.map(inFieldNames(_))
if (distinctAggs.contains(index)) {
buf.append(if (argNames.nonEmpty) s"${distinctAggs(index)} " else distinctAggs(index))
}
val argNameStr = if (argNames.nonEmpty) {
argNames.mkString(", ")
} else {
"*"
}
buf.append(argNameStr).append(")")
// NOTE(review): Map.apply throws when index is absent — presumably the caller always
// populates aggFilters for every aggregate index; confirm against call sites.
if (aggFilters(index) >= 0) {
val filterName = inFieldNames(aggFilters(index))
buf.append(" FILTER ").append(filterName)
}
buf.toString
}
val distinctStrs = distincts.map { distinctInfo =>
val argNames = distinctInfo.argIndexes.map(inFieldNames(_)).mkString(", ")
s"DISTINCT($argNames)"
}
aggStrs ++ distinctStrs
}
/**
 * Collects the output field names produced by a local aggregation: starting at `aggOffset`,
 * one accumulator field per imperative aggregate and one per buffer attribute of a
 * declarative aggregate (parenthesized when more than one), followed by any trailing
 * distinct-key fields.
 */
private def localAggOutputFieldNames(
aggOffset: Int,
aggInfos: Array[AggregateInfo],
accNames: Array[String]): Array[String] = {
var offset = aggOffset
val aggOutputNames = aggInfos.map { info =>
info.function match {
case _: AggregateFunction[_, _] =>
val name = accNames(offset)
offset = offset + 1
name
case daf: DeclarativeAggregateFunction =>
val aggBufferTypes = daf.getAggBufferTypes.map(_.getLogicalType)
val name = aggBufferTypes.indices
.map(i => accNames(offset + i))
.mkString(", ")
offset = offset + aggBufferTypes.length
if (aggBufferTypes.length > 1) s"($name)" else name
case _ =>
throw new TableException(s"Unsupported function: ${info.function}")
}
}
// whatever remains after the accumulators are the distinct-key fields
val distinctFieldNames = (offset until accNames.length).map(accNames)
aggOutputNames ++ distinctFieldNames
}
/**
 * Renders one-phase (complete) aggregate calls against the input field names,
 * including DISTINCT markers, retraction suffix and FILTER clauses.
 */
private def stringifyAggregates(
aggInfos: Array[AggregateInfo],
distinctAggs: Map[Int, String],
aggFilters: Map[Int, Int],
inFields: Array[String]): Array[String] = {
// MAX_RETRACT(DISTINCT a) FILTER b
aggInfos.zipWithIndex.map { case (aggInfo, index) =>
val buf = new mutable.StringBuilder
buf.append(aggInfo.agg.getAggregation)
if (aggInfo.consumeRetraction) {
buf.append("_RETRACT")
}
buf.append("(")
val argNames = aggInfo.agg.getArgList.map(inFields(_))
if (distinctAggs.contains(index)) {
buf.append(if (argNames.nonEmpty) "DISTINCT " else "DISTINCT")
}
val argNameStr = if (argNames.nonEmpty) {
argNames.mkString(", ")
} else {
"*"
}
buf.append(argNameStr).append(")")
// NOTE(review): Map.apply — assumes every aggregate index has a filter entry (possibly -1)
if (aggFilters(index) >= 0) {
val filterName = inFields(aggFilters(index))
buf.append(" FILTER ").append(filterName)
}
buf.toString
}
}
/**
 * Converts over (windowed) aggregate attributes to String. Arguments whose index is beyond
 * the input field count reference window constants. When `outputInputName` is set, the
 * pass-through input columns precede the aggregate strings.
 */
def overAggregationToString(
inputRowType: RelDataType,
outputRowType: RelDataType,
constants: Seq[RexLiteral],
namedAggregates: Seq[CalcitePair[AggregateCall, String]],
outputInputName: Boolean = true,
rowTypeOffset: Int = 0): String = {
val inputFieldNames = inputRowType.getFieldNames
val outputFieldNames = outputRowType.getFieldNames
val aggStrings = namedAggregates.map(_.getKey).map(
a => s"${a.getAggregation}(${
val prefix = if (a.isDistinct) {
"DISTINCT "
} else {
""
}
prefix + (if (a.getArgList.size() > 0) {
a.getArgList.map { arg =>
// index to constant
if (arg >= inputRowType.getFieldCount) {
constants(arg - inputRowType.getFieldCount)
}
// index to input field
else {
inputFieldNames(arg)
}
}.mkString(", ")
} else {
"*"
})
})")
val output = if (outputInputName) inputFieldNames ++ aggStrings else aggStrings
// rowTypeOffset skips leading output fields that have no input-side counterpart
output.zip(outputFieldNames.drop(rowTypeOffset)).map {
case (f, o) => if (f == o) {
f
} else {
s"$f AS $o"
}
}.mkString(", ")
}
/**
 * Converts a list of project lists (e.g. expand projections) to String. Each projection is
 * rendered as "{...}" with input refs shown as "in AS out" when renamed, literals as
 * "value AS out", and anything else by its output name only.
 */
def projectsToString(
projects: util.List[util.List[RexNode]],
inputRowType: RelDataType,
outputRowType: RelDataType): String = {
val inFieldNames = inputRowType.getFieldNames
val outFieldNames = outputRowType.getFieldNames
projects.map { project =>
project.zipWithIndex.map {
case (r: RexInputRef, i) =>
val inputFieldName = inFieldNames.get(r.getIndex)
val outputFieldName = outFieldNames.get(i)
if (inputFieldName != outputFieldName) {
s"$inputFieldName AS $outputFieldName"
} else {
outputFieldName
}
case (l: RexLiteral, i) => s"${l.getValue3} AS ${outFieldNames.get(i)}"
case (_, i) => outFieldNames.get(i)
}.mkString("{", ", ", "}")
}.mkString(", ")
}
/**
 * Converts the window range of a [[Group]] to String, e.g.
 * " RANGE BETWEEN 5 PRECEDING AND CURRENT ROW".
 */
def windowRangeToString(
    logicWindow: Window,
    groupWindow: Group): String = {

  // number of original input fields: total fields minus the generated aggregate calls
  def calcOriginInputRows(window: Window): Int = {
    window.getRowType.getFieldCount - window.groups.flatMap(_.aggCalls).size
  }

  // renders one bound; offset bounds reference constants stored on the window
  def boundString(bound: RexWindowBound, window: Window): String = {
    if (bound.getOffset != null) {
      val ref = bound.getOffset.asInstanceOf[RexInputRef]
      val boundIndex = ref.getIndex - calcOriginInputRows(window)
      val offset = window.constants.get(boundIndex).getValue2
      val offsetKind = if (bound.isPreceding) "PRECEDING" else "FOLLOWING"
      s"$offset $offsetKind"
    } else {
      bound.toString
    }
  }

  val buf = new StringBuilder
  // BUGFIX: previously emitted the misspelled keyword " RANG "; plan-test golden files
  // containing the old spelling need regeneration.
  buf.append(if (groupWindow.isRows) " ROWS " else " RANGE ")
  val lowerBound = groupWindow.lowerBound
  val upperBound = groupWindow.upperBound
  if (lowerBound != null) {
    if (upperBound != null) {
      buf.append("BETWEEN ")
      buf.append(boundString(lowerBound, logicWindow))
      buf.append(" AND ")
      buf.append(boundString(upperBound, logicWindow))
    } else {
      buf.append(boundString(lowerBound, logicWindow))
    }
  } else if (upperBound != null) {
    buf.append(boundString(upperBound, logicWindow))
  }
  buf.toString
}
/**
 * Renders the filter condition of a [[RexProgram]]; returns "" when there is no condition.
 */
def conditionToString(
    calcProgram: RexProgram,
    f: (RexNode, List[String], Option[List[RexNode]], ExpressionFormat) => String,
    expressionFormat: ExpressionFormat = ExpressionFormat.Prefix): String =
  Option(calcProgram.getCondition) match {
    case Some(condition) =>
      val fieldNames = calcProgram.getInputRowType.getFieldNames.toList
      val localExpressions = calcProgram.getExprList.toList
      f(condition, fieldNames, Some(localExpressions), expressionFormat)
    case None =>
      ""
  }
/**
 * Renders the projection list of a [[RexProgram]], each entry as "expr AS out" when the
 * rendered expression differs from the output field name.
 */
def selectionToString(
calcProgram: RexProgram,
expression: (RexNode, List[String], Option[List[RexNode]], ExpressionFormat) => String,
expressionFormat: ExpressionFormat = ExpressionFormat.Prefix): String = {
val proj = calcProgram.getProjectList.toList
val inFields = calcProgram.getInputRowType.getFieldNames.toList
val localExprs = calcProgram.getExprList.toList
val outFields = calcProgram.getOutputRowType.getFieldNames.toList
proj.map(expression(_, inFields, Some(localExprs), expressionFormat))
.zip(outFields).map { case (e, o) =>
if (e != o) {
e + " AS " + o
} else {
e
}
}.mkString(", ")
}
/**
 * Renders a table-function correlate call as "table(name(arg1,arg2))".
 */
def correlateToString(
    inputType: RelDataType,
    rexCall: RexCall,
    expression: (RexNode, List[String], Option[List[RexNode]]) => String): String = {
  val fieldNames = inputType.getFieldNames.toList
  val renderedOperands = rexCall.getOperands
    .map(operand => expression(operand, fieldNames, None))
    .mkString(",")
  s"table(${rexCall.getOperator.toString}($renderedOperands))"
}
/**
 * Converts batch window-aggregate attributes to String, with Final_/Partial_ prefixes for
 * the two-phase case. The input/output layouts differ between local and global phases: a
 * timestamp column sits between the grouping keys and the aux-grouping keys on the
 * merge-input / local-output side.
 */
def windowAggregationToString(
inputType: RelDataType,
grouping: Array[Int],
auxGrouping: Array[Int],
rowType: RelDataType,
aggCallToAggFunction: Seq[(AggregateCall, UserDefinedFunction)],
enableAssignPane: Boolean,
isMerge: Boolean,
isGlobal: Boolean): String = {
val prefix = if (isMerge) {
"Final_"
} else if (!isGlobal) {
"Partial_"
} else {
""
}
val inFields = inputType.getFieldNames
val outFields = rowType.getFieldNames
/**
 * - local window agg input type: grouping keys + aux-grouping keys + agg arg list
 * - global window agg input type: grouping keys + timestamp + aux-grouping keys + agg buffer
 * agg buffer as agg merge args list
 */
var offset = if (isMerge) {
grouping.length + 1 + auxGrouping.length
} else {
grouping.length + auxGrouping.length
}
val aggStrings = aggCallToAggFunction.map { case (aggCall, udf) =>
var newArgList = aggCall.getArgList.map(_.toInt).toList
if (isMerge) {
// NOTE(review): unlike groupAggregationToString, this match has no default case —
// an unexpected UDF type raises MatchError instead of TableException; confirm intended.
newArgList = udf match {
case _: AggregateFunction[_, _] =>
val argList = List(offset)
offset = offset + 1
argList
case daf: DeclarativeAggregateFunction =>
val argList = daf.aggBufferAttributes().indices.map(offset + _).toList
offset = offset + daf.aggBufferAttributes.length
argList
}
}
val argListNames = if (newArgList.nonEmpty) {
newArgList.map(inFields(_)).mkString(", ")
} else {
"*"
}
if (aggCall.filterArg >= 0 && aggCall.filterArg < inFields.size) {
s"${aggCall.getAggregation}($argListNames) FILTER ${inFields(aggCall.filterArg)}"
} else {
s"${aggCall.getAggregation}($argListNames)"
}
}
/**
 * - local window agg output type: grouping keys + timestamp + aux-grouping keys + agg buffer
 * - global window agg output type:
 * grouping keys + aux-grouping keys + agg result + window props
 */
offset = if (!isGlobal) {
grouping.length + 1 + auxGrouping.length
} else {
grouping.length + auxGrouping.length
}
val outFieldNames = aggCallToAggFunction.map { case (_, udf) =>
val outFieldName = if (isGlobal) {
val name = outFields(offset)
offset = offset + 1
name
} else {
udf match {
case _: AggregateFunction[_, _] =>
val name = outFields(offset)
offset = offset + 1
name
case daf: DeclarativeAggregateFunction =>
val name = daf.aggBufferAttributes().zipWithIndex.map(offset + _._2).map(
outFields(_)).mkString(", ")
offset = offset + daf.aggBufferAttributes().length
if (daf.aggBufferAttributes.length > 1) s"($name)" else name
}
}
outFieldName
}
val inNames = grouping.map(inFields(_)) ++ auxGrouping.map(inFields(_)) ++ aggStrings
// NOTE(review): the aux-grouping output names always skip index grouping.length (the
// timestamp slot) — looks local-layout-specific; verify this is also right when isGlobal.
val outNames = grouping.indices.map(outFields(_)) ++
(grouping.length + 1 until grouping.length + 1 + auxGrouping.length).map(outFields(_)) ++
outFieldNames
inNames.zip(outNames).map {
case (f, o) => if (f == o) {
f
} else {
s"$prefix$f AS $o"
}
}.mkString(", ")
}
/**
 * Legacy renderer for stream window aggregations: grouping keys, aggregate calls
 * (with DISTINCT/FILTER decoration) and window properties, each paired with its output
 * field name. Table aggregates collapse their output columns into one parenthesized entry.
 *
 * @deprecated please use [[streamWindowAggregationToString()]] instead.
 */
@Deprecated
def legacyStreamWindowAggregationToString(
inputType: RelDataType,
grouping: Array[Int],
rowType: RelDataType,
aggs: Seq[AggregateCall],
namedProperties: Seq[PlannerNamedWindowProperty],
withOutputFieldNames: Boolean = true): String = {
val inFields = inputType.getFieldNames
val isTableAggregate = AggregateUtil.isTableAggregate(aggs)
val outFields: Seq[String] = if (isTableAggregate) {
val outNames = rowType.getFieldNames
outNames.slice(0, grouping.length) ++
List(s"(${outNames.drop(grouping.length)
.dropRight(namedProperties.length).mkString(", ")})") ++
outNames.slice(outNames.length - namedProperties.length, outNames.length)
} else {
rowType.getFieldNames
}
val groupStrings = grouping.map(inFields(_))
val aggStrings = aggs.map(call => {
// "DISTINCT" without trailing space when the call has no arguments (renders as COUNT(DISTINCT*))
val distinct = if (call.isDistinct) {
if (call.getArgList.size() == 0) {
"DISTINCT"
} else {
"DISTINCT "
}
} else {
""
}
val argList = if (call.getArgList.size() > 0) {
call.getArgList.map(inFields(_)).mkString(", ")
} else {
"*"
}
val filter = if (call.filterArg >= 0 && call.filterArg < inFields.size) {
s" FILTER ${inFields(call.filterArg)}"
} else {
""
}
s"${call.getAggregation}($distinct$argList)$filter"
})
val propStrings = namedProperties.map(_.getProperty.toString)
(groupStrings ++ aggStrings ++ propStrings).zip(outFields).map {
case (f, o) => if (f == o) {
f
} else {
if (withOutputFieldNames) s"$f AS $o" else f
}
}.mkString(", ")
}
// ------------------------------------------------------------------------------------
// MATCH RECOGNIZE
// ------------------------------------------------------------------------------------
/**
 * Converts the MEASURES clause of MATCH_RECOGNIZE to String, one "expr AS alias" entry per
 * measure.
 */
def measuresDefineToString(
    measures: ImmutableMap[String, RexNode],
    fieldNames: List[String],
    expression: (RexNode, List[String], Option[List[RexNode]]) => String): String = {
  val rendered = for ((alias, rex) <- measures)
    yield s"${expression(rex, fieldNames, None)} AS $alias"
  rendered.mkString(", ")
}
/**
 * Converts the ROWS PER MATCH mode of MATCH_RECOGNIZE to String.
 *
 * @param isAll true for ALL ROWS PER MATCH, false for ONE ROW PER MATCH
 */
def rowsPerMatchToString(isAll: Boolean): String =
  if (!isAll) "ONE ROW PER MATCH" else "ALL ROWS PER MATCH"
/**
 * Converts the AFTER MATCH clause of MatchRecognize to String.
 *
 * NOTE(review): `fieldNames` is currently unused by the body; it appears to be
 * kept only for signature symmetry with the other *ToString helpers — confirm
 * before removing.
 *
 * @throws IllegalStateException on a RexNode kind that is not a valid AFTER strategy
 */
def afterMatchToString(
    after: RexNode,
    fieldNames: Seq[String]): String =
  after.getKind match {
    // SKIP TO FIRST/LAST carry the target pattern variable as operand 0.
    case SqlKind.SKIP_TO_FIRST => s"SKIP TO FIRST ${
      after.asInstanceOf[RexCall].operands.get(0).toString
    }"
    case SqlKind.SKIP_TO_LAST => s"SKIP TO LAST ${
      after.asInstanceOf[RexCall].operands.get(0).toString
    }"
    // The two parameterless strategies are encoded as an AfterOption literal.
    case SqlKind.LITERAL => after.asInstanceOf[RexLiteral]
      .getValueAs(classOf[AfterOption]) match {
      case AfterOption.SKIP_PAST_LAST_ROW => "SKIP PAST LAST ROW"
      case AfterOption.SKIP_TO_NEXT_ROW => "SKIP TO NEXT ROW"
    }
    case _ => throw new IllegalStateException(s"Corrupted query tree. Unexpected $after for " +
      s"after match strategy.")
  }
/**
 * Converts the SUBSET clause of MatchRecognize to String,
 * e.g. "S = (A, B), T = (C)".
 *
 * @param subset mapping from subset variable name to its member pattern variables
 */
def subsetToString(subset: ImmutableMap[String, JSortedSet[String]]): String =
  subset.map {
    case (k, v) => s"$k = (${v.mkString(", ")})"
  }.mkString(", ")
/**
 * Converts [[RelHint]]s to String, e.g.
 * "[[name inheritPath:[0, 1] options:[opt1, opt2]]]".
 *
 * @param hints the hints attached to a relational node
 */
def hintsToString(hints: util.List[RelHint]): String = {
  val sb = new StringBuilder
  sb.append("[")
  hints.foreach { hint =>
    sb.append("[").append(hint.hintName)
    // inheritPath is only printed when non-trivial.
    if (!hint.inheritPath.isEmpty) {
      sb.append(" inheritPath:").append(hint.inheritPath)
    }
    if (hint.listOptions.size() > 0 || hint.kvOptions.size() > 0) {
      sb.append(" options:")
      // If both option kinds are present, only listOptions is printed.
      if (hint.listOptions.size > 0) {
        sb.append(hint.listOptions.toString)
      } else {
        sb.append(hint.kvOptions.toString)
      }
    }
    sb.append("]")
  }
  sb.append("]")
  sb.toString
}
}
| StephanEwen/incubator-flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/utils/RelExplainUtil.scala | Scala | apache-2.0 | 29,824 |
package models.persistance
import models.ReleaseTSVFileLink
import models.ReleaseTSVFileLinkTable
import play.api.db.slick.Config.driver.simple._
/** Slick 2.x DAO over the release <-> TSV-file link table. */
object ReleaseTSVFileLinkDAO {

  /** Base query over the link table. */
  val releaseTSVLinks = TableQuery[ReleaseTSVFileLinkTable]

  /** Ids of all TSV files linked to the given release. */
  def getFileIdsFromReleaseId(releaseId: Int) = { implicit session: Session =>
    releaseTSVLinks.filter(l => l.releaseId === releaseId).map(l => l.tsvFileId).list
  }

  /** The TSV file rows linked to the given release. */
  def getFilesFromReleaseId(releaseId: Int) = { implicit session: Session =>
    val query = for {
      r <- releaseTSVLinks
      t <- r.tsvFile if r.releaseId === releaseId
    } yield (t)
    query.list
  }

  /** Whether a link between the given release and TSV file exists. */
  def tsvFileExistsInRelease(releaseId: Int, tsvFileId: Int) = { implicit session: Session =>
    // firstOption stops at the first matching row instead of materialising the
    // whole result set just to test its length, and the redundant
    // `if (cond) true else false` is dropped.
    releaseTSVLinks
      .filter(l => l.releaseId === releaseId && l.tsvFileId === tsvFileId)
      .firstOption
      .isDefined
  }

  /** Ids of all releases that reference the given TSV file. */
  def getReleaseIdsFromFileId(fileId: Int) = { implicit session: Session =>
    releaseTSVLinks.filter(l => l.tsvFileId === fileId).map(l => l.releaseId).list
  }

  /** Inserts a new release/file link stamped with the current time. */
  def createLink(releaseId: Int, fileId: Int) = { implicit session: Session =>
    val newReleaseTSVFileLink = new ReleaseTSVFileLink(None, releaseId, fileId, new java.sql.Timestamp(System.currentTimeMillis()))
    releaseTSVLinks.insert(newReleaseTSVFileLink)
  }
}
package io.scalaland.chimney.internal
// Type-level encoding of transformer configuration flags: a concrete Flags type
// is a chain of Enable/Disable wrappers terminated by Default. These classes
// are never instantiated — presumably they are inspected at compile time by
// Chimney's macro machinery (TODO confirm against the macro code).
sealed abstract class TransformerFlags

object TransformerFlags {

  /** Chain terminator: every flag takes its default setting. */
  final class Default extends TransformerFlags

  /** Marks flag F as enabled on top of the remaining Flags chain. */
  final class Enable[F <: Flag, Flags <: TransformerFlags] extends TransformerFlags

  /** Marks flag F as disabled on top of the remaining Flags chain. */
  final class Disable[F <: Flag, Flags <: TransformerFlags] extends TransformerFlags

  // Individual toggleable capabilities.
  sealed abstract class Flag

  final class MethodAccessors extends Flag
  final class DefaultValues extends Flag
  final class BeanSetters extends Flag
  final class BeanGetters extends Flag
  final class OptionDefaultsToNone extends Flag
  final class UnsafeOption extends Flag
}
| scalalandio/chimney | chimney/src/main/scala/io/scalaland/chimney/internal/TransformerFlags.scala | Scala | apache-2.0 | 602 |
package coursier.cli
import java.io.{File, FileWriter}
import coursier.moduleString
import coursier.cli.options.DependencyOptions
import coursier.cli.params.DependencyParams
import coursier.parse.JavaOrScalaModule
import utest._
object ParamsTests extends TestSuite {

  /**
   * Loan-pattern fixture: writes `content` to a fresh temp file, hands the
   * file (and its open writer) to `testCode`, and always cleans up afterwards.
   * (Explicit `: Unit =` replaces the deprecated procedure syntax.)
   */
  def withFile(content: String)(testCode: (File, FileWriter) => Any): Unit = {
    val file = File.createTempFile("hello", "world") // create the fixture
    val writer = new FileWriter(file)
    writer.write(content)
    writer.flush()
    try testCode(file, writer) // "loan" the fixture to the test
    finally {
      writer.close()
      file.delete()
    }
  }

  /** Shared assertion for the malformed-input cases: parsing must fail. */
  private def expectParseError(file: File): Unit = {
    val options = DependencyOptions(localExcludeFile = file.getAbsolutePath)
    DependencyParams(options, None).toEither match {
      case Left(errors) =>
        assert(errors.exists(_.startsWith("Failed to parse ")))
      case Right(p) =>
        sys.error(s"Should have errored (got $p)")
    }
  }

  val tests = Tests {

    test("Normal text should parse correctly") - withFile(
      "org1:name1--org2:name2"
    ) { (file, _) =>
      val options = DependencyOptions(localExcludeFile = file.getAbsolutePath)
      val params = DependencyParams(options, None)
        .fold(e => sys.error(e.toString), identity)
      val expected = Map(JavaOrScalaModule.JavaModule(mod"org1:name1") -> Set(
        JavaOrScalaModule.JavaModule(mod"org2:name2")
      ))
      Predef.assert(params.perModuleExclude.equals(expected), s"got ${params.perModuleExclude}")
    }

    test("Multiple excludes should be combined") - withFile(
      "org1:name1--org2:name2\\n" +
        "org1:name1--org3:name3\\n" +
        "org4:name4--org5:name5"
    ) { (file, _) =>
      val options = DependencyOptions(localExcludeFile = file.getAbsolutePath)
      val params = DependencyParams(options, None)
        .fold(e => sys.error(e.toString), identity)
      val expected = Map(
        JavaOrScalaModule.JavaModule(mod"org1:name1") -> Set(
          JavaOrScalaModule.JavaModule(mod"org2:name2"),
          JavaOrScalaModule.JavaModule(mod"org3:name3")
        ),
        JavaOrScalaModule.JavaModule(mod"org4:name4") -> Set(
          JavaOrScalaModule.JavaModule(mod"org5:name5")
        )
      )
      assert(params.perModuleExclude.equals(expected))
    }

    test("extra -- should error") - withFile(
      "org1:name1--org2:name2--xxx\\n" +
        "org1:name1--org3:name3\\n" +
        "org4:name4--org5:name5"
    ) { (file, _) =>
      expectParseError(file)
    }

    test("child has no name should error") - withFile(
      "org1:name1--org2:"
    ) { (file, _) =>
      expectParseError(file)
    }

    test("child has nothing should error") - withFile(
      "org1:name1--:"
    ) { (file, _) =>
      expectParseError(file)
    }
  }
}
| coursier/coursier | modules/cli/src/test/scala/coursier/cli/ParamsTests.scala | Scala | apache-2.0 | 3,323 |
package ch.squan.game.model.command
import org.newdawn.slick.command.BasicCommand
/**
 * Created by chris on 23/01/16.
 */
/** Input command for the "Right" action, dispatched via Slick's BasicCommand. */
case object CommandRight extends BasicCommand("Right")
| cbenning/space2d | src/main/scala/ch/squan/game/model/command/CommandRight.scala | Scala | apache-2.0 | 182 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming.sources
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import org.apache.spark.sql.execution.streaming.Sink
import org.apache.spark.sql.streaming.DataStreamWriter
/**
 * Streaming [[Sink]] that re-creates each micro-batch as a typed Dataset[T]
 * and hands it, together with its batch id, to a user-supplied function.
 *
 * @param batchWriter user callback invoked once per micro-batch
 * @param encoder     encoder used to view the batch rows as T
 */
class ForeachBatchSink[T](batchWriter: (Dataset[T], Long) => Unit, encoder: ExpressionEncoder[T])
  extends Sink {

  override def addBatch(batchId: Long, data: DataFrame): Unit = {
    // Rebuild a DataFrame from the batch's internal rows on the same session,
    // then view it as Dataset[T] via the captured encoder before calling the user fn.
    val rdd = data.queryExecution.toRdd
    implicit val enc = encoder
    val ds = data.sparkSession.internalCreateDataFrame(rdd, data.schema).as[T]
    batchWriter(ds, batchId)
  }

  override def toString(): String = "ForeachBatchSink"
}
/**
 * Interface that is meant to be extended by Python classes via Py4J.
 * Py4J allows Python classes to implement Java interfaces so that the JVM can call back
 * Python objects. In this case, this allows the user-defined Python `foreachBatch` function
 * to be called from JVM when the query is active.
 * */
trait PythonForeachBatchFunction {
  /** Call the Python implementation of this function */
  def call(batchDF: DataFrame, batchId: Long): Unit
}
/** Bridges a Py4J-backed [[PythonForeachBatchFunction]] into DataStreamWriter.foreachBatch. */
object PythonForeachBatchHelper {
  def callForeachBatch(dsw: DataStreamWriter[Row], pythonFunc: PythonForeachBatchFunction): Unit = {
    // Eta-expand the Py4J callback so it satisfies foreachBatch's function type.
    dsw.foreachBatch(pythonFunc.call _)
  }
}
| ueshin/apache-spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/ForeachBatchSink.scala | Scala | apache-2.0 | 2,129 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.config
import org.apache.samza.system.SystemStream
import org.apache.samza.util.Util
object TaskConfig {
  // task config constants
  val INPUT_STREAMS = "task.inputs" // streaming.input-streams
  val WINDOW_MS = "task.window.ms" // window period in milliseconds
  val COMMIT_MS = "task.commit.ms" // commit period in milliseconds
  val SHUTDOWN_MS = "task.shutdown.ms" // how long to wait for a clean shutdown
  val TASK_CLASS = "task.class" // streaming.task-factory-class
  val COMMAND_BUILDER = "task.command.class" // streaming.task-factory-class
  val LIFECYCLE_LISTENERS = "task.lifecycle.listeners" // li-generator,foo
  val LIFECYCLE_LISTENER = "task.lifecycle.listener.%s.class" // task.lifecycle.listener.li-generator.class
  val CHECKPOINT_MANAGER_FACTORY = "task.checkpoint.factory" // class name to use when sending offset checkpoints
  val MESSAGE_CHOOSER_CLASS_NAME = "task.chooser.class"
  val DROP_DESERIALIZATION_ERROR = "task.drop.deserialization.errors" // define whether drop the messages or not when deserialization fails
  val DROP_SERIALIZATION_ERROR = "task.drop.serialization.errors" // define whether drop the messages or not when serialization fails
  val IGNORED_EXCEPTIONS = "task.ignored.exceptions" // exceptions to ignore in process and window

  /**
   * Samza's container polls for more messages under two conditions. The first
   * condition arises when there are simply no remaining buffered messages to
   * process for any input SystemStreamPartition. The second condition arises
   * when some input SystemStreamPartitions have empty buffers, but some do
   * not. In the latter case, a polling interval is defined to determine how
   * often to refresh the empty SystemStreamPartition buffers. By default,
   * this interval is 50ms, which means that any empty SystemStreamPartition
   * buffer will be refreshed at least every 50ms. A higher value here means
   * that empty SystemStreamPartitions will be refreshed less often, which
   * means more latency is introduced, but less CPU and network will be used.
   * Decreasing this value means that empty SystemStreamPartitions are
   * refreshed more frequently, thereby introducing less latency, but
   * increasing CPU and network utilization.
   */
  val POLL_INTERVAL_MS = "task.poll.interval.ms"

  // Enables `config.getWindowMs`-style access on any Config via implicit conversion.
  implicit def Config2Task(config: Config) = new TaskConfig(config)
}
/** Typed accessors for the task.* configuration keys declared in [[TaskConfig]]. */
class TaskConfig(config: Config) extends ScalaMapConfig(config) {

  /**
   * Input SystemStreams parsed from the comma-separated `task.inputs` value.
   * A missing or empty value yields an empty set.
   */
  def getInputStreams = getOption(TaskConfig.INPUT_STREAMS) match {
    case Some(streams) if streams.length > 0 =>
      streams.split(",").map(systemStreamNames => {
        Util.getSystemStreamFromNames(systemStreamNames.trim)
      }).toSet
    case _ => Set[SystemStream]()
  }

  // Option.map(_.toLong) replaces the previous Some/None pattern-match
  // boilerplate for the three millisecond settings — behaviour is identical.
  def getWindowMs: Option[Long] = getOption(TaskConfig.WINDOW_MS).map(_.toLong)

  def getCommitMs: Option[Long] = getOption(TaskConfig.COMMIT_MS).map(_.toLong)

  def getShutdownMs: Option[Long] = getOption(TaskConfig.SHUTDOWN_MS).map(_.toLong)

  def getLifecycleListeners(): Option[String] = getOption(TaskConfig.LIFECYCLE_LISTENERS)

  def getLifecycleListenerClass(name: String): Option[String] = getOption(TaskConfig.LIFECYCLE_LISTENER format name)

  def getTaskClass = getOption(TaskConfig.TASK_CLASS)

  def getCommandClass = getOption(TaskConfig.COMMAND_BUILDER)

  def getCheckpointManagerFactory() = getOption(TaskConfig.CHECKPOINT_MANAGER_FACTORY)

  def getMessageChooserClass = getOption(TaskConfig.MESSAGE_CHOOSER_CLASS_NAME)

  def getDropDeserialization = getOption(TaskConfig.DROP_DESERIALIZATION_ERROR)

  def getDropSerialization = getOption(TaskConfig.DROP_SERIALIZATION_ERROR)

  def getPollIntervalMs = getOption(TaskConfig.POLL_INTERVAL_MS)

  def getIgnoredExceptions = getOption(TaskConfig.IGNORED_EXCEPTIONS)
}
| Quantiply/samza | samza-core/src/main/scala/org/apache/samza/config/TaskConfig.scala | Scala | apache-2.0 | 4,804 |
// NOTE(review): per its path (test/files/neg/t4612.scala) this is a *negative*
// compiler regression test — it is expected to FAIL compilation (Cris.foo
// returns Bob where Ann[Cris] requires a Cris). Do not "fix" the type error.
class t4612 {
  trait Ann[A] {
    def foo: A
  }
  class Bob extends Ann[Bob] {
    def foo = new Bob
    trait Cris extends Ann[Cris] {
      self: Bob =>
      def foo = new Bob
    }
  }
}
| scala/scala | test/files/neg/t4612.scala | Scala | apache-2.0 | 197 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.plans.logical
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeSet, Expression, PythonUDF}
/**
 * FlatMap groups using a udf: pandas.Dataframe -> pandas.DataFrame.
 * This is used by DataFrame.groupby().apply().
 *
 * @param groupingAttributes attributes the input is grouped by
 * @param functionExpr       the Python UDF applied to each group
 * @param output             output attributes declared by the UDF's schema
 * @param child              the logical child plan
 */
case class FlatMapGroupsInPandas(
    groupingAttributes: Seq[Attribute],
    functionExpr: Expression,
    output: Seq[Attribute],
    child: LogicalPlan) extends UnaryNode {

  /**
   * This is needed because output attributes are considered `references` when
   * passed through the constructor.
   *
   * Without this, catalyst will complain that output attributes are missing
   * from the input.
   */
  override val producedAttributes = AttributeSet(output)
}
/**
 * Map partitions using a udf: iter(pandas.Dataframe) -> iter(pandas.DataFrame).
 * This is used by DataFrame.mapInPandas()
 */
case class MapInPandas(
    functionExpr: Expression,
    output: Seq[Attribute],
    child: LogicalPlan) extends UnaryNode {

  // Output comes from the UDF's schema, not the child, so declare it as produced
  // here to keep catalyst's missing-input check satisfied.
  override val producedAttributes = AttributeSet(output)
}
/**
 * Flatmap cogroups using a udf: pandas.Dataframe, pandas.Dataframe -> pandas.Dataframe
 * This is used by DataFrame.groupby().cogroup().apply().
 *
 * @param leftAttributes  grouping attributes on the left side
 * @param rightAttributes grouping attributes on the right side
 */
case class FlatMapCoGroupsInPandas(
    leftAttributes: Seq[Attribute],
    rightAttributes: Seq[Attribute],
    functionExpr: Expression,
    output: Seq[Attribute],
    left: LogicalPlan,
    right: LogicalPlan) extends BinaryNode {

  // Output is defined by the UDF's schema, not the children.
  override val producedAttributes = AttributeSet(output)
}
/**
 * Common shape of a plan node that evaluates Python UDFs: it forwards the
 * child's output and appends one result attribute per evaluated UDF.
 */
trait BaseEvalPython extends UnaryNode {

  def udfs: Seq[PythonUDF]

  def resultAttrs: Seq[Attribute]

  // Child columns first, UDF results appended after.
  override def output: Seq[Attribute] = child.output ++ resultAttrs

  override def producedAttributes: AttributeSet = AttributeSet(resultAttrs)
}
/**
 * A logical plan that evaluates a [[PythonUDF]]
 */
case class BatchEvalPython(
    udfs: Seq[PythonUDF],
    resultAttrs: Seq[Attribute],
    child: LogicalPlan) extends BaseEvalPython
/**
 * A logical plan that evaluates a [[PythonUDF]] with Apache Arrow.
 *
 * @param evalType discriminates the Arrow-based eval variant (scalar, iterator, ...)
 *                 — presumably one of PythonEvalType's constants; confirm at call sites.
 */
case class ArrowEvalPython(
    udfs: Seq[PythonUDF],
    resultAttrs: Seq[Attribute],
    child: LogicalPlan,
    evalType: Int) extends BaseEvalPython
| witgo/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/pythonLogicalOperators.scala | Scala | apache-2.0 | 2,990 |
package ai.verta.repository
import ai.verta.blobs.Blob
/** The walker runs on every file in that given folder and all of its subfolders.
 * @tparam T Type of the returned value over the walk.
 */
trait FolderWalker[+T] {

  /** Filters files and subfolders to be walked on.
   * The returned folder can contain less elements than before.
   * Default: no filtering (returns the folder unchanged).
   */
  def filterFolder(folder: Folder): Folder = folder

  /** Replaces the current object based on the information of a given folder.
   * This can be used to replace the base object with subfolder-specific information.
   * Default: keep the same walker for every subfolder.
   */
  def replace(folder: Folder): FolderWalker[T] = this

  /** Visits the given blob.
   */
  def visitBlob(name: String, blob: Blob): T

  /** Visits the given folder.
   */
  def visitFolder(name: String, folder: Folder): T
}
| mitdbg/modeldb | client/scala/src/main/scala/ai/verta/repository/FolderWalker.scala | Scala | mit | 797 |
package fr.hmil.roshttp.node
/**
 * Base class for an optionally-available Node.js module binding: `require()`
 * attempts to resolve the module, `isAvailable` probes for it, and `api`
 * exposes it, failing fast with [[ModuleNotFoundException]] when absent.
 *
 * The `require()` call sites now keep their parentheses, matching the
 * declaration's explicit empty-paren (effectful) convention and avoiding
 * visual confusion with `Predef.require`.
 */
private[roshttp] abstract class Module[T](val name: String) {

  /** Whether the underlying module can be resolved in the current runtime. */
  def isAvailable: Boolean = require().isDefined

  /** Attempts to load the module; None when it is not available. */
  def require(): Option[T]

  /** The resolved module API; throws [[ModuleNotFoundException]] when absent.
   *  Lazy so resolution only happens on first use. */
  lazy val api = require().getOrElse(throw new ModuleNotFoundException(name))
}
/** Thrown by [[Module.api]] when the named Node.js module cannot be resolved. */
private[roshttp] class ModuleNotFoundException(name: String) extends RuntimeException("Module " + name + " not found")
| hmil/RosHTTP | js/src/main/scala/fr/hmil/roshttp/node/Module.scala | Scala | mit | 366 |
package org.jetbrains.plugins.scala
package lang.refactoring.introduceField
import com.intellij.internal.statistic.UsageTrigger
import com.intellij.openapi.actionSystem.DataContext
import com.intellij.openapi.editor.markup.RangeHighlighter
import com.intellij.openapi.editor.{Document, Editor}
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.TextRange
import com.intellij.psi.{PsiDocumentManager, PsiElement, PsiFile}
import com.intellij.refactoring.HelpID
import com.intellij.refactoring.util.CommonRefactoringUtil
import org.jetbrains.plugins.scala.extensions.childOf
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScEarlyDefinitions
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.{ScExtendsBlock, ScTemplateParents}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScMember, ScTemplateDefinition}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory._
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.refactoring.introduceField.ScalaIntroduceFieldHandlerBase._
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaRefactoringUtil._
import org.jetbrains.plugins.scala.util.ScalaUtils
/**
 * Nikolay.Tropin
 * 6/27/13
 */
/**
 * "Introduce Field" refactoring handler: extracts the selected expression into
 * a new field of a chosen enclosing class, replacing one or all occurrences.
 */
class ScalaIntroduceFieldFromExpressionHandler extends ScalaIntroduceFieldHandlerBase {

  // Highlighters marking the other occurrences while the settings dialog is open.
  private var occurrenceHighlighters = Seq.empty[RangeHighlighter]

  /** Entry point for an explicit selection range; validates it and asks the user for a target class. */
  def invoke(project: Project, editor: Editor, file: PsiFile, startOffset: Int, endOffset: Int) {
    try {
      UsageTrigger.trigger(ScalaBundle.message("introduce.field.id"))
      PsiDocumentManager.getInstance(project).commitAllDocuments()
      checkFile(file, project, editor, REFACTORING_NAME)
      // Bail out with an error hint when the selection is not an expression.
      val (expr: ScExpression, types: Array[ScType]) = getExpression(project, editor, file, startOffset, endOffset) match {
        case Some((e, tps)) => (e, tps)
        case None =>
          showErrorMessage(ScalaBundle.message("cannot.refactor.not.expression"), project, editor)
          return
      }
      afterClassChoosing[ScExpression](expr, types, project, editor, file, "Choose class for Introduce Field") {
        convertExpressionToField
      }
    }
    catch {
      // IntroduceException signals an already-reported, user-visible abort.
      case _: IntroduceException =>
    }
  }

  /** Entry point from the IDE action: lets the user pick an expression, then delegates. */
  override def invoke(project: Project, editor: Editor, file: PsiFile, dataContext: DataContext) {
    val canBeIntroduced: (ScExpression) => Boolean = checkCanBeIntroduced(_)
    afterExpressionChoosing(project, editor, file, dataContext, REFACTORING_NAME, canBeIntroduced) {
      trimSpacesAndComments(editor, file)
      invoke(project, editor, file, editor.getSelectionModel.getSelectionStart, editor.getSelectionModel.getSelectionEnd)
    }
  }

  /** Element-array variant of the handler API; intentionally unsupported here. */
  override def invoke(project: Project, elements: Array[PsiElement], dataContext: DataContext) {
    //nothing
  }

  /** Validates the expression, collects settings via dialog, then runs the refactoring. */
  def convertExpressionToField(ifc: IntroduceFieldContext[ScExpression]) {
    val possiblePlace = checkCanBeIntroduced(ifc.element, showErrorMessage(_, ifc.project, ifc.editor))
    if (!possiblePlace) return

    def runWithDialog() {
      val settings = new IntroduceFieldSettings(ifc)
      if (!settings.canBeInitInDeclaration && !settings.canBeInitLocally) {
        showErrorMessage("Cannot create field from this expression", ifc.project, ifc.editor)
      } else {
        val dialog = getDialog(ifc, settings)
        if (dialog.isOK) {
          runRefactoring(ifc, settings)
        }
      }
    }

    runWithDialog()
  }

  /**
   * Performs the actual PSI mutation: replaces occurrences with the new field
   * name, creates the field declaration (initialised in place or assigned at
   * the first usage site) and inserts it at the computed anchor.
   * Must run inside a write action (see runRefactoring).
   */
  private def runRefactoringInside(ifc: IntroduceFieldContext[ScExpression], settings: IntroduceFieldSettings[ScExpression]) {
    val expression = expressionToIntroduce(ifc.element)
    val mainOcc = ifc.occurrences.filter(_.getStartOffset == ifc.editor.getSelectionModel.getSelectionStart)
    val occurrencesToReplace = if (settings.replaceAll) ifc.occurrences else mainOcc
    val aClass = ifc.aClass
    // Probe for a valid insertion point before mutating anything.
    val checkAnchor: PsiElement = anchorForNewDeclaration(expression, occurrencesToReplace, aClass)
    if (checkAnchor == null) {
      showErrorMessage("Cannot find place for the new field", ifc.project, ifc.editor)
      return
    }
    implicit val projectContext = aClass.projectContext
    val name = settings.name
    val typeName = Option(settings.scType).map(_.canonicalText).getOrElse("")
    val replacedOccurences = replaceOccurences(occurrencesToReplace, name, ifc.file)
    // Re-resolve the anchor: replacing occurrences may have shifted the tree.
    val anchor = anchorForNewDeclaration(expression, replacedOccurences, aClass)
    val initInDecl = settings.initInDeclaration
    var createdDeclaration: PsiElement = null
    if (initInDecl) {
      createdDeclaration = createDeclaration(name, typeName, settings.defineVar, expression)
    } else {
      // Declare with `_` and assign at the first usage site instead.
      val underscore = createExpressionFromText("_")
      createdDeclaration = createDeclaration(name, typeName, settings.defineVar, underscore)

      anchorForInitializer(replacedOccurences, ifc.file) match {
        case Some(anchorForInit) =>
          val parent = anchorForInit.getParent
          val assignStmt = createExpressionFromText(s"$name = ${expression.getText}")
          parent.addBefore(assignStmt, anchorForInit)
          parent.addBefore(createNewLine(), anchorForInit)
        case None => throw new IntroduceException
      }
    }

    settings.visibilityLevel match {
      case "" =>
      case other =>
        val modifier = createModifierFromText(other)
        createdDeclaration.asInstanceOf[ScMember].getModifierList.add(modifier)
    }

    lazy val document: Document = ifc.editor.getDocument
    // Choose the insertion strategy based on where the anchor sits.
    anchor match {
      case (_: ScTemplateParents) childOf (extBl: ScExtendsBlock) =>
        // Anchor is in the extends clause: put the field into early definitions.
        val earlyDef = extBl.addEarlyDefinitions()
        createdDeclaration = earlyDef.addAfter(createdDeclaration, earlyDef.getFirstChild)
      case _ childOf (ed: ScEarlyDefinitions) if onOneLine(document, ed.getTextRange) =>
        // Single-line early definitions: splice text directly into the document
        // with semicolon separators.
        def isBlockStmtOrMember(elem: PsiElement) = elem != null && (elem.isInstanceOf[ScBlockStatement] || elem.isInstanceOf[ScMember])
        var declaration = createdDeclaration.getText
        if (isBlockStmtOrMember(anchor)) declaration += "; "
        if (isBlockStmtOrMember(anchor.getPrevSibling)) declaration = "; " + declaration
        document.insertString(anchor.getTextRange.getStartOffset, declaration)
        PsiDocumentManager.getInstance(ifc.project).commitDocument(document)
      case _ childOf parent =>
        createdDeclaration = parent.addBefore(createdDeclaration, anchor)
        parent.addBefore(createNewLine(), anchor)
    }
    ScalaPsiUtil.adjustTypes(createdDeclaration)
  }

  /** Wraps runRefactoringInside in a write action and clears the selection afterwards. */
  def runRefactoring(ifc: IntroduceFieldContext[ScExpression], settings: IntroduceFieldSettings[ScExpression]) {
    val runnable = new Runnable {
      def run(): Unit = runRefactoringInside(ifc, settings)
    }
    ScalaUtils.runWriteAction(runnable, ifc.project, REFACTORING_NAME)
    ifc.editor.getSelectionModel.removeSelection()
  }

  /** Shows the settings dialog, highlighting all occurrences while it is open. */
  protected def getDialog(ifc: IntroduceFieldContext[ScExpression], settings: IntroduceFieldSettings[ScExpression]): ScalaIntroduceFieldDialog = {
    val occCount = ifc.occurrences.length
    // Add occurrences highlighting
    if (occCount > 1)
      occurrenceHighlighters = highlightOccurrences(ifc.project, ifc.occurrences, ifc.editor)
    val dialog = new ScalaIntroduceFieldDialog(ifc, settings)
    dialog.show()
    if (!dialog.isOK) {
      // Dialog cancelled: remove the occurrence highlighting again.
      if (occCount > 1) {
        occurrenceHighlighters.foreach(_.dispose())
        occurrenceHighlighters = Seq.empty
      }
    }
    dialog
  }

  /** Any class is an acceptable target for an introduced field. */
  protected override def isSuitableClass(elem: PsiElement, clazz: ScTemplateDefinition): Boolean = true

  /** True when the whole range lies on a single document line. */
  private def onOneLine(document: Document, range: TextRange): Boolean = {
    document.getLineNumber(range.getStartOffset) == document.getLineNumber(range.getEndOffset)
  }

  /** Shows a refactoring error hint tied to the Introduce Field help topic. */
  private def showErrorMessage(text: String, project: Project, editor: Editor) = {
    CommonRefactoringUtil.showErrorHint(project, editor, text, REFACTORING_NAME, HelpID.INTRODUCE_FIELD)
  }
}
| loskutov/intellij-scala | src/org/jetbrains/plugins/scala/lang/refactoring/introduceField/ScalaIntroduceFieldFromExpressionHandler.scala | Scala | apache-2.0 | 8,055 |
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.contrib.cats.data
import cats.Order
import cats.data.NonEmptySet
import cats.instances.sortedSet._
object nonEmptySet {

  /** cats Order for NonEmptySet, delegating to the Order of the underlying SortedSet. */
  implicit def catsNonEmptySetOrder[A: Order]: Order[NonEmptySet[A]] =
    Order.by(_.toSortedSet)
}
| slamdata/quasar | foundation/src/main/scala/quasar/contrib/cats/data/nonEmptySet.scala | Scala | apache-2.0 | 843 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.ui
import java.util.concurrent.TimeUnit
import javax.servlet.http.HttpServletRequest
import scala.collection.mutable
import scala.xml.{Node, Unparsed}
import org.apache.spark.internal.Logging
import org.apache.spark.ui.{GraphUIData, JsCollector, UIUtils => SparkUIUtils, WebUIPage}
import org.apache.spark.util.Utils
/**
 * A helper class for "scheduling delay", "processing time" and "total delay" to generate data
 * that will be used in the timeline and histogram graphs.
 *
 * @param data (batchTime, milliseconds). "milliseconds" is something like "processing time".
 */
private[ui] class MillisecondsStatUIData(data: Seq[(Long, Long)]) {

  /** The (batchTime, duration) pairs with durations converted to `unit`, for the timeline. */
  def timelineData(unit: TimeUnit): Seq[(Long, Double)] =
    data.map { case (batchTime, millis) => batchTime -> UIUtils.convertToTimeUnit(millis, unit) }

  /** Just the durations, converted to `unit`, for the histogram. */
  def histogramData(unit: TimeUnit): Seq[Double] =
    data.map { case (_, millis) => UIUtils.convertToTimeUnit(millis, unit) }

  /** Mean duration in milliseconds, or None when there is no data. */
  val avg: Option[Long] = if (data.nonEmpty) Some(data.map(_._2).sum / data.size) else None

  val formattedAvg: String = StreamingPage.formatDurationOption(avg)

  /** Maximum duration in milliseconds, or None when there is no data. */
  val max: Option[Long] = if (data.nonEmpty) Some(data.map(_._2).max) else None
}
/**
 * A helper class for "input rate" to generate data that will be used in the timeline and
 * histogram graphs.
 *
 * @param data (batch time, record rate).
 */
private[ui] class RecordRateUIData(val data: Seq[(Long, Double)]) {

  /** Mean record rate, or None when there is no data. */
  val avg: Option[Double] = if (data.isEmpty) None else Some(data.map(_._2).sum / data.size)

  // "%.2f".format(a) produces the same text as a.formatted("%.2f") but avoids
  // the StringFormat.formatted enrichment, which is deprecated in Scala 2.13.
  val formattedAvg: String = avg.map(a => "%.2f".format(a)).getOrElse("-")

  /** Maximum record rate, or None when there is no data. */
  val max: Option[Double] = if (data.isEmpty) None else Some(data.map(_._2).max)
}
/** Page for Spark Web UI that shows statistics of a streaming job */
private[ui] class StreamingPage(parent: StreamingTab)
extends WebUIPage("") with Logging {
import StreamingPage._
// Listener backing every statistic rendered on this page.
private val listener = parent.listener

// Start time reported by the listener; the page's reference point for uptime.
private def startTime: Long = listener.startTime
/** Render the page */
def render(request: HttpServletRequest): Seq[Node] = {
  val resources = generateLoadResources(request)
  val onClickTimelineFunc = generateOnClickTimelineFunction()
  val basicInfo = generateBasicInfo()
  // Read listener state under its lock so the stat table and batch tables
  // reflect one consistent snapshot.
  val content = resources ++
    onClickTimelineFunc ++ basicInfo ++
    listener.synchronized {
      generateStatTable() ++
        generateBatchListTables(request)
    }
  SparkUIUtils.headerSparkPage(request, "Streaming Statistics", content, parent)
}
/**
 * Generate html that will load css/js files for StreamingPage
 */
private def generateLoadResources(request: HttpServletRequest): Seq[Node] = {
  // scalastyle:off
  <script src={SparkUIUtils.prependBaseUri(request, "/static/d3.min.js")}></script>
    <link rel="stylesheet" href={SparkUIUtils.prependBaseUri(request, "/static/streaming-page.css")} type="text/css"/>
    <script src={SparkUIUtils.prependBaseUri(request, "/static/streaming-page.js")}></script>
  // scalastyle:on
}
/** Generate html that will set onClickTimeline declared in streaming-page.js */
private def generateOnClickTimelineFunction(): Seq[Node] = {
  val js = "onClickTimeline = getOnClickTimelineFunction();"
  <script>{Unparsed(js)}</script>
}
/** Generate basic information of the streaming program: batch interval, uptime,
 *  completed batch count and total records received. */
private def generateBasicInfo(): Seq[Node] = {
  val timeSinceStart = System.currentTimeMillis() - startTime
  <div>Running batches of
    <strong>
      {SparkUIUtils.formatDurationVerbose(listener.batchDuration)}
    </strong>
    for
    <strong>
      {SparkUIUtils.formatDurationVerbose(timeSinceStart)}
    </strong>
    since
    <strong>
      {SparkUIUtils.formatDate(startTime)}
    </strong>
    (<strong>{listener.numTotalCompletedBatches}</strong>
    completed batches, <strong>{listener.numTotalReceivedRecords}</strong> records)
  </div>
  <br />
}
/**
 * Generate a global "timeFormat" dictionary in the JavaScript to store the time and its formatted
 * string. Because we cannot specify a timezone in JavaScript, to make sure the server and client
 * use the same timezone, we use the "timeFormat" dictionary to format all time values used in the
 * graphs.
 *
 * @param times all time values that will be used in the graphs.
 */
private def generateTimeMap(times: Seq[Long]): Seq[Node] = {
  val js = "var timeFormat = {};\\n" + times.map { time =>
    val formattedTime =
      SparkUIUtils.formatBatchTime(time, listener.batchDuration, showYYYYMMSS = false)
    s"timeFormat[$time] = '$formattedTime';"
  }.mkString("\\n")

  <script>{Unparsed(js)}</script>
}
/**
 * Generate a global "timeTipStrings" dictionary mapping each batch time to its
 * tooltip string. We leverage timeFormat as the value would be same as timeFormat.
 * This means it is sensitive to the order - generateTimeMap should be called
 * earlier than this.
 */
private def generateTimeTipStrings(times: Seq[Long]): Seq[Node] = {
  // Use a real newline ("\n") between statements; the previous "\\n" injected a
  // literal backslash-n and produced invalid JavaScript.
  val js = "var timeTipStrings = {};\n" + times.map { time =>
    s"timeTipStrings[$time] = timeFormat[$time];"
  }.mkString("\n")
  <script>{Unparsed(js)}</script>
}
// Builds the main statistics table: an aggregate input-rate graph, then one
// timeline/histogram row each for scheduling delay, processing time, and total
// delay, plus (when streams exist) a collapsible per-stream detail table.
private def generateStatTable(): Seq[Node] = {
val batches = listener.retainedBatches
val batchTimes = batches.map(_.batchTime.milliseconds)
// Shared X-axis range for all graphs; fall back to the start time when no
// batches have been retained yet.
val minBatchTime = if (batchTimes.isEmpty) startTime else batchTimes.min
val maxBatchTime = if (batchTimes.isEmpty) startTime else batchTimes.max
// Records/sec per batch, aggregated over all input streams.
val recordRateForAllStreams = new RecordRateUIData(batches.map { batchInfo =>
(batchInfo.batchTime.milliseconds, batchInfo.numRecords * 1000.0 / listener.batchDuration)
})
val schedulingDelay = new MillisecondsStatUIData(batches.flatMap { batchInfo =>
batchInfo.schedulingDelay.map(batchInfo.batchTime.milliseconds -> _)
})
val processingTime = new MillisecondsStatUIData(batches.flatMap { batchInfo =>
batchInfo.processingDelay.map(batchInfo.batchTime.milliseconds -> _)
})
val totalDelay = new MillisecondsStatUIData(batches.flatMap { batchInfo =>
batchInfo.totalDelay.map(batchInfo.batchTime.milliseconds -> _)
})
// Use the max value of "schedulingDelay", "processingTime", and "totalDelay" to make the
// Y axis ranges same.
val _maxTime =
(for (m1 <- schedulingDelay.max; m2 <- processingTime.max; m3 <- totalDelay.max) yield
m1 max m2 max m3).getOrElse(0L)
// Should start at 0
val minTime = 0L
val (maxTime, normalizedUnit) = UIUtils.normalizeDuration(_maxTime)
val formattedUnit = UIUtils.shortTimeUnitString(normalizedUnit)
// Use the max input rate for all InputDStreams' graphs to make the Y axis ranges same.
// If it's not an integral number, just use its ceil integral number.
val maxRecordRate = recordRateForAllStreams.max.map(_.ceil.toLong).getOrElse(0L)
val minRecordRate = 0L
val batchInterval = UIUtils.convertToTimeUnit(listener.batchDuration, normalizedUnit)
// Accumulates the JavaScript emitted by each GraphUIData; rendered once at the end.
val jsCollector = new JsCollector
val graphUIDataForRecordRateOfAllStreams =
new GraphUIData(
"all-stream-records-timeline",
"all-stream-records-histogram",
recordRateForAllStreams.data,
minBatchTime,
maxBatchTime,
minRecordRate,
maxRecordRate,
"records/sec")
graphUIDataForRecordRateOfAllStreams.generateDataJs(jsCollector)
val graphUIDataForSchedulingDelay =
new GraphUIData(
"scheduling-delay-timeline",
"scheduling-delay-histogram",
schedulingDelay.timelineData(normalizedUnit),
minBatchTime,
maxBatchTime,
minTime,
maxTime,
formattedUnit)
graphUIDataForSchedulingDelay.generateDataJs(jsCollector)
val graphUIDataForProcessingTime =
new GraphUIData(
"processing-time-timeline",
"processing-time-histogram",
processingTime.timelineData(normalizedUnit),
minBatchTime,
maxBatchTime,
minTime,
maxTime,
formattedUnit, Some(batchInterval))
graphUIDataForProcessingTime.generateDataJs(jsCollector)
val graphUIDataForTotalDelay =
new GraphUIData(
"total-delay-timeline",
"total-delay-histogram",
totalDelay.timelineData(normalizedUnit),
minBatchTime,
maxBatchTime,
minTime,
maxTime,
formattedUnit)
graphUIDataForTotalDelay.generateDataJs(jsCollector)
// It's false before the user registers the first InputDStream
val hasStream = listener.streamIds.nonEmpty
val numCompletedBatches = listener.retainedCompletedBatches.size
val numActiveBatches = batchTimes.length - numCompletedBatches
val numReceivers = listener.numInactiveReceivers + listener.numActiveReceivers
val table =
// scalastyle:off
<table id="stat-table" class="table table-bordered" style="width: auto">
<thead>
<tr>
<th style="width: 160px;"></th>
<th style="width: 492px;">Timelines (Last {batchTimes.length} batches, {numActiveBatches} active, {numCompletedBatches} completed)</th>
<th style="width: 350px;">Histograms</th></tr>
</thead>
<tbody>
<tr>
<td style="vertical-align: middle;">
<div style="width: 160px;">
<div>
{
if (hasStream) {
<span class="expand-input-rate">
<span class="expand-input-rate-arrow arrow-closed"></span>
<a data-toggle="tooltip" title="Show/hide details of each receiver" data-placement="top">
<strong>Input Rate</strong>
</a>
</span>
} else {
<strong>Input Rate</strong>
}
}
</div>
{
if (numReceivers > 0) {
<div>Receivers: {listener.numActiveReceivers} / {numReceivers} active</div>
}
}
<div>Avg: {recordRateForAllStreams.formattedAvg} records/sec</div>
</div>
</td>
<td class="timeline">{graphUIDataForRecordRateOfAllStreams.generateTimelineHtml(jsCollector)}</td>
<td class="histogram">{graphUIDataForRecordRateOfAllStreams.generateHistogramHtml(jsCollector)}</td>
</tr>
{if (hasStream) {
<tr id="inputs-table" style="display: none;" >
<td colspan="3">
{generateInputDStreamsTable(jsCollector, minBatchTime, maxBatchTime, minRecordRate)}
</td>
</tr>
}}
<tr>
<td style="vertical-align: middle;">
<div style="width: 160px;">
<div><strong>Scheduling Delay {SparkUIUtils.tooltip("Time taken by Streaming scheduler to submit jobs of a batch", "top")}</strong></div>
<div>Avg: {schedulingDelay.formattedAvg}</div>
</div>
</td>
<td class="timeline">{graphUIDataForSchedulingDelay.generateTimelineHtml(jsCollector)}</td>
<td class="histogram">{graphUIDataForSchedulingDelay.generateHistogramHtml(jsCollector)}</td>
</tr>
<tr>
<td style="vertical-align: middle;">
<div style="width: 160px;">
<div><strong>Processing Time {SparkUIUtils.tooltip("Time taken to process all jobs of a batch", "top")}</strong></div>
<div>Avg: {processingTime.formattedAvg}</div>
</div>
</td>
<td class="timeline">{graphUIDataForProcessingTime.generateTimelineHtml(jsCollector)}</td>
<td class="histogram">{graphUIDataForProcessingTime.generateHistogramHtml(jsCollector)}</td>
</tr>
<tr>
<td style="vertical-align: middle;">
<div style="width: 160px;">
<div><strong>Total Delay {SparkUIUtils.tooltip("Total time taken to handle a batch", "top")}</strong></div>
<div>Avg: {totalDelay.formattedAvg}</div>
</div>
</td>
<td class="timeline">{graphUIDataForTotalDelay.generateTimelineHtml(jsCollector)}</td>
<td class="histogram">{graphUIDataForTotalDelay.generateHistogramHtml(jsCollector)}</td>
</tr>
</tbody>
</table>
// scalastyle:on
// Order matters: the time dictionaries must be emitted before the collected
// graph JavaScript that references them.
generateTimeMap(batchTimes) ++ generateTimeTipStrings(batchTimes) ++ table ++
jsCollector.toHtml
}
// Builds the per-stream detail table shown when "Input Rate" is expanded:
// one row pair (info + graphs) for each input stream, ordered by stream id.
private def generateInputDStreamsTable(
jsCollector: JsCollector,
minX: Long,
maxX: Long,
minY: Double): Seq[Node] = {
// Shared Y-axis upper bound: the maximum record rate observed across all
// streams, rounded up to a whole number (0 when there is no data).
val maxYCalculated = listener.receivedRecordRateWithBatchTime.values
.flatMap { case streamAndRates => streamAndRates.map { case (_, recordRate) => recordRate } }
.reduceOption[Double](math.max)
.map(_.ceil.toLong)
.getOrElse(0L)
// Rows for each stream, sorted by stream id so the order is stable.
val content: Seq[Node] = listener.receivedRecordRateWithBatchTime.toList.sortBy(_._1).flatMap {
case (streamId, recordRates) =>
generateInputDStreamRow(
jsCollector, streamId, recordRates, minX, maxX, minY, maxYCalculated)
}
// scalastyle:off
<table class="table table-bordered" style="width: auto">
<thead>
<tr>
<th style="width: 151px;"></th>
<th style="width: 167px; padding: 8px 0 8px 0"><div style="margin: 0 8px 0 8px">Status</div></th>
<th style="width: 167px; padding: 8px 0 8px 0"><div style="margin: 0 8px 0 8px">Executor ID / Host</div></th>
<th style="width: 166px; padding: 8px 0 8px 0"><div style="margin: 0 8px 0 8px">Last Error Time</div></th>
<th>Last Error Message</th>
</tr>
</thead>
<tbody>
{content}
</tbody>
</table>
// scalastyle:on
}
// Renders the two table rows for a single input stream: one with receiver
// status/location/error info, and one with its record-rate timeline/histogram.
private def generateInputDStreamRow(
jsCollector: JsCollector,
streamId: Int,
recordRates: Seq[(Long, Double)],
minX: Long,
maxX: Long,
minY: Double,
maxY: Double): Seq[Node] = {
// If this is a ReceiverInputDStream, we need to show the receiver info. Or we only need the
// InputDStream name.
val receiverInfo = listener.receiverInfo(streamId)
// Display name preference: receiver name, then stream name, then "Stream-<id>".
val receiverName = receiverInfo.map(_.name).
orElse(listener.streamName(streamId)).getOrElse(s"Stream-$streamId")
val receiverActive = receiverInfo.map { info =>
if (info.active) "ACTIVE" else "INACTIVE"
}.getOrElse(emptyCell)
val receiverLocation = receiverInfo.map { info =>
val executorId = if (info.executorId.isEmpty) emptyCell else info.executorId
val location = if (info.location.isEmpty) emptyCell else info.location
s"$executorId / $location"
}.getOrElse(emptyCell)
// Last error text, truncated to 100 characters (97 + "...").
val receiverLastError = receiverInfo.map { info =>
val msg = s"${info.lastErrorMessage} - ${info.lastError}"
if (msg.length > 100) msg.take(97) + "..." else msg
}.getOrElse(emptyCell)
// A negative timestamp means no error has occurred yet.
val receiverLastErrorTime = receiverInfo.map {
r => if (r.lastErrorTime < 0) "-" else SparkUIUtils.formatDate(r.lastErrorTime)
}.getOrElse(emptyCell)
val receivedRecords = new RecordRateUIData(recordRates)
val graphUIDataForRecordRate =
new GraphUIData(
s"stream-$streamId-records-timeline",
s"stream-$streamId-records-histogram",
receivedRecords.data,
minX,
maxX,
minY,
maxY,
"records/sec")
graphUIDataForRecordRate.generateDataJs(jsCollector)
<tr>
<td rowspan="2" style="vertical-align: middle; width: 151px;">
<div style="width: 151px;">
<div style="word-wrap: break-word;"><strong>{receiverName}</strong></div>
<div>Avg: {receivedRecords.formattedAvg} records/sec</div>
</div>
</td>
<td>{receiverActive}</td>
<td>{receiverLocation}</td>
<td>{receiverLastErrorTime}</td>
<td><div style="width: 342px;">{receiverLastError}</div></td>
</tr>
<tr>
<td colspan="3" class="timeline">
{graphUIDataForRecordRate.generateTimelineHtml(jsCollector)}
</td>
<td class="histogram">{graphUIDataForRecordRate.generateHistogramHtml(jsCollector)}</td>
</tr>
}
/**
 * Renders one pageable table of batches (running/waiting/completed) identified
 * by tableTag, falling back to an inline error panel when the requested page
 * cannot be rendered.
 */
private def streamingTable(request: HttpServletRequest, batches: Seq[BatchUIData],
    tableTag: String): Seq[Node] = {
  // Requested page number for this table; defaults to the first page.
  val pageNumber = Option(request.getParameter(s"$tableTag.page")).map(_.toInt).getOrElse(1)
  try {
    val pagedTable = new StreamingPagedTable(
      request,
      tableTag,
      batches,
      SparkUIUtils.prependBaseUri(request, parent.basePath),
      "streaming",
      listener.batchDuration)
    pagedTable.table(pageNumber)
  } catch {
    case e @ (_: IllegalArgumentException | _: IndexOutOfBoundsException) =>
      <div class="alert alert-error">
        <p>Error while rendering streaming table:</p>
        <pre>
          {Utils.exceptionString(e)}
        </pre>
      </div>
  }
}
// Builds up to three collapsible sections (running, waiting, completed batches);
// a section is rendered only when it has at least one batch to show.
private def generateBatchListTables(request: HttpServletRequest): Seq[Node] = {
// Newest batches first in every table.
val runningBatches = listener.runningBatches.sortBy(_.batchTime.milliseconds).reverse
val waitingBatches = listener.waitingBatches.sortBy(_.batchTime.milliseconds).reverse
val completedBatches = listener.retainedCompletedBatches.
sortBy(_.batchTime.milliseconds).reverse
val content = mutable.ListBuffer[Node]()
if (runningBatches.nonEmpty) {
content ++=
<div class="row">
<div class="col-12">
<span id="runningBatches" class="collapse-aggregated-runningBatches collapse-table"
onClick="collapseTable('collapse-aggregated-runningBatches',
'aggregated-runningBatches')">
<h4>
<span class="collapse-table-arrow arrow-open"></span>
<a>Running Batches ({runningBatches.size})</a>
</h4>
</span>
<div class="aggregated-runningBatches collapsible-table">
{ streamingTable(request, runningBatches, "runningBatches") }
</div>
</div>
</div>
}
if (waitingBatches.nonEmpty) {
content ++=
<div class="row">
<div class="col-12">
<span id="waitingBatches" class="collapse-aggregated-waitingBatches collapse-table"
onClick="collapseTable('collapse-aggregated-waitingBatches',
'aggregated-waitingBatches')">
<h4>
<span class="collapse-table-arrow arrow-open"></span>
<a>Waiting Batches ({waitingBatches.size})</a>
</h4>
</span>
<div class="aggregated-waitingBatches collapsible-table">
{ streamingTable(request, waitingBatches, "waitingBatches") }
</div>
</div>
</div>
}
// Completed batches are capped by the listener's retention limit, hence the
// "last N out of M" phrasing below.
if (completedBatches.nonEmpty) {
content ++=
<div class="row">
<div class="col-12">
<span id="completedBatches" class="collapse-aggregated-completedBatches collapse-table"
onClick="collapseTable('collapse-aggregated-completedBatches',
'aggregated-completedBatches')">
<h4>
<span class="collapse-table-arrow arrow-open"></span>
<a>Completed Batches (last {completedBatches.size}
out of {listener.numTotalCompletedBatches})</a>
</h4>
</span>
<div class="aggregated-completedBatches collapsible-table">
{ streamingTable(request, completedBatches, "completedBatches") }
</div>
</div>
</div>
}
content
}
}
/** Shared constants and helpers for the streaming UI page. */
private[ui] object StreamingPage {
  val BLACK_RIGHT_TRIANGLE_HTML = "▶"
  val BLACK_DOWN_TRIANGLE_HTML = "▼"

  // Placeholder rendered when a value is unavailable.
  val emptyCell = "-"

  /**
   * Returns a human-readable string representing a duration such as "5 second 35 ms",
   * or the empty-cell placeholder when no duration is available.
   */
  def formatDurationOption(msOption: Option[Long]): String =
    msOption.fold(emptyCell)(SparkUIUtils.formatDurationVerbose)
}
| ueshin/apache-spark | streaming/src/main/scala/org/apache/spark/streaming/ui/StreamingPage.scala | Scala | apache-2.0 | 20,786 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package other.kafka
import org.I0Itec.zkclient.ZkClient
import kafka.api._
import kafka.utils.{ZkUtils, ShutdownableThread}
import org.apache.kafka.common.protocol.SecurityProtocol
import scala.collection._
import kafka.client.ClientUtils
import joptsimple.OptionParser
import kafka.common.{ErrorMapping, OffsetAndMetadata, TopicAndPartition}
import kafka.network.BlockingChannel
import scala.util.Random
import java.io.IOException
import kafka.metrics.{KafkaTimer, KafkaMetricsGroup}
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicInteger
import java.nio.channels.ClosedByInterruptException
object TestOffsetManager {
val random = new Random
val SocketTimeoutMs = 10000
/**
 * Periodically prints aggregate commit statistics plus the per-thread commit
 * and fetch stats lines, pausing reportingIntervalMs between reports.
 */
class StatsThread(reportingIntervalMs: Long, commitThreads: Seq[CommitThread], fetchThread: FetchThread)
      extends ShutdownableThread("stats-thread") {

  def printStats() {
    val separator = "--------------------------------------------------------------------------------"
    val errorCount = commitThreads.map(_.numErrors.get).sum
    val maxLatency = commitThreads.map(_.timer.max()).max
    val minLatency = commitThreads.map(_.timer.min()).min
    val meanLatency = commitThreads.map(_.timer.mean()).sum / commitThreads.size
    val commitCount = commitThreads.map(_.numCommits.get).sum
    println(separator)
    println("Aggregate stats for commits:")
    println("Error count: %d; Max:%f; Min: %f; Mean: %f; Commit count: %d".format(
      errorCount, maxLatency, minLatency, meanLatency, commitCount))
    println(separator)
    for (thread <- commitThreads)
      println(thread.stats)
    println(fetchThread.stats)
  }

  override def doWork() {
    printStats()
    Thread.sleep(reportingIntervalMs)
  }
}
// Commits offsets for a dedicated consumer group ("group-<id>") in a loop,
// tracking latency, commit count, and error count for the stats thread.
class CommitThread(id: Int, partitionCount: Int, commitIntervalMs: Long, zkUtils: ZkUtils)
extends ShutdownableThread("commit-thread")
with KafkaMetricsGroup {
private val groupId = "group-" + id
private val metadata = "Metadata from commit thread " + id
// Channel to this group's offset manager; re-established by ensureConnected().
private var offsetsChannel = ClientUtils.channelToOffsetManager(groupId, zkUtils, SocketTimeoutMs)
// Offset committed for every partition; incremented after each successful send.
private var offset = 0L
val numErrors = new AtomicInteger(0)
val numCommits = new AtomicInteger(0)
val timer = newTimer("commit-thread", TimeUnit.MILLISECONDS, TimeUnit.SECONDS)
private val commitTimer = new KafkaTimer(timer)
val shutdownLock = new Object
// Reconnect the offsets channel if it was disconnected (e.g. after an IOException).
private def ensureConnected() {
if (!offsetsChannel.isConnected)
offsetsChannel = ClientUtils.channelToOffsetManager(groupId, zkUtils, SocketTimeoutMs)
}
override def doWork() {
// One request covering partitions 1..partitionCount of "topic-<id>", all at the
// current offset with this thread's metadata string.
val commitRequest = OffsetCommitRequest(groupId, immutable.Map((1 to partitionCount).map(TopicAndPartition("topic-" + id, _) -> OffsetAndMetadata(offset, metadata)):_*))
try {
ensureConnected()
offsetsChannel.send(commitRequest)
numCommits.getAndIncrement
// Only the receive/parse half of the round trip is timed; the send is not.
commitTimer.time {
val response = OffsetCommitResponse.readFrom(offsetsChannel.receive().payload())
if (response.commitStatus.exists(_._2 != ErrorMapping.NoError)) numErrors.getAndIncrement
}
offset += 1
}
catch {
case e1: ClosedByInterruptException =>
// Interrupted (typically during shutdown): quietly drop the channel.
offsetsChannel.disconnect()
case e2: IOException =>
println("Commit thread %d: Error while committing offsets to %s:%d for group %s due to %s.".format(id, offsetsChannel.host, offsetsChannel.port, groupId, e2))
offsetsChannel.disconnect()
}
finally {
// Pace the commit loop.
Thread.sleep(commitIntervalMs)
}
}
override def shutdown() {
super.shutdown()
awaitShutdown()
offsetsChannel.disconnect()
println("Commit thread %d ended. Last committed offset: %d.".format(id, offset))
}
// One-line summary of this thread's commit statistics for the stats report.
def stats = {
"Commit thread %d :: Error count: %d; Max:%f; Min: %f; Mean: %f; Commit count: %d"
.format(id, numErrors.get(), timer.max(), timer.min(), timer.mean(), numCommits.get())
}
}
// Repeatedly fetches committed offsets for a randomly chosen group, locating the
// group's coordinator via a metadata channel and caching one channel per coordinator.
class FetchThread(numGroups: Int, fetchIntervalMs: Long, zkUtils: ZkUtils)
extends ShutdownableThread("fetch-thread")
with KafkaMetricsGroup {
private val timer = newTimer("fetch-thread", TimeUnit.MILLISECONDS, TimeUnit.SECONDS)
private val fetchTimer = new KafkaTimer(timer)
// Cache of offset-manager channels keyed by coordinator broker id.
private val channels = mutable.Map[Int, BlockingChannel]()
// Channel to an arbitrary broker, used only for consumer-metadata queries.
private var metadataChannel = ClientUtils.channelToAnyBroker(zkUtils, SocketTimeoutMs)
private val numErrors = new AtomicInteger(0)
override def doWork() {
// Pick a random group in [0, numGroups) on each iteration.
val id = random.nextInt().abs % numGroups
val group = "group-" + id
try {
// Ask any broker for this group's coordinator; -1 when none is returned.
metadataChannel.send(ConsumerMetadataRequest(group))
val coordinatorId = ConsumerMetadataResponse.readFrom(metadataChannel.receive().payload()).coordinatorOpt.map(_.id).getOrElse(-1)
val channel = if (channels.contains(coordinatorId))
channels(coordinatorId)
else {
val newChannel = ClientUtils.channelToOffsetManager(group, zkUtils, SocketTimeoutMs)
channels.put(coordinatorId, newChannel)
newChannel
}
try {
// send the offset fetch request
val fetchRequest = OffsetFetchRequest(group, Seq(TopicAndPartition("topic-"+id, 1)))
channel.send(fetchRequest)
// Only the receive/parse half of the round trip is timed.
fetchTimer.time {
val response = OffsetFetchResponse.readFrom(channel.receive().payload())
if (response.requestInfo.exists(_._2.error != ErrorMapping.NoError)) {
numErrors.getAndIncrement
}
}
}
catch {
case e1: ClosedByInterruptException =>
// Interrupted (typically during shutdown): evict the cached channel.
channel.disconnect()
channels.remove(coordinatorId)
case e2: IOException =>
println("Error while fetching offset from %s:%d due to %s.".format(channel.host, channel.port, e2))
channel.disconnect()
channels.remove(coordinatorId)
}
}
catch {
case e: IOException =>
// The metadata query itself failed: rebuild the metadata channel.
println("Error while querying %s:%d - shutting down query channel.".format(metadataChannel.host, metadataChannel.port))
metadataChannel.disconnect()
println("Creating new query channel.")
metadataChannel = ClientUtils.channelToAnyBroker(zkUtils, SocketTimeoutMs)
}
finally {
// Pace the fetch loop.
Thread.sleep(fetchIntervalMs)
}
}
override def shutdown() {
super.shutdown()
awaitShutdown()
channels.foreach(_._2.disconnect())
metadataChannel.disconnect()
}
// One-line summary of this thread's fetch statistics for the stats report.
def stats = {
"Fetch thread :: Error count: %d; Max:%f; Min: %f; Mean: %f; Fetch count: %d"
.format(numErrors.get(), timer.max(), timer.min(), timer.mean(), timer.count())
}
}
/**
 * Entry point: parses command-line options, then runs the commit threads, the
 * fetch thread, and the stats reporting thread until interrupted, shutting
 * everything down cleanly (also via a JVM shutdown hook).
 */
def main(args: Array[String]) {
  val parser = new OptionParser
  val zookeeperOpt = parser.accepts("zookeeper", "The ZooKeeper connection URL.")
    .withRequiredArg
    .describedAs("ZooKeeper URL")
    .ofType(classOf[java.lang.String])
    .defaultsTo("localhost:2181")

  val commitIntervalOpt = parser.accepts("commit-interval-ms", "Offset commit interval.")
    .withRequiredArg
    .describedAs("interval")
    .ofType(classOf[java.lang.Integer])
    .defaultsTo(100)

  val fetchIntervalOpt = parser.accepts("fetch-interval-ms", "Offset fetch interval.")
    .withRequiredArg
    .describedAs("interval")
    .ofType(classOf[java.lang.Integer])
    .defaultsTo(1000)

  val numPartitionsOpt = parser.accepts("partition-count", "Number of partitions per commit.")
    .withRequiredArg
    .describedAs("count") // was "interval": copy-paste error in the help text
    .ofType(classOf[java.lang.Integer])
    .defaultsTo(1)

  val numThreadsOpt = parser.accepts("thread-count", "Number of commit threads.")
    .withRequiredArg
    .describedAs("threads")
    .ofType(classOf[java.lang.Integer])
    .defaultsTo(1)

  val reportingIntervalOpt = parser.accepts("reporting-interval-ms", "Interval at which stats are reported.")
    .withRequiredArg
    .describedAs("interval (ms)")
    .ofType(classOf[java.lang.Integer])
    .defaultsTo(3000)

  val helpOpt = parser.accepts("help", "Print this message.")

  val options = parser.parse(args : _*)

  if (options.has(helpOpt)) {
    parser.printHelpOn(System.out)
    System.exit(0)
  }

  val commitIntervalMs = options.valueOf(commitIntervalOpt).intValue()
  val fetchIntervalMs = options.valueOf(fetchIntervalOpt).intValue()
  val threadCount = options.valueOf(numThreadsOpt).intValue()
  val partitionCount = options.valueOf(numPartitionsOpt).intValue()
  val zookeeper = options.valueOf(zookeeperOpt)
  val reportingIntervalMs = options.valueOf(reportingIntervalOpt).intValue()
  println("Commit thread count: %d; Partition count: %d, Commit interval: %d ms; Fetch interval: %d ms; Reporting interval: %d ms"
    .format(threadCount, partitionCount, commitIntervalMs, fetchIntervalMs, reportingIntervalMs))

  var zkUtils: ZkUtils = null
  var commitThreads: Seq[CommitThread] = Seq()
  var fetchThread: FetchThread = null
  var statsThread: StatsThread = null

  // Stops and joins every thread that was started, then closes ZooKeeper.
  // Safe to call even when startup failed part-way through.
  def cleanShutdown() {
    commitThreads.foreach(_.shutdown())
    commitThreads.foreach(_.join())
    if (fetchThread != null) {
      fetchThread.shutdown()
      fetchThread.join()
    }
    if (statsThread != null) {
      statsThread.shutdown()
      statsThread.join()
    }
    // Guard against a failure before the ZooKeeper connection was established.
    if (zkUtils != null) {
      zkUtils.close()
    }
  }

  try {
    zkUtils = ZkUtils(zookeeper, 6000, 2000, false)
    commitThreads = (0 until threadCount).map { threadId =>
      new CommitThread(threadId, partitionCount, commitIntervalMs, zkUtils)
    }
    fetchThread = new FetchThread(threadCount, fetchIntervalMs, zkUtils)
    // Bug fix: this used to be `val statsThread = ...`, which shadowed the outer
    // var. The outer var therefore stayed null, so cleanShutdown() never stopped
    // or joined the stats thread.
    statsThread = new StatsThread(reportingIntervalMs, commitThreads, fetchThread)

    Runtime.getRuntime.addShutdownHook(new Thread() {
      override def run() {
        cleanShutdown()
        statsThread.printStats()
      }
    })

    commitThreads.foreach(_.start())
    fetchThread.start()
    statsThread.start()

    commitThreads.foreach(_.join())
    fetchThread.join()
    statsThread.join()
  }
  catch {
    case e: Throwable =>
      // Bug fix: `println("Error: ", e)` printed a Tuple2; build a single string.
      println("Error: " + e)
  }
  finally {
    cleanShutdown()
  }
}
}
| vkroz/kafka | core/src/test/scala/other/kafka/TestOffsetManager.scala | Scala | apache-2.0 | 11,034 |
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.kernel.protocol.v5.security
import akka.actor.Actor
import com.ibm.spark.kernel.protocol.v5.KernelMessage
import com.ibm.spark.security.Hmac
import com.ibm.spark.utils.LogLike
import play.api.libs.json.Json
/**
 * Actor that computes an HMAC signature for every [[KernelMessage]] it receives
 * and replies to the sender with the resulting signature.
 *
 * @param hmac The HMAC to use for signature construction
 */
class SignatureProducerActor(
  private val hmac: Hmac
) extends Actor with LogLike {
  override def receive: Receive = {
    case message: KernelMessage =>
      // Sign the serialized header, parent header, metadata, and content string,
      // in that order, then reply with the computed signature.
      sender ! hmac(
        Json.stringify(Json.toJson(message.header)),
        Json.stringify(Json.toJson(message.parentHeader)),
        Json.stringify(Json.toJson(message.metadata)),
        message.contentString
      )
  }
}
| bpburns/spark-kernel | kernel/src/main/scala/com/ibm/spark/kernel/protocol/v5/security/SignatureProducerActor.scala | Scala | apache-2.0 | 1,372 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import org.apache.spark.sql.{Row, QueryTest}
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._
import org.apache.spark.sql.hive.test.TestHive._
import org.apache.spark.sql.hive.test.TestHive.implicits._
//开窗函数
class HiveDataFrameWindowSuite extends QueryTest {
//重用窗口partitionby
test("reuse window partitionBy") {
val df = Seq((1, "1"), (2, "2"), (1, "1"), (2, "2")).toDF("key", "value")
df.show(false)
val w = Window.partitionBy("key").orderBy("value")
checkAnswer(
df.select(
//over分析函数用于计算基于组的某种聚合值,它和聚合函数的不同之处是:对于每个组返回多行,而聚合函数对于每个组只返回一行。
lead("key", 1).over(w),
lead("value", 1).over(w)),
Row(1, "1") :: Row(2, "2") :: Row(null, null) :: Row(null, null) :: Nil)
}
//重用窗口进行排序
test("reuse window orderBy") {
val df = Seq((1, "1"), (2, "2"), (1, "1"), (2, "2")).toDF("key", "value")
val w = Window.orderBy("value").partitionBy("key")
checkAnswer(
df.select(
lead("key", 1).over(w),
lead("value", 1).over(w)),
Row(1, "1") :: Row(2, "2") :: Row(null, null) :: Row(null, null) :: Nil)
}
/**
lag 和lead 可以 获取结果集中,按一定排序所排列的当前行的上下相邻若干offset 的某个行的某个列(不用结果集的自关联);
lag ,lead 分别是向前,向后;
lag 和lead 有三个参数,第一个参数是列名,第二个参数是偏移的offset,第三个参数是 超出记录窗口时的默认值)
*/
//Lag和Lead分析函数可以在同一次查询中取出同一字段的前N行的数据(Lag)和后N行的数据(Lead)作为独立的列。
//这种操作可以代替表的自联接,并且LAG和LEAD有更高的效率。
test("lead") {
val df = Seq((1, "1"), (2, "2"), (1, "1"), (2, "2")).toDF("key", "value")
df.registerTempTable("window_table")
/**
+---+-----+
|key|value|
+---+-----+
|1 |1 |
|2 |2 |
|1 |1 |
|2 |2 |
+---+-----+
**/
sql(
"""SELECT
| *
| FROM window_table""".stripMargin).show(false)
/**
+----+
|_c0 |
+----+
|1 |
|null|
|2 |
|null|
+----+ */
sql(
"""SELECT
| lead(value) OVER (PARTITION BY key ORDER BY value)
| FROM window_table""".stripMargin).show(false)
checkAnswer(
df.select(
lead("value", 1).over(Window.partitionBy($"key").orderBy($"value"))),
sql(
"""SELECT
| lead(value) OVER (PARTITION BY key ORDER BY value)
| FROM window_table""".stripMargin).collect())
}
test("lag") {
val df = Seq((1, "1"), (2, "2"), (1, "1"), (2, "2")).toDF("key", "value")
df.registerTempTable("window_table")
checkAnswer(
df.select(
lag("value", 1).over(Window.partitionBy($"key").orderBy($"value"))),
sql(
"""SELECT
| lag(value) OVER (PARTITION BY key ORDER BY value)
| FROM window_table""".stripMargin).collect())
}
//默认值引导
test("lead with default value") {
val df = Seq((1, "1"), (1, "1"), (2, "2"), (1, "1"),
(2, "2"), (1, "1"), (2, "2")).toDF("key", "value")
df.registerTempTable("window_table")
checkAnswer(
df.select(
lead("value", 2, "n/a").over(Window.partitionBy("key").orderBy("value"))),
sql(
"""SELECT
| lead(value, 2, "n/a") OVER (PARTITION BY key ORDER BY value)
| FROM window_table""".stripMargin).collect())
}
//默认值滞后
test("lag with default value") {
val df = Seq((1, "1"), (1, "1"), (2, "2"), (1, "1"),
(2, "2"), (1, "1"), (2, "2")).toDF("key", "value")
df.registerTempTable("window_table")
checkAnswer(
df.select(
lag("value", 2, "n/a").over(Window.partitionBy($"key").orderBy($"value"))),
sql(
"""SELECT
| lag(value, 2, "n/a") OVER (PARTITION BY key ORDER BY value)
| FROM window_table""".stripMargin).collect())
}
//在非特定窗口中的排名函数
test("rank functions in unspecific window") {
val df = Seq((1, "1"), (2, "2"), (1, "2"), (2, "2")).toDF("key", "value")
df.registerTempTable("window_table")
checkAnswer(
df.select(
$"key",
max("key").over(Window.partitionBy("value").orderBy("key")),
min("key").over(Window.partitionBy("value").orderBy("key")),
mean("key").over(Window.partitionBy("value").orderBy("key")),
count("key").over(Window.partitionBy("value").orderBy("key")),
sum("key").over(Window.partitionBy("value").orderBy("key")),
ntile(2).over(Window.partitionBy("value").orderBy("key")),
rowNumber().over(Window.partitionBy("value").orderBy("key")),
denseRank().over(Window.partitionBy("value").orderBy("key")),
rank().over(Window.partitionBy("value").orderBy("key")),
cumeDist().over(Window.partitionBy("value").orderBy("key")),
percentRank().over(Window.partitionBy("value").orderBy("key"))),
sql(
s"""SELECT
|key,
|max(key) over (partition by value order by key),
|min(key) over (partition by value order by key),
|avg(key) over (partition by value order by key),
|count(key) over (partition by value order by key),
|sum(key) over (partition by value order by key),
|ntile(2) over (partition by value order by key),
|row_number() over (partition by value order by key),
|dense_rank() over (partition by value order by key),
|rank() over (partition by value order by key),
|cume_dist() over (partition by value order by key),
|percent_rank() over (partition by value order by key)
|FROM window_table""".stripMargin).collect())
}
//聚合和行之间
test("aggregation and rows between") {
val df = Seq((1, "1"), (2, "2"), (1, "1"), (2, "2")).toDF("key", "value")
df.registerTempTable("window_table")
checkAnswer(
df.select(
avg("key").over(Window.partitionBy($"value").orderBy($"key").rowsBetween(-1, 2))),
sql(
"""SELECT
| avg(key) OVER
| (PARTITION BY value ORDER BY key ROWS BETWEEN 1 preceding and 2 following)
| FROM window_table""".stripMargin).collect())
}
//聚合和范围之间
test("aggregation and range betweens") {
val df = Seq((1, "1"), (2, "2"), (1, "1"), (2, "2")).toDF("key", "value")
df.registerTempTable("window_table")
checkAnswer(
df.select(
avg("key").over(Window.partitionBy($"value").orderBy($"key").rangeBetween(-1, 1))),
sql(
"""SELECT
| avg(key) OVER
| (PARTITION BY value ORDER BY key RANGE BETWEEN 1 preceding and 1 following)
| FROM window_table""".stripMargin).collect())
}
//聚合和无界的行
test("aggregation and rows betweens with unbounded") {
val df = Seq((1, "1"), (2, "2"), (2, "3"), (1, "3"), (3, "2"), (4, "3")).toDF("key", "value")
df.registerTempTable("window_table")
checkAnswer(
df.select(
$"key",
last("value").over(
Window.partitionBy($"value").orderBy($"key").rowsBetween(0, Long.MaxValue)),
last("value").over(
Window.partitionBy($"value").orderBy($"key").rowsBetween(Long.MinValue, 0)),
last("value").over(Window.partitionBy($"value").orderBy($"key").rowsBetween(-1, 3))),
sql(
"""SELECT
| key,
| last_value(value) OVER
| (PARTITION BY value ORDER BY key ROWS between current row and unbounded following),
| last_value(value) OVER
| (PARTITION BY value ORDER BY key ROWS between unbounded preceding and current row),
| last_value(value) OVER
| (PARTITION BY value ORDER BY key ROWS between 1 preceding and 3 following)
| FROM window_table""".stripMargin).collect())
}
//聚合和范围在无界之间
test("aggregation and range betweens with unbounded") {
val df = Seq((5, "1"), (5, "2"), (4, "2"), (6, "2"), (3, "1"), (2, "2")).toDF("key", "value")
df.registerTempTable("window_table")
checkAnswer(
df.select(
$"key",
last("value").over(
Window.partitionBy($"value").orderBy($"key").rangeBetween(-2, -1))
.equalTo("2")
.as("last_v"),
avg("key").over(Window.partitionBy("value").orderBy("key").rangeBetween(Long.MinValue, 1))
.as("avg_key1"),
avg("key").over(Window.partitionBy("value").orderBy("key").rangeBetween(0, Long.MaxValue))
.as("avg_key2"),
avg("key").over(Window.partitionBy("value").orderBy("key").rangeBetween(-1, 0))
.as("avg_key3")
),
sql(
"""SELECT
| key,
| last_value(value) OVER
| (PARTITION BY value ORDER BY key RANGE BETWEEN 2 preceding and 1 preceding) == "2",
| avg(key) OVER
| (PARTITION BY value ORDER BY key RANGE BETWEEN unbounded preceding and 1 following),
| avg(key) OVER
| (PARTITION BY value ORDER BY key RANGE BETWEEN current row and unbounded following),
| avg(key) OVER
| (PARTITION BY value ORDER BY key RANGE BETWEEN 1 preceding and current row)
| FROM window_table""".stripMargin).collect())
}
// Reverse (descending-ordered) sliding RANGE frame
// A sliding RANGE frame on a descending ordering: for each row the frame spans
// revenue offsets [-2000, +1000] relative to the current row, measured along the
// descending sort direction.
test("reverse sliding range frame") {
  val products = Seq(
    (1, "Thin", "Cell Phone", 6000),
    (2, "Normal", "Tablet", 1500),
    (3, "Mini", "Tablet", 5500),
    (4, "Ultra thin", "Cell Phone", 5500),
    (5, "Very thin", "Cell Phone", 6000),
    (6, "Big", "Tablet", 2500),
    (7, "Bendable", "Cell Phone", 3000),
    (8, "Foldable", "Cell Phone", 3000),
    (9, "Pro", "Tablet", 4500),
    (10, "Pro2", "Tablet", 6500))
  val df = products.toDF("id", "product", "category", "revenue")
  // Frame offsets are interpreted against the descending revenue ordering.
  val slidingFrame =
    Window.partitionBy($"category").orderBy($"revenue".desc).rangeBetween(-2000L, 1000L)
  // Expected per-id truncated average revenue within each sliding frame.
  val expected = Seq(
    Row(1, 5833), Row(2, 2000), Row(3, 5500),
    Row(4, 5833), Row(5, 5833), Row(6, 2833),
    Row(7, 3000), Row(8, 3000), Row(9, 5500),
    Row(10, 6000))
  checkAnswer(
    df.select($"id", avg($"revenue").over(slidingFrame).cast("int")),
    expected)
}
// This is here to illustrate the fact that reverse order also reverses offsets.
// (translation of the note above) Reversing the sort order also reverses the frame offsets.
test("reverse unbounded range frame") { // reverse unbounded range frame
  val df = Seq(1, 2, 4, 3, 2, 1).
    map(Tuple1.apply).
    toDF("value")
  // Descending ordering: rangeBetween offsets are applied along the reversed order.
  val window = Window.orderBy($"value".desc)
  checkAnswer(
    df.select(
      $"value",
      sum($"value").over(window.rangeBetween(Long.MinValue, 1)),
      sum($"value").over(window.rangeBetween(1, Long.MaxValue))),
    // An empty frame yields null (third column for the value-1 rows).
    Row(1, 13, null) :: Row(2, 13, 2) :: Row(4, 7, 9) ::
      Row(3, 11, 6) :: Row(2, 13, 2) :: Row(1, 13, null) :: Nil)
}
}
| tophua/spark1.52 | sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveDataFrameWindowSuite.scala | Scala | apache-2.0 | 12,003 |
package example
object Lists {

  /**
   * Computes the sum of all elements in the list `xs`.
   *
   * Implemented with an explicit accumulator so the recursion is tail-recursive
   * (the compiler rewrites it to a loop); the previous head-plus-recursion form
   * overflowed the stack on long lists, and it also used `return`, which is
   * non-idiomatic Scala.
   *
   * @param xs A list of natural numbers
   * @return The sum of all elements in `xs` (0 for the empty list)
   */
  def sum(xs: List[Int]): Int = {
    @scala.annotation.tailrec
    def loop(rest: List[Int], acc: Int): Int = rest match {
      case Nil          => acc
      case head :: tail => loop(tail, acc + head)
    }
    loop(xs, 0)
  }

  /**
   * Returns the largest element in a list of integers.
   *
   * Uses pattern matching instead of the previous repeated `xs.size` checks
   * (`size` is O(n) on `List`, so checking it per call was quadratic overall),
   * and a tail-recursive helper so long lists cannot overflow the stack.
   *
   * @param xs A list of natural numbers
   * @return The largest element in `xs`
   * @throws java.util.NoSuchElementException if `xs` is an empty list
   */
  def max(xs: List[Int]): Int = xs match {
    case Nil          => throw new NoSuchElementException("max of empty list")
    case head :: tail => maxFrom(tail, head)
  }

  /** Tail-recursive helper: the largest of `best` and every element of `rest`. */
  @scala.annotation.tailrec
  private def maxFrom(rest: List[Int], best: Int): Int = rest match {
    case Nil          => best
    case head :: tail => maxFrom(tail, math.max(best, head))
  }
}
| binout/progfun1 | example/src/main/scala/example/Lists.scala | Scala | apache-2.0 | 1,727 |
package dao.generic
import generated.Tables.LinkedAccount
import generated.Tables.profile.api._
import play.api.db.slick._
import slick.lifted.CanBeQueryCondition
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent._
/**
* Generic DAO implementation
*/
abstract class GenericDaoImpl[T <: Table[E] with IdentifyableTable[PK], E <: Entity[PK], PK: BaseColumnType]
  (dbConfigProvider: DatabaseConfigProvider, tableQuery: TableQuery[T]) extends GenericDao[T, E, PK] {
  //------------------------------------------------------------------------
  // public
  //------------------------------------------------------------------------
  /**
    * Returns the row count for this Model
    * @return the row count for this Model
    */
  override def count(): Future[Int] = db.run(tableQuery.length.result)
  //------------------------------------------------------------------------
  /**
    * Returns the matching entity for the given id, if any
    * @param id identifier
    * @return Some(entity) when a row with that id exists, None otherwise
    */
  override def findById(id: PK): Future[Option[E]] = db.run(tableQuery.filter(_.id === id).result.headOption)
  //------------------------------------------------------------------------
  /**
    * Returns all entities in this model
    * @return all entities in this model
    */
  override def findAll(): Future[Seq[E]] = db.run(tableQuery.result)
  //------------------------------------------------------------------------
  /**
    * Returns entities that satisfy the filter expression.
    * @param expr input filter expression (Slick column expression on the table row)
    * @param wt evidence that `C` can be used as a query condition
    * @tparam C concrete Slick column/condition type produced by `expr`
    * @return entities that satisfy the filter expression.
    */
  override def filter[C <: Rep[_]](expr: T => C)(implicit wt: CanBeQueryCondition[C]) : Future[Seq[E]] =
    db.run(tableQuery.filter(expr).result)
  //------------------------------------------------------------------------
  /**
    * Inserts the given entity.
    * NOTE(review): the previous scaladoc claimed the newly created entity was
    * returned, but the method completes with Unit once the insert succeeds.
    * @param entity entity to create, input id is ignored
    * @return a Future that completes when the insert has been executed
    */
  override def create(entity: E): Future[Unit] = {
    val action = (tableQuery += entity)
    db.run(action).map(_ => ())
  }
  //------------------------------------------------------------------------
  /**
    * Inserts all given entities in one batch action.
    * NOTE(review): the previous scaladoc claimed the number of inserted rows
    * was returned; the method actually completes with Unit.
    * @param entities entities to be inserted
    * @return a Future that completes when the batch insert has been executed
    */
  override def create(entities: Seq[E]): Future[Unit] = db.run(tableQuery ++= entities).map(_ => ())
  //------------------------------------------------------------------------
  /**
    * Updates the given entity (matched by id) and returns a Future
    * @param update Entity to update (by id)
    * @return returns a Future that completes when the update has been executed
    */
  override def update(update: E): Future[Unit] = {
    db.run(tableQuery.filter(_.id === update.id).update(update)).map(_ => ())
  }
  //------------------------------------------------------------------------
  /**
    * Deletes the entity with the given Id and returns a Future
    * @param id The Id to delete
    * @return returns a Future that completes when the delete has been executed
    */
  override def delete(id: PK): Future[Unit] = db.run(tableQuery.filter(_.id === id).delete).map(_ => ())
  //------------------------------------------------------------------------
  /**
    * Deletes all rows of this table and resets its identity sequence.
    * NOTE(review): TRUNCATE ... RESTART IDENTITY CASCADE is PostgreSQL-specific
    * syntax — confirm the target database before reusing this DAO elsewhere.
    * @return returns a Future that completes when the truncate has been executed
    */
  override def deleteAll: Future[Unit] = db.run(sqlu"""TRUNCATE TABLE "#${tableQuery.baseTableRow.tableName}" RESTART IDENTITY CASCADE""").map(_ => ())
}
| bravegag/play-authenticate-usage-scala | app/dao/generic/GenericDaoImpl.scala | Scala | apache-2.0 | 3,506 |
package rta.misc
import rta.model.BaseModel
import rta.model.triggers.Implicits._
import scala.collection.Iterator
import spire.math._
/** Generators for every combination of device-state models used in tests. */
trait StateGen {

  /** Size of the smallest batch produced by [[state]]. */
  final def singleSize: Int = 8

  /** Every combination of battery level (0-100, step 5), plug state, presence and status. */
  def batteryGen: Iterator[Battery] =
    for {
      level <- (0 to 100 by 5).iterator.map(UByte(_))
      plugged <- Battery.Plugged.values.iterator
      present <- Iterator(true, false)
      status <- Battery.Status.values.iterator
    } yield Battery(level, plugged, present, status)

  def batteryStateGen: Iterator[BatteryState] = BatteryState.values.iterator

  def powerStateGen: Iterator[PowerState] = PowerState.values.iterator

  def bluetoothStateGen: Iterator[BluetoothState] = BluetoothState.values.iterator

  def dockStateGen: Iterator[DockState] = DockState.values.iterator

  def headsetGen: Iterator[Headset] = Headset.values.iterator

  /** Every combination of network connection type and connection state. */
  def networkGen: Iterator[Network] =
    for {
      connection <- Network.Connection.values.iterator
      netState <- Network.State.values.iterator
    } yield Network(connection, netState)

  def wiFiStateGen: Iterator[WiFiState] = WiFiState.values.iterator

  /** Total number of distinct full-state combinations across all generators. */
  lazy val allLength = batteryGen.length * batteryStateGen.length *
    powerStateGen.length * dockStateGen.length * headsetGen.length *
    networkGen.length * bluetoothStateGen.length * wiFiStateGen.length

  /** Streams models from the cartesian product of all generators, up to the requested size. */
  @transient
  lazy val state: Int => Iterator[BaseModel] = {
    def combinations = for {
      battery <- batteryGen
      batteryState <- batteryStateGen
      powerState <- powerStateGen
      dock <- dockStateGen
      headset <- headsetGen
      network <- networkGen
      bluetoothState <- bluetoothStateGen
      wiFiState <- wiFiStateGen
    } yield Iterator[BaseModel](battery, batteryState, powerState, dock, headset, network, bluetoothState, wiFiState)
    size => combinations.flatten.slice(sizes.takeWhile(_ < size).sum, size)
  }

  /** Batch sizes: singleSize, singleSize*10, ... capped at allLength. */
  lazy val sizes: Vector[Int] =
    Iterator.iterate(singleSize)(_ * 10).takeWhile(_ <= allLength).toVector
}
| kjanosz/RuleThemAll | app/src/test/scala/rta/misc/StateGen.scala | Scala | apache-2.0 | 2,219 |
package com.lateralthoughts.points.controllers
import com.lateralthoughts.points.controllers.handlers.HandlingError
import com.lateralthoughts.points.model.ApplicationError
import org.scalatra.{ActionResult, Created, NoContent, Ok}
trait Controller extends HandlingError {

  /**
   * Turns a service response into an HTTP result: errors are rendered through
   * [[buildErrorResponse]], successful values through the supplied status
   * constructor (body, headers, reason).
   */
  private def render[T](status: (Any, Map[String, String], String) => ActionResult)(serviceResponse: Either[ApplicationError, T]) =
    serviceResponse.fold(buildErrorResponse, body => status(body, Map.empty, ""))

  /** 201 Created on success, error response otherwise. */
  def created = render(Created.apply) _

  /** 200 OK on success, error response otherwise. */
  def ok = render(Ok.apply) _

  /** 204 No Content on success (the body is discarded), error response otherwise. */
  def noContent = render((body, headers, reason) => NoContent.apply(headers, reason)) _
}
| vincentdoba/points | points-server/src/main/scala/com/lateralthoughts/points/controllers/Controller.scala | Scala | mit | 677 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.crossdata.connector.cassandra
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class CassandraCreateExternalTableIT extends CassandraWithSharedContext {

  // Happy path: CREATE EXTERNAL TABLE against an existing keyspace registers the
  // table both in the XD catalog and in Cassandra itself.
  "The Cassandra connector" should "execute natively create a External Table" in {
    val tableName = "newtable"
    val createTableQueryString =
      s"""|CREATE EXTERNAL TABLE $tableName (
          |id Integer,
          |name String,
          |booleanFile boolean,
          |timeTime Timestamp,
          |binaryType Binary,
          |arrayType ARRAY<STRING>,
          |mapType MAP<INT, INT>,
          |decimalType DECIMAL
          |)
          |USING $SourceProvider
          |OPTIONS (
          |keyspace '$Catalog',
          |table '$tableName',
          |cluster '$ClusterName',
          |pushdown "true",
          |spark_cassandra_connection_host '$CassandraHost',
          |primary_key_string 'id'
          |)
      """.stripMargin.replaceAll("\\n", " ")
    //Experimentation
    val result = sql(createTableQueryString).collect()
    //Expectations: the table is visible through the XD context...
    val table = xdContext.table(tableName)
    table should not be null
    table.schema.fieldNames should contain ("name")
    // In case that the table didn't exist, then this operation would throw a InvalidQueryException
    val resultSet = client.get._2.execute(s"SELECT * FROM $Catalog.$tableName")
    import scala.collection.JavaConversions._
    resultSet.getColumnDefinitions.asList.map(cd => cd.getName) should contain ("name")
  }

  // When the keyspace does not exist yet it must be created on the fly, which
  // requires the with_replication option; the keyspace is dropped afterwards.
  it should "execute natively create a External Table with no existing Keyspace" in {
    val createTableQueryString =
      s"""|CREATE EXTERNAL TABLE newkeyspace.othertable (id Integer, name String)
          |USING $SourceProvider
          |OPTIONS (
          |keyspace 'newkeyspace',
          |cluster '$ClusterName',
          |pushdown "true",
          |spark_cassandra_connection_host '$CassandraHost',
          |primary_key_string 'id',
          |with_replication "{'class' : 'SimpleStrategy', 'replication_factor' : 3}"
          |)
      """.stripMargin.replaceAll("\\n", " ")
    try {
      //Experimentation
      val result = sql(createTableQueryString).collect()
      //Expectations
      val table = xdContext.table(s"newkeyspace.othertable")
      table should not be null
      table.schema.fieldNames should contain("name")
    }finally {
      //AFTER: clean up the keyspace created by the statement above
      client.get._2.execute(s"DROP KEYSPACE newkeyspace")
    }
  }

  // Missing with_replication on a brand-new keyspace must be rejected with a
  // meaningful IllegalArgumentException rather than failing deep in the driver.
  it should "fail execute natively create a External Table with no existing Keyspace without with_replication" in {
    val createTableQueryString =
      s"""|CREATE EXTERNAL TABLE NoKeyspaceCreatedBefore.newTable (id Integer, name String)
          |USING $SourceProvider
          |OPTIONS (
          |keyspace 'NoKeyspaceCreatedBefore',
          |cluster '$ClusterName',
          |pushdown "true",
          |spark_cassandra_connection_host '$CassandraHost',
          |primary_key_string 'id'
          |)
      """.stripMargin.replaceAll("\\n", " ")
    //Experimentation
    the [IllegalArgumentException] thrownBy {
      sql(createTableQueryString).collect()
    } should have message "requirement failed: with_replication required when use CREATE EXTERNAL TABLE command"
  }
}
| Stratio/crossdata | cassandra/src/test/scala/com/stratio/crossdata/connector/cassandra/CassandraCreateExternalTableIT.scala | Scala | apache-2.0 | 3,920 |
/*
* Copyright 2020 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.registration.applicant
import config.FrontendAppConfig
import controllers.registration.applicant.{routes => applicantRoutes}
import itutil.ControllerISpec
import models.ApplicantDetails
import models.api.{EligibilitySubmissionData, NonUkNonEstablished, Trust, UnincorpAssoc}
import models.external.soletraderid.OverseasIdentifierDetails
import models.external.{BusinessVerificationStatus, BvPass, MinorEntity}
import play.api.libs.json.{JsObject, Json}
import play.api.libs.ws.WSResponse
import play.api.test.Helpers._
import scala.concurrent.Future
class MinorEntityIdControllerISpec extends ControllerISpec {
  val appConfig: FrontendAppConfig = app.injector.instanceOf[FrontendAppConfig]

  // Journey ids handed back by the stubbed minor-entity-identification service.
  val testUnincorpAssocJourneyId = "1"
  val testTrustJourneyId = "2"
  val testNonUkCompanyJourneyId = "3"
  val testJourneyUrl = "/test-journey-url"

  // Stubbed endpoints of the minor-entity-identification service.
  val createTrustJourneyUrl = "/minor-entity-identification/api/trusts-journey"
  val createUnincorpAssocJourneyUrl = "/minor-entity-identification/api/unincorporated-association-journey"
  val createNonUkCompanyJourneyUrl = "/minor-entity-identification/api/overseas-company-journey"

  /** Endpoint used to retrieve the result of a completed identification journey. */
  def retrieveDetailsUrl(journeyId: String) = s"/minor-entity-identification/api/journey/$journeyId"

  val testPostCode = "ZZ1 1ZZ"

  // Stubbed JSON payload returned by the identification service for a Trust.
  val testTrustResponse: JsObject = Json.obj(
    "sautr" -> testSautr,
    "postcode" -> testPostCode,
    "chrn" -> testChrn,
    "businessVerification" -> Json.obj(
      "verificationStatus" -> Json.toJson[BusinessVerificationStatus](BvPass)
    ),
    "registration" -> Json.obj(
      "registrationStatus" -> testRegistration,
      "registeredBusinessPartnerId" -> testSafeId
    ),
    "identifiersMatch" -> true
  )

  // Expected parsed MinorEntity for the Trust payload above (positional args —
  // see the MinorEntity definition for field meanings).
  val testTrust: MinorEntity = MinorEntity(
    None,
    Some(testSautr),
    None,
    None,
    Some(testPostCode),
    Some(testChrn),
    None,
    testRegistration,
    Some(BvPass),
    Some(testSafeId),
    identifiersMatch = true
  )

  val trustApplicantDetails: ApplicantDetails = validFullApplicantDetails.copy(entity = Some(testTrust))

  // Stubbed payload / expected entity for an Unincorporated Association (adds "casc").
  val testUnincorpAssocResponse: JsObject = Json.obj(
    "sautr" -> testSautr,
    "postcode" -> testPostCode,
    "chrn" -> testChrn,
    "casc" -> testCasc,
    "businessVerification" -> Json.obj(
      "verificationStatus" -> Json.toJson[BusinessVerificationStatus](BvPass)
    ),
    "registration" -> Json.obj(
      "registrationStatus" -> testRegistration,
      "registeredBusinessPartnerId" -> testSafeId
    ),
    "identifiersMatch" -> true
  )

  val testUnincorpAssoc: MinorEntity = MinorEntity(
    None,
    Some(testSautr),
    None,
    None,
    Some(testPostCode),
    Some(testChrn),
    Some(testCasc),
    testRegistration,
    Some(BvPass),
    Some(testSafeId),
    identifiersMatch = true
  )

  val unincorpAssocApplicantDetails: ApplicantDetails = validFullApplicantDetails.copy(entity = Some(testUnincorpAssoc))

  // Stubbed payload / expected entity for a Non-UK company (overseas identifiers).
  val testOverseasIdentifier = "1234567890"
  val testOverseasIdentifierCountry = "EE"
  val testOverseasIdentifierDetails = OverseasIdentifierDetails(testOverseasIdentifier, testOverseasIdentifierCountry)

  val testNonUkCompanyResponse: JsObject = Json.obj(
    "ctutr" -> testCrn,
    "overseas" -> Json.obj(
      "taxIdentifier" -> testOverseasIdentifier,
      "country" -> testOverseasIdentifierCountry
    ),
    "businessVerification" -> Json.obj(
      "verificationStatus" -> Json.toJson[BusinessVerificationStatus](BvPass)
    ),
    "registration" -> Json.obj(
      "registrationStatus" -> testRegistration,
      "registeredBusinessPartnerId" -> testSafeId
    ),
    "identifiersMatch" -> true
  )

  val testNonUkCompany: MinorEntity = MinorEntity(
    None,
    None,
    Some(testCrn),
    Some(testOverseasIdentifierDetails),
    None,
    None,
    None,
    testRegistration,
    Some(BvPass),
    Some(testSafeId),
    identifiersMatch = true
  )

  val nonUkCompanyApplicantDetails: ApplicantDetails = validFullApplicantDetails.copy(entity = Some(testNonUkCompany))
  // Starting a journey must hit the endpoint matching the party type from the
  // eligibility data and redirect the user to the URL the service returns.
  "GET /start-minor-entity-id-journey" when {
    "STI returns a journey ID" must {
      "redirect to the journey using the ID provided for Trust" in new Setup {
        given()
          .user.isAuthorised()
          .registrationApi.getSection[EligibilitySubmissionData](Some(testEligibilitySubmissionData.copy(partyType = Trust)))

        insertCurrentProfileIntoDb(currentProfile, sessionId)

        // Trust party type -> trusts-journey endpoint.
        stubPost(createTrustJourneyUrl, CREATED, Json.obj("journeyStartUrl" -> testJourneyUrl).toString())

        val res: Future[WSResponse] = buildClient("/start-minor-entity-id-journey").get()

        whenReady(res) { result =>
          result.status mustBe SEE_OTHER
          result.headers(LOCATION) must contain(testJourneyUrl)
        }
      }

      "redirect to the journey using the ID provided for Unincorporated Association" in new Setup {
        given()
          .user.isAuthorised()
          .registrationApi.getSection[EligibilitySubmissionData](Some(testEligibilitySubmissionData.copy(partyType = UnincorpAssoc)))

        insertCurrentProfileIntoDb(currentProfile, sessionId)

        // UnincorpAssoc party type -> unincorporated-association-journey endpoint.
        stubPost(createUnincorpAssocJourneyUrl, CREATED, Json.obj("journeyStartUrl" -> testJourneyUrl).toString())

        val res: Future[WSResponse] = buildClient("/start-minor-entity-id-journey").get()

        whenReady(res) { result =>
          result.status mustBe SEE_OTHER
          result.headers(LOCATION) must contain(testJourneyUrl)
        }
      }

      "redirect to the journey using the ID provided for Non UK Company" in new Setup {
        given()
          .user.isAuthorised()
          .registrationApi.getSection[EligibilitySubmissionData](Some(testEligibilitySubmissionData.copy(partyType = NonUkNonEstablished)))

        insertCurrentProfileIntoDb(currentProfile, sessionId)

        // NonUkNonEstablished party type -> overseas-company-journey endpoint.
        stubPost(createNonUkCompanyJourneyUrl, CREATED, Json.obj("journeyStartUrl" -> testJourneyUrl).toString())

        val res: Future[WSResponse] = buildClient("/start-minor-entity-id-journey").get()

        whenReady(res) { result =>
          result.status mustBe SEE_OTHER
          result.headers(LOCATION) must contain(testJourneyUrl)
        }
      }
    }
  }
  // The callback must fetch the journey result from the identification service,
  // store the parsed entity on the applicant details (S4L or backend, depending
  // on whether the S4L model is complete), and continue to the individual
  // identification step. Each party type is exercised in both S4L states.
  "GET /minor-entity-id-callback" must {
    "redirect to the lead business entity type page for Trust" when {
      "S4L model is not full" in new Setup {
        given()
          .user.isAuthorised()
          .vatScheme.has("applicant-details", Json.toJson(ApplicantDetails()))
          .s4lContainer[ApplicantDetails].contains(ApplicantDetails())
          .s4lContainer[ApplicantDetails].isUpdatedWith(ApplicantDetails(entity = Some(testTrust)))
          .registrationApi.getSection[EligibilitySubmissionData](Some(testEligibilitySubmissionData.copy(partyType = Trust)))

        stubGet(retrieveDetailsUrl(testTrustJourneyId), OK, testTrustResponse.toString)
        insertCurrentProfileIntoDb(currentProfile, sessionId)

        val res: Future[WSResponse] = buildClient(s"/register-for-vat/minor-entity-id-callback?journeyId=$testTrustJourneyId").get()

        whenReady(res) { result =>
          result.status mustBe SEE_OTHER
          result.headers(LOCATION) must contain(applicantRoutes.IndividualIdentificationController.startJourney.url)
        }
      }

      "the model in S4l is full" in new Setup {
        // Full S4L model: the data is persisted to the backend and S4L is cleared.
        given()
          .user.isAuthorised()
          .s4lContainer[ApplicantDetails].contains(trustApplicantDetails)
          .s4lContainer[ApplicantDetails].clearedByKey
          .vatScheme.isUpdatedWith(trustApplicantDetails)
          .registrationApi.getSection[EligibilitySubmissionData](Some(testEligibilitySubmissionData.copy(partyType = Trust)))

        stubGet(retrieveDetailsUrl(testTrustJourneyId), OK, testTrustResponse.toString)
        insertCurrentProfileIntoDb(currentProfile, sessionId)

        val res: Future[WSResponse] = buildClient(s"/register-for-vat/minor-entity-id-callback?journeyId=$testTrustJourneyId").get()

        whenReady(res) { result =>
          result.status mustBe SEE_OTHER
          result.headers(LOCATION) must contain(applicantRoutes.IndividualIdentificationController.startJourney.url)
        }
      }
    }

    "redirect to the lead business entity type page for Unincorporated Association" when {
      "S4L model is not full" in new Setup {
        given()
          .user.isAuthorised()
          .vatScheme.has("applicant-details", Json.toJson(ApplicantDetails()))
          .s4lContainer[ApplicantDetails].contains(ApplicantDetails())
          .s4lContainer[ApplicantDetails].isUpdatedWith(ApplicantDetails(entity = Some(testUnincorpAssoc)))
          .registrationApi.getSection[EligibilitySubmissionData](Some(testEligibilitySubmissionData.copy(partyType = UnincorpAssoc)))

        stubGet(retrieveDetailsUrl(testUnincorpAssocJourneyId), OK, testUnincorpAssocResponse.toString)
        insertCurrentProfileIntoDb(currentProfile, sessionId)

        val res: Future[WSResponse] = buildClient(s"/register-for-vat/minor-entity-id-callback?journeyId=$testUnincorpAssocJourneyId").get()

        whenReady(res) { result =>
          result.status mustBe SEE_OTHER
          result.headers(LOCATION) must contain(applicantRoutes.IndividualIdentificationController.startJourney.url)
        }
      }

      "the model in S4l is full" in new Setup {
        given()
          .user.isAuthorised()
          .s4lContainer[ApplicantDetails].contains(unincorpAssocApplicantDetails)
          .s4lContainer[ApplicantDetails].clearedByKey
          .vatScheme.isUpdatedWith(unincorpAssocApplicantDetails)
          .registrationApi.getSection[EligibilitySubmissionData](Some(testEligibilitySubmissionData.copy(partyType = UnincorpAssoc)))

        stubGet(retrieveDetailsUrl(testUnincorpAssocJourneyId), OK, testUnincorpAssocResponse.toString)
        insertCurrentProfileIntoDb(currentProfile, sessionId)

        val res: Future[WSResponse] = buildClient(s"/register-for-vat/minor-entity-id-callback?journeyId=$testUnincorpAssocJourneyId").get()

        whenReady(res) { result =>
          result.status mustBe SEE_OTHER
          result.headers(LOCATION) must contain(applicantRoutes.IndividualIdentificationController.startJourney.url)
        }
      }
    }

    "redirect to the lead business entity type page for Non UK Company" when {
      "S4L model is not full" in new Setup {
        given()
          .user.isAuthorised()
          .vatScheme.has("applicant-details", Json.toJson(ApplicantDetails()))
          .s4lContainer[ApplicantDetails].contains(ApplicantDetails())
          .s4lContainer[ApplicantDetails].isUpdatedWith(ApplicantDetails(entity = Some(testNonUkCompany)))
          .registrationApi.getSection[EligibilitySubmissionData](Some(testEligibilitySubmissionData.copy(partyType = NonUkNonEstablished)))

        stubGet(retrieveDetailsUrl(testNonUkCompanyJourneyId), OK, testNonUkCompanyResponse.toString)
        insertCurrentProfileIntoDb(currentProfile, sessionId)

        val res: Future[WSResponse] = buildClient(s"/register-for-vat/minor-entity-id-callback?journeyId=$testNonUkCompanyJourneyId").get()

        whenReady(res) { result =>
          result.status mustBe SEE_OTHER
          result.headers(LOCATION) must contain(applicantRoutes.IndividualIdentificationController.startJourney.url)
        }
      }

      "the model in S4l is full" in new Setup {
        given()
          .user.isAuthorised()
          .s4lContainer[ApplicantDetails].contains(nonUkCompanyApplicantDetails)
          .s4lContainer[ApplicantDetails].clearedByKey
          .vatScheme.isUpdatedWith(nonUkCompanyApplicantDetails)
          .registrationApi.getSection[EligibilitySubmissionData](Some(testEligibilitySubmissionData.copy(partyType = NonUkNonEstablished)))

        stubGet(retrieveDetailsUrl(testNonUkCompanyJourneyId), OK, testNonUkCompanyResponse.toString)
        insertCurrentProfileIntoDb(currentProfile, sessionId)

        val res: Future[WSResponse] = buildClient(s"/register-for-vat/minor-entity-id-callback?journeyId=$testNonUkCompanyJourneyId").get()

        whenReady(res) { result =>
          result.status mustBe SEE_OTHER
          result.headers(LOCATION) must contain(applicantRoutes.IndividualIdentificationController.startJourney.url)
        }
      }
    }
  }
} | hmrc/vat-registration-frontend | it/controllers/registration/applicant/MinorEntityIdControllerISpec.scala | Scala | apache-2.0 | 12,879 |
package com.temportalist.weepingangels.common.extended
import com.temportalist.origin.internal.common.extended.ExtendedEntityHandler
import com.temportalist.weepingangels.common.WAOptions
import com.temportalist.weepingangels.common.entity.EntityAngel
import com.temportalist.weepingangels.common.lib.AngelUtility
import cpw.mods.fml.common.eventhandler.SubscribeEvent
import cpw.mods.fml.common.gameevent.TickEvent
import cpw.mods.fml.common.gameevent.TickEvent.PlayerTickEvent
import cpw.mods.fml.relauncher.Side
import net.minecraft.entity.player.EntityPlayer
import net.minecraft.item.ItemStack
import net.minecraft.util.DamageSource
import net.minecraftforge.event.entity.living.LivingAttackEvent
/**
*
*
* @author TheTemportalist
*/
/**
 * Event handlers driving the player-to-Weeping-Angel conversion and the
 * angel-related combat rules, backed by the AngelPlayer extended properties.
 */
object AngelPlayerHandler {

  /** Returns the AngelPlayer extended-properties object attached to the player. */
  def get(player: EntityPlayer): AngelPlayer = {
    ExtendedEntityHandler.getExtended(player, classOf[AngelPlayer])
  }

  /**
   * Server-side per-tick driver of the conversion process (START phase only).
   * Creative players are immune; once angel health reaches the configured max
   * the player is replaced by an EntityAngel.
   */
  @SubscribeEvent
  def playerTickEvent(event: PlayerTickEvent): Unit = {
    val side: Side = event.side
    if (side == Side.SERVER) {
      if (event.phase == TickEvent.Phase.START) {
        val player: EntityPlayer = event.player
        val angelPlayer: AngelPlayer = ExtendedEntityHandler.getExtended(
          player, classOf[AngelPlayer])
        if (angelPlayer == null) return
        // Creative mode aborts any in-progress conversion and resets its state.
        if (player.capabilities.isCreativeMode) {
          if (angelPlayer.converting()) {
            angelPlayer.stopConversion()
            angelPlayer.setAngelHealth(0.0F)
            angelPlayer.clearRegenTicks()
          }
        }
        else if (angelPlayer.converting()) {
          // Conversion complete: spawn the angel in the player's place.
          if (angelPlayer.getAngelHealth >= WAOptions.maxAngelHealth) {
            angelPlayer.stopConversion()
            val angelEntity: EntityAngel = new EntityAngel(player.worldObj)
            angelEntity.setPositionAndRotation(player.posX, player.posY, player.posZ,
              player.rotationYaw, player.rotationPitch)
            if (WAOptions.angelsTakePlayerName) {
              angelEntity.setCustomNameTag(player.getCommandSenderName)
            }
            // Optionally move the player's whole inventory onto the angel,
            // emptying the player's slots as we go.
            if (WAOptions.angelsStealPlayerInventory) {
              val inventory: Array[ItemStack] = new
                  Array[ItemStack](player.inventory.getSizeInventory)
              for (i <- 0 until player.inventory.getSizeInventory) {
                val itemStack: ItemStack = player.inventory.getStackInSlot(i)
                if (itemStack != null) {
                  inventory(i) = itemStack.copy()
                }
                else {
                  inventory(i) = null
                }
                player.inventory.setInventorySlotContents(i, null)
              }
              angelEntity.setStolenInventory(inventory)
            }
            if (!player.worldObj.isRemote)
              player.worldObj.spawnEntityInWorld(angelEntity)
            // The player is killed outright once the angel has been spawned.
            player.setHealth(0F)
            player.setDead()
            /*
            if (!player.attackEntityFrom(DamageSource.causeMobDamage(angelEntity),
              Float.MaxValue)) {
              if (angelEntity.hasStolenInventory) angelEntity.dropStolenInventory()
              angelEntity.setDead()
              // TODO Maybe return here, to prevent anything from occuring after death
            }
            */
          }
          else {
            // Conversion still in progress: advance the regen countdown and
            // recompute angel health from the remaining ticks.
            if (angelPlayer.getTicksUntilNextRegen <= 0) {
              angelPlayer.clearRegenTicks()
            }
            else {
              angelPlayer.decrementTicksUntilRegen()
            }
            angelPlayer.setHealthWithRespectToTicks()
          }
        }
        else if (!angelPlayer.converting() && angelPlayer.getAngelHealth > 0.0F) {
          // Stale state from an aborted conversion: reset it.
          angelPlayer.setAngelHealth(0.0F)
          angelPlayer.clearRegenTicks()
        }
        /*
        if (Api.getMorphEntity(player.getName, false)
            .isInstanceOf[EntityAngel]) {
          if (player.motionY > 0.4F) {
            player.motionY = 0.0F
            angelPlayer.syncEntity()
          }
        }
        */
      }
    }
  }

  /**
   * Records whether an angel-player is the victim or the attacker of a hit;
   * as the victim, the hit may be cancelled based on the angel attack rules.
   */
  @SubscribeEvent
  def onLivingAttack(event: LivingAttackEvent): Unit = {
    event.entityLiving match {
      case player: EntityPlayer =>
        if (!AngelPlayerHandler.isAngel(player)) return
        // hurt entity is player
        this.onLivingAttack_do(player, isAttacker = false)
        // TODO, this needs configuring. Very OP
        if (!AngelUtility
            .canAttackEntityFrom(player.worldObj, event.source, event.ammount)) {
          event.setCanceled(true)
        }
      case _ =>
        event.source.getSourceOfDamage match {
          case player: EntityPlayer =>
            if (!AngelPlayerHandler.isAngel(player)) return
            // caused by player
            this.onLivingAttack_do(player, isAttacker = true)
          case _ =>
        }
    }
  }

  // Currently always false: the Morph-API integration below is disabled, so no
  // player is ever treated as an angel by onLivingAttack.
  private def isAngel(player: EntityPlayer): Boolean = {
    /*
    Api.getMorphEntity(player.getName,
      FMLCommonHandler.instance().getEffectiveSide.isClient)
        .isInstanceOf[EntityAngel]
    */
    false
  }

  /** Flags the AngelPlayer state as attacking or attacked for this hit. */
  private def onLivingAttack_do(player: EntityPlayer, isAttacker: Boolean): Unit = {
    val angelPlayer: AngelPlayer = AngelPlayerHandler.get(player)
    if (isAttacker) {
      angelPlayer.setIsAttacking()
    }
    else {
      angelPlayer.setIsAttacked()
    }
  }
}
| TheTemportalist/WeepingAngels | src/main/scala/com/temportalist/weepingangels/common/extended/AngelPlayerHandler.scala | Scala | apache-2.0 | 4,740 |
package scoverage
import java.text.{DecimalFormat, DecimalFormatSymbols}
import java.util.Locale
/** Fixed two-fraction-digit decimal formatting with US separators. */
object DoubleFormat {

  // Shared formatter: the "0.00" pattern pins a minimum of one integer digit and
  // exactly two fraction digits; US symbols give a '.' decimal separator and
  // grouping is disabled explicitly, so no thousands separators are emitted.
  // NOTE(review): DecimalFormat is not thread-safe; this shared instance assumes
  // single-threaded use — confirm callers.
  private[this] val twoFractionDigitsFormat: DecimalFormat = {
    val fmt = new DecimalFormat("0.00", DecimalFormatSymbols.getInstance(Locale.US))
    fmt.setGroupingUsed(false)
    fmt
  }

  /** Formats `d` with exactly two digits after a US-style decimal point, e.g. 1.5 -> "1.50". */
  def twoFractionDigits(d: Double): String = twoFractionDigitsFormat.format(d)
}
| rorygraves/scalac-scoverage-plugin | scalac-scoverage-plugin/src/main/scala/scoverage/DoubleFormat.scala | Scala | apache-2.0 | 512 |
package models.services
import models.{ Department }
import scala.concurrent.Future
/** Read/write access to departments. */
trait DepartmentService {

  /** Looks up a department by id.
    * NOTE(review): returns Future[Department] (not an Option) — presumably the
    * Future fails when the id is unknown; confirm against the implementation. */
  def findById(id: String): Future[Department]

  /** Looks up a department by name, if one exists. */
  def findByName(name: String): Future[Option[Department]]

  /** Returns every department. */
  def all(): Future[Seq[Department]]

  /** Persists the given department and yields its identifier.
    * NOTE(review): parameter name `Department` shadows the type name; a
    * lower-case name would be clearer (renaming affects named-argument callers). */
  def insert(Department: Department): Future[String]
}
| yoo-haemin/hufs-planner | project/app/models/services/DepartmentService.scala | Scala | agpl-3.0 | 329 |
/*************************************************************************
* *
* This file is part of the 20n/act project. *
* 20n/act enables DNA prediction for synthetic biology/bioengineering. *
* Copyright (C) 2017 20n Labs, Inc. *
* *
* Please direct all queries to act@20n.com. *
* *
* This program is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation, either version 3 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program. If not, see <http://www.gnu.org/licenses/>. *
* *
*************************************************************************/
package com.act.biointerpretation.rsmiles.single_sar_construction
import act.server.MongoDB
import chemaxon.formats.{MolImporter, MolFormatException}
import chemaxon.standardizer.Standardizer
import com.act.analysis.chemicals.molecules.{MoleculeExporter, MoleculeFormat, MoleculeImporter}
import com.act.analysis.chemicals.molecules.MoleculeFormat.Cleaning
import com.act.biointerpretation.Utils.ReactionProjector
import com.act.biointerpretation.desalting.Desalter
import com.act.biointerpretation.rsmiles.chemicals.JsonInformationTypes.{ChemicalInformation, AbstractChemicalInfo}
import com.act.biointerpretation.rsmiles.chemicals.abstract_chemicals.AbstractReactions._
import com.act.workflow.tool_manager.workflow.workflow_mixins.mongo.ChemicalKeywords
import com.mongodb.DBObject
import org.apache.log4j.LogManager
import scala.collection.mutable
import scala.collection.parallel.immutable.{ParMap, ParSeq}
/**
* Finds abstract chemicals in the DB and processes their R-smiles for later use in SAR generation
*/
class SingleSarChemicals(mongoDb: MongoDB) {
  val logger = LogManager.getLogger(getClass)

  // Regex used for the mongo SMILES query: matches R-group atoms such as [R] or
  // [R1]. NOTE(review): the double escaping suggests this string is consumed as
  // a regex by Mongo.defineMongoRegex — confirm the escaping level is correct.
  private val REGEX_TO_SEARCH = "\\\\[R[0-9]*\\\\]"
  // Broader R-group pattern intended for substitution (excludes elements whose
  // symbol merely contains 'R', e.g. Re/Ru/Rh) — TODO confirm against usage.
  private val REGEX_TO_REPLACE = "\\\\[[^\\\\[]*R(\\\\]|[^euh][^\\\\]]*\\\\])"
  // Placeholder atom substituted for R groups; gold ([Au]) is presumably chosen
  // because it does not occur in the organic chemicals of interest — verify.
  private val CARBON_REPLACEMENT = "\\\\[Au\\\\]"

  // Export format: SMARTS with 2D cleaning and aromatization applied.
  val cleanSmartsFormat = new MoleculeFormat.MoleculeFormatType(MoleculeFormat.smarts,
    List(Cleaning.clean2d, Cleaning.aromatize))

  val desalter: Desalter = new Desalter(new ReactionProjector())
  desalter.initReactors()
  val standardizer: Standardizer = new Standardizer("addexplicith");

  // There are many, many repeated abstract smiles in the DB
  // This cache ensures we only process each one once
  val smilesCache : mutable.Map[String, Option[AbstractChemicalInfo]] =
    new mutable.HashMap[String, Option[AbstractChemicalInfo]]()
/**
* Returns a list of the AbstractChemicalInfo corresponding to each abstract chemical
* Abstract chemicals are detected by containing the regex REGEX_TO_SEARCH, and being importable by chemaxon.
* The importability ensures that "smiles" with strings like "a nucleobase" in them are not processed further.
*/
def getAbstractChemicals() : List[AbstractChemicalInfo] = {
logger.info("Finding abstract chemicals.")
/*
Mongo DB Query
Query: All elements that contain "[R]" or "[R#]", for some number #, in their SMILES
TODO: try incorporating elements containing R in their inchi, which don't have a smiles, by replacing R with Cl.
*/
var query = Mongo.createDbObject(ChemicalKeywords.SMILES, Mongo.defineMongoRegex(REGEX_TO_SEARCH))
val filter = Mongo.createDbObject(ChemicalKeywords.SMILES, 1)
val result: Seq[DBObject] = Mongo.mongoQueryChemicals(mongoDb)(query, filter, notimeout = true).toStream
/*
Convert from DB Object => Smarts and return that.
*/
var counter = 0
val abtractChemicalList: List[AbstractChemicalInfo] = result.flatMap(dbObj => {
counter = counter + 1
if (counter % 1000 == 0) {
println(s"Processed $counter chemicals.")
}
getAbstractChemicalInfo(dbObj)
}).toList
logger.info(s"Finished finding abstract chemicals. Found ${abtractChemicalList.size}")
abtractChemicalList
}
/**
* Gets abstract chemical info by pulling the appropriate fields from the db object and calling
* calculateConcreteSubstrateAndProduct
*/
def getAbstractChemicalInfo(dbChemical : DBObject) : Option[AbstractChemicalInfo] = {
val chemicalId = dbChemical.get("_id").asInstanceOf[Long].toInt
val smiles = dbChemical.get("SMILES").asInstanceOf[String]
val result : Option[AbstractChemicalInfo] = calculateConcreteSubstrateAndProduct(chemicalId, smiles)
if (result.isDefined) {
return Some(result.get)
}
None
}
/**
* Calculates the concrete smiles that should be used for the given chemical in order to atom map an abstract
* reaction. Since this is different for a substrate and a product, we calculate both versions here for later use
* in either side of a reaction.
* The pipeline we use is as follows:
* SUBSTRATE pipeline: import smiles, make hydrogens explicit, export to string, replace R groups with C's, import to molecule, desalt the molecule, export to smarts
* PRODUCT pipeline: replace R groups with C's, import molecule, desalt the molecule, export to smarts
* The difference is that, for substrates, we need to make all H's explicit except those touching where the abstract group was
* This ensures that the variation in concrete molecules that will match the generated substructure can only occur at the site of abstraction
* For products, this is not a concern, and explicit hydrogens can actually hinder our ability to match products of a projection to DB products. So, we leave hydrogens implicit.
* @param chemicalId The ID of the chemical.
* @param chemicalSmiles The smiles of the chemical from the DB.
* @return An object grouping the chemical Id to the modified smiles to be used if this chemical is a substrate
* or product of an abstract reaction.
*/
def calculateConcreteSubstrateAndProduct(chemicalId : Int, chemicalSmiles : String): Option[AbstractChemicalInfo] = {
// Use cache to avoid repeat calculations
val cachedResult = smilesCache.get(chemicalSmiles)
if (cachedResult.isDefined) {
val cached = cachedResult.get
if (cached.isDefined) {
val info = cached.get
return Some(AbstractChemicalInfo(chemicalId, info.dbSmiles, info.asSubstrate, info.asProduct))
}
return None
}
try {
println("Original R-smiles: " + chemicalSmiles)
//Replace the R with Gold (Au)
val goldSmiles = replaceRWithGold(chemicalSmiles)
println("Goldsmiles: " + goldSmiles)
//Load the gold-modified substrate into ChemAxon as a concrete molecule, not as SMARTs
val substrateMolecule = MolImporter.importMol(goldSmiles)
// Make substrate hydrogens explicit
standardizer.standardize(substrateMolecule)
// Desalt the substrate
val desaltedSubstrateList = desalter.desaltMoleculeForAbstractReaction(substrateMolecule.clone()) // clone to avoid destroying molecule
if (desaltedSubstrateList.size() != 1) {
// I haven't seen this case so far
println(s"Found multiple fragments after desalting chemical $chemicalSmiles. Don't handle this case yet. Exiting!")
return None
}
val desaltedSubstrate = desaltedSubstrateList.get(0)
//Export the substrate as SMILES (I'ts still concrete)
val desaltedSmiles = MoleculeExporter.exportAsSmiles(desaltedSubstrate)
println("Hydrogenized and desalted smiles: " + desaltedSmiles)
// Replace Gold with Carbon, which will convert it to abstract SMARTS
val finalSmarts = replaceGoldWithCarbon(desaltedSmiles)
println("Final SMARTS: " + finalSmarts)
val result = new AbstractChemicalInfo(chemicalId, chemicalSmiles, finalSmarts, finalSmarts)
smilesCache.put(chemicalSmiles, Some(result))
return Some(result)
} catch {
case e : MolFormatException => {
smilesCache.put(chemicalSmiles, None)
return None
}
}
}
def replaceRWithGold(chemical: String): String = {
chemical.replaceAll(REGEX_TO_REPLACE, CARBON_REPLACEMENT)
}
def replaceGoldWithCarbon(chemical: String): String = {
chemical.replaceAll("Au", "C")
}
}
| 20n/act | reachables/src/main/scala/com/act/biointerpretation/rsmiles/single_sar_construction/SingleSarChemicals.scala | Scala | gpl-3.0 | 9,163 |
package biz.mediabag.qcoder.ffmpeg
import avformat._
import avcodec._
import avutil._
import com.sun.jna._
import com.ochafik.lang.jnaerator.runtime._
import com.ochafik.lang.jnaerator.runtime.Structure
import grizzled.slf4j.Logger
import java.nio._
import biz.mediabag.qcoder._
import grizzled.slf4j.Logging
/**
 * Central JNA entry point for the FFmpeg native libraries (avformat, avcodec, avutil).
 * Registers all formats and codecs at class-load time, logs the native build
 * configurations, and exposes helpers for crossing the Scala/native boundary.
 */
object FFmpegUtils extends Logging {
  val log = Logger("FFmpegUtils")

  // avformat must be registered before any muxing/demuxing can happen.
  val FormatLibrary = AvformatLibrary.INSTANCE
  FormatLibrary.av_register_all
  log.info("AvformatLibrary " + AvformatLibrary.JNA_LIBRARY_NAME + ": " + FormatLibrary.avformat_configuration)

  // avcodec initialisation plus registration of all built-in codecs.
  val CodecLibrary = AvcodecLibrary.INSTANCE
  CodecLibrary.avcodec_init
  CodecLibrary.avcodec_register_all

  // Lazily-walked name lists; forced for the first time by the log.info calls below.
  lazy val codecs = buildCodecList(null)
  lazy val iFormats = buildFormatList(null.asInstanceOf[AVInputFormat])
  lazy val oFormats = buildFormatList(null.asInstanceOf[AVOutputFormat])
  log.info("AvcodecLibrary: " + CodecLibrary.avcodec_configuration)
  log.info("Codecs are " + codecs)
  log.info("Input formats are " + iFormats)
  log.info("Output formats are " + oFormats)
  val UtilLibrary = AvutilLibrary.INSTANCE

  /**
   * Copies a non-empty string into newly allocated native memory.
   * NOTE(review): the buffer is sized by character count but filled from
   * str.getBytes(), whose length can exceed str.length for non-ASCII input,
   * and no NUL terminator is appended — confirm callers only pass ASCII and
   * that the native side receives an explicit length.
   */
  implicit def convertToMemory(str: String): Memory = {
    assert(str.length > 0, "String length needs to be greater than 0")
    val mem = new Memory(str.length)
    mem.write(0, str.getBytes(), 0, str.length());
    mem
  }

  /** Walks the native codec list starting after `codec` (null = from the beginning). */
  def buildCodecList(codec:AVCodec):List[String] = {
    val ac = CodecLibrary.av_codec_next(codec)
    if (ac == null) {
      Nil
    } else {
      // Not tail-recursive; the registered codec list is short enough in practice.
      ac.name :: buildCodecList(ac)
    }
  }

  /** Collects the names of all registered input (demuxer) formats after `format`. */
  def buildFormatList(format:AVInputFormat):List[String] = {
    val ac = FormatLibrary.av_iformat_next(format)
    if (ac == null) {
      Nil
    } else {
      ac.name :: buildFormatList(ac)
    }
  }

  /** Collects the names of all registered output (muxer) formats after `format`. */
  def buildFormatList(format:AVOutputFormat):List[String] = {
    val ac = FormatLibrary.av_oformat_next(format)
    if (ac == null) {
      Nil
    } else {
      ac.name :: buildFormatList(ac)
    }
  }

  /** Reads a native NUL-terminated string starting at offset 0 of the pointer. */
  implicit def convertToString(pointer: Pointer): String = {
    pointer.getString(0)
  }

  /**
   * Wraps a native call returning an FFmpeg status code: negative codes are
   * translated through av_strerror and raised as QCoderException, otherwise
   * the (non-negative) code is logged and returned.
   */
  object FFmpegCall {
    def apply(call: => Int): Int = {
      val code = call
      if (code < 0) {
        val errBuf = ByteBuffer.allocate(1024)
        val errBufSize = new NativeSize(1024)
        val success = UtilLibrary.av_strerror(code, errBuf, errBufSize);
        throw new QCoderException(code, new String(errBuf.array))
      } else {
        info("Call result= " + code)
        code
      }
    }
  }

  /** Wraps a native call returning an object: null results become QCoderException. */
  object FFmpegCallObject {
    def apply[T](call: => T): T = {
      val code = call
      if (code == null) {
        throw new QCoderException("Call failed")
      } else {
        code
      }
    }
  }
}
| sumo/QCoder | src/main/scala/biz/mediabag/qcoder/ffmpeg/FFmpegUtils.scala | Scala | bsd-2-clause | 2,667 |
/*
* Copyright 2015 The SIRIS Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* The SIRIS Project is a cooperation between Beuth University, Berlin and the
* HCI Group at the University of Würzburg. The project is funded by the German
* Federal Ministry of Education and Research (grant no. 17N4409).
*/
package simx.core.worldinterface.entity
import java.util.UUID
import simx.core.entity.Entity
import simx.core.svaractor.unifiedaccess.EntityUpdateHandling
import simx.core.worldinterface.base.WorldInterfaceActorBase
import simx.core.svaractor.semantictrait.base.CPST.cpsIterable
import simx.core.worldinterface.entity.filter.EntityFilter
import scala.language.reflectiveCalls
/**
* Created by martin
* on 21/04/15.
*/
/**
 * Actor mixin maintaining the "new" entity registry: tracks registered entities,
 * notifies persistent appearance listeners, and answers one-shot "next matching
 * entity" requests. Message handling is single-threaded (actor semantics); the
 * `.cps` iterations run inside delimited-continuation `reset` blocks.
 */
private[worldinterface] trait NewEntityRegistration extends WorldInterfaceActorBase with EntityUpdateHandling{
  // One-shot request: `sentOnce` guards against answering twice, `id` lets the
  // task be removed from the pending list once it has been answered.
  private case class OnNextRegistrationTask(filter: EntityFilter, provideAnswer: Entity => Any, var sentOnce: Boolean = false, id: UUID = UUID.randomUUID())

  // All entities registered so far.
  private var registeredEntities: collection.immutable.Set[Entity] = Set[Entity]()
  // Persistent listeners notified on every matching appearance or update.
  private var entityRegistrationListeners = Set[AddOnEntityAppearanceListener]()
  // Pending one-shot requests (OnNextEntityAppearance / OnOneEntityAppearance).
  private var onNextRegistrationRequests = List[OnNextRegistrationTask]()
  //List[(EntityFilter, Entity => Any)]()
  //TODO Implement if old entity registry is removed
  //addHandler[EntityRegisterRequest]{ msg => }
  //TODO Implement if old entity registry is removed
  //addHandler[EntityUnregisterRequest]{ msg => }
  addHandler[AddOnEntityAppearanceListener]{ msg =>
    bindAndCheckFilter(msg.filter)
    entityRegistrationListeners += msg
    // scala.util.continuations.reset {
    //  registeredEntities.cps.filter(msg.filter).foreach(e => msg.actor ! CreationMessageNew(e, msg.reference))
    // }
  }
  // Answers with the set of currently registered entities matching the filter.
  addHandler[RequestEntities]{ msg =>
    bindAndCheckFilter(msg.filter)
    val _provideAnswer = provideAnswer[Set[Entity]]
    scala.util.continuations.reset {
      _provideAnswer(registeredEntities.cps.filter(msg.filter))
    }
    DelayedAnswer
  }
  // Answers with the next entity that appears and matches (ignores already-registered ones).
  addHandler[OnNextEntityAppearance]{ msg =>
    bindAndCheckFilter(msg.filter)
    onNextRegistrationRequests ::= OnNextRegistrationTask(msg.filter, provideAnswer[Entity])
    DelayedAnswer
  }
  // Like OnNextEntityAppearance, but answers immediately if a matching entity already exists.
  addHandler[OnOneEntityAppearance]{ msg =>
    // println("Received request from " + sender())
    bindAndCheckFilter(msg.filter)
    val _provideAnswer = provideAnswer[Entity]
    scala.util.continuations.reset {
      val task = OnNextRegistrationTask(msg.filter, _provideAnswer)
      onNextRegistrationRequests ::= task
      val matching = registeredEntities.cps.filter(msg.filter)
      if(matching.nonEmpty && !task.sentOnce) {
        task.provideAnswer(matching.head)
        // NOTE(review): this looks like it was meant to set sentOnce = true after
        // answering; since the task is removed from the list on the next line the
        // reset may be harmless — verify against notifyRegistrationListeners below.
        task.sentOnce = false
        onNextRegistrationRequests = onNextRegistrationRequests.filterNot(_.id == task.id)
      }
    }
    DelayedAnswer
  }
  // Binds the filter's evaluation context to this actor before it is used.
  private def bindAndCheckFilter(f : EntityFilter): Unit = {
    f.bindContext(this)
  }
  /** Adds an entity to the registry and re-notifies listeners on every subsequent update. */
  protected def _registerEntity(e: Entity): Unit = {
    // println("Added '" + e.getSimpleName + "' to the new entity registry")
    registeredEntities += e
    // e.onUpdate(notifyRegistrationListeners)
    // notifyRegistrationListeners(e)
    //var firstTime = true //TODO check why observe triggers twice
    e.onUpdate(entity => {
      //if(firstTime) firstTime = false
      //else
      notifyRegistrationListeners(entity)
    })
    notifyRegistrationListeners(e)
  }
  /** Removes an entity and stops observing its updates. */
  protected def _unRegisterEntity(e: Entity): Unit = {
    e.ignore()
    registeredEntities -= e
  }
  // Delivers the entity to matching persistent listeners and answers + retires
  // matching one-shot requests.
  private def notifyRegistrationListeners(registeredEntity: Entity): Unit = {
    scala.util.continuations.reset {
      val matchingListeners = entityRegistrationListeners.cps.filter(_.filter(registeredEntity))
      matchingListeners.foreach(
        listener => listener.actor ! EntityAppearance(registeredEntity, listener.reference)
      )
      //TODO add 'partition' to cps
      val matching = onNextRegistrationRequests.cps.filter(_.filter.apply(registeredEntity))
      val nonMatching = onNextRegistrationRequests.cps.filter(!_.filter.apply(registeredEntity))
      matching.foreach { task =>
        if(!task.sentOnce) {
          task.provideAnswer(registeredEntity)
          // NOTE(review): presumably intended to be `true` to mark the task answered;
          // matching tasks are dropped below, so the guard may never be consulted — verify.
          task.sentOnce = false
        }
      }
      onNextRegistrationRequests = nonMatching
    }
  }
}
package sample.stream.word_count
import java.io.File
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.io.SynchronousFileSource
import akka.stream.scaladsl._
import akka.util.ByteString
/**
 * Reads "abc.txt" and, via a 3-way broadcast, concurrently computes per-chunk
 * character, word and line counts, folding each branch into a total. The three
 * materialized fold futures are chained so the actor system is shut down only
 * after all three sinks have completed.
 */
object WordCounterClosedShape {
  def main(args: Array[String]) {
    val fileSource = SynchronousFileSource(new File("abc.txt"))
    implicit val actorSystem = ActorSystem("word-counter")
    implicit val materializer = ActorMaterializer()
    import actorSystem.dispatcher
    // Prints each per-chunk count, then folds the counts into a running total.
    val sink = Flow[Int].map({ x => println(x); x }).toMat(Sink.fold(0)(_ + _))(Keep.right)
    // Decodes raw ByteString chunks into UTF-8 text.
    val flow = Flow[ByteString].map(_.utf8String)
    // The same sink blueprint is materialized three times, once per broadcast branch;
    // the combiner sequences their futures and shuts the system down at the end.
    val runnableGraph = FlowGraph.closed(sink, sink, sink)((m1, m2, m3) => m1.flatMap(_ => m2).flatMap(_ => m3).onComplete(xx => { println("hi"); actorSystem.shutdown; })) { implicit builder =>
      (s1, s2, s3) =>
        import FlowGraph.Implicits._
        val broadcast = builder.add(Broadcast[ByteString](3))
        fileSource ~> broadcast.in
        broadcast.out(0) ~> flow.map(_.length) ~> s1 // characters per chunk
        broadcast.out(1) ~> flow.map(_.split(" ").length) ~> s2 // words per chunk
        broadcast.out(2) ~> flow.map(_.split("\\n").length) ~> s3 // lines per chunk
    }
    runnableGraph.run()
  }
}
| pallavig/akka-examples | src/main/scala/sample/stream/word_count/WordCounterClosedShape.scala | Scala | cc0-1.0 | 1,216 |
/*
* Copyright 2012 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.comcast.money.core
import com.comcast.money.api.{ InstrumentationLibrary, SpanFactory }
import com.comcast.money.core.internal.SpanLocal
import com.comcast.money.core.samplers.AlwaysOnSampler
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
class MoneyTraceProviderSpec extends AnyWordSpec with Matchers {

  /** Builds a fresh Tracer backed by a disabled (no-op) span factory for each test. */
  private def newTracer(): Tracer = {
    val coreFactory = new CoreSpanFactory(SpanLocal, SystemClock, DisabledSpanHandler, DisabledFormatter, AlwaysOnSampler, Money.InstrumentationLibrary)
    new Tracer {
      override val spanFactory: SpanFactory = coreFactory
    }
  }

  "MoneyTraceProvider" should {
    "wrap an existing Tracer with a decorated SpanFactory" in {
      val baseTracer = newTracer()
      val provider = MoneyTracerProvider(baseTracer)

      val wrapped = provider.get("test")
      wrapped shouldBe a[Tracer]
      wrapped should not be baseTracer

      // Spans started through the wrapper must carry the requested library name.
      val span = wrapped.asInstanceOf[Tracer].startSpan("test")
      span.info.library shouldBe new InstrumentationLibrary("test")
    }
    "returns the same Tracer for equivalent InstrumentationLibraries" in {
      val provider = MoneyTracerProvider(newTracer())

      val first = provider.get("test", "0.0.1")
      val second = provider.get("test", "0.0.1")
      second shouldBe first
    }
    "return different Tracers for different InstrumentationLibraries" in {
      val provider = MoneyTracerProvider(newTracer())

      val first = provider.get("test", "0.0.1")
      val second = provider.get("test", "0.0.2")
      second shouldBe a[Tracer]
      second should not be first
    }
  }
}
| Comcast/money | money-core/src/test/scala/com/comcast/money/core/MoneyTraceProviderSpec.scala | Scala | apache-2.0 | 2,749 |
package scalan.collections
import scalan.{ScalanDslStd, Scalan}
import scalan.it.BaseItTests
import scalan.primitives.Functions
/**
 * Staged (Scalan) test programs exercising the MMap primitive: construction,
 * lookup, set-like use, set algebra (union/difference/join/reduce), iteration,
 * map-reduce and group-by. Each `lazy val` is a staged function compiled and
 * compared against the standard interpretation by MapItTests.
 */
trait SimpleMapProg extends Scalan {
  lazy val mapEmpty = fun {_:Rep[Int] =>
    MMap.empty[Int,Int]
  }
  // NOTE(review): `update(...) | m` appears to sequence the effectful update before
  // yielding `m` (Scalan's ordering operator) — confirm against the DSL semantics.
  lazy val mapPutContains = fun { p:Rep[(Int,Double)] =>
    val m = MMap.empty[Int,Double]
    val m1 = m.update(p._1, p._2) | m
    m1.contains(p._1)
  }
  lazy val mapGetOrElse = fun { p:Rep[(Int,Double)] =>
    val m = MMap.empty[Int,Double]
    val m1 = m.update(p._1, p._2) | m
    m1.getOrElse(p._1, p._2)
  }
  // Uses a map with Unit values as a set: membership only, no payload.
  lazy val mapAsSet = fun {in:Rep[Int] =>
    val m = MMap.empty[Int,Unit]
    val m1 = m.update(in,()) | m
    m1.contains(in)
  }
  lazy val unionMaps = fun { in: Rep[(Array[(Int, Double)], Array[(Int, Double)])] =>
    val map1 = MMap.fromArray[Int, Double](in._1)
    val map2 = MMap.fromArray[Int, Double](in._2)
    map1.union(map2).toArray.sort
  }
  lazy val differenceMaps = fun { in: Rep[(Array[(Int, Double)], Array[(Int, Double)])] =>
    val map1 = MMap.fromArray[Int, Double](in._1)
    val map2 = MMap.fromArray[Int, Double](in._2)
    map1.difference(map2).toArray.sort
  }
  lazy val joinMaps = fun { in: Rep[(Array[(Int, Double)], Array[(Int, Double)])] =>
    val map1 = MMap.fromArray[Int, Double](in._1)
    val map2 = MMap.fromArray[Int, Double](in._2)
    map1.join(map2).toArray.sort
  }
  // Merges the two maps, summing values for keys present in both.
  lazy val reduceMaps = fun { in: Rep[(Array[(Int, Double)], Array[(Int, Double)])] =>
    val map1 = MMap.fromArray[Int, Double](in._1)
    val map2 = MMap.fromArray[Int, Double](in._2)
    map1.reduce(map2, fun2 { (a, b) => a + b}).toArray.sort
  }
  // Sums map(1), map(2), ... while consecutive integer keys are present.
  lazy val iterateMap = fun { in: Rep[Array[(Int, Double)]] =>
    val map = MMap.fromArray[Int, Double](in)
    loopUntil2(1, 0.0)(
      { (i, sum) => (!map.contains(i) && i > map.size)}, { (i, sum) => (i + 1, sum + map(i))}
    )
  }
  // Classic word-count shape: each element mapped to (key, 1) and summed per key.
  lazy val mapReduceByKey = fun { in: Rep[Array[Int]] =>
    in.mapReduce[Int, Int](a => (a, toRep(1)), (s1, s2) => s1 + s2).toArray.sort
  }
  // Exercises a map keyed by a compound (tuple) type.
  lazy val compoundMapKey = fun { in: Rep[(Array[(Int, Double)], Array[Int])] =>
    val map = MMap.fromArray[(Int, Double), Int](in._1 zip in._2)
    loopUntil2(0, 0)(
      { (i, sum) => (i >= map.size)}, { (i, sum) => (i + 1, sum + map(in._1(i)))}
    )
  }
  // Exercises a map whose values are a compound (tuple) type.
  lazy val compoundMapValue = fun { in: Rep[(Array[String], Array[(Int, Double)])] =>
    val map = MMap.fromArray[String, (Int, Double)](in._1 zip in._2)
    map("two")._2
  }
  // Group by first tuple component and count group sizes; sorted for stable comparison.
  lazy val groupByCount = fun { in: Rep[Array[(Int, Int)]] =>
    in.groupBy(fun { p => p._1}).mapValues(g => g.length).toArray.sortBy(fun { p => p._1})
  }
  // Group by first tuple component and sum the second components per group.
  lazy val groupBySum = fun { in: Rep[Array[(Int, Int)]] =>
    in.groupBy(fun { p => p._1}).mapValues(g => g.toArray.map(p => p._2).sum).toArray.sortBy(fun { p => p._1})
  }
}
/**
 * Integration tests for SimpleMapProg: each test runs a staged MMap program and
 * compares its output against the standard (unstaged) Scala interpretation via
 * compareOutputWithStd.
 */
abstract class MapItTests extends BaseItTests[SimpleMapProg](new ScalanDslStd with SimpleMapProg) {
  test("mapPutContains") {
    compareOutputWithStd(_.mapPutContains)(Tuple2(314,3.14))
  }
  // test("mapGetOrElse") {
  //  compareOutputWithStd(_.mapGetOrElse)(Tuple2(314,3.14))
  // }
  test("mapAsSet") {
    compareOutputWithStd(_.mapAsSet)(314)
  }
  test("unionMaps") {
    val in = (Array((1, 1.1), (2, 2.2), (3, 3.3), (4, 4.4), (5, 5.5)), Array((0, 0.0), (2, 2.0), (4, 4.0), (6, 6.0)))
    compareOutputWithStd(_.unionMaps)(in)
  }
  test("differenceMaps") {
    val in = (Array((1, 1.1), (2, 2.2), (3, 3.3), (4, 4.4), (5, 5.5)), Array((0, 0.0), (2, 2.0), (4, 4.0), (6, 6.0)))
    compareOutputWithStd(_.differenceMaps)(in)
  }
  test("iterateMap") {
    val in = Array((1, 1.1), (2, 2.2), (3, 3.3))
    compareOutputWithStd(_.iterateMap)(in)
  }
  test("mapReduce") {
    val in = Array(1, 2, 1, 1, 2, 3, 4, 1, 5, 4, 3, 2, 5, 2, 1)
    compareOutputWithStd(_.mapReduceByKey)(in)
  }
  test("joinMaps") {
    val in = (Array((1, 1.1), (2, 2.2), (3, 3.3), (4, 4.4), (5, 5.5)), Array((0, 0.0), (2, 2.0), (4, 4.0), (6, 6.0)))
    compareOutputWithStd(_.joinMaps)(in)
  }
  test("compoundMapKey") {
    val in = (Array((2, 1.0), (3, 2.0), (1, 3.0), (5, 4.0), (4, 5.0)), Array(1, 2, 3, 4, 5))
    compareOutputWithStd(_.compoundMapKey)(in)
  }
  test("reduceMaps") {
    val in = (Array((1, 1.1), (2, 2.2), (3, 3.3), (4, 4.4), (5, 5.5)), Array((0, 0.0), (2, 2.0), (4, 4.0), (6, 6.0)))
    compareOutputWithStd(_.reduceMaps)(in)
  }
  test("groupByCount") {
    val in = Array((2, 1), (3, 2), (2, 5), (1, 3), (5, 4), (1, 3), (4, 5), (2, 4))
    compareOutputWithStd(_.groupByCount)(in)
  }
  test("groupBySum") {
    val in = Array((2, 1), (3, 2), (2, 5), (1, 3), (5, 4), (1, 3), (4, 5), (2, 4))
    compareOutputWithStd(_.groupBySum)(in)
  }
} | PCMNN/scalan-ce | core/src/test/scala/scalan/collections/SimpleMapTests.scala | Scala | apache-2.0 | 4,776 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.scala.dsl
import builder.RouteBuilder
import org.apache.camel.model.ThreadsDefinition
/**
* Scala enrichment for Camel's ThreadsDefinition
*/
case class SThreadsDefinition(override val target: ThreadsDefinition)(implicit val builder: RouteBuilder) extends SAbstractDefinition[ThreadsDefinition] {
  /** Sets the core pool size on the underlying threads definition; returns the wrapper for chaining. */
  def poolSize(size: Int) = wrap(target.poolSize(size))
  // Narrow wrap's return type so fluent chains keep this Scala wrapper type.
  override def wrap(block: => Unit) = super.wrap(block).asInstanceOf[SThreadsDefinition]
}
| engagepoint/camel | components/camel-scala/src/main/scala/org/apache/camel/scala/dsl/SThreadsDefinition.scala | Scala | apache-2.0 | 1,282 |
package contests.worldCodeSpring
/**
* Created by yuJieShui on 2016/6/26.
*/
object MinimumDistances {
  /**
   * Computes the minimum index distance between any two equal elements of `seq`.
   *
   * Walks the sequence once, remembering for each value its most recent index
   * (`startIndex`) and the smallest gap seen so far (`historyDistances`).
   *
   * @param seq              remaining elements to scan (callers pass the full list)
   * @param i                index of the head of `seq` in the original sequence (callers pass 0)
   * @param startIndex       most recent index seen for each value (callers pass Map())
   * @param historyDistances smallest gap recorded for each repeated value (callers pass Map())
   * @return the smallest index gap between two equal values, or -1 if no value repeats
   */
  @scala.annotation.tailrec
  def solution(seq: Seq[Int],
               i: Int,
               startIndex: Map[Int, Int],
               historyDistances: Map[Int, Int]): Int = seq match {
    case Nil =>
      // Done scanning: no repeats recorded => -1, otherwise the smallest gap.
      // (Explicit emptiness check replaces the previous Try(minBy…) which used
      // the thrown exception as control flow.)
      if (historyDistances.isEmpty) -1 else historyDistances.valuesIterator.min
    case head :: tail =>
      startIndex.get(head) match {
        case Some(start) =>
          // Seen before: gap to the previous occurrence, kept only if smaller.
          val nowDistance = i - start
          val distance =
            historyDistances.get(head)
              .map(before => math.min(before, nowDistance)).getOrElse(nowDistance)
          solution(tail, i + 1, startIndex + (head -> i), historyDistances + (head -> distance))
        case None =>
          // First occurrence: just remember the index.
          solution(tail, i + 1, startIndex + (head -> i), historyDistances)
      }
  }

  /** Reads `n` and then `n` integers from stdin, printing the minimum distance. */
  def main(args: Array[String]): Unit = {
    def readListInt() = io.StdIn.readLine().split(" ").toList.map(_.toInt)
    val n :: Nil = readListInt()
    val data = readListInt()
    println(
      solution(
        data, 0, Map(), Map()
      )
    )
  }
}
| 1178615156/hackerrank | src/main/scala/contests/worldCodeSpring/MinimumDistances.scala | Scala | apache-2.0 | 1,143 |
package xitrum
import akka.actor.{Actor, PoisonPill}
import io.netty.handler.codec.http.HttpResponseStatus
import xitrum.handler.HandlerEnv
import xitrum.handler.inbound.Dispatcher
/**
* An actor will be created when there's request. It will be stopped when:
* - The connection is closed
* - The response has been sent by respondText, respondView etc.
*
* For chunked response, it is not stopped right away. It is stopped when the
* last chunk is sent.
*
* See also Action and FutureAction.
*/
trait ActorAction extends Actor with Action {
  // Sending PoisonPill at postStop (or at another method called by it) causes
  // postStop to be called again! This flag breaks that recursion.
  private var postStopCalled = false

  // Entry point: the dispatcher sends (HandlerEnv, skipCsrfCheck) exactly once per request.
  def receive: Receive = {
    case (env: HandlerEnv, skipCsrfCheck: Boolean) =>
      apply(env)

      // Don't use context.stop(self) to avoid leaking context outside this actor
      addConnectionClosedListener {
        // The check is for avoiding "Dead actor sends Terminate msg to itself"
        // See onDoneResponding below
        // https://github.com/xitrum-framework/xitrum/issues/183
        if (!isDoneResponding) {
          env.release()
          if (!postStopCalled) self ! PoisonPill
        }
      }

      dispatchWithFailsafe(skipCsrfCheck)
  }

  override def onDoneResponding(): Unit = {
    // Use context.stop(self) instead of sending PoisonPill to avoid double
    // response, because the PoisonPill mesage takes some time to arrive, and
    // during that time other messages may arrive, and the actor logic may
    // respond again. With context.stop(self), there should be no more message.
    //
    // Unlike the use of PoisonPill at addConnectionClosedListener above,
    // responding should be called only within the actor, so when onDoneResponding
    // is called, here we are inside the actor.
    if (!postStopCalled) context.stop(self)
  }

  // Safety net: if the actor dies without having responded, send a 500 (or the
  // configured error-500 action in production) so the client is not left hanging.
  override def postStop(): Unit = {
    if (postStopCalled) return
    postStopCalled = true

    // When there's uncaught exception (dispatchWithFailsafe can only catch
    // exception thrown from the current calling thread, not from other threads):
    // - This actor is stopped without responding anything
    // - Akka won't pass the exception to this actor, it will just log the error
    if (isDoneResponding) return

    // Connection already unusable: just release the request resources.
    if (!channel.isWritable) {
      handlerEnv.release()
      return
    }

    response.setStatus(HttpResponseStatus.INTERNAL_SERVER_ERROR)
    if (Config.productionMode) {
      Config.routes.error500 match {
        case None =>
          respondDefault500Page()

        case Some(error500) =>
          // Guard against infinite recursion when the error-500 action is this very action.
          if (error500 == getClass) {
            respondDefault500Page()
          } else {
            response.setStatus(HttpResponseStatus.INTERNAL_SERVER_ERROR)
            Dispatcher.dispatch(error500, handlerEnv, skipCsrfCheck = true)
          }
      }
    } else {
      // Development mode: surface the problem to the developer directly.
      val errorMsg = s"The ActorAction ${getClass.getName} has stopped without responding anything. Check server log for exception."
      if (isAjax)
        jsRespond(s"""alert("${jsEscape(errorMsg)}")""")
      else
        respondText(errorMsg)
    }
  }
}
| xitrum-framework/xitrum | src/main/scala/xitrum/ActorAction.scala | Scala | mit | 3,151 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.contrib.specs2
import slamdata.Predef._
import quasar.contrib.pathy.Helpers._
import java.io.{File => JFile}
import org.specs2.execute._
/**
* Specs2 trait that is like "pendingUntilFixed" but additionally tracks
* actual results. It fails a test in case the expected actual is not equal to
* the (actual) actual.
*
* This trait can run in 2 modes:
* - TestMode: the usual mode for testing
* - WriteMode: when there are new actuals, running in this mode will
* overwrite the old actuals with the new ones
*
* Note: Style of this code is adopted from specs2 (PendingUntilFixed.scala).
*/
/** Execution mode for PendingWithActualTracking specs. */
sealed trait Mode
/** Compare freshly produced actuals against the stored expected-actual files. */
case object TestMode extends Mode
/** Overwrite the stored expected-actual files with freshly produced actuals. */
case object WriteMode extends Mode
trait PendingWithActualTracking {
  /** Selects whether expected actuals are compared (TestMode) or rewritten (WriteMode). */
  val mode: Mode

  implicit def toPendingWithActualTracking[T : AsResult](t: =>T)
    : PendingWithActualTrackingClass[T] = new PendingWithActualTrackingClass(t)

  /** Reads the entire contents of `f`, running the read effect immediately. */
  def unsafeRead(f: JFile): String =
    jtextContents(f).unsafePerformSync

  /**
   * Writes `contents` to the file as UTF-8, truncating any existing content.
   * Fix: the parameter type was the (nonexistent) lowercase `jFile`; it must be
   * the `JFile` alias for java.io.File imported at the top of this file.
   */
  def unsafeWrite(jFile: JFile, contents: String): Unit = {
    java.nio.file.Files.write(
      jFile.toPath,
      contents.getBytes(java.nio.charset.StandardCharsets.UTF_8))
    ()
  }

  class PendingWithActualTrackingClass[T : AsResult](t: =>T) {
    /** Dispatches to the compare (TestMode) or overwrite (WriteMode) behavior. */
    def pendingWithActual(m: String, file: JFile): Result =
      mode match {
        case TestMode => pendingWithActualTestMode(m, file)
        case WriteMode => pendingWithActualWriteMode(m, file)
      }

    // TestMode: a passing test means the marker is stale; a failing test must
    // still produce exactly the stored actual, otherwise behavior has drifted.
    def pendingWithActualTestMode(m: String, file: JFile): Result = ResultExecution.execute(AsResult(t)) match {
      case s @ Success(_,_) =>
        Failure(m + " Fixed now, you should remove the 'pendingWithActual' marker")
      case Failure(_, _, _, FailureDetails(actual, _)) =>
        val expectedActual = unsafeRead(file)
        if (actual != expectedActual)
          Failure(m + " Behaviour has changed. Please review the test and set new expectation. New actual is: " + actual)
        else
          Pending(m + " Pending until fixed (actual unchanged)")
      case other =>
        Failure(m + " Behaviour has changed. Please review the test and set new expectation. Test result is: " + other)
    }

    // WriteMode: persist the newly produced actual as the expectation going forward.
    def pendingWithActualWriteMode(m: String, file: JFile): Result = ResultExecution.execute(AsResult(t)) match {
      case Failure(_, _, _, FailureDetails(actual, _)) =>
        unsafeWrite(file, actual)
        Success(m + s" Wrote file with new actual $file")
      case other =>
        Failure(m + " Unexpected format of test result: actual can not be written")
    }
  }
}
| jedesah/Quasar | foundation/src/test/scala/quasar/contrib/specs2/PendingWithActualTracking.scala | Scala | apache-2.0 | 3,162 |
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.action.async.sse
import io.gatling.http.action.async.AsyncTx
import io.gatling.http.ahc.HttpEngine
import akka.actor.ActorRef
object SseTx {
  /**
   * Opens the SSE stream: obtains an HTTP client for the transaction's session,
   * refreshes the transaction with the updated session, and executes the request
   * with a handler that forwards events to the given actor.
   */
  def start(tx: AsyncTx, sseActor: ActorRef, httpEngine: HttpEngine): Unit = {
    val (updatedSession, httpClient) = httpEngine.httpClient(tx.session, tx.protocol)
    val updatedTx = tx.copy(session = updatedSession)
    httpClient.executeRequest(updatedTx.request, new SseHandler(updatedTx, sseActor))
  }
}
| ryez/gatling | gatling-http/src/main/scala/io/gatling/http/action/async/sse/SseTx.scala | Scala | apache-2.0 | 1,133 |
/*
* Copyright © 2014 Nemanja Stanarevic <nemanja@alum.mit.edu>
*
* Made with ❤ in NYC at Hacker School <http://hackerschool.com>
*
* Licensed under the GNU Affero General Public License, Version 3
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at:
*
* <http://www.gnu.org/licenses/agpl-3.0.html>
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gmailapi.methods
import akka.actor.Actor
import gmailapi.oauth2.OAuth2Identity
import gmailapi.resources.{ Thread, ThreadList, GmailSerializer }
import org.json4s.jackson.Serialization.{ read, write }
import org.json4s.jackson.JsonMethods.parse
import scala.collection.immutable.Map
import scala.language.postfixOps
import spray.http.{HttpCredentials, HttpEntity, HttpMethods, ContentTypes, Uri}
object Threads {
  import GmailSerializer._

  /** GET users/{userId}/threads/{id} — fetches one mail thread (10 quota units). */
  case class Get(id: String, userId: String = "me")
    (implicit val token: OAuth2Identity) extends GmailRestRequest {
    val uri = s"$baseUri/users/$userId/threads/$id"
    val method = HttpMethods.GET
    val credentials: Option[HttpCredentials] = token
    val entity = HttpEntity.Empty
    val unmarshaller = Some(read[Thread](_: String))
    val quotaUnits = 10
  }

  /** GET users/{userId}/threads — lists threads matching the given filters
    * (10 quota units). `query` is a Gmail search expression. */
  case class List(
    includeSpamTrash: Boolean = false,
    labelIds: Seq[String] = Nil,
    maxResults: Option[Int] = None,
    pageToken: Option[String] = None,
    query: Option[String] = None,
    userId: String = "me")
    (implicit val token: OAuth2Identity) extends GmailRestRequest {
    val uri = {
      val queryBuilder = Uri.Query.newBuilder
      queryBuilder += ("includeSpamTrash" -> includeSpamTrash.toString)
      // Note: lambda parameters renamed — the originals shadowed the
      // constructor parameters of the same name.
      labelIds foreach {
        labelId => queryBuilder += ("labelIds" -> labelId)
      }
      maxResults foreach {
        max => queryBuilder += ("maxResults" -> max.toString)
      }
      pageToken foreach {
        page => queryBuilder += ("pageToken" -> page)
      }
      // Bug fix: the Gmail API expects the search expression under the "q"
      // parameter; it was previously sent as "query" and ignored by the API.
      query foreach {
        q => queryBuilder += ("q" -> q)
      }
      Uri(s"$baseUri/users/$userId/threads") withQuery (
        queryBuilder.result()) toString
    }
    val method = HttpMethods.GET
    val credentials: Option[HttpCredentials] = token
    val entity = HttpEntity.Empty
    val unmarshaller = Some(read[ThreadList](_: String))
    val quotaUnits = 10
  }

  /** POST users/{userId}/threads/{id}/modify — adds/removes labels on the
    * thread (10 quota units). */
  case class Modify(
    id: String,
    addLabelIds: Seq[String] = Nil,
    removeLabelIds: Seq[String] = Nil,
    userId: String = "me")
    (implicit val token: OAuth2Identity) extends GmailRestRequest {
    val uri = s"$baseUri/users/$userId/threads/$id/modify"
    val method = HttpMethods.POST
    val credentials: Option[HttpCredentials] = token
    val entity = HttpEntity(ContentTypes.`application/json`, write(Map(
      "addLabelIds" -> addLabelIds,
      "removeLabelIds" -> removeLabelIds)))
    val unmarshaller = Some(read[Thread](_: String))
    val quotaUnits = 10
  }

  /** DELETE users/{userId}/threads/{id} — deletes the thread (20 quota units).
    * No response body is expected, hence no unmarshaller. */
  case class Delete(id: String, userId: String = "me")
    (implicit val token: OAuth2Identity) extends GmailRestRequest {
    val uri = s"$baseUri/users/$userId/threads/$id"
    val method = HttpMethods.DELETE
    val credentials: Option[HttpCredentials] = token
    val entity = HttpEntity.Empty
    val unmarshaller = None
    val quotaUnits = 20
  }

  /** POST users/{userId}/threads/{id}/trash — moves the thread to trash (10 quota units). */
  case class Trash(id: String, userId: String = "me")
    (implicit val token: OAuth2Identity) extends GmailRestRequest {
    val uri = s"$baseUri/users/$userId/threads/$id/trash"
    val method = HttpMethods.POST
    val credentials: Option[HttpCredentials] = token
    val entity = HttpEntity.Empty
    val unmarshaller = Some(read[Thread](_: String))
    val quotaUnits = 10
  }

  /** POST users/{userId}/threads/{id}/untrash — restores the thread from trash (10 quota units). */
  case class Untrash(id: String, userId: String = "me")
    (implicit val token: OAuth2Identity) extends GmailRestRequest {
    val uri = s"$baseUri/users/$userId/threads/$id/untrash"
    val method = HttpMethods.POST
    val credentials: Option[HttpCredentials] = token
    val entity = HttpEntity.Empty
    val unmarshaller = Some(read[Thread](_: String))
    val quotaUnits = 10
  }
}
| nemanja-stanarevic/gmail-api-scala-client | src/main/scala/gmailapi/methods/Threads.scala | Scala | agpl-3.0 | 4,378 |
/*
* Wire
* Copyright (C) 2016 Wire Swiss GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.waz.sync.queue
import com.waz.api.SyncState
import com.waz.content._
import com.waz.log.BasicLogging.LogTag.DerivedLogTag
import com.waz.log.LogSE._
import com.waz.model.SyncId
import com.waz.model.sync.SyncJob.SyncJobDao
import com.waz.model.sync._
import com.waz.sync.queue.SyncJobMerger.{Merged, Unchanged, Updated}
import com.waz.threading.SerialDispatchQueue
import com.waz.utils._
import com.waz.utils.events.{AggregatingSignal, EventContext, EventStream, Signal}
import scala.collection.mutable
import scala.concurrent.Future
import scala.concurrent.duration._
/**
* Keeps actual SyncJobs in memory, and persists all changes to db.
* Handles merging of new requests, only adds new jobs if actually needed.
*/
trait SyncContentUpdater {
  /** Live view of all currently known sync jobs, keyed by job id. */
  def syncJobs: Signal[Map[SyncId, SyncJob]]
  /** Adds a job, merging it into an equivalent pending job when possible;
    * returns the affected (new or merged) job. */
  def addSyncJob(job: SyncJob, forceRetry: Boolean = false): Future[SyncJob]
  /** Removes the job with the given id, if present. */
  def removeSyncJob(id: SyncId): Future[Any]
  /** Applies `updater` to the stored job; None when no such job exists. */
  def updateSyncJob(id: SyncId)(updater: SyncJob => SyncJob): Future[Option[SyncJob]]
  /** Looks up a job by id. */
  def getSyncJob(id: SyncId): Future[Option[SyncJob]]
  /** All currently stored jobs. */
  def listSyncJobs: Future[Iterable[SyncJob]]
  /** Runs `body` with the underlying storage on its serial dispatch queue
    * (the storage itself is not thread safe). */
  def syncStorage[A](body: SyncStorage => A): Future[A]
}
class SyncContentUpdaterImpl(db: Database) extends SyncContentUpdater with DerivedLogTag {
  import EventContext.Implicits.global
  import SyncContentUpdater._

  // All storage access is serialized on this queue; SyncStorage is not thread safe.
  private implicit val dispatcher = new SerialDispatchQueue(name = "SyncContentUpdaterQueue")

  // One merger per merge key, used to deduplicate/merge equivalent jobs.
  private val mergers = new mutable.HashMap[Any, SyncJobMerger]

  // Loads all persisted jobs once, wraps them in a SyncStorage and wires up
  // listeners keeping the mergers and dependent-job priorities up to date.
  val syncStorageFuture = db(SyncJobDao.list(_)).future map { jobs =>
    // make sure no job is loaded with Syncing state, this could happen if app is killed while syncing
    jobs map { job =>
      if (job.state == SyncState.SYNCING) {
        verbose(l"found job in state: SYNCING on initial load: $job")
        job.copy(state = SyncState.WAITING)
      } else job
    }
  } map { jobs =>
    returningF(new SyncStorage(db, jobs)) { storage =>
      jobs foreach { updateMerger(_, storage) }
      storage.onAdded { job =>
        job.request match {
          case SerialConvRequest(conv) =>
            // Jobs for the same conversation with a larger (numerically lower-
            // priority) value are lowered to the new job's priority value.
            storage.getJobs.filter { j => SerialConvRequest.unapply(j.request).contains(conv) && j.priority > job.priority } foreach { j =>
              storage.update(j.id)(j => j.copy(priority = math.min(j.priority, job.priority)))
            }
          case _ =>
        }
        updateDeps(job, storage)
        updateMerger(job, storage)
      }
      storage.onUpdated { case (prev, updated) => updateMerger(updated, storage) }
      storage.onRemoved { job =>
        // Drop the job from its merger; discard the merger once it is empty.
        mergers.get(job.mergeKey) foreach { merger =>
          merger.remove(job.id)
          if (merger.isEmpty) mergers.remove(job.mergeKey)
        }
      }
    }
  }

  // Aggregates add/update/remove storage events into a signal holding the
  // current id -> job map.
  override lazy val syncJobs = {
    val onChange = EventStream[Cmd]()
    syncStorageFuture.map { syncStorage =>
      syncStorage.onUpdated { case (prev, updated) => onChange ! Update(updated) }
      syncStorage.onAdded { job => onChange ! Add(job) }
      syncStorage.onRemoved { job => onChange ! Del(job) }
    }
    new AggregatingSignal[Cmd, Map[SyncId, SyncJob]](onChange, listSyncJobs.map(_.map(j => j.id -> j).toMap), { (jobs, cmd) =>
      cmd match {
        case Add(job) => jobs + (job.id -> job)
        case Del(job) => jobs - job.id
        case Update(job) => jobs + (job.id -> job)
      }
    })
  }

  // XXX: this exposes internal SyncStorage instance which should never be used outside of our dispatch queue (as it is not thread safe)
  // We should use some kind of delegate here, which gets closed once body completes
  override def syncStorage[A](body: SyncStorage => A) = syncStorageFuture map body

  /**
   * Adds new request, merges it to existing request or skips it if duplicate.
   * @return affected (new or updated) SyncJob
   */
  override def addSyncJob(job: SyncJob, forceRetry: Boolean = false) = syncStorageFuture map { syncStorage =>
    def onAdded(added: SyncJob) = {
      assert(added.id == job.id)
      verbose(l"addRequest: $job, added: $added")
      added
    }
    def onMerged(merged: SyncJob) = {
      verbose(l"addRequest: $job, merged: $merged")
      // forceRetry resets the attempt counter and keeps the earlier start time.
      if (forceRetry) merged.copy(attempts = 0, startTime = math.min(merged.startTime, job.startTime))
      else merged
    }
    val toSave = merge(job, syncStorage) match {
      case Unchanged => error(l"Unexpected result from SyncJobMerger"); job
      case Updated(added) => onAdded(added)
      case Merged(merged) => onMerged(merged)
    }
    syncStorage.add(toSave)
  }

  override def removeSyncJob(id: SyncId) = syncStorageFuture.map(_.remove(id))

  // Returns the job; jobs older than StaleJobTimeout are removed and reported as None.
  override def getSyncJob(id: SyncId) = {
    for {
      job <- syncStorageFuture.map(_.get(id))
      updated <- job.fold(Future.successful(Option.empty[SyncJob])) { j =>
        if (System.currentTimeMillis() - j.timestamp > StaleJobTimeout.toMillis)
          removeSyncJob(j.id).map(_ => None) else Future.successful(Some(j))
      }
    } yield updated
  }

  override def listSyncJobs = syncStorageFuture.map(_.getJobs)

  override def updateSyncJob(id: SyncId)(updater: SyncJob => SyncJob) = syncStorageFuture.map(_.update(id)(updater))

  // Registers the job with the merger for its merge key (creating one if needed).
  private def updateMerger(job: SyncJob, storage: SyncStorage) =
    mergers.getOrElseUpdate(job.mergeKey, new SyncJobMerger(job.mergeKey, storage)).insert(job)

  // Attempts to merge the job into existing jobs with the same merge key.
  private def merge(job: SyncJob, storage: SyncStorage) =
    mergers.getOrElseUpdate(job.mergeKey, new SyncJobMerger(job.mergeKey, storage)).merge(job)

  // Propagates the new job's priority to every job it depends on.
  private def updateDeps(job: SyncJob, syncStorage: SyncStorage): Unit =
    job.dependsOn foreach { dep => updateSchedule(dep, job.priority, syncStorage) }

  // Recursively applies min(current, given) priority to a job and its dependencies.
  private def updateSchedule(id: SyncId, priority: Int, syncStorage: SyncStorage): Unit =
    syncStorage.update(id) { job =>
      job.copy(priority = math.min(job.priority, priority))
    } foreach { job =>
      job.dependsOn foreach { updateSchedule(_, priority, syncStorage) }
    }
}
object SyncContentUpdater {
  // Jobs older than this are dropped when looked up (see getSyncJob).
  val StaleJobTimeout = 1.day

  // Change events folded into the `syncJobs` signal.
  sealed trait Cmd {
    val job: SyncJob
  }
  case class Add(job: SyncJob) extends Cmd
  case class Del(job: SyncJob) extends Cmd
  case class Update(job: SyncJob) extends Cmd
}
| wireapp/wire-android-sync-engine | zmessaging/src/main/scala/com/waz/sync/queue/SyncContentUpdater.scala | Scala | gpl-3.0 | 6,870 |
package test
// IDE test fixture: the <caret> marker is consumed by the test harness and
// denotes the cursor position (it is not valid Scala source on its own).
// `foo` in traitSuper implements the abstract member declared in trait B.
trait B {
  def foo: Int
}

class traitSuper extends B {
  def <caret>foo: Int = 0
}
| ilinum/intellij-scala | testdata/supers/traits/traitSuper.scala | Scala | apache-2.0 | 100 |
package atari.st.disk.exceptions
/** Signals that parsed data did not have the expected format.
  * Message and cause are both optional (Java-interop style `null` defaults),
  * mirroring the `Exception(String, Throwable)` constructor.
  */
class InvalidFormatException(msg: String = null, cause: Throwable = null)
  extends Exception(msg, cause)
| suiryc/atari-st-tools | src/main/scala/atari/st/disk/exceptions/InvalidFormatException.scala | Scala | gpl-3.0 | 138 |
/*
* Copyright 2013-2021 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0.
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied.
*
* See the License for the specific language governing permissions
* and limitations under the License.
*/
package jetbrains.buildServer.sbtlogger
import sbt.Keys._
import sbt.jetbrains.buildServer.sbtlogger.apiAdapter._
import sbt.plugins.JvmPlugin
import sbt.{Def, _}
import scala.collection.mutable
object SbtTeamCityLogger extends AutoPlugin with (State => State) {
  override def requires: Plugins = JvmPlugin
  override def trigger: PluginTrigger = allRequirements

  // Applied as a State transformation: appends the plugin settings to every
  // project unless this JVM already went through a reload cycle (tracked via
  // the TEAMCITY_SBT_LOGGER_VERSION system property, see below).
  def apply(state: State): State = {
    val extracted = Project.extract(state)
    val sbtLoggerVersion = System.getProperty(TC_LOGGER_PROPERTY_NAME)
    if (sbtLoggerVersion == "reloaded") {
      state
    } else {
      extracted.structure.allProjectRefs.foldLeft(state)(append(SbtTeamCityLogger.projectSettings, extracted))
    }
  }

  // Appends the given settings to a single project and reapplies the session.
  private def append(settings: Seq[Setting[_]], extracted: Extracted)(state: State, projectRef: ProjectRef): State = {
    val scope = projectScope(projectRef)
    val appendSettings = transformSettings(scope, projectRef.build, extracted.rootProject, settings)
    reapply(Project.session(state).appendRaw(appendSettings), state)
  }

  // copied from sbt.internal.Load
  private def transformSettings(thisScope: Scope, uri: URI, rootProject: URI => String, settings: Seq[Setting[_]]): Seq[Setting[_]] =
    Project.transform(Scope.resolveScope(thisScope, uri, rootProject), settings)

  // copied from sbt.internal.SessionSettings
  private def reapply(session: SessionSettings, s: State): State =
    BuiltinCommands.reapply(session, Project.structure(s), s)

  // Shared appender/listener instances and the task keys used to bracket
  // compilation output in TeamCity service messages.
  lazy val tcLogAppender = new TCLogAppender()
  lazy val tcLoggers: mutable.Map[String, TCLogger] = collection.mutable.Map[String, TCLogger]()
  lazy val tcTestListener = new TCReportListener(tcLogAppender)
  lazy val startCompilationLogger: TaskKey[Unit] = TaskKey[Unit]("start-compilation-logger", "runs before compile")
  lazy val startTestCompilationLogger: TaskKey[Unit] = TaskKey[Unit]("start-test-compilation-logger", "runs before compile in test")
  lazy val endCompilationLogger: TaskKey[Unit] = TaskKey[Unit]("end-compilation-logger", "runs after compile")
  lazy val endTestCompilationLogger: TaskKey[Unit] = TaskKey[Unit]("end-test-compilation-logger", "runs after compile in test")
  lazy val tcEndCompilation: TaskKey[Unit] = TaskKey[Unit]("tc-end-compilation", "")
  lazy val tcEndTestCompilation: TaskKey[Unit] = TaskKey[Unit]("tc-end-test-compilation", "")

  // TeamCity is detected through the TEAMCITY_VERSION environment variable.
  val tcVersion: Option[String] = sys.env.get("TEAMCITY_VERSION")
  val tcFound: Boolean = tcVersion.isDefined
  val TC_LOGGER_PROPERTY_NAME = "TEAMCITY_SBT_LOGGER_VERSION"
  val tcLoggerVersion: String = System.getProperty(TC_LOGGER_PROPERTY_NAME)
  // Load-count state machine across sbt reloads in one JVM: null -> "loaded" -> "reloaded".
  if (tcLoggerVersion == null) {
    System.setProperty(TC_LOGGER_PROPERTY_NAME, "loaded")
  } else if (tcLoggerVersion == "loaded") {
    System.setProperty(TC_LOGGER_PROPERTY_NAME, "reloaded")
  }

  // Probe whether this sbt version provides `testResultLogger`; older versions
  // raise NoSuchMethodError when the key is referenced.
  var testResultLoggerFound = true
  try {
    val _: Def.Initialize[sbt.TestResultLogger] = Def.setting {
      (testResultLogger in Test).value
    }
  } catch {
    case _: java.lang.NoSuchMethodError =>
      testResultLoggerFound = false
  }

  //noinspection TypeAnnotation,ConvertExpressionToSAM
  override lazy val projectSettings = if (tcFound && testResultLoggerFound)
    loggerOnSettings ++ Seq(
      testResultLogger in(Test, test) := new TestResultLogger {
        import sbt.Tests._
        def run(log: Logger, results: Output, taskName: String): Unit = {
          //default behaviour there is
          //TestResultLogger.SilentWhenNoTests.run(log, results, taskName)
          //we will just ignore to prevent appearing of 'exit code 1' when test failed
        }
      }
    )
  else if (tcFound) loggerOnSettings
  else loggerOffSettings

  // Settings used when running under TeamCity: status command, extra logger per
  // scope, test listener, and tasks bracketing main/test compilation output.
  lazy val loggerOnSettings: Seq[Def.Setting[_]] = Seq(
    commands += tcLoggerStatusCommand,
    extraLoggers := {
      val currentFunction: Def.ScopedKey[_] => Seq[ExtraLogger] = extraLoggers.value
      key: ScopedKey[_] => {
        val scope: String = getScopeId(key.scope.project)
        val logger: ExtraLogger = extraLogger(tcLoggers, tcLogAppender, scope)
        logger +: currentFunction(key)
      }
    },
    testListeners += tcTestListener,
    startCompilationLogger := tcLogAppender.compilationBlockStart(getScopeId(streams.value.key.scope.project)),
    startTestCompilationLogger := tcLogAppender.compilationTestBlockStart(getScopeId(streams.value.key.scope.project)),
    endCompilationLogger := tcLogAppender.compilationBlockEnd(getScopeId(streams.value.key.scope.project)),
    endTestCompilationLogger := tcLogAppender.compilationTestBlockEnd(getScopeId(streams.value.key.scope.project)),
    compile in Compile := ((compile in Compile) dependsOn startCompilationLogger).value,
    compile in Test := ((compile in Test) dependsOn startTestCompilationLogger).value,
    tcEndCompilation := (endCompilationLogger triggeredBy (compile in Compile)).value,
    tcEndTestCompilation := (endTestCompilationLogger triggeredBy (compile in Test)).value
  ) ++
    inConfig(Compile)(Seq(reporterSettings(tcLogAppender))) ++
    inConfig(Test)(Seq(reporterSettings(tcLogAppender)))

  // Outside TeamCity only the status command is installed.
  lazy val loggerOffSettings: Seq[Def.Setting[_]] = Seq(
    commands += tcLoggerStatusCommand
  )

  // `sbt-teamcity-logger` console command: reports whether the plugin is active.
  def tcLoggerStatusCommand: Command = Command.command("sbt-teamcity-logger") {
    state => doCommand(state)
  }

  private def doCommand(state: State): State = {
    println("Plugin sbt-teamcity-logger was loaded.")
    val tcv = tcVersion.getOrElse("undefined")
    if (tcFound) {
      println(s"TeamCity version='$tcv'")
    } else {
      println(s"TeamCity was not discovered. Logger was switched off.")
    }
    state
  }

  // Identifies a project scope by hash code; used to correlate log blocks per project.
  private def getScopeId(scope: ScopeAxis[sbt.Reference]):String = {
    "" + scope.hashCode()
  }
}
| JetBrains/sbt-tc-logger | logger/src/main/scala/jetbrains/buildServer/sbtlogger/SbtTeamCityLogger.scala | Scala | apache-2.0 | 6,308 |
/*
* Copyright 2015 Niklas Grossmann
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.n12n.momo
import java.net.InetSocketAddress
import akka.actor._
import akka.io.{Tcp, IO}
import net.n12n.momo.couchbase.MetricActor
class TcpReceiverActor(metricActor: ActorRef, configPath: String,
    val parseMetric: ReceiverActor.MetricParser)
  extends Actor with ActorLogging {
  import Tcp._
  import context.system

  // Listen address and port are read from the actor system's config under `configPath`.
  val address = system.settings.config.getString(s"${configPath}.listen-address")
  val port = system.settings.config.getInt(s"${configPath}.port")

  // Ask the TCP extension to bind; Bound/Connected events arrive in `receive`.
  IO(Tcp) ! Tcp.Bind(self, new InetSocketAddress(address, port))
  log.info("Binding to TCP {}:{}", address, port)

  def receive = {
    case Bound(boundAddress) =>
      log.info("TcpReceiverActor actor bound to {}", boundAddress)
    case Connected(remote, local) =>
      // Fixed log-message typo: "form" -> "from".
      log.info("TcpReceiver got connection from {}", remote)
      // Spawn one connection handler per peer and register it for incoming data.
      val handler = context.actorOf(Props(classOf[TcpConnectionActor],
        metricActor, parseMetric))
      sender() ! Register(handler)
  }
}
class TcpConnectionActor(metricActor: ActorRef,
    val parseMetric: ReceiverActor.MetricParser)
  extends Actor with ActorLogging with ReceiverActor {
  import Tcp._

  /** Parses each received chunk and forwards every metric point to the metric actor;
    * stops itself when the peer closes the connection. */
  override def receive = {
    case Received(data) =>
      for (point <- parseMetrics(data))
        metricActor ! MetricActor.Save(point)
    case PeerClosed =>
      context stop self
  }
}
| ngrossmann/momo | src/main/scala/net/n12n/momo/TcpReceiverActor.scala | Scala | apache-2.0 | 1,939 |
package chat.tox.antox.utils
// Enumeration of storage locations: NONE, INTERNAL, or EXTERNAL.
object StorageType extends Enumeration {
  // Type alias so values can be referenced as StorageType.StorageType.
  type StorageType = Value
  val NONE, INTERNAL, EXTERNAL = Value
} | wiiam/Antox | app/src/main/scala/chat/tox/antox/utils/StorageType.scala | Scala | gpl-3.0 | 138 |
package com.mesosphere.cosmos.thirdparty.adminrouter.circe
import com.mesosphere.cosmos.thirdparty.adminrouter.model.DcosVersion
import com.mesosphere.universe.v3.circe.Decoders._
import com.mesosphere.universe.v3.model.DcosReleaseVersion
import io.circe.{Decoder, HCursor}
object Decoders {
  /** Decodes a [[DcosVersion]] from admin router's version JSON: the "version",
    * "dcos-image-commit" and "bootstrap-id" fields. Fails on the first field
    * that is missing or of the wrong type. */
  implicit val decodeDcosVersion: Decoder[DcosVersion] = Decoder.instance { cursor =>
    for {
      version <- cursor.downField("version").as[DcosReleaseVersion]
      imageCommit <- cursor.downField("dcos-image-commit").as[String]
      bootstrapId <- cursor.downField("bootstrap-id").as[String]
    } yield DcosVersion(version, imageCommit, bootstrapId)
  }
}
| movicha/cosmos | cosmos-json/src/main/scala/com/mesosphere/cosmos/thirdparty/adminrouter/circe/Decoders.scala | Scala | apache-2.0 | 627 |
package test.endtoend.auctionsniper
import auctionsniper.ui.MainWindow.{NEW_ITEM_ID_NAME, NEW_ITEM_STOP_PRICE_NAME}
import com.objogate.wl.swing.matcher.IterableComponentsMatcher.matching
import com.objogate.wl.swing.matcher.JLabelTextMatcher.withLabelText
import java.lang.String.valueOf
import javax.swing.{JButton, JTextField}
import javax.swing.table.JTableHeader
import com.objogate.wl.swing.AWTEventQueueProber
import com.objogate.wl.swing.driver.{JButtonDriver, JFrameDriver, JTableDriver, JTableHeaderDriver, JTextFieldDriver, ComponentDriver}
import com.objogate.wl.swing.gesture.GesturePerformer
import auctionsniper.ui.MainWindow
class AuctionSniperDriver(timeoutMillis: Int)
  extends JFrameDriver(
    new GesturePerformer(),
    JFrameDriver.topLevelFrame(
      ComponentDriver.named(MainWindow.MAIN_WINDOW_NAME),
      ComponentDriver.showingOnScreen()),
    new AWTEventQueueProber(timeoutMillis, 100)) {

  // Note: deprecated procedure syntax (`def f() { ... }`) replaced with
  // explicit `: Unit =` throughout; behavior is unchanged.

  /** Asserts the sniper table shows the expected column headers. */
  def hasColumnTitles(): Unit = {
    val headers = new JTableHeaderDriver(this, classOf[JTableHeader])
    headers.hasHeaders(
      matching(withLabelText("Item"), withLabelText("Last Price"),
        withLabelText("Last Bid"), withLabelText("State")))
  }

  /** Asserts the table contains a row with the given item, prices and status. */
  def showsSniperStatus(itemId: String, lastPrice: Int, lastBid: Int, statusText: String): Unit = {
    val table = new JTableDriver(this)
    table.hasRow(
      matching(withLabelText(itemId), withLabelText(valueOf(lastPrice)),
        withLabelText(valueOf(lastBid)), withLabelText(statusText)))
  }

  /** Fills in the item id and stop price fields, then clicks the join button. */
  def startBiddingWithStopPrice(itemId: String, stopPrice: Int): Unit = {
    textField(NEW_ITEM_ID_NAME).replaceAllText(itemId)
    textField(NEW_ITEM_STOP_PRICE_NAME).replaceAllText(stopPrice.toString)
    bidButton().click()
  }

  // Focuses and returns the driver for the named text field.
  private def textField(fieldName: String): JTextFieldDriver = {
    val newItemId = new JTextFieldDriver(this, classOf[JTextField], ComponentDriver.named(fieldName))
    newItemId.focusWithMouse()
    newItemId
  }

  // Driver for the "join auction" button.
  private def bidButton(): JButtonDriver =
    new JButtonDriver(this, classOf[JButton], ComponentDriver.named(MainWindow.JOIN_BUTTON_NAME))
}
| sptz45/goos-scala | test/end-to-end/test/endtoend/auctionsniper/AuctionSniperDriver.scala | Scala | apache-2.0 | 2,042 |
package ua.nure.lab5
import java.io.File
import akka.actor.{Actor, ActorIdentity, ActorRef, ActorSystem, Props, Terminated}
import scala.collection.mutable
import scala.io.Source
/**
* Class Matchers implementation.
*
* @author Bohdan_Suprun
*/
object Matchers {
  /** Entry point: walks a directory tree and reports files whose lines contain
    * the given pattern. The root directory and pattern may now be supplied as
    * program arguments; when absent, the original hard-coded defaults are used
    * (previously `args` was ignored entirely and the path was machine-specific).
    */
  def main(args: Array[String]): Unit = {
    val root = if (args.length > 0) args(0) else "C:/Users/Bohdan_Suprun/git/spp/src"
    val pattern = if (args.length > 1) args(1) else "class"
    val system = ActorSystem()
    val actor = system.actorOf(Props[SubTreeWalkerActor])
    actor ! (root, pattern)
  }
}
class MatcherDetectorActor extends Actor {

  // Set via the ActorIdentity("collector", ...) message before any file is processed.
  private var collector: ActorRef = _

  override def receive: PartialFunction[Any, Unit] = {
    case (file: File, regExp: String) =>
      // Count the lines containing the pattern; the original leaked the Source,
      // so the underlying stream is now always closed.
      val source = Source.fromFile(file)
      val cnt =
        try source.getLines.count(_.contains(regExp))
        finally source.close()
      context.stop(self)
      collector ! (file.getAbsolutePath, cnt)
    case ActorIdentity("collector", actor) => collector = actor.get
  }
}
class SubTreeWalkerActor extends Actor {

  private var childTerminated: Int = 0
  // Number of matcher actors spawned for the current walk. The original code
  // compared childTerminated against context.children.size, which is wrong:
  // that count includes the collector and shrinks as watched children die,
  // so the "finish" message fired before all matchers were done.
  private var matchersSpawned: Int = 0
  private val collector: ActorRef = context.actorOf(Props[CollectorActor])

  override def receive: PartialFunction[Any, Unit] = {
    case (root: String, regExp: String) =>
      for (file <- recursiveListFiles(new File(root)) if file.isFile) {
        val matcher = context.actorOf(Props[MatcherDetectorActor])
        context.watch(matcher)
        matchersSpawned += 1
        matcher ! ActorIdentity("collector", Option(collector))
        matcher ! (file, regExp)
      }
    case Terminated(_) =>
      childTerminated += 1
      // All matchers reported: tell the collector to print and shut everything down.
      if (childTerminated >= matchersSpawned) {
        collector ! "finish"
        context.stop(self)
        context.system.shutdown()
      }
  }

  /** Lists `f` and everything below it. Tolerates unreadable directories:
    * File.listFiles returns null on I/O error, which previously caused an NPE. */
  def recursiveListFiles(f: File): Array[File] = {
    val these = Option(f.listFiles).getOrElse(Array.empty[File])
    these ++ these.filter(_.isDirectory).flatMap(recursiveListFiles)
  }
}
class CollectorActor extends Actor {

  // Absolute paths of files that contained at least one match.
  val set: mutable.Set[String] = mutable.HashSet()

  override def receive: PartialFunction[Any, Unit] = {
    case (file: String, cnt: Int) =>
      // Only remember files with at least one matching line.
      if (cnt > 0) set += file
    case "finish" =>
      println("Finished [")
      set foreach println
      println("]")
      context.stop(self)
  }
}
}
| isCompetent/spp | src/main/scala/ua/nure/lab5/Matchers.scala | Scala | mit | 2,233 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.logical
import org.apache.flink.table.api.config.OptimizerConfigOptions
import org.apache.flink.table.expressions.Expression
import org.apache.flink.table.planner.calcite.FlinkContext
import org.apache.flink.table.planner.expressions.converter.ExpressionConverter
import org.apache.flink.table.planner.plan.schema.{FlinkRelOptTable, TableSourceTable}
import org.apache.flink.table.planner.plan.stats.FlinkStatistic
import org.apache.flink.table.planner.plan.utils.{FlinkRelOptUtil, RexNodeExtractor}
import org.apache.flink.table.sources.FilterableTableSource
import org.apache.calcite.plan.RelOptRule.{none, operand}
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall}
import org.apache.calcite.rel.`type`.RelDataTypeFactory
import org.apache.calcite.rel.core.Filter
import org.apache.calcite.rel.logical.LogicalTableScan
import java.util
import java.util.TimeZone
import scala.collection.JavaConversions._
/**
* Planner rule that tries to push a filter into a [[FilterableTableSource]].
*/
class PushFilterIntoTableSourceScanRule extends RelOptRule(
  operand(classOf[Filter],
    operand(classOf[LogicalTableScan], none)),
  "PushFilterIntoTableSourceScanRule") {

  // Fires only when predicate push-down is enabled, the filter has a condition,
  // and the scan's source is a FilterableTableSource that has not had filters
  // pushed yet (this also prevents the rule from matching its own output).
  override def matches(call: RelOptRuleCall): Boolean = {
    val config = call.getPlanner.getContext.asInstanceOf[FlinkContext].getTableConfig
    if (!config.getConfiguration.getBoolean(
      OptimizerConfigOptions.TABLE_OPTIMIZER_SOURCE_PREDICATE_PUSHDOWN_ENABLED)) {
      return false
    }
    val filter: Filter = call.rel(0)
    if (filter.getCondition == null) {
      return false
    }
    val scan: LogicalTableScan = call.rel(1)
    scan.getTable.unwrap(classOf[TableSourceTable[_]]) match {
      case table: TableSourceTable[_] =>
        table.tableSource match {
          case source: FilterableTableSource[_] => !source.isFilterPushedDown
          case _ => false
        }
      case _ => false
    }
  }

  override def onMatch(call: RelOptRuleCall): Unit = {
    val filter: Filter = call.rel(0)
    val scan: LogicalTableScan = call.rel(1)
    val table: FlinkRelOptTable = scan.getTable.asInstanceOf[FlinkRelOptTable]
    pushFilterIntoScan(call, filter, scan, table)
  }

  // Splits the filter condition into convertible Expressions and unconvertible
  // RexNodes, hands the former to the source, and rebuilds the scan plus a
  // residual Filter for whatever the source did not consume.
  private def pushFilterIntoScan(
      call: RelOptRuleCall,
      filter: Filter,
      scan: LogicalTableScan,
      relOptTable: FlinkRelOptTable): Unit = {
    val relBuilder = call.builder()
    val functionCatalog = call.getPlanner.getContext.asInstanceOf[FlinkContext].getFunctionCatalog
    val maxCnfNodeCount = FlinkRelOptUtil.getMaxCnfNodeCount(scan)
    val (predicates, unconvertedRexNodes) =
      RexNodeExtractor.extractConjunctiveConditions(
        filter.getCondition,
        maxCnfNodeCount,
        filter.getInput.getRowType.getFieldNames,
        relBuilder.getRexBuilder,
        functionCatalog,
        TimeZone.getTimeZone(scan.getCluster.getPlanner.getContext
          .asInstanceOf[FlinkContext].getTableConfig.getLocalTimeZone))
    if (predicates.isEmpty) {
      // no condition can be translated to expression
      return
    }
    // Mutable list: the source removes the predicates it accepts (see applyPredicate).
    val remainingPredicates = new util.LinkedList[Expression]()
    predicates.foreach(e => remainingPredicates.add(e))
    val newRelOptTable = applyPredicate(remainingPredicates, relOptTable, relBuilder.getTypeFactory)
    val newScan = new LogicalTableScan(scan.getCluster, scan.getTraitSet, newRelOptTable)
    // check whether framework still need to do a filter
    if (remainingPredicates.isEmpty && unconvertedRexNodes.isEmpty) {
      call.transformTo(newScan)
    } else {
      relBuilder.push(scan)
      val converter = new ExpressionConverter(relBuilder)
      val remainingConditions = remainingPredicates.map(_.accept(converter)) ++ unconvertedRexNodes
      val remainingCondition = remainingConditions.reduce((l, r) => relBuilder.and(l, r))
      val newFilter = filter.copy(filter.getTraitSet, newScan, remainingCondition)
      call.transformTo(newFilter)
    }
  }

  // Applies the predicates to the filterable source and returns a new table
  // wrapping the resulting source. Table stats are dropped when anything was
  // actually pushed (they no longer describe the filtered source).
  private def applyPredicate(
      predicates: util.List[Expression],
      relOptTable: FlinkRelOptTable,
      typeFactory: RelDataTypeFactory): FlinkRelOptTable = {
    val originPredicatesSize = predicates.size()
    val tableSourceTable = relOptTable.unwrap(classOf[TableSourceTable[_]])
    val filterableSource = tableSourceTable.tableSource.asInstanceOf[FilterableTableSource[_]]
    val newTableSource = filterableSource.applyPredicate(predicates)
    val updatedPredicatesSize = predicates.size()
    val statistic = tableSourceTable.statistic
    val newStatistic = if (originPredicatesSize == updatedPredicatesSize) {
      // Keep all Statistics if no predicates can be pushed down
      statistic
    } else if (statistic == FlinkStatistic.UNKNOWN) {
      statistic
    } else {
      // Remove tableStats after predicates pushed down
      FlinkStatistic.builder().statistic(statistic).tableStats(null).build()
    }
    val newTableSourceTable = new TableSourceTable(
      newTableSource, tableSourceTable.isStreamingMode, newStatistic)
    relOptTable.copy(newTableSourceTable, tableSourceTable.getRowType(typeFactory))
  }
}
object PushFilterIntoTableSourceScanRule {
  // Singleton used when registering the rule with the planner.
  val INSTANCE: RelOptRule = new PushFilterIntoTableSourceScanRule
}
| fhueske/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/rules/logical/PushFilterIntoTableSourceScanRule.scala | Scala | apache-2.0 | 6,064 |
package org.machine.engine.communication.routes
import akka.http.javadsl.model.HttpHeader;
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.server.Directives
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.directives.Credentials
import akka.http.scaladsl.server.{Directive0, Directive1}
import akka.http.scaladsl.server.Route
import com.softwaremill.session._
import com.softwaremill.session.SessionDirectives._
import com.softwaremill.session.SessionOptions._
import com.softwaremill.session.SessionResult._
import com.typesafe.config._
import com.typesafe.scalalogging.{LazyLogging}
import java.util.Optional;
import org.machine.engine.authentication.PasswordTools
import org.machine.engine.communication.SessionBroker
import org.machine.engine.communication.headers.UserSession
import org.machine.engine.communication.services._
import org.machine.engine.graph.Neo4JHelper
import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Try, Success, Failure};
object IdentityServiceRouteBuilder extends Directives
with LazyLogging
with LoginUserServiceJsonSupport
with UserServiceJsonSupport{
  private val config = ConfigFactory.load()
  // Header names used by akka-http-session to read/send the session token.
  private val SESSION_REQUEST_HEADER = config.getString("akka.http.session.header.get-from-client-name")
  private val SESSION_RESPONSE_HEADER = config.getString("akka.http.session.header.send-to-client-name")
  private val sessionBroker:SessionBroker = SessionBroker.getInstance
  // Required implicitly by the session directives (setSession/requiredSession/...).
  implicit val sessionManager = sessionBroker.sessionManagerInstance
  /** Builds the identity-service routes: user creation, login (issues a session
    * token in a response header), logout (invalidates it), and a `protected`
    * probe endpoint requiring an active session. All routes sit behind HTTP
    * Basic auth for the service account. */
  def buildRoutes():Route = {
    val routes = {
      /*
      Basic Auth is used from a service account perspective.
      User authentication is provided in the body of the requests.
      */
      authenticateBasic(realm = "Engine User Service", authenticator){ user =>
        authorize(hasRights(user)){
          path("users"){
            /* TODO: Just for testing. Remove after the API is done. - SDH */
            get{
              complete(StatusCodes.OK, "Hello World\\n")
            }~
            post{
              entity(as[CreateUser]){ newUserRequest =>
                createNewUser(newUserRequest){ newUser =>
                  complete(StatusCodes.OK, newUser)
                }
              }
            }
          }~
          // Authenticate a user and return a session ID.
          path("login"){
            post{
              entity(as[LoginRequest]){ loginRequest =>
                attemptLogin(loginRequest){ loginResponse =>
                  // saveSession persists the freshly set session server-side.
                  saveSession{
                    setSession(oneOff, usingHeaders,
                      UserSession(loginRequest.userName,
                        loginResponse.userId,
                        sessionBroker.createSessionId(),
                        Neo4JHelper.time)){
                      complete(StatusCodes.OK, UserLoginResponse(loginResponse.userId))
                    }
                  }
                }
              }
            }
          }~
          path("logout"){
            get{
              requiredSession(oneOff, usingHeaders){ session =>
                invalidateSession(oneOff, usingHeaders){
                  headerValueByName(SESSION_REQUEST_HEADER){ session =>
                    logoutTheUser(session){
                      complete(StatusCodes.OK)
                    }
                  }
                }
              }
            }
          }~
          path("protected"){
            get{
              requiredSession(oneOff, usingHeaders){ session =>
                headerValueByName(SESSION_REQUEST_HEADER){ session =>
                  requireActiveSession(session) {
                    complete(StatusCodes.OK)
                  }
                }
              }
            }
          }
        }
      }
    }
    return routes;
  }
/*
This route will attempt to save the session that was created in the inner route.
*/
private def saveSession(innerRoutes: => Route): Route = mapResponse(saveActiveUserSession)(innerRoutes);
/*
Communicates with an Akka Actor that saves the session associated with the user.
*/
  // Decodes the session token from the outgoing response header (if present)
  // and asks the broker to persist the session; the response itself is
  // returned unchanged in every case.
  private def saveActiveUserSession(response: HttpResponse): HttpResponse = {
    val sessionHeader:Optional[HttpHeader] = response.getHeader(SESSION_RESPONSE_HEADER)
    if(sessionHeader.isPresent()){
      //Assume's that the string is of the format: Header Name + Space + Encoded JWT Token
      val sessionTokenStr:String = sessionHeader.get().toString();
      val tokens = sessionTokenStr.split(" ")
      val decodeAttempt:SessionResult[UserSession] = sessionBroker.decodeToken(tokens.last)
      /*
      TODO: The error conditions should not just return the response but rather
      force the service to return a 500 or something...
      */
      (decodeAttempt: @unchecked) match {
        case Decoded(session) => {
          logger.debug("Successfully decoded the session header.")
          sessionBroker.brokerSavingUserSession(session)
        }
        case Expired => {
          logger.error("The user's token is expired.")
        }
        case Corrupt(exc) => {
          logger.error("The user's session token is corrupt.")
        }
        case _ => logger.error("An unexpected response occured when attempting to decode the token.")
      }
    }
    return response;
  }
/**
 * Basic-Auth check for the service account: the credentials are accepted only when
 * the supplied secret matches the configured identity-service password.
 */
private def authenticator(credentials: Credentials): Option[String] =
  credentials match {
    case provided @ Credentials.Provided(id)
        if provided.verify(config.getString("engine.communication.identity_service.password")) =>
      Some(id)
    case _ =>
      None
  }
/**
 * Authorization check: only the configured identity-service account may use these routes.
 *
 * @param user the identity established by Basic Auth.
 * @return true when the user matches the registered service account.
 */
private def hasRights(user: String): Boolean =
  config.getString("engine.communication.identity_service.user") == user
/**
 * Directive that passes only when the presented session token decodes cleanly AND the
 * session is still known to the broker; otherwise completes with 401 Unauthorized.
 *
 * The token may be prefixed (e.g. "Bearer <jwt>"), so only the last space-separated
 * segment is decoded. The unused type parameter [T] on the original signature has been
 * dropped; callers never supplied it explicitly.
 */
private def requireActiveSession(sessionToken: String): Directive0 = {
  val tokens = sessionToken.split(" ")
  val decodeAttempt: SessionResult[UserSession] = sessionBroker.decodeToken(tokens.last)
  (decodeAttempt: @unchecked) match {
    case Decoded(session) =>
      logger.debug("Successfully decoded the session header.")
      // Note: The akka-http-session framework is enforcing the token expiration.
      // So we're not checking it ourselves.
      if (sessionBroker.verifyTheTokenExists(session)) pass
      else complete(StatusCodes.Unauthorized)
    case Expired =>
      logger.debug("The user's token is expired.")
      complete(StatusCodes.Unauthorized)
    case Corrupt(exc) =>
      logger.error("The user's session token is corrupt.")
      complete(StatusCodes.Unauthorized)
    case _ =>
      logger.error("An unexpected response occured when attempting to decode the token.")
      complete(StatusCodes.Unauthorized)
  }
}
/**
 * Directive that asks the broker to create a new user and provides the created user
 * to the inner route; completes with 500 when the broker answers anything else.
 */
private def createNewUser(newUserRequest: CreateUser): Directive1[NewUser] =
  sessionBroker.brokerCreatingNewUser(newUserRequest) match {
    case userResponse: NewUserResponse => provide(userResponse.newUser)
    case _ => complete(StatusCodes.InternalServerError)
  }
/**
 * Directive that attempts a user login through the broker and provides the login
 * response on HTTP 200; maps 401 to Unauthorized and anything else to 500.
 */
private def attemptLogin(request: LoginRequest): Directive1[LoginResponse] = {
  val loginResult = sessionBroker.brokerUserLogin(request)
  loginResult.status match {
    case 200 => provide(loginResult)
    case 401 => complete(StatusCodes.Unauthorized)
    case _   => complete(StatusCodes.InternalServerError)
  }
}
/**
 * Directive that decodes the presented session token and asks the broker to log the
 * user out. Expired and corrupt tokens complete with 401 Unauthorized.
 */
private def logoutTheUser(sessionToken: String):Directive0 = {
  // The token may or may not have "Bearer" before it; only the last segment is decoded.
  val tokens = sessionToken.split(" ")
  val decodeAttempt:SessionResult[UserSession] = sessionBroker.decodeToken(tokens.last);
  (decodeAttempt: @unchecked) match {
    case Decoded(session) => {
      logger.debug("Successfully decoded the session header.")
      sessionBroker.brokerloggingOutTheUser(session.userId)
      pass
    }
    case Expired => {
      logger.debug("The user's token is expired.")
      complete(StatusCodes.Unauthorized)
    }
    case Corrupt(exc) => {
      logger.error("The user's session token is corrupt.")
      complete(StatusCodes.Unauthorized)
    }
    case _ => {
      logger.error("An unexpected response occured when attempting to decode the token.")
      // NOTE(review): unlike requireActiveSession, an unexpected decode result lets the
      // request through (pass) rather than completing 401 — confirm this is intentional.
      pass
    }
  }
}
}
| sholloway/graph-engine | src/main/scala/org/machine/engine/communication/routes/IdentityServiceRouteBuilder.scala | Scala | mit | 8,462 |
package scala.meta.internal.javacp
/**
 * Minimal utility to resolve generic signature type variables to fully qualified symbols.
 *
 * Scopes are immutable: [[enter]] produces a new scope and never mutates its receiver.
 *
 * @param bindings Map from type variable names to their resolved symbols.
 */
class Scope(bindings: Map[String, String]) {

  /** Resolve a type variable name to a symbol */
  def resolve(name: String): String =
    bindings.get(name) match {
      case Some(symbol) => symbol
      case None =>
        // FIXME: fix https://github.com/scalameta/scalameta/issues/1365
        // There are still a handful of cases in spark-sql where resolution fails for some reason.
        name
    }

  /** Returns new scope where name resolves to symbol, shadowing previous binding of name if any */
  def enter(name: String, symbol: String): Scope =
    new Scope(bindings + (name -> symbol))
}

object Scope {
  /** A scope with no bindings at all: every name resolves to itself. */
  val empty: Scope = new Scope(Map.empty)
}
| olafurpg/scalameta | semanticdb/metacp/src/main/scala/scala/meta/internal/javacp/Scope.scala | Scala | bsd-3-clause | 834 |
package ru.makkarpov.scalingua.pofile.parse
import java_cup.runtime.ComplexSymbolFactory.Location
/**
 * Signals a PO-file parse failure located between `left` and `right`; the exception
 * message is rendered from the left location as "at unit:line:column: msg".
 */
case class ParserException(left: Location, right: Location, msg: String)
  extends RuntimeException(s"at ${left.getUnit}:${left.getLine}:${left.getColumn}: $msg")
| makkarpov/scalingua | scalingua/shared/src/main/scala/ru/makkarpov/scalingua/pofile/parse/ParserException.scala | Scala | apache-2.0 | 266 |
package webcrank
package data
import scalaz._, Scalaz._
/** http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.1 */
sealed trait Method

object Method {
  // The eight request methods enumerated by RFC 2616 section 5.1.1.
  case object Options extends Method
  case object Get extends Method
  case object Head extends Method
  case object Post extends Method
  case object Put extends Method
  case object Delete extends Method
  case object Trace extends Method
  case object Connect extends Method
  // RFC 2616 also permits any token as an "extension-method".
  case class ExtensionMethod(token: String) extends Method

  // scalaz instances: Show renders via toString, Equal compares by universal equality.
  implicit val MethodShow =
    Show.showFromToString[Method]

  implicit val MethodEqual =
    Equal.equalA[Method]
}
| webcrank/webcrank.scala | src/main/scala/webcrank/data/Method.scala | Scala | bsd-3-clause | 623 |
package wakfutcp.protocol
import java.nio.ByteBuffer
import akka.util.{ByteString, ByteStringBuilder}
/** A serialisable part identified by a single tag byte. */
trait BinarSerialPart {
  // Tag byte used by BinarSerialCodec to select the concrete codec for this part.
  def id: Byte
}
/**
 * Codec for a sequence of tagged parts serialised with a small index header:
 * one byte of part count, then per part a one-byte id and a four-byte absolute
 * offset, followed by the concatenated part payloads. Each payload is preceded
 * by its id byte at the recorded offset.
 */
trait BinarSerialCodec[B <: BinarSerialPart] extends Codec[Seq[_ <: B]] {
  import Encoder.DefaultOrder

  /** Resolve the concrete part codec for a given tag byte. */
  def fromId(id: Byte): Codec[_ <: B]

  final override def decode(bb: ByteBuffer): Seq[_ <: B] = {
    // Read the header: part count, then (id, absolute offset) pairs.
    val parts = Seq.fill(bb.get)((bb.get, bb.getInt))
    parts.zipWithIndex.map {
      case ((idx, offset), i) =>
        // A payload runs from just after its id byte up to the next part's
        // offset (or the end of the buffer for the last part); the "- 1"
        // accounts for the id byte stored at `offset`.
        val size =
          if (i < parts.length - 1) {
            parts(i + 1)._2 - offset - 1
          } else {
            bb.limit - offset - 1
          }
        bb.position(offset + 1)
        val slice = bb.slice()
        slice.limit(size)
        fromId(idx).decode(slice)
    }
  }

  @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf"))
  final override def encode(
    builder: ByteStringBuilder,
    t: Seq[_ <: B]
  ): ByteStringBuilder = {
    builder.putByte(t.length.toByte)
    // Header is 1 count byte plus 5 bytes (id + offset) per part.
    val headerSize = 1 + 5 * t.length
    val content = ByteString.newBuilder
    t.map { part =>
      // Write "id byte + payload" into the content buffer, remembering where it started.
      val offset = content.length
      val codec = fromId(part.id)
      content.putByte(part.id)
      codec.asInstanceOf[Codec[B]].encode(content, part)
      (part.id, offset)
    }.foreach {
      case (idx, offset) =>
        // Header offsets are absolute within the final message (header + content offset).
        builder.putByte(idx)
        builder.putInt(headerSize + offset)
    }
    builder ++= content.result()
  }
}
object BinarSerialCodec {
  /** Summon the implicit codec instance for T. */
  @inline def apply[T <: BinarSerialPart: BinarSerialCodec]: BinarSerialCodec[T] =
    implicitly[BinarSerialCodec[T]]
}
| OpenWakfu/wakfutcp | protocol/src/main/scala/wakfutcp/protocol/BinarSerialCodec.scala | Scala | mit | 1,564 |
package edu.mayo.hadoop.commons.examples
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import edu.mayo.hadoop.commons.hbase.AutoConfigure
import org.apache.hadoop.hbase.client.HBaseAdmin
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.HTableDescriptor
import org.apache.hadoop.hbase.HColumnDescriptor
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.client.ConnectionFactory
import org.apache.hadoop.hbase.client.Put
import edu.mayo.hadoop.commons.minicluster.MiniClusterUtil
object HBaseTest {
  /**
   * Smoke test: connects to HBase using the mini-cluster configuration, creates and
   * populates the "spark-test" table when it is missing, then counts its rows through
   * a Spark newAPIHadoopRDD.
   *
   * Fixes vs. the original: the loop that populated the table opened a SECOND
   * Connection and never closed it, and the Table handle and outer Connection
   * were never closed either.
   */
  def main(args: Array[String]) {
    println("Hello, world\\n\\n\\n\\n\\n\\n")
    val sparkConf = new SparkConf().setAppName("HBaseTest").setMaster("local")
    val sc = new SparkContext(sparkConf)
    val tableName = TableName.valueOf("spark-test")
    val configuration = AutoConfigure.getConfiguration()
    configuration.set(TableInputFormat.INPUT_TABLE, tableName.getNameAsString())
    val connection = ConnectionFactory.createConnection(configuration)
    HBaseAdmin.checkHBaseAvailable(configuration)
    val admin = connection.getAdmin()
    if (!admin.isTableAvailable(tableName)) {
      val tableDesc = new HTableDescriptor(tableName)
      tableDesc.addFamily(new HColumnDescriptor("column-family"))
      admin.createTable(tableDesc)
      // Add 100 rows, reusing the existing connection and closing the table handle.
      val table = connection.getTable(tableName)
      try {
        for (i <- 1 to 100) {
          val put = new Put(Bytes.toBytes(i))
          put.addColumn(Bytes.toBytes("column-family"), Bytes.toBytes("column"), Bytes.toBytes(i))
          table.put(put)
        }
      } finally {
        table.close()
      }
    }
    val hBaseRDD = sc.newAPIHadoopRDD(configuration, classOf[TableInputFormat],
      classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
      classOf[org.apache.hadoop.hbase.client.Result])
    println("\\n\\n\\n\\n\\nFound " + hBaseRDD.count() + " rows \\n\\n\\n\\n\\n\\n")
    sc.stop()
    admin.close()
    connection.close()
    println("\\n\\n\\n\\n\\n\\nAll ur HBase are belong to us!!!\\n\\n\\n\\n\\n\\n")
    MiniClusterUtil.stopAll()
    System.exit(0)
  }
}
| drachimera/GenomicWarehouse | hadoop-commons/src/main/scala/edu/mayo/hadoop/commons/examples/HBaseTest.scala | Scala | apache-2.0 | 2,167 |
package uk.gov.gds.ier.form
import uk.gov.gds.ier.test.UnitTestSuite
import uk.gov.gds.ier.validation.{ErrorMessages, FormKeys}
import uk.gov.gds.ier.model.{HasAddressOption, PartialManualAddress, PartialAddress, LastAddress}
import uk.gov.gds.ier.transaction.forces.confirmation.ConfirmationForms
import play.api.data.Form
import uk.gov.gds.ier.transaction.ordinary.address.AddressForms
import uk.gov.gds.ier.serialiser.WithSerialiser
import uk.gov.gds.ier.transaction.forces.InprogressForces
import uk.gov.gds.ier.transaction.ordinary.InprogressOrdinary
/**
 * Test AddressHelpers utility class against multiple forms:
 * the Forces confirmation form and the Ordinary manual-address form.
 */
class AddressHelpersTests extends UnitTestSuite {
  val helpers = new AddressHelpers {}
  val formKeys = new FormKeys {}
  import formKeys._

  // Use composition rather then inheritance to get Play Form instances
  val manualAddressForm = new AddressForms with ErrorMessages with FormKeys with WithSerialiser {
    val serialiser = null
  }.manualAddressForm

  val confirmationForm = new ConfirmationForms {
    val serialiser = null
    val addressService = null
  }.confirmationForm

  behavior of "AddressHelpers.manualAddressToOneLine with Forces ConfirmationForm"

  it should "return all elements of address separated by comma for form with full manual address" in {
    val partiallyFilledForm = confirmationForm.fillAndValidate(InprogressForces(
      address = Some(LastAddress(
        hasAddress = Some(HasAddressOption.YesAndLivingThere),
        address = Some(PartialAddress(
          addressLine = None,
          uprn = None,
          postcode = "AB12 3CD",
          manualAddress = Some(PartialManualAddress(
            lineOne = Some("Unit 4, Elgar Business Centre"),
            lineTwo = Some("Moseley Road"),
            lineThree = Some("Hallow"),
            city = Some("Worcester")))
        ))
      ))
    ))

    // local variable names matter!
    // if you try to rename 'result' to 'manualAddress' Scala compiler implodes with internal error
    val result = helpers.manualAddressToOneLine(
      partiallyFilledForm,
      keys.address.address.manualAddress)
    result should be(Some("Unit 4, Elgar Business Centre, Moseley Road, Hallow, Worcester"))
  }

  it should "return address without comma for form with manual address of just one line" in {
    val partiallyFilledForm = confirmationForm.fillAndValidate(InprogressForces(
      address = Some(LastAddress(
        hasAddress = Some(HasAddressOption.YesAndLivingThere),
        address = Some(PartialAddress(
          addressLine = None,
          uprn = None,
          postcode = "AB12 3CD",
          manualAddress = Some(PartialManualAddress(
            lineOne = Some("123 Fake Street"),
            lineTwo = None,
            lineThree = None,
            city = None))
        ))
      ))
    ))
    val result = helpers.manualAddressToOneLine(
      partiallyFilledForm,
      keys.address.address.manualAddress)
    result should be(Some("123 Fake Street"))
  }

  it should "return None for form without manual address" in {
    val partiallyFilledForm = confirmationForm.fillAndValidate(InprogressForces(
      address = Some(LastAddress(
        hasAddress = Some(HasAddressOption.YesAndLivingThere),
        address = Some(PartialAddress(
          addressLine = None,
          uprn = None,
          postcode = "AB12 3CD",
          manualAddress = Some(PartialManualAddress(
            lineOne = None,
            lineTwo = None,
            lineThree = None,
            city = None))
        ))
      ))
    ))
    // NOTE(review): this test reads keys.address.manualAddress while the two tests above
    // use keys.address.address.manualAddress — confirm the shallower key is intentional.
    val result = helpers.manualAddressToOneLine(
      partiallyFilledForm,
      keys.address.manualAddress)
    result should be(None)
  }

  behavior of "AddressHelpers.manualAdressToOneLine with Ordinary AddressForm"

  it should "return all elements of address separated by comma for form with full manual address" in {
    val partiallyFilledForm = manualAddressForm.fillAndValidate(InprogressOrdinary(
      address = Some(PartialAddress(
        addressLine = None,
        uprn = None,
        postcode = "AB12 3CD",
        manualAddress = Some(PartialManualAddress(
          lineOne = Some("Unit 4, Elgar Business Centre"),
          lineTwo = Some("Moseley Road"),
          lineThree = Some("Hallow"),
          city = Some("Worcester")))
      ))
    ))
    val result = helpers.manualAddressToOneLine(
      partiallyFilledForm,
      keys.address.manualAddress)
    result should be(Some("Unit 4, Elgar Business Centre, Moseley Road, Hallow, Worcester"))
  }

  it should "return address without comma for form with manual address of just one line" in {
    val partiallyFilledForm = manualAddressForm.fillAndValidate(InprogressOrdinary(
      address = Some(PartialAddress(
        addressLine = None,
        uprn = None,
        postcode = "AB12 3CD",
        manualAddress = Some(PartialManualAddress(
          lineOne = Some("123 Fake Street"),
          lineTwo = None,
          lineThree = None,
          city = None))
      ))
    ))
    val result = helpers.manualAddressToOneLine(
      partiallyFilledForm,
      keys.address.manualAddress)
    result should be(Some("123 Fake Street"))
  }

  it should "return None for form without manual address" in {
    val partiallyFilledForm = manualAddressForm.fillAndValidate(InprogressOrdinary(
      address = Some(PartialAddress(
        addressLine = None,
        uprn = None,
        postcode = "AB12 3CD",
        manualAddress = Some(PartialManualAddress(
          lineOne = None,
          lineTwo = None,
          lineThree = None,
          city = None))
      ))
    ))
    val result = helpers.manualAddressToOneLine(
      partiallyFilledForm,
      keys.address.manualAddress)
    result should be(None)
  }

  "concatListOfOptionalStrings()" should
    "concatenate only Some values if there are any or return None" in {
    helpers.concatListOfOptionalStrings(List(None, None, None)) should be(None)
    helpers.concatListOfOptionalStrings(List(None, Some("aaa"), None)) should be(Some("aaa"))
    helpers.concatListOfOptionalStrings(List(Some("aaa"), Some("bbb"), Some("ccc"))) should be(Some("aaa, bbb, ccc"))
  }
}
| michaeldfallen/ier-frontend | test/uk/gov/gds/ier/form/AddressHelpersTests.scala | Scala | mit | 6,168 |
/*
* Derived from https://github.com/spray/spray/blob/v1.1-M7/spray-http/src/main/scala/spray/http/HttpEncoding.scala
*
* Copyright (C) 2011-2012 spray.io
* Based on code copyright (C) 2010-2011 by the BlueEyes Web Framework Team (http://github.com/jdegoes/blueeyes)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s
import org.http4s.util._
import org.http4s.syntax.string._
/**
 * An HTTP content-coding (e.g. `gzip`) paired with a quality value, as used in
 * `Accept-Encoding` / `Content-Encoding` negotiation.
 */
final case class ContentCoding (coding: CaseInsensitiveString, qValue: QValue = QValue.One) extends HasQValue with Renderable {
  /** Copy of this coding carrying a different quality value. */
  def withQValue(q: QValue): ContentCoding = copy(coding, q)

  /** True when this coding is accepted by `encoding` (the reverse of satisfiedBy). */
  def satisfies(encoding: ContentCoding): Boolean = encoding.satisfiedBy(this)

  /** True when this coding accepts `encoding`: wildcard or equal coding, with both q-values acceptable. */
  def satisfiedBy(encoding: ContentCoding): Boolean = {
    (this.coding.toString == "*" || this.coding == encoding.coding) &&
    qValue.isAcceptable && encoding.qValue.isAcceptable
  }

  // Renders as "<coding><qValue>", e.g. "gzip;q=0.5".
  override def render(writer: Writer): writer.type = writer << coding << qValue

  // We want the normal case class generated methods except copy
  private def copy(coding: CaseInsensitiveString = this.coding, q: QValue = this.qValue) = ContentCoding(coding, q)
}
object ContentCoding extends Registry {
  type Key = CaseInsensitiveString
  type Value = ContentCoding

  // Registry plumbing: a key alone denotes a coding with the default q-value.
  implicit def fromKey(k: CaseInsensitiveString): ContentCoding = ContentCoding(k)
  implicit def fromValue(v: ContentCoding): CaseInsensitiveString = v.coding

  /** Wildcard coding: matches any content-coding during negotiation. */
  val `*`: ContentCoding = registerKey("*".ci)

  // http://www.iana.org/assignments/http-parameters/http-parameters.xml#http-parameters-1
  val compress        = registerKey("compress".ci)
  val deflate         = registerKey("deflate".ci)
  val exi             = registerKey("exi".ci)
  val gzip            = registerKey("gzip".ci)
  val identity        = registerKey("identity".ci)
  val `pack200-gzip`  = registerKey("pack200-gzip".ci)

  // Legacy encodings defined by RFC2616 3.5 — aliases of the canonical codings above.
  val `x-compress` = register("x-compress".ci, compress)
  val `x-gzip`     = register("x-gzip".ci, gzip)
}
| ZizhengTai/http4s | core/src/main/scala/org/http4s/ContentCoding.scala | Scala | apache-2.0 | 2,489 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3
import org.neo4j.cypher.GraphDatabaseFunSuite
import org.neo4j.cypher.internal.compiler.v2_3.ast.convert.commands.StatementConverters._
import org.neo4j.cypher.internal.compiler.v2_3.commands._
import org.neo4j.cypher.internal.compiler.v2_3.commands.expressions._
import org.neo4j.cypher.internal.compiler.v2_3.executionplan.builders.{BuilderTest, Solved, TraversalMatcherBuilder, Unsolved}
import org.neo4j.cypher.internal.compiler.v2_3.executionplan.{ExecutionPlanInProgress, PartiallySolvedQuery}
import org.neo4j.cypher.internal.compiler.v2_3.pipes.{ArgumentPipe, SingleRowPipe}
import org.neo4j.cypher.internal.compiler.v2_3.spi.PlanContext
import org.neo4j.cypher.internal.compiler.v2_3.symbols.SymbolTable
import org.neo4j.cypher.internal.frontend.v2_3.parser.CypherParser
import org.neo4j.cypher.internal.spi.v2_3.TransactionBoundPlanContext
import org.neo4j.graphdb.Transaction
/**
 * Exercises TraversalMatcherBuilder.canWorkWith / apply against a live transaction-bound
 * plan context, covering which query shapes the builder accepts and what it solves.
 */
class TraversalMatcherBuilderTest extends GraphDatabaseFunSuite with BuilderTest {
  import org.neo4j.cypher.internal.frontend.v2_3.symbols._

  // Rebuilt before every test (beforeEach) and torn down after it (afterEach).
  var builder: TraversalMatcherBuilder = null
  var ctx: PlanContext = null
  var tx: Transaction = null

  override def beforeEach() {
    super.beforeEach()
    builder = new TraversalMatcherBuilder
    tx = graph.beginTx()
    ctx = new TransactionBoundPlanContext(statement, graph)
  }

  override def afterEach() {
    tx.close()
    super.afterEach()
  }

  test("should_not_accept_queries_without_patterns") {
    val q = PartiallySolvedQuery().
      copy(start = Seq(Unsolved(NodeByIndex("n", "index", Literal("key"), Literal("expression"))))
    )
    builder.canWorkWith(plan(SingleRowPipe(), q), ctx) should be(false)
  }

  test("should_accept_variable_length_paths") {
    val q = query("START me=node:node_auto_index(name = 'Jane') " +
      "MATCH me-[:jane_knows*]->friend-[:has]->status " +
      "RETURN me")

    builder.canWorkWith(plan(SingleRowPipe(), q), ctx) should be(true)
  }

  test("should_not_accept_queries_with_varlength_paths") {
    val q = query("START me=node:node_auto_index(name = 'Tarzan'), you=node:node_auto_index(name = 'Jane') " +
      "MATCH me-[:LOVES*]->banana-[:LIKES*]->you " +
      "RETURN me")

    builder.canWorkWith(plan(SingleRowPipe(), q), ctx) should be(true)
  }

  test("should_handle_loops") {
    val q = query("START me=node:node_auto_index(name = 'Tarzan'), you=node:node_auto_index(name = 'Jane') " +
      "MATCH me-[:LIKES]->(u1)<-[:LIKES]->you, me-[:HATES]->(u2)<-[:HATES]->you " +
      "RETURN me")

    builder.canWorkWith(plan(SingleRowPipe(), q), ctx) should be(true)
  }

  test("should_not_take_on_path_expression_predicates") {
    val q = query("START a=node({self}) MATCH a-->b WHERE b-->() RETURN b")

    builder.canWorkWith(plan(SingleRowPipe(), q), ctx) should be(true)

    val testPlan = plan(SingleRowPipe(), q)
    val newPlan = builder.apply(testPlan, ctx)

    assertQueryHasNotSolvedPathExpressions(newPlan)
  }

  test("should_handle_global_queries") {
    val q = query("START a=node({self}), b = node(*) MATCH a-->b RETURN b")

    val testPlan = plan(SingleRowPipe(), q)
    builder.canWorkWith(testPlan, ctx) should be(true)

    val newPlan = builder.apply(testPlan, ctx)

    newPlan.query.start.exists(_.unsolved) should be(false)
  }

  test("does_not_take_on_paths_overlapping_with_identifiers_already_in_scope") {
    val q = query("START a = node(*) MATCH a-->b RETURN b")

    // The source pipe already binds "b", so the builder must refuse the overlapping path.
    val sourcePipe = ArgumentPipe(new SymbolTable(Map("b" -> CTNode)))()
    builder.canWorkWith(plan(sourcePipe, q), ctx) should be(false)
  }

  test("should handle starting from node and relationship") {
    val q = query("start a=node(0), ab=relationship(0) match (a)-[ab]->(b) return b")
    builder.canWorkWith(plan(SingleRowPipe(), q), ctx) should be(true)

    val newPlan = builder.apply(plan(SingleRowPipe(), q), ctx)

    newPlan.query.start.exists(_.unsolved) should be(false)
  }

  test("should handle starting from two nodes") {
    val q = query("start a=node(0), b=node(1) match (a)-[ab]->(b) return b")
    builder.canWorkWith(plan(SingleRowPipe(), q), ctx) should be(true)

    val newPlan = builder.apply(plan(SingleRowPipe(), q), ctx)

    newPlan.query.start.exists(_.unsolved) should be(false)
  }

  // Fails the test when any solved predicate still contains a PathExpression.
  def assertQueryHasNotSolvedPathExpressions(newPlan: ExecutionPlanInProgress) {
    newPlan.query.where.foreach {
      case Solved(pred) if pred.exists(_.isInstanceOf[PathExpression]) => fail("Didn't expect the predicate to be solved")
      case _ =>
    }
  }

  private val parser = new CypherParser

  // Parses Cypher text into the PartiallySolvedQuery representation the builder consumes.
  private def query(text: String): PartiallySolvedQuery = PartiallySolvedQuery(parser.parse(text).asQuery(devNullLogger).asInstanceOf[Query])
}
| HuangLS/neo4j | community/cypher/cypher/src/test/scala/org/neo4j/cypher/internal/compiler/v2_3/TraversalMatcherBuilderTest.scala | Scala | apache-2.0 | 5,655 |
package com.caibowen.webface.gae.misc
/**
 * Exact request location info, including time, ip, geo info;
 * used as return value as well as query filters.
 * Created by Bowen Cai on 9/3/2015.
 *
 * @param timestamp when the request occurred (epoch value; unit not shown here — confirm with producer)
 * @param ip        client IP address the geo fields were resolved from
 * @param country   country resolved for the IP
 * @param region    region/state resolved for the IP
 * @param city      city resolved for the IP
 * @param latitude  geographic latitude
 * @param longitude geographic longitude
 */
case class ReqLocation(timestamp: Long, ip: String,
  country: String, region: String, city: String,
  latitude: Float,
  longitude: Float)
| xkommando/WebFace | src/com/caibowen/webface/gae/misc/ReqLocation.scala | Scala | apache-2.0 | 410 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.orc
import org.apache.hadoop.io._
import org.apache.orc.mapred.{OrcList, OrcMap, OrcStruct, OrcTimestamp}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{SpecificInternalRow, UnsafeArrayData}
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.catalyst.util.RebaseDateTime.rebaseJulianToGregorianDays
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
/**
 * A deserializer to deserialize ORC structs to Spark rows.
 *
 * Note: deserialize() writes into a single reused mutable row, so callers must copy
 * the returned row if they retain it across calls.
 */
class OrcDeserializer(
    dataSchema: StructType,
    requiredSchema: StructType,
    requestedColIds: Array[Int]) {

  // Shared output row, overwritten by every deserialize() call.
  private val resultRow = new SpecificInternalRow(requiredSchema.map(_.dataType))

  // `fieldWriters(index)` is
  // - null if the respective source column is missing, since the output value
  //   is always null in this case
  // - a function that updates target column `index` otherwise.
  private val fieldWriters: Array[WritableComparable[_] => Unit] = {
    requiredSchema.zipWithIndex
      .map { case (f, index) =>
        if (requestedColIds(index) == -1) {
          null
        } else {
          val writer = newWriter(f.dataType, new RowUpdater(resultRow))
          (value: WritableComparable[_]) => writer(index, value)
        }
      }.toArray
  }

  /** Converts one ORC struct into the shared InternalRow, mapping columns via requestedColIds. */
  def deserialize(orcStruct: OrcStruct): InternalRow = {
    var targetColumnIndex = 0
    while (targetColumnIndex < fieldWriters.length) {
      if (fieldWriters(targetColumnIndex) != null) {
        val value = orcStruct.getFieldValue(requestedColIds(targetColumnIndex))
        if (value == null) {
          resultRow.setNullAt(targetColumnIndex)
        } else {
          fieldWriters(targetColumnIndex)(value)
        }
      }
      targetColumnIndex += 1
    }
    resultRow
  }

  /**
   * Creates a writer to write ORC values to Catalyst data structure at the given ordinal.
   */
  private def newWriter(
      dataType: DataType, updater: CatalystDataUpdater): (Int, WritableComparable[_]) => Unit =
    dataType match {
      case NullType => (ordinal, _) =>
        updater.setNullAt(ordinal)

      case BooleanType => (ordinal, value) =>
        updater.setBoolean(ordinal, value.asInstanceOf[BooleanWritable].get)

      case ByteType => (ordinal, value) =>
        updater.setByte(ordinal, value.asInstanceOf[ByteWritable].get)

      case ShortType => (ordinal, value) =>
        updater.setShort(ordinal, value.asInstanceOf[ShortWritable].get)

      case IntegerType => (ordinal, value) =>
        updater.setInt(ordinal, value.asInstanceOf[IntWritable].get)

      case LongType => (ordinal, value) =>
        updater.setLong(ordinal, value.asInstanceOf[LongWritable].get)

      case FloatType => (ordinal, value) =>
        updater.setFloat(ordinal, value.asInstanceOf[FloatWritable].get)

      case DoubleType => (ordinal, value) =>
        updater.setDouble(ordinal, value.asInstanceOf[DoubleWritable].get)

      case StringType => (ordinal, value) =>
        updater.set(ordinal, UTF8String.fromBytes(value.asInstanceOf[Text].copyBytes))

      case BinaryType => (ordinal, value) =>
        // Copy out of the Hadoop-managed backing array, which may be reused.
        val binary = value.asInstanceOf[BytesWritable]
        val bytes = new Array[Byte](binary.getLength)
        System.arraycopy(binary.getBytes, 0, bytes, 0, binary.getLength)
        updater.set(ordinal, bytes)

      case DateType => (ordinal, value) =>
        updater.setInt(ordinal, OrcShimUtils.getGregorianDays(value))

      case TimestampType => (ordinal, value) =>
        updater.setLong(ordinal, DateTimeUtils.fromJavaTimestamp(value.asInstanceOf[OrcTimestamp]))

      case DecimalType.Fixed(precision, scale) => (ordinal, value) =>
        val v = OrcShimUtils.getDecimal(value)
        v.changePrecision(precision, scale)
        updater.set(ordinal, v)

      case st: StructType => (ordinal, value) =>
        // Nested struct: allocate a fresh row and recursively convert each field.
        val result = new SpecificInternalRow(st)
        val fieldUpdater = new RowUpdater(result)
        val fieldConverters = st.map(_.dataType).map { dt =>
          newWriter(dt, fieldUpdater)
        }.toArray
        val orcStruct = value.asInstanceOf[OrcStruct]

        var i = 0
        while (i < st.length) {
          val value = orcStruct.getFieldValue(i)
          if (value == null) {
            result.setNullAt(i)
          } else {
            fieldConverters(i)(i, value)
          }
          i += 1
        }

        updater.set(ordinal, result)

      case ArrayType(elementType, _) => (ordinal, value) =>
        val orcArray = value.asInstanceOf[OrcList[WritableComparable[_]]]
        val length = orcArray.size()
        val result = createArrayData(elementType, length)
        val elementUpdater = new ArrayDataUpdater(result)
        val elementConverter = newWriter(elementType, elementUpdater)

        var i = 0
        while (i < length) {
          val value = orcArray.get(i)
          if (value == null) {
            result.setNullAt(i)
          } else {
            elementConverter(i, value)
          }
          i += 1
        }

        updater.set(ordinal, result)

      case MapType(keyType, valueType, _) => (ordinal, value) =>
        val orcMap = value.asInstanceOf[OrcMap[WritableComparable[_], WritableComparable[_]]]
        val length = orcMap.size()
        val keyArray = createArrayData(keyType, length)
        val keyUpdater = new ArrayDataUpdater(keyArray)
        val keyConverter = newWriter(keyType, keyUpdater)
        val valueArray = createArrayData(valueType, length)
        val valueUpdater = new ArrayDataUpdater(valueArray)
        val valueConverter = newWriter(valueType, valueUpdater)

        var i = 0
        val it = orcMap.entrySet().iterator()
        while (it.hasNext) {
          val entry = it.next()
          keyConverter(i, entry.getKey)
          val value = entry.getValue
          if (value == null) {
            valueArray.setNullAt(i)
          } else {
            valueConverter(i, value)
          }
          i += 1
        }

        // The ORC map will never have null or duplicated map keys, it's safe to create a
        // ArrayBasedMapData directly here.
        updater.set(ordinal, new ArrayBasedMapData(keyArray, valueArray))

      case udt: UserDefinedType[_] => newWriter(udt.sqlType, updater)

      case _ =>
        throw new UnsupportedOperationException(s"$dataType is not supported yet.")
    }

  // Primitive element types get an unsafe (unboxed) array; everything else a generic one.
  private def createArrayData(elementType: DataType, length: Int): ArrayData = elementType match {
    case BooleanType => UnsafeArrayData.fromPrimitiveArray(new Array[Boolean](length))
    case ByteType => UnsafeArrayData.fromPrimitiveArray(new Array[Byte](length))
    case ShortType => UnsafeArrayData.fromPrimitiveArray(new Array[Short](length))
    case IntegerType => UnsafeArrayData.fromPrimitiveArray(new Array[Int](length))
    case LongType => UnsafeArrayData.fromPrimitiveArray(new Array[Long](length))
    case FloatType => UnsafeArrayData.fromPrimitiveArray(new Array[Float](length))
    case DoubleType => UnsafeArrayData.fromPrimitiveArray(new Array[Double](length))
    case _ => new GenericArrayData(new Array[Any](length))
  }

  /**
   * A base interface for updating values inside catalyst data structure like `InternalRow` and
   * `ArrayData`.
   */
  sealed trait CatalystDataUpdater {
    def set(ordinal: Int, value: Any): Unit

    def setNullAt(ordinal: Int): Unit = set(ordinal, null)
    def setBoolean(ordinal: Int, value: Boolean): Unit = set(ordinal, value)
    def setByte(ordinal: Int, value: Byte): Unit = set(ordinal, value)
    def setShort(ordinal: Int, value: Short): Unit = set(ordinal, value)
    def setInt(ordinal: Int, value: Int): Unit = set(ordinal, value)
    def setLong(ordinal: Int, value: Long): Unit = set(ordinal, value)
    def setDouble(ordinal: Int, value: Double): Unit = set(ordinal, value)
    def setFloat(ordinal: Int, value: Float): Unit = set(ordinal, value)
  }

  // Updater backed by an InternalRow.
  final class RowUpdater(row: InternalRow) extends CatalystDataUpdater {
    override def setNullAt(ordinal: Int): Unit = row.setNullAt(ordinal)
    override def set(ordinal: Int, value: Any): Unit = row.update(ordinal, value)

    override def setBoolean(ordinal: Int, value: Boolean): Unit = row.setBoolean(ordinal, value)
    override def setByte(ordinal: Int, value: Byte): Unit = row.setByte(ordinal, value)
    override def setShort(ordinal: Int, value: Short): Unit = row.setShort(ordinal, value)
    override def setInt(ordinal: Int, value: Int): Unit = row.setInt(ordinal, value)
    override def setLong(ordinal: Int, value: Long): Unit = row.setLong(ordinal, value)
    override def setDouble(ordinal: Int, value: Double): Unit = row.setDouble(ordinal, value)
    override def setFloat(ordinal: Int, value: Float): Unit = row.setFloat(ordinal, value)
  }

  // Updater backed by an ArrayData.
  final class ArrayDataUpdater(array: ArrayData) extends CatalystDataUpdater {
    override def setNullAt(ordinal: Int): Unit = array.setNullAt(ordinal)
    override def set(ordinal: Int, value: Any): Unit = array.update(ordinal, value)

    override def setBoolean(ordinal: Int, value: Boolean): Unit = array.setBoolean(ordinal, value)
    override def setByte(ordinal: Int, value: Byte): Unit = array.setByte(ordinal, value)
    override def setShort(ordinal: Int, value: Short): Unit = array.setShort(ordinal, value)
    override def setInt(ordinal: Int, value: Int): Unit = array.setInt(ordinal, value)
    override def setLong(ordinal: Int, value: Long): Unit = array.setLong(ordinal, value)
    override def setDouble(ordinal: Int, value: Double): Unit = array.setDouble(ordinal, value)
    override def setFloat(ordinal: Int, value: Float): Unit = array.setFloat(ordinal, value)
  }
}
| dbtsai/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcDeserializer.scala | Scala | apache-2.0 | 10,507 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package scalaguide.akka.typed
import org.specs2.mutable.Specification
import play.api.inject.Injector
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.inject.guice.GuiceableModule
/**
 * Exercises the documentation samples for integrating Akka Typed actors with Play's
 * dependency injection: runtime (Guice) and compile-time DI, each in both the
 * functional-programming and object-oriented actor styles, including variants that
 * spawn multiple instances of the same behavior.
 */
final class AkkaTypedDocSpec extends Specification {
  "Runtime DI support for FP-style" in fpStyle
  "Runtime DI support for OO-style" in ooStyle
  "Runtime DI support for multi-instance FP-style" in fpStyleMulti
  "Runtime DI support for multi-instance OO-style" in ooStyleMulti
  "Compile-time DI without support works" in compileTimeDI

  // Functional-programming actor style: one instance of each actor is injected.
  private def fpStyle = {
    import fp._
    val app = newInjector(AppModule).instanceOf[Main]
    (app.helloActor !== null).and(app.configuredActor !== null)
  }

  // Object-oriented actor style: one instance of each actor is injected.
  private def ooStyle = {
    import oo._
    val app = newInjector(AppModule).instanceOf[Main]
    (app.helloActor !== null).and(app.configuredActor !== null)
  }

  // FP style with multiple spawned ActorRef[T] instances of the same behavior.
  private def fpStyleMulti = {
    import fp.multi._
    val app = newInjector(AppModule).instanceOf[Main]
    (app.helloActor1 !== null)
      .and(app.helloActor2 !== null)
      .and(app.configuredActor1 !== null)
      .and(app.configuredActor2 !== null)
  }

  // OO style with multiple spawned ActorRef[T] instances of the same behavior.
  private def ooStyleMulti = {
    import oo.multi._
    val app = newInjector(AppModule).instanceOf[Main]
    (app.helloActor1 !== null)
      .and(app.helloActor2 !== null)
      .and(app.configuredActor1 !== null)
      .and(app.configuredActor2 !== null)
  }

  // Sanity check of the equivalent wiring done through compile-time DI components.
  private def compileTimeDI = {
    import java.io.File
    import play.api._
    import fp._
    val environment = Environment(new File("."), getClass.getClassLoader, Mode.Test)
    val context     = ApplicationLoader.Context.create(environment, Map("my.config" -> "foo"))
    val main        = new AppComponents(context).main
    (main.helloActor !== null).and(main.configuredActor !== null)
  }

  // Builds a Guice injector carrying the test configuration plus the given bindings.
  private def newInjector(bindModules: GuiceableModule): Injector = {
    val builder = GuiceApplicationBuilder()
      .configure("my.config" -> "foo")
      .bindings(bindModules)
    builder.injector()
  }
}
| benmccann/playframework | documentation/manual/working/commonGuide/akka/code/scalaguide/akka/typed/AkkaTypedDocSpec.scala | Scala | apache-2.0 | 2,286 |
/*
* =========================================================================================
* Copyright © 2013-2017 the kamon project <http://kamon.io/>
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
* =========================================================================================
*/
package kamon.netty.instrumentation
import kamon.Kamon
import kamon.context.Context
import org.aspectj.lang.annotation._
import scala.beans.BeanProperty
/**
 * Mixed into Netty channels (via AspectJ) so each channel carries the Kamon
 * context that was current when the channel was created.
 */
trait ChannelContextAware {
  // 0 until explicitly set by instrumentation elsewhere.
  @volatile var startTime: Long = 0
  // @BeanProperty generates getContext/setContext for Java-side (AspectJ) access.
  @volatile @BeanProperty var context:Context = Kamon.currentContext()
}
/**
 * Mixed into Netty HTTP messages (via AspectJ) so each request/response carries
 * the Kamon context current at creation time.
 */
trait RequestContextAware {
  // @BeanProperty generates getContext/setContext for Java-side (AspectJ) access.
  @volatile @BeanProperty var context:Context = Kamon.currentContext()
}
/**
 * AspectJ aspect that mixes the context-carrying traits into all Netty channels
 * and HTTP messages, and eagerly initializes the context on message creation.
 */
@Aspect
class ChannelInstrumentation {
  // Every io.netty.channel.Channel implementation also becomes ChannelContextAware.
  @DeclareMixin("io.netty.channel.Channel+")
  def mixinChannelToContextAware: ChannelContextAware = new ChannelContextAware{}
  // Every io.netty.handler.codec.http.HttpMessage also becomes RequestContextAware.
  @DeclareMixin("io.netty.handler.codec.http.HttpMessage+")
  def mixinRequestToContextAware: RequestContextAware = new RequestContextAware{}
  // Runs after any HttpMessage constructor, on the newly created message.
  @After("execution(io.netty.handler.codec.http.HttpMessage+.new(..)) && this(request)")
  def afterCreation(request: RequestContextAware): Unit = {
    // Force traceContext initialization.
    request.getContext()
  }
}
| kamon-io/kamon-netty | src/main/scala/kamon/netty/instrumentation/ChannelInstrumentation.scala | Scala | apache-2.0 | 1,738 |
package opencl.generator
import java.io._
import ir._
import ir.ast._
import lift.arithmetic.SizeVar
import opencl.executor.{Execute, Executor, TestWithExecutor}
import opencl.ir._
import opencl.ir.pattern._
import org.junit.Assert._
import org.junit.{Ignore, Test}
import scala.language.postfixOps
import scala.sys.process._
object TestGraphTheory extends TestWithExecutor
/**
 * Tests for graph algorithms (BFS frontier expansion, PageRank) expressed as dense
 * matrix-vector products in the Lift OpenCL DSL, each checked against a plain Scala
 * reference implementation.
 */
class TestGraphTheory {
  // Scalar user functions used as the map/reduce operators of the kernels.
  val add = UserFun("add", Array("a","b"), "return a+b;", Seq(Float, Float), Float)
  val mult = UserFun("mult", Array("a","b"), "return a*b;", Seq(Float, Float), Float)
  //boolean operations - odd names for compatibility on NVIDIA platforms
  val or = UserFun("b_or", Array("a","b"), "return (((a>0.0f)||(b>0.0f))?(1.0f):(0.0f));", Seq(Float, Float), Float)
  val and = UserFun("b_and", Array("a","b"), "return (((a>0.0f)&&(b>0.0f))?(1.0f):(0.0f));", Seq(Float, Float), Float)

  // One BFS frontier expansion: new fringe = graph ∘ fringe using boolean and/or.
  @Test def DENSE_BFS_ITERATION(): Unit = {
    println("DENSE_BFS_ITERATION")
    val inputSize = 1024
    // Random 0/1 adjacency matrix; entry is 1 with probability 3/100.
    val graph = Array.tabulate(inputSize, inputSize)((r:Int,c:Int) => (if(util.Random.nextInt(100)>2) 0 else 1).toFloat)
    // Fringe starts with exactly one visited vertex.
    val fringe = Array.fill(inputSize)(0.0f)
    fringe(util.Random.nextInt(inputSize)) = 1.0f
    val N = SizeVar("N")
    val denseBFSIteration = fun(
      ArrayTypeWSWC(ArrayTypeWSWC(Float, N), N), //must be a square matrix for a graph
      ArrayTypeWSWC(Float, N),
      (graph, bfsFringe) => {
        fun((fr) =>
          Join() o MapWrg(
            // Per row: boolean dot product with the fringe (and then or-reduce).
            Join() o MapLcl( fun( (r) => toGlobal(MapSeq(id)) o ReduceSeq(or, 0.0f) o MapSeq(and) $ Zip(fr,r)))
          ) o Split(128) $ graph
        ) $ bfsFringe
      }
    )
    val (output, runtime) = Execute(inputSize*inputSize)[Array[Float]](denseBFSIteration, graph, fringe)
    val gold:Array[Float] = scalaBFSIteration(graph,fringe)
    println(fringe.toList)
    println("Fringe sum = "+ fringe.sum)
    println(gold.toList)
    println("Gold sum = "+ gold.sum)
    println(output.toList)
    println("Output sum = "+ output.sum)
    println("runtime = " + runtime)
    assertArrayEquals(gold, output, 0.0f)
  }

  // One PageRank step: rank' = M * rank as a dense matrix-vector product.
  @Test def DENSE_PAGERANK_ITERATION(): Unit = {
    println("DENSE_PAGERANK_ITERATION")
    val inputSize = 1024
    // Denser random graph (edge probability 21/100), normalized by out-degree.
    val graph = buildPageRankMatrix(Array.tabulate(inputSize, inputSize)((r:Int,c:Int) => (if(util.Random.nextInt(100)>20) 0 else 1).toFloat))
    // Uniform initial rank vector.
    val ranks = Array.fill(inputSize)(1.0f/inputSize.toFloat)
    val N = SizeVar("N")
    val densePageRankIteration = fun(
      ArrayTypeWSWC(ArrayTypeWSWC(Float, N), N), //must be a square matrix for a graph
      ArrayTypeWSWC(Float, N),
      (graph, ranks) => {
        fun((fr) =>
          Join() o MapWrg(
            // Per row: arithmetic dot product with the rank vector.
            MapLcl( fun( (r) => toGlobal(MapSeq(id)) o ReduceSeq(add, 0.0f) o MapSeq(mult) $ Zip(fr,r)))
          ) o Split(128) $ graph
        ) $ ranks
      }
    )
    val (output, runtime) = Execute(inputSize*inputSize)[Array[Float]](densePageRankIteration, graph, ranks)
    val gold:Array[Float] = scalaDotProductIteration(graph,ranks)
    println(ranks.toList)
    println("Fringe sum = "+ ranks.sum)
    println(gold.toList)
    println("Gold sum = "+ gold.sum)
    println(output.toList)
    println("Output sum = "+ output.sum)
    println("runtime = " + runtime)
    assertArrayEquals(gold, output, 0.0f)
  }

  // Same BFS expansion but with the matrix size fixed in the types (no SizeVar).
  @Test def DENSE_BFS_ITERATION_FIXED_SIZE(): Unit = {
    println("DENSE_BFS_ITERATION_FIXED_SIZE")
    val inputSize = 1024
    val graph = Array.tabulate(inputSize, inputSize)((r:Int,c:Int) => (if(util.Random.nextInt(100)>2) 0 else 1).toFloat)
    val fringe = Array.fill(inputSize)(0.0f)
    fringe(util.Random.nextInt(inputSize)) = 1.0f
    val denseBFSIteration = fun(
      ArrayTypeWSWC(ArrayTypeWSWC(Float, 1024), 1024), //must be a square matrix for a graph
      ArrayTypeWSWC(Float, 1024),
      (graph, bfsFringe) => {
        Join() o MapWrg(
          // NOTE(review): this kernel uses add/mult while the gold below uses
          // or/and; they agree here only because the fringe has a single set
          // element, so each row's product has at most one nonzero term.
          MapLcl( fun( (r) => toGlobal(MapSeq(id)) o ReduceSeq(add, 0.0f) o MapSeq(mult) $ Zip(bfsFringe,r)))
        ) o Split(128) $ graph
      }
    )
    val (output, runtime) = Execute(inputSize*inputSize)[Array[Float]](denseBFSIteration, graph, fringe)
    val gold:Array[Float] = scalaBFSIteration(graph,fringe)
    println(fringe.toList)
    println("Fringe sum = "+ fringe.sum)
    println(gold.toList)
    println("Gold sum = "+ gold.sum)
    println(output.toList)
    println("Output sum = "+ output.sum)
    println("runtime = " + runtime)
    assertArrayEquals(gold, output, 0.0f)
  }

  // PageRank wrapped in Iterate (currently a single iteration).
  @Test def DENSE_PAGERANK_MULTI_ITERATION(): Unit = {
    println("DENSE_PAGERANK_MULTI_ITERATION")
    val inputSize = 1024
    val graph = buildPageRankMatrix(Array.tabulate(inputSize, inputSize)((r:Int,c:Int) => (if(util.Random.nextInt(100)>20) 0 else 1).toFloat))
    val ranks = Array.fill(inputSize)(1.0f/inputSize.toFloat)
    val N = SizeVar("N")
    val pageRankMultiIteration = fun(
      ArrayTypeWSWC(ArrayTypeWSWC(Float, N), N), //must be a square matrix for a graph
      ArrayTypeWSWC(Float, N),
      (graph, pageRanks) => {
        Iterate(1)( fun((fr) =>
          Join() o MapWrg(
            Join() o MapLcl(
              fun( (r) => toGlobal(MapSeq(id)) o ReduceSeq(add, 0.0f) o MapSeq(mult) $ Zip(fr,r))
            )) o Split(128) $ graph
        )) $ pageRanks
      })
    val (output, runtime) = Execute(inputSize*inputSize)[Array[Float]](pageRankMultiIteration, graph, ranks)
    val gold = scalaIterateDotProduct(1,graph,ranks)
    println(ranks.toList)
    println("Fringe sum = "+ ranks.sum)
    println(gold.toList)
    println("Gold sum = "+ gold.sum)
    println(output.toList)
    println("Output sum = "+ output.sum)
    println("runtime = " + runtime)
    assertArrayEquals(gold, output, 0.0f)
  }

  // Sequential 5-step BFS via Iterate; currently ignored.
  @Test @Ignore def DENSE_BFS_MULTI_ITERATION_FIXED_SIZE() : Unit = {
    println("DENSE_BFS_MULTI_ITERATION_FIXED_SIZE")
    val inputSize = 64
    val graphArr = Array.tabulate(inputSize, inputSize)((r:Int,c:Int) => (if(util.Random.nextInt(25)>2) 0 else 1).toFloat)
    val fringeArr = Array.fill(inputSize)(0.0f)
    fringeArr(util.Random.nextInt(inputSize)) = 1.0f
    val BFSMultiIteration = fun(
      ArrayTypeWSWC(ArrayTypeWSWC(Float, 64), 64), //must be a square matrix for a graph
      ArrayTypeWSWC(Float, 64),
      (graph, bfsFringe) => {
        Iterate(5)( fun((fr) =>
          Join() o MapSeq( fun( (r) => toGlobal(MapSeq(id)) o ReduceSeq(or, 0.0f) o MapSeq(and) $ Zip(fr,r))) $ graph
        )) $ bfsFringe
      })
    val (output, runtime) = Execute(1,1)[Array[Float]](BFSMultiIteration, graphArr, fringeArr)
    val gold = scalaIterateBFS(5,graphArr,fringeArr)
    println(fringeArr.toList)
    println("Fringe sum = "+ fringeArr.sum)
    println(gold.toList)
    println("Gold sum = "+ gold.sum)
    println(output.toList)
    println("Output sum = "+ output.sum)
    println("runtime = " + runtime)
    assertArrayEquals(gold, output, 0.0f)
  }

  // Reference: repeated matrix-vector products (PageRank gold).
  def scalaIterateDotProduct(iterations: Int,matrix:Array[Array[Float]],vector:Array[Float]) : Array[Float] = {
    var tVector = vector
    for(i:Int <- 0 until iterations){
      println("Iteration!")
      tVector = scalaDotProductIteration(matrix,tVector)
    }
    tVector
  }

  // Reference: one matrix-vector product.
  def scalaDotProductIteration(matrix:Array[Array[Float]],vector:Array[Float]) : Array[Float] = {
    matrix.map((row) => (row, vector).zipped.map((a, b) => a * b).sum)
  }

  // Reference: repeated BFS frontier expansions (BFS gold).
  def scalaIterateBFS(iterations: Int,graph:Array[Array[Float]],fringe:Array[Float]) : Array[Float] = {
    var tFringe = fringe
    for(i:Int <- 0 until iterations){
      println("Iteration!")
      tFringe = scalaBFSIteration(graph, tFringe)
    }
    tFringe
  }

  // Reference: one boolean matrix-vector product (and per element, or-reduce per row).
  def scalaBFSIteration(graph:Array[Array[Float]],fringe:Array[Float]) : Array[Float] = {
    graph.map(
      (row) => (row, fringe).zipped.map((a,b) =>
        if(a>0.0f && b>0.0f) 1.0f else 0.0f
      ).reduce((a,b) =>
        if(a>0.0f || b>0.0f) 1.0f else 0.0f
      )
    )
  }

  def buildPageRankMatrix(graph: Array[Array[Float]]) = {
    /* take a matrix with a boolean edge graph, and transform it to a weighted edge graph */
    /* First, transpose the matrix so rows hold the "out" edge information, instead of columns */
    var tGraph = graph.transpose
    /* For each row, calculate the number of edges, and divide each weight by that number */
    tGraph = tGraph.map {
      case (row: Array[Float]) =>
        val edge_count = row.sum
        if(edge_count>0) {
          row.map((x: Float) => x / edge_count)
        }else{
          row
        }
    }
    /* Transpose the graph back, so we can work with it using standard linear algebra stuff */
    tGraph = tGraph.transpose
    tGraph
  }

  // Debug helper: renders a BFS/DFS iteration as a Graphviz dot file and opens it.
  // Shells out to `dot` and `open`, so it only works where those tools exist.
  def printDFSDotFile(graph:Array[Array[Float]], fringe:Array[Float], gold: Array[Float], init: Array[Float]) : Unit = {
    "pwd".!
    val writer = new PrintWriter(new File("dfsGraph.dot"))
    writer.write("digraph DFSIteration {\\n")
    // Emit one edge per nonzero adjacency entry.
    graph.zipWithIndex.foreach {
      case (row: Array[Float], v1: Int) => row.zipWithIndex.foreach {
        case (w: Float, v2: Int) =>
          // if (w > 0.0f && (fringe(v1) > 0.0f || fringe(v2) > 0.0f)) {
          if (w > 0.0f) {
            writer.write(v2.toString + " -> " + v1.toString + ";\\n")
          }
      }
    }
    // Fringe vertices are squares; red if the gold result disagrees.
    fringe.zipWithIndex.foreach {
      case (w, v) =>
        if (w > 0.0f) {
          writer.write(v.toString + "[shape=square]\\n")
          if (gold(v) <= 0.0f) {
            writer.write(v.toString + "[color=red]\\n")
          }
        }
    }
    // Gold vertices missing from the fringe are red triangles; matches are green.
    gold.zipWithIndex.foreach {
      case (w, v) =>
        if (w > 0.0f) {
          if (fringe(v) <= 0.0f) {
            writer.write(v.toString + "[shape=triangle]\\n")
            writer.write(v.toString + "[color=red]\\n")
          } else {
            writer.write(v.toString + "[color=green]\\n")
          }
        }
    }
    // Initial vertices are blue.
    init.zipWithIndex.foreach {
      case (w, v) =>
        if (w > 0.0f) {
          writer.write(v.toString + "[color=blue]\\n")
        }
    }
    writer.write("}\\n")
    writer.close()
    "dot -Tpng dfsGraph.dot -odotgraph.png -v -Goverlap=scale".!
    "open dotgraph.png".!
  }
}
| lift-project/lift | src/test/opencl/generator/TestGraphTheory.scala | Scala | mit | 10,042 |
package sttp.client3.impl.cats
import java.util.concurrent.TimeoutException
import cats.effect.IO
import cats.effect.unsafe.IORuntime
import sttp.client3.testing.ConvertToFuture
import sttp.monad.MonadError
import scala.concurrent.ExecutionContext
import scala.concurrent.duration.DurationInt
/**
 * Shared wiring for cats-effect based sttp client tests: supplies the `MonadError`
 * instance for `IO`, the global `IORuntime`, and the `ConvertToFuture` used by the
 * common test suites.
 */
trait CatsTestBase {
  // Provided by the concrete test suite.
  implicit def executionContext: ExecutionContext

  implicit lazy val monad: MonadError[IO] = new CatsMonadAsyncError[IO]
  implicit val ioRuntime: IORuntime = IORuntime.global
  implicit val convertToFuture: ConvertToFuture[IO] = convertCatsIOToFuture()

  /**
   * Runs `t`, wrapping its result in `Some`; yields `None` if it does not complete
   * within `timeoutMillis` milliseconds. Any other failure is propagated unchanged.
   */
  def timeoutToNone[T](t: IO[T], timeoutMillis: Int): IO[Option[T]] =
    t.map(Some(_))
      .timeout(timeoutMillis.milliseconds)
      // `recover` matches only the timeout; non-matching errors stay failed,
      // which is cleaner than re-throwing them inside an error handler.
      .recover { case _: TimeoutException => None }
}
| softwaremill/sttp | effects/cats/src/test/scala/sttp/client3/impl/cats/CatsTestBase.scala | Scala | apache-2.0 | 833 |
package akka.persistence.jdbc.integration
import akka.persistence.jdbc.serialization.StoreOnlySerializableMessagesTest
import akka.persistence.jdbc.testkit.internal.MySQL
import akka.persistence.jdbc.testkit.internal.Oracle
import akka.persistence.jdbc.testkit.internal.Postgres
import akka.persistence.jdbc.testkit.internal.SqlServer
// One concrete suite per supported database, each running the shared
// StoreOnlySerializableMessagesTest against its own configuration file.
class PostgresStoreOnlySerializableMessagesTest
    extends StoreOnlySerializableMessagesTest("postgres-application.conf", Postgres)
class MySQLStoreOnlySerializableMessagesTest extends StoreOnlySerializableMessagesTest("mysql-application.conf", MySQL)
class OracleStoreOnlySerializableMessagesTest
    extends StoreOnlySerializableMessagesTest("oracle-application.conf", Oracle)
class SqlServerStoreOnlySerializableMessagesTest
    extends StoreOnlySerializableMessagesTest("sqlserver-application.conf", SqlServer)
| dnvriend/akka-persistence-jdbc | core/src/it/scala/akka/persistence/jdbc/integration/StoreOnlySerializableMessagesTest.scala | Scala | apache-2.0 | 856 |
/*
* Copyright 2012-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package laika.helium.generate
import laika.ast.Path.Root
import laika.ast._
import laika.helium.Helium
/**
 * Produces the XSL-FO stylesheet for PDF output, with fonts, font sizes, colors
 * and spacing interpolated from the given [[Helium]] theme configuration's
 * `pdfSettings`. The result is exposed as the raw stylesheet text in [[input]].
 */
private[laika] class FOStyles (helium: Helium) {
  import helium.pdfSettings._
  /*
  TODO - this string template approach is a temporary solution until Laika's 'CSS for PDF' supports CSS variables
   */
  // The complete stylesheet text; every theme-dependent value is interpolated once here.
  // NOTE: the content between the margin markers is emitted verbatim at runtime.
  val input: String = s"""
    |Paragraph {
    |  font-family: ${themeFonts.body};
    |  font-size: ${fontSizes.body.displayValue};
    |  line-height: ${layout.defaultLineHeight};
    |  text-align: justify;
    |  space-after: ${layout.defaultBlockSpacing.displayValue};
    |}
    |
    |TitledBlock {
    |  padding-left: 20mm;
    |  padding-right: 20mm;
    |  space-after: 6mm;
    |}
    |
    |TitledBlock .title {
    |  font-family: ${themeFonts.headlines};
    |  font-size: ${fontSizes.header4.displayValue};
    |  font-weight: bold;
    |  space-after: ${layout.defaultBlockSpacing.displayValue};
    |}
    |
    |QuotedBlock {
    |  font-style: italic;
    |  margin-left: 8mm;
    |  margin-right: 8mm;
    |  space-after: ${layout.defaultBlockSpacing.displayValue};
    |}
    |
    |QuotedBlock .attribution {
    |  font-family: ${themeFonts.body};
    |  font-size: ${fontSizes.body.displayValue};
    |  text-align: right;
    |  line-height: ${layout.defaultLineHeight};
    |}
    |
    |Image {
    |  space-after: 6mm;
    |  width: 85%;
    |  height: auto;
    |  content-width: scale-down-to-fit;
    |  scaling: uniform;
    |}
    |
    |Figure {
    |  space-after: 6mm;
    |}
    |
    |Figure .caption {
    |  font-family: ${themeFonts.body};
    |  font-size: ${fontSizes.code.displayValue};
    |  font-style: italic;
    |  space-after: ${layout.defaultBlockSpacing.displayValue};
    |}
    |
    |Figure .legend {
    |  font-size: ${fontSizes.code.displayValue};
    |  font-style: italic;
    |}
    |
    |Footnote, Citation {
    |  font-size: ${fontSizes.small.displayValue};
    |}
    |
    |.default-space {
    |  space-after: ${layout.defaultBlockSpacing.displayValue};
    |}
    |
    |Header {
    |  font-family: ${themeFonts.headlines};
    |  font-size: ${fontSizes.header4.displayValue};
    |  font-weight: bold;
    |  space-before: 7mm;
    |  space-after: ${layout.defaultBlockSpacing.displayValue};
    |}
    |
    |Title {
    |  font-family: ${themeFonts.headlines};
    |  font-size: ${fontSizes.title.displayValue};
    |  font-weight: bold;
    |  color: ${colors.theme.primary.displayValue};
    |  space-before: 0mm;
    |  space-after: 6mm;
    |}
    |
    |Header.level1 {
    |  font-size: ${fontSizes.header2.displayValue};
    |  space-before: 12mm;
    |  space-after: 6mm;
    |}
    |
    |Header.level2 {
    |  font-size: ${fontSizes.header2.displayValue};
    |}
    |
    |Header.level3 {
    |  font-size: ${fontSizes.header3.displayValue};
    |}
    |
    |BulletList, EnumList {
    |  space-after: 6mm;
    |  provisional-distance-between-starts: 5mm;
    |}
    |
    |DefinitionList {
    |  space-after: 6mm;
    |  provisional-distance-between-starts: 20mm;
    |}
    |
    |BulletListItem, EnumListItem, DefinitionListItem {
    |  space-after: ${layout.defaultBlockSpacing.displayValue};
    |}
    |
    |CodeBlock, LiteralBlock, ParsedLiteralBlock {
    |  font-family: ${themeFonts.code};
    |  font-size: ${fontSizes.code.displayValue};
    |  line-height: 1.4;
    |  color: ${colors.syntaxHighlighting.base.c5.displayValue};
    |  background-color: ${colors.syntaxHighlighting.base.c1.displayValue};
    |  fox:border-radius: 2mm;
    |  margin-left: 2mm;
    |  margin-right: 2mm;
    |  padding: 2mm;
    |  white-space-treatment: preserve;
    |  linefeed-treatment: preserve;
    |  white-space-collapse: false;
    |  space-after: 6mm;
    |}
    |
    |Literal, InlineCode {
    |  font-family: ${themeFonts.code};
    |  font-size: ${fontSizes.code.displayValue};
    |}
    |
    |SpanSequence.icon {
    |  padding-top: -2mm;
    |}
    |
    |IconGlyph {
    |  font-family: IcoFont;
    |  font-size: 16pt;
    |}
    |
    |InlineSVGIcon {
    |  content-height: 1.5em;
    |  content-width: 1.5em;
    |}
    |
    |.svg-shape {
    |  color: ${colors.theme.primary.displayValue};
    |}
    |
    |.callout.info IconGlyph, .callout.info .svg-shape {
    |  color: ${colors.messages.info.displayValue};
    |}
    |.callout.warning IconGlyph, .callout.warning .svg-shape {
    |  color: ${colors.messages.warning.displayValue};
    |}
    |.callout.error IconGlyph, .callout.error .svg-shape {
    |  color: ${colors.messages.error.displayValue};
    |}
    |
    |.callout {
    |  font-family: ${themeFonts.body};
    |  font-size: ${fontSizes.body.displayValue};
    |  line-height: ${layout.defaultLineHeight};
    |  margin-left: 2mm;
    |  margin-right: 2mm;
    |  padding: ${layout.defaultBlockSpacing.displayValue} ${layout.defaultBlockSpacing.displayValue} 0.1mm ${layout.defaultBlockSpacing.displayValue};
    |  fox:border-after-end-radius: 2mm;
    |  fox:border-before-end-radius: 2mm;
    |  space-after: 6mm;
    |}
    |
    |.callout.info {
    |  border-left: 3pt solid ${colors.messages.info.displayValue};
    |  background-color: ${colors.messages.infoLight.displayValue};
    |}
    |
    |.callout.warning {
    |  border-left: 3pt solid ${colors.messages.warning.displayValue};
    |  background-color: ${colors.messages.warningLight.displayValue};
    |}
    |
    |.callout.error {
    |  border-left: 3pt solid ${colors.messages.error.displayValue};
    |  background-color: ${colors.messages.errorLight.displayValue};
    |}
    |
    |Table {
    |  border: 1pt solid #cccccc;
    |  border-collapse: separate;
    |  space-after: 6mm;
    |}
    |
    |TableHead Cell {
    |  font-weight: bold;
    |  border-bottom: 1pt solid #cccccc;
    |}
    |
    |TableBody .cell-odd {
    |  background-color: #f2f2f2;
    |}
    |
    |Cell {
    |  padding: 2mm;
    |}
    |
    |FootnoteLink, CitationLink, SpanLink {
    |  color: ${colors.theme.secondary.displayValue};
    |}
    |
    |SpanLink {
    |  font-weight: bold;
    |}
    |
    |NavigationItem SpanLink {
    |  font-weight: bold;
    |  color: ${colors.theme.primary.displayValue};
    |}
    |
    |Paragraph.level2.nav SpanLink {
    |  font-weight: bold;
    |  color: ${colors.theme.secondary.displayValue};
    |}
    |
    |Emphasized {
    |  font-style: italic;
    |}
    |
    |Strong {
    |  font-weight: bold;
    |}
    |
    |Deleted {
    |  text-decoration: line-through;
    |}
    |
    |Inserted {
    |  text-decoration: underline;
    |}
    |
    |.subscript {
    |  font-size: ${fontSizes.small.displayValue};
    |  vertical-align: sub;
    |}
    |
    |.superscript {
    |  font-size: ${fontSizes.small.displayValue};
    |  vertical-align: super;
    |}
    |
    |.footnote-label {
    |  font-size: ${fontSizes.small.displayValue};
    |  vertical-align: super;
    |}
    |
    |CodeBlock .comment, CodeBlock .xml-cdata, CodeBlock .markup-quote {
    |  color: ${colors.syntaxHighlighting.base.c2.displayValue};
    |}
    |
    |CodeBlock .tag-punctuation {
    |  color: ${colors.syntaxHighlighting.base.c3.displayValue};
    |}
    |
    |CodeBlock .identifier {
    |  color: ${colors.syntaxHighlighting.base.c4.displayValue};
    |}
    |
    |CodeBlock .substitution, CodeBlock .annotation, CodeBlock .markup-emphasized, CodeBlock .xml-processing-instruction {
    |  color: ${colors.syntaxHighlighting.wheel.c1.displayValue};
    |}
    |
    |CodeBlock .keyword, CodeBlock .escape-sequence, CodeBlock .markup-headline {
    |  color: ${colors.syntaxHighlighting.wheel.c2.displayValue};
    |}
    |
    |CodeBlock .attribute-name, CodeBlock .declaration-name, CodeBlock .markup-link-target {
    |  color: ${colors.syntaxHighlighting.wheel.c3.displayValue};
    |}
    |
    |CodeBlock .number-literal, CodeBlock .string-literal, CodeBlock .char-literal, CodeBlock .boolean-literal, CodeBlock .regex-literal, CodeBlock .symbol-literal, CodeBlock .literal-value, CodeBlock .markup-link-text {
    |  color: ${colors.syntaxHighlighting.wheel.c4.displayValue};
    |}
    |
    |CodeBlock .type-name, CodeBlock .tag-name, CodeBlock .xml-dtd-tag-name, CodeBlock .markup-fence {
    |  color: ${colors.syntaxHighlighting.wheel.c5.displayValue};
    |}
    |
    |Paragraph.nav {
    |  font-size: ${fontSizes.body.displayValue};
    |  text-align-last: justify;
    |  margin-left: 8mm;
    |  space-before: 2mm;
    |  space-after: 0mm;
    |}
    |
    |Paragraph.nav.level1 {
    |  font-size: 22pt; /* TODO - align with header font sizes */
    |  font-weight: bold;
    |  color: ${colors.theme.secondary.displayValue};
    |  margin-left: 0mm;
    |  text-align-last: center;
    |  text-transform: uppercase;
    |  space-before: 15mm;
    |}
    |
    |Paragraph.nav.level2 {
    |  font-size: 17pt;
    |  color: ${colors.theme.secondary.displayValue};
    |  margin-left: 4mm;
    |  space-before: 7mm;
    |}
    |
    |Paragraph.nav.level3 {
    |  font-size: ${fontSizes.header3.displayValue};
    |  margin-left: 6mm;
    |  space-before: ${layout.defaultBlockSpacing.displayValue};
    |}
    |
    |Paragraph.nav.level4 {
    |  margin-left: 8mm;
    |}
    |
    |.align-top {
    |  vertical-align: top;
    |}
    |
    |.align-bottom {
    |  vertical-align: bottom;
    |}
    |
    |.align-middle {
    |  vertical-align: middle;
    |}
    |
    |.align-left {
    |  text-align: left;
    |}
    |
    |.align-right {
    |  text-align: right;
    |}
    |
    |.align-center {
    |  text-align: center;
    |}
    |
    |.keepWithPrevious { /* TODO - avoid camel case */
    |  keep-with-previous: always;
    |}
    |
    |.keepWithNext { /* TODO - avoid camel case */
    |  keep-with-next: always;
    |}
    |
    |.keep-together.pdf {
    |  page-break-inside: avoid;
    |}
    |
    |PageBreak {
    |  page-break-before: always;
    |}
    |
    |Rule {
    |  leader-length: 100%;
    |  rule-style: solid;
    |  rule-thickness: 2pt;
    |  space-after: ${layout.defaultBlockSpacing.displayValue};
    |}
    |
    |RuntimeMessage.debug, RuntimeMessage.info {
    |  color: ${colors.messages.info.displayValue};
    |  background-color: ${colors.messages.infoLight.displayValue};
    |  padding: 1pt 2pt;
    |  border: 1pt solid ${colors.messages.info.displayValue};
    |}
    |
    |RuntimeMessage.warning {
    |  color: ${colors.messages.warning.displayValue};
    |  background-color: ${colors.messages.warningLight.displayValue};
    |  padding: 1pt 2pt;
    |  border: 1pt solid ${colors.messages.warning.displayValue};
    |}
    |
    |RuntimeMessage.error, RuntimeMessage.fatal {
    |  color: ${colors.messages.error.displayValue};
    |  background-color: ${colors.messages.errorLight.displayValue};
    |  padding: 1pt 2pt;
    |  border: 1pt solid ${colors.messages.error.displayValue};
    |}
    |""".stripMargin
}
object FOStyles {
  // Virtual-tree location where the generated stylesheet is placed.
  val defaultPath: Path = Root / "styles.fo.css"
}
| planet42/Laika | io/src/main/scala/laika/helium/generate/FOStyles.scala | Scala | apache-2.0 | 11,635 |
fmap(f)(alpha(h)) == alpha(f compose h) | hmemcpy/milewski-ctfp-pdf | src/content/2.4/code/scala/snippet08.scala | Scala | gpl-3.0 | 39 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.history
import java.io.{File, FileInputStream, FileWriter, InputStream, IOException}
import java.net.{HttpURLConnection, URL}
import java.nio.charset.StandardCharsets
import java.util.zip.ZipInputStream
import javax.servlet._
import javax.servlet.http.{HttpServletRequest, HttpServletRequestWrapper, HttpServletResponse}
import scala.collection.JavaConverters._
import scala.concurrent.duration._
import com.google.common.io.{ByteStreams, Files}
import org.apache.commons.io.{FileUtils, IOUtils}
import org.apache.hadoop.fs.{FileStatus, FileSystem, Path}
import org.eclipse.jetty.proxy.ProxyServlet
import org.eclipse.jetty.servlet.{ServletContextHandler, ServletHolder}
import org.json4s.JsonAST._
import org.json4s.jackson.JsonMethods
import org.json4s.jackson.JsonMethods._
import org.mockito.Mockito._
import org.openqa.selenium.WebDriver
import org.openqa.selenium.htmlunit.HtmlUnitDriver
import org.scalatest.{BeforeAndAfter, Matchers}
import org.scalatest.concurrent.Eventually
import org.scalatestplus.mockito.MockitoSugar
import org.scalatestplus.selenium.WebBrowser
import org.apache.spark._
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.History._
import org.apache.spark.internal.config.Tests.IS_TESTING
import org.apache.spark.internal.config.UI._
import org.apache.spark.status.api.v1.ApplicationInfo
import org.apache.spark.status.api.v1.JobData
import org.apache.spark.ui.SparkUI
import org.apache.spark.util.{ResetSystemProperties, ShutdownHookManager, Utils}
/**
* A collection of tests against the historyserver, including comparing responses from the json
* metrics api to a set of known "golden files". If new endpoints / parameters are added,
* cases should be added to this test suite. The expected outcomes can be generated by running
* the HistoryServerSuite.main. Note that this will blindly generate new expectation files matching
* the current behavior -- the developer must verify that behavior is correct.
*
* Similarly, if the behavior is changed, HistoryServerSuite.main can be run to update the
* expectations. However, in general this should be done with extreme caution, as the metrics
* are considered part of Spark's public api.
*/
class HistoryServerSuite extends SparkFunSuite with BeforeAndAfter with Matchers with MockitoSugar
with JsonTestUtils with Eventually with WebBrowser with LocalSparkContext
with ResetSystemProperties {
  // Directory containing the recorded Spark event logs the server replays.
  private val logDir = getTestResourcePath("spark-events")
  // Root of the golden expectation files the JSON responses are compared against.
  private val expRoot = getTestResourceFile("HistoryServerExpectations")
  // Scratch directory for the server's local disk store; recreated by init().
  private val storeDir = Utils.createTempDir(namePrefix = "history")
  // Populated by init(); null / -1 until a server is running.
  private var provider: FsHistoryProvider = null
  private var server: HistoryServer = null
  private var port: Int = -1
def init(extraConf: (String, String)*): Unit = {
Utils.deleteRecursively(storeDir)
assert(storeDir.mkdir())
val conf = new SparkConf()
.set(HISTORY_LOG_DIR, logDir)
.set(UPDATE_INTERVAL_S.key, "0")
.set(IS_TESTING, true)
.set(LOCAL_STORE_DIR, storeDir.getAbsolutePath())
.set(EVENT_LOG_STAGE_EXECUTOR_METRICS, true)
.set(EXECUTOR_PROCESS_TREE_METRICS_ENABLED, true)
conf.setAll(extraConf)
provider = new FsHistoryProvider(conf)
provider.checkForLogs()
val securityManager = HistoryServer.createSecurityManager(conf)
server = new HistoryServer(conf, provider, securityManager, 18080)
server.initialize()
server.bind()
provider.start()
port = server.boundPort
}
  /** Stops the running server and clears the reference so `before` restarts it. */
  def stop(): Unit = {
    server.stop()
    server = null
  }
  // (Re)start the server before each test if a previous test stopped it.
  before {
    if (server == null) {
      init()
    }
  }
  // (test name, REST API path) pairs; each response is compared against a golden
  // file named after the sanitized test name under HistoryServerExpectations.
  val cases = Seq(
    "application list json" -> "applications",
    "completed app list json" -> "applications?status=completed",
    "running app list json" -> "applications?status=running",
    "minDate app list json" -> "applications?minDate=2015-02-10",
    "maxDate app list json" -> "applications?maxDate=2015-02-10",
    "maxDate2 app list json" -> "applications?maxDate=2015-02-03T16:42:40.000GMT",
    "minEndDate app list json" -> "applications?minEndDate=2015-05-06T13:03:00.950GMT",
    "maxEndDate app list json" -> "applications?maxEndDate=2015-05-06T13:03:00.950GMT",
    "minEndDate and maxEndDate app list json" ->
      "applications?minEndDate=2015-03-16&maxEndDate=2015-05-06T13:03:00.950GMT",
    "minDate and maxEndDate app list json" ->
      "applications?minDate=2015-03-16&maxEndDate=2015-05-06T13:03:00.950GMT",
    "limit app list json" -> "applications?limit=3",
    "one app json" -> "applications/local-1422981780767",
    "one app multi-attempt json" -> "applications/local-1426533911241",
    "job list json" -> "applications/local-1422981780767/jobs",
    "job list from multi-attempt app json(1)" -> "applications/local-1426533911241/1/jobs",
    "job list from multi-attempt app json(2)" -> "applications/local-1426533911241/2/jobs",
    "one job json" -> "applications/local-1422981780767/jobs/0",
    "succeeded job list json" -> "applications/local-1422981780767/jobs?status=succeeded",
    "succeeded&failed job list json" ->
      "applications/local-1422981780767/jobs?status=succeeded&status=failed",
    "executor list json" -> "applications/local-1422981780767/executors",
    "executor list with executor metrics json" ->
      "applications/application_1553914137147_0018/executors",
    "stage list json" -> "applications/local-1422981780767/stages",
    "complete stage list json" -> "applications/local-1422981780767/stages?status=complete",
    "failed stage list json" -> "applications/local-1422981780767/stages?status=failed",
    "one stage json" -> "applications/local-1422981780767/stages/1",
    "one stage attempt json" -> "applications/local-1422981780767/stages/1/0",
    "stage task summary w shuffle write"
      -> "applications/local-1430917381534/stages/0/0/taskSummary",
    "stage task summary w shuffle read"
      -> "applications/local-1430917381534/stages/1/0/taskSummary",
    "stage task summary w/ custom quantiles" ->
      "applications/local-1430917381534/stages/0/0/taskSummary?quantiles=0.01,0.5,0.99",
    "stage task list" -> "applications/local-1430917381534/stages/0/0/taskList",
    "stage task list w/ offset & length" ->
      "applications/local-1430917381534/stages/0/0/taskList?offset=10&length=50",
    "stage task list w/ sortBy" ->
      "applications/local-1430917381534/stages/0/0/taskList?sortBy=DECREASING_RUNTIME",
    "stage task list w/ sortBy short names: -runtime" ->
      "applications/local-1430917381534/stages/0/0/taskList?sortBy=-runtime",
    "stage task list w/ sortBy short names: runtime" ->
      "applications/local-1430917381534/stages/0/0/taskList?sortBy=runtime",
    "stage list with accumulable json" -> "applications/local-1426533911241/1/stages",
    "stage with accumulable json" -> "applications/local-1426533911241/1/stages/0/0",
    "stage task list from multi-attempt app json(1)" ->
      "applications/local-1426533911241/1/stages/0/0/taskList",
    "stage task list from multi-attempt app json(2)" ->
      "applications/local-1426533911241/2/stages/0/0/taskList",
    "blacklisting for stage" -> "applications/app-20180109111548-0000/stages/0/0",
    "blacklisting node for stage" -> "applications/application_1516285256255_0012/stages/0/0",
    "rdd list storage json" -> "applications/local-1422981780767/storage/rdd",
    "executor node blacklisting" -> "applications/app-20161116163331-0000/executors",
    "executor node blacklisting unblacklisting" -> "applications/app-20161115172038-0000/executors",
    "executor memory usage" -> "applications/app-20161116163331-0000/executors",
    "executor resource information" -> "applications/application_1555004656427_0144/executors",
    "app environment" -> "applications/app-20161116163331-0000/environment",
    // Enable "spark.eventLog.logBlockUpdates.enabled", to get the storage information
    // in the history server.
    "one rdd storage json" -> "applications/local-1422981780767/storage/rdd/0"
  )
// run a bunch of characterization tests -- just verify the behavior is the same as what is saved
// in the test resource folder
// One ScalaTest case is registered per (name, path) entry in `cases`; the golden files are
// regenerated by HistoryServerSuite.main below.
cases.foreach { case (name, path) =>
test(name) {
// hit the REST endpoint and require a successful response with a JSON body and no error body
val (code, jsonOpt, errOpt) = getContentAndCode(path)
code should be (HttpServletResponse.SC_OK)
jsonOpt should be (Symbol("defined"))
errOpt should be (None)
// load the saved expectation file for this test name (sanitized to a safe file name)
val exp = IOUtils.toString(new FileInputStream(
new File(expRoot, HistoryServerSuite.sanitizePath(name) + "_expectation.json")))
// compare the ASTs so formatting differences don't cause failures
import org.json4s._
import org.json4s.jackson.JsonMethods._
// lastUpdated is derived from the event log file's mtime, so it is blanked before comparing
val jsonAst = parse(clearLastUpdated(jsonOpt.get))
val expAst = parse(exp)
assertValidDataInJson(jsonAst, expAst)
}
}
// SPARK-10873 added the lastUpdated field for each application's attempt,
// the REST API returns the last modified time of EVENT LOG file for this field.
// It is not applicable to hard-code this dynamic field in a static expected file,
// so here we skip checking the lastUpdated field's value (setting it as "").
// Returns the input unchanged when no "lastUpdated" key is present.
private def clearLastUpdated(json: String): String = {
if (json.indexOf("lastUpdated") >= 0) {
// Naive comma split: assumes the lastUpdated values themselves contain no commas.
val subStrings = json.split(",")
for (i <- subStrings.indices) {
if (subStrings(i).indexOf("lastUpdatedEpoch") >= 0) {
// zero out the numeric epoch value
// NOTE(review): backslashes here look double-escaped relative to valid Scala —
// verify against the original source file.
subStrings(i) = subStrings(i).replaceAll("(\\\\d+)", "0")
} else if (subStrings(i).indexOf("lastUpdated") >= 0) {
// blank out the string-valued timestamp
val regex = "\\"lastUpdated\\"\\\\s*:\\\\s*\\".*\\"".r
subStrings(i) = regex.replaceAllIn(subStrings(i), "\\"lastUpdated\\" : \\"\\"")
}
}
subStrings.mkString(",")
} else {
json
}
}
// Zip download of event logs: with no attempt id, all attempts are bundled together.
test("download all logs for app with multiple attempts") {
doDownloadTest("local-1430917381535", None)
}
// With an explicit attempt id, each attempt's log can be downloaded individually.
test("download one log for app with multiple attempts") {
(1 to 2).foreach { attemptId => doDownloadTest("local-1430917381535", Some(attemptId)) }
}
// Downloads the event logs for the given app (optionally a single attempt) as a zip
// over the REST API and verifies each zip entry matches the file on disk in logDir.
def doDownloadTest(appId: String, attemptId: Option[Int]): Unit = {

  val base = generateURL(s"applications/$appId")
  val url = attemptId match {
    case Some(id) => new URL(s"$base/$id/logs")
    case None => new URL(s"$base/logs")
  }

  val (code, inputStream, error) = HistoryServerSuite.connectAndGetInputStream(url)
  code should be (HttpServletResponse.SC_OK)
  inputStream should not be None
  error should be (None)

  val zipStream = new ZipInputStream(inputStream.get)
  var entry = zipStream.getNextEntry
  entry should not be null

  // One file per attempt: a single attempt when an id was requested, both otherwise.
  val totalFiles = if (attemptId.isDefined) 1 else 2
  var filesCompared = 0
  while (entry != null) {
    if (!entry.isDirectory) {
      val onDisk = new File(logDir, entry.getName)
      val expected = Files.toString(onDisk, StandardCharsets.UTF_8)
      val actual = new String(ByteStreams.toByteArray(zipStream), StandardCharsets.UTF_8)
      actual should be (expected)
      filesCompared += 1
    }
    entry = zipStream.getNextEntry
  }
  filesCompared should be (totalFiles)
}
// Negative-path checks: unknown ids and malformed parameters must map to 404/400
// with the expected error message in the error body (tuple position _3).
test("response codes on bad paths") {
val badAppId = getContentAndCode("applications/foobar")
badAppId._1 should be (HttpServletResponse.SC_NOT_FOUND)
badAppId._3 should be (Some("unknown app: foobar"))
val badStageId = getContentAndCode("applications/local-1422981780767/stages/12345")
badStageId._1 should be (HttpServletResponse.SC_NOT_FOUND)
badStageId._3 should be (Some("unknown stage: 12345"))
val badStageAttemptId = getContentAndCode("applications/local-1422981780767/stages/1/1")
badStageAttemptId._1 should be (HttpServletResponse.SC_NOT_FOUND)
badStageAttemptId._3 should be (Some("unknown attempt for stage 1. Found attempts: [0]"))
// a non-numeric stage id: only the status code is checked, not the message
val badStageId2 = getContentAndCode("applications/local-1422981780767/stages/flimflam")
badStageId2._1 should be (HttpServletResponse.SC_NOT_FOUND)
// will take some mucking w/ jersey to get a better error msg in this case
val badQuantiles = getContentAndCode(
"applications/local-1430917381534/stages/0/0/taskSummary?quantiles=foo,0.1")
badQuantiles._1 should be (HttpServletResponse.SC_BAD_REQUEST)
badQuantiles._3 should be (Some("Bad value for parameter \\"quantiles\\". Expected a double, " +
"got \\"foo\\""))
getContentAndCode("foobar")._1 should be (HttpServletResponse.SC_NOT_FOUND)
}
test("automatically retrieve uiRoot from request through Knox") {
assert(sys.props.get("spark.ui.proxyBase").isEmpty,
"spark.ui.proxyBase is defined but it should not for this UT")
assert(sys.env.get("APPLICATION_WEB_PROXY_BASE").isEmpty,
"APPLICATION_WEB_PROXY_BASE is defined but it should not for this UT")
val page = new HistoryPage(server)
val requestThroughKnox = mock[HttpServletRequest]
val knoxBaseUrl = "/gateway/default/sparkhistoryui"
when(requestThroughKnox.getHeader("X-Forwarded-Context")).thenReturn(knoxBaseUrl)
val responseThroughKnox = page.render(requestThroughKnox)
val urlsThroughKnox = responseThroughKnox \\\\ "@href" map (_.toString)
val siteRelativeLinksThroughKnox = urlsThroughKnox filter (_.startsWith("/"))
all (siteRelativeLinksThroughKnox) should startWith (knoxBaseUrl)
val directRequest = mock[HttpServletRequest]
val directResponse = page.render(directRequest)
val directUrls = directResponse \\\\ "@href" map (_.toString)
val directSiteRelativeLinks = directUrls filter (_.startsWith("/"))
all (directSiteRelativeLinks) should not startWith (knoxBaseUrl)
}
// Statically rendered links must be prefixed with spark.ui.proxyBase when it is set.
test("static relative links are prefixed with uiRoot (spark.ui.proxyBase)") {
val uiRoot = Option(System.getenv("APPLICATION_WEB_PROXY_BASE")).getOrElse("/testwebproxybase")
val page = new HistoryPage(server)
val request = mock[HttpServletRequest]
// when
System.setProperty("spark.ui.proxyBase", uiRoot)
val response = page.render(request)
// then
// every site-relative href in the rendered page carries the proxy-base prefix
val urls = response \\\\ "@href" map (_.toString)
val siteRelativeLinks = urls filter (_.startsWith("/"))
all (siteRelativeLinks) should startWith (uiRoot)
}
// The /version endpoint must report the running Spark version.
test("/version api endpoint") {
val response = getUrl("version")
assert(response.contains(SPARK_VERSION))
}
// Verifies that links generated client-side (via javascript/ajax) also honor
// spark.ui.proxyBase. A Jetty ProxyServlet mounted at uiRoot strips the prefix and
// forwards to the real handlers, then an HtmlUnit browser loads the proxied page.
test("ajax rendered relative links are prefixed with uiRoot (spark.ui.proxyBase)") {
val uiRoot = "/testwebproxybase"
System.setProperty("spark.ui.proxyBase", uiRoot)
// restart the server so it picks up the proxy-base property
stop()
init()
val port = server.boundPort
val servlet = new ProxyServlet {
override def rewriteTarget(request: HttpServletRequest): String = {
// servlet acts like a proxy that redirects calls made on
// spark.ui.proxyBase context path to the normal servlet handlers operating off "/"
val sb = request.getRequestURL()
if (request.getQueryString() != null) {
sb.append(s"?${request.getQueryString()}")
}
// drop the uiRoot segment from the requested URL before forwarding
val proxyidx = sb.indexOf(uiRoot)
sb.delete(proxyidx, proxyidx + uiRoot.length).toString
}
}
val contextHandler = new ServletContextHandler
val holder = new ServletHolder(servlet)
contextHandler.setContextPath(uiRoot)
contextHandler.addServlet(holder, "/")
server.attachHandler(contextHandler)
// JS-enabled headless browser so the ajax table actually renders
implicit val webDriver: WebDriver = new HtmlUnitDriver(true)
try {
val url = s"http://localhost:$port"
go to s"$url$uiRoot"
// expect the ajax call to finish in 5 seconds
implicitlyWait(org.scalatest.time.Span(5, org.scalatest.time.Seconds))
// once this findAll call returns, we know the ajax load of the table completed
findAll(ClassNameQuery("odd"))
// collect absolute links pointing back at this server
val links = findAll(TagNameQuery("a"))
.map(_.attribute("href"))
.filter(_.isDefined)
.map(_.get)
.filter(_.startsWith(url)).toList
// there are at least some URL links that were generated via javascript,
// and they all contain the spark.ui.proxyBase (uiRoot)
links.length should be > 4
all(links) should startWith(url + uiRoot)
} finally {
contextHandler.stop()
quit()
}
}
/**
* Verify that the security manager needed for the history server can be instantiated
* when `spark.authenticate` is `true`, rather than raise an `IllegalArgumentException`.
*/
test("security manager starts with spark.authenticate set") {
val conf = new SparkConf()
.set(IS_TESTING, true)
.set(SecurityManager.SPARK_AUTH_CONF, "true")
// the test passes if this does not throw
HistoryServer.createSecurityManager(conf)
}
// End-to-end test: an application that is still running (incomplete) must be picked up
// by the provider's background refresh thread, its job counts must update as new jobs
// run, and it must transition to "completed" once the SparkContext stops.
test("incomplete apps get refreshed") {
implicit val webDriver: WebDriver = new HtmlUnitDriver
implicit val formats = org.json4s.DefaultFormats
// this test dir is explicitly deleted on successful runs; retained for diagnostics when
// not
val logDir = Utils.createDirectory(System.getProperty("java.io.tmpdir", "logs"))
// a new conf is used with the background thread set and running at its fastest
// allowed refresh rate (1Hz)
stop()
val myConf = new SparkConf()
.set(HISTORY_LOG_DIR, logDir.getAbsolutePath)
.set(EVENT_LOG_DIR, logDir.getAbsolutePath)
.set(UPDATE_INTERVAL_S.key, "1s")
.set(EVENT_LOG_ENABLED, true)
.set(LOCAL_STORE_DIR, storeDir.getAbsolutePath())
.remove(IS_TESTING)
val provider = new FsHistoryProvider(myConf)
val securityManager = HistoryServer.createSecurityManager(myConf)
sc = new SparkContext("local", "test", myConf)
val logDirUri = logDir.toURI
val logDirPath = new Path(logDirUri)
val fs = FileSystem.get(logDirUri, sc.hadoopConfiguration)
// recursively list the event-log directory (for failure diagnostics)
def listDir(dir: Path): Seq[FileStatus] = {
val statuses = fs.listStatus(dir)
statuses.flatMap(
stat => if (stat.isDirectory) listDir(stat.getPath) else Seq(stat))
}
// debug-log the current log dir contents; no-op unless debug logging is on
def dumpLogDir(msg: String = ""): Unit = {
if (log.isDebugEnabled) {
logDebug(msg)
listDir(logDirPath).foreach { status =>
val s = status.toString
logDebug(s)
}
}
}
server = new HistoryServer(myConf, provider, securityManager, 0)
server.initialize()
server.bind()
provider.start()
val port = server.boundPort
val metrics = server.cacheMetrics
// build a URL for an app or app/attempt plus a page underneath
def buildURL(appId: String, suffix: String): URL = {
new URL(s"http://localhost:$port/history/$appId$suffix")
}
// build a rest URL for the application and suffix.
def applications(appId: String, suffix: String): URL = {
new URL(s"http://localhost:$port/api/v1/applications/$appId$suffix")
}
// start initial job
val d = sc.parallelize(1 to 10)
d.count()
val stdInterval = interval(100.milliseconds)
// poll until the background scanner surfaces the single running application
val appId = eventually(timeout(20.seconds), stdInterval) {
val json = getContentAndCode("applications", port)._2.get
val apps = parse(json).asInstanceOf[JArray].arr
apps should have size 1
(apps.head \\ "id").extract[String]
}
val appIdRoot = buildURL(appId, "")
val rootAppPage = HistoryServerSuite.getUrl(appIdRoot)
logDebug(s"$appIdRoot ->[${rootAppPage.length}] \\n$rootAppPage")
// sanity check to make sure filter is chaining calls
rootAppPage should not be empty
def getAppUI: SparkUI = {
server.withSparkUI(appId, None) { ui => ui }
}
// selenium isn't that useful on failures...add our own reporting
// counts rows in the rendered jobs table via the headless browser
def getNumJobs(suffix: String): Int = {
val target = buildURL(appId, suffix)
val targetBody = HistoryServerSuite.getUrl(target)
try {
go to target.toExternalForm
findAll(cssSelector("tbody tr")).toIndexedSeq.size
} catch {
case ex: Exception =>
throw new Exception(s"Against $target\\n$targetBody", ex)
}
}
// use REST API to get #of jobs
def getNumJobsRestful(): Int = {
val json = HistoryServerSuite.getUrl(applications(appId, "/jobs"))
val jsonAst = parse(json)
val jobList = jsonAst.asInstanceOf[JArray]
jobList.values.size
}
// get a list of app Ids of all apps in a given state. REST API
def listApplications(completed: Boolean): Seq[String] = {
val json = parse(HistoryServerSuite.getUrl(applications("", "")))
logDebug(s"${JsonMethods.pretty(json)}")
json match {
case JNothing => Seq()
case apps: JArray =>
// keep only apps whose first attempt's "completed" flag matches the request
apps.children.filter(app => {
(app \\ "attempts") match {
case attempts: JArray =>
val state = (attempts.children.head \\ "completed").asInstanceOf[JBool]
state.value == completed
case _ => false
}
}).map(app => (app \\ "id").asInstanceOf[JString].values)
case _ => Seq()
}
}
def completedJobs(): Seq[JobData] = {
getAppUI.store.jobsList(List(JobExecutionStatus.SUCCEEDED).asJava)
}
def activeJobs(): Seq[JobData] = {
getAppUI.store.jobsList(List(JobExecutionStatus.RUNNING).asJava)
}
def isApplicationCompleted(appInfo: ApplicationInfo): Boolean = {
appInfo.attempts.nonEmpty && appInfo.attempts.head.completed
}
// initial state: one finished job, visible via UI, REST and the KV store
activeJobs() should have size 0
completedJobs() should have size 1
getNumJobs("") should be (1)
getNumJobs("/jobs") should be (1)
getNumJobsRestful() should be (1)
assert(metrics.lookupCount.getCount > 0, s"lookup count too low in $metrics")
// dump state before the next bit of test, which is where update
// checking really gets stressed
dumpLogDir("filesystem before executing second job")
logDebug(s"History Server: $server")
val d2 = sc.parallelize(1 to 10)
d2.count()
dumpLogDir("After second job")
val stdTimeout = timeout(10.seconds)
logDebug("waiting for UI to update")
// the refresh thread should surface the second job in all three views
eventually(stdTimeout, stdInterval) {
assert(2 === getNumJobs(""),
s"jobs not updated, server=$server\\n dir = ${listDir(logDirPath)}")
assert(2 === getNumJobs("/jobs"),
s"job count under /jobs not updated, server=$server\\n dir = ${listDir(logDirPath)}")
getNumJobsRestful() should be(2)
}
// two more jobs back to back; total job count should reach 4
d.count()
d.count()
eventually(stdTimeout, stdInterval) {
assert(4 === getNumJobsRestful(), s"two jobs back-to-back not updated, server=$server\\n")
}
val jobcount = getNumJobs("/jobs")
assert(!isApplicationCompleted(provider.getListing().next))
listApplications(false) should contain(appId)
// stop the spark context
resetSparkContext()
// check the app is now found as completed
eventually(stdTimeout, stdInterval) {
assert(isApplicationCompleted(provider.getListing().next),
s"application never completed, server=$server\\n")
}
// app becomes observably complete
eventually(stdTimeout, stdInterval) {
listApplications(true) should contain (appId)
}
// app is no longer incomplete
listApplications(false) should not contain(appId)
// job count must be unchanged by the completion transition
assert(jobcount === getNumJobs("/jobs"))
// no need to retain the test dir now the tests complete
ShutdownHookManager.registerShutdownDeleteDir(logDir)
}
// ACL enforcement: with UI ACLs enabled, the app owner and admin get 200, an
// unrelated user gets 403, and a null remote user behaves as if auth were disabled.
test("ui and api authorization checks") {
val appId = "local-1430917381535"
val owner = "irashid"
val admin = "root"
val other = "alice"
// restart the server with the fake auth filter and ACLs enabled
stop()
init(
UI_FILTERS.key -> classOf[FakeAuthFilter].getName(),
HISTORY_SERVER_UI_ACLS_ENABLE.key -> "true",
HISTORY_SERVER_UI_ADMIN_ACLS.key -> admin)
val tests = Seq(
(owner, HttpServletResponse.SC_OK),
(admin, HttpServletResponse.SC_OK),
(other, HttpServletResponse.SC_FORBIDDEN),
// When the remote user is null, the code behaves as if auth were disabled.
(null, HttpServletResponse.SC_OK))
val port = server.boundPort
// cover both UI pages and REST endpoints, including log downloads per attempt
val testUrls = Seq(
s"http://localhost:$port/api/v1/applications/$appId/1/jobs",
s"http://localhost:$port/history/$appId/1/jobs/",
s"http://localhost:$port/api/v1/applications/$appId/logs",
s"http://localhost:$port/api/v1/applications/$appId/1/logs",
s"http://localhost:$port/api/v1/applications/$appId/2/logs")
tests.foreach { case (user, expectedCode) =>
testUrls.foreach { url =>
// the FakeAuthFilter reads this header to set the remote user
val headers = if (user != null) Seq(FakeAuthFilter.FAKE_HTTP_USER -> user) else Nil
val sc = TestUtils.httpResponseCode(new URL(url), headers = headers)
assert(sc === expectedCode, s"Unexpected status code $sc for $url (user = $user)")
}
}
}
// Accessing /history/<app> without an attempt id must 302-redirect to the last
// attempt; an explicit attempt id redirects to itself (trailing slash added).
test("access history application defaults to the last attempt id") {
// issue a GET without following redirects and return (status, Location header)
def getRedirectUrl(url: URL): (Int, String) = {
val connection = url.openConnection().asInstanceOf[HttpURLConnection]
connection.setRequestMethod("GET")
connection.setUseCaches(false)
connection.setDefaultUseCaches(false)
connection.setInstanceFollowRedirects(false)
connection.connect()
val code = connection.getResponseCode()
val location = connection.getHeaderField("Location")
(code, location)
}
def buildPageAttemptUrl(appId: String, attemptId: Option[Int]): URL = {
attemptId match {
case Some(id) =>
new URL(s"http://localhost:$port/history/$appId/$id")
case None =>
new URL(s"http://localhost:$port/history/$appId")
}
}
// single-attempt app: just check it loads without an attempt id
val oneAttemptAppId = "local-1430917381534"
HistoryServerSuite.getUrl(buildPageAttemptUrl(oneAttemptAppId, None))
val multiAttemptAppid = "local-1430917381535"
val lastAttemptId = Some(2)
val lastAttemptUrl = buildPageAttemptUrl(multiAttemptAppid, lastAttemptId)
Seq(None, Some(1), Some(2)).foreach { attemptId =>
val url = buildPageAttemptUrl(multiAttemptAppid, attemptId)
val (code, location) = getRedirectUrl(url)
assert(code === 302, s"Unexpected status code $code for $url")
attemptId match {
case None =>
// no attempt id: redirected to the last attempt
assert(location.stripSuffix("/") === lastAttemptUrl.toString)
case _ =>
assert(location.stripSuffix("/") === url.toString)
}
// the redirect target itself must load successfully
HistoryServerSuite.getUrl(new URL(location))
}
}
/** GETs the given api/v1 path on the given port; returns (status code, body, error body). */
def getContentAndCode(path: String, port: Int = port): (Int, Option[String], Option[String]) = {
  val endpoint = new URL(s"http://localhost:$port/api/v1/$path")
  HistoryServerSuite.getContentAndCode(endpoint)
}
/** GETs the given api/v1 path and returns the response body; fails on a non-200 status. */
def getUrl(path: String): String = HistoryServerSuite.getUrl(generateURL(path))
/** Builds the api/v1 URL for the given path on this suite's server port. */
def generateURL(path: String): URL = {
  val spec = s"http://localhost:$port/api/v1/$path"
  new URL(spec)
}
// Fetches the current response for `path` and writes it (with lastUpdated blanked)
// as the golden expectation file for test `name`. Used only by main() below.
def generateExpectation(name: String, path: String): Unit = {
val json = getUrl(path)
val file = new File(expRoot, HistoryServerSuite.sanitizePath(name) + "_expectation.json")
val out = new FileWriter(file)
out.write(clearLastUpdated(json))
out.write('\\n')
out.close()
}
}
// Companion: HTTP helpers shared by the suite, plus a main() that regenerates the
// golden expectation files used by the characterization tests.
object HistoryServerSuite {
def main(args: Array[String]): Unit = {
// generate the "expected" results for the characterization tests. Just blindly assume the
// current behavior is correct, and write out the returned json to the test/resource files
val suite = new HistoryServerSuite
FileUtils.deleteDirectory(suite.expRoot)
suite.expRoot.mkdirs()
try {
suite.init()
suite.cases.foreach { case (name, path) =>
suite.generateExpectation(name, path)
}
} finally {
suite.stop()
}
}
// GET a URL; returns (status code, body if readable, error body if readable)
def getContentAndCode(url: URL): (Int, Option[String], Option[String]) = {
val (code, in, errString) = connectAndGetInputStream(url)
val inString = in.map(IOUtils.toString)
(code, inString, errString)
}
// Open an HTTP connection; both streams are optional since either may throw on error codes.
def connectAndGetInputStream(url: URL): (Int, Option[InputStream], Option[String]) = {
val connection = url.openConnection().asInstanceOf[HttpURLConnection]
connection.setRequestMethod("GET")
connection.connect()
val code = connection.getResponseCode()
val inStream = try {
Option(connection.getInputStream())
} catch {
case io: IOException => None
}
val errString = try {
val err = Option(connection.getErrorStream())
err.map(IOUtils.toString)
} catch {
case io: IOException => None
}
(code, inStream, errString)
}
// Replace non-word characters so a test name becomes a safe file name.
def sanitizePath(path: String): String = {
// this doesn't need to be perfect, just good enough to avoid collisions
path.replaceAll("\\\\W", "_")
}
// GET a URL, returning the body; any non-200 status is turned into an exception.
def getUrl(path: URL): String = {
val (code, resultOpt, error) = getContentAndCode(path)
if (code == 200) {
resultOpt.get
} else {
throw new RuntimeException(
"got code: " + code + " when getting " + path + " w/ error: " + error)
}
}
}
/**
 * A servlet filter used only for auth tests: it reports the value of the "HTTP_USER"
 * request header as the remote user, so tests can impersonate arbitrary users.
 */
class FakeAuthFilter extends Filter {

  override def destroy(): Unit = { }

  override def init(config: FilterConfig): Unit = { }

  override def doFilter(req: ServletRequest, res: ServletResponse, chain: FilterChain): Unit = {
    val httpReq = req.asInstanceOf[HttpServletRequest]
    // Wrap the request so downstream handlers see the header value as the remote user.
    chain.doFilter(new HttpServletRequestWrapper(httpReq) {
      override def getRemoteUser(): String = httpReq.getHeader(FakeAuthFilter.FAKE_HTTP_USER)
    }, res)
  }
}
object FakeAuthFilter {
// name of the request header carrying the fake remote user in tests
val FAKE_HTTP_USER = "HTTP_USER"
}
| goldmedal/spark | core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala | Scala | apache-2.0 | 30,265 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.impl.external
import slamdata.Predef.String
import scala.AnyVal
/** Value-class wrapper around a class name string; AnyVal avoids runtime allocation. */
final case class ClassName(value: String) extends AnyVal
| djspiewak/quasar | impl/src/main/scala/quasar/impl/external/ClassName.scala | Scala | apache-2.0 | 735 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogdebugger.ui.components
import swing._
/** A panel showing an indeterminate progress bar at its center, with an optional
  * label above it.
  *
  * Place this panel where a slow-to-initialize component will eventually go, and
  * swap it out once that component is ready. */
class LoadingAnimationPanel(label: String) extends BoxPanel(Orientation.Vertical) {

  /** Builds a panel without a label. */
  def this() = this(null)

  contents += Swing.VGlue
  // The label is optional; callers may pass null or use the no-arg constructor.
  Option(label).foreach { text =>
    contents += new Label(text) {
      horizontalAlignment = Alignment.Center
      peer.setAlignmentX(0.5f)
    }
  }
  // Indeterminate Swing progress bar, centered horizontally with glue on both sides.
  val hBox = new BoxPanel(Orientation.Horizontal)
  val spinner = Component.wrap(new javax.swing.JProgressBar() { setIndeterminate(true) })
  hBox.contents ++= Seq(Swing.HGlue, spinner, Swing.HGlue)
  contents += hBox
  contents += Swing.VGlue
}
| hpe-cct/cct-core | src/main/scala/cogdebugger/ui/components/LoadingAnimationPanel.scala | Scala | apache-2.0 | 1,524 |
package at.forsyte.apalache.tla.bmcmt.analyses
import at.forsyte.apalache.tla.lir._
import at.forsyte.apalache.tla.lir.oper.TlaBoolOper
import com.google.inject.Inject
import com.typesafe.scalalogging.LazyLogging
/**
 * <p>This analysis finds formulas of specific structure and labels them with hints.
 * Conjunctions that are reachable from the root only through other conjunctions and
 * quantifiers ("top-level" conjunctions) are labelled with a
 * [[FormulaHintsStore.HighAnd]] hint.</p>
 *
 * <p>This class will be probably removed in the future, as lazy circuiting with an
 * incremental solver gives us a speed-up only on relatively small instances.</p>
 *
 * @author Igor Konnov
 */
class HintFinder @Inject()(val hintsStore: FormulaHintsStoreImpl) extends LazyLogging {

  /** Recursively descend through quantifiers and conjunctions, hinting each conjunction. */
  def introHints(ex: TlaEx): Unit = ex match {
    // descend under either quantifier without labelling it
    case OperEx(TlaBoolOper.exists | TlaBoolOper.forall, _, _, quantifiedEx) =>
      introHints(quantifiedEx)

    // a top-level conjunction: label it and continue into its arguments
    case OperEx(TlaBoolOper.and, args@_*) =>
      hintsStore.store.put(ex.ID, FormulaHintsStore.HighAnd())
      args foreach introHints

    // any other operator stops the traversal
    case _ =>
      ()
  }
}
| konnov/dach | tla-bmcmt/src/main/scala/at/forsyte/apalache/tla/bmcmt/analyses/HintFinder.scala | Scala | apache-2.0 | 1,170 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.nn.Graph.ModuleNode
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.nn.tf.ControlDependency
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.{Node, Util}
import scala.reflect.ClassTag
/**
* A graph container. The modules in the container are connected as a DAG graph.
*
* Nodes are executed in a fixed topological order computed once at construction
* (forward) and once per buildBackwardGraph call (backward).
*
* @param _inputs inputs modules, user can feed data into these modules in the forward method
* @param _outputs output modules
* @param _variables optional (weights, gradients) arrays shared with the parent Graph
* @param enableExcludeChecking whether to reject layers not supported in a static graph
* @tparam T Numeric type. Only support float/double now
*/
class StaticGraph[T: ClassTag](
private val _inputs : Seq[ModuleNode[T]],
private val _outputs : Seq[ModuleNode[T]],
private val _variables: Option[(Array[Tensor[T]], Array[Tensor[T]])] = None,
private val enableExcludeChecking: Boolean = true
)(implicit ev: TensorNumeric[T]) extends Graph[T](_inputs, _outputs, _variables) {
// nodes in execution order: topologySort is reversed so producers run before consumers
private val forwardExecution = forwardGraph.topologySort.reverse
private var backwardExecution: Array[Node[AbstractModule[Activity, Activity, T]]] = _
// per-node input resolved during forward, reused by backward/accGradParameters
private val inputCache = new Array[Activity](forwardExecution.length)
// maps index in backwardExecution to the matching node's index in forwardExecution
private var backId2ForwardId: Array[Int] = _
// per-node gradOutput resolved during backward, reused by accGradParameters
private var gradOutputCache: Array[Activity] = _
if (enableExcludeChecking) {
excludeInvalidLayers(forwardExecution.map {_.element})
}
buildBackwardGraph()
// Runs every node in topological order, caching each node's input for the backward pass.
override def updateOutput(input: Activity): Activity = {
var i = 0
while(i < forwardExecution.length) {
val node = forwardExecution(i)
val nodeInput = findInput(node, input)
inputCache(i) = nodeInput
node.element.forward(nodeInput)
i += 1
}
output = dummyOutput.element.output
output
}
// Full backward (gradInput + parameter gradients), timed into backwardTime.
override def backward(input: Activity, gradOutput: Activity): Activity = {
val before = System.nanoTime()
val gradients = backwardExecution(input, gradOutput, true)
backwardTime += System.nanoTime() - before
gradients
}
// Gradient w.r.t. input only; parameter gradients are not accumulated here.
override def updateGradInput(input: Activity, gradOutput: Activity): Activity = {
backwardExecution(input, gradOutput, false)
}
// Builds the backward execution order and the backward->forward index mapping.
// Nodes are matched by name, so node names must be unique across the graph.
override def buildBackwardGraph(): this.type = {
super.buildBackwardGraph()
backwardExecution = backwardGraph.topologySort.reverse
backId2ForwardId = new Array[Int](backwardExecution.length)
gradOutputCache = new Array[Activity](backwardExecution.length)
var i = 0
while(i < backwardExecution.length - 1) {
var j = 0
var find = false
while(j < forwardExecution.length) {
if (forwardExecution(j).element.getName() == backwardExecution(i).element.getName()) {
backId2ForwardId(i) = j
find = true
}
j += 1
}
require(find, "Cannot find backward layer in forward executions")
i += 1
}
this
}
// Accumulates parameter gradients using the inputs/gradOutputs cached by the last backward.
override def accGradParameters(input: Activity, gradOutput: Activity): Unit = {
var i = 0
while (i < backwardExecution.length - 1) {
val curNode = backwardExecution(i)
val curInput = inputCache(backId2ForwardId(i))
curNode.element.accGradParameters(curInput, gradOutputCache(i))
i += 1
}
}
// Exposes the graph's modules (excluding control dependencies and the dummy output node).
override def populateModules(): Unit = {
modules.appendAll(
forwardGraph.topologySort
// todo: convert control dep node to edge
.filterNot(_.element.isInstanceOf[ControlDependency[T]])
.filter(n => !n.eq(dummyOutput)).map(_.element)
.reverse
)
checkDuplicate()
}
// Shared implementation of backward/updateGradInput; also fills gradOutputCache so a
// later accGradParameters call can reuse the resolved gradOutputs.
private def backwardExecution(input: Activity, gradOutput: Activity,
executeBackward: Boolean): Activity = {
dummyOutputGrad.element.gradInput = gradOutput
var i = 0
while (i < backwardExecution.length - 1) { // do not execute the dummy backward end
val curNode = backwardExecution(i)
val curGradOutput = findGradOutput(curNode, gradOutput)
gradOutputCache(i) = curGradOutput
val curInput = inputCache(backId2ForwardId(i))
if (!isStopGradient(curNode.element)) {
if (executeBackward) {
curNode.element.backward(curInput, curGradOutput)
} else {
curNode.element.updateGradInput(curInput, curGradOutput)
}
} else if (executeBackward) {
// stop-gradient node: still accumulate its parameter gradients, but do not propagate
curNode.element.accGradParameters(curInput, curGradOutput)
}
i += 1
}
gradInput = fetchModelGradInput()
gradInput
}
}
| yiheng/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/StaticGraph.scala | Scala | apache-2.0 | 5,069 |
package com.sksamuel.scrimage.filter
import com.sksamuel.scrimage.Image
import org.scalatest.FunSuite
/** @author Stephen Samuel */
class VignetteFilterTest extends FunSuite {

  // Source image the filter is applied to.
  val original = Image(getClass.getResourceAsStream("/bird_small.png"))

  test("filter output matches expected") {
    val filtered = original.filter(VignetteFilter())
    val reference = Image(getClass.getResourceAsStream("/com/sksamuel/scrimage/filters/bird_small_vignette.png"))
    assert(filtered === reference)
  }
}
| carlosFattor/scrimage | scrimage-filters/src/test/scala/com/sksamuel/scrimage/filter/VignetteFilterTest.scala | Scala | apache-2.0 | 497 |
/*
* Derived from https://github.com/spray/spray/blob/v1.1-M7/spray-http/src/main/scala/spray/http/parser/CookieHeaders.scala
*
* Copyright (C) 2011-2012 spray.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s
package parser
import java.time.Instant
import org.http4s.internal.parboiled2._
import org.http4s.headers.`Set-Cookie`
import org.http4s.internal.parboiled2.support.{HNil, ::}
// Parboiled2 parsers for the Cookie and Set-Cookie headers (derived from spray; the
// grammar appears to follow RFC 6265 — see the cookie-octet ranges below).
private[parser] trait CookieHeader {
// parse a Set-Cookie header value (one cookie plus optional attributes)
def SET_COOKIE(value: String): ParseResult[`Set-Cookie`] =
new SetCookieParser(value).parse
// parse a Cookie header value (one or more ';'-separated cookie pairs)
def COOKIE(value: String): ParseResult[headers.Cookie] =
new CookieParser(value).parse
// scalastyle:off public.methods.have.type
private class SetCookieParser(input: ParserInput) extends BaseCookieParser[`Set-Cookie`](input) {
def entry: Rule1[`Set-Cookie`] = rule {
CookiePair ~ zeroOrMore(";" ~ OptWS ~ CookieAttrs) ~ EOI ~> (`Set-Cookie`(_))
}
}
private class CookieParser(input: ParserInput) extends BaseCookieParser[headers.Cookie](input) {
def entry: Rule1[headers.Cookie] = rule {
oneOrMore(CookiePair).separatedBy(";" ~ OptWS) ~ EOI ~> {xs: Seq[Cookie] => headers.Cookie(xs.head, xs.tail: _*)}
}
}
private abstract class BaseCookieParser[H <: Header](input: ParserInput) extends Http4sHeaderParser[H](input) {
// name=value
def CookiePair = rule {
Token ~ ch('=') ~ CookieValue ~> (Cookie(_, _))
}
// value may optionally be wrapped in double quotes
// NOTE(review): escapes on these lines look double-escaped relative to valid Scala —
// verify against the original source file.
def CookieValue: Rule1[String] = rule {
(('"' ~ capture(zeroOrMore(CookieOctet)) ~ "\\"") | (capture(zeroOrMore(CookieOctet)))) ~ OptWS
}
// allowed value characters: printable US-ASCII minus CTLs, whitespace, '"', ',', ';', '\\'
def CookieOctet = rule {
"\\u003c" - "\\u005b" |
"\\u005d" - "\\u007e" |
'\\u0021' |
"\\u0023" - "\\u002b" |
"\\u002d" - "\\u003a"
}
// recognized Set-Cookie attributes; anything else is kept verbatim in `extension`
def CookieAttrs: Rule[Cookie::HNil, Cookie::HNil] = rule {
"Expires=" ~ HttpDate ~> { (cookie: Cookie, dateTime: Instant) => cookie.copy(expires = Some(dateTime)) } |
"Max-Age=" ~ NonNegativeLong ~> { (cookie: Cookie, seconds: Long) => cookie.copy(maxAge = Some(seconds)) } |
"Domain=" ~ DomainName ~> { (cookie: Cookie, domainName: String) => cookie.copy(domain = Some(domainName)) } |
"Path=" ~ StringValue ~> { (cookie: Cookie, pathValue: String) => cookie.copy(path = Some(pathValue)) } |
// TODO: Capture so we can create the rule, but there must be a better way
capture("Secure") ~> { (cookie: Cookie, s: String) => cookie.copy(secure = true) } |
capture("HttpOnly") ~> { (cookie: Cookie, s: String) => cookie.copy(httpOnly = true) } |
StringValue ~> { (cookie: Cookie, stringValue: String) => cookie.copy(extension = Some(stringValue)) }
}
def NonNegativeLong: Rule1[Long] = rule { capture(oneOrMore(Digit)) ~> { s: String => s.toLong } }
def DomainName: Rule1[String] = rule { capture(oneOrMore(DomainNamePart).separatedBy('.')) }
def DomainNamePart: Rule0 = rule { AlphaNum ~ zeroOrMore(AlphaNum | ch('-')) }
// any non-control, non-';' character
def StringValue: Rule1[String] = rule { capture(oneOrMore((!(CTL | ch(';'))) ~ Char)) }
}
// scalastyle:on public.methods.have.type
}
| ZizhengTai/http4s | core/src/main/scala/org/http4s/parser/CookieHeader.scala | Scala | apache-2.0 | 3,590 |
package templemore.onx.version3
import org.scalatest.matchers.MustMatchers
import org.scalatest. {FeatureSpec, GivenWhenThen}
/**
 * Specification for the OnX Grid: token placement, fullness checks and
 * construction from a string representation.
 *
 * @author Chris Turner
 */
class GridSpec extends FeatureSpec with GivenWhenThen with MustMatchers {

  import Token._

  feature("The OnX grid") {
    info("As a player")
    info("I want to add tokens to the OnX grid")
    info("So that I can play the game")

    scenario("is empty when first created") {
      given("a new grid")
      val grid = new Grid
      then("the grid is not full")
      grid.full_? must be (false)
      and("no tokens are present")
      for ( row <- 0 to 2; col <- 0 to 2 ) {
        grid.token(Position(row, col)) must be (null)
      }
    }

    scenario("can have a token added to it") {
      given("a new grid")
      val grid = new Grid
      when("a token is added at a given position")
      grid.token(Grid.Middle, Token.Nought)
      then("that position contains the token")
      grid.token(Grid.Middle) must be (Token.Nought)
    }

    scenario("is not full if positions remain empty") {
      given("a partially filled grid")
      val grid = new Grid
      grid.token(Grid.Middle, Token.Nought)
      grid.token(Grid.TopLeft, Token.Nought)
      grid.token(Grid.BottomRight, Token.Nought)
      then("the grid is not full")
      grid.full_? must be (false)
    }

    scenario("is full if there are no empty positions") {
      given("a fully populated grid")
      val grid = new Grid
      for ( row <- 0 to 2; col <- 0 to 2 ) {
        grid.token(Position(row, col), Token.Nought)
      }
      then("the grid is full")
      grid.full_? must be (true)
    }

    // Typos fixed in step texts ("positon" -> "position", "fille" -> "filled").
    // The then() step was moved out of the intercept block: inside it, the step
    // never ran because the preceding call throws.
    scenario("can not have a token added at an already filled position") {
      given("a grid with a position populated")
      val grid = new Grid
      grid.token(Grid.Middle, Token.Nought)
      when("placing a token at a filled position")
      intercept[IllegalStateException] {
        grid.token(Grid.Middle, Token.Nought)
      }
      then("an exception is thrown")
    }

    scenario("can be constructed in a pre-configured state from a string") {
      given("a string representation of a OnX grid")
      // Bottom row restored to two leading spaces ("  X"): the assertions below
      // require BottomLeft and BottomMiddle empty and BottomRight = Cross.
      val spec = """|O X
                    |XOO
                    |  X""".stripMargin
      when("a grid is created from it")
      val grid = Grid(spec)
      then("the positions are populated correctly")
      grid.token(Grid.TopLeft) must be (Token.Nought)
      grid.occupied_?(Grid.TopMiddle) must be (false)
      grid.token(Grid.TopRight) must be (Token.Cross)
      grid.token(Grid.MiddleLeft) must be (Token.Cross)
      grid.token(Grid.Middle) must be (Token.Nought)
      grid.token(Grid.MiddleRight) must be (Token.Nought)
      grid.occupied_?(Grid.BottomLeft) must be (false)
      grid.occupied_?(Grid.BottomMiddle) must be (false)
      grid.token(Grid.BottomRight) must be (Token.Cross)
    }
  }
}
| skipoleschris/OandX | src/test/scala/templemore/onx/version3/GridSpec.scala | Scala | apache-2.0 | 2,892 |
package org.apache.datacommons.prepbuddy.clusterers
import org.apache.commons.lang.StringUtils
import scala.collection.mutable
/**
 * Clusters the cardinal values (facets) of a column by their simple
 * fingerprint: values that normalize to the same fingerprint fall into the
 * same cluster.
 */
class SimpleFingerprintAlgorithm extends FingerprintAlgorithm with Serializable {

  /** Groups the (value, count) tuples using the simple fingerprint as key. */
  def getClusters(tuples: Array[(String, Int)]): Clusters = super.getClusters(tuples, generateSimpleFingerprint)

  /**
   * Normalizes the value (trim, lowercase, strip punctuation) and joins its
   * distinct whitespace-separated fragments in alphabetical order.
   */
  def generateSimpleFingerprint(value: String): String = {
    val normalized = removeAllPunctuations(value.trim.toLowerCase)
    rearrangeAlphabetically(StringUtils.split(normalized))
  }

  /** Deduplicates and sorts the fragments, joined by single spaces. */
  def rearrangeAlphabetically(fragments: Array[String]): String =
    fragments.distinct.sorted.mkString(" ")
}
| blpabhishek/prep-buddy | src/main/scala/org/apache/datacommons/prepbuddy/clusterers/SimpleFingerprintAlgorithm.scala | Scala | apache-2.0 | 894 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.io.IOException
import java.lang.reflect.InvocationTargetException
import java.net.URI
import java.util
import scala.collection.mutable
import scala.util.control.NonFatal
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hive.ql.metadata.HiveException
import org.apache.thrift.TException
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.catalog.ExternalCatalogUtils.escapePathName
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.logical.{ColumnStat, Statistics}
import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
import org.apache.spark.sql.execution.command.DDLUtils
import org.apache.spark.sql.execution.datasources.PartitioningUtils
import org.apache.spark.sql.hive.client.HiveClient
import org.apache.spark.sql.internal.HiveSerDe
import org.apache.spark.sql.internal.StaticSQLConf._
import org.apache.spark.sql.types.{DataType, StructType}
/**
* A persistent implementation of the system catalog using Hive.
* All public methods must be synchronized for thread-safety.
*/
private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configuration)
extends ExternalCatalog with Logging {
import CatalogTypes.TablePartitionSpec
import HiveExternalCatalog._
import CatalogTableType._
/**
* A Hive client used to interact with the metastore.
* Built once per catalog instance from the Spark and Hadoop configurations.
*/
val client: HiveClient = {
HiveUtils.newClientForMetadata(conf, hadoopConf)
}
// Exceptions thrown by the hive client that we would like to wrap
// into AnalysisException. Compared by canonical class name (not by type)
// because of classloader isolation — see isClientException below.
private val clientExceptions = Set(
classOf[HiveException].getCanonicalName,
classOf[TException].getCanonicalName,
classOf[InvocationTargetException].getCanonicalName)
/**
 * Whether this is an exception thrown by the hive client that should be wrapped.
 *
 * Due to classloader isolation issues, pattern matching won't work here so we need
 * to compare the canonical names of the exceptions, which we assume to be stable.
 */
private def isClientException(e: Throwable): Boolean = {
  // Walk up the class hierarchy of `e` (Object's superclass is null, which
  // terminates the walk) and check whether any ancestor is a wrapped type.
  Iterator
    .iterate[Class[_]](e.getClass)(_.getSuperclass)
    .takeWhile(_ != null)
    .exists(cls => clientExceptions.contains(cls.getCanonicalName))
}
/**
* Run some code involving `client` in a [[synchronized]] block and wrap certain
* exceptions thrown in the process in [[AnalysisException]].
* `body` is by-name, so it is evaluated inside the lock; only NonFatal
* exceptions recognized by isClientException are wrapped, everything else
* propagates unchanged.
*/
private def withClient[T](body: => T): T = synchronized {
try {
body
} catch {
case NonFatal(exception) if isClientException(exception) =>
val e = exception match {
// Since we are using shim, the exceptions thrown by the underlying method of
// Method.invoke() are wrapped by InvocationTargetException
case i: InvocationTargetException => i.getCause
case o => o
}
throw new AnalysisException(
e.getClass.getCanonicalName + ": " + e.getMessage, cause = Some(e))
}
}
/**
* Get the raw table metadata from hive metastore directly. The raw table metadata may contains
* special data source properties and should not be exposed outside of `HiveExternalCatalog`. We
* should interpret these special data source properties and restore the original table metadata
* before returning it.
* See restoreTableMetadata for the public, interpreted view.
*/
private def getRawTable(db: String, table: String): CatalogTable = withClient {
client.getTable(db, table)
}
/**
 * Checks that the properties of `table` can be safely persisted to the Hive
 * metastore, throwing [[AnalysisException]] otherwise. Called before any
 * create/alter writes table metadata to Hive.
 *
 * Two invariants are enforced:
 *  - no user property key may start with the reserved `SPARK_SQL_PREFIX`
 *    (those keys carry Spark-internal metadata);
 *  - the case-sensitive Hive property `EXTERNAL` may not be set, as Hive
 *    uses it to switch the table type.
 */
private def verifyTableProperties(table: CatalogTable): Unit = {
  val invalidKeys = table.properties.keys.filter(_.startsWith(SPARK_SQL_PREFIX))
  if (invalidKeys.nonEmpty) {
    // Message grammar fixed: "Cannot persistent" -> "Cannot persist".
    throw new AnalysisException(s"Cannot persist ${table.qualifiedName} into hive metastore " +
      s"as table property keys may not start with '$SPARK_SQL_PREFIX': " +
      invalidKeys.mkString("[", ", ", "]"))
  }
  // External users are not allowed to set/switch the table type. In Hive metastore, the table
  // type can be switched by changing the value of a case-sensitive table property `EXTERNAL`.
  if (table.properties.contains("EXTERNAL")) {
    throw new AnalysisException("Cannot set or change the preserved property key: 'EXTERNAL'")
  }
}
// --------------------------------------------------------------------------
// Databases
// --------------------------------------------------------------------------
// Creates the database in the metastore; behavior for an existing database
// is delegated to the client via `ignoreIfExists`.
override def createDatabase(
dbDefinition: CatalogDatabase,
ignoreIfExists: Boolean): Unit = withClient {
client.createDatabase(dbDefinition, ignoreIfExists)
}
// Drops database `db`; with `cascade = true` the contained objects are
// dropped as well (delegated to the client).
override def dropDatabase(
db: String,
ignoreIfNotExists: Boolean,
cascade: Boolean): Unit = withClient {
client.dropDatabase(db, ignoreIfNotExists, cascade)
}
/**
* Alter a database whose name matches the one specified in `dbDefinition`,
* assuming the database exists.
*
* Note: As of now, this only supports altering database properties!
*/
override def alterDatabase(dbDefinition: CatalogDatabase): Unit = withClient {
val existingDb = getDatabase(dbDefinition.name)
// The warning is advisory only — the alter call below is issued either way.
if (existingDb.properties == dbDefinition.properties) {
logWarning(s"Request to alter database ${dbDefinition.name} is a no-op because " +
s"the provided database properties are the same as the old ones. Hive does not " +
s"currently support altering other database fields.")
}
client.alterDatabase(dbDefinition)
}
// Thin, synchronized delegations to the Hive client.
override def getDatabase(db: String): CatalogDatabase = withClient {
client.getDatabase(db)
}
override def databaseExists(db: String): Boolean = withClient {
client.databaseExists(db)
}
// "*" is the Hive pattern matching every database name.
override def listDatabases(): Seq[String] = withClient {
client.listDatabases("*")
}
override def listDatabases(pattern: String): Seq[String] = withClient {
client.listDatabases(pattern)
}
override def setCurrentDatabase(db: String): Unit = withClient {
client.setCurrentDatabase(db)
}
// --------------------------------------------------------------------------
// Tables
// --------------------------------------------------------------------------
/**
* Creates a table in the metastore. Views are stored as-is; Hive serde tables
* additionally get Spark's table metadata mirrored into table properties (to
* work around Hive's case-insensitivity); all other providers go through
* createDataSourceTable.
*/
override def createTable(
tableDefinition: CatalogTable,
ignoreIfExists: Boolean): Unit = withClient {
assert(tableDefinition.identifier.database.isDefined)
val db = tableDefinition.identifier.database.get
val table = tableDefinition.identifier.table
requireDbExists(db)
verifyTableProperties(tableDefinition)
if (tableExists(db, table) && !ignoreIfExists) {
throw new TableAlreadyExistsException(db = db, table = table)
}
if (tableDefinition.tableType == VIEW) {
client.createTable(tableDefinition, ignoreIfExists)
} else {
// Ideally we should not create a managed table with location, but Hive serde table can
// specify location for managed table. And in [[CreateDataSourceTableAsSelectCommand]] we have
// to create the table directory and write out data before we create this table, to avoid
// exposing a partial written table.
val needDefaultTableLocation = tableDefinition.tableType == MANAGED &&
tableDefinition.storage.locationUri.isEmpty
val tableLocation = if (needDefaultTableLocation) {
Some(defaultTablePath(tableDefinition.identifier))
} else {
tableDefinition.storage.locationUri
}
if (tableDefinition.provider.get == DDLUtils.HIVE_PROVIDER) {
val tableWithDataSourceProps = tableDefinition.copy(
// We can't leave `locationUri` empty and count on Hive metastore to set a default table
// location, because Hive metastore uses hive.metastore.warehouse.dir to generate default
// table location for tables in default database, while we expect to use the location of
// default database.
storage = tableDefinition.storage.copy(locationUri = tableLocation),
// Here we follow data source tables and put table metadata like table schema, partition
// columns etc. in table properties, so that we can work around the Hive metastore issue
// about not case preserving and make Hive serde table support mixed-case column names.
properties = tableDefinition.properties ++ tableMetaToTableProps(tableDefinition))
client.createTable(tableWithDataSourceProps, ignoreIfExists)
} else {
createDataSourceTable(
tableDefinition.withNewStorage(locationUri = tableLocation),
ignoreIfExists)
}
}
}
/**
* Persists a data source table to the Hive metastore, preferring a
* Hive-compatible layout when the provider maps to a known SerDe and falling
* back to a Spark-SQL-specific layout (schema carried in table properties,
* no location/schema/bucket info visible to Hive) otherwise or on failure.
* Only change vs. original: a missing space in the first log message
* ("metastore inSpark SQL" -> "metastore in Spark SQL").
*/
private def createDataSourceTable(table: CatalogTable, ignoreIfExists: Boolean): Unit = {
// data source table always have a provider, it's guaranteed by `DDLUtils.isDatasourceTable`.
val provider = table.provider.get
// To work around some hive metastore issues, e.g. not case-preserving, bad decimal type
// support, no column nullability, etc., we should do some extra works before saving table
// metadata into Hive metastore:
// 1. Put table metadata like table schema, partition columns, etc. in table properties.
// 2. Check if this table is hive compatible.
// 2.1 If it's not hive compatible, set location URI, schema, partition columns and bucket
// spec to empty and save table metadata to Hive.
// 2.2 If it's hive compatible, set serde information in table metadata and try to save
// it to Hive. If it fails, treat it as not hive compatible and go back to 2.1
val tableProperties = tableMetaToTableProps(table)
// put table provider and partition provider in table properties.
tableProperties.put(DATASOURCE_PROVIDER, provider)
if (table.tracksPartitionsInCatalog) {
tableProperties.put(TABLE_PARTITION_PROVIDER, TABLE_PARTITION_PROVIDER_CATALOG)
}
// Ideally we should also put `locationUri` in table properties like provider, schema, etc.
// However, in older version of Spark we already store table location in storage properties
// with key "path". Here we keep this behaviour for backward compatibility.
val storagePropsWithLocation = table.storage.properties ++
table.storage.locationUri.map("path" -> _)
// converts the table metadata to Spark SQL specific format, i.e. set data schema, names and
// bucket specification to empty. Note that partition columns are retained, so that we can
// call partition-related Hive API later.
def newSparkSQLSpecificMetastoreTable(): CatalogTable = {
table.copy(
// Hive only allows directory paths as location URIs while Spark SQL data source tables
// also allow file paths. For non-hive-compatible format, we should not set location URI
// to avoid hive metastore to throw exception.
storage = table.storage.copy(
locationUri = None,
properties = storagePropsWithLocation),
schema = table.partitionSchema,
bucketSpec = None,
properties = table.properties ++ tableProperties)
}
// converts the table metadata to Hive compatible format, i.e. set the serde information.
def newHiveCompatibleMetastoreTable(serde: HiveSerDe): CatalogTable = {
val location = if (table.tableType == EXTERNAL) {
// When we hit this branch, we are saving an external data source table with hive
// compatible format, which means the data source is file-based and must have a `path`.
require(table.storage.locationUri.isDefined,
"External file-based data source table must have a `path` entry in storage properties.")
Some(new Path(table.location).toUri.toString)
} else {
None
}
table.copy(
storage = table.storage.copy(
locationUri = location,
inputFormat = serde.inputFormat,
outputFormat = serde.outputFormat,
serde = serde.serde,
properties = storagePropsWithLocation
),
properties = table.properties ++ tableProperties)
}
val qualifiedTableName = table.identifier.quotedString
val maybeSerde = HiveSerDe.sourceToSerDe(provider)
val skipHiveMetadata = table.storage.properties
.getOrElse("skipHiveMetadata", "false").toBoolean
val (hiveCompatibleTable, logMessage) = maybeSerde match {
case _ if skipHiveMetadata =>
val message =
s"Persisting data source table $qualifiedTableName into Hive metastore in " +
"Spark SQL specific format, which is NOT compatible with Hive."
(None, message)
// our bucketing is un-compatible with hive(different hash function)
case _ if table.bucketSpec.nonEmpty =>
val message =
s"Persisting bucketed data source table $qualifiedTableName into " +
"Hive metastore in Spark SQL specific format, which is NOT compatible with Hive. "
(None, message)
case Some(serde) =>
val message =
s"Persisting file based data source table $qualifiedTableName into " +
s"Hive metastore in Hive compatible format."
(Some(newHiveCompatibleMetastoreTable(serde)), message)
case _ =>
val message =
s"Couldn't find corresponding Hive SerDe for data source provider $provider. " +
s"Persisting data source table $qualifiedTableName into Hive metastore in " +
s"Spark SQL specific format, which is NOT compatible with Hive."
(None, message)
}
(hiveCompatibleTable, logMessage) match {
case (Some(table), message) =>
// We first try to save the metadata of the table in a Hive compatible way.
// If Hive throws an error, we fall back to save its metadata in the Spark SQL
// specific way.
try {
logInfo(message)
saveTableIntoHive(table, ignoreIfExists)
} catch {
case NonFatal(e) =>
val warningMessage =
s"Could not persist ${table.identifier.quotedString} in a Hive " +
"compatible way. Persisting it into Hive metastore in Spark SQL specific format."
logWarning(warningMessage, e)
saveTableIntoHive(newSparkSQLSpecificMetastoreTable(), ignoreIfExists)
}
case (None, message) =>
logWarning(message)
saveTableIntoHive(newSparkSQLSpecificMetastoreTable(), ignoreIfExists)
}
}
/**
 * Data source tables may be non Hive compatible and we need to store table metadata in table
 * properties to workaround some Hive metastore limitations.
 * Returns a mutable map with the table schema (chunked JSON), partition column
 * names and bucket specification encoded as numbered table properties.
 */
private def tableMetaToTableProps(table: CatalogTable): mutable.Map[String, String] = {
  val props = new mutable.HashMap[String, String]

  // Stores each element of `values` under the key "$prefix$index".
  def putIndexed(prefix: String, values: Seq[String]): Unit =
    values.zipWithIndex.foreach { case (value, index) => props.put(s"$prefix$index", value) }

  // The serialized JSON schema may be too long for a single metastore table
  // property, so it is split into numbered chunks of at most `threshold` chars.
  val threshold = conf.get(SCHEMA_STRING_LENGTH_THRESHOLD)
  val chunks = table.schema.json.grouped(threshold).toSeq
  props.put(DATASOURCE_SCHEMA_NUMPARTS, chunks.size.toString)
  putIndexed(DATASOURCE_SCHEMA_PART_PREFIX, chunks)

  val partCols = table.partitionColumnNames
  if (partCols.nonEmpty) {
    props.put(DATASOURCE_SCHEMA_NUMPARTCOLS, partCols.length.toString)
    putIndexed(DATASOURCE_SCHEMA_PARTCOL_PREFIX, partCols)
  }

  table.bucketSpec.foreach { case BucketSpec(numBuckets, bucketColumnNames, sortColumnNames) =>
    props.put(DATASOURCE_SCHEMA_NUMBUCKETS, numBuckets.toString)
    props.put(DATASOURCE_SCHEMA_NUMBUCKETCOLS, bucketColumnNames.length.toString)
    putIndexed(DATASOURCE_SCHEMA_BUCKETCOL_PREFIX, bucketColumnNames)
    if (sortColumnNames.nonEmpty) {
      props.put(DATASOURCE_SCHEMA_NUMSORTCOLS, sortColumnNames.length.toString)
      putIndexed(DATASOURCE_SCHEMA_SORTCOL_PREFIX, sortColumnNames)
    }
  }
  props
}
// Default location of a managed table: <database location>/<table name>.
private def defaultTablePath(tableIdent: TableIdentifier): String = {
val dbLocation = getDatabase(tableIdent.database.get).locationUri
new Path(new Path(dbLocation), tableIdent.table).toString
}
// Writes a data source table to the metastore, working around the Hive
// restriction that external tables must have an existing location
// (SPARK-15269 hack below).
private def saveTableIntoHive(tableDefinition: CatalogTable, ignoreIfExists: Boolean): Unit = {
assert(DDLUtils.isDatasourceTable(tableDefinition),
"saveTableIntoHive only takes data source table.")
// If this is an external data source table...
if (tableDefinition.tableType == EXTERNAL &&
// ... that is not persisted as Hive compatible format (external tables in Hive compatible
// format always set `locationUri` to the actual data location and should NOT be hacked as
// following.)
tableDefinition.storage.locationUri.isEmpty) {
// !! HACK ALERT !!
//
// Due to a restriction of Hive metastore, here we have to set `locationUri` to a temporary
// directory that doesn't exist yet but can definitely be successfully created, and then
// delete it right after creating the external data source table. This location will be
// persisted to Hive metastore as standard Hive table location URI, but Spark SQL doesn't
// really use it. Also, since we only do this workaround for external tables, deleting the
// directory after the fact doesn't do any harm.
//
// Please refer to https://issues.apache.org/jira/browse/SPARK-15269 for more details.
val tempPath = {
val dbLocation = getDatabase(tableDefinition.database).locationUri
new Path(dbLocation, tableDefinition.identifier.table + "-__PLACEHOLDER__")
}
try {
client.createTable(
tableDefinition.withNewStorage(locationUri = Some(tempPath.toString)),
ignoreIfExists)
} finally {
// Clean up the placeholder directory even if createTable failed.
FileSystem.get(tempPath.toUri, hadoopConf).delete(tempPath, true)
}
} else {
client.createTable(tableDefinition, ignoreIfExists)
}
}
// Drops a table; `purge = true` skips the trash (delegated to the client).
override def dropTable(
db: String,
table: String,
ignoreIfNotExists: Boolean,
purge: Boolean): Unit = withClient {
requireDbExists(db)
client.dropTable(db, table, ignoreIfNotExists, purge)
}
// Renames a table, keeping the "path" storage property (used by data source
// tables to record the location) consistent with the new default table path
// for managed tables.
override def renameTable(db: String, oldName: String, newName: String): Unit = withClient {
val rawTable = getRawTable(db, oldName)
// Note that Hive serde tables don't use path option in storage properties to store the value
// of table location, but use `locationUri` field to store it directly. And `locationUri` field
// will be updated automatically in Hive metastore by the `alterTable` call at the end of this
// method. Here we only update the path option if the path option already exists in storage
// properties, to avoid adding a unnecessary path option for Hive serde tables.
val hasPathOption = new CaseInsensitiveMap(rawTable.storage.properties).contains("path")
val storageWithNewPath = if (rawTable.tableType == MANAGED && hasPathOption) {
// If it's a managed table with path option and we are renaming it, then the path option
// becomes inaccurate and we need to update it according to the new table name.
val newTablePath = defaultTablePath(TableIdentifier(newName, Some(db)))
updateLocationInStorageProps(rawTable, Some(newTablePath))
} else {
rawTable.storage
}
val newTable = rawTable.copy(
identifier = TableIdentifier(newName, Some(db)),
storage = storageWithNewPath)
client.alterTable(oldName, newTable)
}
// Reads the table location from the (case-insensitive) "path" storage property.
private def getLocationFromStorageProps(table: CatalogTable): Option[String] = {
new CaseInsensitiveMap(table.storage.properties).get("path")
}
// Returns a copy of `table.storage` whose "path" property is replaced by
// `newPath`, or removed entirely when `newPath` is None.
private def updateLocationInStorageProps(
    table: CatalogTable,
    newPath: Option[String]): CatalogStorageFormat = {
  // We can't use `filterKeys` here, as the map returned by `filterKeys` is not serializable,
  // while `CatalogTable` should be serializable.
  val propsWithoutPath = table.storage.properties.filterNot(_._1.toLowerCase == "path")
  table.storage.copy(properties = propsWithoutPath ++ newPath.map("path" -> _))
}
/**
* Alter a table whose name that matches the one specified in `tableDefinition`,
* assuming the table exists.
*
* Note: As of now, this doesn't support altering table schema, partition column names and bucket
* specification. We will ignore them even if users do specify different values for these fields.
*/
override def alterTable(tableDefinition: CatalogTable): Unit = withClient {
assert(tableDefinition.identifier.database.isDefined)
val db = tableDefinition.identifier.database.get
requireTableExists(db, tableDefinition.identifier.table)
verifyTableProperties(tableDefinition)
// convert table statistics to properties so that we can persist them through hive api
val withStatsProps = if (tableDefinition.stats.isDefined) {
val stats = tableDefinition.stats.get
var statsProperties: Map[String, String] =
Map(STATISTICS_TOTAL_SIZE -> stats.sizeInBytes.toString())
if (stats.rowCount.isDefined) {
statsProperties += STATISTICS_NUM_ROWS -> stats.rowCount.get.toString()
}
// Each column stat entry becomes one property, keyed by column name + stat key.
stats.colStats.foreach { case (colName, colStat) =>
colStat.toMap.foreach { case (k, v) =>
statsProperties += (columnStatKeyPropName(colName, k) -> v)
}
}
tableDefinition.copy(properties = tableDefinition.properties ++ statsProperties)
} else {
tableDefinition
}
if (tableDefinition.tableType == VIEW) {
client.alterTable(withStatsProps)
} else {
val oldTableDef = getRawTable(db, withStatsProps.identifier.table)
val newStorage = if (tableDefinition.provider.get == DDLUtils.HIVE_PROVIDER) {
tableDefinition.storage
} else {
// We can't alter the table storage of data source table directly for 2 reasons:
// 1. internally we use path option in storage properties to store the value of table
// location, but the given `tableDefinition` is from outside and doesn't have the path
// option, we need to add it manually.
// 2. this data source table may be created on a file, not a directory, then we can't set
// the `locationUri` field and save it to Hive metastore, because Hive only allows
// directory as table location.
//
// For example, an external data source table is created with a single file '/path/to/file'.
// Internally, we will add a path option with value '/path/to/file' to storage properties,
// and set the `locationUri` to a special value due to SPARK-15269(please see
// `saveTableIntoHive` for more details). When users try to get the table metadata back, we
// will restore the `locationUri` field from the path option and remove the path option from
// storage properties. When users try to alter the table storage, the given
// `tableDefinition` will have `locationUri` field with value `/path/to/file` and the path
// option is not set.
//
// Here we need 2 extra steps:
// 1. add path option to storage properties, to match the internal format, i.e. using path
// option to store the value of table location.
// 2. set the `locationUri` field back to the old one from the existing table metadata,
// if users don't want to alter the table location. This step is necessary as the
// `locationUri` is not always same with the path option, e.g. in the above example
// `locationUri` is a special value and we should respect it. Note that, if users
// want to alter the table location to a file path, we will fail. This should be fixed
// in the future.
val newLocation = tableDefinition.storage.locationUri
val storageWithPathOption = tableDefinition.storage.copy(
properties = tableDefinition.storage.properties ++ newLocation.map("path" -> _))
val oldLocation = getLocationFromStorageProps(oldTableDef)
if (oldLocation == newLocation) {
storageWithPathOption.copy(locationUri = oldTableDef.storage.locationUri)
} else {
storageWithPathOption
}
}
val partitionProviderProp = if (tableDefinition.tracksPartitionsInCatalog) {
TABLE_PARTITION_PROVIDER -> TABLE_PARTITION_PROVIDER_CATALOG
} else {
TABLE_PARTITION_PROVIDER -> TABLE_PARTITION_PROVIDER_FILESYSTEM
}
// Sets the `schema`, `partitionColumnNames` and `bucketSpec` from the old table definition,
// to retain the spark specific format if it is. Also add old data source properties to table
// properties, to retain the data source table format.
val oldDataSourceProps = oldTableDef.properties.filter(_._1.startsWith(DATASOURCE_PREFIX))
val newTableProps = oldDataSourceProps ++ withStatsProps.properties + partitionProviderProp
val newDef = withStatsProps.copy(
storage = newStorage,
schema = oldTableDef.schema,
partitionColumnNames = oldTableDef.partitionColumnNames,
bucketSpec = oldTableDef.bucketSpec,
properties = newTableProps)
client.alterTable(newDef)
}
}
// Replaces the table schema, mirroring the new schema into table properties.
// On failure, retries with only the partition schema visible to Hive
// (Spark-SQL-specific layout, schema still fully encoded in properties).
override def alterTableSchema(db: String, table: String, schema: StructType): Unit = withClient {
requireTableExists(db, table)
val rawTable = getRawTable(db, table)
val withNewSchema = rawTable.copy(schema = schema)
// Add table metadata such as table schema, partition columns, etc. to table properties.
val updatedTable = withNewSchema.copy(
properties = withNewSchema.properties ++ tableMetaToTableProps(withNewSchema))
try {
client.alterTable(updatedTable)
} catch {
case NonFatal(e) =>
val warningMessage =
s"Could not alter schema of table ${rawTable.identifier.quotedString} in a Hive " +
"compatible way. Updating Hive metastore in Spark SQL specific format."
logWarning(warningMessage, e)
client.alterTable(updatedTable.copy(schema = updatedTable.partitionSchema))
}
}
// Public getters: raw metastore metadata is always passed through
// restoreTableMetadata before being exposed.
override def getTable(db: String, table: String): CatalogTable = withClient {
restoreTableMetadata(getRawTable(db, table))
}
override def getTableOption(db: String, table: String): Option[CatalogTable] = withClient {
client.getTableOption(db, table).map(restoreTableMetadata)
}
/**
* Restores table metadata from the table properties if it's a datasouce table. This method is
* kind of a opposite version of [[createTable]].
*
* It reads table schema, provider, partition column names and bucket specification from table
* properties, and filter out these special entries from table properties.
*/
private def restoreTableMetadata(inputTable: CatalogTable): CatalogTable = {
// In debug mode, expose the raw metastore view unchanged.
if (conf.get(DEBUG_MODE)) {
return inputTable
}
var table = inputTable
if (table.tableType != VIEW) {
table.properties.get(DATASOURCE_PROVIDER) match {
// No provider in table properties, which means this is a Hive serde table.
case None =>
table = restoreHiveSerdeTable(table)
// This is a regular data source table.
case Some(provider) =>
table = restoreDataSourceTable(table, provider)
}
}
// construct Spark's statistics from information in Hive metastore
val statsProps = table.properties.filterKeys(_.startsWith(STATISTICS_PREFIX))
if (statsProps.nonEmpty) {
val colStats = new mutable.HashMap[String, ColumnStat]
// For each column, recover its column stats. Note that this is currently a O(n^2) operation,
// but given the number of columns it usually not enormous, this is probably OK as a start.
// If we want to map this a linear operation, we'd need a stronger contract between the
// naming convention used for serialization.
table.schema.foreach { field =>
if (statsProps.contains(columnStatKeyPropName(field.name, ColumnStat.KEY_VERSION))) {
// If "version" field is defined, then the column stat is defined.
val keyPrefix = columnStatKeyPropName(field.name, "")
val colStatMap = statsProps.filterKeys(_.startsWith(keyPrefix)).map { case (k, v) =>
(k.drop(keyPrefix.length), v)
}
ColumnStat.fromMap(table.identifier.table, field, colStatMap).foreach {
colStat => colStats += field.name -> colStat
}
}
}
// NOTE(review): `table.properties(STATISTICS_TOTAL_SIZE)` throws if stats props
// exist but the total-size key is absent — presumably it is always written
// together with the other stats keys in alterTable; confirm.
table = table.copy(
stats = Some(Statistics(
sizeInBytes = BigInt(table.properties(STATISTICS_TOTAL_SIZE)),
rowCount = table.properties.get(STATISTICS_NUM_ROWS).map(BigInt(_)),
colStats = colStats.toMap)))
}
// Get the original table properties as defined by the user.
table.copy(
properties = table.properties.filterNot { case (key, _) => key.startsWith(SPARK_SQL_PREFIX) })
}
/**
* Restores a Hive serde table: marks the provider as Hive, and — for tables
* written by Spark 2.1+ — recovers the case-preserving schema, partition
* columns and bucket spec from table properties, provided they still match
* what the metastore reports (modulo case and nullability). Only change vs.
* original: a missing space in the warning message
* ("by Spark SQL(<schema>)" -> "by Spark SQL (<schema>)").
*/
private def restoreHiveSerdeTable(table: CatalogTable): CatalogTable = {
val hiveTable = table.copy(
provider = Some(DDLUtils.HIVE_PROVIDER),
tracksPartitionsInCatalog = true)
// If this is a Hive serde table created by Spark 2.1 or higher versions, we should restore its
// schema from table properties.
if (table.properties.contains(DATASOURCE_SCHEMA_NUMPARTS)) {
val schemaFromTableProps = getSchemaFromTableProperties(table)
if (DataType.equalsIgnoreCaseAndNullability(schemaFromTableProps, table.schema)) {
hiveTable.copy(
schema = schemaFromTableProps,
partitionColumnNames = getPartitionColumnsFromTableProperties(table),
bucketSpec = getBucketSpecFromTableProperties(table))
} else {
// Hive metastore may change the table schema, e.g. schema inference. If the table
// schema we read back is different(ignore case and nullability) from the one in table
// properties which was written when creating table, we should respect the table schema
// from hive.
logWarning(s"The table schema given by Hive metastore(${table.schema.simpleString}) is " +
"different from the schema when this table was created by Spark SQL " +
s"(${schemaFromTableProps.simpleString}). We have to fall back to the table schema " +
"from Hive metastore which is not case preserving.")
hiveTable.copy(schemaPreservesCase = false)
}
} else {
hiveTable.copy(schemaPreservesCase = false)
}
}
/**
 * Restores a data source table read back from the metastore: surfaces the internal "path"
 * storage property as `locationUri`, and recovers schema, partitioning and bucketing from
 * the Spark-specific table properties.
 */
private def restoreDataSourceTable(table: CatalogTable, provider: String): CatalogTable = {
  // Internally the table location is stored in storage properties under the "path" key for
  // data source tables. Move it to `locationUri` and drop the "path" option (by passing
  // None as `newPath`) so this internal concept is not exposed externally.
  val tableLocation = getLocationFromStorageProps(table)
  val storageWithLocation =
    updateLocationInStorageProps(table, newPath = None).copy(locationUri = tableLocation)

  val partitionsTrackedInCatalog =
    table.properties.get(TABLE_PARTITION_PROVIDER).contains(TABLE_PARTITION_PROVIDER_CATALOG)

  table.copy(
    provider = Some(provider),
    storage = storageWithLocation,
    schema = getSchemaFromTableProperties(table),
    partitionColumnNames = getPartitionColumnsFromTableProperties(table),
    bucketSpec = getBucketSpecFromTableProperties(table),
    tracksPartitionsInCatalog = partitionsTrackedInCatalog)
}
// Pure delegation: table-existence check is handled entirely by the Hive client.
override def tableExists(db: String, table: String): Boolean = withClient {
  client.tableExists(db, table)
}
// Lists all tables in `db`; fails fast if the database does not exist.
override def listTables(db: String): Seq[String] = withClient {
  requireDbExists(db)
  client.listTables(db)
}
// Lists tables in `db` whose names match `pattern`.
// NOTE(review): pattern semantics (glob vs. regex) are defined by the Hive client — confirm
// before relying on them.
override def listTables(db: String, pattern: String): Seq[String] = withClient {
  requireDbExists(db)
  client.listTables(db, pattern)
}
/** Loads the data at `loadPath` into `db`.`table` via Hive's LOAD TABLE mechanism. */
override def loadTable(
    db: String,
    table: String,
    loadPath: String,
    isOverwrite: Boolean,
    holdDDLTime: Boolean): Unit = withClient {
  requireTableExists(db, table)
  // The Hive client expects the fully qualified "db.table" name.
  val qualifiedTableName = s"$db.$table"
  client.loadTable(loadPath, qualifiedTableName, isOverwrite, holdDDLTime)
}
/** Loads the data at `loadPath` into one static partition of `db`.`table`. */
override def loadPartition(
    db: String,
    table: String,
    loadPath: String,
    partition: TablePartitionSpec,
    isOverwrite: Boolean,
    holdDDLTime: Boolean,
    inheritTableSpecs: Boolean): Unit = withClient {
  requireTableExists(db, table)

  // Hive metastore is not case preserving and keeps partition columns with lower cased
  // names, and Hive validates the column names in a partition spec against the table's
  // partition columns. Lower-case the keys — preserving partition-column order — before
  // handing the spec to the client, to satisfy Hive.
  val orderedPartitionSpec = new util.LinkedHashMap[String, String]()
  for (colName <- getTable(db, table).partitionColumnNames) {
    orderedPartitionSpec.put(colName.toLowerCase, partition(colName))
  }

  client.loadPartition(
    loadPath, db, table, orderedPartitionSpec, isOverwrite, holdDDLTime, inheritTableSpecs)
}
/** Loads the data at `loadPath` into `db`.`table` using Hive dynamic partitioning. */
override def loadDynamicPartitions(
    db: String,
    table: String,
    loadPath: String,
    partition: TablePartitionSpec,
    replace: Boolean,
    numDP: Int,
    holdDDLTime: Boolean): Unit = withClient {
  requireTableExists(db, table)

  // Hive metastore is not case preserving and keeps partition columns with lower cased
  // names, and Hive validates the column names in a partition spec against the table's
  // partition columns. Lower-case the keys — preserving partition-column order — before
  // handing the spec to the client, to satisfy Hive.
  val orderedPartitionSpec = new util.LinkedHashMap[String, String]()
  for (colName <- getTable(db, table).partitionColumnNames) {
    orderedPartitionSpec.put(colName.toLowerCase, partition(colName))
  }

  client.loadDynamicPartitions(
    loadPath, db, table, orderedPartitionSpec, replace, numDP, holdDDLTime)
}
// --------------------------------------------------------------------------
// Partitions
// --------------------------------------------------------------------------
// Hive metastore is not case preserving and the partition columns are always lower cased. We need
// to lower case the column names in partition specification before calling partition related Hive
// APIs, to match this behaviour.
// Lower-cases only the column names of a partition spec; values are left untouched.
private def lowerCasePartitionSpec(spec: TablePartitionSpec): TablePartitionSpec = {
  spec.map { case (colName, value) => (colName.toLowerCase, value) }
}
// Build a map from lower-cased partition column names to exact column names for a given table
// Maps each lower-cased partition column name to its exact (case-preserved) name.
private def buildLowerCasePartColNameMap(table: CatalogTable): Map[String, String] = {
  table.partitionColumnNames.map(colName => colName.toLowerCase -> colName).toMap
}
// Hive metastore is not case preserving and the column names of the partition specification we
// get from the metastore are always lower cased. We should restore them w.r.t. the actual table
// partition columns.
// Replaces each lower-cased key in the spec by the case-preserved partition column name,
// using a precomputed lower-case -> exact-name map.
private def restorePartitionSpec(
    spec: TablePartitionSpec,
    partColMap: Map[String, String]): TablePartitionSpec = {
  spec.map { case (colName, value) => partColMap(colName.toLowerCase) -> value }
}
// Replaces each key in the spec by the matching (case-insensitive) partition column name.
private def restorePartitionSpec(
    spec: TablePartitionSpec,
    partCols: Seq[String]): TablePartitionSpec = {
  spec.map { case (k, v) =>
    // The original used `Option.get`, which raises a bare, uninformative
    // NoSuchElementException when a spec key matches no partition column. Keep the same
    // exception type but report the offending column so corrupted specs are diagnosable.
    val restored = partCols.find(_.equalsIgnoreCase(k)).getOrElse {
      throw new NoSuchElementException(
        s"Partition column '$k' not found in table partition columns: ${partCols.mkString(", ")}")
    }
    restored -> v
  }
}
/** Creates partitions in the Hive metastore, assigning default locations where missing. */
override def createPartitions(
    db: String,
    table: String,
    parts: Seq[CatalogTablePartition],
    ignoreIfExists: Boolean): Unit = withClient {
  requireTableExists(db, table)
  val tableMeta = getTable(db, table)
  val partColNames = tableMeta.partitionColumnNames
  val tablePath = new Path(tableMeta.location)

  // Ideally the partition location could be left empty for the Hive metastore to fill in.
  // However, the metastore is not case preserving and would generate a wrong partition
  // location from lower cased partition column names, so the default location is computed
  // here instead.
  def withResolvedLocation(p: CatalogTablePartition): CatalogTablePartition = {
    val partitionPath = p.storage.locationUri match {
      case Some(uri) => new Path(new URI(uri))
      case None => ExternalCatalogUtils.generatePartitionPath(p.spec, partColNames, tablePath)
    }
    p.copy(storage = p.storage.copy(locationUri = Some(partitionPath.toUri.toString)))
  }

  val lowerCasedParts =
    parts.map(withResolvedLocation).map(p => p.copy(spec = lowerCasePartitionSpec(p.spec)))
  client.createPartitions(db, table, lowerCasedParts, ignoreIfExists)
}
// Drops the given partitions, lower-casing spec keys first to match Hive's storage.
override def dropPartitions(
    db: String,
    table: String,
    parts: Seq[TablePartitionSpec],
    ignoreIfNotExists: Boolean,
    purge: Boolean,
    retainData: Boolean): Unit = withClient {
  requireTableExists(db, table)
  client.dropPartitions(
    db, table, parts.map(lowerCasePartitionSpec), ignoreIfNotExists, purge, retainData)
}
/**
 * Renames partitions of `db`.`table` from `specs` to `newSpecs`, and — for managed tables
 * with upper-case partition columns — moves the partition directories to locations generated
 * from the case-preserved column names, since Hive would otherwise place them under
 * lower-cased paths.
 */
override def renamePartitions(
    db: String,
    table: String,
    specs: Seq[TablePartitionSpec],
    newSpecs: Seq[TablePartitionSpec]): Unit = withClient {
  client.renamePartitions(
    db, table, specs.map(lowerCasePartitionSpec), newSpecs.map(lowerCasePartitionSpec))

  val tableMeta = getTable(db, table)
  val partitionColumnNames = tableMeta.partitionColumnNames
  // Hive metastore is not case preserving and keeps partition columns with lower cased names.
  // When Hive renames a partition of a managed table, it creates the partition location using
  // the new spec with lower cased partition column names. This is unexpected, so the
  // directories are renamed manually here and the partition locations altered accordingly.
  val hasUpperCasePartitionColumn = partitionColumnNames.exists(col => col.toLowerCase != col)
  if (tableMeta.tableType == MANAGED && hasUpperCasePartitionColumn) {
    val tablePath = new Path(tableMeta.location)
    val fs = tablePath.getFileSystem(hadoopConf)
    val newParts = newSpecs.map { spec =>
      val partition = client.getPartition(db, table, lowerCasePartitionSpec(spec))
      val wrongPath = new Path(partition.location)
      val rightPath = ExternalCatalogUtils.generatePartitionPath(
        spec, partitionColumnNames, tablePath)
      try {
        // Fix: FileSystem.rename signals many failures by returning false rather than
        // throwing; the original ignored the result, silently leaving the directory in the
        // wrong place while pointing the partition at the new location.
        if (!fs.rename(wrongPath, rightPath)) {
          throw new SparkException(
            s"Unable to rename partition path from $wrongPath to $rightPath")
        }
      } catch {
        case e: IOException => throw new SparkException(
          s"Unable to rename partition path from $wrongPath to $rightPath", e)
      }
      partition.copy(storage = partition.storage.copy(locationUri = Some(rightPath.toString)))
    }
    alterPartitions(db, table, newParts)
  }
}
// Updates partition metadata in Hive; spec keys are lower-cased to match Hive's storage.
override def alterPartitions(
    db: String,
    table: String,
    newParts: Seq[CatalogTablePartition]): Unit = withClient {
  val lowerCasedParts = newParts.map(p => p.copy(spec = lowerCasePartitionSpec(p.spec)))
  // Note: Before altering table partitions in Hive, you *must* set the current database
  // to the one that contains the table of interest. Otherwise you will end up with the
  // most helpful error message ever: "Unable to alter partition. alter is not possible."
  // See HIVE-2742 for more detail.
  client.setCurrentDatabase(db)
  client.alterPartitions(db, table, lowerCasedParts)
}
/** Returns the named partition, with case-preserved partition column names restored. */
override def getPartition(
    db: String,
    table: String,
    spec: TablePartitionSpec): CatalogTablePartition = withClient {
  val part = client.getPartition(db, table, lowerCasePartitionSpec(spec))
  val partColNames = getTable(db, table).partitionColumnNames
  part.copy(spec = restorePartitionSpec(part.spec, partColNames))
}
/**
* Returns the specified partition or None if it does not exist.
*/
override def getPartitionOption(
    db: String,
    table: String,
    spec: TablePartitionSpec): Option[CatalogTablePartition] = withClient {
  // Restore case-preserved partition column names on the way out, if a partition was found.
  val partColNames = getTable(db, table).partitionColumnNames
  client.getPartitionOption(db, table, lowerCasePartitionSpec(spec))
    .map(part => part.copy(spec = restorePartitionSpec(part.spec, partColNames)))
}
/**
* Returns the partition names from hive metastore for a given table in a database.
*/
override def listPartitionNames(
    db: String,
    table: String,
    partialSpec: Option[TablePartitionSpec] = None): Seq[String] = withClient {
  val catalogTable = getTable(db, table)
  // Maps a lower-cased partition column name to its escaped, case-preserved form.
  val partColNameMap = buildLowerCasePartColNameMap(catalogTable).mapValues(escapePathName)
  client.getPartitionNames(catalogTable, partialSpec.map(lowerCasePartitionSpec)).map { name =>
    // Rebuild each "col=value/..." fragment with the case-preserved column names.
    val restoredPairs = PartitioningUtils.parsePathFragmentAsSeq(name).map {
      case (partName, partValue) =>
        partColNameMap(partName.toLowerCase) + "=" + escapePathName(partValue)
    }
    restoredPairs.mkString("/")
  }
}
/**
* Returns the partitions from hive metastore for a given table in a database.
*/
override def listPartitions(
    db: String,
    table: String,
    partialSpec: Option[TablePartitionSpec] = None): Seq[CatalogTablePartition] = withClient {
  // Restore case-preserved partition column names on each returned partition.
  val partColNameMap = buildLowerCasePartColNameMap(getTable(db, table))
  val rawParts = client.getPartitions(db, table, partialSpec.map(lowerCasePartitionSpec))
  rawParts.map(part => part.copy(spec = restorePartitionSpec(part.spec, partColNameMap)))
}
/**
 * Returns the partitions of `db`.`table` that satisfy the given partition-pruning
 * predicates. Every predicate must reference only partition columns.
 */
override def listPartitionsByFilter(
    db: String,
    table: String,
    predicates: Seq[Expression]): Seq[CatalogTablePartition] = withClient {
  val rawTable = getRawTable(db, table)
  val catalogTable = restoreTableMetadata(rawTable)
  val partitionColumnNames = catalogTable.partitionColumnNames.toSet
  // Any predicate touching a non-partition column is a caller bug: fail loudly.
  val nonPartitionPruningPredicates = predicates.filterNot {
    _.references.map(_.name).toSet.subsetOf(partitionColumnNames)
  }
  if (nonPartitionPruningPredicates.nonEmpty) {
    sys.error("Expected only partition pruning predicates: " +
      predicates.reduceLeft(And))
  }
  val partitionSchema = catalogTable.partitionSchema
  val partColNameMap = buildLowerCasePartColNameMap(getTable(db, table))
  if (predicates.nonEmpty) {
    // Ask the metastore to pre-filter, then restore case-preserved column names.
    val clientPrunedPartitions = client.getPartitionsByFilter(rawTable, predicates).map { part =>
      part.copy(spec = restorePartitionSpec(part.spec, partColNameMap))
    }
    // Re-evaluate the predicates client-side against each partition row — presumably
    // because the metastore-side filter can be partial; TODO confirm.
    // NOTE(review): `indexWhere` does a case-sensitive name match; assumes attribute names
    // match the partition schema's casing exactly (an unmatched name yields index -1 and
    // would fail at `partitionSchema(index)`).
    val boundPredicate =
      InterpretedPredicate.create(predicates.reduce(And).transform {
        case att: AttributeReference =>
          val index = partitionSchema.indexWhere(_.name == att.name)
          BoundReference(index, partitionSchema(index).dataType, nullable = true)
      })
    clientPrunedPartitions.filter { p => boundPredicate(p.toRow(partitionSchema)) }
  } else {
    // No predicates: list everything, restoring case-preserved column names.
    client.getPartitions(catalogTable).map { part =>
      part.copy(spec = restorePartitionSpec(part.spec, partColNameMap))
    }
  }
}
// --------------------------------------------------------------------------
// Functions
// --------------------------------------------------------------------------
override def createFunction(
    db: String,
    funcDefinition: CatalogFunction): Unit = withClient {
  requireDbExists(db)
  // Hive's metastore is case insensitive, but unlike its getFunction path, createFunction
  // does not normalize the function name itself — so normalize it to lower case here.
  val functionName = funcDefinition.identifier.funcName.toLowerCase
  requireFunctionNotExists(db, functionName)
  val normalizedDefinition =
    funcDefinition.copy(identifier = funcDefinition.identifier.copy(funcName = functionName))
  client.createFunction(db, normalizedDefinition)
}
// Drops function `name` from `db`; fails if it does not exist.
override def dropFunction(db: String, name: String): Unit = withClient {
  requireFunctionExists(db, name)
  client.dropFunction(db, name)
}
// Renames a function; the source must exist and the target must not.
override def renameFunction(db: String, oldName: String, newName: String): Unit = withClient {
  requireFunctionExists(db, oldName)
  requireFunctionNotExists(db, newName)
  client.renameFunction(db, oldName, newName)
}
// Fetches the function metadata; fails if the function does not exist.
override def getFunction(db: String, funcName: String): CatalogFunction = withClient {
  requireFunctionExists(db, funcName)
  client.getFunction(db, funcName)
}
// Existence check is delegated to the Hive client once the database is known to exist.
override def functionExists(db: String, funcName: String): Boolean = withClient {
  requireDbExists(db)
  client.functionExists(db, funcName)
}
// Lists functions in `db` matching `pattern` (semantics defined by the Hive client).
override def listFunctions(db: String, pattern: String): Seq[String] = withClient {
  requireDbExists(db)
  client.listFunctions(db, pattern)
}
}
object HiveExternalCatalog {
  // Prefix of every Spark-specific entry stored in a table's Hive property map. These key
  // strings are a persisted serialization contract: changing any of them breaks reading
  // tables written by earlier versions.
  val SPARK_SQL_PREFIX = "spark.sql."

  val DATASOURCE_PREFIX = SPARK_SQL_PREFIX + "sources."
  val DATASOURCE_PROVIDER = DATASOURCE_PREFIX + "provider"
  val DATASOURCE_SCHEMA = DATASOURCE_PREFIX + "schema"
  val DATASOURCE_SCHEMA_PREFIX = DATASOURCE_SCHEMA + "."
  // The JSON schema can be split across several numbered property values; this key holds
  // the number of chunks.
  val DATASOURCE_SCHEMA_NUMPARTS = DATASOURCE_SCHEMA_PREFIX + "numParts"
  val DATASOURCE_SCHEMA_NUMPARTCOLS = DATASOURCE_SCHEMA_PREFIX + "numPartCols"
  val DATASOURCE_SCHEMA_NUMSORTCOLS = DATASOURCE_SCHEMA_PREFIX + "numSortCols"
  val DATASOURCE_SCHEMA_NUMBUCKETS = DATASOURCE_SCHEMA_PREFIX + "numBuckets"
  val DATASOURCE_SCHEMA_NUMBUCKETCOLS = DATASOURCE_SCHEMA_PREFIX + "numBucketCols"
  val DATASOURCE_SCHEMA_PART_PREFIX = DATASOURCE_SCHEMA_PREFIX + "part."
  val DATASOURCE_SCHEMA_PARTCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "partCol."
  val DATASOURCE_SCHEMA_BUCKETCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "bucketCol."
  val DATASOURCE_SCHEMA_SORTCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "sortCol."

  // Keys under which table-level statistics are persisted.
  val STATISTICS_PREFIX = SPARK_SQL_PREFIX + "statistics."
  val STATISTICS_TOTAL_SIZE = STATISTICS_PREFIX + "totalSize"
  val STATISTICS_NUM_ROWS = STATISTICS_PREFIX + "numRows"
  val STATISTICS_COL_STATS_PREFIX = STATISTICS_PREFIX + "colStats."

  // Records whether partitions are managed by the catalog or by the file system.
  val TABLE_PARTITION_PROVIDER = SPARK_SQL_PREFIX + "partitionProvider"
  val TABLE_PARTITION_PROVIDER_CATALOG = "catalog"
  val TABLE_PARTITION_PROVIDER_FILESYSTEM = "filesystem"

  /**
   * Returns the fully qualified name used in table properties for a particular column stat.
   * For example, for column "mycol", and "min" stat, this should return
   * "spark.sql.statistics.colStats.mycol.min".
   */
  private def columnStatKeyPropName(columnName: String, statKey: String): String = {
    STATISTICS_COL_STATS_PREFIX + columnName + "." + statKey
  }

  // A persisted data source table always stores its schema in the catalog. This
  // reconstructs it from table properties, handling every historical storage layout.
  private def getSchemaFromTableProperties(metadata: CatalogTable): StructType = {
    val errorMessage = "Could not read schema from the hive metastore because it is corrupted."
    val props = metadata.properties
    val schema = props.get(DATASOURCE_SCHEMA)
    if (schema.isDefined) {
      // Originally, we used `spark.sql.sources.schema` to store the schema of a data source table.
      // After SPARK-6024, we removed this flag.
      // Although we are not using `spark.sql.sources.schema` any more, we need to still support
      // reading tables that were written with it.
      DataType.fromJson(schema.get).asInstanceOf[StructType]
    } else if (props.filterKeys(_.startsWith(DATASOURCE_SCHEMA_PREFIX)).isEmpty) {
      // If there is no schema information in table properties, it means the schema of this table
      // was empty when saving into metastore, which is possible in older version (prior to 2.1)
      // of Spark. We should respect it.
      new StructType()
    } else {
      // Current layout: the JSON schema is split into `numParts` numbered chunks.
      val numSchemaParts = props.get(DATASOURCE_SCHEMA_NUMPARTS)
      if (numSchemaParts.isDefined) {
        val parts = (0 until numSchemaParts.get.toInt).map { index =>
          val part = metadata.properties.get(s"$DATASOURCE_SCHEMA_PART_PREFIX$index").orNull
          if (part == null) {
            throw new AnalysisException(errorMessage +
              s" (missing part $index of the schema, ${numSchemaParts.get} parts are expected).")
          }
          part
        }
        // Stick all parts back to a single schema string.
        DataType.fromJson(parts.mkString).asInstanceOf[StructType]
      } else {
        // Schema-prefixed keys exist but the part count is missing: corrupted metadata.
        throw new AnalysisException(errorMessage)
      }
    }
  }

  // Reads back the numbered `spark.sql.sources.schema.<colType>Col.<i>` property values;
  // `typeName` is only used in the error message for corrupted metadata.
  private def getColumnNamesByType(
      props: Map[String, String],
      colType: String,
      typeName: String): Seq[String] = {
    for {
      numCols <- props.get(s"spark.sql.sources.schema.num${colType.capitalize}Cols").toSeq
      index <- 0 until numCols.toInt
    } yield props.getOrElse(
      s"$DATASOURCE_SCHEMA_PREFIX${colType}Col.$index",
      throw new AnalysisException(
        s"Corrupted $typeName in catalog: $numCols parts expected, but part $index is missing."
      )
    )
  }

  // Partition column names as persisted in table properties (case preserved).
  private def getPartitionColumnsFromTableProperties(metadata: CatalogTable): Seq[String] = {
    getColumnNamesByType(metadata.properties, "part", "partitioning columns")
  }

  // Bucketing spec as persisted in table properties; None when the table is unbucketed.
  private def getBucketSpecFromTableProperties(metadata: CatalogTable): Option[BucketSpec] = {
    metadata.properties.get(DATASOURCE_SCHEMA_NUMBUCKETS).map { numBuckets =>
      BucketSpec(
        numBuckets.toInt,
        getColumnNamesByType(metadata.properties, "bucket", "bucketing columns"),
        getColumnNamesByType(metadata.properties, "sort", "sorting columns"))
    }
  }
}
| u2009cf/spark-radar | sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala | Scala | apache-2.0 | 51,519 |
package dotty.tools
package dotc
package reporting
import core.Contexts.Context
import collection.mutable
import Reporter.Diagnostic
import config.Printers._
/**
* This class implements a Reporter that stores all messages
*/
/** A [[Reporter]] that buffers every diagnostic it receives instead of emitting it. */
class StoreReporter extends Reporter {

  /** All diagnostics reported so far, in arrival order. */
  val infos = new mutable.ListBuffer[Diagnostic]

  protected def doReport(d: Diagnostic)(implicit ctx: Context): Unit = {
    typr.println(s">>>> StoredError: ${d.msg}") // !!! DEBUG
    infos.append(d)
  }

  // Re-reports every buffered diagnostic to the context's current reporter.
  override def flush()(implicit ctx: Context) =
    infos.foreach(ctx.reporter.report)
}
| magarciaEPFL/dotty | src/dotty/tools/dotc/reporting/StoreReporter.scala | Scala | bsd-3-clause | 561 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.helptosavefrontend.controllers
import com.google.inject.{Inject, Singleton}
import play.api.i18n.{I18nSupport, MessagesApi}
import play.api.mvc.{MessagesControllerComponents, Request, RequestHeader, Result}
import uk.gov.hmrc.helptosavefrontend.config.{ErrorHandler, FrontendAppConfig}
import uk.gov.hmrc.helptosavefrontend.util.MaintenanceSchedule
import uk.gov.hmrc.http.HeaderCarrier
import uk.gov.hmrc.play.http.HeaderCarrierConverter
import uk.gov.hmrc.play.bootstrap.frontend.controller.FrontendController
/**
 * Base class for this service's frontend controllers: wires i18n, the frontend app config
 * and the maintenance schedule into implicit scope, and derives a HeaderCarrier from the
 * incoming request so downstream HTTP calls propagate session context.
 */
@Singleton
class BaseController @Inject() (
  cpd: CommonPlayDependencies,
  mcc: MessagesControllerComponents,
  errorHandler: ErrorHandler,
  maintenanceSchedule: MaintenanceSchedule
) extends FrontendController(mcc) with I18nSupport {
  override implicit val messagesApi: MessagesApi = cpd.messagesApi
  implicit val appConfig: FrontendAppConfig = cpd.appConfig
  // NOTE(review): "maintenence" is a misspelling of "maintenance", but it is a public
  // implicit member — renaming it would break subclasses, so it is only flagged here.
  implicit val maintenence: MaintenanceSchedule = maintenanceSchedule
  // NOTE(review): unconventional UpperCamelCase name for a val; kept for source
  // compatibility with subclasses that reference `Messages`.
  val Messages: MessagesApi = messagesApi
  // Builds the HeaderCarrier from the request headers plus the session.
  implicit override def hc(implicit rh: RequestHeader): HeaderCarrier =
    HeaderCarrierConverter.fromRequestAndSession(rh,rh.session)
  // Renders the standard internal-server-error page for the current request.
  def internalServerError()(implicit request: Request[_]): Result =
    InternalServerError(errorHandler.internalServerErrorTemplate(request))
}
/** Bundles the Play dependencies (frontend config and MessagesApi) shared by controllers. */
class CommonPlayDependencies @Inject() (val appConfig: FrontendAppConfig, val messagesApi: MessagesApi)
| hmrc/help-to-save-frontend | app/uk/gov/hmrc/helptosavefrontend/controllers/BaseController.scala | Scala | apache-2.0 | 2,008 |
/*
* ScalaRay - Ray tracer based on pbrt (see http://pbrt.org) written in Scala
* Copyright (C) 2009, 2010, 2011 Jesper de Jong
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.jesperdj.scalaray.renderer
import org.jesperdj.scalaray.common._
import org.jesperdj.scalaray.filter.Filter
import org.jesperdj.scalaray.raster.{ Raster, Rectangle }
import org.jesperdj.scalaray.sampler.Sample
import org.jesperdj.scalaray.spectrum.Spectrum
// Pixel buffer
// Accumulates filter-weighted radiance samples per pixel and converts the result to an image
final class PixelBuffer (rectangle: Rectangle, filter: Filter) {
  // Running weighted sum of all spectra contributed to one pixel
  private final class Pixel {
    private var accumulated: Spectrum = Spectrum.Black
    private var weightSum: Double = 0.0

    // Accumulate a single weighted sample
    def +*=(spectrum: Spectrum, weight: Double): Unit = {
      accumulated +*= (spectrum, weight)
      weightSum += weight
    }

    // Normalized pixel value; black until at least one sample with positive weight arrives
    def spectrum: Spectrum = if (weightSum > 0.0) accumulated / weightSum else Spectrum.Black
  }

  private val pixels = {
    val raster = new Raster[Pixel](rectangle)
    for (y <- rectangle.top to rectangle.bottom; x <- rectangle.left to rectangle.right) {
      raster(x, y) = new Pixel
    }
    raster
  }

  // Add the spectrum corresponding to a sample to the buffer
  def +=(sample: Sample, spectrum: Spectrum): Unit = {
    // Sample position in continuous image coordinates
    val ix = sample.cameraSample.imageX - 0.5
    val iy = sample.cameraSample.imageY - 0.5

    // Pixel range covered by the filter's extent around the sample, clipped to the raster
    val minX = math.max((ix - filter.extentX).ceil.toInt, rectangle.left)
    val maxX = math.min((ix + filter.extentX).floor.toInt, rectangle.right)
    val minY = math.max((iy - filter.extentY).ceil.toInt, rectangle.top)
    val maxY = math.min((iy + filter.extentY).floor.toInt, rectangle.bottom)

    for (y <- minY to maxY) {
      for (x <- minX to maxX) {
        val weight = filter(x - ix, y - iy)
        val pixel = pixels(x, y)
        // Per-pixel lock so concurrent sample submissions do not lose updates
        pixel.synchronized { pixel +*= (spectrum, weight) }
      }
    }
  }

  // Convert the pixels to an RGB image
  def toImage: java.awt.image.BufferedImage = {
    def toByte(value: Double): Int = clamp(value * 255.0, 0.0, 255.0).toInt

    val image = new java.awt.image.BufferedImage(
      rectangle.width, rectangle.height, java.awt.image.BufferedImage.TYPE_INT_RGB)

    for (y <- rectangle.top to rectangle.bottom; x <- rectangle.left to rectangle.right) {
      val (red, green, blue) = pixels(x, y).spectrum.toRGB
      val rgb = (toByte(red) << 16) | (toByte(green) << 8) | toByte(blue)
      image.setRGB(x - rectangle.left, y - rectangle.top, rgb)
    }

    image
  }
}
| jesperdj/scalaray | src/main/scala/org/jesperdj/scalaray/renderer/PixelBuffer.scala | Scala | gpl-3.0 | 3,155 |
package com.arcusys.learn.liferay.update.version300.migrations.scorm
import com.arcusys.valamis.persistence.common.SlickProfile
import com.arcusys.valamis.persistence.impl.scorm.model.SequencingModel
import com.arcusys.valamis.persistence.impl.scorm.schema.SequencingTableComponent
import slick.driver.JdbcProfile
import slick.jdbc.{GetResult, JdbcBackend, StaticQuery}
/**
 * Copies rows from the legacy `Learn_LFSequencing` table into the new sequencing table and
 * cascades the migration of all dependent sequencing data, remapping old ids to new ids.
 */
class SequencingMigration(val db: JdbcBackend#DatabaseDef,
                          val driver: JdbcProfile)
  extends SequencingTableComponent
    with SlickProfile {

  import driver.simple._

  // Child migrations, each re-parented from the old sequencing id to the new one.
  val childrenSelectionMigration = new ChildrenSelectionMigration(db, driver)
  val sequencingTrackingMigration = new SequencingTrackingMigration(db, driver)
  val seqPermissionsMigration = new SeqPermissionsMigration(db, driver)
  val rollupContributionMigration = new RollupContributionMigration(db, driver)
  val rollupRuleMigration = new RollupRuleMigration(db, driver)
  val conditionRuleMigration = new ConditionRuleMigration(db, driver)
  val objectiveMigration = new ObjectiveMigration(db, driver)

  // Inserts each legacy row, then migrates every dependent table using the old id -> new id
  // mapping. Assumes `a.id` is always defined for legacy rows (`.get` would throw otherwise).
  def migrate()(implicit s: JdbcBackend#Session): Unit = {
    val grades = getOldGrades
    if (grades.nonEmpty) {
      grades.foreach(a => {
        val newId = sequencingTQ.returning(sequencingTQ.map(_.id)).insert(a)
        childrenSelectionMigration.migrate(a.id.get, newId)
        sequencingTrackingMigration.migrate(a.id.get, newId)
        seqPermissionsMigration.migrate(a.id.get, newId)
        rollupContributionMigration.migrate(a.id.get, newId)
        rollupRuleMigration.migrate(a.id.get, newId)
        conditionRuleMigration.migrate(a.id.get, newId)
        objectiveMigration.migrate(a.id.get, newId)
      })
    }
  }

  // Reads all legacy rows. The GetResult reader is POSITIONAL: the calls below must match
  // the legacy table's column order exactly — do not reorder them.
  private def getOldGrades(implicit s: JdbcBackend#Session): Seq[SequencingModel] = {
    implicit val reader = GetResult[SequencingModel](r => {
      val id = r.nextLongOption() // LONG not null primary key,
      val packageId = r.nextLongOption() // packageID INTEGER null,
      val activityId = r.nextStringOption() // activityID VARCHAR(512) null,
      val sharedId = r.nextStringOption() // sharedId TEXT null,
      val sharedSequencingIdReference = r.nextStringOption() // sharedSequencingIdReference TEXT null,
      val cAttemptObjectiveProgressChild = r.nextBoolean() // cAttemptObjectiveProgressChild BOOLEAN null,
      val cAttemptAttemptProgressChild = r.nextBoolean() // cAttemptAttemptProgressChild BOOLEAN null,
      val attemptLimit = r.nextIntOption() // attemptLimit INTEGER null,
      val durationLimitInMilliseconds = r.nextLongOption() // durationLimitInMilliseconds LONG null,
      // Deliberately consumed and discarded: one legacy column is not carried over.
      // NOTE(review): presumably an obsolete LONG column between durationLimit and
      // preventChildrenActivation — confirm against the old table schema.
      r.nextLongOption()
      val preventChildrenActivation = r.nextBoolean() // preventChildrenActivation BOOLEAN null,
      val constrainChoice = r.nextBoolean() // constrainChoice BOOLEAN null
      SequencingModel(id,
        packageId,
        activityId,
        sharedId,
        sharedSequencingIdReference,
        cAttemptObjectiveProgressChild,
        cAttemptAttemptProgressChild,
        attemptLimit,
        durationLimitInMilliseconds,
        preventChildrenActivation,
        constrainChoice
      )
    })
    StaticQuery.queryNA[SequencingModel]("select * from Learn_LFSequencing").list
  }
}
| igor-borisov/valamis | learn-portlet/src/main/scala/com/arcusys/learn/liferay/update/version300/migrations/scorm/SequencingMigration.scala | Scala | gpl-3.0 | 3,246 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util.Properties
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.requests.{AddPartitionsToTxnRequest, AddPartitionsToTxnResponse}
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{BeforeEach, Test}
import scala.jdk.CollectionConverters._
class AddPartitionsToTxnRequestServerTest extends BaseRequestTest {
  private val topic1 = "topic1"
  val numPartitions = 1

  // Topic auto-creation must stay off so the "unknown" topic really is unknown to the broker.
  override def brokerPropertyOverrides(properties: Properties): Unit =
    properties.put(KafkaConfig.AutoCreateTopicsEnableProp, false.toString)

  @BeforeEach
  override def setUp(): Unit = {
    super.setUp()
    createTopic(topic1, numPartitions, servers.size, new Properties())
  }

  @Test
  def shouldReceiveOperationNotAttemptedWhenOtherPartitionHasError(): Unit = {
    // Send one known and one unknown topic in the same request. The unknown topic must be
    // answered with UNKNOWN_TOPIC_OR_PARTITION, while the known, authorized topic gets
    // OPERATION_NOT_ATTEMPTED because a sibling partition failed.
    val existingPartition = new TopicPartition(topic1, 0)
    val unknownPartition = new TopicPartition("unknownTopic", 0)

    val transactionalId = "foobar"
    val producerId = 1000L
    val producerEpoch: Short = 0

    val request = new AddPartitionsToTxnRequest.Builder(
      transactionalId,
      producerId,
      producerEpoch,
      List(existingPartition, unknownPartition).asJava).build()

    val leaderId = servers.head.config.brokerId
    val response = connectAndReceive[AddPartitionsToTxnResponse](request, brokerSocketServer(leaderId))

    assertEquals(2, response.errors.size)

    assertTrue(response.errors.containsKey(existingPartition))
    assertEquals(Errors.OPERATION_NOT_ATTEMPTED, response.errors.get(existingPartition))

    assertTrue(response.errors.containsKey(unknownPartition))
    assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, response.errors.get(unknownPartition))
  }
}
| guozhangwang/kafka | core/src/test/scala/unit/kafka/server/AddPartitionsToTxnRequestServerTest.scala | Scala | apache-2.0 | 2,862 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset down to a sample of Scala code snippets that match specific criteria, giving a quick overview of the dataset's contents without deeper analysis.