code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Copyright (c) 2016. Fengguo (Hugo) Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.cit.intellij.jawa.lang.psi.api.base
import com.intellij.psi.PsiField
import org.argus.cit.intellij.jawa.lang.psi.api.toplevel.JawaNamedElement
/**
* @author <a href="mailto:fgwei521@gmail.com">Fengguo Wei</a>
*/
trait JawaPsiField extends JawaNamedElement with PsiField {
  // Marker trait: a named Jawa element that IntelliJ can also treat as a PsiField.
  // No members of its own; implementations come from the mixed-in traits.
}
| arguslab/argus-cit-intellij | src/main/scala/org/argus/cit/intellij/jawa/lang/psi/api/base/JawaPsiField.scala | Scala | epl-1.0 | 668 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.expressions
import org.apache.spark.sql.Encoder
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
/**
* An aggregator that uses a single associative and commutative reduce function. This reduce
* function can be used to go through all input values and reduces them to a single value.
* If there is no input, a null value is returned.
*
* This class currently assumes there is at least one input row.
*/
/**
 * An aggregator built from a single associative and commutative reduce function.
 * The buffer is a pair `(seen, value)` where `seen` records whether at least one
 * input row has been folded in; `value` is only meaningful when `seen` is true.
 *
 * Calling `finish` on a buffer that never saw input is an error.
 */
private[sql] class ReduceAggregator[T: Encoder](func: (T, T) => T)
  extends Aggregator[T, (Boolean, T), T] {

  private val encoder = implicitly[Encoder[T]]

  // Empty buffer: nothing seen yet; the payload slot is a placeholder null.
  override def zero: (Boolean, T) = (false, null.asInstanceOf[T])

  override def bufferEncoder: Encoder[(Boolean, T)] =
    ExpressionEncoder.tuple(
      ExpressionEncoder[Boolean](),
      encoder.asInstanceOf[ExpressionEncoder[T]])

  override def outputEncoder: Encoder[T] = encoder

  // Fold one input value into the buffer; the first value simply seeds it.
  override def reduce(b: (Boolean, T), a: T): (Boolean, T) = b match {
    case (true, acc) => (true, func(acc, a))
    case _           => (true, a)
  }

  // Combine two partial buffers; an unseeded buffer is the identity.
  override def merge(b1: (Boolean, T), b2: (Boolean, T)): (Boolean, T) =
    (b1, b2) match {
      case ((false, _), _)       => b2
      case (_, (false, _))       => b1
      case ((_, v1), (_, v2))    => (true, func(v1, v2))
    }

  // Extract the reduced value; rejects the case of zero input rows.
  override def finish(reduction: (Boolean, T)): T = reduction match {
    case (false, _) =>
      throw new IllegalStateException("ReduceAggregator requires at least one input row")
    case (_, value) => value
  }
}
| u2009cf/spark-radar | sql/core/src/main/scala/org/apache/spark/sql/expressions/ReduceAggregator.scala | Scala | apache-2.0 | 2,242 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.nisp.utils
/** Country-name constants and the domestic/abroad classification used by NISP. */
object Country {

  final val GREAT_BRITAIN = "GREAT BRITAIN"
  final val ISLE_OF_MAN = "ISLE OF MAN"
  final val ENGLAND = "ENGLAND"
  final val SCOTLAND = "SCOTLAND"
  final val WALES = "WALES"
  final val NORTHERN_IRELAND = "NORTHERN IRELAND"
  final val NOT_SPECIFIED = "NOT SPECIFIED OR NOT USED"

  // The set of country names that count as "not abroad" (includes the
  // unspecified sentinel, which is treated as domestic).
  private val domesticCountries: Set[String] =
    Set(GREAT_BRITAIN, ISLE_OF_MAN, ENGLAND, SCOTLAND, WALES, NORTHERN_IRELAND, NOT_SPECIFIED)

  /** True when `countryName` is not one of the recognised domestic names.
   *  Comparison is exact (case-sensitive), matching the stored constants.
   */
  def isAbroad(countryName: String): Boolean =
    !domesticCountries.contains(countryName)
}
| hmrc/nisp-frontend | app/uk/gov/hmrc/nisp/utils/Country.scala | Scala | apache-2.0 | 1,354 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package reflect.internal.util
import scala.reflect.io.{AbstractFile, VirtualFile}
import scala.annotation.tailrec
import java.util.regex.Pattern
import java.io.IOException
import scala.reflect.internal.Chars._
/** Abstract base class of a source file used in the compiler. */
abstract class SourceFile {
  def content: Array[Char] // normalized, must end in SU
  def file : AbstractFile
  def isLineBreak(idx: Int): Boolean
  def isEndOfLine(idx: Int): Boolean
  def isSelfContained: Boolean
  def length : Int
  def lineCount: Int

  /** The position at `offset` in this file; `offset` must be < `length`. */
  def position(offset: Int): Position = {
    assert(offset < length, file.toString + ": " + offset + " >= " + length)
    Position.offset(this, offset)
  }

  def offsetToLine(offset: Int): Int
  def lineToOffset(index : Int): Int

  /** Map a position to a position in the underlying source file.
   *  For regular source files, simply return the argument.
   */
  def positionInUltimateSource(position: Position) = position
  override def toString() = file.name
  def path = file.path

  /** The text of line `index`, without its line terminator. */
  def lineToString(index: Int): String = {
    val lineStart = lineToOffset(index)
    @tailrec def findEnd(i: Int): Int =
      if (i < length && !isEndOfLine(i)) findEnd(i + 1) else i
    val lineEnd = findEnd(lineStart)
    new String(content, lineStart, lineEnd - lineStart)
  }

  /** Index of the first non-whitespace character at or after `offset`.
   *  NOTE(review): like the original, runs off the end of `content` if the
   *  tail is all whitespace — callers are expected to stay in bounds.
   */
  final def skipWhitespace(offset: Int): Int = {
    var i = offset
    while (content(i).isWhitespace) i += 1
    i
  }

  def identifier(pos: Position): Option[String] = None

  /** An iterator over the lines between `start` and `end`.
   *
   *  Bounds are checked and clipped as necessary.
   */
  def lines(start: Int = 0, end: Int = lineCount): Iterator[String]

  final def isJava: Boolean = file.name.endsWith(".java")
}
/** An object representing a missing source file.
 */
object NoSourceFile extends SourceFile {
  def content = Array()
  def file = NoFile
  def isLineBreak(idx: Int) = false
  def isEndOfLine(idx: Int) = false
  def isSelfContained = true
  // Sentinel values: negative length / -1 line mappings mark "no file" so
  // callers that forget to check never get a plausible-looking position.
  def length = -1
  def lineCount = 0
  def offsetToLine(offset: Int) = -1
  def lineToOffset(index : Int) = -1
  def lines(start: Int, end: Int) = Iterator.empty
  override def toString = "<no source file>"
}
/** Sentinel virtual file backing [[NoSourceFile]]. */
object NoFile extends VirtualFile("<no file>", "<no file>")
/** Factory for source files that strip a leading script header. */
object ScriptSourceFile {
  /** Length of the script header from the given content, if there is one.
   * The header begins with "#!" or "::#!" and ends with a line starting
   * with "!#" or "::!#".
   */
  def headerLength(cs: Array[Char]): Int = {
    val headerPattern = Pattern.compile("""((?m)^(::)?!#.*|^.*/env .*)(\\r|\\n|\\r\\n)""")
    val headerStarts = List("#!", "::#!")

    if (headerStarts exists (cs startsWith _)) {
      // a header is present, so it MUST be closed; an unterminated header is an error
      val matcher = headerPattern matcher cs.mkString
      if (matcher.find) matcher.end
      else throw new IOException("script file does not close its header with !# or ::!#")
    }
    else 0
  }

  /** Wraps `content` as a script file, with the header (if any) stripped
   * from the visible content but recorded as the position shift.
   */
  def apply(file: AbstractFile, content: Array[Char]) = {
    val underlying = new BatchSourceFile(file, content)
    val headerLen = headerLength(content)
    val stripped = new ScriptSourceFile(underlying, content drop headerLen, headerLen)

    stripped
  }
  def apply(underlying: BatchSourceFile) = {
    val headerLen = headerLength(underlying.content)
    new ScriptSourceFile(underlying, underlying.content drop headerLen, headerLen)
  }
}
/** A source file with its script header removed; positions are shifted back
 *  into the underlying file by `start` when mapped to the ultimate source.
 */
class ScriptSourceFile(underlying: BatchSourceFile, content: Array[Char], override val start: Int) extends BatchSourceFile(underlying.file, content) {
  override def isSelfContained = false

  override def positionInUltimateSource(pos: Position) =
    if (pos.isDefined) pos.withSource(underlying).withShift(start)
    else super.positionInUltimateSource(pos)
}
/** a file whose contents do not change over time */
class BatchSourceFile(val file : AbstractFile, content0: Array[Char]) extends SourceFile {
  def this(_file: AbstractFile) = this(_file, _file.toCharArray)
  def this(sourceName: String, cs: Seq[Char]) = this(new VirtualFile(sourceName), cs.toArray)
  def this(file: AbstractFile, cs: Seq[Char]) = this(file, cs.toArray)

  // If non-whitespace tokens run all the way up to EOF,
  // positions go wrong because the correct end of the last
  // token cannot be used as an index into the char array.
  // The least painful way to address this was to add a
  // newline to the array.
  val content = (
    if (content0.length == 0 || !content0.last.isWhitespace)
      content0 :+ '\\n'
    else content0
  )
  def length = content.length
  def lineCount = lineIndices.length - 1
  def start = 0
  def isSelfContained = true

  // Extract the identifier/operator text starting at the position's point.
  override def identifier(pos: Position) =
    if (pos.isDefined && pos.source == this && pos.point != -1) {
      def isOK(c: Char) = isIdentifierPart(c) || isOperatorPart(c)
      Some(new String(content drop pos.point takeWhile isOK))
    } else {
      super.identifier(pos)
    }

  // True when index is in bounds, is not the CR of a CR-LF pair, and `p` holds.
  private def charAtIsEOL(idx: Int)(p: Char => Boolean) = {
    // don't identify the CR in CR LF as a line break, since LF will do.
    def notCRLF0 = content(idx) != CR || !content.isDefinedAt(idx + 1) || content(idx + 1) != LF

    idx < length && notCRLF0 && p(content(idx))
  }
  def isLineBreak(idx: Int) = charAtIsEOL(idx)(isLineBreakChar)

  /** True if the index is included by an EOL sequence. */
  def isEndOfLine(idx: Int) = (content isDefinedAt idx) && PartialFunction.cond(content(idx)) {
    case CR | LF => true
  }

  /** True if the index is end of an EOL sequence. */
  def isAtEndOfLine(idx: Int) = charAtIsEOL(idx) {
    case CR | LF => true
    case _ => false
  }

  // Offsets of the first character of each line; the last entry is a sentinel
  // equal to content.length so that searches never index past the array.
  private lazy val lineIndices: Array[Int] = {
    def countEOL(cs: Array[Char]): Int = {
      var i, c = 0
      while (i < cs.length) {
        if (isAtEndOfLine(i))
          c += 1
        i += 1
      }
      c
    }

    def calculateLineIndices(cs: Array[Char]) = {
      // count EOL characters in cs
      val res = new Array[Int](countEOL(cs) + 2)
      res(0) = 0
      res(res.length - 1) = cs.length // sentinel, so that findLine below works smoother

      var i, j = 0
      while(i < cs.length && j < res.length - 1) {
        if (isAtEndOfLine(i)) {
          j += 1
          res(j) = i + 1
        }
        i += 1
      }
      res
    }
    calculateLineIndices(content)
  }

  // First offset of line `index`; throws if the line starts at/after EOF.
  def lineToOffset(index: Int): Int = {
    val offset = lineIndices(index)
    if (offset < length) offset else throw new IndexOutOfBoundsException(index.toString)
  }

  // Cache of the last line found: lookups cluster, so it is a good first pivot
  // for the binary search in offsetToLine.
  private[this] var lastLine = 0

  /** Convert offset to line in this source file.
   * Lines are numbered from 0.
   */
  def offsetToLine(offset: Int): Int = {
    val lines = lineIndices
    // binary search over lineIndices, seeded with the cached lastLine
    @tailrec
    def findLine(lo: Int, hi: Int, mid: Int): Int = (
      if (mid < lo || hi < mid) mid // minimal sanity check - as written this easily went into infinite loopyland
      else if (offset < lines(mid)) findLine(lo, mid - 1, (lo + mid - 1) / 2)
      else if (offset >= lines(mid + 1)) findLine(mid + 1, hi, (mid + 1 + hi) / 2)
      else mid
    )
    lastLine = findLine(0, lines.length, lastLine)
    lastLine
  }

  // Iterate the clipped line range, dropping each line's newline character.
  override def lines(start: Int, end: Int): Iterator[String] =
    ((start max 0) until (end min lineCount)).iterator.map { ix =>
      val off = lineIndices(ix)
      val len = 0 max (lineIndices(ix + 1) - off - 1) // drop newline character
      String.valueOf(content, off, len)
    }

  // Equality is by file path and header shift, not by content.
  override def equals(that : Any) = that match {
    case that : BatchSourceFile => file.path == that.file.path && start == that.start
    case _ => false
  }
  override def hashCode = file.path.## + start.##
}
| lrytz/scala | src/reflect/scala/reflect/internal/util/SourceFile.scala | Scala | apache-2.0 | 7,931 |
import java.util.ServiceLoader
import com.google.inject.{AbstractModule, Guice, Module}
import play.api.GlobalSettings
import services.{SimpleUUIDGenerator, UUIDGenerator}
import scala.collection.JavaConversions._
/** Play global settings wiring controller instantiation through Guice. */
object Global extends GlobalSettings {

  /** Guice modules discovered on the classpath via the JDK ServiceLoader. */
  val runtimeModules = ServiceLoader.load(classOf[Module]).iterator.toList

  // Core bindings for this application; combined with any discovered modules.
  private val coreModule: Module = new AbstractModule {
    protected def configure() = {
      bind(classOf[UUIDGenerator]).to(classOf[SimpleUUIDGenerator])
    }
  }

  val injector = Guice.createInjector(coreModule :: runtimeModules)

  // Delegate controller construction to Guice so controllers get DI.
  override def getControllerInstance[A](controllerClass: Class[A]) = injector.getInstance(controllerClass)
}
| nyavro/ooo-master | ui/app/Global.scala | Scala | apache-2.0 | 649 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.io._
import scala.collection.mutable.ArrayBuffer
import scala.util.parsing.combinator.RegexParsers
import com.fasterxml.jackson.core._
import com.fasterxml.jackson.core.json.JsonReadFeature
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.expressions.codegen.CodegenFallback
import org.apache.spark.sql.catalyst.json._
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.Utils
/** One step of a parsed JsonPath expression (e.g. `$.a[0].*`). */
private[this] sealed trait PathInstruction
private[this] object PathInstruction {
  // `[...]` marker; always followed by an Index or Wildcard instruction.
  private[expressions] case object Subscript extends PathInstruction
  // `*`: matches any field name or any array element.
  private[expressions] case object Wildcard extends PathInstruction
  // `.name` / `['name']` marker; always followed by a Named instruction.
  private[expressions] case object Key extends PathInstruction
  // A concrete array index, e.g. `[123]`.
  private[expressions] case class Index(index: Long) extends PathInstruction
  // A concrete field name, e.g. `.foo`.
  private[expressions] case class Named(name: String) extends PathInstruction
}
/** Controls how matched JSON values are written out by GetJsonObject. */
private[this] sealed trait WriteStyle
private[this] object WriteStyle {
  // Emit string values without surrounding quotes (top-level scalar result).
  private[expressions] case object RawStyle extends WriteStyle
  // Emit values as ordinary quoted JSON.
  private[expressions] case object QuotedStyle extends WriteStyle
  // Emit array elements inline into the enclosing array (flattening).
  private[expressions] case object FlattenStyle extends WriteStyle
}
/** Combinator parser turning a JsonPath string (e.g. `$.a[0].*`) into a
 * list of [[PathInstruction]]s for GetJsonObject to interpret.
 */
private[this] object JsonPathParser extends RegexParsers {
  import PathInstruction._

  // every path must begin at the document root
  def root: Parser[Char] = '$'

  def long: Parser[Long] = "\\d+".r ^? {
    case x => x.toLong
  }

  // parse `[*]` and `[123]` subscripts
  def subscript: Parser[List[PathInstruction]] =
    for {
      operand <- '[' ~> ('*' ^^^ Wildcard | long ^^ Index) <~ ']'
    } yield {
      Subscript :: operand :: Nil
    }

  // parse `.name` or `['name']` child expressions
  def named: Parser[List[PathInstruction]] =
    for {
      name <- '.' ~> "[^\\.\\[]+".r | "['" ~> "[^\\'\\?]+".r <~ "']"
    } yield {
      Key :: Named(name) :: Nil
    }

  // child wildcards: `..`, `.*` or `['*']`
  def wildcard: Parser[List[PathInstruction]] =
    (".*" | "['*']") ^^^ List(Wildcard)

  def node: Parser[List[PathInstruction]] =
    wildcard |
    named |
    subscript

  // a complete path: root followed by any number of nodes
  val expression: Parser[List[PathInstruction]] = {
    phrase(root ~> rep(node) ^^ (x => x.flatten))
  }

  /** Returns None when `str` is not a syntactically valid JsonPath. */
  def parse(str: String): Option[List[PathInstruction]] = {
    this.parseAll(expression, str) match {
      case Success(result, _) =>
        Some(result)

      case NoSuccess(msg, next) =>
        None
    }
  }
}
/** A single Jackson factory shared by all JSON expressions in this file. */
private[this] object SharedFactory {
  val jsonFactory = new JsonFactoryBuilder()
    // The two options below enabled for Hive compatibility
    .enable(JsonReadFeature.ALLOW_UNESCAPED_CONTROL_CHARS)
    .enable(JsonReadFeature.ALLOW_SINGLE_QUOTES)
    .build()
}
/**
 * Extracts json object from a json string based on json path specified, and returns json string
 * of the extracted json object. It will return null if the input json string is invalid.
 */
@ExpressionDescription(
  usage = "_FUNC_(json_txt, path) - Extracts a json object from `path`.",
  examples = """
Examples:
> SELECT _FUNC_('{"a":"b"}', '$.a');
b
""",
  group = "json_funcs")
case class GetJsonObject(json: Expression, path: Expression)
  extends BinaryExpression with ExpectsInputTypes with CodegenFallback {

  import com.fasterxml.jackson.core.JsonToken._

  import PathInstruction._
  import SharedFactory._
  import WriteStyle._

  override def left: Expression = json
  override def right: Expression = path
  override def inputTypes: Seq[DataType] = Seq(StringType, StringType)
  override def dataType: DataType = StringType
  override def nullable: Boolean = true
  override def prettyName: String = "get_json_object"

  // Cached parse of the path expression; only used when the path is foldable
  // (the common case), and evaluated lazily so non-foldable paths never hit it.
  @transient private lazy val parsedPath = parsePath(path.eval().asInstanceOf[UTF8String])

  override def eval(input: InternalRow): Any = {
    val jsonStr = json.eval(input).asInstanceOf[UTF8String]
    if (jsonStr == null) {
      return null
    }

    // Re-parse the path per row only when it is not a constant.
    val parsed = if (path.foldable) {
      parsedPath
    } else {
      parsePath(path.eval(input).asInstanceOf[UTF8String])
    }

    if (parsed.isDefined) {
      try {
        /* We know the bytes are UTF-8 encoded. Pass a Reader to avoid having Jackson
        detect character encoding which could fail for some malformed strings */
        Utils.tryWithResource(CreateJacksonParser.utf8String(jsonFactory, jsonStr)) { parser =>
          val output = new ByteArrayOutputStream()
          val matched = Utils.tryWithResource(
            jsonFactory.createGenerator(output, JsonEncoding.UTF8)) { generator =>
            parser.nextToken()
            evaluatePath(parser, generator, RawStyle, parsed.get)
          }
          if (matched) {
            UTF8String.fromBytes(output.toByteArray)
          } else {
            null
          }
        }
      } catch {
        // malformed JSON input yields null rather than failing the query
        case _: JsonProcessingException => null
      }
    } else {
      null
    }
  }

  // Returns None for a null or syntactically invalid path.
  private def parsePath(path: UTF8String): Option[List[PathInstruction]] = {
    if (path != null) {
      JsonPathParser.parse(path.toString)
    } else {
      None
    }
  }

  // advance to the desired array index, assumes to start at the START_ARRAY token
  private def arrayIndex(p: JsonParser, f: () => Boolean): Long => Boolean = {
    case _ if p.getCurrentToken == END_ARRAY =>
      // terminate, nothing has been written
      false

    case 0 =>
      // we've reached the desired index
      val dirty = f()

      while (p.nextToken() != END_ARRAY) {
        // advance the token stream to the end of the array
        p.skipChildren()
      }

      dirty

    case i if i > 0 =>
      // skip this token and evaluate the next
      p.skipChildren()
      p.nextToken()
      arrayIndex(p, f)(i - 1)
  }

  /**
   * Evaluate a list of JsonPath instructions, returning a bool that indicates if any leaf nodes
   * have been written to the generator
   */
  private def evaluatePath(
      p: JsonParser,
      g: JsonGenerator,
      style: WriteStyle,
      path: List[PathInstruction]): Boolean = {
    (p.getCurrentToken, path) match {
      case (VALUE_STRING, Nil) if style == RawStyle =>
        // there is no array wildcard or slice parent, emit this string without quotes
        if (p.hasTextCharacters) {
          g.writeRaw(p.getTextCharacters, p.getTextOffset, p.getTextLength)
        } else {
          g.writeRaw(p.getText)
        }
        true

      case (START_ARRAY, Nil) if style == FlattenStyle =>
        // flatten this array into the parent
        var dirty = false
        while (p.nextToken() != END_ARRAY) {
          dirty |= evaluatePath(p, g, style, Nil)
        }
        dirty

      case (_, Nil) =>
        // general case: just copy the child tree verbatim
        g.copyCurrentStructure(p)
        true

      case (START_OBJECT, Key :: xs) =>
        var dirty = false
        while (p.nextToken() != END_OBJECT) {
          if (dirty) {
            // once a match has been found we can skip other fields
            p.skipChildren()
          } else {
            dirty = evaluatePath(p, g, style, xs)
          }
        }
        dirty

      case (START_ARRAY, Subscript :: Wildcard :: Subscript :: Wildcard :: xs) =>
        // special handling for the non-structure preserving double wildcard behavior in Hive
        var dirty = false
        g.writeStartArray()
        while (p.nextToken() != END_ARRAY) {
          dirty |= evaluatePath(p, g, FlattenStyle, xs)
        }
        g.writeEndArray()
        dirty

      case (START_ARRAY, Subscript :: Wildcard :: xs) if style != QuotedStyle =>
        // retain Flatten, otherwise use Quoted... cannot use Raw within an array
        val nextStyle = style match {
          case RawStyle => QuotedStyle
          case FlattenStyle => FlattenStyle
          case QuotedStyle => throw new IllegalStateException()
        }

        // temporarily buffer child matches, the emitted json will need to be
        // modified slightly if there is only a single element written
        val buffer = new StringWriter()

        var dirty = 0
        Utils.tryWithResource(jsonFactory.createGenerator(buffer)) { flattenGenerator =>
          flattenGenerator.writeStartArray()

          while (p.nextToken() != END_ARRAY) {
            // track the number of array elements and only emit an outer array if
            // we've written more than one element, this matches Hive's behavior
            dirty += (if (evaluatePath(p, flattenGenerator, nextStyle, xs)) 1 else 0)
          }
          flattenGenerator.writeEndArray()
        }

        val buf = buffer.getBuffer
        if (dirty > 1) {
          g.writeRawValue(buf.toString)
        } else if (dirty == 1) {
          // remove outer array tokens
          g.writeRawValue(buf.substring(1, buf.length()-1))
        } // else do not write anything

        dirty > 0

      case (START_ARRAY, Subscript :: Wildcard :: xs) =>
        var dirty = false
        g.writeStartArray()
        while (p.nextToken() != END_ARRAY) {
          // wildcards can have multiple matches, continually update the dirty count
          dirty |= evaluatePath(p, g, QuotedStyle, xs)
        }
        g.writeEndArray()
        dirty

      case (START_ARRAY, Subscript :: Index(idx) :: (xs@Subscript :: Wildcard :: _)) =>
        p.nextToken()
        // we're going to have 1 or more results, switch to QuotedStyle
        arrayIndex(p, () => evaluatePath(p, g, QuotedStyle, xs))(idx)

      case (START_ARRAY, Subscript :: Index(idx) :: xs) =>
        p.nextToken()
        arrayIndex(p, () => evaluatePath(p, g, style, xs))(idx)

      case (FIELD_NAME, Named(name) :: xs) if p.getCurrentName == name =>
        // exact field match
        if (p.nextToken() != JsonToken.VALUE_NULL) {
          evaluatePath(p, g, style, xs)
        } else {
          false
        }

      case (FIELD_NAME, Wildcard :: xs) =>
        // wildcard field match
        p.nextToken()
        evaluatePath(p, g, style, xs)

      case _ =>
        // token/path mismatch: consume the subtree and report no match
        p.skipChildren()
        false
    }
  }
}
/** Generator expression backing `json_tuple`: extracts several fields from one
 * JSON object in a single pass over the token stream.
 */
// scalastyle:off line.size.limit line.contains.tab
@ExpressionDescription(
  usage = "_FUNC_(jsonStr, p1, p2, ..., pn) - Returns a tuple like the function get_json_object, but it takes multiple names. All the input parameters and output column types are string.",
  examples = """
Examples:
> SELECT _FUNC_('{"a":1, "b":2}', 'a', 'b');
1 2
""",
  group = "json_funcs")
// scalastyle:on line.size.limit line.contains.tab
case class JsonTuple(children: Seq[Expression])
  extends Generator with CodegenFallback {

  import SharedFactory._

  override def nullable: Boolean = {
    // a row is always returned
    false
  }

  // if processing fails this shared value will be returned
  @transient private lazy val nullRow: Seq[InternalRow] =
    new GenericInternalRow(Array.ofDim[Any](fieldExpressions.length)) :: Nil

  // the json body is the first child
  @transient private lazy val jsonExpr: Expression = children.head

  // the fields to query are the remaining children
  @transient private lazy val fieldExpressions: Seq[Expression] = children.tail

  // eagerly evaluate any foldable the field names
  // (non-foldable slots hold null, resolved per-row in parseRow)
  @transient private lazy val foldableFieldNames: IndexedSeq[Option[String]] = {
    fieldExpressions.map {
      case expr if expr.foldable => Option(expr.eval()).map(_.asInstanceOf[UTF8String].toString)
      case _ => null
    }.toIndexedSeq
  }

  // and count the number of foldable fields, we'll use this later to optimize evaluation
  @transient private lazy val constantFields: Int = foldableFieldNames.count(_ != null)

  // output columns are named c0, c1, ... and are always nullable strings
  override def elementSchema: StructType = StructType(fieldExpressions.zipWithIndex.map {
    case (_, idx) => StructField(s"c$idx", StringType, nullable = true)
  })

  override def prettyName: String = "json_tuple"

  override def checkInputDataTypes(): TypeCheckResult = {
    if (children.length < 2) {
      TypeCheckResult.TypeCheckFailure(s"$prettyName requires at least two arguments")
    } else if (children.forall(child => StringType.acceptsType(child.dataType))) {
      TypeCheckResult.TypeCheckSuccess
    } else {
      TypeCheckResult.TypeCheckFailure(s"$prettyName requires that all arguments are strings")
    }
  }

  override def eval(input: InternalRow): TraversableOnce[InternalRow] = {
    val json = jsonExpr.eval(input).asInstanceOf[UTF8String]
    if (json == null) {
      return nullRow
    }

    try {
      /* We know the bytes are UTF-8 encoded. Pass a Reader to avoid having Jackson
      detect character encoding which could fail for some malformed strings */
      Utils.tryWithResource(CreateJacksonParser.utf8String(jsonFactory, json)) { parser =>
        parseRow(parser, input)
      }
    } catch {
      // malformed JSON produces the all-null row rather than an error
      case _: JsonProcessingException =>
        nullRow
    }
  }

  private def parseRow(parser: JsonParser, input: InternalRow): Seq[InternalRow] = {
    // only objects are supported
    if (parser.nextToken() != JsonToken.START_OBJECT) {
      return nullRow
    }

    // evaluate the field names as String rather than UTF8String to
    // optimize lookups from the json token, which is also a String
    val fieldNames = if (constantFields == fieldExpressions.length) {
      // typically the user will provide the field names as foldable expressions
      // so we can use the cached copy
      foldableFieldNames.map(_.orNull)
    } else if (constantFields == 0) {
      // none are foldable so all field names need to be evaluated from the input row
      fieldExpressions.map(_.eval(input).asInstanceOf[UTF8String].toString)
    } else {
      // if there is a mix of constant and non-constant expressions
      // prefer the cached copy when available
      foldableFieldNames.zip(fieldExpressions).map {
        case (null, expr) => expr.eval(input).asInstanceOf[UTF8String].toString
        case (fieldName, _) => fieldName.orNull
      }
    }

    val row = Array.ofDim[Any](fieldNames.length)

    // start reading through the token stream, looking for any requested field names
    while (parser.nextToken() != JsonToken.END_OBJECT) {
      if (parser.getCurrentToken == JsonToken.FIELD_NAME) {
        // check to see if this field is desired in the output
        val jsonField = parser.getCurrentName
        var idx = fieldNames.indexOf(jsonField)
        if (idx >= 0) {
          // it is, copy the child tree to the correct location in the output row
          val output = new ByteArrayOutputStream()

          // write the output directly to UTF8 encoded byte array
          if (parser.nextToken() != JsonToken.VALUE_NULL) {
            Utils.tryWithResource(jsonFactory.createGenerator(output, JsonEncoding.UTF8)) {
              generator => copyCurrentStructure(generator, parser)
            }

            val jsonValue = UTF8String.fromBytes(output.toByteArray)

            // SPARK-21804: json_tuple returns null values within repeated columns
            // except the first one; so that we need to check the remaining fields.
            do {
              row(idx) = jsonValue
              idx = fieldNames.indexOf(jsonField, idx + 1)
            } while (idx >= 0)
          }
        }
      }

      // always skip children, it's cheap enough to do even if copyCurrentStructure was called
      parser.skipChildren()
    }

    new GenericInternalRow(row) :: Nil
  }

  private def copyCurrentStructure(generator: JsonGenerator, parser: JsonParser): Unit = {
    parser.getCurrentToken match {
      // if the user requests a string field it needs to be returned without enclosing
      // quotes which is accomplished via JsonGenerator.writeRaw instead of JsonGenerator.write
      case JsonToken.VALUE_STRING if parser.hasTextCharacters =>
        // slight optimization to avoid allocating a String instance, though the characters
        // still have to be decoded... Jackson doesn't have a way to access the raw bytes
        generator.writeRaw(parser.getTextCharacters, parser.getTextOffset, parser.getTextLength)

      case JsonToken.VALUE_STRING =>
        // the normal String case, pass it through to the output without enclosing quotes
        generator.writeRaw(parser.getText)

      case JsonToken.VALUE_NULL =>
        // a special case that needs to be handled outside of this method.
        // if a requested field is null, the result must be null. the easiest
        // way to achieve this is just by ignoring null tokens entirely
        throw new IllegalStateException("Do not attempt to copy a null field")

      case _ =>
        // handle other types including objects, arrays, booleans and numbers
        generator.copyCurrentStructure(parser)
    }
  }
}
/**
 * Converts an json input string to a [[StructType]], [[ArrayType]] or [[MapType]]
 * with the specified schema.
 */
// scalastyle:off line.size.limit
@ExpressionDescription(
  usage = "_FUNC_(jsonStr, schema[, options]) - Returns a struct value with the given `jsonStr` and `schema`.",
  examples = """
Examples:
> SELECT _FUNC_('{"a":1, "b":0.8}', 'a INT, b DOUBLE');
{"a":1,"b":0.8}
> SELECT _FUNC_('{"time":"26/08/2015"}', 'time Timestamp', map('timestampFormat', 'dd/MM/yyyy'));
{"time":2015-08-26 00:00:00}
""",
  group = "json_funcs",
  since = "2.2.0")
// scalastyle:on line.size.limit
case class JsonToStructs(
    schema: DataType,
    options: Map[String, String],
    child: Expression,
    timeZoneId: Option[String] = None)
  extends UnaryExpression with TimeZoneAwareExpression with CodegenFallback with ExpectsInputTypes {

  // The JSON input data might be missing certain fields. We force the nullability
  // of the user-provided schema to avoid data corruptions. In particular, the parquet-mr encoder
  // can generate incorrect files if values are missing in columns declared as non-nullable.
  val nullableSchema = schema.asNullable

  override def nullable: Boolean = true

  // Used in `FunctionRegistry`
  def this(child: Expression, schema: Expression, options: Map[String, String]) =
    this(
      schema = ExprUtils.evalTypeExpr(schema),
      options = options,
      child = child,
      timeZoneId = None)

  def this(child: Expression, schema: Expression) = this(child, schema, Map.empty[String, String])

  def this(child: Expression, schema: Expression, options: Expression) =
    this(
      schema = ExprUtils.evalTypeExpr(schema),
      options = ExprUtils.convertToMapData(options),
      child = child,
      timeZoneId = None)

  // Only struct, array and map schemas are supported targets.
  override def checkInputDataTypes(): TypeCheckResult = nullableSchema match {
    case _: StructType | _: ArrayType | _: MapType =>
      super.checkInputDataTypes()
    case _ => TypeCheckResult.TypeCheckFailure(
      s"Input schema ${nullableSchema.catalogString} must be a struct, an array or a map.")
  }

  // This converts parsed rows to the desired output by the given schema.
  @transient
  lazy val converter = nullableSchema match {
    case _: StructType =>
      (rows: Iterator[InternalRow]) => if (rows.hasNext) rows.next() else null
    case _: ArrayType =>
      (rows: Iterator[InternalRow]) => if (rows.hasNext) rows.next().getArray(0) else null
    case _: MapType =>
      (rows: Iterator[InternalRow]) => if (rows.hasNext) rows.next().getMap(0) else null
  }

  val nameOfCorruptRecord = SQLConf.get.getConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD)

  // Lazily built failure-safe Jackson parser. NOTE: `timeZoneId.get` means the
  // time zone must have been resolved (withTimeZone) before first parse.
  @transient lazy val parser = {
    val parsedOptions = new JSONOptions(options, timeZoneId.get, nameOfCorruptRecord)
    val mode = parsedOptions.parseMode
    if (mode != PermissiveMode && mode != FailFastMode) {
      throw new IllegalArgumentException(s"from_json() doesn't support the ${mode.name} mode. " +
        s"Acceptable modes are ${PermissiveMode.name} and ${FailFastMode.name}.")
    }
    // For non-struct schemas, parse under a synthetic single-column struct
    // wrapper; `converter` above unwraps the value again.
    val (parserSchema, actualSchema) = nullableSchema match {
      case s: StructType =>
        ExprUtils.verifyColumnNameOfCorruptRecord(s, parsedOptions.columnNameOfCorruptRecord)
        (s, StructType(s.filterNot(_.name == parsedOptions.columnNameOfCorruptRecord)))
      case other =>
        (StructType(StructField("value", other) :: Nil), other)
    }

    val rawParser = new JacksonParser(actualSchema, parsedOptions, allowArrayAsStructs = false)
    val createParser = CreateJacksonParser.utf8String _

    new FailureSafeParser[UTF8String](
      input => rawParser.parse(input, createParser, identity[UTF8String]),
      mode,
      parserSchema,
      parsedOptions.columnNameOfCorruptRecord)
  }

  override def dataType: DataType = nullableSchema

  override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression =
    copy(timeZoneId = Option(timeZoneId))

  override def nullSafeEval(json: Any): Any = {
    converter(parser.parse(json.asInstanceOf[UTF8String]))
  }

  override def inputTypes: Seq[AbstractDataType] = StringType :: Nil

  override def sql: String = schema match {
    case _: MapType => "entries"
    case _ => super.sql
  }

  override def prettyName: String = "from_json"
}
/**
* Converts a [[StructType]], [[ArrayType]] or [[MapType]] to a JSON output string.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(expr[, options]) - Returns a JSON string with a given struct value",
examples = """
Examples:
> SELECT _FUNC_(named_struct('a', 1, 'b', 2));
{"a":1,"b":2}
> SELECT _FUNC_(named_struct('time', to_timestamp('2015-08-26', 'yyyy-MM-dd')), map('timestampFormat', 'dd/MM/yyyy'));
{"time":"26/08/2015"}
> SELECT _FUNC_(array(named_struct('a', 1, 'b', 2)));
[{"a":1,"b":2}]
> SELECT _FUNC_(map('a', named_struct('b', 1)));
{"a":{"b":1}}
> SELECT _FUNC_(map(named_struct('a', 1),named_struct('b', 2)));
{"[1]":{"b":2}}
> SELECT _FUNC_(map('a', 1));
{"a":1}
> SELECT _FUNC_(array((map('a', 1))));
[{"a":1}]
""",
group = "json_funcs",
since = "2.2.0")
// scalastyle:on line.size.limit
case class StructsToJson(
    options: Map[String, String],
    child: Expression,
    timeZoneId: Option[String] = None)
  extends UnaryExpression with TimeZoneAwareExpression with CodegenFallback with ExpectsInputTypes {
  override def nullable: Boolean = true
  def this(options: Map[String, String], child: Expression) = this(options, child, None)
  // Used in `FunctionRegistry`
  def this(child: Expression) = this(Map.empty, child, None)
  // Two-argument SQL form: the second argument is a foldable map of JSON options.
  def this(child: Expression, options: Expression) =
    this(
      options = ExprUtils.convertToMapData(options),
      child = child,
      timeZoneId = None)
  // Buffer the Jackson generator writes into; reused and reset for every value.
  @transient
  lazy val writer = new CharArrayWriter()
  // NOTE(review): `timeZoneId.get` assumes the time zone was resolved via
  // `withTimeZone` before the generator is first used.
  @transient
  lazy val gen = new JacksonGenerator(
    inputSchema, writer, new JSONOptions(options, timeZoneId.get))
  @transient
  lazy val inputSchema = child.dataType
  // This converts rows to the JSON output according to the given schema.
  @transient
  lazy val converter: Any => UTF8String = {
    // Flush the generator and return the buffered JSON text, clearing the shared
    // writer so the next value starts from an empty buffer.
    def getAndReset(): UTF8String = {
      gen.flush()
      val json = writer.toString
      writer.reset()
      UTF8String.fromString(json)
    }
    // Choose the write path that matches the input type: struct, array, or map.
    inputSchema match {
      case _: StructType =>
        (row: Any) =>
          gen.write(row.asInstanceOf[InternalRow])
          getAndReset()
      case _: ArrayType =>
        (arr: Any) =>
          gen.write(arr.asInstanceOf[ArrayData])
          getAndReset()
      case _: MapType =>
        (map: Any) =>
          gen.write(map.asInstanceOf[MapData])
          getAndReset()
    }
  }
  override def dataType: DataType = StringType
  // Accept structs, maps and arrays that Jackson can serialize; anything else is
  // rejected with the verification error message.
  override def checkInputDataTypes(): TypeCheckResult = inputSchema match {
    case struct: StructType =>
      try {
        JacksonUtils.verifySchema(struct)
        TypeCheckResult.TypeCheckSuccess
      } catch {
        case e: UnsupportedOperationException =>
          TypeCheckResult.TypeCheckFailure(e.getMessage)
      }
    case map: MapType =>
      try {
        JacksonUtils.verifyType(prettyName, map)
        TypeCheckResult.TypeCheckSuccess
      } catch {
        case e: UnsupportedOperationException =>
          TypeCheckResult.TypeCheckFailure(e.getMessage)
      }
    case array: ArrayType =>
      try {
        JacksonUtils.verifyType(prettyName, array)
        TypeCheckResult.TypeCheckSuccess
      } catch {
        case e: UnsupportedOperationException =>
          TypeCheckResult.TypeCheckFailure(e.getMessage)
      }
    case _ => TypeCheckResult.TypeCheckFailure(
      s"Input type ${child.dataType.catalogString} must be a struct, array of structs or " +
      "a map or array of map.")
  }
  override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression =
    copy(timeZoneId = Option(timeZoneId))
  override def nullSafeEval(value: Any): Any = converter(value)
  override def inputTypes: Seq[AbstractDataType] = TypeCollection(ArrayType, StructType) :: Nil
  override def prettyName: String = "to_json"
}
/**
* A function infers schema of JSON string.
*/
@ExpressionDescription(
usage = "_FUNC_(json[, options]) - Returns schema in the DDL format of JSON string.",
examples = """
Examples:
> SELECT _FUNC_('[{"col":0}]');
array<struct<col:bigint>>
> SELECT _FUNC_('[{"col":01}]', map('allowNumericLeadingZeros', 'true'));
array<struct<col:bigint>>
""",
group = "json_funcs",
since = "2.4.0")
case class SchemaOfJson(
    child: Expression,
    options: Map[String, String])
  extends UnaryExpression with CodegenFallback {
  def this(child: Expression) = this(child, Map.empty[String, String])
  // Two-argument SQL form: the second argument is a foldable map of JSON options.
  def this(child: Expression, options: Expression) = this(
    child = child,
    options = ExprUtils.convertToMapData(options))
  override def dataType: DataType = StringType
  override def nullable: Boolean = false
  // Options for inference; built once with a fixed "UTC" zone id.
  @transient
  private lazy val jsonOptions = new JSONOptions(options, "UTC")
  @transient
  private lazy val jsonFactory = jsonOptions.buildJsonFactory()
  @transient
  private lazy val jsonInferSchema = new JsonInferSchema(jsonOptions)
  // The sample JSON string; the child must be foldable (enforced below), so it is
  // evaluated once without a row.
  @transient
  private lazy val json = child.eval().asInstanceOf[UTF8String]
  override def checkInputDataTypes(): TypeCheckResult = {
    if (child.foldable && json != null) {
      super.checkInputDataTypes()
    } else {
      TypeCheckResult.TypeCheckFailure(
        "The input json should be a foldable string expression and not null; " +
        s"however, got ${child.sql}.")
    }
  }
  // Infers the schema of the sample JSON and returns it in catalog (DDL) string form.
  override def eval(v: InternalRow): Any = {
    val dt = Utils.tryWithResource(CreateJacksonParser.utf8String(jsonFactory, json)) { parser =>
      parser.nextToken()
      // To match with schema inference from JSON datasource.
      jsonInferSchema.inferField(parser) match {
        case st: StructType =>
          jsonInferSchema.canonicalizeType(st, jsonOptions).getOrElse(StructType(Nil))
        case at: ArrayType if at.elementType.isInstanceOf[StructType] =>
          jsonInferSchema
            .canonicalizeType(at.elementType, jsonOptions)
            .map(ArrayType(_, containsNull = at.containsNull))
            .getOrElse(ArrayType(StructType(Nil), containsNull = at.containsNull))
        case other: DataType =>
          jsonInferSchema.canonicalizeType(other, jsonOptions).getOrElse(StringType)
      }
    }
    UTF8String.fromString(dt.catalogString)
  }
  override def prettyName: String = "schema_of_json"
}
/**
* A function that returns the number of elements in the outmost JSON array.
*/
@ExpressionDescription(
usage = "_FUNC_(jsonArray) - Returns the number of elements in the outmost JSON array.",
arguments = """
Arguments:
* jsonArray - A JSON array. `NULL` is returned in case of any other valid JSON string,
`NULL` or an invalid JSON.
""",
examples = """
Examples:
> SELECT _FUNC_('[1,2,3,4]');
4
> SELECT _FUNC_('[1,2,3,{"f1":1,"f2":[5,6]},4]');
5
> SELECT _FUNC_('[1,2');
NULL
""",
group = "json_funcs",
since = "3.1.0"
)
case class LengthOfJsonArray(child: Expression) extends UnaryExpression
  with CodegenFallback with ExpectsInputTypes {
  override def inputTypes: Seq[DataType] = Seq(StringType)
  override def dataType: DataType = IntegerType
  override def nullable: Boolean = true
  override def prettyName: String = "json_array_length"
  // Returns the number of elements of the outermost JSON array, or null when the
  // input is null, is not a JSON array, or is malformed JSON.
  override def eval(input: InternalRow): Any = {
    val json = child.eval(input).asInstanceOf[UTF8String]
    // return null for null input
    if (json == null) {
      return null
    }
    try {
      Utils.tryWithResource(CreateJacksonParser.utf8String(SharedFactory.jsonFactory, json)) {
        parser => {
          // return null if null array is encountered.
          if (parser.nextToken() == null) {
            return null
          }
          // Parse the array to compute its length.
          parseCounter(parser)
        }
      }
    } catch {
      // Malformed JSON yields null rather than an error.
      case _: JsonProcessingException | _: IOException => null
    }
  }
  // Counts the top-level elements of the array the parser is positioned on,
  // skipping over the contents of nested objects/arrays. Returns null when the
  // current token is not the start of an array.
  private def parseCounter(parser: JsonParser): Any = {
    var length = 0
    // Only JSON array are supported for this function.
    if (parser.currentToken != JsonToken.START_ARRAY) {
      return null
    }
    // Keep traversing until the end of JSON array
    while (parser.nextToken() != JsonToken.END_ARRAY) {
      length += 1
      // skip all the child of inner object or array
      parser.skipChildren()
    }
    length
  }
}
/**
* A function which returns all the keys of the outmost JSON object.
*/
@ExpressionDescription(
usage = "_FUNC_(json_object) - Returns all the keys of the outmost JSON object as an array.",
arguments = """
Arguments:
* json_object - A JSON object. If a valid JSON object is given, all the keys of the outmost
object will be returned as an array. If it is any other valid JSON string, an invalid JSON
string or an empty string, the function returns null.
""",
examples = """
Examples:
> SELECT _FUNC_('{}');
[]
> SELECT _FUNC_('{"key": "value"}');
["key"]
> SELECT _FUNC_('{"f1":"abc","f2":{"f3":"a", "f4":"b"}}');
["f1","f2"]
""",
group = "json_funcs",
since = "3.1.0"
)
case class JsonObjectKeys(child: Expression) extends UnaryExpression with CodegenFallback
  with ExpectsInputTypes {
  override def inputTypes: Seq[DataType] = Seq(StringType)
  override def dataType: DataType = ArrayType(StringType)
  override def nullable: Boolean = true
  override def prettyName: String = "json_object_keys"
  // Returns the keys of the outermost JSON object as an array of strings, or null
  // when the input is null, is not a JSON object, or is malformed JSON.
  override def eval(input: InternalRow): Any = {
    val json = child.eval(input).asInstanceOf[UTF8String]
    // return null for `NULL` input
    if (json == null) {
      return null
    }
    try {
      Utils.tryWithResource(CreateJacksonParser.utf8String(SharedFactory.jsonFactory, json)) {
        parser => {
          // return null if an empty string or any other valid JSON string is encountered
          if (parser.nextToken() == null || parser.currentToken() != JsonToken.START_OBJECT) {
            return null
          }
          // Parse the JSON string to get all the keys of outmost JSON object
          getJsonKeys(parser)
        }
      }
    } catch {
      // Malformed JSON yields null rather than an error.
      case _: JsonProcessingException | _: IOException => null
    }
  }
  // Collects the field names of the object the parser is positioned on, skipping
  // over the contents of nested objects/arrays.
  private def getJsonKeys(parser: JsonParser): GenericArrayData = {
    // Never reassigned, so a val; the buffer itself is mutable.
    val arrayBufferOfKeys = ArrayBuffer.empty[UTF8String]
    // traverse until the end of input and ensure it returns valid key
    while (parser.nextValue() != null && parser.currentName() != null) {
      // add current fieldName to the ArrayBuffer
      arrayBufferOfKeys += UTF8String.fromString(parser.getCurrentName)
      // skip all the children of inner object or array
      parser.skipChildren()
    }
    new GenericArrayData(arrayBufferOfKeys.toArray)
  }
}
| zuotingbing/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/jsonExpressions.scala | Scala | apache-2.0 | 32,666 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util.Collections
import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}
import kafka.utils.TestUtils
import org.apache.kafka.clients.{ClientResponse, ManualMetadataUpdater, Metadata, MockClient}
import org.apache.kafka.common.Node
import org.apache.kafka.common.message.MetadataRequestData
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.requests.{AbstractRequest, MetadataRequest, MetadataResponse, RequestTestUtils}
import org.apache.kafka.common.utils.MockTime
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.Test
import org.mockito.Mockito._
/**
 * Unit tests for `BrokerToControllerRequestThread`. Each test drives the thread
 * manually via `doWork()` against a `MockClient` and a mocked
 * `ControllerNodeProvider`, and observes the outcome through a
 * `ControllerRequestCompletionHandler`.
 */
class BrokerToControllerRequestThreadTest {
  // A request enqueued while no controller is known must time out after retryTimeoutMs.
  @Test
  def testRetryTimeoutWhileControllerNotAvailable(): Unit = {
    val time = new MockTime()
    val config = new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181"))
    val metadata = mock(classOf[Metadata])
    val mockClient = new MockClient(time, metadata)
    val controllerNodeProvider = mock(classOf[ControllerNodeProvider])
    when(controllerNodeProvider.get()).thenReturn(None)
    val retryTimeoutMs = 30000
    val testRequestThread = new BrokerToControllerRequestThread(mockClient, new ManualMetadataUpdater(), controllerNodeProvider,
      config, time, "", retryTimeoutMs)
    testRequestThread.started = true
    val completionHandler = new TestRequestCompletionHandler(None)
    val queueItem = BrokerToControllerQueueItem(
      time.milliseconds(),
      new MetadataRequest.Builder(new MetadataRequestData()),
      completionHandler
    )
    testRequestThread.enqueue(queueItem)
    testRequestThread.doWork()
    assertEquals(1, testRequestThread.queueSize)
    time.sleep(retryTimeoutMs)
    testRequestThread.doWork()
    assertEquals(0, testRequestThread.queueSize)
    assertTrue(completionHandler.timedOut.get)
  }
  @Test
  def testRequestsSent(): Unit = {
    // just a simple test that tests whether the request from 1 -> 2 is sent and the response callback is called
    val time = new MockTime()
    val config = new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181"))
    val controllerId = 2
    val metadata = mock(classOf[Metadata])
    val mockClient = new MockClient(time, metadata)
    val controllerNodeProvider = mock(classOf[ControllerNodeProvider])
    val activeController = new Node(controllerId, "host", 1234)
    when(controllerNodeProvider.get()).thenReturn(Some(activeController))
    val expectedResponse = RequestTestUtils.metadataUpdateWith(2, Collections.singletonMap("a", 2))
    val testRequestThread = new BrokerToControllerRequestThread(mockClient, new ManualMetadataUpdater(), controllerNodeProvider,
      config, time, "", retryTimeoutMs = Long.MaxValue)
    testRequestThread.started = true
    mockClient.prepareResponse(expectedResponse)
    val completionHandler = new TestRequestCompletionHandler(Some(expectedResponse))
    val queueItem = BrokerToControllerQueueItem(
      time.milliseconds(),
      new MetadataRequest.Builder(new MetadataRequestData()),
      completionHandler
    )
    testRequestThread.enqueue(queueItem)
    assertEquals(1, testRequestThread.queueSize)
    // initialize to the controller
    testRequestThread.doWork()
    // send and process the request
    testRequestThread.doWork()
    assertEquals(0, testRequestThread.queueSize)
    assertTrue(completionHandler.completed.get())
  }
  @Test
  def testControllerChanged(): Unit = {
    // in this test the current broker is 1, and the controller changes from 2 -> 3 then back: 3 -> 2
    val time = new MockTime()
    val config = new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181"))
    val oldControllerId = 1
    val newControllerId = 2
    val metadata = mock(classOf[Metadata])
    val mockClient = new MockClient(time, metadata)
    val controllerNodeProvider = mock(classOf[ControllerNodeProvider])
    val oldController = new Node(oldControllerId, "host1", 1234)
    val newController = new Node(newControllerId, "host2", 1234)
    // the provider reports the old controller first, then the new one
    when(controllerNodeProvider.get()).thenReturn(Some(oldController), Some(newController))
    val expectedResponse = RequestTestUtils.metadataUpdateWith(3, Collections.singletonMap("a", 2))
    val testRequestThread = new BrokerToControllerRequestThread(mockClient, new ManualMetadataUpdater(),
      controllerNodeProvider, config, time, "", retryTimeoutMs = Long.MaxValue)
    testRequestThread.started = true
    val completionHandler = new TestRequestCompletionHandler(Some(expectedResponse))
    val queueItem = BrokerToControllerQueueItem(
      time.milliseconds(),
      new MetadataRequest.Builder(new MetadataRequestData()),
      completionHandler,
    )
    testRequestThread.enqueue(queueItem)
    mockClient.prepareResponse(expectedResponse)
    // initialize the thread with oldController
    testRequestThread.doWork()
    assertFalse(completionHandler.completed.get())
    // disconnect the node
    mockClient.setUnreachable(oldController, time.milliseconds() + 5000)
    // verify that the client closed the connection to the faulty controller
    testRequestThread.doWork()
    // should connect to the new controller
    testRequestThread.doWork()
    // should send the request and process the response
    testRequestThread.doWork()
    assertTrue(completionHandler.completed.get())
  }
  // A NOT_CONTROLLER error response must clear the active controller and cause a
  // retry against the node the provider reports next.
  @Test
  def testNotController(): Unit = {
    val time = new MockTime()
    val config = new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181"))
    val oldControllerId = 1
    val newControllerId = 2
    val metadata = mock(classOf[Metadata])
    val mockClient = new MockClient(time, metadata)
    val controllerNodeProvider = mock(classOf[ControllerNodeProvider])
    val port = 1234
    val oldController = new Node(oldControllerId, "host1", port)
    val newController = new Node(newControllerId, "host2", port)
    when(controllerNodeProvider.get()).thenReturn(Some(oldController), Some(newController))
    val responseWithNotControllerError = RequestTestUtils.metadataUpdateWith("cluster1", 2,
      Collections.singletonMap("a", Errors.NOT_CONTROLLER),
      Collections.singletonMap("a", 2))
    val expectedResponse = RequestTestUtils.metadataUpdateWith(3, Collections.singletonMap("a", 2))
    val testRequestThread = new BrokerToControllerRequestThread(mockClient, new ManualMetadataUpdater(), controllerNodeProvider,
      config, time, "", retryTimeoutMs = Long.MaxValue)
    testRequestThread.started = true
    val completionHandler = new TestRequestCompletionHandler(Some(expectedResponse))
    val queueItem = BrokerToControllerQueueItem(
      time.milliseconds(),
      new MetadataRequest.Builder(new MetadataRequestData()
        .setAllowAutoTopicCreation(true)),
      completionHandler
    )
    testRequestThread.enqueue(queueItem)
    // initialize to the controller
    testRequestThread.doWork()
    val oldBrokerNode = new Node(oldControllerId, "host1", port)
    assertEquals(Some(oldBrokerNode), testRequestThread.activeControllerAddress())
    // send and process the request
    mockClient.prepareResponse((body: AbstractRequest) => {
      body.isInstanceOf[MetadataRequest] &&
        body.asInstanceOf[MetadataRequest].allowAutoTopicCreation()
    }, responseWithNotControllerError)
    testRequestThread.doWork()
    assertEquals(None, testRequestThread.activeControllerAddress())
    // reinitialize the controller to a different node
    testRequestThread.doWork()
    // process the request again
    mockClient.prepareResponse(expectedResponse)
    testRequestThread.doWork()
    val newControllerNode = new Node(newControllerId, "host2", port)
    assertEquals(Some(newControllerNode), testRequestThread.activeControllerAddress())
    assertTrue(completionHandler.completed.get())
  }
  // When the retry timeout elapses before a successful response, the handler's
  // timeout callback must fire instead of the completion callback.
  @Test
  def testRetryTimeout(): Unit = {
    val time = new MockTime()
    val config = new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181"))
    val controllerId = 1
    val metadata = mock(classOf[Metadata])
    val mockClient = new MockClient(time, metadata)
    val controllerNodeProvider = mock(classOf[ControllerNodeProvider])
    val controller = new Node(controllerId, "host1", 1234)
    when(controllerNodeProvider.get()).thenReturn(Some(controller))
    val retryTimeoutMs = 30000
    val responseWithNotControllerError = RequestTestUtils.metadataUpdateWith("cluster1", 2,
      Collections.singletonMap("a", Errors.NOT_CONTROLLER),
      Collections.singletonMap("a", 2))
    val testRequestThread = new BrokerToControllerRequestThread(mockClient, new ManualMetadataUpdater(), controllerNodeProvider,
      config, time, "", retryTimeoutMs)
    testRequestThread.started = true
    val completionHandler = new TestRequestCompletionHandler()
    val queueItem = BrokerToControllerQueueItem(
      time.milliseconds(),
      new MetadataRequest.Builder(new MetadataRequestData()
        .setAllowAutoTopicCreation(true)),
      completionHandler
    )
    testRequestThread.enqueue(queueItem)
    // initialize to the controller
    testRequestThread.doWork()
    time.sleep(retryTimeoutMs)
    // send and process the request
    mockClient.prepareResponse((body: AbstractRequest) => {
      body.isInstanceOf[MetadataRequest] &&
        body.asInstanceOf[MetadataRequest].allowAutoTopicCreation()
    }, responseWithNotControllerError)
    testRequestThread.doWork()
    assertTrue(completionHandler.timedOut.get())
  }
  // An UnsupportedVersionException response must be delivered to the completion
  // handler as a version mismatch, not a timeout.
  @Test
  def testUnsupportedVersionHandling(): Unit = {
    val time = new MockTime()
    val config = new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181"))
    val controllerId = 2
    val metadata = mock(classOf[Metadata])
    val mockClient = new MockClient(time, metadata)
    val controllerNodeProvider = mock(classOf[ControllerNodeProvider])
    val activeController = new Node(controllerId, "host", 1234)
    when(controllerNodeProvider.get()).thenReturn(Some(activeController))
    val callbackResponse = new AtomicReference[ClientResponse]()
    val completionHandler = new ControllerRequestCompletionHandler {
      override def onTimeout(): Unit = fail("Unexpected timeout exception")
      override def onComplete(response: ClientResponse): Unit = callbackResponse.set(response)
    }
    val queueItem = BrokerToControllerQueueItem(
      time.milliseconds(),
      new MetadataRequest.Builder(new MetadataRequestData()),
      completionHandler
    )
    mockClient.prepareUnsupportedVersionResponse(request => request.apiKey == ApiKeys.METADATA)
    val testRequestThread = new BrokerToControllerRequestThread(mockClient, new ManualMetadataUpdater(), controllerNodeProvider,
      config, time, "", retryTimeoutMs = Long.MaxValue)
    testRequestThread.started = true
    testRequestThread.enqueue(queueItem)
    pollUntil(testRequestThread, () => callbackResponse.get != null)
    assertNotNull(callbackResponse.get.versionMismatch)
  }
  // An authentication error must be delivered to the completion handler as an
  // authentication exception, not a timeout.
  @Test
  def testAuthenticationExceptionHandling(): Unit = {
    val time = new MockTime()
    val config = new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181"))
    val controllerId = 2
    val metadata = mock(classOf[Metadata])
    val mockClient = new MockClient(time, metadata)
    val controllerNodeProvider = mock(classOf[ControllerNodeProvider])
    val activeController = new Node(controllerId, "host", 1234)
    when(controllerNodeProvider.get()).thenReturn(Some(activeController))
    val callbackResponse = new AtomicReference[ClientResponse]()
    val completionHandler = new ControllerRequestCompletionHandler {
      override def onTimeout(): Unit = fail("Unexpected timeout exception")
      override def onComplete(response: ClientResponse): Unit = callbackResponse.set(response)
    }
    val queueItem = BrokerToControllerQueueItem(
      time.milliseconds(),
      new MetadataRequest.Builder(new MetadataRequestData()),
      completionHandler
    )
    mockClient.createPendingAuthenticationError(activeController, 50)
    val testRequestThread = new BrokerToControllerRequestThread(mockClient, new ManualMetadataUpdater(), controllerNodeProvider,
      config, time, "", retryTimeoutMs = Long.MaxValue)
    testRequestThread.started = true
    testRequestThread.enqueue(queueItem)
    pollUntil(testRequestThread, () => callbackResponse.get != null)
    assertNotNull(callbackResponse.get.authenticationException)
  }
  @Test
  def testThreadNotStarted(): Unit = {
    // Make sure we throw if we enqueue anything while the thread is not running
    val time = new MockTime()
    val config = new KafkaConfig(TestUtils.createBrokerConfig(1, "localhost:2181"))
    val metadata = mock(classOf[Metadata])
    val mockClient = new MockClient(time, metadata)
    val controllerNodeProvider = mock(classOf[ControllerNodeProvider])
    val testRequestThread = new BrokerToControllerRequestThread(mockClient, new ManualMetadataUpdater(), controllerNodeProvider,
      config, time, "", retryTimeoutMs = Long.MaxValue)
    val completionHandler = new TestRequestCompletionHandler(None)
    val queueItem = BrokerToControllerQueueItem(
      time.milliseconds(),
      new MetadataRequest.Builder(new MetadataRequestData()),
      completionHandler
    )
    assertThrows(classOf[IllegalStateException], () => testRequestThread.enqueue(queueItem))
    assertEquals(0, testRequestThread.queueSize)
  }
  // Repeatedly calls doWork() until the condition holds or maxRetries is reached,
  // failing the test if the condition is never met.
  private def pollUntil(
    requestThread: BrokerToControllerRequestThread,
    condition: () => Boolean,
    maxRetries: Int = 10
  ): Unit = {
    var tries = 0
    do {
      requestThread.doWork()
      tries += 1
    } while (!condition.apply() && tries < maxRetries)
    if (!condition.apply()) {
      fail(s"Condition failed to be met after polling $tries times")
    }
  }
  // Records completion/timeout; when an expected response is supplied, asserts the
  // received response body equals it.
  class TestRequestCompletionHandler(
    expectedResponse: Option[MetadataResponse] = None
  ) extends ControllerRequestCompletionHandler {
    val completed: AtomicBoolean = new AtomicBoolean(false)
    val timedOut: AtomicBoolean = new AtomicBoolean(false)
    override def onComplete(response: ClientResponse): Unit = {
      expectedResponse.foreach { expected =>
        assertEquals(expected, response.responseBody())
      }
      completed.set(true)
    }
    override def onTimeout(): Unit = {
      timedOut.set(true)
    }
  }
}
| Chasego/kafka | core/src/test/scala/kafka/server/BrokerToControllerRequestThreadTest.scala | Scala | apache-2.0 | 15,121 |
package net.ssanj.dabble
/**
 * Strongly-typed wrappers around the directory and file paths used by Dabble.
 */
trait DabblePathTypes {
  // Factory methods for the raw path types.
  object DabblePathTypes {
    def dirPath(dir: String): DirPath = DirPath(dir)
    def filePath(dir: DirPath, file: String): FilePath = FilePath(dir, file)
  }
  // A directory path; `/` joins either a subdirectory or a file name with a slash.
  case class DirPath(dir: String) {
    def /(subdir: DirPath): DirPath = DirPath(s"${dir}/${subdir.dir}")
    def /(filePath: String): FilePath = FilePath(this, filePath)
  }
  // A file inside a directory; `file` is the full slash-joined path string.
  case class FilePath(dir: DirPath, filename: String) {
    val file: String = s"${dir.dir}/$filename"
  }
  import DabblePathTypes._
  // The work directory, which contains the default generated build file.
  case class DabbleWorkPath(path: DirPath) {
    val defaultBuildFile: DabbleDefaultWorkBuildFile = DabbleDefaultWorkBuildFile(path/"build.sbt")
  }
  case class DabbleTemplatesPath(path: DirPath)
  case class DabbleHistoryFilePath(path: FilePath)
  case class DabbleDefaultBuildFile(path: FilePath)
  case class DabbleDefaultWorkBuildFile(path: FilePath)
  // Layout of the Dabble home directory: work dir, templates dir, history file,
  // and default build file.
  // NOTE(review): `workDir`, `templatesDir`, `historyFile`, `buildFile` and `appDir`
  // are not defined in this file — presumably package-level constants; verify.
  case class DabbleHomePath(path: DirPath) {
    val work = DabbleWorkPath(path/dirPath(workDir))
    val templates = DabbleTemplatesPath(path/dirPath(templatesDir))
    val history = DabbleHistoryFilePath(path/historyFile)
    val defaultBuildFile = DabbleDefaultBuildFile(path/buildFile)
  }
  // Builds the Dabble home path rooted at the given user home directory.
  def dabbleHomePath(userHome: String): DabbleHomePath = DabbleHomePath(dirPath(userHome)/dirPath(appDir))
}
object DabblePathTypes extends DabblePathTypes
| ssanj/dabble | src/main/scala/net/ssanj/dabble/DabblePathTypes.scala | Scala | mit | 1,346 |
package slick.lifted
import slick.util.ConstArray
import scala.language.higherKinds
import scala.language.experimental.macros
import scala.annotation.implicitNotFound
import scala.reflect.macros.blackbox.Context
import slick.ast.{Join => AJoin, _}
import FunctionSymbolExtensionMethods._
import ScalaBaseType._
sealed trait QueryBase[T] extends Rep[T]
/** An instance of Query represents a query or view, i.e. a computation of a
* collection type (Rep[Seq[T]]). It is parameterized with both, the mixed
* type (the type of values you see e.g. when you call map()) and the unpacked
* type (the type of values that you get back when you run the query).
*
* Additional extension methods for queries containing a single column are
* defined in [[slick.lifted.SingleColumnQueryExtensionMethods]].
*/
sealed abstract class Query[+E, U, C[_]] extends QueryBase[C[U]] { self =>
def shaped: ShapedValue[_ <: E, U]
final lazy val packed = shaped.toNode
/** Build a new query by applying a function to all elements of this query
* and using the elements of the resulting queries. This corresponds to an
* implicit inner join in SQL. */
def flatMap[F, T, D[_]](f: E => Query[F, T, D]): Query[F, T, C] = {
val generator = new AnonSymbol
val aliased = shaped.encodeRef(Ref(generator)).value
val fv = f(aliased)
new WrappingQuery[F, T, C](new Bind(generator, toNode, fv.toNode), fv.shaped)
}
/** Build a new query by applying a function to all elements of this query. */
def map[F, G, T](f: E => F)(implicit shape: Shape[_ <: FlatShapeLevel, F, T, G]): Query[G, T, C] =
flatMap(v => Query[F, T, G](f(v)))
/** Select all elements of this query which satisfy a predicate. */
private def filterHelper[T](f: E => T, wrapExpr: Node => Node)
(implicit wt: CanBeQueryCondition[T]): Query[E, U, C] = {
val generator = new AnonSymbol
val aliased = shaped.encodeRef(Ref(generator))
val fv = f(aliased.value)
new WrappingQuery[E, U, C](Filter.ifRefutable(generator, toNode, wrapExpr(wt(fv).toNode)), shaped)
}
/** Select all elements of this query which satisfy a predicate. Unlike
* `withFilter, this method only allows `Rep`-valued predicates, so it
* guards against the accidental use plain Booleans. */
def filter[T <: Rep[_]](f: E => T)(implicit wt: CanBeQueryCondition[T]): Query[E, U, C] =
withFilter(f)
def filterNot[T <: Rep[_]](f: E => T)(implicit wt: CanBeQueryCondition[T]): Query[E, U, C] =
filterHelper(f, node => Library.Not.typed(node.nodeType, node) )
/** Applies the given filter, if the Option value is defined.
* If the value is None, the filter will not be part of the query. */
def filterOpt[V, T <: Rep[_] : CanBeQueryCondition](optValue: Option[V])(f: (E, V) => T): Query[E, U, C] =
optValue.map(v => withFilter(a => f(a, v))).getOrElse(this)
/** Applies the given filter function, if the boolean parameter `p` evaluates to true.
* If not, the filter will not be part of the query. */
def filterIf(p: Boolean)(f: E => Rep[Boolean]): Query[E, U, C] =
if (p) withFilter(f) else this
/** Select all elements of this query which satisfy a predicate. This method
* is used when desugaring for-comprehensions over queries. There is no
* reason to call it directly because it is the same as `filter`. */
def withFilter[T : CanBeQueryCondition](f: E => T) = filterHelper(f, identity)
/** Join two queries with a cross join or inner join.
* An optional join predicate can be specified later by calling `on`. */
def join[E2, U2, D[_]](q2: Query[E2, U2, D]) = {
val leftGen, rightGen = new AnonSymbol
val aliased1 = shaped.encodeRef(Ref(leftGen))
val aliased2 = q2.shaped.encodeRef(Ref(rightGen))
new BaseJoinQuery[E, E2, U, U2, C, E, E2](leftGen, rightGen, toNode, q2.toNode, JoinType.Inner,
aliased1.zip(aliased2), aliased1.value, aliased2.value)
}
/** Join two queries with a left outer join.
* An optional join predicate can be specified later by calling `on`.
* The right side of the join is lifted to an `Option`. If at least one element on the right
* matches, all matching elements are returned as `Some`, otherwise a single `None` row is
* returned. */
def joinLeft[E2, U2, D[_], O2](q2: Query[E2, _, D])(implicit ol: OptionLift[E2, O2], sh: Shape[FlatShapeLevel, O2, U2, _]) = {
val leftGen, rightGen = new AnonSymbol
val aliased1 = shaped.encodeRef(Ref(leftGen))
val aliased2 = ShapedValue(ol.lift(q2.shaped.value), sh).encodeRef(Ref(rightGen))
new BaseJoinQuery[E, O2, U, U2, C, E, E2](leftGen, rightGen, toNode, q2.toNode, JoinType.LeftOption,
aliased1.zip(aliased2), aliased1.value, q2.shaped.encodeRef(Ref(rightGen)).value)
}
/** Join two queries with a right outer join.
* An optional join predicate can be specified later by calling `on`.
* The left side of the join is lifted to an `Option`. If at least one element on the left
* matches, all matching elements are returned as `Some`, otherwise a single `None` row is
* returned. */
def joinRight[E1 >: E, E2, U2, D[_], O1, U1](q2: Query[E2, U2, D])(implicit ol: OptionLift[E1, O1], sh: Shape[FlatShapeLevel, O1, U1, _]) = {
val leftGen, rightGen = new AnonSymbol
val aliased1 = ShapedValue(ol.lift(shaped.value), sh).encodeRef(Ref(leftGen))
val aliased2 = q2.shaped.encodeRef(Ref(rightGen))
new BaseJoinQuery[O1, E2, U1, U2, C, E, E2](leftGen, rightGen, toNode, q2.toNode, JoinType.RightOption,
aliased1.zip(aliased2), shaped.encodeRef(Ref(leftGen)).value, aliased2.value)
}
/** Join two queries with a full outer join.
* An optional join predicate can be specified later by calling `on`.
* Both sides of the join are lifted to an `Option`. If at least one element on either side
* matches the other side, all matching elements are returned as `Some`, otherwise a single
* `None` row is returned. */
def joinFull[E1 >: E, E2, U2, D[_], O1, U1, O2](q2: Query[E2, _, D])(implicit ol1: OptionLift[E1, O1], sh1: Shape[FlatShapeLevel, O1, U1, _], ol2: OptionLift[E2, O2], sh2: Shape[FlatShapeLevel, O2, U2, _]) = {
val leftGen, rightGen = new AnonSymbol
val aliased1 = ShapedValue(ol1.lift(shaped.value), sh1).encodeRef(Ref(leftGen))
val aliased2 = ShapedValue(ol2.lift(q2.shaped.value), sh2).encodeRef(Ref(rightGen))
new BaseJoinQuery[O1, O2, U1, U2, C, E, E2](leftGen, rightGen, toNode, q2.toNode, JoinType.OuterOption,
aliased1.zip(aliased2), shaped.encodeRef(Ref(leftGen)).value, q2.shaped.encodeRef(Ref(rightGen)).value)
}
private[this] def standardJoin[E2, U2, D[_]](q2: Query[E2, U2, D], jt: JoinType) = {
val leftGen, rightGen = new AnonSymbol
val aliased1 = shaped.encodeRef(Ref(leftGen))
val aliased2 = q2.shaped.encodeRef(Ref(rightGen))
new BaseJoinQuery[E, E2, U, U2, C, E, E2](leftGen, rightGen, toNode, q2.toNode, jt,
aliased1.zip(aliased2), aliased1.value, aliased2.value)
}
/** Return a query formed from this query and another query by combining
* corresponding elements in pairs. */
def zip[E2, U2, D[_]](q2: Query[E2, U2, D]): Query[(E, E2), (U, U2), C] = standardJoin(q2, JoinType.Zip)
/** Return a query formed from this query and another query by combining
* corresponding elements with the specified function. */
def zipWith[E2, U2, F, G, T, D[_]](q2: Query[E2, U2, D], f: (E, E2) => F)(implicit shape: Shape[_ <: FlatShapeLevel, F, T, G]): Query[G, T, C] =
standardJoin(q2, JoinType.Zip).map[F, G, T](x => f(x._1, x._2))
  /** Zip this query with its indices (starting at 0).
    * The index side is a `RangeFrom(0L)` node zipped against this query, so each row
    * is paired with a `Rep[Long]` row number. */
  def zipWithIndex = {
    val leftGen, rightGen = new AnonSymbol
    val aliased1 = shaped.encodeRef(Ref(leftGen))
    // The right-hand side is a synthetic Long column referencing the RangeFrom node.
    val aliased2 = ShapedValue(Rep.forNode[Long](Ref(rightGen)), Shape.repColumnShape[Long, FlatShapeLevel])
    new BaseJoinQuery[E, Rep[Long], U, Long, C, E, Rep[Long]](leftGen, rightGen, toNode, RangeFrom(0L), JoinType.Zip, aliased1.zip(aliased2), aliased1.value, aliased2.value)
  }
  /** Sort this query according to a function which extracts the ordering
    * criteria from the query's elements. */
  def sortBy[T](f: E => T)(implicit ev: T => Ordered): Query[E, U, C] = {
    val generator = new AnonSymbol
    // Alias the query under a fresh symbol so the ordering columns reference it.
    val aliased = shaped.encodeRef(Ref(generator))
    new WrappingQuery[E, U, C](SortBy(generator, toNode, ConstArray.from(f(aliased.value).columns)), shaped)
  }
  /** Sort this query according to the ordering of its elements. */
  def sorted(implicit ev: (E => Ordered)): Query[E, U, C] = sortBy(identity)
  /** Partition this query into a query of pairs of a key and a nested query
    * containing the elements for the key, according to some discriminator
    * function. The value side is this query re-packed as a `Seq`-typed nested query. */
  def groupBy[K, T, G, P](f: E => K)(implicit kshape: Shape[_ <: FlatShapeLevel, K, T, G], vshape: Shape[_ <: FlatShapeLevel, E, _, P]): Query[(G, Query[P, U, Seq]), (T, Query[P, U, Seq]), C] = {
    val sym = new AnonSymbol
    // Key expression computed against the aliased row, packed to its canonical shape.
    val key = ShapedValue(f(shaped.encodeRef(Ref(sym)).value), kshape).packedValue
    val value = ShapedValue(pack.to[Seq], RepShape[FlatShapeLevel, Query[P, U, Seq], Query[P, U, Seq]])
    val group = GroupBy(sym, toNode, key.toNode)
    new WrappingQuery[(G, Query[P, U, Seq]), (T, Query[P, U, Seq]), C](group, key.zip(value))
  }
  /** Specify part of a select statement for update and marked for row level locking */
  def forUpdate: Query[E, U, C] = {
    val generator = new AnonSymbol
    new WrappingQuery[E, U, C](ForUpdate(generator, toNode), shaped)
  }
  /** Re-point this query at the given AST path. Used internally when a query value
    * must be referenced through a symbol instead of its own node. */
  def encodeRef(path: Node): Query[E, U, C] = new Query[E, U, C] {
    val shaped = self.shaped.encodeRef(path)
    def toNode = path
  }
  /** Return a new query containing the elements from both operands. Duplicate
    * elements are eliminated from the result (SQL `UNION`, i.e. `Union(all = false)`). */
  def union[O >: E, R, D[_]](other: Query[O, U, D]): Query[O, U, C] =
    new WrappingQuery[O, U, C](Union(toNode, other.toNode, false), shaped)
  /** Return a new query containing the elements from both operands. Duplicate
    * elements are preserved (SQL `UNION ALL`, i.e. `Union(all = true)`). */
  def unionAll[O >: E, R, D[_]](other: Query[O, U, D]): Query[O, U, C] =
    new WrappingQuery[O, U, C](Union(toNode, other.toNode, true), shaped)
  /** Return a new query containing the elements from both operands. Duplicate
    * elements are preserved. Alias for `unionAll`. */
  def ++[O >: E, R, D[_]](other: Query[O, U, D]) = unionAll(other)
  /** The total number of elements (i.e. rows). */
  def length: Rep[Int] = Library.CountAll.column(toNode)
  /** The total number of elements (i.e. rows). Alias for `length`. */
  def size = length
  /** The number of distinct elements of the query. */
  @deprecated("Use `length` on `distinct` or `distinctOn` instead of `countDistinct`", "3.2")
  def countDistinct: Rep[Int] = Library.CountDistinct.column(toNode)
  /** Test whether this query is non-empty. */
  def exists = Library.Exists.column[Boolean](toNode)
  /** Repack this query's value to its packed representation `R` as determined by the
    * given `Shape`. The AST node is left unchanged; only the shaped value is rewritten. */
  def pack[R](implicit packing: Shape[_ <: FlatShapeLevel, E, _, R]): Query[R, U, C] =
    new Query[R, U, C] {
      val shaped: ShapedValue[_ <: R, U] = self.shaped.packedValue(packing)
      def toNode = self.toNode
    }
  /** Select the first `num` elements. */
  def take(num: ConstColumn[Long]): Query[E, U, C] = new WrappingQuery[E, U, C](Take(toNode, num.toNode), shaped)
  /** Select the first `num` elements. Convenience overload wrapping `num` in a literal column. */
  def take(num: Long): Query[E, U, C] = take(LiteralColumn(num))
  /** Select the first `num` elements. Convenience overload widening to `Long`. */
  def take(num: Int): Query[E, U, C] = take(num.toLong)
  /** Select all elements except the first `num` ones. */
  def drop(num: ConstColumn[Long]): Query[E, U, C] = new WrappingQuery[E, U, C](Drop(toNode, num.toNode), shaped)
  /** Select all elements except the first `num` ones. Convenience overload wrapping `num` in a literal column. */
  def drop(num: Long): Query[E, U, C] = drop(LiteralColumn(num))
  /** Select all elements except the first `num` ones. Convenience overload widening to `Long`. */
  def drop(num: Int): Query[E, U, C] = drop(num.toLong)
  /** Remove duplicate elements. When used on an ordered Query, there is no guarantee in which
    * order duplicates are removed. This method is equivalent to `distinctOn(identity)`. */
  def distinct: Query[E, U, C] =
    distinctOn[E, U](identity)(shaped.shape.asInstanceOf[Shape[FlatShapeLevel, E, U, _]])
  /** Remove duplicate elements which are the same in the given projection. When used on an
    * ordered Query, there is no guarantee in which order duplicates are removed. */
  def distinctOn[F, T](f: E => F)(implicit shape: Shape[_ <: FlatShapeLevel, F, T, _]): Query[E, U, C] = {
    val generator = new AnonSymbol
    // Compute the discriminator projection against the aliased row.
    val aliased = shaped.encodeRef(Ref(generator)).value
    val fv = f(aliased)
    new WrappingQuery[E, U, C](Distinct(generator, toNode, shape.toNode(fv)), shaped)
  }
  /** Change the collection type to build when executing the query. Wraps the AST in a
    * `CollectionCast` node; the shaped value is unchanged. */
  def to[D[_]](implicit ctc: TypedCollectionTypeConstructor[D]): Query[E, U, D] = new Query[E, U, D] {
    val shaped = self.shaped
    def toNode = CollectionCast(self.toNode, ctc)
  }
  /** Force a subquery to be created when using this Query as part of a larger Query. This method
    * should never be necessary for correctness. If a query works with an explicit `.subquery` call
    * but fails without, this should be considered a bug in Slick. The method is exposed in the API
    * to enable workarounds to be written in such cases. */
  def subquery: Query[E, U, C] = new WrappingQuery[E, U, C](Subquery(toNode, Subquery.Default), shaped)
}
/** The companion object for Query contains factory methods for creating queries. */
object Query {
  /** Lift a scalar value to a Query. */
  def apply[E, U, R](value: E)(implicit unpack: Shape[_ <: FlatShapeLevel, E, U, R]): Query[R, U, Seq] = {
    val shaped = ShapedValue(value, unpack).packedValue
    new WrappingQuery[R, U, Seq](Pure(shaped.toNode), shaped)
  }
  /** The empty Query. */
  def empty: Query[Unit, Unit, Seq] = new Query[Unit, Unit, Seq] {
    val toNode = shaped.toNode
    def shaped = ShapedValue((), Shape.unitShape[FlatShapeLevel])
  }
  /** Implicit Shape for query values, delegating to `RepShape`, so queries can be used
    * where a `Rep` shape is required (e.g. as nested query values). */
  @inline implicit def queryShape[Level >: NestedShapeLevel <: ShapeLevel, T, Q <: QueryBase[_]](implicit ev: Q <:< Rep[T]) = RepShape[Level, Q, T]
}
/** A typeclass for types that can be used as predicates in `filter` calls.
  * Instances exist for `Boolean`, `Rep[Boolean]` and `Rep[Option[Boolean]]`
  * (see the companion object). */
// Fix: the @implicitNotFound message had an unbalanced "(" — the closing ")" after
// "are allowed" was missing, producing a malformed compiler error message.
@implicitNotFound("Type ${T} cannot be a query condition (only Boolean, Rep[Boolean] and Rep[Option[Boolean]] are allowed)")
trait CanBeQueryCondition[-T] extends (T => Rep[_])
object CanBeQueryCondition {
  // Using implicits with explicit type annotation here (instead of previously implicit objects)
  // because otherwise they would not be found in this file above this line.
  // See https://github.com/slick/slick/pull/217
  // A Rep[Boolean] is already a valid condition; pass it through unchanged.
  implicit val BooleanColumnCanBeQueryCondition : CanBeQueryCondition[Rep[Boolean]] =
    new CanBeQueryCondition[Rep[Boolean]] {
      def apply(value: Rep[Boolean]) = value
    }
  // A Rep[Option[Boolean]] is likewise passed through unchanged.
  implicit val BooleanOptionColumnCanBeQueryCondition : CanBeQueryCondition[Rep[Option[Boolean]]] =
    new CanBeQueryCondition[Rep[Option[Boolean]]] {
      def apply(value: Rep[Option[Boolean]]) = value
    }
  // A plain Boolean constant is lifted to a literal column.
  implicit val BooleanCanBeQueryCondition : CanBeQueryCondition[Boolean] =
    new CanBeQueryCondition[Boolean] {
      def apply(value: Boolean) = new LiteralColumn(value)
    }
}
/** A query that wraps an existing AST node together with its shaped value. */
class WrappingQuery[+E, U, C[_]](val toNode: Node, val shaped: ShapedValue[_ <: E, U]) extends Query[E, U, C]
/** A join query whose condition has not been specified yet; created with `LiteralNode(true)`
  * as the default (always-true) predicate. `b1`/`b2` are the base values handed to `on`. */
final class BaseJoinQuery[+E1, +E2, U1, U2, C[_], +B1, +B2](leftGen: TermSymbol, rightGen: TermSymbol, left: Node, right: Node, jt: JoinType, base: ShapedValue[_ <: (E1, E2), (U1, U2)], b1: B1, b2: B2)
    extends WrappingQuery[(E1, E2), (U1, U2), C](AJoin(leftGen, rightGen, left, right, jt, LiteralNode(true)), base) {
  /** Add a join condition to a join operation. Rebuilds the join node with the predicate. */
  def on[T <: Rep[_]](pred: (B1, B2) => T)(implicit wt: CanBeQueryCondition[T]): Query[(E1, E2), (U1, U2), C] =
    new WrappingQuery[(E1, E2), (U1, U2), C](AJoin(leftGen, rightGen, left, right, jt, wt(pred(b1, b2)).toNode), base)
}
/** Represents a database table. Profiles add extension methods to TableQuery
  * for operations that can be performed on tables but not on arbitrary
  * queries, e.g. getting the table DDL. */
class TableQuery[E <: AbstractTable[_]](cons: Tag => E) extends Query[E, E#TableElementType, Seq] {
  lazy val shaped = {
    // The base tag produces new table instances when re-tagged with a path,
    // delegating back to itself so re-tagging a RefTag also works.
    val baseTable = cons(new BaseTag { base =>
      def taggedAs(path: Node): AbstractTable[_] = cons(new RefTag(path) {
        def taggedAs(path: Node) = base.taggedAs(path)
      })
    })
    ShapedValue(baseTable, RepShape[FlatShapeLevel, E, E#TableElementType])
  }
  lazy val toNode = shaped.toNode
  /** Get the "raw" table row that represents the table itself, as opposed to
    * a Path for a variable of the table's type. This method should generally
    * not be called from user code. */
  def baseTableRow: E = shaped.value
}
object TableQuery {
  /** Create a TableQuery for a table row class using an arbitrary constructor function. */
  def apply[E <: AbstractTable[_]](cons: Tag => E): TableQuery[E] =
    new TableQuery[E](cons)
  /** Create a TableQuery for a table row class which has a constructor of type (Tag).
    * The constructor function is synthesized at compile time by a macro. */
  def apply[E <: AbstractTable[_]]: TableQuery[E] =
    macro TableQueryMacroImpl.apply[E]
}
object TableQueryMacroImpl {
  /** Macro implementation for `TableQuery.apply[E]`: synthesizes the function
    * `(tag: Tag) => new E(tag)` and passes it to the explicit-constructor overload. */
  def apply[E <: AbstractTable[_]](c: Context)(implicit e: c.WeakTypeTag[E]): c.Expr[TableQuery[E]] = {
    import c.universe._
    // Build the AST for the lambda `(tag: Tag) => new E(tag)`.
    val cons = c.Expr[Tag => E](Function(
      List(ValDef(Modifiers(Flag.PARAM), TermName("tag"), Ident(typeOf[Tag].typeSymbol), EmptyTree)),
      Apply(
        Select(New(TypeTree(e.tpe)), termNames.CONSTRUCTOR),
        List(Ident(TermName("tag")))
      )
    ))
    reify { TableQuery.apply[E](cons.splice) }
  }
}
| nafg/slick | slick/src/main/scala/slick/lifted/Query.scala | Scala | bsd-2-clause | 17,684 |
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package types
import com.intellij.lang.PsiBuilder
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
import scala.annotation.tailrec
/**
* @author Alexander Podkhalyuzin
* Date: 15.02.2008
*/
/*
* SimpleType ::= SimpleType TypeArgs
* | SimpleType '#' id
* | StableId
* | Path '.' 'type'
* | '(' Types [','] ')'
*/
object SimpleType {
  /** Parses a SimpleType per the grammar in the header comment.
    * Returns true if a simple type was parsed (PSI markers completed), false after
    * rolling the builder back to the starting position.
    * `multipleSQBrackets` controls whether repeated `[...]` type-argument lists may
    * be chained (e.g. `F[A][B]`). */
  def parse(builder: ScalaPsiBuilder, isPattern: Boolean, multipleSQBrackets: Boolean = true): Boolean = {
    // Consumes trailing `[...]` type arguments and `#id` projections, wrapping the
    // already-parsed type in TYPE_GENERIC_CALL / TYPE_PROJECTION nodes.
    @tailrec
    def parseTail(curMarker: PsiBuilder.Marker, checkSQBracket: Boolean = true) {
      builder.getTokenType match {
        case ScalaTokenTypes.tLSQBRACKET if checkSQBracket =>
          val newMarker = curMarker.precede
          TypeArgs.parse(builder, isPattern)
          curMarker.done(ScalaElementTypes.TYPE_GENERIC_CALL)
          // After one bracket group, further groups are only allowed if multipleSQBrackets is set.
          parseTail(newMarker, checkSQBracket = multipleSQBrackets)
        case ScalaTokenTypes.tINNER_CLASS =>
          val newMarker = curMarker.precede
          builder.advanceLexer() //Ate #
          builder.getTokenType match {
            case ScalaTokenTypes.tIDENTIFIER =>
              builder.advanceLexer() //Ate id
              curMarker.done(ScalaElementTypes.TYPE_PROJECTION)
              parseTail(newMarker)
            case _ =>
              // `#` not followed by an identifier: abandon both markers.
              newMarker.drop()
              curMarker.drop()
          }
        case _ =>
          curMarker.drop()
      }
    }
    val simpleMarker = builder.mark
    builder.getTokenType match {
      case ScalaTokenTypes.tLPARENTHESIS =>
        // Parenthesized form: either a tuple type or a single type in parentheses.
        val tupleMarker = builder.mark
        builder.advanceLexer()
        builder.disableNewlines
        val (_, isTuple) = Types parse builder
        builder.getTokenType match {
          case ScalaTokenTypes.tCOMMA =>
            builder.advanceLexer() //Ate ,
            builder.getTokenType match {
              case ScalaTokenTypes.tRPARENTHESIS =>
                builder.advanceLexer() //Ate )
                if (isTuple) tupleMarker.done(ScalaElementTypes.TUPLE_TYPE)
                else {
                  // Trailing comma after a single type is an error.
                  builder.error("Identifier expected, but ',' found")
                  tupleMarker.done(ScalaElementTypes.TYPE_IN_PARENTHESIS)
                }
              case _ =>
                builder error ScalaBundle.message("rparenthesis.expected")
                if (isTuple) tupleMarker.done(ScalaElementTypes.TUPLE_TYPE)
                else tupleMarker.done(ScalaElementTypes.TYPE_IN_PARENTHESIS)
            }
          case ScalaTokenTypes.tRPARENTHESIS =>
            builder.advanceLexer() //Ate )
            if (isTuple) tupleMarker.done(ScalaElementTypes.TUPLE_TYPE)
            else tupleMarker.done(ScalaElementTypes.TYPE_IN_PARENTHESIS)
          case _ =>
            builder error ScalaBundle.message("rparenthesis.expected")
            if (isTuple) tupleMarker.done(ScalaElementTypes.TUPLE_TYPE)
            else tupleMarker.done(ScalaElementTypes.TYPE_IN_PARENTHESIS)
        }
        builder.restoreNewlinesState
      case ScalaTokenTypes.kTHIS |
              ScalaTokenTypes.tIDENTIFIER |
              ScalaTokenTypes.kSUPER =>
        // Path-based form: try `Path '.' 'type'` first; otherwise re-parse as a StableId.
        val newMarker = builder.mark
        Path parse (builder, ScalaElementTypes.REFERENCE)
        builder.getTokenType match {
          case ScalaTokenTypes.tDOT =>
            builder.advanceLexer() //Ate .
            builder.getTokenType match {
              case ScalaTokenTypes.kTYPE =>
                builder.advanceLexer() //Ate type
                newMarker.done(ScalaElementTypes.SIMPLE_TYPE)
              case _ =>
                newMarker.rollbackTo()
                val fMarker = builder.mark
                StableId parse (builder, ScalaElementTypes.REFERENCE)
                fMarker.done(ScalaElementTypes.SIMPLE_TYPE)
            }
          case _ =>
            newMarker.rollbackTo()
            val fMarker = builder.mark
            StableId parse (builder, ScalaElementTypes.REFERENCE)
            fMarker.done(ScalaElementTypes.SIMPLE_TYPE)
        }
      case _ =>
        // Not a simple type at all: undo everything and report failure.
        simpleMarker.rollbackTo()
        return false
    }
    parseTail(simpleMarker)
    true
  }
} | double-y/translation-idea-plugin | src/org/jetbrains/plugins/scala/lang/parser/parsing/types/SimpleType.scala | Scala | apache-2.0 | 4,297 |
/** Copyright 2014 TappingStone, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prediction.engines.itemrank
// This module allows users to evaluate their algorithm data with their acutal
// data. It takes the mongodb dump (from Version 0.7 or before) of the three key
// collections: User, Item, and U2IActions. It evaluates the data in a
// time-rolling fashsion. First, it sets a cutoff day d and uses the data before
// it for training, and then use the following n days for testing. Then, it
// shifts the cutoff day by n, i.e. using data before (d + n-days) for training,
// then use data between d + n and d + 2n for testing, and so on until the last
// day.
//
// In each test, we construct a query using a combination of 1. all conversion
// data of a user on a day; and 2. the global top x sold items of that day.
//
// Notice that this replay may not be completely accurate as User and Item are
// not event log.
import io.prediction.controller._
import io.prediction.controller.{ Params => BaseParams }
import com.github.nscala_time.time.Imports._
import org.joda.time.Instant
import org.json4s._
import org.json4s.native.JsonMethods._
import scala.io.Source
import scala.collection.immutable.HashMap
import scala.util.hashing.MurmurHash3
import io.prediction.engines.base.ItemTD
import io.prediction.engines.base.UserTD
import io.prediction.engines.base.U2IActionTD
import io.prediction.engines.base.TrainingData
import io.prediction.engines.base.HasName
/** Parameters identifying one replay slice: a display `name` (training cutoff date plus
  * day-of-week) and the day offset `idx` from the base date. */
case class ReplaySliceParams(
  val name: String,
  val idx: Int
) extends Serializable with HasName
object ReplayDataSource {
  /** Configuration for the replay: input file paths, the rolling-window bounds
    * (day offsets relative to `baseDate`), and the item/action whitelists. */
  case class Params(
    val userPath: String,
    val itemPath: String,
    val u2iPath: String,
    val baseDate: LocalDate,
    val fromIdx: Int,
    val untilIdx: Int,
    val testingWindowSize: Int,
    // Mix top x sold items in Query
    val numTopSoldItems: Int,
    // Only items belonging to this whitelist is considered.
    val whitelistItypes: Seq[String],
    // Only u2i belonging to this single action is considered.
    val whitelistAction: String
  ) extends BaseParams

  /** Raw input plus derived index maps: users/items are assigned integer indices by
    * array position, and u2i actions are re-keyed to those indices and grouped by date. */
  case class PreprocessedData(
    val userList: Array[User],
    val itemList: Array[Item],
    val date2u2iList: Map[LocalDate, Array[U2I]]
  ) extends Serializable {
    // user index -> UserTD (index is the position in userList)
    val ui2UserTd: Map[Int, UserTD] = userList
      .zipWithIndex
      .map(_.swap)
      .toMap
      .mapValues(user => new UserTD(user._id))
    val ui2uid: Map[Int, String] = ui2UserTd.mapValues(_.uid)
    val uid2ui: Map[String, Int] = ui2uid.map(_.swap)
    // item index -> ItemTD (index is the position in itemList)
    val ii2ItemTd: Map[Int, ItemTD] = itemList
      .zipWithIndex
      .map(_.swap)
      .toMap
      .mapValues(
        item => new ItemTD(item._id, item.itypes.toSeq, None, None, false))
    val ii2iid: Map[Int, String] = ii2ItemTd.mapValues(_.iid)
    val iid2ii: Map[String, Int] = ii2iid.map(_.swap)
    // Actions per date, translated from string ids to integer indices.
    val date2ActionTds: Map[LocalDate, Array[U2IActionTD]] = date2u2iList
      .mapValues(
        _.map(u2i => new U2IActionTD(
          uid2ui(u2i.uid),
          iid2ii(u2i.iid),
          u2i.action,
          None,
          u2i.t.$date)))
    // Per-date (iid, count) pairs sorted by descending count, i.e. daily top-sold items.
    val dailyServedItems: Map[LocalDate, Array[(String, Int)]] = date2u2iList
      .mapValues(
        _.map(_.iid).groupBy(identity).mapValues(_.size).toArray.sortBy(-_._2)
      )
  }
}
class ReplayDataSource(val dsp: ReplayDataSource.Params)
  extends LDataSource[ReplaySliceParams, TrainingData, Query, Actual] {

  /** Loads the three mongodb-dump files (one JSON object per line) into memory. */
  def load(): (Array[User], Array[Item], Array[U2I]) = {
    implicit val formats = DefaultFormats
    val u2iList = Source
      .fromFile(dsp.u2iPath).getLines
      .map { s => parse(s).extract[U2I] }
      .toArray
    val userList = Source
      .fromFile(dsp.userPath).getLines
      .map { s => parse(s).extract[User] }
      .toArray
    val itemList = Source
      .fromFile(dsp.itemPath)
      .getLines
      .map { s => parse(s).extract[Item] }
      .toArray
    return (userList, itemList, u2iList)
  }

  /** Applies the itype and action whitelists, then groups the surviving u2i
    * actions by local date. */
  def preprocess(input: (Array[User], Array[Item], Array[U2I]))
  : ReplayDataSource.PreprocessedData = {
    val (users, items, u2is) = input
    val whitelistItypeSet = Set(dsp.whitelistItypes:_*)
    // Keep only items with at least one whitelisted itype.
    val validItems: Array[Item] = items
      .filter(_.itypes.find(it => whitelistItypeSet(it)) != None)
    val validIidSet: Set[String] = validItems.map(_._id).toSet
    val date2Actions: Map[LocalDate, Array[U2I]] = u2is
      .filter(u2i => validIidSet(u2i.iid))
      .filter(u2i => u2i.action == dsp.whitelistAction)
      .groupBy(_.dt.toLocalDate)
    ReplayDataSource.PreprocessedData(users, validItems, date2Actions)
  }

  /** One slice per testing window, stepping by `testingWindowSize` days
    * from `fromIdx` until `untilIdx`. */
  def generateParams(): Seq[ReplaySliceParams] = {
    Range(dsp.fromIdx, dsp.untilIdx, dsp.testingWindowSize).map { idx =>
      val trainingUntilDate: LocalDate = dsp.baseDate.plusDays(idx)
      val dow = trainingUntilDate.dayOfWeek.getAsShortText
      ReplaySliceParams(
        name = s"${trainingUntilDate.toString()} $dow",
        idx = idx)
    }
  }

  /** Builds one slice: training data from all actions strictly before the cutoff date,
    * and (Query, Actual) pairs from the following `testingWindowSize` days.
    * Each query mixes a user's items of the day with that day's global top-sold items. */
  def generateOne(input: (ReplayDataSource.PreprocessedData, ReplaySliceParams))
  : (ReplaySliceParams, TrainingData, Array[(Query, Actual)]) = {
    val (data, dp) = input
    val userList: Array[User] = data.userList
    val itemList: Array[Item] = data.itemList
    val date2u2iList: Map[LocalDate, Array[U2I]] = data.date2u2iList
    val ui2UserTd = data.ui2UserTd
    val ui2uid = data.ui2uid
    val uid2ui = data.uid2ui
    val ii2ItemTd = data.ii2ItemTd
    val ii2iid = data.ii2iid
    val iid2ii = data.iid2ii
    val date2Actions = data.date2ActionTds
    val dailyServedItems = data.dailyServedItems
    val trainingUntilDate: LocalDate = dsp.baseDate.plusDays(dp.idx)
    println("TrainingUntil: " + trainingUntilDate.toString)
    // Training set: all whitelisted actions strictly before the cutoff.
    val trainingDate2Actions: Map[LocalDate, Array[U2IActionTD]] =
      date2Actions.filterKeys(k => k.isBefore(trainingUntilDate))
    val trainingActions: Array[U2IActionTD] = trainingDate2Actions
      .values
      .flatMap(_.toSeq)
      .toArray
    val trainingData = new TrainingData(
      HashMap[Int, UserTD]() ++ ui2UserTd,
      HashMap[Int, ItemTD]() ++ ii2ItemTd,
      trainingActions.toList)
    //Array[U2IActionTD]() ++ trainingActions)
    // Per-user statistics over the training period, used to populate Actual.
    val uiActionsMap: Map[Int, Int] = trainingActions
      .groupBy(_.uindex)
      .mapValues(_.size)
    // Seq[(Int, Int)]: (User, Order Size)
    val date2OrderSizeMap: Map[LocalDate, Array[(Int, Int)]] =
      trainingDate2Actions
        .mapValues {
          _.groupBy(_.uindex).mapValues(_.size).toArray
        }
    val uiAverageSizeMap: Map[Int, Double] = date2OrderSizeMap
      .values
      .flatMap(_.toSeq)
      .groupBy(_._1)
      .mapValues( l => l.map(_._2).sum.toDouble / l.size )
    val uiPreviousOrdersMap: Map[Int, Int] = date2OrderSizeMap
      .values
      .flatMap(_.toSeq)
      .groupBy(_._1)
      .mapValues(_.size)
    val uiVarietyMap: Map[Int, Int] = trainingActions
      .groupBy(_.uindex)
      .mapValues(_.map(_.iindex).distinct.size)
    // Testing set: one (Query, Actual) per active user per day of the window.
    val queryActionList: Array[(Query, Actual)] =
      Range(dp.idx, math.min(dp.idx + dsp.testingWindowSize, dsp.untilIdx))
        .map { queryIdx => dsp.baseDate.plusDays(queryIdx) }
        .flatMap { queryDate => {
          //println(
          //  s"Testing: ${queryDate.toString} DOW(${queryDate.getDayOfWeek})")
          val u2is = date2u2iList.getOrElse(queryDate, Array[U2I]())
          val uid2Actions: Map[String, Array[U2I]] = u2is.groupBy(_.uid)
          val user2iids = uid2Actions.mapValues(_.map(_.iid))
          // Use first action time.
          val user2LocalDT = uid2Actions
            .mapValues(_.map(_.dt.toLocalDateTime).min)
          val todayItems: Seq[String] = dailyServedItems(queryDate)
            .take(dsp.numTopSoldItems)
            .map(_._1)
          user2iids.map { case (uid, iids) => {
            val possibleIids = (iids ++ todayItems)
              .distinct
            //val sortedIids = random.shuffle(possibleIids)
            // Introduce some kind of stable randomness
            val sortedIids = possibleIids
              .sortBy(iid => MurmurHash3.stringHash(iid))
            //val sortedIids = possibleIids.sortBy(identity)
            val query = new Query(uid, sortedIids)
            val ui = uid2ui(uid)
            // FIXME(yipjustin): update Action to use U2I
            val actual = new Actual(
              iids = iids.toSeq,
              actionTuples = Seq[(String, String, U2IActionTD)](),
              previousActionCount = uiActionsMap.getOrElse(ui, 0),
              localDate = queryDate,
              localDateTime = user2LocalDT(uid),
              averageOrderSize = uiAverageSizeMap.getOrElse(ui, 0),
              previousOrders = uiPreviousOrdersMap.getOrElse(ui, 0),
              variety = uiVarietyMap.getOrElse(ui, 0)
            )
            (query, actual)
          }}
        }}
        .toArray
    //println("Testing Size: " + queryActionList.size)
    (dp, trainingData, queryActionList)
  }

  /** Generates every slice for the configured parameter range. */
  def generate(input: ReplayDataSource.PreprocessedData)
  : Seq[(ReplaySliceParams, TrainingData, Array[(Query, Actual)])] = {
    val paramsList = generateParams()
    paramsList.map { params => {
      generateOne((input, params))
    }}
  }

  /** Entry point called by the framework: load, preprocess, and generate all slices. */
  override
  def read(): Seq[(ReplaySliceParams, TrainingData, Seq[(Query, Actual)])] = {
    generate(preprocess(load()))
      .map(e => (e._1, e._2, e._3.toSeq))
  }
}
/** Wrapper matching mongodb's extended-JSON date form: {"$date": millis}. */
case class DateObject(val $date: Long)
/** One user-to-item action from the dump; `dt` converts the epoch millis to a
  * DateTime in the fixed UTC-8 offset used by this replay. */
case class U2I(
  val action: String,
  val uid: String, val iid: String, val t: DateObject) {
  lazy val dt: DateTime =
    new DateTime(new Instant(t.$date), DateTimeZone.forOffsetHours(-8))
  override def toString(): String =
    s"U2I($uid, $iid, $action, $dt)"
}
/** A user record; only the id is used by the replay. */
case class User(val _id: String)
/** An item record with its start time, item types and category name. */
case class Item(val _id: String, val starttime: DateObject,
  val itypes: Array[String],
  val ca_name: String) {
  override def toString(): String =
    s"${_id} $ca_name [" + itypes.mkString(",") + "]"
  val itypesSet = Set(itypes:_*)
}
| TheDataShed/PredictionIO | engines/src/main/scala/itemrank/ReplayDataSource.scala | Scala | apache-2.0 | 10,304 |
package spray.json.laws
import org.scalacheck.Arbitrary.arbitrary
import org.scalacheck.Prop.forAll
import org.scalacheck._
import spray.json.DefaultJsonProtocol._
import spray.json._
import spray.json.Scalaz._
import scalaz._
import scalaz.Scalaz._
object JsonReaderLaws {
  /** ScalaCheck properties verifying the scalaz Functor laws for the
    * `JsonReader` Functor instance, using the Int reader as the subject. */
  val laws = new Properties("JsonReader Functor Laws") {
    val laws = Functor[JsonReader].functorLaw
    val intReader: JsonReader[Int] = IntJsonFormat
    // Generate JsNumber inputs from arbitrary Ints.
    implicit val jsNumberArbitrary: Arbitrary[JsNumber] =
      Arbitrary(arbitrary[Int].map(JsNumber.apply))
    // A small fixed pool of Int => Int functions to test composition with.
    implicit val intFunctionArbitrary: Arbitrary[Int => Int] =
      Arbitrary(Gen.oneOf((x: Int) => -x, (x: Int) => x % 5, (x: Int) => x * x, (x: Int) => x, (x: Int) => 3 * x, (x: Int) => x + 1))
    // Readers are functions, so equality is extensional: compare results on a given input.
    def equalsOn[T: Equal](input: JsValue): Equal[JsonReader[T]] = new Equal[JsonReader[T]] {
      def equal(a1: JsonReader[T], a2: JsonReader[T]): Boolean =
        a1.read(input) === a2.read(input)
    }
    property("identity") = forAll { input: JsNumber =>
      laws.identity(intReader)(equalsOn(input))
    }
    property("composite") = forAll { (input: JsNumber, f: (Int => Int), g: (Int => Int)) =>
      laws.composite(intReader, f, g)(equalsOn(input))
    }
    property("invariantIdentity") = forAll { input: JsNumber =>
      laws.invariantIdentity(intReader)(equalsOn(input))
    }
    property("invariantComposite") = forAll { (input: JsNumber, f1: (Int => Int), g1: (Int => Int), f2: (Int => Int), g2: (Int => Int)) =>
      laws.invariantComposite(intReader, f1, g1, f2, g2)(equalsOn(input))
    }
  }
}
| msimav/spray-contrib-scalaz | src/test/scala/spray/json/laws/JsonReaderLaws.scala | Scala | mit | 1,572 |
/*
* Copyright 2013-2015 Websudos, Limited.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Explicit consent must be obtained from the copyright owner, Outworkers Limited before any redistribution is made.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.websudos.phantom.reactivestreams.suites
import java.util.concurrent.atomic.AtomicInteger
import com.websudos.phantom.dsl._
import com.websudos.phantom.reactivestreams._
import com.websudos.util.testing._
import org.reactivestreams.{Subscriber, Subscription}
import org.scalatest.FlatSpec
import org.scalatest.concurrent.Eventually
import org.scalatest.time.SpanSugar._
import scala.concurrent.Await
class PublisherIntegrationTest extends FlatSpec with StreamTest with TestImplicits with Eventually {

  implicit val defaultPatience = PatienceConfig(timeout = 10.seconds, interval = 50.millis)

  it should "correctly consume the entire stream of items published from a Cassandra table" in {
    // Counts items received via onNext; compared against the number stored.
    val counter = new AtomicInteger(0)
    val generatorCount = 100
    val samples = genList[String](generatorCount).map(Opera)

    // Truncate the table, then store all samples in one unlogged batch.
    val chain = for {
      truncate <- StreamDatabase.operaTable.truncate().future()
      store <- samples.foldLeft(Batch.unlogged) {
        (acc, item) => {
          acc.add(StreamDatabase.operaTable.store(item))
        }
      } future()
    } yield store

    Await.result(chain, 10.seconds)

    val publisher = StreamDatabase.operaTable.publisher

    // Subscribe with unbounded demand and count every element delivered.
    publisher.subscribe(new Subscriber[Opera] {
      override def onError(t: Throwable): Unit = {
        fail(t)
      }

      override def onSubscribe(s: Subscription): Unit = {
        s.request(Long.MaxValue)
      }

      override def onComplete(): Unit = {
        info(s"Finished streaming, total count is ${counter.get()}")
      }

      override def onNext(t: Opera): Unit = {
        info(s"The current item is ${t.name}")
        info(s"The current count is ${counter.incrementAndGet()}")
      }
    })

    // The subscriber is asynchronous, so poll until all rows have arrived.
    eventually {
      counter.get() shouldEqual generatorCount
    } (defaultPatience)
  }
}
| levinson/phantom | phantom-reactivestreams/src/test/scala/com/websudos/phantom/reactivestreams/suites/PublisherIntegrationTest.scala | Scala | bsd-2-clause | 3,305 |
/*
* Copyright 2009-2010 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb {
package example {
package snippet {
import _root_.scala.xml.{NodeSeq, Text}
import _root_.net.liftweb.util._
import _root_.net.liftweb.http._
import _root_.net.liftweb.wizard._
import _root_.net.liftweb.common._
import _root_.java.util.Date
import Helpers._
import model._
/**
 * An example of a wizard in Lift
 */
object MyWizard extends Wizard {
  // Wizard-scoped flag set when the user finishes all screens.
  object completeInfo extends WizardVar(false)

  // define the first screen
  val nameAndAge = new Screen {

    // it has a name field
    val name = new Field with StringField {
      def name = S ? "First Name"

      override def validations = minLen(2, S ? "Name Too Short") ::
              maxLen(40, S ? "Name Too Long") :: super.validations
    }

    // and an age field
    val age = new Field with IntField {
      def name = S ? "Age"

      override def validations = minVal(5, S ?? "Too young") ::
              maxVal(120, S ? "You should be dead") :: super.validations
    }

    // choose the next screen based on the age
    override def nextScreen = if (age.is < 18) parentName else favoritePet
  }

  // We ask the parent's name if the person is under 18
  val parentName = new Screen {
    val parentName = new Field with StringField {
      def name = S ? "Mom or Dad's name"

      override def validations = minLen(2, S ? "Name Too Short") ::
              maxLen(40, S ? "Name Too Long") :: super.validations
    }
  }

  // we ask for the favorite pet
  val favoritePet = new Screen {
    val petName = new Field with StringField {
      def name = S ? "Pet's name"

      override def validations = minLen(2, S ? "Name Too Short") ::
              maxLen(40, S ? "Name Too Long") :: super.validations
    }
  }

  // what to do on completion of the wizard
  def finish() {
    S.notice("Thank you for registering your pet")
    completeInfo.set(true)
  }
}
/** A three-page wizard demonstrating access to earlier pages' field values
  * from later screens via `screenTop`. */
object WizardChallenge extends Wizard {
  val page1 = new Screen {
    val info = new Field with StringField {
      def name = S ? "Page one entry"
    }
  }

  val page2 = new Screen {
    // Shows the value entered on page one above this screen's form.
    override def screenTop = <span>Page one field is{page1.info}</span>

    val info = new Field with StringField {
      def name = S ? "Page two entry"
    }
  }

  val page3 = new Screen {
    override def screenTop = <span>Page one field is{page1.info}<br/>Page two field is{page2.info}</span>
  }

  def finish() {
    S.notice("Finished the challenge")
  }
}
/** A single-screen example: edits a Person and optionally saves it on finish. */
object PersonScreen extends LiftScreen {
  // Screen-scoped Person instance, created fresh per screen lifecycle.
  object person extends ScreenVar(Person.create)

  override def screenTopTextAsHtml = Full(<b>A single screen with some input validation</b>)

  // Registers the Person's fields with the screen.
  _register(() => person.is)

  val shouldSave = new Field with BooleanField {
    def name = "Save ?"
  }

  def finish() {
    if (shouldSave.is) {
      person.is.save
    }
  }
}}
}
}
| jeppenejsum/liftweb | examples/example/src/main/scala/net/liftweb/example/snippet/Wizard.scala | Scala | apache-2.0 | 3,405 |
import org.scalatest.FunSuite
import scala.reflect.runtime.universe._
/** Fixture class exercising the @funny annotation on every kind of parameter:
  * class type/value params, method type/value params (two lists), and type-alias params.
  * Note: the object name `С` is a Cyrillic capital Es, intentionally distinct from
  * the Latin `C` class above it. */
class ParameterZoo {
  class C[@funny T](@funny val x: Int)
  object С
  def m[@funny T, @funny U](@funny x: Int)(@funny y: Int) = ???
  type T[@funny U] = U
}
class Parameters extends FunSuite {
  // Verifies the expanded member names of ParameterZoo via runtime reflection:
  // each declaration name encodes its annotated parameter names (e.g. CTx, mTUxy).
  test("combo") {
    assert(typeOf[ParameterZoo].decls.sorted.map(_.toString).mkString("\\n") === """
      |constructor ParameterZoo
      |object С
      |class CTx
      |method mTUxy
      |type TU
    """.trim.stripMargin)
  }
}
| scalamacros/paradise | tests/src/test/scala/annotations/run/Parameters.scala | Scala | bsd-3-clause | 504 |
/*
* Copyright (C) 2014 AyaIB Developers (http://github.com/fauu/AyaIB)
*
* This software is licensed under the GNU General Public License
* (version 3 or later). See the COPYING file in this distribution.
*
* You should have received a copy of the GNU Library General Public License
* along with this software. If not, see <http://www.gnu.org/licenses/>.
*
* Authored by: Piotr Grabowski <fau999@gmail.com>
*/
package models.forms
import play.api.data._
import play.api.data.Forms._
/** Form-backing data for creating or editing a board in the admin UI.
  *
  * Defaults mirror the initial values presented in an empty form.
  * `allowedContentTypesStr` is a ';'-separated MIME type whitelist.
  */
case class BoardForm(name: String = "",
                     fullName: String = "",
                     allowedContentTypesStr: String = "image/jpeg;image/png;image/gif",
                     maxNumPages: Int = 10,
                     threadsPerPage: Int = 15)
object BoardForm {
  /** Play form mapping for [[BoardForm]], with length limits on the text fields
    * and range checks on the numeric settings. */
  def get = Form(mapping(
    "name" -> nonEmptyText(maxLength = 15),
    "fullName" -> nonEmptyText(maxLength = 40),
    "allowedContentTypesStr" -> nonEmptyText(maxLength = 500),
    "maxNumPages" -> number(min = 1, max = 50),
    "threadsPerPage" -> number(min = 1, max = 25)
  )(BoardForm.apply)(BoardForm.unapply))
}
| fauu/AyaIB | app/models/forms/BoardForm.scala | Scala | gpl-3.0 | 1,032 |
package org.talkingpuffin.util
import org.specs.runner.JUnit4
import org.specs.Specification
// JUnit 4 adapter: allows the specs-based WordCounterSpec to run under JUnit runners.
class WordCounterTest extends JUnit4(WordCounterSpec)
/** Specification of WordCounter: tokenization, normalization and counting. */
object WordCounterSpec extends Specification {
  "The result" should {
    "contain 2 words" in {
      val counted = WordCounter("Scala traits").words
      counted must have size(2)
    }
    "have trailing punctuation stripped" in {
      val first = WordCounter("Scala.").words(0)
      first.word must_==("scala")
    }
    "count correctly" in {
      val first = WordCounter("Scala. scala SCala?").words(0)
      first.count must_==(3)
    }
  }
}
| dcbriccetti/talking-puffin | common/src/test/scala/org/talkingpuffin/util/WordCounterTest.scala | Scala | mit | 529 |
package io.ino.solrs
import io.ino.solrs.future.ScalaFutureFactory.ScalaFuture
import io.ino.time.Clock.MutableClock
import org.apache.solr.client.solrj.SolrResponse
import org.apache.solr.client.solrj.response.QueryResponse
import org.mockito.ArgumentMatchers._
import org.mockito.Mockito._
import scala.concurrent._
import scala.concurrent.duration._
import scala.util.{Success, Try}
/** Mockito helpers for stubbing `AsyncSolrClient.doExecute` in tests. */
object AsyncSolrClientMocks {
  /**
   * Stubs `doExecute` on the given (spied) client to answer with a canned
   * QueryResponse. The simulated latency does not sleep: it advances the
   * supplied mutable test clock by `responseDelay` when the future is
   * completed (see delayedResponse below).
   */
  def mockDoRequest[F[_]](
      mock: AsyncSolrClient[F],
      solrServer: => SolrServer = any[SolrServer](),
      responseDelay: Duration = 1 milli)(implicit clock: MutableClock): AsyncSolrClient[F] = {
    // for spies doReturn should be used...
    doAnswer(_ => delayedResponse(responseDelay.toMillis))
      .when(mock)
      .doExecute(solrServer, any())(any())
    mock
  }
  /**
   * Stubs `doExecute` on the given (spied) client to answer with the
   * provided, caller-controlled future for any server/request.
   */
  def mockDoRequest[F[_]](mock: AsyncSolrClient[F],
                          futureResponse: future.Future[SolrResponse]): AsyncSolrClient[F] = {
    // for spies doReturn should be used...
    doAnswer(_ => futureResponse).when(mock).doExecute(any[SolrServer](), any())(any())
    mock
  }
  /**
   * Builds an already-completed future holding an empty QueryResponse.
   * Instead of waiting, it advances the mutable clock by `delay` millis each
   * time a completion callback is registered via onComplete — simulating
   * request latency deterministically for time-based tests.
   */
  def delayedResponse(delay: Long)(implicit clock: MutableClock): future.Future[QueryResponse] = {
    val response = new QueryResponse()
    new ScalaFuture(new Future[QueryResponse] {
      override def onComplete[U](func: (Try[QueryResponse]) => U)(
          implicit executor: ExecutionContext): Unit = {
        // advance simulated time, then invoke the callback synchronously
        clock.advance(delay)
        func(Success(response))
      }
      override def isCompleted: Boolean = true
      override def value: Option[Try[QueryResponse]] = Some(Success(response))
      @throws(classOf[Exception])
      override def result(atMost: Duration)(implicit permit: CanAwait): QueryResponse = response
      @throws(classOf[InterruptedException])
      @throws(classOf[TimeoutException])
      override def ready(atMost: Duration)(implicit permit: CanAwait): this.type = this
      def transform[S](f: Try[QueryResponse] => Try[S])(
          implicit executor: ExecutionContext): Future[S] = Future.fromTry(f(Success(response)))
      def transformWith[S](f: Try[QueryResponse] => Future[S])(
          implicit executor: ExecutionContext): Future[S] = f(Success(response))
    })
  }
}
| inoio/solrs | src/test/scala/io/ino/solrs/AsyncSolrClientMocks.scala | Scala | apache-2.0 | 2,226 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.controller.test
import scala.concurrent.duration.DurationInt
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.server.Route
import spray.json.DefaultJsonProtocol._
import spray.json._
import org.apache.openwhisk.core.controller.WhiskActionsApi
import org.apache.openwhisk.core.entity._
import org.apache.openwhisk.core.entitlement.Resource
import org.apache.openwhisk.core.entitlement.Privilege._
import scala.concurrent.Await
import scala.language.postfixOps
import org.apache.openwhisk.http.ErrorResponse
import org.apache.openwhisk.http.Messages
/**
* Tests Packages API.
*
* Unit tests of the controller service as a standalone component.
* These tests exercise a fresh instance of the service object in memory -- these
* tests do NOT communication with a whisk deployment.
*
*
* @Idioglossia
* "using Specification DSL to write unit tests, as in should, must, not, be"
* "using Specs2RouteTest DSL to chain HTTP requests for unit testing, as in ~>"
*/
@RunWith(classOf[JUnitRunner])
class PackageActionsApiTests extends ControllerTestCommon with WhiskActionsApi {
  /** Package Actions API tests */
  behavior of "Package Actions API"
  // Fixture identity and namespace shared by all tests; collectionPath routes
  // requests against the default namespace.
  val creds = WhiskAuthHelpers.newIdentity()
  val namespace = EntityPath(creds.subject.asString)
  val collectionPath = s"/${EntityPath.DEFAULT}/${collection.path}"
  // Produces a fresh, unique entity name per call so tests do not collide in
  // the shared datastore.
  def aname() = MakeName.next("package_action_tests")
  //// GET /actions/package/
  it should "list all actions in package" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname())
    val actions = (1 to 2).map { _ =>
      WhiskAction(provider.fullPath, aname(), jsDefault("??"))
    }
    put(entityStore, provider)
    actions foreach { put(entityStore, _) }
    org.apache.openwhisk.utils.retry {
      Get(s"$collectionPath/${provider.name}/") ~> Route.seal(routes(creds)) ~> check {
        status should be(OK)
        val response = responseAs[List[JsObject]]
        actions.length should be(response.length)
        actions forall { a =>
          response contains a.summaryAsJson
        } should be(true)
      }
    }
  }
  it should "list all actions in package binding" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname())
    val reference = WhiskPackage(namespace, aname(), provider.bind)
    val actions = (1 to 2).map { _ =>
      WhiskAction(provider.fullPath, aname(), jsDefault("??"))
    }
    put(entityStore, provider)
    put(entityStore, reference)
    actions foreach { put(entityStore, _) }
    org.apache.openwhisk.utils.retry {
      Get(s"$collectionPath/${reference.name}/") ~> Route.seal(routes(creds)) ~> check {
        status should be(OK)
        val response = responseAs[List[JsObject]]
        actions.length should be(response.length)
        actions forall { a =>
          response contains a.summaryAsJson
        } should be(true)
      }
    }
  }
  it should "include action in package when listing all actions" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname(), None)
    val action1 = WhiskAction(namespace, aname(), jsDefault("??"), Parameters(), ActionLimits())
    val action2 = WhiskAction(provider.fullPath, aname(), jsDefault("??"))
    put(entityStore, provider)
    put(entityStore, action1)
    put(entityStore, action2)
    org.apache.openwhisk.utils.retry {
      Get(s"$collectionPath") ~> Route.seal(routes(creds)) ~> check {
        status should be(OK)
        val response = responseAs[List[JsObject]]
        response.length should be(2)
        response contains action1.summaryAsJson should be(true)
        response contains action2.summaryAsJson should be(true)
      }
    }
  }
  it should "reject ambiguous list actions in package without trailing slash" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname(), None)
    put(entityStore, provider)
    org.apache.openwhisk.utils.retry {
      Get(s"$collectionPath/${provider.name}") ~> Route.seal(routes(creds)) ~> check {
        status should be(Conflict)
      }
    }
  }
  it should "reject invalid verb on get package actions" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname(), None)
    put(entityStore, provider)
    Delete(s"$collectionPath/${provider.name}/") ~> Route.seal(routes(creds)) ~> check {
      status should be(NotFound)
    }
  }
  //// PUT /actions/package/name
  it should "put action in package" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname())
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"))
    val content = WhiskActionPut(Some(action.exec))
    put(entityStore, provider)
    Put(s"$collectionPath/${provider.name}/${action.name}", content) ~> Route.seal(routes(creds)) ~> check {
      deleteAction(action.docid)
      status should be(OK)
      val response = responseAs[WhiskAction]
      response should be(
        WhiskAction(
          action.namespace,
          action.name,
          action.exec,
          action.parameters,
          action.limits,
          action.version,
          action.publish,
          action.annotations ++ systemAnnotations(NODEJS),
          updated = response.updated))
    }
  }
  it should "reject put action in package that does not exist" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname())
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"))
    val content = WhiskActionPut(Some(action.exec))
    Put(s"$collectionPath/${provider.name}/${action.name}", content) ~> Route.seal(routes(creds)) ~> check {
      status should be(NotFound)
    }
  }
  it should "reject put action in package binding where package doesn't exist" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname(), None, publish = true)
    val binding = WhiskPackage(namespace, aname(), provider.bind)
    val content = WhiskActionPut(Some(jsDefault("??")))
    put(entityStore, binding)
    Put(s"$collectionPath/${binding.name}/${aname()}", content) ~> Route.seal(routes(creds)) ~> check {
      status should be(BadRequest)
    }
  }
  it should "reject put action in package binding" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname(), None, publish = true)
    val binding = WhiskPackage(namespace, aname(), provider.bind)
    val content = WhiskActionPut(Some(jsDefault("??")))
    put(entityStore, provider)
    put(entityStore, binding)
    Put(s"$collectionPath/${binding.name}/${aname()}", content) ~> Route.seal(routes(creds)) ~> check {
      status should be(BadRequest)
    }
  }
  it should "reject put action in package owned by different subject" in {
    implicit val tid = transid()
    val provider = WhiskPackage(EntityPath(Subject().asString), aname(), publish = true)
    val content = WhiskActionPut(Some(jsDefault("??")))
    put(entityStore, provider)
    Put(s"/${provider.namespace}/${collection.path}/${provider.name}/${aname()}", content) ~> Route.seal(routes(creds)) ~> check {
      status should be(Forbidden)
    }
  }
  //// DEL /actions/package/name
  it should "delete action in package" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname())
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"))
    put(entityStore, provider)
    put(entityStore, action)
    // it should "reject delete action in package owned by different subject" in {
    val auser = WhiskAuthHelpers.newIdentity()
    Delete(s"/${provider.namespace}/${collection.path}/${provider.name}/${action.name}") ~> Route.seal(routes(auser)) ~> check {
      status should be(Forbidden)
    }
    Delete(s"$collectionPath/${provider.name}/${action.name}") ~> Route.seal(routes(creds)) ~> check {
      status should be(OK)
      val response = responseAs[WhiskAction]
      response should be(action)
    }
  }
  it should "reject delete action in package that does not exist" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname())
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"))
    put(entityStore, action)
    Delete(s"$collectionPath/${provider.name}/${action.name}") ~> Route.seal(routes(creds)) ~> check {
      status should be(NotFound)
    }
  }
  it should "reject delete non-existent action in package" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname())
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"))
    put(entityStore, provider)
    Delete(s"$collectionPath/${provider.name}/${action.name}") ~> Route.seal(routes(creds)) ~> check {
      status should be(NotFound)
    }
  }
  it should "reject delete action in package binding where package doesn't exist" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname(), None, publish = true)
    val binding = WhiskPackage(namespace, aname(), provider.bind)
    val content = WhiskActionPut(Some(jsDefault("??")))
    put(entityStore, binding)
    Delete(s"$collectionPath/${binding.name}/${aname()}") ~> Route.seal(routes(creds)) ~> check {
      status should be(BadRequest)
    }
  }
  it should "reject delete action in package binding" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname(), None, publish = true)
    val binding = WhiskPackage(namespace, aname(), provider.bind)
    val content = WhiskActionPut(Some(jsDefault("??")))
    put(entityStore, provider)
    put(entityStore, binding)
    Delete(s"$collectionPath/${binding.name}/${aname()}") ~> Route.seal(routes(creds)) ~> check {
      status should be(BadRequest)
    }
  }
  it should "reject delete action in package owned by different subject" in {
    implicit val tid = transid()
    val provider = WhiskPackage(EntityPath(Subject().asString), aname(), publish = true)
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"))
    put(entityStore, provider)
    put(entityStore, action)
    Delete(s"/${provider.namespace}/${collection.path}/${provider.name}/${action.name}") ~> Route.seal(routes(creds)) ~> check {
      status should be(Forbidden)
    }
  }
  //// GET /actions/package/name
  it should "get action in package" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname(), None, Parameters("p", "P"), publish = true)
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"), Parameters("a", "A"))
    put(entityStore, provider)
    put(entityStore, action)
    org.apache.openwhisk.utils.retry {
      Get(s"$collectionPath/${provider.name}/${action.name}") ~> Route.seal(routes(creds)) ~> check {
        status should be(OK)
        val response = responseAs[WhiskAction]
        // the returned action inherits the package-level parameters
        response should be(action inherit provider.parameters)
      }
    }
  }
  it should "get action in package binding with public package" in {
    implicit val tid = transid()
    val auser = WhiskAuthHelpers.newIdentity()
    val provider = WhiskPackage(namespace, aname(), None, publish = true)
    val binding = WhiskPackage(EntityPath(auser.subject.asString), aname(), provider.bind, Parameters("b", "B"))
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"))
    put(entityStore, provider)
    put(entityStore, binding)
    put(entityStore, action)
    org.apache.openwhisk.utils.retry {
      Get(s"$collectionPath/${binding.name}/${action.name}") ~> Route.seal(routes(auser)) ~> check {
        status should be(OK)
        val response = responseAs[WhiskAction]
        response should be(action inherit (provider.parameters ++ binding.parameters))
      }
    }
  }
  it should "get action in package binding with public package with overriding parameters" in {
    implicit val tid = transid()
    val auser = WhiskAuthHelpers.newIdentity()
    val provider = WhiskPackage(namespace, aname(), None, Parameters("p", "P"), publish = true)
    val binding = WhiskPackage(EntityPath(auser.subject.asString), aname(), provider.bind, Parameters("b", "B"))
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"), Parameters("a", "A") ++ Parameters("b", "b"))
    put(entityStore, provider)
    put(entityStore, binding)
    put(entityStore, action)
    org.apache.openwhisk.utils.retry {
      Get(s"$collectionPath/${binding.name}/${action.name}") ~> Route.seal(routes(auser)) ~> check {
        status should be(OK)
        val response = responseAs[WhiskAction]
        response should be(action inherit (provider.parameters ++ binding.parameters))
      }
    }
  }
  // NOTE: does not work because entitlement model does not allow for an explicit
  // check on either one or both of the binding and package
  ignore should "get action in package binding with explicit entitlement grant" in {
    implicit val tid = transid()
    val auser = WhiskAuthHelpers.newIdentity()
    val provider = WhiskPackage(namespace, aname(), None, Parameters("p", "P"), publish = false)
    val binding = WhiskPackage(EntityPath(auser.subject.asString), aname(), provider.bind, Parameters("b", "B"))
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"), Parameters("a", "A"))
    put(entityStore, provider)
    put(entityStore, binding)
    put(entityStore, action)
    val pkgaccess = Resource(provider.namespace, PACKAGES, Some(provider.name.asString))
    Await.result(entitlementProvider.grant(auser, READ, pkgaccess), 1 second)
    Get(s"$collectionPath/${binding.name}/${action.name}") ~> Route.seal(routes(auser)) ~> check {
      status should be(OK)
      val response = responseAs[WhiskAction]
      response should be(action inherit (provider.parameters ++ binding.parameters))
    }
  }
  it should "reject get action in package that does not exist" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname())
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"))
    put(entityStore, action)
    Get(s"$collectionPath/${provider.name}/${action.name}") ~> Route.seal(routes(creds)) ~> check {
      status should be(NotFound)
    }
  }
  it should "reject get non-existent action in package" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname())
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"))
    put(entityStore, provider)
    Get(s"$collectionPath/${provider.name}/${action.name}") ~> Route.seal(routes(creds)) ~> check {
      status should be(NotFound)
    }
  }
  it should "reject get action in package binding that does not exist" in {
    implicit val tid = transid()
    val name = aname()
    val auser = WhiskAuthHelpers.newIdentity()
    val provider = WhiskPackage(namespace, aname(), None, Parameters("p", "P"), publish = true)
    val binding = WhiskPackage(EntityPath(auser.subject.asString), aname(), provider.bind, Parameters("b", "B"))
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"), Parameters("a", "A"))
    put(entityStore, provider)
    put(entityStore, action)
    Get(s"$collectionPath/${binding.name}/${action.name}") ~> Route.seal(routes(auser)) ~> check {
      status should be(NotFound)
    }
  }
  it should "reject get action in package binding with package that does not exist" in {
    implicit val tid = transid()
    val name = aname()
    val auser = WhiskAuthHelpers.newIdentity()
    val provider = WhiskPackage(namespace, aname(), None, Parameters("p", "P"), publish = true)
    val binding = WhiskPackage(EntityPath(auser.subject.asString), aname(), provider.bind, Parameters("b", "B"))
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"), Parameters("a", "A"))
    put(entityStore, binding)
    put(entityStore, action)
    Get(s"$collectionPath/${binding.name}/${action.name}") ~> Route.seal(routes(auser)) ~> check {
      status should be(Forbidden) // do not leak that package does not exist
    }
  }
  it should "reject get non-existing action in package binding" in {
    implicit val tid = transid()
    val name = aname()
    val auser = WhiskAuthHelpers.newIdentity()
    val provider = WhiskPackage(namespace, aname(), None, Parameters("p", "P"), publish = true)
    val binding = WhiskPackage(EntityPath(auser.subject.asString), aname(), provider.bind, Parameters("b", "B"))
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"), Parameters("a", "A"))
    put(entityStore, provider)
    put(entityStore, binding)
    Get(s"$collectionPath/${binding.name}/${action.name}") ~> Route.seal(routes(auser)) ~> check {
      status should be(NotFound)
    }
  }
  it should "reject get action in package binding with private package" in {
    implicit val tid = transid()
    val auser = WhiskAuthHelpers.newIdentity()
    val provider = WhiskPackage(namespace, aname(), None, Parameters("p", "P"), publish = false)
    val binding = WhiskPackage(EntityPath(auser.subject.asString), aname(), provider.bind, Parameters("b", "B"))
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"), Parameters("a", "A"))
    put(entityStore, provider)
    put(entityStore, binding)
    put(entityStore, action)
    Get(s"$collectionPath/${binding.name}/${action.name}") ~> Route.seal(routes(auser)) ~> check {
      status should be(Forbidden)
    }
  }
  //// POST /actions/name
  it should "allow owner to invoke an action in package" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname())
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"))
    val content = JsObject("xxx" -> "yyy".toJson)
    put(entityStore, provider)
    put(entityStore, action)
    Post(s"$collectionPath/${provider.name}/${action.name}", content) ~> Route.seal(routes(creds)) ~> check {
      status should be(Accepted)
      val response = responseAs[JsObject]
      response.fields("activationId") should not be None
    }
  }
  it should "allow non-owner to invoke an action in public package" in {
    implicit val tid = transid()
    val auser = WhiskAuthHelpers.newIdentity()
    val provider = WhiskPackage(namespace, aname(), publish = true)
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"))
    val content = JsObject("xxx" -> "yyy".toJson)
    put(entityStore, provider)
    put(entityStore, action)
    Post(s"/$namespace/${collection.path}/${provider.name}/${action.name}", content) ~> Route.seal(routes(auser)) ~> check {
      status should be(Accepted)
      val response = responseAs[JsObject]
      response.fields("activationId") should not be None
    }
  }
  it should "invoke action in package binding with public package" in {
    implicit val tid = transid()
    val auser = WhiskAuthHelpers.newIdentity()
    val provider = WhiskPackage(namespace, aname(), publish = true)
    val reference = WhiskPackage(EntityPath(auser.subject.asString), aname(), provider.bind)
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"))
    val content = JsObject("x" -> "x".toJson, "z" -> "Z".toJson)
    put(entityStore, provider)
    put(entityStore, reference)
    put(entityStore, action)
    Post(s"$collectionPath/${reference.name}/${action.name}", content) ~> Route.seal(routes(auser)) ~> check {
      status should be(Accepted)
      val response = responseAs[JsObject]
      response.fields("activationId") should not be None
    }
  }
  // NOTE: does not work because entitlement model does not allow for an explicit
  // check on either one or both of the binding and package
  ignore should "invoke action in package binding with explicit entitlement grant even if package is not public" in {
    implicit val tid = transid()
    val auser = WhiskAuthHelpers.newIdentity()
    val provider = WhiskPackage(namespace, aname(), publish = false)
    val reference = WhiskPackage(EntityPath(auser.subject.asString), aname(), provider.bind)
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"))
    val content = JsObject("x" -> "x".toJson, "z" -> "Z".toJson)
    put(entityStore, provider)
    put(entityStore, reference)
    put(entityStore, action)
    val pkgaccess = Resource(provider.namespace, PACKAGES, Some(provider.name.asString))
    Await.result(entitlementProvider.grant(auser, ACTIVATE, pkgaccess), 1 second)
    Post(s"$collectionPath/${reference.name}/${action.name}", content) ~> Route.seal(routes(auser)) ~> check {
      status should be(Accepted)
      val response = responseAs[JsObject]
      response.fields("activationId") should not be None
    }
  }
  it should "reject non-owner invoking an action in private package" in {
    implicit val tid = transid()
    val auser = WhiskAuthHelpers.newIdentity()
    val provider = WhiskPackage(namespace, aname(), publish = false)
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"))
    val content = JsObject("xxx" -> "yyy".toJson)
    put(entityStore, provider)
    put(entityStore, action)
    Post(s"/$namespace/${collection.path}/${provider.name}/${action.name}", content) ~> Route.seal(routes(auser)) ~> check {
      status should be(Forbidden)
    }
  }
  it should "reject invoking an action in package that does not exist" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname(), publish = false)
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"))
    val content = JsObject("xxx" -> "yyy".toJson)
    put(entityStore, action)
    Post(s"$collectionPath/${provider.name}/${action.name}", content) ~> Route.seal(routes(creds)) ~> check {
      status should be(NotFound)
    }
  }
  it should "reject invoking a non-existent action in package" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname(), publish = false)
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"))
    val content = JsObject("xxx" -> "yyy".toJson)
    put(entityStore, action)
    Post(s"$collectionPath/${provider.name}/${action.name}", content) ~> Route.seal(routes(creds)) ~> check {
      status should be(NotFound)
    }
  }
  it should "reject invoke action in package binding with private package" in {
    implicit val tid = transid()
    val auser = WhiskAuthHelpers.newIdentity()
    val provider = WhiskPackage(namespace, aname(), publish = false)
    val reference = WhiskPackage(EntityPath(auser.subject.asString), aname(), provider.bind)
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"))
    val content = JsObject("x" -> "x".toJson, "z" -> "Z".toJson)
    put(entityStore, provider)
    put(entityStore, reference)
    put(entityStore, action)
    Post(s"$collectionPath/${reference.name}/${action.name}", content) ~> Route.seal(routes(auser)) ~> check {
      status should be(Forbidden)
    }
  }
  // The tests below verify error reporting when datastore records are corrupted.
  it should "report proper error when provider record is corrupted on delete" in {
    implicit val tid = transid()
    val provider = BadEntity(namespace, aname())
    val entity = BadEntity(provider.namespace.addPath(provider.name), aname())
    put(entityStore, provider)
    put(entityStore, entity)
    Delete(s"$collectionPath/${provider.name}/${entity.name}") ~> Route.seal(routes(creds)) ~> check {
      responseAs[ErrorResponse].error shouldBe Messages.corruptedEntity
    }
  }
  it should "report proper error when record is corrupted on delete" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname())
    val entity = BadEntity(provider.fullPath, aname())
    put(entityStore, provider, false)
    val entityToDelete = put(entityStore, entity, false)
    Delete(s"$collectionPath/${provider.name}/${entity.name}") ~> Route.seal(routes(creds)) ~> check {
      deletePackage(provider.docid)
      delete(entityStore, entityToDelete)
      status should be(InternalServerError)
      responseAs[ErrorResponse].error shouldBe Messages.corruptedEntity
    }
  }
  it should "report proper error when provider record is corrupted on get" in {
    implicit val tid = transid()
    val provider = BadEntity(namespace, aname())
    val entity = BadEntity(provider.namespace.addPath(provider.name), aname())
    put(entityStore, provider)
    put(entityStore, entity)
    Get(s"$collectionPath/${provider.name}/${entity.name}") ~> Route.seal(routes(creds)) ~> check {
      status should be(InternalServerError)
      responseAs[ErrorResponse].error shouldBe Messages.corruptedEntity
    }
    val auser = WhiskAuthHelpers.newIdentity()
    Get(s"/${provider.namespace}/${collection.path}/${provider.name}/${entity.name}") ~> Route.seal(routes(auser)) ~> check {
      status should be(Forbidden)
      responseAs[ErrorResponse].error shouldBe Messages.notAuthorizedtoAccessResource(s"$namespace/${provider.name}")
    }
  }
  it should "report proper error when record is corrupted on get" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname())
    val entity = BadEntity(provider.fullPath, aname())
    put(entityStore, provider)
    put(entityStore, entity)
    Get(s"$collectionPath/${provider.name}/${entity.name}") ~> Route.seal(routes(creds)) ~> check {
      status should be(InternalServerError)
      responseAs[ErrorResponse].error shouldBe Messages.corruptedEntity
    }
  }
  it should "report proper error when provider record is corrupted on put" in {
    implicit val tid = transid()
    val provider = BadEntity(namespace, aname())
    val entity = BadEntity(provider.namespace.addPath(provider.name), aname())
    put(entityStore, provider)
    put(entityStore, entity)
    val content = WhiskActionPut()
    Put(s"$collectionPath/${provider.name}/${entity.name}", content) ~> Route.seal(routes(creds)) ~> check {
      status should be(InternalServerError)
      responseAs[ErrorResponse].error shouldBe Messages.corruptedEntity
    }
  }
  it should "report proper error when record is corrupted on put" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname())
    val entity = BadEntity(provider.fullPath, aname())
    put(entityStore, provider)
    put(entityStore, entity)
    val content = WhiskActionPut()
    Put(s"$collectionPath/${provider.name}/${entity.name}", content) ~> Route.seal(routes(creds)) ~> check {
      status should be(InternalServerError)
      responseAs[ErrorResponse].error shouldBe Messages.corruptedEntity
    }
  }
  // Mutable switch mirroring the controller's execute-only configuration; the
  // two tests below flip it to exercise both settings.
  var testExecuteOnly = false
  override def executeOnly = testExecuteOnly
  it should ("allow access to get of action in binding of shared package when config option is disabled") in {
    testExecuteOnly = false
    implicit val tid = transid()
    val auser = WhiskAuthHelpers.newIdentity()
    val provider = WhiskPackage(namespace, aname(), None, Parameters("p", "P"), publish = true)
    val binding = WhiskPackage(EntityPath(auser.subject.asString), aname(), provider.bind, Parameters("b", "B"))
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"), Parameters("a", "A"))
    put(entityStore, provider)
    put(entityStore, binding)
    put(entityStore, action)
    Get(s"$collectionPath/${binding.name}/${action.name}") ~> Route.seal(routes(auser)) ~> check {
      status should be(OK)
    }
  }
  it should ("deny access to get of action in binding of shared package when config option is enabled") in {
    testExecuteOnly = true
    implicit val tid = transid()
    val auser = WhiskAuthHelpers.newIdentity()
    val provider = WhiskPackage(namespace, aname(), None, Parameters("p", "P"), publish = true)
    val binding = WhiskPackage(EntityPath(auser.subject.asString), aname(), provider.bind, Parameters("b", "B"))
    val action = WhiskAction(provider.fullPath, aname(), jsDefault("??"), Parameters("a", "A"))
    put(entityStore, provider)
    put(entityStore, binding)
    put(entityStore, action)
    Get(s"$collectionPath/${binding.name}/${action.name}") ~> Route.seal(routes(auser)) ~> check {
      status should be(Forbidden)
    }
  }
}
| style95/openwhisk | tests/src/test/scala/org/apache/openwhisk/core/controller/test/PackageActionsApiTests.scala | Scala | apache-2.0 | 29,041 |
package com.github.slackey.codecs.responses
// Decoded response payload — presumably of the Slack `groups.open` Web API
// call (field names mirror the JSON keys); TODO confirm against the codec.
case class GroupsOpen(
  no_op: Option[Boolean],
  already_open: Option[Boolean]
)
| slackey/slackey | src/main/scala/com/github/slackey/codecs/responses/GroupsOpen.scala | Scala | mit | 128 |
/**
* Angles
* Copyright (C) 2015 Jakob Hendeß, Niklas Wolber
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package io.ssc.angles.pipeline.explorers
import java.io.PrintWriter
import java.nio.file.Files
import java.nio.file.Paths
import org.slf4j.LoggerFactory
/**
 * Command-line tool: reads a cluster CSV (args(0)) and writes every
 * cross-cluster pair of members ("anti-pairs") as `a;b` lines to args(1).
 */
object GenerateAntiPairs extends App {
  val logger = LoggerFactory.getLogger(GenerateAntiPairs.getClass)
  val clustersFile = args(0)
  val antiPairsFile = args(1)
  logger.info("Loading cluster file")
  val clusters = ClusterReadWriter.readClusterFile(clustersFile)
  logger.info("Got {} clusters from CSV", clusters.getNumClusters)
  val path = Paths.get(antiPairsFile)
  val writer = new PrintWriter(Files.newBufferedWriter(path))
  var n = 0
  try {
    // NOTE(review): indices run `1 until getNumClusters`, so index 0 is never
    // paired — confirm whether getCluster is 1-based; otherwise this skips a
    // cluster (off-by-one).
    for (i <- 1 until clusters.getNumClusters; j <- (i + 1) until clusters.getNumClusters) {
      val cluster1 = clusters.getCluster(i)
      val cluster2 = clusters.getCluster(j)
      (cluster1 cross cluster2).foreach { p =>
        writer.print(p._1)
        writer.print(";")
        writer.println(p._2)
        n += 1
      }
    }
  } finally {
    // Close even if reading a cluster or writing fails, so the file handle
    // and buffered data are not leaked.
    writer.close()
  }
  logger.info("Generated {} anti-pairs", n)
  // Pimp allowing `xs cross ys`: the cartesian product of two traversables.
  implicit class Crossable[X](xs: Traversable[X]) {
    def cross[Y](ys: Traversable[Y]) = (xs).flatMap { case x => (ys).map { case y => (x, y) } }
  }
}
| nwolber/angles | src/main/scala/io/ssc/angles/pipeline/explorers/GenerateAntiPairs.scala | Scala | gpl-3.0 | 1,874 |
package me.axiometry.blocknet.entity
// Marker trait for animal entities; refines the generic Creature interface.
trait Animal extends Creature
object Animal {
  // An animal that can have an owner and be commanded to sit.
  trait Tameable extends Animal {
    def ownerName: Option[String]
    def sitting: Boolean
    def tame: Boolean
    // Mutators; note `tame` itself has no setter in this interface.
    def ownerName_=(ownerName: Option[String])
    def sitting_=(sitting: Boolean)
  }
  // An animal that can carry a saddle (and so, presumably, be ridden).
  trait Rideable extends Animal {
    def saddled: Boolean
    def saddled_=(saddled: Boolean)
  }
}
| Axiometry/Blocknet | blocknet-api/src/main/scala/me/axiometry/blocknet/entity/Animal.scala | Scala | bsd-2-clause | 389 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2003-2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala
package collection
package generic
import mutable.Builder
import scala.annotation.migration
import scala.annotation.unchecked.uncheckedVariance
import scala.language.higherKinds
/** A template class for companion objects of ``regular`` collection classes
* that represent an unconstrained higher-kinded type.
*
* @tparam A The type of the collection elements.
* @tparam CC The type constructor representing the collection class.
* @author Martin Odersky
* @since 2.8
* @define coll collection
* @define Coll Traversable
*/
trait GenericTraversableTemplate[+A, +CC[X] <: GenTraversable[X]] extends HasNewBuilder[A, CC[A] @uncheckedVariance] {

  /** Applies a function `f` to all elements of this $coll.
   *
   *  @param f the function that is applied for its side-effect to every element.
   *           The result of function `f` is discarded.
   *
   *  @tparam U the type parameter describing the result of function `f`.
   *            This result will always be ignored. Typically `U` is `Unit`,
   *            but this is not necessary.
   *
   *  @usecase def foreach(f: A => Unit): Unit
   */
  def foreach[U](f: A => U): Unit

  /** Selects the first element of this $coll.
   *
   *  @return the first element of this $coll.
   *  @throws NoSuchElementException if the $coll is empty.
   */
  def head: A

  /** Tests whether this $coll is empty.
   *
   *  @return `true` if the $coll contain no elements, `false` otherwise.
   */
  def isEmpty: Boolean

  /** The factory companion object that builds instances of class $Coll.
   *  (or its `Iterable` superclass where class $Coll is not a `Seq`.)
   */
  def companion: GenericCompanion[CC]

  /** The builder that builds instances of type $Coll[A]
   */
  protected[this] def newBuilder: Builder[A, CC[A]] = companion.newBuilder[A]

  /** The generic builder that builds instances of $Coll
   *  at arbitrary element types.
   */
  def genericBuilder[B]: Builder[B, CC[B]] = companion.newBuilder[B]

  // Views this collection as a sequential (non-parallel) TraversableOnce so
  // that the builder-based methods below see elements one at a time, in order.
  // NOTE(review): the cast assumes every concrete subclass also implements
  // GenTraversableOnce — true for all standard collections.
  private def sequential: TraversableOnce[A] = this.asInstanceOf[GenTraversableOnce[A]].seq

  /** Converts this $coll of pairs into two collections of the first and second
   *  half of each pair.
   *
   *    {{{
   *    val xs = $Coll(
   *               (1, "one"),
   *               (2, "two"),
   *               (3, "three")).unzip
   *    // xs == ($Coll(1, 2, 3),
   *    //        $Coll(one, two, three))
   *    }}}
   *
   *  @tparam A1 the type of the first half of the element pairs
   *  @tparam A2 the type of the second half of the element pairs
   *  @param asPair an implicit conversion which asserts that the element type
   *                of this $coll is a pair.
   *  @return a pair of ${coll}s, containing the first, respectively second
   *          half of each element pair of this $coll.
   */
  def unzip[A1, A2](implicit asPair: A => (A1, A2)): (CC[A1], CC[A2]) = {
    // Two builders, one per output collection; a single traversal fills both.
    val b1 = genericBuilder[A1]
    val b2 = genericBuilder[A2]
    for (xy <- sequential) {
      val (x, y) = asPair(xy)
      b1 += x
      b2 += y
    }
    (b1.result(), b2.result())
  }

  /** Converts this $coll of triples into three collections of the first, second,
   *  and third element of each triple.
   *
   *    {{{
   *    val xs = $Coll(
   *               (1, "one", '1'),
   *               (2, "two", '2'),
   *               (3, "three", '3')).unzip3
   *    // xs == ($Coll(1, 2, 3),
   *    //        $Coll(one, two, three),
   *    //        $Coll(1, 2, 3))
   *    }}}
   *
   *  @tparam A1 the type of the first member of the element triples
   *  @tparam A2 the type of the second member of the element triples
   *  @tparam A3 the type of the third member of the element triples
   *  @param asTriple an implicit conversion which asserts that the element type
   *                  of this $coll is a triple.
   *  @return a triple of ${coll}s, containing the first, second, respectively
   *          third member of each element triple of this $coll.
   */
  def unzip3[A1, A2, A3](implicit asTriple: A => (A1, A2, A3)): (CC[A1], CC[A2], CC[A3]) = {
    // Same single-pass strategy as unzip, with three builders.
    val b1 = genericBuilder[A1]
    val b2 = genericBuilder[A2]
    val b3 = genericBuilder[A3]
    for (xyz <- sequential) {
      val (x, y, z) = asTriple(xyz)
      b1 += x
      b2 += y
      b3 += z
    }
    (b1.result(), b2.result(), b3.result())
  }

  /** Converts this $coll of traversable collections into
   *  a $coll formed by the elements of these traversable
   *  collections.
   *
   *  @tparam B the type of the elements of each traversable collection.
   *  @param asTraversable an implicit conversion which asserts that the element
   *          type of this $coll is a `GenTraversable`.
   *  @return a new $coll resulting from concatenating all element ${coll}s.
   *
   *  @usecase def flatten[B]: $Coll[B]
   *
   *    @inheritdoc
   *
   *    The resulting collection's type will be guided by the
   *    static type of $coll. For example:
   *
   *    {{{
   *    val xs = List(
   *               Set(1, 2, 3),
   *               Set(1, 2, 3)
   *             ).flatten
   *    // xs == List(1, 2, 3, 1, 2, 3)
   *
   *    val ys = Set(
   *               List(1, 2, 3),
   *               List(3, 2, 1)
   *             ).flatten
   *    // ys == Set(1, 2, 3)
   *    }}}
   */
  def flatten[B](implicit asTraversable: A => /*<:<!!!*/ GenTraversableOnce[B]): CC[B] = {
    val b = genericBuilder[B]
    for (xs <- sequential)
      b ++= asTraversable(xs).seq
    b.result()
  }

  /** Transposes this $coll of traversable collections into
   *  a $coll of ${coll}s.
   *
   *    The resulting collection's type will be guided by the
   *    static type of $coll. For example:
   *
   *    {{{
   *    val xs = List(
   *               Set(1, 2, 3),
   *               Set(4, 5, 6)).transpose
   *    // xs == List(
   *    //         List(1, 4),
   *    //         List(2, 5),
   *    //         List(3, 6))
   *
   *    val ys = Vector(
   *               List(1, 2, 3),
   *               List(4, 5, 6)).transpose
   *    // ys == Vector(
   *    //         Vector(1, 4),
   *    //         Vector(2, 5),
   *    //         Vector(3, 6))
   *    }}}
   *
   *  @tparam B the type of the elements of each traversable collection.
   *  @param asTraversable an implicit conversion which asserts that the
   *          element type of this $coll is a `Traversable`.
   *  @return a two-dimensional $coll of ${coll}s which has as ''n''th row
   *          the ''n''th column of this $coll.
   *  @throws IllegalArgumentException if all collections in this $coll
   *          are not of the same size.
   */
  @migration("`transpose` throws an `IllegalArgumentException` if collections are not uniformly sized.", "2.9.0")
  def transpose[B](implicit asTraversable: A => /*<:<!!!*/ GenTraversableOnce[B]): CC[CC[B] @uncheckedVariance] = {
    if (isEmpty)
      return genericBuilder[CC[B]].result()

    def fail = throw new IllegalArgumentException("transpose requires all collections have the same size")

    // The first row fixes the number of columns; one builder per column.
    val headSize = asTraversable(head).size
    val bs: IndexedSeq[Builder[B, CC[B]]] = IndexedSeq.fill(headSize)(genericBuilder[B])
    for (xs <- sequential) {
      var i = 0
      for (x <- asTraversable(xs).seq) {
        // Row longer than the first row: sizes are not uniform.
        if (i >= headSize) fail
        bs(i) += x
        i += 1
      }
      // Row shorter than the first row: sizes are not uniform.
      if (i != headSize)
        fail
    }
    // Assemble the per-column builders into the outer collection.
    val bb = genericBuilder[CC[B]]
    for (b <- bs) bb += b.result
    bb.result()
  }
}
| felixmulder/scala | src/library/scala/collection/generic/GenericTraversableTemplate.scala | Scala | bsd-3-clause | 8,052 |
package com.scout24.pipedsl.model
import com.amazonaws.services.datapipeline.model.Field
import scala.collection.mutable
/**
 * A single AWS Data Pipeline object with a type field and, optionally,
 * references to other pipeline objects.
 */
class PipelineObject(id : String, theType : String) extends AwsPipelineObject {
  setId(id)
  addValueField("type", theType)

  /** Objects this one points at through reference fields. */
  var referencedObjects = mutable.Set[PipelineObject]()

  /** This object together with everything transitively reachable from it. */
  def withReferencedObjects : Set[AwsPipelineObject] =
    collectReferencedObjects(Set.empty[AwsPipelineObject])

  /**
   * Accumulates this object and its transitive references into `collected`.
   * Already-collected objects are not revisited, so reference cycles terminate.
   */
  def collectReferencedObjects(collected : Set[AwsPipelineObject]) : Set[AwsPipelineObject] =
    if (collected.contains(this)) {
      collected
    } else {
      var acc = collected + this
      for (obj <- referencedObjects) {
        acc = obj.collectReferencedObjects(acc)
      }
      acc
    }

  /** Adds a plain string-valued field. */
  def addValueField(key : String, value : String): Unit =
    withFields(new Field().withKey(key).withStringValue(value))

  /** Adds a field referencing another pipeline object and records the link. */
  def addReferenceField(key : String, other : PipelineObject): Unit = {
    withFields(new Field().withKey(key).withRefValue(other.getId))
    referencedObjects += other
  }
}
| matey-jack/pipe-dsl | src/main/scala/com/scout24/pipedsl/model/PipelineObject.scala | Scala | gpl-2.0 | 989 |
package de.fosd.typechef.crewrite
import de.fosd.typechef.parser.c._
import de.fosd.typechef.featureexpr.FeatureModel
import de.fosd.typechef.conditional.Opt
// implements a simple analysis that checks whether a switch statement has
// code that does not occur in the control flow of a case or default statement
// https://www.securecoding.cert.org/confluence/display/seccode/MSC35-C.+Do+not+include+any+executable+statements+inside+a+switch+statement+before+the+first+case+label
// MSC35-C
class DanglingSwitchCode(env: ASTEnv) extends IntraCFG {

  /**
   * Returns the control-flow successors of the switch's expression that belong
   * to the switch body without being case/default labels, i.e. executable code
   * placed before the first label.
   */
  def danglingSwitchCode(s: SwitchStatement): List[Opt[AST]] = {
    // successors of all successors of the expressions in the switch statement
    val exprSuccessors = succ(s, env).flatMap(o => succ(o.entry, env))

    exprSuccessors.filter {
      // fall through (case after case) is allowed, so labels are dropped
      case Opt(_, _: CaseStatement)    => false
      case Opt(_, _: DefaultStatement) => false
      // keep only elements that are actually part of this switch
      case Opt(_, x)                   => isPartOf(x, s)
    }
  }
}
| ckaestne/TypeChef | CRewrite/src/main/scala/de/fosd/typechef/crewrite/DanglingSwitchCode.scala | Scala | lgpl-3.0 | 1,254 |
/*
Copyright (c) 2012 Joshua Garnett
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package com.adverserealms.astar.core
import com.adverserealms.astar.basic2d._
/**
 * The AstarPath defines the path that is found by the Astar class.
 */
class AstarPath(cost: Double, path: List[AstarTile]) {

  /**
   * Gets the cost of this AstarPath
   */
  def getCost = cost

  /**
   * Gets the path
   */
  def getPath = path

  /**
   * Renders the path as space-separated "x,y" pairs (one trailing space per
   * tile, matching the historical output format).
   * NOTE(review): assumes every tile in the path is a PositionTile.
   */
  override def toString(): String =
    path.map { tile =>
      val point = tile.asInstanceOf[PositionTile].getPosition
      s"${point.getX},${point.getY} "
    }.mkString
}
package fr.renoux.gaston.util
/** An entity that carries an integer identifier. */
trait Identified {
  /** The identifier of this entity. */
  val id: Int
}
| gaelrenoux/gaston | src/main/scala/fr/renoux/gaston/util/Identified.scala | Scala | apache-2.0 | 66 |
package com.github.scalaspring.akka
import akka.actor.ActorSystem
import org.springframework.beans.factory.annotation.Autowired
/**
* Extend this trait to add actor reference creation helper methods to any Spring configuration.
*/
trait ActorSystemConfiguration extends SpringActorRefFactory {
  // The ActorSystem is field-injected by Spring via @Autowired; the `null`
  // initializer is a placeholder that Spring overwrites after construction.
  // Declared implicit so helpers inherited from SpringActorRefFactory can
  // pick it up as their ActorRefFactory.
  @Autowired
  protected implicit val factory: ActorSystem = null
}
| scalaspring/akka-spring-boot | src/main/scala/com/github/scalaspring/akka/ActorSystemConfiguration.scala | Scala | apache-2.0 | 368 |
/*
* Copyright 2018 Analytics Zoo Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.zoo.pipeline.api.keras.layers
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractCriterion, AbstractModule}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric.NumericFloat
import com.intel.analytics.zoo.pipeline.api.keras.ZooSpecHelper
import scala.sys.process._
import org.scalatest.Tag
// Test tag (org.scalatest.Tag) for categorizing tests by Keras version.
// NOTE(review): exact filtering semantics are defined by the build/test
// configuration, not visible here.
object NoKeras2 extends Tag("com.intel.analytics.zoo.pipeline.api")

// Test tag marking tests that target Keras 2.
object Keras2Test extends Tag("com.intel.analytics.zoo.pipeline.api.keras2")
/**
 * Base spec for cross-checking BigDL/Zoo layers against a reference Keras
 * implementation executed through an external Python process.
 */
abstract class KerasBaseSpec extends ZooSpecHelper {

  /** Cancels the current test when the Python/Keras/TensorFlow toolchain
   *  needed for the reference run is not installed. */
  protected def ifskipTest(): Unit = {
    // Skip unit test if environment is not ready
    try {
      Seq("python", "-c", "import keras; import tensorflow").!!
    } catch {
      // NOTE(review): catches Throwable so that any failure to launch python
      // cancels (rather than fails) the test — confirm this breadth is intended.
      case e: Throwable => cancel("python or keras or tensorflow is not installed", e)
    }
  }

  // Identity conversion: by default Keras weights are used unchanged.
  private def defaultWeightConverter(in: Array[Tensor[Float]]) = in

  // weightConverter: convert keras weight to BigDL format,
  // do nothing for the default converter
  /**
   * Runs `kerasCode` through the Python reference and asserts that the BigDL
   * model produces (within `precision`) the same output, input gradient and
   * weight gradients.
   */
  def checkOutputAndGrad(bmodel: AbstractModule[Tensor[Float], Tensor[Float], Float],
                         kerasCode: String,
                         weightConverter: (Array[Tensor[Float]]) => Array[Tensor[Float]]
                         = defaultWeightConverter,
                         precision: Double = 1e-5): Unit = {
    ifskipTest()
    val (gradInput, gradWeight, weights, input, target, output) = KerasRunner.run(kerasCode)
    // Ensure they share the same weights
    if (weights != null) {
      bmodel.setWeightsBias(weightConverter(weights))
    }

    // Forward pass: shape and values must match the Keras output.
    val boutput = bmodel.forward(input)
    boutput.size().sameElements(output.size()) should be (true)
    boutput.almostEqual(output, precision) should be (true)

    // Backward pass: gradient w.r.t. the input must match.
    val bgradInput = bmodel.backward(input, boutput.clone())
    bgradInput.size().sameElements(gradInput.size()) should be (true)
    bgradInput.almostEqual(gradInput, precision) should be (true)

    // Gradients w.r.t. the weights (when the layer has any) must match too.
    val parameters = bmodel.parameters()
    if (gradWeight != null) {
      val bgradWeights = parameters._2
      (bgradWeights, weightConverter(gradWeight)).zipped.foreach { (bgrad, kgrad) =>
        bgrad.almostEqual(kgrad, precision) should be(true)
      }
    }
  }

  /** Same cross-check for a loss function (criterion) instead of a layer. */
  def checkOutputAndGradForLoss(bmodel: AbstractCriterion[Tensor[Float], Tensor[Float], Float],
                                kerasCode: String,
                                precision: Double = 1e-5): Unit = {
    ifskipTest()
    val (gradInput, gradWeight, weights, input, target, output) =
      KerasRunner.run(kerasCode, Loss)
    val boutput = bmodel.forward(input, target)
    val koutput = output.mean() // the return value from keras is not always averaged.
    NumericFloat.nearlyEqual(boutput, koutput, precision) should be(true)
    val kgradInput = gradInput.div(output.nElement()) // div is an in-place operation.
    val bgradInput = bmodel.backward(input, target.clone())
    bgradInput.almostEqual(kgradInput, precision) should be(true)
  }
}
// Base spec for Keras-2 layer tests; inherits all helpers from KerasBaseSpec.
abstract class Keras2BaseSpec extends KerasBaseSpec
| intel-analytics/analytics-zoo | zoo/src/test/scala/com/intel/analytics/zoo/pipeline/api/keras/layers/KerasBaseSpec.scala | Scala | apache-2.0 | 3,671 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spark.ui
// The top-level pages (tabs) of the web UI. NOTE(review): kept as a
// scala.Enumeration so existing references to the `Page.Value` type compile.
private[spark] object Page extends Enumeration { val Storage, Jobs, Environment, Executors = Value }
| wgpshashank/spark | core/src/main/scala/spark/ui/Page.scala | Scala | apache-2.0 | 921 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.io.File
import java.net.{URL, URLClassLoader}
import java.util.Locale
import java.util.concurrent.TimeUnit
import scala.collection.JavaConverters._
import scala.collection.mutable.HashMap
import scala.util.Try
import org.apache.commons.lang3.{JavaVersion, SystemUtils}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.apache.hadoop.hive.ql.session.SessionState
import org.apache.hadoop.util.VersionInfo
import org.apache.hive.common.util.HiveVersionInfo
import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.catalog.CatalogTable
import org.apache.spark.sql.execution.command.DDLUtils
import org.apache.spark.sql.execution.datasources.DataSource
import org.apache.spark.sql.hive.client._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf._
import org.apache.spark.sql.internal.StaticSQLConf.WAREHOUSE_PATH
import org.apache.spark.sql.types._
import org.apache.spark.util.{ChildFirstURLClassLoader, Utils}
private[spark] object HiveUtils extends Logging {
/** The version of hive used internally by Spark SQL. */
val builtinHiveVersion: String = HiveVersionInfo.getVersion
val BUILTIN_HIVE_VERSION = buildStaticConf("spark.sql.hive.version")
.doc("The compiled, a.k.a, builtin Hive version of the Spark distribution bundled with." +
" Note that, this a read-only conf and only used to report the built-in hive version." +
" If you want a different metastore client for Spark to call, please refer to" +
" spark.sql.hive.metastore.version.")
.version("1.1.1")
.stringConf
.checkValue(_ == builtinHiveVersion,
"The builtin Hive version is read-only, please use spark.sql.hive.metastore.version")
.createWithDefault(builtinHiveVersion)
  // A version string is compatible iff IsolatedClientLoader can resolve it to
  // a supported HiveVersion without throwing.
  private def isCompatibleHiveVersion(hiveVersionStr: String): Boolean = {
    Try { IsolatedClientLoader.hiveVersion(hiveVersionStr) }.isSuccess
  }
val HIVE_METASTORE_VERSION = buildStaticConf("spark.sql.hive.metastore.version")
.doc("Version of the Hive metastore. Available options are " +
"<code>0.12.0</code> through <code>2.3.8</code> and " +
"<code>3.0.0</code> through <code>3.1.2</code>.")
.version("1.4.0")
.stringConf
.checkValue(isCompatibleHiveVersion, "Unsupported Hive Metastore version")
.createWithDefault(builtinHiveVersion)
val HIVE_METASTORE_JARS = buildStaticConf("spark.sql.hive.metastore.jars")
.doc(s"""
| Location of the jars that should be used to instantiate the HiveMetastoreClient.
| This property can be one of four options:
| 1. "builtin"
| Use Hive ${builtinHiveVersion}, which is bundled with the Spark assembly when
| <code>-Phive</code> is enabled. When this option is chosen,
| <code>spark.sql.hive.metastore.version</code> must be either
| <code>${builtinHiveVersion}</code> or not defined.
| 2. "maven"
| Use Hive jars of specified version downloaded from Maven repositories.
| 3. "path"
| Use Hive jars configured by `spark.sql.hive.metastore.jars.path`
| in comma separated format. Support both local or remote paths.The provided jars
| should be the same version as ${HIVE_METASTORE_VERSION}.
| 4. A classpath in the standard format for both Hive and Hadoop. The provided jars
| should be the same version as ${HIVE_METASTORE_VERSION}.
""".stripMargin)
.version("1.4.0")
.stringConf
.createWithDefault("builtin")
val HIVE_METASTORE_JARS_PATH = buildStaticConf("spark.sql.hive.metastore.jars.path")
.doc(s"""
| Comma-separated paths of the jars that used to instantiate the HiveMetastoreClient.
| This configuration is useful only when `{$HIVE_METASTORE_JARS.key}` is set as `path`.
| The paths can be any of the following format:
| 1. file://path/to/jar/foo.jar
| 2. hdfs://nameservice/path/to/jar/foo.jar
| 3. /path/to/jar/ (path without URI scheme follow conf `fs.defaultFS`'s URI schema)
| 4. [http/https/ftp]://path/to/jar/foo.jar
| Note that 1, 2, and 3 support wildcard. For example:
| 1. file://path/to/jar/*,file://path2/to/jar/*/*.jar
| 2. hdfs://nameservice/path/to/jar/*,hdfs://nameservice2/path/to/jar/*/*.jar
""".stripMargin)
.version("3.1.0")
.stringConf
.toSequence
.createWithDefault(Nil)
val CONVERT_METASTORE_PARQUET = buildConf("spark.sql.hive.convertMetastoreParquet")
.doc("When set to true, the built-in Parquet reader and writer are used to process " +
"parquet tables created by using the HiveQL syntax, instead of Hive serde.")
.version("1.1.1")
.booleanConf
.createWithDefault(true)
val CONVERT_METASTORE_PARQUET_WITH_SCHEMA_MERGING =
buildConf("spark.sql.hive.convertMetastoreParquet.mergeSchema")
.doc("When true, also tries to merge possibly different but compatible Parquet schemas in " +
"different Parquet data files. This configuration is only effective " +
"when \\"spark.sql.hive.convertMetastoreParquet\\" is true.")
.version("1.3.1")
.booleanConf
.createWithDefault(false)
val CONVERT_METASTORE_ORC = buildConf("spark.sql.hive.convertMetastoreOrc")
.doc("When set to true, the built-in ORC reader and writer are used to process " +
"ORC tables created by using the HiveQL syntax, instead of Hive serde.")
.version("2.0.0")
.booleanConf
.createWithDefault(true)
val CONVERT_INSERTING_PARTITIONED_TABLE =
buildConf("spark.sql.hive.convertInsertingPartitionedTable")
.doc("When set to true, and `spark.sql.hive.convertMetastoreParquet` or " +
"`spark.sql.hive.convertMetastoreOrc` is true, the built-in ORC/Parquet writer is used" +
"to process inserting into partitioned ORC/Parquet tables created by using the HiveSQL " +
"syntax.")
.version("3.0.0")
.booleanConf
.createWithDefault(true)
val CONVERT_METASTORE_CTAS = buildConf("spark.sql.hive.convertMetastoreCtas")
.doc("When set to true, Spark will try to use built-in data source writer " +
"instead of Hive serde in CTAS. This flag is effective only if " +
"`spark.sql.hive.convertMetastoreParquet` or `spark.sql.hive.convertMetastoreOrc` is " +
"enabled respectively for Parquet and ORC formats")
.version("3.0.0")
.booleanConf
.createWithDefault(true)
val HIVE_METASTORE_SHARED_PREFIXES = buildStaticConf("spark.sql.hive.metastore.sharedPrefixes")
.doc("A comma separated list of class prefixes that should be loaded using the classloader " +
"that is shared between Spark SQL and a specific version of Hive. An example of classes " +
"that should be shared is JDBC drivers that are needed to talk to the metastore. Other " +
"classes that need to be shared are those that interact with classes that are already " +
"shared. For example, custom appenders that are used by log4j.")
.version("1.4.0")
.stringConf
.toSequence
.createWithDefault(jdbcPrefixes)
private def jdbcPrefixes = Seq(
"com.mysql.jdbc", "org.postgresql", "com.microsoft.sqlserver", "oracle.jdbc")
val HIVE_METASTORE_BARRIER_PREFIXES = buildStaticConf("spark.sql.hive.metastore.barrierPrefixes")
.doc("A comma separated list of class prefixes that should explicitly be reloaded for each " +
"version of Hive that Spark SQL is communicating with. For example, Hive UDFs that are " +
"declared in a prefix that typically would be shared (i.e. <code>org.apache.spark.*</code>).")
.version("1.4.0")
.stringConf
.toSequence
.createWithDefault(Nil)
val HIVE_THRIFT_SERVER_ASYNC = buildConf("spark.sql.hive.thriftServer.async")
.doc("When set to true, Hive Thrift server executes SQL queries in an asynchronous way.")
.version("1.5.0")
.booleanConf
.createWithDefault(true)
/**
* The version of the hive client that will be used to communicate with the metastore. Note that
* this does not necessarily need to be the same version of Hive that is used internally by
* Spark SQL for execution.
*/
private def hiveMetastoreVersion(conf: SQLConf): String = {
conf.getConf(HIVE_METASTORE_VERSION)
}
/**
* The location of the jars that should be used to instantiate the HiveMetastoreClient. This
* property can be one of three options:
* - a classpath in the standard format for both hive and hadoop.
* - path - attempt to discover the jars with paths configured by `HIVE_METASTORE_JARS_PATH`.
* - builtin - attempt to discover the jars that were used to load Spark SQL and use those. This
* option is only valid when using the execution version of Hive.
* - maven - download the correct version of hive on demand from maven.
*/
private def hiveMetastoreJars(conf: SQLConf): String = {
conf.getConf(HIVE_METASTORE_JARS)
}
/**
* Hive jars paths, only work when `HIVE_METASTORE_JARS` is `path`.
*/
private def hiveMetastoreJarsPath(conf: SQLConf): Seq[String] = {
conf.getConf(HIVE_METASTORE_JARS_PATH)
}
/**
* A comma separated list of class prefixes that should be loaded using the classloader that
* is shared between Spark SQL and a specific version of Hive. An example of classes that should
* be shared is JDBC drivers that are needed to talk to the metastore. Other classes that need
* to be shared are those that interact with classes that are already shared. For example,
* custom appenders that are used by log4j.
*/
private def hiveMetastoreSharedPrefixes(conf: SQLConf): Seq[String] = {
conf.getConf(HIVE_METASTORE_SHARED_PREFIXES).filterNot(_ == "")
}
/**
* A comma separated list of class prefixes that should explicitly be reloaded for each version
* of Hive that Spark SQL is communicating with. For example, Hive UDFs that are declared in a
* prefix that typically would be shared (i.e. org.apache.spark.*)
*/
private def hiveMetastoreBarrierPrefixes(conf: SQLConf): Seq[String] = {
conf.getConf(HIVE_METASTORE_BARRIER_PREFIXES).filterNot(_ == "")
}
/**
* Change time configurations needed to create a [[HiveClient]] into unified [[Long]] format.
*/
private[hive] def formatTimeVarsForHiveClient(hadoopConf: Configuration): Map[String, String] = {
// Hive 0.14.0 introduces timeout operations in HiveConf, and changes default values of a bunch
// of time `ConfVar`s by adding time suffixes (`s`, `ms`, and `d` etc.). This breaks backwards-
// compatibility when users are trying to connecting to a Hive metastore of lower version,
// because these options are expected to be integral values in lower versions of Hive.
//
// Here we enumerate all time `ConfVar`s and convert their values to numeric strings according
// to their output time units.
val commonTimeVars = Seq(
ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY -> TimeUnit.SECONDS,
ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT -> TimeUnit.SECONDS,
ConfVars.METASTORE_CLIENT_SOCKET_LIFETIME -> TimeUnit.SECONDS,
ConfVars.HMSHANDLERINTERVAL -> TimeUnit.MILLISECONDS,
ConfVars.METASTORE_EVENT_DB_LISTENER_TTL -> TimeUnit.SECONDS,
ConfVars.METASTORE_EVENT_CLEAN_FREQ -> TimeUnit.SECONDS,
ConfVars.METASTORE_EVENT_EXPIRY_DURATION -> TimeUnit.SECONDS,
ConfVars.METASTORE_AGGREGATE_STATS_CACHE_TTL -> TimeUnit.SECONDS,
ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT -> TimeUnit.MILLISECONDS,
ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT -> TimeUnit.MILLISECONDS,
ConfVars.HIVES_AUTO_PROGRESS_TIMEOUT -> TimeUnit.SECONDS,
ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL -> TimeUnit.MILLISECONDS,
ConfVars.HIVE_LOCK_SLEEP_BETWEEN_RETRIES -> TimeUnit.SECONDS,
ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT -> TimeUnit.MILLISECONDS,
ConfVars.HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME -> TimeUnit.MILLISECONDS,
ConfVars.HIVE_TXN_TIMEOUT -> TimeUnit.SECONDS,
ConfVars.HIVE_COMPACTOR_WORKER_TIMEOUT -> TimeUnit.SECONDS,
ConfVars.HIVE_COMPACTOR_CHECK_INTERVAL -> TimeUnit.SECONDS,
ConfVars.HIVE_COMPACTOR_CLEANER_RUN_INTERVAL -> TimeUnit.MILLISECONDS,
ConfVars.HIVE_SERVER2_THRIFT_HTTP_MAX_IDLE_TIME -> TimeUnit.MILLISECONDS,
ConfVars.HIVE_SERVER2_THRIFT_HTTP_WORKER_KEEPALIVE_TIME -> TimeUnit.SECONDS,
ConfVars.HIVE_SERVER2_THRIFT_HTTP_COOKIE_MAX_AGE -> TimeUnit.SECONDS,
ConfVars.HIVE_SERVER2_THRIFT_LOGIN_BEBACKOFF_SLOT_LENGTH -> TimeUnit.MILLISECONDS,
ConfVars.HIVE_SERVER2_THRIFT_LOGIN_TIMEOUT -> TimeUnit.SECONDS,
ConfVars.HIVE_SERVER2_THRIFT_WORKER_KEEPALIVE_TIME -> TimeUnit.SECONDS,
ConfVars.HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT -> TimeUnit.SECONDS,
ConfVars.HIVE_SERVER2_ASYNC_EXEC_KEEPALIVE_TIME -> TimeUnit.SECONDS,
ConfVars.HIVE_SERVER2_LONG_POLLING_TIMEOUT -> TimeUnit.MILLISECONDS,
ConfVars.HIVE_SERVER2_SESSION_CHECK_INTERVAL -> TimeUnit.MILLISECONDS,
ConfVars.HIVE_SERVER2_IDLE_SESSION_TIMEOUT -> TimeUnit.MILLISECONDS,
ConfVars.HIVE_SERVER2_IDLE_OPERATION_TIMEOUT -> TimeUnit.MILLISECONDS,
ConfVars.SERVER_READ_SOCKET_TIMEOUT -> TimeUnit.SECONDS,
ConfVars.HIVE_LOCALIZE_RESOURCE_WAIT_INTERVAL -> TimeUnit.MILLISECONDS,
ConfVars.SPARK_CLIENT_FUTURE_TIMEOUT -> TimeUnit.SECONDS,
ConfVars.SPARK_JOB_MONITOR_TIMEOUT -> TimeUnit.SECONDS,
ConfVars.SPARK_RPC_CLIENT_CONNECT_TIMEOUT -> TimeUnit.MILLISECONDS,
ConfVars.SPARK_RPC_CLIENT_HANDSHAKE_TIMEOUT -> TimeUnit.MILLISECONDS
).map { case (confVar, unit) =>
confVar.varname -> HiveConf.getTimeVar(hadoopConf, confVar, unit).toString
}
// The following configurations were removed by HIVE-12164(Hive 2.0)
val hardcodingTimeVars = Seq(
("hive.stats.jdbc.timeout", "30s") -> TimeUnit.SECONDS,
("hive.stats.retries.wait", "3000ms") -> TimeUnit.MILLISECONDS
).map { case ((key, defaultValue), unit) =>
val value = hadoopConf.get(key, defaultValue)
key -> HiveConf.toTime(value, unit, unit).toString
}
(commonTimeVars ++ hardcodingTimeVars).toMap
}
/**
* Check current Thread's SessionState type
* @return true when SessionState.get returns an instance of CliSessionState,
* false when it gets non-CliSessionState instance or null
*/
def isCliSessionState(): Boolean = {
val state = SessionState.get
var temp: Class[_] = if (state != null) state.getClass else null
var found = false
while (temp != null && !found) {
found = temp.getName == "org.apache.hadoop.hive.cli.CliSessionState"
temp = temp.getSuperclass
}
found
}
/**
* Create a [[HiveClient]] used for execution.
*
* Currently this must always be the Hive built-in version that packaged
* with Spark SQL. This copy of the client is used for execution related tasks like
* registering temporary functions or ensuring that the ThreadLocal SessionState is
* correctly populated. This copy of Hive is *not* used for storing persistent metadata,
* and only point to a dummy metastore in a temporary directory.
*/
protected[hive] def newClientForExecution(
conf: SparkConf,
hadoopConf: Configuration): HiveClientImpl = {
logInfo(s"Initializing execution hive, version $builtinHiveVersion")
val loader = new IsolatedClientLoader(
version = IsolatedClientLoader.hiveVersion(builtinHiveVersion),
sparkConf = conf,
execJars = Seq.empty,
hadoopConf = hadoopConf,
config = newTemporaryConfiguration(useInMemoryDerby = true),
isolationOn = false,
baseClassLoader = Utils.getContextOrSparkClassLoader)
loader.createClient().asInstanceOf[HiveClientImpl]
}
/**
* Create a [[HiveClient]] used to retrieve metadata from the Hive MetaStore.
*
* The version of the Hive client that is used here must match the metastore that is configured
* in the hive-site.xml file.
*/
protected[hive] def newClientForMetadata(
conf: SparkConf,
hadoopConf: Configuration): HiveClient = {
val configurations = formatTimeVarsForHiveClient(hadoopConf)
newClientForMetadata(conf, hadoopConf, configurations)
}
  /**
   * Creates a [[HiveClient]] used to retrieve metadata from the Hive MetaStore, using
   * pre-formatted Hive configuration values.
   *
   * The client is built in one of four modes selected by the metastore-jars setting:
   * "builtin" (Spark's bundled Hive classes, version must match), "maven" (resolve the
   * requested version), "path" (explicit jar path list with glob support), or a raw
   * classpath string split on the platform path separator.
   */
  protected[hive] def newClientForMetadata(
      conf: SparkConf,
      hadoopConf: Configuration,
      configurations: Map[String, String]): HiveClient = {
    val sqlConf = new SQLConf
    sqlConf.setConf(SQLContext.getSQLProperties(conf))
    val hiveMetastoreVersion = HiveUtils.hiveMetastoreVersion(sqlConf)
    val hiveMetastoreJars = HiveUtils.hiveMetastoreJars(sqlConf)
    val hiveMetastoreSharedPrefixes = HiveUtils.hiveMetastoreSharedPrefixes(sqlConf)
    val hiveMetastoreBarrierPrefixes = HiveUtils.hiveMetastoreBarrierPrefixes(sqlConf)
    val metaVersion = IsolatedClientLoader.hiveVersion(hiveMetastoreVersion)
    // Expands a path whose last segment is "*" to every .jar file in the parent
    // directory; otherwise returns the single file as a URL.
    def addLocalHiveJars(file: File): Seq[URL] = {
      if (file.getName == "*") {
        val files = file.getParentFile.listFiles()
        if (files == null) {
          logWarning(s"Hive jar path '${file.getPath}' does not exist.")
          Nil
        } else {
          files.filter(_.getName.toLowerCase(Locale.ROOT).endsWith(".jar")).map(_.toURI.toURL).toSeq
        }
      } else {
        file.toURI.toURL :: Nil
      }
    }
    val isolatedLoader = if (hiveMetastoreJars == "builtin") {
      // "builtin" only works when the metastore version equals the bundled Hive version.
      if (builtinHiveVersion != hiveMetastoreVersion) {
        throw new IllegalArgumentException(
          "Builtin jars can only be used when hive execution version == hive metastore version. " +
            s"Execution: $builtinHiveVersion != Metastore: $hiveMetastoreVersion. " +
            s"Specify a valid path to the correct hive jars using ${HIVE_METASTORE_JARS.key} " +
            s"or change ${HIVE_METASTORE_VERSION.key} to $builtinHiveVersion.")
      }
      // We recursively find all jars in the class loader chain,
      // starting from the given classLoader.
      def allJars(classLoader: ClassLoader): Array[URL] = classLoader match {
        case null => Array.empty[URL]
        case childFirst: ChildFirstURLClassLoader =>
          childFirst.getURLs() ++ allJars(Utils.getSparkClassLoader)
        case urlClassLoader: URLClassLoader =>
          urlClassLoader.getURLs ++ allJars(urlClassLoader.getParent)
        case other => allJars(other.getParent)
      }
      val classLoader = Utils.getContextOrSparkClassLoader
      val jars: Array[URL] = if (SystemUtils.isJavaVersionAtLeast(JavaVersion.JAVA_9)) {
        // Do nothing. The system classloader is no longer a URLClassLoader in Java 9,
        // so it won't match the case in allJars. It no longer exposes URLs of
        // the system classpath
        Array.empty[URL]
      } else {
        val loadedJars = allJars(classLoader)
        // Verify at least one jar was found
        if (loadedJars.length == 0) {
          throw new IllegalArgumentException(
            "Unable to locate hive jars to connect to metastore. " +
              s"Please set ${HIVE_METASTORE_JARS.key}.")
        }
        loadedJars
      }
      logInfo(
        s"Initializing HiveMetastoreConnection version $hiveMetastoreVersion using Spark classes.")
      new IsolatedClientLoader(
        version = metaVersion,
        sparkConf = conf,
        hadoopConf = hadoopConf,
        execJars = jars.toSeq,
        config = configurations,
        isolationOn = !isCliSessionState(),
        barrierPrefixes = hiveMetastoreBarrierPrefixes,
        sharedPrefixes = hiveMetastoreSharedPrefixes)
    } else if (hiveMetastoreJars == "maven") {
      // TODO: Support for loading the jars from an already downloaded location.
      logInfo(
        s"Initializing HiveMetastoreConnection version $hiveMetastoreVersion using maven.")
      IsolatedClientLoader.forVersion(
        hiveMetastoreVersion = hiveMetastoreVersion,
        hadoopVersion = VersionInfo.getVersion,
        sparkConf = conf,
        hadoopConf = hadoopConf,
        config = configurations,
        barrierPrefixes = hiveMetastoreBarrierPrefixes,
        sharedPrefixes = hiveMetastoreSharedPrefixes)
    } else if (hiveMetastoreJars == "path") {
      // Convert to files and expand any directories.
      val jars =
        HiveUtils.hiveMetastoreJarsPath(sqlConf)
          .flatMap {
            case path if path.contains("\\\\") && Utils.isWindows =>
              addLocalHiveJars(new File(path))
            case path =>
              DataSource.checkAndGlobPathIfNecessary(
                pathStrings = Seq(path),
                hadoopConf = hadoopConf,
                checkEmptyGlobPath = true,
                checkFilesExist = false,
                enableGlobbing = true
              ).map(_.toUri.toURL)
          }
      logInfo(
        s"Initializing HiveMetastoreConnection version $hiveMetastoreVersion " +
          s"using path: ${jars.mkString(";")}")
      new IsolatedClientLoader(
        version = metaVersion,
        sparkConf = conf,
        hadoopConf = hadoopConf,
        execJars = jars,
        config = configurations,
        isolationOn = true,
        barrierPrefixes = hiveMetastoreBarrierPrefixes,
        sharedPrefixes = hiveMetastoreSharedPrefixes)
    } else {
      // Fallback: treat the setting as a raw classpath string.
      // Convert to files and expand any directories.
      val jars =
        hiveMetastoreJars
          .split(File.pathSeparator)
          .flatMap { path =>
            addLocalHiveJars(new File(path))
          }
      logInfo(
        s"Initializing HiveMetastoreConnection version $hiveMetastoreVersion " +
          s"using ${jars.mkString(":")}")
      new IsolatedClientLoader(
        version = metaVersion,
        sparkConf = conf,
        hadoopConf = hadoopConf,
        execJars = jars.toSeq,
        config = configurations,
        isolationOn = true,
        barrierPrefixes = hiveMetastoreBarrierPrefixes,
        sharedPrefixes = hiveMetastoreSharedPrefixes)
    }
    isolatedLoader.createClient()
  }
  /**
   * Constructs a configuration for hive, where the metastore is located in a temp directory.
   *
   * @param useInMemoryDerby when true, the Derby connection URL uses the "memory:"
   *                         subprotocol instead of an on-disk database.
   * @return an immutable property map suitable for passing to the client loader.
   */
  def newTemporaryConfiguration(useInMemoryDerby: Boolean): Map[String, String] = {
    val withInMemoryMode = if (useInMemoryDerby) "memory:" else ""
    val tempDir = Utils.createTempDir()
    val localMetastore = new File(tempDir, "metastore")
    val propMap: HashMap[String, String] = HashMap()
    // We have to mask all properties in hive-site.xml that relates to metastore data source
    // as we used a local metastore here.
    HiveConf.ConfVars.values().foreach { confvar =>
      if (confvar.varname.contains("datanucleus") || confvar.varname.contains("jdo")
        || confvar.varname.contains("hive.metastore.rawstore.impl")) {
        propMap.put(confvar.varname, confvar.getDefaultExpr())
      }
    }
    propMap.put(WAREHOUSE_PATH.key, localMetastore.toURI.toString)
    propMap.put(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname,
      s"jdbc:derby:${withInMemoryMode};databaseName=${localMetastore.getAbsolutePath};create=true")
    propMap.put("datanucleus.rdbms.datastoreAdapterClassName",
      "org.datanucleus.store.rdbms.adapter.DerbyAdapter")
    // Disable schema verification and allow schema auto-creation in the
    // Derby database, in case the config for the metastore is set otherwise.
    // Without these settings, starting the client fails with
    // MetaException(message:Version information not found in metastore.)
    propMap.put("hive.metastore.schema.verification", "false")
    propMap.put("datanucleus.schema.autoCreateAll", "true")
    // SPARK-11783: When "hive.metastore.uris" is set, the metastore connection mode will be
    // remote (https://cwiki.apache.org/confluence/display/Hive/AdminManual+MetastoreAdmin
    // mentions that "If hive.metastore.uris is empty local mode is assumed, remote otherwise").
    // Remote means that the metastore server is running in its own process.
    // When the mode is remote, configurations like "javax.jdo.option.ConnectionURL" will not be
    // used (because they are used by remote metastore server that talks to the database).
    // Because execution Hive should always connects to an embedded derby metastore.
    // We have to remove the value of hive.metastore.uris. So, the execution Hive client connects
    // to the actual embedded derby metastore instead of the remote metastore.
    // You can search HiveConf.ConfVars.METASTOREURIS in the code of HiveConf (in Hive's repo).
    // Then, you will find that the local metastore mode is only set to true when
    // hive.metastore.uris is not set.
    propMap.put(ConfVars.METASTOREURIS.varname, "")
    // The execution client will generate garbage events, therefore the listeners that are generated
    // for the execution clients are useless. In order to not output garbage, we don't generate
    // these listeners.
    propMap.put(ConfVars.METASTORE_PRE_EVENT_LISTENERS.varname, "")
    propMap.put(ConfVars.METASTORE_EVENT_LISTENERS.varname, "")
    propMap.put(ConfVars.METASTORE_END_FUNCTION_LISTENERS.varname, "")
    // SPARK-21451: Spark will gather all `spark.hadoop.*` properties from a `SparkConf` to a
    // Hadoop Configuration internally, as long as it happens after SparkContext initialized.
    // Some instances such as `CliSessionState` used in `SparkSQLCliDriver` may also rely on these
    // Configuration. But it happens before SparkContext initialized, we need to take them from
    // system properties in the form of regular hadoop configurations.
    SparkHadoopUtil.get.appendSparkHadoopConfigs(sys.props.toMap, propMap)
    SparkHadoopUtil.get.appendSparkHiveConfigs(sys.props.toMap, propMap)
    propMap.toMap
  }
/**
* Infers the schema for Hive serde tables and returns the CatalogTable with the inferred schema.
* When the tables are data source tables or the schema already exists, returns the original
* CatalogTable.
*/
def inferSchema(table: CatalogTable): CatalogTable = {
if (DDLUtils.isDatasourceTable(table) || table.dataSchema.nonEmpty) {
table
} else {
val hiveTable = HiveClientImpl.toHiveTable(table)
// Note: Hive separates partition columns and the schema, but for us the
// partition columns are part of the schema
val partCols = hiveTable.getPartCols.asScala.map(HiveClientImpl.fromHiveColumn)
val dataCols = hiveTable.getCols.asScala.map(HiveClientImpl.fromHiveColumn)
table.copy(schema = StructType((dataCols ++ partCols).toSeq))
}
}
}
| BryanCutler/spark | sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala | Scala | apache-2.0 | 27,560 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.ml.metrics.distances
import org.apache.flink.ml.math.Vector
/** Euclidean (L2) distance metric.
  *
  * The distance between two vectors is the square root of the sum of the squared
  * per-coordinate differences, i.e. the square root of the value produced by
  * [[SquaredEuclideanDistanceMetric]].
  *
  * If the true distance is not needed and distances are only compared with one another,
  * [[SquaredEuclideanDistanceMetric]] is faster because it skips the square-root step.
  *
  * @see http://en.wikipedia.org/wiki/Euclidean_distance
  */
class EuclideanDistanceMetric extends SquaredEuclideanDistanceMetric {

  override def distance(a: Vector, b: Vector): Double = {
    val squaredDistance = super.distance(a, b)
    math.sqrt(squaredDistance)
  }
}
/** Factory for [[EuclideanDistanceMetric]] instances. */
object EuclideanDistanceMetric {
  def apply(): EuclideanDistanceMetric = new EuclideanDistanceMetric()
}
| hongyuhong/flink | flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/metrics/distances/EuclideanDistanceMetric.scala | Scala | apache-2.0 | 1,674 |
package scredis.protocol.requests
import scredis.protocol._
import scredis.serialization.Writer
import scala.collection.generic.CanBuildFrom
import scala.language.higherKinds
object PubSubRequests {
  import scredis.serialization.Implicits.{intReader, stringReader}
  // Command descriptors for the Redis pub/sub commands; the case classes below pair each
  // descriptor with its arguments and response decoder.
  object PSubscribe extends Command("PSUBSCRIBE")
  object Publish extends Command("PUBLISH") with WriteCommand
  object PubSubChannels extends Command("PUBSUB", "CHANNELS")
  object PubSubNumSub extends Command("PUBSUB", "NUMSUB")
  object PubSubNumPat extends ZeroArgCommand("PUBSUB", "NUMPAT")
  object PUnsubscribe extends Command("PUNSUBSCRIBE")
  object Subscribe extends Command("SUBSCRIBE")
  object Unsubscribe extends Command("UNSUBSCRIBE")
  // Subscribes to the given glob-style patterns.
  // NOTE(review): decode is intentionally unimplemented (???) for the (un)subscribe
  // requests — presumably their replies are consumed by the pub/sub connection machinery
  // rather than decoded here; confirm before relying on it.
  case class PSubscribe(patterns: String*) extends Request[Int](
    PSubscribe, patterns: _*
  ) {
    override def decode = ???
  }
  // Publishes a message to a channel; the reply is the number of receiving clients.
  case class Publish[W: Writer](channel: String, message: W) extends Request[Long](
    Publish, channel, implicitly[Writer[W]].write(message)
  ) {
    override def decode = {
      case IntegerResponse(value) => value
    }
  }
  // Lists active channels, optionally filtered by a pattern.
  case class PubSubChannels[CC[X] <: Traversable[X]](patternOpt: Option[String])(
    implicit cbf: CanBuildFrom[Nothing, String, CC[String]]
  ) extends Request[CC[String]](PubSubChannels, patternOpt.toSeq: _*) {
    override def decode = {
      case a: ArrayResponse => a.parsed[String, CC] {
        case b: BulkStringResponse => b.flattened[String]
      }
    }
  }
  // Returns, per requested channel, the number of subscribers.
  case class PubSubNumSub[CC[X, Y] <: collection.Map[X, Y]](channels: String*)(
    implicit cbf: CanBuildFrom[Nothing, (String, Int), CC[String, Int]]
  ) extends Request[CC[String, Int]](PubSubNumSub, channels: _*) {
    override def decode = {
      case a: ArrayResponse => a.parsedAsPairsMap[String, Int, CC] {
        case b: BulkStringResponse => b.flattened[String]
      } {
        case b: BulkStringResponse => b.flattened[Int]
      }
    }
  }
  // Returns the number of pattern subscriptions across all clients.
  case class PubSubNumPat() extends Request[Long](PubSubNumPat) {
    override def decode = {
      case IntegerResponse(value) => value
    }
  }
  case class PUnsubscribe(patterns: String*) extends Request[Int](
    PUnsubscribe, patterns: _*
  ) {
    override def decode = ???
  }
  case class Subscribe(channels: String*) extends Request[Int](
    Subscribe, channels: _*
  ) {
    override def decode = ???
  }
  case class Unsubscribe(channels: String*) extends Request[Int](
    Unsubscribe, channels: _*
  ) {
    override def decode = ???
  }
} | 1and1/scredis | src/main/scala/scredis/protocol/requests/PubSubRequests.scala | Scala | apache-2.0 | 2,500 |
package gitbucket.core.controller.api
import java.io.File
import gitbucket.core.api._
import gitbucket.core.controller.ControllerBase
import gitbucket.core.service.{AccountService, ReleaseService}
import gitbucket.core.util.Directory.getReleaseFilesDir
import gitbucket.core.util.{FileUtil, ReferrerAuthenticator, RepositoryName, WritableUsersAuthenticator}
import gitbucket.core.util.Implicits._
import org.apache.commons.io.FileUtils
import org.scalatra.NoContent
/**
 * GitHub-compatible REST endpoints for repository releases and release assets.
 * Read endpoints require referrer access; mutating endpoints require write access.
 */
trait ApiReleaseControllerBase extends ControllerBase {
  self: AccountService with ReleaseService with ReferrerAuthenticator with WritableUsersAuthenticator =>
  /**
   * i. List releases for a repository
   * https://developer.github.com/v3/repos/releases/#list-releases-for-a-repository
   */
  get("/api/v3/repos/:owner/:repository/releases")(referrersOnly { repository =>
    val releases = getReleases(repository.owner, repository.name)
    JsonFormat(releases.map { rel =>
      val assets = getReleaseAssets(repository.owner, repository.name, rel.tag)
      ApiRelease(rel, assets, getAccountByUserName(rel.author).get, RepositoryName(repository))
    })
  })
  /**
   * ii. Get a single release
   * https://developer.github.com/v3/repos/releases/#get-a-single-release
   * GitBucket doesn't have release id
   */
  /**
   * iii. Get the latest release
   * https://developer.github.com/v3/repos/releases/#get-the-latest-release
   */
  get("/api/v3/repos/:owner/:repository/releases/latest")(referrersOnly { repository =>
    // The last element of the release list is treated as the latest.
    getReleases(repository.owner, repository.name).lastOption
      .map { release =>
        val assets = getReleaseAssets(repository.owner, repository.name, release.tag)
        JsonFormat(ApiRelease(release, assets, getAccountByUserName(release.author).get, RepositoryName(repository)))
      }
      .getOrElse {
        NotFound()
      }
  })
  /**
   * iv. Get a release by tag name
   * https://developer.github.com/v3/repos/releases/#get-a-release-by-tag-name
   */
  get("/api/v3/repos/:owner/:repository/releases/tags/:tag")(referrersOnly { repository =>
    val tag = params("tag")
    getRelease(repository.owner, repository.name, tag)
      .map { release =>
        val assets = getReleaseAssets(repository.owner, repository.name, tag)
        JsonFormat(ApiRelease(release, assets, getAccountByUserName(release.author).get, RepositoryName(repository)))
      }
      .getOrElse {
        NotFound()
      }
  })
  /**
   * v. Create a release
   * https://developer.github.com/v3/repos/releases/#create-a-release
   */
  post("/api/v3/repos/:owner/:repository/releases")(writableUsersOnly { repository =>
    (for {
      data <- extractFromJsonBody[CreateARelease]
    } yield {
      createRelease(
        repository.owner,
        repository.name,
        data.name.getOrElse(data.tag_name),
        data.body,
        data.tag_name,
        context.loginAccount.get
      )
      val release = getRelease(repository.owner, repository.name, data.tag_name).get
      val assets = getReleaseAssets(repository.owner, repository.name, data.tag_name)
      JsonFormat(ApiRelease(release, assets, context.loginAccount.get, RepositoryName(repository)))
    })
  })
  /**
   * vi. Edit a release
   * https://developer.github.com/v3/repos/releases/#edit-a-release
   * Incompatibility info: GitHub API requires :release_id, but GitBucket API requires :tag_name
   */
  patch("/api/v3/repos/:owner/:repository/releases/:tag")(writableUsersOnly { repository =>
    (for {
      data <- extractFromJsonBody[CreateARelease]
    } yield {
      val tag = params("tag")
      // NOTE(review): the release is updated under `tag` (the URL parameter) but re-read
      // under `data.tag_name` (the request body). If the two differ, the `.get` below may
      // throw — confirm whether renaming a release's tag via this route is intended.
      updateRelease(repository.owner, repository.name, tag, data.name.getOrElse(data.tag_name), data.body)
      val release = getRelease(repository.owner, repository.name, data.tag_name).get
      val assets = getReleaseAssets(repository.owner, repository.name, data.tag_name)
      JsonFormat(ApiRelease(release, assets, context.loginAccount.get, RepositoryName(repository)))
    })
  })
  /**
   * vii. Delete a release
   * https://developer.github.com/v3/repos/releases/#delete-a-release
   * Incompatibility info: GitHub API requires :release_id, but GitBucket API requires :tag_name
   */
  delete("/api/v3/repos/:owner/:repository/releases/:tag")(writableUsersOnly { repository =>
    val tag = params("tag")
    deleteRelease(repository.owner, repository.name, tag)
    NoContent()
  })
  /**
   * viii. List assets for a release
   * https://developer.github.com/v3/repos/releases/#list-assets-for-a-release
   */
  /**
   * ix. Upload a release asset
   * https://developer.github.com/v3/repos/releases/#upload-a-release-asset
   */
  post("/api/v3/repos/:owner/:repository/releases/:tag/assets")(writableUsersOnly {
    repository =>
      val name = params("name")
      val tag = params("tag")
      getRelease(repository.owner, repository.name, tag)
        .map { release =>
          val fileId = FileUtil.generateFileId
          // NOTE(review): InputStream.available() is only an estimate of the bytes readable
          // without blocking, not the request body length, and read() is not guaranteed to
          // fill the buffer in one call — large uploads may be truncated. Confirm.
          val buf = new Array[Byte](request.inputStream.available())
          request.inputStream.read(buf)
          FileUtils.writeByteArrayToFile(
            new File(
              getReleaseFilesDir(repository.owner, repository.name),
              FileUtil.checkFilename(tag + "/" + fileId)
            ),
            buf
          )
          createReleaseAsset(
            repository.owner,
            repository.name,
            tag,
            fileId,
            name,
            request.contentLength.getOrElse(0),
            context.loginAccount.get
          )
          getReleaseAsset(repository.owner, repository.name, tag, fileId)
            .map { asset =>
              JsonFormat(ApiReleaseAsset(asset, RepositoryName(repository)))
            }
            .getOrElse {
              ApiError("Unknown error")
            }
        }
        .getOrElse(NotFound())
  })
  /**
   * x. Get a single release asset
   * https://developer.github.com/v3/repos/releases/#get-a-single-release-asset
   * Incompatibility info: GitHub requires only asset_id, but GitBucket requires tag and fileId(file_id).
   */
  get("/api/v3/repos/:owner/:repository/releases/:tag/assets/:fileId")(referrersOnly { repository =>
    val tag = params("tag")
    val fileId = params("fileId")
    getReleaseAsset(repository.owner, repository.name, tag, fileId)
      .map { asset =>
        JsonFormat(ApiReleaseAsset(asset, RepositoryName(repository)))
      }
      .getOrElse(NotFound())
  })
  /*
   * xi. Edit a release asset
   * https://developer.github.com/v3/repos/releases/#edit-a-release-asset
   */
  /*
   * xii. Delete a release asset
   * https://developer.github.com/v3/repos/releases/#edit-a-release-asset
   */
}
| imeszaros/gitbucket | src/main/scala/gitbucket/core/controller/api/ApiReleaseControllerBase.scala | Scala | apache-2.0 | 6,653 |
package com.blrest.dao
import com.blrest.model.{Tag, TagResponse}
import reactivemongo.api.{QueryOpts, DB}
import reactivemongo.api.collections.default.BSONCollection
import akka.actor.{ActorRef, Actor, ActorSystem}
import reactivemongo.core.commands.Count
import scala.util.Random
import reactivemongo.bson.{BSONObjectID, BSONDocument}
import scala.concurrent.Future
import com.typesafe.scalalogging.slf4j.Logging
import akka.pattern._
/**
* Created by ccarrier for bl-rest.
* at 2:05 PM on 12/20/13
*/
/** Data-access interface for tags and tag responses. */
trait TagDao {
  /** Picks one tag at random from the backing store, or None if none exist. */
  def getRandomTag: Future[Option[Tag]]
  /** Persists a user's response to a tag, returning the response on success. */
  def saveTagResponse(tagResponse: TagResponse): Either[Exception, TagResponse]
}
class MongoTagDao(db: DB, tagCollection: BSONCollection, tagResponseCollection: BSONCollection, system: ActorSystem, neo4jActor: ActorRef) extends TagDao with Logging {
implicit val context = system.dispatcher
def getRandomTag: Future[Option[Tag]] = {
val futureCount = db.command(Count(tagCollection.name))
futureCount.flatMap { count =>
val skip = Random.nextInt(count)
for (
tag <- tagCollection.find(BSONDocument()).options(QueryOpts(skipN = skip)).one[Tag]
) yield tag.map(x => x.copy(questionText = Some(x.displayPattern.format(x.name))))
}
}
  /**
   * Persists the given response and echoes it back.
   *
   * NOTE(review): the insert future is fire-and-forget — a failed write is never observed
   * by the caller, which always receives Right. Confirm this best-effort behavior is
   * intended before relying on the Either for error reporting.
   */
  def saveTagResponse(tagResponse: TagResponse): Either[Exception, TagResponse] = {
    tagResponseCollection.insert(tagResponse)
    //neo4jActor ! tagResponse
    Right(tagResponse)
  }
} | ctcarrier/bl-rest | src/main/scala/com/blrest/dao/TagDao.scala | Scala | mit | 1,429 |
package at.forsyte.apalache.tla.bmcmt.caches
import at.forsyte.apalache.tla.bmcmt.smt.SolverContext
import at.forsyte.apalache.tla.bmcmt.types.ConstT
import at.forsyte.apalache.tla.bmcmt.{Arena, ArenaCell}
import at.forsyte.apalache.tla.lir.convenience.tla
/**
* A cache for string constants that are translated as uninterpreted constants in SMT.
* Since two TLA+ strings are equal iff they are literally the same string, we force
* inequality between all the respective SMT constants.
*
* @author Igor Konnov
*/
/**
 * A cache for string constants that are translated as uninterpreted constants in SMT.
 *
 * Two TLA+ strings are equal iff they are literally the same string, so every freshly
 * cached constant is asserted to be distinct from all previously cached ones.
 *
 * @author Igor Konnov
 */
class StrValueCache(solverContext: SolverContext) extends AbstractCache[Arena, String, ArenaCell] with Serializable {

  override protected def create(arena: Arena, strValue: String): (Arena, ArenaCell) = {
    // Allocate a fresh cell of the uninterpreted-constant sort.
    val arenaWithCell = arena.appendCell(ConstT())
    val freshCell = arenaWithCell.topCell
    // Force pairwise inequality with every constant cached so far.
    values().foreach { cached =>
      solverContext.assertGroundExpr(tla.neql(freshCell.toNameEx, cached.toNameEx))
    }
    solverContext.log("; cached \\"%s\\" to %s".format(strValue, freshCell))
    (arenaWithCell, freshCell)
  }
}
| konnov/dach | tla-bmcmt/src/main/scala/at/forsyte/apalache/tla/bmcmt/caches/StrValueCache.scala | Scala | apache-2.0 | 1,125 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import uk.gov.hmrc.ct.accounts.frs102.retriever.Frs102AccountsBoxRetriever
import uk.gov.hmrc.ct.box._
/** CT accounts box AC212B: tangible assets — land and buildings — cost — revaluations. */
case class AC212B(value: Option[Int]) extends CtBoxIdentifier(name = "Tangible assets - Land and buildings - cost - revaluations")
  with CtOptionalInteger
  with Input
  with ValidatableBox[Frs102AccountsBoxRetriever]
  with Validators {
  // Only the standard money validation is applied to this box's optional value.
  override def validate(boxRetriever: Frs102AccountsBoxRetriever): Set[CtValidation] = {
    collectErrors(
      validateMoney(value)
    )
  }
}
| pncampbell/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC212B.scala | Scala | apache-2.0 | 1,142 |
/*
* Copyright (C) 2016-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.internal.kafka
import java.io.File
import java.io.IOException
import java.nio.file.FileVisitOption
import java.nio.file.Files
import java.nio.file.Paths
import java.util.Properties
import java.util.concurrent.atomic.AtomicReference
import org.apache.curator.test.TestingServer
import org.slf4j.LoggerFactory
import com.lightbend.lagom.internal.util.PropertiesLoader
import javax.management.InstanceNotFoundException
import kafka.server.KafkaServerStartable
import scala.collection.JavaConverters._
import java.util.Comparator
/**
 * An embedded Kafka broker (plus its embedded ZooKeeper) for local development/testing.
 * Lifecycle transitions are coordinated lock-free via an AtomicReference CAS.
 */
class KafkaLocalServer private (kafkaProperties: Properties, zooKeeperServer: KafkaLocalServer.ZooKeeperLocalServer) {
  // null means "not running"; a non-null value is the single live broker instance.
  private val kafkaServerRef = new AtomicReference[KafkaServerStartable](null)
  /** Starts ZooKeeper and the Kafka broker; no-op if already running. */
  def start(): Unit = {
    if (kafkaServerRef.get == null) {
      // There is a possible race condition here. However, instead of attempting to avoid it
      // by using a lock, we are working with it and do the necessary clean up if indeed we
      // end up creating two Kafka server instances.
      val newKafkaServer = KafkaServerStartable.fromProps(kafkaProperties)
      if (kafkaServerRef.compareAndSet(null, newKafkaServer)) {
        zooKeeperServer.start()
        val kafkaServer = kafkaServerRef.get()
        kafkaServer.startup()
      } else newKafkaServer.shutdown()
    }
    // else it's already running
  }
  // this exists only for testing purposes
  private[lagom] def restart(): Unit = {
    val kafkaServer = kafkaServerRef.get()
    if (kafkaServer != null) {
      kafkaServer.shutdown()
      kafkaServer.startup()
    }
  }
  /** Stops the broker and ZooKeeper; no-op if already stopped. */
  def stop(): Unit = {
    // getAndSet guarantees at most one caller performs the shutdown.
    val kafkaServer = kafkaServerRef.getAndSet(null)
    if (kafkaServer != null) {
      // NOTE(review): catching Throwable also swallows fatal errors (OOM, etc.);
      // consider NonFatal — confirm the broad catch is deliberate best-effort cleanup.
      try kafkaServer.shutdown()
      catch {
        case _: Throwable => ()
      }
      try zooKeeperServer.stop()
      catch {
        case _: InstanceNotFoundException => () // swallow, see https://github.com/Netflix/curator/issues/121 for why it's ok to do so
      }
    }
    // else it's already stopped
  }
}
/** Factory and helpers for [[KafkaLocalServer]]: property construction, data-directory
 * resolution/cleanup, and the embedded ZooKeeper wrapper. */
object KafkaLocalServer {
  final val DefaultPort = 9092
  final val DefaultPropertiesFile = "/kafka-server.properties"
  final val DefaultResetOnStart = true
  private final val KafkaDataFolderName = "kafka_data"
  private val Log = LoggerFactory.getLogger(classOf[KafkaLocalServer])
  private lazy val tempDir = System.getProperty("java.io.tmpdir")
  def apply(cleanOnStart: Boolean): KafkaLocalServer = this(DefaultPort, ZooKeeperLocalServer.DefaultPort, DefaultPropertiesFile, Some(tempDir), cleanOnStart)
  def apply(kafkaPort: Int, zookeeperServerPort: Int, kafkaPropertiesFile: String, targetDir: Option[String], cleanOnStart: Boolean): KafkaLocalServer = {
    val kafkaDataDir = dataDirectory(targetDir, KafkaDataFolderName)
    Log.info(s"Kafka data directory is $kafkaDataDir.")
    val kafkaProperties = createKafkaProperties(kafkaPropertiesFile, kafkaPort, zookeeperServerPort, kafkaDataDir)
    // Optionally wipe stale data from a previous run before starting.
    if (cleanOnStart) deleteDirectory(kafkaDataDir)
    new KafkaLocalServer(kafkaProperties, new ZooKeeperLocalServer(zookeeperServerPort, cleanOnStart, targetDir))
  }
  /**
   * Creates a Properties instance for Kafka customized with values passed in argument.
   */
  private def createKafkaProperties(kafkaPropertiesFile: String, kafkaPort: Int, zookeeperServerPort: Int, dataDir: File): Properties = {
    val kafkaProperties = PropertiesLoader.from(kafkaPropertiesFile)
    kafkaProperties.setProperty("log.dirs", dataDir.getAbsolutePath)
    kafkaProperties.setProperty("listeners", s"PLAINTEXT://:$kafkaPort")
    kafkaProperties.setProperty("zookeeper.connect", s"localhost:$zookeeperServerPort")
    kafkaProperties
  }
  // Recursively deletes the directory tree (deepest paths first); failures are logged,
  // not propagated.
  private def deleteDirectory(directory: File): Unit = {
    if (directory.exists()) try {
      val rootPath = Paths.get(directory.getAbsolutePath)
      val files = Files.walk(rootPath, FileVisitOption.FOLLOW_LINKS).sorted(Comparator.reverseOrder()).iterator().asScala
      files.foreach(Files.delete)
      Log.debug(s"Deleted ${directory.getAbsolutePath}.")
    } catch {
      case e: Exception => Log.warn(s"Failed to delete directory ${directory.getAbsolutePath}.", e)
    }
  }
  /**
   * If the passed `baseDirPath` points to an existing directory for which the application has write access,
   * return a File instance that points to `baseDirPath/directoryName`. Otherwise, return a File instance that
   * points to `tempDir/directoryName` where `tempDir` is the system temporary folder returned by the system
   * property "java.io.tmpdir".
   *
   * @param baseDirPath The path to the base directory.
   * @param directoryName The name to use for the child folder in the base directory.
   * @throws IllegalArgumentException If the passed `directoryName` is not a valid directory name.
   * @return A file directory that points to either `baseDirPath/directoryName` or `tempDir/directoryName`.
   */
  private def dataDirectory(baseDirPath: Option[String], directoryName: String): File = {
    lazy val tempDirMessage = s"Will attempt to create folder $directoryName in the system temporary directory: $tempDir"
    val maybeBaseDir = baseDirPath.map(new File(_)).filter(f => f.exists())
    val baseDir = {
      maybeBaseDir match {
        case None =>
          Log.warn(s"Directory $baseDirPath doesn't exist. $tempDirMessage.")
          new File(tempDir)
        case Some(directory) =>
          if (!directory.isDirectory()) {
            Log.warn(s"$baseDirPath is not a directory. $tempDirMessage.")
            new File(tempDir)
          } else if (!directory.canWrite()) {
            Log.warn(s"The application does not have write access to directory $baseDirPath. $tempDirMessage.")
            new File(tempDir)
          } else directory
      }
    }
    val dataDirectory = new File(baseDir, directoryName)
    if (dataDirectory.exists() && !dataDirectory.isDirectory())
      throw new IllegalArgumentException(s"Cannot use $directoryName as a directory name because a file with that name already exists in $dataDirectory.")
    dataDirectory
  }
  /** Embedded ZooKeeper wrapper with the same CAS-based single-start lifecycle as the broker. */
  private class ZooKeeperLocalServer(port: Int, cleanOnStart: Boolean, targetDir: Option[String]) {
    // null means "not running"; non-null is the single live TestingServer.
    private val zooKeeperServerRef = new AtomicReference[TestingServer](null)
    def start(): Unit = {
      val zookeeperDataDir = dataDirectory(targetDir, ZooKeeperLocalServer.ZookeeperDataFolderName)
      if (zooKeeperServerRef.compareAndSet(null, new TestingServer(port, zookeeperDataDir, /*start=*/ false))) {
        Log.info(s"Zookeeper data directory is $zookeeperDataDir.")
        if (cleanOnStart) deleteDirectory(zookeeperDataDir)
        val zooKeeperServer = zooKeeperServerRef.get
        zooKeeperServer.start() // blocking operation
      }
      // else it's already running
    }
    def stop(): Unit = {
      val zooKeeperServer = zooKeeperServerRef.getAndSet(null)
      if (zooKeeperServer != null)
        try zooKeeperServer.stop()
        catch {
          case _: IOException => () // nothing to do if an exception is thrown while shutting down
        }
      // else it's already stopped
    }
  }
  object ZooKeeperLocalServer {
    final val DefaultPort = 2181
    private final val ZookeeperDataFolderName = "zookeeper_data"
  }
}
| edouardKaiser/lagom | dev/kafka-server/src/main/scala/com/lightbend/lagom/internal/kafka/KafkaLocalServer.scala | Scala | apache-2.0 | 7,332 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kylin.engine.spark.builder
import java.text.SimpleDateFormat
import java.util.{Locale, TimeZone, UUID}
import org.apache.kylin.common.KylinConfig
import org.apache.kylin.cube.{CubeInstance, CubeManager, CubeSegment}
import org.apache.kylin.engine.spark.cross.CrossDateTimeUtils
import org.apache.kylin.engine.spark.job.KylinBuildEnv
import org.apache.kylin.engine.spark.metadata.MetadataConverter
import org.apache.kylin.engine.spark.metadata.cube.model.ForestSpanningTree
import org.apache.kylin.job.engine.JobEngineConfig
import org.apache.kylin.job.impl.threadpool.DefaultScheduler
import org.apache.kylin.job.lock.MockJobLock
import org.apache.kylin.metadata.model.SegmentRange
import org.apache.spark.{InfoHelper, SPARK_VERSION}
import org.apache.spark.sql.common.{LocalMetadata, SharedSparkSession, SparderBaseFunSuite}
import org.apache.spark.sql.{Dataset, Row}
import org.junit.Assert
import scala.collection.JavaConversions
// scalastyle:off
class TestCreateFlatTable extends SparderBaseFunSuite with SharedSparkSession with LocalMetadata {
private val PROJECT = "default"
private val CUBE_NAME1 = "ci_left_join_cube"
private val CUBE_NAME2 = "ci_inner_join_cube"
val dateFormat = new SimpleDateFormat("yyyy-MM-dd", Locale.ROOT)
dateFormat.setTimeZone(TimeZone.getTimeZone("GMT"))
def getTestConfig: KylinConfig = {
val config = KylinConfig.getInstanceFromEnv
config
}
  // Builds flat tables for four consecutive time-range segments of the left-join cube:
  // the first two without encoding (resource detect mode), the last two with encoding
  // (cubing mode), checking the time filter and dictionary-encoded columns on each.
  test("Check the flattable filter and encode") {
    init()
    val cubeMgr: CubeManager = CubeManager.getInstance(getTestConfig)
    val cube: CubeInstance = cubeMgr.getCube(CUBE_NAME1)
    // cleanup all segments first
    cubeMgr.updateCubeDropSegments(cube, cube.getSegments())
    // resource detect mode
    val seg1 = cubeMgr.appendSegment(cube, new SegmentRange.TSRange(0L, 1356019200000L))
    val afterJoin1 = generateFlatTable(seg1, cube, false)
    checkFilterCondition(afterJoin1, seg1)
    checkEncodeCols(afterJoin1, seg1, false)
    val seg2 = cubeMgr.appendSegment(cube, new SegmentRange.TSRange(1356019200000L, 1376019200000L))
    val afterJoin2 = generateFlatTable(seg2, cube, false)
    checkFilterCondition(afterJoin2, seg2)
    checkEncodeCols(afterJoin2, seg2, false)
    // cubing mode
    val seg3 = cubeMgr.appendSegment(cube, new SegmentRange.TSRange(1376019200000L, 1396019200000L))
    val afterJoin3 = generateFlatTable(seg3, cube, true)
    checkEncodeCols(afterJoin3, seg3, true)
    val seg4 = cubeMgr.appendSegment(cube, new SegmentRange.TSRange(1396019200000L, 1416019200000L))
    val afterJoin4 = generateFlatTable(seg4, cube, true)
    checkEncodeCols(afterJoin4, seg4, true)
    //DefaultScheduler.destroyInstance()
  }
test("Check the flattable spark jobs num correctness") {
init()
val helper: InfoHelper = new InfoHelper(spark)
val cubeMgr: CubeManager = CubeManager.getInstance(getTestConfig)
val cube: CubeInstance = cubeMgr.getCube(CUBE_NAME2)
// cleanup all segments first
cubeMgr.updateCubeDropSegments(cube, cube.getSegments)
val groupId = UUID.randomUUID().toString
spark.sparkContext.setJobGroup(groupId, "test", false)
val seg1 = cubeMgr.appendSegment(cube, new SegmentRange.TSRange(0L, 1356019200000L))
val afterJoin1 = generateFlatTable(seg1, cube, true)
afterJoin1.collect()
if (SPARK_VERSION.startsWith("2.4")) {
val jobs = helper.getJobsByGroupId(groupId)
if (seg1.getConfig.detectDataSkewInDictEncodingEnabled()) {
Assert.assertEquals(jobs.length, 18)
} else {
Assert.assertEquals(jobs.length, 15)
}
} else if (SPARK_VERSION.startsWith("3.1")) {
// in Spark 3.x, BroadcastExchangeExec overwrites job group ID
val jobs = helper.getJobsByGroupId(null)
Assert.assertEquals(6, jobs.count(_.jobGroup.exists(_.endsWith(groupId))))
Assert.assertEquals(9, jobs.count(_.description.exists(_.contains("broadcast exchange"))))
}
DefaultScheduler.destroyInstance()
}
private def checkFilterCondition(ds: Dataset[Row], seg: CubeSegment) = {
val queryExecution = ds.queryExecution.simpleString
var startTime = dateFormat.format(seg.getTSRange.start.v)
var endTime = dateFormat.format(seg.getTSRange.end.v)
//Test Filter Condition
// dates will not be converted to string by default since spark 3.0.0.
// see https://issues.apache.org/jira/browse/SPARK-27638 for details.
if (SPARK_VERSION.startsWith("3.") && conf.get("spark.sql.legacy.typeCoercion.datetimeToString.enabled", "false") == "false") {
startTime = CrossDateTimeUtils.stringToDate(startTime).get.toString
endTime = CrossDateTimeUtils.stringToDate(endTime).get.toString
}
Assert.assertTrue(queryExecution.contains(startTime))
Assert.assertTrue(queryExecution.contains(endTime))
}
private def checkEncodeCols(ds: Dataset[Row], segment: CubeSegment, needEncode: Boolean) = {
val seg = MetadataConverter.getSegmentInfo(segment.getCubeInstance, segment.getUuid, segment.getName, segment.getStorageLocationIdentifier)
val globalDictSet = seg.toBuildDictColumns
val actualEncodeDictSize = ds.schema.count(_.name.endsWith(CubeBuilderHelper.ENCODE_SUFFIX))
if (needEncode) {
Assert.assertEquals(globalDictSet.size, actualEncodeDictSize)
} else {
Assert.assertEquals(0, actualEncodeDictSize)
}
}
private def generateFlatTable(segment: CubeSegment, cube: CubeInstance, needEncode: Boolean): Dataset[Row] = {
val seg = MetadataConverter.getSegmentInfo(segment.getCubeInstance, segment.getUuid, segment.getName, segment.getStorageLocationIdentifier)
val spanningTree = new ForestSpanningTree(JavaConversions.asJavaCollection(seg.toBuildLayouts))
//for test case there is no build job id
val flatTable = new CreateFlatTable(seg, spanningTree, spark, null, spark.sparkContext.applicationId)
val afterJoin = flatTable.generateDataset(needEncode)
afterJoin
}
def init() = {
KylinBuildEnv.getOrCreate(getTestConfig)
System.setProperty("kylin.metadata.distributed-lock-impl", "org.apache.kylin.engine.spark.utils.MockedDistributedLock$MockedFactory")
val scheduler = DefaultScheduler.getInstance
scheduler.init(new JobEngineConfig(KylinConfig.getInstanceFromEnv), new MockJobLock)
if (!scheduler.hasStarted) throw new RuntimeException("scheduler has not been started")
}
}
| apache/kylin | kylin-spark-project/kylin-spark-engine/src/test/scala/org/apache/kylin/engine/spark/builder/TestCreateFlatTable.scala | Scala | apache-2.0 | 7,187 |
import sbt._
/** Sbt plugin used by this scripted test: contributes a `check` task
  * that asserts `JavaTest.X == 9`.
  */
object TestPlugin extends Plugin {
  val Check = TaskKey[Unit]("check")

  // The single setting contributed by this plugin.
  private def checkSetting = Check := {
    assert(JavaTest.X == 9)
  }

  override def settings = Seq(checkSetting)
}
} | pdalpra/sbt | sbt/src/sbt-test/project/src-plugins/plugin/TestPlugin.scala | Scala | bsd-3-clause | 154 |
/**
*
* BaseActivity.scala
* Ledger wallet
*
* Created by Pierre Pollastri on 09/01/15.
*
* The MIT License (MIT)
*
* Copyright (c) 2015 Ledger
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
package com.ledger.ledgerwallet.base
import android.app.NotificationManager
import android.os.Bundle
import android.support.v7.app.{ActionBar, ActionBarActivity}
import com.ledger.ledgerwallet.widget.Toolbar
import android.view.ViewGroup.LayoutParams
import android.view.{MenuItem, LayoutInflater, View}
import android.widget.FrameLayout
import com.ledger.ledgerwallet.R
import com.ledger.ledgerwallet.utils.{AndroidUtils, TR}
import com.ledger.ledgerwallet.utils.logs.Loggable
import com.ledger.ledgerwallet.app.GcmIntentService
import android.content.Context
import scala.concurrent.ExecutionContext
/**
 * Base class for the app's activities: installs the shared toolbar layout,
 * performs notification/lifecycle bookkeeping, and redirects all
 * setContentView variants into the base layout's content frame.
 */
abstract class BaseActivity extends ActionBarActivity with Loggable {

  // The activity itself serves as the implicit Android Context.
  implicit val context = this
  // UI-thread execution context for futures started from activities.
  implicit val executor: ExecutionContext = com.ledger.ledgerwallet.concurrent.ExecutionContext.Implicits.ui

  // Views from the shared base_activity layout, resolved lazily (after the
  // layout has been installed in onCreate).
  lazy val toolbar = TR(R.id.toolbar).as[Toolbar]
  lazy val content = TR(R.id.content_view).as[FrameLayout]

  override def onCreate(savedInstanceState: Bundle): Unit = {
    super.onCreate(savedInstanceState)
    super.setContentView(R.layout.base_activity)
    setSupportActionBar(toolbar)
    toolbar.style = actionBarStyle
    // For the Normal style, replace the default action-bar title with the
    // toolbar's custom title view.
    if (actionBarStyle == Toolbar.Style.Normal) {
      getSupportActionBar.setDisplayShowTitleEnabled(false)
      getSupportActionBar.setDisplayShowCustomEnabled(true)
      getSupportActionBar.setCustomView(toolbar.titleView)
    }
    toolbar.setTitle(getTitle)
  }

  override def onResume(): Unit = {
    super.onResume()
    AndroidUtils.notifyActivityOnResume()
    // Dismiss any pending incoming-transaction notification once the UI is
    // in the foreground.
    getSystemService(Context.NOTIFICATION_SERVICE).asInstanceOf[NotificationManager]
      .cancel(GcmIntentService.IncomingTransactionNotificationId)
  }

  override def onPause(): Unit = {
    super.onPause()
    AndroidUtils.notifyActivityOnPause()
  }

  // Inflates the given layout into the base layout's content frame rather
  // than replacing the whole window content.
  override def setContentView(layoutResID: Int): Unit = {
    val inflater = LayoutInflater.from(this)
    val view = inflater.inflate(layoutResID, content, false)
    setContentView(view, view.getLayoutParams())
  }

  override def setContentView(view: View): Unit = {
    setContentView(view, new LayoutParams(LayoutParams.MATCH_PARENT, LayoutParams.MATCH_PARENT))
  }

  // Replaces the content frame with the given fragment.
  def setContentFragment(baseFragment: BaseFragment): Unit = {
    val ft = getSupportFragmentManager.beginTransaction()
    ft.replace(R.id.content_view, baseFragment)
    ft.commit()
  }

  override def setContentView(view: View, params: LayoutParams): Unit = {
    content.removeAllViews()
    content.addView(view, params)
  }

  // Home button is routed to onClickHome(); if it declines (returns false),
  // or for any other item, fall back to the superclass handling.
  override def onOptionsItemSelected(item: MenuItem): Boolean = {
    if ((item.getItemId == android.R.id.home && !onClickHome()) || item.getItemId != android.R.id.home)
      super.onOptionsItemSelected(item)
    else
      true
  }

  // Default home behaviour: finish the activity; returns true (handled).
  def onClickHome(): Boolean = {
    finish()
    true
  }

  // Subclasses may override to pick a different toolbar style.
  def actionBarStyle: Toolbar.Style = Toolbar.Style.Normal
}
package com.pamarin.monitoring.gatling
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import scala.concurrent.duration._
import com.typesafe.config._
/**
* @author jittagornp
* create 26/11/2015
*/
class HomePageSimulation extends Simulation {

  // Base URL of the page under test, read from application configuration.
  val conf = ConfigFactory.load();
  val homePageUrl = conf.getString("homePageUrl");

  // Protocol settings imitating a desktop Chrome browser; embedded HTML
  // resources are fetched too (empty black/white lists = no filtering).
  val httpConf = http
    .baseURL(homePageUrl)
    .acceptHeader("text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8")
    .inferHtmlResources( /* include html resources */
      BlackList(),
      WhiteList()
    )
    .acceptLanguageHeader("en-US,en;q=0.5")
    .acceptEncodingHeader("gzip, deflate")
    .userAgentHeader("Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.36");

  // Scenario: a single GET of the home page root.
  val scn = scenario("Go to Home page")
    .exec(http("index").get("/"))

  // One user, injected at once.
  setUp(scn.inject(atOnceUsers(1)).protocols(httpConf))
}
| jittagornp/gatling-example | src/test/resources/simulations/HomePageSimulation.scala | Scala | apache-2.0 | 939 |
package ch.wsl.box.client
import ch.wsl.box.client.utils.TestHooks
import org.scalajs.dom.document
import org.scalajs.dom.window
/** End-to-end check that logging in swaps the anonymous UI for the
  * authenticated one.
  */
class LoginTest extends TestBase {

  "login" should "be done" in {
    Main.setupUI().flatMap { _ =>
      val beforeLogin = document.body.innerHTML
      // Before authentication no logout button is rendered.
      assert(document.querySelectorAll(s"#${TestHooks.logoutButton}").length == 0)
      for{
        _ <- Context.services.clientSession.login("test","test")
        _ <- waitLoggedIn
      } yield {
        // After login the DOM must have changed, exactly one logout button
        // must exist, and the page title must match the expected value.
        assert(beforeLogin != document.body.innerHTML)
        assert(document.querySelectorAll(s"#${TestHooks.logoutButton}").length == 1)
        assert(document.getElementById(values.titleId).textContent == values.titleText)
      }
    }
  }
}
| Insubric/box | client/src/test/scala/ch/wsl/box/client/LoginTest.scala | Scala | apache-2.0 | 781 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.collection
package mutable
/** Base trait for mutable sequences that can be read and updated by index. */
trait IndexedSeq[T] extends Seq[T]
  with scala.collection.IndexedSeq[T]
  with IndexedSeqOps[T, IndexedSeq, IndexedSeq[T]]
  with IterableFactoryDefaults[T, IndexedSeq] {

  override def iterableFactory: SeqFactory[IndexedSeq] = IndexedSeq
}
// Factory for mutable IndexedSeq; construction is delegated to ArrayBuffer.
@SerialVersionUID(3L)
object IndexedSeq extends SeqFactory.Delegate[IndexedSeq](ArrayBuffer)
/** Operations on mutable indexed sequences: in-place mapping and sorting. */
trait IndexedSeqOps[A, +CC[_], +C <: AnyRef]
  extends scala.collection.IndexedSeqOps[A, CC, C]
    with SeqOps[A, CC, C] {

  /** Modifies this $coll by applying a function to all elements of this $coll.
    *
    *  @param f the function to apply to each element.
    *  @return this $coll modified by replacing all elements with the
    *          result of applying the given function `f` to each element
    *          of this $coll.
    */
  def mapInPlace(f: A => A): this.type = {
    var i = 0
    // Cache the size once; indexed update overwrites each slot in place.
    val siz = size
    while (i < siz) { this(i) = f(this(i)); i += 1 }
    this
  }

  /** Sorts this $coll in place according to an Ordering.
    *
    * @see [[scala.collection.SeqOps.sorted]]
    * @param  ord the ordering to be used to compare elements.
    * @return modified input $coll sorted according to the ordering `ord`.
    */
  def sortInPlace[B >: A]()(implicit ord: Ordering[B]): this.type = {
    val len = this.length
    if (len > 1) {
      // Box the elements into an Array[AnyRef] so java.util.Arrays.sort
      // (a stable sort for object arrays) can be used, then copy back.
      val arr = new Array[AnyRef](len)
      var i = 0
      for (x <- this) {
        arr(i) = x.asInstanceOf[AnyRef]
        i += 1
      }
      java.util.Arrays.sort(arr, ord.asInstanceOf[Ordering[Object]])
      i = 0
      while (i < arr.length) {
        update(i, arr(i).asInstanceOf[A])
        i += 1
      }
    }
    this
  }

  /** Sorts this $coll in place according to a comparison function.
    *
    * @see [[scala.collection.SeqOps.sortWith]]
    */
  def sortInPlaceWith(lt: (A, A) => Boolean): this.type = sortInPlace()(Ordering.fromLessThan(lt))

  /** Sorts this $coll in place according to the Ordering which results from transforming
    * an implicitly given Ordering with a transformation function.
    *
    * @see [[scala.collection.SeqOps.sortBy]]
    */
  def sortInPlaceBy[B](f: A => B)(implicit ord: Ordering[B]): this.type = sortInPlace()(ord on f)
}
| scala/scala | src/library/scala/collection/mutable/IndexedSeq.scala | Scala | apache-2.0 | 2,537 |
package uk.co.morleydev.ghosthunt.model.component.menu
import org.jsfml.system.Vector2f
/**
 * A menu option is a series of buttons, where one of those buttons can be
 * selected. That button will then become considered the active button.
 *
 * @param position position of the option (2D vector)
 * @param size size of the option (2D vector)
 * @param text the button labels making up this option, one per button
 * @param active index (into `text`) of the currently active button; the
 *               default -1 presumably means "no active button" — confirm
 *               against usage
 */
case class MenuOption(position : Vector2f, size : Vector2f, text : Seq[String], active : Int = -1)
| MorleyDev/GhostHunt | src/main/scala/uk/co/morleydev/ghosthunt/model/component/menu/MenuOption.scala | Scala | mit | 413 |
package at.logic.gapt.proofs.resolution
import at.logic.gapt.expr._
import at.logic.gapt.proofs.{ Ant, Sequent, Suc }
import org.specs2.mutable.Specification
class UnitResolutionToLKProofTest extends Specification {

  // Flipping the equation on either premise must produce the corresponding
  // implication between the original and flipped equations.
  "flips" in {
    val p1 = Input( Sequent() :+ hof"a=b" )
    val p2 = Input( hof"b=a" +: Sequent() )
    UnitResolutionToLKProof( Resolution( Flip( p1, Suc( 0 ) ), p2, hof"b=a" ) ).conclusion.toImplication must_== hof"a=b -> b=a"
    UnitResolutionToLKProof( Resolution( p1, Flip( p2, Ant( 0 ) ), hof"a=b" ) ).conclusion.toImplication must_== hof"a=b -> b=a"
  }

  // Flipping on both premises: the conclusion relates a=b to itself.
  "double flip" in {
    val p1 = Input( Sequent() :+ hof"a=b" )
    val p2 = Input( hof"a=b" +: Sequent() )
    UnitResolutionToLKProof( Resolution( Flip( p1, Suc( 0 ) ), Flip( p2, Ant( 0 ) ), hof"b=a" ) ).conclusion.toImplication must_== hof"a=b -> a=b"
  }
}
| gebner/gapt | tests/src/test/scala/at/logic/gapt/proofs/resolution/UnitResolutionToLKProofTest.scala | Scala | gpl-3.0 | 850 |
package hoecoga.play.api
import org.scalatest.FunSpec
import play.api.http.Writeable
import play.api.libs.json.Json
import play.api.mvc.Results._
import play.api.mvc.{Action, BodyParsers}
import play.api.test.Helpers._
import play.api.test.{FakeApplication, FakeRequest}
/** Verifies that ErrorHandler.onClientError renders client errors as a JSON
  * envelope of the form {"meta": {"status": <code>}}.
  */
class ErrorHandlerSpec extends FunSpec {
  import ErrorHandlerSpecHelper._

  describe("ErrorHandler") {
    it("onClientError") {
      test { settings =>
        import settings._

        // Routes the request and asserts both the status code and the JSON body.
        def check[A](req: FakeRequest[A], code: Int)(implicit w: Writeable[A]): Unit = {
          val Some(res) = route(req)
          assert(status(res) === code)
          assert(contentAsJson(res) === Json.obj("meta" -> Json.obj("status" -> code)))
        }

        // Text body against a JSON endpoint -> 415.
        check(client.withTextBody(""), UNSUPPORTED_MEDIA_TYPE)
        // Unrouted request -> 404.
        check(FakeRequest(), NOT_FOUND)
        // Body exceeding the default text length limit -> 413.
        check(client.withJsonBody(Json.obj("a" -> "b" * BodyParsers.parse.DefaultMaxTextLength)), REQUEST_ENTITY_TOO_LARGE)
      }
    }
  }
}
/** Fixture for [[ErrorHandlerSpec]]: a fake application with a single JSON
  * POST route and the ErrorHandler under test installed.
  */
object ErrorHandlerSpecHelper {
  val Method = POST
  val Path1 = "/client"

  // Per-test settings; `client` is a request template targeting the route.
  case class Settings(client: FakeRequest[_])

  // Runs `f` inside a FakeApplication configured with the custom error handler.
  def test(f: Settings => Unit): Unit = {
    running(FakeApplication(
      withRoutes = {
        case (Method, Path1) => Action(BodyParsers.parse.json)(_ => Ok(""))
      },
      additionalConfiguration = Map("play.http.errorHandler" -> "hoecoga.play.api.ErrorHandler")
    )) {
      f(Settings(FakeRequest(Method, Path1)))
    }
  }
}
| hoecoga/hoecoga-play-api | src/test/scala/hoecoga/play/api/ErrorHandlerSpec.scala | Scala | mit | 1,433 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.scaladsl.testkit
import java.nio.file.{ Files, Path, Paths }
import com.lightbend.lagom.scaladsl.api.{ Descriptor, Service }
import com.lightbend.lagom.scaladsl.persistence.cassandra.CassandraPersistenceComponents
import com.lightbend.lagom.scaladsl.persistence.jdbc.JdbcPersistenceComponents
import com.lightbend.lagom.scaladsl.persistence.{ PersistenceComponents, PersistentEntityRegistry }
import com.lightbend.lagom.scaladsl.playjson.{ EmptyJsonSerializerRegistry, JsonSerializerRegistry }
import com.lightbend.lagom.scaladsl.server._
import org.scalatest.{ Matchers, WordSpec }
import play.api.db.HikariCPComponents
import play.api.libs.ws.ahc.AhcWSComponents
import scala.collection.JavaConverters._
import scala.util.Properties
/** Checks ServiceTest's temp-directory lifecycle (created while running with
  * Cassandra, removed on stop) and that the JDBC setup starts at all.
  */
class ServiceTestSpec extends WordSpec with Matchers {
  "ServiceTest" when {
    "started with Cassandra" should {
      "create a temporary directory" in {
        val temporaryFileCountBeforeRun = listTemporaryFiles().size

        ServiceTest.withServer(ServiceTest.defaultSetup.withCassandra())(new CassandraTestApplication(_)) { _ =>
          // Exactly one extra ServiceTest_* directory while the server runs.
          val temporaryFilesDuringRun = listTemporaryFiles()

          temporaryFilesDuringRun should have size (temporaryFileCountBeforeRun + 1)
        }
      }
    }

    "stopped after starting" should {
      "remove its temporary directory" in {
        val temporaryFileCountBeforeRun = listTemporaryFiles().size

        ServiceTest.withServer(ServiceTest.defaultSetup.withCassandra())(new CassandraTestApplication(_)) { _ => () }

        // The count must return to its pre-run value once the server stops.
        val temporaryFilesAfterRun = listTemporaryFiles()

        temporaryFilesAfterRun should have size temporaryFileCountBeforeRun
      }
    }

    "started with JDBC" should {
      "start successfully" in {
        ServiceTest.withServer(ServiceTest.defaultSetup.withJdbc())(new JdbcTestApplication(_)) { _ => () }
      }
    }
  }

  // Lists ServiceTest_* entries in the system temp directory.
  def listTemporaryFiles(): Iterator[Path] = {
    val tmpDir = Paths.get(Properties.tmpDir)
    Files
      .newDirectoryStream(tmpDir, "ServiceTest_*")
      .iterator()
      .asScala
  }
}
/** Minimal Lagom service descriptor used as the application under test. */
trait TestService extends Service {
  import Service._

  override final def descriptor: Descriptor = named("test")
}
/** Trivial TestService implementation; depends on the PersistentEntityRegistry. */
class TestServiceImpl(persistentEntityRegistry: PersistentEntityRegistry) extends TestService
/** Base application under test; a concrete persistence flavour must be mixed
  * in by subclasses (self-type PersistenceComponents).
  */
class TestApplication(context: LagomApplicationContext) extends LagomApplication(context)
  with LocalServiceLocator
  with AhcWSComponents { self: PersistenceComponents =>

  override lazy val jsonSerializerRegistry: JsonSerializerRegistry = EmptyJsonSerializerRegistry

  override lazy val lagomServer: LagomServer = serverFor[TestService](new TestServiceImpl(persistentEntityRegistry))
}
/** TestApplication wired with Cassandra persistence. */
class CassandraTestApplication(context: LagomApplicationContext) extends TestApplication(context)
  with CassandraPersistenceComponents
/** TestApplication wired with JDBC persistence (HikariCP connection pool). */
class JdbcTestApplication(context: LagomApplicationContext) extends TestApplication(context)
  with JdbcPersistenceComponents
  with HikariCPComponents
| rstento/lagom | testkit/scaladsl/src/test/scala/com/lightbend/lagom/scaladsl/testkit/ServiceTestSpec.scala | Scala | apache-2.0 | 3,061 |
package com.socrata.soda.clients.datacoordinator
import com.socrata.http.client.{RequestBuilder, HttpClient}
import com.socrata.http.server.util.RequestId.{RequestId, ReqIdHeader}
import com.socrata.http.common.AuxiliaryData
import com.socrata.curator.ProviderCache
import com.socrata.soda.server.{HeaderAddingHttpClient, SodaUtils, ThreadLimiter}
import java.io.Closeable
import com.socrata.soda.server.id.DatasetId
import org.apache.curator.x.discovery.{strategies => providerStrategies, ServiceDiscovery}
import scala.concurrent.duration.FiniteDuration
/**
 * Builds HttpDataCoordinatorClient instances backed by Curator service
 * discovery (round-robin over discovered hosts). Also caps how many threads
 * data-coordinator calls may consume via a ThreadLimiter.
 */
class CuratedHttpDataCoordinatorClientProvider(discovery: ServiceDiscovery[AuxiliaryData],
                                               discoveredInstances: () => Set[String],
                                               serviceName: String,
                                               connectTimeout: FiniteDuration,
                                               receiveTimeout: FiniteDuration,
                                               maxJettyThreadPoolSize: Int,
                                               maxThreadRatio: Double)
  extends Closeable with (HttpClient => HttpDataCoordinatorClient)
{
  // Make sure the DC connection doesn't use all available threads
  val threadLimiter = new ThreadLimiter("DataCoordinatorClient",
    (maxThreadRatio * maxJettyThreadPoolSize).toInt)

  // Timeouts are passed downstream as Int milliseconds; reject any duration
  // whose millisecond value does not fit in an Int.
  private[this] val connectTimeoutMS = connectTimeout.toMillis.toInt
  if (connectTimeoutMS != connectTimeout.toMillis) {
    throw new IllegalArgumentException("Connect timeout out of range (milliseconds must fit in an int)")
  }

  private[this] val receiveTimeoutMS = receiveTimeout.toMillis.toInt
  if (receiveTimeoutMS != receiveTimeout.toMillis) {
    throw new IllegalArgumentException("Receive timeout out of range (milliseconds must fit in an int)")
  }

  // Caches service providers per instance, selecting hosts round-robin.
  val provider = new ProviderCache(discovery, new providerStrategies.RoundRobinStrategy, serviceName)

  def close() {
    provider.close()
  }

  // Wraps the given HttpClient into a data-coordinator client that resolves
  // hosts through the provider cache at call time.
  def apply(http: HttpClient): HttpDataCoordinatorClient =
    new HttpDataCoordinatorClient {
      val httpClient = http
      val threadLimiter = CuratedHttpDataCoordinatorClientProvider.this.threadLimiter
      def hostO(instance: String): Option[RequestBuilder] = Option(provider(instance).getInstance()).map { serv =>
        RequestBuilder(new java.net.URI(serv.buildUriSpec())).
          livenessCheckInfo(Option(serv.getPayload).flatMap(_.livenessCheckInfo)).
          connectTimeoutMS(connectTimeoutMS).
          receiveTimeoutMS(receiveTimeoutMS)
      }
      def instances() = discoveredInstances()
    }
}
| socrata-platform/soda-fountain | soda-fountain-lib/src/main/scala/com/socrata/soda/clients/datacoordinator/CuratedHttpDataCoordinatorClientProvider.scala | Scala | apache-2.0 | 2,636 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.initrepository
import uk.gov.hmrc.initrepository.FutureUtils.exponentialRetry
import uk.gov.hmrc.initrepository.git.LocalGitService
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.{Failure, Success, Try}
/**
 * Orchestrates creation of a new github repository: verifies pre-conditions,
 * creates the repository, grants team access, bootstraps the local git
 * contents and configures signed-commit enforcement.
 */
class Coordinator(github: Github, git: LocalGitService) {

  type PreConditionError[T] = Option[T]

  // Team granted admin permission on every repository created by `run`.
  private val RepoAdminsTeamName = "Repository Admins"

  /**
   * Creates `newRepoName`, grants each of `teams` push access and the
   * repository-admins team admin access, initialises the repository contents
   * and applies signed-commit settings. Fails if a pre-condition is not met.
   */
  def run(
    newRepoName: String,
    teams: Seq[String],
    digitalServiceName: Option[String],
    bootstrapTag: Option[String],
    privateRepo: Boolean,
    githubToken: String,
    requireSignedCommits: Seq[String]): Future[Unit] =
    checkPreConditions(newRepoName, teams, privateRepo)
      .flatMap { error =>
        if (error.isEmpty) {
          Log.info(s"Pre-conditions met, creating '$newRepoName'")
          for {
            repoUrl <- github.createRepo(newRepoName, privateRepo)
            _ <- addTeamsToGitRepo(teams, newRepoName)
            _ <- addTeamToGitRepo(RepoAdminsTeamName, newRepoName, permission = "admin")
            _ <- tryToFuture(
              git.initialiseRepository(newRepoName, digitalServiceName, bootstrapTag, privateRepo, githubToken))
            _ <- github.addRequireSignedCommits(newRepoName, requireSignedCommits)
          } yield repoUrl
        } else {
          Future.failed(new Exception(s"pre-condition check failed with: ${error.get}"))
        }
      }
      .map { repoUrl =>
        Log.info(s"Successfully created $repoUrl")
      }

  // Grants push permission on the repository to every named team, in parallel.
  private def addTeamsToGitRepo(teamNames: Seq[String], newRepoName: String): Future[Seq[Unit]] =
    Future.sequence(teamNames.map(addTeamToGitRepo(_, newRepoName, permission = "push")))

  // Resolves a team name to its id, then adds the repository to the team with
  // the given permission, retrying with exponential backoff. Shared by the
  // per-team and repository-admins paths so both behave identically.
  private def addTeamToGitRepo(teamName: String, repoName: String, permission: String): Future[Unit] =
    github.teamId(teamName).flatMap { teamId =>
      exponentialRetry(10) {
        addRepoToTeam(repoName, teamId, permission)
      }
    }

  // Fails with an explanatory error when the team id could not be resolved.
  private def addRepoToTeam(repoName: String, teamIdO: Option[Int], permission: String): Future[Unit] =
    teamIdO
      .map(teamId => github.addRepoToTeam(repoName, teamId, permission))
      .getOrElse(Future.failed(new Exception("Didn't have a valid team id")))

  // Evaluates the (possibly throwing) by-name Try on the execution context and
  // lifts its result into a Future.
  private def tryToFuture[A](t: => Try[A]): Future[A] =
    Future(t).flatMap(Future.fromTry)

  /** True when every named team exists on github. */
  def checkTeamsExistOnGithub(teamNames: Seq[String]): Future[Boolean] =
    Future.sequence(teamNames.map(team => github.teamId(team))).map(_.flatten).map(_.size == teamNames.size)

  // None when all pre-conditions hold, otherwise a description of the failure.
  // `privateRepo` is currently unused but kept for call-site stability.
  private def checkPreConditions(
    newRepoName: String,
    teams: Seq[String],
    privateRepo: Boolean): Future[PreConditionError[String]] =
    for {
      repoExists <- github.containsRepo(newRepoName)
      teamsExist <- checkTeamsExistOnGithub(teams)
    } yield {
      if (repoExists) Some(s"Repository with name '$newRepoName' already exists in github ")
      else if (!teamsExist)
        Some(s"One of the provided team names ('${teams.mkString(",")}') could not be found in github")
      else None
    }
}
| hmrc/init-repository | src/main/scala/uk/gov/hmrc/initrepository/Coordinator.scala | Scala | apache-2.0 | 4,045 |
package com.stackmob.customcode.dev
package test
package server
import org.specs2.Specification
import com.stackmob.newman.test.DummyHttpClient
import com.stackmob.customcode.dev.server.APIRequestProxy
import com.stackmob.customcode.dev.server.APIRequestProxy.UnknownVerbError
import collection.JavaConverters._
/** Exercises APIRequestProxy against a DummyHttpClient (canned responses,
  * no real network): each supported verb succeeds, unknown verbs fail.
  */
class APIRequestProxySpecs extends Specification with CustomMatchers { def is =
  "APIRequestProxySpecs".title ^ end ^
  "APIRequestProxy is responsible for executing non-custom code requests to the Stackmob API" ^ end ^
  "the proxy should fail if given an unknown verb" ! unknownVerb ^ end ^
  "the proxy should work properly for GET requests" ! get ^ end ^
  "the proxy should work properly for POST requests" ! post ^ end ^
  "the proxy should work properly for PUT requests" ! put ^ end ^
  "the proxy should work properly for DELETE requests" ! delete ^ end ^
  "the proxy should work properly for HEAD requests" ! head ^ end ^
  end

  // Every request is answered with the same canned response.
  private val resp = DummyHttpClient.CannedResponse
  private implicit def client = new DummyHttpClient(responseToReturn = () => resp)

  // Builds a mock Jetty request for the given verb/uri.
  private def request(verb: String,
                      uri: String,
                      headers: Map[String, String] = Map("Content-Type" -> "text/plain"),
                      body: String = "stackmob-test") = {
    new MockJettyRequest(verb, uri, headers.asJava, body)
  }

  // OPTIONS is not a supported verb and must surface an UnknownVerbError.
  private def unknownVerb = {
    val req = request("OPTIONS", "http://httpbin.org/options")
    APIRequestProxy(req).toEither must beThrowableInstance[UnknownVerbError]
  }

  private def get = {
    val req = request("GET", "http://httpbin.org/get")
    APIRequestProxy(req).toEither must beRight
  }

  private def post = {
    val req = request("POST", "http://httpbin.org/post")
    APIRequestProxy(req).toEither must beRight
  }

  private def put = {
    val req = request("PUT", "http://httpbin.org/put")
    APIRequestProxy(req).toEither must beRight
  }

  private def delete = {
    val req = request("DELETE", "http://httpbin.org/delete")
    APIRequestProxy(req).toEither must beRight
  }

  private def head = {
    val req = request("HEAD", "http://httpbin.org/head")
    APIRequestProxy(req).toEither must beRight
  }
}
| matthewfarwell/stackmob-customcode-dev | src/test/scala/com/stackmob/customcode/dev/test/server/APIRequestProxySpecs.scala | Scala | apache-2.0 | 2,728 |
/*
* Copyright (C) 2016 DANS - Data Archiving and Networked Services (info@dans.knaw.nl)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.easy.agreement.datafetch
import javax.naming.NamingEnumeration
import javax.naming.directory.{ Attributes, SearchControls, SearchResult }
import javax.naming.ldap.LdapContext
import nl.knaw.dans.easy.agreement.NoUserFoundException
import nl.knaw.dans.easy.agreement.fixture.TestSupportFixture
import org.scalamock.scalatest.MockFactory
import scala.util.{ Failure, Success }
/** Tests LdapImpl.query against a mocked LdapContext: the search filter must
  * target the requested uid, the first result's attributes are returned, and
  * an empty result set yields NoUserFoundException.
  */
class LdapSpec extends TestSupportFixture with MockFactory {

  "query" should "return the results of sending a query to LDAP" in {
    val testDepositorId = "foobar"
    val ctx = mock[LdapContext]
    val result = mock[NamingEnumeration[SearchResult]]
    val attrs1 = mock[Attributes]

    // Accept only searches whose filter mentions the expected uid.
    val f = where { (_: String, filter: String, _: SearchControls) => filter.contains(s"uid=$testDepositorId") }
    (ctx.search(_: String, _: String, _: SearchControls)) expects f returning result

    inSequence {
      result.hasMoreElements _ expects () returns true
      result.nextElement _ expects () returns new SearchResult("foobar1", null, attrs1)
    }

    val ldap = new LdapImpl(ctx)

    ldap.query(testDepositorId) should matchPattern { case Success(`attrs1`) => }
  }

  it should "fail when no results are given from LDAP" in {
    val testDepositorId = "foobar"
    val ctx = mock[LdapContext]
    val result = mock[NamingEnumeration[SearchResult]]
    val attrs1 = mock[Attributes]

    val f = where { (_: String, filter: String, _: SearchControls) => filter.contains(s"uid=$testDepositorId") }
    (ctx.search(_: String, _: String, _: SearchControls)) expects f returning result

    // An empty enumeration must be translated into NoUserFoundException.
    inSequence {
      result.hasMoreElements _ expects () returns false
    }

    val ldap = new LdapImpl(ctx)

    ldap.query(testDepositorId) should matchPattern { case Failure(NoUserFoundException(`testDepositorId`)) => }
  }
}
| DANS-KNAW/easy-license-creator | src/test/scala/nl/knaw/dans/easy/agreement/datafetch/LdapSpec.scala | Scala | apache-2.0 | 2,474 |
package com.twitter.finagle
import com.twitter.finagle.client._
import com.twitter.finagle.dispatch.SerialServerDispatcher
import com.twitter.finagle.http.codec.HttpClientDispatcher
import com.twitter.finagle.netty3._
import com.twitter.finagle.server._
import com.twitter.util.Future
import java.net.{InetSocketAddress, SocketAddress}
import org.jboss.netty.handler.codec.http._
/** Convenience operations mixed into HTTP clients. */
trait HttpRichClient { self: Client[HttpRequest, HttpResponse] =>
  def fetchUrl(url: String): Future[HttpResponse] = fetchUrl(new java.net.URL(url))

  // Issues a single GET for the URL; the one-shot service is closed once the
  // response future completes (via `ensure`).
  def fetchUrl(url: java.net.URL): Future[HttpResponse] = {
    val addr = {
      // Use the protocol's default port when the URL does not carry one.
      val port = if (url.getPort < 0) url.getDefaultPort else url.getPort
      new InetSocketAddress(url.getHost, port)
    }
    val group = Group[SocketAddress](addr)
    val req = http.RequestBuilder().url(url).buildGet()
    val service = newClient(group).toService
    service(req) ensure {
      service.close()
    }
  }
}
// Netty3 transporter using the finagle HTTP client codec, with tracing enabled.
object HttpTransporter extends Netty3Transporter[HttpRequest, HttpResponse](
  "http",
  http.Http()
    .enableTracing(true)
    .client(ClientCodecConfig("httpclient")).pipelineFactory
)
// Default HTTP client: bridges the transporter with the HTTP client dispatcher
// and mixes in the rich-client helpers (fetchUrl).
object HttpClient extends DefaultClient[HttpRequest, HttpResponse](
  name = "http",
  endpointer = Bridge[HttpRequest, HttpResponse, HttpRequest, HttpResponse](
    HttpTransporter, new HttpClientDispatcher(_))
) with HttpRichClient
// Netty3 listener using the finagle HTTP server codec, with tracing enabled.
object HttpListener extends Netty3Listener[HttpResponse, HttpRequest](
  "http",
  http.Http()
    .enableTracing(true)
    .server(ServerCodecConfig("httpserver", new SocketAddress{})).pipelineFactory
)
// Default HTTP server: dispatches requests serially per connection.
object HttpServer extends DefaultServer[HttpRequest, HttpResponse, HttpResponse, HttpRequest](
  "http", HttpListener, new SerialServerDispatcher(_, _)
)
/** Entry point combining the default HTTP client and server. */
object Http extends Client[HttpRequest, HttpResponse] with HttpRichClient
  with Server[HttpRequest, HttpResponse]
{
  def newClient(group: Group[SocketAddress]): ServiceFactory[HttpRequest, HttpResponse] =
    HttpClient.newClient(group)

  def serve(addr: SocketAddress, service: ServiceFactory[HttpRequest, HttpResponse]): ListeningServer =
    HttpServer.serve(addr, service)
}
| firebase/finagle | finagle-http/src/main/scala/com/twitter/finagle/Http.scala | Scala | apache-2.0 | 2,110 |
package com.github.vooolll.domain.comments
import com.github.vooolll.domain.FacebookAttribute
/** Attributes that can be requested on a Facebook comment, plus the default
  * attribute set used when none are specified.
  */
object FacebookCommentsAttributes {

  // Default selection of comment attributes.
  val defaultCommentsAttributeValues =
    Set(Id, Message, CreatedTime, Attachment, From, Object)

  // Marker trait narrowing FacebookAttribute to comment attributes.
  trait FacebookCommentsAttribute extends FacebookAttribute

  // Each case object maps to the wire name exposed via `value`.
  case object Id extends FacebookCommentsAttribute {
    override def value = "id"
  }

  case object Message extends FacebookCommentsAttribute {
    override def value = "message"
  }

  case object Attachment extends FacebookCommentsAttribute {
    override def value = "attachment"
  }

  case object CreatedTime extends FacebookCommentsAttribute {
    override def value = "created_time"
  }

  case object From extends FacebookCommentsAttribute {
    override def value = "from"
  }

  case object Object extends FacebookCommentsAttribute {
    override def value = "object"
  }
}
| vooolll/facebook4s | src/main/scala/com/github/vooolll/domain/comments/FacebookCommentsAttributes.scala | Scala | apache-2.0 | 881 |
package org.emailscript.helpers
import org.emailscript.api._
import org.scalatest.{FlatSpec, Matchers}
/**
 * Verifies that Configuration.getConfig converts each *Bean configuration
 * object into the corresponding named runtime object (Indexer,
 * GoogleContacts, EmailAccount).
 *
 * Fix: the EmailAccount case's failure message was garbled
 * ("Unknown recoginized result") — normalized to match the wording used by
 * the GoogleContacts case.
 */
class ConfigurationTest extends FlatSpec with Matchers {

  "IndexerBean" should "produce a named Indexer obj" in {
    val bean = new IndexerBean()
    bean.setUrl("url1")
    bean.setNickname("name1")

    val result = Configuration.getConfig("", Some(bean))
    result match {
      case Some((name: String, indexer: Indexer)) => {
        name should be ("name1")
        indexer.url should startWith ("url1")
      }
      case _ => fail(s"unexpected configuration: $result")
    }
  }

  "GoogleContactsBean" should "produce a GoogleContacts" in {
    val bean = new GoogleContactsBean
    bean.setNickname("name2")
    bean.setAccount("account1")
    bean.setPassword("password1")

    val result = Configuration.getConfig("", Some(bean))
    result match {
      case Some((name: String, gc: GoogleContacts)) => {
        name should be ("name2")
        gc.account should be ("account1")
        gc.password should be ("password1")
      }
      case _ => fail(s"Unrecognized result: $result")
    }
  }

  "EmailAccountBean" should "produce an EmailAccount" in {
    val bean = new EmailAccountBean()
    bean.setNickname("name3")
    bean.setImapHost("imap1")
    bean.setImapPort(2)
    bean.setUser("user1")
    bean.setPassword("password1")
    bean.setSmtpHost("smpt1")
    bean.setSmtpPort(3)

    val result = Configuration.getConfig("", Some(bean))
    result match {
      case Some((name: String, ea: EmailAccount)) => {
        name should be("name3")
      }
      case _ => fail(s"Unrecognized result: $result")
    }
  }
}
| OdysseusLevy/emailscript | src/test/scala/org/emailscript/helpers/ConfigurationTest.scala | Scala | lgpl-3.0 | 1,664 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.net.InetAddress
import java.nio.charset.StandardCharsets
import java.util
import java.util.Arrays.asList
import java.util.{Collections, Optional, Random}
import java.util.concurrent.TimeUnit
import kafka.api.LeaderAndIsr
import kafka.api.{ApiVersion, KAFKA_0_10_2_IV0, KAFKA_2_2_IV1}
import kafka.cluster.Partition
import kafka.controller.KafkaController
import kafka.coordinator.group.GroupCoordinatorConcurrencyTest.JoinGroupCallback
import kafka.coordinator.group.GroupCoordinatorConcurrencyTest.SyncGroupCallback
import kafka.coordinator.group.JoinGroupResult
import kafka.coordinator.group.SyncGroupResult
import kafka.coordinator.group.{GroupCoordinator, GroupSummary, MemberSummary}
import kafka.coordinator.transaction.TransactionCoordinator
import kafka.log.AppendOrigin
import kafka.network.RequestChannel
import kafka.network.RequestChannel.SendResponse
import kafka.server.QuotaFactory.QuotaManagers
import kafka.utils.{MockTime, TestUtils}
import kafka.zk.KafkaZkClient
import org.apache.kafka.common.acl.AclOperation
import org.apache.kafka.common.config.ConfigResource
import org.apache.kafka.common.{IsolationLevel, Node, TopicPartition}
import org.apache.kafka.common.errors.UnsupportedVersionException
import org.apache.kafka.common.internals.Topic
import org.apache.kafka.common.memory.MemoryPool
import org.apache.kafka.common.message.IncrementalAlterConfigsRequestData.AlterableConfig
import org.apache.kafka.common.message.JoinGroupRequestData.JoinGroupRequestProtocol
import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity
import org.apache.kafka.common.message.OffsetDeleteRequestData.{OffsetDeleteRequestPartition, OffsetDeleteRequestTopic, OffsetDeleteRequestTopicCollection}
import org.apache.kafka.common.message.StopReplicaRequestData.{StopReplicaPartitionState, StopReplicaTopicState}
import org.apache.kafka.common.message.UpdateMetadataRequestData.{UpdateMetadataBroker, UpdateMetadataEndpoint, UpdateMetadataPartitionState}
import org.apache.kafka.common.message._
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.network.ClientInformation
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.record.FileRecords.TimestampAndOffset
import org.apache.kafka.common.record._
import org.apache.kafka.common.replica.ClientMetadata
import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse
import org.apache.kafka.common.requests.WriteTxnMarkersRequest.TxnMarkerEntry
import org.apache.kafka.common.requests.{FetchMetadata => JFetchMetadata, _}
import org.apache.kafka.common.resource.PatternType
import org.apache.kafka.common.resource.ResourcePattern
import org.apache.kafka.common.resource.ResourceType
import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol}
import org.apache.kafka.server.authorizer.Action
import org.apache.kafka.server.authorizer.AuthorizationResult
import org.apache.kafka.server.authorizer.Authorizer
import org.easymock.EasyMock._
import org.easymock.{Capture, EasyMock, IAnswer, IArgumentMatcher}
import org.junit.Assert.{assertArrayEquals, assertEquals, assertNull, assertTrue}
import org.junit.{After, Test}
import scala.jdk.CollectionConverters._
import scala.collection.{Map, Seq, mutable}
class KafkaApisTest {
// Collaborators injected into the KafkaApis instances under test. External
// dependencies are EasyMock nice mocks; MetadataCache, Metrics,
// BrokerTopicStats and MockTime are real instances shared by every test.
private val requestChannel: RequestChannel = EasyMock.createNiceMock(classOf[RequestChannel])
private val requestChannelMetrics: RequestChannel.Metrics = EasyMock.createNiceMock(classOf[RequestChannel.Metrics])
private val replicaManager: ReplicaManager = EasyMock.createNiceMock(classOf[ReplicaManager])
private val groupCoordinator: GroupCoordinator = EasyMock.createNiceMock(classOf[GroupCoordinator])
private val adminManager: AdminManager = EasyMock.createNiceMock(classOf[AdminManager])
private val txnCoordinator: TransactionCoordinator = EasyMock.createNiceMock(classOf[TransactionCoordinator])
private val controller: KafkaController = EasyMock.createNiceMock(classOf[KafkaController])
private val zkClient: KafkaZkClient = EasyMock.createNiceMock(classOf[KafkaZkClient])
private val metrics = new Metrics()
private val brokerId = 1
private val metadataCache = new MetadataCache(brokerId)
private val clientQuotaManager: ClientQuotaManager = EasyMock.createNiceMock(classOf[ClientQuotaManager])
private val clientRequestQuotaManager: ClientRequestQuotaManager = EasyMock.createNiceMock(classOf[ClientRequestQuotaManager])
private val replicaQuotaManager: ReplicationQuotaManager = EasyMock.createNiceMock(classOf[ReplicationQuotaManager])
// The same mocks are reused for every quota slot QuotaManagers exposes.
private val quotas = QuotaManagers(clientQuotaManager, clientQuotaManager, clientRequestQuotaManager,
  replicaQuotaManager, replicaQuotaManager, replicaQuotaManager, None)
private val fetchManager: FetchManager = EasyMock.createNiceMock(classOf[FetchManager])
private val brokerTopicStats = new BrokerTopicStats
private val clusterId = "clusterId"
private val time = new MockTime
private val clientId = ""
@After
def tearDown(): Unit = {
  // Release quota-manager resources, Yammer metrics registered by the broker
  // code under test, and the Metrics instance created for this class, so that
  // state does not leak between tests.
  quotas.shutdown()
  TestUtils.clearYammerMetrics()
  metrics.close()
}
/**
 * Builds a KafkaApis instance wired to the mocked collaborators declared on
 * this class.
 *
 * @param interBrokerProtocolVersion inter-broker protocol (and log message
 *                                   format) version to configure the broker with
 * @param authorizer                 optional authorizer plugged into the request pipeline
 */
def createKafkaApis(interBrokerProtocolVersion: ApiVersion = ApiVersion.latestVersion,
                    authorizer: Option[Authorizer] = None): KafkaApis = {
  val props = TestUtils.createBrokerConfig(brokerId, "zk")
  props.put(KafkaConfig.InterBrokerProtocolVersionProp, interBrokerProtocolVersion.toString)
  props.put(KafkaConfig.LogMessageFormatVersionProp, interBrokerProtocolVersion.toString)
  val config = new KafkaConfig(props)
  new KafkaApis(
    requestChannel,
    replicaManager,
    adminManager,
    groupCoordinator,
    txnCoordinator,
    controller,
    zkClient,
    brokerId,
    config,
    metadataCache,
    metrics,
    authorizer,
    quotas,
    fetchManager,
    brokerTopicStats,
    clusterId,
    time,
    null
  )
}
// KafkaApis.authorize should build exactly one Action from the operation /
// resource coordinates and return true when the Authorizer answers ALLOWED.
@Test
def testAuthorize(): Unit = {
  val authorizer: Authorizer = EasyMock.niceMock(classOf[Authorizer])

  val operation = AclOperation.WRITE
  val resourceType = ResourceType.TOPIC
  val resourceName = "topic-1"
  val requestHeader = new RequestHeader(ApiKeys.PRODUCE, ApiKeys.PRODUCE.latestVersion,
    clientId, 0)
  val requestContext = new RequestContext(requestHeader, "1", InetAddress.getLocalHost,
    KafkaPrincipal.ANONYMOUS, ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT),
    SecurityProtocol.PLAINTEXT, ClientInformation.EMPTY)

  // Action args after the pattern: count = 1, then two boolean flags —
  // NOTE(review): presumably logIfAllowed/logIfDenied; confirm against the
  // Authorizer Action API.
  val expectedActions = Seq(
    new Action(operation, new ResourcePattern(resourceType, resourceName, PatternType.LITERAL),
      1, true, true)
  )

  EasyMock.expect(authorizer.authorize(requestContext, expectedActions.asJava))
    .andReturn(Seq(AuthorizationResult.ALLOWED).asJava)
    .once()

  EasyMock.replay(authorizer)

  val result = createKafkaApis(authorizer = Some(authorizer)).authorize(
    requestContext, operation, resourceType, resourceName)

  verify(authorizer)

  assertEquals(true, result)
}
// filterByAuthorized should issue a single authorize() call covering the
// de-duplicated resource names and keep only the names the authorizer allows
// (topic-1 and topic-3 here; topic-2 is denied by the stubbed answer).
@Test
def testFilterByAuthorized(): Unit = {
  val authorizer: Authorizer = EasyMock.niceMock(classOf[Authorizer])

  val operation = AclOperation.WRITE
  val resourceType = ResourceType.TOPIC
  val resourceName1 = "topic-1"
  val resourceName2 = "topic-2"
  val resourceName3 = "topic-3"
  val requestHeader = new RequestHeader(ApiKeys.PRODUCE, ApiKeys.PRODUCE.latestVersion,
    clientId, 0)
  val requestContext = new RequestContext(requestHeader, "1", InetAddress.getLocalHost,
    KafkaPrincipal.ANONYMOUS, ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT),
    SecurityProtocol.PLAINTEXT, ClientInformation.EMPTY)

  // resourceName1 appears twice in the input below, hence its count is 2.
  val expectedActions = Seq(
    new Action(operation, new ResourcePattern(resourceType, resourceName1, PatternType.LITERAL),
      2, true, true),
    new Action(operation, new ResourcePattern(resourceType, resourceName2, PatternType.LITERAL),
      1, true, true),
    new Action(operation, new ResourcePattern(resourceType, resourceName3, PatternType.LITERAL),
      1, true, true),
  )

  EasyMock.expect(authorizer.authorize(
    EasyMock.eq(requestContext), matchSameElements(expectedActions.asJava)
  )).andAnswer { () =>
    val actions = EasyMock.getCurrentArguments.apply(1).asInstanceOf[util.List[Action]].asScala
    actions.map { action =>
      if (Set(resourceName1, resourceName3).contains(action.resourcePattern.name))
        AuthorizationResult.ALLOWED
      else
        AuthorizationResult.DENIED
    }.asJava
  }.once()

  EasyMock.replay(authorizer)

  val result = createKafkaApis(authorizer = Some(authorizer)).filterByAuthorized(
    requestContext,
    operation,
    resourceType,
    // Duplicate resource names should not trigger multiple calls to authorize
    Seq(resourceName1, resourceName2, resourceName1, resourceName3)
  )(identity)

  verify(authorizer)

  assertEquals(Set(resourceName1, resourceName3), result)
}
/**
 * Registers an EasyMock argument matcher that accepts any java.util.List
 * containing the same elements as `list`, irrespective of ordering.
 * Returns null, per EasyMock's matcher-registration convention (the return
 * value is a placeholder consumed by the recorded call).
 */
private def matchSameElements[T](list: util.List[T]): util.List[T] = {
  val expectedElements = list.asScala.toSet
  EasyMock.reportMatcher(new IArgumentMatcher {
    override def matches(argument: Any): Boolean = argument match {
      case actual: util.List[_] => actual.asScala.toSet == expectedElements
      case _ => false
    }
    override def appendTo(buffer: StringBuffer): Unit = buffer.append(s"list($list)")
  })
  null
}
// handleDescribeConfigsRequest should authorize the topic resource exactly
// once and, when allowed, delegate to adminManager.describeConfigs.
@Test
def testDescribeConfigsWithAuthorizer(): Unit = {
  val authorizer: Authorizer = EasyMock.niceMock(classOf[Authorizer])

  val operation = AclOperation.DESCRIBE_CONFIGS
  val resourceType = ResourceType.TOPIC
  val resourceName = "topic-1"
  val requestHeader = new RequestHeader(ApiKeys.DESCRIBE_CONFIGS, ApiKeys.DESCRIBE_CONFIGS.latestVersion,
    clientId, 0)

  val expectedActions = Seq(
    new Action(operation, new ResourcePattern(resourceType, resourceName, PatternType.LITERAL),
      1, true, true)
  )

  // Verify that authorize is only called once
  EasyMock.expect(authorizer.authorize(anyObject[RequestContext], EasyMock.eq(expectedActions.asJava)))
    .andReturn(Seq(AuthorizationResult.ALLOWED).asJava)
    .once()

  expectNoThrottling()

  val configResource = new ConfigResource(ConfigResource.Type.TOPIC, resourceName)
  val config = new DescribeConfigsResponse.Config(ApiError.NONE, Collections.emptyList[DescribeConfigsResponse.ConfigEntry])
  EasyMock.expect(adminManager.describeConfigs(anyObject(), EasyMock.eq(true)))
    .andReturn(Map(configResource -> config))

  EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel, authorizer,
    adminManager)

  val resourceToConfigNames = Map[ConfigResource, util.Collection[String]](
    configResource -> Collections.emptyList[String])
  val request = buildRequest(new DescribeConfigsRequest(requestHeader.apiVersion,
    resourceToConfigNames.asJava, true))
  createKafkaApis(authorizer = Some(authorizer)).handleDescribeConfigsRequest(request)

  verify(authorizer, adminManager)
}
// handleAlterConfigsRequest should authorize the topic resource exactly once
// and, when allowed, delegate to adminManager.alterConfigs (validateOnly = false).
@Test
def testAlterConfigsWithAuthorizer(): Unit = {
  val authorizer: Authorizer = EasyMock.niceMock(classOf[Authorizer])

  val operation = AclOperation.ALTER_CONFIGS
  val resourceType = ResourceType.TOPIC
  val resourceName = "topic-1"
  val requestHeader = new RequestHeader(ApiKeys.ALTER_CONFIGS, ApiKeys.ALTER_CONFIGS.latestVersion,
    clientId, 0)

  val expectedActions = Seq(
    new Action(operation, new ResourcePattern(resourceType, resourceName, PatternType.LITERAL),
      1, true, true)
  )

  // Verify that authorize is only called once
  EasyMock.expect(authorizer.authorize(anyObject[RequestContext], EasyMock.eq(expectedActions.asJava)))
    .andReturn(Seq(AuthorizationResult.ALLOWED).asJava)
    .once()

  expectNoThrottling()

  val configResource = new ConfigResource(ConfigResource.Type.TOPIC, resourceName)
  EasyMock.expect(adminManager.alterConfigs(anyObject(), EasyMock.eq(false)))
    .andReturn(Map(configResource -> ApiError.NONE))

  EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel, authorizer,
    adminManager)

  val configs = Map(
    configResource -> new AlterConfigsRequest.Config(
      Seq(new AlterConfigsRequest.ConfigEntry("foo", "bar")).asJava))
  val request = buildRequest(new AlterConfigsRequest.Builder(configs.asJava, false)
    .build(requestHeader.apiVersion))

  createKafkaApis(authorizer = Some(authorizer)).handleAlterConfigsRequest(request)

  verify(authorizer, adminManager)
}
// handleIncrementalAlterConfigsRequest should authorize the topic resource
// exactly once and, when allowed, delegate to
// adminManager.incrementalAlterConfigs (validateOnly = false).
@Test
def testIncrementalAlterConfigsWithAuthorizer(): Unit = {
  val authorizer: Authorizer = EasyMock.niceMock(classOf[Authorizer])

  val operation = AclOperation.ALTER_CONFIGS
  val resourceType = ResourceType.TOPIC
  val resourceName = "topic-1"
  val requestHeader = new RequestHeader(ApiKeys.INCREMENTAL_ALTER_CONFIGS,
    ApiKeys.INCREMENTAL_ALTER_CONFIGS.latestVersion, clientId, 0)

  val expectedActions = Seq(
    new Action(operation, new ResourcePattern(resourceType, resourceName, PatternType.LITERAL),
      1, true, true)
  )

  // Verify that authorize is only called once
  EasyMock.expect(authorizer.authorize(anyObject[RequestContext], EasyMock.eq(expectedActions.asJava)))
    .andReturn(Seq(AuthorizationResult.ALLOWED).asJava)
    .once()

  expectNoThrottling()

  val configResource = new ConfigResource(ConfigResource.Type.TOPIC, resourceName)
  EasyMock.expect(adminManager.incrementalAlterConfigs(anyObject(), EasyMock.eq(false)))
    .andReturn(Map(configResource -> ApiError.NONE))

  EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel, authorizer,
    adminManager)

  val requestData = new IncrementalAlterConfigsRequestData()
  val alterResource = new IncrementalAlterConfigsRequestData.AlterConfigsResource()
    .setResourceName(configResource.name)
    .setResourceType(configResource.`type`.id)
  alterResource.configs.add(new AlterableConfig()
    .setName("foo")
    .setValue("bar"))
  requestData.resources.add(alterResource)

  val request = buildRequest(new IncrementalAlterConfigsRequest.Builder(requestData)
    .build(requestHeader.apiVersion))

  createKafkaApis(authorizer = Some(authorizer)).handleIncrementalAlterConfigsRequest(request)

  verify(authorizer, adminManager)
}
// An OffsetCommit naming a partition id outside [0, numPartitions) must come
// back with a per-partition UNKNOWN_TOPIC_OR_PARTITION error.
@Test
def testOffsetCommitWithInvalidPartition(): Unit = {
  val topic = "topic"
  setupBasicMetadataCache(topic, numPartitions = 1)

  // Builds and sends an OffsetCommit for the given (invalid) partition id,
  // then asserts the error code on the single topic/partition in the response.
  def checkInvalidPartition(invalidPartitionId: Int): Unit = {
    EasyMock.reset(replicaManager, clientRequestQuotaManager, requestChannel)

    val offsetCommitRequest = new OffsetCommitRequest.Builder(
      new OffsetCommitRequestData()
        .setGroupId("groupId")
        .setTopics(Collections.singletonList(
          new OffsetCommitRequestData.OffsetCommitRequestTopic()
            .setName(topic)
            .setPartitions(Collections.singletonList(
              new OffsetCommitRequestData.OffsetCommitRequestPartition()
                .setPartitionIndex(invalidPartitionId)
                .setCommittedOffset(15)
                .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH)
                .setCommittedMetadata(""))
            )
        ))).build()

    val request = buildRequest(offsetCommitRequest)
    val capturedResponse = expectNoThrottling()
    EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel)
    createKafkaApis().handleOffsetCommitRequest(request)

    val response = readResponse(ApiKeys.OFFSET_COMMIT, offsetCommitRequest, capturedResponse)
      .asInstanceOf[OffsetCommitResponse]
    assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION,
      Errors.forCode(response.data().topics().get(0).partitions().get(0).errorCode()))
  }

  checkInvalidPartition(-1)
  checkInvalidPartition(1) // topic has only one partition
}
// A transactional offset commit naming a partition id outside
// [0, numPartitions) must come back with UNKNOWN_TOPIC_OR_PARTITION for that
// partition.
@Test
def testTxnOffsetCommitWithInvalidPartition(): Unit = {
  val topic = "topic"
  setupBasicMetadataCache(topic, numPartitions = 1)

  def checkInvalidPartition(invalidPartitionId: Int): Unit = {
    EasyMock.reset(replicaManager, clientRequestQuotaManager, requestChannel)

    val invalidTopicPartition = new TopicPartition(topic, invalidPartitionId)
    val partitionOffsetCommitData = new TxnOffsetCommitRequest.CommittedOffset(15L, "", Optional.empty())
    val offsetCommitRequest = new TxnOffsetCommitRequest.Builder(
      "txnId",
      "groupId",
      15L,
      0.toShort,
      Map(invalidTopicPartition -> partitionOffsetCommitData).asJava,
      false
    ).build()
    val request = buildRequest(offsetCommitRequest)

    val capturedResponse = expectNoThrottling()
    EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel)
    createKafkaApis().handleTxnOffsetCommitRequest(request)

    val response = readResponse(ApiKeys.TXN_OFFSET_COMMIT, offsetCommitRequest, capturedResponse)
      .asInstanceOf[TxnOffsetCommitResponse]
    assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, response.errors().get(invalidTopicPartition))
  }

  checkInvalidPartition(-1)
  checkInvalidPartition(1) // topic has only one partition
}
// For a version-1 TxnOffsetCommit (older client), a COORDINATOR_LOAD_IN_PROGRESS
// error from the group coordinator must be rewritten to
// COORDINATOR_NOT_AVAILABLE in the response — presumably because v1 clients do
// not understand the load-in-progress code; confirm against the protocol docs.
@Test
def shouldReplaceCoordinatorNotAvailableWithLoadInProcessInTxnOffsetCommitWithOlderClient(): Unit = {
  val topic = "topic"
  setupBasicMetadataCache(topic, numPartitions = 2)

  EasyMock.reset(replicaManager, clientRequestQuotaManager, requestChannel, groupCoordinator)

  val topicPartition = new TopicPartition(topic, 1)
  val capturedResponse: Capture[RequestChannel.Response] = EasyMock.newCapture()
  val responseCallback: Capture[Map[TopicPartition, Errors] => Unit] = EasyMock.newCapture()

  val partitionOffsetCommitData = new TxnOffsetCommitRequest.CommittedOffset(15L, "", Optional.empty())
  val groupId = "groupId"
  val offsetCommitRequest = new TxnOffsetCommitRequest.Builder(
    "txnId",
    groupId,
    15L,
    0.toShort,
    Map(topicPartition -> partitionOffsetCommitData).asJava,
    false
  ).build(1)  // explicit version 1 — the behavior under test is version-dependent
  val request = buildRequest(offsetCommitRequest)

  // Capture the coordinator callback so the test can drive it with a
  // COORDINATOR_LOAD_IN_PROGRESS result.
  EasyMock.expect(groupCoordinator.handleTxnCommitOffsets(
    EasyMock.eq(groupId),
    EasyMock.eq(15L),
    EasyMock.eq(0),
    EasyMock.anyString(),
    EasyMock.eq(Option.empty),
    EasyMock.anyInt(),
    EasyMock.anyObject(),
    EasyMock.capture(responseCallback)
  )).andAnswer(
    () => responseCallback.getValue.apply(Map(topicPartition -> Errors.COORDINATOR_LOAD_IN_PROGRESS)))

  EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))

  EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel, groupCoordinator)
  createKafkaApis().handleTxnOffsetCommitRequest(request)

  val response = readResponse(ApiKeys.TXN_OFFSET_COMMIT, offsetCommitRequest, capturedResponse)
    .asInstanceOf[TxnOffsetCommitResponse]
  assertEquals(Errors.COORDINATOR_NOT_AVAILABLE, response.errors().get(topicPartition))
}
// AddPartitionsToTxn naming a partition id outside [0, numPartitions) must
// come back with UNKNOWN_TOPIC_OR_PARTITION for that partition.
@Test
def testAddPartitionsToTxnWithInvalidPartition(): Unit = {
  val topic = "topic"
  setupBasicMetadataCache(topic, numPartitions = 1)

  def checkInvalidPartition(invalidPartitionId: Int): Unit = {
    EasyMock.reset(replicaManager, clientRequestQuotaManager, requestChannel)

    val invalidTopicPartition = new TopicPartition(topic, invalidPartitionId)
    val addPartitionsToTxnRequest = new AddPartitionsToTxnRequest.Builder(
      "txnlId", 15L, 0.toShort, List(invalidTopicPartition).asJava
    ).build()
    val request = buildRequest(addPartitionsToTxnRequest)

    val capturedResponse = expectNoThrottling()
    EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel)
    createKafkaApis().handleAddPartitionToTxnRequest(request)

    val response = readResponse(ApiKeys.ADD_PARTITIONS_TO_TXN, addPartitionsToTxnRequest, capturedResponse)
      .asInstanceOf[AddPartitionsToTxnResponse]
    assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, response.errors().get(invalidTopicPartition))
  }

  checkInvalidPartition(-1)
  checkInvalidPartition(1) // topic has only one partition
}
// Transactional APIs must be rejected when the inter-broker protocol
// (0.10.2 here) predates transaction support. Passing null is fine —
// presumably the version check fires before the request body is read;
// confirm in KafkaApis.
@Test(expected = classOf[UnsupportedVersionException])
def shouldThrowUnsupportedVersionExceptionOnHandleAddOffsetToTxnRequestWhenInterBrokerProtocolNotSupported(): Unit = {
  createKafkaApis(KAFKA_0_10_2_IV0).handleAddOffsetsToTxnRequest(null)
}

@Test(expected = classOf[UnsupportedVersionException])
def shouldThrowUnsupportedVersionExceptionOnHandleAddPartitionsToTxnRequestWhenInterBrokerProtocolNotSupported(): Unit = {
  createKafkaApis(KAFKA_0_10_2_IV0).handleAddPartitionToTxnRequest(null)
}
// The TxnOffsetCommit path must be rejected when the inter-broker protocol
// (0.10.2 here) predates transaction support.
// Fix: this test previously invoked handleAddPartitionToTxnRequest (a
// copy/paste of the test above), so the TxnOffsetCommit version gate was
// never actually exercised.
@Test(expected = classOf[UnsupportedVersionException])
def shouldThrowUnsupportedVersionExceptionOnHandleTxnOffsetCommitRequestWhenInterBrokerProtocolNotSupported(): Unit = {
  createKafkaApis(KAFKA_0_10_2_IV0).handleTxnOffsetCommitRequest(null)
}
// EndTxn and WriteTxnMarkers must likewise be rejected when the inter-broker
// protocol (0.10.2 here) predates transaction support; the null request is
// presumably never read because the version check fires first — confirm in
// KafkaApis.
@Test(expected = classOf[UnsupportedVersionException])
def shouldThrowUnsupportedVersionExceptionOnHandleEndTxnRequestWhenInterBrokerProtocolNotSupported(): Unit = {
  createKafkaApis(KAFKA_0_10_2_IV0).handleEndTxnRequest(null)
}

@Test(expected = classOf[UnsupportedVersionException])
def shouldThrowUnsupportedVersionExceptionOnHandleWriteTxnMarkersRequestWhenInterBrokerProtocolNotSupported(): Unit = {
  createKafkaApis(KAFKA_0_10_2_IV0).handleWriteTxnMarkersRequest(null)
}
// When the partition's log uses magic V1 (lower than the V2 transactional
// markers require), WriteTxnMarkers must answer UNSUPPORTED_FOR_MESSAGE_FORMAT
// for that partition.
@Test
def shouldRespondWithUnsupportedForMessageFormatOnHandleWriteTxnMarkersWhenMagicLowerThanRequired(): Unit = {
  val topicPartition = new TopicPartition("t", 0)
  val (writeTxnMarkersRequest, request) = createWriteTxnMarkersRequest(asList(topicPartition))
  val expectedErrors = Map(topicPartition -> Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT).asJava
  val capturedResponse: Capture[RequestChannel.Response] = EasyMock.newCapture()

  EasyMock.expect(replicaManager.getMagic(topicPartition))
    .andReturn(Some(RecordBatch.MAGIC_VALUE_V1))
  EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
  EasyMock.replay(replicaManager, replicaQuotaManager, requestChannel)

  createKafkaApis().handleWriteTxnMarkersRequest(request)

  // errors(1): 1 is the producer id used by createWriteTxnMarkersRequest —
  // TODO confirm against that helper (defined elsewhere in this file).
  val markersResponse = readResponse(ApiKeys.WRITE_TXN_MARKERS, writeTxnMarkersRequest, capturedResponse)
    .asInstanceOf[WriteTxnMarkersResponse]
  assertEquals(expectedErrors, markersResponse.errors(1))
}
// When the broker does not host the partition (getMagic returns None),
// WriteTxnMarkers must answer UNKNOWN_TOPIC_OR_PARTITION for it.
@Test
def shouldRespondWithUnknownTopicWhenPartitionIsNotHosted(): Unit = {
  val topicPartition = new TopicPartition("t", 0)
  val (writeTxnMarkersRequest, request) = createWriteTxnMarkersRequest(asList(topicPartition))
  val expectedErrors = Map(topicPartition -> Errors.UNKNOWN_TOPIC_OR_PARTITION).asJava
  val capturedResponse: Capture[RequestChannel.Response] = EasyMock.newCapture()

  EasyMock.expect(replicaManager.getMagic(topicPartition))
    .andReturn(None)
  EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
  EasyMock.replay(replicaManager, replicaQuotaManager, requestChannel)

  createKafkaApis().handleWriteTxnMarkersRequest(request)

  val markersResponse = readResponse(ApiKeys.WRITE_TXN_MARKERS, writeTxnMarkersRequest, capturedResponse)
    .asInstanceOf[WriteTxnMarkersResponse]
  assertEquals(expectedErrors, markersResponse.errors(1))
}
// Mixed request: the V1-magic partition must get UNSUPPORTED_FOR_MESSAGE_FORMAT
// while the V2-magic partition is appended to and reports NONE.
@Test
def shouldRespondWithUnsupportedMessageFormatForBadPartitionAndNoErrorsForGoodPartition(): Unit = {
  val tp1 = new TopicPartition("t", 0)
  val tp2 = new TopicPartition("t1", 0)
  val (writeTxnMarkersRequest, request) = createWriteTxnMarkersRequest(asList(tp1, tp2))
  val expectedErrors = Map(tp1 -> Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT, tp2 -> Errors.NONE).asJava

  val capturedResponse: Capture[RequestChannel.Response] = EasyMock.newCapture()
  val responseCallback: Capture[Map[TopicPartition, PartitionResponse] => Unit] = EasyMock.newCapture()

  EasyMock.expect(replicaManager.getMagic(tp1))
    .andReturn(Some(RecordBatch.MAGIC_VALUE_V1))
  EasyMock.expect(replicaManager.getMagic(tp2))
    .andReturn(Some(RecordBatch.MAGIC_VALUE_V2))

  // Only the good partition reaches appendRecords; the captured callback is
  // driven immediately to complete the request.
  EasyMock.expect(replicaManager.appendRecords(EasyMock.anyLong(),
    EasyMock.anyShort(),
    EasyMock.eq(true),
    EasyMock.eq(AppendOrigin.Coordinator),
    EasyMock.anyObject(),
    EasyMock.capture(responseCallback),
    EasyMock.anyObject(),
    EasyMock.anyObject())
  ).andAnswer(() => responseCallback.getValue.apply(Map(tp2 -> new PartitionResponse(Errors.NONE))))

  EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
  EasyMock.replay(replicaManager, replicaQuotaManager, requestChannel)

  createKafkaApis().handleWriteTxnMarkersRequest(request)

  val markersResponse = readResponse(ApiKeys.WRITE_TXN_MARKERS, writeTxnMarkersRequest, capturedResponse)
    .asInstanceOf[WriteTxnMarkersResponse]
  assertEquals(expectedErrors, markersResponse.errors(1))
  EasyMock.verify(replicaManager)
}
// The four cases below drive the shared StopReplica helper with every epoch
// shape: a real leader epoch and the EpochDuringDelete / NoEpoch sentinels
// (each with deletePartition = true), plus a real epoch WITHOUT the delete
// flag, for which the coordinators must not be resigned.
@Test
def shouldResignCoordinatorsIfStopReplicaReceivedWithDeleteFlagAndLeaderEpoch(): Unit = {
  shouldResignCoordinatorsIfStopReplicaReceivedWithDeleteFlag(
    LeaderAndIsr.initialLeaderEpoch + 2, true)
}

@Test
def shouldResignCoordinatorsIfStopReplicaReceivedWithDeleteFlagAndDeleteSentinel(): Unit = {
  shouldResignCoordinatorsIfStopReplicaReceivedWithDeleteFlag(
    LeaderAndIsr.EpochDuringDelete, true)
}

@Test
def shouldResignCoordinatorsIfStopReplicaReceivedWithDeleteFlagAndNoEpochSentinel(): Unit = {
  shouldResignCoordinatorsIfStopReplicaReceivedWithDeleteFlag(
    LeaderAndIsr.NoEpoch, true)
}

@Test
def shouldNotResignCoordinatorsIfStopReplicaReceivedWithoutDeleteFlag(): Unit = {
  shouldResignCoordinatorsIfStopReplicaReceivedWithDeleteFlag(
    LeaderAndIsr.initialLeaderEpoch + 2, false)
}
/**
 * Drives a StopReplica request covering a regular topic partition plus one
 * __consumer_offsets and one __transaction_state partition, and verifies that
 * the group/transaction coordinators are asked to resign if and only if the
 * request carries the delete flag.
 *
 * Cleanup: the two consecutive `if (deletePartition)` blocks were merged into
 * one — the expectations they record are unchanged and in the same order.
 *
 * @param leaderEpoch     leader epoch carried in the request (may be a sentinel)
 * @param deletePartition whether the partitions are flagged for deletion
 */
def shouldResignCoordinatorsIfStopReplicaReceivedWithDeleteFlag(leaderEpoch: Int,
                                                                deletePartition: Boolean): Unit = {
  val controllerId = 0
  val controllerEpoch = 5
  val brokerEpoch = 230498320L

  val fooPartition = new TopicPartition("foo", 0)
  val groupMetadataPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 0)
  val txnStatePartition = new TopicPartition(Topic.TRANSACTION_STATE_TOPIC_NAME, 0)

  val topicStates = Seq(
    new StopReplicaTopicState()
      .setTopicName(groupMetadataPartition.topic())
      .setPartitionStates(Seq(new StopReplicaPartitionState()
        .setPartitionIndex(groupMetadataPartition.partition())
        .setLeaderEpoch(leaderEpoch)
        .setDeletePartition(deletePartition)).asJava),
    new StopReplicaTopicState()
      .setTopicName(txnStatePartition.topic())
      .setPartitionStates(Seq(new StopReplicaPartitionState()
        .setPartitionIndex(txnStatePartition.partition())
        .setLeaderEpoch(leaderEpoch)
        .setDeletePartition(deletePartition)).asJava),
    new StopReplicaTopicState()
      .setTopicName(fooPartition.topic())
      .setPartitionStates(Seq(new StopReplicaPartitionState()
        .setPartitionIndex(fooPartition.partition())
        .setLeaderEpoch(leaderEpoch)
        .setDeletePartition(deletePartition)).asJava)
  ).asJava

  val stopReplicaRequest = new StopReplicaRequest.Builder(
    ApiKeys.STOP_REPLICA.latestVersion,
    controllerId,
    controllerEpoch,
    brokerEpoch,
    false,
    topicStates
  ).build()
  val request = buildRequest(stopReplicaRequest)

  EasyMock.expect(replicaManager.stopReplicas(
    EasyMock.eq(request.context.correlationId),
    EasyMock.eq(controllerId),
    EasyMock.eq(controllerEpoch),
    EasyMock.eq(brokerEpoch),
    EasyMock.eq(stopReplicaRequest.partitionStates().asScala)
  )).andReturn(
    (mutable.Map(
      groupMetadataPartition -> Errors.NONE,
      txnStatePartition -> Errors.NONE,
      fooPartition -> Errors.NONE
    ), Errors.NONE)
  )
  EasyMock.expect(controller.brokerEpoch).andStubReturn(brokerEpoch)

  // Coordinators must step down only when the partitions are being deleted.
  // A non-negative leader epoch is forwarded to the transaction coordinator;
  // the delete/no-epoch sentinels are mapped to None.
  if (deletePartition) {
    val coordinatorEpoch = if (leaderEpoch >= 0) Some(leaderEpoch) else None
    txnCoordinator.onResignation(txnStatePartition.partition, coordinatorEpoch)
    EasyMock.expectLastCall()
    groupCoordinator.onResignation(groupMetadataPartition.partition)
    EasyMock.expectLastCall()
  }

  EasyMock.replay(controller, replicaManager, txnCoordinator, groupCoordinator)

  createKafkaApis().handleStopReplicaRequest(request)

  EasyMock.verify(txnCoordinator, groupCoordinator)
}
// Mixed request: the unhosted partition (getMagic returns None) must get
// UNKNOWN_TOPIC_OR_PARTITION while the V2-magic partition is appended to and
// reports NONE.
@Test
def shouldRespondWithUnknownTopicOrPartitionForBadPartitionAndNoErrorsForGoodPartition(): Unit = {
  val tp1 = new TopicPartition("t", 0)
  val tp2 = new TopicPartition("t1", 0)
  val (writeTxnMarkersRequest, request) = createWriteTxnMarkersRequest(asList(tp1, tp2))
  val expectedErrors = Map(tp1 -> Errors.UNKNOWN_TOPIC_OR_PARTITION, tp2 -> Errors.NONE).asJava

  val capturedResponse: Capture[RequestChannel.Response] = EasyMock.newCapture()
  val responseCallback: Capture[Map[TopicPartition, PartitionResponse] => Unit] = EasyMock.newCapture()

  EasyMock.expect(replicaManager.getMagic(tp1))
    .andReturn(None)
  EasyMock.expect(replicaManager.getMagic(tp2))
    .andReturn(Some(RecordBatch.MAGIC_VALUE_V2))

  // Only the hosted partition reaches appendRecords; complete it via the
  // captured callback.
  EasyMock.expect(replicaManager.appendRecords(EasyMock.anyLong(),
    EasyMock.anyShort(),
    EasyMock.eq(true),
    EasyMock.eq(AppendOrigin.Coordinator),
    EasyMock.anyObject(),
    EasyMock.capture(responseCallback),
    EasyMock.anyObject(),
    EasyMock.anyObject())
  ).andAnswer(() => responseCallback.getValue.apply(Map(tp2 -> new PartitionResponse(Errors.NONE))))

  EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
  EasyMock.replay(replicaManager, replicaQuotaManager, requestChannel)

  createKafkaApis().handleWriteTxnMarkersRequest(request)

  val markersResponse = readResponse(ApiKeys.WRITE_TXN_MARKERS, writeTxnMarkersRequest, capturedResponse)
    .asInstanceOf[WriteTxnMarkersResponse]
  assertEquals(expectedErrors, markersResponse.errors(1))
  EasyMock.verify(replicaManager)
}
// When the partition's log already uses magic V2, WriteTxnMarkers must go
// through replicaManager.appendRecords with the Coordinator append origin.
@Test
def shouldAppendToLogOnWriteTxnMarkersWhenCorrectMagicVersion(): Unit = {
  val topicPartition = new TopicPartition("t", 0)
  val request = createWriteTxnMarkersRequest(asList(topicPartition))._2
  EasyMock.expect(replicaManager.getMagic(topicPartition))
    .andReturn(Some(RecordBatch.MAGIC_VALUE_V2))

  EasyMock.expect(replicaManager.appendRecords(EasyMock.anyLong(),
    EasyMock.anyShort(),
    EasyMock.eq(true),
    EasyMock.eq(AppendOrigin.Coordinator),
    EasyMock.anyObject(),
    EasyMock.anyObject(),
    EasyMock.anyObject(),
    EasyMock.anyObject()))

  EasyMock.replay(replicaManager)

  createKafkaApis().handleWriteTxnMarkersRequest(request)
  EasyMock.verify(replicaManager)
}
// ListOffsets must surface each of these leader-replica lookup errors to the
// client; the shared driver testListOffsetFailedGetLeaderReplica is defined
// elsewhere in this file.
@Test
def testLeaderReplicaIfLocalRaisesFencedLeaderEpoch(): Unit = {
  testListOffsetFailedGetLeaderReplica(Errors.FENCED_LEADER_EPOCH)
}

@Test
def testLeaderReplicaIfLocalRaisesUnknownLeaderEpoch(): Unit = {
  testListOffsetFailedGetLeaderReplica(Errors.UNKNOWN_LEADER_EPOCH)
}

@Test
def testLeaderReplicaIfLocalRaisesNotLeaderForPartition(): Unit = {
  testListOffsetFailedGetLeaderReplica(Errors.NOT_LEADER_FOR_PARTITION)
}

@Test
def testLeaderReplicaIfLocalRaisesUnknownTopicOrPartition(): Unit = {
  testListOffsetFailedGetLeaderReplica(Errors.UNKNOWN_TOPIC_OR_PARTITION)
}
// handleDescribeGroupRequest should copy every field of the coordinator's
// GroupSummary / MemberSummary verbatim into the DescribeGroups response.
@Test
def testDescribeGroups(): Unit = {
  val groupId = "groupId"
  val random = new Random()
  // Random (but fixed for the test run) metadata/assignment payloads; they
  // are compared byte-for-byte against the response below.
  val metadata = new Array[Byte](10)
  random.nextBytes(metadata)
  val assignment = new Array[Byte](10)
  random.nextBytes(assignment)

  val memberSummary = MemberSummary("memberid", Some("instanceid"), "clientid", "clienthost", metadata, assignment)
  val groupSummary = GroupSummary("Stable", "consumer", "roundrobin", List(memberSummary))

  EasyMock.reset(groupCoordinator, replicaManager, clientRequestQuotaManager, requestChannel)

  val describeGroupsRequest = new DescribeGroupsRequest.Builder(
    new DescribeGroupsRequestData().setGroups(List(groupId).asJava)
  ).build()
  val request = buildRequest(describeGroupsRequest)

  val capturedResponse = expectNoThrottling()
  EasyMock.expect(groupCoordinator.handleDescribeGroup(EasyMock.eq(groupId)))
    .andReturn((Errors.NONE, groupSummary))
  EasyMock.replay(groupCoordinator, replicaManager, clientRequestQuotaManager, requestChannel)

  createKafkaApis().handleDescribeGroupRequest(request)

  val response = readResponse(ApiKeys.DESCRIBE_GROUPS, describeGroupsRequest, capturedResponse)
    .asInstanceOf[DescribeGroupsResponse]

  val group = response.data().groups().get(0)
  assertEquals(Errors.NONE, Errors.forCode(group.errorCode()))
  assertEquals(groupId, group.groupId())
  assertEquals(groupSummary.state, group.groupState())
  assertEquals(groupSummary.protocolType, group.protocolType())
  assertEquals(groupSummary.protocol, group.protocolData())
  assertEquals(groupSummary.members.size, group.members().size())

  val member = group.members().get(0)
  assertEquals(memberSummary.memberId, member.memberId())
  assertEquals(memberSummary.groupInstanceId.orNull, member.groupInstanceId())
  assertEquals(memberSummary.clientId, member.clientId())
  assertEquals(memberSummary.clientHost, member.clientHost())
  assertArrayEquals(memberSummary.metadata, member.memberMetadata())
  assertArrayEquals(memberSummary.assignment, member.memberAssignment())
}
  // Happy-path OffsetDelete: all partitions of both topics are forwarded to
  // the group coordinator and each comes back with Errors.NONE in the response.
  @Test
  def testOffsetDelete(): Unit = {
    val group = "groupId"
    setupBasicMetadataCache("topic-1", numPartitions = 2)
    setupBasicMetadataCache("topic-2", numPartitions = 2)
    EasyMock.reset(groupCoordinator, replicaManager, clientRequestQuotaManager, requestChannel)
    val topics = new OffsetDeleteRequestTopicCollection()
    topics.add(new OffsetDeleteRequestTopic()
      .setName("topic-1")
      .setPartitions(Seq(
        new OffsetDeleteRequestPartition().setPartitionIndex(0),
        new OffsetDeleteRequestPartition().setPartitionIndex(1)).asJava))
    topics.add(new OffsetDeleteRequestTopic()
      .setName("topic-2")
      .setPartitions(Seq(
        new OffsetDeleteRequestPartition().setPartitionIndex(0),
        new OffsetDeleteRequestPartition().setPartitionIndex(1)).asJava))
    val offsetDeleteRequest = new OffsetDeleteRequest.Builder(
      new OffsetDeleteRequestData()
        .setGroupId(group)
        .setTopics(topics)
    ).build()
    val request = buildRequest(offsetDeleteRequest)
    val capturedResponse = expectNoThrottling()
    // The coordinator must receive exactly the four known partitions.
    EasyMock.expect(groupCoordinator.handleDeleteOffsets(
      EasyMock.eq(group),
      EasyMock.eq(Seq(
        new TopicPartition("topic-1", 0),
        new TopicPartition("topic-1", 1),
        new TopicPartition("topic-2", 0),
        new TopicPartition("topic-2", 1)
      ))
    )).andReturn((Errors.NONE, Map(
      new TopicPartition("topic-1", 0) -> Errors.NONE,
      new TopicPartition("topic-1", 1) -> Errors.NONE,
      new TopicPartition("topic-2", 0) -> Errors.NONE,
      new TopicPartition("topic-2", 1) -> Errors.NONE,
    )))
    EasyMock.replay(groupCoordinator, replicaManager, clientRequestQuotaManager, requestChannel)
    createKafkaApis().handleOffsetDeleteRequest(request)
    val response = readResponse(ApiKeys.OFFSET_DELETE, offsetDeleteRequest, capturedResponse)
      .asInstanceOf[OffsetDeleteResponse]
    // Looks up the per-partition error code in the response payload.
    def errorForPartition(topic: String, partition: Int): Errors = {
      Errors.forCode(response.data.topics.find(topic).partitions.find(partition).errorCode())
    }
    assertEquals(2, response.data.topics.size)
    assertEquals(Errors.NONE, errorForPartition("topic-1", 0))
    assertEquals(Errors.NONE, errorForPartition("topic-1", 1))
    assertEquals(Errors.NONE, errorForPartition("topic-2", 0))
    assertEquals(Errors.NONE, errorForPartition("topic-2", 1))
  }
  // OffsetDelete with an out-of-range partition id: the invalid partition must
  // not reach the coordinator and must be answered with
  // UNKNOWN_TOPIC_OR_PARTITION in the response.
  @Test
  def testOffsetDeleteWithInvalidPartition(): Unit = {
    val group = "groupId"
    val topic = "topic"
    setupBasicMetadataCache(topic, numPartitions = 1)
    def checkInvalidPartition(invalidPartitionId: Int): Unit = {
      EasyMock.reset(groupCoordinator, replicaManager, clientRequestQuotaManager, requestChannel)
      val topics = new OffsetDeleteRequestTopicCollection()
      topics.add(new OffsetDeleteRequestTopic()
        .setName(topic)
        .setPartitions(Collections.singletonList(
          new OffsetDeleteRequestPartition().setPartitionIndex(invalidPartitionId))))
      val offsetDeleteRequest = new OffsetDeleteRequest.Builder(
        new OffsetDeleteRequestData()
          .setGroupId(group)
          .setTopics(topics)
      ).build()
      val request = buildRequest(offsetDeleteRequest)
      val capturedResponse = expectNoThrottling()
      // The invalid partition is filtered out, so the coordinator sees an empty seq.
      EasyMock.expect(groupCoordinator.handleDeleteOffsets(EasyMock.eq(group), EasyMock.eq(Seq.empty)))
        .andReturn((Errors.NONE, Map.empty))
      EasyMock.replay(groupCoordinator, replicaManager, clientRequestQuotaManager, requestChannel)
      createKafkaApis().handleOffsetDeleteRequest(request)
      val response = readResponse(ApiKeys.OFFSET_DELETE, offsetDeleteRequest, capturedResponse)
        .asInstanceOf[OffsetDeleteResponse]
      assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION,
        Errors.forCode(response.data.topics.find(topic).partitions.find(invalidPartitionId).errorCode()))
    }
    checkInvalidPartition(-1)
    checkInvalidPartition(1) // topic has only one partition
  }
  // OffsetDelete for a group the coordinator does not know: the
  // coordinator-level GROUP_ID_NOT_FOUND must be mapped onto the top-level
  // response error code.
  @Test
  def testOffsetDeleteWithInvalidGroup(): Unit = {
    val group = "groupId"
    EasyMock.reset(groupCoordinator, replicaManager, clientRequestQuotaManager, requestChannel)
    val offsetDeleteRequest = new OffsetDeleteRequest.Builder(
      new OffsetDeleteRequestData()
        .setGroupId(group)
    ).build()
    val request = buildRequest(offsetDeleteRequest)
    val capturedResponse = expectNoThrottling()
    EasyMock.expect(groupCoordinator.handleDeleteOffsets(EasyMock.eq(group), EasyMock.eq(Seq.empty)))
      .andReturn((Errors.GROUP_ID_NOT_FOUND, Map.empty))
    EasyMock.replay(groupCoordinator, replicaManager, clientRequestQuotaManager, requestChannel)
    createKafkaApis().handleOffsetDeleteRequest(request)
    val response = readResponse(ApiKeys.OFFSET_DELETE, offsetDeleteRequest, capturedResponse)
      .asInstanceOf[OffsetDeleteResponse]
    assertEquals(Errors.GROUP_ID_NOT_FOUND, Errors.forCode(response.data.errorCode()))
  }
  // Shared scenario for the testLeaderReplicaIfLocalRaises* tests:
  // fetchOffsetForTimestamp throws the given error and the ListOffsets
  // response must carry that error with unknown offset/timestamp.
  private def testListOffsetFailedGetLeaderReplica(error: Errors): Unit = {
    val tp = new TopicPartition("foo", 0)
    val isolationLevel = IsolationLevel.READ_UNCOMMITTED
    val currentLeaderEpoch = Optional.of[Integer](15)
    EasyMock.expect(replicaManager.fetchOffsetForTimestamp(
      EasyMock.eq(tp),
      EasyMock.eq(ListOffsetRequest.EARLIEST_TIMESTAMP),
      EasyMock.eq(Some(isolationLevel)),
      EasyMock.eq(currentLeaderEpoch),
      fetchOnlyFromLeader = EasyMock.eq(true))
    ).andThrow(error.exception)
    val capturedResponse = expectNoThrottling()
    EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel)
    val targetTimes = Map(tp -> new ListOffsetRequest.PartitionData(ListOffsetRequest.EARLIEST_TIMESTAMP,
      currentLeaderEpoch))
    val listOffsetRequest = ListOffsetRequest.Builder.forConsumer(true, isolationLevel)
      .setTargetTimes(targetTimes.asJava).build()
    val request = buildRequest(listOffsetRequest)
    createKafkaApis().handleListOffsetRequest(request)
    val response = readResponse(ApiKeys.LIST_OFFSETS, listOffsetRequest, capturedResponse)
      .asInstanceOf[ListOffsetResponse]
    assertTrue(response.responseData.containsKey(tp))
    // On failure the partition carries the error plus sentinel offset/timestamp.
    val partitionData = response.responseData.get(tp)
    assertEquals(error, partitionData.error)
    assertEquals(ListOffsetResponse.UNKNOWN_OFFSET, partitionData.offset)
    assertEquals(ListOffsetResponse.UNKNOWN_TIMESTAMP, partitionData.timestamp)
  }
@Test
def testReadUncommittedConsumerListOffsetLatest(): Unit = {
testConsumerListOffsetLatest(IsolationLevel.READ_UNCOMMITTED)
}
@Test
def testReadCommittedConsumerListOffsetLatest(): Unit = {
testConsumerListOffsetLatest(IsolationLevel.READ_COMMITTED)
}
/**
* Verifies that the metadata response is correct if the broker listeners are inconsistent (i.e. one broker has
* more listeners than another) and the request is sent on the listener that exists in both brokers.
*/
@Test
def testMetadataRequestOnSharedListenerWithInconsistentListenersAcrossBrokers(): Unit = {
val (plaintextListener, _) = updateMetadataCacheWithInconsistentListeners()
val response = sendMetadataRequestWithInconsistentListeners(plaintextListener)
assertEquals(Set(0, 1), response.brokers.asScala.map(_.id).toSet)
}
/*
* Verifies that the metadata response is correct if the broker listeners are inconsistent (i.e. one broker has
* more listeners than another) and the request is sent on the listener that exists in one broker.
*/
@Test
def testMetadataRequestOnDistinctListenerWithInconsistentListenersAcrossBrokers(): Unit = {
val (_, anotherListener) = updateMetadataCacheWithInconsistentListeners()
val response = sendMetadataRequestWithInconsistentListeners(anotherListener)
assertEquals(Set(0), response.brokers.asScala.map(_.id).toSet)
}
  /**
   * Verifies that sending a fetch request with version 9 works correctly when
   * ReplicaManager.getLogConfig returns None.
   */
  @Test
  def testFetchRequestV9WithNoLogConfig(): Unit = {
    val tp = new TopicPartition("foo", 0)
    setupBasicMetadataCache(tp.topic, numPartitions = 1)
    val hw = 3
    val timestamp = 1000
    expect(replicaManager.getLogConfig(EasyMock.eq(tp))).andReturn(None)
    replicaManager.fetchMessages(anyLong, anyInt, anyInt, anyInt, anyBoolean,
      anyObject[Seq[(TopicPartition, FetchRequest.PartitionData)]], anyObject[ReplicaQuota],
      anyObject[Seq[(TopicPartition, FetchPartitionData)] => Unit](), anyObject[IsolationLevel],
      anyObject[Option[ClientMetadata]])
    // Invoke the fetch responseCallback (positional argument 7) with a single
    // record so the handler builds a real FetchResponse.
    expectLastCall[Unit].andAnswer(new IAnswer[Unit] {
      def answer: Unit = {
        val callback = getCurrentArguments.apply(7)
          .asInstanceOf[Seq[(TopicPartition, FetchPartitionData)] => Unit]
        val records = MemoryRecords.withRecords(CompressionType.NONE,
          new SimpleRecord(timestamp, "foo".getBytes(StandardCharsets.UTF_8)))
        callback(Seq(tp -> FetchPartitionData(Errors.NONE, hw, 0, records,
          None, None, Option.empty, isReassignmentFetch = false)))
      }
    })
    val fetchData = Map(tp -> new FetchRequest.PartitionData(0, 0, 1000,
      Optional.empty())).asJava
    val fetchMetadata = new JFetchMetadata(0, 0)
    val fetchContext = new FullFetchContext(time, new FetchSessionCache(1000, 100),
      fetchMetadata, fetchData, false)
    expect(fetchManager.newContext(anyObject[JFetchMetadata],
      anyObject[util.Map[TopicPartition, FetchRequest.PartitionData]],
      anyObject[util.List[TopicPartition]],
      anyBoolean)).andReturn(fetchContext)
    val capturedResponse = expectNoThrottling()
    EasyMock.expect(clientQuotaManager.maybeRecordAndGetThrottleTimeMs(
      anyObject[RequestChannel.Request](), anyDouble, anyLong)).andReturn(0)
    EasyMock.replay(replicaManager, clientQuotaManager, clientRequestQuotaManager, requestChannel, fetchManager)
    val fetchRequest = new FetchRequest.Builder(9, 9, -1, 100, 0, fetchData)
      .build()
    val request = buildRequest(fetchRequest)
    createKafkaApis().handleFetchRequest(request)
    val response = readResponse(ApiKeys.FETCH, fetchRequest, capturedResponse)
      .asInstanceOf[FetchResponse[BaseRecords]]
    assertTrue(response.responseData.containsKey(tp))
    val partitionData = response.responseData.get(tp)
    assertEquals(Errors.NONE, partitionData.error)
    assertEquals(hw, partitionData.highWatermark)
    // lastStableOffset is -1 here; presumably because no log config is
    // available — TODO confirm against the handler logic.
    assertEquals(-1, partitionData.lastStableOffset)
    assertEquals(0, partitionData.logStartOffset)
    assertEquals(timestamp,
      partitionData.records.asInstanceOf[MemoryRecords].batches.iterator.next.maxTimestamp)
    assertNull(partitionData.abortedTransactions)
  }
  // Ensures the protocol list passed to the group coordinator preserves the
  // order in which the protocols appear in the JoinGroup request.
  @Test
  def testJoinGroupProtocolsOrder(): Unit = {
    val protocols = List(
      ("first", "first".getBytes()),
      ("second", "second".getBytes())
    )
    val groupId = "group"
    val memberId = "member1"
    val protocolType = "consumer"
    val rebalanceTimeoutMs = 10
    val sessionTimeoutMs = 5
    val capturedProtocols = EasyMock.newCapture[List[(String, Array[Byte])]]()
    EasyMock.expect(groupCoordinator.handleJoinGroup(
      EasyMock.eq(groupId),
      EasyMock.eq(memberId),
      EasyMock.eq(None),
      EasyMock.eq(true),
      EasyMock.eq(clientId),
      EasyMock.eq(InetAddress.getLocalHost.toString),
      EasyMock.eq(rebalanceTimeoutMs),
      EasyMock.eq(sessionTimeoutMs),
      EasyMock.eq(protocolType),
      EasyMock.capture(capturedProtocols),
      anyObject()
    ))
    EasyMock.replay(groupCoordinator)
    createKafkaApis().handleJoinGroupRequest(
      buildRequest(
        new JoinGroupRequest.Builder(
          new JoinGroupRequestData()
            .setGroupId(groupId)
            .setMemberId(memberId)
            .setProtocolType(protocolType)
            .setRebalanceTimeoutMs(rebalanceTimeoutMs)
            .setSessionTimeoutMs(sessionTimeoutMs)
            .setProtocols(new JoinGroupRequestData.JoinGroupRequestProtocolCollection(
              protocols.map { case (name, protocol) => new JoinGroupRequestProtocol()
                .setName(name).setMetadata(protocol)
              }.iterator.asJava))
        ).build()
      ))
    EasyMock.verify(groupCoordinator)
    // Compare the captured protocol list pairwise against the request order.
    val capturedProtocolsList = capturedProtocols.getValue
    assertEquals(protocols.size, capturedProtocolsList.size)
    protocols.zip(capturedProtocolsList).foreach { case ((expectedName, expectedBytes), (name, bytes)) =>
      assertEquals(expectedName, name)
      assertArrayEquals(expectedBytes, bytes)
    }
  }
@Test
def testJoinGroupWhenAnErrorOccurs(): Unit = {
for (version <- ApiKeys.JOIN_GROUP.oldestVersion to ApiKeys.JOIN_GROUP.latestVersion) {
testJoinGroupWhenAnErrorOccurs(version.asInstanceOf[Short])
}
}
  // Drives JoinGroup at a specific API version through the error path and
  // checks the version-dependent defaulting in the response.
  def testJoinGroupWhenAnErrorOccurs(version: Short): Unit = {
    EasyMock.reset(groupCoordinator, clientRequestQuotaManager, requestChannel)
    val capturedResponse = expectNoThrottling()
    val groupId = "group"
    val memberId = "member1"
    val protocolType = "consumer"
    val rebalanceTimeoutMs = 10
    val sessionTimeoutMs = 5
    val capturedCallback = EasyMock.newCapture[JoinGroupCallback]()
    // requireKnownMemberId only holds from v4; before v1 the rebalance timeout
    // falls back to the session timeout.
    EasyMock.expect(groupCoordinator.handleJoinGroup(
      EasyMock.eq(groupId),
      EasyMock.eq(memberId),
      EasyMock.eq(None),
      EasyMock.eq(if (version >= 4) true else false),
      EasyMock.eq(clientId),
      EasyMock.eq(InetAddress.getLocalHost.toString),
      EasyMock.eq(if (version >= 1) rebalanceTimeoutMs else sessionTimeoutMs),
      EasyMock.eq(sessionTimeoutMs),
      EasyMock.eq(protocolType),
      EasyMock.eq(List.empty),
      EasyMock.capture(capturedCallback)
    ))
    val joinGroupRequest = new JoinGroupRequest.Builder(
      new JoinGroupRequestData()
        .setGroupId(groupId)
        .setMemberId(memberId)
        .setProtocolType(protocolType)
        .setRebalanceTimeoutMs(rebalanceTimeoutMs)
        .setSessionTimeoutMs(sessionTimeoutMs)
    ).build(version)
    val requestChannelRequest = buildRequest(joinGroupRequest)
    EasyMock.replay(groupCoordinator, clientRequestQuotaManager, requestChannel)
    createKafkaApis().handleJoinGroupRequest(requestChannelRequest)
    EasyMock.verify(groupCoordinator)
    // Complete the join via the captured callback with an error result.
    capturedCallback.getValue.apply(JoinGroupResult(memberId, Errors.INCONSISTENT_GROUP_PROTOCOL))
    val response = readResponse(ApiKeys.JOIN_GROUP, joinGroupRequest, capturedResponse)
      .asInstanceOf[JoinGroupResponse]
    assertEquals(Errors.INCONSISTENT_GROUP_PROTOCOL, response.error)
    assertEquals(0, response.data.members.size)
    assertEquals(memberId, response.data.memberId)
    assertEquals(GroupCoordinator.NoGeneration, response.data.generationId)
    assertEquals(GroupCoordinator.NoLeader, response.data.leader)
    assertNull(response.data.protocolType)
    // protocolName is nullable from v7; older versions use the sentinel value.
    if (version >= 7) {
      assertNull(response.data.protocolName)
    } else {
      assertEquals(GroupCoordinator.NoProtocol, response.data.protocolName)
    }
    EasyMock.verify(clientRequestQuotaManager, requestChannel)
  }
@Test
def testJoinGroupProtocolType(): Unit = {
for (version <- ApiKeys.JOIN_GROUP.oldestVersion to ApiKeys.JOIN_GROUP.latestVersion) {
testJoinGroupProtocolType(version.asInstanceOf[Short])
}
}
  // Drives JoinGroup at a specific API version through the success path and
  // checks that protocolType is only exposed in the response from v7 onwards.
  def testJoinGroupProtocolType(version: Short): Unit = {
    EasyMock.reset(groupCoordinator, clientRequestQuotaManager, requestChannel)
    val capturedResponse = expectNoThrottling()
    val groupId = "group"
    val memberId = "member1"
    val protocolType = "consumer"
    val protocolName = "range"
    val rebalanceTimeoutMs = 10
    val sessionTimeoutMs = 5
    val capturedCallback = EasyMock.newCapture[JoinGroupCallback]()
    EasyMock.expect(groupCoordinator.handleJoinGroup(
      EasyMock.eq(groupId),
      EasyMock.eq(memberId),
      EasyMock.eq(None),
      EasyMock.eq(if (version >= 4) true else false),
      EasyMock.eq(clientId),
      EasyMock.eq(InetAddress.getLocalHost.toString),
      EasyMock.eq(if (version >= 1) rebalanceTimeoutMs else sessionTimeoutMs),
      EasyMock.eq(sessionTimeoutMs),
      EasyMock.eq(protocolType),
      EasyMock.eq(List.empty),
      EasyMock.capture(capturedCallback)
    ))
    val joinGroupRequest = new JoinGroupRequest.Builder(
      new JoinGroupRequestData()
        .setGroupId(groupId)
        .setMemberId(memberId)
        .setProtocolType(protocolType)
        .setRebalanceTimeoutMs(rebalanceTimeoutMs)
        .setSessionTimeoutMs(sessionTimeoutMs)
    ).build(version)
    val requestChannelRequest = buildRequest(joinGroupRequest)
    EasyMock.replay(groupCoordinator, clientRequestQuotaManager, requestChannel)
    createKafkaApis().handleJoinGroupRequest(requestChannelRequest)
    EasyMock.verify(groupCoordinator)
    // Complete the join via the captured callback with a successful result.
    capturedCallback.getValue.apply(JoinGroupResult(
      members = List.empty,
      memberId = memberId,
      generationId = 0,
      protocolType = Some(protocolType),
      protocolName = Some(protocolName),
      leaderId = memberId,
      error = Errors.NONE
    ))
    val response = readResponse(ApiKeys.JOIN_GROUP, joinGroupRequest, capturedResponse)
      .asInstanceOf[JoinGroupResponse]
    assertEquals(Errors.NONE, response.error)
    assertEquals(0, response.data.members.size)
    assertEquals(memberId, response.data.memberId)
    assertEquals(0, response.data.generationId)
    assertEquals(memberId, response.data.leader)
    assertEquals(protocolName, response.data.protocolName)
    // protocolType is only returned to the client from v7 onwards.
    if (version >= 7) {
      assertEquals(protocolType, response.data.protocolType)
    } else {
      assertNull(response.data.protocolType)
    }
    EasyMock.verify(clientRequestQuotaManager, requestChannel)
  }
@Test
def testSyncGroupProtocolTypeAndName(): Unit = {
for (version <- ApiKeys.SYNC_GROUP.oldestVersion to ApiKeys.SYNC_GROUP.latestVersion) {
testSyncGroupProtocolTypeAndName(version.asInstanceOf[Short])
}
}
  // Drives SyncGroup at a specific API version and checks that protocol type
  // and name are only passed to the coordinator (and echoed back) from v5.
  def testSyncGroupProtocolTypeAndName(version: Short): Unit = {
    EasyMock.reset(groupCoordinator, clientRequestQuotaManager, requestChannel)
    val capturedResponse = expectNoThrottling()
    val groupId = "group"
    val memberId = "member1"
    val protocolType = "consumer"
    val protocolName = "range"
    val capturedCallback = EasyMock.newCapture[SyncGroupCallback]()
    EasyMock.expect(groupCoordinator.handleSyncGroup(
      EasyMock.eq(groupId),
      EasyMock.eq(0),
      EasyMock.eq(memberId),
      EasyMock.eq(if (version >= 5) Some(protocolType) else None),
      EasyMock.eq(if (version >= 5) Some(protocolName) else None),
      EasyMock.eq(None),
      EasyMock.eq(Map.empty),
      EasyMock.capture(capturedCallback)
    ))
    val syncGroupRequest = new SyncGroupRequest.Builder(
      new SyncGroupRequestData()
        .setGroupId(groupId)
        .setGenerationId(0)
        .setMemberId(memberId)
        .setProtocolType(protocolType)
        .setProtocolName(protocolName)
    ).build(version)
    val requestChannelRequest = buildRequest(syncGroupRequest)
    EasyMock.replay(groupCoordinator, clientRequestQuotaManager, requestChannel)
    createKafkaApis().handleSyncGroupRequest(requestChannelRequest)
    EasyMock.verify(groupCoordinator)
    // Complete the sync via the captured callback with a successful result.
    capturedCallback.getValue.apply(SyncGroupResult(
      protocolType = Some(protocolType),
      protocolName = Some(protocolName),
      memberAssignment = Array.empty,
      error = Errors.NONE
    ))
    val response = readResponse(ApiKeys.SYNC_GROUP, syncGroupRequest, capturedResponse)
      .asInstanceOf[SyncGroupResponse]
    assertEquals(Errors.NONE, response.error)
    assertArrayEquals(Array.empty[Byte], response.data.assignment)
    // protocolType is only returned to the client from v5 onwards.
    if (version >= 5) {
      assertEquals(protocolType, response.data.protocolType)
    } else {
      assertNull(response.data.protocolType)
    }
    EasyMock.verify(clientRequestQuotaManager, requestChannel)
  }
@Test
def testSyncGroupProtocolTypeAndNameAreMandatorySinceV5(): Unit = {
for (version <- ApiKeys.SYNC_GROUP.oldestVersion to ApiKeys.SYNC_GROUP.latestVersion) {
testSyncGroupProtocolTypeAndNameAreMandatorySinceV5(version.asInstanceOf[Short])
}
}
  // From v5 onwards a SyncGroup without protocol type/name must be rejected
  // with INCONSISTENT_GROUP_PROTOCOL instead of reaching the coordinator.
  def testSyncGroupProtocolTypeAndNameAreMandatorySinceV5(version: Short): Unit = {
    EasyMock.reset(groupCoordinator, clientRequestQuotaManager, requestChannel)
    val capturedResponse = expectNoThrottling()
    val groupId = "group"
    val memberId = "member1"
    val protocolType = "consumer"
    val protocolName = "range"
    val capturedCallback = EasyMock.newCapture[SyncGroupCallback]()
    // Before v5 the request is still forwarded to the coordinator.
    if (version < 5) {
      EasyMock.expect(groupCoordinator.handleSyncGroup(
        EasyMock.eq(groupId),
        EasyMock.eq(0),
        EasyMock.eq(memberId),
        EasyMock.eq(None),
        EasyMock.eq(None),
        EasyMock.eq(None),
        EasyMock.eq(Map.empty),
        EasyMock.capture(capturedCallback)
      ))
    }
    val syncGroupRequest = new SyncGroupRequest.Builder(
      new SyncGroupRequestData()
        .setGroupId(groupId)
        .setGenerationId(0)
        .setMemberId(memberId)
    ).build(version)
    val requestChannelRequest = buildRequest(syncGroupRequest)
    EasyMock.replay(groupCoordinator, clientRequestQuotaManager, requestChannel)
    createKafkaApis().handleSyncGroupRequest(requestChannelRequest)
    EasyMock.verify(groupCoordinator)
    if (version < 5) {
      capturedCallback.getValue.apply(SyncGroupResult(
        protocolType = Some(protocolType),
        protocolName = Some(protocolName),
        memberAssignment = Array.empty,
        error = Errors.NONE
      ))
    }
    val response = readResponse(ApiKeys.SYNC_GROUP, syncGroupRequest, capturedResponse)
      .asInstanceOf[SyncGroupResponse]
    if (version < 5) {
      assertEquals(Errors.NONE, response.error)
    } else {
      assertEquals(Errors.INCONSISTENT_GROUP_PROTOCOL, response.error)
    }
    EasyMock.verify(clientRequestQuotaManager, requestChannel)
  }
  // A JoinGroup carrying a group instance id must be rejected with
  // UNSUPPORTED_VERSION when the inter-broker protocol predates static
  // membership (KAFKA_2_2_IV1 here).
  @Test
  def rejectJoinGroupRequestWhenStaticMembershipNotSupported(): Unit = {
    val capturedResponse = expectNoThrottling()
    EasyMock.replay(clientRequestQuotaManager, requestChannel)
    val joinGroupRequest = new JoinGroupRequest.Builder(
      new JoinGroupRequestData()
        .setGroupId("test")
        .setMemberId("test")
        .setGroupInstanceId("instanceId")
        .setProtocolType("consumer")
        .setProtocols(new JoinGroupRequestData.JoinGroupRequestProtocolCollection)
    ).build()
    val requestChannelRequest = buildRequest(joinGroupRequest)
    createKafkaApis(KAFKA_2_2_IV1).handleJoinGroupRequest(requestChannelRequest)
    val response = readResponse(ApiKeys.JOIN_GROUP, joinGroupRequest, capturedResponse).asInstanceOf[JoinGroupResponse]
    assertEquals(Errors.UNSUPPORTED_VERSION, response.error())
    // NOTE(review): replay() after the assertions has no verifying effect;
    // looks like leftover — confirm whether verify() was intended instead.
    EasyMock.replay(groupCoordinator)
  }
  // A SyncGroup carrying a group instance id must be rejected with
  // UNSUPPORTED_VERSION when the inter-broker protocol predates static
  // membership (KAFKA_2_2_IV1 here).
  @Test
  def rejectSyncGroupRequestWhenStaticMembershipNotSupported(): Unit = {
    val capturedResponse = expectNoThrottling()
    EasyMock.replay(clientRequestQuotaManager, requestChannel)
    val syncGroupRequest = new SyncGroupRequest.Builder(
      new SyncGroupRequestData()
        .setGroupId("test")
        .setMemberId("test")
        .setGroupInstanceId("instanceId")
        .setGenerationId(1)
    ).build()
    val requestChannelRequest = buildRequest(syncGroupRequest)
    createKafkaApis(KAFKA_2_2_IV1).handleSyncGroupRequest(requestChannelRequest)
    val response = readResponse(ApiKeys.SYNC_GROUP, syncGroupRequest, capturedResponse).asInstanceOf[SyncGroupResponse]
    assertEquals(Errors.UNSUPPORTED_VERSION, response.error)
    // NOTE(review): replay() after the assertions has no verifying effect;
    // looks like leftover — confirm whether verify() was intended instead.
    EasyMock.replay(groupCoordinator)
  }
  // A Heartbeat carrying a group instance id must be rejected with
  // UNSUPPORTED_VERSION when the inter-broker protocol predates static
  // membership (KAFKA_2_2_IV1 here).
  @Test
  def rejectHeartbeatRequestWhenStaticMembershipNotSupported(): Unit = {
    val capturedResponse = expectNoThrottling()
    EasyMock.replay(clientRequestQuotaManager, requestChannel)
    val heartbeatRequest = new HeartbeatRequest.Builder(
      new HeartbeatRequestData()
        .setGroupId("test")
        .setMemberId("test")
        .setGroupInstanceId("instanceId")
        .setGenerationId(1)
    ).build()
    val requestChannelRequest = buildRequest(heartbeatRequest)
    createKafkaApis(KAFKA_2_2_IV1).handleHeartbeatRequest(requestChannelRequest)
    val response = readResponse(ApiKeys.HEARTBEAT, heartbeatRequest, capturedResponse).asInstanceOf[HeartbeatResponse]
    assertEquals(Errors.UNSUPPORTED_VERSION, response.error())
    // NOTE(review): replay() after the assertions has no verifying effect;
    // looks like leftover — confirm whether verify() was intended instead.
    EasyMock.replay(groupCoordinator)
  }
  // An OffsetCommit carrying a group instance id must be rejected with a
  // per-partition UNSUPPORTED_VERSION error when the inter-broker protocol
  // predates static membership (KAFKA_2_2_IV1 here).
  @Test
  def rejectOffsetCommitRequestWhenStaticMembershipNotSupported(): Unit = {
    val capturedResponse = expectNoThrottling()
    EasyMock.replay(clientRequestQuotaManager, requestChannel)
    val offsetCommitRequest = new OffsetCommitRequest.Builder(
      new OffsetCommitRequestData()
        .setGroupId("test")
        .setMemberId("test")
        .setGroupInstanceId("instanceId")
        .setGenerationId(100)
        .setTopics(Collections.singletonList(
          new OffsetCommitRequestData.OffsetCommitRequestTopic()
            .setName("test")
            .setPartitions(Collections.singletonList(
              new OffsetCommitRequestData.OffsetCommitRequestPartition()
                .setPartitionIndex(0)
                .setCommittedOffset(100)
                .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH)
                .setCommittedMetadata("")
            ))
        ))
    ).build()
    val requestChannelRequest = buildRequest(offsetCommitRequest)
    createKafkaApis(KAFKA_2_2_IV1).handleOffsetCommitRequest(requestChannelRequest)
    // The error is reported per partition rather than at the top level.
    val expectedTopicErrors = Collections.singletonList(
      new OffsetCommitResponseData.OffsetCommitResponseTopic()
        .setName("test")
        .setPartitions(Collections.singletonList(
          new OffsetCommitResponseData.OffsetCommitResponsePartition()
            .setPartitionIndex(0)
            .setErrorCode(Errors.UNSUPPORTED_VERSION.code())
        ))
    )
    val response = readResponse(ApiKeys.OFFSET_COMMIT, offsetCommitRequest, capturedResponse).asInstanceOf[OffsetCommitResponse]
    assertEquals(expectedTopicErrors, response.data.topics())
    // NOTE(review): replay() after the assertions has no verifying effect;
    // looks like leftover — confirm whether verify() was intended instead.
    EasyMock.replay(groupCoordinator)
  }
@Test
def testMultipleLeaveGroup(): Unit = {
val groupId = "groupId"
val leaveMemberList = List(
new MemberIdentity()
.setMemberId("member-1")
.setGroupInstanceId("instance-1"),
new MemberIdentity()
.setMemberId("member-2")
.setGroupInstanceId("instance-2")
)
EasyMock.expect(groupCoordinator.handleLeaveGroup(
EasyMock.eq(groupId),
EasyMock.eq(leaveMemberList),
anyObject()
))
val leaveRequest = buildRequest(
new LeaveGroupRequest.Builder(
groupId,
leaveMemberList.asJava
).build()
)
createKafkaApis().handleLeaveGroupRequest(leaveRequest)
EasyMock.replay(groupCoordinator)
}
@Test
def testSingleLeaveGroup(): Unit = {
val groupId = "groupId"
val memberId = "member"
val singleLeaveMember = List(
new MemberIdentity()
.setMemberId(memberId)
)
EasyMock.expect(groupCoordinator.handleLeaveGroup(
EasyMock.eq(groupId),
EasyMock.eq(singleLeaveMember),
anyObject()
))
val leaveRequest = buildRequest(
new LeaveGroupRequest.Builder(
groupId,
singleLeaveMember.asJava
).build()
)
createKafkaApis().handleLeaveGroupRequest(leaveRequest)
EasyMock.replay(groupCoordinator)
}
@Test
def testReassignmentAndReplicationBytesOutRateWhenReassigning(): Unit = {
assertReassignmentAndReplicationBytesOutPerSec(true)
}
@Test
def testReassignmentAndReplicationBytesOutRateWhenNotReassigning(): Unit = {
assertReassignmentAndReplicationBytesOutPerSec(false)
}
  // Drives a follower fetch through KafkaApis and checks that replication
  // bytes-out is always recorded while reassignment bytes-out is recorded
  // only when the fetched replica is being reassigned.
  private def assertReassignmentAndReplicationBytesOutPerSec(isReassigning: Boolean): Unit = {
    val leaderEpoch = 0
    val tp0 = new TopicPartition("tp", 0)
    val fetchData = Collections.singletonMap(tp0, new FetchRequest.PartitionData(0, 0, Int.MaxValue, Optional.of(leaderEpoch)))
    // replicaId = 1 marks this as a follower (replica) fetch.
    val fetchFromFollower = buildRequest(new FetchRequest.Builder(
      ApiKeys.FETCH.oldestVersion(), ApiKeys.FETCH.latestVersion(), 1, 1000, 0, fetchData
    ).build())
    setupBasicMetadataCache(tp0.topic, numPartitions = 1)
    val hw = 3
    val records = MemoryRecords.withRecords(CompressionType.NONE,
      new SimpleRecord(1000, "foo".getBytes(StandardCharsets.UTF_8)))
    replicaManager.fetchMessages(anyLong, anyInt, anyInt, anyInt, anyBoolean,
      anyObject[Seq[(TopicPartition, FetchRequest.PartitionData)]], anyObject[ReplicaQuota],
      anyObject[Seq[(TopicPartition, FetchPartitionData)] => Unit](), anyObject[IsolationLevel],
      anyObject[Option[ClientMetadata]])
    // Invoke the fetch responseCallback (positional argument 7) with one
    // record, flagged with the requested reassignment state.
    expectLastCall[Unit].andAnswer(new IAnswer[Unit] {
      def answer: Unit = {
        val callback = getCurrentArguments.apply(7).asInstanceOf[Seq[(TopicPartition, FetchPartitionData)] => Unit]
        callback(Seq(tp0 -> FetchPartitionData(Errors.NONE, hw, 0, records, None, None, Option.empty, isReassignmentFetch = isReassigning)))
      }
    })
    val fetchMetadata = new JFetchMetadata(0, 0)
    val fetchContext = new FullFetchContext(time, new FetchSessionCache(1000, 100),
      fetchMetadata, fetchData, true)
    expect(fetchManager.newContext(anyObject[JFetchMetadata],
      anyObject[util.Map[TopicPartition, FetchRequest.PartitionData]],
      anyObject[util.List[TopicPartition]],
      anyBoolean)).andReturn(fetchContext)
    expect(replicaQuotaManager.record(anyLong()))
    expect(replicaManager.getLogConfig(EasyMock.eq(tp0))).andReturn(None)
    val partition: Partition = createNiceMock(classOf[Partition])
    expect(replicaManager.isAddingReplica(anyObject(), anyInt())).andReturn(isReassigning)
    replay(replicaManager, fetchManager, clientQuotaManager, requestChannel, replicaQuotaManager, partition)
    createKafkaApis().handle(fetchFromFollower)
    // Reassignment bytes are only counted while reassigning; replication
    // bytes are counted for every follower fetch.
    if (isReassigning)
      assertEquals(records.sizeInBytes(), brokerTopicStats.allTopicsStats.reassignmentBytesOutPerSec.get.count())
    else
      assertEquals(0, brokerTopicStats.allTopicsStats.reassignmentBytesOutPerSec.get.count())
    assertEquals(records.sizeInBytes(), brokerTopicStats.allTopicsStats.replicationBytesOutRate.get.count())
  }
  // InitProducerId with a producer id but no producer epoch is an invalid
  // combination and must be rejected with INVALID_REQUEST.
  @Test
  def rejectInitProducerIdWhenIdButNotEpochProvided(): Unit = {
    val capturedResponse = expectNoThrottling()
    EasyMock.replay(clientRequestQuotaManager, requestChannel)
    val initProducerIdRequest = new InitProducerIdRequest.Builder(
      new InitProducerIdRequestData()
        .setTransactionalId("known")
        .setTransactionTimeoutMs(TimeUnit.MINUTES.toMillis(15).toInt)
        .setProducerId(10)
        .setProducerEpoch(RecordBatch.NO_PRODUCER_EPOCH)
    ).build()
    val requestChannelRequest = buildRequest(initProducerIdRequest)
    createKafkaApis(KAFKA_2_2_IV1).handleInitProducerIdRequest(requestChannelRequest)
    val response = readResponse(ApiKeys.INIT_PRODUCER_ID, initProducerIdRequest, capturedResponse)
      .asInstanceOf[InitProducerIdResponse]
    assertEquals(Errors.INVALID_REQUEST, response.error)
  }
  // InitProducerId with a producer epoch but no producer id is an invalid
  // combination and must be rejected with INVALID_REQUEST.
  @Test
  def rejectInitProducerIdWhenEpochButNotIdProvided(): Unit = {
    val capturedResponse = expectNoThrottling()
    EasyMock.replay(clientRequestQuotaManager, requestChannel)
    val initProducerIdRequest = new InitProducerIdRequest.Builder(
      new InitProducerIdRequestData()
        .setTransactionalId("known")
        .setTransactionTimeoutMs(TimeUnit.MINUTES.toMillis(15).toInt)
        .setProducerId(RecordBatch.NO_PRODUCER_ID)
        .setProducerEpoch(2)
    ).build()
    val requestChannelRequest = buildRequest(initProducerIdRequest)
    createKafkaApis(KAFKA_2_2_IV1).handleInitProducerIdRequest(requestChannelRequest)
    val response = readResponse(ApiKeys.INIT_PRODUCER_ID, initProducerIdRequest, capturedResponse).asInstanceOf[InitProducerIdResponse]
    assertEquals(Errors.INVALID_REQUEST, response.error)
  }
@Test
def testUpdateMetadataRequestWithCurrentBrokerEpoch(): Unit = {
val currentBrokerEpoch = 1239875L
testUpdateMetadataRequest(currentBrokerEpoch, currentBrokerEpoch, Errors.NONE)
}
@Test
def testUpdateMetadataRequestWithNewerBrokerEpochIsValid(): Unit = {
val currentBrokerEpoch = 1239875L
testUpdateMetadataRequest(currentBrokerEpoch, currentBrokerEpoch + 1, Errors.NONE)
}
@Test
def testUpdateMetadataRequestWithStaleBrokerEpochIsRejected(): Unit = {
val currentBrokerEpoch = 1239875L
testUpdateMetadataRequest(currentBrokerEpoch, currentBrokerEpoch - 1, Errors.STALE_BROKER_EPOCH)
}
  // Shared scenario: an UpdateMetadata request with the given broker epoch is
  // accepted (Errors.NONE) or rejected (STALE_BROKER_EPOCH) relative to the
  // controller's current epoch.
  def testUpdateMetadataRequest(currentBrokerEpoch: Long, brokerEpochInRequest: Long, expectedError: Errors): Unit = {
    val updateMetadataRequest = createBasicMetadataRequest("topicA", 1, brokerEpochInRequest)
    val request = buildRequest(updateMetadataRequest)
    val capturedResponse: Capture[RequestChannel.Response] = EasyMock.newCapture()
    EasyMock.expect(controller.brokerEpoch).andStubReturn(currentBrokerEpoch)
    EasyMock.expect(replicaManager.maybeUpdateMetadataCache(
      EasyMock.eq(request.context.correlationId),
      EasyMock.anyObject()
    )).andStubReturn(
      Seq()
    )
    EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
    EasyMock.replay(replicaManager, controller, requestChannel)
    createKafkaApis().handleUpdateMetadataRequest(request)
    val updateMetadataResponse = readResponse(ApiKeys.UPDATE_METADATA, updateMetadataRequest, capturedResponse)
      .asInstanceOf[UpdateMetadataResponse]
    assertEquals(expectedError, updateMetadataResponse.error())
    EasyMock.verify(replicaManager)
  }
// Broker-epoch validation for LeaderAndIsr: the controller's current epoch is accepted.
@Test
def testLeaderAndIsrRequestWithCurrentBrokerEpoch(): Unit = {
  val brokerEpoch = 1239875L
  testLeaderAndIsrRequest(brokerEpoch, brokerEpoch, Errors.NONE)
}

// A newer broker epoch than the controller's is also accepted.
@Test
def testLeaderAndIsrRequestWithNewerBrokerEpochIsValid(): Unit = {
  val brokerEpoch = 1239875L
  testLeaderAndIsrRequest(brokerEpoch, brokerEpoch + 1, Errors.NONE)
}

// A stale (older) broker epoch must be rejected with STALE_BROKER_EPOCH.
@Test
def testLeaderAndIsrRequestWithStaleBrokerEpochIsRejected(): Unit = {
  val brokerEpoch = 1239875L
  testLeaderAndIsrRequest(brokerEpoch, brokerEpoch - 1, Errors.STALE_BROKER_EPOCH)
}
// Drives a LeaderAndIsr request through KafkaApis with the given broker epoch in the
// request vs. the controller's current epoch and asserts the expected (error) outcome.
def testLeaderAndIsrRequest(currentBrokerEpoch: Long, brokerEpochInRequest: Long, expectedError: Errors): Unit = {
  val controllerId = 2
  val controllerEpoch = 6
  // Captures the response KafkaApis sends to the request channel.
  val capturedResponse: Capture[RequestChannel.Response] = EasyMock.newCapture()
  // Single partition state for topicW-1, led by broker 0 with ISR {0, 1}.
  val partitionStates = Seq(
    new LeaderAndIsrRequestData.LeaderAndIsrPartitionState()
      .setTopicName("topicW")
      .setPartitionIndex(1)
      .setControllerEpoch(1)
      .setLeader(0)
      .setLeaderEpoch(1)
      .setIsr(asList(0, 1))
      .setZkVersion(2)
      .setReplicas(asList(0, 1, 2))
      .setIsNew(false)
  ).asJava
  val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(
    ApiKeys.LEADER_AND_ISR.latestVersion,
    controllerId,
    controllerEpoch,
    brokerEpochInRequest,
    partitionStates,
    asList(new Node(0, "host0", 9090), new Node(1, "host1", 9091))
  ).build()
  val request = buildRequest(leaderAndIsrRequest)
  // Stubbed success response from the replica manager for when the epoch check passes.
  val response = new LeaderAndIsrResponse(new LeaderAndIsrResponseData()
    .setErrorCode(Errors.NONE.code)
    .setPartitionErrors(asList()))
  EasyMock.expect(controller.brokerEpoch).andStubReturn(currentBrokerEpoch)
  EasyMock.expect(replicaManager.becomeLeaderOrFollower(
    EasyMock.eq(request.context.correlationId),
    EasyMock.anyObject(),
    EasyMock.anyObject()
  )).andStubReturn(
    response
  )
  EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
  EasyMock.replay(replicaManager, controller, requestChannel)
  createKafkaApis().handleLeaderAndIsrRequest(request)
  val leaderAndIsrResponse = readResponse(ApiKeys.LEADER_AND_ISR, leaderAndIsrRequest, capturedResponse)
    .asInstanceOf[LeaderAndIsrResponse]
  assertEquals(expectedError, leaderAndIsrResponse.error())
  EasyMock.verify(replicaManager)
}
// Broker-epoch validation for StopReplica: the controller's current epoch is accepted.
@Test
def testStopReplicaRequestWithCurrentBrokerEpoch(): Unit = {
  val brokerEpoch = 1239875L
  testStopReplicaRequest(brokerEpoch, brokerEpoch, Errors.NONE)
}

// A newer broker epoch than the controller's is also accepted.
@Test
def testStopReplicaRequestWithNewerBrokerEpochIsValid(): Unit = {
  val brokerEpoch = 1239875L
  testStopReplicaRequest(brokerEpoch, brokerEpoch + 1, Errors.NONE)
}

// A stale (older) broker epoch must be rejected with STALE_BROKER_EPOCH.
@Test
def testStopReplicaRequestWithStaleBrokerEpochIsRejected(): Unit = {
  val brokerEpoch = 1239875L
  testStopReplicaRequest(brokerEpoch, brokerEpoch - 1, Errors.STALE_BROKER_EPOCH)
}
// Drives a StopReplica request through KafkaApis with the given broker epoch in the
// request vs. the controller's current epoch and asserts the expected (error) outcome.
def testStopReplicaRequest(currentBrokerEpoch: Long, brokerEpochInRequest: Long, expectedError: Errors): Unit = {
  val controllerId = 0
  val controllerEpoch = 5
  val capturedResponse: Capture[RequestChannel.Response] = EasyMock.newCapture()
  val fooPartition = new TopicPartition("foo", 0)
  // One topic entry stopping foo-0 without deleting the partition.
  val topicStates = Seq(
    new StopReplicaTopicState()
      .setTopicName(fooPartition.topic())
      .setPartitionStates(Seq(new StopReplicaPartitionState()
        .setPartitionIndex(fooPartition.partition())
        .setLeaderEpoch(1)
        .setDeletePartition(false)).asJava)
  ).asJava
  val stopReplicaRequest = new StopReplicaRequest.Builder(
    ApiKeys.STOP_REPLICA.latestVersion,
    controllerId,
    controllerEpoch,
    brokerEpochInRequest,
    false,
    topicStates
  ).build()
  val request = buildRequest(stopReplicaRequest)
  EasyMock.expect(controller.brokerEpoch).andStubReturn(currentBrokerEpoch)
  EasyMock.expect(replicaManager.stopReplicas(
    EasyMock.eq(request.context.correlationId),
    EasyMock.eq(controllerId),
    EasyMock.eq(controllerEpoch),
    EasyMock.eq(brokerEpochInRequest),
    EasyMock.eq(stopReplicaRequest.partitionStates().asScala)
  )).andStubReturn(
    (mutable.Map(
      fooPartition -> Errors.NONE
    ), Errors.NONE)
  )
  EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
  EasyMock.replay(controller, replicaManager, requestChannel)
  createKafkaApis().handleStopReplicaRequest(request)
  val stopReplicaResponse = readResponse(ApiKeys.STOP_REPLICA, stopReplicaRequest, capturedResponse)
    .asInstanceOf[StopReplicaResponse]
  assertEquals(expectedError, stopReplicaResponse.error())
  EasyMock.verify(replicaManager)
}
/**
 * Populates the shared metadataCache with two brokers whose listener sets differ:
 * broker 0 exposes both PLAINTEXT and LISTENER2, broker 1 exposes only PLAINTEXT.
 *
 * @return pair of listener names in the metadataCache: PLAINTEXT and LISTENER2 respectively.
 */
private def updateMetadataCacheWithInconsistentListeners(): (ListenerName, ListenerName) = {
  val plaintextListener = ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)
  val anotherListener = new ListenerName("LISTENER2")
  val brokers = Seq(
    // Broker 0: two endpoints, one per listener (both PLAINTEXT security protocol).
    new UpdateMetadataBroker()
      .setId(0)
      .setRack("rack")
      .setEndpoints(Seq(
        new UpdateMetadataEndpoint()
          .setHost("broker0")
          .setPort(9092)
          .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)
          .setListener(plaintextListener.value),
        new UpdateMetadataEndpoint()
          .setHost("broker0")
          .setPort(9093)
          .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)
          .setListener(anotherListener.value)
      ).asJava),
    // Broker 1: PLAINTEXT endpoint only — deliberately inconsistent with broker 0.
    new UpdateMetadataBroker()
      .setId(1)
      .setRack("rack")
      .setEndpoints(Seq(
        new UpdateMetadataEndpoint()
          .setHost("broker1")
          .setPort(9092)
          .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)
          .setListener(plaintextListener.value)).asJava)
  )
  val updateMetadataRequest = new UpdateMetadataRequest.Builder(ApiKeys.UPDATE_METADATA.latestVersion, 0,
    0, 0, Seq.empty[UpdateMetadataPartitionState].asJava, brokers.asJava).build()
  metadataCache.updateMetadata(correlationId = 0, updateMetadataRequest)
  (plaintextListener, anotherListener)
}
// Sends an all-topics Metadata request arriving on `requestListener` and returns the
// parsed MetadataResponse. Throttling is stubbed out via expectNoThrottling().
private def sendMetadataRequestWithInconsistentListeners(requestListener: ListenerName): MetadataResponse = {
  val capturedResponse = expectNoThrottling()
  EasyMock.replay(clientRequestQuotaManager, requestChannel)
  val metadataRequest = MetadataRequest.Builder.allTopics.build()
  val requestChannelRequest = buildRequest(metadataRequest, requestListener)
  createKafkaApis().handleTopicMetadataRequest(requestChannelRequest)
  readResponse(ApiKeys.METADATA, metadataRequest, capturedResponse).asInstanceOf[MetadataResponse]
}
// Verifies that a consumer ListOffsets request for LATEST_TIMESTAMP returns the stubbed
// latest offset with UNKNOWN_TIMESTAMP under the given isolation level.
private def testConsumerListOffsetLatest(isolationLevel: IsolationLevel): Unit = {
  val tp = new TopicPartition("foo", 0)
  val latestOffset = 15L
  val currentLeaderEpoch = Optional.empty[Integer]()
  // Stub the replica manager's timestamp-to-offset lookup.
  EasyMock.expect(replicaManager.fetchOffsetForTimestamp(
    EasyMock.eq(tp),
    EasyMock.eq(ListOffsetRequest.LATEST_TIMESTAMP),
    EasyMock.eq(Some(isolationLevel)),
    EasyMock.eq(currentLeaderEpoch),
    fetchOnlyFromLeader = EasyMock.eq(true))
  ).andReturn(Some(new TimestampAndOffset(ListOffsetResponse.UNKNOWN_TIMESTAMP, latestOffset, currentLeaderEpoch)))
  val capturedResponse = expectNoThrottling()
  EasyMock.replay(replicaManager, clientRequestQuotaManager, requestChannel)
  val targetTimes = Map(tp -> new ListOffsetRequest.PartitionData(ListOffsetRequest.LATEST_TIMESTAMP,
    currentLeaderEpoch))
  val listOffsetRequest = ListOffsetRequest.Builder.forConsumer(true, isolationLevel)
    .setTargetTimes(targetTimes.asJava).build()
  val request = buildRequest(listOffsetRequest)
  createKafkaApis().handleListOffsetRequest(request)
  val response = readResponse(ApiKeys.LIST_OFFSETS, listOffsetRequest, capturedResponse).asInstanceOf[ListOffsetResponse]
  assertTrue(response.responseData.containsKey(tp))
  val partitionData = response.responseData.get(tp)
  assertEquals(Errors.NONE, partitionData.error)
  assertEquals(latestOffset, partitionData.offset)
  assertEquals(ListOffsetResponse.UNKNOWN_TIMESTAMP, partitionData.timestamp)
}
// Builds a WriteTxnMarkers request containing a single COMMIT marker (producerId=1,
// epoch=1, coordinatorEpoch=0) for `partitions`, paired with its channel request.
private def createWriteTxnMarkersRequest(partitions: util.List[TopicPartition]) = {
  val marker = new TxnMarkerEntry(1, 1.toShort, 0, TransactionResult.COMMIT, partitions)
  val markersRequest = new WriteTxnMarkersRequest.Builder(asList(marker)).build()
  (markersRequest, buildRequest(markersRequest))
}
// Serializes `request` with a fresh header and rebuilds it as a RequestChannel.Request —
// the form in which KafkaApis receives requests off the wire.
private def buildRequest[T <: AbstractRequest](request: AbstractRequest,
                                               listenerName: ListenerName = ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)): RequestChannel.Request = {
  val buffer = request.serialize(new RequestHeader(request.api, request.version, clientId, 0))
  // read the header from the buffer first so that the body can be read next from the Request constructor
  val header = RequestHeader.parse(buffer)
  val context = new RequestContext(header, "1", InetAddress.getLocalHost, KafkaPrincipal.ANONYMOUS,
    listenerName, SecurityProtocol.PLAINTEXT, ClientInformation.EMPTY)
  new RequestChannel.Request(processor = 1, context = context, startTimeNanos = 0, MemoryPool.NONE, buffer,
    requestChannelMetrics)
}
// Renders the captured SendResponse back into an AbstractResponse by writing it to an
// in-memory channel and re-parsing the bytes — mimicking what a client would receive.
private def readResponse(api: ApiKeys, request: AbstractRequest, capturedResponse: Capture[RequestChannel.Response]): AbstractResponse = {
  val response = capturedResponse.getValue
  assertTrue(s"Unexpected response type: ${response.getClass}", response.isInstanceOf[SendResponse])
  val sendResponse = response.asInstanceOf[SendResponse]
  val send = sendResponse.responseSend
  val channel = new ByteBufferChannel(send.size)
  send.writeTo(channel)
  channel.close()
  channel.buffer.getInt() // read the size
  // Consume (and discard) the response header so only the body remains to be parsed.
  ResponseHeader.parse(channel.buffer, api.responseHeaderVersion(request.version))
  val struct = api.responseSchema(request.version).read(channel.buffer)
  AbstractResponse.parseResponse(api, struct, request.version)
}
// Stubs the client quota manager so the request is not throttled (0 ms throttle time)
// and returns a capture that will receive the response sent through the request channel.
// NOTE: callers must still call EasyMock.replay on clientRequestQuotaManager/requestChannel.
private def expectNoThrottling(): Capture[RequestChannel.Response] = {
  EasyMock.expect(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(EasyMock.anyObject[RequestChannel.Request](),
    EasyMock.anyObject[Long])).andReturn(0)
  EasyMock.expect(clientRequestQuotaManager.throttle(EasyMock.anyObject[RequestChannel.Request](), EasyMock.eq(0),
    EasyMock.anyObject[RequestChannel.Response => Unit]()))
  val capturedResponse = EasyMock.newCapture[RequestChannel.Response]()
  EasyMock.expect(requestChannel.sendResponse(EasyMock.capture(capturedResponse)))
  capturedResponse
}
/**
 * Builds a minimal UpdateMetadata request for `topic` with `numPartitions` partitions,
 * all led by (and replicated only on) broker 0, which listens on PLAINTEXT://broker0:9092.
 *
 * Fix: the original chained `.setReplicas(replicas)` twice on the partition state
 * (before and after `.setZkVersion(0)`); the redundant second call is removed.
 *
 * @param brokerEpoch broker epoch stamped on the request (used by epoch-validation tests)
 */
private def createBasicMetadataRequest(topic: String, numPartitions: Int, brokerEpoch: Long): UpdateMetadataRequest = {
  val replicas = List(0.asInstanceOf[Integer]).asJava
  // One partition state per index, all pointing at broker 0 as leader/replica.
  def createPartitionState(partition: Int) = new UpdateMetadataPartitionState()
    .setTopicName(topic)
    .setPartitionIndex(partition)
    .setControllerEpoch(1)
    .setLeader(0)
    .setLeaderEpoch(1)
    .setReplicas(replicas)
    .setZkVersion(0)
  val plaintextListener = ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)
  val broker = new UpdateMetadataBroker()
    .setId(0)
    .setRack("rack")
    .setEndpoints(Seq(new UpdateMetadataEndpoint()
      .setHost("broker0")
      .setPort(9092)
      .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)
      .setListener(plaintextListener.value)).asJava)
  val partitionStates = (0 until numPartitions).map(createPartitionState)
  new UpdateMetadataRequest.Builder(ApiKeys.UPDATE_METADATA.latestVersion, 0,
    0, brokerEpoch, partitionStates.asJava, Seq(broker).asJava).build()
}
// Seeds the shared metadataCache with a single-broker, broker-epoch-0 view of `topic`.
private def setupBasicMetadataCache(topic: String, numPartitions: Int): Unit =
  metadataCache.updateMetadata(correlationId = 0, createBasicMetadataRequest(topic, numPartitions, 0))
}
| sslavic/kafka | core/src/test/scala/unit/kafka/server/KafkaApisTest.scala | Scala | apache-2.0 | 80,961 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming
import org.apache.spark.Logging
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.util.Utils
import scala.util.Random
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag
import java.io.{File, IOException}
import java.nio.charset.Charset
import java.util.UUID
import com.google.common.io.Files
import org.apache.hadoop.fs.Path
import org.apache.hadoop.conf.Configuration
/**
* Master(主节点故障)测试套件
*/
private[streaming]
object MasterFailureTest extends Logging {
@volatile var killed = false
@volatile var killCount = 0
@volatile var setupCalled = false
// Entry point: runs the map test and the updateStateByKey test back to back.
// Argument parsing is currently commented out for local experimentation; the
// checkpoint directory and batch count are hard-coded below.
def main(args: Array[String]) {
  // scalastyle:off println
  // Batch size would normally come from the command line, in milliseconds.
  /* if (args.size < 2) {
    println(
      "Usage: MasterFailureTest <local/HDFS directory> <# batches> " +
        "[<batch size in milliseconds>]")
    System.exit(1)
  }*/
  // Checkpoint root directory (originally args(0)).
  // val directory = args(0)
  val directory = "D:\\\\checkpoint_test"
  // val directory="hdfs://xcsq:8089/analytics/"
  // Number of batches to run (originally args(1).toInt).
  // val numBatches = args(1).toInt
  val numBatches =10.toInt // number of batches
  // Batch duration; around 500 ms is usually reasonable, default here is 1 second.
  val batchDuration = if (args.size > 2) Milliseconds(args(2).toInt) else Seconds(1) // 1 second
  println("\\n\\n========================= MAP TEST =========================\\n\\n")
  testMap(directory, numBatches, batchDuration)
  println("\\n\\n================= UPDATE-STATE-BY-KEY TEST =================\\n\\n")
  testUpdateStateByKey(directory, numBatches, batchDuration)
  println("\\n\\nSUCCESS\\n\\n")
  // scalastyle:on println
}
//
// Runs a simple map (String => Int) streaming job under repeated driver kills and
// checks that the distinct output matches the expected set of values.
def testMap(directory: String, numBatches: Int, batchDuration: Duration) {
  // Input: time=1 ==> [ 1 ] , time=2 ==> [ 2 ] , time=3 ==> [ 3 ] , ...
  // e.g. input = Vector("1", "2", ..., "10")
  val input = (1 to numBatches).map(_.toString).toSeq
  // Expected output: time=1 ==> [ 1 ] , time=2 ==> [ 2 ] , time=3 ==> [ 3 ] , ...
  val expectedOutput = (1 to numBatches)
  val operation = (st: DStream[String]) => st.map(_.toInt)
  // Run streaming operation with multiple master failures
  val output = testOperation(directory, batchDuration, input, operation, expectedOutput)
  logInfo("Expected output, size = " + expectedOutput.size)
  logInfo(expectedOutput.mkString("[", ",", "]"))
  logInfo("Output, size = " + output.size)
  logInfo(output.mkString("[", ",", "]"))
  // Verify whether all the values of the expected output is present
  // in the output (duplicates from re-processed batches are tolerated).
  assert(output.distinct.toSet == expectedOutput.toSet)
}
// Runs a stateful word-count (updateStateByKey) streaming job under repeated driver
// kills and checks the cumulative counts against the expected running sums.
def testUpdateStateByKey(directory: String, numBatches: Int, batchDuration: Duration) {
  // Input: time=1 ==> [ a ] , time=2 ==> [ a, a ] , time=3 ==> [ a, a, a ] , ...
  val input = (1 to numBatches).map(i => (1 to i).map(_ => "a").mkString(" ")).toSeq
  // Expected output: time=1 ==> [ (a, 1) ] , time=2 ==> [ (a, 3) ] , time=3 ==> [ (a,6) ] , ...
  val expectedOutput = (1L to numBatches).map(i => (1L to i).sum).map(j => ("a", j))
  val operation = (st: DStream[String]) => {
    // New running total = sum of this batch's values + previous state (0 if none).
    val updateFunc = (values: Seq[Long], state: Option[Long]) => {
      Some(values.foldLeft(0L)(_ + _) + state.getOrElse(0L))
    }
    /**
     * updateStateByKey returns a new "state" DStream in which each key's state is
     * updated from its previous state and its new values; the update function is
     * called once per key, with `values` holding the key's new values.
     */
    st.flatMap(_.split(" "))
      .map(x => (x, 1L))
      .updateStateByKey[Long](updateFunc)
      // Checkpoint the generated RDDs every 5 batch intervals.
      .checkpoint(batchDuration * 5)
  }
  // Run streaming operation with multiple master failures
  val output = testOperation(directory, batchDuration, input, operation, expectedOutput)
  logInfo("Expected output, size = " + expectedOutput.size + "\\n" + expectedOutput)
  logInfo("Output, size = " + output.size + "\\n" + output)
  // Verify whether all the values in the output are among the expected output values
  output.foreach(o =>
    assert(expectedOutput.contains(o), "Expected value " + o + " not found")
  )
  // Verify whether the last expected output value has been generated, there by
  // confirming that none of the inputs have been missed
  assert(output.last == expectedOutput.last)
}
/**
 * Tests stream operation with multiple master failures, and verifies whether the
 * final set of output values is as expected or not.
 */
def testOperation[T: ClassTag](
    directory: String,
    batchDuration: Duration,
    input: Seq[String],
    operation: DStream[String] => DStream[T],
    expectedOutput: Seq[T]
  ): Seq[T] = {
  // Just making sure that the expected output does not have duplicates
  assert(expectedOutput.distinct.toSet == expectedOutput.toSet)
  // Reset all state
  reset()
  // Create the directories for this test under a random UUID root.
  val uuid = UUID.randomUUID().toString
  val rootDir = new Path(directory, uuid)
  val fs = rootDir.getFileSystem(new Configuration())
  // Checkpoint subdirectory
  val checkpointDir = new Path(rootDir, "checkpoint")
  // Test-data subdirectory watched by the file stream
  val testDir = new Path(rootDir, "test")
  fs.mkdirs(checkpointDir)
  fs.mkdirs(testDir)
  // Setup the stream computation with the given operation.
  // If the checkpoint directory already holds data the context is rebuilt from it;
  // otherwise (first run) the creation function below builds a fresh StreamingContext.
  val ssc = StreamingContext.getOrCreate(checkpointDir.toString, () => {
    setupStreams(batchDuration, operation, checkpointDir, testDir)
  })
  // Check if setupStream was called to create StreamingContext
  // (and not created from checkpoint file)
  assert(setupCalled, "Setup was not called in the first call to StreamingContext.getOrCreate")
  // Start generating files in a different thread
  val fileGeneratingThread = new FileGeneratingThread(input, testDir, batchDuration.milliseconds)
  fileGeneratingThread.start()
  // Run the streams and repeatedly kill it until the last expected output
  // has been generated, or until it has run for twice the expected time
  val lastExpectedOutput = expectedOutput.last
  val maxTimeToRun = expectedOutput.size * batchDuration.milliseconds * 2
  val mergedOutput = runStreams(ssc, lastExpectedOutput, maxTimeToRun)
  // join blocks until the file-generating thread has terminated.
  fileGeneratingThread.join()
  // Clean up the HDFS/local directories used by this run.
  fs.delete(checkpointDir, true)
  fs.delete(testDir, true)
  logInfo("Finished test after " + killCount + " failures")
  mergedOutput
}
/**
 * Sets up the stream computation with the given operation, directory (local or HDFS),
 * and batch duration. Returns the streaming context and the directory to which
 * files should be written for testing.
 */
private def setupStreams[T: ClassTag](
    batchDuration: Duration,
    operation: DStream[String] => DStream[T],
    checkpointDir: Path,
    testDir: Path
  ): StreamingContext = {
  // Mark that setup was called (asserted by testOperation on the first run).
  setupCalled = true
  // Setup the streaming computation with the given operation
  val ssc = new StreamingContext("local[4]", "MasterFailureTest", batchDuration, null, Nil,
    Map())
  ssc.checkpoint(checkpointDir.toString)
  // Monitors testDir and processes any file newly created there (nested directories
  // are not supported); files must appear via move/rename and share a single format.
  val inputStream = ssc.textFileStream(testDir.toString)
  val operatedStream = operation(inputStream)
  // Collect results through a test output stream so runStreams can read them back.
  val outputStream = new TestOutputStream(operatedStream)
  // register adds this DStream to the DStreamGraph's output streams.
  outputStream.register()
  ssc
}
/**
 * Repeatedly starts and kills the streaming context until timed out or
 * the last expected output is generated. Finally, returns the merged output
 * collected across all restarts.
 */
private def runStreams[T: ClassTag](
    ssc_ : StreamingContext,
    lastExpectedOutput: T,
    maxTimeToRun: Long
  ): Seq[T] = {
  var ssc = ssc_
  // Total time the streams have run, summed across restarts.
  var totalTimeRan = 0L
  // Whether the last expected output value has been observed.
  var isLastOutputGenerated = false
  // Whether the overall time budget has been exceeded.
  var isTimedOut = false
  // Output accumulated across restarts.
  val mergedOutput = new ArrayBuffer[T]()
  val checkpointDir = ssc.checkpointDir
  val batchDuration = ssc.graph.batchDuration
  while(!isLastOutputGenerated && !isTimedOut) {
    // Get the output buffer of the TestOutputStream registered by setupStreams.
    val outputBuffer = ssc.graph.getOutputStreams().head.asInstanceOf[TestOutputStream[T]].output
    def output = outputBuffer.flatMap(x => {
      // println("outputBuffer==" + x)
      x})
    // Start the thread to kill the streaming after some time
    killed = false
    // Kills the streaming context after a random delay (up to 10 batch intervals).
    val killingThread = new KillingThread(ssc, batchDuration.milliseconds * 10)
    killingThread.start()
    // Time this particular run lasted.
    var timeRan = 0L
    try {
      // Start the streaming computation and let it run while ...
      // (i) StreamingContext has not been shut down yet
      // (ii) The last expected output has not been generated yet
      // (iii) Its not timed out yet
      System.clearProperty("spark.streaming.clock")
      System.clearProperty("spark.driver.port")
      ssc.start()
      val startTime = System.currentTimeMillis()
      while (!killed && !isLastOutputGenerated && !isTimedOut) {
        Thread.sleep(100)
        timeRan = System.currentTimeMillis() - startTime
        // Done once a non-empty output ends with the last expected value.
        isLastOutputGenerated = (output.nonEmpty && output.last == lastExpectedOutput)
        // Timed out once cumulative run time exceeds the budget.
        isTimedOut = (timeRan + totalTimeRan > maxTimeToRun)
      }
    } catch {
      case e: Exception => logError("Error running streaming context", e)
    }
    // isAlive is true while the killing thread is still running.
    if (killingThread.isAlive) {
      // interrupt only requests termination; the killing thread handles it itself.
      killingThread.interrupt()
      // SparkContext.stop will set SparkEnv.env to null. We need to make sure SparkContext is
      // stopped before running the next test. Otherwise, it's possible that we set SparkEnv.env
      // to null after the next test creates the new SparkContext and fail the test.
      killingThread.join()
    }
    ssc.stop()
    /**
     * Example log output:
     * Has been killed = true
     * Is last output generated = false
     * Is timed out = false
     */
    logInfo("Has been killed = " + killed)
    logInfo("Is last output generated = " + isLastOutputGenerated)
    logInfo("Is timed out = " + isTimedOut)
    //=============================
    println("Has been killed = " + killed)
    println("Is last output generated = " + isLastOutputGenerated)
    println("Is timed out = " + isTimedOut)
    // Verify whether the output of each batch has only one element or no element
    // and then merge the new output with all the earlier output
    mergedOutput ++= output
    totalTimeRan += timeRan
    /**
     * Example log output:
     * New output = ArrayBuffer(1, 2, 3, 4, 5, 6, 7, 8)
     * Merged output = ArrayBuffer(1, 2, 3, 4, 5, 6, 7, 8)
     * Time ran = 13788
     * Total time ran = 13788
     */
    logInfo("New output = " + output)
    logInfo("Merged output = " + mergedOutput)
    logInfo("Time ran = " + timeRan)
    logInfo("Total time ran = " + totalTimeRan)
    //=============================
    println("New output = " + output)
    println("Merged output = " + mergedOutput)
    println("Time ran = " + timeRan)
    println("Total time ran = " + totalTimeRan)
    if (!isLastOutputGenerated && !isTimedOut) {
      val sleepTime = Random.nextInt(batchDuration.milliseconds.toInt * 10)
      logInfo(
        "\\n-------------------------------------------\\n" +
          " Restarting stream computation in " + sleepTime + " ms " +
          "\\n-------------------------------------------\\n"
      )
      Thread.sleep(sleepTime)
      // Recreate the streaming context from checkpoint
      ssc = StreamingContext.getOrCreate(checkpointDir, () => {
        throw new Exception("Trying to create new context when it " +
          "should be reading from checkpoint file")
      })
    }
  }
  mergedOutput
}
/**
 * Verifies the output values are the same as expected. Since a failure can lead to
 * a batch being processed twice, a batch's output may appear more than once
 * consecutively; callers eliminate consecutive duplicates from `output` beforehand.
 * As a result, the expected output itself must not contain consecutive batches
 * with identical values.
 */
private def verifyOutput[T: ClassTag](output: Seq[T], expectedOutput: Seq[T]) {
  // The expected output must be free of consecutive duplicates for this check to be sound.
  expectedOutput.iterator.sliding(2).withPartial(false).foreach { pair =>
    assert(pair.head != pair.last,
      "Expected output has consecutive duplicate sequence of values")
  }
  // Log the output
  // scalastyle:off println
  println("Expected output, size = " + expectedOutput.size)
  println(expectedOutput.mkString("[", ",", "]"))
  println("Output, size = " + output.size)
  println(output.mkString("[", ",", "]"))
  // scalastyle:on println
  // Every observed value must be among the expected values.
  for (o <- output) {
    assert(expectedOutput.contains(o), "Expected value " + o + " not found")
  }
}
/**
 * Resets the shared failure-tracking state before a new test run.
 */
private def reset() {
  setupCalled = false
  killCount = 0
  killed = false
}
}
/**
 * Thread to kill streaming context after a random period of time.
 * maxKillWaitTime is the upper bound (ms) on the random portion of the delay.
 */
private[streaming]
class KillingThread(ssc: StreamingContext, maxKillWaitTime: Long) extends Thread with Logging {
  override def run() {
    try {
      // If it is the first killing, then allow the first checkpoint to be created
      var minKillWaitTime = if (MasterFailureTest.killCount == 0) 5000 else 2000
      // math.abs keeps the random component non-negative.
      val killWaitTime = minKillWaitTime + math.abs(Random.nextLong % maxKillWaitTime)
      logInfo("Kill wait time = " + killWaitTime)
      Thread.sleep(killWaitTime)
      logInfo(
        "\\n---------------------------------------\\n" +
          "Killing streaming context after " + killWaitTime + " ms" +
          "\\n---------------------------------------\\n"
      )
      if (ssc != null) {
        // Stop the StreamingContext and record the kill for the next iteration.
        ssc.stop()
        MasterFailureTest.killed = true
        MasterFailureTest.killCount += 1
      }
      logInfo("Killing thread finished normally")
    } catch {
      case ie: InterruptedException => logInfo("Killing thread interrupted")
      case e: Exception => logWarning("Exception in killing thread", e)
    }
  }
}
/**
 * Thread to generate input files periodically with the desired text: one file per
 * element of `input`, written every `interval` milliseconds into `testDir`.
 */
private[streaming]
class FileGeneratingThread(input: Seq[String], testDir: Path, interval: Long)
  extends Thread with Logging {
  override def run() {
    // Local temp dir the data is first written to before being moved into testDir.
    val localTestDir = Utils.createTempDir()
    var fs = testDir.getFileSystem(new Configuration())
    // Maximum number of copy attempts per file.
    val maxTries = 3
    try {
      Thread.sleep(5000) // To make sure that all the streaming context has been set up
      for (i <- 0 until input.size) {
        // Write the data to a local file and then move it to the target test directory
        val localFile = new File(localTestDir, (i + 1).toString)
        val hadoopFile = new Path(testDir, (i + 1).toString)
        // Staging name; the final rename makes the file appear atomically to the stream.
        val tempHadoopFile = new Path(testDir, ".tmp_" + (i + 1).toString)
        Files.write(input(i) + "\\n", localFile, Charset.forName("UTF-8"))
        var tries = 0
        var done = false
        while (!done && tries < maxTries) {
          tries += 1
          try {
            // fs.copyFromLocalFile(new Path(localFile.toString), hadoopFile)
            // Copy to the temp name first, then rename so the file stream only
            // ever sees a complete file.
            fs.copyFromLocalFile(new Path(localFile.toString), tempHadoopFile)
            fs.rename(tempHadoopFile, hadoopFile)
            done = true
          } catch {
            case ioe: IOException => {
              // Re-acquire the filesystem handle and retry.
              fs = testDir.getFileSystem(new Configuration())
              logWarning("Attempt " + tries + " at generating file " + hadoopFile + " failed.",
                ioe)
            }
          }
        }
        if (!done) {
          logError("Could not generate file " + hadoopFile)
        } else {
          logInfo("Generated file " + hadoopFile + " at " + System.currentTimeMillis)
        }
        Thread.sleep(interval)
        // Remove the local staging file.
        localFile.delete()
      }
      logInfo("File generating thread finished normally")
    } catch {
      case ie: InterruptedException => logInfo("File generating thread interrupted")
      case e: Exception => logWarning("File generating in killing thread", e)
    } finally {
      fs.close()
      // Recursively delete the local temp directory.
      Utils.deleteRecursively(localTestDir)
    }
  }
}
| tophua/spark1.52 | streaming/src/test/scala/org/apache/spark/streaming/MasterFailureTest.scala | Scala | apache-2.0 | 22,529 |
package cgta.cenum
//////////////////////////////////////////////////////////////
// Copyright (c) 2014 Ben Jackman
// All Rights Reserved
// please contact ben@jackman.biz
// for licensing inquiries
// Created by bjackman @ 5/8/14 3:42 PM
//////////////////////////////////////////////////////////////
// Default CEnum init strategy: forces ordinal assignment on the enum before reading
// the element's cached ordinal.
private[cenum] object CEnumInitStrategyImpl extends CEnumInitStrategy {
  // Returns the element's ordinal, making sure ordinals have been assigned first.
  // NOTE(review): assumes en.setOrdinals() is idempotent — confirm in CEnum.
  def initOrdinal[A <: CEnum](en: A, el : CEnum#EnumElement): Int = {
    en.setOrdinals()
    el._ord
  }
} | cgta/open | cenum/js/src/main/scala/cgta/cenum/CEnumInitStrategyImpl.scala | Scala | mit | 486 |
import fpscala.errorhandling._
object Exercise4_2 extends App {
  /**
   * Population variance of `xs`: the mean of squared deviations from the mean.
   * Returns None for an empty sequence, where the mean is undefined.
   *
   * Fix: the original wrapped `xs` in `Some(...)` only to `flatMap` a conditional out
   * of it; a direct if/else expresses the same logic without the detour.
   */
  def variance(xs: Seq[Double]): Option[Double] =
    if (xs.isEmpty) None
    else {
      val mean = xs.sum / xs.length
      Some(xs.map(x => math.pow(x - mean, 2)).sum / xs.length)
    }

  val ls = List[Double](1.0,2.0,3.0,4.0,5.0)
  println(variance(ls))

  val els = List()
  println(variance(els))
}
| hnfmr/fpscala | ex4.2.scala | Scala | mit | 445 |
package org.ferrit.core.test
import org.ferrit.core.http.{Request, Response, DefaultResponse, Stats}
/**
* Utility used with FakeHttpClient, a fake response minus the Request param
* that is only available at request time.
*/
case class PartResponse(
  status: Int,                       // HTTP status code
  headers: Map[String,Seq[String]],  // response headers (multi-valued)
  content: String                    // response body as text
) {
  // Completes this partial response into a full Response once the Request is known.
  // NOTE(review): content.getBytes uses the JVM default charset — confirm tests rely on that.
  def toResponse(request: Request, stats: Stats = Stats.empty): Response = {
    DefaultResponse(status, headers, content.getBytes, stats, request)
  }
}
| reggoodwin/ferrit | src/test/scala/org/ferrit/core/test/PartResponse.scala | Scala | mit | 488 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution.schedulers
import monix.execution.internal.Trampoline
import scala.concurrent.{ExecutionContext, ExecutionContextExecutor}
/** A `scala.concurrentExecutionContext` implementation
* that executes runnables immediately, on the current thread,
* by means of a trampoline implementation.
*
* Can be used in some cases to keep the asynchronous execution
* on the current thread, as an optimization, but be warned,
* you have to know what you're doing.
*
* The `TrampolineExecutionContext` keeps a reference to another
* `underlying` context, to which it defers for:
*
* - reporting errors
* - deferring the rest of the queue in problematic situations
*
* Deferring the rest of the queue happens:
*
* - in case we have a runnable throwing an exception, the rest
* of the tasks get re-scheduled for execution by using
* the `underlying` context
* - in case we have a runnable triggering a Scala `blocking`
* context, the rest of the tasks get re-scheduled for execution
* on the `underlying` context to prevent any deadlocks
*
* Thus this implementation is compatible with the
* `scala.concurrent.BlockContext`, detecting `blocking` blocks and
* reacting by forking the rest of the queue to prevent deadlocks.
*
* @param underlying is the `ExecutionContext` to which the it defers
* to in case real asynchronous is needed
*/
final class TrampolineExecutionContext private (underlying: ExecutionContext) extends ExecutionContextExecutor {
  // Queue-and-run loop that executes tasks on the calling thread; per the class
  // contract above, it falls back to `underlying` on exceptions or `blocking` regions.
  private[this] val trampoline = new Trampoline

  // Runs `runnable` via the trampoline, i.e. synchronously on the current thread
  // whenever possible.
  override def execute(runnable: Runnable): Unit =
    trampoline.execute(runnable, underlying)

  // Error reporting is delegated to the underlying context.
  override def reportFailure(t: Throwable): Unit =
    underlying.reportFailure(t)
}
object TrampolineExecutionContext {
  /** Builds a [[TrampolineExecutionContext]] instance.
    *
    * @param underlying is the `ExecutionContext` deferred to whenever real
    *        asynchronous execution is required
    */
  def apply(underlying: ExecutionContext): TrampolineExecutionContext = {
    new TrampolineExecutionContext(underlying)
  }

  /** A [[TrampolineExecutionContext]] that executes everything immediately,
    * on the current thread.
    *
    * Implementation notes:
    *
    *  - chaining too many `blocking` operations will eventually trigger a
    *    stack overflow error
    *  - `reportFailure` re-throws the exception in the hope that it will get
    *    caught and reported by the underlying thread-pool, because there is
    *    nowhere it could report the error safely (e.g. `System.err` might be
    *    routed to `/dev/null` with no way to override it)
    */
  val immediate: TrampolineExecutionContext = {
    val synchronousContext = new ExecutionContext {
      override def execute(r: Runnable): Unit = r.run()
      override def reportFailure(e: Throwable): Unit = throw e
    }
    apply(synchronousContext)
  }
}
| alexandru/monifu | monix-execution/js/src/main/scala/monix/execution/schedulers/TrampolineExecutionContext.scala | Scala | apache-2.0 | 3,642 |
package controllers;
import play.api.mvc.Controller;
import play.api.mvc.Action;
import play.api.cache.Cached;
import play.api.Play.current;
import play.api.i18n.Lang;
object I18N extends Controller {

  /** Stores the selected language on the response and redirects back to
    * the referring page, or to the site root when no referer was sent.
    */
  def setLang(lang: String) = Action { request =>
    val backTo = request.headers.get("referer").getOrElse("/")
    Found(backTo).withLang(Lang(lang))
  }

  /** Serves the "ui."-prefixed messages for the given language as a
    * JavaScript resource. The rendered response is cached per language.
    */
  def messages(lang: String) = Cached("messages." + lang) {
    Action { request =>
      import play.api.Play
      import play.api.i18n.MessagesPlugin

      // All message maps keyed by language, falling back to "default"
      // when the requested language has no dedicated bundle.
      val allMessages = Play.current.plugin[MessagesPlugin].map(_.api.messages).getOrElse(Map.empty)
      val forLang = allMessages.getOrElse(lang, allMessages("default"))
      Ok(views.html.messages(forLang.filterKeys(_.startsWith("ui."))))
        .as("text/javascript;charset=\"utf-8\"")
    }
  }
}
package domain.count
import domain.Dao._
import play.api.db._
import play.api.Play.current
import domain._
import domain.entity._
object GetActivity {

  /** Resolves a voicebank's numeric id from its name. */
  def getId(name: String): Int =
    DB.withConnection { implicit connection =>
      Dao.getVoicebankId(name)
    }

  /** Resolves a voicebank's name from its numeric id. */
  def getName(id: Int): String =
    DB.withConnection { implicit connection =>
      Dao.getVoicebankName(id)
    }

  /** Most recently updated voicebanks.
    * NOTE(review): per the Dao signature used in getVoicebanks below, the
    * arguments here are (limit = 1, offset = 10) — possibly meant to be
    * limit 10, offset 0; confirm against Dao.getVoicebanksWithLimit.
    */
  def getNewcomers: List[Voicebank] =
    DB.withConnection { implicit connection =>
      Dao.getVoicebanksWithLimit("", 1, 10, Sort.UPDATE_TIME, Order.DESC)
    }

  /** Total number of registered voicebanks. */
  def getVoicebanksCount: Int =
    DB.withConnection { implicit connection =>
      Dao.getVoicebanksCount
    }

  /** Number of distinct filenames recorded for the given voicebank id. */
  def getFilenameCount(id: Int): Int =
    DB.withConnection { implicit connection =>
      Dao.getFilenameCount(id)
    }

  /** Number of distinct filenames recorded for the given voicebank name. */
  def getFilenameCountByName(name: String): Int =
    DB.withConnection { implicit connection =>
      Dao.getFilenameCountByName(name)
    }

  /** The ten most recent activity entries across all voicebanks. */
  def getRecentActivity: List[RecentActivity] =
    DB.withConnection { implicit connection =>
      Dao.getRecentActivityWithLimit(10)
    }

  /** One page of activity details for the voicebank with the given id. */
  def getDetailActivity(id: Int, page: Int, pageSize: Int, sort: ActivitySort, order: Order): List[Activity] =
    DB.withConnection { implicit connection =>
      Dao.getActivityDetails(id, pageSize, pageSize * page, sort, order)
    }

  /** One page of activity details for the voicebank with the given name. */
  def getDetailActivityByName(name: String, page: Int, pageSize: Int, sort: ActivitySort, order: Order): List[Activity] =
    DB.withConnection { implicit connection =>
      Dao.getActivityDetailsByName(name, pageSize, pageSize * page, sort, order)
    }

  /** One page of voicebanks whose name matches the given filter. */
  def getVoicebanks(name: String, page: Int, pageSize: Int, sort: Sort, order: Order): List[Voicebank] =
    DB.withConnection { implicit connection =>
      Dao.getVoicebanksWithLimit(name, pageSize, pageSize * page, sort, order)
    }

  /** Count recorded for the given voicebank/filename pair. */
  def getCount(name: String, filename: String): Int =
    DB.withConnection { implicit connection =>
      Dao.getCount(name, filename)
    }
}
import com.twitter.scalding
import java.io.{IOException, FileInputStream, FileOutputStream, File}
import java.nio.file._
import java.nio.file.attribute.BasicFileAttributes
import java.util.jar.JarOutputStream
import java.util.zip.ZipOutputStream
import org.apache.commons.io.IOUtils
import org.apache.hadoop.util.ToolRunner
import org.apache.hadoop.conf.Configuration
import org.apache.tools.zip.ZipEntry
import pl.project13.hadoop.NoJarTool
import scala.collection.JavaConverters
import scala.collection.mutable.ListBuffer
import scala.io.Source
import com.twitter.scalding.Hdfs
import main.scala.Global
/**
* This main is intended for use only for the Activator run command as
* the default. If you pass no arguments, it runs all of the examples
* using default arguments. Use the sbt command "scalding" to run any
* of the examples with the ability to specify your own arguments.
* See also the companion objects' main methods.
*/
object RunAll {
  /** Entry point for the Activator `run` command: with no arguments it
    * runs the WordCount example with defaults, otherwise the first
    * argument names the job to run and the full argument list is
    * forwarded unchanged.
    */
  def main(args: Array[String]) {
    args.headOption match {
      case None       => WordCount.main(args)
      case Some(name) => Run.run(name, "", args)
    }
  }
}
object Run {
  /** Runs the named Scalding job using the default compiled-classes directory. */
  def run(name: String, message: String, args: Array[String]): Int = {
    run(name, new File(Global.absPath + "/target/scala-2.10/classes/"), message, args)
  }

  /** Runs the named Scalding job on the configured Hadoop cluster.
    *
    * @param name    job class name, prepended to `args` for scalding's Tool
    * @param classesDir compiled classes directory
    *        NOTE(review): this parameter is currently ignored — the
    *        relative path "target/scala-2.10/classes/" is hard-coded in
    *        `collectClassesFrom` below; confirm whether it should be used.
    * @param message informational text printed before the run
    * @param args    job arguments, forwarded after the job name
    * @return the exit code reported by Hadoop's ToolRunner
    */
  def run(name: String, classesDir: File, message: String, args: Array[String]): Int = {
    println(s"\\n==== $name " + ("===" * 20))
    println(message)
    val argsWithName = name +: args
    println(s"Running: ${argsWithName.mkString(" ")}")
    val masterIp = Global.host // on cluster, internal ip
    val conf = new Configuration
    // make sure these are set, otherwise Cascading will use "LocalJobRunner"
    conf.setStrings("fs.default.name", s"hdfs://$masterIp:" + Global.fsPort)
    conf.setStrings("mapred.job.tracker", s"$masterIp:" + Global.jtPort)
    val tool = new NoJarTool(
      wrappedTool = new scalding.Tool,
      collectClassesFrom = Some(new File("target/scala-2.10/classes/")),
      libJars = List(new File(Global.absPath + "/target/scala-2.10/" + Global.buildName + "-" + Global.buildVersion + ".jar"))
    )
    ToolRunner.run(conf, tool, argsWithName)
  }

  /** Prints an optional message followed by the first ten lines of the
    * given output file.
    *
    * Fixes: the file-name line previously used a plain string literal
    * (missing the `s` interpolator), so the literal text
    * "$outputFileName" was printed instead of the actual path; the
    * Source is now closed to avoid leaking a file handle.
    */
  def printSomeOutput(outputFileName: String, message: String = "") = {
    if (message.length > 0) println(message)
    println(s"Output in $outputFileName:")
    val source = Source.fromFile(outputFileName)
    try source.getLines.take(10) foreach println
    finally source.close()
    println("...\\n")
  }
}
| pomadchin/hadoop-dg-decomp | src/main/scala/scalding/RunAll.scala | Scala | apache-2.0 | 2,437 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.descriptors
import java.util
import java.util.Collections
import org.apache.flink.table.api.ValidationException
import org.apache.flink.table.util.JavaScalaConversionUtil.toJava
import org.junit.Assert.assertEquals
import org.junit.Test
/**
* Tests for [[DescriptorProperties]].
*/
class DescriptorPropertiesTest {
  // Key under which all array-related tests below store their values.
  private val ARRAY_KEY = "my-array"
  // Equality of DescriptorProperties must depend on contents only.
  @Test
  def testEquals(): Unit = {
    val properties1 = new DescriptorProperties()
    properties1.putString("hello1", "12")
    properties1.putString("hello2", "13")
    properties1.putString("hello3", "14")
    // Same contents, same insertion order as properties1.
    val properties2 = new DescriptorProperties()
    properties2.putString("hello1", "12")
    properties2.putString("hello2", "13")
    properties2.putString("hello3", "14")
    // Same contents but a different insertion order (hello3 before
    // hello2): equality must not depend on the order keys were added.
    val properties3 = new DescriptorProperties()
    properties3.putString("hello1", "12")
    properties3.putString("hello3", "14")
    properties3.putString("hello2", "13")
    assertEquals(properties1, properties2)
    assertEquals(properties1, properties3)
  }
  // An absent array passes validation when the minimum length is 0.
  @Test
  def testMissingArray(): Unit = {
    val properties = new DescriptorProperties()
    testArrayValidation(properties, 0, Integer.MAX_VALUE)
  }
  // Arrays are stored as indexed keys ("my-array.0", "my-array.1", ...)
  // and read back element-wise via the supplied getter.
  @Test
  def testArrayValues(): Unit = {
    val properties = new DescriptorProperties()
    properties.putString(s"$ARRAY_KEY.0", "12")
    properties.putString(s"$ARRAY_KEY.1", "42")
    properties.putString(s"$ARRAY_KEY.2", "66")
    testArrayValidation(properties, 1, Integer.MAX_VALUE)
    assertEquals(
      util.Arrays.asList(12, 42, 66),
      properties.getArray(ARRAY_KEY, toJava((key: String) => {
        properties.getInt(key)
      })))
  }
  // A single value stored directly under the array key (no ".0" suffix)
  // is treated as a one-element array.
  @Test
  def testArraySingleValue(): Unit = {
    val properties = new DescriptorProperties()
    properties.putString(ARRAY_KEY, "12")
    testArrayValidation(properties, 1, Integer.MAX_VALUE)
    assertEquals(
      Collections.singletonList(12),
      properties.getArray(ARRAY_KEY, toJava((key: String) => {
        properties.getInt(key)
      })))
  }
  // A non-integer element anywhere in the array must fail validation.
  @Test(expected = classOf[ValidationException])
  def testArrayInvalidValues(): Unit = {
    val properties = new DescriptorProperties()
    properties.putString(s"$ARRAY_KEY.0", "12")
    properties.putString(s"$ARRAY_KEY.1", "INVALID")
    properties.putString(s"$ARRAY_KEY.2", "66")
    testArrayValidation(properties, 1, Integer.MAX_VALUE)
  }
  // A non-integer value in the single-value form must fail validation.
  @Test(expected = classOf[ValidationException])
  def testArrayInvalidSingleValue(): Unit = {
    val properties = new DescriptorProperties()
    properties.putString(ARRAY_KEY, "INVALID")
    testArrayValidation(properties, 1, Integer.MAX_VALUE)
  }
  // An absent array must fail validation when minLength >= 1.
  @Test(expected = classOf[ValidationException])
  def testInvalidMissingArray(): Unit = {
    val properties = new DescriptorProperties()
    testArrayValidation(properties, 1, Integer.MAX_VALUE)
  }
  // withoutKeys returns a copy with the listed keys removed.
  @Test
  def testRemoveKeys(): Unit = {
    val properties = new DescriptorProperties()
    properties.putString("hello1", "12")
    properties.putString("hello2", "13")
    properties.putString("hello3", "14")
    val actual = properties.withoutKeys(util.Arrays.asList("hello1", "hello3"))
    val expected = new DescriptorProperties()
    expected.putString("hello2", "13")
    assertEquals(expected, actual)
  }
  // asPrefixedMap prepends the given prefix to every key.
  @Test
  def testPrefixedMap(): Unit = {
    val properties = new DescriptorProperties()
    properties.putString("hello1", "12")
    properties.putString("hello2", "13")
    properties.putString("hello3", "14")
    val actual = properties.asPrefixedMap("prefix.")
    val expected = new DescriptorProperties()
    expected.putString("prefix.hello1", "12")
    expected.putString("prefix.hello2", "13")
    expected.putString("prefix.hello3", "14")
    assertEquals(expected.asMap, actual)
  }
  // Validates ARRAY_KEY as an array of non-nullable ints whose length
  // must lie within [minLength, maxLength].
  private def testArrayValidation(
      properties: DescriptorProperties,
      minLength: Int,
      maxLength: Int)
    : Unit = {
    val validator: (String) => Unit = (key: String) => {
      properties.validateInt(key, false)
    }
    properties.validateArray(
      ARRAY_KEY,
      toJava(validator),
      minLength,
      maxLength)
  }
}
| mylog00/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/DescriptorPropertiesTest.scala | Scala | apache-2.0 | 4,912 |
package name.abhijitsarkar.akka
import akka.actor.ActorSystem
import akka.stream.scaladsl.{GraphDSL, Sink, UnzipWith}
import akka.stream.{ActorMaterializer, SinkShape}
import akka.util.ByteString
import name.abhijitsarkar.akka.model.Rsvp
import name.abhijitsarkar.akka.service.{MeetupStreamingService, RsvpSubscriber}
import name.abhijitsarkar.akka.util.ActorPlumbing
import scala.concurrent.ExecutionContext
import scala.concurrent.ExecutionContext.Implicits.global
// Streams Meetup RSVP events and fans each parsed Rsvp out to two
// actor-backed subscribers via an UnzipWith graph stage.
object MeetupStreamingApp extends App {
  implicit val system = ActorSystem("twitter")
  implicit val materializer = ActorMaterializer()
  // Picks up the globally imported ExecutionContext.Implicits.global.
  implicit val executionContext: ExecutionContext = {
    implicitly
  }
  implicit val actorPlumbing: ActorPlumbing = ActorPlumbing()
  // Two independent actor subscribers; each receives every Rsvp.
  val firstSubscriber = Sink.actorSubscriber(RsvpSubscriber.props("first"))
  val secondSubscriber = Sink.actorSubscriber(RsvpSubscriber.props("second"))
  // Sink that parses each incoming ByteString as JSON into an Rsvp and
  // duplicates it to both subscribers. Note: the builder is mutable and
  // wiring order matters inside this block.
  val rsvpSink = GraphDSL.create() { implicit builder =>
    import GraphDSL.Implicits._
    // UnzipWith duplicates the parsed Rsvp onto two outputs.
    val splitStream = builder.add(UnzipWith[ByteString, Rsvp, Rsvp] { byteStr =>
      import model.RsvpJsonSupport._
      import spray.json._
      val rsvp = byteStr.utf8String.parseJson.convertTo[Rsvp]
      (rsvp, rsvp)
    })
    /* Broadcast could be used too */
    // val rsvpFlow: Flow[ByteString, Rsvp, NotUsed] = Flow[ByteString].map {
    // import model.RsvpJsonSupport._
    // import spray.json._
    //
    // _.utf8String.parseJson.convertTo[Rsvp]
    // }
    //
    // val broadcast = builder.add(Broadcast[Rsvp](2))
    //
    // val rsvp = builder.add(rsvpFlow)
    //
    // broadcast ~> firstSubscriber
    // broadcast ~> secondSubscriber
    //
    // rsvp ~> broadcast
    //
    // SinkShape(rsvp.in)
    splitStream.out0 ~> firstSubscriber
    splitStream.out1 ~> secondSubscriber
    SinkShape(splitStream.in)
  }
  val meetupStreamingService = new MeetupStreamingService(rsvpSink)
  // Starts consuming the stream; runs until the process is stopped.
  meetupStreamingService.stream
}
} | asarkar/akka | akka-streams-learning/meetup-streaming/src/main/scala/name/abhijitsarkar/akka/MeetupStreamingApp.scala | Scala | gpl-3.0 | 1,981 |
/**
* Copyright (C) 2009-2011 the original author or authors.
* See the notice.md file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.scalate.scuery
import xml.{Attribute, Document, Elem, Node, NodeSeq, Null, Text}
object Transform {
  // Implicit conversions so a Transform can be used directly wherever a
  // NodeSeq or Traversable[Node] is expected; both simply apply it.
  implicit def toNodes(transform: Transform): NodeSeq = transform()
  implicit def toTraversable(transform: Transform): Traversable[Node] = transform()
}
/**
* A helper class to make it easier to write new transformers within loops inside a ancestor transformer
*/
class Transform(val nodes: NodeSeq, ancestors: Seq[Node] = Nil) extends Transformer {
  // Runs the inherited Transformer over the captured nodes/ancestors.
  def apply(): NodeSeq = apply(nodes, ancestors)
  // Instance-level conversions mirroring the companion's implicits.
  implicit def toNodes(): NodeSeq = apply()
  implicit def toTraversable(): Traversable[Node] = apply()
}
| dnatic09/scalate | scalate-core/src/main/scala/org/fusesource/scalate/scuery/Transform.scala | Scala | apache-2.0 | 1,375 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import java.time.ZoneId
import org.apache.parquet.io.api.{GroupConverter, RecordMaterializer}
import org.apache.parquet.schema.MessageType
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.internal.SQLConf.LegacyBehaviorPolicy
import org.apache.spark.sql.types.StructType
/**
* A [[RecordMaterializer]] for Catalyst rows.
*
* @param parquetSchema Parquet schema of the records to be read
* @param catalystSchema Catalyst schema of the rows to be constructed
 * @param schemaConverter A Parquet-Catalyst schema converter that helps initialize row converters
* @param convertTz the optional time zone to convert to int96 data
* @param datetimeRebaseMode the mode of rebasing date/timestamp from Julian to Proleptic Gregorian
* calendar
* @param int96RebaseMode the mode of rebasing INT96 timestamp from Julian to Proleptic Gregorian
* calendar
*/
private[parquet] class ParquetRecordMaterializer(
    parquetSchema: MessageType,
    catalystSchema: StructType,
    schemaConverter: ParquetToSparkSchemaConverter,
    convertTz: Option[ZoneId],
    datetimeRebaseMode: LegacyBehaviorPolicy.Value,
    int96RebaseMode: LegacyBehaviorPolicy.Value)
  extends RecordMaterializer[InternalRow] {
  // Root converter that assembles each Parquet record into an
  // InternalRow. NoopUpdater is passed as the parent updater —
  // presumably because the root row is read back via getCurrentRecord
  // rather than propagated upward; confirm against ParquetRowConverter.
  private val rootConverter = new ParquetRowConverter(
    schemaConverter,
    parquetSchema,
    catalystSchema,
    convertTz,
    datetimeRebaseMode,
    int96RebaseMode,
    NoopUpdater)
  // The row assembled from the record most recently consumed.
  override def getCurrentRecord: InternalRow = rootConverter.currentRecord
  override def getRootConverter: GroupConverter = rootConverter
}
| maropu/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetRecordMaterializer.scala | Scala | apache-2.0 | 2,479 |
package com.twitter.util
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import com.twitter.conversions.storage._
@RunWith(classOf[JUnitRunner])
class StorageUnitTest extends FunSuite {
  // Round-trip conversions between byte counts and larger units.
  test("StorageUnit: should convert whole numbers into storage units (back and forth)") {
    assert(1.byte.inBytes == 1)
    assert(1.kilobyte.inBytes == 1024)
    assert(1.megabyte.inMegabytes == 1.0)
    assert(1.gigabyte.inMegabytes == 1024.0)
    assert(1.gigabyte.inKilobytes == 1024.0 * 1024.0)
  }
  // Human-readable formatting uses binary (1024-based) IEC units.
  test("StorageUnit: should confer an essential humanity") {
    assert(900.bytes.toHuman == "900 B")
    assert(1.kilobyte.toHuman == "1024 B")
    assert(2.kilobytes.toHuman == "2.0 KiB")
    assert(Int.MaxValue.bytes.toHuman == "2.0 GiB")
    assert(Long.MaxValue.bytes.toHuman == "8.0 EiB")
  }
  // Values beyond Int range must parse without overflow.
  test("StorageUnit: should handle Long value") {
    assert(StorageUnit.parse("3589654126.bytes") == 3589654126L.bytes)
  }
  // parse accepts "<number>.<unit>" for every supported unit,
  // including negative values.
  test("StorageUnit: should accept humanity") {
    assert(StorageUnit.parse("142.bytes") == 142.bytes)
    assert(StorageUnit.parse("78.kilobytes") == 78.kilobytes)
    assert(StorageUnit.parse("1.megabyte") == 1.megabyte)
    assert(StorageUnit.parse("873.gigabytes") == 873.gigabytes)
    assert(StorageUnit.parse("3.terabytes") == 3.terabytes)
    assert(StorageUnit.parse("9.petabytes") == 9.petabytes)
    assert(StorageUnit.parse("-3.megabytes") == -3.megabytes)
  }
  // Unknown units and malformed separators are rejected.
  test("StorageUnit: should reject soulless robots") {
    intercept[NumberFormatException] { StorageUnit.parse("100.bottles") }
    intercept[NumberFormatException] { StorageUnit.parse("100 bytes") }
  }
  test("StorageUnit: should deal with negative values") {
    assert(-123.bytes.inBytes == -123)
    assert(-2.kilobytes.toHuman == "-2.0 KiB")
  }
  test("StorageUnit: should min properly") {
    assert((1.bytes min 2.bytes) == 1.bytes)
    assert((2.bytes min 1.bytes) == 1.bytes)
    assert((2.bytes min 2.bytes) == 2.bytes)
  }
  // Equal values must have equal hash codes.
  test("StorageUnit: should adhere to company-issued serial number") {
    val i = 4.megabytes
    val j = 4.megabytes
    assert(i.hashCode == j.hashCode)
  }
}
| BuoyantIO/twitter-util | util-core/src/test/scala/com/twitter/util/StorageUnitTest.scala | Scala | apache-2.0 | 2,152 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import scala.io.Source
import org.apache.spark.sql.{AnalysisException, FastOperator}
import org.apache.spark.sql.catalyst.plans.QueryPlan
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, OneRowRelation}
import org.apache.spark.sql.catalyst.trees.TreeNodeTag
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
// Record with 27 Int columns, used by the "limit number of fields"
// test below: with MAX_TO_STRING_FIELDS = 26 the plan string must be
// truncated ("more fields"), with 27 it must not.
case class QueryExecutionTestRecord(
    c0: Int, c1: Int, c2: Int, c3: Int, c4: Int,
    c5: Int, c6: Int, c7: Int, c8: Int, c9: Int,
    c10: Int, c11: Int, c12: Int, c13: Int, c14: Int,
    c15: Int, c16: Int, c17: Int, c18: Int, c19: Int,
    c20: Int, c21: Int, c22: Int, c23: Int, c24: Int,
    c25: Int, c26: Int)
class QueryExecutionSuite extends SharedSparkSession {
  import testImplicits._
  // Asserts that the plan dump at `path` contains the four plan
  // sections for a Range(0, expected) query, comparing everything up to
  // (but excluding) the codegen section.
  def checkDumpedPlans(path: String, expected: Int): Unit = {
    assert(Source.fromFile(path).getLines.toList
      .takeWhile(_ != "== Whole Stage Codegen ==") == List(
      "== Parsed Logical Plan ==",
      s"Range (0, $expected, step=1, splits=Some(2))",
      "",
      "== Analyzed Logical Plan ==",
      "id: bigint",
      s"Range (0, $expected, step=1, splits=Some(2))",
      "",
      "== Optimized Logical Plan ==",
      s"Range (0, $expected, step=1, splits=Some(2))",
      "",
      "== Physical Plan ==",
      s"*(1) Range (0, $expected, step=1, splits=2)",
      ""))
  }
  test("dumping query execution info to a file") {
    withTempDir { dir =>
      val path = dir.getCanonicalPath + "/plans.txt"
      val df = spark.range(0, 10)
      df.queryExecution.debug.toFile(path)
      checkDumpedPlans(path, expected = 10)
    }
  }
  // A second dump to the same path must overwrite, not append: only the
  // second query's plans may remain.
  test("dumping query execution info to an existing file") {
    withTempDir { dir =>
      val path = dir.getCanonicalPath + "/plans.txt"
      val df = spark.range(0, 10)
      df.queryExecution.debug.toFile(path)
      val df2 = spark.range(0, 1)
      df2.queryExecution.debug.toFile(path)
      checkDumpedPlans(path, expected = 1)
    }
  }
  // Intermediate directories must be created as needed.
  test("dumping query execution info to non-existing folder") {
    withTempDir { dir =>
      val path = dir.getCanonicalPath + "/newfolder/plans.txt"
      val df = spark.range(0, 100)
      df.queryExecution.debug.toFile(path)
      checkDumpedPlans(path, expected = 100)
    }
  }
  test("dumping query execution info by invalid path") {
    val path = "1234567890://plans.txt"
    val exception = intercept[IllegalArgumentException] {
      spark.range(0, 100).queryExecution.debug.toFile(path)
    }
    assert(exception.getMessage.contains("Illegal character in scheme name"))
  }
  // QueryExecutionTestRecord has 27 fields, so the truncation marker
  // must appear at a limit of 26 and disappear at 27.
  test("limit number of fields by sql config") {
    def relationPlans: String = {
      val ds = spark.createDataset(Seq(QueryExecutionTestRecord(
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
        16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)))
      ds.queryExecution.toString
    }
    withSQLConf(SQLConf.MAX_TO_STRING_FIELDS.key -> "26") {
      assert(relationPlans.contains("more fields"))
    }
    withSQLConf(SQLConf.MAX_TO_STRING_FIELDS.key -> "27") {
      assert(!relationPlans.contains("more fields"))
    }
  }
  // File dumps must never truncate fields, regardless of the config.
  test("check maximum fields restriction") {
    withTempDir { dir =>
      val path = dir.getCanonicalPath + "/plans.txt"
      val ds = spark.createDataset(Seq(QueryExecutionTestRecord(
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
        16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)))
      ds.queryExecution.debug.toFile(path)
      val localRelations = Source.fromFile(path).getLines().filter(_.contains("LocalRelation"))
      assert(!localRelations.exists(_.contains("more fields")))
    }
  }
  // AnalysisException during planning is rendered into the string;
  // Errors must propagate.
  test("toString() exception/error handling") {
    spark.experimental.extraStrategies = Seq[SparkStrategy]((_: LogicalPlan) => Nil)
    def qe: QueryExecution = new QueryExecution(spark, OneRowRelation())
    // Nothing!
    assert(qe.toString.contains("OneRowRelation"))
    // Throw an AnalysisException - this should be captured.
    spark.experimental.extraStrategies = Seq[SparkStrategy](
      (_: LogicalPlan) => throw new AnalysisException("exception"))
    assert(qe.toString.contains("org.apache.spark.sql.AnalysisException"))
    // Throw an Error - this should not be captured.
    spark.experimental.extraStrategies = Seq[SparkStrategy](
      (_: LogicalPlan) => throw new Error("error"))
    val error = intercept[Error](qe.toString)
    assert(error.getMessage.contains("error"))
    spark.experimental.extraStrategies = Nil
  }
  // Tags set on the plan of one stage must not leak into the cloned
  // plans of other stages (analyzed / cached / optimized / executed).
  test("SPARK-28346: clone the query plan between different stages") {
    val tag1 = new TreeNodeTag[String]("a")
    val tag2 = new TreeNodeTag[String]("b")
    val tag3 = new TreeNodeTag[String]("c")
    def assertNoTag(tag: TreeNodeTag[String], plans: QueryPlan[_]*): Unit = {
      plans.foreach { plan =>
        assert(plan.getTagValue(tag).isEmpty)
      }
    }
    val df = spark.range(10)
    val analyzedPlan = df.queryExecution.analyzed
    val cachedPlan = df.queryExecution.withCachedData
    val optimizedPlan = df.queryExecution.optimizedPlan
    analyzedPlan.setTagValue(tag1, "v")
    assertNoTag(tag1, cachedPlan, optimizedPlan)
    cachedPlan.setTagValue(tag2, "v")
    assertNoTag(tag2, analyzedPlan, optimizedPlan)
    optimizedPlan.setTagValue(tag3, "v")
    assertNoTag(tag3, analyzedPlan, cachedPlan)
    val tag4 = new TreeNodeTag[String]("d")
    try {
      // Tag the plan seen by the planner; the optimized plan must stay
      // untouched because planning works on a clone.
      spark.experimental.extraStrategies = Seq(new SparkStrategy() {
        override def apply(plan: LogicalPlan): Seq[SparkPlan] = {
          plan.foreach {
            case r: org.apache.spark.sql.catalyst.plans.logical.Range =>
              r.setTagValue(tag4, "v")
            case _ =>
          }
          Seq(FastOperator(plan.output))
        }
      })
      // trigger planning
      df.queryExecution.sparkPlan
      assert(optimizedPlan.getTagValue(tag4).isEmpty)
    } finally {
      spark.experimental.extraStrategies = Nil
    }
    val tag5 = new TreeNodeTag[String]("e")
    df.queryExecution.executedPlan.setTagValue(tag5, "v")
    assertNoTag(tag5, df.queryExecution.sparkPlan)
  }
}
| pgandhi999/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/QueryExecutionSuite.scala | Scala | apache-2.0 | 6,925 |
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalInteger, Input, MustBeNoneOrZeroOrPositive}
// CT accounts box AC21 ("Previous Administrative Expenses"): a
// user-entered optional integer that must be absent, zero or positive
// (enforced by MustBeNoneOrZeroOrPositive).
case class AC21(value: Option[Int]) extends CtBoxIdentifier(name = "Previous Administrative Expenses")
  with CtOptionalInteger with MustBeNoneOrZeroOrPositive with Input
| scottcutts/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/AC21.scala | Scala | apache-2.0 | 942 |
package epam.bdcc_app.json
import com.fasterxml.jackson.annotation.JsonIgnoreProperties
import com.fasterxml.jackson.annotation.JsonProperty
import lombok.Data
// Top-level envelope of the VK "follows" API response, deserialized by
// Jackson. NOTE: lombok's @Data annotation was removed — lombok is a
// Java-only annotation processor that never runs on Scala sources, so
// the annotation had no effect and was misleading.
@JsonIgnoreProperties(ignoreUnknown = true)
class Follows_get {
  // Populated reflectively by Jackson during deserialization; remains
  // null until then.
  @JsonProperty("response")
  val response: Vk_follows = null
}
| mkasatkin/bdcc_app.vk_samza | src/main/scala/epam/bdcc_app/json/Follows_get.scala | Scala | apache-2.0 | 303 |
package com.workshop
import org.specs2.mutable.Specification
class IsIncreasingTest extends Specification {
  // Strict "less than" ordering on Int driving the checker under test.
  val intSmaller = (a: Int, b: Int) => a < b
  val isSorted = new IsIncreasing[Int](intSmaller)

  /** Convenience wrapper: runs the checker over the given elements. */
  private def checkOf(elems: Int*) = isSorted.check(elems)

  "check" should {
    "be true for increasing seq" in {
      checkOf(1, 2, 3, 4) must beTrue
    }
    "be false for non-sorted seq" in {
      checkOf(1, 2, 5, 3, 4) must beFalse
    }
    "be false for reverse-sorted seq" in {
      checkOf(4, 3, 2, 1) must beFalse
    }
  }
}
| maximn/scala-workshop | src/test/scala/com/workshop/IsIncreasingTest.scala | Scala | mit | 527 |
package org.jetbrains.plugins.scala.findUsages.compilerReferences
package bytecode
import java.io.File
import java.{util => ju}
import org.jetbrains.jps.backwardRefs.CompilerRef
import org.jetbrains.plugins.scala.findUsages.compilerReferences.indices.{ScFunExprCompilerRef, ScalaCompilerReferenceWriter}
// Compiler-reference index extracted from the classes compiled out of a
// single Scala source file.
private[findUsages] final case class CompiledScalaFile private (
  file: File, // the originating source file
  backwardHierarchy: ju.Map[CompilerRef, collection.Seq[CompilerRef]], // super type -> direct inheritors / fun-expr refs
  refs: ju.Map[CompilerRef, collection.Seq[Int]] // referenced member -> source lines where it is used
)
private[compilerReferences] object CompiledScalaFile {
  // Builds the per-file index from the parsed class files. Uses mutable
  // java.util maps with Map.merge(_, _, _ ++ _) to accumulate multiple
  // inheritors/usages under the same key.
  def apply(
    source: File,
    classes: Set[ParsedClass],
    writer: ScalaCompilerReferenceWriter
  ): CompiledScalaFile = {
    val backwardHierarchy = new ju.HashMap[CompilerRef, collection.Seq[CompilerRef]]()
    val refs = new ju.HashMap[CompilerRef, collection.Seq[Int]]()
    val refProvider = new BytecodeReferenceCompilerRefProvider(writer)
    classes.foreach { parsed =>
      // Enumerate the class name and build its CompilerRef; function
      // expressions are keyed by source line instead of name.
      val className = writer.enumerateName(parsed.classInfo.fqn)
      val classRef = parsed match {
        case cl: RegularClass =>
          if (cl.classInfo.isAnonymous) new CompilerRef.JavaCompilerAnonymousClassRef(className)
          else new CompilerRef.JavaCompilerClassRef(className)
        case anon: FunExprClass => ScFunExprCompilerRef(anon.line)
      }
      // Record this class as an inheritor of each of its super classes.
      val superClasses: Set[CompilerRef] = parsed.classInfo.superClasses
        .map(className => new CompilerRef.JavaCompilerClassRef(writer.enumerateName(className)))
      superClasses.foreach(backwardHierarchy.merge(_, Seq(classRef), _ ++ _))
      // Record each function expression under its SAM interface.
      parsed.funExprs.foreach { sam =>
        val ref = new ScFunExprCompilerRef(sam.line)
        val ifaceId = writer.enumerateName(sam.interface)
        val iface = new CompilerRef.JavaCompilerClassRef(ifaceId)
        backwardHierarchy.merge(iface, Seq(ref), _ ++ _)
      }
    }
    // Group all member references by fqn (tagged 0 = method, 1 = field
    // to keep same-named methods and fields apart) and collect the
    // source lines where each member is used.
    classes
      .flatMap(_.refs)
      .groupBy {
        case mref: MethodReference => (mref.fqn, 0)
        case fref: FieldReference => (fref.fqn, 1)
      }
      .foreach {
        case (_, rs) =>
          val compilerRef = refProvider.toCompilerRef(rs.head)
          val lines: Seq[Int] = rs.map(_.line).toSeq
          refs.put(compilerRef, lines)
      }
    new CompiledScalaFile(source, backwardHierarchy, refs)
  }
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/findUsages/compilerReferences/bytecode/CompiledScalaFile.scala | Scala | apache-2.0 | 2,374 |
import java.net.ConnectException
import com.fasterxml.jackson.core.`type`.TypeReference
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import dispatch._, Defaults._
import akka.actor.ActorSystem
import akka.actor.Props
import akka.actor.Actor
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration._
object App {
def main(args: Array[String]): Unit = {
val foo = Stopwatch.start // Forcing initialization
val actorSystemConf = ConfigFactory.parseString("""
akka {
stdout-loglevel = "OFF"
loglevel = "OFF"
}
""")
val mapper = new ObjectMapper()
mapper.registerModule(DefaultScalaModule)
val config = Config("localhost:8080", mapper, 1000)
val system = ActorSystem("MySystem", ConfigFactory.load(actorSystemConf))
val coordinator = system.actorOf(Props(classOf[AssemblingCoordinator], config), name = "Coordinator")
coordinator ! config
system awaitTermination(5 minutes)
}
class AssemblingCoordinator(config: Config) extends Actor {
def receive = {
case config: Config =>
val req = url(s"http://${config.host}/api/graph/edges-quantity")
val futureQuantity = Http(req OK as.String) map { _.toInt }
val quantityEdges = futureQuantity()
val batches = quantityEdges / config.batchSize + (math signum (quantityEdges % config.batchSize))
context.become(collectEdges(Array[Edge](), Set[Int]()))
context.actorOf(Props[EdgesBatchProcessor], name = "EdgesBatchProcessor") ! (batches, config)
}
def collectEdges(edges: Array[Edge], vertices: Set[Int]): Receive = {
case adjacencies: Array[Edge] =>
context.become(collectEdges(
edges ++ adjacencies,
vertices ++ edges.flatMap(e => (e.i, e.j).productIterator map (_.asInstanceOf[Int]))))
case _: BatchProcessingFinished =>
println(s"Vertices: ${vertices.size}")
println(s"Edges: ${edges.length}")
println(s"Milliseconds taken: ${Stopwatch elapsedTime}")
context.system.shutdown()
}
}
class EdgesBatchProcessor extends Actor {
import akka.actor.OneForOneStrategy
import akka.actor.SupervisorStrategy._
override val supervisorStrategy = OneForOneStrategy() {
case _: AdjacencyServerFailure => Restart
case _: Exception => Escalate
}
def receive = {
case (batches: Int, config: Config) =>
context.become(receiveBatchResults(batches, 1))
for (batch <- List.range(0, batches)) {
val adjancencyFetcher = context.actorOf(Props[AdjacencyArrayFetcher], name = "AdjacencyArrayFetcher" + batch)
adjancencyFetcher ! (config, batch * config.batchSize)
}
}
def receiveBatchResults(totalBatches: Int, batchesAlreadyProcessed: Int): Receive = {
case edges: Array[Edge] =>
context.parent ! edges
if (batchesAlreadyProcessed == totalBatches) {
dispatch.Http.shutdown()
context.parent ! BatchProcessingFinished()
} else {
context.become(receiveBatchResults(totalBatches, batchesAlreadyProcessed + 1))
}
}
}
/**
 * Fetches one batch of edges from the REST API, deserializes it with
 * Jackson and sends the resulting Array[Edge] to its parent, then stops
 * itself. Transient server errors (HTTP 502, connection refused) are
 * rethrown as AdjacencyServerFailure so the supervisor restarts this actor
 * and the request is retried via preRestart.
 */
class AdjacencyArrayFetcher extends Actor {
  def receive = {
    case (config: Config, offset: Int) =>
      val req = url(s"http://${config.host}/api/graph?offset=$offset&limit=${config.batchSize}")
      val response: Future[Either[Throwable, String]] = Http(req OK as.String).either
      response() match {
        case Left(StatusCode(502)) => throw AdjacencyServerFailure(offset)
        case Left(_: ConnectException) => throw AdjacencyServerFailure(offset)
        case Left(ex) => throw ex
        case Right(json) =>
          context.parent ! config.mapper.readValue(json, new TypeReference[Array[Edge]] {})
          context.stop(self)
      }
  }

  /**
   * On restart after a transient server failure, re-enqueue the message
   * that caused the failure so the fetch is retried.
   *
   * Fix: the original version pattern-matched both `cause` and `message`
   * non-exhaustively (`case AdjacencyServerFailure(_)` only, and
   * `case Some(m)` only), throwing a MatchError from inside the restart
   * hook for any other cause or for a missing message.
   */
  override def preRestart(cause: Throwable, message: Option[Any]): Unit = {
    cause match {
      case AdjacencyServerFailure(_) => message.foreach(self ! _)
      case _ => () // non-retryable causes are escalated by the supervisor
    }
  }
}
/** Shared runtime settings: server host, Jackson mapper and batch size. */
case class Config(host: String, mapper: ObjectMapper, batchSize: Int)
/** A weighted edge between the vertices with indices `i` and `j`. */
case class Edge(i: Int, j: Int, weight: Int)
/** Retryable failure: the adjacency endpoint was temporarily unavailable. */
case class AdjacencyServerFailure(offset: Int)
  extends Exception(s"Temporary server failure fetching adjacencies after $offset")
/** Signals that every batch has been fetched and forwarded. */
case class BatchProcessingFinished()
/** Crude wall-clock stopwatch; `start` is captured at first initialization. */
case object Stopwatch {
  val start: Long = System.nanoTime()

  /** Milliseconds elapsed since `start`, rounded to the nearest integer. */
  def elapsedTime(): Long = {
    val stop = System.nanoTime()
    // `toDouble` instead of the unidiomatic `asInstanceOf[Double]`, and the
    // final expression is the result — no `return` keyword needed.
    math.round((stop - start).toDouble / 1000000)
  }
}
}
| andrepnh/creepy-graph-adventure | scala-actors/src/main/scala-2.11/App.scala | Scala | mit | 4,733 |
package org.ensime.api
import java.io.File
import java.nio.charset.Charset
/**
* There should be exactly one `RpcResponseEnvelope` in response to an
* `RpcRequestEnvelope`. If the `callId` is empty, the response is
* an asynchronous event.
*/
case class RpcResponseEnvelope(
callId: Option[Int],
payload: EnsimeServerMessage
)
sealed trait EnsimeServerMessage
/**
* A message that the server can send to the client at any time.
*/
sealed trait EnsimeEvent extends EnsimeServerMessage
//////////////////////////////////////////////////////////////////////
// Contents of the payload
sealed trait RpcResponse extends EnsimeServerMessage
case class EnsimeServerError(description: String) extends RpcResponse
case object DebuggerShutdownEvent
sealed trait DebugVmStatus extends RpcResponse
// must have redundant status: String to match legacy API
case class DebugVmSuccess(
status: String = "success"
) extends DebugVmStatus
case class DebugVmError(
errorCode: Int,
details: String,
status: String = "error"
) extends DebugVmStatus
sealed trait GeneralSwankEvent extends EnsimeEvent
sealed trait DebugEvent extends EnsimeEvent
/**
* Generic background notification.
*
* NOTE: codes will be deprecated, preferring sealed families.
*/
case class SendBackgroundMessageEvent(
detail: String,
code: Int = 105
) extends GeneralSwankEvent
/** The presentation compiler is ready to accept requests. */
case object AnalyzerReadyEvent extends GeneralSwankEvent
/** The presentation compiler has finished analysing the entire project. */
case object FullTypeCheckCompleteEvent extends GeneralSwankEvent
/** The search engine has finished indexing the classpath. */
case object IndexerReadyEvent extends GeneralSwankEvent
/** The presentation compiler was restarted. Existing `:type-id`s are invalid. */
case object CompilerRestartedEvent extends GeneralSwankEvent
/** The presentation compiler has invalidated all existing notes. */
case object ClearAllScalaNotesEvent extends GeneralSwankEvent
/** The presentation compiler has invalidated all existing notes. */
case object ClearAllJavaNotesEvent extends GeneralSwankEvent
case class Note(
file: String,
msg: String,
severity: NoteSeverity,
beg: Int,
end: Int,
line: Int,
col: Int
) extends RpcResponse
/** The presentation compiler is providing notes: e.g. errors, warnings. */
case class NewScalaNotesEvent(
isFull: Boolean,
notes: List[Note]
) extends GeneralSwankEvent
/** The presentation compiler is providing notes: e.g. errors, warnings. */
case class NewJavaNotesEvent(
isFull: Boolean,
notes: List[Note]
) extends GeneralSwankEvent
/** The debugged VM has stepped to a new location and is now paused awaiting control. */
case class DebugStepEvent(
threadId: DebugThreadId,
threadName: String,
file: File,
line: Int
) extends DebugEvent
/** The debugged VM has stopped at a breakpoint. */
case class DebugBreakEvent(
threadId: DebugThreadId,
threadName: String,
file: File,
line: Int
) extends DebugEvent
/** The debugged VM has started. */
case object DebugVMStartEvent extends DebugEvent
/** The debugger has disconnected from the debugged VM. */
case object DebugVMDisconnectEvent extends DebugEvent
/** The debugged VM has thrown an exception and is now paused waiting for control. */
case class DebugExceptionEvent(
exception: Long,
threadId: DebugThreadId,
threadName: String,
file: Option[File],
line: Option[Int]
) extends DebugEvent
/** A new thread has started. */
case class DebugThreadStartEvent(threadId: DebugThreadId) extends DebugEvent
/** A thread has died. */
case class DebugThreadDeathEvent(threadId: DebugThreadId) extends DebugEvent
/** Communicates stdout/stderr of debugged VM to client. */
case class DebugOutputEvent(body: String) extends DebugEvent
case object ReloadExistingFilesEvent
case object AskReTypecheck
case object VoidResponse extends RpcResponse
case class RefactorFailure(
procedureId: Int,
reason: String,
status: scala.Symbol = 'failure // redundant field
) extends RpcResponse
trait RefactorProcedure {
def procedureId: Int
def refactorType: RefactorType
}
case class RefactorEffect(
procedureId: Int,
refactorType: RefactorType,
changes: List[FileEdit],
status: scala.Symbol = 'success // redundant field
) extends RpcResponse with RefactorProcedure
case class RefactorResult(
procedureId: Int,
refactorType: RefactorType,
touchedFiles: Seq[File],
status: scala.Symbol = 'success // redundant field
) extends RpcResponse with RefactorProcedure
sealed abstract class RefactorDesc(val refactorType: RefactorType)
case class InlineLocalRefactorDesc(file: File, start: Int, end: Int) extends RefactorDesc(RefactorType.InlineLocal)
case class RenameRefactorDesc(newName: String, file: File, start: Int, end: Int) extends RefactorDesc(RefactorType.Rename)
case class ExtractMethodRefactorDesc(methodName: String, file: File, start: Int, end: Int)
extends RefactorDesc(RefactorType.ExtractMethod)
case class ExtractLocalRefactorDesc(name: String, file: File, start: Int, end: Int)
extends RefactorDesc(RefactorType.ExtractLocal)
case class OrganiseImportsRefactorDesc(file: File) extends RefactorDesc(RefactorType.OrganizeImports)
case class AddImportRefactorDesc(qualifiedName: String, file: File)
extends RefactorDesc(RefactorType.AddImport)
/**
 * Identifies a source file, optionally together with its current contents —
 * either inline (`contents`) or spilled to a separate file (`contentsIn`).
 * Presumably at most one of the two is set, for in-editor unsaved buffers —
 * TODO confirm with callers.
 */
case class SourceFileInfo(
  file: File,
  contents: Option[String] = None,
  contentsIn: Option[File] = None
) {
  // keep the log file sane for unsaved files
  override def toString = s"SourceFileInfo($file,${contents.map(_ => "...")},$contentsIn)"
}
sealed trait PatchOp {
def start: Int
}
case class PatchInsert(
start: Int,
text: String
) extends PatchOp
case class PatchDelete(
start: Int,
end: Int
) extends PatchOp
case class PatchReplace(
start: Int,
end: Int,
text: String
) extends PatchOp
sealed trait EntityInfo extends RpcResponse {
def name: String
def members: Iterable[EntityInfo]
}
object SourceSymbol {
val allSymbols: List[SourceSymbol] = List(
ObjectSymbol, ClassSymbol, TraitSymbol, PackageSymbol, ConstructorSymbol, ImportedNameSymbol, TypeParamSymbol,
ParamSymbol, VarFieldSymbol, ValFieldSymbol, OperatorFieldSymbol, VarSymbol, ValSymbol, FunctionCallSymbol,
ImplicitConversionSymbol, ImplicitParamsSymbol, DeprecatedSymbol
)
}
sealed trait SourceSymbol
case object ObjectSymbol extends SourceSymbol
case object ClassSymbol extends SourceSymbol
case object TraitSymbol extends SourceSymbol
case object PackageSymbol extends SourceSymbol
case object ConstructorSymbol extends SourceSymbol
case object ImportedNameSymbol extends SourceSymbol
case object TypeParamSymbol extends SourceSymbol
case object ParamSymbol extends SourceSymbol
case object VarFieldSymbol extends SourceSymbol
case object ValFieldSymbol extends SourceSymbol
case object OperatorFieldSymbol extends SourceSymbol
case object VarSymbol extends SourceSymbol
case object ValSymbol extends SourceSymbol
case object FunctionCallSymbol extends SourceSymbol
case object ImplicitConversionSymbol extends SourceSymbol
case object ImplicitParamsSymbol extends SourceSymbol
case object DeprecatedSymbol extends SourceSymbol
sealed trait PosNeeded
case object PosNeededNo extends PosNeeded
case object PosNeededAvail extends PosNeeded
case object PosNeededYes extends PosNeeded
sealed trait SourcePosition extends RpcResponse
case class EmptySourcePosition() extends SourcePosition
case class OffsetSourcePosition(file: File, offset: Int) extends SourcePosition
case class LineSourcePosition(file: File, line: Int) extends SourcePosition
/** A package and its members, which must be pre-sorted by name. */
case class PackageInfo(
  name: String,
  fullName: String,
  // n.b. members should be sorted by name for consistency
  members: Seq[EntityInfo]
) extends EntityInfo {
  // Fail fast at construction rather than emit nondeterministic output.
  require(members == members.sortBy(_.name), "members should be sorted by name")
}
sealed trait SymbolSearchResult extends RpcResponse {
def name: String
def localName: String
def declAs: DeclaredAs
def pos: Option[SourcePosition]
}
case class TypeSearchResult(
name: String,
localName: String,
declAs: DeclaredAs,
pos: Option[SourcePosition]
) extends SymbolSearchResult
case class MethodSearchResult(
name: String,
localName: String,
declAs: DeclaredAs,
pos: Option[SourcePosition],
ownerName: String
) extends SymbolSearchResult
// what is the point of these types?
case class ImportSuggestions(symLists: List[List[SymbolSearchResult]]) extends RpcResponse
case class SymbolSearchResults(syms: List[SymbolSearchResult]) extends RpcResponse
case class SymbolDesignations(
file: File,
syms: List[SymbolDesignation]
) extends RpcResponse
case class SymbolDesignation(
start: Int,
end: Int,
symType: SourceSymbol
)
case class SymbolInfo(
name: String,
localName: String,
declPos: Option[SourcePosition],
`type`: TypeInfo,
isCallable: Boolean,
ownerTypeId: Option[Int]
) extends RpcResponse {
def tpe = `type`
}
case class Op(
op: String,
description: String
)
case class MethodBytecode(
className: String,
methodName: String,
methodSignature: Option[String],
byteCode: List[Op],
startLine: Int,
endLine: Int
)
case class CompletionSignature(
sections: List[List[(String, String)]],
result: String
)
case class CompletionInfo(
name: String,
typeSig: CompletionSignature,
typeId: Int,
isCallable: Boolean,
relevance: Int,
toInsert: Option[String]
) extends RpcResponse
case class CompletionInfoList(
prefix: String,
completions: List[CompletionInfo]
) extends RpcResponse
case class Breakpoint(file: File, line: Int) extends RpcResponse
case class BreakpointList(active: List[Breakpoint], pending: List[Breakpoint]) extends RpcResponse
/** A pair of character offsets delimiting a span in a source file. */
case class OffsetRange(from: Int, to: Int)

/** Companion, usable directly as an `(Int, Int) => OffsetRange` function value. */
object OffsetRange extends ((Int, Int) => OffsetRange) {
  /** Builds a degenerate range whose start and end are the same offset. */
  def apply(fromTo: Int): OffsetRange = OffsetRange(fromTo, fromTo)
}
/** A debugger thread id. */
case class DebugThreadId(id: Long)

object DebugThreadId {
  /**
   * Parses a ThreadId from its decimal string form.
   * @param s a Long encoded as a string
   * @return the corresponding ThreadId
   */
  def apply(s: String): DebugThreadId = DebugThreadId(s.toLong)
}
/** A debugger object id. */
case class DebugObjectId(id: Long)

object DebugObjectId {
  /**
   * Parses a DebugObjectId from its decimal string form.
   * @param s a Long encoded as a string
   * @return the corresponding DebugObjectId
   */
  def apply(s: String): DebugObjectId = DebugObjectId(s.toLong)
}
sealed trait DebugLocation extends RpcResponse
case class DebugObjectReference(objectId: DebugObjectId) extends DebugLocation
object DebugObjectReference {
def apply(objId: Long): DebugObjectReference = new DebugObjectReference(DebugObjectId(objId))
}
case class DebugStackSlot(threadId: DebugThreadId, frame: Int, offset: Int) extends DebugLocation
case class DebugArrayElement(objectId: DebugObjectId, index: Int) extends DebugLocation
case class DebugObjectField(objectId: DebugObjectId, field: String) extends DebugLocation
sealed trait DebugValue extends RpcResponse {
def typeName: String
}
case class DebugNullValue(
typeName: String
) extends DebugValue
case class DebugPrimitiveValue(
summary: String,
typeName: String
) extends DebugValue
case class DebugObjectInstance(
summary: String,
fields: List[DebugClassField],
typeName: String,
objectId: DebugObjectId
) extends DebugValue
case class DebugStringInstance(
summary: String,
fields: List[DebugClassField],
typeName: String,
objectId: DebugObjectId
) extends DebugValue
case class DebugArrayInstance(
length: Int,
typeName: String,
elementTypeName: String,
objectId: DebugObjectId
) extends DebugValue
case class DebugClassField(
index: Int,
name: String,
typeName: String,
summary: String
) extends RpcResponse
case class DebugStackLocal(
index: Int,
name: String,
summary: String,
typeName: String
) extends RpcResponse
case class DebugStackFrame(
index: Int,
locals: List[DebugStackLocal],
numArgs: Int,
className: String,
methodName: String,
pcLocation: LineSourcePosition,
thisObjectId: DebugObjectId
) extends RpcResponse
case class DebugBacktrace(
frames: List[DebugStackFrame],
threadId: DebugThreadId,
threadName: String
) extends RpcResponse
case class NamedTypeMemberInfo(
name: String,
`type`: TypeInfo,
pos: Option[SourcePosition],
signatureString: Option[String],
declAs: DeclaredAs
) extends EntityInfo {
override def members = List.empty
def tpe = `type`
}
sealed trait TypeInfo extends EntityInfo {
def name: String
def typeId: Int
def declAs: DeclaredAs
def fullName: String
def typeArgs: Iterable[TypeInfo]
def members: Iterable[EntityInfo]
def pos: Option[SourcePosition]
def outerTypeId: Option[Int]
final def declaredAs = declAs
final def args = typeArgs
}
case class BasicTypeInfo(
name: String,
typeId: Int,
declAs: DeclaredAs,
fullName: String,
typeArgs: Iterable[TypeInfo],
members: Iterable[EntityInfo],
pos: Option[SourcePosition],
outerTypeId: Option[Int]
) extends TypeInfo
case class ArrowTypeInfo(
name: String,
typeId: Int,
resultType: TypeInfo,
paramSections: Iterable[ParamSectionInfo]
) extends TypeInfo {
def declAs = DeclaredAs.Nil
def fullName = name
def typeArgs = List.empty
def members = List.empty
def pos = None
def outerTypeId = None
}
case class CallCompletionInfo(
resultType: TypeInfo,
paramSections: Iterable[ParamSectionInfo]
) extends RpcResponse
case class ParamSectionInfo(
params: Iterable[(String, TypeInfo)],
isImplicit: Boolean
)
case class InterfaceInfo(
`type`: TypeInfo,
viaView: Option[String]
) extends RpcResponse {
def tpe = `type`
}
case class TypeInspectInfo(
`type`: TypeInfo,
companionId: Option[Int],
interfaces: Iterable[InterfaceInfo],
infoType: scala.Symbol = 'typeInspect // redundant field in protocol
) extends RpcResponse {
def supers = interfaces
}
/** ERangePosition is a mirror of scala compiler internal RangePosition as a case class to */
case class ERangePosition(file: String, offset: Int, start: Int, end: Int)
case class ERangePositions(positions: List[ERangePosition]) extends RpcResponse
case class FileRange(file: String, start: Int, end: Int) extends RpcResponse
case class EnsimeImplementation(
name: String
)
case class ConnectionInfo(
pid: Option[Int] = None,
implementation: EnsimeImplementation = EnsimeImplementation("ENSIME"),
version: String = "0.8.17"
) extends RpcResponse
sealed trait ImplicitInfo
case class ImplicitConversionInfo(
start: Int,
end: Int,
fun: SymbolInfo
) extends ImplicitInfo
case class ImplicitParamInfo(
start: Int,
end: Int,
fun: SymbolInfo,
params: List[SymbolInfo],
funIsImplicit: Boolean
) extends ImplicitInfo
case class ImplicitInfos(infos: List[ImplicitInfo]) extends RpcResponse
sealed trait LegacyRawResponse extends RpcResponse
case object FalseResponse extends LegacyRawResponse
case object TrueResponse extends LegacyRawResponse
case class StringResponse(text: String) extends LegacyRawResponse
| eddsteel/ensime | api/src/main/scala/org/ensime/api/outgoing.scala | Scala | gpl-3.0 | 15,173 |
package org.vaadin.addons.rinne
import com.vaadin.ui.TextArea
import org.vaadin.addons.rinne.mixins.AbstractTextFieldMixin
/**
 * Scala-friendly wrapper around Vaadin's TextArea, exposing the Java
 * getters/setters as properties.
 */
class VTextArea extends TextArea with AbstractTextFieldMixin {

  /** Number of text rows displayed by the widget. */
  def rows: Int = getRows

  // Fix: `: Unit =` instead of the deprecated procedure syntax `def f(...) { }`.
  def rows_=(rows: Int): Unit = setRows(rows)

  /** Whether the text area wraps long lines. */
  def wordwrap: Boolean = isWordwrap

  def wordwrap_=(wordwrap: Boolean): Unit = setWordwrap(wordwrap)
}
| LukaszByczynski/rinne | src/main/scala/org/vaadin/addons/rinne/VTextArea.scala | Scala | apache-2.0 | 372 |
import _root_.io.gatling.core.scenario.Simulation
import ch.qos.logback.classic.{Level, LoggerContext}
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
/**
* Performance test for the ApollonDiagram entity.
*/
class ApollonDiagramGatlingTest extends Simulation {

    val context: LoggerContext = LoggerFactory.getILoggerFactory.asInstanceOf[LoggerContext]

    // Log all HTTP requests
    //context.getLogger("io.gatling.http").setLevel(Level.valueOf("TRACE"))
    // Log failed HTTP requests
    //context.getLogger("io.gatling.http").setLevel(Level.valueOf("DEBUG"))

    // Target base URL; override on the command line with -DbaseURL=...
    val baseURL = Option(System.getProperty("baseURL")) getOrElse """http://localhost:8080"""

    val httpConf = http
        .baseUrl(baseURL)
        .inferHtmlResources()
        .acceptHeader("*/*")
        .acceptEncodingHeader("gzip, deflate")
        .acceptLanguageHeader("fr,fr-fr;q=0.8,en-us;q=0.5,en;q=0.3")
        .connectionHeader("keep-alive")
        .userAgentHeader("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:33.0) Gecko/20100101 Firefox/33.0")
        .silentResources // Silence all resources like css or css so they don't clutter the results

    val headers_http = Map(
        "Accept" -> """application/json"""
    )

    val headers_http_authentication = Map(
        "Content-Type" -> """application/json""",
        "Accept" -> """application/json"""
    )

    // The JWT captured by the authentication step is injected via ${access_token}.
    val headers_http_authenticated = Map(
        "Accept" -> """application/json""",
        "Authorization" -> "${access_token}"
    )

    val scn = scenario("Test the ApollonDiagram entity")
        // Sanity check: an unauthenticated call must be rejected with 401.
        .exec(http("First unauthenticated request")
        .get("/api/account")
        .headers(headers_http)
        .check(status.is(401))
        ).exitHereIfFailed
        .pause(10)
        // Log in as admin and save the Authorization header for later requests.
        .exec(http("Authentication")
        .post("/api/authenticate")
        .headers(headers_http_authentication)
        .body(StringBody("""{"username":"admin", "password":"admin"}""")).asJson
        .check(header("Authorization").saveAs("access_token"))).exitHereIfFailed
        .pause(2)
        .exec(http("Authenticated request")
        .get("/api/account")
        .headers(headers_http_authenticated)
        .check(status.is(200)))
        .pause(10)
        // CRUD loop: list, create (remember Location), read 5x, delete.
        .repeat(2) {
            exec(http("Get all apollonDiagrams")
            .get("/api/apollon-diagrams")
            .headers(headers_http_authenticated)
            .check(status.is(200)))
            .pause(10 seconds, 20 seconds)
            .exec(http("Create new apollonDiagram")
            .post("/api/apollon-diagrams")
            .headers(headers_http_authenticated)
            .body(StringBody("""{
"id":null
, "title":"SAMPLE_TEXT"
, "jsonRepresentation":"SAMPLE_TEXT"
}""")).asJson
            .check(status.is(201))
            .check(headerRegex("Location", "(.*)").saveAs("new_apollonDiagram_url"))).exitHereIfFailed
            .pause(10)
            .repeat(5) {
                exec(http("Get created apollonDiagram")
                .get("${new_apollonDiagram_url}")
                .headers(headers_http_authenticated))
                .pause(10)
            }
            .exec(http("Delete created apollonDiagram")
            .delete("${new_apollonDiagram_url}")
            .headers(headers_http_authenticated))
            .pause(10)
        }

    val users = scenario("Users").exec(scn)

    // Ramp up `users` virtual users (default 100) over `ramp` minutes (default 1).
    setUp(
        users.inject(rampUsers(Integer.getInteger("users", 100)) during(Integer.getInteger("ramp", 1) minutes))
    ).protocols(httpConf)
}
| ls1intum/ArTEMiS | src/test/gatling/user-files/simulations/ApollonDiagramGatlingTest.scala | Scala | mit | 3,638 |
package org.squeryl.logging
import xml.Unparsed
import java.io.{FileOutputStream, PrintStream}
/**
 * Renders query statistics as a static HTML page containing four Google
 * Visualization bar charts.
 */
object BarChartRenderer {

  /**
   * Bundles the data for one chart: a title, the x-axis label, the stat
   * lines to plot, and how to extract the plotted measure from a line.
   */
  class Stat(val title: String, val xAxisLabel: String, val lines: Iterable[StatLine], measureFromLike: StatLine => String) {

    /** JS array literal of quoted query labels, e.g. ['q1','q2']. */
    def queryLabelsJSArray =
      lines.map(sl => "'" + sl.statement.definitionOrCallSite + "'").mkString("[",",","]")

    /** JS array literal of measures, in the same order as the labels. */
    def measuresJSArray =
      lines.map(measureFromLike(_)).mkString("[",",","]")
  }

  /**
   * Writes the summary page for the top `n` ranking statements to
   * `staticHtmlFile`.
   */
  def generateStatSummary(staticHtmlFile: java.io.File, n: Int) = {

    val page =
      BarChartRenderer.page(
        new Stat(
          "Top "+n+" statements with longest avg",
          "avg time",
          StatsSchema.topRankingStatements(n, Measure.AvgExecTime),
          sl => sl.avgExecTime.toString),
        new Stat(
          "Top "+n+" most called statements",
          "invocation count",
          StatsSchema.topRankingStatements(n, Measure.InvocationCount),
          sl => sl.invocationCount.toString),
        new Stat(
          "Top "+n+" statements incurring most cummulative execution time",
          "cummulative execution time",
          StatsSchema.topRankingStatements(n, Measure.CumulativeExecutionTime),
          sl => sl.cumulativeExecutionTime.toString),
        new Stat(
          "Top "+n+" statements with highest avg row count",
          "avg row count",
          StatsSchema.topRankingStatements(n, Measure.AvgResultSetSize),
          sl => sl.avgRowCount.toString)
      )

    // Fix: close the stream even when printing fails; the original leaked
    // the PrintStream (and its file handle) on any exception.
    val ps = new PrintStream(new FileOutputStream(staticHtmlFile))
    try {
      ps.print(page)
    } finally {
      ps.close()
    }
  }

  // JS helper embedded verbatim in the generated page.
  val drawFunc = """
  function drawBarGraph(divId, chartTitle, statType, queryClasses, measure) {
    var data = new google.visualization.DataTable();
    data.addColumn('string', 'Z');
    data.addColumn('number', statType);
    data.addRows(queryClasses.length);
    for (var j = 0; j < queryClasses.length; ++j) {
      data.setValue(j, 0, queryClasses[j].toString());
      data.setValue(j, 1, measure[j]);
    }
    var v = new google.visualization.BarChart(document.getElementById(divId))
    v.draw(data,
      {title: chartTitle,
       width:600, height:400,
       vAxis: {title: "Queries"},
       hAxis: {title: statType}
      }
    );
  }
  """

  /** Emits one drawBarGraph(...) call per stat, targeting divs chart1..chartN. */
  def funcCalls(stats: Seq[Stat]) = {
    // StringBuilder: single-threaded use, no need for StringBuffer's locking.
    val sb = new StringBuilder
    var i = 0
    for(s <- stats) {
      i += 1
      sb.append("drawBarGraph('chart")
      sb.append(i)
      sb.append("','")
      sb.append(s.title)
      sb.append("','")
      sb.append(s.xAxisLabel)
      sb.append("',")
      sb.append(s.queryLabelsJSArray)
      sb.append(",")
      sb.append(s.measuresJSArray)
      sb.append(");\n")
    }
    sb.toString
  }

  /** The complete HTML page for the given stats, one chart div per stat. */
  def page(stats: Stat*) =
    <html xmlns="http://www.w3.org/1999/xhtml">
      <head>
        <meta http-equiv="content-type" content="text/html; charset=utf-8"/>
        <title>Performance profile of Squeryl queries</title>
        <script type="text/javascript" src="http://www.google.com/jsapi"></script>
        <script type="text/javascript">
          google.load('visualization', '1', {{packages: ['corechart']}});
        </script>
        <script type="text/javascript">
          {Unparsed(drawFunc)}
          function drawVisualization() {{
            {Unparsed(funcCalls(stats))}
          }}
          google.setOnLoadCallback(drawVisualization);
        </script>
      </head>
      <body style="font-family: Arial;border: 0 none;">
        <div id="chart1" style="width: 1000px; height: 400px;"></div>
        <div id="chart2" style="width: 1000px; height: 400px;"></div>
        <div id="chart3" style="width: 1000px; height: 400px;"></div>
        <div id="chart4" style="width: 1000px; height: 400px;"></div>
      </body>
    </html>
} | takezoux2/squeryl-experimental | src/main/scala/org/squeryl/logging/BarChartRenderer.scala | Scala | apache-2.0 | 4,093 |
package org.chaomai.paraten.matrix
import breeze.linalg.{
CSCMatrix => BCSCM,
DenseMatrix => BDM,
DenseVector => BDV,
VectorBuilder => BVB
}
import breeze.math.Semiring
import breeze.stats.distributions.Rand
import breeze.storage.Zero
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.chaomai.paraten.support.CanUse
import scala.reflect.ClassTag
/**
* Created by chaomai on 11/05/2017.
*/
case class IndexedColumn[V: CanUse](cidx: Long, cvec: BDV[V])
/**
 * A matrix stored column-wise as an RDD of indexed dense columns.
 * Row/column counts may be passed in, or (when non-positive) are derived
 * lazily from the stored columns.
 */
class IndexedColumnMatrix[
    @specialized(Double, Float, Int, Long) V: ClassTag: Zero: Semiring: CanUse](
    private var nrows: Int,
    private var ncols: Long,
    private val storage: RDD[IndexedColumn[V]])
    extends Matrix[V] {

  // NOTE: evaluated at construction, so it may trigger Spark jobs when the
  // dimensions were not supplied.
  val shape: (Int, Long) = (numRows, numCols)

  /** Row count; derived from the longest stored column when not supplied. */
  def numRows: Int = {
    if (nrows <= 0) {
      // Fix: a column of length L spans rows 0 .. L-1, so the row count is
      // the maximum column length itself. The original added a spurious
      // `+ 1` (compare `rand`, which builds columns of length numRows).
      nrows = storage.map(_.cvec.length).reduce(math.max)
    }
    nrows
  }

  /** Column count; derived from the largest stored column index when not supplied. */
  def numCols: Long = {
    if (ncols <= 0) {
      ncols = storage.map(_.cidx).reduce(math.max) + 1L
    }
    ncols
  }

  /** Number of entries different from the numeric zero. */
  override def nnz(implicit n: Numeric[V]): Long = {
    storage
      .map(_.cvec.map(v => if (v != n.zero) 1L else 0L).reduce(_ + _))
      .reduce(_ + _)
  }

  /** Applies `f` to every stored column. */
  def mapStorage[U: ClassTag](f: IndexedColumn[V] => U): RDD[U] =
    storage.map(f)

  /** New matrix with the given columns appended to the storage. */
  def addColumn(col: RDD[IndexedColumn[V]]): IndexedColumnMatrix[V] = {
    IndexedColumnMatrix(numRows, numCols, storage.union(col))
  }

  def toDenseMatrix: BDM[V] = toCSCMatrix.toDenseMatrix

  /** Collects the distributed columns into a local CSC matrix. */
  def toCSCMatrix: BCSCM[V] = {
    // CSC column indices are Ints; clamp the Long column count.
    val ncs = if (numCols >= Int.MaxValue) Int.MaxValue else numCols.toInt
    val builder = new BCSCM.Builder[V](numRows, ncs)
    storage.collect().foreach { p =>
      val cidx = p.cidx.toInt
      val col = p.cvec
      col.foreachPair((ridx, v) => builder.add(ridx, cidx, v))
    }
    builder.result()
  }

  /** Transpose: every indexed column becomes an indexed row. */
  def t: IndexedRowMatrix[V] =
    IndexedRowMatrix(numCols,
                     numRows,
                     storage.map(col => IndexedRow(col.cidx, col.cvec)))

  /** Matrix product, computed as the sum of outer products col_k * row_k^T. */
  def *(m: IndexedRowMatrix[V])(implicit n: Numeric[V]): BDM[V] = {
    require(numCols == m.numRows,
            s"Required matrix product, "
              + s"but the m1.numCols = $numCols and m2.numRows = ${m.numRows}")
    val cols = storage.map(col => (col.cidx, col.cvec))
    val rows = m.mapStorage(row => (row.ridx, row.rvec))
    cols
      .fullOuterJoin(rows)
      .map { x =>
        val col = x._2._1
        val row = x._2._2
        (col, row) match {
          case (None, None) => sys.error("should not happen")
          // An index present on only one side contributes nothing.
          case (Some(_), None) => BDM.zeros[V](numRows, m.numCols)
          case (None, Some(_)) => BDM.zeros[V](numRows, m.numCols)
          case (Some(_), Some(_)) => col.get * row.get.t
        }
      }
      .reduce(_ + _)
  }
}
object IndexedColumnMatrix {

  /** Matrix with no stored columns; all entries are implicitly zero. */
  def zeros[V: ClassTag: Zero: Semiring: CanUse](numRows: Int, numCols: Long)(
      implicit sc: SparkContext): IndexedColumnMatrix[V] =
    IndexedColumnMatrix(numRows, numCols, sc.emptyRDD[IndexedColumn[V]])

  /** numRows x numCols matrix whose entries are drawn from `rand`. */
  def rand[V: ClassTag: Zero: Semiring: CanUse](numRows: Int,
                                                numCols: Long,
                                                rand: Rand[V] = Rand.uniform)(
      implicit sc: SparkContext): IndexedColumnMatrix[V] = {
    // Fix: generate one column per *column* index. The original iterated
    // `0L until numRows`, producing the wrong number of columns whenever
    // numRows != numCols.
    val cols = for { cidx <- 0L until numCols } yield
      IndexedColumn(cidx, BDV.rand[V](numRows, rand))
    IndexedColumnMatrix(numRows, numCols, sc.parallelize(cols))
  }

  /** Builds a matrix from column value sequences (all of the same length). */
  def vals[V: ClassTag: Zero: Semiring: CanUse](cols: Seq[V]*)(
      implicit sc: SparkContext): IndexedColumnMatrix[V] = {
    // Guard: the original crashed with an opaque NoSuchElementException on
    // `cols.head` when called with no columns.
    require(cols.nonEmpty, "at least one column is required")
    val nrows = cols.head.length
    val ncols = cols.length
    val c = cols.zipWithIndex.map { p =>
      val col = p._1
      val cidx = p._2
      val builder = new BVB[V](nrows)
      col.zipWithIndex.foreach { x =>
        val ridx = x._2
        val v = x._1
        builder.add(ridx, v)
      }
      IndexedColumn(cidx, builder.toDenseVector)
    }
    IndexedColumnMatrix(nrows, ncols, sc.parallelize(c))
  }

  def apply[V: ClassTag: Zero: Semiring: CanUse](
      numRows: Int,
      numCols: Long,
      rdd: RDD[IndexedColumn[V]]): IndexedColumnMatrix[V] =
    new IndexedColumnMatrix[V](numRows, numCols, rdd)
}
| ChaoMai/ParaTen | src/main/scala/org/chaomai/paraten/matrix/IndexedColumnMatrix.scala | Scala | apache-2.0 | 4,201 |
/**
* Copyright 2015, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.deeplang.params.choice
import io.deepsense.deeplang.exceptions.DeepLangException
import io.deepsense.deeplang.params.ParameterType
import spray.json.DefaultJsonProtocol._
import spray.json._
import scala.reflect.runtime.universe._
/**
 * Parameter whose value is a set of `Choice`s selected from the family `T`.
 * Serialized as a single JSON object: each selected choice contributes its
 * own JSON fields, merged together (see valueToJson/valueFromJsMap).
 */
case class MultipleChoiceParam[T <: Choice](
    override val name: String,
    override val description: Option[String])
    (implicit tag: TypeTag[T])
  extends AbstractChoiceParam[T, Set[T]] {

  // Defaults are serialized as a flat array of choice names.
  override protected def serializeDefault(choices: Set[T]): JsValue =
    JsArray(choices.toSeq.map(choice => JsString(choice.name)): _*)

  val parameterType = ParameterType.MultipleChoice

  // Merges every choice's JSON fields into one object. On a key collision a
  // later choice would overwrite an earlier one; presumably choice names are
  // unique within the family — TODO confirm.
  override def valueToJson(value: Set[T]): JsValue =
    value.foldLeft(JsObject())(
      (acc: JsObject, choice: T) => JsObject(acc.fields ++ choiceToJson(choice).fields))

  // Inverse of valueToJson: every (label -> params) entry becomes a choice.
  protected override def valueFromJsMap(jsMap: Map[String, JsValue]): Set[T] = {
    jsMap.toList.map {
      case (label, innerJsValue) => choiceFromJson(label, innerJsValue)
    }.toSet
  }

  // Valid iff every selected choice's own parameters validate.
  override def validate(value: Set[T]): Vector[DeepLangException] = {
    value.toVector.flatMap { _.validateParams }
  }

  override def replicate(name: String): MultipleChoiceParam[T] = copy(name = name)
}
| deepsense-io/seahorse-workflow-executor | deeplang/src/main/scala/io/deepsense/deeplang/params/choice/MultipleChoiceParam.scala | Scala | apache-2.0 | 1,816 |
package services
import play.api._
import play.api.Play.current
import models._
import java.nio.charset.StandardCharsets
import java.util.Date
import java.net.URI
import play.api.libs.oauth._
import scala.util.Try
import play.api.mvc.RequestHeader
import play.api.mvc.Controller
import play.api.mvc.Result
import play.api.libs.ws.WSClient
import org.apache.commons.codec.binary.Base64
import scala.concurrent.Future
import org.apache.http.HttpStatus
import play.api.libs.json.Json
import scala.concurrent.ExecutionContext
import play.api.libs.json.Reads
import play.api.libs.ws.WSRequest
import play.api.mvc.Request
import utils.OAuthUtil
import net.oauth.client.OAuthClient
import net.oauth.client.httpclient4.HttpClient4
import play.api.libs.json.JsValue
import scala.util.Failure
import scala.util.Success
import scala.util.Success
import org.apache.http.HttpException
import java.io.IOException
import scala.util.Failure
import java.net.URLEncoder
import play.api.libs.json._
trait JiraApiService {
/**
* Returns all projects which are visible for the currently logged in user. If no user is logged in, it returns the list of projects that are visible when using anonymous access.
*/
def getAllProjects(expand: Option[String] = None)(implicit auth: JiraAuthentication, executionContext: ExecutionContext): Future[Seq[JiraProject]]
/**
* Returns all versions for the specified project. Results are paginated.
*/
def getProjectVersions(projectIdOrKey: String, startAt: Option[Integer]=None, maxResults: Option[Integer] = None,
orderBy: Option[String] = None, expand: Option[String] = None)(implicit auth: JiraAuthentication, executionContext: ExecutionContext): Future[Seq[JiraVersion]]
/**
* Contains a full representation of a the specified project's versions.
*/
def getVersions(projectIdOrKey: String, expand: Option[String] = None)(implicit auth: JiraAuthentication, executionContext: ExecutionContext): Future[Seq[JiraVersion]]
/**
* Searches for issues using JQL.
*/
def findIssues(jql: String, startAt: Option[Integer] = None, maxResults: Option[Integer] = None,
validateQuery: Option[Boolean] = None, fields: Option[String] = Some("*navigatable"), expand: Option[String] = None)(implicit auth: JiraAuthentication, executionContext: ExecutionContext): Future[JiraSearchResult]
}
/** Credentials used to authenticate REST calls against a JIRA instance. */
sealed trait JiraAuthentication
/** HTTP basic authentication with a plain username/password pair. */
case class BasicAuthentication(username: String, password: String) extends JiraAuthentication
/** OAuth 1.0a credentials; `privateKey` is presumably the RSA signing key — TODO confirm format. */
case class OAuthAuthentication(consumerKey:String, privateKey:String, token: String) extends JiraAuthentication
/** Connection settings; `baseUrl` is the JIRA server root, without a trailing slash. */
case class JiraConfiguration(baseUrl: String)
trait JiraApiServiceImpl extends JiraApiService {
import services.JiraWSHelper._
val allProjectsUrl = "/rest/api/2/project?"
val projectVersionsUrl = "/rest/api/2/project/%s/version?"
val versionsUrl = "/rest/api/2/project/%s/versions?"
val findIssuesUrl = "/rest/api/2/search?"
val ws: WSClient
val config: JiraConfiguration
/** GET /rest/api/2/project — all projects visible to the caller. */
def getAllProjects(expand: Option[String] = None)(implicit auth: JiraAuthentication, executionContext: ExecutionContext): Future[Seq[JiraProject]] = {
  val params = getParamList(getParam("expand", expand))
  val url = allProjectsUrl + params
  // NOTE(review): the debug message below is missing its closing ")".
  Logger.debug(s"getAllProjects(expand:$expand, url:$url")
  getList[JiraProject](url)
}

/** GET /rest/api/2/project/{id}/version — paginated versions of one project. */
def getProjectVersions(projectIdOrKey: String, startAt: Option[Integer]=None, maxResults: Option[Integer] = None,
                       orderBy: Option[String] = None, expand: Option[String] = None)(implicit auth: JiraAuthentication, executionContext: ExecutionContext): Future[Seq[JiraVersion]] = {
  val params = getParamList(getParam("startAt", startAt), getParam("maxResults", maxResults), getParam("orderBy", orderBy), getParam("expand", expand))
  val url = projectVersionsUrl.format(projectIdOrKey) + params
  getList[JiraVersion](url)
}

/** GET /rest/api/2/project/{id}/versions — full (unpaginated) version list. */
def getVersions(projectIdOrKey: String, expand: Option[String] = None)(implicit auth: JiraAuthentication, executionContext: ExecutionContext): Future[Seq[JiraVersion]] = {
  val params = getParamList(getParam("expand", expand))
  val url = versionsUrl.format(projectIdOrKey) + params
  getList[JiraVersion](url)
}

/** GET /rest/api/2/search — JQL issue search, paginated via startAt/maxResults. */
def findIssues(jql: String, startAt: Option[Integer] = None, maxResults: Option[Integer] = None,
               validateQuery: Option[Boolean] = None, fields: Option[String] = Some("*navigatable"), expand: Option[String] = None)(implicit auth: JiraAuthentication, executionContext: ExecutionContext): Future[JiraSearchResult] = {
  val params = getParamList(getParam("jql", jql), getParam("startAt", startAt), getParam("maxResults", maxResults), getParam("validateQuery", validateQuery), getParam("fields", fields), getParam("expand", expand))
  val url = findIssuesUrl + params
  getSingleValue[JiraSearchResult](url)
}
def getParamList(params: Option[String]*):String = {
params.flatten.mkString("&")
}
def getParam[T](name:String, value:T):Option[String] = {
getParam(name, Some(value))
}
def getParam[T](name:String, value:Option[T]):Option[String] = {
value.map(v => name + "=" + URLEncoder.encode(v.toString, "UTF-8"))
}
def getList[T](relUrl: String)(implicit auth: JiraAuthentication, executionContext: ExecutionContext, reads: Reads[T]): Future[Seq[T]] = {
val url = config.baseUrl + relUrl
Logger.debug(s"getList(url:$url")
JiraWSHelper.call(config, url, ws).flatMap { _ match {
case Success(json: JsArray) =>
Logger.debug(s"getList:Success -> $json")
Json.fromJson[Seq[T]](json).asOpt.map(j => Future.successful(j)).getOrElse(Future.failed(new RuntimeException(s"Could not parse $json")))
case Success(json) =>
Logger.debug(s"getList:Success -> $json")
Json.fromJson[T](json).asOpt.map(j => Future.successful(Seq(j))).getOrElse(Future.failed(new RuntimeException(s"Could not parse $json")))
case Failure(e) =>
Logger.debug(s"getList:Failure -> $e")
Future.failed(e)
}}
}
def getSingleValue[T](relUrl: String)(implicit auth: JiraAuthentication, executionContext: ExecutionContext, reads: Reads[T]): Future[T] = {
val url = config.baseUrl + relUrl
Logger.debug(s"getOption(url:$url")
JiraWSHelper.call(config, url, ws).flatMap { _ match {
case Success(json) =>
Logger.debug(s"getOption:Success -> $json")
Json.fromJson[T](json).asOpt.map(j => Future.successful(j)).getOrElse(Future.failed(new RuntimeException(s"Could not parse $json")))
case Failure(e) =>
Logger.debug(s"getOption:Failure -> $e")
Future.failed(e)
}}
}
}
object JiraWSHelper {
import scala.async.Async.{async, await}
def call(config:JiraConfiguration, url:String, ws:WSClient)(implicit auth: JiraAuthentication, executionContext: ExecutionContext): Future[Try[JsValue]] = {
auth match {
case oauth: OAuthAuthentication =>
callWithOAuth(config, url, oauth)
case basicAuth: BasicAuthentication =>
callWithBasicAuth(config, url, ws, basicAuth)
}
}
def callWithOAuth(config:JiraConfiguration, url:String, auth:OAuthAuthentication)(implicit executionContext: ExecutionContext):Future[Try[JsValue]] = {
async{
try {
val accessor = OAuthUtil.getAccessor(config.baseUrl, auth.consumerKey, auth.privateKey, "")
accessor.accessToken = auth.token
val client = new OAuthClient(new HttpClient4());
val response = client.invoke(accessor, url, java.util.Collections.emptySet())
Success(Json.parse(response.readBodyAsString()))
}
catch {
case e:Exception => Failure(e)
}
}
}
def callWithBasicAuth(config:JiraConfiguration, url:String, ws:WSClient, auth:BasicAuthentication)(implicit executionContext: ExecutionContext): Future[Try[JsValue]] = {
val pair = s"${auth.username}:${auth.password}"
val encPart = new String(Base64.encodeBase64(pair.getBytes("utf-8")), "utf-8")
val enc = s"Basic $encPart"
ws.url(url).withHeaders((headers :+ ("Authorization" -> enc)) : _*).get.map {resp =>
resp.status match {
case HttpStatus.SC_OK => Success(resp.json)
case error => Failure(new IOException(s"Http status:$error"))
}
}
}
def headers() = {
Seq(("Content-Type" -> "application/json"))
}
}
//object JiraApiServiceImpl extends JiraApiServiceImpl | toggm/play-scala-jira-api | app/services/JIRAApiService.scala | Scala | gpl-2.0 | 8,486 |
package org.ensime.core
import org.ensime.api._
import org.ensime.fixture._
import org.ensime.util.EnsimeSpec
import DeclaredAs.{ Nil => _, _ }
class TypeToScalaNameSpec extends EnsimeSpec
with IsolatedRichPresentationCompilerFixture
with RichPresentationCompilerTestUtils
with ReallyRichPresentationCompilerFixture {
import ReallyRichPresentationCompilerFixture._
val original = EnsimeConfigFixture.ShapelessTestProject
it should "calculate the TypeInfo at point" in withPresCompiler { (config, cc) =>
runForPositionInCompiledSource(
config, cc,
"package com.example",
"import shapeless._, labelled._, syntax.singleton._",
"class Thing {",
" val in@int@t: Int = 13",
" def met@method1@hod1(i: Int): String = i.toString",
" val ar@arrow1@row1: Int => String = (i: Int) => met@call1@hod1(i)",
" def met@method2@hod2(i: Int, j: Long): String = i.toString",
" val arrow2: Int => Long => String = (i: Int, j: Long) => met@call2@hod2(i, j)",
" val arrow0: () => Int = null ; ar@call0@row0()",
" def tu@tuple2@ple2: (String, Int) = null",
" def hl@hlist@ist: Int :: String :: HNil = null",
" def re@refined@fined = 1.narrow",
" def ex@exciting@citing = 'f' ->> 23.narrow",
"}"
) { (p, label, cc) =>
withClue(label) {
cc.askTypeInfoAt(p).getOrElse { fail } shouldBe {
label match {
case "int" =>
BasicTypeInfo("Int", Class, "scala.Int", Nil, Nil, None)
case "method1" | "method2" =>
// the return type
BasicTypeInfo("String", Class, "java.lang.String", Nil, Nil, None)
case "call1" =>
ArrowTypeInfo(
// we used to skip the surrounding brackets, but
// it's confusing when chaining multiple functions
"(Int) => String",
"(scala.Int) => java.lang.String",
BasicTypeInfo(
"String",
Class,
"java.lang.String",
Nil, Nil, None
),
List(ParamSectionInfo(List(("i", BasicTypeInfo("Int", Class, "scala.Int", Nil, Nil, None))), false))
)
case "arrow1" =>
ArrowTypeInfo(
"(Int) => String",
"(scala.Int) => java.lang.String",
BasicTypeInfo("String", Class, "java.lang.String", Nil, Nil, None),
List(ParamSectionInfo(
List(("_0", BasicTypeInfo("Int", Class, "scala.Int", Nil, Nil, None))), false
))
)
case "call0" =>
ArrowTypeInfo(
"() => Int",
"() => scala.Int",
BasicTypeInfo("Int", Class, "scala.Int", Nil, Nil, None),
List(ParamSectionInfo(Nil, false))
)
case "call2" =>
ArrowTypeInfo(
"(Int, Long) => String",
"(scala.Int, scala.Long) => java.lang.String",
BasicTypeInfo("String", Class, "java.lang.String", Nil, Nil, None),
List(ParamSectionInfo(List(
("i", BasicTypeInfo("Int", Class, "scala.Int", Nil, Nil, None)),
("j", BasicTypeInfo("Long", Class, "scala.Long", Nil, Nil, None))
), false))
)
case "tuple2" =>
BasicTypeInfo(
"(String, Int)",
Class,
"(java.lang.String, scala.Int)",
List(
BasicTypeInfo("String", Class, "java.lang.String", Nil, Nil, None),
BasicTypeInfo("Int", Class, "scala.Int", Nil, Nil, None)
),
Nil, None
)
case "hlist" =>
BasicTypeInfo(
"Int :: String :: HNil",
Class,
"scala.Int shapeless.:: java.lang.String shapeless.:: shapeless.HNil",
List(
BasicTypeInfo("Int", Class, "scala.Int", Nil, Nil, None),
BasicTypeInfo("String :: HNil", Class, "java.lang.String shapeless.:: shapeless.HNil",
List(
BasicTypeInfo("String", Class, "java.lang.String", Nil, Nil, None),
BasicTypeInfo("HNil", Trait, "shapeless.HNil", Nil, Nil, None)
), Nil, None)
), Nil, None
)
case "refined" =>
BasicTypeInfo(
"Int(1)",
Class,
"scala.Int(1)",
Nil, Nil, None
)
case "exciting" =>
// potential canary, we might want to prettify KeyTag
BasicTypeInfo(
"Int(23) with KeyTag[Char('f'), Int(23)]",
Class,
"scala.Int(23) with shapeless.labelled.KeyTag[scala.Char('f'), scala.Int(23)]",
Nil, Nil, None
)
}
}
}
}
}
}
| d1egoaz/ensime-sbt | src/sbt-test/sbt-ensime/ensime-server/core/src/it/scala/org/ensime/core/TypeToScalaNameSpec.scala | Scala | apache-2.0 | 5,313 |
package aio
package conduit
/**
*
 * exchange scenarios
 *
 * alternatives:
 *
 * a. the read ByteBuffer contains the full HTTP request and nothing more; the HTTP response can be written in one ByteBuffer
 * b. the read ByteBuffer contains the full HTTP request and nothing more; the HTTP response must be written with many ByteBuffers
 * c. (incomplete — further scenarios were never enumerated)
*
*
*
*/
import scala.util.Random.nextBytes
import java.util.Calendar
import java.nio.charset.StandardCharsets.US_ASCII
object p extends App {
val m = 1
val n = 1000000
val o = 10
val index = 100
val remainder = 1000
private[this] final val pattern = """\\r\\n\\r\\n""".getBytes
private[this] final val data = {
val a = new Array[Byte](index)
val b = new Array[Byte](remainder)
nextBytes(a)
nextBytes(b)
a ++ pattern ++ b
}
def now = Calendar.getInstance.getTimeInMillis
private[this] final val failure: Array[Int] = {
val failure = new Array[Int](pattern.length)
var j = 0
var i = 1
while (i < pattern.length) {
while (j > 0 && pattern(j) != pattern(i)) j = failure(j - 1)
if (pattern(j) == pattern(i)) j += 1
failure(i) = j
i += 1
}
failure
}
private[this] final def indexOf: Int = {
var j = 0
var i = 0
while (i < data.length) {
while (j > 0 && pattern(j) != data(i)) j = failure(j - 1)
if (pattern(j) == data(i)) j += 1
if (j == pattern.length) return i - pattern.length + 1
i += 1
}
-1
}
def test1 = index == data.indexOfSlice(pattern)
def test2 = index == indexOf
def test3 = {
val ss = new String(pattern, US_ASCII)
val datas = new String(data, US_ASCII)
index == datas.indexOf(ss)
}
for (_ ← 1 to o) {
for (_ ← 1 to m) {
val b = now
for (i ← 1 to n) assert(test1)
val e = now
println(s"1 ${e - b}, ${(n / ((e - b) / 1000.0)).toInt}")
}
for (_ ← 1 to m) {
val b = now
for (i ← 1 to n) assert(test2)
val e = now
println(s"2 ${e - b}, ${(n / ((e - b) / 1000.0)).toInt}")
}
for (_ ← 1 to m) {
val b = now
for (i ← 1 to n) assert(test3)
val e = now
println(s"3 ${e - b}, ${(n / ((e - b) / 1000.0)).toInt}")
}
}
}
| weltermann17/pleasant-scala | aio/src/main/scala/aio/conduit/Exchange.scala | Scala | apache-2.0 | 2,226 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package impl
import java.nio.file.Files
import java.nio.file.StandardOpenOption
import java.util.Date
import com.lightbend.lagom.scaladsl.api.ServiceLocator.NoServiceLocator
import com.lightbend.lagom.scaladsl.server._
import com.lightbend.lagom.scaladsl.devmode.LagomDevModeComponents
import play.api.libs.ws.ahc.AhcWSComponents
import api.BarService
import api.FooService
import com.softwaremill.macwire._
class BarLoader extends LagomApplicationLoader {
override def load(context: LagomApplicationContext): LagomApplication =
new BarApplication(context) {
override def serviceLocator = NoServiceLocator
}
override def loadDevMode(context: LagomApplicationContext): LagomApplication =
new BarApplication(context) with LagomDevModeComponents
}
abstract class BarApplication(context: LagomApplicationContext) extends LagomApplication(context) with AhcWSComponents {
override lazy val lagomServer = serverFor[BarService](wire[BarServiceImpl])
lazy val fooService = serviceClient.implement[FooService]
Files.write(
environment.getFile("target/reload.log").toPath,
s"${new Date()} - reloaded\\n".getBytes("utf-8"),
StandardOpenOption.CREATE,
StandardOpenOption.APPEND
)
}
| rcavalcanti/lagom | dev/sbt-plugin/src/sbt-test/sbt-plugin/run-all-scaladsl/b/impl/src/main/scala/impl/BarLoader.scala | Scala | apache-2.0 | 1,301 |
package wom.graph
import cats.implicits._
import common.validation.ErrorOr.ErrorOr
import common.collections.EnhancedCollections._
import wom.callable.Callable
import wom.callable.Callable.{InputDefinitionWithDefault, OptionalInputDefinition, OutputDefinition, RequiredInputDefinition}
import wom.graph.GraphNode._
import wom.graph.GraphNodePort.{InputPort, OutputPort}
import wom.graph.expression.ExpressionNode
trait GraphNode {
def identifier: WomIdentifier
/**
* Alias for identifier.localName.value
*/
final def localName: String = identifier.localName.value
/**
* Alias for identifier.fullyQualifiedName.value
*/
final def fullyQualifiedName: String = identifier.fullyQualifiedName.value
final override def equals(other: Any): Boolean = super.equals(other)
final override def hashCode: Int = super.hashCode()
/**
* Inputs that must be available before this graph node can be run.
*/
def inputPorts: Set[GraphNodePort.InputPort]
/**
* Outputs that are generated by this GraphNode
*/
def outputPorts: Set[GraphNodePort.OutputPort]
def outputByName(name: String): ErrorOr[GraphNodePort.OutputPort] = {
outputPorts.find(_.name == name) match {
case Some(port) => port.validNel
case None => s"No such output: $name".invalidNel
}
}
/**
* The set of all graph nodes which are (transitively) upstream from this one.
*/
lazy val upstreamAncestry = calculateUpstreamAncestry(Set.empty, this)
/**
* The set of all OuterGraphInputNodes which are somewhere upstream of this Node (in the same graph)
*/
lazy val upstreamOuterGraphInputNodes: Set[OuterGraphInputNode] = upstreamAncestry.filterByType[OuterGraphInputNode]
lazy val upstreamPorts: Set[OutputPort] = inputPorts.map(_.upstream)
lazy val upstream: Set[GraphNode] = upstreamPorts.map(_.graphNode)
}
object GraphNode {
// A recursive traversal with a fancy trick to avoid double-counting:
private def calculateUpstreamAncestry(currentSet: Set[GraphNode], graphNode: GraphNode): Set[GraphNode] = {
val setWithUpstream = currentSet ++ graphNode.upstream
val updatesNeeded = graphNode.upstream -- currentSet
updatesNeeded.foldLeft(setWithUpstream)(calculateUpstreamAncestry)
}
def inputPortNamesMatch(required: Set[InputPort], provided: Set[InputPort]): ErrorOr[Unit] = {
def requiredInputFound(r: InputPort): ErrorOr[Unit] = provided.find(_.name == r.name) match {
case Some(p) => if (r.womType.isCoerceableFrom(p.womType)) ().validNel else s"Cannot link a ${p.womType.toDisplayString} to the input ${r.name}: ${r.womType}".invalidNel
case None => s"The required input ${r.name}: ${r.womType.toDisplayString} was not provided.".invalidNel
}
required.toList.traverse(requiredInputFound).void
}
/**
* Allows a level of indirection, so that GraphNodePorts can be constructed before their associated GraphNode is
* constructed. If used, the _graphNode must be set before anything tries to apply 'get'.
*/
class GraphNodeSetter[A <: GraphNode] {
var _graphNode: A = _
private def getGraphNode = _graphNode
def get: Unit => A = _ => getGraphNode
}
private[wom] implicit class EnhancedGraphNodeSet(val nodes: Set[GraphNode]) extends AnyVal {
/**
* Interpret this graph's "GraphInputNode"s as "Callable.InputDefinition"s
*/
def inputDefinitions: Set[_ <: Callable.InputDefinition] = nodes collect {
case required: RequiredGraphInputNode => RequiredInputDefinition(required.identifier.localName, required.womType)
case optional: OptionalGraphInputNode => OptionalInputDefinition(optional.identifier.localName, optional.womType)
case withDefault: OptionalGraphInputNodeWithDefault => InputDefinitionWithDefault(withDefault.identifier.localName, withDefault.womType, withDefault.default)
}
def outputDefinitions: Set[_ <: Callable.OutputDefinition] = nodes collect {
// TODO: FIXME: Do something for PortBasedGraphOutputNodes
case gin: ExpressionBasedGraphOutputNode => OutputDefinition(gin.identifier.localName, gin.womType, gin.womExpression)
}
}
/**
* This pattern is used when new Nodes are wired into a set of Graph nodes, and potentially end up creating new input nodes.
*/
trait GeneratedNodeAndNewNodes {
def node: GraphNode
def newInputs: Set[_ <: ExternalGraphInputNode]
/**
* All OuterGraphInputNodes that should be included in the same graph as 'node'.
*/
def usedOuterGraphInputNodes: Set[_ <: OuterGraphInputNode]
def newExpressions: Set[ExpressionNode]
}
trait GraphNodeWithInnerGraph { this: GraphNode =>
def innerGraph: Graph
}
}
| ohsu-comp-bio/cromwell | wom/src/main/scala/wom/graph/GraphNode.scala | Scala | bsd-3-clause | 4,705 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.history.yarn.rest
import java.security.PrivilegedExceptionAction
/**
* Take any function `() => Type` and apply it as a privileged action.
*
* @param function function to apply
* @tparam T return type of the function
*/
private[spark] class PrivilegedFunction[T](function: (() => T))
extends PrivilegedExceptionAction[T] {
override def run(): T = {
function()
}
}
| steveloughran/spark-timeline-integration | yarn-timeline-history/src/main/scala/org/apache/spark/deploy/history/yarn/rest/PrivilegedFunction.scala | Scala | apache-2.0 | 1,217 |
/*
*
*/
package see.values
import java.lang.reflect.Method
import see.CopyContext
import see.EvalError
import see.JavaWrapper
import see.ParamError
import see.Scope
private[see] case class Native(wrapper: JavaWrapper)
extends Val with Callable {
import Native._
override def selType = 'Native
override def isType(typeId: Symbol) =
(typeId == 'Native) || super.isType(typeId)
override def toJava: AnyRef = wrapper
override def size = 1
override def copy(cc: CopyContext) = new Native(wrapper)
def isDefinedIn(s: Scope) = true
// obviously
def isStable = false // we simply don't know.
def call(s: Scope, arg: Val): Val = {
val invokes = wrapper.getClass.getMethods.filter(_.getName == "invoke")
if (invokes.isEmpty)
throw new EvalError(wrapper.name + " is not a valid JavaWrapper.")
val args = arg match {
case v: Vector => v.values.toArray
case x => Array(x)
}
val meths = invokes.filter(_.getParameterTypes.length == args.size)
if (meths.isEmpty) throw new ParamError(
"No native invocation of %s with %d parameters available.".format(
wrapper.name, args.size))
val method = if (meths.size == 1) meths.head
else select(meths, args)
call(method, args)
}
// convert arguments to fit method parameters
private def call(method: Method, args: Array[Val]): Val = {
val jargs = convertArgs(method.getParameterTypes, args)
try {
val r = method.invoke(wrapper, jargs: _*)
if (r ne null) Val(r)
else if (method.getReturnType == java.lang.Void.TYPE)
VoidVal
else NullVal
} catch {
case x: Exception =>
throw new EvalError(s"Native call threw ${x.getMessage}")
}
}
}
object Native {
// Selects that method from a list which best matches given arguments.
def select(ms: Array[Method], args: Array[Val]) = {
val n = Native.bestMatch(ms.map(_.getParameterTypes), args)
if (n >= 0) ms(n)
else {
val s = "No overload of method " + ms(0).getName +
" matches argument list ("
throw new EvalError(args.mkString(s, ", ", ")."))
}
}
/** Converts argument list int to fit given parameter types.
* @return Object array suitable for reflection call.
*/
def convertArgs(types: Array[Class[_]], args: Array[Val]) = {
val jargs = new Array[AnyRef](args.size)
for (n <- 0 until args.size) {
jargs(n) = args(n) convertTo types(n)
}
jargs
}
/** Scans list of parameter types for that one which best maches argument list.
* @return index of best match, -1 if none matches.
*/
def bestMatch(typeLists: Array[Array[Class[_]]], args: Array[Val]) = {
var best = -1
var bestRating = 0
for (mIndex <- 0 until typeLists.size) {
val types = typeLists(mIndex)
var rating = 0
var n = 0
while (n < args.size && rating >= 0) {
val fit = args(n) fits types(n)
if (fit > 0) rating += fit
else rating = -1 // stop on first mismatch
n += 1
}
if (rating > bestRating) {
bestRating = rating
best = mIndex
}
}
best
}
}
| acruise/see | src/main/scala/see/values/Native.scala | Scala | bsd-3-clause | 3,160 |
package planstack.anml.model.abs
import planstack.anml.model._
import planstack.anml.model.abs.statements.AbstractStatement
/** Reference to an action as it appears in a decomposition
*
* @param name Name of the action
* @param args Parameters of the action as instances of local variables
* @param localId Local reference to the action.
*/
class AbstractActionRef(val name:String, val args:List[LVarRef], val localId:LActRef) extends AbstractStatement(localId) {
require(localId nonEmpty)
require(name nonEmpty)
override def bind(context:Context, pb:AnmlProblem) = throw new UnsupportedOperationException
override def isTemporalInterval = true
override def toString = name+"("+args.mkString(",")+")"
}
| planstack/anml | src/main/scala/planstack/anml/model/abs/AbstractActionRef.scala | Scala | apache-2.0 | 729 |
package chandu0101.scalajs.react.components
package materialui
import chandu0101.macros.tojs.JSMacro
import japgolly.scalajs.react._
import scala.scalajs.js
import scala.scalajs.js.`|`
/**
* This file is generated - submit issues instead of PR against it
*/
case class MuiFlatButton(
key: js.UndefOr[String] = js.undefined,
ref: js.UndefOr[String] = js.undefined,
/* Color of button when mouse is not hovering over it.*/
backgroundColor: js.UndefOr[MuiColor] = js.undefined,
/* Disables the button if set to true.*/
disabled: js.UndefOr[Boolean] = js.undefined,
/* Color of button when mouse hovers over.*/
hoverColor: js.UndefOr[MuiColor] = js.undefined,
/* URL to link to when button clicked if `linkButton` is set to true.*/
href: js.UndefOr[String] = js.undefined,
/* Use this property to display an icon.*/
icon: js.UndefOr[ReactNode] = js.undefined,
/* Label for the button.*/
label: js.UndefOr[String] = js.undefined,
/* Place label before or after the passed children.*/
labelPosition: js.UndefOr[BeforeAfter] = js.undefined,
/* Override the inline-styles of the button's label element.*/
labelStyle: js.UndefOr[CssProperties] = js.undefined,
/* Enables use of `href` property to provide a URL to link to if set to true.*/
linkButton: js.UndefOr[Boolean] = js.undefined,
/* Called when element is focused by the keyboard.*/
onKeyboardFocus: js.UndefOr[ReactKeyboardEventH => Callback] = js.undefined,
/* Called when the mouse enters the element.*/
onMouseEnter: js.UndefOr[ReactMouseEventH => Callback] = js.undefined,
/* Called when the mouse leaves the element.*/
onMouseLeave: js.UndefOr[ReactMouseEventH => Callback] = js.undefined,
/* Called when a touch event is started inside the element.*/
onTouchStart: js.UndefOr[ReactTouchEventH => Callback] = js.undefined,
/* If true, colors button according to
primaryTextColor from the Theme.*/
primary: js.UndefOr[Boolean] = js.undefined,
/* Color for the ripple after button is clicked.*/
rippleColor: js.UndefOr[MuiColor] = js.undefined,
/* If true, colors button according to secondaryTextColor from the theme.
The primary prop has precendent if set to true.*/
secondary: js.UndefOr[Boolean] = js.undefined,
/* Override the inline-styles of the root element.*/
style: js.UndefOr[CssProperties] = js.undefined,
/* (Passed on to EnhancedButton)*/
centerRipple: js.UndefOr[Boolean] = js.undefined,
/* default: button: This component will render a button element by default and an anchor element if linkButton is set to true. However, you can override this behavior by passing in a string or another react element into this prop. This is useful for generating link buttons with the react router link element.
(Passed on to EnhancedButton)*/
containerElement: js.UndefOr[String | ReactElement] = js.undefined,
/* (Passed on to EnhancedButton)*/
disableFocusRipple: js.UndefOr[Boolean] = js.undefined,
/* (Passed on to EnhancedButton)*/
disableKeyboardFocus: js.UndefOr[Boolean] = js.undefined,
/* (Passed on to EnhancedButton)*/
disableTouchRipple: js.UndefOr[Boolean] = js.undefined,
/* (Passed on to EnhancedButton)*/
focusRippleColor: js.UndefOr[MuiColor] = js.undefined,
/* (Passed on to EnhancedButton)*/
focusRippleOpacity: js.UndefOr[Double] = js.undefined,
/* (Passed on to EnhancedButton)*/
keyboardFocused: js.UndefOr[Boolean] = js.undefined,
/* (Passed on to EnhancedButton)*/
onBlur: js.UndefOr[ReactEventH => Callback] = js.undefined,
/* (Passed on to EnhancedButton)*/
onFocus: js.UndefOr[ReactFocusEventH => Callback] = js.undefined,
/* (Passed on to EnhancedButton)*/
onKeyDown: js.UndefOr[ReactKeyboardEventH => Callback] = js.undefined,
/* (Passed on to EnhancedButton)*/
onKeyUp: js.UndefOr[ReactKeyboardEventH => Callback] = js.undefined,
/* (Passed on to EnhancedButton)*/
onTouchTap: js.UndefOr[ReactTouchEventH => Callback] = js.undefined,
/* (Passed on to EnhancedButton)*/
tabIndex: js.UndefOr[Double] = js.undefined,
/* (Passed on to EnhancedButton)*/
touchRippleColor: js.UndefOr[MuiColor] = js.undefined,
/* (Passed on to EnhancedButton)*/
touchRippleOpacity: js.UndefOr[Double] = js.undefined,
/* (Passed on to EnhancedButton)*/
`type`: js.UndefOr[String] = js.undefined){
/**
* @param children This is what will be displayed inside the button.
If a label is specified, the text within the label prop will
be displayed. Otherwise, the component will expect children
which will then be displayed. (In our example,
we are nesting an `<input type="file" />` and a `span`
that acts as our label to be displayed.) This only
applies to flat and raised buttons.
*/
def apply(children: ReactNode*) = {
val props = JSMacro[MuiFlatButton](this)
val f = React.asInstanceOf[js.Dynamic].createFactory(Mui.FlatButton)
if (children.isEmpty)
f(props).asInstanceOf[ReactComponentU_]
else if (children.size == 1)
f(props, children.head).asInstanceOf[ReactComponentU_]
else
f(props, children.toJsArray).asInstanceOf[ReactComponentU_]
}
}
| elacin/scalajs-react-components | core/src/main/scala/chandu0101/scalajs/react/components/materialui/MuiFlatButton.scala | Scala | apache-2.0 | 6,079 |
package pamflet
import collection.immutable.Map
object Language {
// see http://en.wikipedia.org/wiki/IETF_language_tag
val languageNames: Map[String, String] = Map(
"ar" -> "العربية",
"bn" -> "বাংলা",
"ca" -> "Català",
"cs" -> "Čeština",
"de" -> "Deutsch",
"en" -> "English",
"es" -> "Español",
"fa" -> "فارسی",
"fi" -> "Suomi",
"fr" -> "Français",
"he" -> "עברית",
"hi" -> "हिन्दी",
"hu" -> "Magyar",
"id" -> "Bahasa Indonesia",
"it" -> "Italiano",
"ja" -> "日本語",
"ko" -> "한국어",
"nl" -> "Nederlands",
"no" -> "Norsk (Bokmål)",
"pl" -> "Polski",
"pt" -> "Português",
"ru" -> "Русский",
"sv" -> "Svenska",
"tr" -> "Türkçe",
"vi" -> "Tiếng Việt",
"uk" -> "Українська",
"zh" -> "中文"
)
def languageName(code: String): Option[String] = languageNames get code
}
| n8han/pamflet | library/src/main/scala/languages.scala | Scala | lgpl-3.0 | 961 |
/*
* Copyright 2013 agwlvssainokuni
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models
import java.util.Date
case class Profile(email: String, nickname: String, birthday: Option[Date])
| agwlvssainokuni/lifelog | lifelog-website/app/models/Profile.scala | Scala | apache-2.0 | 715 |
package info.fotm.aether
import akka.actor.Actor
import akka.actor.Actor.Receive
object Analytics {
case class Event(name: String, value: Int)
}
class Analytics extends Actor {
override def receive: Receive = ???
}
| Groz/fotm-info | core/src/main/scala/info/fotm/aether/Analytics.scala | Scala | mit | 222 |
package breeze.optimize
import breeze.util._
import logging.ConfiguredLogging
import breeze.stats.distributions._
import breeze.math.MutableCoordinateSpace
/*
Copyright 2009 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
* Minimizes a function using stochastic gradient descent
*
* @author dlwh
*/
abstract class StochasticGradientDescent[T](val defaultStepSize: Double,
val maxIter: Int,
tolerance: Double=1E-5,
improvementTol: Double=1E-4,
minImprovementWindow: Int = 50)
(implicit protected val vspace: MutableCoordinateSpace[T, Double])
extends FirstOrderMinimizer[T,StochasticDiffFunction[T]](maxIter, tolerance, improvementTol, minImprovementWindow, 2) with ConfiguredLogging {
import vspace._
// Optional hooks with reasonable defaults
/**
* Projects the vector x onto whatever ball is needed. Can also incorporate regularization, or whatever.
*
* Default just takes a step
*/
protected def takeStep(state: State, dir: T, stepSize: Double) = state.x + dir * stepSize
protected def chooseDescentDirection(state: State) = state.grad * -1.0
override protected def updateFValWindow(oldState: State, newAdjVal: Double) = {
if(oldState.fVals.isEmpty) IndexedSeq(newAdjVal)
else {
// weighted average. less sensitive to outliers
val interm = oldState.fVals :+ ((oldState.fVals.last * 3 + newAdjVal)/4.0)
if(interm.length > minImprovementWindow) interm.drop(1)
else interm
}
}
/**
* Choose a step size scale for this iteration.
*
* Default is eta / math.pow(state.iter + 1,2.0 / 3.0)
*/
def determineStepSize(state: State, f: StochasticDiffFunction[T], dir: T) = {
defaultStepSize / math.pow(state.iter + 1, 2.0 / 3.0)
}
}
object StochasticGradientDescent {
def apply[T](initialStepSize: Double=4, maxIter: Int=100)(implicit vs: MutableCoordinateSpace[T, Double]) :StochasticGradientDescent[T] = {
new SimpleSGD(initialStepSize,maxIter)
}
class SimpleSGD[T](eta: Double=4,
maxIter: Int=100)
(implicit vs: MutableCoordinateSpace[T, Double]) extends StochasticGradientDescent[T](eta,maxIter) {
type History = Unit
def initialHistory(f: StochasticDiffFunction[T],init: T)= ()
def updateHistory(newX: T, newGrad: T, newValue: Double, oldState: State) = ()
}
}
| tjhunter/scalanlp-core | learn/src/main/scala/breeze/optimize/StochasticGradientDescent.scala | Scala | apache-2.0 | 3,064 |
/*
* Copyright 2011-2019 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.spark.extensions.iterativebatch.compiler
import com.asakusafw.lang.compiler.planning.SubPlan
import com.asakusafw.spark.compiler.planning.{ NameInfo, SubPlanInfo }
package object graph {

  /** Adds human-readable `name` / `label` accessors to [[SubPlan]]. */
  implicit class SubPlanNames(val subplan: SubPlan) extends AnyVal {

    /** The sub-plan's assigned name, or `"N/A"` when no [[NameInfo]] attribute is present. */
    def name: String = {
      Option(subplan.getAttribute(classOf[NameInfo]))
        .map(_.getName)
        .getOrElse("N/A")
    }

    /**
     * A `":"`-joined combination of the sub-plan's name and its [[SubPlanInfo]] label;
     * `"N/A"` when neither attribute provides a value.
     */
    def label: String = {
      val parts = Seq(
        Option(subplan.getAttribute(classOf[NameInfo]))
          .map(_.getName),
        Option(subplan.getAttribute(classOf[SubPlanInfo]))
          .flatMap(info => Option(info.getLabel)))
        .flatten
      // Replaces the previous `case s: Seq[String]` pattern, which was a
      // type-erased (unchecked) match; behavior is identical.
      if (parts.isEmpty) "N/A" else parts.mkString(":")
    }
  }
}
| ashigeru/asakusafw-spark | extensions/iterativebatch/compiler/core/src/main/scala/com/asakusafw/spark/extensions/iterativebatch/compiler/graph/package.scala | Scala | apache-2.0 | 1,380 |
/*
* Copyright (c) 2014 Paul Bernard
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Spectrum Finance is based in part on:
* QuantLib. http://quantlib.org/
*
*/
package org.quantintel.ql.time.calendars
import org.quantintel.ql.time.Month._
import org.quantintel.ql.time.Weekday._
import org.quantintel.ql.time.{Date, Western, Calendar}
object SlovakiaEnum extends Enumeration {

  type SlovakiaEnum = Value

  // Bratislava Stock Exchange — currently the only supported market.
  val BSSE = Value(1)

  /**
   * Maps an integer market code to its enum value.
   *
   * @throws Exception when the code is not 1 (BSSE)
   */
  def valueOf(market: Int) : SlovakiaEnum =
    if (market == 1) BSSE
    else throw new Exception("Valid units = 1")
}
object Slovakia {

  /** Creates a Slovak calendar for the default market (BSSE). */
  def apply(): Calendar = new Slovakia()

  /** Creates a Slovak calendar for the given market. */
  def apply(market: org.quantintel.ql.time.calendars.SlovakiaEnum.SlovakiaEnum): Calendar =
    new Slovakia(market)
}
/**
*
* Slovak calendars
* Holidays for the Bratislava stock exchange
* Saturdays
* Sundays
* New Year's Day, JANUARY 1st
* Epiphany, JANUARY 6th
* Good Friday
* Easter Monday
* May Day, May 1st
* Liberation of the Republic, May 8th
* SS. Cyril and Methodius, July 5th
* Slovak National Uprising, August 29th
* Constitution of the Slovak Republic, September 1st
* Our Lady of the Seven Sorrows, September 15th
* All Saints Day, November 1st
* Freedom and Democracy of the Slovak Republic, November 17th
* Christmas Eve, December 24th
* Christmas, December 25th
* St. Stephen, December 26th
*
* Reference: http://www.bsse.sk/
*
* @author Paul Bernard
*/
class Slovakia extends Calendar {

  // Default implementation: Bratislava Stock Exchange calendar.
  impl = new Bsse

  import org.quantintel.ql.time.calendars.SlovakiaEnum._

  // Auxiliary constructor selecting the implementation by market code.
  def this(market: org.quantintel.ql.time.calendars.SlovakiaEnum.SlovakiaEnum) {
    this
    market match {
      case BSSE => impl = new Bsse
      case _ => throw new Exception("Valid units = 1")
    }
  }

  // Western calendar providing Easter-based holiday computation (easterMonday).
  private class Bsse extends Western {

    override def name : String = "Bratislava stock exchange"

    // Returns false for weekends and all BSSE holidays listed in the class doc.
    override def isBusinessDay(date: Date): Boolean = {

      // standard dependencies
      val w: Weekday = date.weekday
      val d: Int = date.dayOfMonth
      val dd: Int = date.dayOfYear     // day-of-year, compared against Easter Monday offset
      val m: Month = date.month
      val y: Int = date.year
      val em: Int = easterMonday(y)    // Easter Monday as day-of-year for year y

      if (isWeekend(w)
        || (d == 1 && m == JANUARY) // New Year's Day
        || (d == 6 && m == JANUARY) // Epiphany
        || (dd == em - 3) // Good Friday
        || (dd == em) // Easter Monday
        || (d == 1 && m == MAY) // May Day
        || (d == 8 && m == MAY) // Liberation of the Republic
        || (d == 5 && m == JULY) // SS. Cyril and Methodius
        || (d == 29 && m == AUGUST) // Slovak National Uprising
        || (d == 1 && m == SEPTEMBER) // Constitution of the Slovak Republic
        || (d == 15 && m == SEPTEMBER) // Our Lady of the Seven Sorrows
        || (d == 1 && m == NOVEMBER) // All Saints Day
        || (d == 17 && m == NOVEMBER) // Freedom and Democracy of the Slovak Republic
        || (d == 24 && m == DECEMBER) // Christmas Eve
        || (d == 25 && m == DECEMBER) // Christmas
        || (d == 26 && m == DECEMBER) // St. Stephen
        || (d >= 24 && d <= 31 && m == DECEMBER && y == 2004) // unidentified closing days for stock exchange
        || (d >= 24 && d <= 31 && m == DECEMBER && y == 2005))
        false
      else true
    }
  }

}
| quantintel/spectrum | financial/src/main/scala/org/quantintel/ql/time/calendars/Slovakia.scala | Scala | apache-2.0 | 3,811 |
package io.apibuilder.validation.helpers
import java.io.File
import io.apibuilder.validation.zip.FileUtil
trait FileHelpers {

  /** Reads the entire contents of `file` as a single string (delegates to [[FileUtil]]). */
  def readFileAsString(file: File): String =
    FileUtil.readFileAsString(file)

  /**
   * Writes `contents` to a freshly created temporary file and returns that file.
   *
   * @param prefix temp-file name prefix, defaults to "apibuildervalidation"
   * @param suffix temp-file name suffix, defaults to "tmp"
   */
  def writeToTempFile(
    contents: String,
    prefix: String = "apibuildervalidation",
    suffix: String = "tmp"
  ): File =
    FileUtil.writeToTempFile(contents = contents, prefix = prefix, suffix = suffix)
}
| flowcommerce/lib-apidoc-json-validation | src/test/scala/io/apibuilder/validation/helpers/FileHelpers.scala | Scala | mit | 453 |
package org.jetbrains.plugins.scala
package codeInspection.forwardReferenceInspection
import com.intellij.codeInspection.ProblemsHolder
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.codeInspection.AbstractInspection
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScReferenceExpression
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScValue, ScVariable}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScTemplateBody
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScMember
import org.jetbrains.plugins.scala.lang.resolve.ScalaResolveResult
/**
* Alefas
*/
// Flags references inside a val/var initializer that point forward to another
// strict (non-lazy) val/var declared later in the same template body — such a
// read observes the target before it has been initialized.
class ForwardReferenceInspection extends AbstractInspection {
  def actionFor(holder: ProblemsHolder) = {
    case ref: ScReferenceExpression =>
      // The member whose body/initializer contains this reference.
      val member: ScMember = PsiTreeUtil.getParentOfType(ref, classOf[ScMember])
      if (member != null) {
        member.getContext match {
          // Only vals/vars declared directly in a template (class/trait/object) body.
          case tb: ScTemplateBody if member.isInstanceOf[ScValue] || member.isInstanceOf[ScVariable] =>
            ref.bind() match {
              case Some(r: ScalaResolveResult) =>
                ScalaPsiUtil.nameContext(r.getActualElement) match {
                  case resolved if resolved.isInstanceOf[ScValue] || resolved.isInstanceOf[ScVariable]=>
                    // Forward reference: the target lives in the same template body,
                    // is declared later (greater text offset), and neither side is
                    // lazy — so the reference reads an uninitialized value.
                    if (resolved.getParent == tb && !member.hasModifierProperty("lazy") &&
                      !resolved.asInstanceOf[ScMember].hasModifierProperty("lazy") &&
                      resolved.getTextOffset > member.getTextOffset) {
                      holder.registerProblem(ref, ScalaBundle.message("suspicicious.forward.reference.template.body"))
                    }
                  case _ =>
                }
              case _ =>
            }
          case _ =>
        }
      }
  }
}
| LPTK/intellij-scala | src/org/jetbrains/plugins/scala/codeInspection/forwardReferenceInspection/ForwardReferenceInspection.scala | Scala | apache-2.0 | 1,866 |
import sbt._
// Central place for build dependency declarations used by build.sbt.
object Dependencies {
  // Version numbers shared by the dependency declarations below.
  object Versions {
    val scala = "2.11.8"
    val akka = "2.4.3"
    val scalaTest = "2.2.6"
  }
  // Extra resolvers for artifacts not available on Maven Central.
  val repos = Seq(
    "Atlassian Releases" at "https://maven.atlassian.com/public/",
    "JCenter repo" at "https://bintray.com/bintray/jcenter/",
    Resolver.sonatypeRepo("snapshots"),
    "google code" at "http://repo1.maven.org/maven2/com/googlecode/libphonenumber/libphonenumber/"
  )
  // Runtime: Akka actor/slf4j/persistence + logback; Test: akka-testkit, scalatest,
  // and an in-memory journal for persistence tests.
  val dependencies = Seq(
    "com.typesafe.akka" %% "akka-actor" % Versions.akka,
    "com.typesafe.akka" %% "akka-slf4j" % Versions.akka,
    "com.typesafe.akka" %% "akka-testkit" % Versions.akka % Test,
    "org.scalatest" %% "scalatest" % Versions.scalaTest % Test,
    "com.typesafe.akka" %% "akka-persistence" % Versions.akka,
    "com.github.dnvriend" %% "akka-persistence-inmemory" % "1.2.14",
    "ch.qos.logback" % "logback-classic" % "1.1.2"
  )
}
| ReactivePatterns/functional-objects | project/Dependencies.scala | Scala | apache-2.0 | 889 |
package com.github.ldaniels528.trifecta.modules.etl.io.trigger.impl
import java.io.File
import com.github.ldaniels528.trifecta.modules.etl.io.trigger.impl.FileFeedSet.FeedMatch
import org.scalatest.Matchers._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{BeforeAndAfterEach, FeatureSpec, GivenWhenThen}
/**
* File Feed Set Specification
* @author lawrence.daniels@gmail.com
*/
class FileFeedSetSpec() extends FeatureSpec with BeforeAndAfterEach with GivenWhenThen with MockitoSugar {
  info("As a FileFeedSet instance")
  info("I want to be able to process files in sets")

  feature("Identify similar files") {
    scenario("Identify files with the same patterns") {
      Given("a feed set")
      // Pattern captures the date portion (e.g. "20160101") as the group id;
      // one feed per exchange prefix.
      val feedSet = FileFeedSet(
        path = "{{ user.home }}/broadway/incoming/tradingHistory",
        pattern = ".*_(\\\\S*)[.]txt",
        archive = None,
        feeds = Seq(
          FileFeed.startsWith("AMEX", flows = Nil, archive = None),
          FileFeed.startsWith("NASDAQ", flows = Nil, archive = None),
          FileFeed.startsWith("NYSE", flows = Nil, archive = None),
          FileFeed.startsWith("OTCBB", flows = Nil, archive = None)
        ))

      And("a set of files")
      // One file per feed, all sharing the same date group ("20160101").
      val files = Seq(
        new File("AMEX_20160101.txt"),
        new File("NASDAQ_20160101.txt"),
        new File("NYSE_20160101.txt"),
        new File("OTCBB_20160101.txt")
      )

      When("the pattern is tested")
      feedSet.isSatisfied(files) shouldBe true

      Then("a match should be found")
      val results = feedSet.getFiles(files)
      results foreach { case FeedMatch(groupId, file, feed) =>
        info(s"groupId: $groupId, file: $file, feed: $feed")
      }
      // Expect feeds paired with files in declaration order, all under group "20160101".
      val expected = feedSet.feeds zip files map { case (feed, file) =>
        FeedMatch(groupId = "20160101", file, feed)
      }
      expected shouldBe results
    }
  }
}
| ldaniels528/trifecta | app-cli/src/test/scala/com/github/ldaniels528/trifecta/modules/etl/io/trigger/impl/FileFeedSetSpec.scala | Scala | apache-2.0 | 1,880 |
Subsets and Splits
	Filtered Scala Code Snippets
	This query filters the dataset and retrieves a sample of Scala code snippets that match specific criteria, giving a quick overview of the dataset's contents without deeper analysis.