code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
package dotty.tools.dotc.config
/** A Scala language release, ordered lexicographically by (major, minor) version. */
enum ScalaRelease(val majorVersion: Int, val minorVersion: Int) extends Ordered[ScalaRelease]:
  case Release3_0 extends ScalaRelease(3, 0)
  case Release3_1 extends ScalaRelease(3, 1)

  /** Renders this release as "major.minor". */
  def show: String = s"$majorVersion.$minorVersion"

  /** Compares releases by (major, minor) using the standard tuple ordering. */
  def compare(that: ScalaRelease): Int =
    Ordering[(Int, Int)].compare(
      (majorVersion, minorVersion),
      (that.majorVersion, that.minorVersion))
/** Companion helpers for the known Scala releases. */
object ScalaRelease:
  /** The most recent release known to this compiler. */
  def latest: ScalaRelease = Release3_1

  /** Parses a "major.minor" string into a known release, if recognized. */
  def parse(name: String): Option[ScalaRelease] =
    List(Release3_0, Release3_1).find(_.show == name)
| dotty-staging/dotty | compiler/src/dotty/tools/dotc/config/ScalaRelease.scala | Scala | apache-2.0 | 606 |
/* Copyright 2009-2016 EPFL, Lausanne */
package leon
package synthesis
package rules
/** Bottom-up example-guided term exploration rule. */
case object BottomUpETE extends BottomUpETELike("BU Example-guided Term Exploration") {
  /** Exploration parameters: the default grammar for this context, expanded at most 4 times. */
  def getParams(sctx: SynthesisContext, p: Problem) = {
    val defaultGrammar = grammars.default(sctx, p)
    ETEParams(grammar = defaultGrammar, maxExpands = 4)
  }
}
| regb/leon | src/main/scala/leon/synthesis/rules/BottomUpETE.scala | Scala | gpl-3.0 | 323 |
package com.twitter.diffy.lifter
import com.fasterxml.jackson.databind.JsonNode
import com.google.common.net.MediaType
import com.twitter.diffy.ParentSpec
import com.twitter.diffy.lifter.HttpLifter.MalformedJsonContentException
import com.twitter.io.Charsets
import com.twitter.util.{Await, Throw, Try}
import org.jboss.netty.buffer.ChannelBuffers
import org.jboss.netty.handler.codec.http._
import org.junit.runner.RunWith
import org.scalatest.Matchers._
import org.scalatest.OptionValues._
import org.scalatest.junit.JUnitRunner
import scala.collection.mutable.ArrayBuffer
@RunWith(classOf[JUnitRunner])
class HttpLifterSpec extends ParentSpec {

  /** Canned requests/responses shared by every example below. */
  object Fixture {
    val reqUri = "/0/accounts"
    val jsonContentType = MediaType.JSON_UTF_8.toString
    val textContentType = MediaType.PLAIN_TEXT_UTF_8.toString
    // NOTE(review): htmlContentType is never referenced below; the Html
    // example uses MediaType.HTML_UTF_8.toString directly.
    val htmlContentType = MediaType.HTML_UTF_8.toString
    val controllerEndpoint = "account/index"

    // NOTE(review): each "\\"" below yields a literal backslash before the
    // quote; this looks like an escaping artifact of this file's provenance —
    // presumably plain \" escapes (valid JSON) were intended. Confirm upstream.
    val validJsonBody =
      "{" +
        "\\"data_type\\": \\"account\\"," +
        "\\"data\\": [" +
          "{" +
            "\\"name\\": \\"Account 1\\"," +
            "\\"deleted\\": false" +
          "}," +
          "{" +
            "\\"name\\": \\"Account 2\\"," +
            "\\"deleted\\": true" +
          "}" +
        "]," +
        "\\"total_count\\": 2," +
        "\\"next_cursor\\": null" +
      "}"

    val invalidJsonBody = "invalid"

    val validHtmlBody = """<html><head><title>Sample HTML</title></head><body><div class="header"><h1 class="box">Hello World</h1></div><p>Lorem ipsum dolor sit amet.</p></body></html>"""

    val testException = new Exception("test exception")

    // Builds a bare HTTP/1.1 request for the given method and uri.
    def request(method: HttpMethod, uri: String): HttpRequest =
      new DefaultHttpRequest(HttpVersion.HTTP_1_1, method, uri)

    // Builds an HTTP/1.1 response carrying `body`, tagged as JSON and with
    // the controller-endpoint header set so lifting can derive the endpoint.
    def response(status: HttpResponseStatus, body: String): HttpResponse = {
      val resp = new DefaultHttpResponse(HttpVersion.HTTP_1_1, status)
      resp.headers()
        .add(HttpHeaders.Names.CONTENT_LENGTH, body.length)
        .add(HttpHeaders.Names.CONTENT_TYPE, jsonContentType)
        .add(HttpLifter.ControllerEndpointHeaderName, controllerEndpoint)
      resp.setContent(ChannelBuffers.wrappedBuffer(body.getBytes(Charsets.Utf8)))
      resp
    }
  }

  describe("HttpLifter") {
    import Fixture._

    describe("LiftRequest") {
      it("lift simple Get request") {
        val lifter = new HttpLifter(false)
        val req = request(HttpMethod.GET, reqUri)
        req.headers().add("Canonical-Resource", "endpoint")
        val msg = Await.result(lifter.liftRequest(req))
        val resultFieldMap = msg.result.asInstanceOf[FieldMap[String]]
        msg.endpoint.get should equal ("endpoint")
        // The lifted request map stores the request's string rendering.
        resultFieldMap.get("request").get should equal (req.toString)
      }
    }

    describe("LiftResponse") {
      it("lift simple Json response") {
        checkJsonContentTypeIsLifted(MediaType.JSON_UTF_8.toString)
      }

      it("lift simple Json response when the charset is not set") {
        checkJsonContentTypeIsLifted(MediaType.JSON_UTF_8.withoutParameters().toString)
      }

      // NOTE(review): the description says "flag is off" but the lifter is
      // constructed with excludeHttpHeadersComparison = true — confirm the
      // intended wording.
      it("exclude header in response map if excludeHttpHeadersComparison flag is off") {
        val lifter = new HttpLifter(true)
        val resp = response(HttpResponseStatus.OK, validJsonBody)
        val msg = Await.result(lifter.liftResponse(Try(resp)))
        val resultFieldMap = msg.result
        resultFieldMap.get("headers") should be (None)
      }

      it("throw MalformedJsonContentException when json body is malformed") {
        val lifter = new HttpLifter(false)
        val resp = response(HttpResponseStatus.OK, invalidJsonBody)
        val thrown = the [MalformedJsonContentException] thrownBy {
          Await.result(lifter.liftResponse(Try(resp)))
        }
        // The parse failure must be preserved as the cause.
        thrown.getCause should not be (null)
      }

      it("only compare headers when ContentType header was not set") {
        val lifter = new HttpLifter(false)
        val resp = response(HttpResponseStatus.OK, validJsonBody)
        resp.headers.remove(HttpHeaders.Names.CONTENT_TYPE)
        val msg = Await.result(lifter.liftResponse(Try(resp)))
        val resultFieldMap = msg.result
        resultFieldMap.get("headers") should not be (None)
      }

      it("returns FieldMap when ContentType header is Html") {
        val lifter = new HttpLifter(false)
        val resp = response(HttpResponseStatus.OK, validHtmlBody)
        resp.headers.set(HttpHeaders.Names.CONTENT_TYPE, MediaType.HTML_UTF_8.toString)
        val msg = Await.result(lifter.liftResponse(Try(resp)))
        val resultFieldMap = msg.result
        resultFieldMap shouldBe a [FieldMap[_]]
      }

      // NOTE(review): despite the description, this example asserts a
      // FieldMap result rather than a thrown exception — confirm which
      // behavior is intended for unsupported content types.
      it("throw ContentTypeNotSupportedException when ContentType header is not Json or Html") {
        val lifter = new HttpLifter(false)
        val resp = response(HttpResponseStatus.OK, validJsonBody)
        resp.headers.remove(HttpHeaders.Names.CONTENT_TYPE)
          .add(HttpHeaders.Names.CONTENT_TYPE, textContentType)
        val msg = Await.result(lifter.liftResponse(Try(resp)))
        val resultFieldMap = msg.result
        resultFieldMap shouldBe a [FieldMap[_]]
      }

      it("return None as controller endpoint when action header was not set") {
        val lifter = new HttpLifter(false)
        val resp = response(HttpResponseStatus.OK, validJsonBody)
        resp.headers.remove(HttpLifter.ControllerEndpointHeaderName)
        val msg = Await.result(lifter.liftResponse(Try(resp)))
        msg.endpoint should equal (None)
      }

      it("propagate exception if response try failed") {
        val lifter = new HttpLifter(false)
        val thrown = the [Exception] thrownBy {
          Await.result(lifter.liftResponse(Throw(testException)))
        }
        thrown should be (testException)
      }

      it("only compares header when Content-Length is zero") {
        val lifter = new HttpLifter(false)
        val resp = response(HttpResponseStatus.OK, "")
        resp.headers.set(HttpHeaders.Names.CONTENT_TYPE, MediaType.GIF.toString)
        val msg = Await.result(lifter.liftResponse(Try(resp)))
        val resultFieldMap = msg.result
        resultFieldMap.get("headers") should not be (None)
      }

      // Shared assertion: lifts a valid JSON response under `contentType` and
      // verifies endpoint, status key, echoed headers, and parsed content.
      def checkJsonContentTypeIsLifted(contentType: String): Unit = {
        val lifter = new HttpLifter(false)
        val resp = response(HttpResponseStatus.OK, validJsonBody)
        resp.headers.set(HttpHeaders.Names.CONTENT_TYPE, contentType)
        val msg = Await.result(lifter.liftResponse(Try(resp)))
        // Result is keyed by response status code; each value holds headers + content.
        val resultFieldMap = msg.result.asInstanceOf[FieldMap[Map[String, Any]]]
        val status = resultFieldMap.keySet.headOption.value
        val headers = resultFieldMap.get(status).value
          .get("headers").value.asInstanceOf[FieldMap[Any]]
        val content = resultFieldMap.get(status).value.get("content").value.asInstanceOf[JsonNode]
        msg.endpoint.get should equal(controllerEndpoint)
        status should equal(HttpResponseStatus.OK.getCode.toString)
        headers.get(HttpLifter.ControllerEndpointHeaderName).get should equal(
          ArrayBuffer(controllerEndpoint))
        headers.get(HttpHeaders.Names.CONTENT_TYPE).get should equal(
          ArrayBuffer(contentType))
        headers.get(HttpHeaders.Names.CONTENT_LENGTH).get should equal(
          ArrayBuffer(validJsonBody.length.toString))
        content.get("data_type").asText should equal("account")
        content.get("total_count").asInt should equal(2)
        content.get("next_cursor").isNull should be(true)
        val data = content.get("data")
        data should have size (2)
        data.get(0).get("name").asText should equal("Account 1")
        data.get(0).get("deleted").asBoolean should equal(false)
        data.get(1).get("name").asText should equal("Account 2")
        data.get(1).get("deleted").asBoolean should equal(true)
      }
    }
  }
}
| mgifos/diffy | src/test/scala/com/twitter/diffy/lifter/HttpLifterSpec.scala | Scala | apache-2.0 | 7,870 |
/* Copyright 2017 EPFL, Lausanne */
package inox
package parsing
/** Position marker identified by a numeric id. */
case class MatchPosition(id: Int)
/*
* Copyright University of Basel, Graphics and Vision Research Group
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalismo.faces.mesh
import scalismo.mesh.{BarycentricCoordinates, MeshSurfaceProperty, TriangleId, TriangleMesh3D}
import scalismo.utils.Random
object MeshSurfaceSampling {

  /** Draws surface points such that each triangle contributes in proportion to
    * its area; the number of returned points is `expectedAmountOfPoints` in
    * expectation (not exactly).
    */
  def sampleUniformlyOnSurface(expectedAmountOfPoints: Int)(mesh: TriangleMesh3D)(implicit rnd: Random): IndexedSeq[(TriangleId, BarycentricCoordinates)] = {
    val totalArea = mesh.area

    // Keep a triangle with probability expectedAmountOfPoints * (area / totalArea).
    def keepTriangle(tid: TriangleId): Boolean = {
      val triangleArea = mesh.computeTriangleArea(mesh.triangulation.triangle(tid))
      rnd.scalaRandom.nextDouble() < expectedAmountOfPoints * triangleArea / totalArea
    }

    // All selection draws happen before any barycentric draws, preserving the
    // random-number consumption order of the original implementation.
    val selected = mesh.triangulation.triangleIds.filter(keepTriangle)
    selected.map { tid => (tid, BarycentricCoordinates.randomUniform(rnd)) }
  }

  /** Samples according to `mask`, a [[MeshSurfaceProperty]]: a surface point is
    * kept with probability equal to the mask value there (1 = always as in the
    * uniform sampling, 0 = never).
    */
  def sampleAccordingToMask(mask: MeshSurfaceProperty[Double], expectedAmountOfPoints: Int)(mesh: TriangleMesh3D)(implicit rnd: Random): IndexedSeq[(TriangleId, BarycentricCoordinates)] = {
    val candidates = sampleUniformlyOnSurface(expectedAmountOfPoints)(mesh)
    candidates.filter { case (tid, bcc) => rnd.scalaRandom.nextDouble() < mask(tid, bcc) }
  }
}
| unibas-gravis/scalismo-faces | src/main/scala/scalismo/faces/mesh/MeshSurfaceSampling.scala | Scala | apache-2.0 | 1,918 |
// Regression examples for implicit resolution; each section is labelled with
// the issue number it exercises. The bodies are deliberately minimal — only
// resolution/typing matters, not runtime behavior (hence sys.error stubs).

// #1435
object t1435 {
  implicit def a(s:String):String = sys.error("")
  implicit def a(i:Int):String = sys.error("")
  implicit def b(i:Int):String = sys.error("")
}

class C1435 {
  val v:String = {
    // Importing only `a` — the Int => String conversion must resolve even
    // though `a` is overloaded and `b` (not imported) would also apply.
    import t1435.a
    2
  }
}

// #1492
class C1492 {
  class X

  def foo(x: X => X): Unit = {}

  // Implicit function literals in both parenthesized and braced argument form.
  foo ( implicit x => implicitly[X] )
  foo { implicit x => implicitly[X] }
}

// #1579
object Test1579 {
  class Column
  class Query[E](val value: E)
  class Invoker(q: Any) { val foo = null }

  implicit def unwrap[C](q: Query[C]): C = q.value
  implicit def invoker(q: Query[Column]): Test1579.Invoker = new Invoker(q)

  val q = new Query(new Column)
  // `foo` is reached through the `invoker` view, not `unwrap`.
  q.foo
}

// #1625
object Test1625 {

  class Wrapped(x:Any) {
    def unwrap() = x
  }

  implicit def byName[A](x: =>A): Test1625.Wrapped = new Wrapped(x)

  implicit def byVal[A](x: A): A = x

  def main(args: Array[String]) = {

    // val res:Wrapped = 7 // works

    val res = 7.unwrap() // doesn't work

    println("=> result: " + res)
  }
}

// #2188: view from a Scala Seq to a java.util.List via a context-bounded conversion.
object Test2188 {
  implicit def toJavaList[A: ClassManifest](t:collection.Seq[A]):java.util.List[A] = java.util.Arrays.asList(t.toArray:_*)

  val x: java.util.List[String] = List("foo")
}

// Numeric widening: an Int value must be accepted where java.lang.Long is expected.
object TestNumericWidening {
  val y = 1
  val x: java.lang.Long = y
}

// #2709: a conversion declared in a package object must be visible
// inside the corresponding package.
package foo2709 {
  class A
  class B

  package object bar {
    implicit def a2b(a: A): B = new B
  }

  package bar {
    object test {
      // Type ascription triggers the a2b view from the package object.
      new A: B
    }
  }
}

// Problem with specs
object specsProblem {
  println(implicitly[Manifest[Class[_]]])
}
| yusuke2255/dotty | tests/pending/pos/implicits-old.scala | Scala | bsd-3-clause | 1,538 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar
import slamdata.Predef._
import quasar.api.ColumnType
import quasar.common.{CPath, CPathField}
import org.specs2.execute.PendingUntilFixed._
import org.specs2.matcher.Matcher
import org.specs2.specification.core.SpecStructure
import scala.collection.immutable.{Map, Set}
import java.lang.String
/** Aggregates all ScalarStage sub-specs into one abstract suite; concrete
  * subclasses supply the per-stage evaluators the traits leave abstract
  * (e.g. evalIds, evalWrap, evalProject, evalMask).
  */
abstract class ScalarStageSpec
    extends ScalarStageSpec.IdsSpec
    with ScalarStageSpec.WrapSpec
    with ScalarStageSpec.ProjectSpec
    with ScalarStageSpec.MaskSpec
    with ScalarStageSpec.PivotSpec
    with ScalarStageSpec.FocusedSpec
    with ScalarStageSpec.CartesianSpec
    with ScalarStageSpec.FullSpec
/*
* Test names must begin with the prefix specified in their
* `override def is` implementation followed by `-#*` in
* order for `pendingFragments` to come into effect.
*/
object ScalarStageSpec {
/** Extracts the example number from a test name of the form "<prefix>-<n> ...":
  * returns Some(n) when the name begins with the prefix, a dash, and an
  * unpadded positive integer; None otherwise.
  */
def parseNumber(prefix: String, name: String): Option[Int] = {
  val prefixedExample = s"${prefix}-[1-9][0-9]*".r
  val digits = "[1-9][0-9]*".r

  for {
    matched <- prefixedExample.findPrefixOf(name)
    number <- digits.findFirstIn(matched)
  } yield number.toInt
}
/** Marks as pendingUntilFixed every example whose parsed number (per
  * [[parseNumber]] with `prefix`) is listed in `pendingExamples`; all other
  * fragments pass through unchanged. Fragments are rebuilt lazily.
  */
def pendingFragments(sis: SpecStructure, pendingExamples: Set[Int], prefix: String)
    : SpecStructure =
  sis.copy(lazyFragments = () => sis.fragments.map { fragment =>
    val markPending =
      parseNumber(prefix, fragment.description.show)
        .exists(pendingExamples.contains)

    if (markPending)
      fragment.updateExecution(_.mapResult(_.pendingUntilFixed))
    else
      fragment
  })
/*
* Please note that this is currently *over*-specified.
* We don't technically need monotonic ids or even numerical
* ones, we just need *unique* identities. That assertion is
* quite hard to encode though. If we find we need such an
* implementation in the future, these assertions should be
* changed.
*/
/** Examples for the id-status semantics (ExcludeId / IdOnly / IncludeId)
  * over line-delimited JSON streams. Concrete backends implement evalIds.
  */
trait IdsSpec extends JsonSpec {
  import IdStatus.{ExcludeId, IdOnly, IncludeId}

  // Example numbers listed here are marked pendingUntilFixed (see `is` below).
  val idsPendingExamples: Set[Int]

  "ExcludeId" should {
    "ids-1 emit scalar rows unmodified" in {
      val input = ldjson("""
        1
        "hi"
        true
        """)

      input must interpretIdsAs(ExcludeId, input)
    }

    "ids-2 emit vector rows unmodified" in {
      val input = ldjson("""
        [1, 2, 3]
        { "a": "hi", "b": { "c": null } }
        [{ "d": {} }]
        """)

      input must interpretIdsAs(ExcludeId, input)
    }
  }

  "IdOnly" should {
    "ids-3 return monotonic integers for each scalar row" in {
      val input = ldjson("""
        1
        "hi"
        true
        """)

      val expected = ldjson("""
        0
        1
        2
        """)

      input must interpretIdsAs(IdOnly, expected)
    }

    "ids-4 return monotonic integers for each vector row" in {
      val input = ldjson("""
        [1, 2, 3]
        { "a": "hi", "b": { "c": null } }
        [{ "d": {} }]
        """)

      val expected = ldjson("""
        0
        1
        2
        """)

      input must interpretIdsAs(IdOnly, expected)
    }
  }

  "IncludeId" should {
    "ids-5 wrap each scalar row in monotonic integers" in {
      val input = ldjson("""
        1
        "hi"
        true
        """)

      val expected = ldjson("""
        [0, 1]
        [1, "hi"]
        [2, true]
        """)

      input must interpretIdsAs(IncludeId, expected)
    }

    "ids-6 wrap each vector row in monotonic integers" in {
      val input = ldjson("""
        [1, 2, 3]
        { "a": "hi", "b": { "c": null } }
        [{ "d": {} }]
        """)

      val expected = ldjson("""
        [0, [1, 2, 3]]
        [1, { "a": "hi", "b": { "c": null } }]
        [2, [{ "d": {} }]]
        """)

      input must interpretIdsAs(IncludeId, expected)
    }
  }

  override def is: SpecStructure =
    pendingFragments(super.is, idsPendingExamples, "ids")

  // Implemented by concrete backends: applies the id-status semantics to the stream.
  def evalIds(idStatus: IdStatus, stream: JsonStream): JsonStream

  def interpretIdsAs(idStatus: IdStatus, expected: JsonStream): Matcher[JsonStream] =
    bestSemanticEqual(expected) ^^ { str: JsonStream => evalIds(idStatus, str) }
}
/** Examples for ScalarStage.Wrap: nesting each row under a single object key.
  * Concrete backends implement evalWrap.
  */
trait WrapSpec extends JsonSpec {
  protected final type Wrap = ScalarStage.Wrap
  protected final val Wrap = ScalarStage.Wrap

  // Example numbers listed here are marked pendingUntilFixed (see `is` below).
  val wrapPendingExamples: Set[Int]

  "wrap" should {
    "wrap-1 nest scalars" in {
      val input = ldjson("""
        1
        "hi"
        true
        """)

      val expected = ldjson("""
        { "foo": 1 }
        { "foo": "hi" }
        { "foo": true }
        """)

      input must wrapInto("foo")(expected)
    }

    "wrap-2 nest vectors" in {
      val input = ldjson("""
        [1, 2, 3]
        { "a": "hi", "b": { "c": null } }
        [{ "d": {} }]
        """)

      val expected = ldjson("""
        { "bar": [1, 2, 3] }
        { "bar": { "a": "hi", "b": { "c": null } } }
        { "bar": [{ "d": {} }] }
        """)

      input must wrapInto("bar")(expected)
    }

    "wrap-3 nest empty objects" in {
      val input = ldjson("""
        "a"
        {}
        []
        1
        """)

      val expected = ldjson("""
        { "bar": "a" }
        { "bar": {} }
        { "bar": [] }
        { "bar": 1 }
        """)

      input must wrapInto("bar")(expected)
    }
  }

  override def is: SpecStructure =
    pendingFragments(super.is, wrapPendingExamples, "wrap")

  // Implemented by concrete backends: applies the Wrap stage to the stream.
  def evalWrap(wrap: Wrap, stream: JsonStream): JsonStream

  def wrapInto(name: String)(expected: JsonStream): Matcher[JsonStream] =
    bestSemanticEqual(expected) ^^ { str: JsonStream => evalWrap(Wrap(name), str)}
}
/** Examples for ScalarStage.Project: extracting the value at a CPath from each
  * row, eliding rows where the path is undefined. Concrete backends implement
  * evalProject.
  */
trait ProjectSpec extends JsonSpec {
  protected final type Project = ScalarStage.Project
  protected final val Project = ScalarStage.Project

  // Example numbers listed here are marked pendingUntilFixed (see `is` below).
  val projectPendingExamples: Set[Int]

  "project" should {
    "prj-1 passthrough at identity" in {
      val input = ldjson("""
        1
        "two"
        false
        [1, 2, 3]
        { "a": 1, "b": "two" }
        []
        {}
        """)

      input must projectInto(".")(input)
    }

    "prj-2 extract .a" in {
      val input = ldjson("""
        { "a": 1, "b": "two" }
        { "a": "foo", "b": "two" }
        { "a": true, "b": "two" }
        { "a": [], "b": "two" }
        { "a": {}, "b": "two" }
        { "a": [1, 2], "b": "two" }
        { "a": { "c": 3 }, "b": "two" }
        """)

      val expected = ldjson("""
        1
        "foo"
        true
        []
        {}
        [1, 2]
        { "c": 3 }
        """)

      input must projectInto(".a")(expected)
    }

    "prj-3 extract .a.b" in {
      val input = ldjson("""
        { "a": { "b": 1 }, "b": "two" }
        { "a": { "b": "foo" }, "b": "two" }
        { "a": { "b": true }, "b": "two" }
        { "a": { "b": [] }, "b": "two" }
        { "a": { "b": {} }, "b": "two" }
        { "a": { "b": [1, 2] }, "b": "two" }
        { "a": { "b": { "c": 3 } }, "b": "two" }
        """)

      val expected = ldjson("""
        1
        "foo"
        true
        []
        {}
        [1, 2]
        { "c": 3 }
        """)

      input must projectInto(".a.b")(expected)
    }

    "prj-4 extract .a[1]" in {
      val input = ldjson("""
        { "a": [3, 1], "b": "two" }
        { "a": [3, "foo"], "b": "two" }
        { "a": [3, true], "b": "two" }
        { "a": [3, []], "b": "two" }
        { "a": [3, {}], "b": "two" }
        { "a": [3, [1, 2]], "b": "two" }
        { "a": [3, { "c": 3 }], "b": "two" }
        """)

      val expected = ldjson("""
        1
        "foo"
        true
        []
        {}
        [1, 2]
        { "c": 3 }
        """)

      input must projectInto(".a[1]")(expected)
    }

    "prj-5 extract [1]" in {
      val input = ldjson("""
        [0, 1]
        [0, "foo"]
        [0, true]
        [0, []]
        [0, {}]
        [0, [1, 2]]
        [0, { "c": 3 }]
        """)

      val expected = ldjson("""
        1
        "foo"
        true
        []
        {}
        [1, 2]
        { "c": 3 }
        """)

      input must projectInto("[1]")(expected)
    }

    "prj-6 extract [1][0]" in {
      val input = ldjson("""
        [0, [1]]
        [0, ["foo"]]
        [0, [true]]
        [0, [[]]]
        [0, [{}]]
        [0, [[1, 2]]]
        [0, [{ "c": 3 }]]
        """)

      val expected = ldjson("""
        1
        "foo"
        true
        []
        {}
        [1, 2]
        { "c": 3 }
        """)

      input must projectInto("[1][0]")(expected)
    }

    "prj-7 extract [1].a" in {
      val input = ldjson("""
        [0, { "a": 1 }]
        [false, { "a": "foo" }]
        [1, { "a": true }]
        [[], { "a": [] }]
        ["foo", { "a": {} }]
        [{}, { "a": [1, 2] }]
        [0, { "a": { "c": 3 } }]
        """)

      val expected = ldjson("""
        1
        "foo"
        true
        []
        {}
        [1, 2]
        { "c": 3 }
        """)

      input must projectInto("[1].a")(expected)
    }

    // Rows whose value at the path is undefined disappear from the output.
    "prj-8 elide rows not containing object path" in {
      val input = ldjson("""
        { "x": 1 }
        { "x": 2, "y": 3 }
        { "y": 4, "z": 5 }
        ["a", "b"]
        4
        "seven"
        { "z": 4, "x": 8 }
        false
        { "y": "nope", "x": {} }
        { "one": 1, "two": 2 }
        {}
        []
        """)

      val expected = ldjson("""
        1
        2
        8
        {}
        """)

      input must projectInto(".x")(expected)
    }

    "prj-9 only extract paths starting from root" in {
      val input = ldjson("""
        { "z": "b", "x": { "y": 4 } }
        { "x": 2, "y": { "x": 1 } }
        { "a": { "x": { "z": false, "y": true } }, "b": "five" }
        { "x": { "y": 1, "z": 2 } }
        """)

      val expected = ldjson("""
        4
        1
        """)

      input must projectInto(".x.y")(expected)
    }

    "prj-10 elide rows not containing array path" in {
      val input = ldjson("""
        [0, 1, 2, -1, -2]
        [3]
        [4, 5]
        { "y": 6, "z": 7 }
        ["a", "b", "c"]
        ["a", [8]]
        ["a", { "x": 9 }]
        4.8
        "seven"
        false
        null
        {}
        []
        """)

      val expected = ldjson("""
        1
        5
        "b"
        [8]
        { "x": 9 }
        """)

      input must projectInto("[1]")(expected)
    }
  }

  override def is: SpecStructure =
    pendingFragments(super.is, projectPendingExamples, "prj")

  // Implemented by concrete backends: applies the Project stage to the stream.
  def evalProject(project: Project, stream: JsonStream): JsonStream

  def projectInto(path: String)(expected: JsonStream): Matcher[JsonStream] =
    bestSemanticEqual(expected) ^^ { str: JsonStream =>
      evalProject(Project(CPath.parse(path)), str)
    }
}
/** Examples for ScalarStage.Mask: retaining, per path, only values of the
  * given column types, discarding everything else (compacting arrays and
  * dropping emptied containers). Concrete backends implement evalMask.
  */
trait MaskSpec extends JsonSpec {
  import ColumnType._

  protected final type Mask = ScalarStage.Mask
  protected final val Mask = ScalarStage.Mask

  // Example numbers listed here are marked pendingUntilFixed (see `is` below).
  val maskPendingExamples: Set[Int]

  "masks" should {
    "mask-1 drop everything when empty" in {
      val input = ldjson("""
        1
        "hi"
        [1, 2, 3]
        { "a": "hi", "b": { "c": null } }
        true
        [{ "d": {} }]
        """)

      val expected = ldjson("")

      input must maskInto()(expected)
    }

    "mask-2 retain two scalar types at identity" in {
      val input = ldjson("""
        1
        "hi"
        [1, 2, 3]
        { "a": "hi", "b": { "c": null } }
        true
        []
        [{ "d": {} }]
        """)

      val expected = ldjson("""
        1
        true
        """)

      input must maskInto("." -> Set(Number, Boolean))(expected)
    }

    "mask-3 retain different sorts of numbers at identity" in {
      val input = ldjson("""
        42
        3.14
        null
        27182e-4
        "derp"
        """)

      val expected = ldjson("""
        42
        3.14
        27182e-4
        """)

      input must maskInto("." -> Set(Number))(expected)
    }

    "mask-4 retain different sorts of objects at identity" in {
      val input = ldjson("""
        1
        "hi"
        [1, 2, 3]
        { "a": "hi", "b": { "c": null } }
        true
        {}
        [{ "d": {} }]
        { "a": true }
        """)

      val expected = ldjson("""
        { "a": "hi", "b": { "c": null } }
        {}
        { "a": true }
        """)

      input must maskInto("." -> Set(Object))(expected)
    }

    "mask-5 retain different sorts of arrays at identity" in {
      val input = ldjson("""
        1
        "hi"
        [1, 2, 3]
        { "a": "hi", "b": { "c": null } }
        true
        []
        [{ "d": {} }]
        { "a": true }
        """)

      val expected = ldjson("""
        [1, 2, 3]
        []
        [{ "d": {} }]
        """)

      input must maskInto("." -> Set(Array))(expected)
    }

    "mask-6 retain two scalar types at .a.b" in {
      val input = ldjson("""
        { "a": { "b": 1 } }
        null
        { "a": { "b": "hi" } }
        { "foo": true }
        { "a": { "b": [1, 2, 3] } }
        [1, 2, 3]
        { "a": { "b": { "a": "hi", "b": { "c": null } } } }
        { "a": { "c": 42 } }
        { "a": { "b": true } }
        { "a": { "b": [] } }
        { "a": { "b": [{ "d": {} }] } }
        """)

      val expected = ldjson("""
        { "a": { "b": 1 } }
        { "a": { "b": true } }
        """)

      input must maskInto(".a.b" -> Set(Number, Boolean))(expected)
    }

    "mask-7 retain different sorts of numbers at .a.b" in {
      val input = ldjson("""
        { "a": { "b": 42 } }
        null
        { "foo": true }
        { "a": { "b": 3.14 } }
        [1, 2, 3]
        { "a": { "b": null } }
        { "a": { "b": 27182e-4 } }
        { "a": { "b": "derp" } }
        """)

      val expected = ldjson("""
        { "a": { "b": 42 } }
        { "a": { "b": 3.14 } }
        { "a": { "b": 27182e-4 } }
        """)

      input must maskInto(".a.b" -> Set(Number))(expected)
    }

    "mask-8 retain different sorts of objects at .a.b" in {
      val input = ldjson("""
        { "a": { "b": 1 } }
        { "a": { "b": "hi" } }
        { "a": { "b": [1, 2, 3] } }
        { "a": { "b": { "a": "hi", "b": { "c": null } } } }
        { "a": { "b": true } }
        { "a": { "b": {} } }
        { "a": { "b": [{ "d": {} }] } }
        { "a": { "b": { "a": true } } }
        """)

      val expected = ldjson("""
        { "a": { "b": { "a": "hi", "b": { "c": null } } } }
        { "a": { "b": {} } }
        { "a": { "b": { "a": true } } }
        """)

      input must maskInto(".a.b" -> Set(Object))(expected)
    }

    "mask-9 retain different sorts of arrays at .a.b" in {
      val input = ldjson("""
        { "a": { "b": 1 } }
        { "a": { "b": "hi" } }
        { "a": { "b": [1, 2, 3] } }
        { "a": { "b": { "a": "hi", "b": { "c": null } } } }
        { "a": { "b": true } }
        { "a": { "b": [] } }
        { "a": { "b": [{ "d": {} }] } }
        { "a": { "b": { "a": true } } }
        """)

      val expected = ldjson("""
        { "a": { "b": [1, 2, 3] } }
        { "a": { "b": [] } }
        { "a": { "b": [{ "d": {} }] } }
        """)

      input must maskInto(".a.b" -> Set(Array))(expected)
    }

    "mask-10 discard unmasked structure" in {
      val input = ldjson("""
        { "a": { "b": 42, "c": true }, "c": [] }
        """)

      val expected = ldjson("""
        { "a": { "c": true } }
        """)

      input must maskInto(".a.c" -> Set(Boolean))(expected)
    }

    // Multiple masks combine as a union: a value survives if any mask keeps it.
    "mask-11 compose disjunctively across paths" in {
      val input = ldjson("""
        { "a": { "b": 42, "c": true }, "c": [] }
        """)

      val expected = ldjson("""
        { "a": { "c": true }, "c": [] }
        """)

      input must maskInto(".a.c" -> Set(Boolean), ".c" -> Set(Array))(expected)
    }

    "mask-12 compose disjunctively across suffix-overlapped paths" in {
      val input = ldjson("""
        { "a": { "x": 42, "b": { "c": true } }, "b": { "c": [] }, "c": [1, 2] }
        """)

      val expected = ldjson("""
        { "a": { "b": { "c": true } }, "b": { "c": [] } }
        """)

      input must maskInto(".a.b.c" -> Set(Boolean), ".b.c" -> Set(Array))(expected)
    }

    "mask-13 compose disjunctively across paths where one side is false" in {
      val input = ldjson("""
        { "a": { "b": 42, "c": true } }
        """)

      val expected = ldjson("""
        { "a": { "c": true } }
        """)

      input must maskInto(".a.c" -> Set(Boolean), ".a" -> Set(Array))(expected)
    }

    // A mask keeping a whole container at an outer path subsumes any inner mask.
    "mask-14 subsume inner by outer" in {
      val input = ldjson("""
        { "a": { "b": 42, "c": true }, "c": [] }
        """)

      val expected = ldjson("""
        { "a": { "b": 42, "c": true } }
        """)

      input must maskInto(".a.b" -> Set(Boolean), ".a" -> Set(Object))(expected)
    }

    "mask-15 disallow the wrong sort of vector" in {
      val input = ldjson("""
        { "a": true }
        [1, 2, 3]
        """)

      val expected1 = ldjson("""
        { "a": true }
        """)

      val expected2 = ldjson("""
        [1, 2, 3]
        """)

      input must maskInto("." -> Set(Object))(expected1)
      input must maskInto("." -> Set(Array))(expected2)
    }

    // Dropped array slots close up rather than leaving holes.
    "mask-16 compact surrounding array" in {
      ldjson("[1, 2, 3]") must maskInto("[1]" -> Set(Number))(ldjson("[2]"))
    }

    "mask-17 compact surrounding array with multiple values retained" in {
      val input = ldjson("""
        [1, 2, 3, 4, 5]
        """)

      val expected = ldjson("""
        [1, 3, 4]
        """)

      input must maskInto(
        "[0]" -> Set(Number),
        "[2]" -> Set(Number),
        "[3]" -> Set(Number))(expected)
    }

    "mask-18 compact surrounding nested array with multiple values retained" in {
      val input = ldjson("""
        { "a": { "b": [1, 2, 3, 4, 5], "c" : null } }
        """)

      val expected = ldjson("""
        { "a": { "b": [1, 3, 4] } }
        """)

      input must maskInto(
        ".a.b[0]" -> Set(Number),
        ".a.b[2]" -> Set(Number),
        ".a.b[3]" -> Set(Number))(expected)
    }

    "mask-19 compact array containing nested arrays with single nested value retained" in {
      val input = ldjson("""
        { "a": [[[1, 3, 5], "k"], "foo", { "b": [5, 6, 7], "c": [] }], "d": "x" }
        """)

      val expected = ldjson("""
        { "a": [{"b": [5, 6, 7] }] }
        """)

      input must maskInto(".a[2].b" -> Set(Array))(expected)
    }

    "mask-20 remove object entirely when no values are retained" in {
      ldjson("""{ "a": 42 }""") must maskInto(".a" -> Set(Boolean))(ldjson(""))
    }

    "mask-21 remove array entirely when no values are retained" in {
      ldjson("[42]") must maskInto("[0]" -> Set(Boolean))(ldjson(""))
    }

    "mask-22 retain vector at depth and all recursive contents" in {
      val input = ldjson("""{ "a": { "b": { "c": { "e": true }, "d": 42 } } }""")
      input must maskInto(".a.b" -> Set(Object))(input)
    }

    // minimization of `multilevelFlatten.test`
    "mask-23 disjunctively retain values in an array" in {
      val input = ldjson("""
        ["a", 13]
        ["b", []]
        ["c", {}]
        ["d", [12]]
        ["e", { "z": 14}]
        """)

      val expected = ldjson("""
        ["a"]
        ["b"]
        ["c", {}]
        ["d"]
        ["e", { "z": 14}]
        """)

      input must maskInto("[0]" -> ColumnType.Top, "[1]" -> Set(ColumnType.Object))(expected)
    }

    "mask-24 disjunctively retain values in an array with compaction" in {
      val input = ldjson("""
        [13, "a"]
        [[], "b"]
        [{}, "c"]
        [[12], "d"]
        [{ "z": 14}, "e"]
        """)

      val expected = ldjson("""
        ["a"]
        ["b"]
        [{}, "c"]
        ["d"]
        [{ "z": 14}, "e"]
        """)

      input must maskInto("[0]" -> Set(ColumnType.Object), "[1]" -> ColumnType.Top)(expected)
    }

    "mask-25 disjunctively retain values in an object" in {
      val input = ldjson("""
        { "v": "a", "w": 13 }
        { "v": "b", "w": [] }
        { "v": "c", "w": {} }
        { "v": "d", "w": [12] }
        { "v": "e", "w": { "z": 14} }
        """)

      val expected = ldjson("""
        { "v": "a" }
        { "v": "b" }
        { "v": "c", "w": {} }
        { "v": "d" }
        { "v": "e", "w": { "z": 14} }
        """)

      input must maskInto(".v" -> ColumnType.Top, ".w" -> Set(ColumnType.Object))(expected)
    }

    "mask all values at Top to themselves" >> {
      // minimization of `arrayLengthHeterogeneous.test`
      "mask-26 at identity path" in {
        val input = ldjson("""
          [[1], {"z":2}, [], {}, "foo", null, 42, 42.2, true]
          {"a":[1], "b":{"z":2}, "c":[], "d":{}, "e":"foo", "f":null, "g":42, "h":42.2, "i":true}
          []
          {}
          "foo"
          null
          42
          42.2
          true
          """)

        input must maskInto("." -> ColumnType.Top)(input)
      }

      "mask-27 at object projected path" in {
        val input = ldjson("""
          {"y": [[1], {"z":2}, [], {}, "foo", null, 42, 42.2, true]}
          {"y": {"a":[1], "b":{"z":2}, "c":[], "d":{}, "e":"foo", "f":null, "g":42, "h":42.2, "i":true}}
          {"y": []}
          {"y": {}}
          {"y": "foo"}
          {"y": null}
          {"y": 42}
          {"y": 42.2}
          {"y": true}
          """)

        input must maskInto(".y" -> ColumnType.Top)(input)
      }

      "mask-28 at array projected path" in {
        val input = ldjson("""
          [[[1], {"z":2}, [], {}, "foo", null, 42, 42.2, true]]
          [{"a":[1], "b":{"z":2}, "c":[], "d":{}, "e":"foo", "f":null, "g":42, "h":42.2, "i":true}]
          [[]]
          [{}]
          ["foo"]
          [null]
          [42]
          [42.2]
          [true]
          """)

        input must maskInto("[0]" -> ColumnType.Top)(input)
      }
    }

    "retain each non-temporal scalar type at identity" >> {
      // Shared heterogeneous input for mask-29 through mask-32.
      val input = ldjson("""
        1
        2.2
        27182e-4
        "hi"
        true
        false
        null
        []
        {}
        [1, 2, 3]
        { "a": "hi", "b": null }
        """)

      "mask-29 Null" in {
        val expected = ldjson("""null""")
        input must maskInto("." -> Set(Null))(expected)
      }

      "mask-30 Boolean" in {
        val expected = ldjson("""
          true
          false
          """)

        input must maskInto("." -> Set(Boolean))(expected)
      }

      "mask-31 Number" in {
        val expected = ldjson("""
          1
          2.2
          27182e-4
          """)

        input must maskInto("." -> Set(Number))(expected)
      }

      "mask-32 String" in {
        val expected = ldjson(""""hi"""")
        input must maskInto("." -> Set(ColumnType.String))(expected)
      }
    }

    "mask-33 mask multiple columns at Top" in {
      val input = ldjson("""
        { "x": "hi", "y": null }
        [4, 5]
        """)

      input must maskInto(
        ".x" -> ColumnType.Top,
        ".y" -> ColumnType.Top,
        "[0]" -> ColumnType.Top,
        "[1]" -> ColumnType.Top)(input)
    }
  }

  override def is: SpecStructure =
    pendingFragments(super.is, maskPendingExamples, "mask")

  // Implemented by concrete backends: applies the Mask stage to the stream.
  def evalMask(mask: Mask, stream: JsonStream): JsonStream

  def maskInto(
      masks: (String, Set[ColumnType])*)(
      expected: JsonStream)
      : Matcher[JsonStream] =
    bestSemanticEqual(expected) ^^ { str: JsonStream =>
      evalMask(Mask(Map(masks.map({ case (k, v) => CPath.parse(k) -> v }): _*)), str)
    }
}
/** Abstract conformance spec for the `Pivot` scalar stage.
  *
  * Pivot unfolds a top-level array or object into one output row per
  * element (or field), emitting the value, its index/key, or an
  * [id, value] pair depending on the requested `IdStatus`. Rows whose
  * value is not of the pivoted structure type produce no output, as do
  * empty vectors.
  *
  * Concrete backends implement `evalPivot`; examples a backend cannot yet
  * satisfy are listed (by the N of "pivot-N") in `pivotPendingExamples`.
  */
trait PivotSpec extends JsonSpec {

  protected final type Pivot = ScalarStage.Pivot
  protected final val Pivot = ScalarStage.Pivot

  // Numbers (the N of "pivot-N") of examples to be marked pending for this backend.
  val pivotPendingExamples: Set[Int]

  "pivot" should {

    "shift an array" >> {
      val input = ldjson("""
        [1, 2, 3]
        [4, 5, 6]
        [7, 8, 9, 10]
        [11]
        []
        [12, 13]
        """)

      "pivot-1 ExcludeId" >> {
        val expected = ldjson("""
          1
          2
          3
          4
          5
          6
          7
          8
          9
          10
          11
          12
          13
          """)

        input must pivotInto(IdStatus.ExcludeId, ColumnType.Array)(expected)
      }

      "pivot-2 IdOnly" >> {
        val expected = ldjson("""
          0
          1
          2
          0
          1
          2
          0
          1
          2
          3
          0
          0
          1
          """)

        input must pivotInto(IdStatus.IdOnly, ColumnType.Array)(expected)
      }

      "pivot-3 IncludeId" >> {
        val expected = ldjson("""
          [0, 1]
          [1, 2]
          [2, 3]
          [0, 4]
          [1, 5]
          [2, 6]
          [0, 7]
          [1, 8]
          [2, 9]
          [3, 10]
          [0, 11]
          [0, 12]
          [1, 13]
          """)

        input must pivotInto(IdStatus.IncludeId, ColumnType.Array)(expected)
      }
    }

    "shift an object" >> {
      val input = ldjson("""
        { "a": 1, "b": 2, "c": 3 }
        { "d": 4, "e": 5, "f": 6 }
        { "g": 7, "h": 8, "i": 9, "j": 10 }
        { "k": 11 }
        {}
        { "l": 12, "m": 13 }
        """)

      "pivot-4 ExcludeId" >> {
        val expected = ldjson("""
          1
          2
          3
          4
          5
          6
          7
          8
          9
          10
          11
          12
          13
          """)

        input must pivotInto(IdStatus.ExcludeId, ColumnType.Object)(expected)
      }

      "pivot-5 IdOnly" >> {
        val expected = ldjson("""
          "a"
          "b"
          "c"
          "d"
          "e"
          "f"
          "g"
          "h"
          "i"
          "j"
          "k"
          "l"
          "m"
          """)

        input must pivotInto(IdStatus.IdOnly, ColumnType.Object)(expected)
      }

      "pivot-6 IncludeId" >> {
        val expected = ldjson("""
          ["a", 1]
          ["b", 2]
          ["c", 3]
          ["d", 4]
          ["e", 5]
          ["f", 6]
          ["g", 7]
          ["h", 8]
          ["i", 9]
          ["j", 10]
          ["k", 11]
          ["l", 12]
          ["m", 13]
          """)

        input must pivotInto(IdStatus.IncludeId, ColumnType.Object)(expected)
      }
    }

    // Non-object rows are dropped entirely (no undefined placeholder row).
    "omit undefined row in object pivot" >> {
      val input = ldjson("""
        { "a": 1 }
        12
        { "b": 2 }
        """)

      "pivot-12 ExcludeId" in {
        val expected = ldjson("""
          1
          2
          """)

        input must pivotInto(IdStatus.ExcludeId, ColumnType.Object)(expected)
      }

      "pivot-13 IdOnly" in {
        val expected = ldjson("""
          "a"
          "b"
          """)

        input must pivotInto(IdStatus.IdOnly, ColumnType.Object)(expected)
      }

      "pivot-14 IncludeId" in {
        val expected = ldjson("""
          ["a", 1]
          ["b", 2]
          """)

        input must pivotInto(IdStatus.IncludeId, ColumnType.Object)(expected)
      }
    }

    // Non-array rows are dropped entirely (no undefined placeholder row).
    "omit undefined row in array pivot" >> {
      val input = ldjson("""
        [11]
        12
        [13]
        """)

      "pivot-15 ExcludeId" in {
        val expected = ldjson("""
          11
          13
          """)

        input must pivotInto(IdStatus.ExcludeId, ColumnType.Array)(expected)
      }

      "pivot-16 IdOnly" in {
        val expected = ldjson("""
          0
          0
          """)

        input must pivotInto(IdStatus.IdOnly, ColumnType.Array)(expected)
      }

      "pivot-17 IncludeId" in {
        val expected = ldjson("""
          [0, 11]
          [0, 13]
          """)

        input must pivotInto(IdStatus.IncludeId, ColumnType.Array)(expected)
      }
    }

    // An empty array appearing as an *element* is a legitimate value and is kept.
    "preserve empty arrays as values of an array pivot" >> {
      val input = ldjson("""
        [ 1, "two", [] ]
        [ [] ]
        [ [], 3, "four" ]
        """)

      "pivot-18 ExludeId" in {
        val expected = ldjson("""
          1
          "two"
          []
          []
          []
          3
          "four"
          """)

        input must pivotInto(IdStatus.ExcludeId, ColumnType.Array)(expected)
      }

      "pivot-19 IdOnly" in {
        val expected = ldjson("""
          0
          1
          2
          0
          0
          1
          2
          """)

        input must pivotInto(IdStatus.IdOnly, ColumnType.Array)(expected)
      }

      "pivot-20 IncludeId" in {
        val expected = ldjson("""
          [0, 1]
          [1, "two"]
          [2, []]
          [0, []]
          [0, []]
          [1, 3]
          [2, "four"]
          """)

        input must pivotInto(IdStatus.IncludeId, ColumnType.Array)(expected)
      }
    }

    // Likewise an empty object appearing as a *field value* is kept.
    "preserve empty objects as values of an object pivot" >> {
      val input = ldjson("""
        { "1": 1, "2": "two", "3": {} }
        { "4": {} }
        { "5": {}, "6": 3, "7": "four" }
        """)

      "pivot-21 ExcludeId" in {
        val expected = ldjson("""
          1
          "two"
          {}
          {}
          {}
          3
          "four"
          """)

        input must pivotInto(IdStatus.ExcludeId, ColumnType.Object)(expected)
      }

      "pivot-22 IdOnly" in {
        val expected = ldjson("""
          "1"
          "2"
          "3"
          "4"
          "5"
          "6"
          "7"
          """)

        input must pivotInto(IdStatus.IdOnly, ColumnType.Object)(expected)
      }

      "pivot-23 IncludeId" in {
        val expected = ldjson("""
          ["1", 1]
          ["2", "two"]
          ["3", {}]
          ["4", {}]
          ["5", {}]
          ["6", 3]
          ["7", "four"]
          """)

        input must pivotInto(IdStatus.IncludeId, ColumnType.Object)(expected)
      }
    }

    "omit results when object pivoting a value of a different kind" >> {
      val input = ldjson("""
        1
        "three"
        false
        null
        ["x", true, {}, []]
        { "a": 1, "b": "two", "c": {}, "d": [] }
        """)

      "pivot-24 ExcludeId" in {
        val expected = ldjson("""
          1
          "two"
          {}
          []
          """)

        input must pivotInto(IdStatus.ExcludeId, ColumnType.Object)(expected)
      }

      "pivot-25 IdOnly" in {
        val expected = ldjson("""
          "a"
          "b"
          "c"
          "d"
          """)

        input must pivotInto(IdStatus.IdOnly, ColumnType.Object)(expected)
      }

      "pivot-26 IncludeId" in {
        val expected = ldjson("""
          ["a", 1]
          ["b", "two"]
          ["c", {}]
          ["d", []]
          """)

        input must pivotInto(IdStatus.IncludeId, ColumnType.Object)(expected)
      }
    }

    "pivot-10 omit results when array pivoting a value of a different kind" >> {
      val input = ldjson("""
        1
        "two"
        false
        null
        ["x", true, {}, []]
        { "a": 1, "b": "two", "c": {}, "d": [] }
        """)

      "pivot-27 ExcludeId" in {
        val expected = ldjson("""
          "x"
          true
          {}
          []
          """)

        input must pivotInto(IdStatus.ExcludeId, ColumnType.Array)(expected)
      }

      "pivot-28 IdOnly" in {
        val expected = ldjson("""
          0
          1
          2
          3
          """)

        input must pivotInto(IdStatus.IdOnly, ColumnType.Array)(expected)
      }

      "pivot-29 IncludeId" in {
        val expected = ldjson("""
          [0, "x"]
          [1, true]
          [2, {}]
          [3, []]
          """)

        input must pivotInto(IdStatus.IncludeId, ColumnType.Array)(expected)
      }
    }

    // An empty vector has nothing to unfold, so it contributes no rows at all.
    "omit empty vector from pivot results" >> {
      val input = ldjson("""
        {}
        []
        """)

      "pivot-30 ExcludeId" in {
        input must pivotInto(IdStatus.ExcludeId, ColumnType.Array)(ldjson(""))
        input must pivotInto(IdStatus.ExcludeId, ColumnType.Object)(ldjson(""))
      }

      "pivot-31 IdOnly" in {
        input must pivotInto(IdStatus.IdOnly, ColumnType.Array)(ldjson(""))
        input must pivotInto(IdStatus.IdOnly, ColumnType.Object)(ldjson(""))
      }

      "pivot-32 IncludeId" in {
        input must pivotInto(IdStatus.IncludeId, ColumnType.Array)(ldjson(""))
        input must pivotInto(IdStatus.IncludeId, ColumnType.Object)(ldjson(""))
      }
    }
  }

  // Wrap the inherited spec structure, marking examples listed in
  // `pivotPendingExamples` as pending.
  override def is: SpecStructure =
    pendingFragments(super.is, pivotPendingExamples, "pivot")

  // Abstract evaluation hook: concrete backends run the `Pivot` stage over
  // the given stream and return the resulting stream.
  def evalPivot(pivot: Pivot, stream: JsonStream): JsonStream

  /** Matcher asserting that pivoting the checked stream with the given id
    * status and structure type yields `expected`, up to semantic JSON
    * equality.
    */
  def pivotInto(
      idStatus: IdStatus,
      structure: ColumnType.Vector)(
      expected: JsonStream)
      : Matcher[JsonStream] =
    bestSemanticEqual(expected) ^^ { str: JsonStream =>
      evalPivot(Pivot(idStatus, structure), str)
    }
}
/** Abstract conformance spec for *sequences* of focused scalar stages
  * (`Wrap`, `Project`, `Mask`, `Pivot`) applied one after another.
  *
  * Each example composes two stages and checks the end-to-end result,
  * covering interactions such as wrap-then-pivot being a no-op and pivots
  * dropping rows of the wrong structure type.
  *
  * Concrete backends implement `evalFocused`; examples a backend cannot
  * yet satisfy are listed (by the N of "foc-N") in `focusedPendingExamples`.
  */
trait FocusedSpec extends JsonSpec {
  import ColumnType._
  import IdStatus._

  import ScalarStage.{Mask, Pivot, Project, Wrap}

  // Numbers (the N of "foc-N") of examples to be marked pending for this backend.
  val focusedPendingExamples: Set[Int]

  "sequential focused stages" should {

    // Wrapping in "foo" then object-pivoting out of it round-trips every row.
    "foc-5 Wrap . Pivot (no-op)" in {
      val stages = List(
        Wrap("foo"),
        Pivot(ExcludeId, Object))

      val input = ldjson("""
        1
        "two"
        false
        null
        [1, 2, 3]
        { "a": 7, "b": "two" }
        []
        {}
        """)

      input must interpretInto(stages)(input)
    }

    // Wrapping yields objects, so an *array* pivot matches nothing.
    "foc-6 Wrap . Pivot (empty set)" in {
      val stages = List(
        Wrap("foo"),
        Pivot(ExcludeId, Array))

      val input = ldjson("""
        1
        "two"
        false
        null
        [1, 2, 3]
        { "a": 7, "b": "two" }
        []
        {}
        """)

      input must interpretInto(stages)(ldjson(""))
    }

    "foc-7 Wrap . Wrap" in {
      val stages = List(
        Wrap("foo"),
        Wrap("bar"))

      val input = ldjson("""
        1
        "two"
        false
        null
        [1, 2, 3]
        { "a": 7, "b": "two" }
        []
        {}
        """)

      val expected = ldjson("""
        { "bar": { "foo": 1 } }
        { "bar": { "foo": "two" } }
        { "bar": { "foo": false } }
        { "bar": { "foo": null } }
        { "bar": { "foo": [1, 2, 3] } }
        { "bar": { "foo": { "a": 7, "b": "two" } } }
        { "bar": { "foo": [] } }
        { "bar": { "foo": {} } }
        """)

      input must interpretInto(stages)(expected)
    }

    // Projecting the field just wrapped recovers the original rows.
    "foc-8 Wrap . Project (no-op)" in {
      val stages = List(
        Wrap("foo"),
        project("foo"))

      val input = ldjson("""
        1
        "two"
        false
        null
        [1, 2, 3]
        { "a": 7, "b": "two" }
        []
        {}
        """)

      input must interpretInto(stages)(input)
    }

    // Projecting a field that the wrap did not create yields nothing.
    "foc-9 Wrap . Project (empty set)" in {
      val stages = List(
        Wrap("foo"),
        project("bar"))

      val input = ldjson("""
        1
        "two"
        false
        null
        [1, 2, 3]
        { "a": 7, "b": "two" }
        []
        {}
        """)

      input must interpretInto(stages)(ldjson(""))
    }

    // Wrapped rows are all objects, so an object mask keeps everything.
    "foc-10 Wrap . Mask (identity)" in {
      val stages = List(
        Wrap("foo"),
        mask("." -> Set(Object)))

      val input = ldjson("""
        1
        "two"
        false
        null
        [1, 2, 3]
        { "a": 7, "b": "two" }
        []
        {}
        """)

      val expected = ldjson("""
        { "foo": 1 }
        { "foo": "two" }
        { "foo": false }
        { "foo": null }
        { "foo": [1, 2, 3] }
        { "foo": { "a": 7, "b": "two" } }
        { "foo": [] }
        { "foo": {} }
        """)

      input must interpretInto(stages)(expected)
    }

    // ...and an array mask over wrapped (object) rows keeps nothing.
    "foc-11 Wrap . Mask (empty set)" in {
      val stages = List(
        Wrap("foo"),
        mask("." -> Set(Array)))

      val input = ldjson("""
        1
        "two"
        false
        null
        [1, 2, 3]
        { "a": 7, "b": "two" }
        []
        {}
        """)

      input must interpretInto(stages)(ldjson(""))
    }

    "foc-12 Project . Pivot (object)" in {
      val stages = List(
        project("foo"),
        Pivot(ExcludeId, Object))

      val input = ldjson("""
        { "foo": 1 }
        { "foo": "two" }
        { "foo": false }
        { "foo": null }
        { "foo": [1, 2, 3] }
        { "foo": { "a": 7, "b": "two" } }
        { "foo": [] }
        { "foo": {} }
        { "bar": 2.2 }
        true
        """)

      val expected = ldjson("""
        7
        "two"
        """)

      input must interpretInto(stages)(expected)
    }

    "foc-13 Project . Pivot (array)" in {
      val stages = List(
        project("foo"),
        Pivot(ExcludeId, Array))

      val input = ldjson("""
        { "foo": 1 }
        { "foo": "two" }
        { "foo": false }
        { "foo": null }
        { "foo": [1, 2, 3] }
        { "foo": { "a": 7, "b": "two" } }
        { "foo": [] }
        { "foo": {} }
        { "bar": 2.2 }
        true
        """)

      val expected = ldjson("""
        1
        2
        3
        """)

      input must interpretInto(stages)(expected)
    }

    "foc-14 Project . Wrap" in {
      val stages = List(
        project("foo"),
        Wrap("foo"))

      val input = ldjson("""
        { "foo": 1 }
        { "foo": "two", "bar": "a" }
        { "foo": false, "baz": -1, "ack": -2 }
        { "foo": null }
        { "foo": [1, 2, 3] }
        { "foo": { "a": 7, "b": "two" } }
        { "foo": [] }
        { "foo": {} }
        { "bar": 2.2 }
        true
        """)

      val expected = ldjson("""
        { "foo": 1 }
        { "foo": "two" }
        { "foo": false }
        { "foo": null }
        { "foo": [1, 2, 3] }
        { "foo": { "a": 7, "b": "two" } }
        { "foo": [] }
        { "foo": {} }
        """)

      input must interpretInto(stages)(expected)
    }

    "foc-15 Project . Project" in {
      val stages = List(
        project("foo"),
        project("a"))

      val input = ldjson("""
        { "foo": 1 }
        { "foo": "two", "bar": "a" }
        { "foo": false, "baz": -1, "ack": -2 }
        { "foo": null }
        { "foo": [1, 2, 3] }
        { "foo": { "a": 7, "b": "two" } }
        { "foo": [] }
        { "foo": {} }
        { "a": 2.2 }
        true
        """)

      val expected = ldjson("""
        7
        """)

      input must interpretInto(stages)(expected)
    }

    "foc-16 Project . Mask" in {
      val stages = List(
        project("foo"),
        mask("." -> Set(Object)))

      val input = ldjson("""
        { "foo": 1 }
        { "foo": "two", "bar": "a" }
        { "foo": false, "baz": -1, "ack": -2 }
        { "foo": null }
        { "foo": [1, 2, 3] }
        { "foo": { "a": 7, "b": "two" } }
        { "foo": [] }
        { "foo": {} }
        { "a": 2.2 }
        true
        """)

      val expected = ldjson("""
        { "a": 7, "b": "two" }
        {}
        """)

      input must interpretInto(stages)(expected)
    }

    "foc-17 Mask . Pivot" in {
      val stages = List(
        mask("." -> Set(Object)),
        Pivot(ExcludeId, Object))

      val input = ldjson("""
        1
        "two"
        false
        null
        [1, 2, 3]
        { "a": 1, "b": "two" }
        []
        {}
        """)

      val expected = ldjson("""
        1
        "two"
        """)

      input must interpretInto(stages)(expected)
    }

    "foc-18 Mask . Wrap" in {
      val stages = List(
        mask("." -> Set(Object)),
        Wrap("foo"))

      val input = ldjson("""
        1
        "two"
        false
        null
        [1, 2, 3]
        [1, 2, 3]
        { "a": 1, "b": "two" }
        []
        {}
        """)

      val expected = ldjson("""
        { "foo": { "a": 1, "b": "two" } }
        { "foo": {} }
        """)

      input must interpretInto(stages)(expected)
    }

    "foc-19 Mask . Project" in {
      val stages = List(
        mask("." -> Set(Object)),
        project("b"))

      val input = ldjson("""
        1
        "two"
        false
        null
        [1, 2, 3]
        [1, 2, 3]
        { "a": 1, "b": "two" }
        []
        {}
        """)

      val expected = ldjson("""
        "two"
        """)

      input must interpretInto(stages)(expected)
    }

    "foc-20 Mask . Mask" in {
      val stages = List(
        mask("." -> Set(Object)),
        mask("." -> Set(Object)))

      val input = ldjson("""
        1
        "two"
        false
        null
        [1, 2, 3]
        [1, 2, 3]
        { "a": 1, "b": "two" }
        []
        {}
        """)

      val expected = ldjson("""
        { "a": 1, "b": "two" }
        {}
        """)

      input must interpretInto(stages)(expected)
    }

    "Pivot . Wrap" >> {
      val input = ldjson("""
        1
        "two"
        false
        null
        [2, "foo"]
        [3, [2.2]]
        [4, { "p": true }]
        { "a": 5, "b": "bar" }
        { "a": 6, "b": [1.1] }
        { "a": 7, "b": { "z": false } }
        []
        {}
        """)

      "Object" >> {
        "foc-29 IncludeId" in {
          val stages = List(Pivot(IncludeId, Object), Wrap("foo"))

          val expected = ldjson("""
            { "foo": ["a", 5] }
            { "foo": ["b", "bar"] }
            { "foo": ["a", 6] }
            { "foo": ["b", [1.1]] }
            { "foo": ["a", 7] }
            { "foo": ["b", { "z": false }] }
            """)

          input must interpretInto(stages)(expected)
        }

        "foc-30 ExcludeId" in {
          val stages = List(Pivot(ExcludeId, Object), Wrap("foo"))

          val expected = ldjson("""
            { "foo": 5 }
            { "foo": "bar" }
            { "foo": 6 }
            { "foo": [1.1] }
            { "foo": 7 }
            { "foo": { "z": false } }
            """)

          input must interpretInto(stages)(expected)
        }

        "foc-31 IdOnly" in {
          val stages = List(Pivot(IdOnly, Object), Wrap("foo"))

          val expected = ldjson("""
            { "foo": "a" }
            { "foo": "b" }
            { "foo": "a" }
            { "foo": "b" }
            { "foo": "a" }
            { "foo": "b" }
            """)

          input must interpretInto(stages)(expected)
        }
      }

      "Array" >> {
        "foc-32 IncludeId" in {
          val stages = List(Pivot(IncludeId, Array), Wrap("foo"))

          val expected = ldjson("""
            { "foo": [0, 2] }
            { "foo": [1, "foo"] }
            { "foo": [0, 3] }
            { "foo": [1, [2.2]] }
            { "foo": [0, 4] }
            { "foo": [1, { "p": true }] }
            """)

          input must interpretInto(stages)(expected)
        }

        "foc-33 ExcludeId" in {
          val stages = List(Pivot(ExcludeId, Array), Wrap("foo"))

          val expected = ldjson("""
            { "foo": 2 }
            { "foo": "foo" }
            { "foo": 3 }
            { "foo": [2.2] }
            { "foo": 4 }
            { "foo": { "p": true } }
            """)

          input must interpretInto(stages)(expected)
        }

        "foc-34 IdOnly" in {
          val stages = List(Pivot(IdOnly, Array), Wrap("foo"))

          val expected = ldjson("""
            { "foo": 0 }
            { "foo": 1 }
            { "foo": 0 }
            { "foo": 1 }
            { "foo": 0 }
            { "foo": 1 }
            """)

          input must interpretInto(stages)(expected)
        }
      }
    }

    "Pivot . Mask" >> {
      val input = ldjson("""
        1
        "two"
        false
        null
        [2, "foo"]
        [3, [2.2]]
        [4, { "p": true }]
        [5, {}]
        { "a": 6, "b": "bar" }
        { "a": 7, "b": [1.1] }
        { "a": 8, "b": { "z": false } }
        { "a": 9, "b": {} }
        []
        {}
        """)

      "Object" >> {
        "foc-35 IncludeId" in {
          val stages = List(
            Pivot(IncludeId, Object),
            Mask(Map(
              CPath.parse("[0]") -> ColumnType.Top,
              CPath.parse("[1]") -> Set(ColumnType.Object))))

          val expected = ldjson("""
            ["a"]
            ["b"]
            ["a"]
            ["b"]
            ["a"]
            ["b", { "z": false }]
            ["a"]
            ["b", {}]
            """)

          input must interpretInto(stages)(expected)
        }

        "foc-36 ExcludeId" in {
          val stages = List(Pivot(ExcludeId, Object), mask("." -> Set(Object)))

          val expected = ldjson("""
            { "z": false }
            {}
            """)

          input must interpretInto(stages)(expected)
        }

        "foc-37 IdOnly" in {
          val stages = List(Pivot(IdOnly, Object), mask("." -> Set(ColumnType.String)))

          val expected = ldjson("""
            "a"
            "b"
            "a"
            "b"
            "a"
            "b"
            "a"
            "b"
            """)

          input must interpretInto(stages)(expected)
        }
      }

      "Array" >> {
        "foc-38 IncludeId" in {
          val stages = List(
            Pivot(IncludeId, Array),
            Mask(Map(
              CPath.parse("[0]") -> ColumnType.Top,
              CPath.parse("[1]") -> Set(ColumnType.Object))))

          val expected = ldjson("""
            [0]
            [1]
            [0]
            [1]
            [0]
            [1, { "p": true }]
            [0]
            [1, {}]
            """)

          input must interpretInto(stages)(expected)
        }

        "foc-39 ExcludeId" in {
          val stages = List(Pivot(ExcludeId, Array), mask("." -> Set(Object)))

          val expected = ldjson("""
            { "p": true }
            {}
            """)

          input must interpretInto(stages)(expected)
        }

        "foc-40 IdOnly" in {
          val stages = List(Pivot(IdOnly, Array), mask("." -> Set(ColumnType.Number)))

          val expected = ldjson("""
            0
            1
            0
            1
            0
            1
            0
            1
            """)

          input must interpretInto(stages)(expected)
        }
      }
    }

    "Pivot . Project" >> {
      val input = ldjson("""
        1
        "two"
        false
        null
        [2, "foo"]
        [3, [2.2]]
        [4, { "p": 10 }]
        [5, {}]
        { "a": 6, "b": "bar" }
        { "a": 7, "b": [1.1] }
        { "a": 8, "b": { "z": 11 } }
        { "a": 9, "b": {} }
        []
        {}
        """)

      "Object" >> {
        "foc-41 IncludeId" in {
          val stages = List(Pivot(IncludeId, Object), project("z"))
          val expected = ldjson("")
          input must interpretInto(stages)(expected)
        }

        "foc-42 ExcludeId" in {
          val stages = List(Pivot(ExcludeId, Object), project("z"))
          val expected = ldjson("11")
          input must interpretInto(stages)(expected)
        }

        "foc-43 IdOnly" in {
          val stages = List(Pivot(IdOnly, Object), project("z"))
          val expected = ldjson("")
          input must interpretInto(stages)(expected)
        }
      }

      "Array" >> {
        "foc-44 IncludeId" in {
          val stages = List(Pivot(IncludeId, Array), project("p"))
          val expected = ldjson("")
          input must interpretInto(stages)(expected)
        }

        "foc-45 ExcludeId" in {
          val stages = List(Pivot(ExcludeId, Array), project("p"))
          val expected = ldjson("10")
          input must interpretInto(stages)(expected)
        }

        "foc-46 IdOnly" in {
          val stages = List(Pivot(IdOnly, Array), project("p"))
          val expected = ldjson("")
          input must interpretInto(stages)(expected)
        }
      }
    }

    "Pivot . Pivot" >> {
      val input = ldjson("""
        1
        "two"
        false
        null
        [2, "foo"]
        [3, [2.2]]
        [4, { "p": 10 }]
        [5, {}]
        { "a": 6, "b": "bar" }
        { "a": 7, "b": [1.1] }
        { "a": 8, "b": { "z": 11 } }
        { "a": 9, "b": {} }
        []
        {}
        """)

      "Object Object" >> {
        "foc-47 IncludeId IncludeId" in {
          val stages = List(Pivot(IncludeId, Object), Pivot(IncludeId, Object))
          input must interpretInto(stages)(ldjson(""))
        }

        "foc-48 IncludeId ExcludeId" in {
          val stages = List(Pivot(IncludeId, Object), Pivot(ExcludeId, Object))
          input must interpretInto(stages)(ldjson(""))
        }

        "foc-49 IncludeId IdOnly" in {
          val stages = List(Pivot(IncludeId, Object), Pivot(IdOnly, Object))
          input must interpretInto(stages)(ldjson(""))
        }

        "foc-50 ExcludeId IncludeId" in {
          val stages = List(Pivot(ExcludeId, Object), Pivot(IncludeId, Object))
          input must interpretInto(stages)(ldjson("""["z", 11]"""))
        }

        "foc-51 ExcludeId ExcludeId" in {
          val stages = List(Pivot(ExcludeId, Object), Pivot(ExcludeId, Object))
          input must interpretInto(stages)(ldjson("11"))
        }

        "foc-52 ExcludeId IdOnly" in {
          val stages = List(Pivot(ExcludeId, Object), Pivot(IdOnly, Object))
          input must interpretInto(stages)(ldjson(""""z""""))
        }

        "foc-53 IdOnly IncludeId" in {
          val stages = List(Pivot(IdOnly, Object), Pivot(IncludeId, Object))
          input must interpretInto(stages)(ldjson(""))
        }

        "foc-54 IdOnly ExcludeId" in {
          val stages = List(Pivot(IdOnly, Object), Pivot(ExcludeId, Object))
          input must interpretInto(stages)(ldjson(""))
        }

        "foc-55 IdOnly IdOnly" in {
          val stages = List(Pivot(IdOnly, Object), Pivot(IdOnly, Object))
          input must interpretInto(stages)(ldjson(""))
        }
      }

      "Object Array" >> {
        "foc-56 IncludeId IncludeId" in {
          val stages = List(Pivot(IncludeId, Object), Pivot(IncludeId, Array))

          val expected = ldjson("""
            [0, "a"]
            [1, 6]
            [0, "b"]
            [1, "bar"]
            [0, "a"]
            [1, 7]
            [0, "b"]
            [1, [1.1]]
            [0, "a"]
            [1, 8]
            [0, "b"]
            [1, { "z": 11 }]
            [0, "a"]
            [1, 9]
            [0, "b"]
            [1, {}]
            """)

          input must interpretInto(stages)(expected)
        }

        "foc-57 IncludeId ExcludeId" in {
          val stages = List(Pivot(IncludeId, Object), Pivot(ExcludeId, Array))

          val expected = ldjson("""
            "a"
            6
            "b"
            "bar"
            "a"
            7
            "b"
            [1.1]
            "a"
            8
            "b"
            { "z": 11 }
            "a"
            9
            "b"
            {}
            """)

          input must interpretInto(stages)(expected)
        }

        "foc-58 IncludeId IdOnly" in {
          val stages = List(Pivot(IncludeId, Object), Pivot(IdOnly, Array))

          val expected = ldjson("""
            0
            1
            0
            1
            0
            1
            0
            1
            0
            1
            0
            1
            0
            1
            0
            1
            """)

          input must interpretInto(stages)(expected)
        }

        "foc-59 ExcludeId IncludeId" in {
          val stages = List(Pivot(ExcludeId, Object), Pivot(IncludeId, Array))
          input must interpretInto(stages)(ldjson("[0, 1.1]"))
        }

        "foc-60 ExcludeId ExcludeId" in {
          val stages = List(Pivot(ExcludeId, Object), Pivot(ExcludeId, Array))
          input must interpretInto(stages)(ldjson("1.1"))
        }

        "foc-61 ExcludeId IdOnly" in {
          val stages = List(Pivot(ExcludeId, Object), Pivot(IdOnly, Array))
          input must interpretInto(stages)(ldjson("0"))
        }

        "foc-62 IdOnly IncludeId" in {
          val stages = List(Pivot(IdOnly, Object), Pivot(IncludeId, Array))
          input must interpretInto(stages)(ldjson(""))
        }

        "foc-63 IdOnly ExcludeId" in {
          val stages = List(Pivot(IdOnly, Object), Pivot(ExcludeId, Array))
          input must interpretInto(stages)(ldjson(""))
        }

        "foc-64 IdOnly IdOnly" in {
          val stages = List(Pivot(IdOnly, Object), Pivot(IdOnly, Array))
          input must interpretInto(stages)(ldjson(""))
        }
      }

      "Array Object" >> {
        "foc-65 IncludeId IncludeId" in {
          val stages = List(Pivot(IncludeId, Array), Pivot(IncludeId, Object))
          input must interpretInto(stages)(ldjson(""))
        }

        "foc-66 IncludeId ExcludeId" in {
          val stages = List(Pivot(IncludeId, Array), Pivot(ExcludeId, Object))
          input must interpretInto(stages)(ldjson(""))
        }

        "foc-67 IncludeId IdOnly" in {
          val stages = List(Pivot(IncludeId, Array), Pivot(IdOnly, Object))
          input must interpretInto(stages)(ldjson(""))
        }

        "foc-68 ExcludeId IncludeId" in {
          val stages = List(Pivot(ExcludeId, Array), Pivot(IncludeId, Object))
          input must interpretInto(stages)(ldjson("""["p", 10]"""))
        }

        "foc-69 ExcludeId ExcludeId" in {
          val stages = List(Pivot(ExcludeId, Array), Pivot(ExcludeId, Object))
          input must interpretInto(stages)(ldjson("10"))
        }

        "foc-70 ExcludeId IdOnly" in {
          val stages = List(Pivot(ExcludeId, Array), Pivot(IdOnly, Object))
          input must interpretInto(stages)(ldjson(""""p""""))
        }

        "foc-71 IdOnly IncludeId" in {
          val stages = List(Pivot(IdOnly, Array), Pivot(IncludeId, Object))
          input must interpretInto(stages)(ldjson(""))
        }

        "foc-72 IdOnly ExcludeId" in {
          val stages = List(Pivot(IdOnly, Array), Pivot(ExcludeId, Object))
          input must interpretInto(stages)(ldjson(""))
        }

        "foc-73 IdOnly IdOnly" in {
          val stages = List(Pivot(IdOnly, Array), Pivot(IdOnly, Object))
          input must interpretInto(stages)(ldjson(""))
        }
      }

      "Array Array" >> {
        "foc-74 IncludeId IncludeId" in {
          val stages = List(Pivot(IncludeId, Array), Pivot(IncludeId, Array))

          val expected = ldjson("""
            [0, 0]
            [1, 2]
            [0, 1]
            [1, "foo"]
            [0, 0]
            [1, 3]
            [0, 1]
            [1, [2.2]]
            [0, 0]
            [1, 4]
            [0, 1]
            [1, { "p": 10 }]
            [0, 0]
            [1, 5]
            [0, 1]
            [1, {}]
            """)

          input must interpretInto(stages)(expected)
        }

        "foc-75 IncludeId ExcludeId" in {
          val stages = List(Pivot(IncludeId, Array), Pivot(ExcludeId, Array))

          val expected = ldjson("""
            0
            2
            1
            "foo"
            0
            3
            1
            [2.2]
            0
            4
            1
            { "p": 10 }
            0
            5
            1
            {}
            """)

          input must interpretInto(stages)(expected)
        }

        "foc-76 IncludeId IdOnly" in {
          val stages = List(Pivot(IncludeId, Array), Pivot(IdOnly, Array))

          val expected = ldjson("""
            0
            1
            0
            1
            0
            1
            0
            1
            0
            1
            0
            1
            0
            1
            0
            1
            """)

          input must interpretInto(stages)(expected)
        }

        "foc-77 ExcludeId IncludeId" in {
          val stages = List(Pivot(ExcludeId, Array), Pivot(IncludeId, Array))
          input must interpretInto(stages)(ldjson("[0, 2.2]"))
        }

        "foc-78 ExcludeId ExcludeId" in {
          val stages = List(Pivot(ExcludeId, Array), Pivot(ExcludeId, Array))
          input must interpretInto(stages)(ldjson("2.2"))
        }

        "foc-79 ExcludeId IdOnly" in {
          val stages = List(Pivot(ExcludeId, Array), Pivot(IdOnly, Array))
          input must interpretInto(stages)(ldjson("0"))
        }

        "foc-80 IdOnly IncludeId" in {
          val stages = List(Pivot(IdOnly, Array), Pivot(IncludeId, Array))
          input must interpretInto(stages)(ldjson(""))
        }

        "foc-81 IdOnly ExcludeId" in {
          val stages = List(Pivot(IdOnly, Array), Pivot(ExcludeId, Array))
          input must interpretInto(stages)(ldjson(""))
        }

        "foc-82 IdOnly IdOnly" in {
          val stages = List(Pivot(IdOnly, Array), Pivot(IdOnly, Array))
          input must interpretInto(stages)(ldjson(""))
        }
      }
    }
  }

  // Wrap the inherited spec structure, marking examples listed in
  // `focusedPendingExamples` as pending.
  override def is: SpecStructure =
    pendingFragments(super.is, focusedPendingExamples, "foc")

  // Abstract evaluation hook: concrete backends run the given sequence of
  // focused stages over the stream and return the resulting stream.
  def evalFocused(stages: List[ScalarStage.Focused], stream: JsonStream): JsonStream

  // Convenience constructor: builds a Mask from (CPath string, types) pairs.
  def mask(masks: (String, Set[ColumnType])*): Mask =
    Mask(Map(masks.map({ case (k, v) => CPath.parse(k) -> v }): _*))

  // Convenience constructor: builds a Project from a CPath string.
  def project(path: String): Project =
    Project(CPath.parse(path))

  /** Matcher asserting that interpreting the given stage sequence over the
    * checked stream yields `expected`, up to semantic JSON equality.
    */
  def interpretInto(
      stages: List[ScalarStage.Focused])(
      expected: JsonStream)
      : Matcher[JsonStream] =
    bestSemanticEqual(expected) ^^ { str: JsonStream =>
      evalFocused(stages, str)
    }
}
trait CartesianSpec extends JsonSpec {
protected final type Cartesian = ScalarStage.Cartesian
protected final val Cartesian = ScalarStage.Cartesian
val cartesianPendingExamples: Set[Int]
"cartesian" should {
// a0 as a1, b0 as b1, c0 as c1, d0 as d1
"cart-1 cross fields with no parse instructions" in {
val input = ldjson("""
{ "a0": "hi", "b0": null, "c0": { "x": 42 }, "d0": [1, 2, 3] }
""")
val expected = ldjson("""
{ "a1": "hi", "b1": null, "c1": { "x": 42 }, "d1": [1, 2, 3] }
""")
val targets = Map(
(CPathField("a1"), (CPathField("a0"), Nil)),
(CPathField("b1"), (CPathField("b0"), Nil)),
(CPathField("c1"), (CPathField("c0"), Nil)),
(CPathField("d1"), (CPathField("d0"), Nil)))
input must cartesianInto(targets)(expected)
}
// a0 as a1, b0 as b1
"cart-2 cross fields with no parse instructions ignoring extra fields" in {
val input = ldjson("""
{ "a0": "hi", "b0": null, "c0": 42 }
""")
val expected = ldjson("""
{ "a1": "hi", "b1": null }
""")
val targets = Map(
(CPathField("a1"), (CPathField("a0"), Nil)),
(CPathField("b1"), (CPathField("b0"), Nil)))
input must cartesianInto(targets)(expected)
}
// a0 as a1, b0 as b1, d0 as d1
"cart-3 cross fields with no parse instructions ignoring absent fields" in {
val input = ldjson("""
{ "a0": "hi", "b0": null }
""")
val expected = ldjson("""
{ "a1": "hi", "b1": null }
""")
val targets = Map(
(CPathField("a1"), (CPathField("a0"), Nil)),
(CPathField("b1"), (CPathField("b0"), Nil)),
(CPathField("d1"), (CPathField("d0"), Nil)))
input must cartesianInto(targets)(expected)
}
// a0[_] as a1, b0 as b1, c0{_} as c1
"cart-4 cross fields with single pivot" in {
import ScalarStage.Pivot
val input = ldjson("""
{ "a0": [1, 2, 3], "b0": null, "c0": { "x": 4, "y": 5 } }
""")
val expected = ldjson("""
{ "a1": 1, "b1": null, "c1": 4 }
{ "a1": 1, "b1": null, "c1": 5 }
{ "a1": 2, "b1": null, "c1": 4 }
{ "a1": 2, "b1": null, "c1": 5 }
{ "a1": 3, "b1": null, "c1": 4 }
{ "a1": 3, "b1": null, "c1": 5 }
""")
val targets = Map(
(CPathField("a1"),
(CPathField("a0"), List(Pivot(IdStatus.ExcludeId, ColumnType.Array)))),
(CPathField("b1"),
(CPathField("b0"), Nil)),
(CPathField("c1"),
(CPathField("c0"), List(Pivot(IdStatus.ExcludeId, ColumnType.Object)))))
input must cartesianInto(targets)(expected)
}
// a[_].x0.y0{_} as y, a[_].x1[_] as z, b{_:} as b, c as c
"cart-5 cross fields with multiple nested pivots" in {
import ScalarStage.{Pivot, Project}
val input = ldjson("""
{
"a": [ { "x0": { "y0": { "f": "eff", "g": "gee" }, "y1": { "h": 42 } }, "x1": [ "0", 0, null ] } ],
"b": { "k1": null, "k2": null },
"c": true
}
""")
val expected = ldjson("""
{ "y": "eff", "z": "0" , "b": "k1", "c": true }
{ "y": "gee", "z": "0" , "b": "k1", "c": true }
{ "y": "eff", "z": 0 , "b": "k1", "c": true }
{ "y": "gee", "z": 0 , "b": "k1", "c": true }
{ "y": "eff", "z": null, "b": "k1", "c": true }
{ "y": "gee", "z": null, "b": "k1", "c": true }
{ "y": "eff", "z": "0" , "b": "k2", "c": true }
{ "y": "gee", "z": "0" , "b": "k2", "c": true }
{ "y": "eff", "z": 0 , "b": "k2", "c": true }
{ "y": "gee", "z": 0 , "b": "k2", "c": true }
{ "y": "eff", "z": null, "b": "k2", "c": true }
{ "y": "gee", "z": null, "b": "k2", "c": true }
""")
val targets = Map(
(CPathField("y"),
(CPathField("a"), List(
Pivot(IdStatus.ExcludeId, ColumnType.Array),
Project(CPath.parse("x0")),
Project(CPath.parse("y0")),
Pivot(IdStatus.ExcludeId, ColumnType.Object)))),
(CPathField("z"),
(CPathField("a"), List(
Pivot(IdStatus.ExcludeId, ColumnType.Array),
Project(CPath.parse("x1")),
Pivot(IdStatus.ExcludeId, ColumnType.Array)))),
(CPathField("b"),
(CPathField("b"), List(
Pivot(IdStatus.IdOnly, ColumnType.Object)))),
(CPathField("c"),
(CPathField("c"), Nil)))
input must cartesianInto(targets)(expected)
}
// a as a, b[_] as ba, b{_} as bm
"cart-6 emit defined fields when some are undefined" in {
import ScalarStage.{Mask, Pivot}
val input = ldjson("""
{ "a": 1, "b": [ "two", "three" ] }
{ "a": 2, "b": { "x": "four", "y": "five" } }
{ "a": 3, "b": 42 }
""")
val expected = ldjson("""
{ "a": 1, "ba": "two" }
{ "a": 1, "ba": "three" }
{ "a": 2, "bm": "four" }
{ "a": 2, "bm": "five" }
{ "a": 3 }
""")
val targets = Map(
(CPathField("a"), (CPathField("a"), Nil)),
(CPathField("ba"), (CPathField("b"), List(
Mask(Map(CPath.Identity -> Set(ColumnType.Array))),
Pivot(IdStatus.ExcludeId, ColumnType.Array)))),
(CPathField("bm"), (CPathField("b"), List(
Mask(Map(CPath.Identity -> Set(ColumnType.Object))),
Pivot(IdStatus.ExcludeId, ColumnType.Object)))))
input must cartesianInto(targets)(expected)
}
// a[_] as a, b[_] as b
"cart-9 pivoting retains row alignment through undefineds" in {
import ScalarStage.{Mask, Pivot}
val input = ldjson("""
{ "a": [1], "b": [4, 5] }
{ "a": [2] }
{ "a": [3], "b": [6] }
""")
val expected = ldjson("""
{ "a": 1, "b": 4 }
{ "a": 1, "b": 5 }
{ "a": 2 }
{ "a": 3, "b": 6 }
""")
val targets = Map(
(CPathField("a"), (CPathField("a"), List(
Mask(Map(CPath.Identity -> Set(ColumnType.Array))),
Pivot(IdStatus.ExcludeId, ColumnType.Array)))),
(CPathField("b"), (CPathField("b"), List(
Mask(Map(CPath.Identity -> Set(ColumnType.Array))),
Pivot(IdStatus.ExcludeId, ColumnType.Array)))))
input must cartesianInto(targets)(expected)
}
// a[_] as a, b[_] as b
"cart-10 pivoting retains row alignment through undefineds (pt II)" in {
import ScalarStage.{Mask, Pivot}
val input = ldjson("""
{ "a": [1], "b": [4, 5] }
{ "a": [2] }
{ "b": [6] }
{ "a": [3], "b": [7] }
""")
val expected = ldjson("""
{ "a": 1, "b": 4 }
{ "a": 1, "b": 5 }
{ "a": 2 }
{ "b": 6 }
{ "a": 3, "b": 7 }
""")
val targets = Map(
(CPathField("a"), (CPathField("a"), List(
Mask(Map(CPath.Identity -> Set(ColumnType.Array))),
Pivot(IdStatus.ExcludeId, ColumnType.Array)))),
(CPathField("b"), (CPathField("b"), List(
Mask(Map(CPath.Identity -> Set(ColumnType.Array))),
Pivot(IdStatus.ExcludeId, ColumnType.Array)))))
input must cartesianInto(targets)(expected)
}
// a0 as a1, b0 as b1
"cart-11 cross fields when some are undefined" in {
val input = ldjson("""
{ "a0": 1 }
{ "a0": 2, "b0": "foo" }
{ "b0": "bar" }
{ "c": 12 }
""")
val expected = ldjson("""
{ "a1": 1 }
{ "a1": 2, "b1": "foo" }
{ "b1": "bar" }
""")
val targets = Map(
(CPathField("a1"), (CPathField("a0"), Nil)),
(CPathField("b1"), (CPathField("b0"), Nil)))
input must cartesianInto(targets)(expected)
}
// minimization of `multilevelFlatten.test`
// x[0] as x0, x[1] as x1
"cart-12 cross fields when some are undefined after array projection" in {
import ScalarStage.Project
val input = ldjson("""
{ "x": ["foo"] }
{ "x": ["bar", 42] }
""")
val expected = ldjson("""
{ "x0": "foo" }
{ "x0": "bar", "x1": 42 }
""")
val targets = Map(
(CPathField("x0"), (CPathField("x"), List(
Project(CPath.parse("[0]"))))),
(CPathField("x1"), (CPathField("x"), List(
Project(CPath.parse("[1]"))))))
input must cartesianInto(targets)(expected)
}
// x.a as xa, x.b as xb
"cart-13 cross fields when some are undefined after object projection" in {
import ScalarStage.Project
val input = ldjson("""
{ "x": { "a": "foo" } }
{ "x": { "a": "bar", "b": 42 } }
""")
val expected = ldjson("""
{ "xa": "foo" }
{ "xa": "bar", "xb": 42 }
""")
val targets = Map(
(CPathField("xa"), (CPathField("x"), List(
Project(CPath.parse(".a"))))),
(CPathField("xb"), (CPathField("x"), List(
Project(CPath.parse(".b"))))))
input must cartesianInto(targets)(expected)
}
// minimization of `flattenArrayValueAndIndexWithField.test`
// a as x0, b[_] as x1
"cart-14 cross fields when some are undefined after array pivot" in {
import ScalarStage.Pivot
val input = ldjson("""
{ "a": 1, "b": [true, true, true] }
{ "a": 2, "b": [false, false] }
{ "a": 3, "b": 42 }
{ "a": 4 }
""")
val expected = ldjson("""
{ "x0": 1, "x1": [0, true] }
{ "x0": 1, "x1": [1, true] }
{ "x0": 1, "x1": [2, true] }
{ "x0": 2, "x1": [0, false] }
{ "x0": 2, "x1": [1, false] }
{ "x0": 3 }
{ "x0": 4 }
""")
val targets = Map(
(CPathField("x0"), (CPathField("a"), Nil)),
(CPathField("x1"), (CPathField("b"), List(
Pivot(IdStatus.IncludeId, ColumnType.Array)))))
input must cartesianInto(targets)(expected)
}
// a as x0, b{_} as x1
"cart-15 cross fields when some are undefined after object pivot" in {
import ScalarStage.Pivot
val input = ldjson("""
{ "a": 1, "b": {"x":true, "y":true, "z":true} }
{ "a": 2, "b": {"x":false, "y":false} }
{ "a": 3, "b": 42 }
{ "a": 4 }
""")
val expected = ldjson("""
{ "x0": 1, "x1": ["x", true] }
{ "x0": 1, "x1": ["y", true] }
{ "x0": 1, "x1": ["z", true] }
{ "x0": 2, "x1": ["x", false] }
{ "x0": 2, "x1": ["y", false] }
{ "x0": 3 }
{ "x0": 4 }
""")
val targets = Map(
(CPathField("x0"), (CPathField("a"), Nil)),
(CPathField("x1"), (CPathField("b"), List(
Pivot(IdStatus.IncludeId, ColumnType.Object)))))
input must cartesianInto(targets)(expected)
}
// Nested pivots in a cartesian can emit multiple rows where the
// pivoted value is undefined. When these undefined rows are crossed
// in the cartesian, they must be preserved.
"nested pivoting in a cartouche preserves undefineds" >> {
"Pivot . Pivot" >> {
"Array Array " >> {
import ScalarStage.Pivot
val input = ldjson("""
{ "a": 1, "b": [["one", "two"], "three", ["four"]] }
{ "a": 2, "b": [{ "x": "five", "y": "six" }, { "z": "seven" }] }
{ "a": 3, "b": "eight" }
{ "a": 4, "c": "nine" }
""")
"cart-24 ExcludeId" in {
val expected = ldjson("""
{ "a0": 1, "b0": "one" }
{ "a0": 1, "b0": "two" }
{ "a0": 1 }
{ "a0": 1, "b0": "four" }
{ "a0": 2 }
{ "a0": 2 }
{ "a0": 3 }
{ "a0": 4 }
""")
val targets = Map(
(CPathField("a0"), (CPathField("a"), Nil)),
(CPathField("b0"), (CPathField("b"), List(
Pivot(IdStatus.ExcludeId, ColumnType.Array),
Pivot(IdStatus.ExcludeId, ColumnType.Array)))))
input must cartesianInto(targets)(expected)
}
"cart-25 IdOnly" in {
val expected = ldjson("""
{ "a0": 1, "b0": 0 }
{ "a0": 1, "b0": 1 }
{ "a0": 1 }
{ "a0": 1, "b0": 0 }
{ "a0": 2 }
{ "a0": 2 }
{ "a0": 3 }
{ "a0": 4 }
""")
val targets = Map(
(CPathField("a0"), (CPathField("a"), Nil)),
(CPathField("b0"), (CPathField("b"), List(
Pivot(IdStatus.ExcludeId, ColumnType.Array),
Pivot(IdStatus.IdOnly, ColumnType.Array)))))
input must cartesianInto(targets)(expected)
}
"cart-26 IncludeId" in {
val expected = ldjson("""
{ "a0": 1, "b0": [0, "one"] }
{ "a0": 1, "b0": [1, "two"] }
{ "a0": 1 }
{ "a0": 1, "b0": [0, "four"] }
{ "a0": 2 }
{ "a0": 2 }
{ "a0": 3 }
{ "a0": 4 }
""")
val targets = Map(
(CPathField("a0"), (CPathField("a"), Nil)),
(CPathField("b0"), (CPathField("b"), List(
Pivot(IdStatus.ExcludeId, ColumnType.Array),
Pivot(IdStatus.IncludeId, ColumnType.Array)))))
input must cartesianInto(targets)(expected)
}
}
"Object Object" >> {
import ScalarStage.Pivot
val input = ldjson("""
{ "a": 1, "b": { "x": { "q": "one", "r": "two"}, "y": "three", "z": { "s": "four" } } }
{ "a": 2, "b": { "a": ["five", "six"], "b": "seven" } }
{ "a": 3, "b": "eight" }
{ "a": 4, "c": "nine" }
""")
"cart-27 ExcludeId" in {
val expected = ldjson("""
{ "a0": 1, "b0": "one" }
{ "a0": 1, "b0": "two" }
{ "a0": 1 }
{ "a0": 1, "b0": "four" }
{ "a0": 2 }
{ "a0": 2 }
{ "a0": 3 }
{ "a0": 4 }
""")
val targets = Map(
(CPathField("a0"), (CPathField("a"), Nil)),
(CPathField("b0"), (CPathField("b"), List(
Pivot(IdStatus.ExcludeId, ColumnType.Object),
Pivot(IdStatus.ExcludeId, ColumnType.Object)))))
input must cartesianInto(targets)(expected)
}
"cart-28 IdOnly" in {
val expected = ldjson("""
{ "a0": 1, "b0": "q" }
{ "a0": 1, "b0": "r" }
{ "a0": 1 }
{ "a0": 1, "b0": "s" }
{ "a0": 2 }
{ "a0": 2 }
{ "a0": 3 }
{ "a0": 4 }
""")
val targets = Map(
(CPathField("a0"), (CPathField("a"), Nil)),
(CPathField("b0"), (CPathField("b"), List(
Pivot(IdStatus.ExcludeId, ColumnType.Object),
Pivot(IdStatus.IdOnly, ColumnType.Object)))))
input must cartesianInto(targets)(expected)
}
"cart-29 IncludeId" in {
val expected = ldjson("""
{ "a0": 1, "b0": ["q", "one"] }
{ "a0": 1, "b0": ["r", "two"] }
{ "a0": 1 }
{ "a0": 1, "b0": ["s", "four"] }
{ "a0": 2 }
{ "a0": 2 }
{ "a0": 3 }
{ "a0": 4 }
""")
val targets = Map(
(CPathField("a0"), (CPathField("a"), Nil)),
(CPathField("b0"), (CPathField("b"), List(
Pivot(IdStatus.ExcludeId, ColumnType.Object),
Pivot(IdStatus.IncludeId, ColumnType.Object)))))
input must cartesianInto(targets)(expected)
}
}
}
"Mask . Pivot . Mask . Pivot" >> {
"Array Array" >> {
import ScalarStage.{Mask, Pivot}
val input = ldjson("""
{ "a": 1, "b": [["one", "two"], "three", ["four"]] }
{ "a": 2, "b": [{ "x": "five", "y": "six" }, { "z": "seven" }] }
{ "a": 3, "b": "eight" }
{ "a": 4, "c": "nine" }
""")
"cart-30 ExcludeId" in {
val expected = ldjson("""
{ "a0": 1, "b0": "one" }
{ "a0": 1, "b0": "two" }
{ "a0": 1 }
{ "a0": 1, "b0": "four" }
{ "a0": 2 }
{ "a0": 2 }
{ "a0": 3 }
{ "a0": 4 }
""")
val targets = Map(
(CPathField("a0"), (CPathField("a"), Nil)),
(CPathField("b0"), (CPathField("b"), List(
Mask(Map(CPath.Identity -> Set(ColumnType.Array))),
Pivot(IdStatus.ExcludeId, ColumnType.Array),
Mask(Map(CPath.Identity -> Set(ColumnType.Array))),
Pivot(IdStatus.ExcludeId, ColumnType.Array)))))
input must cartesianInto(targets)(expected)
}
"cart-31 IdOnly" in {
val expected = ldjson("""
{ "a0": 1, "b0": 0 }
{ "a0": 1, "b0": 1 }
{ "a0": 1 }
{ "a0": 1, "b0": 0 }
{ "a0": 2 }
{ "a0": 2 }
{ "a0": 3 }
{ "a0": 4 }
""")
val targets = Map(
(CPathField("a0"), (CPathField("a"), Nil)),
(CPathField("b0"), (CPathField("b"), List(
Mask(Map(CPath.Identity -> Set(ColumnType.Array))),
Pivot(IdStatus.ExcludeId, ColumnType.Array),
Mask(Map(CPath.Identity -> Set(ColumnType.Array))),
Pivot(IdStatus.IdOnly, ColumnType.Array)))))
input must cartesianInto(targets)(expected)
}
"cart-32 IncludeId" in {
val expected = ldjson("""
{ "a0": 1, "b0": [0, "one"] }
{ "a0": 1, "b0": [1, "two"] }
{ "a0": 1 }
{ "a0": 1, "b0": [0, "four"] }
{ "a0": 2 }
{ "a0": 2 }
{ "a0": 3 }
{ "a0": 4 }
""")
val targets = Map(
(CPathField("a0"), (CPathField("a"), Nil)),
(CPathField("b0"), (CPathField("b"), List(
Mask(Map(CPath.Identity -> Set(ColumnType.Array))),
Pivot(IdStatus.ExcludeId, ColumnType.Array),
Mask(Map(CPath.Identity -> Set(ColumnType.Array))),
Pivot(IdStatus.IncludeId, ColumnType.Array)))))
input must cartesianInto(targets)(expected)
}
}
"Object Object" >> {
import ScalarStage.{Mask, Pivot}
val input = ldjson("""
{ "a": 1, "b": { "x": { "q": "one", "r": "two"}, "y": "three", "z": { "s": "four" } } }
{ "a": 2, "b": { "a": ["five", "six"], "b": "seven" } }
{ "a": 3, "b": "eight" }
{ "a": 4, "c": "nine" }
""")
"cart-33 ExcludeId" in {
val expected = ldjson("""
{ "a0": 1, "b0": "one" }
{ "a0": 1, "b0": "two" }
{ "a0": 1 }
{ "a0": 1, "b0": "four" }
{ "a0": 2 }
{ "a0": 2 }
{ "a0": 3 }
{ "a0": 4 }
""")
val targets = Map(
(CPathField("a0"), (CPathField("a"), Nil)),
(CPathField("b0"), (CPathField("b"), List(
Mask(Map(CPath.Identity -> Set(ColumnType.Object))),
Pivot(IdStatus.ExcludeId, ColumnType.Object),
Mask(Map(CPath.Identity -> Set(ColumnType.Object))),
Pivot(IdStatus.ExcludeId, ColumnType.Object)))))
input must cartesianInto(targets)(expected)
}
"cart-34 IdOnly" in {
val expected = ldjson("""
{ "a0": 1, "b0": "q" }
{ "a0": 1, "b0": "r" }
{ "a0": 1 }
{ "a0": 1, "b0": "s" }
{ "a0": 2 }
{ "a0": 2 }
{ "a0": 3 }
{ "a0": 4 }
""")
val targets = Map(
(CPathField("a0"), (CPathField("a"), Nil)),
(CPathField("b0"), (CPathField("b"), List(
Mask(Map(CPath.Identity -> Set(ColumnType.Object))),
Pivot(IdStatus.ExcludeId, ColumnType.Object),
Mask(Map(CPath.Identity -> Set(ColumnType.Object))),
Pivot(IdStatus.IdOnly, ColumnType.Object)))))
input must cartesianInto(targets)(expected)
}
"cart-35 IncludeId" in {
val expected = ldjson("""
{ "a0": 1, "b0": ["q", "one"] }
{ "a0": 1, "b0": ["r", "two"] }
{ "a0": 1 }
{ "a0": 1, "b0": ["s", "four"] }
{ "a0": 2 }
{ "a0": 2 }
{ "a0": 3 }
{ "a0": 4 }
""")
val targets = Map(
(CPathField("a0"), (CPathField("a"), Nil)),
(CPathField("b0"), (CPathField("b"), List(
Mask(Map(CPath.Identity -> Set(ColumnType.Object))),
Pivot(IdStatus.ExcludeId, ColumnType.Object),
Mask(Map(CPath.Identity -> Set(ColumnType.Object))),
Pivot(IdStatus.IncludeId, ColumnType.Object)))))
input must cartesianInto(targets)(expected)
}
}
}
"Project . Pivot . Project . Pivot" >> {
"Array Array" >> {
import ScalarStage.{Pivot, Project}
val input = ldjson("""
{ "a": 1, "b": { "x": ["one", "two"] } }
{ "a": 2, "b": { "x": [{ "q": ["three", "four"] }, { "p": "five" }, "six"] } }
{ "a": 3, "b": { "x": "seven" } }
{ "a": 4, "b": "eight" }
{ "a": 5, "c": "nine" }
""")
"cart-36 ExcludeId" in {
val expected = ldjson("""
{ "a0": 1 }
{ "a0": 1 }
{ "a0": 2, "b0": "three" }
{ "a0": 2, "b0": "four" }
{ "a0": 2 }
{ "a0": 2 }
{ "a0": 3 }
{ "a0": 4 }
{ "a0": 5 }
""")
val targets = Map(
(CPathField("a0"), (CPathField("a"), Nil)),
(CPathField("b0"), (CPathField("b"), List(
Project(CPath.parse(".x")),
Pivot(IdStatus.ExcludeId, ColumnType.Array),
Project(CPath.parse(".q")),
Pivot(IdStatus.ExcludeId, ColumnType.Array)))))
input must cartesianInto(targets)(expected)
}
"cart-37 IdOnly" in {
val expected = ldjson("""
{ "a0": 1 }
{ "a0": 1 }
{ "a0": 2, "b0": 0 }
{ "a0": 2, "b0": 1 }
{ "a0": 2 }
{ "a0": 2 }
{ "a0": 3 }
{ "a0": 4 }
{ "a0": 5 }
""")
val targets = Map(
(CPathField("a0"), (CPathField("a"), Nil)),
(CPathField("b0"), (CPathField("b"), List(
Project(CPath.parse(".x")),
Pivot(IdStatus.ExcludeId, ColumnType.Array),
Project(CPath.parse(".q")),
Pivot(IdStatus.IdOnly, ColumnType.Array)))))
input must cartesianInto(targets)(expected)
}
"cart-38 IncludeId" in {
val expected = ldjson("""
{ "a0": 1 }
{ "a0": 1 }
{ "a0": 2, "b0": [0, "three"] }
{ "a0": 2, "b0": [1, "four"] }
{ "a0": 2 }
{ "a0": 2 }
{ "a0": 3 }
{ "a0": 4 }
{ "a0": 5 }
""")
val targets = Map(
(CPathField("a0"), (CPathField("a"), Nil)),
(CPathField("b0"), (CPathField("b"), List(
Project(CPath.parse(".x")),
Pivot(IdStatus.ExcludeId, ColumnType.Array),
Project(CPath.parse(".q")),
Pivot(IdStatus.IncludeId, ColumnType.Array)))))
input must cartesianInto(targets)(expected)
}
}
"Object Object" >> {
import ScalarStage.{Pivot, Project}
val input = ldjson("""
{ "a": 1, "b": { "x": { "s": "one", "t": "two" } } }
{ "a": 2, "b": { "x": { "z": { "q": { "f": "three", "g": "four" } }, "y": { "p": "five" }, "r": "six" } } }
{ "a": 3, "b": { "x": "seven" } }
{ "a": 4, "b": "eight" }
{ "a": 5, "c": "nine" }
""")
"cart-39 ExcludeId" in {
val expected = ldjson("""
{ "a0": 1 }
{ "a0": 1 }
{ "a0": 2, "b0": "three" }
{ "a0": 2, "b0": "four" }
{ "a0": 2 }
{ "a0": 2 }
{ "a0": 3 }
{ "a0": 4 }
{ "a0": 5 }
""")
val targets = Map(
(CPathField("a0"), (CPathField("a"), Nil)),
(CPathField("b0"), (CPathField("b"), List(
Project(CPath.parse(".x")),
Pivot(IdStatus.ExcludeId, ColumnType.Object),
Project(CPath.parse(".q")),
Pivot(IdStatus.ExcludeId, ColumnType.Object)))))
input must cartesianInto(targets)(expected)
}
"cart-40 IdOnly" in {
val expected = ldjson("""
{ "a0": 1 }
{ "a0": 1 }
{ "a0": 2, "b0": "f" }
{ "a0": 2, "b0": "g" }
{ "a0": 2 }
{ "a0": 2 }
{ "a0": 3 }
{ "a0": 4 }
{ "a0": 5 }
""")
val targets = Map(
(CPathField("a0"), (CPathField("a"), Nil)),
(CPathField("b0"), (CPathField("b"), List(
Project(CPath.parse(".x")),
Pivot(IdStatus.ExcludeId, ColumnType.Object),
Project(CPath.parse(".q")),
Pivot(IdStatus.IdOnly, ColumnType.Object)))))
input must cartesianInto(targets)(expected)
}
"cart-41 IncludeId" in {
val expected = ldjson("""
{ "a0": 1 }
{ "a0": 1 }
{ "a0": 2, "b0": ["f", "three"] }
{ "a0": 2, "b0": ["g", "four"] }
{ "a0": 2 }
{ "a0": 2 }
{ "a0": 3 }
{ "a0": 4 }
{ "a0": 5 }
""")
val targets = Map(
(CPathField("a0"), (CPathField("a"), Nil)),
(CPathField("b0"), (CPathField("b"), List(
Project(CPath.parse(".x")),
Pivot(IdStatus.ExcludeId, ColumnType.Object),
Project(CPath.parse(".q")),
Pivot(IdStatus.IncludeId, ColumnType.Object)))))
input must cartesianInto(targets)(expected)
}
}
}
"Pivot . Wrap" >> {
"cart-42 Array " in {
import ScalarStage.{Pivot, Wrap}
val input = ldjson("""
{ "a": 1, "b": [["one", "two"], "three", ["four"]] }
{ "a": 2, "b": [{ "x": "five", "y": "six" }, { "z": "seven" }] }
{ "a": 3, "b": "eight" }
{ "a": 4, "c": "nine" }
""")
val expected = ldjson("""
{ "a0": 1, "b0": { "q": ["one", "two"] } }
{ "a0": 1, "b0": { "q": "three" } }
{ "a0": 1, "b0": { "q": ["four"] } }
{ "a0": 2, "b0": { "q": { "x": "five", "y": "six" } } }
{ "a0": 2, "b0": { "q": { "z": "seven" } } }
{ "a0": 3 }
{ "a0": 4 }
""")
val targets = Map(
(CPathField("a0"), (CPathField("a"), Nil)),
(CPathField("b0"), (CPathField("b"), List(
Pivot(IdStatus.ExcludeId, ColumnType.Array),
Wrap("q")))))
input must cartesianInto(targets)(expected)
}
"cart-43 Object " in {
import ScalarStage.{Pivot, Wrap}
val input = ldjson("""
{ "a": 1, "b": { "x": { "t": "one", "r": "two"}, "y": "three", "z": { "s": "four" } } }
{ "a": 2, "b": { "a": ["five", "six"], "b": "seven" } }
{ "a": 3, "b": "eight" }
{ "a": 4, "c": "nine" }
""")
val expected = ldjson("""
{ "a0": 1, "b0": { "q": { "t": "one", "r": "two"} } }
{ "a0": 1, "b0": { "q": "three" } }
{ "a0": 1, "b0": { "q": { "s": "four" } } }
{ "a0": 2, "b0": { "q": ["five", "six"] } }
{ "a0": 2, "b0": { "q": "seven" } }
{ "a0": 3 }
{ "a0": 4 }
""")
val targets = Map(
(CPathField("a0"), (CPathField("a"), Nil)),
(CPathField("b0"), (CPathField("b"), List(
Pivot(IdStatus.ExcludeId, ColumnType.Object),
Wrap("q")))))
input must cartesianInto(targets)(expected)
}
}
}
// a0[_] as a, b0[_] as b
"cart-44 cross with undefined values on both sides" >> {
import ScalarStage.Pivot
val input = ldjson("""
{ "a0": [1] }
{ "a0": [2], "b0": ["z"] }
{ "b0": ["y"] }
{ "a0": [3], "b0": "x" }
{ "a0": 4, "b0": ["w"] }
{ "a0": 5, "b0": "v" }
""")
val expected = ldjson("""
{ "a": 1 }
{ "a": 2, "b": "z" }
{ "b": "y" }
{ "a": 3 }
{ "b": "w" }
""")
val targets = Map(
(CPathField("a"), (CPathField("a0"), List(
Pivot(IdStatus.ExcludeId, ColumnType.Array)))),
(CPathField("b"), (CPathField("b0"), List(
Pivot(IdStatus.ExcludeId, ColumnType.Array)))))
input must cartesianInto(targets)(expected)
}
}
override def is: SpecStructure =
pendingFragments(super.is, cartesianPendingExamples, "cart")
def evalCartesian(cartesian: Cartesian, stream: JsonStream): JsonStream
def cartesianInto(
cartouches: Map[CPathField, (CPathField, List[ScalarStage.Focused])])(
expected: JsonStream)
: Matcher[JsonStream] =
bestSemanticEqual(expected) ^^ { str: JsonStream =>
evalCartesian(Cartesian(cartouches), str)
}
}
trait FullSpec extends JsonSpec {
import ScalarStage.{Cartesian, Pivot, Project}
val fullPendingExamples: Set[Int]
"scalar stages" should {
"full-1 evaluate basic nested cartesians" in {
val targets = List(
Project(CPath.parse("a")),
Pivot(IdStatus.ExcludeId, ColumnType.Object),
Cartesian(Map(
(CPathField("b0"), (CPathField("b"), List(Pivot(IdStatus.ExcludeId, ColumnType.Object)))),
(CPathField("c0"), (CPathField("c"), Nil)))),
Cartesian(Map(
(CPathField("b1"), (CPathField("b0"), List(Pivot(IdStatus.ExcludeId, ColumnType.Array)))),
(CPathField("c1"), (CPathField("c0"), Nil)))))
val stages = ScalarStages(IdStatus.ExcludeId, targets)
val input = ldjson("""
{"a": {"x": {"b": {"k": [1, 2, 3], "j": 4}, "c": 5}, "y": {"b": {"k": [6, 7, 8], "j": 9}, "c": 10}}}
""")
val expected = ldjson("""
{"b1": 1, "c1": 5}
{"b1": 2, "c1": 5}
{"b1": 3, "c1": 5}
{"c1": 5}
{"b1": 6, "c1": 10}
{"b1": 7, "c1": 10}
{"b1": 8, "c1": 10}
{"c1": 10}
""")
input must interpretFullInto(stages)(expected)
}
}
override def is: SpecStructure =
pendingFragments(super.is, fullPendingExamples, "full")
def evalFull(stages: ScalarStages, stream: JsonStream): JsonStream
def interpretFullInto(
stages: ScalarStages)(
expected: JsonStream)
: Matcher[JsonStream] =
bestSemanticEqual(expected) ^^ { str: JsonStream =>
evalFull(stages, str)
}
}
}
| djspiewak/quasar | frontend/src/test/scala/quasar/ScalarStageSpec.scala | Scala | apache-2.0 | 95,570 |
package org.jetbrains.plugins.scala
package lang.rearranger
import com.intellij.openapi.editor.Document
import com.intellij.openapi.util.TextRange
import com.intellij.openapi.util.text.StringUtil
import com.intellij.psi._
import com.intellij.psi.codeStyle.arrangement.ArrangementUtil
import com.intellij.psi.codeStyle.arrangement.std.ArrangementSettingsToken
import com.intellij.psi.codeStyle.arrangement.std.StdArrangementTokens.EntryType._
import com.intellij.psi.codeStyle.arrangement.std.StdArrangementTokens.Modifier._
import org.jetbrains.annotations.NonNls
import org.jetbrains.plugins.scala.extensions.{OptionExt, PsiElementExt}
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.base.{ScConstructorInvocation, ScModifierList, ScReference, ScStableCodeReference}
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScExpression, ScReferenceExpression}
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScTemplateBody
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScClass, ScObject, ScTrait, ScTypeDefinition}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.{ScModifierListOwner, ScPackaging}
import org.jetbrains.plugins.scala.lang.psi.api.{ScalaElementVisitor, ScalaPsiElement, ScalaRecursiveElementVisitor}
import org.jetbrains.plugins.scala.lang.psi.types.result._
import org.jetbrains.plugins.scala.lang.psi.types.{ScType, api}
import scala.jdk.CollectionConverters._
import scala.collection.mutable
private class ScalaArrangementVisitor(parseInfo: ScalaArrangementParseInfo,
document: Document,
ranges: Iterable[TextRange],
groupingRules: Set[ArrangementSettingsToken])
extends ScalaElementVisitor {
import ScalaArrangementVisitor.getTokenType
private val arrangementEntries = mutable.Stack[ScalaArrangementEntry]()
private val splitBodyByExpressions = groupingRules.contains(RearrangerUtils.SPLIT_INTO_UNARRANGEABLE_BLOCKS_BY_EXPRESSIONS)
private val splitBodyByImplicits = groupingRules.contains(RearrangerUtils.SPLIT_INTO_UNARRANGEABLE_BLOCKS_BY_IMPLICITS)
private val unseparableRanges = mutable.HashMap[ScalaArrangementEntry /*parent*/ ,
mutable.Queue[ScalaArrangementEntry] /*Arrangement blocks*/ ]()
/**
* Traverses method body to build inter-method dependencies.
**/
override def visitTypeAlias(alias: ScTypeAlias): Unit = {
processEntry(getEntryForRange(alias.getParent,
expandTextRangeToComment(alias), getTokenType(alias), alias.getName, canArrange = true), alias, null)
}
override def visitConstructorInvocation(constrInvocation: ScConstructorInvocation): Unit = {
getEntryForRange(constrInvocation.getParent,
expandTextRangeToComment(constrInvocation), getTokenType(constrInvocation), null, canArrange = true)
}
override def visitFunction(fun: ScFunction): Unit = {
processEntry(getEntryForRange(fun.getParent,
expandTextRangeToComment(fun), getTokenType(fun), fun.getName, canArrange = true), fun, null)
}
override def visitFunctionDefinition(fun: ScFunctionDefinition): Unit = {
val entry = getEntryForRange(fun.getParent, expandTextRangeToComment(fun), getTokenType(fun), fun.getName, canArrange = true)
parseInfo.onMethodEntryCreated(fun, entry)
fun.body match {
case Some(body) =>
processEntry(entry, fun, body)
val methodBodyProcessor = new MethodBodyProcessor(parseInfo, fun)
body.accept(methodBodyProcessor)
case None => processEntry(entry, fun, null)
}
parseProperties(fun, entry)
}
override def visitMacroDefinition(fun: ScMacroDefinition): Unit = {
val entry = getEntryForRange(fun.getParent, expandTextRangeToComment(fun), getTokenType(fun), fun.getName, canArrange = true)
parseInfo.onMethodEntryCreated(fun, entry)
processEntry(entry, fun, null)
fun.getLastChild match {
case ref: ScStableCodeReference =>
val methodBodyProcessor = new MethodBodyProcessor(parseInfo, fun)
ref.accept(methodBodyProcessor)
case _ =>
}
}
override def visitFile(file: PsiFile): Unit = file.acceptChildren(this)
override def visitPatternDefinition(pat: ScPatternDefinition): Unit = {
//TODO: insert inter-field dependency here
val name = pat.pList.patterns.headOption.flatMap(_.bindings.headOption).safeMap(_.getName) match {
case Some(value) => value
case None => return
}
val range = expandTextRangeToComment(pat)
val entry = getEntryForRange(pat.getParent, range, getTokenType(pat), name, canArrange = true)
processEntry(entry, pat, pat.expr.orNull)
}
override def visitScalaElement(v: ScalaPsiElement): Unit = v match {
case packaging: ScPackaging => packaging.acceptChildren(this)
case _ => super.visitScalaElement(v)
}
override def visitValueDeclaration(v: ScValueDeclaration): Unit =
processEntry(getEntryForRange(v.getParent, expandTextRangeToComment(v), getTokenType(v), v.getName, canArrange = true), v, null)
override def visitVariableDefinition(varr: ScVariableDefinition): Unit = {
//TODO: insert inter-field dependency here
processEntry(getEntryForRange(varr.getParent, expandTextRangeToComment(varr), getTokenType(varr), varr.declaredElements.head.getName,
canArrange = true), varr, varr.expr.orNull)
}
override def visitVariableDeclaration(varr: ScVariableDeclaration): Unit =
processEntry(getEntryForRange(varr.getParent, expandTextRangeToComment(varr), getTokenType(varr), varr.declaredElements.head.getName, canArrange = true),
varr, null)
override def visitTypeDefinition(typedef: ScTypeDefinition): Unit = {
processEntry(
getEntryForRange(typedef.getParent, expandTextRangeToComment(typedef), getTokenType(typedef), typedef.getName, canArrange = true),
typedef,
typedef.extendsBlock.templateBody.orNull
)
}
private def withinBounds(range: TextRange) = ranges.foldLeft(false)((acc: Boolean, current: TextRange) =>
acc || current.intersects(range))
private def getCurrentEntry = if (arrangementEntries.isEmpty) null else arrangementEntries.top
private def getEntryForRange(parent: PsiElement, range: TextRange,
tokenType: ArrangementSettingsToken, name: String, canArrange: Boolean,
innerTokenType: Option[ArrangementSettingsToken] = None) = {
if (!withinBounds(range)) {
null
} else {
val currentEntry = getCurrentEntry
val newRange = if (canArrange && document != null) {
ArrangementUtil.expandToLineIfPossible(range, document)
} else {
range
}
if (currentEntry != null && currentEntry.spansTextRange(newRange)) {
currentEntry
} else {
//we only arrange elements in ScTypeDefinitions and top-level elements
val newEntry = new ScalaArrangementEntry(currentEntry, newRange, tokenType, name, canArrange &&
(parent.isInstanceOf[ScTemplateBody] || parent.isInstanceOf[PsiFile]), innerTokenType)
if (currentEntry == null) {
parseInfo.addEntry(newEntry)
} else {
currentEntry.addChild(newEntry)
}
newEntry
}
}
}
private def parseModifiers(modifiers: ScModifierList, entry: ScalaArrangementEntry): Unit = {
import org.jetbrains.plugins.scala.util.EnumSet._
if (modifiers != null) {
for (modName <- modifiers.modifiers) {
RearrangerUtils.getModifierByName(modName.text()).flatMap((mod: ArrangementSettingsToken) => {
entry.addModifier(mod); None
})
}
}
if (RearrangerUtils.scalaAccessModifiers.intersect(entry.getModifiers.asScala).isEmpty) {
entry addModifier PUBLIC
}
}
private def processEntry(entry: ScalaArrangementEntry, modifiers: ScModifierListOwner, nextPsiRoot: ScalaPsiElement): Unit = {
if (entry == null) return
if (modifiers != null) {
parseModifiers(modifiers.getModifierList, entry)
}
if (nextPsiRoot != null) {
//current entry may have been processed as unseparable range in upper block
val newEntry = arrangementEntries.isEmpty || arrangementEntries.head.getType != RearrangerUtils.UNSEPARABLE_RANGE ||
arrangementEntries.head.getStartOffset != entry.getStartOffset ||
arrangementEntries.head.getEndOffset != entry.getEndOffset
if (newEntry) arrangementEntries.push(entry)
try nextPsiRoot match {
case body: ScTemplateBody if splitBodyByExpressions || splitBodyByImplicits =>
traverseTypedefBody(body, if (newEntry) entry else arrangementEntries.head)
case _ => nextPsiRoot.acceptChildren(this)
} finally {
if (newEntry) arrangementEntries.pop()
}
}
}
private def traverseTypedefBody(psiRoot: ScTemplateBody, entry: ScalaArrangementEntry): Unit = {
genUnseparableRanges(psiRoot, entry)
val top = arrangementEntries.top
val queue = unseparableRanges.getOrElse(entry, mutable.Queue[ScalaArrangementEntry]())
// var unseparable =
def next() = Option(if (queue.isEmpty) null else queue.dequeue())
psiRoot.getChildren.foldLeft(false, if (queue.isEmpty) null else queue.dequeue())((acc, child) => {
val (insideBlock, unseparable) = acc
val childStart = child.getTextRange.getStartOffset
//check if there are any more unseparable blocks at all
val res = if (unseparable != null) {
//process current child with regard to current block
(insideBlock, childStart >= unseparable.getStartOffset, childStart >= unseparable.getEndOffset) match {
case (false, true, false) => //entering arrange block
arrangementEntries.push(unseparable)
(true, unseparable)
case (true, true, false) => (true, unseparable) //inside arrange block
case (true, true, true) => //leaving arrange block
arrangementEntries.pop()
//check whether new current block is immediately adjucent to the previous
//in such case leaving the previous means entering the current
next() match {
case Some(nextUnseparable) if childStart >= nextUnseparable.getStartOffset =>
arrangementEntries.push(nextUnseparable)
(true, nextUnseparable)
case Some(nextUnseparable) => (false, nextUnseparable)
case _ => (false, null)
}
case _ => (false, unseparable) //outside arrange block
}
} else (false, unseparable)
child.accept(this)
res
})
if (arrangementEntries.top != top) {
//the last block was entered, but has never been left; i.e. the last block spans body until the end
arrangementEntries.pop()
}
}
private def expandTextRangeToComment(node: ScalaPsiElement) = {
val prev = node.getPrevSibling
val first = node.getFirstChild
var currentNode: PsiElement = node
var range =
if (first != null && first.isInstanceOf[PsiComment] && prev != null && (!prev.isInstanceOf[PsiWhiteSpace] ||
prev.isInstanceOf[PsiWhiteSpace] && !prev.getText.contains("\\n") && prev.getPrevSibling != null)) {
new TextRange(node.getTextRange.getStartOffset + first.getTextRange.getLength + 1,
node.getTextRange.getEndOffset)
} else {
node.getTextRange
}
range = node.nextSibling match {
case Some(semicolon: PsiElement) if semicolon.getNode.getElementType == ScalaTokenTypes.tSEMICOLON =>
currentNode = semicolon; range.union(semicolon.getTextRange)
case _ => range
}
val res = currentNode.getNextSibling match {
case sibling: PsiWhiteSpace =>
if (!sibling.getText.contains("\\n")) {
sibling.getNextSibling match {
case comment: PsiComment => range.union(sibling.getTextRange).union(comment.getTextRange)
case nonComment: ScalaPsiElement => val next = nonComment.getFirstChild
if (next != null && next.isInstanceOf[PsiComment]) {
range.union(sibling.getTextRange).union(next.getTextRange)
} else {
range
}
case _ => range
}
} else {
range
}
case comment: PsiComment => range.union(comment.getTextRange)
case _ => range
}
res
}
private class MethodBodyProcessor(val info: ScalaArrangementParseInfo, val baseMethod: ScFunction) extends ScalaRecursiveElementVisitor {
override def visitReference(ref: ScReference): Unit = {
ref.resolve() match {
case fun: ScFunction if fun.getContainingClass == baseMethod.getContainingClass =>
assert(baseMethod != null)
info.registerDependency(baseMethod, fun)
case _ =>
}
super.visitReference(ref)
}
override def visitReferenceExpression(ref: ScReferenceExpression): Unit = {
visitReference(ref)
}
}
private def parseProperties(method: ScFunction, entry: ScalaArrangementEntry): Unit = {
if (!(groupingRules.contains(RearrangerUtils.JAVA_GETTERS_AND_SETTERS) || groupingRules.contains(RearrangerUtils.SCALA_GETTERS_AND_SETTERS)) ||
entry == null) {
return
}
val methodName = method.getName
val psiParent = method.getParent
if (ScalaArrangementVisitor.isJavaGetter(method)) {
parseInfo.registerJavaGetter((if (methodName.startsWith("get")) StringUtil.decapitalize(methodName.substring(3))
else StringUtil.decapitalize(methodName.substring(2)), psiParent), method, entry)
} else if (ScalaArrangementVisitor.isJavaSetter(method)) {
parseInfo.registerJavaSetter((StringUtil.decapitalize(methodName.substring(3)), psiParent), method, entry)
} else if (ScalaArrangementVisitor.isScalaGetter(method)) {
parseInfo.registerScalaGetter((methodName, psiParent), method, entry)
} else if (ScalaArrangementVisitor.isScalaSetter(method)) {
parseInfo.registerScalaSetter((ScalaArrangementVisitor.removeScalaSetterEnding(methodName), psiParent), method,
entry)
}
}
private def genUnseparableRanges(body: ScTemplateBody, entry: ScalaArrangementEntry) = {
body.getChildren.foldLeft(None)((startOffset, child) => {
val newOffset = if (startOffset.isDefined) startOffset.get else child.getTextRange.getStartOffset
if (isExpressionSplit(child) || isImplicitSplit(child)) {
if (!unseparableRanges.contains(entry)) {
unseparableRanges += (entry -> mutable.Queue[ScalaArrangementEntry]())
}
unseparableRanges.get(entry).foreach(queue =>
queue.enqueue(getEntryForRange(body, new TextRange(newOffset,
child.getTextRange.getEndOffset), RearrangerUtils.UNSEPARABLE_RANGE, null, canArrange = false, Some(getTokenType(child)))))
None
} else startOffset
})
}
private def isExpressionSplit(child: PsiElement): Boolean = splitBodyByExpressions && child.isInstanceOf[ScExpression]
private def isImplicitSplit(child: PsiElement): Boolean = splitBodyByImplicits &&
(child match {
case modListOwner: ScModifierListOwner => modListOwner.hasModifierProperty("implicit")
case _ => false
})
}
object ScalaArrangementVisitor {
private def nameStartsWith(name: String, @NonNls start: String) = {
val length = name.length
name.startsWith(start) && length > start.length && !(Character.isLowerCase(name.charAt(start.length())) &&
(length == start.length() + 1 || Character.isLowerCase(name.charAt(start.length() + 1))))
}
private def hasJavaGetterName(method: ScFunction) = {
import method.projectContext
val name = method.getName
val getAnd = "getAnd"
if (nameStartsWith(name, "get") && !(nameStartsWith(name, getAnd) && name.charAt(getAnd.length).isUpper)) {
method.returnType.getOrAny != api.Unit
} else if (nameStartsWith(name, "is")) {
method.returnType.getOrAny == api.Boolean
} else false
}
private def hasJavaSetterName(method: ScFunction) = {
val name = method.name
nameStartsWith(name, "set")
}
private def hasScalaSetterName(method: ScFunction) = method.name.endsWith("_=")
private def hasSetterSignature(method: ScFunction) =
method.getParameterList.getParametersCount == 1 && (method.returnType.getOrAny match {
case t if t.isAny => true
case returnType: ScType => returnType.isUnit
})
private def isJavaGetter(method: ScFunction) =
hasJavaGetterName(method) && method.getParameterList.getParametersCount == 0
private def isJavaSetter(method: ScFunction) = hasJavaSetterName(method) && hasSetterSignature(method)
private def isScalaSetter(method: ScFunction) = hasScalaSetterName(method) && hasSetterSignature(method)
private def isScalaGetter(method: ScFunction) = method.getParameterList.getParametersCount == 0
private def removeScalaSetterEnding(name: String) = name.substring(0, name.length - 4) //removing _$eq
def getTokenType(psiElement: PsiElement): ArrangementSettingsToken = {
import RearrangerUtils._
psiElement match {
case _: ScTypeAlias => TYPE
case _: ScMacroDefinition => MACRO
case _: ScConstructorInvocation => CONSTRUCTOR
case _: ScFunction | _: ScFunctionDefinition => FUNCTION
case _: ScPatternDefinition | _: ScValueDeclaration => VAL
case _: ScClass => CLASS
case _: ScVariableDefinition | _: ScVariableDeclaration => VAR
case _: ScTrait => TRAIT
case _: ScObject => OBJECT
case _ => UNSEPARABLE_RANGE
}
}
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/rearranger/ScalaArrangementVisitor.scala | Scala | apache-2.0 | 18,035 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalactic.Equality
import org.scalactic.Explicitly
import org.scalactic.StringNormalizations
import org.scalactic.Uniformity
import org.scalactic.Prettifier
import collection.GenTraversable
import SharedHelpers._
import StringNormalizations._
import org.scalactic.ArrayHelper.deep
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers._
/**
 * Tests that `contain inOrder (...)` honors explicitly supplied `Equality` and
 * `Uniformity` (normalization) instances — via `(after being ...)` and
 * `(decided by ... afterBeing ...)` — for `List`, `Array` and (on the JVM) Java lists,
 * covering both passing assertions and the message/stack depth of failing ones.
 *
 * NOTE(layout): several tests assert `failedCodeLineNumber` with `thisLineNumber - 2`,
 * so the relative line layout inside those `intercept` blocks must not change.
 */
class InOrderContainMatcherDeciderSpec extends AnyFunSpec with Explicitly {

  private val prettifier = Prettifier.default

  // Normalizes (Int, String) tuples by trimming the String component.
  val mapTrimmed: Uniformity[(Int, String)] =
    new Uniformity[(Int, String)] {
      def normalized(s: (Int, String)): (Int, String) = (s._1, s._2.trim)
      def normalizedCanHandle(b: Any) =
        b match {
          case (_: Int, _: String) => true
          case _ => false
        }
      def normalizedOrSame(b: Any) =
        b match {
          case (k: Int, v: String) => normalized((k, v))
          case _ => b
        }
    }

  // Deliberately stateful: each call shifts the value by a growing counter, so the
  // "normalized" forms of equal inputs never line up and matching is guaranteed to fail.
  val incremented: Uniformity[Int] =
    new Uniformity[Int] {
      var count = 0
      def normalized(s: Int): Int = {
        count += 1
        s + count
      }
      def normalizedCanHandle(b: Any) = b.isInstanceOf[Int]
      def normalizedOrSame(b: Any) =
        b match {
          case i: Int => normalized(i)
          case _ => b
        }
    }

  // Stateful tuple variant of `incremented`: shifts the Int key by a growing counter.
  val mapIncremented: Uniformity[(Int, String)] =
    new Uniformity[(Int, String)] {
      var count = 0
      def normalized(s: (Int, String)): (Int, String) = {
        count += 1
        (s._1 + count, s._2)
      }
      def normalizedCanHandle(b: Any) =
        b match {
          case (_: Int, _: String) => true
          case _ => false
        }
      def normalizedOrSame(b: Any) =
        b match {
          case (k: Int, v: String) => normalized((k, v))
          case _ => b
        }
    }

  // Stateful: appends an ever-growing counter to each String, so nothing matches.
  val appended: Uniformity[String] =
    new Uniformity[String] {
      var count = 0
      def normalized(s: String): String = {
        count += 1
        s + count
      }
      def normalizedCanHandle(b: Any) =
        b match {
          case _: String => true
          case _ => false
        }
      def normalizedOrSame(b: Any) =
        b match {
          case s: String => normalized(s)
          case _ => b
        }
    }

  // Uniformity that replaces selected Strings via the given lookup table, leaving
  // any String not present in the table unchanged.
  class Translated(map: Map[String, String]) extends Uniformity[String] {
    def normalized(s: String): String =
      map.get(s) match {
        case Some(translated) => translated
        case None => s
      }
    def normalizedCanHandle(b: Any) =
      b match {
        case _: String => true
        case _ => false
      }
    def normalizedOrSame(b: Any) =
      b match {
        case s: String => normalized(s)
        case _ => b
      }
  }

  // Case-insensitive String equality.
  val lowerCaseEquality =
    new Equality[String] {
      def areEqual(left: String, right: Any) =
        left.toLowerCase == (right match {
          case s: String => s.toLowerCase
          case other => other
        })
    }

  // Compares the reversed left-hand String against the lower-cased right-hand value.
  val reverseEquality =
    new Equality[String] {
      def areEqual(left: String, right: Any) =
        left.reverse == (right match {
          case s: String => s.toLowerCase
          case other => other
        })
    }

  describe("inOrder ") {

    // Asserts the "did not contain all of (...) in order" failure message plus the
    // file name and caller-computed line number recorded on the exception.
    def checkShouldContainStackDepth(e: exceptions.StackDepthException, left: Any, right: GenTraversable[Any], lineNumber: Int): Unit = {
      val leftText = FailureMessages.decorateToStringValue(prettifier, left)
      e.message should be (Some(leftText + " did not contain all of (" + right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ") + ") in order"))
      e.failedCodeFileName should be (Some("InOrderContainMatcherDeciderSpec.scala"))
      e.failedCodeLineNumber should be (Some(lineNumber))
    }

    // Negated counterpart: asserts the "contained all of (...) in order" failure message.
    def checkShouldNotContainStackDepth(e: exceptions.StackDepthException, left: Any, right: GenTraversable[Any], lineNumber: Int): Unit = {
      val leftText = FailureMessages.decorateToStringValue(prettifier, left)
      e.message should be (Some(leftText + " contained all of (" + right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ") + ") in order"))
      e.failedCodeFileName should be (Some("InOrderContainMatcherDeciderSpec.scala"))
      e.failedCodeLineNumber should be (Some(lineNumber))
    }

    it("should take specified equality when 'should contain' is used") {
      (List("1 ", "2", "3 ") should contain inOrder ("1", "2 ", "3")) (after being trimmed)
      (Array("1 ", "2", "3 ") should contain inOrder ("1", "2 ", "3")) (after being trimmed)
      // SKIP-SCALATESTJS,NATIVE-START
      (javaList("1", "2 ", "3") should contain inOrder ("1", "2 ", "3")) (after being trimmed)
      // SKIP-SCALATESTJS,NATIVE-END
    }

    it("should take specified equality when 'should not contain' is used") {
      (List("1 ", "2", "3 ") should not contain inOrder ("1", "2 ", "3")) (after being appended)
      (Array("1 ", "2", "3 ") should not contain inOrder ("1", "2 ", "3")) (after being appended)
      // SKIP-SCALATESTJS,NATIVE-START
      (javaList("1 ", "2", "3 ") should not contain inOrder ("1", "2 ", "3")) (after being appended)
      // SKIP-SCALATESTJS,NATIVE-END
    }

    it("should throw TestFailedException with correct stack depth and message when 'should contain custom matcher' failed with specified normalization") {
      // `appended` never produces matching values, so each assertion below must fail.
      val left1 = List("1 ", "2", "3 ")
      val e1 = intercept[exceptions.TestFailedException] {
        (left1 should contain inOrder ("1", "2 ", "3")) (after being appended)
      }
      checkShouldContainStackDepth(e1, left1, deep(Array("1", "2 ", "3")), thisLineNumber - 2)

      val left2 = Array("1 ", "2", "3 ")
      val e2 = intercept[exceptions.TestFailedException] {
        (left2 should contain inOrder ("1", "2 ", "3")) (after being appended)
      }
      checkShouldContainStackDepth(e2, left2, deep(Array("1", "2 ", "3")), thisLineNumber - 2)

      // SKIP-SCALATESTJS,NATIVE-START
      val left3 = javaList("1 ", "2", "3 ")
      val e3 = intercept[exceptions.TestFailedException] {
        (left3 should contain inOrder ("1", "2 ", "3")) (after being appended)
      }
      checkShouldContainStackDepth(e3, left3, deep(Array("1", "2 ", "3")), thisLineNumber - 2)
      // SKIP-SCALATESTJS,NATIVE-END
    }

    it("should throw TestFailedException with correct stack depth and message when 'should not contain custom matcher' failed with specified normalization") {
      // "eno" is translated to "one", so the sequence *is* contained and the negated
      // assertion must fail.
      val translated = new Translated(Map("eno" -> "one"))
      val left1 = List("one", "two", "three")
      val e1 = intercept[exceptions.TestFailedException] {
        (left1 should not contain inOrder ("eno", "two", "three")) (after being translated)
      }
      checkShouldNotContainStackDepth(e1, left1, deep(Array("eno", "two", "three")), thisLineNumber - 2)

      val left2 = Array("one", "two", "three")
      val e2 = intercept[exceptions.TestFailedException] {
        (left2 should not contain inOrder ("eno", "two", "three")) (after being translated)
      }
      checkShouldNotContainStackDepth(e2, left2, deep(Array("eno", "two", "three")), thisLineNumber - 2)

      // SKIP-SCALATESTJS,NATIVE-START
      val left3 = javaList("one", "two", "three")
      val e3 = intercept[exceptions.TestFailedException] {
        (left3 should not contain inOrder ("eno", "two", "three")) (after being translated)
      }
      checkShouldNotContainStackDepth(e3, left3, deep(Array("eno", "two", "three")), thisLineNumber - 2)
      // SKIP-SCALATESTJS,NATIVE-END
    }

    it("should take specified equality and normalization when 'should contain' is used") {
      (List("A ", "B", "C ") should contain inOrder ("a", "b ", "c")) (decided by lowerCaseEquality afterBeing trimmed)
      (Array("A ", "B", "C ") should contain inOrder ("a", "b ", "c")) (decided by lowerCaseEquality afterBeing trimmed)
      // SKIP-SCALATESTJS,NATIVE-START
      (javaList("A ", "B", "C ") should contain inOrder ("a", "b ", "c")) (decided by lowerCaseEquality afterBeing trimmed)
      // SKIP-SCALATESTJS,NATIVE-END
    }

    it("should take specified equality and normalization when 'should not contain' is used") {
      (List("one ", "two", "three ") should not contain inOrder ("one", "two ", "three")) (decided by reverseEquality afterBeing trimmed)
      (Array("one ", "two", "three ") should not contain inOrder ("one", "two ", "three")) (decided by reverseEquality afterBeing trimmed)
      // SKIP-SCALATESTJS,NATIVE-START
      (javaList("one ", "two", "three ") should not contain inOrder ("one", "two ", "three")) (decided by reverseEquality afterBeing trimmed)
      // SKIP-SCALATESTJS,NATIVE-END
    }

    it("should throw TestFailedException with correct stack depth and message when 'should contain custom matcher' failed with specified equality and normalization") {
      // reverseEquality compares reversed strings, so identical (trimmed) strings differ.
      val left1 = List("one ", "two", "three ")
      val e1 = intercept[exceptions.TestFailedException] {
        (left1 should contain inOrder ("one", "two ", "three")) (decided by reverseEquality afterBeing trimmed)
      }
      checkShouldContainStackDepth(e1, left1, deep(Array("one", "two ", "three")), thisLineNumber - 2)

      val left2 = Array("one ", "two", "three ")
      val e2 = intercept[exceptions.TestFailedException] {
        (left2 should contain inOrder ("one", "two ", "three")) (decided by reverseEquality afterBeing trimmed)
      }
      checkShouldContainStackDepth(e2, left2, deep(Array("one", "two ", "three")), thisLineNumber - 2)

      // SKIP-SCALATESTJS,NATIVE-START
      val left3 = javaList("one ", "two", "three ")
      val e3 = intercept[exceptions.TestFailedException] {
        (left3 should contain inOrder ("one", "two ", "three")) (decided by reverseEquality afterBeing trimmed)
      }
      checkShouldContainStackDepth(e3, left3, deep(Array("one", "two ", "three")), thisLineNumber - 2)
      // SKIP-SCALATESTJS,NATIVE-END
    }

    it("should throw TestFailedException with correct stack depth and message when 'should not contain custom matcher' failed with specified equality and normalization") {
      // Under reverseEquality the reversed expected values equal the trimmed originals,
      // so the sequence *is* contained and the negated assertion must fail.
      val left1 = List("one ", "two", "three ")
      val e1 = intercept[exceptions.TestFailedException] {
        (left1 should not contain inOrder ("eno ", "owt", "eerht ")) (decided by reverseEquality afterBeing trimmed)
      }
      checkShouldNotContainStackDepth(e1, left1, deep(Array("eno ", "owt", "eerht ")), thisLineNumber - 2)

      val left2 = Array("one ", "two", "three ")
      val e2 = intercept[exceptions.TestFailedException] {
        (left2 should not contain inOrder ("eno ", "owt", "eerht ")) (decided by reverseEquality afterBeing trimmed)
      }
      checkShouldNotContainStackDepth(e2, left2, deep(Array("eno ", "owt", "eerht ")), thisLineNumber - 2)

      // SKIP-SCALATESTJS,NATIVE-START
      val left3 = javaList("one ", "two", "three ")
      val e3 = intercept[exceptions.TestFailedException] {
        (left3 should not contain inOrder ("eno ", "owt", "eerht ")) (decided by reverseEquality afterBeing trimmed)
      }
      checkShouldNotContainStackDepth(e3, left3, deep(Array("eno ", "owt", "eerht ")), thisLineNumber - 2)
      // SKIP-SCALATESTJS,NATIVE-END
    }
  }
}
| scalatest/scalatest | jvm/scalatest-test/src/test/scala/org/scalatest/InOrderContainMatcherDeciderSpec.scala | Scala | apache-2.0 | 11,933 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.io.{InterruptedIOException, IOException, UncheckedIOException}
import java.nio.channels.ClosedByInterruptException
import java.util.UUID
import java.util.concurrent.{CountDownLatch, ExecutionException, TimeoutException, TimeUnit}
import java.util.concurrent.atomic.AtomicReference
import java.util.concurrent.locks.ReentrantLock
import scala.collection.JavaConverters._
import scala.collection.mutable.{Map => MutableMap}
import scala.util.control.NonFatal
import com.google.common.util.concurrent.UncheckedExecutionException
import org.apache.hadoop.fs.Path
import org.apache.spark.{SparkContext, SparkException}
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.streaming.InternalOutputModes._
import org.apache.spark.sql.connector.catalog.{SupportsWrite, Table}
import org.apache.spark.sql.connector.read.streaming.{Offset => OffsetV2, ReadLimit, SparkDataStream}
import org.apache.spark.sql.connector.write.{LogicalWriteInfoImpl, SupportsTruncate}
import org.apache.spark.sql.connector.write.streaming.StreamingWrite
import org.apache.spark.sql.execution.command.StreamingExplainCommand
import org.apache.spark.sql.execution.datasources.v2.StreamWriterCommitProgress
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.connector.SupportsStreamingUpdateAsAppend
import org.apache.spark.sql.streaming._
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.util.{Clock, UninterruptibleThread, Utils}
/** States for [[StreamExecution]]'s lifecycle. */
trait State
// Query object created; the execution thread has not finished initialization yet.
case object INITIALIZING extends State
// The query is running (or waiting for) batches.
case object ACTIVE extends State
// The query has stopped, either cleanly or because of an error.
case object TERMINATED extends State
// Reconfiguration in progress — set by implementations; not referenced in this file.
case object RECONFIGURING extends State
/**
* Manages the execution of a streaming Spark SQL query that is occurring in a separate thread.
* Unlike a standard query, a streaming query executes repeatedly each time new data arrives at any
* [[Source]] present in the query plan. Whenever new data arrives, a [[QueryExecution]] is created
* and the results are committed transactionally to the given [[Sink]].
*
* @param deleteCheckpointOnStop whether to delete the checkpoint if the query is stopped without
* errors. Checkpoint deletion can be forced with the appropriate
* Spark configuration.
*/
abstract class StreamExecution(
    override val sparkSession: SparkSession,
    override val name: String,
    val resolvedCheckpointRoot: String,
    val analyzedPlan: LogicalPlan,
    val sink: Table,
    val trigger: Trigger,
    val triggerClock: Clock,
    val outputMode: OutputMode,
    deleteCheckpointOnStop: Boolean)
  extends StreamingQuery with ProgressReporter with Logging {

  import org.apache.spark.sql.streaming.StreamingQueryListener._

  // Delay between polls for new data when none is currently available.
  protected val pollingDelayMs: Long = sparkSession.sessionState.conf.streamingPollingDelay

  // Minimum number of entries to keep when purging the offset/commit logs.
  protected val minLogEntriesToMaintain: Int = sparkSession.sessionState.conf.minBatchesToRetain
  require(minLogEntriesToMaintain > 0, "minBatchesToRetain has to be positive")

  /**
   * A lock used to wait/notify when batches complete. Use a fair lock to avoid thread starvation.
   */
  protected val awaitProgressLock = new ReentrantLock(true)
  protected val awaitProgressLockCondition = awaitProgressLock.newCondition()

  // Released once state leaves INITIALIZING (or on failure); see awaitInitialization().
  private val initializationLatch = new CountDownLatch(1)
  // Released once the execution thread started and QueryStartedEvent was posted; see start().
  private val startLatch = new CountDownLatch(1)
  // Released when the query terminates; see awaitTermination().
  private val terminationLatch = new CountDownLatch(1)

  // Implemented by subclasses; forcing this lazily creates the sources (see runStream()).
  def logicalPlan: LogicalPlan

  /**
   * Tracks how much data we have processed and committed to the sink or state store from each
   * input source.
   * Only the scheduler thread should modify this field, and only in atomic steps.
   * Other threads should make a shallow copy if they are going to access this field more than
   * once, since the field's value may change at any time.
   */
  @volatile
  var committedOffsets = new StreamProgress

  /**
   * Tracks the offsets that are available to be processed, but have not yet be committed to the
   * sink.
   * Only the scheduler thread should modify this field, and only in atomic steps.
   * Other threads should make a shallow copy if they are going to access this field more than
   * once, since the field's value may change at any time.
   */
  @volatile
  var availableOffsets = new StreamProgress

  /**
   * Tracks the latest offsets for each input source.
   * Only the scheduler thread should modify this field, and only in atomic steps.
   * Other threads should make a shallow copy if they are going to access this field more than
   * once, since the field's value may change at any time.
   */
  @volatile
  var latestOffsets = new StreamProgress

  // Commit progress of the most recent sink write, if any.
  @volatile
  var sinkCommitProgress: Option[StreamWriterCommitProgress] = None

  /** The current batchId or -1 if execution has not yet been initialized. */
  protected var currentBatchId: Long = -1

  /** Metadata associated with the whole query */
  protected val streamMetadata: StreamMetadata = {
    val metadataPath = new Path(checkpointFile("metadata"))
    val hadoopConf = sparkSession.sessionState.newHadoopConf()
    // Reuse persisted metadata if present so `id` is stable across restarts;
    // otherwise mint a new id and persist it.
    StreamMetadata.read(metadataPath, hadoopConf).getOrElse {
      val newMetadata = new StreamMetadata(UUID.randomUUID.toString)
      StreamMetadata.write(newMetadata, metadataPath, hadoopConf)
      newMetadata
    }
  }

  /** Metadata associated with the offset seq of a batch in the query. */
  protected var offsetSeqMetadata = OffsetSeqMetadata(
    batchWatermarkMs = 0, batchTimestampMs = 0, sparkSession.conf)

  /**
   * A map of current watermarks, keyed by the position of the watermark operator in the
   * physical plan.
   *
   * This state is 'soft state', which does not affect the correctness and semantics of watermarks
   * and is not persisted across query restarts.
   * The fault-tolerant watermark state is in offsetSeqMetadata.
   */
  protected val watermarkMsMap: MutableMap[Int, Long] = MutableMap()

  // `id` survives restarts (persisted in streamMetadata); `runId` is fresh per run.
  override val id: UUID = UUID.fromString(streamMetadata.id)

  override val runId: UUID = UUID.randomUUID

  /**
   * Pretty identified string of printing in logs. Format is
   * If name is set "queryName [id = xyz, runId = abc]" else "[id = xyz, runId = abc]"
   */
  protected val prettyIdString =
    Option(name).map(_ + " ").getOrElse("") + s"[id = $id, runId = $runId]"

  /**
   * A list of unique sources in the query plan. This will be set when generating logical plan.
   */
  @volatile protected var uniqueSources: Map[SparkDataStream, ReadLimit] = Map.empty

  /** Defines the internal state of execution */
  protected val state = new AtomicReference[State](INITIALIZING)

  // Most recent incremental execution; read by explainInternal() and progress reporting.
  @volatile
  var lastExecution: IncrementalExecution = _

  /** Holds the most recent input data for each source. */
  protected var newData: Map[SparkDataStream, LogicalPlan] = _

  // Set when the stream thread dies with an exception; surfaced through `exception`.
  @volatile
  protected var streamDeathCause: StreamingQueryException = null

  /* Get the call site in the caller thread; will pass this into the micro batch thread */
  private val callSite = Utils.getCallSite()

  /** Used to report metrics to coda-hale. This uses id for easier tracking across restarts. */
  lazy val streamMetrics = new MetricsReporter(
    this, s"spark.streaming.${Option(name).getOrElse(id)}")

  /** Isolated spark session to run the batches with. */
  private val sparkSessionForStream = sparkSession.cloneSession()

  /**
   * The thread that runs the micro-batches of this stream. Note that this thread must be
   * [[org.apache.spark.util.UninterruptibleThread]] to workaround KAFKA-1894: interrupting a
   * running `KafkaConsumer` may cause endless loop.
   */
  val queryExecutionThread: QueryExecutionThread =
    new QueryExecutionThread(s"stream execution thread for $prettyIdString") {
      override def run(): Unit = {
        // To fix call site like "run at <unknown>:0", we bridge the call site from the caller
        // thread to this micro batch thread
        sparkSession.sparkContext.setCallSite(callSite)
        runStream()
      }
    }

  /**
   * A write-ahead-log that records the offsets that are present in each batch. In order to ensure
   * that a given batch will always consist of the same data, we write to this log *before* any
   * processing is done.  Thus, the Nth record in this log indicated data that is currently being
   * processed and the N-1th entry indicates which offsets have been durably committed to the sink.
   */
  val offsetLog = new OffsetSeqLog(sparkSession, checkpointFile("offsets"))

  /**
   * A log that records the batch ids that have completed. This is used to check if a batch was
   * fully processed, and its output was committed to the sink, hence no need to process it again.
   * This is used (for instance) during restart, to help identify which batch to run next.
   */
  val commitLog = new CommitLog(sparkSession, checkpointFile("commits"))

  /** Whether all fields of the query have been initialized */
  private def isInitialized: Boolean = state.get != INITIALIZING

  /** Whether the query is currently active or not */
  override def isActive: Boolean = state.get != TERMINATED

  /** Returns the [[StreamingQueryException]] if the query was terminated by an exception. */
  override def exception: Option[StreamingQueryException] = Option(streamDeathCause)

  /** Returns the path of a file with `name` in the checkpoint directory. */
  protected def checkpointFile(name: String): String =
    new Path(new Path(resolvedCheckpointRoot), name).toString

  /**
   * Starts the execution. This returns only after the thread has started and [[QueryStartedEvent]]
   * has been posted to all the listeners.
   */
  def start(): Unit = {
    logInfo(s"Starting $prettyIdString. Use $resolvedCheckpointRoot to store the query checkpoint.")
    queryExecutionThread.setDaemon(true)
    queryExecutionThread.start()
    startLatch.await()  // Wait until thread started and QueryStart event has been posted
  }

  /**
   * Run the activated stream until stopped.
   */
  protected def runActivatedStream(sparkSessionForStream: SparkSession): Unit

  /**
   * Activate the stream and then wrap a callout to runActivatedStream, handling start and stop.
   *
   * Note that this method ensures that [[QueryStartedEvent]] and [[QueryTerminatedEvent]] are
   * posted such that listeners are guaranteed to get a start event before a termination.
   * Furthermore, this method also ensures that [[QueryStartedEvent]] event is posted before the
   * `start()` method returns.
   */
  private def runStream(): Unit = {
    try {
      // Interrupt-on-cancel lets stop() interrupt this query's running jobs via the job group.
      sparkSession.sparkContext.setJobGroup(runId.toString, getBatchDescriptionString,
        interruptOnCancel = true)
      sparkSession.sparkContext.setLocalProperty(StreamExecution.QUERY_ID_KEY, id.toString)
      if (sparkSession.sessionState.conf.streamingMetricsEnabled) {
        sparkSession.sparkContext.env.metricsSystem.registerSource(streamMetrics)
      }

      // `postEvent` does not throw non fatal exception.
      val startTimestamp = triggerClock.getTimeMillis()
      postEvent(new QueryStartedEvent(id, runId, name, formatTimestamp(startTimestamp)))

      // Unblock starting thread
      startLatch.countDown()

      // While active, repeatedly attempt to run batches.
      sparkSessionForStream.withActive {
        // Adaptive execution can change num shuffle partitions, disallow
        sparkSessionForStream.conf.set(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key, "false")
        // Disable cost-based join optimization as we do not want stateful operations
        // to be rearranged
        sparkSessionForStream.conf.set(SQLConf.CBO_ENABLED.key, "false")
        updateStatusMessage("Initializing sources")
        // force initialization of the logical plan so that the sources can be created
        logicalPlan

        offsetSeqMetadata = OffsetSeqMetadata(
          batchWatermarkMs = 0, batchTimestampMs = 0, sparkSessionForStream.conf)

        if (state.compareAndSet(INITIALIZING, ACTIVE)) {
          // Unblock `awaitInitialization`
          initializationLatch.countDown()
          runActivatedStream(sparkSessionForStream)
          updateStatusMessage("Stopped")
        } else {
          // `stop()` is already called. Let `finally` finish the cleanup.
        }
      }
    } catch {
      case e if isInterruptedByStop(e, sparkSession.sparkContext) =>
        // interrupted by stop()
        updateStatusMessage("Stopped")
      case e: IOException if e.getMessage != null
        && e.getMessage.startsWith(classOf[InterruptedException].getName)
        && state.get == TERMINATED =>
        // This is a workaround for HADOOP-12074: `Shell.runCommand` converts `InterruptedException`
        // to `new IOException(ie.toString())` before Hadoop 2.8.
        updateStatusMessage("Stopped")
      case e: Throwable =>
        streamDeathCause = new StreamingQueryException(
          toDebugString(includeLogicalPlan = isInitialized),
          s"Query $prettyIdString terminated with exception: ${e.getMessage}",
          e,
          committedOffsets.toOffsetSeq(sources, offsetSeqMetadata).toString,
          availableOffsets.toOffsetSeq(sources, offsetSeqMetadata).toString)
        logError(s"Query $prettyIdString terminated with error", e)
        updateStatusMessage(s"Terminated with exception: ${e.getMessage}")
        // Rethrow the fatal errors to allow the user using `Thread.UncaughtExceptionHandler` to
        // handle them
        if (!NonFatal(e)) {
          throw e
        }
    } finally queryExecutionThread.runUninterruptibly {
      // The whole `finally` block must run inside `runUninterruptibly` to avoid being interrupted
      // when a query is stopped by the user. We need to make sure the following codes finish
      // otherwise it may throw `InterruptedException` to `UncaughtExceptionHandler` (SPARK-21248).

      // Release latches to unblock the user codes since exception can happen in any place and we
      // may not get a chance to release them
      startLatch.countDown()
      initializationLatch.countDown()

      try {
        stopSources()
        state.set(TERMINATED)
        currentStatus = status.copy(isTriggerActive = false, isDataAvailable = false)

        // Update metrics and status
        sparkSession.sparkContext.env.metricsSystem.removeSource(streamMetrics)

        // Notify others
        sparkSession.streams.notifyQueryTermination(StreamExecution.this)
        postEvent(
          new QueryTerminatedEvent(id, runId, exception.map(_.cause).map(Utils.exceptionString)))

        // Delete the temp checkpoint when either force delete enabled or the query didn't fail
        if (deleteCheckpointOnStop &&
            (sparkSession.sessionState.conf
              .getConf(SQLConf.FORCE_DELETE_TEMP_CHECKPOINT_LOCATION) || exception.isEmpty)) {
          val checkpointPath = new Path(resolvedCheckpointRoot)
          try {
            logInfo(s"Deleting checkpoint $checkpointPath.")
            val fs = checkpointPath.getFileSystem(sparkSession.sessionState.newHadoopConf())
            fs.delete(checkpointPath, true)
          } catch {
            case NonFatal(e) =>
              // Deleting temp checkpoint folder is best effort, don't throw non fatal exceptions
              // when we cannot delete them.
              logWarning(s"Cannot delete $checkpointPath", e)
          }
        }
      } finally {
        awaitProgressLock.lock()
        try {
          // Wake up any threads that are waiting for the stream to progress.
          awaitProgressLockCondition.signalAll()
        } finally {
          awaitProgressLock.unlock()
        }
        terminationLatch.countDown()
      }
    }
  }

  // True iff `e` is an interruption caused by `stop()` (i.e. we already moved to TERMINATED).
  private def isInterruptedByStop(e: Throwable, sc: SparkContext): Boolean = {
    if (state.get == TERMINATED) {
      StreamExecution.isInterruptionException(e, sc)
    } else {
      false
    }
  }

  /** Forwards listener events to the session's streaming listener bus. */
  override protected def postEvent(event: StreamingQueryListener.Event): Unit = {
    sparkSession.streams.postListenerEvent(event)
  }

  /** Stops all streaming sources safely. */
  protected def stopSources(): Unit = {
    uniqueSources.foreach { case (source, _) =>
      try {
        source.stop()
      } catch {
        case NonFatal(e) =>
          logWarning(s"Failed to stop streaming source: $source. Resources may have leaked.", e)
      }
    }
  }

  /**
   * Interrupts the query execution thread and awaits its termination until until it exceeds the
   * timeout. The timeout can be set on "spark.sql.streaming.stopTimeout".
   *
   * @throws TimeoutException If the thread cannot be stopped within the timeout
   */
  @throws[TimeoutException]
  protected def interruptAndAwaitExecutionThreadTermination(): Unit = {
    val timeout = math.max(
      sparkSession.sessionState.conf.getConf(SQLConf.STREAMING_STOP_TIMEOUT), 0)
    queryExecutionThread.interrupt()
    queryExecutionThread.join(timeout)
    if (queryExecutionThread.isAlive) {
      // Capture where the stuck thread currently is so the timeout is actionable.
      val stackTraceException = new SparkException("The stream thread was last executing:")
      stackTraceException.setStackTrace(queryExecutionThread.getStackTrace)
      val timeoutException = new TimeoutException(
        s"Stream Execution thread for stream $prettyIdString failed to stop within $timeout " +
          s"milliseconds (specified by ${SQLConf.STREAMING_STOP_TIMEOUT.key}). See the cause on " +
          s"what was being executed in the streaming query thread.")
      timeoutException.initCause(stackTraceException)
      throw timeoutException
    }
  }

  /**
   * Blocks the current thread until processing for data from the given `source` has reached at
   * least the given `Offset`. This method is intended for use primarily when writing tests.
   */
  private[sql] def awaitOffset(sourceIndex: Int, newOffset: OffsetV2, timeoutMs: Long): Unit = {
    assertAwaitThread()
    def notDone = {
      // Snapshot the volatile field once per evaluation (it may change concurrently).
      val localCommittedOffsets = committedOffsets
      if (sources == null) {
        // sources might not be initialized yet
        false
      } else {
        val source = sources(sourceIndex)
        !localCommittedOffsets.contains(source) || localCommittedOffsets(source) != newOffset
      }
    }

    while (notDone) {
      awaitProgressLock.lock()
      try {
        awaitProgressLockCondition.await(timeoutMs, TimeUnit.MILLISECONDS)
        if (streamDeathCause != null) {
          throw streamDeathCause
        }
      } finally {
        awaitProgressLock.unlock()
      }
    }
    logDebug(s"Unblocked at $newOffset for ${sources(sourceIndex)}")
  }

  /** A flag to indicate that a batch has completed with no new data available. */
  @volatile protected var noNewData = false

  /**
   * Assert that the await APIs should not be called in the stream thread. Otherwise, it may cause
   * dead-lock, e.g., calling any await APIs in `StreamingQueryListener.onQueryStarted` will block
   * the stream thread forever.
   */
  private def assertAwaitThread(): Unit = {
    if (queryExecutionThread eq Thread.currentThread) {
      throw new IllegalStateException(
        "Cannot wait for a query state from the same thread that is running the query")
    }
  }

  /**
   * Await until all fields of the query have been initialized.
   */
  def awaitInitialization(timeoutMs: Long): Unit = {
    assertAwaitThread()
    require(timeoutMs > 0, "Timeout has to be positive")
    if (streamDeathCause != null) {
      throw streamDeathCause
    }
    initializationLatch.await(timeoutMs, TimeUnit.MILLISECONDS)
    // Re-check: the query may have failed while we were waiting.
    if (streamDeathCause != null) {
      throw streamDeathCause
    }
  }

  override def processAllAvailable(): Unit = {
    assertAwaitThread()
    if (streamDeathCause != null) {
      throw streamDeathCause
    }
    if (!isActive) return
    awaitProgressLock.lock()
    try {
      noNewData = false
      while (true) {
        // Bounded wait so we periodically re-check for failure/termination.
        awaitProgressLockCondition.await(10000, TimeUnit.MILLISECONDS)
        if (streamDeathCause != null) {
          throw streamDeathCause
        }
        if (noNewData || !isActive) {
          return
        }
      }
    } finally {
      awaitProgressLock.unlock()
    }
  }

  override def awaitTermination(): Unit = {
    assertAwaitThread()
    terminationLatch.await()
    if (streamDeathCause != null) {
      throw streamDeathCause
    }
  }

  override def awaitTermination(timeoutMs: Long): Boolean = {
    assertAwaitThread()
    require(timeoutMs > 0, "Timeout has to be positive")
    terminationLatch.await(timeoutMs, TimeUnit.MILLISECONDS)
    if (streamDeathCause != null) {
      throw streamDeathCause
    } else {
      // True iff the query terminated within the timeout.
      !isActive
    }
  }

  /** Expose for tests */
  def explainInternal(extended: Boolean): String = {
    if (lastExecution == null) {
      "No physical plan. Waiting for data."
    } else {
      val explain = StreamingExplainCommand(lastExecution, extended = extended)
      sparkSession.sessionState.executePlan(explain).executedPlan.executeCollect()
        .map(_.getString(0)).mkString("\\n")
    }
  }

  override def explain(extended: Boolean): Unit = {
    // scalastyle:off println
    println(explainInternal(extended))
    // scalastyle:on println
  }

  override def explain(): Unit = explain(extended = false)

  override def toString: String = {
    s"Streaming Query $prettyIdString [state = $state]"
  }

  // Debug description embedded in StreamingQueryException messages (see runStream's catch).
  private def toDebugString(includeLogicalPlan: Boolean): String = {
    val debugString =
      s"""|=== Streaming Query ===
          |Identifier: $prettyIdString
          |Current Committed Offsets: $committedOffsets
          |Current Available Offsets: $availableOffsets
          |
          |Current State: $state
          |Thread State: ${queryExecutionThread.getState}""".stripMargin
    if (includeLogicalPlan) {
      debugString + s"\\n\\nLogical Plan:\\n$logicalPlan"
    } else {
      debugString
    }
  }

  // Description used for the Spark job group (see setJobGroup in runStream()).
  protected def getBatchDescriptionString: String = {
    val batchDescription = if (currentBatchId < 0) "init" else currentBatchId.toString
    s"""|${Option(name).getOrElse("")}
        |id = $id
        |runId = $runId
        |batch = $batchDescription""".stripMargin
  }

  // Builds the sink's StreamingWrite, enforcing the capability required by each output mode.
  protected def createStreamingWrite(
      table: SupportsWrite,
      options: Map[String, String],
      inputPlan: LogicalPlan): StreamingWrite = {
    val info = LogicalWriteInfoImpl(
      queryId = id.toString,
      inputPlan.schema,
      new CaseInsensitiveStringMap(options.asJava))
    val writeBuilder = table.newWriteBuilder(info)
    val write = outputMode match {
      case Append =>
        writeBuilder.build()
      case Complete =>
        // TODO: we should do this check earlier when we have capability API.
        require(writeBuilder.isInstanceOf[SupportsTruncate],
          table.name + " does not support Complete mode.")
        writeBuilder.asInstanceOf[SupportsTruncate].truncate().build()
      case Update =>
        require(writeBuilder.isInstanceOf[SupportsStreamingUpdateAsAppend],
          table.name + " does not support Update mode.")
        writeBuilder.asInstanceOf[SupportsStreamingUpdateAsAppend].build()
    }
    write.toStreaming
  }

  /** Removes entries up to `threshold` from the offset and commit logs. */
  protected def purge(threshold: Long): Unit = {
    logDebug(s"Purging metadata at threshold=$threshold")
    offsetLog.purge(threshold)
    commitLog.purge(threshold)
  }
}
object StreamExecution {
  val QUERY_ID_KEY = "sql.streaming.queryId"
  val IS_CONTINUOUS_PROCESSING = "__is_continuous_processing"

  /**
   * Returns true if `e` — or, transitively, one of its causes — indicates that the stream was
   * interrupted, e.g. because `stop()` cancelled the query's job group.
   *
   * @param e  the exception observed by the stream thread
   * @param sc used to read the current job group id for matching cancellation messages
   */
  def isInterruptionException(e: Throwable, sc: SparkContext): Boolean = e match {
    // InterruptedIOException - thrown when an I/O operation is interrupted
    // ClosedByInterruptException - thrown when an I/O operation upon a channel is interrupted
    case _: InterruptedException | _: InterruptedIOException | _: ClosedByInterruptException =>
      true
    // The cause of the following exceptions may be one of the above exceptions:
    //
    // UncheckedIOException - thrown by codes that cannot throw a checked IOException, such as
    //                        BiFunction.apply
    // ExecutionException - thrown by codes running in a thread pool and these codes throw an
    //                      exception
    // UncheckedExecutionException - thrown by codes that cannot throw a checked
    //                               ExecutionException, such as BiFunction.apply
    case e2 @ (_: UncheckedIOException | _: ExecutionException | _: UncheckedExecutionException)
        if e2.getCause != null =>
      isInterruptionException(e2.getCause, sc)
    case se: SparkException =>
      // Expression-oriented rewrite: the original used an early `return false` here, which is
      // discouraged in Scala; behavior is unchanged.
      val jobGroup = sc.getLocalProperty("spark.jobGroup.id")
      if (jobGroup == null) {
        false
      } else {
        val errorMsg = se.getMessage
        if (errorMsg.contains("cancelled") && errorMsg.contains(jobGroup) && se.getCause == null) {
          // A cause-less "cancelled" message naming our job group means stop() cancelled us.
          true
        } else if (se.getCause != null) {
          isInterruptionException(se.getCause, sc)
        } else {
          false
        }
      }
    case _ =>
      false
  }

  /** Whether the path contains special chars that will be escaped when converting to a `URI`. */
  def containsSpecialCharsInPath(path: Path): Boolean = {
    path.toUri.getPath != new Path(path.toUri.toString).toUri.getPath
  }
}
/**
 * A special thread to run the stream query. Some codes require to run in the QueryExecutionThread
 * and will use `classOf[QueryExecutionThread]` to check.
 *
 * @param name human-readable thread name, forwarded to [[UninterruptibleThread]]
 */
abstract class QueryExecutionThread(name: String) extends UninterruptibleThread(name)
| BryanCutler/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamExecution.scala | Scala | apache-2.0 | 26,324 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.matfast.matrix
// export APIs from mllib for in-place operations
import com.github.fommil.netlib.{BLAS => NetlibBLAS, F2jBLAS}
import com.github.fommil.netlib.BLAS.{getInstance => NativeBLAS}
import org.apache.spark.internal.Logging
import org.apache.spark.mllib.linalg.{DenseVector, SparseVector, Vector}
/**
* BLAS routines for MLlib's vectors and matrices.
*/
object BLAS extends Serializable with Logging {
  // Cached BLAS backends; created lazily on first access and reused afterwards.
  @transient private var _f2jBLAS: NetlibBLAS = _
  @transient private var _nativeBLAS: NetlibBLAS = _
  // For level-1 routines, we use Java implementation.
  private def f2jBLAS: NetlibBLAS = {
    // NOTE(review): lazy init through a plain var is not synchronized; concurrent first
    // calls could build two F2jBLAS instances (harmless since the object is stateless,
    // but confirm this matches the intended usage).
    if (_f2jBLAS == null) {
      _f2jBLAS = new F2jBLAS
    }
    _f2jBLAS
  }
  /**
   * y += a * x (in place).
   *
   * Dispatches on the concrete vector types: x may be sparse or dense, but y must be
   * dense because the update is performed in place on y's backing array.
   * Note the asymmetric failure modes: a bad x type raises UnsupportedOperationException
   * while a non-dense y raises IllegalArgumentException — callers may depend on this.
   */
  def axpy(a: Double, x: Vector, y: Vector): Unit = {
    require(x.size == y.size)
    y match {
      case dy: DenseVector =>
        x match {
          case sx: SparseVector =>
            axpysd(a, sx, dy)
          case dx: DenseVector =>
            axpydd(a, dx, dy)
          case _ =>
            // Defensive: mllib Vector only ships dense/sparse, so this branch is
            // unreachable in practice — TODO confirm against the Vector hierarchy.
            throw new UnsupportedOperationException(
              s"axpy doesn't support x type ${x.getClass}.")
        }
      case _ =>
        throw new IllegalArgumentException(
          s"axpy only supports adding to a dense vector but got type ${y.getClass}.")
    }
  }
  /**
   * y += a * x for dense x and dense y; delegates to BLAS daxpy (stride 1).
   */
  private def axpydd(a: Double, x: DenseVector, y: DenseVector): Unit = {
    val n = x.size
    f2jBLAS.daxpy(n, a, x.values, 1, y.values, 1)
  }
  /**
   * y += a * x for sparse x and dense y: scatter-add only the non-zero entries of x.
   */
  private def axpysd(a: Double, x: SparseVector, y: DenseVector): Unit = {
    val xValues = x.values
    val xIndices = x.indices
    val yValues = y.values
    val nnz = xIndices.size
    // Special-case a == 1.0 to avoid a multiply per non-zero.
    if (a == 1.0) {
      var k = 0
      while (k < nnz) {
        yValues(xIndices(k)) += xValues(k)
        k += 1
      }
    } else {
      var k = 0
      while (k < nnz) {
        yValues(xIndices(k)) += a * xValues(k)
        k += 1
      }
    }
  }
  /**
   * Returns dot(x, y), dispatching to a specialized kernel per combination of
   * dense/sparse operands. The dense/sparse case is symmetric, so (dense, sparse)
   * reuses the (sparse, dense) kernel with arguments swapped.
   */
  def dot(x: Vector, y: Vector): Double = {
    require(x.size == y.size,
      "BLAS.dot(x: Vector, y:Vector) was given Vectors with non-matching sizes:" +
      " x.size = " + x.size + ", y.size = " + y.size)
    (x, y) match {
      case (dx: DenseVector, dy: DenseVector) =>
        dotdd(dx, dy)
      case (sx: SparseVector, dy: DenseVector) =>
        dotsd(sx, dy)
      case (dx: DenseVector, sy: SparseVector) =>
        dotsd(sy, dx)
      case (sx: SparseVector, sy: SparseVector) =>
        dotss(sx, sy)
      case _ =>
        throw new IllegalArgumentException(s"dot doesn't support (${x.getClass}, ${y.getClass}).")
    }
  }
  /**
   * dot(x, y) for two dense vectors; delegates to BLAS ddot (stride 1).
   */
  private def dotdd(x: DenseVector, y: DenseVector): Double = {
    val n = x.size
    f2jBLAS.ddot(n, x.values, 1, y.values, 1)
  }
  /**
   * dot(x, y) for sparse x and dense y: sum over x's non-zeros only.
   */
  private def dotsd(x: SparseVector, y: DenseVector): Double = {
    val xValues = x.values
    val xIndices = x.indices
    val yValues = y.values
    val nnz = xIndices.size
    var sum = 0.0
    var k = 0
    while (k < nnz) {
      sum += xValues(k) * yValues(xIndices(k))
      k += 1
    }
    sum
  }
  /**
   * dot(x, y) for two sparse vectors: sorted-merge over the two ascending index
   * arrays, accumulating products only where indices coincide.
   */
  private def dotss(x: SparseVector, y: SparseVector): Double = {
    val xValues = x.values
    val xIndices = x.indices
    val yValues = y.values
    val yIndices = y.indices
    val nnzx = xIndices.size
    val nnzy = yIndices.size
    var kx = 0
    var ky = 0
    var sum = 0.0
    // y catching x: for each x index, advance ky until y's index reaches it.
    while (kx < nnzx && ky < nnzy) {
      val ix = xIndices(kx)
      while (ky < nnzy && yIndices(ky) < ix) {
        ky += 1
      }
      if (ky < nnzy && yIndices(ky) == ix) {
        sum += xValues(kx) * yValues(ky)
        ky += 1
      }
      kx += 1
    }
    sum
  }
  /**
   * y = x (overwrites y in place). y must be dense; x may be sparse or dense.
   * For a sparse x, positions absent from x are explicitly zeroed in y.
   */
  def copy(x: Vector, y: Vector): Unit = {
    val n = y.size
    require(x.size == n)
    y match {
      case dy: DenseVector =>
        x match {
          case sx: SparseVector =>
            val sxIndices = sx.indices
            val sxValues = sx.values
            val dyValues = dy.values
            val nnz = sxIndices.size
            // Single pass over y: fill zeros between consecutive non-zero indices of x,
            // writing each stored value as it is reached.
            var i = 0
            var k = 0
            while (k < nnz) {
              val j = sxIndices(k)
              while (i < j) {
                dyValues(i) = 0.0
                i += 1
              }
              dyValues(i) = sxValues(k)
              i += 1
              k += 1
            }
            // Zero the tail past the last stored entry.
            while (i < n) {
              dyValues(i) = 0.0
              i += 1
            }
          case dx: DenseVector =>
            Array.copy(dx.values, 0, dy.values, 0, n)
        }
      case _ =>
        throw new IllegalArgumentException(s"y must be dense in copy but got ${y.getClass}")
    }
  }
  /**
   * x = a * x (in place). For a sparse vector only the stored values are scaled,
   * which is equivalent since absent entries are zero.
   */
  def scal(a: Double, x: Vector): Unit = {
    x match {
      case sx: SparseVector =>
        f2jBLAS.dscal(sx.values.size, a, sx.values, 1)
      case dx: DenseVector =>
        f2jBLAS.dscal(dx.values.size, a, dx.values, 1)
      case _ =>
        throw new IllegalArgumentException(s"scal doesn't support vector type ${x.getClass}.")
    }
  }
  // For level-3 routines, we use the native BLAS.
  // NOTE(review): same unsynchronized lazy-init pattern as f2jBLAS above.
  private def nativeBLAS: NetlibBLAS = {
    if (_nativeBLAS == null) {
      _nativeBLAS = NativeBLAS
    }
    _nativeBLAS
  }
  /**
   * A := alpha * x * x^T^ + A  (symmetric rank-1 update, in place on A)
   * @param alpha a real scalar that will be multiplied to x * x^T^.
   * @param x the vector x that contains the n elements.
   * @param A the symmetric matrix A. Size of n x n.
   */
  def syr(alpha: Double, x: Vector, A: DenseMatrix) {
    val mA = A.numRows
    val nA = A.numCols
    require(mA == nA, s"A is not a square matrix (and hence is not symmetric). A: $mA x $nA")
    require(mA == x.size, s"The size of x doesn't match the rank of A. A: $mA x $nA, x: ${x.size}")
    x match {
      case dv: DenseVector => syrd(alpha, dv, A)
      case sv: SparseVector => syrs(alpha, sv, A)
      case _ =>
        throw new IllegalArgumentException(s"syr doesn't support vector type ${x.getClass}.")
    }
  }
  // Dense x: delegate to native dsyr, which only updates the upper ("U") triangle,
  // then mirror the upper triangle into the lower one so A is fully populated.
  private def syrd(alpha: Double, x: DenseVector, A: DenseMatrix) {
    // NOTE(review): nA/mA are named opposite to the rest of this file
    // (nA = numRows, mA = numCols); harmless here because A is square, but confusing.
    val nA = A.numRows
    val mA = A.numCols
    nativeBLAS.dsyr("U", x.size, alpha, x.values, 1, A.values, nA)
    // Fill lower triangular part of A
    var i = 0
    while (i < mA) {
      var j = i + 1
      while (j < nA) {
        A(j, i) = A(i, j)
        j += 1
      }
      i += 1
    }
  }
  // Sparse x: the rank-1 update only touches rows/columns where x is non-zero, so loop
  // over the nnz x nnz outer product directly (updates both triangles in one pass).
  private def syrs(alpha: Double, x: SparseVector, A: DenseMatrix) {
    // mA here is the column count, used as the column stride of A's backing array.
    val mA = A.numCols
    val xIndices = x.indices
    val xValues = x.values
    val nnz = xValues.length
    val Avalues = A.values
    var i = 0
    while (i < nnz) {
      val multiplier = alpha * xValues(i)
      val offset = xIndices(i) * mA
      var j = 0
      while (j < nnz) {
        Avalues(xIndices(j) + offset) += multiplier * xValues(j)
        j += 1
      }
      i += 1
    }
  }
  /**
   * C := alpha * A * B + beta * C
   * @param alpha a scalar to scale the multiplication A * B.
   * @param A the matrix A that will be left multiplied to B. Size of m x k.
   * @param B the matrix B that will be left multiplied by A. Size of k x n.
   * @param beta a scalar that can be used to scale matrix C.
   * @param C the resulting matrix C. Size of m x n. C.isTransposed must be false.
   */
  def gemm(
      alpha: Double,
      A: MLMatrix,
      B: DenseMatrix,
      beta: Double,
      C: DenseMatrix): Unit = {
    require(!C.isTransposed,
      "The matrix C cannot be the product of a transpose() call. C.isTransposed must be false.")
    if (alpha == 0.0 && beta == 1.0) {
      // alpha == 0 annihilates A * B and beta == 1 leaves C untouched: nothing to do.
      logDebug("gemm: alpha is equal to 0 and beta is equal to 1. Returning C.")
    } else if (alpha == 0.0) {
      // Only the beta * C term survives; scale C in place.
      f2jBLAS.dscal(C.values.length, beta, C.values, 1)
    } else {
      // Dispatch on the storage of A (B and C are always dense here).
      A match {
        case sparse: SparseMatrix => gemmsdd(alpha, sparse, B, beta, C)
        case dense: DenseMatrix => gemmddd(alpha, dense, B, beta, C)
        case _ =>
          throw new IllegalArgumentException(s"gemm doesn't support matrix type ${A.getClass}.")
      }
    }
  }
  /**
   * C := alpha * A * B + beta * C
   * For `DenseMatrix` A: delegates to native dgemm, expressing transposition through
   * the "T"/"N" flags instead of materializing transposed copies.
   */
  private def gemmddd(
      alpha: Double,
      A: DenseMatrix,
      B: DenseMatrix,
      beta: Double,
      C: DenseMatrix): Unit = {
    val tAstr = if (A.isTransposed) "T" else "N"
    val tBstr = if (B.isTransposed) "T" else "N"
    // Leading dimensions of the arrays as physically stored.
    val lda = if (!A.isTransposed) A.numRows else A.numCols
    val ldb = if (!B.isTransposed) B.numRows else B.numCols
    require(A.numCols == B.numRows,
      s"The columns of A don't match the rows of B. A: ${A.numCols}, B: ${B.numRows}")
    require(A.numRows == C.numRows,
      s"The rows of C don't match the rows of A. C: ${C.numRows}, A: ${A.numRows}")
    require(B.numCols == C.numCols,
      s"The columns of C don't match the columns of B. C: ${C.numCols}, A: ${B.numCols}")
    nativeBLAS.dgemm(tAstr, tBstr, A.numRows, B.numCols, A.numCols, alpha, A.values, lda,
      B.values, ldb, beta, C.values, C.numRows)
  }
  /**
   * C := alpha * A * B + beta * C
   * For `SparseMatrix` A.
   *
   * Two storage layouts are handled: when A.isTransposed the arrays are effectively CSR
   * (colPtrs acts as row pointers, rowIndices as per-row column indices), otherwise CSC.
   * B may itself be transposed, giving four loop variants in total.
   */
  private def gemmsdd(
      alpha: Double,
      A: SparseMatrix,
      B: DenseMatrix,
      beta: Double,
      C: DenseMatrix): Unit = {
    val mA: Int = A.numRows
    val nB: Int = B.numCols
    val kA: Int = A.numCols
    val kB: Int = B.numRows
    require(kA == kB, s"The columns of A don't match the rows of B. A: $kA, B: $kB")
    require(mA == C.numRows, s"The rows of C don't match the rows of A. C: ${C.numRows}, A: $mA")
    require(nB == C.numCols,
      s"The columns of C don't match the columns of B. C: ${C.numCols}, A: $nB")
    val Avals = A.values
    val Bvals = B.values
    val Cvals = C.values
    val ArowIndices = A.rowIndices
    val AcolPtrs = A.colPtrs
    // Slicing is easy in this case. This is the optimal multiplication setting for sparse matrices
    if (A.isTransposed) {
      // CSR A: each C(row, col) is a dot product between a sparse row of A and a column of B.
      var colCounterForB = 0
      if (!B.isTransposed) { // Expensive to put the check inside the loop
        while (colCounterForB < nB) {
          var rowCounterForA = 0
          val Cstart = colCounterForB * mA
          val Bstart = colCounterForB * kA
          while (rowCounterForA < mA) {
            var i = AcolPtrs(rowCounterForA)
            val indEnd = AcolPtrs(rowCounterForA + 1)
            var sum = 0.0
            while (i < indEnd) {
              sum += Avals(i) * Bvals(Bstart + ArowIndices(i))
              i += 1
            }
            val Cindex = Cstart + rowCounterForA
            Cvals(Cindex) = beta * Cvals(Cindex) + sum * alpha
            rowCounterForA += 1
          }
          colCounterForB += 1
        }
      } else {
        // Transposed B: fall back to B's (row, col) accessor instead of raw array indexing.
        while (colCounterForB < nB) {
          var rowCounterForA = 0
          val Cstart = colCounterForB * mA
          while (rowCounterForA < mA) {
            var i = AcolPtrs(rowCounterForA)
            val indEnd = AcolPtrs(rowCounterForA + 1)
            var sum = 0.0
            while (i < indEnd) {
              sum += Avals(i) * B(ArowIndices(i), colCounterForB)
              i += 1
            }
            val Cindex = Cstart + rowCounterForA
            Cvals(Cindex) = beta * Cvals(Cindex) + sum * alpha
            rowCounterForA += 1
          }
          colCounterForB += 1
        }
      }
    } else {
      // Scale matrix first if `beta` is not equal to 1.0
      if (beta != 1.0) {
        f2jBLAS.dscal(C.values.length, beta, C.values, 1)
      }
      // Perform matrix multiplication and add to C. The rows of A are multiplied by the columns of
      // B, and added to C.
      var colCounterForB = 0 // the column to be updated in C
      if (!B.isTransposed) { // Expensive to put the check inside the loop
        while (colCounterForB < nB) {
          var colCounterForA = 0 // The column of A to multiply with the row of B
          val Bstart = colCounterForB * kB
          val Cstart = colCounterForB * mA
          while (colCounterForA < kA) {
            var i = AcolPtrs(colCounterForA)
            val indEnd = AcolPtrs(colCounterForA + 1)
            val Bval = Bvals(Bstart + colCounterForA) * alpha
            while (i < indEnd) {
              Cvals(Cstart + ArowIndices(i)) += Avals(i) * Bval
              i += 1
            }
            colCounterForA += 1
          }
          colCounterForB += 1
        }
      } else {
        while (colCounterForB < nB) {
          var colCounterForA = 0 // The column of A to multiply with the row of B
          val Cstart = colCounterForB * mA
          while (colCounterForA < kA) {
            var i = AcolPtrs(colCounterForA)
            val indEnd = AcolPtrs(colCounterForA + 1)
            val Bval = B(colCounterForA, colCounterForB) * alpha
            while (i < indEnd) {
              Cvals(Cstart + ArowIndices(i)) += Avals(i) * Bval
              i += 1
            }
            colCounterForA += 1
          }
          colCounterForB += 1
        }
      }
    }
  }
  /**
   * y := alpha * A * x + beta * y
   * @param alpha a scalar to scale the multiplication A * x.
   * @param A the matrix A that will be left multiplied to x. Size of m x n.
   * @param x the vector x that will be left multiplied by A. Size of n x 1.
   * @param beta a scalar that can be used to scale vector y.
   * @param y the resulting vector y. Size of m x 1. Updated in place.
   */
  def gemv(
      alpha: Double,
      A: MLMatrix,
      x: Vector,
      beta: Double,
      y: DenseVector): Unit = {
    require(A.numCols == x.size,
      s"The columns of A don't match the number of elements of x. A: ${A.numCols}, x: ${x.size}")
    require(A.numRows == y.size,
      s"The rows of A don't match the number of elements of y. A: ${A.numRows}, y:${y.size}")
    if (alpha == 0.0 && beta == 1.0) {
      // alpha == 0 annihilates A * x and beta == 1 leaves y untouched: nothing to do.
      logDebug("gemv: alpha is equal to 0 and beta is equal to 1. Returning y.")
    } else if (alpha == 0.0) {
      // Only the beta * y term survives.
      scal(beta, y)
    } else {
      // Dispatch to the kernel specialized for the (matrix, vector) storage combination.
      (A, x) match {
        case (smA: SparseMatrix, dvx: DenseVector) =>
          gemvsdd(alpha, smA, dvx, beta, y)
        case (smA: SparseMatrix, svx: SparseVector) =>
          gemvssd(alpha, smA, svx, beta, y)
        case (dmA: DenseMatrix, dvx: DenseVector) =>
          gemvddd(alpha, dmA, dvx, beta, y)
        case (dmA: DenseMatrix, svx: SparseVector) =>
          gemvdsd(alpha, dmA, svx, beta, y)
        case _ =>
          throw new IllegalArgumentException(s"gemv doesn't support running on matrix type " +
            s"${A.getClass} and vector type ${x.getClass}.")
      }
    }
  }
  /**
   * y := alpha * A * x + beta * y
   * For `DenseMatrix` A and `DenseVector` x: delegates to native dgemv, passing the
   * dimensions of the array as physically stored and a "T" flag when A is transposed.
   */
  private def gemvddd(
      alpha: Double,
      A: DenseMatrix,
      x: DenseVector,
      beta: Double,
      y: DenseVector): Unit = {
    val tStrA = if (A.isTransposed) "T" else "N"
    val mA = if (!A.isTransposed) A.numRows else A.numCols
    val nA = if (!A.isTransposed) A.numCols else A.numRows
    nativeBLAS.dgemv(tStrA, mA, nA, alpha, A.values, mA, x.values, 1, beta,
      y.values, 1)
  }
  /**
   * y := alpha * A * x + beta * y
   * For `DenseMatrix` A and `SparseVector` x: per output row, only the entries of A at
   * x's non-zero columns contribute. The array index differs with A's layout
   * (row-major when transposed, column-major otherwise).
   */
  private def gemvdsd(
      alpha: Double,
      A: DenseMatrix,
      x: SparseVector,
      beta: Double,
      y: DenseVector): Unit = {
    val mA: Int = A.numRows
    val nA: Int = A.numCols
    val Avals = A.values
    val xIndices = x.indices
    val xNnz = xIndices.length
    val xValues = x.values
    val yValues = y.values
    if (A.isTransposed) {
      // Row-major storage: element (r, c) lives at r * nA + c.
      var rowCounterForA = 0
      while (rowCounterForA < mA) {
        var sum = 0.0
        var k = 0
        while (k < xNnz) {
          sum += xValues(k) * Avals(xIndices(k) + rowCounterForA * nA)
          k += 1
        }
        yValues(rowCounterForA) = sum * alpha + beta * yValues(rowCounterForA)
        rowCounterForA += 1
      }
    } else {
      // Column-major storage: element (r, c) lives at c * mA + r.
      var rowCounterForA = 0
      while (rowCounterForA < mA) {
        var sum = 0.0
        var k = 0
        while (k < xNnz) {
          sum += xValues(k) * Avals(xIndices(k) * mA + rowCounterForA)
          k += 1
        }
        yValues(rowCounterForA) = sum * alpha + beta * yValues(rowCounterForA)
        rowCounterForA += 1
      }
    }
  }
/**
* y := alpha * A * x + beta * y
* For `SparseMatrix` A and `SparseVector` x.
*/
private def gemvssd(
alpha: Double,
A: SparseMatrix,
x: SparseVector,
beta: Double,
y: DenseVector): Unit = {
val xValues = x.values
val xIndices = x.indices
val xNnz = xIndices.length
val yValues = y.values
val mA: Int = A.numRows
val nA: Int = A.numCols
val Avals = A.values
val Arows = if (!A.isTransposed) A.rowIndices else A.colPtrs
val Acols = if (!A.isTransposed) A.colPtrs else A.rowIndices
if (A.isTransposed) {
var rowCounter = 0
while (rowCounter < mA) {
var i = Arows(rowCounter)
val indEnd = Arows(rowCounter + 1)
var sum = 0.0
var k = 0
while (k < xNnz && i < indEnd) {
if (xIndices(k) == Acols(i)) {
sum += Avals(i) * xValues(k)
i += 1
}
k += 1
}
yValues(rowCounter) = sum * alpha + beta * yValues(rowCounter)
rowCounter += 1
}
} else {
if (beta != 1.0) scal(beta, y)
var colCounterForA = 0
var k = 0
while (colCounterForA < nA && k < xNnz) {
if (xIndices(k) == colCounterForA) {
var i = Acols(colCounterForA)
val indEnd = Acols(colCounterForA + 1)
val xTemp = xValues(k) * alpha
while (i < indEnd) {
val rowIndex = Arows(i)
yValues(Arows(i)) += Avals(i) * xTemp
i += 1
}
k += 1
}
colCounterForA += 1
}
}
}
  /**
   * y := alpha * A * x + beta * y
   * For `SparseMatrix` A and `DenseVector` x.
   *
   * When A.isTransposed the storage is effectively CSR (Arows = row pointers,
   * Acols = per-row column indices); otherwise CSC.
   */
  private def gemvsdd(
      alpha: Double,
      A: SparseMatrix,
      x: DenseVector,
      beta: Double,
      y: DenseVector): Unit = {
    val xValues = x.values
    val yValues = y.values
    val mA: Int = A.numRows
    val nA: Int = A.numCols
    val Avals = A.values
    val Arows = if (!A.isTransposed) A.rowIndices else A.colPtrs
    val Acols = if (!A.isTransposed) A.colPtrs else A.rowIndices
    // Slicing is easy in this case. This is the optimal multiplication setting for sparse matrices
    if (A.isTransposed) {
      // CSR: each output entry is a dense dot of one sparse row of A with x.
      var rowCounter = 0
      while (rowCounter < mA) {
        var i = Arows(rowCounter)
        val indEnd = Arows(rowCounter + 1)
        var sum = 0.0
        while (i < indEnd) {
          sum += Avals(i) * xValues(Acols(i))
          i += 1
        }
        yValues(rowCounter) = beta * yValues(rowCounter) + sum * alpha
        rowCounter += 1
      }
    } else {
      // CSC: scale y once up front, then scatter each column of A weighted by x.
      if (beta != 1.0) scal(beta, y)
      // Perform matrix-vector multiplication and add to y
      var colCounterForA = 0
      while (colCounterForA < nA) {
        var i = Acols(colCounterForA)
        val indEnd = Acols(colCounterForA + 1)
        val xVal = xValues(colCounterForA) * alpha
        while (i < indEnd) {
          val rowIndex = Arows(i)
          yValues(rowIndex) += Avals(i) * xVal
          i += 1
        }
        colCounterForA += 1
      }
    }
  }
}
| yuyongyang800/SparkDistributedMatrix | src/main/scala/org/apache/spark/sql/matfast/matrix/BLAS.scala | Scala | apache-2.0 | 20,671 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.arrow.tools
import org.locationtech.geomesa.arrow.tools.export.ArrowExportCommand
import org.locationtech.geomesa.arrow.tools.ingest.ArrowIngestCommand
import org.locationtech.geomesa.arrow.tools.stats._
import org.locationtech.geomesa.arrow.tools.status._
import org.locationtech.geomesa.tools.{Command, Runner}
/** Command-line entry point wiring the Arrow-backed variants of the GeoMesa tools. */
object ArrowRunner extends Runner {

  override val name: String = "geomesa-arrow"

  /** The generic commands inherited from [[Runner]] followed by the Arrow-specific ones. */
  override protected def commands: Seq[Command] = {
    val arrowCommands: Seq[Command] = Seq(
      new ArrowDescribeSchemaCommand,
      new ArrowExportCommand,
      new ArrowIngestCommand,
      new ArrowGetTypeNamesCommand,
      new ArrowGetSftConfigCommand,
      new ArrowStatsBoundsCommand,
      new ArrowStatsCountCommand,
      new ArrowStatsTopKCommand,
      new ArrowStatsHistogramCommand
    )
    super.commands ++ arrowCommands
  }
}
| locationtech/geomesa | geomesa-arrow/geomesa-arrow-tools/src/main/scala/org/locationtech/geomesa/arrow/tools/ArrowRunner.scala | Scala | apache-2.0 | 1,297 |
package controllers
import views._
import com.p44.broadcast.FishStoreBroadcaster
import akka.pattern.ask
import akka.util.Timeout
import play.api.Play.current
import play.api.mvc.{ Action, Controller }
import scala.concurrent.duration._
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import play.api.libs.concurrent.Akka
import play.api.libs.json._
import play.api.libs.iteratee.{ Concurrent, Enumeratee, Enumerator }
import play.api.libs.EventSource
import play.api.mvc.Request
import play.api.mvc.AnyContent
import play.api.Logger
/**
 * Restful services for Fish Store Two: the store-two landing page, a simulated catch
 * feed, an actor-backed delivery endpoint, and a server-sent-events activity stream.
 */
object FishStoreTwoController extends Controller {
  import com.p44.actors.store.two.FishStoreTwo
  import com.p44.models.{ DeliveryReceipt, Fish, FishStoreModels }
  // one reference to the controller actor (created once when the object initializes)
  val controllerActor = Akka.system.actorOf(FishStoreTwo.propsController, name = "fishStoreTwoController")
  // number of fish generated for a simulated catch
  lazy val defaultCatchSize = 100
  implicit val timeout = Timeout(6.seconds) // used for ask ?
  /** Renders the store-two home page. */
  def viewStoreTwo = Action.async { request =>
    Future { Ok(views.html.fishstoretwo.render) }
  }
  /**
   * Provides a new load of fish as a json array (simulated).
   * NOTE(review): the original comment documented the route as "GET /store_one/catch/latest"
   * but this is the store TWO controller — presumably /store_two in conf/routes; confirm.
   */
  def getCatchLatest = Action.async {
    val f: Future[String] = FishStoreModels.aBunchOfFishToJson(FishStoreModels.generateFish(defaultCatchSize))
    f.map(s => Ok(s)) // Note: f.onComplete does not work here because it returns Unit
  }
  /**
   * Takes a shipment of fish (json array body) into the store.
   * Returns 400 when the body cannot be parsed; otherwise asks the controller actor to
   * perform the delivery and returns the resulting receipt as pretty-printed json.
   * NOTE(review): original comment said "POST /store_one/delivery" — verify conf/routes.
   */
  def postDelivery = Action.async { request =>
    val fDelivery = Future[Option[List[Fish]]] {
      resolveDeliveryJsonToObj(request)
    }
    fDelivery.flatMap { delivery: Option[List[Fish]] =>
      delivery.isDefined match {
        case false => Future.successful(BadRequest("Please check your request for content type of json as well as the json format."))
        case _ => {
          val f: Future[Any] = controllerActor ? FishStoreTwo.Deliver(delivery.get) // deliver with ask
          val fdr: Future[DeliveryReceipt] = f.mapTo[DeliveryReceipt]
          fdr.map { dr: DeliveryReceipt =>
            Ok(Json.prettyPrint(Json.toJson(dr)))
          }
        }
      }
    }
  }
  /** Takes a delivery, currently a json array of fish and creates an object to pass to the actors */
  def resolveDeliveryJsonToObj(request: Request[AnyContent]): Option[List[Fish]] = {
    val jsonBody: Option[JsValue] = request.body.asJson
    jsonBody.isDefined match {
      case false => None
      case true => {
        Json.fromJson[List[Fish]](jsonBody.get).asOpt
      }
    }
  }
  // Added...
  /** Enumeratee for detecting disconnect of the stream; logs when the consumer finishes. */
  def connDeathWatch(addr: String): Enumeratee[JsValue, JsValue] = {
    Enumeratee.onIterateeDone { () =>
      Logger.info(addr + " - fishStoreTwoOut disconnected")
    }
  }
  /** Controller action serving activity for fish store two (no filter) as an SSE stream. */
  def fishStoreTwoDeliveryFeed = Action { request =>
    Logger.info("FEED fishStoreTwo - " + request.remoteAddress + " - fishStoreTwo connected")
    // Enumerator: a producer of typed chunks of data (non-blocking producer)
    val enumerator: Enumerator[JsValue] = FishStoreBroadcaster.fishStoreTwoOut
    Ok.chunked(enumerator
      through Concurrent.buffer(100) // buffers chunks and frees the enumerator to keep processing
      through connDeathWatch(request.remoteAddress)
      through EventSource()).as("text/event-stream")
  }
  /*
   * Chunked transfer encoding is a data transfer mechanism in version 1.1 of the Hypertext Transfer Protocol (HTTP)
   * in which a web server serves content in a series of chunks.
   * It uses the Transfer-Encoding HTTP response header instead of the Content-Length header
   * http://www.playframework.com/documentation/2.2.0/ScalaStream
   * http://en.wikipedia.org/wiki/Chunked_transfer_encoding
   */
} | p44/FishStore | app/controllers/FishStoreTwoController.scala | Scala | apache-2.0 | 3,970 |
/*
Copyright (c) 2017, Qvantel
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Qvantel nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL Qvantel BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.qvantel.jsonapi
import org.specs2.ScalaCheck
import org.specs2.mutable.Specification
import _root_.spray.json.DefaultJsonProtocol._
import _root_.spray.json.{JsObject, JsonParser}
import com.netaporter.uri.Uri
import com.netaporter.uri.dsl._
import com.qvantel.jsonapi.Helpers._
/**
 * Exercises the `includes`/`jsonApiFormat` macros: include-path validation for ToOne,
 * Option[ToOne] and ToMany relationships (including cyclic graphs), and the json
 * materialization of attributes and multi-level relationships.
 */
final class MacrosSpec extends Specification with ScalaCheck {
  implicit val apiRoot: com.qvantel.jsonapi.ApiRoot = ApiRoot(None)
  "jsonapi.org macros" should {
    "correctly generate single level ToOne includes" in {
      final case class Root(id: String, child: ToOne[Child])
      final case class Child(id: String)
      implicit lazy val childIncludes: com.qvantel.jsonapi.Includes[Child] = includes[Child]
      includes[Root].includeAllowed("child") must beTrue
      // Any mangled variant of the valid include path must be rejected.
      prop { part: String =>
        childIncludes.includeAllowed(part) must beFalse
      }.setGen(makeGenInclude("child"))
    }
    "correctly generate single level Option[ToOne] includes" in {
      final case class Root(id: String, maybeChild: Option[ToOne[Child]])
      final case class Child(id: String)
      implicit lazy val childIncludes: com.qvantel.jsonapi.Includes[Child] = includes[Child]
      includes[Root].includeAllowed("test") must beFalse
      includes[Root].includeAllowed("maybe-child") must beTrue
      prop { part: String =>
        childIncludes.includeAllowed(part) must beFalse
      }.setGen(makeGenInclude("test", "maybe-child"))
    }
    "correctly generate single level ToMany includes" in {
      final case class Root(id: String, children: ToMany[Child])
      final case class Child(id: String)
      implicit lazy val childIncludes: com.qvantel.jsonapi.Includes[Child] = includes[Child]
      includes[Root].includeAllowed("test") must beFalse
      includes[Root].includeAllowed("children") must beTrue
      prop { part: String =>
        childIncludes.includeAllowed(part) must beFalse
      }.setGen(makeGenInclude("test", "children"))
    }
    "correctly handle multi level includes" in {
      final case class Root(id: String, child: ToOne[Child1], leaf: ToOne[Child2])
      final case class Child1(id: String, child: ToOne[Child2])
      final case class Child2(id: String)
      implicit lazy val child2Includes: com.qvantel.jsonapi.Includes[Child2] = includes[Child2]
      implicit lazy val child1Includes: com.qvantel.jsonapi.Includes[Child1] = includes[Child1]
      implicit lazy val rootIncludes: com.qvantel.jsonapi.Includes[Root] = includes[Root]
      rootIncludes.includeAllowed("test") must beFalse
      rootIncludes.includeAllowed("child") must beTrue
      rootIncludes.includeAllowed("leaf") must beTrue
      rootIncludes.includeAllowed("child.child") must beTrue
      prop { part: String =>
        rootIncludes.includeAllowed(part) must beFalse
      }.setGen(makeGenInclude("test", "child", "leaf", "child"))
    }
    "correctly handle looped includes" in {
      final case class Fun(id: String, loop: ToOne[Loop])
      final case class Loop(id: String, fun: ToOne[Fun])
      // lazy required when manually calling includes macro normally just use @jsonApiResource annotation
      implicit lazy val funIncludes: Includes[Fun] = includes[Fun]
      implicit lazy val loopIncludes: Includes[Loop] = includes[Loop]
      funIncludes.includeAllowed("loop") must beTrue
      loopIncludes.includeAllowed("fun") must beTrue
      // Arbitrarily deep alternating paths through the cycle must stay valid.
      funIncludes.includeAllowed(
        (0 to 50)
          .map { x =>
            if (x % 2 == 0) "loop" else "fun"
          }
          .mkString(".")) must beTrue
      loopIncludes.includeAllowed(
        (0 to 50)
          .map { x =>
            if (x % 2 == 0) "fun" else "loop"
          }
          .mkString(".")) must beTrue
      prop { part: String =>
        funIncludes.includeAllowed(part) must beFalse
        loopIncludes.includeAllowed(part) must beFalse
      }.setGen(makeGenInclude("fun", "loop"))
    }
    "correctly handle self loop include" in {
      final case class Looped(id: String, loop: ToOne[Looped])
      implicit lazy val loopedIncludes: Includes[Looped] = includes[Looped]
      loopedIncludes.includeAllowed("loop") must beTrue
      // BUG FIX: this expression previously had no matcher applied, so it was a
      // discarded value and asserted nothing.
      loopedIncludes.includeAllowed((0 to 50).map(x => "loop").mkString(".")) must beTrue
      prop { part: String =>
        loopedIncludes.includeAllowed(part) must beFalse
      }.setGen(makeGenInclude("loop"))
    }
    "correctly produce json" in {
      import _root_.spray.json.lenses.JsonLenses._
      final case class Root(id: String,
                            nameMangling: String,
                            rField: Boolean,
                            aField: String,
                            bField: Int,
                            cField: BigDecimal)
          extends HasId
      implicit val resourceType: com.qvantel.jsonapi.ResourceType[Root] = ResourceType[Root]("root")
      implicit val pathTo: PathTo[Root] = new PathToId[Root] {
        override final def root: Uri = "/roots"
      }
      val format = jsonApiFormat[Root]
      val data = Root("1", "test data", false, "a field", 3, BigDecimal(3.2))
      val json = format.write(data)
      json.extract[String]('attributes / "name-mangling") must_== data.nameMangling
      json.extract[String]('id) must_== "1"
      json.extract[String]('type) must_== "root"
      // check that attributes are ordered
      json.extract[JsObject]('attributes) must_== JsonParser(
        """{"a-field":"a field","b-field":3,"c-field":3.2,"name-mangling":"test data","r-field":false}""")
    }
    "materialize multi level relationships" in {
      final case class Root(id: String, nameMangling: String, child: ToOne[Child]) extends HasId
      final case class Child(id: String, child: Option[ToOne[Leaf]], children: ToMany[Leaf]) extends HasId
      final case class Leaf(id: String, end: ToOne[End]) extends HasId
      final case class End(id: String) extends HasId
      object End {
        implicit lazy val endResourceType: com.qvantel.jsonapi.ResourceType[End] = ResourceType[End]("end")
        implicit lazy val endPathTo: PathTo[End] = new PathToId[End] {
          override final def root: Uri = "/end"
        }
        implicit lazy val endFormat: com.qvantel.jsonapi.JsonApiFormat[End] = jsonApiFormat[End]
        implicit lazy val endIncludes: Includes[End] = includes[End]
      }
      object Leaf {
        implicit lazy val leafResourceType: com.qvantel.jsonapi.ResourceType[Leaf] = ResourceType[Leaf]("leaves")
        implicit lazy val leafPathTo: PathTo[Leaf] = new PathToId[Leaf] {
          override final def root: Uri = "/leaves"
        }
        implicit lazy val leafFormat: com.qvantel.jsonapi.JsonApiFormat[Leaf] = jsonApiFormat[Leaf]
        implicit lazy val leafIncludes: Includes[Leaf] = includes[Leaf]
      }
      object Child {
        implicit lazy val childResourceType: com.qvantel.jsonapi.ResourceType[Child] = ResourceType[Child]("children")
        implicit lazy val childPathTo: PathTo[Child] = new PathToId[Child] {
          override final def root: Uri = "/children"
        }
        implicit lazy val childFormat: com.qvantel.jsonapi.JsonApiFormat[Child] = jsonApiFormat[Child]
        implicit lazy val childIncludes: Includes[Child] = includes[Child]
      }
      object Root {
        implicit lazy val rootResourceType: com.qvantel.jsonapi.ResourceType[Root] = ResourceType[Root]("roots")
        implicit lazy val rootPathTo: PathTo[Root] = new PathToId[Root] {
          override final def root: Uri = "/roots"
        }
        implicit lazy val rootFormat: com.qvantel.jsonapi.JsonApiFormat[Root] = jsonApiFormat[Root]
        implicit lazy val rootIncludes: Includes[Root] = includes[Root]
      }
      val end = End("666")
      val leaf = Leaf("3", ToOne.loaded(end))
      val child = Child("2",
                        Some(ToOne.loaded(leaf)),
                        ToMany.loaded(Seq(Leaf("5", ToOne.loaded(end)), Leaf("30", ToOne.loaded(end)))))
      val root = Root("1", "test data", ToOne.loaded(child))
      val json = Root.rootFormat.write(root)
      import _root_.spray.json.lenses.JsonLenses._
      json.extract[String]('attributes / "name-mangling") must_== root.nameMangling
      json.extract[String]('id) must_== "1"
      json.extract[String]('type) must_== "roots"
      json.extract[String]('relationships / 'child / 'data / 'id) must_== child.id
      // Every resource reachable through loaded relationships must appear in `included`.
      val includedJson = Root.rootFormat.included(root)
      includedJson.exists(_.extract[String]('id) == "5") must_== true
      includedJson.exists(_.extract[String]('id) == "30") must_== true
      includedJson.exists(_.extract[String]('id) == "3") must_== true
      includedJson.exists(_.extract[String]('id) == "2") must_== true
      includedJson.exists(_.extract[String]('id) == "666") must_== true
    }
  }
}
| Doikor/jsonapi-scala | core/src/test/scala/com/qvantel/jsonapi/MacrosSpec.scala | Scala | bsd-3-clause | 10,473 |
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.tools.commands
import java.util.regex.Pattern
import com.beust.jcommander.Parameter
/**
 * JCommander parameter bundle shared by every command that connects to an
 * Accumulo instance (credentials, instance location and scan authorizations).
 */
class AccumuloParams {
  @Parameter(names = Array("-u", "--user"), description = "Accumulo user name", required = true)
  var user: String = null
  @Parameter(names = Array("-p", "--password"), description = "Accumulo password (will prompt if not supplied)")
  var password: String = null
  @Parameter(names = Array("-i", "--instance"), description = "Accumulo instance name")
  var instance: String = null
  @Parameter(names = Array("-z", "--zookeepers"), description = "Zookeepers (host[:port], comma separated)")
  var zookeepers: String = null
  @Parameter(names = Array("-a", "--auths"), description = "Accumulo authorizations")
  var auths: String = null
  @Parameter(names = Array("-v", "--visibilities"), description = "Accumulo scan visibilities")
  var visibilities: String = null
  @Parameter(names = Array("-mc", "--mock"), description = "Run everything with a mock accumulo instance instead of a real one (true/false)", arity = 1)
  var useMock: Boolean = false
}
/** Accumulo parameters plus the GeoMesa catalog table to operate on. */
class GeoMesaParams extends AccumuloParams {
  @Parameter(names = Array("-c", "--catalog"), description = "Catalog table name for GeoMesa", required = true)
  var catalog: String = null
}
/** Catalog parameters plus a mandatory simple feature type name. */
class FeatureParams extends GeoMesaParams {
  @Parameter(names = Array("-fn", "--feature-name"), description = "Simple Feature Type name on which to operate", required = true)
  var featureName: String = null
}
/** Same flags as [[FeatureParams]], but the feature type name may be omitted. */
class OptionalFeatureParams extends GeoMesaParams {
  @Parameter(names = Array("-fn", "--feature-name"), description = "Simple Feature Type name on which to operate", required = false)
  var featureName: String = null
}
/** Feature parameters plus a mandatory CQL filter predicate. */
class RequiredCqlFilterParameters extends FeatureParams {
  @Parameter(names = Array("-q", "--cql"), description = "CQL predicate", required = true)
  var cqlFilter: String = null
}
/** Feature parameters plus an optional CQL filter predicate. */
class OptionalCqlFilterParameters extends FeatureParams {
  @Parameter(names = Array("-q", "--cql"), description = "CQL predicate")
  var cqlFilter: String = null
}
/** Parameters used when creating a new simple feature type in a catalog. */
class CreateFeatureParams extends FeatureParams {
  @Parameter(names = Array("-s", "--spec"), description = "SimpleFeatureType specification as a GeoTools spec string, SFT config, or file with either")
  var spec: String = null
  @Parameter(names = Array("-dt", "--dtg"), description = "DateTime field name to use as the default dtg")
  var dtgField: String = null
  @Parameter(names = Array("-st", "--use-shared-tables"), description = "Use shared tables in Accumulo for feature storage (true/false)", arity = 1)
  var useSharedTables: Boolean = true //default to true in line with datastore
}
/** Mixin flag to skip interactive confirmation on destructive commands. */
class ForceParams {
  @Parameter(names = Array("-f", "--force"), description = "Force deletion without prompt", required = false)
  var force: Boolean = false
}
/** Mixin regex used to select which items a bulk delete applies to. */
class PatternParams {
  @Parameter(names = Array("-pt", "--pattern"), description = "Regular expression to select items to delete", required = false)
  var pattern: Pattern = null
}
/** Accumulo parameters plus the raster storage table. */
class RasterParams extends AccumuloParams {
  @Parameter(names = Array("-t", "--raster-table"), description = "Accumulo table for storing raster data", required = true)
  var table: String = null
}
/** Raster parameters plus ingest/query tuning knobs (all optional). */
class CreateRasterParams extends RasterParams {
  @Parameter(names = Array("-wm", "--write-memory"), description = "Memory allocation for ingestion operation")
  var writeMemory: String = null
  @Parameter(names = Array("-wt", "--write-threads"), description = "Threads for writing raster data")
  var writeThreads: Integer = null
  @Parameter(names = Array("-qt", "--query-threads"), description = "Threads for quering raster data")
  var queryThreads: Integer = null
}
| drackaer/geomesa | geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/commands/AccumuloParams.scala | Scala | apache-2.0 | 4,150 |
package org.jetbrains.plugins.scala.extensions
import com.intellij.psi.PsiElement
/**
 * Extractor wrapping `getPrevSiblingNotWhitespaceComment` (an extension
 * defined in this package): yields `Some(sibling)` or `None` when the
 * underlying call returns `null` — presumably when no previous sibling other
 * than whitespace/comments exists (TODO confirm against the extension impl).
 */
object PrevSiblingNotWhitespaceComment {
  def unapply(e: PsiElement): Option[PsiElement] = Option(e.getPrevSiblingNotWhitespaceComment)
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/extensions/PrevSiblingNotWhitespaceComment.scala | Scala | apache-2.0 | 223 |
package com.twitter.finagle.mux
import com.twitter.concurrent.AsyncQueue
import com.twitter.conversions.time._
import com.twitter.finagle.context.Contexts
import com.twitter.finagle.mux.lease.exp.Lessor
import com.twitter.finagle.mux.transport.Message
import com.twitter.finagle.stats.NullStatsReceiver
import com.twitter.finagle.tracing._
import com.twitter.finagle.transport.QueueTransport
import com.twitter.finagle.util.{BufReader, BufWriter}
import com.twitter.finagle.{Failure, Path, Service, SimpleFilter, Status}
import com.twitter.io.Buf
import com.twitter.util.{Await, Duration, Future, Promise, Return, Throw, Time}
import java.util.concurrent.atomic.AtomicInteger
import org.junit.runner.RunWith
import org.mockito.invocation.InvocationOnMock
import org.mockito.Matchers.any
import org.mockito.Mockito.{never, verify, when}
import org.mockito.stubbing.Answer
import org.scalactic.source.Position
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.junit.{AssertionsForJUnit, JUnitRunner}
import org.scalatest.mock.MockitoSugar
import org.scalatest.{FunSuite, OneInstancePerTest, Tag}
private object TestContext {
  // Broadcast context key used by the dispatch tests to verify that request
  // contexts survive a mux round trip; marshalling is the identity on Buf.
  val testContext = new Contexts.broadcast.Key[Buf]("com.twitter.finagle.mux.MuxContext") {
    def marshal(buf: Buf) = buf
    def tryUnmarshal(buf: Buf) = Return(buf)
  }
}
// Shared harness wiring a mux client and server back to back over in-memory
// queue transports.  Concrete subclasses fix `canDispatch` to exercise both
// the Tdispatch-capable and the legacy (Treq-only) server behaviours.
private[mux] abstract class ClientServerTest
  extends FunSuite
  with OneInstancePerTest
  with MockitoSugar
  with AssertionsForJUnit
  with Eventually
  with IntegrationPatience {
  // Whether the simulated server accepts Tdispatch messages.
  def canDispatch: Boolean
  val tracer = new BufferingTracer
  // Per-test fixture: client session/dispatcher and server dispatcher over a
  // pair of queues; every write round-trips through encode/decode so the wire
  // format is exercised too.
  class Ctx(config: FailureDetector.Config = FailureDetector.NullConfig) {
    import Message.{encode, decode}
    val clientToServer = new AsyncQueue[Message]
    val serverToClient = new AsyncQueue[Message]
    val serverTransport =
      new QueueTransport(writeq=serverToClient, readq=clientToServer) {
        override def write(m: Message) = super.write(decode(encode(m)))
      }
    val clientTransport =
      new QueueTransport(writeq=clientToServer, readq=serverToClient) {
        override def write(m: Message) = super.write(decode(encode(m)))
      }
    val service = mock[Service[Request, Response]]
    val session = new ClientSession(
      clientTransport, config, "test", NullStatsReceiver)
    val client = ClientDispatcher.newRequestResponse(session)
    // Ping plumbing: the server replies to Tping only after the test flips
    // `pingRep`; `nping` counts how many pings the server has seen.
    val nping = new AtomicInteger(0)
    val pingReq, pingRep = new Latch
    def ping() = {
      nping.incrementAndGet()
      val f = pingRep.get
      pingReq.flip()
      f
    }
    // Server-side filter implementing ping handling and, when dispatch is
    // disabled, Rerr responses to Tdispatch.
    val filter = new SimpleFilter[Message, Message] {
      def apply(req: Message, service: Service[Message, Message]): Future[Message] = req match {
        case Message.Tdispatch(tag, _, _, _, _) if !canDispatch =>
          Future.value(Message.Rerr(tag, "Tdispatch not enabled"))
        case Message.Tping(tag) =>
          ping().before { Future.value(Message.Rping(tag)) }
        case req => service(req)
      }
    }
    val server = new ServerDispatcher(
      serverTransport, filter andThen Processor andThen service,
      Lessor.nil, tracer, NullStatsReceiver)
  }
  // Push a tracer for the client.
  override def test(testName: String, testTags: Tag*)(f: => Any)(implicit pos: Position): Unit =
    super.test(testName, testTags:_*) {
      Trace.letTracer(tracer)(f)
    }
  // Convenience for building small payload buffers.
  def buf(b: Byte*) = Buf.ByteArray.Owned(b.toArray)
  test("handle concurrent requests, handling out of order replies") {
    val ctx = new Ctx
    import ctx._
    val p1, p2, p3 = new Promise[Response]
    val reqs = (1 to 3) map { i => Request(Path.empty, buf(i.toByte)) }
    when(service(reqs(0))).thenReturn(p1)
    when(service(reqs(1))).thenReturn(p2)
    when(service(reqs(2))).thenReturn(p3)
    val f1 = client(reqs(0))
    val f2 = client(reqs(1))
    val f3 = client(reqs(2))
    for (i <- 0 to 2)
      verify(service)(reqs(i))
    for (f <- Seq(f1, f2, f3))
      assert(f.poll == None)
    val reps = Seq(10, 20, 9) map { i => Response(buf(i.toByte)) }
    // Satisfy the replies out of order; each client future must resolve only
    // when its own reply arrives.
    p2.setValue(reps(1))
    assert(f1.poll == None)
    assert(f2.poll == Some(Return(reps(1))))
    assert(f3.poll == None)
    p1.setValue(reps(0))
    assert(f1.poll == Some(Return(reps(0))))
    assert(f3.poll == None)
    p3.setValue(reps(2))
    assert(f3.poll == Some(Return(reps(2))))
  }
  test("server responds to pings") {
    val ctx = new Ctx
    import ctx._
    for (i <- 0 until 5) {
      assert(nping.get == i)
      val pinged = session.ping()
      assert(!pinged.isDefined)
      pingRep.flip()
      Await.result(pinged, 30.seconds)
      assert(Await.result(pinged.liftToTry, 5.seconds) == Return.Unit)
      assert(nping.get == i+1)
    }
  }
  test("server nacks new requests after draining") {
    val ctx = new Ctx
    import ctx._
    val req1 = Request(Path.empty, buf(1))
    val p1 = new Promise[Response]
    when(service(req1)).thenReturn(p1)
    val f1 = client(req1)
    verify(service)(req1)
    server.close(Time.now)
    // The in-flight request survives the drain ...
    assert(f1.poll == None)
    val req2 = Request(Path.empty, buf(2))
    // ... but new requests are nacked with a restartable failure.
    client(req2).poll match {
      case Some(Throw(f: Failure)) => assert(f.isFlagged(Failure.Restartable))
      case _ => fail()
    }
    verify(service, never)(req2)
    val rep1 = Response(buf(123))
    p1.setValue(rep1)
    assert(f1.poll == Some(Return(rep1)))
  }
  test("requeueable failures transit server-to-client") {
    val ctx = new Ctx
    import ctx._
    val req1 = Request(Path.empty, buf(1))
    val p1 = new Promise[Response]
    when(service(req1)).thenReturn(Future.exception(
      Failure.rejected("come back tomorrow")))
    client(req1).poll match {
      case Some(Throw(f: Failure)) => assert(f.isFlagged(Failure.Restartable))
      case bad => fail(s"got $bad")
    }
  }
  test("handle errors") {
    val ctx = new Ctx
    import ctx._
    val req = Request(Path.empty, buf(1))
    when(service(req)).thenReturn(Future.exception(new Exception("sad panda")))
    assert(client(req).poll == Some(
      Throw(ServerApplicationError("java.lang.Exception: sad panda"))))
  }
  test("propagate interrupts") {
    val ctx = new Ctx
    import ctx._
    val req = Request(Path.empty, buf(1))
    val p = new Promise[Response]
    when(service(req)).thenReturn(p)
    val f = client(req)
    assert(f.poll == None)
    assert(p.isInterrupted == None)
    // Raising on the client future must interrupt the server-side promise.
    val exc = new Exception("sad panda")
    f.raise(exc)
    assert(p.isInterrupted == Some(
      ClientDiscardedRequestException("java.lang.Exception: sad panda")))
    assert(f.poll == Some(Throw(exc)))
  }
  test("propagate trace ids") {
    val ctx = new Ctx
    import ctx._
    // The service echoes the trace id it observes; it must match the caller's.
    when(service(any[Request])).thenAnswer(
      new Answer[Future[Response]]() {
        def answer(invocation: InvocationOnMock) =
          Future.value(Response(Buf.Utf8(Trace.id.toString)))
      }
    )
    val id = Trace.nextId
    val resp = Trace.letId(id) {
      client(Request(Path.empty, buf(1)))
    }
    assert(resp.poll.isDefined)
    val Buf.Utf8(respStr) = Await.result(resp, 5.seconds).body
    assert(respStr == id.toString)
  }
  test("propagate trace flags") {
    val ctx = new Ctx
    import ctx._
    // The service echoes the observed trace flags as a big-endian long.
    when(service(any[Request])).thenAnswer(
      new Answer[Future[Response]] {
        def answer(invocation: InvocationOnMock) = {
          val bw = BufWriter.fixed(8)
          bw.writeLongBE(Trace.id.flags.toLong)
          Future.value(Response(bw.owned()))
        }
      }
    )
    val flags = Flags().setDebug
    val id = Trace.nextId.copy(flags=flags)
    val resp = Trace.letId(id) {
      val p = client(Request(Path.empty, buf(1)))
      p
    }
    assert(resp.poll.isDefined)
    val respBr = BufReader(Await.result(resp, 5.seconds).body)
    assert(respBr.remaining == 8)
    val respFlags = Flags(respBr.readLongBE())
    assert(respFlags == flags)
  }
  test("failure detection") {
    val config = FailureDetector.ThresholdConfig(
      minPeriod = 10.milliseconds,
      closeTimeout = Duration.Top)
    val ctx = new Ctx(config)
    import ctx._
    // The threshold detector pings immediately; the session is Busy until the
    // first reply arrives.
    assert(nping.get == 1)
    assert(client.status == Status.Busy)
    pingRep.flip()
    Status.awaitOpen(client.status)
    // This is technically racy, but would require a pretty
    // pathological test environment.
    assert(client.status == Status.Open)
    eventually { assert(client.status == Status.Busy) }
    // Now begin replying.
    def loop(): Future[Unit] = {
      val f = pingReq.get
      pingRep.flip()
      f.before(loop())
    }
    loop()
    eventually { assert(client.status == Status.Open) }
  }
}
@RunWith(classOf[JUnitRunner])
class ClientServerTestNoDispatch extends ClientServerTest {
  // Legacy mode: the server Rerr's Tdispatch, so the client must fall back
  // and the destination path is not transmitted.
  val canDispatch = false
  test("does not dispatch destinations") {
    val ctx = new Ctx
    import ctx._
    val withDst = Request(Path.read("/dst/name"), buf(123))
    val withoutDst = Request(Path.empty, buf(123))
    val rep = Response(buf(23))
    when(service(withoutDst)).thenReturn(Future.value(rep))
    // The service is stubbed only for the destination-less request, so the
    // assertion below proves the path was stripped on the wire.
    assert(Await.result(client(withDst), 5.seconds) == rep)
    verify(service)(withoutDst)
  }
}
@RunWith(classOf[JUnitRunner])
class ClientServerTestDispatch extends ClientServerTest {
  // Tdispatch-capable server: contexts and destinations are transmitted.
  val canDispatch = true
  import TestContext._
  // Note: We test trace propagation here, too,
  // since it's a default request context.
  test("Transmits request contexts") {
    val ctx = new Ctx
    import ctx._
    // The service echoes the broadcast-context value it observes (empty Buf
    // when the context is unset).
    when(service(any[Request])).thenAnswer(
      new Answer[Future[Response]] {
        def answer(invocation: InvocationOnMock) =
          Future.value(Response(
            Contexts.broadcast.get(testContext)
              .getOrElse(Buf.Empty)))
      }
    )
    // No context set
    assert(Await.result(client(Request(Path.empty, Buf.Empty)), 5.seconds).body.isEmpty)
    val f = Contexts.broadcast.let(testContext, Buf.Utf8("My context!")) {
      client(Request.empty)
    }
    assert(Await.result(f, 5.seconds).body == Buf.Utf8("My context!"))
  }
  test("dispatches destinations") {
    val ctx = new Ctx
    import ctx._
    val req = Request(Path.read("/dst/name"), buf(123))
    val rep = Response(buf(23))
    when(service(req)).thenReturn(Future.value(rep))
    // The stub matches the request including its path, so the path must have
    // survived the round trip.
    assert(Await.result(client(req), 5.seconds) == rep)
    verify(service)(req)
  }
}
| spockz/finagle | finagle-mux/src/test/scala/com/twitter/finagle/mux/ClientServerTest.scala | Scala | apache-2.0 | 10,234 |
package org.jetbrains.plugins.scala.editor.importOptimizer
import com.intellij.psi.{PsiAnchor, PsiWhiteSpace}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.ScImportStmt
//TODO: rename to something more descriptive, e.g. "ImportRangeInfo"
/**
 * A contiguous range of import statements together with the analysis data the
 * import optimizer needs: the parsed info per statement, the set of imported
 * names actually used, and whether the range is a local (non-top-level) one.
 *
 * Anchors (`firstPsi`/`lastPsi`) are used instead of raw elements so the
 * offsets stay resolvable across PSI changes.
 */
final case class RangeInfo(
  firstPsi: PsiAnchor,
  lastPsi: PsiAnchor,
  importStmtWithInfos: Seq[(ScImportStmt, Seq[ImportInfo])],
  usedImportedNames: Set[String],
  isLocal: Boolean
) {
  // Raw offsets of the range, excluding surrounding whitespace.
  val startOffset: Int = firstPsi.getStartOffset
  val endOffset: Int = lastPsi.getEndOffset
  // Start offset extended left over the whitespace immediately preceding the
  // range; falls back to `startOffset` when the anchored element can no
  // longer be retrieved.
  lazy val startOffsetAccepted: Int = {
    val prevWhitespaceLength: Int = firstPsi.retrieve() match {
      case null => 0
      case last => last.getPrevSibling match {
        case ws: PsiWhiteSpace => ws.getTextLength
        case _ => 0
      }
    }
    startOffset - prevWhitespaceLength
  }
  // End offset extended right over the whitespace immediately following the
  // range; falls back to `endOffset` when the anchor is stale.
  lazy val endOffsetAccepted: Int = {
    val nextWhitespaceLength: Int = lastPsi.retrieve() match {
      case null => 0
      case last => last.getNextSibling match {
        case ws: PsiWhiteSpace => ws.getTextLength
        case _ => 0
      }
    }
    endOffset + nextWhitespaceLength
  }
  /** True when `offset` falls inside the whitespace-extended range. */
  def rangeCanAccept(offset: Int): Boolean =
    startOffsetAccepted <= offset && offset <= endOffsetAccepted
}
package sc2d
import java.awt.Paint
import java.awt.Shape
import java.awt.Stroke
import java.awt.BasicStroke
import java.awt.Graphics2D
import java.awt.geom.Point2D
import java.awt.geom.Rectangle2D
/**
 * A figure that draws the outline of `shape`, stroked with `stroke` and
 * colored with `paint` (the shape is not filled — only `g draw shape` is
 * issued in `paint`).
 */
final case class StrokeShape(shape:Shape, paint:Paint, stroke:Stroke) extends Figure {
  // The area actually covered by the stroke; lazy because it is only needed
  // for picking (and for the bounds of non-basic strokes).
  private lazy val strokedShape =
    stroke createStrokedShape shape
  def pick(at:Point2D):Boolean =
    strokedShape contains at
  val bounds:Rectangle2D =
    stroke match {
      case bs:BasicStroke =>
        // TODO this is not worth much - picking later will calculate the stroked shape anyway
        // Conservative estimate: pad the raw shape bounds by lineWidth + 2 on
        // every side instead of materializing the stroked outline eagerly.
        val shapeBounds = shape.getBounds2D
        val strokeSize = bs.getLineWidth + 2
        new Rectangle2D.Double(
          shapeBounds.getX - strokeSize,
          shapeBounds.getY - strokeSize,
          shapeBounds.getWidth + 2*strokeSize,
          shapeBounds.getHeight + 2*strokeSize
        )
      case os:Stroke =>
        // Unknown stroke type: no cheap estimate available, so use the exact
        // stroked outline's bounds padded by one pixel.
        val shapeBounds = strokedShape.getBounds2D
        new Rectangle2D.Double(
          shapeBounds.getX - 1,
          shapeBounds.getY - 1,
          shapeBounds.getWidth + 2,
          shapeBounds.getHeight + 2
        )
    }
  // Draws the outline, restoring the previous paint and stroke afterwards.
  def paint(g:Graphics2D):Unit = {
    val oldPaint = g.getPaint
    val oldStroke = g.getStroke
    g setPaint paint
    g setStroke stroke
    g draw shape
    g setStroke oldStroke
    g setPaint oldPaint
  }
}
| ritschwumm/sc2d | src/main/scala/sc2d/StrokeShape.scala | Scala | bsd-2-clause | 1,272 |
// Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
package org.pantsbuild.testproject.publish
// A simple jvm binary to test the jvm_run task on. Try, e.g.,
// ./pants -ldebug run --jvm-run-jvm-options='-Dfoo=bar' --jvm-run-jvm-program-args="Foo Bar" \\\\
// testprojects/src/scala/org/pantsbuild/testproject/publish:jvm-run-example-lib
/**
 * Minimal JVM entry point used to exercise the `jvm_run` task: prints a
 * greeting followed by the comma-separated program arguments.
 */
object JvmRunExample {
  def main(args: Array[String]): Unit = {
    println("Hello, World")
    val joined = args.mkString(", ")
    println(s"args: $joined")
  }
}
| tdyas/pants | testprojects/src/scala/org/pantsbuild/testproject/publish/JvmRunExample.scala | Scala | apache-2.0 | 565 |
package org.randi3.schema
import scala.slick.lifted.{TypeMapperDelegate, BaseTypeMapper}
import scala.slick.driver.BasicProfile
import scala.slick.session.{PositionedResult, PositionedParameters}
import java.sql.SQLException
/**
 * Slick type mapper that stores `Array[Byte]` columns as PostgreSQL BYTEA,
 * reading/writing them with JDBC's getBytes/setBytes.
 */
object PostgresByteArrayTypeMapper extends
  BaseTypeMapper[Array[Byte]] with TypeMapperDelegate[Array[Byte]] {
  // The mapper is its own delegate, independent of the driver profile.
  def apply(p: BasicProfile) = this
  // "Zero" value used by Slick for non-nullable columns: an empty array.
  val zero = new Array[Byte](0)
  val sqlType = java.sql.Types.BLOB
  override val sqlTypeName = "BYTEA"
  def setValue(v: Array[Byte], p: PositionedParameters) {
    p.pos += 1
    p.ps.setBytes(p.pos, v)
  }
  def setOption(v: Option[Array[Byte]], p: PositionedParameters) {
    p.pos += 1
    // Reference comparison against the None singleton; None maps to SQL NULL.
    if(v eq None) p.ps.setBytes(p.pos, null) else p.ps.setBytes(p.pos, v.get)
  }
  def nextValue(r: PositionedResult) = {
    r.nextBytes()
  }
  def updateValue(v: Array[Byte], r: PositionedResult) {
    r.updateBytes(v)
  }
  // BYTEA values cannot be inlined as SQL literals; callers must use
  // parameter binding instead.
  override def valueToSQLLiteral(value: Array[Byte]) =
    throw new SQLException("Cannot convert BYTEA to literal")
}
} | dschrimpf/randi3-core | src/main/scala/org/randi3/schema/PostgresTypeMapper.scala | Scala | gpl-3.0 | 1,012 |
package com.identityblitz.jwt
import com.identityblitz.json._
import com.identityblitz.json.JSuccess
import org.joda.time.DateTime
/**
 * This class represents IntDate type of JSON Web Token. The type contains the
 * number of seconds from 1970-01-01T0:0:0Z UTC until the specified UTC
 * date/time.
 *
 * @param value seconds since the Unix epoch; must be strictly positive.
 */
sealed case class IntDate(value: Int) {
  if (value <= 0) throw new IllegalArgumentException("The number of second from epoch must be non negative.")

  /** True when this instant is strictly earlier than `d`.
    * (Fixed: the previous implementation compared the operands the wrong way
    * around, returning whether `d` was earlier than `this`.) */
  def before(d: IntDate): Boolean = value < d.value

  /** True when this instant is strictly later than `d`. */
  def after(d: IntDate): Boolean = value > d.value

  override def toString: String = value.toString
}
object IntDate {
  // JSON (de)serialization: an IntDate is represented as a bare JSON number.
  implicit object JIntDateReader extends JReader[IntDate] {
    def read(v: JVal): JResult[IntDate] = v match {
      case o: JNum => JSuccess(IntDate(o.as[Int]))
      case _ => JError("json.error.expected.number")
    }
  }
  implicit object JIntDateWriter extends JWriter[IntDate] {
    def write(o: IntDate): JVal = JNum(o.value)
  }
  /** The current time, truncated to whole seconds since the epoch. */
  def now: IntDate = IntDate((new DateTime().getMillis / 1000).toInt)
}
| brainysmith/json-lib | src/main/scala/com/identityblitz/jwt/IntDate.scala | Scala | mit | 1,042 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.calculations
import uk.gov.hmrc.ct.accounts.calculations.DebitAwareCalculation
import uk.gov.hmrc.ct.accounts.frs102.boxes._
/**
 * Totals the "creditors: amounts falling due within one year" boxes, for the
 * current (AC154) and previous (AC155) accounting periods, using the
 * debit-aware summing provided by [[DebitAwareCalculation]].
 */
trait TotalCreditorsWithinOneYearCalculator extends DebitAwareCalculation {
  /** AC154 = AC142 + AC144 + AC146 + AC148 + AC150 + AC152. */
  def calculateCurrentTotalCreditorsWithinOneYear(ac142: AC142, ac144: AC144, ac146: AC146, ac148: AC148, ac150: AC150, ac152: AC152): AC154 = {
    sum(ac142, ac144, ac146, ac148, ac150, ac152)(AC154.apply)
  }
  /** AC155 = AC143 + AC145 + AC147 + AC149 + AC151 + AC153. */
  def calculatePreviousTotalCreditorsWithinOneYear(ac143: AC143, ac145: AC145, ac147: AC147, ac149: AC149, ac151: AC151, ac153: AC153): AC155 = {
    sum(ac143, ac145, ac147, ac149, ac151, ac153)(AC155.apply)
  }
}
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frs102/calculations/TotalCreditorsWithinOneYearCalculator.scala | Scala | apache-2.0 | 1,276 |
package services
import javax.inject.Singleton
import play.api.Logger
import scala.collection.mutable
/**
 * Store for conversion log messages, keyed by conversion job id.
 */
trait LogStore {
  /** Records (or replaces) the log lines for job `id`. */
  def add(id: String, log: Seq[String]): Unit
  /** Returns the log lines recorded for job `id`, if any. */
  def get(id: String): Option[Seq[String]]
}
/**
 * Memory backed log store.
 *
 * Bound as a singleton, so it can be hit by concurrent requests; the backing
 * map is therefore a lock-free concurrent TrieMap (declared as `mutable.Map`
 * to keep the original public member type unchanged).
 */
@Singleton
class SimpleLogStore extends LogStore {

  private val logger = Logger(this.getClass)

  // Thread-safe: the original plain mutable.Map could corrupt under
  // concurrent add/get from parallel request threads.
  val logs: mutable.Map[String, Seq[String]] =
    scala.collection.concurrent.TrieMap.empty[String, Seq[String]]

  /** Records (or replaces) the log lines for conversion `id`. */
  override def add(id: String, log: Seq[String]): Unit = {
    logger.info(s"Add $id to log store: ${log.size} lines")
    logs += id -> log
  }

  /** Returns the log lines recorded for conversion `id`, if any. */
  override def get(id: String): Option[Seq[String]] = {
    logger.info("Log store has " + logs.keys.mkString(", "))
    logs.get(id)
  }
}
} | kuhnuri/kuhnuri-queue | app/services/LogStore.scala | Scala | apache-2.0 | 733 |
import p.C
import scala.tools.asm.Opcodes
import scala.tools.partest.BytecodeTest
import scala.tools.partest.ASMConverters._
object Test extends BytecodeTest {
  // Call sites whose literal-loading bytecode is inspected below.
  def foo(c: C, x: Int) = c.f(x)
  def goo(c: C, x: Int) = c.g(x)
  // Asserts that method `m` of class `c` contains instruction `i`.
  def has(i: Instruction, c: String, m: String) = {
    val cls = loadClassNode(c)
    val mth = convertMethod(getMethod(cls, m))
    assert(mth.instructions.contains(i))
  }
  def show(): Unit = {
    // Sanity-check runtime behaviour first.
    assert(foo(new C, -2) == -5L)
    assert(goo(new C, -2) == -10L)
    // The small negative constants must be emitted as BIPUSH/SIPUSH, both in
    // the library class p.C and at the call sites in Test$.
    val bipush2 = IntOp(Opcodes.BIPUSH, -2)
    has(bipush2, "p.C", "f")
    has(bipush2, "Test$", "foo")
    val sipush300 = IntOp(Opcodes.SIPUSH, -300)
    has(sipush300, "p.C", "g")
    has(sipush300, "Test$", "goo")
  }
}
| felixmulder/scala | test/files/run/t9403/Test_2.scala | Scala | bsd-3-clause | 725 |
import org.apache.spark.{SparkContext, SparkConf}
/**
* Created by ma on 15-1-27.
*/
// TPC-H query 18 (large volume customers) runner.  NOTE: building this class
// eagerly creates a SparkContext/HiveContext as a constructor side effect.
class QueryT18 extends BaseQuery{
  System.setProperty("spark.cores.max",String.valueOf(ParamSet.cores))
  val conf = new SparkConf()
  conf.setAppName("TPCH-Q18")
  val sc = new SparkContext(conf)
  val sqlContext = new org.apache.spark.sql.hive.HiveContext(sc)
  // Runs the query (optionally only EXPLAINs it), logs parse and execution
  // times in seconds, then stops the context.
  override def execute: Unit ={
    // setAppName("TPC-H_Q18")
    //get the time before the query be executed
    val t0 = System.nanoTime : Double
    var t1 = System.nanoTime : Double
    println("ID: "+ID+"query 18 will be parsed")
    val choosDdatabase = sqlContext.sql("use "+ParamSet.database)
    choosDdatabase.count()
    println("DATABASE: "+ParamSet.database)
    //the query
    val res0 = sqlContext.sql("""drop view q18_tmp_cached""")
    val res1 = sqlContext.sql("""create view q18_tmp_cached as
select l_orderkey,sum(l_quantity) as t_sum_quantity
from lineitem
where l_orderkey is not null
group by l_orderkey""")
    val res2 = sqlContext.sql("""select c_name,c_custkey,o_orderkey,o_orderdate,o_totalprice,sum(l_quantity)
from customer,orders,q18_tmp_cached t,lineitem l
where c_custkey = o_custkey and o_orderkey = t.l_orderkey and o_orderkey is not null and t.t_sum_quantity > 300 and o_orderkey = l.l_orderkey and l.l_orderkey is not null
group by c_name, c_custkey,o_orderkey,o_orderdate,o_totalprice
order by o_totalprice desc, o_orderdate
limit 100""")
    t1 = System.nanoTime : Double
    println("ID: "+ID+"query 18 parse done, parse time:"+ (t1 - t0) / 1000000000.0 + " secs")
    if(ParamSet.isExplain){
      // Explain-only mode: print the physical plans, don't run the query.
      println(res0.queryExecution.executedPlan)
      println(res1.queryExecution.executedPlan)
      println(res2.queryExecution.executedPlan)
    }else{
      // count()/collect() forces execution of the final query.
      if (ParamSet.showResult){
        res2.collect().foreach(println)
      }else{
        res2.count()
      }
      t1 = System.nanoTime : Double
      println("ID: "+ID+"query 18's execution time : " + (t1 - t0) / 1000000000.0 + " secs")
    }
    println("ID: "+ID+"Query 18 completed!")
    sc.stop()
    println("ID: "+ID+"Query 18's context successfully stopped")
    Runtime.getRuntime.exec(ParamSet.execFREE)
  }
}
| f7753/spark-SQL-tpch-test-tool | QueryT18.scala | Scala | apache-2.0 | 2,471 |
package scala.slick.jdbc
/**
 * Handed to invokers of an updatable query; exposes the current row of the
 * underlying result set so it can be read, replaced, deleted, or used as an
 * insertion point.
 */
trait ResultSetMutator[T] {
  /**
   * Get the current row's value.
   */
  def row: T
  /**
   * Update the current row.
   */
  def row_=(value: T)
  /**
   * Insert a new row.
   */
  def insert(value: T)
  /**
   * Delete the current row.
   */
  def delete(): Unit
}
| szeiger/scala-query | src/main/scala/scala/slick/jdbc/ResultSetMutator.scala | Scala | bsd-2-clause | 302 |
/*
* Licensed to Cloudera, Inc. under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Cloudera, Inc. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloudera.hue.livy
import java.io.InputStream
import java.util.concurrent.locks.ReentrantLock
import scala.io.Source
/**
 * Eagerly drains `inputStream` on a background daemon thread, buffering the
 * decoded lines in memory.  `lines` returns a snapshot of what has been read
 * so far, while `iterator` blocks until further lines arrive or the stream
 * ends.
 */
class LineBufferedStream(inputStream: InputStream) extends Logging {

  // Guarded by `_lock`: the buffered lines and the end-of-stream flag.
  private[this] var _lines: IndexedSeq[String] = IndexedSeq()
  private[this] val _lock = new ReentrantLock()
  private[this] val _condition = _lock.newCondition()
  private[this] var _finished = false

  // Reader thread: appends each decoded line under the lock and wakes up any
  // blocked iterators; on end of stream it marks the buffer finished.
  private val thread = new Thread {
    override def run() = {
      val lines = Source.fromInputStream(inputStream).getLines()
      for (line <- lines) {
        _lock.lock()
        try {
          trace("stdout: ", line)
          _lines = _lines :+ line
          _condition.signalAll()
        } finally {
          _lock.unlock()
        }
      }
      _lock.lock()
      try {
        _finished = true
        _condition.signalAll()
      } finally {
        _lock.unlock()
      }
    }
  }
  thread.setDaemon(true)
  thread.start()

  /** Snapshot of the lines read so far (non-blocking). */
  def lines: IndexedSeq[String] = _lines

  /** A blocking iterator over all lines, past and future. */
  def iterator: Iterator[String] = {
    new LinesIterator
  }

  private class LinesIterator extends Iterator[String] {
    private[this] var index = 0

    override def hasNext: Boolean = {
      _lock.lock()
      try {
        // Wait in a loop: a single un-looped await() (as previously written)
        // could return on a spurious wakeup — or on the finish signal racing
        // a concurrent append — and report end-of-stream too early.
        while (index >= _lines.length && !_finished)
          _condition.await()
        index < _lines.length
      } finally {
        _lock.unlock()
      }
    }

    override def next(): String = {
      val line = _lines(index)
      index += 1
      line
    }
  }
}
| vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/apps/spark/java/livy-core/src/main/scala/com/cloudera/hue/livy/LineBufferedStream.scala | Scala | gpl-2.0 | 2,444 |
package foo
// Positive compiler test: exercises every syntactic position where Scala
// accepts an optional trailing comma.  Do not "clean up" the commas or the
// unusual formatting — the layout is the point of the test.
trait ArgumentExprs1 {
  def f(foo: Int, bar: String)(implicit ev0: Ev0, ev1: Ev1) = 1
  f(
    23,
    "bar",
  )(
    Ev0,
    Ev1,
  )
  // test arg exprs in the presence of varargs
  def g(x: Int, y: Int*) = 1
  g(1,2,
  )
  g(1,List(2, 3): _*,
  )
}
trait ArgumentExprs2 {
  class C(foo: Int, bar: String)(implicit ev0: Ev0, ev1: Ev1)
  new C(
    23,
    "bar",
  )(
    Ev0,
    Ev1,
  )
}
trait Params {
  def f(
    foo: Int,
    bar: String,
  )(implicit
    ev0: Ev0,
    ev1: Ev1,
  ): Unit
}
trait ClassParams {
  class C(
    foo: Int,
    bar: String,
  )(implicit
    ev0: Ev0,
    ev1: Ev1,
  )
  // test class params in the presence of varargs
  case class D(i: Int*,
  )
}
trait SimpleExpr1 {
  def f: (Int, String) = (
    23,
    "bar",
  )
  // the Tuple1 value case, the trailing comma is ignored so the type is Int and the value 23
  def g: Int = (
    23,
  )
}
trait TypeArgs {
  class C[A, B]
  def f: C[
    Int,
    String,
  ]
}
trait TypeParamClause {
  class C[
    A,
    B,
  ]
}
trait FunTypeParamClause {
  def f[
    A,
    B,
  ]: Unit
}
trait SimpleType {
  def f: (
    Int,
    String,
  )
  // the Tuple1 type case, the trailing comma is ignored so the type is Int and the value 23
  def g: (
    Int,
  ) = 23
}
trait FunctionArgTypes {
  def f: (
    Int,
    String,
  ) => Boolean
}
trait SimplePattern {
  val (
    foo,
    bar,
  ) = null: Any
  // test '@' syntax in patterns
  Some(1) match {
    case Some(x @ 1,
    ) => x
  }
  // test ': _*' syntax in patterns
  List(1, 2, 3) match {
    case List(1, 2, _ @ _*,
    ) => 1
  }
  // test varargs in patterns
  val List(x, y, _*,
  ) = 42 :: 17 :: Nil
}
trait ImportSelectors {
  import foo.{
    Ev0,
    Ev1,
  }
}
trait Bindings {
  def g(f: (Int, String) => Boolean): Unit
  g((
    foo,
    bar,
  ) => true)
}
// Import, ids, ValDcl, VarDcl, VarDef, PatDef use commas, but not inside paren, bracket or brace,
// so they don't support an optional trailing comma
// test utilities
object `package` {
  sealed trait Ev0; implicit object Ev0 extends Ev0
  sealed trait Ev1; implicit object Ev1 extends Ev1
}
| lrytz/scala | test/files/pos/trailing-commas.scala | Scala | apache-2.0 | 2,149 |
/** Copyright 2015 TappingStone, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.data.webhooks.mailchimp
import org.apache.predictionio.data.webhooks.FormConnector
import org.apache.predictionio.data.webhooks.ConnectorException
import org.apache.predictionio.data.storage.EventValidation
import org.apache.predictionio.data.Utils
import org.json4s.JObject
import org.joda.time.DateTime
import org.joda.time.format.DateTimeFormat
private[predictionio] object MailChimpConnector extends FormConnector {
  /**
   * Converts a MailChimp webhook form submission into event JSON, dispatching
   * on the mandatory "type" form field.
   *
   * Throws [[ConnectorException]] when "type" is missing or unrecognized.
   */
  override
  def toEventJson(data: Map[String, String]): JObject = {
    val json = data.get("type") match {
      case Some("subscribe") => subscribeToEventJson(data)
      // UNSUBSCRIBE
      case Some("unsubscribe") => unsubscribeToEventJson(data)
      // PROFILE UPDATES
      case Some("profile") => profileToEventJson(data)
      // EMAIL UPDATE
      case Some("upemail") => upemailToEventJson(data)
      // CLEANED EMAILS
      case Some("cleaned") => cleanedToEventJson(data)
      // CAMPAIGN SENDING STATUS
      case Some("campaign") => campaignToEventJson(data)
      // invalid type
      case Some(x) => throw new ConnectorException(
        s"Cannot convert unknown MailChimp data type ${x} to event JSON")
      case None => throw new ConnectorException(
        s"The field 'type' is required for MailChimp data.")
    }
    json
  }
  // MailChimp sends timestamps as "yyyy-MM-dd HH:mm:ss" with no zone
  // designator; they are interpreted in the default event time zone.
  val mailChimpDateTimeFormat = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss")
    .withZone(EventValidation.defaultTimeZone)
  /** Parses a MailChimp "fired_at" timestamp into a Joda DateTime. */
  def parseMailChimpDateTime(s: String): DateTime = {
    mailChimpDateTimeFormat.parseDateTime(s)
  }
  /**
   * Builds event JSON for a "subscribe" notification: event "subscribe" from
   * user `data[id]` targeting list `data[list_id]`.
   */
  def subscribeToEventJson(data: Map[String, String]): JObject = {
    import org.json4s.JsonDSL._
    /*
    "type": "subscribe",
    "fired_at": "2009-03-26 21:35:57",
    "data[id]": "8a25ff1d98",
    "data[list_id]": "a6b5da1054",
    "data[email]": "api@mailchimp.com",
    "data[email_type]": "html",
    "data[merges][EMAIL]": "api@mailchimp.com",
    "data[merges][FNAME]": "MailChimp",
    "data[merges][LNAME]": "API",
    "data[merges][INTERESTS]": "Group1,Group2",
    "data[ip_opt]": "10.20.10.30",
    "data[ip_signup]": "10.20.10.30"
    */
    // convert to ISO8601 format
    val eventTime = Utils.dateTimeToString(parseMailChimpDateTime(data("fired_at")))
    // TODO: handle optional fields
    // NOTE: in JsonDSL `~` binds tighter than `->`, so despite the unusual
    // parenthesis placement below, LNAME/INTERESTS still land inside
    // "merges", and ip_opt/ip_signup inside "properties".
    val json =
      ("event" -> "subscribe") ~
      ("entityType" -> "user") ~
      ("entityId" -> data("data[id]")) ~
      ("targetEntityType" -> "list") ~
      ("targetEntityId" -> data("data[list_id]")) ~
      ("eventTime" -> eventTime) ~
      ("properties" -> (
        ("email" -> data("data[email]")) ~
        ("email_type" -> data("data[email_type]")) ~
        ("merges" -> (
          ("EMAIL" -> data("data[merges][EMAIL]")) ~
          ("FNAME" -> data("data[merges][FNAME]"))) ~
          ("LNAME" -> data("data[merges][LNAME]")) ~
          ("INTERESTS" -> data.get("data[merges][INTERESTS]"))
        )) ~
        ("ip_opt" -> data("data[ip_opt]")) ~
        ("ip_signup" -> data("data[ip_signup]")
      ))
    json
  }
  /**
   * Builds event JSON for an "unsubscribe" notification: event "unsubscribe"
   * from user `data[id]` targeting list `data[list_id]`, carrying the action
   * ("unsub"/"delete") and reason ("manual"/"abuse") in the properties.
   */
  def unsubscribeToEventJson(data: Map[String, String]): JObject = {
    import org.json4s.JsonDSL._
    /*
    "action" will either be "unsub" or "delete".
    The reason will be "manual" unless caused by a spam complaint - then it will be "abuse"
    "type": "unsubscribe",
    "fired_at": "2009-03-26 21:40:57",
    "data[action]": "unsub",
    "data[reason]": "manual",
    "data[id]": "8a25ff1d98",
    "data[list_id]": "a6b5da1054",
    "data[email]": "api+unsub@mailchimp.com",
    "data[email_type]": "html",
    "data[merges][EMAIL]": "api+unsub@mailchimp.com",
    "data[merges][FNAME]": "MailChimp",
    "data[merges][LNAME]": "API",
    "data[merges][INTERESTS]": "Group1,Group2",
    "data[ip_opt]": "10.20.10.30",
    "data[campaign_id]": "cb398d21d2",
    */
    // convert to ISO8601 format
    val eventTime = Utils.dateTimeToString(parseMailChimpDateTime(data("fired_at")))
    // NOTE: in JsonDSL `~` binds tighter than `->`, so despite the unusual
    // parenthesis placement below, LNAME/INTERESTS still land inside
    // "merges", and ip_opt/campaign_id inside "properties".
    val json =
      ("event" -> "unsubscribe") ~
      ("entityType" -> "user") ~
      ("entityId" -> data("data[id]")) ~
      ("targetEntityType" -> "list") ~
      ("targetEntityId" -> data("data[list_id]")) ~
      ("eventTime" -> eventTime) ~
      ("properties" -> (
        ("action" -> data("data[action]")) ~
        ("reason" -> data("data[reason]")) ~
        ("email" -> data("data[email]")) ~
        ("email_type" -> data("data[email_type]")) ~
        ("merges" -> (
          ("EMAIL" -> data("data[merges][EMAIL]")) ~
          ("FNAME" -> data("data[merges][FNAME]"))) ~
          ("LNAME" -> data("data[merges][LNAME]")) ~
          ("INTERESTS" -> data.get("data[merges][INTERESTS]"))
        )) ~
        ("ip_opt" -> data("data[ip_opt]")) ~
        ("campaign_id" -> data("data[campaign_id]")
      ))
    json
  }
/** Builds a PredictionIO event JSON from a MailChimp "profile" (profile-update)
  * webhook payload.
  *
  * Expected keys: fired_at, data[id], data[list_id], data[email],
  * data[email_type], data[merges][EMAIL|FNAME|LNAME],
  * data[merges][INTERESTS] (optional), data[ip_opt].
  */
def profileToEventJson(data: Map[String, String]): JObject = {
  import org.json4s.JsonDSL._
  // "fired_at" arrives as "yyyy-MM-dd HH:mm:ss"; convert to ISO8601.
  val eventTime = Utils.dateTimeToString(parseMailChimpDateTime(data("fired_at")))
  // Explicit nesting of the merge fields (previously grouped only via the
  // `~`-over-`->` precedence rule); resulting JSON is unchanged.
  val merges =
    ("EMAIL" -> data("data[merges][EMAIL]")) ~
    ("FNAME" -> data("data[merges][FNAME]")) ~
    ("LNAME" -> data("data[merges][LNAME]")) ~
    ("INTERESTS" -> data.get("data[merges][INTERESTS]"))  // omitted when absent
  val properties =
    ("email" -> data("data[email]")) ~
    ("email_type" -> data("data[email_type]")) ~
    ("merges" -> merges) ~
    ("ip_opt" -> data("data[ip_opt]"))
  ("event" -> "profile") ~
  ("entityType" -> "user") ~
  ("entityId" -> data("data[id]")) ~
  ("targetEntityType" -> "list") ~
  ("targetEntityId" -> data("data[list_id]")) ~
  ("eventTime" -> eventTime) ~
  ("properties" -> properties)
}
/** Builds a PredictionIO event JSON from a MailChimp "upemail" (email address
  * changed) webhook payload.
  *
  * Expected keys: fired_at, data[list_id], data[new_id],
  * data[new_email], data[old_email].
  */
def upemailToEventJson(data: Map[String, String]): JObject = {
  import org.json4s.JsonDSL._
  // "fired_at" arrives as "yyyy-MM-dd HH:mm:ss"; convert to ISO8601.
  val eventTime = Utils.dateTimeToString(parseMailChimpDateTime(data("fired_at")))
  val properties =
    ("new_email" -> data("data[new_email]")) ~
    ("old_email" -> data("data[old_email]"))
  // The entity is identified by the member's new id after the address change.
  ("event" -> "upemail") ~
  ("entityType" -> "user") ~
  ("entityId" -> data("data[new_id]")) ~
  ("targetEntityType" -> "list") ~
  ("targetEntityId" -> data("data[list_id]")) ~
  ("eventTime" -> eventTime) ~
  ("properties" -> properties)
}
/** Builds a PredictionIO event JSON from a MailChimp "cleaned" webhook payload
  * (an address removed from a list).
  *
  * Expected keys: fired_at, data[list_id], data[campaign_id],
  * data[reason] ("hard" for hard bounces, or "abuse"), data[email].
  */
def cleanedToEventJson(data: Map[String, String]): JObject = {
  import org.json4s.JsonDSL._
  // "fired_at" arrives as "yyyy-MM-dd HH:mm:ss"; convert to ISO8601.
  val eventTime = Utils.dateTimeToString(parseMailChimpDateTime(data("fired_at")))
  val properties =
    ("campaignId" -> data("data[campaign_id]")) ~
    ("reason" -> data("data[reason]")) ~
    ("email" -> data("data[email]"))
  // No target entity here: the list itself is the primary entity.
  ("event" -> "cleaned") ~
  ("entityType" -> "list") ~
  ("entityId" -> data("data[list_id]")) ~
  ("eventTime" -> eventTime) ~
  ("properties" -> properties)
}
/** Builds a PredictionIO event JSON from a MailChimp "campaign" (campaign sent)
  * webhook payload.
  *
  * Expected keys: fired_at, data[id], data[subject], data[status],
  * data[reason] (may be empty), data[list_id].
  */
def campaignToEventJson(data: Map[String, String]): JObject = {
  import org.json4s.JsonDSL._
  // "fired_at" arrives as "yyyy-MM-dd HH:mm:ss"; convert to ISO8601.
  val eventTime = Utils.dateTimeToString(parseMailChimpDateTime(data("fired_at")))
  val properties =
    ("subject" -> data("data[subject]")) ~
    ("status" -> data("data[status]")) ~
    ("reason" -> data("data[reason]"))
  ("event" -> "campaign") ~
  ("entityType" -> "campaign") ~
  ("entityId" -> data("data[id]")) ~
  ("targetEntityType" -> "list") ~
  ("targetEntityId" -> data("data[list_id]")) ~
  ("eventTime" -> eventTime) ~
  ("properties" -> properties)
}
}
| alex9311/PredictionIO | data/src/main/scala/org/apache/predictionio/data/webhooks/mailchimp/MailChimpConnector.scala | Scala | apache-2.0 | 9,315 |
// Databricks notebook source exported at Sat, 18 Jun 2016 07:08:06 UTC
// MAGIC %md
// MAGIC
// MAGIC # [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/)
// MAGIC
// MAGIC
// MAGIC ### prepared by [Raazesh Sainudiin](https://nz.linkedin.com/in/raazesh-sainudiin-45955845) and [Sivanand Sivaram](https://www.linkedin.com/in/sivanand)
// MAGIC
// MAGIC *supported by* [](https://databricks.com/)
// MAGIC and
// MAGIC [](https://www.awseducate.com/microsite/CommunitiesEngageHome)
// COMMAND ----------
// MAGIC %md
// MAGIC This is an elaboration of the [Apache Spark 1.6 sql-progamming-guide](http://spark.apache.org/docs/latest/sql-programming-guide.html).
// MAGIC
// MAGIC # [Data Sources](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/sqlProgrammingGuide/003_dataSources_sqlProgGuide)
// MAGIC
// MAGIC ## [Spark Sql Programming Guide](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/sqlProgrammingGuide/000_sqlProgGuide)
// MAGIC
// MAGIC - [Overview](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/sqlProgrammingGuide/001_overview_sqlProgGuide)
// MAGIC - SQL
// MAGIC - DataFrames
// MAGIC - Datasets
// MAGIC - [Getting Started](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/sqlProgrammingGuide/002_gettingStarted_sqlProgGuide)
// MAGIC - Starting Point: SQLContext
// MAGIC - Creating DataFrames
// MAGIC - DataFrame Operations
// MAGIC - Running SQL Queries Programmatically
// MAGIC - Creating Datasets
// MAGIC - Interoperating with RDDs
// MAGIC - Inferring the Schema Using Reflection
// MAGIC - Programmatically Specifying the Schema
// MAGIC - [Data Sources](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/sqlProgrammingGuide/003_dataSources_sqlProgGuide)
// MAGIC - Generic Load/Save Functions
// MAGIC - Manually Specifying Options
// MAGIC - Run SQL on files directly
// MAGIC - Save Modes
// MAGIC - Saving to Persistent Tables
// MAGIC - Parquet Files
// MAGIC - Loading Data Programmatically
// MAGIC - Partition Discovery
// MAGIC - Schema Merging
// MAGIC - Hive metastore Parquet table conversion
// MAGIC - Hive/Parquet Schema Reconciliation
// MAGIC - Metadata Refreshing
// MAGIC - Configuration
// MAGIC - JSON Datasets
// MAGIC - Hive Tables
// MAGIC - Interacting with Different Versions of Hive Metastore
// MAGIC - JDBC To Other Databases
// MAGIC - Troubleshooting
// MAGIC - [Performance Tuning](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/sqlProgrammingGuide/004_performanceTuning_sqlProgGuide)
// MAGIC - Caching Data In Memory
// MAGIC - Other Configuration Options
// MAGIC - [Distributed SQL Engine](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/sqlProgrammingGuide/005_distributedSqlEngine_sqlProgGuide)
// MAGIC - Running the Thrift JDBC/ODBC server
// MAGIC - Running the Spark SQL CLI
// COMMAND ----------
// MAGIC %md
// MAGIC # [Data Sources](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/sqlProgrammingGuide/003_dataSources_sqlProgGuide)
// MAGIC
// MAGIC Spark SQL supports operating on a variety of data sources through the `DataFrame` interface. A DataFrame can be operated on as normal RDDs and can also be registered as a temporary table. Registering a DataFrame as a table allows you to run SQL queries over its data. But from time to time you would need to either load or save DataFrame. Spark SQL provides built-in data sources as well as Data Source API to define your own data source and use it read / write data into Spark.
// COMMAND ----------
// MAGIC %md
// MAGIC ## Overview
// MAGIC Spark provides some built-in datasources that you can use straight out of the box, such as [Parquet](https://parquet.apache.org/), [JSON](http://www.json.org/), [JDBC](https://en.wikipedia.org/wiki/Java_Database_Connectivity), [ORC](https://orc.apache.org/) (available with HiveContext), and Text (since Spark 1.6) and CSV (since Spark 2.0, before that it is accessible as a package).
// MAGIC
// MAGIC ## Third-party datasource packages
// MAGIC The community has also built quite a few data source packages to provide easy access to data in other formats. You can find a list of those packages at http://spark-packages.org/, e.g. [Avro](http://spark-packages.org/package/databricks/spark-avro), [CSV](http://spark-packages.org/package/databricks/spark-csv), [Amazon Redshift](http://spark-packages.org/package/databricks/spark-redshift) (for Spark < 2.0), [XML](http://spark-packages.org/package/HyukjinKwon/spark-xml), [NetFlow](http://spark-packages.org/package/sadikovi/spark-netflow) and many others.
// COMMAND ----------
// MAGIC %md
// MAGIC ## Generic Load/Save functions
// MAGIC In order to load or save DataFrame you have to call either ``read`` or ``write``. This will return [DataFrameReader](https://spark.apache.org/docs/latest/api/scala/index.html#org.apache.spark.sql.DataFrameReader) or [DataFrameWriter](https://spark.apache.org/docs/latest/api/scala/index.html#org.apache.spark.sql.DataFrameWriter) depending on what you are trying to achieve. Essentially these classes are entry points to the reading / writing actions. They allow you to specify writing mode or provide additional options to read data source.
// COMMAND ----------
// `read`/`write` are the I/O entry points: configure them with options, then
// call a format method (parquet/json/...) to actually load or save.
// This will return DataFrameReader to read data source
sqlContext.read
// A trivial DataFrame of the numbers 0..9, used below to demonstrate writing.
val df = sqlContext.range(0, 10)
// This will return DataFrameWriter to save DataFrame
df.write
// COMMAND ----------
// Loading Parquet table in Scala
// Parquet is self-describing, so the schema is read from the file itself.
val df = sqlContext.read.parquet("/tmp/platforms.parquet")
df.show(5)
// COMMAND ----------
// MAGIC %py
// MAGIC # Loading Parquet table in Python
// MAGIC dfPy = sqlContext.read.parquet("/tmp/platforms.parquet")
// MAGIC dfPy.show(5)
// COMMAND ----------
// Loading JSON dataset in Scala
// Note: each line of the file must be a complete, self-contained JSON object.
val df = sqlContext.read.json("/tmp/platforms.json")
df.show(5)
// COMMAND ----------
// MAGIC %py
// MAGIC # Loading JSON dataset in Python
// MAGIC dfPy = sqlContext.read.json("/tmp/platforms.json")
// MAGIC dfPy.show(5)
// COMMAND ----------
// MAGIC %md
// MAGIC ### Manually Specifying Options
// MAGIC
// MAGIC You can also manually specify the data source that will be used along with any extra options that you would like to pass to the data source. Data sources are specified by their fully qualified name (i.e., `org.apache.spark.sql.parquet`), but for built-in sources you can also use their short names (`json`, `parquet`, `jdbc`). DataFrames of any type can be converted into other types using this syntax.
// COMMAND ----------
// Manually specify the source format instead of relying on the default (parquet);
// built-in short names include "json", "parquet", and "jdbc".
val json = sqlContext.read.format("json").load("/tmp/platforms.json")
json.select("name").show()
val parquet = sqlContext.read.format("parquet").load("/tmp/platforms.parquet")
parquet.select("name").show()
// COMMAND ----------
// MAGIC %md
// MAGIC ### Run SQL on files directly
// MAGIC Instead of using read API to load a file into DataFrame and query it, you can also query that file directly with SQL.
// COMMAND ----------
// Query a file directly with SQL using the `format`.`path` syntax —
// no DataFrame registration is needed first.
val df = sqlContext.sql("SELECT * FROM parquet.`/tmp/platforms.parquet`")
df.printSchema()
// COMMAND ----------
// MAGIC %md
// MAGIC ### Save Modes
// MAGIC Save operations can optionally take a `SaveMode`, that specifies how to handle existing data if present. It is important to realize that these save modes do not utilize any locking and are not atomic. Additionally, when performing a `Overwrite`, the data will be deleted before writing out the new data.
// MAGIC
// MAGIC | Scala/Java | Any language | Meaning |
// MAGIC | --- | --- | --- |
// MAGIC | `SaveMode.ErrorIfExists` (default) | `"error"` (default) | When saving a DataFrame to a data source, if data already exists, an exception is expected to be thrown.
// MAGIC | `SaveMode.Append` | `"append"` | When saving a DataFrame to a data source, if data/table already exists, contents of the DataFrame are expected to be appended to existing data.
// MAGIC | `SaveMode.Overwrite` | `"overwrite"` | Overwrite mode means that when saving a DataFrame to a data source, if data/table already exists, existing data is expected to be overwritten by the contents of the DataFrame.
// MAGIC | `SaveMode.Ignore` | `"ignore"` | Ignore mode means that when saving a DataFrame to a data source, if data already exists, the save operation is expected to not save the contents of the DataFrame and to not change the existing data. This is similar to a `CREATE TABLE IF NOT EXISTS` in SQL.
// COMMAND ----------
// MAGIC %md
// MAGIC ### Saving to Persistent Tables
// MAGIC When working with a `HiveContext`, `DataFrames` can also be saved as persistent tables using the `saveAsTable` command. Unlike the `registerTempTable` command, `saveAsTable` will materialize the contents of the dataframe and create a pointer to the data in the HiveMetastore. Persistent tables will still exist even after your Spark program has restarted, as long as you maintain your connection to the same metastore. A DataFrame for a persistent table can be created by calling the `table` method on a `SQLContext` with the name of the table.
// MAGIC
// MAGIC By default `saveAsTable` will create a “managed table”, meaning that the location of the data will be controlled by the metastore. Managed tables will also have their data deleted automatically when a table is dropped.
// COMMAND ----------
// First of all list tables to see that table we are about to create does not exist
// ("simple_range" should be absent from this listing).
sqlContext.tables.show()
// COMMAND ----------
// MAGIC %sql
// MAGIC drop table simple_range
// COMMAND ----------
// saveAsTable persists the DataFrame as a managed table in the metastore, so it
// survives restarts (unlike registerTempTable, which is session-scoped).
val df = sqlContext.range(0, 100)
df.write.saveAsTable("simple_range")
// Verify that table is saved and it is marked as persistent ("isTemporary" value should be "false")
sqlContext.tables.show()
// COMMAND ----------
// MAGIC %md
// MAGIC ## Parquet Files
// MAGIC [Parquet](http://parquet.io) is a columnar format that is supported by many other data processing systems. Spark SQL provides support for both reading and writing Parquet files that automatically preserves the schema of the original data. When writing Parquet files, all columns are automatically converted to be nullable for compatibility reasons.
// COMMAND ----------
// MAGIC %md
// MAGIC ### More on Parquet
// MAGIC [Apache Parquet](https://parquet.apache.org/) is a [columnar storage](http://en.wikipedia.org/wiki/Column-oriented_DBMS) format available to any project in the Hadoop ecosystem, regardless of the choice of data processing framework, data model or programming language. It is a more efficient way to store data frames.
// MAGIC
// MAGIC * To understand the ideas read [Dremel: Interactive Analysis of Web-Scale Datasets, Sergey Melnik, Andrey Gubarev, Jing Jing Long, Geoffrey Romer, Shiva Shivakumar, Matt Tolton and Theo Vassilakis,Proc. of the 36th Int'l Conf on Very Large Data Bases (2010), pp. 330-339](http://research.google.com/pubs/pub36632.html), whose Abstract is as follows:
// MAGIC * Dremel is a scalable, interactive ad-hoc query system for analysis of read-only nested data. By combining multi-level execution trees and columnar data layouts it is **capable of running aggregation queries over trillion-row tables in seconds**. The system **scales to thousands of CPUs and petabytes of data, and has thousands of users at Google**. In this paper, we describe the architecture and implementation of Dremel, and explain how it complements MapReduce-based computing. We present a novel columnar storage representation for nested records and discuss experiments on few-thousand node instances of the system.
// COMMAND ----------
//This allows easy embedding of publicly available information into any other notebook
//when viewing in git-book just ignore this block - you may have to manually chase the URL in frameIt("URL").
//Example usage:
// displayHTML(frameIt("https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation#Topics_in_LDA",250))
/** Builds HTML for a sandboxed iframe embedding a public URL in a notebook cell.
  *
  * @param u URL to display inside the iframe
  * @param h iframe height in pixels (width is fixed at 95%)
  *
  * NOTE(review): left byte-identical on purpose — the triple-quoted literal's
  * internal whitespace is part of the emitted HTML, so it must not be reformatted.
  */
def frameIt( u:String, h:Int ) : String = {
"""<iframe
 src=""""+ u+""""
 width="95%" height="""" + h + """"
 sandbox>
  <p>
    <a href="http://spark.apache.org/docs/latest/index.html">
      Fallback link for browsers that, unlikely, don't support frames
    </a>
  </p>
</iframe>"""
}
displayHTML(frameIt("https://parquet.apache.org/documentation/latest/",500))
// COMMAND ----------
// MAGIC %md
// MAGIC ### Loading Data Programmatically
// COMMAND ----------
// Read in the parquet file created above. Parquet files are self-describing so the schema is preserved.
// The result of loading a Parquet file is also a DataFrame.
val parquetFile = sqlContext.read.parquet("/tmp/platforms.parquet")
// Parquet files can also be registered as tables and then used in SQL statements.
parquetFile.registerTempTable("parquetFile")
val platforms = sqlContext.sql("SELECT name FROM parquetFile WHERE visits > 0")
// Prefix each name for display; collect() pulls the results back to the driver.
platforms.map(t => "Name: " + t(0)).collect().foreach(println)
// COMMAND ----------
// MAGIC %md
// MAGIC ### Partition Discovery
// MAGIC Table partitioning is a common optimization approach used in systems like Hive. In a partitioned table, data are usually stored in different directories, with partitioning column values encoded in the path of each partition directory. The Parquet data source is now able to discover and infer partitioning information automatically. For example, we can store all our previously used population data (from the programming guide example!) into a partitioned table using the following directory structure, with two extra columns, `gender` and `country` as partitioning columns:
// MAGIC ```
// MAGIC path
// MAGIC └── to
// MAGIC └── table
// MAGIC ├── gender=male
// MAGIC │ ├── ...
// MAGIC │ │
// MAGIC │ ├── country=US
// MAGIC │ │ └── data.parquet
// MAGIC │ ├── country=CN
// MAGIC │ │ └── data.parquet
// MAGIC │ └── ...
// MAGIC └── gender=female
// MAGIC ├── ...
// MAGIC │
// MAGIC ├── country=US
// MAGIC │ └── data.parquet
// MAGIC ├── country=CN
// MAGIC │ └── data.parquet
// MAGIC └── ...
// MAGIC ```
// MAGIC By passing `path/to/table` to either `SQLContext.read.parquet` or `SQLContext.read.load`, Spark SQL will automatically extract the partitioning information from the paths. Now the schema of the returned DataFrame becomes:
// MAGIC ```
// MAGIC root
// MAGIC |-- name: string (nullable = true)
// MAGIC |-- age: long (nullable = true)
// MAGIC |-- gender: string (nullable = true)
// MAGIC |-- country: string (nullable = true)
// MAGIC ```
// MAGIC Notice that the data types of the partitioning columns are automatically inferred. Currently, numeric data types and string type are supported. Sometimes users may not want to automatically infer the data types of the partitioning columns. For these use cases, the automatic type inference can be configured by `spark.sql.sources.partitionColumnTypeInference.enabled`, which is default to `true`. When type inference is disabled, string type will be used for the partitioning columns.
// MAGIC
// MAGIC Starting from Spark 1.6.0, partition discovery only finds partitions under the given paths by default. For the above example, if users pass `path/to/table/gender=male` to either `SQLContext.read.parquet` or `SQLContext.read.load`, `gender` will not be considered as a partitioning column. If users need to specify the base path that partition discovery should start with, they can set `basePath` in the data source options. For example, when `path/to/table/gender=male` is the path of the data and users set `basePath` to `path/to/table/`, `gender` will be a partitioning column.
// COMMAND ----------
// MAGIC %md
// MAGIC ### Schema Merging
// MAGIC Like ProtocolBuffer, Avro, and Thrift, Parquet also supports schema evolution. Users can start with a simple schema, and gradually add more columns to the schema as needed. In this way, users may end up with multiple Parquet files with different but mutually compatible schemas. The Parquet data source is now able to automatically detect this case and merge schemas of all these files.
// MAGIC
// MAGIC Since schema merging is a relatively expensive operation, and is not a necessity in most cases, we turned it off by default starting from 1.5.0. You may enable it by:
// MAGIC 1. setting data source option `mergeSchema` to `true` when reading Parquet files (as shown in the examples below), or
// MAGIC 2. setting the global SQL option `spark.sql.parquet.mergeSchema` to `true`.
// COMMAND ----------
// Create a simple DataFrame, stored into a partition directory
val df1 = sc.parallelize(1 to 5).map(i => (i, i * 2)).toDF("single", "double")
df1.write.mode("overwrite").parquet("/tmp/data/test_table/key=1")
// Create another DataFrame in a new partition directory, adding a new column and dropping an existing column
val df2 = sc.parallelize(6 to 10).map(i => (i, i * 3)).toDF("single", "triple")
df2.write.mode("overwrite").parquet("/tmp/data/test_table/key=2")
// Read the partitioned table.
// "mergeSchema" unions the schemas of all part-files (off by default since 1.5.0);
// the "key" column comes from the key=1 / key=2 directory names.
val df3 = sqlContext.read.option("mergeSchema", "true").parquet("/tmp/data/test_table")
df3.printSchema()
// The final schema consists of all 3 columns in the Parquet files together
// with the partitioning column appeared in the partition directory paths.
// root
// |-- single: integer (nullable = true)
// |-- double: integer (nullable = true)
// |-- triple: integer (nullable = true)
// |-- key: integer (nullable = true))
// COMMAND ----------
// MAGIC %md
// MAGIC ### Hive metastore Parquet table conversion
// MAGIC When reading from and writing to Hive metastore Parquet tables, Spark SQL will try to use its own Parquet support instead of Hive SerDe for better performance. This behavior is controlled by the `spark.sql.hive.convertMetastoreParquet` configuration, and is turned on by default.
// MAGIC
// MAGIC #### Hive/Parquet Schema Reconciliation
// MAGIC There are two key differences between Hive and Parquet from the perspective of table schema processing.
// MAGIC 1. Hive is case insensitive, while Parquet is not
// MAGIC 2. Hive considers all columns nullable, while nullability in Parquet is significant
// MAGIC
// MAGIC Due to this reason, we must reconcile Hive metastore schema with Parquet schema when converting a Hive metastore Parquet table to a Spark SQL Parquet table. The reconciliation rules are:
// MAGIC 1. Fields that have the same name in both schema must have the same data type regardless of nullability. The reconciled field should have the data type of the Parquet side, so that nullability is respected.
// MAGIC 2. The reconciled schema contains exactly those fields defined in Hive metastore schema.
// MAGIC - Any fields that only appear in the Parquet schema are dropped in the reconciled schema.
// MAGIC - Any fields that only appear in the Hive metastore schema are added as nullable fields in the reconciled schema.
// MAGIC
// MAGIC #### Metadata Refreshing
// MAGIC Spark SQL caches Parquet metadata for better performance. When Hive metastore Parquet table conversion is enabled, metadata of those converted tables are also cached. If these tables are updated by Hive or other external tools, you need to refresh them manually to ensure consistent metadata.
// COMMAND ----------
// sqlContext should be a HiveContext to refresh table
// Invalidates cached Parquet metadata so changes made by external tools (e.g. Hive) are picked up.
sqlContext.refreshTable("simple_range")
// COMMAND ----------
// MAGIC %sql
// MAGIC -- Or you can use SQL to refresh table
// MAGIC REFRESH TABLE simple_range;
// COMMAND ----------
// MAGIC %md
// MAGIC ### Configuration
// MAGIC
// MAGIC Configuration of Parquet can be done using the `setConf` method on
// MAGIC `SQLContext` or by running `SET key=value` commands using SQL.
// MAGIC
// MAGIC | Property Name | Default | Meaning |
// MAGIC | --- | --- | --- | --- |
// MAGIC | `spark.sql.parquet.binaryAsString` | false | Some other Parquet-producing systems, in particular Impala, Hive, and older versions of Spark SQL, do not differentiate between binary data and strings when writing out the Parquet schema. This flag tells Spark SQL to interpret binary data as a string to provide compatibility with these systems.
// MAGIC | `spark.sql.parquet.int96AsTimestamp` | true | Some Parquet-producing systems, in particular Impala and Hive, store Timestamp into INT96. This flag tells Spark SQL to interpret INT96 data as a timestamp to provide compatibility with these systems. |
// MAGIC | `spark.sql.parquet.cacheMetadata` | true | Turns on caching of Parquet schema metadata. Can speed up querying of static data. |
// MAGIC | `spark.sql.parquet.compression.codec` | gzip | Sets the compression codec use when writing Parquet files. Acceptable values include: uncompressed, snappy, gzip, lzo. |
// MAGIC | `spark.sql.parquet.filterPushdown` | true | Enables Parquet filter push-down optimization when set to true. |
// MAGIC | `spark.sql.hive.convertMetastoreParquet` | true | When set to false, Spark SQL will use the Hive SerDe for parquet tables instead of the built in support. |
// MAGIC | `spark.sql.parquet.output.committer.class` | `org.apache.parquet.hadoop.ParquetOutputCommitter` | The output committer class used by Parquet. The specified class needs to be a subclass of `org.apache.hadoop.mapreduce.OutputCommitter`. Typically, it's also a subclass of `org.apache.parquet.hadoop.ParquetOutputCommitter`. Spark SQL comes with a builtin `org.apache.spark.sql.parquet.DirectParquetOutputCommitter`, which can be more efficient than the default Parquet output committer when writing data to S3. |
// MAGIC | `spark.sql.parquet.mergeSchema` | `false` | When true, the Parquet data source merges schemas collected from all data files, otherwise the schema is picked from the summary file or a random data file if no summary file is available. |
// COMMAND ----------
// MAGIC %md
// MAGIC ## JSON Datasets
// MAGIC Spark SQL can automatically infer the schema of a JSON dataset and load it as a DataFrame. This conversion can be done using `SQLContext.read.json()` on either an RDD of String, or a JSON file.
// MAGIC
// MAGIC Note that the file that is offered as *a json file* is not a typical JSON file. Each line must contain a separate, self-contained valid JSON object. As a consequence, a regular multi-line JSON file will most often fail.
// COMMAND ----------
// A JSON dataset is pointed to by path.
// The path can be either a single text file or a directory storing text files.
val path = "/tmp/platforms.json"
val platforms = sqlContext.read.json(path)
// The inferred schema can be visualized using the printSchema() method.
platforms.printSchema()
// root
// |-- name: string (nullable = true)
// |-- status: boolean (nullable = true)
// |-- visits: double (nullable = true)
// Register this DataFrame as a table.
platforms.registerTempTable("platforms")
// SQL statements can be run by using the sql methods provided by sqlContext.
// SQL LIKE pattern: names starting with "Face" and ending with "k" (e.g. "Facebook").
val facebook = sqlContext.sql("SELECT name FROM platforms WHERE name like 'Face%k'")
facebook.show()
// Alternatively, a DataFrame can be created for a JSON dataset represented by
// an RDD[String] storing one JSON object per string.
val rdd = sc.parallelize("""{"name":"IWyn","address":{"city":"Columbus","state":"Ohio"}}""" :: Nil)
val anotherPlatforms = sqlContext.read.json(rdd)
anotherPlatforms.show()
// COMMAND ----------
// MAGIC %md
// MAGIC ## Hive Tables
// MAGIC Spark SQL also supports reading and writing data stored in [Apache Hive](http://hive.apache.org/). However, since Hive has a large number of dependencies, it is not included in the default Spark assembly. Hive support is enabled by adding the `-Phive` and `-Phive-thriftserver` flags to Spark’s build. This command builds a new assembly jar that includes Hive. Note that this Hive assembly jar must also be present on all of the worker nodes, as they will need access to the Hive serialization and deserialization libraries (SerDes) in order to access data stored in Hive.
// MAGIC
// MAGIC Configuration of Hive is done by placing your `hive-site.xml`, `core-site.xml` (for security configuration), `hdfs-site.xml` (for HDFS configuration) file in `conf/`. Please note when running the query on a YARN cluster (`cluster` mode), the `datanucleus` jars under the `lib_managed/jars` directory and `hive-site.xml` under `conf/` directory need to be available on the driver and all executors launched by the YARN cluster. The convenient way to do this is adding them through the `--jars` option and `--file` option of the `spark-submit` command.
// MAGIC
// MAGIC When working with Hive one must construct a `HiveContext`, which inherits from `SQLContext`, and adds support for finding tables in the MetaStore and writing queries using HiveQL. Users who do not have an existing Hive deployment can still create a `HiveContext`. When not configured by the hive-site.xml, the context automatically creates `metastore_db` in the current directory and creates `warehouse` directory indicated by HiveConf, which defaults to `/user/hive/warehouse`. Note that you may need to grant write privilege on `/user/hive/warehouse` to the user who starts the spark application.
// MAGIC
// MAGIC ```scala
// MAGIC // sc is an existing SparkContext.
// MAGIC val sqlContext = new org.apache.spark.sql.hive.HiveContext(sc)
// MAGIC
// MAGIC sqlContext.sql("CREATE TABLE IF NOT EXISTS src (key INT, value STRING)")
// MAGIC sqlContext.sql("LOAD DATA LOCAL INPATH 'examples/src/main/resources/kv1.txt' INTO TABLE src")
// MAGIC
// MAGIC // Queries are expressed in HiveQL
// MAGIC sqlContext.sql("FROM src SELECT key, value").collect().foreach(println)
// MAGIC ```
// MAGIC
// MAGIC ### Interacting with Different Versions of Hive Metastore
// MAGIC One of the most important pieces of Spark SQL’s Hive support is interaction with Hive metastore, which enables Spark SQL to access metadata of Hive tables. Starting from Spark 1.4.0, a single binary build of Spark SQL can be used to query different versions of Hive metastores, using the configuration described below. Note that independent of the version of Hive that is being used to talk to the metastore, internally Spark SQL will compile against Hive 1.2.1 and use those classes for internal execution (serdes, UDFs, UDAFs, etc).
// MAGIC
// MAGIC The following options can be used to configure the version of Hive that is used to retrieve metadata:
// MAGIC
// MAGIC | Property Name | Default | Meaning |
// MAGIC | --- | --- | --- |
// MAGIC | `spark.sql.hive.metastore.version` | `1.2.1` | Version of the Hive metastore. Available options are `0.12.0` through `1.2.1`. |
// MAGIC | `spark.sql.hive.metastore.jars` | `builtin` | Location of the jars that should be used to instantiate the HiveMetastoreClient. This property can be one of three options: `builtin`, `maven`, a classpath in the standard format for the JVM. This classpath must include all of Hive and its dependencies, including the correct version of Hadoop. These jars only need to be present on the driver, but if you are running in yarn cluster mode then you must ensure they are packaged with you application. |
// MAGIC | `spark.sql.hive.metastore.sharedPrefixes` | `com.mysql.jdbc,org.postgresql,com.microsoft.sqlserver,oracle.jdbc` | A comma separated list of class prefixes that should be loaded using the classloader that is shared between Spark SQL and a specific version of Hive. An example of classes that should be shared is JDBC drivers that are needed to talk to the metastore. Other classes that need to be shared are those that interact with classes that are already shared. For example, custom appenders that are used by log4j. |
// MAGIC | `spark.sql.hive.metastore.barrierPrefixes` | `(empty)` | A comma separated list of class prefixes that should explicitly be reloaded for each version of Hive that Spark SQL is communicating with. For example, Hive UDFs that are declared in a prefix that typically would be shared (i.e. `org.apache.spark.*`). |
// COMMAND ----------
// MAGIC %md
// MAGIC ## JDBC To Other Databases
// MAGIC Spark SQL also includes a data source that can read data from other databases using JDBC. This functionality should be preferred over using [JdbcRDD](http://spark.apache.org/docs/latest/api/scala/index.html#org.apache.spark.rdd.JdbcRDD). This is because the results are returned as a DataFrame and they can easily be processed in Spark SQL or joined with other data sources. The JDBC data source is also easier to use from Java or Python as it does not require the user to provide a ClassTag. (Note that this is different than the Spark SQL JDBC server, which allows other applications to run queries using Spark SQL).
// MAGIC
// MAGIC To get started you will need to include the JDBC driver for your particular database on the spark classpath. For example, to connect to postgres from the Spark Shell you would run the following command:
// MAGIC ```scala
// MAGIC SPARK_CLASSPATH=postgresql-9.3-1102-jdbc41.jar bin/spark-shell
// MAGIC ```
// MAGIC
// MAGIC Tables from the remote database can be loaded as a DataFrame or Spark SQL Temporary table using the Data Sources API. The following options are supported:
// MAGIC
// MAGIC | Property Name | Meaning |
// MAGIC | --- | --- |
// MAGIC | `url` | The JDBC URL to connect to. |
// MAGIC | `dbtable` | The JDBC table that should be read. Note that anything that is valid in a `FROM` clause of a SQL query can be used. For example, instead of a full table you could also use a subquery in parentheses. |
// MAGIC | `driver` | The class name of the JDBC driver needed to connect to this URL. This class will be loaded on the master and workers before running any JDBC commands to allow the driver to register itself with the JDBC subsystem. |
// MAGIC | `partitionColumn, lowerBound, upperBound, numPartitions` | These options must all be specified if any of them is specified. They describe how to partition the table when reading in parallel from multiple workers. `partitionColumn` must be a numeric column from the table in question. Notice that `lowerBound` and `upperBound` are just used to decide the partition stride, not for filtering the rows in table. So all rows in the table will be partitioned and returned. |
// MAGIC | `fetchSize` | The JDBC fetch size, which determines how many rows to fetch per round trip. This can help performance on JDBC drivers which default to low fetch size (eg. Oracle with 10 rows). |
// MAGIC
// MAGIC ```scala
// MAGIC // Example of using JDBC datasource
// MAGIC val jdbcDF = sqlContext.read.format("jdbc").options(Map("url" -> "jdbc:postgresql:dbserver", "dbtable" -> "schema.tablename")).load()
// MAGIC ```
// MAGIC
// MAGIC ```sql
// MAGIC -- Or using JDBC datasource in SQL
// MAGIC CREATE TEMPORARY TABLE jdbcTable
// MAGIC USING org.apache.spark.sql.jdbc
// MAGIC OPTIONS (
// MAGIC url "jdbc:postgresql:dbserver",
// MAGIC dbtable "schema.tablename"
// MAGIC )
// MAGIC ```
// COMMAND ----------
// MAGIC %md
// MAGIC ### Troubleshooting
// MAGIC - The JDBC driver class must be visible to the primordial class loader on the client session and on all executors. This is because Java’s DriverManager class does a security check that results in it ignoring all drivers not visible to the primordial class loader when one goes to open a connection. One convenient way to do this is to modify compute\\_classpath.sh on all worker nodes to include your driver JARs.
// MAGIC - Some databases, such as H2, convert all names to upper case. You’ll need to use upper case to refer to those names in Spark SQL.
// COMMAND ----------
// MAGIC %md
// MAGIC
// MAGIC # [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/)
// MAGIC
// MAGIC
// MAGIC ### prepared by [Raazesh Sainudiin](https://nz.linkedin.com/in/raazesh-sainudiin-45955845) and [Sivanand Sivaram](https://www.linkedin.com/in/sivanand)
// MAGIC
// MAGIC *supported by* [](https://databricks.com/)
// MAGIC and
// MAGIC [](https://www.awseducate.com/microsite/CommunitiesEngageHome) | raazesh-sainudiin/scalable-data-science | db/xtraResources/ProgGuides1_6/sqlProgrammingGuide/003_dataSources_sqlProgGuide.scala | Scala | unlicense | 33,097 |
package sri.web.vdom
import org.scalajs.dom
import sri.core._
import scalajsplus.macros.{FunctionObjectMacro, exclude}
import scalajsplus.{OptDefault => NoValue, OptionalParam => U}
import sri.universal.MergeJSObjects
import scala.scalajs.js
import scala.scalajs.js.Dynamic.{literal => json}
import scala.scalajs.js.JSConverters.genTravConvertible2JSRichGenTrav
import scala.scalajs.js.`|`
trait Tags extends ReactEventAliases {
@inline
def solidcolor[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("solidcolor", props, children = children.toJSArray)
}
@inline
def big[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("big", props, children = children.toJSArray)
}
@inline
def tr[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("tr", props, children = children.toJSArray)
}
@inline
def cite[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("cite", props, children = children.toJSArray)
}
@inline
def html[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("html", props, children = children.toJSArray)
}
@inline
def footer[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("footer", props, children = children.toJSArray)
}
@inline
def h4[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("h4", props, children = children.toJSArray)
}
@inline
def caption[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("caption", props, children = children.toJSArray)
}
@inline
def datalist[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("datalist", props, children = children.toJSArray)
}
@inline
def header[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("header", props, children = children.toJSArray)
}
@inline
def wbr[T <: dom.Node](
style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("wbr", props)
}
@inline
def canvas[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("canvas", props, children = children.toJSArray)
}
@inline
def base[T <: dom.Node](
style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("base", props)
}
@inline
def source[T <: dom.Node](
style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("source", props)
}
@inline
def feFuncB[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("feFuncB", props, children = children.toJSArray)
}
@inline
def b[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("b", props, children = children.toJSArray)
}
@inline
def mesh[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("mesh", props, children = children.toJSArray)
}
@inline
def table[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("table", props, children = children.toJSArray)
}
@inline
def style[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("style", props, children = children.toJSArray)
}
@inline
def title[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("title", props, children = children.toJSArray)
}
@inline
def keygen[T <: dom.Node](
style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("keygen", props)
}
@inline
def tfoot[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("tfoot", props, children = children.toJSArray)
}
@inline
def view[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("view", props, children = children.toJSArray)
}
@inline
def area[T <: dom.Node](
style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("area", props)
}
@inline
def details[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("details", props, children = children.toJSArray)
}
@inline
def feDistantLight[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("feDistantLight", props, children = children.toJSArray)
}
@inline
def hgroup[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("hgroup", props, children = children.toJSArray)
}
@inline
def hr[T <: dom.Node](
style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("hr", props)
}
@inline
def q[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("q", props, children = children.toJSArray)
}
@inline
def meshpatch[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("meshpatch", props, children = children.toJSArray)
}
@inline
def legend[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("legend", props, children = children.toJSArray)
}
@inline
def feTurbulence[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("feTurbulence", props, children = children.toJSArray)
}
@inline
def set[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("set", props, children = children.toJSArray)
}
@inline
def link[T <: dom.Node](
style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("link", props)
}
@inline
def meshgradient[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("meshgradient", props, children = children.toJSArray)
}
@inline
def del[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("del", props, children = children.toJSArray)
}
@inline
def line[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("line", props, children = children.toJSArray)
}
@inline
def rt[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("rt", props, children = children.toJSArray)
}
@inline
def map[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("map", props, children = children.toJSArray)
}
@inline
def fieldset[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("fieldset", props, children = children.toJSArray)
}
@inline
def menu[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("menu", props, children = children.toJSArray)
}
@inline
def polygon[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("polygon", props, children = children.toJSArray)
}
@inline
def ol[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("ol", props, children = children.toJSArray)
}
@inline
def unknown[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("unknown", props, children = children.toJSArray)
}
@inline
def tbody[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("tbody", props, children = children.toJSArray)
}
@inline
def feDiffuseLighting[T <: dom.Node](
style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("feDiffuseLighting", props, children = children.toJSArray)
}
@inline
def feImage[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("feImage", props, children = children.toJSArray)
}
@inline
def pre[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("pre", props, children = children.toJSArray)
}
@inline
def filter[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("filter", props, children = children.toJSArray)
}
@inline
def optgroup[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("optgroup", props, children = children.toJSArray)
}
@inline
def mask[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("mask", props, children = children.toJSArray)
}
@inline
def embed[T <: dom.Node](
style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("embed", props)
}
@inline
def u[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("u", props, children = children.toJSArray)
}
@inline
def image[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("image", props, children = children.toJSArray)
}
@inline
def select[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
value: U[String] = NoValue,
onChange: U[ReactEventI => _] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("select", props, children = children.toJSArray)
}
@inline
def s[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("s", props, children = children.toJSArray)
}
@inline
def use[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("use", props, children = children.toJSArray)
}
@inline
def input[T <: dom.Node](
style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
name: U[String] = NoValue,
placeholder: U[String] = NoValue,
`type`: U[String] = NoValue,
defaultChecked: U[Boolean] = NoValue,
disabled: U[Boolean] = NoValue,
autoFocus: U[Boolean] = NoValue,
onChange: U[ReactEventI => _] = NoValue,
value: U[String | Int | Double] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue): ReactElement = {
import scalajsplus.DangerousUnionToJSAnyImplicit._
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("input", props)
}
@inline
def thead[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("thead", props, children = children.toJSArray)
}
@inline
def feMergeNode[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("feMergeNode", props, children = children.toJSArray)
}
@inline
def h6[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("h6", props, children = children.toJSArray)
}
@inline
def abbr[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("abbr", props, children = children.toJSArray)
}
@inline
def meshrow[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("meshrow", props, children = children.toJSArray)
}
/** Renders a `<td>` element with the given props and children. */
@inline
def td[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("td", elemProps, children = children.toJSArray)
}
/** Renders an `<em>` element with the given props and children. */
@inline
def em[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("em", elemProps, children = children.toJSArray)
}
/** Renders a `<feFlood>` element with the given props and children. */
@inline
def feFlood[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("feFlood", elemProps, children = children.toJSArray)
}
/** Renders an `<svg>` element; `width`/`height` accept string or numeric values. */
@inline
def svg[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    viewBox: U[String] = NoValue,
    width: U[String | Double | Int] = NoValue,
    height: U[String | Double | Int] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  // Required so the union-typed width/height params can be lowered to js.Any.
  import scalajsplus.DangerousUnionToJSAnyImplicit._
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("svg", elemProps, children = children.toJSArray)
}
/** Renders a `<colgroup>` element with the given props and children. */
@inline
def colgroup[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("colgroup", elemProps, children = children.toJSArray)
}
/** Renders a void `<track>` element; it accepts no children. */
@inline
def track[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("track", elemProps)
}
/** Renders a `<bdo>` element with the given props and children. */
@inline
def bdo[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("bdo", elemProps, children = children.toJSArray)
}
/** Renders a `<circle>` element with the given props and children. */
@inline
def circle[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("circle", elemProps, children = children.toJSArray)
}
/** Renders a `<time>` element with the given props and children. */
@inline
def time[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("time", elemProps, children = children.toJSArray)
}
/** Renders a `<feBlend>` element with the given props and children. */
@inline
def feBlend[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("feBlend", elemProps, children = children.toJSArray)
}
/** Renders a `<ul>` element with the given props and children. */
@inline
def ul[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("ul", elemProps, children = children.toJSArray)
}
/** Renders a `<body>` element with the given props and children. */
@inline
def body[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("body", elemProps, children = children.toJSArray)
}
/** Renders an `<output>` element with the given props and children. */
@inline
def output[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("output", elemProps, children = children.toJSArray)
}
/** Renders a `<feOffset>` element with the given props and children. */
@inline
def feOffset[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("feOffset", elemProps, children = children.toJSArray)
}
/** Renders a `<var>` element (backticked: `var` is a Scala keyword). */
@inline
def `var`[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("var", elemProps, children = children.toJSArray)
}
/** Renders a `<progress>` element with the given props and children. */
@inline
def progress[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("progress", elemProps, children = children.toJSArray)
}
/** Renders an `<h1>` element with the given props and children. */
@inline
def h1[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("h1", elemProps, children = children.toJSArray)
}
/** Renders a `<figcaption>` element with the given props and children. */
@inline
def figcaption[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("figcaption", elemProps, children = children.toJSArray)
}
/** Renders a `<stop>` element with the given props and children. */
@inline
def stop[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("stop", elemProps, children = children.toJSArray)
}
/** Renders a `<defs>` element with the given props and children. */
@inline
def defs[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("defs", elemProps, children = children.toJSArray)
}
/** Renders an `<iframe>` element with the given props and children. */
@inline
def iframe[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("iframe", elemProps, children = children.toJSArray)
}
/** Renders a `<small>` element with the given props and children. */
@inline
def small[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("small", elemProps, children = children.toJSArray)
}
/** Renders a `<textPath>` element with the given props and children. */
@inline
def textPath[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("textPath", elemProps, children = children.toJSArray)
}
/** Renders a `<picture>` element with the given props and children. */
@inline
def picture[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("picture", elemProps, children = children.toJSArray)
}
/** Renders an `<a>` anchor element with optional href, target and click handler. */
@inline
def a[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    href: U[String] = NoValue,
    target: U[String] = NoValue,
    onClick: U[ReactEventH => _] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("a", elemProps, children = children.toJSArray)
}
/** Renders a void `<meta>` element; it accepts no children. */
@inline
def meta[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("meta", elemProps)
}
/** Renders an `<article>` element with the given props and children. */
@inline
def article[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("article", elemProps, children = children.toJSArray)
}
/** Renders a `<blockquote>` element with the given props and children. */
@inline
def blockquote[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("blockquote", elemProps, children = children.toJSArray)
}
/** Renders a `<fePointLight>` element with the given props and children. */
@inline
def fePointLight[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("fePointLight", elemProps, children = children.toJSArray)
}
/** Renders a `<feFuncA>` element with the given props and children. */
@inline
def feFuncA[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("feFuncA", elemProps, children = children.toJSArray)
}
/** Renders a `<bdi>` element with the given props and children. */
@inline
def bdi[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("bdi", elemProps, children = children.toJSArray)
}
/** Renders a `<li>` element with the given props and children. */
@inline
def li[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("li", elemProps, children = children.toJSArray)
}
/** Renders a `<cursor>` element with the given props and children. */
@inline
def cursor[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("cursor", elemProps, children = children.toJSArray)
}
/** Renders an `<h5>` element with the given props and children. */
@inline
def h5[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("h5", elemProps, children = children.toJSArray)
}
/** Renders a void `<col>` element; it accepts no children. */
@inline
def col[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("col", elemProps)
}
/** Renders a `<dialog>` element with the given props and children. */
@inline
def dialog[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("dialog", elemProps, children = children.toJSArray)
}
/** Renders a `<kbd>` element with the given props and children. */
@inline
def kbd[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("kbd", elemProps, children = children.toJSArray)
}
/** Renders a `<nav>` element with the given props and children. */
@inline
def nav[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("nav", elemProps, children = children.toJSArray)
}
/** Renders an `<aside>` element with the given props and children. */
@inline
def aside[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("aside", elemProps, children = children.toJSArray)
}
/** Renders an `<animateTransform>` element with the given props and children. */
@inline
def animateTransform[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("animateTransform", elemProps, children = children.toJSArray)
}
/** Renders a `<g>` element with the given props and children. */
@inline
def g[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("g", elemProps, children = children.toJSArray)
}
/** Renders a `<span>` element with the given props and children. */
@inline
def span[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("span", elemProps, children = children.toJSArray)
}
/** Renders an `<ellipse>` element with the given props and children. */
@inline
def ellipse[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("ellipse", elemProps, children = children.toJSArray)
}
/** Renders a `<feSpotLight>` element with the given props and children. */
@inline
def feSpotLight[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("feSpotLight", elemProps, children = children.toJSArray)
}
/** Renders a `<feFuncG>` element with the given props and children. */
@inline
def feFuncG[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("feFuncG", elemProps, children = children.toJSArray)
}
/** Renders a `<th>` element with the given props and children. */
@inline
def th[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("th", elemProps, children = children.toJSArray)
}
/** Renders a `<pattern>` element with the given props and children. */
@inline
def pattern[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("pattern", elemProps, children = children.toJSArray)
}
/** Renders an `<ins>` element with the given props and children. */
@inline
def ins[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("ins", elemProps, children = children.toJSArray)
}
/** Renders a `<p>` element with the given props and children. */
@inline
def p[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(v => MergeJSObjects(elemProps, v))
  CreateDOMElement("p", elemProps, children = children.toJSArray)
}
/** Renders an `<object>` element (backticked: `object` is a Scala keyword). */
@inline
def `object`[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("object", elemProps, children = children.toJSArray)
}
/** Renders a `<hatchpath>` element with the given props and children. */
@inline
def hatchpath[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("hatchpath", elemProps, children = children.toJSArray)
}
/** Renders a `<feMerge>` element with the given props and children. */
@inline
def feMerge[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("feMerge", elemProps, children = children.toJSArray)
}
/** Renders a `<feColorMatrix>` element with the given props and children. */
@inline
def feColorMatrix[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("feColorMatrix", elemProps, children = children.toJSArray)
}
/** Renders a `<script>` element with the given props and children. */
@inline
def script[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("script", elemProps, children = children.toJSArray)
}
/** Renders a `<feMorphology>` element with the given props and children. */
@inline
def feMorphology[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("feMorphology", elemProps, children = children.toJSArray)
}
/** Renders a `<summary>` element with the given props and children. */
@inline
def summary[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("summary", elemProps, children = children.toJSArray)
}
/** Renders a `<feFuncR>` element with the given props and children. */
@inline
def feFuncR[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("feFuncR", elemProps, children = children.toJSArray)
}
/** Renders a void `<br>` element; it accepts no children. */
@inline
def br[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("br", elemProps)
}
/** Renders a `<sup>` element with the given props and children. */
@inline
def sup[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("sup", elemProps, children = children.toJSArray)
}
/** Renders a `<clipPath>` element with the given props and children. */
@inline
def clipPath[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("clipPath", elemProps, children = children.toJSArray)
}
/** Renders a `<dt>` element with the given props and children. */
@inline
def dt[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("dt", elemProps, children = children.toJSArray)
}
/** Renders a `<hatch>` element with the given props and children. */
@inline
def hatch[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("hatch", elemProps, children = children.toJSArray)
}
/** Renders a `<code>` element with the given props and children. */
@inline
def code[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("code", elemProps, children = children.toJSArray)
}
/** Renders a `<polyline>` element with the given props and children. */
@inline
def polyline[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("polyline", elemProps, children = children.toJSArray)
}
/** Renders an `<h2>` element with the given props and children. */
@inline
def h2[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("h2", elemProps, children = children.toJSArray)
}
/** Renders a void `<menuitem>` element; it accepts no children. */
@inline
def menuitem[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("menuitem", elemProps)
}
/** Renders an `<address>` element with the given props and children. */
@inline
def address[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("address", elemProps, children = children.toJSArray)
}
/** Renders a `<feComponentTransfer>` element with the given props and children. */
@inline
def feComponentTransfer[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("feComponentTransfer", elemProps, children = children.toJSArray)
}
/** Renders a `<feDropShadow>` element with the given props and children. */
@inline
def feDropShadow[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("feDropShadow", elemProps, children = children.toJSArray)
}
/** Renders a `<ruby>` element with the given props and children. */
@inline
def ruby[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("ruby", elemProps, children = children.toJSArray)
}
/** Renders a `<feSpecularLighting>` element with the given props and children. */
@inline
def feSpecularLighting[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("feSpecularLighting", elemProps, children = children.toJSArray)
}
/** Renders a `<path>` element with the given props and children. */
@inline
def path[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("path", elemProps, children = children.toJSArray)
}
/** Renders a `<feTile>` element with the given props and children. */
@inline
def feTile[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("feTile", elemProps, children = children.toJSArray)
}
/** Renders a `<metadata>` element with the given props and children. */
@inline
def metadata[T <: dom.Node](
    style: U[js.Any] = NoValue,
    id: U[String] = NoValue,
    className: U[String] = NoValue,
    @exclude key: String | Int = null,
    @exclude ref: js.Function1[T, Unit] = null,
    @exclude extraProps: U[DOMProps] = NoValue
)(children: ReactNode*): ReactElement = {
  val elemProps = FunctionObjectMacro()
  extraProps.foreach(p => MergeJSObjects(elemProps, p))
  CreateDOMElement("metadata", elemProps, children = children.toJSArray)
}
@inline
def feGaussianBlur[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("feGaussianBlur", props, children = children.toJSArray)
}
@inline
def symbol[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("symbol", props, children = children.toJSArray)
}
@inline
def desc[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("desc", props, children = children.toJSArray)
}
@inline
def dl[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("dl", props, children = children.toJSArray)
}
@inline
def meter[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("meter", props, children = children.toJSArray)
}
@inline
def figure[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("figure", props, children = children.toJSArray)
}
@inline
def samp[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("samp", props, children = children.toJSArray)
}
@inline
def rp[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("rp", props, children = children.toJSArray)
}
@inline
def foreignObject[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("foreignObject", props, children = children.toJSArray)
}
@inline
def dfn[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("dfn", props, children = children.toJSArray)
}
@inline
def feConvolveMatrix[T <: dom.Node](
style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("feConvolveMatrix", props, children = children.toJSArray)
}
@inline
def option[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
value: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("option", props, children = children.toJSArray)
}
@inline
def head[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("head", props, children = children.toJSArray)
}
@inline
def video[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("video", props, children = children.toJSArray)
}
@inline
def main[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("main", props, children = children.toJSArray)
}
@inline
def i[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("i", props, children = children.toJSArray)
}
@inline
def sub[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("sub", props, children = children.toJSArray)
}
@inline
def label[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("label", props, children = children.toJSArray)
}
@inline
def button[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
onClick: U[ReactEventH => _] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("button", props, children = children.toJSArray)
}
@inline
def feComposite[T <: dom.Node](style: U[js.Any] = NoValue,
id: U[String] = NoValue,
className: U[String] = NoValue,
@exclude key: String | Int = null,
@exclude ref: js.Function1[T, Unit] = null,
@exclude extraProps: U[DOMProps] = NoValue)(
children: ReactNode*): ReactElement = {
val props = FunctionObjectMacro()
extraProps.foreach(v => { MergeJSObjects(props, v) })
CreateDOMElement("feComposite", props, children = children.toJSArray)
}
  // NOTE(review): Auto-generated DOM element factories, continued. Same shape
  // as the preceding factories: FunctionObjectMacro() presumably turns the
  // non-@exclude parameters into a JS props object (confirm against the macro
  // definition), extraProps is merged in, and CreateDOMElement is called with
  // the tag name. Void HTML elements (`param`, `img`) take a single parameter
  // list and no children.
  @inline
  def mpath[T <: dom.Node](style: U[js.Any] = NoValue,
                           id: U[String] = NoValue,
                           className: U[String] = NoValue,
                           @exclude key: String | Int = null,
                           @exclude ref: js.Function1[T, Unit] = null,
                           @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("mpath", props, children = children.toJSArray)
  }
  @inline
  def h3[T <: dom.Node](style: U[js.Any] = NoValue,
                        id: U[String] = NoValue,
                        className: U[String] = NoValue,
                        @exclude key: String | Int = null,
                        @exclude ref: js.Function1[T, Unit] = null,
                        @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("h3", props, children = children.toJSArray)
  }
  @inline
  def animate[T <: dom.Node](style: U[js.Any] = NoValue,
                             id: U[String] = NoValue,
                             className: U[String] = NoValue,
                             @exclude key: String | Int = null,
                             @exclude ref: js.Function1[T, Unit] = null,
                             @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("animate", props, children = children.toJSArray)
  }
  @inline
  def discard[T <: dom.Node](style: U[js.Any] = NoValue,
                             id: U[String] = NoValue,
                             className: U[String] = NoValue,
                             @exclude key: String | Int = null,
                             @exclude ref: js.Function1[T, Unit] = null,
                             @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("discard", props, children = children.toJSArray)
  }
  @inline
  def section[T <: dom.Node](style: U[js.Any] = NoValue,
                             id: U[String] = NoValue,
                             className: U[String] = NoValue,
                             @exclude key: String | Int = null,
                             @exclude ref: js.Function1[T, Unit] = null,
                             @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("section", props, children = children.toJSArray)
  }
  @inline
  def linearGradient[T <: dom.Node](style: U[js.Any] = NoValue,
                                    id: U[String] = NoValue,
                                    className: U[String] = NoValue,
                                    @exclude key: String | Int = null,
                                    @exclude ref: js.Function1[T, Unit] = null,
                                    @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("linearGradient", props, children = children.toJSArray)
  }
  @inline
  def text[T <: dom.Node](style: U[js.Any] = NoValue,
                          id: U[String] = NoValue,
                          className: U[String] = NoValue,
                          @exclude key: String | Int = null,
                          @exclude ref: js.Function1[T, Unit] = null,
                          @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("text", props, children = children.toJSArray)
  }
  // `div` exposes an onClick handler (typed ReactEventH => Unit here, unlike
  // `button`'s ReactEventH => _ — see the review note on `button`).
  @inline
  def div[T <: dom.Node](style: U[js.Any] = NoValue,
                         id: U[String] = NoValue,
                         className: U[String] = NoValue,
                         onClick: U[ReactEventH => Unit] = NoValue,
                         @exclude key: String | Int = null,
                         @exclude ref: js.Function1[T, Unit] = null,
                         @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("div", props, children = children.toJSArray)
  }
  @inline
  def audio[T <: dom.Node](style: U[js.Any] = NoValue,
                           id: U[String] = NoValue,
                           className: U[String] = NoValue,
                           @exclude key: String | Int = null,
                           @exclude ref: js.Function1[T, Unit] = null,
                           @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("audio", props, children = children.toJSArray)
  }
  @inline
  def tspan[T <: dom.Node](style: U[js.Any] = NoValue,
                           id: U[String] = NoValue,
                           className: U[String] = NoValue,
                           @exclude key: String | Int = null,
                           @exclude ref: js.Function1[T, Unit] = null,
                           @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("tspan", props, children = children.toJSArray)
  }
  @inline
  def mark[T <: dom.Node](style: U[js.Any] = NoValue,
                          id: U[String] = NoValue,
                          className: U[String] = NoValue,
                          @exclude key: String | Int = null,
                          @exclude ref: js.Function1[T, Unit] = null,
                          @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("mark", props, children = children.toJSArray)
  }
  @inline
  def animateMotion[T <: dom.Node](style: U[js.Any] = NoValue,
                                   id: U[String] = NoValue,
                                   className: U[String] = NoValue,
                                   @exclude key: String | Int = null,
                                   @exclude ref: js.Function1[T, Unit] = null,
                                   @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("animateMotion", props, children = children.toJSArray)
  }
  // `form` exposes an onSubmit handler.
  @inline
  def form[T <: dom.Node](style: U[js.Any] = NoValue,
                          id: U[String] = NoValue,
                          className: U[String] = NoValue,
                          onSubmit: U[ReactEventH => _] = NoValue,
                          @exclude key: String | Int = null,
                          @exclude ref: js.Function1[T, Unit] = null,
                          @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("form", props, children = children.toJSArray)
  }
  // `textarea` exposes onChange plus the common <textarea> attributes
  // (value, placeholder, cols, rows).
  @inline
  def textarea[T <: dom.Node](style: U[js.Any] = NoValue,
                              id: U[String] = NoValue,
                              className: U[String] = NoValue,
                              onChange: U[ReactEventI => _] = NoValue,
                              value: U[String] = NoValue,
                              placeholder: U[String] = NoValue,
                              cols: U[Int] = NoValue,
                              rows: U[Int] = NoValue,
                              @exclude key: String | Int = null,
                              @exclude ref: js.Function1[T, Unit] = null,
                              @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("textarea", props, children = children.toJSArray)
  }
  // `param` is a void element: single parameter list, no children.
  @inline
  def param[T <: dom.Node](
      style: U[js.Any] = NoValue,
      id: U[String] = NoValue,
      className: U[String] = NoValue,
      @exclude key: String | Int = null,
      @exclude ref: js.Function1[T, Unit] = null,
      @exclude extraProps: U[DOMProps] = NoValue): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("param", props)
  }
  @inline
  def marker[T <: dom.Node](style: U[js.Any] = NoValue,
                            id: U[String] = NoValue,
                            className: U[String] = NoValue,
                            @exclude key: String | Int = null,
                            @exclude ref: js.Function1[T, Unit] = null,
                            @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("marker", props, children = children.toJSArray)
  }
  @inline
  def noscript[T <: dom.Node](style: U[js.Any] = NoValue,
                              id: U[String] = NoValue,
                              className: U[String] = NoValue,
                              @exclude key: String | Int = null,
                              @exclude ref: js.Function1[T, Unit] = null,
                              @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("noscript", props, children = children.toJSArray)
  }
  @inline
  def dd[T <: dom.Node](style: U[js.Any] = NoValue,
                        id: U[String] = NoValue,
                        className: U[String] = NoValue,
                        @exclude key: String | Int = null,
                        @exclude ref: js.Function1[T, Unit] = null,
                        @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("dd", props, children = children.toJSArray)
  }
  @inline
  def radialGradient[T <: dom.Node](style: U[js.Any] = NoValue,
                                    id: U[String] = NoValue,
                                    className: U[String] = NoValue,
                                    @exclude key: String | Int = null,
                                    @exclude ref: js.Function1[T, Unit] = null,
                                    @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("radialGradient", props, children = children.toJSArray)
  }
  @inline
  def rect[T <: dom.Node](style: U[js.Any] = NoValue,
                          id: U[String] = NoValue,
                          className: U[String] = NoValue,
                          @exclude key: String | Int = null,
                          @exclude ref: js.Function1[T, Unit] = null,
                          @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("rect", props, children = children.toJSArray)
  }
  @inline
  def feDisplacementMap[T <: dom.Node](
      style: U[js.Any] = NoValue,
      id: U[String] = NoValue,
      className: U[String] = NoValue,
      @exclude key: String | Int = null,
      @exclude ref: js.Function1[T, Unit] = null,
      @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("feDisplacementMap", props, children = children.toJSArray)
  }
  @inline
  def switch[T <: dom.Node](style: U[js.Any] = NoValue,
                            id: U[String] = NoValue,
                            className: U[String] = NoValue,
                            @exclude key: String | Int = null,
                            @exclude ref: js.Function1[T, Unit] = null,
                            @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("switch", props, children = children.toJSArray)
  }
  @inline
  def data[T <: dom.Node](style: U[js.Any] = NoValue,
                          id: U[String] = NoValue,
                          className: U[String] = NoValue,
                          @exclude key: String | Int = null,
                          @exclude ref: js.Function1[T, Unit] = null,
                          @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("data", props, children = children.toJSArray)
  }
  // `img` is a void element (no children) with the common <img> attributes.
  // NOTE(review): the `srcset` parameter is lowercase; React's prop is
  // `srcSet` — confirm how the macro maps parameter names to props.
  @inline
  def img[T <: dom.Node](
      style: U[js.Any] = NoValue,
      id: U[String] = NoValue,
      className: U[String] = NoValue,
      src: U[String] = NoValue,
      sizes: U[String] = NoValue,
      alt: U[String] = NoValue,
      srcset: U[String] = NoValue,
      height: U[String] = NoValue,
      width: U[String] = NoValue,
      @exclude key: String | Int = null,
      @exclude ref: js.Function1[T, Unit] = null,
      @exclude extraProps: U[DOMProps] = NoValue): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("img", props)
  }
  @inline
  def strong[T <: dom.Node](style: U[js.Any] = NoValue,
                            id: U[String] = NoValue,
                            className: U[String] = NoValue,
                            @exclude key: String | Int = null,
                            @exclude ref: js.Function1[T, Unit] = null,
                            @exclude extraProps: U[DOMProps] = NoValue)(
      children: ReactNode*): ReactElement = {
    val props = FunctionObjectMacro()
    extraProps.foreach(v => { MergeJSObjects(props, v) })
    CreateDOMElement("strong", props, children = children.toJSArray)
  }
  // NOTE(review): Auto-generated children-only shorthand factories (`...C`
  // suffix). Each creates the named element with an empty props object
  // (`json()`) and only the given children — a lighter-weight alternative to
  // the parameterized factories above when no attributes are needed.
  @inline
  def meshrowC(children: ReactNode*) = {
    CreateDOMElement("meshrow", json(), children = children.toJSArray)
  }
  @inline
  def feFuncBC(children: ReactNode*) = {
    CreateDOMElement("feFuncB", json(), children = children.toJSArray)
  }
  @inline
  def aC(children: ReactNode*) = {
    CreateDOMElement("a", json(), children = children.toJSArray)
  }
  @inline
  def trC(children: ReactNode*) = {
    CreateDOMElement("tr", json(), children = children.toJSArray)
  }
  @inline
  def h4C(children: ReactNode*) = {
    CreateDOMElement("h4", json(), children = children.toJSArray)
  }
  @inline
  def circleC(children: ReactNode*) = {
    CreateDOMElement("circle", json(), children = children.toJSArray)
  }
  @inline
  def meshpatchC(children: ReactNode*) = {
    CreateDOMElement("meshpatch", json(), children = children.toJSArray)
  }
  @inline
  def imageC(children: ReactNode*) = {
    CreateDOMElement("image", json(), children = children.toJSArray)
  }
  @inline
  def feDisplacementMapC(children: ReactNode*) = {
    CreateDOMElement("feDisplacementMap", json(), children = children.toJSArray)
  }
  @inline
  def bC(children: ReactNode*) = {
    CreateDOMElement("b", json(), children = children.toJSArray)
  }
  @inline
  def foreignObjectC(children: ReactNode*) = {
    CreateDOMElement("foreignObject", json(), children = children.toJSArray)
  }
  @inline
  def bdiC(children: ReactNode*) = {
    CreateDOMElement("bdi", json(), children = children.toJSArray)
  }
  @inline
  def legendC(children: ReactNode*) = {
    CreateDOMElement("legend", json(), children = children.toJSArray)
  }
  @inline
  def bodyC(children: ReactNode*) = {
    CreateDOMElement("body", json(), children = children.toJSArray)
  }
  @inline
  def h1C(children: ReactNode*) = {
    CreateDOMElement("h1", json(), children = children.toJSArray)
  }
  @inline
  def polygonC(children: ReactNode*) = {
    CreateDOMElement("polygon", json(), children = children.toJSArray)
  }
  @inline
  def rubyC(children: ReactNode*) = {
    CreateDOMElement("ruby", json(), children = children.toJSArray)
  }
  @inline
  def h5C(children: ReactNode*) = {
    CreateDOMElement("h5", json(), children = children.toJSArray)
  }
  @inline
  def feFloodC(children: ReactNode*) = {
    CreateDOMElement("feFlood", json(), children = children.toJSArray)
  }
  @inline
  def ulC(children: ReactNode*) = {
    CreateDOMElement("ul", json(), children = children.toJSArray)
  }
  @inline
  def feTurbulenceC(children: ReactNode*) = {
    CreateDOMElement("feTurbulence", json(), children = children.toJSArray)
  }
  @inline
  def smallC(children: ReactNode*) = {
    CreateDOMElement("small", json(), children = children.toJSArray)
  }
  @inline
  def meterC(children: ReactNode*) = {
    CreateDOMElement("meter", json(), children = children.toJSArray)
  }
  @inline
  def animateTransformC(children: ReactNode*) = {
    CreateDOMElement("animateTransform", json(), children = children.toJSArray)
  }
  @inline
  def feFuncAC(children: ReactNode*) = {
    CreateDOMElement("feFuncA", json(), children = children.toJSArray)
  }
  @inline
  def qC(children: ReactNode*) = {
    CreateDOMElement("q", json(), children = children.toJSArray)
  }
  @inline
  def ellipseC(children: ReactNode*) = {
    CreateDOMElement("ellipse", json(), children = children.toJSArray)
  }
  @inline
  def markerC(children: ReactNode*) = {
    CreateDOMElement("marker", json(), children = children.toJSArray)
  }
  @inline
  def bigC(children: ReactNode*) = {
    CreateDOMElement("big", json(), children = children.toJSArray)
  }
  @inline
  def selectC(children: ReactNode*) = {
    CreateDOMElement("select", json(), children = children.toJSArray)
  }
  @inline
  def dataC(children: ReactNode*) = {
    CreateDOMElement("data", json(), children = children.toJSArray)
  }
  @inline
  def delC(children: ReactNode*) = {
    CreateDOMElement("del", json(), children = children.toJSArray)
  }
  @inline
  def dialogC(children: ReactNode*) = {
    CreateDOMElement("dialog", json(), children = children.toJSArray)
  }
  @inline
  def unknownC(children: ReactNode*) = {
    CreateDOMElement("unknown", json(), children = children.toJSArray)
  }
  @inline
  def tspanC(children: ReactNode*) = {
    CreateDOMElement("tspan", json(), children = children.toJSArray)
  }
  @inline
  def preC(children: ReactNode*) = {
    CreateDOMElement("pre", json(), children = children.toJSArray)
  }
  @inline
  def uC(children: ReactNode*) = {
    CreateDOMElement("u", json(), children = children.toJSArray)
  }
  @inline
  def timeC(children: ReactNode*) = {
    CreateDOMElement("time", json(), children = children.toJSArray)
  }
  @inline
  def tdC(children: ReactNode*) = {
    CreateDOMElement("td", json(), children = children.toJSArray)
  }
  @inline
  def sC(children: ReactNode*) = {
    CreateDOMElement("s", json(), children = children.toJSArray)
  }
  @inline
  def spanC(children: ReactNode*) = {
    CreateDOMElement("span", json(), children = children.toJSArray)
  }
  @inline
  def olC(children: ReactNode*) = {
    CreateDOMElement("ol", json(), children = children.toJSArray)
  }
  @inline
  def svgC(children: ReactNode*) = {
    CreateDOMElement("svg", json(), children = children.toJSArray)
  }
  @inline
  def areaC(children: ReactNode*) = {
    CreateDOMElement("area", json(), children = children.toJSArray)
  }
  @inline
  def discardC(children: ReactNode*) = {
    CreateDOMElement("discard", json(), children = children.toJSArray)
  }
  @inline
  def feImageC(children: ReactNode*) = {
    CreateDOMElement("feImage", json(), children = children.toJSArray)
  }
  @inline
  def pathC(children: ReactNode*) = {
    CreateDOMElement("path", json(), children = children.toJSArray)
  }
  @inline
  def markC(children: ReactNode*) = {
    CreateDOMElement("mark", json(), children = children.toJSArray)
  }
  @inline
  def theadC(children: ReactNode*) = {
    CreateDOMElement("thead", json(), children = children.toJSArray)
  }
  @inline
  def useC(children: ReactNode*) = {
    CreateDOMElement("use", json(), children = children.toJSArray)
  }
  @inline
  def feColorMatrixC(children: ReactNode*) = {
    CreateDOMElement("feColorMatrix", json(), children = children.toJSArray)
  }
  @inline
  def kbdC(children: ReactNode*) = {
    CreateDOMElement("kbd", json(), children = children.toJSArray)
  }
  @inline
  def feGaussianBlurC(children: ReactNode*) = {
    CreateDOMElement("feGaussianBlur", json(), children = children.toJSArray)
  }
  // NOTE(review): Auto-generated children-only shorthand factories, continued
  // (`...C` suffix): empty props (`json()`), children only.
  @inline
  def articleC(children: ReactNode*) = {
    CreateDOMElement("article", json(), children = children.toJSArray)
  }
  @inline
  def styleC(children: ReactNode*) = {
    CreateDOMElement("style", json(), children = children.toJSArray)
  }
  @inline
  def sampC(children: ReactNode*) = {
    CreateDOMElement("samp", json(), children = children.toJSArray)
  }
  @inline
  def datalistC(children: ReactNode*) = {
    CreateDOMElement("datalist", json(), children = children.toJSArray)
  }
  @inline
  def fePointLightC(children: ReactNode*) = {
    CreateDOMElement("fePointLight", json(), children = children.toJSArray)
  }
  @inline
  def titleC(children: ReactNode*) = {
    CreateDOMElement("title", json(), children = children.toJSArray)
  }
  @inline
  def optionC(children: ReactNode*) = {
    CreateDOMElement("option", json(), children = children.toJSArray)
  }
  @inline
  def solidcolorC(children: ReactNode*) = {
    CreateDOMElement("solidcolor", json(), children = children.toJSArray)
  }
  @inline
  def liC(children: ReactNode*) = {
    CreateDOMElement("li", json(), children = children.toJSArray)
  }
  @inline
  def rtC(children: ReactNode*) = {
    CreateDOMElement("rt", json(), children = children.toJSArray)
  }
  @inline
  def gC(children: ReactNode*) = {
    CreateDOMElement("g", json(), children = children.toJSArray)
  }
  @inline
  def objectC(children: ReactNode*) = {
    CreateDOMElement("object", json(), children = children.toJSArray)
  }
  @inline
  def feMergeC(children: ReactNode*) = {
    CreateDOMElement("feMerge", json(), children = children.toJSArray)
  }
  @inline
  def feSpecularLightingC(children: ReactNode*) = {
    CreateDOMElement("feSpecularLighting",
                     json(),
                     children = children.toJSArray)
  }
  @inline
  def noscriptC(children: ReactNode*) = {
    CreateDOMElement("noscript", json(), children = children.toJSArray)
  }
  @inline
  def pictureC(children: ReactNode*) = {
    CreateDOMElement("picture", json(), children = children.toJSArray)
  }
  @inline
  def hatchC(children: ReactNode*) = {
    CreateDOMElement("hatch", json(), children = children.toJSArray)
  }
  @inline
  def figcaptionC(children: ReactNode*) = {
    CreateDOMElement("figcaption", json(), children = children.toJSArray)
  }
  @inline
  def formC(children: ReactNode*) = {
    CreateDOMElement("form", json(), children = children.toJSArray)
  }
  @inline
  def insC(children: ReactNode*) = {
    CreateDOMElement("ins", json(), children = children.toJSArray)
  }
  @inline
  def h3C(children: ReactNode*) = {
    CreateDOMElement("h3", json(), children = children.toJSArray)
  }
  @inline
  def metadataC(children: ReactNode*) = {
    CreateDOMElement("metadata", json(), children = children.toJSArray)
  }
  @inline
  def feDistantLightC(children: ReactNode*) = {
    CreateDOMElement("feDistantLight", json(), children = children.toJSArray)
  }
  @inline
  def descC(children: ReactNode*) = {
    CreateDOMElement("desc", json(), children = children.toJSArray)
  }
  @inline
  def feMorphologyC(children: ReactNode*) = {
    CreateDOMElement("feMorphology", json(), children = children.toJSArray)
  }
  @inline
  def hatchpathC(children: ReactNode*) = {
    CreateDOMElement("hatchpath", json(), children = children.toJSArray)
  }
  @inline
  def h2C(children: ReactNode*) = {
    CreateDOMElement("h2", json(), children = children.toJSArray)
  }
  @inline
  def symbolC(children: ReactNode*) = {
    CreateDOMElement("symbol", json(), children = children.toJSArray)
  }
  @inline
  def progressC(children: ReactNode*) = {
    CreateDOMElement("progress", json(), children = children.toJSArray)
  }
  @inline
  def bdoC(children: ReactNode*) = {
    CreateDOMElement("bdo", json(), children = children.toJSArray)
  }
  @inline
  def mainC(children: ReactNode*) = {
    CreateDOMElement("main", json(), children = children.toJSArray)
  }
  @inline
  def scriptC(children: ReactNode*) = {
    CreateDOMElement("script", json(), children = children.toJSArray)
  }
  @inline
  def colgroupC(children: ReactNode*) = {
    CreateDOMElement("colgroup", json(), children = children.toJSArray)
  }
  @inline
  def optgroupC(children: ReactNode*) = {
    CreateDOMElement("optgroup", json(), children = children.toJSArray)
  }
  @inline
  def ddC(children: ReactNode*) = {
    CreateDOMElement("dd", json(), children = children.toJSArray)
  }
  @inline
  def abbrC(children: ReactNode*) = {
    CreateDOMElement("abbr", json(), children = children.toJSArray)
  }
  @inline
  def stopC(children: ReactNode*) = {
    CreateDOMElement("stop", json(), children = children.toJSArray)
  }
  @inline
  def rpC(children: ReactNode*) = {
    CreateDOMElement("rp", json(), children = children.toJSArray)
  }
  @inline
  def defsC(children: ReactNode*) = {
    CreateDOMElement("defs", json(), children = children.toJSArray)
  }
  @inline
  def maskC(children: ReactNode*) = {
    CreateDOMElement("mask", json(), children = children.toJSArray)
  }
  @inline
  def thC(children: ReactNode*) = {
    CreateDOMElement("th", json(), children = children.toJSArray)
  }
  @inline
  def citeC(children: ReactNode*) = {
    CreateDOMElement("cite", json(), children = children.toJSArray)
  }
  @inline
  def blockquoteC(children: ReactNode*) = {
    CreateDOMElement("blockquote", json(), children = children.toJSArray)
  }
  @inline
  def codeC(children: ReactNode*) = {
    CreateDOMElement("code", json(), children = children.toJSArray)
  }
  @inline
  def feConvolveMatrixC(children: ReactNode*) = {
    CreateDOMElement("feConvolveMatrix", json(), children = children.toJSArray)
  }
  @inline
  def feCompositeC(children: ReactNode*) = {
    CreateDOMElement("feComposite", json(), children = children.toJSArray)
  }
  @inline
  def tbodyC(children: ReactNode*) = {
    CreateDOMElement("tbody", json(), children = children.toJSArray)
  }
  @inline
  def outputC(children: ReactNode*) = {
    CreateDOMElement("output", json(), children = children.toJSArray)
  }
  @inline
  def detailsC(children: ReactNode*) = {
    CreateDOMElement("details", json(), children = children.toJSArray)
  }
  @inline
  def iframeC(children: ReactNode*) = {
    CreateDOMElement("iframe", json(), children = children.toJSArray)
  }
  @inline
  def h6C(children: ReactNode*) = {
    CreateDOMElement("h6", json(), children = children.toJSArray)
  }
  @inline
  def polylineC(children: ReactNode*) = {
    CreateDOMElement("polyline", json(), children = children.toJSArray)
  }
  @inline
  def divC(children: ReactNode*) = {
    CreateDOMElement("div", json(), children = children.toJSArray)
  }
  @inline
  def rectC(children: ReactNode*) = {
    CreateDOMElement("rect", json(), children = children.toJSArray)
  }
  @inline
  def patternC(children: ReactNode*) = {
    CreateDOMElement("pattern", json(), children = children.toJSArray)
  }
  @inline
  def navC(children: ReactNode*) = {
    CreateDOMElement("nav", json(), children = children.toJSArray)
  }
  @inline
  def filterC(children: ReactNode*) = {
    CreateDOMElement("filter", json(), children = children.toJSArray)
  }
  @inline
  def feSpotLightC(children: ReactNode*) = {
    CreateDOMElement("feSpotLight", json(), children = children.toJSArray)
  }
  @inline
  def animateC(children: ReactNode*) = {
    CreateDOMElement("animate", json(), children = children.toJSArray)
  }
  @inline
  def strongC(children: ReactNode*) = {
    CreateDOMElement("strong", json(), children = children.toJSArray)
  }
  @inline
  def captionC(children: ReactNode*) = {
    CreateDOMElement("caption", json(), children = children.toJSArray)
  }
  @inline
  def meshC(children: ReactNode*) = {
    CreateDOMElement("mesh", json(), children = children.toJSArray)
  }
  @inline
  def textPathC(children: ReactNode*) = {
    CreateDOMElement("textPath", json(), children = children.toJSArray)
  }
  @inline
  def feComponentTransferC(children: ReactNode*) = {
    CreateDOMElement("feComponentTransfer",
                     json(),
                     children = children.toJSArray)
  }
  @inline
  def feOffsetC(children: ReactNode*) = {
    CreateDOMElement("feOffset", json(), children = children.toJSArray)
  }
@inline
def feFuncRC(children: ReactNode*) = {
CreateDOMElement("feFuncR", json(), children = children.toJSArray)
}
@inline
def feFuncGC(children: ReactNode*) = {
CreateDOMElement("feFuncG", json(), children = children.toJSArray)
}
@inline
def buttonC(children: ReactNode*) = {
CreateDOMElement("button", json(), children = children.toJSArray)
}
@inline
def emC(children: ReactNode*) = {
CreateDOMElement("em", json(), children = children.toJSArray)
}
@inline
def footerC(children: ReactNode*) = {
CreateDOMElement("footer", json(), children = children.toJSArray)
}
@inline
def varC(children: ReactNode*) = {
CreateDOMElement("var", json(), children = children.toJSArray)
}
@inline
def dlC(children: ReactNode*) = {
CreateDOMElement("dl", json(), children = children.toJSArray)
}
@inline
def sectionC(children: ReactNode*) = {
CreateDOMElement("section", json(), children = children.toJSArray)
}
@inline
def pC(children: ReactNode*) = {
CreateDOMElement("p", json(), children = children.toJSArray)
}
@inline
def lineC(children: ReactNode*) = {
CreateDOMElement("line", json(), children = children.toJSArray)
}
@inline
def subC(children: ReactNode*) = {
CreateDOMElement("sub", json(), children = children.toJSArray)
}
@inline
def viewC(children: ReactNode*) = {
CreateDOMElement("view", json(), children = children.toJSArray)
}
@inline
def canvasC(children: ReactNode*) = {
CreateDOMElement("canvas", json(), children = children.toJSArray)
}
@inline
def headC(children: ReactNode*) = {
CreateDOMElement("head", json(), children = children.toJSArray)
}
@inline
def htmlC(children: ReactNode*) = {
CreateDOMElement("html", json(), children = children.toJSArray)
}
@inline
def labelC(children: ReactNode*) = {
CreateDOMElement("label", json(), children = children.toJSArray)
}
@inline
def summaryC(children: ReactNode*) = {
CreateDOMElement("summary", json(), children = children.toJSArray)
}
@inline
def switchC(children: ReactNode*) = {
CreateDOMElement("switch", json(), children = children.toJSArray)
}
@inline
def feTileC(children: ReactNode*) = {
CreateDOMElement("feTile", json(), children = children.toJSArray)
}
@inline
def cursorC(children: ReactNode*) = {
CreateDOMElement("cursor", json(), children = children.toJSArray)
}
@inline
def menuC(children: ReactNode*) = {
CreateDOMElement("menu", json(), children = children.toJSArray)
}
@inline
def feMergeNodeC(children: ReactNode*) = {
CreateDOMElement("feMergeNode", json(), children = children.toJSArray)
}
@inline
def textC(children: ReactNode*) = {
CreateDOMElement("text", json(), children = children.toJSArray)
}
@inline
def linearGradientC(children: ReactNode*) = {
CreateDOMElement("linearGradient", json(), children = children.toJSArray)
}
@inline
def figureC(children: ReactNode*) = {
CreateDOMElement("figure", json(), children = children.toJSArray)
}
@inline
def mpathC(children: ReactNode*) = {
CreateDOMElement("mpath", json(), children = children.toJSArray)
}
@inline
def dtC(children: ReactNode*) = {
CreateDOMElement("dt", json(), children = children.toJSArray)
}
@inline
def asideC(children: ReactNode*) = {
CreateDOMElement("aside", json(), children = children.toJSArray)
}
@inline
def addressC(children: ReactNode*) = {
CreateDOMElement("address", json(), children = children.toJSArray)
}
@inline
def hgroupC(children: ReactNode*) = {
CreateDOMElement("hgroup", json(), children = children.toJSArray)
}
@inline
def animateMotionC(children: ReactNode*) = {
CreateDOMElement("animateMotion", json(), children = children.toJSArray)
}
@inline
def meshgradientC(children: ReactNode*) = {
CreateDOMElement("meshgradient", json(), children = children.toJSArray)
}
@inline
def setC(children: ReactNode*) = {
CreateDOMElement("set", json(), children = children.toJSArray)
}
@inline
def audioC(children: ReactNode*) = {
CreateDOMElement("audio", json(), children = children.toJSArray)
}
@inline
def dfnC(children: ReactNode*) = {
CreateDOMElement("dfn", json(), children = children.toJSArray)
}
@inline
def supC(children: ReactNode*) = {
CreateDOMElement("sup", json(), children = children.toJSArray)
}
@inline
def tfootC(children: ReactNode*) = {
CreateDOMElement("tfoot", json(), children = children.toJSArray)
}
@inline
def iC(children: ReactNode*) = {
CreateDOMElement("i", json(), children = children.toJSArray)
}
@inline
def clipPathC(children: ReactNode*) = {
CreateDOMElement("clipPath", json(), children = children.toJSArray)
}
@inline
def videoC(children: ReactNode*) = {
CreateDOMElement("video", json(), children = children.toJSArray)
}
@inline
def headerC(children: ReactNode*) = {
CreateDOMElement("header", json(), children = children.toJSArray)
}
@inline
def mapC(children: ReactNode*) = {
CreateDOMElement("map", json(), children = children.toJSArray)
}
@inline
def fieldsetC(children: ReactNode*) = {
CreateDOMElement("fieldset", json(), children = children.toJSArray)
}
@inline
def textareaC(children: ReactNode*) = {
CreateDOMElement("textarea", json(), children = children.toJSArray)
}
@inline
def feDiffuseLightingC(children: ReactNode*) = {
CreateDOMElement("feDiffuseLighting", json(), children = children.toJSArray)
}
@inline
def feDropShadowC(children: ReactNode*) = {
CreateDOMElement("feDropShadow", json(), children = children.toJSArray)
}
@inline
def radialGradientC(children: ReactNode*) = {
CreateDOMElement("radialGradient", json(), children = children.toJSArray)
}
@inline
def tableC(children: ReactNode*) = {
CreateDOMElement("table", json(), children = children.toJSArray)
}
@inline
def feBlendC(children: ReactNode*) = {
CreateDOMElement("feBlend", json(), children = children.toJSArray)
}
}
| scalajs-react-interface/vdom | src/main/scala/sri/web/vdom/Tags.scala | Scala | apache-2.0 | 129,267 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.commons.codec.binary
import org.beangle.commons.codec.{ Decoder, Encoder }
/** Convenience facade over [[Base64Encoder]] and [[Base64Decoder]]. */
object Base64 {

  /** Encodes the given bytes as a base64 string. */
  def encode(data: Array[Byte]): String = Base64Encoder.encode(data)

  /** Decodes a base64 string back into the original bytes. */
  def decode(data: String): Array[Byte] = Base64Decoder.decode(data)

  /** Decodes base64 characters back into the original bytes. */
  def decode(data: Array[Char]): Array[Byte] = Base64Decoder.decode(data)
}
object Base64Encoder extends Encoder[Array[Byte], String] {

  // 64 alphabet characters plus '=' at index 64, used for padding.
  private val Alphabets =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=".toCharArray()

  /**
   * Encodes the given bytes as a base64 string ('=' padded, no line breaks).
   * An empty input yields an empty string.
   */
  def encode(data: Array[Byte]): String = {
    // Every 3 input bytes become 4 output characters; a partial final
    // group is right-padded with '=' characters.
    val out = new java.lang.StringBuilder(((data.length + 2) / 3) * 4)
    var i = 0
    while (i < data.length) {
      val remaining = data.length - i
      val b0 = data(i) & 0xff
      val b1 = if (remaining > 1) data(i + 1) & 0xff else 0
      val b2 = if (remaining > 2) data(i + 2) & 0xff else 0
      // Pack up to three bytes into one 24-bit group, then emit its
      // four 6-bit slices from most to least significant.
      val group = (b0 << 16) | (b1 << 8) | b2
      out.append(Alphabets((group >> 18) & 0x3f))
      out.append(Alphabets((group >> 12) & 0x3f))
      out.append(if (remaining > 1) Alphabets((group >> 6) & 0x3f) else '=')
      out.append(if (remaining > 2) Alphabets(group & 0x3f) else '=')
      i += 3
    }
    out.toString
  }
}
object Base64Decoder extends Decoder[String, Array[Byte]] {
  // Lookup table indexed by character code (0-255): the 6-bit value of a
  // base64 alphabet character, or -1 for any other character.
  private val Codes = buildCodes()
  /** Decodes a base64 string into the original bytes. */
  def decode(pArray: String): Array[Byte] = decode(pArray.toCharArray())
  /**
   * Decodes base64 characters into the original bytes. Characters outside
   * the alphabet ('=' padding, whitespace, line breaks, ...) are skipped.
   *
   * Throws `java.lang.Error` if the input is malformed enough that the
   * pre-computed output length does not match the bytes actually produced.
   */
  def decode(data: Array[Char]): Array[Byte] = {
    // First pass: count only the significant (alphabet) characters so the
    // exact output size can be allocated up front.
    var tempLen = data.length
    data.indices foreach { ix =>
      if (data(ix) > '\u00ff' || Codes(data(ix)) < 0) tempLen -= 1
    }
    // 4 base64 chars -> 3 bytes; a trailing group of 3 chars -> 2 bytes,
    // of 2 chars -> 1 byte.
    var len = (tempLen / 4) * 3
    if (tempLen % 4 == 3) len += 2
    if (tempLen % 4 == 2) len += 1
    val out = new Array[Byte](len)
    // Second pass: shift each 6-bit value into an accumulator and emit a
    // byte every time 8 or more bits have been collected.
    var shift = 0
    var accum = 0
    var index = 0
    data.indices foreach { ix =>
      val value = if (data(ix) <= '\u00ff') (Codes(data(ix))).toInt else -1
      if (value >= 0) {
        accum <<= 6
        shift += 6
        accum |= value
        if (shift >= 8) {
          shift -= 8
          out(index) = (accum >> shift & 0xff).toByte
          index += 1
        }
      }
    }
    if (index != out.length) throw new Error(s"Miscalculated data length (wrote $index instead of ${out.length})")
    else out
  }
  // Builds the reverse-lookup table for the standard base64 alphabet:
  // 'A'-'Z' -> 0-25, 'a'-'z' -> 26-51, '0'-'9' -> 52-61, '+' -> 62, '/' -> 63.
  private def buildCodes(): Array[Byte] = {
    val codes = new Array[Byte](256)
    (0 until 256) foreach { i => codes(i) = -1 }
    codes(43) = 62
    codes(47) = 63
    (48 to 57) foreach (i => codes(i) = ((52 + i) - 48).toByte)
    (65 to 90) foreach (i => codes(i) = (i - 65).toByte)
    (97 to 122) foreach (i => codes(i) = ((26 + i) - 97).toByte)
    codes
  }
}
| beangle/commons | core/src/main/scala/org/beangle/commons/codec/binary/Base64.scala | Scala | lgpl-3.0 | 3,439 |
package shared
import app.{RouterBuilderUtils, WsClientMacro}
import org.scalatest.{FlatSpec, Matchers}
// Tests for the WsClientMacro / RouterBuilderUtils macros. NOTE: the expected
// path strings below ("WsMacrosTest.this.Api.updateTopic", ...) are derived by
// the macro from the source structure, so renaming this class or the Api trait
// would change them.
class WsMacrosTest extends FlatSpec with Matchers {
  // Sample RPC-style API the macros operate on.
  trait Api {
    def updateTopic(id: Long, data: String): List[String]
    def updateParagraph(id: Long): Either[String, Int]
  }
  "WsClient.post" should "do correct post call" in {
    var called = false
    // Stub transport: capture the generated path and serialized arguments.
    val client = new WsClientMacro[Api, String] {
      override def doCall[O](path: String,
                             dataStr: String,
                             reader: (String) => O,
                             errHnd: Throwable => String): (O => String) => String = {
        called = true
        path should be("WsMacrosTest.this.Api.updateTopic")
        dataStr shouldBe ("""["78","eeeee"]""")
        fn => fn(reader(""))
      }
    }
    client.post(_.updateTopic(78, "eeeee"), th => "th.")
    called should be(true)
  }
  "WsClient.post" should "invoke error handler" in {
    var called = false
    val throwable = new Throwable
    // Stub transport that always reports a failure to the error handler.
    val client = new WsClientMacro[Api, String] {
      override def doCall[O](path: String,
                             dataStr: String,
                             reader: (String) => O,
                             errHnd: Throwable => String): (O => String) => String = {
        errHnd(throwable)
        fn => fn(reader(""))
      }
    }
    client.post(_.updateTopic(78, "eeeee"), th => {
      // The handler must receive the exact throwable passed to errHnd.
      th should be(throwable)
      called = true
      ""
    })
    called should be(true)
  }
  "WsClient.post" should "return correct result" in {
    // Stub transport that replies with a fixed JSON payload; the macro must
    // deserialize it into List[String] before invoking the continuation.
    val client = new WsClientMacro[Api, String] {
      override def doCall[O](path: String,
                             dataStr: String,
                             reader: (String) => O,
                             errHnd: Throwable => String): (O => String) => String = {
        fn => fn(reader("""["159","160"]"""))
      }
    }
    val result = client.post(_.updateTopic(78, "eeeee"), _ => "")(_.mkString("<",";",">"))
    result should be("<159;160>")
  }
  "WsClient.forMethod" should "return correct result for a method with two arguments" in {
    val server = new RouterBuilderUtils[Api] {
      val result = forMethod2(_.updateTopic)
    }
    // forMethod2 yields (path, argument reader, result writer).
    val (path, reader, writer) = server.result
    path should be(("WsMacrosTest.this.Api.updateTopic"))
    reader("""["48","s478"]""") should be((48L,"s478"))
    writer(List("A1","b2","c-")) should be("""["A1","b2","c-"]""")
  }
  "WsClient.forMethod" should "return correct result for a method with one argument" in {
    val server = new RouterBuilderUtils[Api] {
      val result = forMethod(_.updateParagraph)
    }
    val (path, reader, writer) = server.result
    path should be(("WsMacrosTest.this.Api.updateParagraph"))
    reader(""""48"""") should be(48L)
    // Either is encoded positionally: [1, value] for Right.
    writer(Right(49)) should be("[1,49]")
  }
}
| Igorocky/lesn | macroses/src/test/scala/shared/WsMacrosTest.scala | Scala | mit | 2,849 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.scalatest.matchers.must.Matchers.the
import org.apache.spark.TestUtils.{assertNotSpilled, assertSpilled}
import org.apache.spark.sql.catalyst.optimizer.TransposeWindow
import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
import org.apache.spark.sql.execution.exchange.Exchange
import org.apache.spark.sql.expressions.{Aggregator, MutableAggregationBuffer, UserDefinedAggregateFunction, Window}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
/**
* Window function testing for DataFrame API.
*/
class DataFrameWindowFunctionsSuite extends QueryTest
  with SharedSparkSession
  with AdaptiveSparkPlanHelper{
  import testImplicits._
  // A window spec may be built partitionBy-first; the order of builder calls
  // must not affect the result.
  test("reuse window partitionBy") {
    val df = Seq((1, "1"), (2, "2"), (1, "1"), (2, "2")).toDF("key", "value")
    val w = Window.partitionBy("key").orderBy("value")
    checkAnswer(
      df.select(
        lead("key", 1).over(w),
        lead("value", 1).over(w)),
      Row(1, "1") :: Row(2, "2") :: Row(null, null) :: Row(null, null) :: Nil)
  }
  // Same as above but with orderBy called before partitionBy.
  test("reuse window orderBy") {
    val df = Seq((1, "1"), (2, "2"), (1, "1"), (2, "2")).toDF("key", "value")
    val w = Window.orderBy("value").partitionBy("key")
    checkAnswer(
      df.select(
        lead("key", 1).over(w),
        lead("value", 1).over(w)),
      Row(1, "1") :: Row(2, "2") :: Row(null, null) :: Row(null, null) :: Nil)
  }
  // Ranking and aggregate functions over a partitioned+ordered window with
  // the default frame.
  test("rank functions in unspecific window") {
    withTempView("window_table") {
      val df = Seq((1, "1"), (2, "2"), (1, "2"), (2, "2")).toDF("key", "value")
      df.createOrReplaceTempView("window_table")
      checkAnswer(
        df.select(
          $"key",
          max("key").over(Window.partitionBy("value").orderBy("key")),
          min("key").over(Window.partitionBy("value").orderBy("key")),
          mean("key").over(Window.partitionBy("value").orderBy("key")),
          count("key").over(Window.partitionBy("value").orderBy("key")),
          sum("key").over(Window.partitionBy("value").orderBy("key")),
          ntile(2).over(Window.partitionBy("value").orderBy("key")),
          row_number().over(Window.partitionBy("value").orderBy("key")),
          dense_rank().over(Window.partitionBy("value").orderBy("key")),
          rank().over(Window.partitionBy("value").orderBy("key")),
          cume_dist().over(Window.partitionBy("value").orderBy("key")),
          percent_rank().over(Window.partitionBy("value").orderBy("key"))),
        Row(1, 1, 1, 1.0d, 1, 1, 1, 1, 1, 1, 1.0d, 0.0d) ::
          Row(1, 1, 1, 1.0d, 1, 1, 1, 1, 1, 1, 1.0d / 3.0d, 0.0d) ::
          Row(2, 2, 1, 5.0d / 3.0d, 3, 5, 1, 2, 2, 2, 1.0d, 0.5d) ::
          Row(2, 2, 1, 5.0d / 3.0d, 3, 5, 2, 3, 2, 2, 1.0d, 0.5d) :: Nil)
    }
  }
  // Order-sensitive window functions must be rejected without an orderBy.
  test("window function should fail if order by clause is not specified") {
    val df = Seq((1, "1"), (2, "2"), (1, "2"), (2, "2")).toDF("key", "value")
    val e = intercept[AnalysisException](
      // Here we missed .orderBy("key")!
      df.select(row_number().over(Window.partitionBy("value"))).collect())
    assert(e.message.contains("requires window to be ordered"))
  }
  // Population statistics over an unbounded frame, with the legacy behavior
  // (NaN instead of null for undefined correlation) enabled.
  test("corr, covar_pop, stddev_pop functions in specific window") {
    withSQLConf(SQLConf.LEGACY_STATISTICAL_AGGREGATE.key -> "true") {
      val df = Seq(
        ("a", "p1", 10.0, 20.0),
        ("b", "p1", 20.0, 10.0),
        ("c", "p2", 20.0, 20.0),
        ("d", "p2", 20.0, 20.0),
        ("e", "p3", 0.0, 0.0),
        ("f", "p3", 6.0, 12.0),
        ("g", "p3", 6.0, 12.0),
        ("h", "p3", 8.0, 16.0),
        ("i", "p4", 5.0, 5.0)).toDF("key", "partitionId", "value1", "value2")
      checkAnswer(
        df.select(
          $"key",
          corr("value1", "value2").over(Window.partitionBy("partitionId")
            .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)),
          covar_pop("value1", "value2")
            .over(Window.partitionBy("partitionId")
            .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)),
          var_pop("value1")
            .over(Window.partitionBy("partitionId")
            .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)),
          stddev_pop("value1")
            .over(Window.partitionBy("partitionId")
            .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)),
          var_pop("value2")
            .over(Window.partitionBy("partitionId")
            .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)),
          stddev_pop("value2")
            .over(Window.partitionBy("partitionId")
            .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing))),
        // As stddev_pop(expr) = sqrt(var_pop(expr))
        // the "stddev_pop" column can be calculated from the "var_pop" column.
        //
        // As corr(expr1, expr2) = covar_pop(expr1, expr2) / (stddev_pop(expr1) * stddev_pop(expr2))
        // the "corr" column can be calculated from the "covar_pop" and the two "stddev_pop" columns
        Seq(
          Row("a", -1.0, -25.0, 25.0, 5.0, 25.0, 5.0),
          Row("b", -1.0, -25.0, 25.0, 5.0, 25.0, 5.0),
          Row("c", null, 0.0, 0.0, 0.0, 0.0, 0.0),
          Row("d", null, 0.0, 0.0, 0.0, 0.0, 0.0),
          Row("e", 1.0, 18.0, 9.0, 3.0, 36.0, 6.0),
          Row("f", 1.0, 18.0, 9.0, 3.0, 36.0, 6.0),
          Row("g", 1.0, 18.0, 9.0, 3.0, 36.0, 6.0),
          Row("h", 1.0, 18.0, 9.0, 3.0, 36.0, 6.0),
          Row("i", Double.NaN, 0.0, 0.0, 0.0, 0.0, 0.0)))
    }
  }
  // Same scenario with the legacy flag off: a single-row partition now yields
  // null (not NaN) for corr — see row "i".
  test("SPARK-13860: " +
    "corr, covar_pop, stddev_pop functions in specific window " +
    "LEGACY_STATISTICAL_AGGREGATE off") {
    withSQLConf(SQLConf.LEGACY_STATISTICAL_AGGREGATE.key -> "false") {
      val df = Seq(
        ("a", "p1", 10.0, 20.0),
        ("b", "p1", 20.0, 10.0),
        ("c", "p2", 20.0, 20.0),
        ("d", "p2", 20.0, 20.0),
        ("e", "p3", 0.0, 0.0),
        ("f", "p3", 6.0, 12.0),
        ("g", "p3", 6.0, 12.0),
        ("h", "p3", 8.0, 16.0),
        ("i", "p4", 5.0, 5.0)).toDF("key", "partitionId", "value1", "value2")
      checkAnswer(
        df.select(
          $"key",
          corr("value1", "value2").over(Window.partitionBy("partitionId")
            .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)),
          covar_pop("value1", "value2")
            .over(Window.partitionBy("partitionId")
            .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)),
          var_pop("value1")
            .over(Window.partitionBy("partitionId")
            .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)),
          stddev_pop("value1")
            .over(Window.partitionBy("partitionId")
            .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)),
          var_pop("value2")
            .over(Window.partitionBy("partitionId")
            .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)),
          stddev_pop("value2")
            .over(Window.partitionBy("partitionId")
            .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing))),
        // As stddev_pop(expr) = sqrt(var_pop(expr))
        // the "stddev_pop" column can be calculated from the "var_pop" column.
        //
        // As corr(expr1, expr2) = covar_pop(expr1, expr2) / (stddev_pop(expr1) * stddev_pop(expr2))
        // the "corr" column can be calculated from the "covar_pop" and the two "stddev_pop" columns
        Seq(
          Row("a", -1.0, -25.0, 25.0, 5.0, 25.0, 5.0),
          Row("b", -1.0, -25.0, 25.0, 5.0, 25.0, 5.0),
          Row("c", null, 0.0, 0.0, 0.0, 0.0, 0.0),
          Row("d", null, 0.0, 0.0, 0.0, 0.0, 0.0),
          Row("e", 1.0, 18.0, 9.0, 3.0, 36.0, 6.0),
          Row("f", 1.0, 18.0, 9.0, 3.0, 36.0, 6.0),
          Row("g", 1.0, 18.0, 9.0, 3.0, 36.0, 6.0),
          Row("h", 1.0, 18.0, 9.0, 3.0, 36.0, 6.0),
          Row("i", null, 0.0, 0.0, 0.0, 0.0, 0.0)))
    }
  }
  // Sample statistics (n-1 denominator) with the legacy behavior: a
  // single-row partition yields NaN — see row "i".
  test("covar_samp, var_samp (variance), stddev_samp (stddev) functions in specific window") {
    withSQLConf(SQLConf.LEGACY_STATISTICAL_AGGREGATE.key -> "true") {
      val df = Seq(
        ("a", "p1", 10.0, 20.0),
        ("b", "p1", 20.0, 10.0),
        ("c", "p2", 20.0, 20.0),
        ("d", "p2", 20.0, 20.0),
        ("e", "p3", 0.0, 0.0),
        ("f", "p3", 6.0, 12.0),
        ("g", "p3", 6.0, 12.0),
        ("h", "p3", 8.0, 16.0),
        ("i", "p4", 5.0, 5.0)).toDF("key", "partitionId", "value1", "value2")
      checkAnswer(
        df.select(
          $"key",
          covar_samp("value1", "value2").over(Window.partitionBy("partitionId")
            .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)),
          var_samp("value1").over(Window.partitionBy("partitionId")
            .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)),
          variance("value1").over(Window.partitionBy("partitionId")
            .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)),
          stddev_samp("value1").over(Window.partitionBy("partitionId")
            .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)),
          stddev("value1").over(Window.partitionBy("partitionId")
            .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing))
        ),
        Seq(
          Row("a", -50.0, 50.0, 50.0, 7.0710678118654755, 7.0710678118654755),
          Row("b", -50.0, 50.0, 50.0, 7.0710678118654755, 7.0710678118654755),
          Row("c", 0.0, 0.0, 0.0, 0.0, 0.0),
          Row("d", 0.0, 0.0, 0.0, 0.0, 0.0),
          Row("e", 24.0, 12.0, 12.0, 3.4641016151377544, 3.4641016151377544),
          Row("f", 24.0, 12.0, 12.0, 3.4641016151377544, 3.4641016151377544),
          Row("g", 24.0, 12.0, 12.0, 3.4641016151377544, 3.4641016151377544),
          Row("h", 24.0, 12.0, 12.0, 3.4641016151377544, 3.4641016151377544),
          Row("i", Double.NaN, Double.NaN, Double.NaN, Double.NaN, Double.NaN)))
    }
  }
  // Same sample statistics with the legacy flag off: single-row partition
  // yields nulls instead of NaN — see row "i".
  test("SPARK-13860: " +
    "covar_samp, var_samp (variance), stddev_samp (stddev) functions in specific window " +
    "LEGACY_STATISTICAL_AGGREGATE off") {
    withSQLConf(SQLConf.LEGACY_STATISTICAL_AGGREGATE.key -> "false") {
      val df = Seq(
        ("a", "p1", 10.0, 20.0),
        ("b", "p1", 20.0, 10.0),
        ("c", "p2", 20.0, 20.0),
        ("d", "p2", 20.0, 20.0),
        ("e", "p3", 0.0, 0.0),
        ("f", "p3", 6.0, 12.0),
        ("g", "p3", 6.0, 12.0),
        ("h", "p3", 8.0, 16.0),
        ("i", "p4", 5.0, 5.0)).toDF("key", "partitionId", "value1", "value2")
      checkAnswer(
        df.select(
          $"key",
          covar_samp("value1", "value2").over(Window.partitionBy("partitionId")
            .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)),
          var_samp("value1").over(Window.partitionBy("partitionId")
            .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)),
          variance("value1").over(Window.partitionBy("partitionId")
            .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)),
          stddev_samp("value1").over(Window.partitionBy("partitionId")
            .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)),
          stddev("value1").over(Window.partitionBy("partitionId")
            .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing))
        ),
        Seq(
          Row("a", -50.0, 50.0, 50.0, 7.0710678118654755, 7.0710678118654755),
          Row("b", -50.0, 50.0, 50.0, 7.0710678118654755, 7.0710678118654755),
          Row("c", 0.0, 0.0, 0.0, 0.0, 0.0),
          Row("d", 0.0, 0.0, 0.0, 0.0, 0.0),
          Row("e", 24.0, 12.0, 12.0, 3.4641016151377544, 3.4641016151377544),
          Row("f", 24.0, 12.0, 12.0, 3.4641016151377544, 3.4641016151377544),
          Row("g", 24.0, 12.0, 12.0, 3.4641016151377544, 3.4641016151377544),
          Row("h", 24.0, 12.0, 12.0, 3.4641016151377544, 3.4641016151377544),
          Row("i", null, null, null, null, null)))
    }
  }
  // collect_list over an unbounded frame; nulls are dropped from the result
  // and an all-null partition yields an empty array — see row "i".
  test("collect_list in ascending ordered window") {
    val df = Seq(
      ("a", "p1", "1"),
      ("b", "p1", "2"),
      ("c", "p1", "2"),
      ("d", "p1", null),
      ("e", "p1", "3"),
      ("f", "p2", "10"),
      ("g", "p2", "11"),
      ("h", "p3", "20"),
      ("i", "p4", null)).toDF("key", "partition", "value")
    checkAnswer(
      df.select(
        $"key",
        sort_array(
          collect_list("value").over(Window.partitionBy($"partition").orderBy($"value")
            .rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)))),
      Seq(
        Row("a", Array("1", "2", "2", "3")),
        Row("b", Array("1", "2", "2", "3")),
        Row("c", Array("1", "2", "2", "3")),
        Row("d", Array("1", "2", "2", "3")),
        Row("e", Array("1", "2", "2", "3")),
        Row("f", Array("10", "11")),
        Row("g", Array("10", "11")),
        Row("h", Array("20")),
        Row("i", Array())))
  }
  // The window sort direction must not affect the collected contents
  // (results are normalized with sort_array before comparison).
  test("collect_list in descending ordered window") {
    val df = Seq(
      ("a", "p1", "1"),
      ("b", "p1", "2"),
      ("c", "p1", "2"),
      ("d", "p1", null),
      ("e", "p1", "3"),
      ("f", "p2", "10"),
      ("g", "p2", "11"),
      ("h", "p3", "20"),
      ("i", "p4", null)).toDF("key", "partition", "value")
    checkAnswer(
      df.select(
        $"key",
        sort_array(
          collect_list("value").over(Window.partitionBy($"partition").orderBy($"value".desc)
            .rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)))),
      Seq(
        Row("a", Array("1", "2", "2", "3")),
        Row("b", Array("1", "2", "2", "3")),
        Row("c", Array("1", "2", "2", "3")),
        Row("d", Array("1", "2", "2", "3")),
        Row("e", Array("1", "2", "2", "3")),
        Row("f", Array("10", "11")),
        Row("g", Array("10", "11")),
        Row("h", Array("20")),
        Row("i", Array())))
  }
  // collect_set deduplicates within each partition.
  test("collect_set in window") {
    val df = Seq(
      ("a", "p1", "1"),
      ("b", "p1", "2"),
      ("c", "p1", "2"),
      ("d", "p1", "3"),
      ("e", "p1", "3"),
      ("f", "p2", "10"),
      ("g", "p2", "11"),
      ("h", "p3", "20")).toDF("key", "partition", "value")
    checkAnswer(
      df.select(
        $"key",
        sort_array(
          collect_set("value").over(Window.partitionBy($"partition").orderBy($"value")
            .rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)))),
      Seq(
        Row("a", Array("1", "2", "3")),
        Row("b", Array("1", "2", "3")),
        Row("c", Array("1", "2", "3")),
        Row("d", Array("1", "2", "3")),
        Row("e", Array("1", "2", "3")),
        Row("f", Array("10", "11")),
        Row("g", Array("10", "11")),
        Row("h", Array("20"))))
  }
  // Higher-order moments over an unbounded frame.
  test("skewness and kurtosis functions in window") {
    val df = Seq(
      ("a", "p1", 1.0),
      ("b", "p1", 1.0),
      ("c", "p1", 2.0),
      ("d", "p1", 2.0),
      ("e", "p1", 3.0),
      ("f", "p1", 3.0),
      ("g", "p1", 3.0),
      ("h", "p2", 1.0),
      ("i", "p2", 2.0),
      ("j", "p2", 5.0)).toDF("key", "partition", "value")
    checkAnswer(
      df.select(
        $"key",
        skewness("value").over(Window.partitionBy("partition").orderBy($"key")
          .rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)),
        kurtosis("value").over(Window.partitionBy("partition").orderBy($"key")
          .rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing))),
      // results are checked by scipy.stats.skew() and scipy.stats.kurtosis()
      Seq(
        Row("a", -0.27238010581457267, -1.506920415224914),
        Row("b", -0.27238010581457267, -1.506920415224914),
        Row("c", -0.27238010581457267, -1.506920415224914),
        Row("d", -0.27238010581457267, -1.506920415224914),
        Row("e", -0.27238010581457267, -1.506920415224914),
        Row("f", -0.27238010581457267, -1.506920415224914),
        Row("g", -0.27238010581457267, -1.506920415224914),
        Row("h", 0.5280049792181881, -1.5000000000000013),
        Row("i", 0.5280049792181881, -1.5000000000000013),
        Row("j", 0.5280049792181881, -1.5000000000000013)))
  }
  // Resolving a window aggregate over a non-existent column must fail with a
  // MISSING_COLUMN analysis error.
  test("aggregation function on invalid column") {
    val df = Seq((1, "1")).toDF("key", "value")
    val e = intercept[AnalysisException](
      df.select($"key", count("invalid").over()))
    assert(e.getErrorClass == "MISSING_COLUMN")
    assert(e.messageParameters.sameElements(Array("invalid", "value, key")))
  }
  // Numeric aggregates applied to non-numeric string columns evaluate to null
  // rather than failing.
  test("numerical aggregate functions on string column") {
    val df = Seq((1, "a", "b")).toDF("key", "value1", "value2")
    checkAnswer(
      df.select($"key",
        var_pop("value1").over(),
        variance("value1").over(),
        stddev_pop("value1").over(),
        stddev("value1").over(),
        sum("value1").over(),
        mean("value1").over(),
        avg("value1").over(),
        corr("value1", "value2").over(),
        covar_pop("value1", "value2").over(),
        covar_samp("value1", "value2").over(),
        skewness("value1").over(),
        kurtosis("value1").over()),
      Seq(Row(1, null, null, null, null, null, null, null, null, null, null, null, null)))
  }
  // var_pop / var_samp / approx_count_distinct on a partition-only window
  // (no ordering, whole-partition frame).
  test("statistical functions") {
    val df = Seq(("a", 1), ("a", 1), ("a", 2), ("a", 2), ("b", 4), ("b", 3), ("b", 2)).
      toDF("key", "value")
    val window = Window.partitionBy($"key")
    checkAnswer(
      df.select(
        $"key",
        var_pop($"value").over(window),
        var_samp($"value").over(window),
        approx_count_distinct($"value").over(window)),
      Seq.fill(4)(Row("a", 1.0d / 4.0d, 1.0d / 3.0d, 2))
        ++ Seq.fill(3)(Row("b", 2.0d / 3.0d, 1.0d, 3)))
  }
  // A window function may be applied on top of a grouped aggregation
  // (sum of sums over all groups minus the group's own sum).
  test("window function with aggregates") {
    val df = Seq(("a", 1), ("a", 1), ("a", 2), ("a", 2), ("b", 4), ("b", 3), ("b", 2)).
      toDF("key", "value")
    val window = Window.orderBy()
    checkAnswer(
      df.groupBy($"key")
        .agg(
          sum($"value"),
          sum(sum($"value")).over(window) - sum($"value")),
      Seq(Row("a", 6, 9), Row("b", 9, 6)))
  }
  // An empty OVER() clause computes the aggregate over the entire dataset,
  // both through the DataFrame API and SQL.
  test("SPARK-16195 empty over spec") {
    withTempView("window_table") {
      val df = Seq(("a", 1), ("a", 1), ("a", 2), ("b", 2)).
        toDF("key", "value")
      df.createOrReplaceTempView("window_table")
      checkAnswer(
        df.select($"key", $"value", sum($"value").over(), avg($"value").over()),
        Seq(Row("a", 1, 6, 1.5), Row("a", 1, 6, 1.5), Row("a", 2, 6, 1.5), Row("b", 2, 6, 1.5)))
      checkAnswer(
        sql("select key, value, sum(value) over(), avg(value) over() from window_table"),
        Seq(Row("a", 1, 6, 1.5), Row("a", 1, 6, 1.5), Row("a", 2, 6, 1.5), Row("b", 2, 6, 1.5)))
    }
  }
  // A UserDefinedAggregateFunction (sum of a*b products) used as a window
  // function with a running rangeBetween frame.
  test("window function with udaf") {
    val udaf = new UserDefinedAggregateFunction {
      def inputSchema: StructType = new StructType()
        .add("a", LongType)
        .add("b", LongType)
      def bufferSchema: StructType = new StructType()
        .add("product", LongType)
      def dataType: DataType = LongType
      def deterministic: Boolean = true
      def initialize(buffer: MutableAggregationBuffer): Unit = {
        buffer(0) = 0L
      }
      def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
        // Null in either operand leaves the running product-sum unchanged.
        if (!(input.isNullAt(0) || input.isNullAt(1))) {
          buffer(0) = buffer.getLong(0) + input.getLong(0) * input.getLong(1)
        }
      }
      def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
        buffer1(0) = buffer1.getLong(0) + buffer2.getLong(0)
      }
      def evaluate(buffer: Row): Any =
        buffer.getLong(0)
    }
    val df = Seq(
      ("a", 1, 1),
      ("a", 1, 5),
      ("a", 2, 10),
      ("a", 2, -1),
      ("b", 4, 7),
      ("b", 3, 8),
      ("b", 2, 4))
      .toDF("key", "a", "b")
    val window = Window.partitionBy($"key").orderBy($"a").rangeBetween(Long.MinValue, 0L)
    checkAnswer(
      df.select(
        $"key",
        $"a",
        $"b",
        udaf($"a", $"b").over(window)),
      Seq(
        Row("a", 1, 1, 6),
        Row("a", 1, 5, 6),
        Row("a", 2, 10, 24),
        Row("a", 2, -1, 24),
        Row("b", 4, 7, 60),
        Row("b", 3, 8, 32),
        Row("b", 2, 4, 8)))
  }
test("window function with aggregator") {
val agg = udaf(new Aggregator[(Long, Long), Long, Long] {
def zero: Long = 0L
def reduce(b: Long, a: (Long, Long)): Long = b + (a._1 * a._2)
def merge(b1: Long, b2: Long): Long = b1 + b2
def finish(r: Long): Long = r
def bufferEncoder: Encoder[Long] = Encoders.scalaLong
def outputEncoder: Encoder[Long] = Encoders.scalaLong
})
val df = Seq(
("a", 1, 1),
("a", 1, 5),
("a", 2, 10),
("a", 2, -1),
("b", 4, 7),
("b", 3, 8),
("b", 2, 4))
.toDF("key", "a", "b")
val window = Window.partitionBy($"key").orderBy($"a").rangeBetween(Long.MinValue, 0L)
checkAnswer(
df.select(
$"key",
$"a",
$"b",
agg($"a", $"b").over(window)),
Seq(
Row("a", 1, 1, 6),
Row("a", 1, 5, 6),
Row("a", 2, 10, 24),
Row("a", 2, -1, 24),
Row("b", 4, 7, 60),
Row("b", 3, 8, 32),
Row("b", 2, 4, 8)))
}
// avg/sum over an all-null input column must yield null for every row.
test("null inputs") {
  val df = Seq(("a", 1), ("a", 1), ("a", 2), ("a", 2), ("b", 4), ("b", 3), ("b", 2))
    .toDF("key", "value")
  val window = Window.orderBy()
  checkAnswer(
    df.select(
      $"key",
      $"value",
      avg(lit(null)).over(window),
      sum(lit(null)).over(window)),
    Seq(
      Row("a", 1, null, null),
      Row("a", 1, null, null),
      Row("a", 2, null, null),
      Row("a", 2, null, null),
      Row("b", 4, null, null),
      Row("b", 3, null, null),
      Row("b", 2, null, null)))
}

// first/last over a growing frame: with ignoreNulls=true, first sticks to
// the first non-null value and last tracks the most recent non-null one;
// with ignoreNulls=false (the default) nulls are returned as-is.
test("last/first with ignoreNulls") {
  val nullStr: String = null
  val df = Seq(
    ("a", 0, nullStr),
    ("a", 1, "x"),
    ("a", 2, "y"),
    ("a", 3, "z"),
    ("a", 4, nullStr),
    ("b", 1, nullStr),
    ("b", 2, nullStr)).
    toDF("key", "order", "value")
  val window = Window.partitionBy($"key").orderBy($"order")
  checkAnswer(
    df.select(
      $"key",
      $"order",
      first($"value").over(window),
      first($"value", ignoreNulls = false).over(window),
      first($"value", ignoreNulls = true).over(window),
      last($"value").over(window),
      last($"value", ignoreNulls = false).over(window),
      last($"value", ignoreNulls = true).over(window)),
    Seq(
      Row("a", 0, null, null, null, null, null, null),
      Row("a", 1, null, null, "x", "x", "x", "x"),
      Row("a", 2, null, null, "x", "y", "y", "y"),
      Row("a", 3, null, null, "x", "z", "z", "z"),
      Row("a", 4, null, null, "x", null, null, "z"),
      Row("b", 1, null, null, null, null, null, null),
      Row("b", 2, null, null, null, null, null, null)))
}

// Same first/last semantics, but the window is ordered descending, so the
// frame grows from the highest "order" value downwards.
test("last/first on descending ordered window") {
  val nullStr: String = null
  val df = Seq(
    ("a", 0, nullStr),
    ("a", 1, "x"),
    ("a", 2, "y"),
    ("a", 3, "z"),
    ("a", 4, "v"),
    ("b", 1, "k"),
    ("b", 2, "l"),
    ("b", 3, nullStr)).
    toDF("key", "order", "value")
  val window = Window.partitionBy($"key").orderBy($"order".desc)
  checkAnswer(
    df.select(
      $"key",
      $"order",
      first($"value").over(window),
      first($"value", ignoreNulls = false).over(window),
      first($"value", ignoreNulls = true).over(window),
      last($"value").over(window),
      last($"value", ignoreNulls = false).over(window),
      last($"value", ignoreNulls = true).over(window)),
    Seq(
      Row("a", 0, "v", "v", "v", null, null, "x"),
      Row("a", 1, "v", "v", "v", "x", "x", "x"),
      Row("a", 2, "v", "v", "v", "y", "y", "y"),
      Row("a", 3, "v", "v", "v", "z", "z", "z"),
      Row("a", 4, "v", "v", "v", "v", "v", "v"),
      Row("b", 1, null, null, "l", "k", "k", "k"),
      Row("b", 2, null, null, "l", "l", "l", "l"),
      Row("b", 3, null, null, null, null, null, null)))
}
// nth_value over a growing frame: returns the 2nd (or 3rd) value of the
// frame, optionally skipping nulls when ignoreNulls=true.
test("nth_value with ignoreNulls") {
  val nullStr: String = null
  val df = Seq(
    ("a", 0, nullStr),
    ("a", 1, "x"),
    ("a", 2, "y"),
    ("a", 3, "z"),
    ("a", 4, nullStr),
    ("b", 1, nullStr),
    ("b", 2, nullStr)).
    toDF("key", "order", "value")
  val window = Window.partitionBy($"key").orderBy($"order")
  checkAnswer(
    df.select(
      $"key",
      $"order",
      nth_value($"value", 2).over(window),
      nth_value($"value", 2, ignoreNulls = false).over(window),
      nth_value($"value", 2, ignoreNulls = true).over(window),
      nth_value($"value", 3, ignoreNulls = false).over(window)),
    Seq(
      Row("a", 0, null, null, null, null),
      Row("a", 1, "x", "x", null, null),
      Row("a", 2, "x", "x", "y", "y"),
      Row("a", 3, "x", "x", "y", "y"),
      Row("a", 4, "x", "x", "y", "y"),
      Row("b", 1, null, null, null, null),
      Row("b", 2, null, null, null, null)))
}

// nth_value over explicit offset frames: an unbounded frame (window1) gives
// every row the same answer, whereas a running frame (window2) only sees
// rows up to the current one.
test("nth_value with ignoreNulls over offset window frame") {
  val nullStr: String = null
  val df = Seq(
    ("a", 0, nullStr),
    ("a", 1, "x"),
    ("a", 2, "y"),
    ("a", 3, "z"),
    ("a", 4, nullStr),
    ("b", 1, nullStr),
    ("b", 2, nullStr)).
    toDF("key", "order", "value")
  val window1 = Window.partitionBy($"key").orderBy($"order")
    .rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
  val window2 = Window.partitionBy($"key").orderBy($"order")
    .rowsBetween(Window.unboundedPreceding, Window.currentRow)
  checkAnswer(
    df.select(
      $"key",
      $"order",
      nth_value($"value", 2).over(window1),
      nth_value($"value", 2, ignoreNulls = false).over(window1),
      nth_value($"value", 2, ignoreNulls = true).over(window1),
      nth_value($"value", 2).over(window2),
      nth_value($"value", 2, ignoreNulls = false).over(window2),
      nth_value($"value", 2, ignoreNulls = true).over(window2)),
    Seq(
      Row("a", 0, "x", "x", "y", null, null, null),
      Row("a", 1, "x", "x", "y", "x", "x", null),
      Row("a", 2, "x", "x", "y", "x", "x", "y"),
      Row("a", 3, "x", "x", "y", "x", "x", "y"),
      Row("a", 4, "x", "x", "y", "x", "x", "y"),
      Row("b", 1, null, null, null, null, null, null),
      Row("b", 2, null, null, null, null, null, null)))
}

// nth_value with a descending ordering: the "2nd value" is counted from the
// highest "order" value downwards.
test("nth_value on descending ordered window") {
  val nullStr: String = null
  val df = Seq(
    ("a", 0, nullStr),
    ("a", 1, "x"),
    ("a", 2, "y"),
    ("a", 3, "z"),
    ("a", 4, "v"),
    ("b", 1, "k"),
    ("b", 2, "l"),
    ("b", 3, nullStr)).
    toDF("key", "order", "value")
  val window = Window.partitionBy($"key").orderBy($"order".desc)
  checkAnswer(
    df.select(
      $"key",
      $"order",
      nth_value($"value", 2).over(window),
      nth_value($"value", 2, ignoreNulls = false).over(window),
      nth_value($"value", 2, ignoreNulls = true).over(window)),
    Seq(
      Row("a", 0, "z", "z", "z"),
      Row("a", 1, "z", "z", "z"),
      Row("a", 2, "z", "z", "z"),
      Row("a", 3, "z", "z", "z"),
      Row("a", 4, null, null, null),
      Row("b", 1, "l", "l", "k"),
      Row("b", 2, "l", "l", null),
      Row("b", 3, null, null, null)))
}
// lead/lag with ignoreNulls over a global (unpartitioned) ordering: with
// ignoreNulls=true the offset counts only non-null values, and the behavior
// must also hold for a derived expression (concat of two columns).
test("lead/lag with ignoreNulls") {
  val nullStr: String = null
  val df = Seq(
    ("a", 0, nullStr),
    ("a", 1, "x"),
    ("b", 2, nullStr),
    ("c", 3, nullStr),
    ("a", 4, "y"),
    ("b", 5, nullStr),
    ("a", 6, "z"),
    ("a", 7, "v"),
    ("a", 8, nullStr)).
    toDF("key", "order", "value")
  val window = Window.orderBy($"order")
  checkAnswer(
    df.select(
      $"key",
      $"order",
      $"value",
      lead($"value", 1).over(window),
      lead($"value", 2).over(window),
      lead($"value", 0, null, true).over(window),
      lead($"value", 1, null, true).over(window),
      lead($"value", 2, null, true).over(window),
      lead($"value", 3, null, true).over(window),
      lead(concat($"value", $"key"), 1, null, true).over(window),
      lag($"value", 1).over(window),
      lag($"value", 2).over(window),
      lag($"value", 0, null, true).over(window),
      lag($"value", 1, null, true).over(window),
      lag($"value", 2, null, true).over(window),
      lag($"value", 3, null, true).over(window),
      lag(concat($"value", $"key"), 1, null, true).over(window))
    .orderBy($"order"),
    // Columns: key, order, value, 7 lead variants, then 7 lag variants.
    Seq(
      Row("a", 0, null, "x", null, null, "x", "y", "z", "xa",
        null, null, null, null, null, null, null),
      Row("a", 1, "x", null, null, "x", "y", "z", "v", "ya",
        null, null, "x", null, null, null, null),
      Row("b", 2, null, null, "y", null, "y", "z", "v", "ya",
        "x", null, null, "x", null, null, "xa"),
      Row("c", 3, null, "y", null, null, "y", "z", "v", "ya",
        null, "x", null, "x", null, null, "xa"),
      Row("a", 4, "y", null, "z", "y", "z", "v", null, "za",
        null, null, "y", "x", null, null, "xa"),
      Row("b", 5, null, "z", "v", null, "z", "v", null, "za",
        "y", null, null, "y", "x", null, "ya"),
      Row("a", 6, "z", "v", null, "z", "v", null, null, "va",
        null, "y", "z", "y", "x", null, "ya"),
      Row("a", 7, "v", null, null, "v", null, null, null, null,
        "z", null, "v", "z", "y", "x", "za"),
      Row("a", 8, null, null, null, null, null, null, null, null,
        "v", "z", null, "v", "z", "y", "va")))
}
// Regression test: partitioning a window by fields of a struct alias
// ("Data.a", "Data.b") must resolve like regular attributes.
test("SPARK-12989 ExtractWindowExpressions treats alias as regular attribute") {
  val src = Seq((0, 3, 5)).toDF("a", "b", "c")
    .withColumn("Data", struct("a", "b"))
    .drop("a")
    .drop("b")
  val winSpec = Window.partitionBy("Data.a", "Data.b").orderBy($"c".desc)
  val df = src.select($"*", max("c").over(winSpec) as "max")
  checkAnswer(df, Row(5, Row(0, 3), 5))
}

// Window aggregates with unbounded ROWS frames must still produce correct
// results when a filter on the partitioning column is pushed down.
test("aggregation and rows between with unbounded + predicate pushdown") {
  withTempView("window_table") {
    val df = Seq((1, "1"), (2, "2"), (2, "3"), (1, "3"), (3, "2"), (4, "3")).toDF("key", "value")
    df.createOrReplaceTempView("window_table")
    val selectList = Seq($"key", $"value",
      last("key").over(
        Window.partitionBy($"value").orderBy($"key").rowsBetween(0, Long.MaxValue)),
      last("key").over(
        Window.partitionBy($"value").orderBy($"key").rowsBetween(Long.MinValue, 0)),
      last("key").over(Window.partitionBy($"value").orderBy($"key").rowsBetween(-1, 1)))
    checkAnswer(
      df.select(selectList: _*).where($"value" < "3"),
      Seq(Row(1, "1", 1, 1, 1), Row(2, "2", 3, 2, 3), Row(3, "2", 3, 3, 3)))
  }
}

// As above, but with RANGE frames (bounded and unbounded) and a predicate
// on the partition column.
test("aggregation and range between with unbounded + predicate pushdown") {
  withTempView("window_table") {
    val df = Seq((5, "1"), (5, "2"), (4, "2"), (6, "2"), (3, "1"), (2, "2")).toDF("key", "value")
    df.createOrReplaceTempView("window_table")
    val selectList = Seq($"key", $"value",
      last("value").over(
        Window.partitionBy($"value").orderBy($"key").rangeBetween(-2, -1)).equalTo("2")
        .as("last_v"),
      avg("key").over(Window.partitionBy("value").orderBy("key").rangeBetween(Long.MinValue, 1))
        .as("avg_key1"),
      avg("key").over(Window.partitionBy("value").orderBy("key").rangeBetween(0, Long.MaxValue))
        .as("avg_key2"),
      avg("key").over(Window.partitionBy("value").orderBy("key").rangeBetween(-1, 1))
        .as("avg_key3"))
    checkAnswer(
      df.select(selectList: _*).where($"value" < 2),
      Seq(Row(3, "1", null, 3.0, 4.0, 3.0), Row(5, "1", false, 4.0, 5.0, 5.0)))
  }
}
// Buffer fits within the in-memory threshold: no spill expected.
test("Window spill with less than the inMemoryThreshold") {
  val df = Seq((1, "1"), (2, "2"), (1, "3"), (2, "4")).toDF("key", "value")
  val window = Window.partitionBy($"key").orderBy($"value")
  withSQLConf(SQLConf.WINDOW_EXEC_BUFFER_IN_MEMORY_THRESHOLD.key -> "2",
    SQLConf.WINDOW_EXEC_BUFFER_SPILL_THRESHOLD.key -> "2") {
    assertNotSpilled(sparkContext, "select") {
      df.select($"key", sum("value").over(window)).collect()
    }
  }
}

// Buffer exceeds the in-memory threshold but stays under the spill
// threshold: still no spill expected.
test("Window spill with more than the inMemoryThreshold but less than the spillThreshold") {
  val df = Seq((1, "1"), (2, "2"), (1, "3"), (2, "4")).toDF("key", "value")
  val window = Window.partitionBy($"key").orderBy($"value")
  withSQLConf(SQLConf.WINDOW_EXEC_BUFFER_IN_MEMORY_THRESHOLD.key -> "1",
    SQLConf.WINDOW_EXEC_BUFFER_SPILL_THRESHOLD.key -> "2") {
    assertNotSpilled(sparkContext, "select") {
      df.select($"key", sum("value").over(window)).collect()
    }
  }
}

// Buffer exceeds both thresholds: a spill to disk must occur.
test("Window spill with more than the inMemoryThreshold and spillThreshold") {
  val df = Seq((1, "1"), (2, "2"), (1, "3"), (2, "4")).toDF("key", "value")
  val window = Window.partitionBy($"key").orderBy($"value")
  withSQLConf(SQLConf.WINDOW_EXEC_BUFFER_IN_MEMORY_THRESHOLD.key -> "1",
    SQLConf.WINDOW_EXEC_BUFFER_SPILL_THRESHOLD.key -> "1") {
    assertSpilled(sparkContext, "select") {
      df.select($"key", sum("value").over(window)).collect()
    }
  }
}
// Regression test: window aggregation over complex (struct/array) values
// must survive the spilling path without corrupting rows.
test("SPARK-21258: complex object in combination with spilling") {
  // Make sure we trigger the spilling path.
  withSQLConf(SQLConf.WINDOW_EXEC_BUFFER_IN_MEMORY_THRESHOLD.key -> "1",
    SQLConf.WINDOW_EXEC_BUFFER_SPILL_THRESHOLD.key -> "17") {
    val sampleSchema = new StructType().
      add("f0", StringType).
      add("f1", LongType).
      add("f2", ArrayType(new StructType().
        add("f20", StringType))).
      add("f3", ArrayType(new StructType().
        add("f30", StringType)))
    // w0: running frame; w1: fully unbounded frame over the same ordering.
    val w0 = Window.partitionBy("f0").orderBy("f1")
    val w1 = w0.rowsBetween(Long.MinValue, Long.MaxValue)
    val c0 = first(struct($"f2", $"f3")).over(w0) as "c0"
    val c1 = last(struct($"f2", $"f3")).over(w1) as "c1"
    // 18 JSON records (one per line); most rows have only f1 so the struct
    // columns are largely null, exercising null handling during spills.
    val input =
      """{"f1":1497820153720,"f2":[{"f20":"x","f21":0}],"f3":[{"f30":"x","f31":0}]}
        |{"f1":1497802179638}
        |{"f1":1497802189347}
        |{"f1":1497802189593}
        |{"f1":1497802189597}
        |{"f1":1497802189599}
        |{"f1":1497802192103}
        |{"f1":1497802193414}
        |{"f1":1497802193577}
        |{"f1":1497802193709}
        |{"f1":1497802202883}
        |{"f1":1497802203006}
        |{"f1":1497802203743}
        |{"f1":1497802203834}
        |{"f1":1497802203887}
        |{"f1":1497802203893}
        |{"f1":1497802203976}
        |{"f1":1497820168098}
        |""".stripMargin.split("\\n").toSeq
    import testImplicits._
    assertSpilled(sparkContext, "select") {
      spark.read.schema(sampleSchema).json(input.toDS()).select(c0, c1).foreach { _ => () }
    }
  }
}
// Window functions are not allowed inside WHERE or HAVING clauses; each of
// these queries (DataFrame API and SQL) must fail analysis with a message
// naming the offending clause.
test("SPARK-24575: Window functions inside WHERE and HAVING clauses") {
  // Forces analysis and asserts the expected AnalysisException message.
  def checkAnalysisError(df: => DataFrame, clause: String): Unit = {
    val thrownException = the[AnalysisException] thrownBy {
      df.queryExecution.analyzed
    }
    assert(thrownException.message.contains(s"window functions inside $clause clause"))
  }
  checkAnalysisError(
    testData2.select("a").where(rank().over(Window.orderBy($"b")) === 1), "WHERE")
  checkAnalysisError(
    testData2.where($"b" === 2 && rank().over(Window.orderBy($"b")) === 1), "WHERE")
  checkAnalysisError(
    testData2.groupBy($"a")
      .agg(avg($"b").as("avgb"))
      .where($"a" > $"avgb" && rank().over(Window.orderBy($"a")) === 1), "WHERE")
  checkAnalysisError(
    testData2.groupBy($"a")
      .agg(max($"b").as("maxb"), sum($"b").as("sumb"))
      .where(rank().over(Window.orderBy($"a")) === 1), "WHERE")
  checkAnalysisError(
    testData2.groupBy($"a")
      .agg(max($"b").as("maxb"), sum($"b").as("sumb"))
      .where($"sumb" === 5 && rank().over(Window.orderBy($"a")) === 1), "WHERE")
  checkAnalysisError(sql("SELECT a FROM testData2 WHERE RANK() OVER(ORDER BY b) = 1"), "WHERE")
  checkAnalysisError(
    sql("SELECT * FROM testData2 WHERE b = 2 AND RANK() OVER(ORDER BY b) = 1"), "WHERE")
  checkAnalysisError(
    sql("SELECT * FROM testData2 GROUP BY a HAVING a > AVG(b) AND RANK() OVER(ORDER BY a) = 1"),
    "HAVING")
  checkAnalysisError(
    sql("SELECT a, MAX(b), SUM(b) FROM testData2 GROUP BY a HAVING RANK() OVER(ORDER BY a) = 1"),
    "HAVING")
  checkAnalysisError(
    sql(
      s"""SELECT a, MAX(b)
        |FROM testData2
        |GROUP BY a
        |HAVING SUM(b) = 5 AND RANK() OVER(ORDER BY a) = 1""".stripMargin),
    "HAVING")
}
// Two stacked selects each adding a window aggregate: with the
// TransposeWindow rule enabled the plan needs only one shuffle (the w2
// partitioning subsumes w1's); with the rule excluded it needs two.
// Results must be identical either way.
test("window functions in multiple selects") {
  val df = Seq(
    ("S1", "P1", 100),
    ("S1", "P1", 700),
    ("S2", "P1", 200),
    ("S2", "P2", 300)
  ).toDF("sno", "pno", "qty")
  Seq(true, false).foreach { transposeWindowEnabled =>
    val excludedRules = if (transposeWindowEnabled) "" else TransposeWindow.ruleName
    withSQLConf(SQLConf.OPTIMIZER_EXCLUDED_RULES.key -> excludedRules) {
      val w1 = Window.partitionBy("sno")
      val w2 = Window.partitionBy("sno", "pno")
      val select = df.select($"sno", $"pno", $"qty", sum($"qty").over(w2).alias("sum_qty_2"))
        .select($"sno", $"pno", $"qty", col("sum_qty_2"), sum("qty").over(w1).alias("sum_qty_1"))
      val expectedNumExchanges = if (transposeWindowEnabled) 1 else 2
      // Count shuffle exchanges in the executed plan (AQE wrappers stripped).
      val actualNumExchanges = stripAQEPlan(select.queryExecution.executedPlan).collect {
        case e: Exchange => e
      }.length
      assert(actualNumExchanges == expectedNumExchanges)
      checkAnswer(
        select,
        Seq(
          Row("S1", "P1", 100, 800, 800),
          Row("S1", "P1", 700, 800, 800),
          Row("S2", "P1", 200, 200, 500),
          Row("S2", "P2", 300, 300, 500)))
    }
  }
}
// Window partitioning must normalize NaN and -0.0: all NaN values group
// together and 0.0 groups with -0.0, including when the keys are nested in
// arrays and structs.
test("NaN and -0.0 in window partition keys") {
  val df = Seq(
    (Float.NaN, Double.NaN),
    (0.0f/0.0f, 0.0/0.0),
    (0.0f, 0.0),
    (-0.0f, -0.0)).toDF("f", "d")
  checkAnswer(
    df.select($"f", count(lit(1)).over(Window.partitionBy("f", "d"))),
    Seq(
      Row(Float.NaN, 2),
      Row(0.0f/0.0f, 2),
      Row(0.0f, 2),
      Row(-0.0f, 2)))
  // test with complicated window partition keys.
  val windowSpec1 = Window.partitionBy(array("f"), struct("d"))
  checkAnswer(
    df.select($"f", count(lit(1)).over(windowSpec1)),
    Seq(
      Row(Float.NaN, 2),
      Row(0.0f/0.0f, 2),
      Row(0.0f, 2),
      Row(-0.0f, 2)))
  val windowSpec2 = Window.partitionBy(array(struct("f")), struct(array("d")))
  checkAnswer(
    df.select($"f", count(lit(1)).over(windowSpec2)),
    Seq(
      Row(Float.NaN, 2),
      Row(0.0f/0.0f, 2),
      Row(0.0f, 2),
      Row(-0.0f, 2)))
  // test with df with complicated-type columns.
  val df2 = Seq(
    (Array(-0.0f, 0.0f), Tuple2(-0.0d, Double.NaN), Seq(Tuple2(-0.0d, Double.NaN))),
    (Array(0.0f, -0.0f), Tuple2(0.0d, Double.NaN), Seq(Tuple2(0.0d, 0.0/0.0)))
  ).toDF("arr", "stru", "arrOfStru")
  val windowSpec3 = Window.partitionBy("arr", "stru", "arrOfStru")
  checkAnswer(
    df2.select($"arr", $"stru", $"arrOfStru", count(lit(1)).over(windowSpec3)),
    Seq(
      Row(Seq(-0.0f, 0.0f), Row(-0.0d, Double.NaN), Seq(Row(-0.0d, Double.NaN)), 2),
      Row(Seq(0.0f, -0.0f), Row(0.0d, Double.NaN), Seq(Row(0.0d, 0.0/0.0)), 2)))
}
// Regression test: a WindowFunctionFrame must reset its internal state
// between partitions, otherwise the "b" partition would leak nth_value
// state from the preceding "a" partition.
test("SPARK-34227: WindowFunctionFrame should clear its states during preparation") {
  // This creates a single partition dataframe with 3 records:
  // "a", 0, null
  // "a", 1, "x"
  // "b", 0, null
  val df = spark.range(0, 3, 1, 1).select(
    when($"id" < 2, lit("a")).otherwise(lit("b")).as("key"),
    ($"id" % 2).cast("int").as("order"),
    when($"id" % 2 === 0, lit(null)).otherwise(lit("x")).as("value"))
  val window1 = Window.partitionBy($"key").orderBy($"order")
    .rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
  val window2 = Window.partitionBy($"key").orderBy($"order")
    .rowsBetween(Window.unboundedPreceding, Window.currentRow)
  checkAnswer(
    df.select(
      $"key",
      $"order",
      nth_value($"value", 1, ignoreNulls = true).over(window1),
      nth_value($"value", 1, ignoreNulls = true).over(window2)),
    Seq(
      Row("a", 0, "x", null),
      Row("a", 1, "x", "x"),
      Row("b", 0, null, null)))
}
}
| nchammas/spark | sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFunctionsSuite.scala | Scala | apache-2.0 | 41,837 |
package net.rrm.ehour.reminder
import net.rrm.ehour.AbstractSpec
import net.rrm.ehour.config.EhourConfigStub
import net.rrm.ehour.domain.UserObjectMother
import net.rrm.ehour.mail.service.MailMan
import net.rrm.ehour.persistence.mail.dao.MailLogDao
import org.mockito.ArgumentCaptor
import org.mockito.Matchers._
import org.mockito.Mockito._
/**
 * Spec for ReminderService: verifies that users lacking sufficient booked
 * hours receive a reminder mail (logged via MailLogDao) and that the
 * configured reminder body template has its $name placeholder expanded.
 */
class ReminderServiceSpec extends AbstractSpec {
  val mailMan = mock[MailMan]
  val userFinder = mock[IFindUsersWithoutSufficientHours]
  val mailLogDao = mock[MailLogDao]
  // Stub config: reminders on, 8h working day, 32h weekly minimum.
  val config = new EhourConfigStub
  config.setCompleteDayHours(8f)
  config.setReminderEnabled(true)
  config.setReminderMinimalHours(32)
  val subject = new ReminderService(config, userFinder, mailMan, mailLogDao)
  // Fresh mock state before every example.
  override protected def beforeEach() = reset(mailMan, userFinder)
  "Reminder Service" should {
    "mail users to remind" in {
      val user = UserObjectMother.createUser
      // One user is short on hours and has no prior reminder logged.
      when(userFinder.findUsersWithoutSufficientHours(32, 8f)).thenReturn(List(user))
      when(mailLogDao.find(any(), any())).thenReturn(List())
      subject.sendReminderMail()
      verify(mailMan).deliver(any(), any(), any())
      // The mail-log lookup must use the user's email and a reminder event key.
      val mailEventCaptor = ArgumentCaptor.forClass(classOf[String])
      val emailCaptor = ArgumentCaptor.forClass(classOf[String])
      verify(mailLogDao).find(emailCaptor.capture(), mailEventCaptor.capture())
      emailCaptor.getValue should equal(user.getEmail)
      mailEventCaptor.getValue should startWith("1:Reminder for ")
    }
    "replace $name with the user full name" in {
      val user = UserObjectMother.createUser
      user.setFirstName("a")
      user.setLastName("b")
      config.setReminderBody("hello $name")
      val body = subject.enrichMailBody(user)
      body should be("hello a b")
    }
  }
}
| momogentoo/ehour | eHour-service/src/test/scala/net/rrm/ehour/reminder/ReminderServiceSpec.scala | Scala | gpl-2.0 | 1,786 |
package org.bitcoins.rpc.marshallers.wallet
import org.bitcoins.core.crypto.DoubleSha256Digest
import org.bitcoins.core.currency.{Bitcoins, CurrencyUnit}
import org.bitcoins.core.protocol.transaction.Transaction
import org.bitcoins.core.util.BitcoinSLogger
import org.bitcoins.rpc.bitcoincore.wallet.{UTXO, WalletTransaction}
import spray.json.{DefaultJsonProtocol, JsArray, JsBoolean, JsNull, JsNumber, JsObject, JsString, JsValue, RootJsonFormat}
import scala.util.Try
/**
* Created by chris on 5/4/17.
*/
/**
 * spray-json (de)serialization for [[WalletTransaction]], mirroring the
 * JSON shape returned by bitcoind's `gettransaction` RPC.
 *
 * Optional fields (generated, blockhash, blockindex, blocktime, comment, to)
 * are absent for unconfirmed transactions and are read leniently via Try;
 * on write they are emitted as JsNull when undefined.
 */
object WalletTransactionMarshaller extends DefaultJsonProtocol {

  // JSON field names as used by the bitcoind RPC response.
  val amountKey = "amount"
  val feeKey = "fee"
  val confirmationsKey = "confirmations"
  val generatedKey = "generated"
  val blockHashKey = "blockhash"
  val blockIndexKey = "blockindex"
  val blockTimeKey = "blocktime"
  val txIdKey = "txid"
  val walletConflictsKey = "walletconflicts"
  val timeKey = "time"
  val timeReceivedKey = "timereceived"
  val bip125ReplaceableKey = "bip125-replaceable"
  val commentKey = "comment"
  val toKey = "to"
  val hexKey = "hex"

  implicit object WalletTransactionFormatter extends RootJsonFormat[WalletTransaction] {

    /** Reads a wallet transaction from the bitcoind JSON representation. */
    override def read(value: JsValue): WalletTransaction = {
      val f = value.asJsObject.fields
      val amount = Bitcoins(f(amountKey).convertTo[Double])
      val fee = Bitcoins(f(feeKey).convertTo[Double])
      val confirmations = f(confirmationsKey).convertTo[Int]
      // Optional fields: a failed lookup/conversion yields None.
      val generated = Try(Some(f(generatedKey).convertTo[Boolean])).getOrElse(None)
      val blockHashStr: Option[String] = Try(Some(f(blockHashKey).convertTo[String])).getOrElse(None)
      val blockHash: Option[DoubleSha256Digest] = blockHashStr.map(DoubleSha256Digest(_))
      val blockIndex: Option[Long] = Try(Some(f(blockIndexKey).convertTo[Long])).getOrElse(None)
      val blockTime = Try(Some(f(blockTimeKey).convertTo[Long])).getOrElse(None)
      val txId = DoubleSha256Digest(f(txIdKey).convertTo[String])
      val walletConflicts: Seq[DoubleSha256Digest] = f(walletConflictsKey) match {
        case arr: JsArray => arr.elements.map(e => DoubleSha256Digest(e.convertTo[String])).toSeq
        case _ => throw new IllegalArgumentException("Wallet conflicts must be JsArray")
      }
      val time = f(timeKey).convertTo[Long]
      val timeReceived = f(timeReceivedKey).convertTo[Long]
      val bip125Replaceable = f(bip125ReplaceableKey).convertTo[String]
      val comment = Try(Some(f(commentKey).convertTo[String])).getOrElse(None)
      val to = Try(Some(f(toKey).convertTo[String])).getOrElse(None)
      val tx = Transaction(f(hexKey).convertTo[String])
      WalletTransaction(amount,fee,confirmations,generated,blockHash,blockIndex,blockTime,txId,
        walletConflicts,time,timeReceived, bip125Replaceable,comment,to,tx)
    }

    /** Writes a wallet transaction back to the bitcoind JSON representation. */
    override def write(walletTx: WalletTransaction): JsValue = {
      val m : Map[String,JsValue] = Map(
        amountKey -> JsNumber(Bitcoins(walletTx.amount.satoshis).toBigDecimal),
        // BUGFIX: previously serialized walletTx.amount under the fee key.
        feeKey -> JsNumber(Bitcoins(walletTx.fee.satoshis).toBigDecimal),
        confirmationsKey -> JsNumber(walletTx.confirmations),
        generatedKey -> (if (walletTx.generated.isDefined) JsBoolean(walletTx.generated.get) else JsNull),
        blockHashKey -> (if (walletTx.blockHash.isDefined) JsString(walletTx.blockHash.get.hex) else JsNull),
        blockIndexKey -> (if (walletTx.blockIndex.isDefined) JsNumber(walletTx.blockIndex.get) else JsNull),
        blockTimeKey -> (if (walletTx.blockTime.isDefined) JsNumber(walletTx.blockTime.get) else JsNull),
        txIdKey -> JsString(walletTx.txId.hex),
        walletConflictsKey -> JsArray(walletTx.walletConflicts.map(h => JsString(h.hex)).toVector),
        timeKey -> JsNumber(walletTx.time),
        timeReceivedKey -> JsNumber(walletTx.timeReceived),
        bip125ReplaceableKey -> JsString(walletTx.bip125Replaceable),
        commentKey -> (if (walletTx.comment.isDefined) JsString(walletTx.comment.get) else JsNull),
        toKey -> (if (walletTx.to.isDefined) JsString(walletTx.to.get) else JsNull),
        hexKey -> JsString(walletTx.transaction.hex)
      )
      JsObject(m)
    }
  }
}
| bitcoin-s/bitcoin-s-rpc-client | src/main/scala/org/bitcoins/rpc/marshallers/wallet/WalletTransactionMarshaller.scala | Scala | mit | 4,132 |
package se.gigurra.leavu3.datamodel
import com.github.gigurra.heisenberg.MapData._
import com.github.gigurra.heisenberg.{Schema, Parsed}
/**
 * Navigation indicator data parsed from a DCS export map via the Heisenberg
 * schema defined in the companion object.
 */
case class NavIndicators(source: SourceData = Map.empty) extends SafeParsed[NavIndicators.type] {
  val requirements = parse(schema.requirements)
  val acs = parse(schema.acs)
  val mode = parse(schema.mode)
}

/** Schema: field names and defaults for [[NavIndicators]]. */
object NavIndicators extends Schema[NavIndicators] {
  val requirements = required[NavRequirements]("Requirements", default = NavRequirements())
  val acs = required[Acs]("ACS", default = Acs())
  val mode = required[AircraftMode]("SystemMode", default = AircraftMode())
}
| GiGurra/leavu3 | src/main/scala/se/gigurra/leavu3/datamodel/NavIndicators.scala | Scala | mit | 658 |
import scala.reflect.runtime.universe._
import scala.tools.reflect.Eval
// Compiles and evaluates the quoted block at runtime via reify/eval to
// exercise runtime reflection on a small "customizable property" example.
object Test extends App {
  reify {
    /** A mutable property whose getter and setter may be customized. */
    case class Property[T](init: T) {
      private var value: T = init
      /** The setter function, defaults to identity. */
      private var setter: T => T = identity[T]
      /** The getter function, defaults to identity. */
      private var getter: T => T = identity[T]
      /** Retrieve the value held in this property, through the getter. */
      def apply(): T = getter(value)
      /** Update the value held in this property, through the setter. */
      def update(newValue: T) = value = setter(newValue)
      /** Change the getter. */
      def get(newGetter: T => T) = { getter = newGetter; this }
      /** Change the setter */
      def set(newSetter: T => T) = { setter = newSetter; this }
    }
    class User {
      // Create a property with custom getter and setter
      val firstname = Property("")
        .get { v => v.toUpperCase() }
        .set { v => "Mr. " + v }
      val lastname = Property("<noname>")
      /** Scala provides syntactic sugar for calling 'apply'. Simply
       * adding a list of arguments between parenthesis (in this case,
       * an empty list) is translated to a call to 'apply' with those
       * arguments.
       */
      override def toString() = firstname() + " " + lastname()
    }
    val user1 = new User
    // Syntactic sugar for 'update': an assignment is translated to a
    // call to method 'update'
    user1.firstname() = "Robert"
    val user2 = new User
    user2.firstname() = "bob"
    user2.lastname() = "KUZ"
    println("user1: " + user1)
    println("user2: " + user2)
  }.eval
}
| som-snytt/dotty | tests/disabled/macro/run/reify_properties.scala | Scala | apache-2.0 | 1,742 |
package sttp.client3
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import java.nio.charset.StandardCharsets
/**
 * Tests for rendering sttp requests in RFC 2616 wire format via
 * `toRfc2616Format`: request line, headers, bodies (form, text, xml, json),
 * sensitive-header masking and multipart rendering.
 */
class ToRfc2616ConverterTest extends AnyFlatSpec with Matchers {

  private val localhost = uri"http://localhost"

  it should "convert base request" in {
    val req = basicRequest
      .get(uri"$localhost")
      .toRfc2616Format
    req shouldBe "GET http://localhost"
  }

  it should "convert request with method to curl" in {
    // Each HTTP method must appear verbatim at the start of the output.
    basicRequest.get(localhost).toRfc2616Format should startWith("GET")
    basicRequest.post(localhost).toRfc2616Format should startWith("POST")
    basicRequest.put(localhost).toRfc2616Format should startWith("PUT")
    basicRequest.delete(localhost).toRfc2616Format should startWith("DELETE")
    basicRequest.patch(localhost).toRfc2616Format should startWith("PATCH")
    basicRequest.head(localhost).toRfc2616Format should startWith("HEAD")
    basicRequest.options(localhost).toRfc2616Format should startWith("OPTIONS")
  }

  it should "convert request with header" in {
    basicRequest
      .header("User-Agent", "myapp")
      .header("Content-Type", "application/json")
      .get(localhost)
      .toRfc2616Format should include(
      """User-Agent: myapp
        |Content-Type: application/json""".stripMargin
    )
  }

  it should "convert request with body" in {
    // Form-encoded map body.
    basicRequest.body(Map("name" -> "john", "org" -> "sml")).post(localhost).toRfc2616Format should include(
      """Content-Type: application/x-www-form-urlencoded
        |Content-Length: 17
        |
        |name=john&org=sml""".stripMargin
    )
    // Plain-text body, default charset.
    basicRequest.body("name=john").post(localhost).toRfc2616Format should include(
      """Content-Type: text/plain; charset=utf-8
        |Content-Length: 9
        |
        |name=john""".stripMargin
    )
    // Explicit non-UTF-8 charset must be reflected in the Content-Type.
    basicRequest.body("name=john", StandardCharsets.ISO_8859_1.name()).post(localhost).toRfc2616Format should include(
      """Content-Type: text/plain; charset=ISO-8859-1
        |Content-Length: 9
        |
        |name=john""".stripMargin
    )
    basicRequest.body("name=\"john\"").post(localhost).toRfc2616Format should include(
      """Content-Type: text/plain; charset=utf-8
        |Content-Length: 11
        |
        |name="john"""".stripMargin
    )
    val xmlBody = """<request>
                    |  <name>sample</name>
                    |  <time>Wed, 21 Oct 2015 18:27:50 GMT</time>
                    |</request>""".stripMargin
    // Authorization header must be masked as "***" in the rendered output.
    basicRequest
      .header("Authorization", "token")
      .contentType("application/xml")
      .body(xmlBody)
      .post(localhost)
      .toRfc2616Format should include(
      """Authorization: ***
        |Content-Type: application/xml
        |Content-Length: 91
        |
        |<request>
        |  <name>sample</name>
        |  <time>Wed, 21 Oct 2015 18:27:50 GMT</time>
        |</request>""".stripMargin
    )
    val jsonBody = """{
                     |  "name": "sample",
                     |  "time": "Wed, 21 Oct 2015 18:27:50 GMT"
                     |}""".stripMargin
    basicRequest
      .header("Authorization", "token")
      .contentType("application/json")
      .body(jsonBody)
      .post(localhost)
      .toRfc2616Format should include(
      """Authorization: ***
        |Content-Type: application/json
        |Content-Length: 69
        |
        |{
        |  "name": "sample",
        |  "time": "Wed, 21 Oct 2015 18:27:50 GMT"
        |}""".stripMargin
    )
  }

  it should "render multipart form data if content is a plain string" in {
    basicRequest
      .header("Content-Type", "multipart/form-data;boundary=<PLACEHOLDER>")
      .multipartBody(multipart("k1", "v1"), multipart("k2", "v2"))
      .post(localhost)
      .toRfc2616Format should include(
      """|Content-Disposition: form-data; name="k1"
         |
         |v1""".stripMargin
    ).and(
      include(
        """|Content-Disposition: form-data; name="k2"
           |
           |v2""".stripMargin
      )
    ).and(endWith("--"))
  }
}
| softwaremill/sttp | core/src/test/scala/sttp/client3/ToRfc2616ConverterTest.scala | Scala | apache-2.0 | 4,053 |
package test_data
import scala.xml.Elem
/**
 * Extracts the "About Employment" section from a DWP claim XML document.
 *
 * For each JobDetails node it renders a flat sequence of display strings,
 * each built as "<QuestionLabel> <Answer>", interleaved with fixed section
 * headings ("Employer's contact details", "Your last wage", ...).
 */
case class SectionAboutEmployment(xml: Elem) {

  // Root of all employment job detail nodes in the claim.
  val rootPathJobDetails = xml \\\\ "DWPCATransaction" \\\\ "DWPCAClaim" \\\\ "Employment" \\\\ "JobDetails"

  // Top-level "are you employed" question and answer.
  val areYouEmployedQuestion = xml \\\\ "DWPCATransaction" \\\\ "DWPCAClaim" \\\\ "Employed" \\\\ "QuestionLabel"
  val areYouEmployedAnswer = xml \\\\ "DWPCATransaction" \\\\ "DWPCAClaim" \\\\ "Employed" \\\\ "Answer"

  // Employer address: non-empty Line elements joined with spaces.
  val address = (rootPathJobDetails \\\\ "Employer" \\\\ "Address" \\\\ "Answer" \\\\"Line").map(x => x.text).filterNot(x => x.isEmpty).mkString(" ")
  val postCode = rootPathJobDetails \\\\ "Employer" \\\\ "Address" \\\\ "Answer" \\\\ "PostCode"

  // One flattened list of display lines per JobDetails node:
  // employer details, then pay details, then pension/expenses.
  val employmentDetails: Seq[String] = {
    (rootPathJobDetails
      map (y =>
      (y \\\\ "Employer").
        map(x => {
        Seq((x \\\\ "CurrentlyEmployed" \\ "QuestionLabel").text+" "+(x \\\\ "CurrentlyEmployed" \\ "Answer").text,
          (x \\\\ "Name" \\\\ "QuestionLabel").text+ " "+(x \\\\ "Name" \\\\ "Answer").text,
          (x \\\\ "DidJobStartBeforeClaimDate" \\\\ "QuestionLabel").text+ " "+(x \\\\ "DidJobStartBeforeClaimDate" \\\\ "Answer").text,
          (x \\\\ "DateJobStarted" \\\\ "QuestionLabel").text+" "+(x \\\\ "DateJobStarted" \\\\ "Answer").text,
          (x \\\\ "DateJobEnded" \\\\ "QuestionLabel").text+" "+(x \\\\ "DateJobEnded" \\\\ "Answer").text,
          (x \\\\ "P45LeavingDate" \\\\ "QuestionLabel").text+" "+(x \\\\ "P45LeavingDate" \\\\ "Answer").text,
          "Employer's contact details",
          (x \\\\ "Address" \\\\ "QuestionLabel").text+" "+(x \\\\ "Address" \\\\ "Answer" \\\\"Line").map(x => x.text).filterNot(x => x.isEmpty).mkString(" "),
          (x \\\\ "Address" \\\\ "Answer" \\\\ "PostCode").text,
          (x \\\\ "EmployersPhoneNumber" \\\\ "QuestionLabel").text+" "+(x \\\\ "EmployersPhoneNumber" \\\\ "Answer").text,
          (x \\\\ "JobType" \\\\ "QuestionLabel").text+" "+(x \\\\ "JobType" \\\\ "Answer").text
        )
      }).flatten ++
        (y \\\\ "Pay").
          map(x => {
          Seq((x \\\\ "WeeklyHoursWorked" \\\\ "QuestionLabel").text+" "+(x \\\\ "WeeklyHoursWorked" \\\\ "Answer").text,
            "Your last wage",
            (x \\\\ "DateLastPaid" \\\\ "QuestionLabel").text+" "+(x \\\\ "DateLastPaid" \\\\ "Answer").text,
            (x \\\\ "GrossPayment" \\\\ "QuestionLabel").text+" "+(x \\\\ "GrossPayment" \\\\ "Answer" \\\\ "Amount").text,
            (x \\\\ "IncludedInWage" \\\\ "QuestionLabel").text+" "+(x \\\\ "IncludedInWage" \\\\ "Answer").text,
            (x \\\\ "ConstantEarnings" \\\\ "QuestionLabel").text+" "+(x \\\\ "ConstantEarnings" \\\\ "Answer").text,
            "Additional details on your last wage",
            (x \\\\ "PayFrequency" \\\\ "QuestionLabel").text+" "+(x \\\\ "PayFrequency" \\\\ "Answer").text,
            (x \\\\ "PayFrequency" \\\\ "Other").text,
            (x \\\\ "UsualPayDay" \\\\ "QuestionLabel").text+" "+(x \\\\ "UsualPayDay" \\\\ "Answer").text
          )
        }).flatten ++
        (y).map(x => {
          Seq((x \\\\ "OweMoney" \\\\ "QuestionLabel").text+" "+(x \\\\ "OweMoney" \\\\ "Answer").text,
            "Pension And Expenses",
            (x \\\\ "PaidForPension" \\\\ "QuestionLabel").text+" "+(x \\\\ "PaidForPension" \\\\ "Answer").text,
            (x \\\\ "PensionExpenses" \\\\ "Expense" \\\\ "QuestionLabel").text+" "+(x \\\\ "PensionExpenses" \\\\ "Expense" \\\\ "Answer" ).text,
            (x \\\\ "PaidForJobExpenses" \\\\ "QuestionLabel").text+" "+(x \\\\ "PaidForJobExpenses" \\\\ "Answer").text,
            (x \\\\ "JobExpenses" \\\\ "Expense" \\\\ "QuestionLabel").text+" "+(x \\\\ "JobExpenses" \\\\ "Expense" \\\\ "Answer").text
          )
        }).flatten
      )
      ).flatten
  }
}
| Department-for-Work-and-Pensions/RenderingService | test/test_data/SectionAboutEmployment.scala | Scala | mit | 3,472 |
// Copyright 2014 Foursquare Labs Inc. All Rights Reserved.
package io.fsq.rogue.lift
import com.mongodb.DBObject
import io.fsq.field.Field
import io.fsq.rogue.{AbstractListModifyField, AbstractListQueryField, SelectableDummyField}
/** Query-field wrapper exposing untyped access to sub-fields of an embedded
  * case class stored under `field`.
  */
class CaseClassQueryField[V, M](val field: Field[V, M]) {
  /** Builds a dummy selectable field addressing `name` nested under this field.
    * "Unsafe": the result type `F` is asserted by the caller, not checked.
    */
  def unsafeField[F](name: String): SelectableDummyField[F, M] =
    new SelectableDummyField[F, M](s"${field.name}.$name", field.owner)
}
/** List-valued query field for embedded case classes; each element is
  * serialized to a `DBObject` through the Lift helpers.
  */
class CaseClassListQueryField[V, M](field: Field[List[V], M])
    extends AbstractListQueryField[V, V, DBObject, M, List](field) {

  // Element serialization used when building query clauses.
  override def valueToDB(v: V) = LiftQueryHelpers.asDBObject(v)

  /** Untyped access to sub-field `name` on each list element. */
  def unsafeField[F](name: String): SelectableDummyField[List[F], M] = {
    val fullName = s"${field.name}.$name"
    new SelectableDummyField[List[F], M](fullName, field.owner)
  }
}
/** Modify-clause counterpart of the list query field above: serializes each
  * list element to a DBObject via the Lift helpers when building
  * modification operations.
  */
class CaseClassListModifyField[V, M](field: Field[List[V], M])
    extends AbstractListModifyField[V, DBObject, M, List](field) {
  // Elements are converted with Lift's record serialization helper.
  override def valueToDB(v: V) = LiftQueryHelpers.asDBObject(v)
}
| foursquare/fsqio | src/jvm/io/fsq/rogue/lift/LiftQueryField.scala | Scala | apache-2.0 | 981 |
package com.github.diegopacheco.sandbox.scripts.scala.colletions
/** Demonstrates basic immutable List operations: construction, prepending,
  * concatenation, higher-order methods, and left folds.  Output is printed
  * to stdout and is unchanged from the original version.
  */
object ListCollectionsFun extends App {

  val teams = List("Grêmio FC","São Paulo FC","Palmeiras FC","Inter FC")

  println(teams.head)  // first element
  println(teams(1))    // positional access (O(n) on List)

  // `::` prepends in O(1); the original list is left unchanged.
  val moreTeams = "Curitiba" :: teams
  println(moreTeams.head)
  println(moreTeams)

  // `:::` concatenates two lists.
  val brasileirao = teams ::: List("Curitiba FC")
  println(brasileirao)

  println( brasileirao.filter(_ contains "mio") )
  println( brasileirao.forall(_ contains " FC") )
  println( brasileirao.exists(_ contains "on") )
  println( brasileirao.map(_.length) )

  // `foldLeft` replaces the deprecated `/:` operator (removed in modern
  // Scala); both fold from the left with the given zero.
  val teamsSizes = brasileirao.foldLeft(0) { (t, v) => t + v.length }
  printf("Team sizes %d \n",teamsSizes)
  printf("Team sizes %d \n", brasileirao.foldLeft(0) { _ + _.length } )
  printf("Sizes %d \n", ("ABC" :: "DE" :: "CC" :: Nil).foldLeft(0) { _ + _.length } )
}
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.physical.mongodb
import slamdata.Predef._
import quasar.{RenderTree, Terminal}
import quasar.contrib.pathy._
import quasar.fp.ski._
import quasar.fs._
import scala.AnyVal
import scala.util.parsing.combinator._
import com.mongodb.MongoNamespace
import scalaz._, Scalaz._
import pathy.Path.{dir => pDir, file => pFile, _}
// TODO: use Refined to constrain the value here
/** A MongoDB database name, wrapped as a value class so it cannot be
  * confused with collection names or raw strings.
  */
final case class DatabaseName(value: String) extends AnyVal {
  /** BSON representation of this name. */
  def bson: Bson = Bson.Text(value)
}

object DatabaseName {
  // Structural equality suffices for a single-field value class.
  implicit def equal: Equal[DatabaseName] = Equal.equalA
}
// TODO: use Refined to constrain the value here
/** A MongoDB collection name (the part of a namespace after the database). */
final case class CollectionName(value: String) extends AnyVal {
  /** True when this name sits at or below `ancestor` in the dotted
    * hierarchy.  The empty name acts as the root, an ancestor of everything.
    */
  def isDescendantOf(ancestor: CollectionName): Boolean =
    ancestor.value.isEmpty || value.startsWith(ancestor.value + ".")

  /** BSON representation of this name. */
  def bson: Bson = Bson.Text(value)
}

object CollectionName {
  implicit def equal: Equal[CollectionName] = Equal.equalA
}
/** Identifies a collection in a specific database. Sometimes referred to as a
  * "namespace" in MongoDB docs.
  */
final case class Collection(database: DatabaseName, collection: CollectionName) {
  import Collection._

  /** Convert this collection to a file. */
  def asFile: AFile = {
    // Unescape the database name back into a directory segment.
    val db = DatabaseNameUnparser(database)
    // Collection segments, reversed so the head becomes the file name.
    val segs = CollectionNameUnparser(collection).reverse
    // A collection with no segments degenerates to a file named after the db.
    val f = segs.headOption getOrElse db
    (segs ::: List(db)).drop(1).foldRight(rootDir)((d, p) => p </> pDir(d)) </> pFile(f)
  }

  /** The driver-level namespace ("db.collection") for this collection. */
  def asNamespace: MongoNamespace = new MongoNamespace(database.value, collection.value)
}
object Collection {
  /** The collection represented by the given file. */
  def fromFile(file: AFile): PathError \\/ Collection =
    fromPath(file)

  /** The name of a collection represented by the given directory. */
  def prefixFromDir(dir: ADir): PathError \\/ CollectionName =
    fromPath(dir) map (_.collection)

  /** Returns the database name determined by the given path. */
  def dbNameFromPath(path: APath): PathError \\/ DatabaseName =
    dbNameAndRest(path) bimap (PathError.invalidPath(path, _), _._1)

  /** Returns the directory name derived from the given database name. */
  def dirNameFromDbName(dbName: DatabaseName): DirName =
    DirName(DatabaseNameUnparser(dbName))

  // Parses a path into a Collection, enforcing MongoDB's 120-byte limit on
  // the combined "db.collection" namespace (measured in UTF-8 bytes).
  private def fromPath(path: APath): PathError \\/ Collection = {
    import PathError._
    val collResult = for {
      tpl <- dbNameAndRest(path)
      (db, r) = tpl
      ss <- r.toNel.toRightDisjunction("path names a database, but no collection")
      segs <- ss.traverse(CollectionSegmentParser(_))
      coll = CollectionName(segs.toList mkString ".")
      len = utf8length(db.value) + 1 + utf8length(coll.value)
      _ <- if (len > 120)
             s"database+collection name too long ($len > 120 bytes): $db.$coll".left
           else ().right
    } yield Collection(db, coll)
    collResult leftMap (invalidPath(path, _))
  }

  // Splits a path into the database name (first segment) and the remaining
  // segments that will form the collection name.
  private def dbNameAndRest(path: APath): String \\/ (DatabaseName, IList[String]) =
    flatten(None, None, None, Some(_), Some(_), path)
      .toIList.unite.uncons(
        "no database specified".left,
        (h, t) => DatabaseNameParser(h) strengthR t)

  // Common base for the escape/unescape parsers below.
  private trait PathParser extends RegexParsers {
    override def skipWhitespace = false

    // Builds a parser that rewrites each left-hand string to its right-hand
    // replacement; later pairs in the list take precedence.
    protected def substitute(pairs: List[(String, String)]): Parser[String] =
      pairs.foldLeft[Parser[String]](failure("no match")) {
        case (acc, (a, b)) => (a ^^ κ(b)) | acc
      }
  }

  // Byte length of `str` in UTF-8 (MongoDB's namespace limits are in bytes).
  def utf8length(str: String) = str.getBytes("UTF-8").length

  // Characters illegal or problematic in MongoDB database names, mapped to
  // printable escape sequences used in directory names (and reversed by the
  // unparser).
  val DatabaseNameEscapes = List(
    " " -> "+",
    "." -> "~",
    "%" -> "%%",
    "+" -> "%add",
    "~" -> "%tilde",
    "/" -> "%div",
    "\\\\" -> "%esc",
    "\\"" -> "%quot",
    "*" -> "%mul",
    "<" -> "%lt",
    ">" -> "%gt",
    ":" -> "%colon",
    "|" -> "%bar",
    "?" -> "%qmark")

  // Escapes a raw database-name string; rejects names over MongoDB's
  // 64-byte database-name limit.
  private object DatabaseNameParser extends PathParser {
    def name: Parser[DatabaseName] =
      char.* ^^ { cs => DatabaseName(cs.mkString) }

    def char: Parser[String] = substitute(DatabaseNameEscapes) | "(?s).".r

    def apply(input: String): String \\/ DatabaseName = parseAll(name, input) match {
      case Success(name, _) if utf8length(name.value) > 64 =>
        s"database name too long (> 64 bytes): ${name.value}".left
      case Success(name, _) =>
        name.right
      case failure : NoSuccess =>
        s"failed to parse ‘$input’: ${failure.msg}".left
    }
  }

  // Inverse of DatabaseNameParser: maps escape sequences back to the
  // original characters.  Total on all inputs, hence the unreachable error.
  private object DatabaseNameUnparser extends PathParser {
    def name = nameChar.* ^^ { _.mkString }
    def nameChar = substitute(DatabaseNameEscapes.map(_.swap)) | "(?s).".r
    def apply(input: DatabaseName): String = parseAll(name, input.value) match {
      case Success(result, _) => result
      case failure : NoSuccess => scala.sys.error("doesn't happen")
    }
  }

  // Collection names may not contain unescaped '.' or '$'.
  val CollectionNameEscapes = List(
    "." -> "\\\\.",
    "$" -> "\\\\d",
    "\\\\" -> "\\\\\\\\")

  // Escapes a single path segment for use inside a collection name.
  private object CollectionSegmentParser extends PathParser {
    def seg: Parser[String] =
      char.* ^^ { _.mkString }

    def char: Parser[String] = substitute(CollectionNameEscapes) | "(?s).".r

    /**
      * @return If implemented correctly, should always return a [[String]] in the right hand of the [[Disjunction]]
      */
    def apply(input: String): String \\/ String = parseAll(seg, input) match {
      case Success(seg, _) =>
        seg.right
      case failure : NoSuccess =>
        s"failed to parse ‘$input’: ${failure.msg}".left
    }
  }

  // Inverse of CollectionSegmentParser over a full dotted collection name;
  // yields the unescaped path segments.
  private object CollectionNameUnparser extends PathParser {
    def name = repsep(seg, ".")
    def seg = segChar.* ^^ { _.mkString }
    def segChar = substitute(CollectionNameEscapes.map(_.swap)) | "(?s)[^.]".r
    def apply(input: CollectionName): List[String] = parseAll(name, input.value) match {
      case Success(result, _) => result
      case failure : NoSuccess => scala.sys.error("doesn't happen")
    }
  }

  // Lexicographic ordering on (database, collection).
  implicit val order: Order[Collection] =
    Order.orderBy(c => (c.database.value, c.collection.value))

  implicit val renderTree: RenderTree[Collection] =
    new RenderTree[Collection] {
      def render(v: Collection) =
        Terminal(List("Collection"), Some(v.database.value + "; " + v.collection.value))
    }
}
| jedesah/Quasar | mongodb/src/main/scala/quasar/physical/mongodb/collection.scala | Scala | apache-2.0 | 6,839 |
package org.functionalkoans.forscala
import org.functionalkoans.forscala.support.KoanFunSuite
import org.scalatest.Matchers
import org.scalatest.Matchers
// Koan exercise: the `__` placeholders are intentionally left for the
// student to fill in with the literal each expression evaluates to.
class AboutLiteralBooleans extends KoanFunSuite with Matchers {
  koan("""Boolean literals are either true or false, using the true or false keyword""") {
    val a = true
    val b = false
    val c = 1 > 2
    val d = 1 < 2
    val e = a == c
    val f = b == d

    a should be(__)
    b should be(__)
    c should be(__)
    d should be(__)
    e should be(__)
    f should be(__)
  }
}
| pharmpress/codingdojo | scala-koans/src/test/scala/org/functionalkoans/forscala/AboutLiteralBooleans.scala | Scala | apache-2.0 | 548 |
package com.twitter.finagle.thriftmux
import com.twitter.conversions.time._
import com.twitter.finagle._
import com.twitter.finagle.builder.{ServerBuilder, ClientBuilder}
import com.twitter.finagle.client.StackClient
import com.twitter.finagle.dispatch.PipeliningDispatcher
import com.twitter.finagle.param.{Label, Stats, Tracer => PTracer}
import com.twitter.finagle.stats.InMemoryStatsReceiver
import com.twitter.finagle.thrift.{ThriftClientFramedCodec, ClientId, Protocols, ThriftClientRequest}
import com.twitter.finagle.thriftmux.thriftscala.{TestService$FinagleClient, TestService, TestService$FinagleService}
import com.twitter.finagle.tracing.Annotation.{ServerRecv, ClientSend}
import com.twitter.finagle.tracing._
import com.twitter.finagle.transport.Transport
import com.twitter.finagle.context.Contexts
import com.twitter.io.Buf
import com.twitter.util.{Closable, Await, Future, Promise, Return}
import java.net.{InetAddress, SocketAddress, InetSocketAddress}
import org.apache.thrift.protocol._
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.{AssertionsForJUnit, JUnitRunner}
// End-to-end tests for ThriftMux: interop with plain Thrift clients/servers,
// broadcast-context propagation, ClientId semantics, tracing, stats export,
// and alternate thrift protocol factories (TCompactProtocol).
@RunWith(classOf[JUnitRunner])
class EndToEndTest extends FunSuite with AssertionsForJUnit {
  // Used for testing ThriftMux's Context functionality. Duplicated from the
  // finagle-mux package as a workaround because you can't easily depend on a
  // test package in Maven.
  case class TestContext(buf: Buf)

  // Broadcast context key used to verify that request contexts survive the
  // client -> server hop over the mux transport.
  val testContext = new Contexts.broadcast.Key[TestContext] {
    val marshalId = Buf.Utf8("com.twitter.finagle.mux.MuxContext")
    def marshal(tc: TestContext) = tc.buf
    def tryUnmarshal(buf: Buf) = Return(TestContext(buf))
  }

  // A ThriftMux server on an ephemeral loopback port that echoes its input
  // doubled, unless a TestContext is present, in which case it returns the
  // context payload (proving the context crossed the wire).
  trait ThriftMuxTestServer {
    val server = ThriftMux.serveIface(
      new InetSocketAddress(InetAddress.getLoopbackAddress, 0),
      new TestService.FutureIface {
        def query(x: String) =
          Contexts.broadcast.get(testContext) match {
            case None => Future.value(x+x)
            case Some(TestContext(buf)) =>
              val Buf.Utf8(str) = buf
              Future.value(str)
          }
      })
  }

  test("end-to-end thriftmux") {
    new ThriftMuxTestServer {
      val client = ThriftMux.newIface[TestService.FutureIface](server)
      assert(Await.result(client.query("ok")) === "okok")
    }
  }

  test("end-to-end thriftmux: propagate Contexts") {
    new ThriftMuxTestServer {
      val client = ThriftMux.newIface[TestService.FutureIface](server)

      assert(Await.result(client.query("ok")) === "okok")

      Contexts.broadcast.let(testContext, TestContext(Buf.Utf8("hello context world"))) {
        assert(Await.result(client.query("ok")) === "hello context world")
      }
    }
  }

  test("thriftmux server + Finagle thrift client") {
    new ThriftMuxTestServer {
      val client = Thrift.newIface[TestService.FutureIface](server)
      1 to 5 foreach { _ =>
        assert(Await.result(client.query("ok")) === "okok")
      }
    }
  }

  val clientId = ClientId("test.service")

  // Builds the cross-product of server construction styles (old/new
  // ServerBuilder, old/new protocol object) for a given protocol factory.
  // Each entry is (description, closable server, bound port).  The service
  // echoes its input doubled, or the caller's ClientId for empty input.
  def servers(pf: TProtocolFactory): Seq[(String, Closable, Int)] = {
    val iface = new TestService.FutureIface {
      def query(x: String) =
        if (x.isEmpty) Future.value(ClientId.current.map(_.name).getOrElse(""))
        else Future.value(x + x)
    }

    val pfSvc = new TestService$FinagleService(iface, pf)
    val builder = ServerBuilder()
      .stack(ThriftMux.server.withProtocolFactory(pf))
      .name("ThriftMuxServer")
      .bindTo(new InetSocketAddress(0))
      .build(pfSvc)
    val builderOld = ServerBuilder()
      .stack(ThriftMuxServer.withProtocolFactory(pf))
      .name("ThriftMuxServer")
      .bindTo(new InetSocketAddress(0))
      .build(pfSvc)
    val protoNew = ThriftMux.server
      .withProtocolFactory(pf)
      .serveIface(new InetSocketAddress(InetAddress.getLoopbackAddress, 0), iface)
    val protoOld = ThriftMuxServer
      .withProtocolFactory(pf)
      .serveIface(new InetSocketAddress(InetAddress.getLoopbackAddress, 0), iface)

    def port(socketAddr: SocketAddress): Int =
      socketAddr.asInstanceOf[InetSocketAddress].getPort

    Seq(
      ("ServerBuilder deprecated", builderOld, port(builderOld.localAddress)),
      ("ServerBuilder", builder, port(builder.localAddress)),
      ("ThriftMux proto deprecated", protoOld, port(protoOld.boundAddress)),
      // Fixed: this entry previously paired `protoOld` with protoNew's port,
      // so the new-API server was never exercised and never closed.
      ("ThriftMux proto", protoNew, port(protoNew.boundAddress))
    )
  }

  // Builds the matrix of client construction styles against a given port.
  // Each entry is (description, typed client, closable for cleanup).
  def clients(
    pf: TProtocolFactory,
    port: Int
  ): Seq[(String, TestService$FinagleClient, Closable)] = {
    val dest = s"localhost:$port"
    val builder = ClientBuilder()
      .stack(ThriftMux.client.withClientId(clientId).withProtocolFactory(pf))
      .dest(dest)
      .build()
    val oldBuilder = ClientBuilder()
      .stack(ThriftMuxClient.withClientId(clientId).withProtocolFactory(pf))
      .dest(dest)
      .build()
    val thriftBuilder = ClientBuilder()
      .codec(ThriftClientFramedCodec(Some(clientId)).protocolFactory(pf))
      .hostConnectionLimit(1)
      .dest(dest)
      .build()
    val thriftProto = Thrift.client
      .withClientId(clientId)
      .withProtocolFactory(pf)
      .newService(dest)
    val newProto = ThriftMux.client
      .withClientId(clientId)
      .withProtocolFactory(pf)
      .newService(dest)
    val oldProto = ThriftMuxClient
      .withClientId(clientId)
      .withProtocolFactory(pf)
      .newService(dest)

    def toIface(svc: Service[ThriftClientRequest, Array[Byte]]): TestService$FinagleClient =
      new TestService.FinagledClient(svc, pf)

    Seq(
      ("ThriftMux via ClientBuilder", toIface(builder), builder),
      ("ThriftMux via deprecated ClientBuilder", toIface(oldBuilder), oldBuilder),
      ("Thrift via ClientBuilder", toIface(thriftBuilder), thriftBuilder),
      ("Thrift via proto", toIface(thriftProto), thriftProto),
      ("ThriftMux proto deprecated", toIface(oldProto), oldProto),
      ("ThriftMux proto", toIface(newProto), newProto)
    )
  }

  // While we're supporting both old & new APIs, test the cross-product
  test("Mix of client and server creation styles") {
    for {
      pf <- Seq(new TCompactProtocol.Factory, Protocols.binaryFactory())
      (serverWhich, serverClosable, port) <- servers(pf)
    } {
      for {
        (clientWhich, clientIface, clientClosable) <- clients(pf, port)
      } withClue(s"Server ($serverWhich), Client ($clientWhich) client with protocolFactory $pf") {
        1.to(5).foreach { _ => assert(Await.result(clientIface.query("ok")) === "okok")}
        assert(Await.result(clientIface.query("")) === clientId.name)
        clientClosable.close()
      }
      serverClosable.close()
    }
  }

  test("thriftmux server + Finagle thrift client: propagate Contexts") {
    new ThriftMuxTestServer {
      val client = Thrift.newIface[TestService.FutureIface](server)

      assert(Await.result(client.query("ok")) === "okok")

      Contexts.broadcast.let(testContext, TestContext(Buf.Utf8("hello context world"))) {
        assert(Await.result(client.query("ok")) === "hello context world")
      }
    }
  }

  test("""|thriftmux server + Finagle thrift client: client should receive a
          | TApplicationException if the server throws an unhandled exception
       """.stripMargin) {
    val server = ThriftMux.serveIface(
      new InetSocketAddress(InetAddress.getLoopbackAddress, 0),
      new TestService.FutureIface {
        def query(x: String) = throw new Exception("sad panda")
      })
    val client = Thrift.newIface[TestService.FutureIface](server)
    val thrown = intercept[Exception] { Await.result(client.query("ok")) }
    assert(thrown.getMessage === "Internal error processing query: 'java.lang.Exception: sad panda'")
  }

  test("thriftmux server + Finagle thrift client: traceId should be passed from client to server") {
    @volatile var cltTraceId: Option[TraceId] = None
    @volatile var srvTraceId: Option[TraceId] = None
    // Captures the trace id observed on each side of the wire so we can
    // assert they match.
    val tracer = new Tracer {
      def record(record: Record) {
        record match {
          case Record(id, _, ServerRecv(), _) => srvTraceId = Some(id)
          case Record(id, _, ClientSend(), _) => cltTraceId = Some(id)
          case _ =>
        }
      }

      def sampleTrace(traceId: TraceId): Option[Boolean] = None
    }

    val server = ThriftMux.server
      .configured(PTracer(tracer))
      .serveIface(
        new InetSocketAddress(InetAddress.getLoopbackAddress, 0),
        new TestService.FutureIface {
          def query(x: String) = Future.value(x + x)
        })

    val client = Thrift.client
      .configured(PTracer(tracer))
      .newIface[TestService.FutureIface](server)

    Await.result(client.query("ok"))

    (srvTraceId, cltTraceId) match {
      case (Some(id1), Some(id2)) => assert(id1 === id2)
      case _ => assert(false, s"the trace ids sent by client and received by server do not match srv: $srvTraceId clt: $cltTraceId")
    }
  }

  test("thriftmux server + Finagle thrift client: clientId should be passed from client to server") {
    val server = ThriftMux.serveIface(
      new InetSocketAddress(InetAddress.getLoopbackAddress, 0),
      new TestService.FutureIface {
        def query(x: String) = Future.value(ClientId.current map { _.name } getOrElse(""))
      })

    val clientId = "test.service"
    val client = Thrift.client
      .withClientId(ClientId(clientId))
      .newIface[TestService.FutureIface](server)

    1 to 5 foreach { _ =>
      assert(Await.result(client.query("ok")) === clientId)
    }
  }

  test("thriftmux server + Finagle thrift client: ClientId should not be overridable externally") {
    val server = ThriftMux.serveIface(
      new InetSocketAddress(InetAddress.getLoopbackAddress, 0),
      new TestService.FutureIface {
        def query(x: String) = Future.value(ClientId.current map { _.name } getOrElse(""))
      })

    val clientId = ClientId("test.service")
    val otherClientId = ClientId("other.bar")
    val client = Thrift.client
      .withClientId(clientId)
      .newIface[TestService.FutureIface](server)

    1 to 5 foreach { _ =>
      // The configured ClientId must win over one set via asCurrent.
      otherClientId.asCurrent {
        assert(Await.result(client.query("ok")) === clientId.name)
      }
    }
  }

  test("thriftmux server + Finagle thrift client: server.close()") {
    new ThriftMuxTestServer {
      val client = Thrift.newIface[TestService.FutureIface](server)

      assert(Await.result(client.query("ok")) == "okok")
      Await.result(server.close())

      intercept[ChannelWriteException] {
        Await.result(client.query("ok"))
      }
    }
  }

  test("thriftmux server + thriftmux client: ClientId should not be overridable externally") {
    val server = ThriftMux.serveIface(
      new InetSocketAddress(InetAddress.getLoopbackAddress, 0),
      new TestService.FutureIface {
        def query(x: String) = Future.value(ClientId.current map { _.name } getOrElse(""))
      })

    val clientId = ClientId("test.service")
    val otherClientId = ClientId("other.bar")
    val client = ThriftMux.client
      .withClientId(clientId)
      .newIface[TestService.FutureIface](server)

    1 to 5 foreach { _ =>
      otherClientId.asCurrent {
        assert(Await.result(client.query("ok")) === clientId.name)
      }
    }
  }

  // Skip upnegotiation.
  object OldPlainThriftClient extends Thrift.Client(stack=StackClient.newStack)

  test("thriftmux server + Finagle thrift client w/o protocol upgrade") {
    new ThriftMuxTestServer {
      val client = OldPlainThriftClient.newIface[TestService.FutureIface](server)
      1 to 5 foreach { _ =>
        assert(Await.result(client.query("ok")) === "okok")
      }
    }
  }

  test("thriftmux server + Finagle thrift client w/o protocol upgrade: server.close()") {
    new ThriftMuxTestServer {
      val client = OldPlainThriftClient.newIface[TestService.FutureIface](server)

      assert(Await.result(client.query("ok")) == "okok")
      Await.result(server.close())

      intercept[ChannelWriteException] {
        Await.result(client.query("ok"))
      }
    }
  }

  test("""|thriftmux server + thrift client: client should receive a
          | TApplicationException if the server throws an unhandled exception
       """.stripMargin) {
    val server = ThriftMux.serveIface(
      new InetSocketAddress(InetAddress.getLoopbackAddress, 0),
      new TestService.FutureIface {
        def query(x: String) = throw new Exception("sad panda")
      })
    val client = OldPlainThriftClient.newIface[TestService.FutureIface](server)
    val thrown = intercept[Exception] { Await.result(client.query("ok")) }
    assert(thrown.getMessage === "Internal error processing query: 'java.lang.Exception: sad panda'")
  }

  test("thriftmux server should count exceptions as failures") {
    val iface = new TestService.FutureIface {
      def query(x: String) = Future.exception(new RuntimeException("lolol"))
    }
    val svc = new TestService.FinagledService(iface, Protocols.binaryFactory())

    val sr = new InMemoryStatsReceiver()
    val server = ThriftMux.server
      .configured(Stats(sr))
      .serve(new InetSocketAddress(InetAddress.getLoopbackAddress, 0), svc)
    val client = ThriftMux.client.newIface[TestService.FutureIface](server)

    val ex = intercept[org.apache.thrift.TApplicationException] {
      Await.result(client.query("hi"))
    }
    assert(ex.getMessage.contains("lolol"))
    // A thrown server-side exception must register as a failure, not a success.
    assert(sr.counters(Seq("thrift", "requests")) === 1)
    assert(sr.counters.get(Seq("thrift", "success")) === None)
    assert(sr.counters(Seq("thrift", "failures")) === 1)
    server.close()
  }

  test("thriftmux server + thrift client w/o protocol upgrade but w/ pipelined dispatch") {
    val nreqs = 5
    val servicePromises = Array.fill(nreqs)(new Promise[String])
    val requestReceived = Array.fill(nreqs)(new Promise[String])
    val testService = new TestService.FutureIface {
      @volatile var nReqReceived = 0
      def query(x: String) = synchronized {
        nReqReceived += 1
        requestReceived(nReqReceived-1).setValue(x)
        servicePromises(nReqReceived-1)
      }
    }
    val server = ThriftMux.serveIface(
      new InetSocketAddress(InetAddress.getLoopbackAddress, 0), testService)

    object OldPlainPipeliningThriftClient extends Thrift.Client(stack=StackClient.newStack) {
      override protected def newDispatcher(transport: Transport[ThriftClientRequest, Array[Byte]]) =
        new PipeliningDispatcher(transport)
    }

    val service = Await.result(OldPlainPipeliningThriftClient.newClient(server)())
    val client = new TestService.FinagledClient(service, Protocols.binaryFactory())
    val reqs = 1 to nreqs map { i => client.query("ok" + i) }
    // Although the requests are pipelined in the client, they must be
    // received by the service serially.
    1 to nreqs foreach { i =>
      val req = Await.result(requestReceived(i-1), 5.seconds)
      if (i != nreqs) assert(!requestReceived(i).isDefined)
      assert(testService.nReqReceived === i)
      servicePromises(i-1).setValue(req + req)
    }
    1 to nreqs foreach { i =>
      assert(Await.result(reqs(i-1)) === "ok" + i + "ok" + i)
    }
  }

  test("thriftmux client: should emit ClientId") {
    val server = ThriftMux.serveIface(
      new InetSocketAddress(InetAddress.getLoopbackAddress, 0),
      new TestService.FutureIface {
        def query(x: String) = {
          Future.value(ClientId.current.map(_.name).getOrElse(""))
        }
      })
    val client = ThriftMux.client.withClientId(ClientId("foo.bar"))
      .newIface[TestService.FutureIface](server)

    assert(Await.result(client.query("ok")) === "foo.bar")
  }

  /* TODO: add back when sbt supports old-school thrift gen
  test("end-to-end finagle-thrift") {
    import com.twitter.finagle.thriftmux.thrift.TestService

    val server = ThriftMux.serveIface(
      new InetSocketAddress(InetAddress.getLoopbackAddress, 0), new TestService.ServiceIface {
        def query(x: String) = Future.value(x+x)
      })

    val client = ThriftMux.newIface[TestService.ServiceIface](server)
    assert(client.query("ok").get() === "okok")
  }
  */

  test("ThriftMux servers and clients should export protocol stats") {
    val iface = new TestService.FutureIface {
      def query(x: String) = Future.value(x+x)
    }
    val mem = new InMemoryStatsReceiver
    val sr = Stats(mem)
    val server = ThriftMux.server
      .configured(sr)
      .configured(Label("server"))
      .serveIface(new InetSocketAddress(InetAddress.getLoopbackAddress, 0), iface)

    val client = ThriftMux.client
      .configured(sr)
      .configured(Label("client"))
      .newIface[TestService.FutureIface](server)

    assert(Await.result(client.query("ok")) === "okok")
    // Both sides should report the negotiated protocol gauge.
    assert(mem.gauges(Seq("server", "protocol", "thriftmux"))() === 1.0)
    assert(mem.gauges(Seq("client", "protocol", "thriftmux"))() === 1.0)
  }

  test("ThriftMux clients are properly labeled and scoped") {
    new ThriftMuxTestServer {
      def base(sr: InMemoryStatsReceiver) = ThriftMux.Client().configured(Stats(sr))

      def assertStats(prefix: String, sr: InMemoryStatsReceiver, iface: TestService.FutureIface) {
        assert(Await.result(iface.query("ok")) === "okok")
        // These stats are exported by scrooge generated code.
        assert(sr.counters(Seq(prefix, "query", "requests")) === 1)
      }

      // non-labeled client inherits destination as label
      val sr1 = new InMemoryStatsReceiver
      assertStats(server.toString, sr1,
        base(sr1).newIface[TestService.FutureIface](server))

      // labeled via configured
      val sr2 = new InMemoryStatsReceiver
      assertStats("client1", sr2,
        base(sr2).configured(Label("client1")).newIface[TestService.FutureIface](server))
    }
  }

  test("ThriftMux with TCompactProtocol") {
    // A TBinary client against a TCompact server must fail: the factories
    // are incompatible on the wire.
    // ThriftMux.server API
    {
      val pf = new TCompactProtocol.Factory
      val server = ThriftMux.server.withProtocolFactory(pf)
        .serveIface(
          new InetSocketAddress(InetAddress.getLoopbackAddress, 0),
          new TestService.FutureIface {
            def query(x: String) = Future.value(x+x)
          })

      val tcompactClient = ThriftMux.client.withProtocolFactory(pf)
        .newIface[TestService.FutureIface](server)
      assert(Await.result(tcompactClient.query("ok")) === "okok")

      val tbinaryClient = ThriftMux.newIface[TestService.FutureIface](server)
      intercept[com.twitter.finagle.mux.ServerApplicationError] {
        Await.result(tbinaryClient.query("ok"))
      }
    }

    // ThriftMuxServer API
    {
      val pf = new TCompactProtocol.Factory
      val server = ThriftMuxServer.withProtocolFactory(pf)
        .serveIface(
          new InetSocketAddress(InetAddress.getLoopbackAddress, 0),
          new TestService.FutureIface {
            def query(x: String) = Future.value(x+x)
          })

      val tcompactClient = ThriftMux.client.withProtocolFactory(pf)
        .newIface[TestService.FutureIface](server)
      assert(Await.result(tcompactClient.query("ok")) === "okok")

      val tbinaryClient = ThriftMux.newIface[TestService.FutureIface](server)
      intercept[com.twitter.finagle.mux.ServerApplicationError] {
        Await.result(tbinaryClient.query("ok"))
      }
    }
  }
}
| kristofa/finagle | finagle-thriftmux/src/test/scala/com/twitter/finagle/thriftmux/EndToEndTest.scala | Scala | apache-2.0 | 19,247 |
package org.scalajs.testsuite.javalib.time.chrono
import java.time.{DateTimeException, LocalTime, LocalDate}
import java.time.chrono.ChronoLocalDate
import org.scalajs.testsuite.javalib.time.temporal.TemporalTest
object ChronoLocalDateTest extends TemporalTest {
  import ChronoLocalDate._

  describe("java.time.chrono.ChronoLocalDate") {
    it("should respond to `timeLineOrder`") {
      // timeLineOrder must agree with LocalDate's natural ordering for every
      // pair of dates, including the extremes.
      val ord = timeLineOrder
      val ds = Seq(LocalDate.MIN, LocalDate.of(2011, 2, 28), LocalDate.MAX)
      for {
        d1 <- ds
        d2 <- ds
      } {
        expect(ord.compare(d1, d2) == d1.compareTo(d2)).toBeTruthy
      }
    }

    it("should respond to `from`") {
      // `from` round-trips any ChronoLocalDate, but must reject time-only
      // temporals with a DateTimeException.
      for (d <- Seq(LocalDate.MIN, LocalDate.of(2011, 2, 28), LocalDate.MAX))
        testTemporal(from(d))(d)

      for (t <- Seq(LocalTime.MIN, LocalTime.NOON, LocalTime.MAX))
        expectThrows[DateTimeException](from(t))
    }
  }
}
| jasonchaffee/scala-js | test-suite/js/src/test/require-jdk8/org/scalajs/testsuite/javalib/time/chrono/ChronoLocalDateTest.scala | Scala | bsd-3-clause | 911 |
/**
* Copyright 2010 James Strachan
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package webbytest
import sbt._
/**
* @version $Revision : 1.1 $
*/
/** Mixin for sbt projects that writes an HTML test report alongside the
  * normal test listeners.
  */
trait HtmlTestsProject extends DefaultProject {
  //this : DefaultProject =>

  // Absolute path of the HTML report, derived from the project's output
  // directory.  Note: has a logging side effect on each call.
  def htmlTestReportFileName = {
    //val dir = projectName + "/" + outputDirectoryName + "/"
    val dir = outputPath.asFile.getCanonicalPath + "/"
    println("Using output path of: " + dir)
    dir + "tests.html"
  }

  // Prepend the HTML listener to whatever listeners the base project defines.
  override def testListeners: Seq[TestReportListener] = {
    val htmlTestListener = new HtmlTestsListener(htmlTestReportFileName)
    (htmlTestListener :: Nil) ++ super.testListeners
  }
}
package edu.cmu.lti.nlp.amr.GraphDecoder
import edu.cmu.lti.nlp.amr._
import edu.cmu.lti.nlp.amr.FastFeatureVector._
import java.io.File
import java.io.FileOutputStream
import java.io.PrintStream
import java.io.BufferedOutputStream
import java.io.OutputStreamWriter
import java.lang.Math.abs
import java.lang.Math.log
import java.lang.Math.exp
import java.lang.Math.random
import java.lang.Math.floor
import java.lang.Math.min
import java.lang.Math.max
import scala.io.Source
import scala.util.matching.Regex
import scala.collection.mutable.Map
import scala.collection.mutable.Set
import scala.collection.mutable.ArrayBuffer
import Double.{NegativeInfinity => minusInfty}
/** Greedy graph decoder ("Algorithm 1"): for every (node, label) pair it
  * independently adds the highest-scoring positive-weight outgoing edges,
  * subject to each label's maximum cardinality, then selects a root.  The
  * result is not guaranteed to be connected unless `connectedConstraint`
  * requests a top-level 'and' node ("and").
  */
class Alg1(options: Map[Symbol,String], featureNames: List[String], labelSet: Array[(String, Int)], connectedConstraint: String = "none") extends Decoder {
    // Base class has defined:
    // val features: Features
    var features = new Features(options, featureNames, labelSet.map(_._1))

    def decode(input: Input) : DecoderResult = {
        // Assumes that Node.relations has been setup correctly for the graph fragments
        features.input = input // WARNING: This needs to be called before graphObj is created, because when graphObj is created we compute the features of the edges that are already present in the graph fragments
        var graph = input.graph.get.duplicate
        logger(1, "graph.spans = "+graph.spans.toList)
        // Only named nodes (non-constants) participate in edge prediction.
        val nodes : List[Node] = graph.nodes.filter(_.name != None).toList // TODO: test to see if a view is faster
        val graphObj = new GraphObj(graph, nodes.toArray, features) // graphObj keeps track of the connectivity of the graph as we add edges

        logger(1, "Alg1")
        //logger(2, "weights = " + features.weights)
        // For each source node and each label, add up to maxCardinality edges
        // (minus any already present in the fragments).  Candidate targets are
        // ranked by local score; only strictly positive scores are kept.
        for { (node1, index1) <- nodes.zipWithIndex
              relations = node1.relations.map(_._1)
              ((label, maxCardinality), labelIndex) <- labelSet.zipWithIndex } {
            if (relations.count(_ ==label) == 0) { // relations.count(_ == label) counts the edges that are already in the graph fragments
                // Search over the nodes, and pick the ones with highest score
                val nodes2 : List[(Node, Int, Double)] = nodes.zipWithIndex.filter(x => x._2 != index1).map(x => (x._1, x._2, graphObj.localScore(index1, x._2, labelIndex))).filter(x => x._3 > 0 && x._1.id != node1.id).sortBy(-_._3).take(maxCardinality)
                for ((node2, index2, weight) <- nodes2) {
                    logger(1, "Adding edge ("+node1.concept+", "+label +", "+node2.concept + ") with weight "+weight.toString)
                    graphObj.addEdge(node1, index1, node2, index2, label, weight)
                }
                if (nodes2.size > 0) {
                    logger(2, "node1 = " + node1.concept)
                    logger(2, "label = " + label)
                    logger(2, "nodes2 = " + nodes.toString)
                    //logger(1, "feats = " + feats.toString)
                }
            } else if (relations.count(_ == label) < maxCardinality) { // relations.count(_ == label) counts the edges that are already in the graph fragments
                // Search over the nodes, and pick the ones with highest score
                val relationIds : List[String] = node1.relations.map(_._2.id) // we assume if there is a relation already in the fragment, we can't add another relation type between the two nodes
                val nodes2 : List[(Node, Int, Double)] = nodes.zipWithIndex.filter(x => x._2 != index1).map(x => (x._1, x._2, graphObj.localScore(index1, x._2, labelIndex))).filter(x => x._3 > 0 && x._1.id != node1.id && !relationIds.contains(x._1.id)).sortBy(-_._3).take(maxCardinality - relations.count(_ == label))
                for ((node2, index2, weight) <- nodes2) {
                    logger(1, "Adding edge ("+node1.concept+", "+label +", "+node2.concept + ") with weight "+weight.toString)
                    graphObj.addEdge(node1, index1, node2, index2, label, weight)
                }
                if (nodes2.size > 0) {
                    logger(2, "node1 = " + node1.concept)
                    logger(2, "label = " + label)
                    logger(2, "nodes2 = " + nodes.toString)
                    //logger(1, "feats = " + feats.toString)
                }
            }
        }

        logger(1, "Alg1 adding root")
        if (nodes.size > 0) {
            if (connectedConstraint == "and" && !graphObj.connected) {
                // We need to add a top level 'and' node, and make it the root
                // We will pick it's children by rootScore, but won't add any features to the feature vector
                // because it would adversely affect learning
                // One child per connected component: the component's best node
                // by rootScore.
                val children = (
                    for { setid: Int <- graphObj.set.toList.distinct
                    } yield {
                    graph.getNodeById(graphObj.setArray(setid).map(nodeIndex => { val node = graphObj.nodes(nodeIndex); (node.id, features.rootScore(node)) }).maxBy(_._2)._1)
                })
                val relations = children.map(x => (":op",x))
                graph.root = Node(graph.nodes.size.toString, // id TODO: check that this id doesn't conflict
                                  Some("a99"), // name TODO: pick a better name
                                  "and", // concept
                                  relations, // relations
                                  relations, // topologicalOrdering
                                  List(), // variableRelations
                                  None, // alignment
                                  ArrayBuffer()) // spans
                graph.getNodeById(graph.root.id) = graph.root
                graph.getNodeByName(graph.root.name.get) = graph.root
                graphObj.set = graphObj.set.map(x => 0) // update the graphObj so it knows the graph is connected
                graphObj.setArray(0) ++= Range(0, graphObj.set.size)
            } else {
                // Pick the best-scoring non-constant node as root (or the
                // first node when no root features are defined).
                val candidates = nodes.filter(node => !node.isConstant)
                if (features.rootFeatureFunctions.size > 0 && candidates.size > 0) {
                    graph.root = candidates.map(x => (x, features.rootScore(x))).maxBy(_._2)._1
                } else {
                    graph.root = nodes(0)
                }
                graphObj.feats += features.rootFeatures(graph.root)
                graphObj.score += features.rootScore(graph.root)
            }
            // Edges were prepended during decoding; restore insertion order.
            nodes.map(node => { node.relations = node.relations.reverse })
            //logger(1, "Alg1 makeTopologicalOrdering")
            //if (connectedConstraint != "none") {
            //    graph.makeTopologicalOrdering() // won't work if not connected
            //}
        } else {
            graph = Graph.AMREmpty()
        }
        logger(1, "Alg1 returning")
        return DecoderResult(graph, graphObj.feats, graphObj.score)
    }
}
| jflanigan/jamr | src/GraphDecoder/Alg1.scala | Scala | bsd-2-clause | 6,964 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.libs.ws
import akka.actor.ActorSystem
import akka.stream.Materializer
import play.api.mvc.Results._
import play.api.mvc._
import play.api.test._
import play.core.server.Server
import play.libs.ws.ahc.AhcWSClient
import play.shaded.ahc.org.asynchttpclient.AsyncHttpClient
/**
 * Integration spec for the Java WS client ([[AhcWSClient]]): posts an
 * `InputStream` body to an embedded Play server and verifies the server
 * received the full stream.
 */
class WSSpec extends PlaySpecification with WsTestClient {
  // Examples share an embedded server/client setup, so run them in order.
  sequential
  "WSClient.url().post(InputStream)" should {
    "uploads the stream" in {
      Server.withRouterFromComponents() { components =>
        import components.{ defaultActionBuilder => Action }
        import play.api.routing.sird.{ POST => SirdPost }
        import play.api.routing.sird._
        {
          // Server side: echo the size of the raw request body that arrived.
          case SirdPost(p"/") =>
            Action { req: Request[AnyContent] =>
              req.body.asRaw.fold[Result](BadRequest) { raw =>
                val size = raw.size
                Ok(s"size=$size")
              }
            }
        }
      } { implicit port =>
        withClient { ws =>
          // NOTE(review): this ActorSystem is never terminated; tolerable in a
          // short-lived test JVM but it leaks threads while the suite runs.
          val mat = Materializer.matFromSystem(ActorSystem())
          val javaWs = new AhcWSClient(ws.underlying[AsyncHttpClient], mat)
          val input = this.getClass.getClassLoader.getResourceAsStream("play/libs/ws/play_full_color.png")
          // Blocking get is acceptable here: the request targets the local test server.
          val rep = javaWs.url(s"http://localhost:$port/").post(input).toCompletableFuture.get()
          rep.getStatus must ===(200)
          // The bundled PNG resource is expected to be 20039 bytes long.
          rep.getBody must ===("size=20039")
        }
      }
    }
  }
}
| benmccann/playframework | transport/client/play-ahc-ws/src/test/scala/play/libs/ws/WSSpec.scala | Scala | apache-2.0 | 1,499 |
/*
* Copyright (C) 2009-2018 Lightbend Inc. <https://www.lightbend.com>
*/
package scalaguide.json
import javax.inject.Inject
import org.junit.runner.RunWith
import org.specs2.runner.JUnitRunner
import play.api.mvc._
import play.api.test._
import scala.concurrent.Future
/**
 * Documentation examples for serving and handling JSON over HTTP.
 *
 * The paired `//#label` markers appear to delimit code samples that are
 * extracted verbatim into the manual (note the `//###insert:` directive), so
 * nothing should be added or changed between a pair of markers without
 * updating the corresponding documentation page.
 */
@RunWith(classOf[JUnitRunner])
class ScalaJsonHttpSpec extends PlaySpecification with Results {
  "JSON with HTTP" should {
    "allow serving JSON" in new WithApplication() with Injecting {
      val Action = inject[DefaultActionBuilder]
      //#serve-json-imports
      //###insert: import play.api.mvc._
      import play.api.libs.functional.syntax._
      import play.api.libs.json._
      //#serve-json-imports
      //#serve-json-implicits
      implicit val locationWrites: Writes[Location] = (
        (JsPath \ "lat").write[Double] and
          (JsPath \ "long").write[Double]
      )(unlift(Location.unapply))
      implicit val placeWrites: Writes[Place] = (
        (JsPath \ "name").write[String] and
          (JsPath \ "location").write[Location]
      )(unlift(Place.unapply))
      //#serve-json-implicits
      //#serve-json
      def listPlaces = Action {
        val json = Json.toJson(Place.list)
        Ok(json)
      }
      //#serve-json
      // Invoke the action directly and check the serialised payload.
      val result: Future[Result] = listPlaces().apply(FakeRequest())
      status(result) === OK
      contentType(result) === Some("application/json")
      contentAsString(result) === """[{"name":"Sandleford","location":{"lat":51.377797,"long":-1.318965}},{"name":"Watership Down","location":{"lat":51.235685,"long":-1.309197}}]"""
    }
    "allow handling JSON" in new WithApplication() with Injecting {
      val Action = inject[DefaultActionBuilder]
      //#handle-json-imports
      import play.api.libs.functional.syntax._
      import play.api.libs.json._
      //#handle-json-imports
      //#handle-json-implicits
      implicit val locationReads: Reads[Location] = (
        (JsPath \ "lat").read[Double] and
          (JsPath \ "long").read[Double]
      )(Location.apply _)
      implicit val placeReads: Reads[Place] = (
        (JsPath \ "name").read[String] and
          (JsPath \ "location").read[Location]
      )(Place.apply _)
      //#handle-json-implicits
      //#handle-json
      def savePlace = Action { request =>
        request.body.asJson.map { json =>
          val placeResult = json.validate[Place]
          placeResult.fold(
            errors => {
              BadRequest(Json.obj("status" ->"KO", "message" -> JsError.toJson(errors)))
            },
            place => {
              Place.save(place)
              Ok(Json.obj("status" ->"OK", "message" -> ("Place '"+place.name+"' saved.") ))
            }
          )
        }.getOrElse {
          BadRequest(Json.obj("status" ->"KO", "message" -> "Expecting JSON data."))
        }
      }
      //#handle-json
      // Fixture request carrying a valid Place document as its JSON body.
      val body = Json.parse("""
      {
        "name" : "Nuthanger Farm",
        "location" : {
          "lat" : 51.244031,
          "long" : -1.263224
        }
      }
      """)
      val request = FakeRequest().withHeaders(CONTENT_TYPE -> "application/json").withJsonBody(body)
      val result: Future[Result] = savePlace().apply(request)
      status(result) === OK
      contentType(result) === Some("application/json")
      contentAsString(result) === """{"status":"OK","message":"Place 'Nuthanger Farm' saved."}"""
    }
    "allow handling JSON with BodyParser" in new WithApplication() with Injecting {
      import play.api.libs.functional.syntax._
      import play.api.libs.json._
      implicit val locationReads: Reads[Location] = (
        (JsPath \ "lat").read[Double] and
          (JsPath \ "long").read[Double]
      )(Location.apply _)
      implicit val placeReads: Reads[Place] = (
        (JsPath \ "name").read[String] and
          (JsPath \ "location").read[Location]
      )(Place.apply _)
      val parse = inject[PlayBodyParsers]
      val Action = inject[DefaultActionBuilder]
      //#handle-json-bodyparser
      def savePlace = Action(parse.json) { request =>
        val placeResult = request.body.validate[Place]
        placeResult.fold(
          errors => {
            BadRequest(Json.obj("status" ->"KO", "message" -> JsError.toJson(errors)))
          },
          place => {
            Place.save(place)
            Ok(Json.obj("status" ->"OK", "message" -> ("Place '"+place.name+"' saved.") ))
          }
        )
      }
      //#handle-json-bodyparser
      val body: JsValue = Json.parse("""
      {
        "name" : "Nuthanger Farm",
        "location" : {
          "lat" : 51.244031,
          "long" : -1.263224
        }
      }
      """)
      val request = FakeRequest().withHeaders(CONTENT_TYPE -> "application/json").withBody(body)
      val result: Future[Result] = savePlace().apply(request)
      val bodyText: String = contentAsString(result)
      status(result) === OK
      contentType(result) === Some("application/json")
      contentAsString(result) === """{"status":"OK","message":"Place 'Nuthanger Farm' saved."}"""
    }
    "allow concise handling JSON with BodyParser" in new WithApplication() with Injecting {
      import scala.concurrent.ExecutionContext.Implicits.global
      val parse = inject[PlayBodyParsers]
      val Action = inject[DefaultActionBuilder]
      //#handle-json-bodyparser-concise
      import play.api.libs.functional.syntax._
      import play.api.libs.json.Reads._
      import play.api.libs.json._
      implicit val locationReads: Reads[Location] = (
        (JsPath \ "lat").read[Double](min(-90.0) keepAnd max(90.0)) and
          (JsPath \ "long").read[Double](min(-180.0) keepAnd max(180.0))
      )(Location.apply _)
      implicit val placeReads: Reads[Place] = (
        (JsPath \ "name").read[String](minLength[String](2)) and
          (JsPath \ "location").read[Location]
      )(Place.apply _)
      // This helper parses and validates JSON using the implicit `placeReads`
      // above, returning errors if the parsed json fails validation.
      def validateJson[A : Reads] = parse.json.validate(
        _.validate[A].asEither.left.map(e => BadRequest(JsError.toJson(e)))
      )
      // if we don't care about validation we could replace `validateJson[Place]`
      // with `BodyParsers.parse.json[Place]` to get an unvalidated case class
      // in `request.body` instead.
      def savePlaceConcise = Action(validateJson[Place]) { request =>
        // `request.body` contains a fully validated `Place` instance.
        val place = request.body
        Place.save(place)
        Ok(Json.obj("status" ->"OK", "message" -> ("Place '"+place.name+"' saved.") ))
      }
      //#handle-json-bodyparser-concise
      val body: JsValue = Json.parse("""
      {
        "name" : "Nuthanger Farm",
        "location" : {
          "lat" : 51.244031,
          "long" : -1.263224
        }
      }
      """)
      // The request body is an already-deserialised Place, matching the typed body parser.
      val request = FakeRequest().withHeaders(CONTENT_TYPE -> "application/json").withBody(Json.fromJson[Place](body).get)
      val result: Future[Result] = savePlaceConcise().apply(request)
      val bodyText: String = contentAsString(result)
      status(result) === OK
      contentType(result) === Some("application/json")
      contentAsString(result) === """{"status":"OK","message":"Place 'Nuthanger Farm' saved."}"""
    }
  }
}
// Sample domain model and controller used by the documentation examples above.
// The //#model and //#controller markers appear to delimit snippets extracted
// verbatim into the manual, so avoid edits inside the marker pairs without
// updating the corresponding documentation page.
//#model
case class Location(lat: Double, long: Double)
case class Place(name: String, location: Location)
object Place {
  var list: List[Place] = {
    List(
      Place(
        "Sandleford",
        Location(51.377797, -1.318965)
      ),
      Place(
        "Watership Down",
        Location(51.235685, -1.309197)
      )
    )
  }
  def save(place: Place) = {
    list = list ::: List(place)
  }
}
//#model
//#controller
import play.api.mvc._
class HomeController @Inject()(cc:ControllerComponents) extends AbstractController(cc) {
}
//#controller
| Shenker93/playframework | documentation/manual/working/scalaGuide/main/json/code/ScalaJsonHttpSpec.scala | Scala | apache-2.0 | 7,909 |
/*
* Copyright (c) 2013-2022 Erik van Oosten
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.grons.metrics4.scala
import com.codahale.metrics.{MetricRegistry, Reservoir, Histogram => DropwizardHistogram, Timer => DropwizardTimer}
import nl.grons.metrics4.scala.Implicits._
import org.mpierce.metrics.reservoir.hdrhistogram.{HdrHistogramReservoir, HdrHistogramResetOnSnapshotReservoir}
/**
* An alternative metric builder that creates [[Histogram]]s and [[Timer]]s with
* [[Reservoir]]s from the HdrHistogram library.
*
* See the [[https://github.com/erikvanoosten/metrics-scala/blob/master/docs/Hdrhistogram.md the manual]]
* for more instructions on using hdrhistogram.
*
* @param resetAtSnapshot `false` to use reservoirs that accumulate internal state forever, or
* `true` to use a reservoir that resets its internal state on each snapshot
* (which is how reporters get information from reservoirs).
* See [[http://taint.org/2014/01/16/145944a.html this article]] for when the latter is useful.
*/
/**
 * A [[MetricBuilder]] variant whose [[Histogram]]s and [[Timer]]s sample into
 * reservoirs from the HdrHistogram library rather than the Dropwizard defaults.
 *
 * See the [[https://github.com/erikvanoosten/metrics-scala/blob/master/docs/Hdrhistogram.md the manual]]
 * for more instructions on using hdrhistogram.
 *
 * @param baseName prefix applied to the names of all metrics created by this builder
 * @param registry the Dropwizard registry the created metrics are registered in
 * @param resetAtSnapshot `true` to reset a reservoir's internal state every time a
 *        snapshot is taken (which is how reporters read reservoirs), `false` to let
 *        state accumulate for the lifetime of the metric
 */
class HdrMetricBuilder(
  baseName: MetricName,
  registry: MetricRegistry,
  resetAtSnapshot: Boolean
) extends MetricBuilder(baseName, registry) {
  /** Creates a histogram named `name`, backed by an HdrHistogram reservoir. */
  override def histogram(name: String): Histogram =
    new Histogram(registry.histogram(metricNameFor(name), () => new DropwizardHistogram(newReservoir())))
  /** Creates a timer named `name`, backed by an HdrHistogram reservoir. */
  override def timer(name: String): Timer =
    new Timer(registry.timer(metricNameFor(name), () => new DropwizardTimer(newReservoir())))
  // Every metric gets its own fresh reservoir; the flavour follows the reset policy.
  private def newReservoir(): Reservoir =
    if (resetAtSnapshot) new HdrHistogramResetOnSnapshotReservoir() else new HdrHistogramReservoir()
}
| erikvanoosten/metrics-scala | metrics-scala-hdr/src/main/scala/nl/grons/metrics4/scala/HdrMetricBuilder.scala | Scala | apache-2.0 | 2,557 |
/*
* Copyright (c) 2013-2014 Telefónica Investigación y Desarrollo S.A.U.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package es.tid.cosmos.infinity.common.json.formats
import java.util.{Calendar, TimeZone}
import org.scalatest.FlatSpec
import org.scalatest.matchers.MustMatchers
/** Verifies that [[Rfc822DateFormat]] understands RFC 822 style timestamps. */
class Rfc822DateFormatTest extends FlatSpec with MustMatchers {
  "Date format" must "parse RFC 822 dates" in {
    val parsed = new Rfc822DateFormat().parse("2014-04-08T12:31:45+0100")
    // Read the parsed instant back in the same offset the input declared.
    val calendar = Calendar.getInstance(TimeZone.getTimeZone("GMT+01:00"))
    calendar.setTime(parsed)
    val expectations = Seq(
      Calendar.YEAR -> 2014,
      Calendar.MONTH -> 3, // Calendar months are zero-based: 3 == April
      Calendar.DAY_OF_MONTH -> 8,
      Calendar.HOUR_OF_DAY -> 12
    )
    for ((field, expected) <- expectations) calendar.get(field) must be (expected)
  }
}
| telefonicaid/fiware-cosmos-platform | infinity/common/src/test/scala/es/tid/cosmos/infinity/common/json/formats/Rfc822DateFormatTest.scala | Scala | apache-2.0 | 1,294 |
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
* */
package io.github.mandar2812.dynaml.models.neuralnets
import breeze.linalg.DenseVector
import io.github.mandar2812.dynaml.graph.FFNeuralGraph
import io.github.mandar2812.dynaml.models.LinearModel
import io.github.mandar2812.dynaml.optimization.{BackPropagation, CommitteeModelSolver, RegularizedOptimizer}
import io.github.mandar2812.dynaml.pipes.DataPipe
/**
* @author mandar2812 date: 9/2/16.
*
* A Neural committee is an ensemble of feedforward
* neural networks whose predictions are weighted
* inorder to generate a model prediction for an input
* data point.
*
* @tparam D The type of the training data
* @param data The training data
* @param transform A data pipe which transforms the training data from its type [[D]]
* to a stream of vector tuples.
* @param networks A collection of neural networks which will form the committee.
*/
class CommitteeNetwork[D](data: D,
                          transform: DataPipe[D, Stream[(DenseVector[Double],
                            DenseVector[Double])]],
                          networks: FFNeuralGraph*) extends
  LinearModel[D, DenseVector[Double], DenseVector[Double],
    Double, Stream[(DenseVector[Double], Double)]] {
  override protected val g: D = data
  // Number of training records, obtained by materialising the transformed data.
  val num_points = dataAsStream(g).length
  // The member networks that make up the committee.
  val baseNetworks: List[FFNeuralGraph] = networks.toList
  // Back-propagation optimizer used to train each base network individually.
  // The mini-batch fraction splits the data evenly across the committee members.
  val baseOptimizer = new BackPropagation()
    .setMomentum(0.01).setRegParam(0.001)
    .setMiniBatchFraction(1.0/baseNetworks.length)
    .setNumIterations(20)
  // Solver that learns the committee weights from the base networks' outputs.
  override protected val optimizer: RegularizedOptimizer[DenseVector[Double],
    DenseVector[Double], Double,
    Stream[(DenseVector[Double], Double)]] = new CommitteeModelSolver()
  // One voting weight per base network, initialised to 1.0 each.
  override protected var params: DenseVector[Double] =
    DenseVector.fill[Double](baseNetworks.length)(1.0)
  // Applies the transform pipe to turn the raw data into (input, target) pairs.
  def dataAsStream(d: D) = transform.run(d)
  /**
   * Predict the target for `point`: the weighted committee vote
   * `params . featureMap(point)`.
   **/
  override def predict(point: DenseVector[Double]): Double =
    params dot featureMap(point)
  override def clearParameters(): Unit = {
    params = initParams()
  }
  // Uniform initial weights summing to 1 across the committee.
  override def initParams(): DenseVector[Double] =
    DenseVector.fill[Double](baseNetworks.length)(1.0/baseNetworks.length)
  // Feature map of an input pattern: the vector formed by the first output
  // unit of every base network.
  featureMap = (pattern) =>
    DenseVector(baseNetworks.map(_.forwardPass(pattern)(0)).toArray)
  /**
   * Train the committee: first fit every base network via back-propagation,
   * then solve for the committee weights on the base networks' outputs
   * (targets are taken from the first output dimension).
   **/
  override def learn(): Unit = {
    //First learn the base Networks
    baseNetworks.foreach(network => {
      baseOptimizer.optimize(num_points,dataAsStream(g),network)
    })
    params = optimizer.optimize(
      num_points,
      dataAsStream(g).map(couple =>
        (featureMap(couple._1), couple._2(0))),
      initParams()
    )
  }
  /** Sets the momentum used when training the base networks; returns `this` for chaining. */
  def setMomentum(m: Double): this.type = {
    this.baseOptimizer.setMomentum(m)
    this
  }
  /**
   * Generate predictions (as a stream of tuples representing the predicted and actual output respectively)
   * for a test set
   *
   * @param d The test set.
   * */
  def test(d: D): Stream[(DenseVector[Double], DenseVector[Double])] = {
    // Transpose the records into column-major lists: per input (and target)
    // dimension, the list of values across all records — assumed to be the
    // batch layout predictBatch expects (TODO confirm against FFNeuralGraph).
    val (procInputs, _) =
      dataAsStream(d)
        .map(c =>
          (c._1.toArray.toList.map(i => List(i)), c._2.toArray.toList.map(i => List(i))))
        .reduce((c1,c2) =>
          (c1._1.zip(c2._1).map(c => c._1++c._2), c1._2.zip(c2._2).map(c => c._1++c._2)))
    // Batch predictions of every base network over the whole test set.
    val committeepredictions = baseNetworks.map(network => {
      network.predictBatch(procInputs)
    })
    dataAsStream(d).map(_._2).zipWithIndex.map(c => {
      // votes(Ndim) = first output unit of network Ndim for record c._2.
      val votes = DenseVector.tabulate[Double](baseNetworks.length)(Ndim =>
        committeepredictions(Ndim)(0)(c._2))
      val prediction: Double = params dot votes
      (DenseVector(prediction), c._1)
    })
  }
}
| transcendent-ai-labs/DynaML | dynaml-core/src/main/scala/io/github/mandar2812/dynaml/models/neuralnets/CommitteeNetwork.scala | Scala | apache-2.0 | 4,583 |
package com.github.sstone.amqp
import akka.testkit.{ImplicitSender, TestKit}
import akka.actor.{ActorRef, Props, ActorSystem}
import akka.util.Timeout
import akka.pattern.{ask, gracefulStop}
import org.scalatest.{BeforeAndAfter, WordSpecLike}
import org.scalatest.matchers.ShouldMatchers
import java.util.concurrent.TimeUnit
import scala.concurrent.Await
import scala.concurrent.duration._
import com.rabbitmq.client.ConnectionFactory
import com.github.sstone.amqp.Amqp._
import scala.util.Random
/**
 * Shared fixture for the channel-related specs: before each example a fresh
 * connection plus [[ChannelOwner]] pair is started and awaited; after each
 * example the connection actor is stopped gracefully.
 */
class ChannelSpec extends TestKit(ActorSystem("TestSystem")) with WordSpecLike with ShouldMatchers with BeforeAndAfter with ImplicitSender {
  implicit val timeout = Timeout(5.seconds)
  val connFactory = new ConnectionFactory()
  // Broker location comes from the test configuration.
  val uri = system.settings.config.getString("amqp-client-test.rabbitmq.uri")
  connFactory.setUri(uri)
  var conn: ActorRef = _
  var channelOwner: ActorRef = _
  val random = new Random()
  // Randomised names keep concurrently running specs from clashing on the broker.
  def randomQueueName = s"queue${random.nextInt()}"
  def randomExchangeName = s"exchange${random.nextInt()}"
  def randomQueue = QueueParameters(name = randomQueueName, passive = false, exclusive = false)
  def randomKey = s"key${random.nextInt()}"
  before {
    println("before")
    conn = system.actorOf(ConnectionOwner.props(connFactory, 1.second))
    channelOwner = ConnectionOwner.createChildActor(conn, ChannelOwner.props())
    waitForConnection(system, conn, channelOwner).await(5, TimeUnit.SECONDS)
  }
  after {
    println("after")
    Await.result(gracefulStop(conn, 5.seconds), 6.seconds)
  }
}
| sstone/amqp-client | src/test/scala/com/github/sstone/amqp/ChannelSpec.scala | Scala | mit | 1,577 |
import org.scalawebtest.core.gauge.HtmlGauge
import org.scalawebtest.core.{IntegrationFlatSpec, IntegrationFreeSpec}
/**
 * Common base class for this example's integration specs: a ScalaWebTest
 * FlatSpec with HTML gauge support, pointed at the locally running app.
 */
abstract class BaseSpec extends IntegrationFlatSpec with HtmlGauge {
  // All relative URIs in the specs resolve against the local dev server.
  config.useBaseUri("http://localhost:9000")
} | unic/ScalaWebTest | examples/play-scala-starter-example/it/BaseSpec.scala | Scala | apache-2.0 | 233 |
package com.github.mdr.mash.evaluator
/**
 * Evaluator tests for user-defined classes in the Mash language: each entry
 * evaluates the Mash source on the left and compares its rendering against
 * the expected value on the right (the `==>` DSL comes from
 * [[AbstractEvaluatorTest]]).
 */
class ClassesTest extends AbstractEvaluatorTest {
  // Field access, via both construction forms (`Point.new` and direct call).
  "class Point x y; Point.new 3 4 | [.x, .y]" ==> "[3, 4]"
  "class Point x y; Point 3 4 | [.x, .y]" ==> "[3, 4]"
  // Methods can reference fields directly, via `this`, and call each other.
  "class Point x y { def sum = x + y }; Point 3 4 | .sum" ==> 7
  "class Point x y { def sum = this.x + this.y }; Point 3 4 | .sum" ==> 7
  "class Point x y { def sum = x + y; def sumSquared = sum * sum }; Point 3 4 | .sumSquared" ==> 49
  // Field mutation through methods: assignment, `+=`, and indexed access.
  "class Box n { def update n = this.n = n }; b = Box 0; b.update 10; b.n" ==> 10
  "class Box n { def increment = n += 1 }; box = Box 10; box.increment; box.n" ==> 11
  "class Box n { def increment = this['n'] += 1 }; box = Box 10; box.increment; box.n" ==> 11
  // Nested class definitions close over both enclosing scopes (1+2+3+4 = 10).
  """class Outer o1 {
       def outer o2 = {
         class Inner i1 {
            def inner i2 = o1 + o2 + i1 + i2
         }
         Inner 1 | .inner 2
       }
    }
    Outer 3 | .outer 4
  """ ==> "10"
  // A constructor field takes precedence over a method of the same name.
  "class Thing x { def x = 100 }; Thing 42 | .x" ==> 42
  "class Thing { def x = 100; def y = { x = 42; x } }; Thing.new.y" ==> 42
  "class Thing; Thing.new.getClass.name" ==> "'Thing'"
  "class Thing { }; Thing.new.getClass.name" ==> "'Thing'"
  // Member access distributes element-wise over lists.
  "class Point x y { def add = x + y }; [Point 1 2, Point 3 4].add" ==> "[3, 7]"
  "[Object, Object].merge { foo: 42 }" ==> "[{ foo: 42 }, { foo: 42 }]"
  "class Foo { def getFields = this.fields }; Foo.new.getFields" ==> "[]"
  "class Foo { def getFields = this.toString }; Foo.new.getFields" ==> "'{}'"
  // A class definition is itself a value and can be piped.
  "(class A { def foo = 42 }) | .new.foo" ==> 42
}
| mdr/mash | src/test/scala/com/github/mdr/mash/evaluator/ClassesTest.scala | Scala | mit | 1,521 |
package BootstrapResampling
import org.scalatest.FunSuite
import org.scalatest.BeforeAndAfter
/**
 * Regression tests for [[Bootstrapper]]: checks the bootstrap estimate of the
 * mean, plus percentile and BCa confidence intervals for the mean and variance
 * of a fixed sample, against hard-coded reference values (presumably computed
 * offline — TODO confirm their source).
 */
class BootstrapResamplingTest extends FunSuite with BeforeAndAfter {
  /** Arithmetic mean of `x`; also the statistic handed to the bootstrapper. */
  def mean(x: Vector[Double]) = {
    x.foldLeft(0.0)((a, b) => a + b) / x.length
  }
  /** Unbiased (n - 1 denominator) sample variance of `x`. */
  def variance(x: Vector[Double]) = {
    val mu = mean(x)
    x.foldLeft(0.0)((a, b) => a + math.pow(mu - b, 2)) / (x.length - 1)
  }
  // Fixed input sample shared by all tests.
  val data = Vector(22.8283028, 41.6764537, 14.3581104, 20.4552237, 19.0313830,
    12.4496791,1.2669327, 29.6890167, 9.4031684, 17.0438982,
    0.1387493, -0.7459116,2.4532079, -0.7734217, -3.2601918,
    1.3305351, 0.8977259, -1.0393727,-2.2172627, -3.8445457,
    0.4352354, 1.4622797, -1.0556765, -2.8137459,0.8129859,
    1.9504362, 0.4503604, -9.8128885, 2.4238871, 0.3365611)
  var b: Bootstrapper = _
  val B = 5000    // number of bootstrap resamples
  val seed = 1184 // fixed seed keeps the resampling deterministic
  // Reference confidence-interval bounds for the mean.
  val perc_low = 2.120656563333334
  val per_high = 10.103015473333334
  val bca_low = 2.62535025
  val bca_high = 10.79515345666667
  before {
    // Fresh bootstrapper of the mean before each test (some tests reassign `b`).
    b = new Bootstrapper(data, mean, B, seed)
  }
  test("Correctly finds mean") {
    val true_mean = 5.844371
    // Use the absolute difference: the previous one-sided check
    // `mean(...) - true_mean < 0.02` would also pass for any estimate far
    // *below* the true mean.
    assert(math.abs(mean(b.t_tilde) - true_mean) < 0.02)
  }
  test("Finds correct percentile CIs for mean") {
    val perc_ci = b.CI("percentile")
    perc_ci match {
      case Left(err) => sys.error(err)
      case Right((low, high)) => {
        assert(low == perc_low)
        assert(high == per_high)
      }
    }
  }
  test("Finds correct BCa CIs for mean") {
    val bca_ci = b.CI("bca")
    bca_ci match {
      case Left(err) => sys.error(err)
      case Right(confInt: ConfInt) => {
        assert(confInt._1 == bca_low)
        assert(confInt._2 == bca_high)
      }
    }
  }
  test("Finds correct percentile CIs for variance") {
    b = new Bootstrapper(data, variance, B, seed)
    val var_perc_low = 49.82842715300962
    val var_perc_high = 220.28869068888378
    val perc_ci = b.CI("percentile")
    perc_ci match {
      case Left(err) => sys.error(err)
      case Right(confInt: ConfInt) => {
        assert(confInt._1 == var_perc_low)
        assert(confInt._2 == var_perc_high)
      }
    }
  }
  test("Finds correct BCa CIs for variance") {
    b = new Bootstrapper(data, variance, B, seed)
    val var_bca_low = 67.61649769675655
    val var_bca_high = 286.02288870943335
    val bca_ci = b.CI("bca")
    bca_ci match {
      case Left(err) => sys.error(err)
      case Right(confInt: ConfInt) => {
        assert(confInt._1 == var_bca_low)
        assert(confInt._2 == var_bca_high)
      }
    }
  }
  test("Default CI used") {
    // Calling CI() with no argument defaults to the BCa interval.
    val bca_ci = b.CI()
    bca_ci match {
      case Left(err) => sys.error(err)
      case Right(confInt: ConfInt) => {
        assert(confInt._1 == bca_low)
        assert(confInt._2 == bca_high)
      }
    }
  }
  test("Returns Left(err) for bad CI type") {
    val bad_ci = b.CI("KingOfOoo")
    bad_ci match {
      case Left(err) => assert(true)
      case Right(confInt: ConfInt) => {
        sys.error("Returned confidence interval for fake CI type.")
      }
    }
  }
  test("Corrects invalid B") {
    val bad_B = -12 // Invalid and must be greater than 1
    val fixed_boot = new Bootstrapper(data, mean, bad_B, seed)
    // default_B is presumably defined at package level — not visible in this file.
    assert(fixed_boot.B == default_B)
  }
}
| rdtaylor/BootstrapResampling | src/test/scala/BootstrapResampling/BootstrapResamplingTest.scala | Scala | mit | 3,307 |
package streams
import common._
/**
* This component implements a parser to define terrains from a
* graphical ASCII representation.
*
* When mixing in that component, a level can be defined by
* defining the field `level` in the following form:
*
* val level =
* """------
* |--ST--
* |--oo--
* |--oo--
* |------""".stripMargin
*
* - The `-` character denotes parts which are outside the terrain
* - `o` denotes fields which are part of the terrain
* - `S` denotes the start position of the block (which is also considered
inside the terrain)
* - `T` denotes the final position of the block (which is also considered
inside the terrain)
*
* In this example, the first and last lines could be omitted, and
* also the columns that consist of `-` characters only.
*/
trait StringParserTerrain extends GameDef {
  /**
   * An ASCII representation of the terrain. This field should remain
   * abstract here.
   */
  val level: String
  /**
   * Returns a terrain function for the level described by `levelVector`
   * (the parsed version of the `level` string). For example, the level
   *
   *   val level =
   *     """ST
   *       |oo
   *       |oo""".stripMargin
   *
   * is represented as
   *
   *   Vector(Vector('S', 'T'), Vector('o', 'o'), Vector('o', 'o'))
   *
   * The resulting function returns `true` exactly when the position lies
   * inside the grid and the character there is not '-'.
   */
  def terrainFunction(levelVector: Vector[Vector[Char]]): Pos => Boolean =
    // `lift` turns out-of-range (including negative) indices into None, so no
    // exception-based control flow is needed for the bounds check.
    pos => levelVector.lift(pos.x).flatMap(_.lift(pos.y)).exists(_ != '-')
  /**
   * Returns the position of character `c` in the terrain described by
   * `levelVector`. Assumes `c` appears exactly once in the terrain; if it is
   * absent, the row lookup below fails with an IndexOutOfBoundsException.
   */
  def findChar(c: Char, levelVector: Vector[Vector[Char]]): Pos = {
    val x = levelVector.indexWhere(_.indexOf(c) >= 0)
    Pos(x, levelVector(x).indexOf(c))
  }
  // Parse the level string once: one inner vector of characters per line.
  private lazy val vector: Vector[Vector[Char]] =
    Vector(level.split("\\n").map(str => Vector(str: _*)): _*)
  lazy val terrain: Terrain = terrainFunction(vector)
  lazy val startPos: Pos = findChar('S', vector)
  lazy val goal: Pos = findChar('T', vector)
}
| mmcc007/scala-principles | streams/src/main/scala/streams/StringParserTerrain.scala | Scala | gpl-3.0 | 2,712 |
/*
* Copyright (c) 2014-2015 by its authors. Some rights reserved.
* See the project homepage at: http://www.monifu.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monifu.reactive.observers
import java.util.concurrent.{CountDownLatch, TimeUnit}
import minitest.TestSuite
import monifu.concurrent.Scheduler
import monifu.reactive.Ack.Continue
import monifu.reactive.OverflowStrategy.Unbounded
import monifu.reactive.{Subscriber, Ack, Observer}
import scala.concurrent.{Future, Promise}
/**
 * Concurrency tests for the unbounded [[BufferedSubscriber]]: events must not
 * be lost under load, and `onComplete`/`onError` must only be delivered after
 * the buffered queue has been drained.
 */
object BufferUnboundedConcurrencySuite extends TestSuite[Scheduler] {
  def tearDown(env: Scheduler) = ()
  def setup() = {
    monifu.concurrent.Implicits.globalScheduler
  }
  // Synchronous producer: all events pushed from the test thread.
  test("should not lose events, test 1") { implicit s =>
    var number = 0
    val completed = new CountDownLatch(1)
    val underlying = new Observer[Int] {
      def onNext(elem: Int): Future[Ack] = {
        number += 1
        Continue
      }
      def onError(ex: Throwable): Unit = {
        s.reportFailure(ex)
      }
      def onComplete(): Unit = {
        completed.countDown()
      }
    }
    val buffer = BufferedSubscriber[Int](Subscriber(underlying, s), Unbounded)
    for (i <- 0 until 100000) buffer.onNext(i)
    buffer.onComplete()
    assert(completed.await(20, TimeUnit.SECONDS), "completed.await should have succeeded")
    assertEquals(number, 100000)
  }
  // Asynchronous producer: each event is pushed from a scheduled task.
  test("should not lose events, test 2") { implicit s =>
    var number = 0
    val completed = new CountDownLatch(1)
    val underlying = new Observer[Int] {
      def onNext(elem: Int): Future[Ack] = {
        number += 1
        Continue
      }
      def onError(ex: Throwable): Unit = {
        s.reportFailure(ex)
      }
      def onComplete(): Unit = {
        completed.countDown()
      }
    }
    val buffer = BufferedSubscriber[Int](Subscriber(underlying, s), Unbounded)
    def loop(n: Int): Unit =
      if (n > 0) s.execute(new Runnable {
        def run() = { buffer.onNext(n); loop(n-1) }
      })
      else buffer.onComplete()
    loop(10000)
    assert(completed.await(20, TimeUnit.SECONDS), "completed.await should have succeeded")
    assertEquals(number, 10000)
  }
  test("should send onError when empty") { implicit s =>
    val latch = new CountDownLatch(1)
    val underlying = new Observer[Int] {
      def onError(ex: Throwable) = {
        assertEquals(ex.getMessage, "dummy")
        latch.countDown()
      }
      // onNext/onComplete must never be reached in this scenario.
      def onNext(elem: Int) = throw new IllegalStateException()
      def onComplete() = throw new IllegalStateException()
    }
    val buffer = BufferedSubscriber[Int](Subscriber(underlying, s), Unbounded)
    buffer.onError(new RuntimeException("dummy"))
    assert(latch.await(5, TimeUnit.SECONDS), "latch.await should have succeeded")
  }
  test("should send onError when in flight") { implicit s =>
    val latch = new CountDownLatch(1)
    val underlying = new Observer[Int] {
      def onError(ex: Throwable) = {
        assertEquals(ex.getMessage, "dummy")
        latch.countDown()
      }
      def onNext(elem: Int) = Continue
      def onComplete() = throw new IllegalStateException()
    }
    val buffer = BufferedSubscriber[Int](Subscriber(underlying, s), Unbounded)
    buffer.onNext(1)
    buffer.onError(new RuntimeException("dummy"))
    assert(latch.await(5, TimeUnit.SECONDS), "latch.await should have succeeded")
  }
  test("should send onComplete when empty") { implicit s =>
    val latch = new CountDownLatch(1)
    val underlying = new Observer[Int] {
      def onError(ex: Throwable) = throw new IllegalStateException()
      def onNext(elem: Int) = throw new IllegalStateException()
      def onComplete() = latch.countDown()
    }
    val buffer = BufferedSubscriber[Int](Subscriber(underlying, s), Unbounded)
    buffer.onComplete()
    assert(latch.await(5, TimeUnit.SECONDS), "latch.await should have succeeded")
  }
  test("should send onComplete when in flight") { implicit s =>
    val latch = new CountDownLatch(1)
    // The consumer's ack stays pending until the promise is completed.
    val promise = Promise[Ack]()
    val underlying = new Observer[Int] {
      def onError(ex: Throwable) = throw new IllegalStateException()
      def onNext(elem: Int) = promise.future
      def onComplete() = latch.countDown()
    }
    val buffer = BufferedSubscriber[Int](Subscriber(underlying, s), Unbounded)
    buffer.onNext(1)
    buffer.onComplete()
    // While the ack is pending, onComplete must NOT be delivered yet.
    assert(!latch.await(1, TimeUnit.SECONDS), "latch.await should have failed")
    promise.success(Continue)
    assert(latch.await(5, TimeUnit.SECONDS), "latch.await should have succeeded")
  }
  test("should do onComplete only after all the queue was drained") { implicit s =>
    var sum = 0L
    val complete = new CountDownLatch(1)
    // Consumption is held back until this promise completes.
    val startConsuming = Promise[Continue]()
    val underlying = new Observer[Long] {
      def onNext(elem: Long) = {
        sum += elem
        startConsuming.future
      }
      def onError(ex: Throwable) = throw ex
      def onComplete() = complete.countDown()
    }
    val buffer = BufferedSubscriber[Long](Subscriber(underlying, s), Unbounded)
    (0 until 9999).foreach(x => buffer.onNext(x))
    buffer.onComplete()
    startConsuming.success(Continue)
    assert(complete.await(10, TimeUnit.SECONDS), "complete.await should have succeeded")
    assert(sum == (0 until 9999).sum)
  }
  test("should do onComplete only after all the queue was drained, test2") { implicit s =>
    var sum = 0L
    val complete = new CountDownLatch(1)
    val underlying = new Observer[Long] {
      def onNext(elem: Long) = {
        sum += elem
        Continue
      }
      def onError(ex: Throwable) = throw ex
      def onComplete() = complete.countDown()
    }
    val buffer = BufferedSubscriber[Long](Subscriber(underlying, s), Unbounded)
    (0 until 9999).foreach(x => buffer.onNext(x))
    buffer.onComplete()
    assert(complete.await(10, TimeUnit.SECONDS), "complete.await should have succeeded")
    assert(sum == (0 until 9999).sum)
  }
  test("should do onError only after the queue was drained") { implicit s =>
    var sum = 0L
    val complete = new CountDownLatch(1)
    val startConsuming = Promise[Continue]()
    val underlying = new Observer[Long] {
      def onNext(elem: Long) = {
        sum += elem
        startConsuming.future
      }
      def onError(ex: Throwable) = complete.countDown()
      def onComplete() = throw new IllegalStateException()
    }
    val buffer = BufferedSubscriber[Long](Subscriber(underlying, s), Unbounded)
    (0 until 9999).foreach(x => buffer.onNext(x))
    buffer.onError(new RuntimeException)
    startConsuming.success(Continue)
    assert(complete.await(10, TimeUnit.SECONDS), "complete.await should have succeeded")
    assertEquals(sum, (0 until 9999).sum)
  }
  test("should do onError only after all the queue was drained, test2") { implicit s =>
    var sum = 0L
    val complete = new CountDownLatch(1)
    val underlying = new Observer[Long] {
      def onNext(elem: Long) = {
        sum += elem
        Continue
      }
      def onError(ex: Throwable) = complete.countDown()
      def onComplete() = throw new IllegalStateException()
    }
    val buffer = BufferedSubscriber[Long](Subscriber(underlying, s), Unbounded)
    (0 until 9999).foreach(x => buffer.onNext(x))
    buffer.onError(new RuntimeException)
    assert(complete.await(10, TimeUnit.SECONDS), "complete.await should have succeeded")
    assertEquals(sum, (0 until 9999).sum)
  }
}
| virtualirfan/monifu | monifu/jvm/src/test/scala/monifu/reactive/observers/BufferUnboundedConcurrencySuite.scala | Scala | apache-2.0 | 7,931 |
package software.betamax.specs2
import org.apache.commons.io.IOUtils
import org.apache.http.client.HttpClient
import org.apache.http.client.methods.HttpGet
import org.apache.http.impl.client.HttpClientBuilder
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
/**
* Created by sean on 2/11/16.
*/
@RunWith(classOf[JUnitRunner])
class RecordedInteractionTest extends Specification {

  // replaying shared tapes is not safe to run concurrently
  sequential

  /** Builds a system-properties-aware Apache HttpClient and passes it to the test body. */
  def withHttpClient[T](block: HttpClient => T): T = {
    val httpClient = HttpClientBuilder.create().useSystemProperties().build()
    block(httpClient)
  }

  "A Betamax test" should {

    "replay google.com" in RecordedInteraction(tape = "google", configuration = _.sslEnabled(true)) {
      withHttpClient { httpClient =>
        val response = httpClient.execute(new HttpGet("https://www.google.com"))
        // status 902 proves the response came from the tape, not the live site
        response.getStatusLine.getStatusCode must beEqualTo(902) // obviously not from Google
      }
    }

    "replay a gzip'd request from https://www.cultizm.com/" in RecordedInteraction(tape = "cultizm", configuration = _.sslEnabled(true)) {
      withHttpClient { httpClient =>
        val response = httpClient.execute(new HttpGet("https://www.cultizm.com/"))
        IOUtils.toString(response.getEntity.getContent) must contain("<!DOCTYPE html PUBLIC")
        response.getStatusLine.getStatusCode must beEqualTo(902) // obviously not from cultizm
      }
    }
  }
}
package skinny.micro.response
import java.io.{ OutputStream, PrintWriter }
import javax.servlet.http.{ Cookie => ServletCookie, HttpServletResponse }
import skinny.micro.ServletConcurrencyException
import skinny.micro.cookie.Cookie
import skinny.micro.implicits.RicherStringImplicits
import scala.collection.JavaConverters._
import scala.collection.mutable.Map
/**
* Rich Servlet response.
*/
/**
 * Rich wrapper around `HttpServletResponse`, exposing Scala-friendly accessors
 * for status, headers, cookies, character encoding and content type.
 */
case class RichResponse(res: HttpServletResponse) {

  import RicherStringImplicits._

  /**
   * Note: the servlet API doesn't remember the reason. If a custom
   * reason was set, it will be returned incorrectly here,
   */
  def status: ResponseStatus = ResponseStatus(res.getStatus)

  def status_=(statusLine: ResponseStatus): Unit = {
    // Deprecated. As of version 2.1, due to ambiguous meaning of the message parameter.
    // To set a status code use setStatus(int), to send an error with a description use sendError(int, String).
    // res.setStatus(statusLine.code, statusLine.message)
    res.setStatus(statusLine.code)
  }

  /** Mutable map view over the servlet response headers. */
  object headers extends Map[String, String] {

    def get(key: String): Option[String] =
      res.getHeaders(key) match {
        case xs if xs.isEmpty => None
        case xs => Some(xs.asScala mkString ",")
      }

    def iterator: Iterator[(String, String)] =
      for (name <- res.getHeaderNames.asScala.iterator)
        yield (name, res.getHeaders(name).asScala mkString ", ")

    def +=(kv: (String, String)): this.type = {
      res.setHeader(kv._1, kv._2)
      this
    }

    def -=(key: String): this.type = {
      // the servlet API has no header removal; overwrite with an empty value instead
      res.setHeader(key, "")
      this
    }
  }

  /** Translates the given cookie into a servlet cookie and adds it to the response. */
  def addCookie(cookie: Cookie): Unit = {
    import cookie._

    val sCookie = new ServletCookie(name, value)
    if (options.domain.nonBlank) sCookie.setDomain(options.domain)
    if (options.path.nonBlank) sCookie.setPath(options.path)
    sCookie.setMaxAge(options.maxAge)
    sCookie.setSecure(options.secure)
    if (options.comment.nonBlank) sCookie.setComment(options.comment)
    sCookie.setHttpOnly(options.httpOnly)
    sCookie.setVersion(options.version)
    res.addCookie(sCookie)
  }

  def characterEncoding: Option[String] = Option(res.getCharacterEncoding)

  def characterEncoding_=(encoding: Option[String]): Unit = {
    // .orNull is the idiomatic (and equivalent) form of `getOrElse null`
    res.setCharacterEncoding(encoding.orNull)
  }

  def contentType: Option[String] = Option(res.getContentType)

  def contentType_=(contentType: Option[String]): Unit = {
    res.setContentType(contentType.orNull[String])
  }

  /** Sends a redirect response to the given location. */
  def redirect(uri: String): Unit = {
    res.sendRedirect(uri)
  }

  def outputStream: OutputStream = res.getOutputStream

  def writer: PrintWriter = res.getWriter

  /** Flushes the buffer and closes the output stream, finishing the response. */
  def end(): Unit = {
    res.flushBuffer()
    res.getOutputStream.close()
  }
}
| xerial/skinny-micro | micro/src/main/scala/skinny/micro/response/RichResponse.scala | Scala | bsd-2-clause | 2,750 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package reflect
package api
import scala.language.implicitConversions
/**
* <span class="badge badge-red" style="float: right;">EXPERIMENTAL</span>
*
* This trait defines `Name`s in Scala Reflection, and operations on them.
*
* Names are simple wrappers for strings. [[scala.reflect.api.Names#Name Name]] has two subtypes
* [[scala.reflect.api.Names#TermName TermName]] and [[scala.reflect.api.Names#TypeName TypeName]]
* which distinguish names of terms (like objects or members) and types. A term and a type of the
* same name can co-exist in an object.
*
* To search for the `map` method (which is a term) declared in the `List` class, one can do:
*
* {{{
* scala> typeOf[List[_]].member(TermName("map"))
* res0: reflect.runtime.universe.Symbol = method map
* }}}
*
* To search for a type member, one can follow the same procedure, using `TypeName` instead.
*
* For more information about creating and using `Name`s, see the [[http://docs.scala-lang.org/overviews/reflection/annotations-names-scopes.html Reflection Guide: Annotations, Names, Scopes, and More]]
*
* @contentDiagram hideNodes "*Api"
* @group ReflectionAPI
*/
trait Names {

  /** An implicit conversion from String to TermName.
   * Enables an alternative notation `"map": TermName` as opposed to `TermName("map")`.
   * @group Names
   */
  @deprecated("use explicit `TermName(s)` instead", "2.11.0")
  implicit def stringToTermName(s: String): TermName = TermName(s)

  /** An implicit conversion from String to TypeName.
   * Enables an alternative notation `"List": TypeName` as opposed to `TypeName("List")`.
   * @group Names
   */
  @deprecated("use explicit `TypeName(s)` instead", "2.11.0")
  implicit def stringToTypeName(s: String): TypeName = TypeName(s)

  /** The abstract type of names.
   * @group Names
   */
  type Name >: Null <: AnyRef with NameApi

  /** The abstract type of names representing types.
   * @group Names
   */
  type TypeName >: Null <: TypeNameApi with Name

  /** Has no special methods. Is here to provide erased identity for `TypeName`.
   * @group API
   */
  trait TypeNameApi

  /** The abstract type of names representing terms.
   * @group Names
   */
  type TermName >: Null <: TermNameApi with Name

  /** Has no special methods. Is here to provide erased identity for `TermName`.
   * @group API
   */
  trait TermNameApi

  /** The API of Name instances.
   * @group API
   */
  abstract class NameApi {

    /** Checks whether the name is a term name */
    def isTermName: Boolean

    /** Checks whether the name is a type name */
    def isTypeName: Boolean

    /** Returns a term name that wraps the same string as `this` */
    def toTermName: TermName

    /** Returns a type name that wraps the same string as `this` */
    def toTypeName: TypeName

    /** Replaces all occurrences of \\$op_names in this name by corresponding operator symbols.
     * Example: `foo_\\$plus\\$eq` becomes `foo_+=`
     */
    @deprecated("use `decodedName.toString` instead", "2.11.0")
    def decoded: String

    /** Replaces all occurrences of operator symbols in this name by corresponding \\$op_names.
     * Example: `foo_+=` becomes `foo_\\$plus\\$eq`.
     */
    @deprecated("use `encodedName.toString` instead", "2.11.0")
    def encoded: String

    /** The decoded name, still represented as a name.
     */
    def decodedName: Name

    /** The encoded name, still represented as a name.
     */
    def encodedName: Name
  }

  /** Create a new term name.
   * @group Names
   */
  @deprecated("use TermName instead", "2.11.0")
  def newTermName(s: String): TermName

  /** Creates a new type name.
   * @group Names
   */
  @deprecated("use TypeName instead", "2.11.0")
  def newTypeName(s: String): TypeName

  /** The constructor/extractor for `TermName` instances.
   * @group Extractors
   */
  val TermName: TermNameExtractor

  /** An extractor class to create and pattern match with syntax `TermName(s)`.
   * @group Extractors
   */
  abstract class TermNameExtractor {
    def apply(s: String): TermName
    def unapply(name: TermName): Option[String]
  }

  /** The constructor/extractor for `TypeName` instances.
   * @group Extractors
   */
  val TypeName: TypeNameExtractor

  /** An extractor class to create and pattern match with syntax `TypeName(s)`.
   * @group Extractors
   */
  abstract class TypeNameExtractor {
    def apply(s: String): TypeName
    def unapply(name: TypeName): Option[String]
  }
}
| martijnhoekstra/scala | src/reflect/scala/reflect/api/Names.scala | Scala | apache-2.0 | 4,801 |
package beer.features
import org.scalatest.{FlatSpec, ShouldMatchers}
/**
* @author milos
*/
/**
 * Smoke tests for LengthDisbalance.
 * NOTE(review): these tests only print results, they assert nothing.
 *
 * @author milos
 */
class LengthDisbalanceTest extends FlatSpec with ShouldMatchers {

  "disbalance" should "work" in {
    val useExponential = false
    val feature = new LengthDisbalance(useExponential, List(1.0))
    val sys = "Hello I am".split(" ")
    val ref = "Hello I am Milos".split(" ")
    // check both orderings of system vs reference
    println(feature.featureValues(sys, ref))
    println(feature.featureValues(ref, sys))
  }

  "nbest" should "not crash" in {
    val useExponential = false
    val feature = new LengthDisbalance(useExponential, List(1.0))
    val candidates = List(
      ("Hello I am".split(" "), 0.2),
      ("Hello I am Milos".split(" "), 0.2),
      ("Hello am".split(" "), 0.3))
    println(feature.expectedFeatureValues(candidates))
  }
}
package renesca.graph
import renesca.NonBacktickName
import renesca.parameter.{PropertyKey, PropertyMap}
import scala.collection.mutable
object Label {
  // lets plain strings be used wherever a Label is expected
  implicit def StringToLabel(name: String): Label = Label(name)
}

// A node label; name handling (backtick restriction) comes from NonBacktickName
case class Label(name: String) extends NonBacktickName
object Node {

  // internal factory for wrapping nodes that already have an identity
  private[renesca] def apply(id: Id, labels: Traversable[Label] = Nil, properties: PropertyMap = Map.empty): Node = {
    new Node(id, labels, properties)
  }

  // Node with a Create origin — presumably persisted via Cypher CREATE; confirm in Origin impl
  def create: Node = create()
  def create(labels: Traversable[Label] = Nil, properties: PropertyMap = Map.empty): Node = {
    new Node(Create(), labels, properties)
  }

  // Node with a Merge origin; `merge` lists the keys to merge on, `onMatch` the keys set on match
  def merge: Node = merge()
  def merge(labels: Traversable[Label] = Nil, properties: PropertyMap = Map.empty, merge: Set[PropertyKey] = Set.empty, onMatch: Set[PropertyKey] = Set.empty): Node = {
    new Node(Merge(merge, onMatch), labels, properties)
  }

  // Node with a Match origin; `matches` lists the property keys used for matching
  def matches: Node = matches()
  def matches(labels: Traversable[Label] = Nil, properties: PropertyMap = Map.empty, matches: Set[PropertyKey] = Set.empty): Node = {
    new Node(Match(matches), labels, properties)
  }
}
class Node private[graph](
  var origin: Origin,
  initialLabels: Traversable[Label] = Nil,
  initialProperties: PropertyMap = Map.empty
) extends Item {

  // mutable views over labels/properties that record local changes for later persistence
  val labels = new NodeLabels(this, mutable.HashSet(initialLabels.toSeq: _*))
  val properties = new Properties(this, mutable.Map(initialProperties.toSeq: _*))

  /** Pending label and property changes accumulated on this node. */
  def changes: Seq[GraphChange] = labels.localChanges ++ properties.localChanges

  // graph-contextual navigation — all delegate to the implicitly supplied graph
  def outRelations(implicit graph: Graph) = graph.outRelations(this)
  def inRelations(implicit graph: Graph) = graph.inRelations(this)
  def relations(implicit graph: Graph) = graph.incidentRelations(this)
  def neighbours(implicit graph: Graph) = graph.neighbours(this)
  def successors(implicit graph: Graph) = graph.successors(this)
  def predecessors(implicit graph: Graph) = graph.predecessors(this)
  def inDegree(implicit graph: Graph) = graph.inDegree(this)
  def outDegree(implicit graph: Graph) = graph.outDegree(this)
  def degree(implicit graph: Graph) = graph.degree(this)

  // equality/hash are based solely on origin, ignoring labels and properties
  def canEqual(other: Any): Boolean = other.isInstanceOf[Node]

  override def equals(other: Any): Boolean = other match {
    case that: Node => (that canEqual this) && this.origin == that.origin
    case _ => false
  }

  override def hashCode: Int = origin.hashCode

  override def toString = s"(${ origin }${ labels.map(":" + _.name).mkString })"
}
| renesca/renesca | shared/src/main/scala/renesca/graph/Node.scala | Scala | apache-2.0 | 2,555 |
package org.denigma.threejs.extras
import org.denigma.threejs.{ Vector3, Vector2, Camera }
import org.scalajs.dom
import scala.scalajs.js
import scala.scalajs.js.annotation.JSName
// Scala.js facade for three.js THREE.TrackballControls.
@js.native
@JSName("THREE.TrackballControls")
class TrackBallControls(camera: Camera) extends js.Object {
  def getMouseOnScreen(clientX: Double, clientY: Double): Vector2 = js.native
  def getMouseProjectionOnBall(clientX: Double, clientY: Double): Vector3 = js.native
  def rotateCamera(): Unit = js.native
  def zoomCamera(): Unit = js.native
  def panCamera(): Unit = js.native
  def update(): Unit = js.native
  def keydown(event: dom.KeyboardEvent): Unit = js.native
  def keyup(event: dom.KeyboardEvent): Unit = js.native
  def mousedown(event: dom.MouseEvent): Unit = js.native
  def mouseup(event: dom.MouseEvent): Unit = js.native
  // NOTE(review): facade member names must match the JS member exactly; "mousmove"
  // looks like a typo for "mousemove" — confirm against THREE.TrackballControls.
  // Renaming would break existing callers, so only flagging here.
  def mousmove(event: dom.MouseEvent): Unit = js.native
}
| antonkulaga/threejs-facade | facade/src/main/scala/org/denigma/threejs/extras/TrackBallControls.scala | Scala | mpl-2.0 | 929 |
package org.jetbrains.bsp.protocol
import java.io.File
import ch.epfl.scala.bsp4j.BspConnectionDetails
import com.google.gson.Gson
import com.intellij.openapi.util.SystemInfo
import com.intellij.openapi.util.text.StringUtil.defaultIfEmpty
import com.intellij.util.SystemProperties
import org.jetbrains.bsp.{BspBundle, BspErrorMessage}
import scala.io.Source
import scala.util.{Failure, Try}
object BspConnectionConfig {

  val BspWorkspaceConfigDirName = ".bsp"

  private val BspSystemConfigDirName = "bsp"

  /** Lists the `.bsp/*.json` connection files of a workspace (empty if the dir is missing). */
  def workspaceConfigurationFiles(workspace: File): List[File] = {
    val bspDir = new File(workspace, BspWorkspaceConfigDirName)
    if(bspDir.isDirectory)
      bspDir.listFiles(file => file.getName.endsWith(".json")).toList
    else List.empty
  }

  /** Find all BSP connection configs for a workspace. */
  def workspaceBspConfigs(workspace: File): List[(File, BspConnectionDetails)] = {
    val files = workspaceConfigurationFiles(workspace)
    tryReadingConnectionFiles(files).flatMap(_.toOption).toList
  }

  /** Find all BSP connection configs either in a workspace, or installed on a system. */
  def allBspConfigs(workspace: File): List[(File, BspConnectionDetails)] = {
    val workspaceConfigs = workspaceConfigurationFiles(workspace)
    val systemConfigs = systemDependentConnectionFiles
    val potentialConfigs = tryReadingConnectionFiles(workspaceConfigs ++ systemConfigs)
    potentialConfigs.flatMap(_.toOption).toList
  }

  /** True when `file` is a JSON file located directly inside a `.bsp` directory. */
  def isBspConfigFile(file: File): Boolean = {
    file.isFile &&
      file.getParentFile.getName == BspWorkspaceConfigDirName &&
      file.getName.endsWith(".json")
  }

  /**
   * Find connection files installed on user's system.
   * https://build-server-protocol.github.io/docs/server-discovery.html#default-locations-for-bsp-connection-files
   */
  private def systemDependentConnectionFiles: List[File] = {
    val basePaths =
      if (SystemInfo.isWindows) windowsBspFiles()
      else if (SystemInfo.isMac) macBspFiles()
      else if (SystemInfo.isUnix) unixBspFiles()
      else Nil

    listFiles(bspDirs(basePaths))
  }

  /** Pairs each file with its parsed connection details; unparseable files become Failure. */
  private def tryReadingConnectionFiles(files: Seq[File]): Seq[Try[(File, BspConnectionDetails)]] = {
    implicit val gson: Gson = new Gson()
    files.map { f => readConnectionFile(f).map((f, _)) }
  }

  /**
   * Parses a single BSP connection file.
   *
   * Fixes over the previous version: the `Source` is now always closed (it used to
   * leak the reader), and I/O errors from opening the file are captured as `Failure`
   * instead of being thrown past the `Try`.
   */
  def readConnectionFile(file: File)(implicit gson: Gson): Try[BspConnectionDetails] = {
    if (file.canRead) {
      Try {
        val source = Source.fromFile(file)
        try gson.fromJson(source.bufferedReader(), classOf[BspConnectionDetails])
        finally source.close()
      }
    } else Failure(BspErrorMessage(BspBundle.message("bsp.protocol.file.not.readable", file)))
  }

  // Windows: %LOCALAPPDATA% and %PROGRAMDATA%
  private def windowsBspFiles() = {
    val localAppData = System.getenv("LOCALAPPDATA")
    val programData = System.getenv("PROGRAMDATA")
    List(localAppData, programData)
  }

  // Unix: XDG data dirs, with the standard fallbacks when the env vars are unset
  private def unixBspFiles() = {
    val xdgDataHome = System.getenv("XDG_DATA_HOME")
    val xdgDataDirs = System.getenv("XDG_DATA_DIRS")
    val dataHome = defaultIfEmpty(xdgDataHome, SystemProperties.getUserHome + "/.local/share")
    val dataDirs = defaultIfEmpty(xdgDataDirs, "/usr/local/share:/usr/share").split(":").toList
    dataHome :: dataDirs
  }

  // macOS: per-user and system-wide Application Support
  private def macBspFiles() = {
    val userHome = SystemProperties.getUserHome
    val userData = userHome + "/Library/Application Support"
    val systemData = "/Library/Application Support"
    List(userData, systemData)
  }

  /** Appends the `bsp` config dir name to each base path. */
  private def bspDirs(basePaths: List[String]): List[File] = basePaths.map(new File(_, BspSystemConfigDirName))

  /** All files directly contained in the given directories (non-directories contribute none). */
  private def listFiles(dirs: List[File]): List[File] = dirs.flatMap { dir =>
    if (dir.isDirectory) dir.listFiles()
    else Array.empty[File]
  }
}
| JetBrains/intellij-scala | bsp/src/org/jetbrains/bsp/protocol/BspConnectionConfig.scala | Scala | apache-2.0 | 3,681 |
package org.ai4fm.proofprocess.zeves.core.parse
import scala.collection.JavaConverters._
import scala.language.existentials
import org.ai4fm.proofprocess.{Intent, Loc, ProofElem, ProofEntry, ProofProcessFactory, Term, Trace}
import org.ai4fm.proofprocess.core.graph.PProcessGraph._
import org.ai4fm.proofprocess.core.util.PProcessUtil
import org.ai4fm.proofprocess.zeves.ZEvesProofProcessFactory
import org.ai4fm.proofprocess.zeves.core.analysis.ZEvesGraph
import org.ai4fm.proofprocess.zeves.core.internal.ZEvesPProcessCorePlugin.{error, log}
import net.sourceforge.czt.session.SectionInfo
import net.sourceforge.czt.zeves.ast.ProofCommand
import net.sourceforge.czt.zeves.response.{ZEvesOutput, ZEvesProofTrace}
import net.sourceforge.czt.zeves.response.ZEvesProofTrace.TraceType._
import net.sourceforge.czt.zeves.response.form.ZEvesName
import net.sourceforge.czt.zeves.snapshot.ISnapshotEntry
import net.sourceforge.czt.zeves.snapshot.SnapshotData
/** Reads Z/EVES proof snapshot entries into ProofProcess model elements.
 *
 * @author Andrius Velykis
 */
trait ProofEntryReader {

  private val factory = ProofProcessFactory.eINSTANCE
  private val zevesFactory = ZEvesProofProcessFactory.eINSTANCE

  // abstract hooks supplied by the concrete reader:

  /** Intent assigned to every parsed proof step. */
  def stepIntent(): Intent

  /** Copies a term; used to defensively clone in-goals (containment refs). */
  def cloneTerm(term: Term): Term

  def matchTerms(term1: Term, term2: Term): Boolean

  /** Source location of the given snapshot entry. */
  def textLoc(entry: ISnapshotEntry): Loc

  /** Parses a full proof snapshot into entry data; None for empty/too-short proofs. */
  def readEntries(sectInfo: SectionInfo, proofSnapshot: List[ISnapshotEntry]): Option[ProofEntryData] =
    proofSnapshot match {
      // Assume that a proof with just one command (e.g. "declaration") is too short to be included
      // in the ProofProcess. This way we only concern ourselves with proofs that have been tried proving
      // (instead of capturing every version of lemma declaration)
      case goalEntry :: restEntries if !restEntries.isEmpty => {
        // Assume that the first step in any proof is the "declaration" command, e.g. "lemma ..."
        // Also check that initial goals are not empty - don't allow proofs with empty goals
        // Also check that proof steps are available
        assert(SnapshotUtil.isGoal(goalEntry), "The first element in the proof must be the goal.")

        val initialGoals = parseGoals(sectInfo, goalEntry)
        val restGoals = restEntries.map(entry => (entry, parseGoals(sectInfo, entry)))

        val (proofGraph, entryMapping) = readProofSteps(restGoals, initialGoals)

        Some(ProofEntryData(initialGoals,
          Option(goalEntry.getData.getGoalName),
          proofGraph,
          entryMapping))
      }

      // empty/short proof state - nothing to parse
      case _ => None
    }

  private def parseGoals(sectInfo: SectionInfo, snapshotEntry: ISnapshotEntry) =
    // get the option - should always the Some here, since we have filtered the errors before
    SnapshotUtil.parseGoals(sectInfo, snapshotEntry).get

  // NOTE(review): unused within this trait — possibly kept for subclasses; confirm before removing
  private def snapshotProofResult(snapshotEntry: ISnapshotEntry) =
    // get the option - should always the Some here, since we have filtered the errors before
    SnapshotUtil.zEvesProofResult(snapshotEntry).get

  /** Converts in/out-goal-annotated steps into a proof graph plus a snapshot-to-entry mapping. */
  private def readProofSteps(proofSteps: List[(ISnapshotEntry, List[Term])],
    inGoals: List[Term])
  : (PPRootGraph[ProofEntry, _], Map[ISnapshotEntry, ProofEntry]) = {

    val proofStepEntries = PProcessUtil.toInOutGoalSteps(proofEntry)(inGoals, proofSteps)

    // link snapshot entries with respective proof step entries (for activity logging)
    val stepEntryMapping = ((proofSteps map (_._1)) zip proofStepEntries).toMap

    val proofGraph = ZEvesGraph.proofStepsGraph(proofStepEntries)

    (proofGraph, stepEntryMapping)
  }

  /** Builds a single ProofEntry (info + step) from a snapshot entry and its goals. */
  private def proofEntry(snapshotEntry: ISnapshotEntry,
    inGoals: List[Term], outGoals: List[Term]): ProofEntry = {

    val snapshotData = snapshotEntry.getData

    val zevesResult = snapshotData.getResult match {
      case zeves: ZEvesOutput => Some(zeves)
      case _ => None
    }

    val info = factory.createProofInfo

    val entryCmd = snapshotData.getTerm
    // prefer the pretty-printed proof command, fall back to the Z/EVES reported command
    val commandText = entryCmd match {
      case proofCmd: ProofCommand => ProofCommandPrinter.print(proofCmd)
      case _ => zevesResult.map(_.getCommand.toString) getOrElse ""
    }

    info.setNarrative(commandText)
    info.setIntent(stepIntent)

    // TODO set features
    val inFeatures = info.getInFeatures
    val outFeatures = info.getOutFeatures

    val step = factory.createProofStep
    step.setTrace(proofStepTrace(snapshotData, commandText, zevesResult))
    step.setSource(textLoc(snapshotEntry))

    // copy the goals defensively because inGoals is a containment ref
    step.getInGoals.addAll(inGoals.map(cloneTerm).asJava)
    step.getOutGoals.addAll(outGoals.asJava)

    // create tactic application attempt
    val entry = factory.createProofEntry
    entry.setInfo(info)
    entry.setProofStep(step)

    entry
  }

  /** Builds the Z/EVES trace for a step: command text, proof case, and used lemmas. */
  private def proofStepTrace(snapshotData: SnapshotData,
    commandText: String, zevesResult: Option[ZEvesOutput]): Trace = {

    val trace = zevesFactory.createZEvesTrace
    trace.setText(commandText)

    // set the proof case if Z/EVES result is available
    zevesResult foreach (res => trace.setCase(ProofEntryReader.proofCaseStr(res.getProofCase.asScala)))

    // retrieve used lemmas from the proof trace
    val lemmas = snapshotData.getTrace.asScala.flatMap(traceResult => usedLemmas(traceResult.getProofTrace))
    trace.getUsedLemmas.addAll(lemmas.asJava)

    trace
  }

  // trace element types whose elements name lemmas
  private val lemmaTypes = List(APPLY, REWRITE, FRULE, GRULE, USE)

  /** Names of all lemmas referenced in the given Z/EVES proof trace. */
  private def usedLemmas(trace: ZEvesProofTrace): Set[String] = {

    def traceName(elem: Any): Option[String] = elem match {
      case name: ZEvesName => Some(name.getIdent)
      case _ => {
        // unexpected element type: log and skip rather than fail the whole trace
        log(error(msg = Some("Unknown used lemma element found in trace: " + elem.toString)))
        None
      }
    }

    // get the trace elements of each lemma type and extract their names
    lemmaTypes.map(trace.getTraceElements(_).asScala.flatMap(traceName)).flatten.toSet
  }
}
object ProofEntryReader {

  /** Renders a proof case path (e.g. `List(1, 2)`) in its dot-joined textual form (`"1.2"`). */
  def proofCaseStr(proofCase: Iterable[java.lang.Integer]) = proofCase.mkString(".")

  /** Parses a dot-joined proof case string (`"1.2.3"`) back into its numeric path.
   *
   * Returns an empty list for null, empty or malformed input. (The previous version
   * had a dead `Option(caseStr)` statement — evidently an unfinished null guard — and
   * would throw a NullPointerException on null input.)
   */
  def proofCase(caseStr: String): List[Int] = {
    if (caseStr == null || caseStr.isEmpty) {
      List()
    } else {
      val caseNos = caseStr.split("\\.")
      try {
        caseNos.map(_.toInt).toList
      } catch {
        // invalid case - e.g. a non-numeric segment
        case ne: NumberFormatException => List()
      }
    }
  }
}
/** Result of parsing a proof snapshot: the initial goals, the optional goal name,
 * the proof step graph, and the mapping from snapshot entries back to proof entries.
 */
case class ProofEntryData(goals: List[Term],
  label: Option[String],
  proofGraph: PPRootGraph[ProofEntry, _],
  entryMap: Map[ISnapshotEntry, ProofEntry])
| andriusvelykis/proofprocess | org.ai4fm.proofprocess.zeves.core/src/org/ai4fm/proofprocess/zeves/core/parse/ProofEntryReader.scala | Scala | epl-1.0 | 6,672 |
package pl.tk.scalaconstructs
// OO-style functor: mapping is a method on the wrapped value itself.
// NOTE(review): the `B <: A` bound is unusually restrictive for a functor —
// it only allows mapping into subtypes of A.
trait FunctorObject[A, F[_]] {
  def map[B <: A](mapper: A => B): F[B]
}
// Type-class style functor: mapping over a container F is defined externally.
trait FunctorStandalone[F[_]] {
  def map[A, B](value: F[A])(f: A => B): F[B]
}
object FunctorStandalone {

  implicit def listFunctor = new FunctorStandalone[List] {
    def map[A, B](value: List[A])(f: A => B) = value map (f(_))
  }

  // NOTE(review): despite the name, this is the functor instance for Option, not Int
  implicit def intFunctor = new FunctorStandalone[Option] {
    def map[A, B](value: Option[A])(f: A => B) = value map (f(_))
  }

  // right-biased functor for Either; the type lambda fixes the left type R
  implicit def eitherFunctor[R] = new FunctorStandalone[({type lambda[P] = Either[R, P]})#lambda] {
    def map[A, B](value: Either[R, A])(f: A => B) = value match {
      case Right(v) => Right(f(v))
      case Left(v) => Left(v)
    }
  }

  // enriches Either with right-biased map/flatMap (pre-2.12 Either was unbiased)
  implicit class EitherOps[L, R](e: Either[L, R]) extends FunctorObject[R, ({type lambda[P] = Either[L, P]})#lambda] {

    def map[B](f: R => B): Either[L, B] = {
      e match {
        case Right(v) => Right(f(v))
        case Left(l) => Left(l)
      }
    }

    def flatMap[B](f: R => Either[L, B]) = {
      e match {
        case Right(v) => f(v)
        case Left(l) => Left(l)
      }
    }
  }
}
/** Wraps a List so that it satisfies the [[FunctorObject]] interface. */
case class ListFunctor[T](lst: List[T]) extends FunctorObject[T, List] {
  def map[B](mapper: T => B): List[B] = lst map mapper
}
// Demo entry point exercising the functor definitions above.
// NOTE(review): prints to stdout and keeps a large commented-out scratch pad —
// evidently exploratory code, left verbatim below.
object Appendex extends App {
  import FunctorStandalone._

  println(implicitly[FunctorStandalone[Option]].map(Some(2))(_ + 2))

  val e1: Either[Int, String] = Right("22")
  val e2: Either[Int, String] = Right("22")
  val e3: Either[Int, String] = Left(22)

  // e3 is a Left, so the comprehension short-circuits and e is a Left
  val e = for {
    i <- e1
    j <- e2
    k <- e3
  } yield i + "@" + j + "@" + k

  println(e)
  //println(e.map(x => x*2))

  // implicit def listFunctor = new Functor[List] {
  // def map[A,B](arg : List[A])(mapper : A => B) : List[B] = {
  // arg map mapper
  // }
  // }

  // type ValOrThrowable[A] = Either[Throwable,A]
  // implicit def EitherFunctor = new Functor[ValOrThrowable] {
  // def map[A,B](arg : ValOrThrowable[A])(mapper : A => B) : ValOrThrowable[B] = {
  // arg match {
  // case Left(t) => Left(t)
  // case Right(a) => Right(mapper(a))
  // }
  // }
  // }

  // implicit def EitherFunctor1[S] = new Functor[Either[S,_]] {
  // def map[A,B](arg : Either[S,A])(mapper : A => B) = ???
  // }

  // implicit def EitherFunctor2[S] = new Functor[ ({type lambda[A] = Either[S,A] })#lambda] {
  // def map[A,B](arg : Either[S,A])(mapper : A => B) : Either[S,B] = {
  // arg match {
  // case Left(t) => Left(t)
  // case Right(a) => Right(mapper(a))
  // }
  // }
  // }

  //def transforme[A,B, C[_] : Functor](arg : C[A])(lambda : A=>B ) : C[B] = implicitly[Functor[C]].map(arg)(lambda)
  //println(transforme(List[Int](1,2,3,4,4))((p:Int) => p.toString+"@"))
}
| almendar/scala-constructs-learn | src/main/scala/pl/tk/scalaconstructs/MonadicEither.scala | Scala | apache-2.0 | 2,725 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package cypher.cucumber.reporter
import com.novus.salat.annotations.{Ignore, Key, Persist}
import org.neo4j.cypher.internal.compiler.v2_3.ast.QueryTag
import scala.annotation.meta.getter
/** Maps a cucumber step status string onto an [[Outcome]]: "passed" is the
 * only success value; anything else counts as failure.
 */
object Outcome {
  def from(value: String) = if (value == "passed") Success else Failure
}

/** Outcome of a scenario execution: [[Success]] or [[Failure]]. */
sealed trait Outcome

object Success extends Outcome {
  override def toString = "success"
}

object Failure extends Outcome {
  override def toString = "failure"
}
// Serializable scenario result. `tags` and `outcome` are @Ignore'd for direct
// persistence and instead exported through the pretty* string projections below.
case class JsonResult(query: String, @Ignore tags: Set[QueryTag], @Ignore outcome: Outcome) {

  // persisted under key "tags" as plain strings
  @Key("tags")
  @(Persist@getter)
  val prettyTags: Set[String] = tags.map(_.toString)

  // persisted under key "outcome" via Outcome.toString
  @Key("outcome")
  @(Persist@getter)
  val prettyOutcome = outcome.toString
}
| HuangLS/neo4j | community/cypher/compatibility-suite/src/test/scala/cypher/cucumber/reporter/JsonResult.scala | Scala | apache-2.0 | 1,531 |
package com.twitter.server
import com.twitter.app.{App, Flag}
import com.twitter.finagle.client.ClientRegistry
import com.twitter.finagle.filter.ServerAdmissionControl
import com.twitter.finagle.http.{HttpMuxer, Method, Request, Response, Route => HttpRoute}
import com.twitter.finagle.server.ServerRegistry
import com.twitter.finagle.stats.NullStatsReceiver
import com.twitter.finagle.tracing.NullTracer
import com.twitter.finagle.util.LoadService
import com.twitter.finagle.{Http, ListeningServer, NullServer, Service}
import com.twitter.server.Admin.Grouping
import com.twitter.server.filters.AdminThreadPoolFilter
import com.twitter.server.handler.{AdminHttpMuxHandler, LoggingHandler, NoLoggingHandler}
import com.twitter.server.lint.LoggingRules
import com.twitter.server.util.HttpUtils
import com.twitter.server.view.{IndexView, NotFoundView}
import com.twitter.util.lint.GlobalRules
import com.twitter.util.registry.Library
import com.twitter.util.{Future, Monitor, Time}
import java.net.{InetSocketAddress, URLEncoder}
import java.nio.charset.StandardCharsets
import org.slf4j.LoggerFactory
import scala.language.reflectiveCalls
object AdminHttpServer {
/**
* The name used for the finagle server.
*/
val ServerName = "adminhttp"
/**
* Represents an element which can be routed to via the admin http server.
*
* @param path The path used to access the route. A request
* is routed to the path as per the [[com.twitter.finagle.http.HttpMuxer]]
* spec.
*
* @param handler The service which requests are routed to.
*
* @param alias A short name used to identify the route when listed in
* index.
*
* @param group A grouping used to organize the route in the
* admin server pages. Routes with the same grouping are displayed
* together in the admin server pages.
*
* @param includeInIndex Indicates whether the route is included
* as part of the admin server index.
*
* @param method Specifies which HTTP Method to use from
* [[com.twitter.finagle.http.Method]]. The default is [[Method.Get]].
*/
case class Route(
path: String,
handler: Service[Request, Response],
alias: String,
group: Option[String],
includeInIndex: Boolean,
method: Method = Method.Get)
object Route {
// backwards compatibility
def isolate(r: Route): Route = AdminThreadPoolFilter.isolateRoute(r)
def isolate(s: Service[Request, Response]): Service[Request, Response] =
AdminThreadPoolFilter.isolateService(s)
def from(route: HttpRoute): Route = route.index match {
case Some(index) =>
Route(
path = index.path.getOrElse(route.pattern),
handler = route.handler,
alias = index.alias,
group = Some(index.group),
includeInIndex = true,
method = index.method
)
case None =>
Route(
path = route.pattern,
handler = route.handler,
alias = route.pattern,
group = None,
includeInIndex = false
)
}
}
/**
* Create a Route using a Finagle service interface
*/
def mkRoute(
path: String,
handler: Service[Request, Response],
alias: String,
group: Option[String],
includeInIndex: Boolean,
method: Method = Method.Get
): Route = {
Route(path, handler, alias, group, includeInIndex, method)
}
/** Convert an AdminHttpMuxHandler to a AdminHttpServer.Route */
private def muxHandlerToRoute(handler: AdminHttpMuxHandler): Route = {
AdminThreadPoolFilter.isolateRoute(Route.from(handler.route))
}
  // Fallback "/admin/logging" route installed when zero or multiple LoggingHandler
  // implementations are discovered via LoadService (see loggingHandlerRoute below).
  // NoLoggingHandler presumably renders a page explaining that no concrete logging
  // backend is active — confirm against its implementation. Isolated onto the
  // admin thread pool like all other admin routes.
  private val defaultLoggingHandlerRoute: Route =
    AdminThreadPoolFilter.isolateRoute(
      Route(
        path = "/admin/logging",
        handler = new NoLoggingHandler,
        alias = "Logging",
        group = Some(Grouping.Utilities),
        includeInIndex = true
      )
    )
}
/**
 * Mixes an admin HTTP server into a [[com.twitter.app.App]]. Routes are gathered
 * from `LoadService`-discovered handlers plus anything added through
 * `addAdminRoutes`, and are served on the port given by the `-admin.port` flag
 * (see `startServer`/`premain`).
 */
trait AdminHttpServer { self: App with Stats =>
  import AdminHttpServer._
  // We use slf4-api directly b/c we're in a trait and want the trait class to be the Logger name
  private[this] val log = LoggerFactory.getLogger(getClass)
  /**
   * If true, the Twitter-Server admin server will be disabled.
   * Note: Disabling the admin server allows services to be deployed into environments where only a single port is allowed
   */
  protected def disableAdminHttpServer: Boolean = false
  // Compiled-in default port; the -admin.port flag below overrides it at runtime.
  def defaultAdminPort: Int = 9990
  val adminPort: Flag[InetSocketAddress] =
    flag("admin.port", new InetSocketAddress(defaultAdminPort), "Admin http server port")
  // Level of indirection that lets the route set be swapped after the listening
  // server has started: every request is forwarded to the @volatile `underlying`.
  private[this] val adminHttpMuxer = new Service[Request, Response] {
    override def apply(request: Request): Future[Response] = underlying(request)
    // Serves 404s until updateMuxer() installs the real muxer (from premain).
    @volatile var underlying: Service[Request, Response] =
      new Service[Request, Response] {
        def apply(request: Request): Future[Response] =
          HttpUtils.new404("no admin server initialized")
      }
    override def close(deadline: Time): Future[Unit] = underlying.close(deadline)
  }
  @volatile protected var adminHttpServer: ListeningServer = NullServer
  // Look up a logging handler, will only be added if a single one is found.
  private[this] val loggingHandlerRoute: Seq[Route] = {
    val handlers = LoadService[LoggingHandler]()
    if (handlers.length > 1) {
      // add linting issue for multiple logging handlers
      GlobalRules.get.add(LoggingRules.multipleLoggingHandlers(handlers.map(_.name)))
      Seq(defaultLoggingHandlerRoute)
    } else if (handlers.length == 1) {
      // add the logging handler
      handlers.map(muxHandlerToRoute)
    } else {
      // add linting issue for missing logging handler
      GlobalRules.get.add(LoggingRules.NoLoggingHandler)
      Seq(defaultLoggingHandlerRoute)
    }
  }
  // We start with routes added via load service, note that these will be overridden
  // by any routes added in any call to updateMuxer().
  private[this] val loadServiceRoutes: Seq[Route] =
    LoadService[AdminHttpMuxHandler]().map(muxHandlerToRoute) ++ loggingHandlerRoute
  // NOTE(review): writes go through the synchronized addAdminRoutes, but `routes`
  // reads this var unsynchronized and it is not volatile — presumably safe only
  // because routes are added around startup; confirm before adding routes late.
  private[this] var allRoutes: Seq[Route] = loadServiceRoutes
  /**
   * The address to which the Admin HTTP server is bound.
   */
  def adminBoundAddress: InetSocketAddress = {
    // this should never be a NullServer unless
    // [[com.twitter.server.AdminHttpServer#premain]] is skipped/broken
    adminHttpServer.boundAddress.asInstanceOf[InetSocketAddress]
  }
  /**
   * Add a collection of [[Route]]s into admin http server.
   */
  def addAdminRoutes(newRoutes: Seq[Route]): Unit = synchronized {
    allRoutes = allRoutes ++ newRoutes
    updateMuxer()
  }
  /**
   * Add a [[Route]] into admin http server.
   */
  def addAdminRoute(route: Route): Unit = {
    addAdminRoutes(Seq(route))
  }
  /**
   * Get all [[Route]]s of admin http server.
   */
  def routes: Seq[Route] = allRoutes
  /**
   * Name used for registration in the [[com.twitter.util.registry.Library]]
   * @return library name to register in the Library registry.
   */
  protected def libraryName: String = "twitter-server"
  /**
   * This method allows for further configuration of the http server for parameters not exposed by
   * this trait or for overriding defaults provided herein, e.g.,
   *
   * {{{
   * override def configureAdminHttpServer(server: Http.Server): Http.Server =
   *   server.withMonitor(myMonitor)
   * }}}
   *
   * @param server - the [[com.twitter.finagle.Http.Server]] to configure.
   * @return a configured Http.Server.
   */
  protected def configureAdminHttpServer(server: Http.Server): Http.Server = server
  // Rebuilds the muxer from `allRoutes` and atomically installs it; each route is
  // wrapped in an IndexView so the admin index page stays current.
  private[this] def updateMuxer(): Unit = {
    // create a service which multiplexes across all endpoints.
    val localMuxer = allRoutes.foldLeft(new HttpMuxer) {
      case (muxer, route) =>
        val service =
          new IndexView(route.alias, route.path, () => indexEntries).andThen(route.handler)
        muxer.withHandler(route.path, service)
    }
    val endpoints = allRoutes.map { route => s"\t${route.path} => ${route.handler.toString}" }
    log.debug(s"AdminHttpServer Muxer endpoints:\n" + endpoints.mkString("\n"))
    // the global HttpMuxer serves as a fallback behind the local routes
    adminHttpMuxer.underlying = HttpUtils.combine(Seq(localMuxer, HttpMuxer))
  }
  /** create index with both the local and global muxer namespace and server/client registries. */
  private[this] def indexEntries: Seq[IndexView.Entry] =
    downstreamClients +: listeningServers +: localRoutes
  /** group listening servers for display */
  private[this] def listeningServers: IndexView.Group = {
    val serverLinks: Seq[IndexView.Entry] = ServerRegistry.registrants.collect {
      case server if server.name.nonEmpty =>
        val encodedName = URLEncoder.encode(server.name, StandardCharsets.UTF_8.name)
        IndexView.Link(server.name, "/admin/servers/" + encodedName)
    }.toSeq
    IndexView.Group("Listening Servers", serverLinks.sorted(IndexView.EntryOrdering))
  }
  /** group downstream clients for display */
  private[this] def downstreamClients: IndexView.Group = {
    val clientLinks: Seq[IndexView.Entry] = ClientRegistry.registrants.collect {
      case client if client.name.nonEmpty =>
        val encodedName = URLEncoder.encode(client.name, StandardCharsets.UTF_8.name)
        IndexView.Link(client.name, "/admin/clients/" + encodedName)
    }.toSeq
    IndexView.Group("Downstream Clients", clientLinks.sorted(IndexView.EntryOrdering))
  }
  /** convert local routes into the IndexView data model */
  private[this] def localRoutes: Seq[IndexView.Entry] = {
    val routes = allRoutes ++
      HttpMuxer.routes.map(Route.from)
    routes
      .filter(_.includeInIndex)
      .groupBy(_.group)
      .flatMap {
        case (groupOpt, rts) =>
          val links = rts.map(routeToIndexLink).sorted(IndexView.EntryOrdering)
          groupOpt match {
            case Some(group) => Seq(IndexView.Group(group, links))
            case None => links
          }
      }
      .toSeq
  }
  private[this] def routeToIndexLink(route: Route): IndexView.Link =
    IndexView.Link(route.alias, route.path, route.method)
  // Binds the admin server. NullStatsReceiver/NullTracer keep admin traffic out
  // of application stats and traces, and admission control is disabled so the
  // server keeps reporting even when the process is unhealthy (see comment below).
  private[this] def startServer(): Unit = {
    val loggingMonitor = new Monitor {
      def handle(exc: Throwable): Boolean = {
        log.error(s"Caught exception in AdminHttpServer: $exc", exc)
        false
      }
    }
    log.info(s"Serving admin http on ${adminPort()}")
    adminHttpServer = configureAdminHttpServer(
      Http.server
        .configured(Http.Netty4Impl)
        .withStatsReceiver(NullStatsReceiver)
        .withTracer(NullTracer)
        .withMonitor(loggingMonitor)
        .withLabel(ServerName)
        // disable admission control, since we want the server to report stats
        // especially when it's in a bad state.
        .configured(ServerAdmissionControl.Param(false))
    ).serve(adminPort(), new NotFoundView andThen adminHttpMuxer)
    closeOnExitLast(adminHttpServer)
    Library.register(libraryName, Map.empty)
  }
  premain {
    // For consistency, we will add the routes regardless of whether the `adminHttpServer` gets
    // started. This may not always be true and we may change this behavior in the future.
    addAdminRoutes(Admin.adminRoutes(statsReceiver, self))
    // we delay this check until we call the premain to ensure the `disableAdminHttpServer` value
    // has the correct initialization order
    if (disableAdminHttpServer) {
      log.info("admin http is disabled and will not be started.")
    } else {
      startServer()
    }
  }
}
| twitter/twitter-server | server/src/main/scala/com/twitter/server/AdminHttpServer.scala | Scala | apache-2.0 | 11,475 |
package controllers
import play.api.libs.json.Json
import play.api.libs.ws.WS
import play.api.mvc._
import securesocial.core._
/**
 * A controller for authenticating REST api clients.
 * Based on http://eng.kifi.com/mobile-auth-with-play-and-securesocial/
 *
 * Clients POST a provider OAuth2 access token and receive a SecureSocial
 * session id (plus session cookie) in exchange.
 */
object ApiAuth extends Controller {
  // JSON reader for the expected request body: { "accessToken": "..." }
  private implicit val readsOAuth2Info = Json.reads[OAuth2Info]
  /**
   * Exchange a provider access token for a session.
   *
   * @param providerName key of a registered SecureSocial provider (e.g. "google")
   */
  def authenticate(providerName: String) = Action(parse.json) { implicit request =>
    // Some of the below code is taken from ProviderController in SecureSocial
    // format: { "accessToken": "..." }
    val oauth2Info = request.body.asOpt[OAuth2Info]
    // NOTE(review): `.get` throws (-> 500) when providerName is unknown; confirm
    // whether a 404/400 would be the desired response instead.
    val provider = Registry.providers.get(providerName).get
    // Build a skeletal SocialUser carrying only the access token; fillProfile is
    // expected to resolve the real identity from the provider — confirm it
    // populates identityId, otherwise the find() below looks up an empty id.
    val filledUser = provider.fillProfile(
      SocialUser(IdentityId("", provider.id), "", "", "", None, None, provider.authMethod, oAuth2Info = oauth2Info)
    )
    UserService.find(filledUser.identityId) map { user =>
      // Fire the login event; listeners may supply a replacement session,
      // otherwise the current request session is reused.
      val newSession = Events.fire(new LoginEvent(user)).getOrElse(session)
      Authenticator.create(user).fold(
        error => throw error,
        // Strip SecureSocial bookkeeping keys before handing the session back.
        authenticator => Ok(Json.obj("sessionId" -> authenticator.id))
          .withSession(newSession - SecureSocial.OriginalUrlKey - IdentityProvider.SessionId - OAuth1Provider.CacheKey)
          .withCookies(authenticator.toCookie)
      )
    } getOrElse NotFound(Json.obj("error" -> "user not found"))
  }
}
| epidataio/epidata-community | play/app/controllers/ApiAuth.scala | Scala | apache-2.0 | 1,364 |
package net.sansa_stack.query.tests
import java.io.File
import java.net.{URI, URL}
import java.nio.file.{Path, Paths}
import scala.collection.JavaConverters._
import org.apache.jena.riot.RDFParserBuilder
import org.apache.jena.ext.com.google.common.reflect.ClassPath
import org.apache.jena.iri.{IRIFactory, ViolationCodes}
import org.apache.jena.query.{QueryExecutionFactory, QueryFactory, ResultSetFormatter}
import org.apache.jena.rdf.model.{ModelFactory, RDFList, Resource}
import org.apache.jena.riot.{Lang, RDFDataMgr, RDFParserBuilder}
import org.apache.jena.util.FileManager
import org.scalatest.FunSuite
/**
 * Loads the W3C SPARQL conformance "QueryEvaluationTest" cases from the bundled
 * manifest files (classpath resources under `sparql11/`) and exposes them as
 * [[SPARQLQueryEvaluationTest]] values for concrete suites to execute.
 *
 * @author Lorenz Buehmann
 */
class W3cConformanceSPARQLQueryEvaluationTestSuite(val sparqlVersion: SPARQL_VERSION.Value)
  extends FunSuite {
  // Namespace constants for the individual W3C sub-manifests; available to subclasses
  // (e.g. for building entries of the IGNORE set).
  protected val aggregatesManifest = "http://www.w3.org/2009/sparql/docs/tests/data-sparql11/aggregates/manifest#"
  protected val bindManifest = "http://www.w3.org/2009/sparql/docs/tests/data-sparql11/bind/manifest#"
  protected val bindingsManifest = "http://www.w3.org/2009/sparql/docs/tests/data-sparql11/bindings/manifest#"
  protected val functionsManifest = "http://www.w3.org/2009/sparql/docs/tests/data-sparql11/functions/manifest#"
  protected val constructManifest = "http://www.w3.org/2009/sparql/docs/tests/data-sparql11/construct/manifest#"
  protected val csvTscResManifest = "http://www.w3.org/2009/sparql/docs/tests/data-sparql11/csv-tsv-res/manifest#"
  protected val groupingManifest = "http://www.w3.org/2009/sparql/docs/tests/data-sparql11/grouping/manifest#"
  protected val negationManifest = "http://www.w3.org/2009/sparql/docs/tests/data-sparql11/negation/manifest#"
  protected val existsManifest = "http://www.w3.org/2009/sparql/docs/tests/data-sparql11/exists/manifest#"
  protected val projectExpressionManifest = "http://www.w3.org/2009/sparql/docs/tests/data-sparql11/project-expression/manifest#"
  protected val propertyPathManifest = "http://www.w3.org/2009/sparql/docs/tests/data-sparql11/property-path/manifest#"
  protected val subqueryManifest = "http://www.w3.org/2009/sparql/docs/tests/data-sparql11/subquery/manifest#"
  protected val serviceManifest = "http://www.w3.org/2009/sparql/docs/tests/data-sparql11/service/manifest#"
  // contains the list of ignored tests cases, must be overridden
  lazy val IGNORE: Set[String] = Set.empty[String]
  val baseDir = "/sparql11"
  // Test-data directory depends on the SPARQL version under test (1.1 vs. 1.0/"r2").
  val testDirSPARQL11: String = baseDir + (if (sparqlVersion == SPARQL_VERSION.SPARQL_11) "/data-sparql11/" else "/data-r2/")
  // Resolves a (possibly relative) reference against the process working directory
  // as a file: IRI, with Jena IRI violation checking silenced.
  // NOTE(review): appears unused within this class — candidate for removal.
  private def resolveGlobal(s: String) = {
    val globalBase = "file://" + new File("").toURI.toString.substring(5)
    val factory = new IRIFactory(IRIFactory.jenaImplementation)
    factory.shouldViolation(false, false)
    factory.securityViolation(false, false)
    factory.setIsWarning(ViolationCodes.UNREGISTERED_IANA_SCHEME, false)
    factory.setIsError(ViolationCodes.UNREGISTERED_IANA_SCHEME, false)
    factory.setSameSchemeRelativeReferences("file")
    val cwd = factory.construct(globalBase)
    cwd.resolve(s).toString
  }
  // Parses the top-level manifest and flat-maps every mf:include entry into the
  // tests of its sub-manifest.
  private def loadTestCasesFromManifest(): List[SPARQLQueryEvaluationTest] = {
    // NOTE(review): baseURL is computed but never used.
    val baseURL = classOf[W3cConformanceSPARQLQueryEvaluationTestSuite].getResource(testDirSPARQL11)
    val model = ModelFactory.createDefaultModel()
    RDFParserBuilder.create.source("sparql11/data-sparql11/manifest-sparql11-query.ttl").checking(false).resolveURIs(false).parse(model)
    val includesList = model.listObjectsOfProperty(model.createProperty("http://www.w3.org/2001/sw/DataAccess/tests/test-manifest#include")).next().as(classOf[RDFList])
    includesList.asJavaList().asScala.flatMap(subDir => loadTestCasesFromSubManifest(subDir.asResource(), "sparql11/data-sparql11/")).toList
  }
  // Parses one sub-manifest resource and extracts all approved QueryEvaluationTest
  // entries via a SPARQL query over the manifest model.
  private def loadTestCasesFromSubManifest(r: Resource, base: String) = {
    val model = ModelFactory.createDefaultModel()
    RDFParserBuilder.create.source(base + r.getURI).checking(false).resolveURIs(false).parse(model)
    val uri = URI.create(r.getURI)
    // Directory containing the sub-manifest; file references in it are relative to this.
    val parent = Paths.get(base + (if (uri.getPath().endsWith("/")) uri.resolve("..") else uri.resolve(".")).toString)
    // NOTE(review): `members` is computed but never used.
    val members = model.listObjectsOfProperty(model.createProperty("http://www.w3.org/2001/sw/DataAccess/tests/test-manifest#entries")).next().as(classOf[RDFList])
    val query = QueryFactory.create(
      """
        |prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
        |prefix : <http://www.w3.org/2009/sparql/docs/tests/data-sparql11/construct/manifest#>
        |prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        |prefix mf: <http://www.w3.org/2001/sw/DataAccess/tests/test-manifest#>
        |prefix qt: <http://www.w3.org/2001/sw/DataAccess/tests/test-query#>
        |prefix dawgt: <http://www.w3.org/2001/sw/DataAccess/tests/test-dawg#>
        |
        |SELECT * {
        |?test rdf:type mf:QueryEvaluationTest ;
        |    mf:name    ?name ;
        |    dawgt:approval dawgt:Approved ;
        |    mf:action
        |         [ qt:query  ?queryFile ;
        |           qt:data   ?dataFile ] ;
        |    mf:result  ?resultsFile .
        |OPTIONAL {?test rdfs:comment ?description }
        |}
        |""".stripMargin)
    // NOTE(review): the QueryExecution is never closed; harmless for in-memory
    // models but consider a try/finally or Using-style wrapper.
    val qe = QueryExecutionFactory.create(query, model)
    val rs = qe.execSelect()
    ResultSetFormatter.toList(rs).asScala.map(qs => {
      val desc = if (qs.get("description") != null) qs.getLiteral("description").getLexicalForm else ""
      SPARQLQueryEvaluationTest(
        qs.getResource("test").getURI,
        qs.getLiteral("name").getLexicalForm,
        desc,
        relativePath(qs.getResource("queryFile"), parent),
        relativePath(qs.getResource("dataFile"), parent),
        Some(relativePath(qs.getResource("resultsFile"), parent))
      )
    }
    ).toList
  }
  // Resolves a manifest-referenced resource URI against the manifest's directory.
  private def relativePath(r: Resource, path: Path): String = {
    path.resolve(r.getURI).toString
  }
  /**
   * the list of SPARQL evaluation tests
   */
  val tests: List[SPARQLQueryEvaluationTest] = loadTestCasesFromManifest()
}
object W3cConformanceSPARQLQueryEvaluationTestSuite {
  /** Debug entry point: prints every discovered SPARQL 1.1 evaluation test. */
  def main(args: Array[String]): Unit = {
    val suite = new W3cConformanceSPARQLQueryEvaluationTestSuite(SPARQL_VERSION.SPARQL_11)
    suite.tests.foreach(println)
  }
}
| SANSA-Stack/SANSA-RDF | sansa-query/sansa-query-tests/src/main/scala/net/sansa_stack/query/tests/W3cConformanceSPARQLQueryEvaluationTestSuite.scala | Scala | apache-2.0 | 6,236 |
/*
* MnoClassifier learns MSISDN-Operator combinations to afterwards predict Operators.
* Copyright (C) 2013 MACH Connectivity GmbH
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
package com.github.mkroli.mnoclassifier.service
import java.lang.management.ManagementFactory
import akka.actor.Actor
import akka.actor.Props
import akka.actor.actorRef2Scala
import javax.management.ObjectName
import scala.language.implicitConversions
case class ShutdownHook[T](priority: Int, hook: () => Responder[T])
// JMX management interface registered by ShutdownServiceComponent; invoking
// `shutdown` from a JMX client stops the shutdown actor and thereby triggers the
// ordered hook sequence. (Old-style procedure syntax, kept as-is.)
trait ShutdownMXBean {
  def shutdown
}
/**
 * Wires graceful-shutdown support into an actor system: hooks registered via
 * `addShutdownHook` are collected by an actor and executed in ascending priority
 * order when that actor stops; after the last hook completes the JVM exits.
 * A platform MBean ("<package>:name=ShutdownService") exposes the trigger.
 */
trait ShutdownServiceComponent {
  this: AkkaComponent =>
  lazy val shutdownServiceActor = actorSystem.actorOf(
    Props(new ShutdownServiceActor), name = "shutdownServiceActor")
  // Registered eagerly at trait initialization; `shutdown` forces the lazy actor
  // and stops it, which runs postStop and the hook chain below.
  ManagementFactory.getPlatformMBeanServer().registerMBean(
    new ShutdownMXBean {
      override def shutdown = actorSystem.stop(shutdownServiceActor)
    },
    new ObjectName("%s:name=%s".format(
      classOf[ShutdownMXBean].getPackage.getName, "ShutdownService")))
  // NOTE(review): an unconstrained implicit conversion from any T to a Responder
  // can apply silently anywhere this trait is mixed in; kept for compatibility
  // with existing hook definitions that return plain values.
  implicit def constToResponder[T](c: T) = Responder.constant(c)
  // Register a hook; `hook` is by-name, so it is only evaluated at shutdown time.
  def addShutdownHook[T](priority: Int)(hook: => Responder[T]) =
    shutdownServiceActor ! ShutdownHook(priority, () => hook)
  class ShutdownServiceActor extends Actor {
    var shutdownHooks: List[ShutdownHook[_]] = Nil
    override def postStop {
      // Runs hooks sequentially: the next hook starts only once the previous
      // Responder produces a value; after the final hook the process exits.
      def processShutdownHooks(shutdownHooks: List[ShutdownHook[_]]) {
        shutdownHooks match {
          case head :: tail => head.hook().foreach(_ => processShutdownHooks(tail))
          case Nil => sys.exit
        }
      }
      processShutdownHooks(shutdownHooks.sortBy {
        case ShutdownHook(priority, _) => priority
      })
    }
    override def receive = {
      case hook @ ShutdownHook(_, _) => shutdownHooks = hook :: shutdownHooks
    }
  }
}
| mkroli/mnoclassifier | src/main/scala/com/github/mkroli/mnoclassifier/service/ShutdownServiceComponent.scala | Scala | gpl-2.0 | 2,470 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.commands.expressions
import org.neo4j.cypher.internal.compiler.v2_3.pipes.QueryState
import org.neo4j.cypher.internal.compiler.v2_3.symbols.SymbolTable
import org.neo4j.cypher.internal.compiler.v2_3.{ExecutionContext, InequalitySeekRange}
import org.neo4j.cypher.internal.frontend.v2_3.InternalException
import org.neo4j.cypher.internal.frontend.v2_3.symbols._
/**
 * Wrapper that lets an [[InequalitySeekRange]] travel through the commands
 * expression tree; presumably consumed directly by index-seek machinery rather
 * than evaluated, hence `apply` always throws (an evaluation here would be a
 * planner bug, not a user error).
 */
case class InequalitySeekRangeExpression(range: InequalitySeekRange[Expression])
  extends Expression {
  override def apply(ctx: ExecutionContext)(implicit state: QueryState): Any = throw new
    InternalException("This should never be called")
  override def rewrite(f: (Expression) => Expression): Expression = f(this)
  // NOTE(review): inner expressions inside `range` are not exposed as arguments
  // or symbol dependencies — confirm this is intentional.
  override def arguments: Seq[Expression] = Seq.empty
  override protected def calculateType(symbols: SymbolTable): CypherType = CTAny
  override def symbolTableDependencies: Set[String] = Set.empty
}
| HuangLS/neo4j | community/cypher/cypher-compiler-2.3/src/main/scala/org/neo4j/cypher/internal/compiler/v2_3/commands/expressions/InequalitySeekRangeExpression.scala | Scala | apache-2.0 | 1,737 |
package glint.model
/**
 * Root trait for glinting model objects (the original comment reads "nodes and"
 * and appears truncated — presumably nodes and other model elements).
 * Created by CAB on 01.03.2015.
 */
trait Glint {
  // Sentinel -1 presumably means "not yet versioned/assigned" — TODO confirm.
  val version:Long = -1
  val glintID:Long = -1}
| AlexCAB/Glint | src/main/scala/glint/model/Glint.scala | Scala | mit | 175 |
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package types
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
/**
* @author Alexander Podkhalyuzin
* Date: 08.02.2008
*/
/*
* SelfType ::= id [':' Type] '=>' |
* ['this' | '_'] ':' Type '=>'
*/
// Default entry point: the trait below abstracts the infix-type sub-parser so it
// can be substituted; this object wires in the standard InfixType implementation.
object SelfType extends SelfType {
  override protected def infixType = InfixType
}
trait SelfType {
  // Sub-parser used for the optional `: Type` ascription in parseType below.
  protected def infixType: InfixType
  /**
   * Parses a self-type annotation per the grammar in the header comment:
   * either `id [':' Type] '=>'` or `('this' | '_') ':' Type '=>'`.
   *
   * On success the consumed tokens are wrapped in a SELF_TYPE marker; on any
   * mismatch the marker is rolled back so the lexer position is untouched and
   * the caller can try other productions. Note that `this`/`_` *require* the
   * `: Type` part, while a plain identifier may be followed by `=>` directly.
   */
  def parse(builder: ScalaPsiBuilder) {
    val selfTypeMarker = builder.mark
    builder.getTokenType match {
      case ScalaTokenTypes.kTHIS | ScalaTokenTypes.tUNDER =>
        builder.advanceLexer // Ate this or _
        builder.getTokenType match {
          case ScalaTokenTypes.tCOLON => {
            builder.advanceLexer //Ate ':'
            if (!parseType(builder)) {
              selfTypeMarker.rollbackTo
              return
            }
            else {
              builder.getTokenType match {
                case ScalaTokenTypes.tFUNTYPE => {
                  builder.advanceLexer //Ate '=>'
                  selfTypeMarker.done(ScalaElementTypes.SELF_TYPE)
                  return
                }
                case _ => {
                  // type parsed but no '=>' follows: not a self type
                  selfTypeMarker.rollbackTo
                  return
                }
              }
            }
          }
          case _ => {
            // 'this'/'_' without ':' cannot start a self type
            selfTypeMarker.rollbackTo
            return
          }
        }
      case ScalaTokenTypes.tIDENTIFIER =>
        builder.advanceLexer //Ate identifier
        builder.getTokenType match {
          case ScalaTokenTypes.tCOLON => {
            builder.advanceLexer //Ate ':'
            if (!parseType(builder)) {
              selfTypeMarker.rollbackTo
              return
            }
            else {
              builder.getTokenType match {
                case ScalaTokenTypes.tFUNTYPE => {
                  builder.advanceLexer //Ate '=>'
                  selfTypeMarker.done(ScalaElementTypes.SELF_TYPE)
                  return
                }
                case _ => {
                  selfTypeMarker.rollbackTo
                  return
                }
              }
            }
          }
          case ScalaTokenTypes.tFUNTYPE => {
            // bare `id =>` form: type ascription is optional for identifiers
            builder.advanceLexer //Ate '=>'
            selfTypeMarker.done(ScalaElementTypes.SELF_TYPE)
            return
          }
          case _ => {
            selfTypeMarker.rollbackTo
            return
          }
        }
      case _ =>
        selfTypeMarker.rollbackTo
        return
    }
  }
def parseType(builder : ScalaPsiBuilder) : Boolean = {
val typeMarker = builder.mark
if (!infixType.parse(builder, star = false, isPattern = true)) {
typeMarker.drop()
return false
}
builder.getTokenType match {
case ScalaTokenTypes.kFOR_SOME =>
ExistentialClause parse builder
typeMarker.done(ScalaElementTypes.EXISTENTIAL_TYPE)
case _ => typeMarker.drop()
}
true
}
} | ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/parser/parsing/types/SelfType.scala | Scala | apache-2.0 | 3,062 |
package modules
import java.io._
import java.net.URISyntaxException
import java.util.Scanner
import irc.info.{Info, Rank}
import irc.message.{Message, MessageCommands}
import irc.server.ServerResponder
import ircbot.{BotCommand, BotModule, ModuleFiles}
import org.json.JSONArray
/**
 * IRC channel-administration bot module.
 *
 * Provides self-punishment commands (.kme/.banme/.tkbme), half-op+ moderation
 * commands (.kb/.tkb/.k/.ub), basic anti-flood / mass-highlight protection for
 * users below AOP rank, and a bot-admin toggle ("administration") that enables
 * the module per channel. Enabled channels are keyed "<server>:<channel>" and
 * persisted as a JSON array in administration.json.
 *
 * Fixes vs. previous revision:
 *  - help-text typo "adminstration" -> "administration";
 *  - removeAntispam read the counter map before checking key presence, which
 *    could throw NoSuchElementException in its background thread.
 *
 * NOTE(review): `channels`, `json` and `antispams` are plain vars mutated both
 * from the parse path and from spawned threads without synchronization or
 * volatility — presumably tolerable for a small bot, but confirm.
 */
class Administration extends BotModule{
  override val commands: Map[String, Array[String]] = Map("kme" -> Array("Kick yourself from the channel. Only work when administration is turned on in that channel",
    "Command is always .kme"),
    "banme" -> Array("Ban yourself from the channel. Only work when administration is turned on in that channel",
      "Command is always .banme"),
    "tkbme" -> Array("Time ban yourself from the channel. Only work when administration is turned on in that channel",
      "Command is always .tkbme Usage: .tkbme <seconds>"))
  override val adminCommands: Map[String, Array[String]] = Map("administration" -> Array("Toggle administration on or off for a channel.",
    "To use, do %padministration <on/off> <channel>"))
  // Channels with administration enabled, keyed "<server>:<channel>".
  var channels: Set[String] = Set()
  val jsonfile = ModuleFiles.getFile("administration.json")
  var json: JSONArray = new JSONArray()
  // Best-effort load of the persisted channel list; failures just leave the set empty.
  try {
    val scan = new Scanner(new FileInputStream(jsonfile))
    var jsonstring = ""
    while (scan.hasNext) {
      jsonstring += scan.next() + " "
    }
    scan.close()
    json = new JSONArray(jsonstring)
  } catch {
    case e: IOException => e.printStackTrace()
    case e: URISyntaxException => e.printStackTrace()
  }
  for(i <- 0 until json.length()){
    channels += json.getString(i)
  }
  // Flood counters: "<server>:<channel>" -> (nickname -> recent message count).
  // Entries are incremented on each message and decremented ~10s later.
  var antispams: Map[String, Map[String, Int]] = Map()
  override def parse(m: Message, b: BotCommand, r: ServerResponder): Unit = {
    // Reply in the channel for channel messages, otherwise directly to the sender.
    val target = if(!m.params.first.startsWith("#")) m.sender.nickname else m.params.first
    if((m.command == MessageCommands.PRIVMSG || m.command == MessageCommands.NOTICE) && channels.contains(m.server + ":" + m.params.first)){
      val rank = (for{
        info <- Info.get(m.server)
        channel <- info.findChannel(m.params.first)
      } yield {
        channel.getRank(m.sender.nickname)
      }).getOrElse(Rank.UNKNOWN)
      // Antispam — only applied to users below AOP rank.
      if(rank < Rank.AOP) {
        // Mass-highlight detection: ban+kick if the message mentions > 5 nicknames.
        var highlights = 0
        var massHighlight = false
        for{
          info <- Info.get(m.server)
          channel <- info.findChannel(m.params.first)
        } yield {
          for((username,user) <- channel.users) {
            if (!massHighlight) {
              if (m.trailing.contains(username)) {
                highlights += 1
                if (highlights > 5) massHighlight = true
              }
            }
          }
        }
        if(massHighlight){
          r.ban(m.params.first,"@" + m.sender.host)
          r.kick(m.params.first, m.sender.nickname, "Mass highlighting is not allowed")
        }
        // Flood counter for this sender in this channel (0 if none yet).
        val spams = {
          if (antispams.contains(m.server + ":" + m.params.first)) {
            if (antispams(m.server + ":" + m.params.first).contains(m.sender.nickname)) {
              antispams(m.server + ":" + m.params.first)(m.sender.nickname)
            }
            else 0
          }
          else 0
        }
        val existingSpams = {
          if (antispams.contains(m.server + ":" + m.params.first)) {
            antispams(m.server + ":" + m.params.first)
          }
          else Map()
        }
        antispams ++= Map(m.server + ":" + m.params.first -> (existingSpams ++ Map(m.sender.nickname -> (spams + 1))))
        // Schedule the matching decrement 10 seconds from now.
        removeAntispam(m.server + ":" + m.params.first, m.sender.nickname, 10000)
        if (spams > 4) {
          r.kick(m.params.first, m.sender.nickname, "Stop flooding!")
          return
        }
      }
      // Channel commands always use the "." prefix regardless of bot configuration.
      val bAsDot = new BotCommand(m, ".")
      // SELF BAN COMMANDS
      if(bAsDot.command == "kme") {
        r.kick(m.params.first, m.sender.nickname, "There you go")
      }
      if(bAsDot.command == "banme") {
        r.ban(m.params.first, "@" + m.sender.host)
        r.kick(m.params.first, m.sender.nickname, "There you go")
      }
      if(bAsDot.command == "tkbme") {
        if(bAsDot.hasParams) {
          var time = 0
          try {
            // "gentoo" is an easter egg for the maximum possible timeout.
            if(bAsDot.paramsArray(0) == "gentoo") time = 2147483647
            else time = Integer.parseInt(bAsDot.paramsArray(0))
            if(time <= 0) throw new NumberFormatException()
            r.ban(m.params.first, "@" + m.sender.host)
            r.kick(m.params.first, m.sender.nickname, s"See you in $time seconds")
            removeBan(r, m.params.first, m.sender.host,time)
          }
          catch {
            case e: NumberFormatException =>
              // NOTE(review): error message reports paramsArray(1); the parsed
              // value was paramsArray(0) — confirm which is intended.
              r.say(target, s"${m.sender.nickname}: ${bAsDot.paramsArray(1)} is not a valid timeout")
              return
          }
        }
      }
      // ADMINISTRATION COMMANDS
      // HALFOP ONLY
      if(rank >= Rank.HOP){
        // KICKBAN
        if(bAsDot.command == "kb"){
          if(bAsDot.hasParams) {
            val currentChannel = Info.get(m.server).get.findChannel(m.params.first).get
            try {
              if(m.sender.nickname == bAsDot.paramsArray(0)) {
                r.say(target, s"${m.sender.nickname}: To kickban yourself, use the .banme command")
              }
              else if(m.config.getNickname == bAsDot.paramsArray(0)){
                r.say(target, "no fuck you")
              }
              else if(currentChannel.getRank(m.sender.nickname) == Rank.HOP && currentChannel.getRank(b.paramsArray(0)) == Rank.HOP){
                r.say(target, s"${m.sender.nickname}: You cannot kick another hop")
              }
              else if(currentChannel.getRank(m.sender.nickname) == Rank.SOP && currentChannel.getRank(b.paramsArray(0)) == Rank.SOP){
                r.say(target, s"${m.sender.nickname}: You cannot kick another sop")
              }
              else if(currentChannel.getRank(m.sender.nickname) >= currentChannel.getRank(b.paramsArray(0))){
                val host = "@" + Info.get(m.server).get.findUser(bAsDot.paramsArray(0)).get.host
                r.ban(m.params.first, host)
                // Remainder of the parameter string (after the nick) is the kick reason.
                if(bAsDot.paramsArray.length > 1) r.kick(m.params.first, bAsDot.paramsArray(0), bAsDot.paramsString.substring(bAsDot.paramsArray(0).length + 1))
                else r.kick(m.params.first, bAsDot.paramsArray(0))
              }
              else r.say(target, s"${m.sender.nickname}: They are a higher rank than you")
            }
            catch{
              case e: Exception =>
                r.say(target, s"${m.sender.nickname}: Could not find user, ${b.paramsArray(0)}")
            }
          }
        }
        //TIME KICKBAN
        if(bAsDot.command == "tkb"){
          if(bAsDot.paramsArray.length > 1) {
            var time = 0
            try {
              if(bAsDot.paramsArray(1) == "gentoo") time = 2147483647
              else time = Integer.parseInt(bAsDot.paramsArray(1))
              if(time <= 0) throw new NumberFormatException()
            }
            catch {
              case e: NumberFormatException =>
                r.say(target, s"${m.sender.nickname}: ${bAsDot.paramsArray(1)} is not a valid timeout")
                return
            }
            val currentChannel = Info.get(m.server).get.findChannel(m.params.first).get
            try {
              if(m.sender.nickname == bAsDot.paramsArray(0)) {
                r.say(target, s"${m.sender.nickname}: To time ban yourself, use the .tkbme command")
              }
              else if(m.config.getNickname == bAsDot.paramsArray(0)){
                r.say(target, "no fuck you")
              }
              else if(currentChannel.getRank(m.sender.nickname) == Rank.HOP && currentChannel.getRank(b.paramsArray(0)) == Rank.HOP){
                r.say(target, s"${m.sender.nickname}: You cannot kick another hop")
              }
              else if(currentChannel.getRank(m.sender.nickname) == Rank.SOP && currentChannel.getRank(b.paramsArray(0)) == Rank.SOP){
                r.say(target, s"${m.sender.nickname}: You cannot kick another sop")
              }
              else if(currentChannel.getRank(m.sender.nickname) >= currentChannel.getRank(b.paramsArray(0))){
                val host = "@" + Info.get(m.server).get.findUser(bAsDot.paramsArray(0)).get.host
                r.ban(m.params.first, host)
                if(bAsDot.paramsArray.length > 2) {
                  r.notice(bAsDot.paramsArray(0), s"You will be unbanned from ${m.params.first} in $time seconds")
                  // Reason is everything after "<nick> <seconds> ".
                  val reason = bAsDot.paramsString.substring(bAsDot.paramsArray(0).length + bAsDot.paramsArray(1).length + 2)
                  r.kick(m.params.first, bAsDot.paramsArray(0), reason)
                }
                else r.kick(m.params.first, bAsDot.paramsArray(0), s"Come back in $time seconds")
                removeBan(r, m.params.first, host, time)
              }
              else r.say(target, s"${m.sender.nickname}: They are a higher rank than you")
            }
            catch{
              case e: Exception =>
                r.say(target, s"${m.sender.nickname}: Could not find user, ${b.paramsArray(0)}")
            }
          }
        }
        // KICK
        if(bAsDot.command == "k") {
          if (bAsDot.hasParams) {
            val currentChannel = Info.get(m.server).get.findChannel(m.params.first).get
            try {
              if(m.sender.nickname == bAsDot.paramsArray(0)) {
                r.say(target, s"${m.sender.nickname}: To kick yourself, use the .kme command")
              }
              else if (currentChannel.getRank(m.sender.nickname) == Rank.HOP && currentChannel.getRank(b.paramsArray(0)) == Rank.HOP) {
                r.say(target, s"${m.sender.nickname}: You cannot kick another hop")
              }
              else if(currentChannel.getRank(m.sender.nickname) == Rank.SOP && currentChannel.getRank(b.paramsArray(0)) == Rank.SOP){
                r.say(target, s"${m.sender.nickname}: You cannot kick another sop")
              }
              else if (currentChannel.getRank(m.sender.nickname) >= currentChannel.getRank(b.paramsArray(0))){
                if (bAsDot.paramsArray.length > 1) r.kick(m.params.first, bAsDot.paramsArray(0), bAsDot.paramsString.substring(bAsDot.paramsArray(0).length + 1))
                else r.kick(m.params.first, bAsDot.paramsArray(0))
              }
              else r.say(target, s"${m.sender.nickname}: They are a higher rank than you")
            }
            catch {
              case e: Exception =>
                r.say(target, s"${m.sender.nickname}: Could not find user, ${b.paramsArray(0)}")
            }
          }
        }
        // UNBAN
        if(bAsDot.command == "ub"){
          if(bAsDot.hasParams){
            try{
              r.unban(m.params.first, "@" + Info.get(m.server).get.findUser(bAsDot.paramsArray(0)).get.host)
            }
            catch{
              case e: Exception =>
                r.say(target, s"${m.sender.nickname}: Could not find user, ${b.paramsArray(0)}")
            }
          }
        }
      }
    }
    // ADMIN TOGGLE (bot admins only; uses the configured command prefix)
    if(b.command == "administration" && m.sender.isAdmin){
      val usage = s"Usage: ${b.commandPrefix}administration <on/off> <channel>"
      if(b.hasParams){
        // Explicit channel argument wins; otherwise toggle the current channel.
        val channel = {
          if(b.paramsArray.length > 1) b.paramsArray(1)
          else m.params.first
        }
        b.paramsArray(0) match {
          case "on" =>
            if (channels.contains(m.server + ":" + channel)) {
              r.say(target, s"${m.sender.nickname}: Administration is already on for $channel")
            }
            else {
              channels = channels + (m.server + ":" + channel)
              save()
              r.say(target, s"${m.sender.nickname}: Administration is now on for $channel")
            }
          case "off" =>
            if (channels.contains(m.server + ":" + channel)) {
              channels -= (m.server + ":" + channel)
              save()
              r.say(target, s"${m.sender.nickname}: Administration is now off for $channel")
            }
            else {
              r.say(target, s"${m.sender.nickname}: Administration was already off for $channel")
            }
          case _ =>
            r.say(target, usage)
        }
      }
      else r.say(target, usage)
    }
  }
  // Persists the enabled-channel set as a JSON array to administration.json.
  private def save() {
    val jsonArray = new JSONArray()
    for(channel: String <- channels){
      jsonArray.put(channel)
    }
    json = jsonArray
    try {
      val writer = new PrintWriter(jsonfile)
      writer.println(json.toString)
      writer.close()
    } catch {
      case e: FileNotFoundException => e.printStackTrace()
    }
  }
  // Decrements the sender's flood counter after `timeoutMillis` on a background thread.
  private def removeAntispam(channel: String, user: String, timeoutMillis: Long): Unit ={
    new Thread(new Runnable {
      override def run(): Unit = {
        Thread.sleep(timeoutMillis)
        // Guard before reading: previously the counter was read unconditionally,
        // which could throw NoSuchElementException inside this thread.
        if(antispams.contains(channel)){
          if(antispams(channel).contains(user)){
            val spams = antispams(channel)(user)
            antispams ++= Map(channel -> (antispams(channel) ++ Map(user -> (spams - 1))))
          }
        }
      }
    }).start()
  }
  // Lifts the ban mask after `timeoutSeconds` on a background thread.
  private def removeBan(responder: ServerResponder, channel: String, ban: String, timeoutSeconds: Long): Unit ={
    new Thread(new Runnable {
      override def run(): Unit = {
        Thread.sleep(timeoutSeconds*1000)
        responder.unban(channel, ban)
      }
    }).start()
  }
}
| wiiaam/taylorswift | src/main/scala/modules/Administration.scala | Scala | agpl-3.0 | 13,434 |
package tracing
import monix.eval.tracing.{TaskEvent, TaskTrace}
import monix.eval.{BaseTestSuite, Task}
/**
* All Credits to https://github.com/typelevel/cats-effect and https://github.com/RaasAhsan
*/
object FullStackTracingSuite extends BaseTestSuite {
  // Helper: runs `io` to completion, then reads back the accumulated trace via
  // `Task.trace`. Note that the frames of this harness are themselves captured,
  // so the `r.captured` totals asserted below exceed the frame count of the
  // task under test alone (see the inline note in the "bind frames" test).
  def traced[A](io: Task[A]): Task[TaskTrace] =
    io.flatMap(_ => Task.trace)
  // Each test builds a task from one specific constructor, runs it under full
  // stack tracing, and checks (a) the total number of captured frames and
  // (b) how many StackTrace events point at that constructor's method name.
  testAsync("captures map frames") { implicit s =>
    val task = Task.pure(0).map(_ + 1).map(_ + 1)
    val test =
      for (r <- traced(task)) yield {
        assertEquals(r.captured, 5)
        assertEquals(
          r.events.collect { case e: TaskEvent.StackTrace => e }.count(_.stackTrace.exists(_.getMethodName == "map")),
          3)
      }
    test.runToFuture
  }
  testAsync("captures bind frames") { implicit s =>
    val task = Task.pure(0).flatMap(a => Task(a + 1)).flatMap(a => Task(a + 1))
    val test =
      for (r <- traced(task)) yield {
        assertEquals(r.captured, 7)
        assertEquals(
          r.events.collect { case e: TaskEvent.StackTrace => e }
            .count(_.stackTrace.exists(_.getMethodName == "flatMap")),
          3
        ) // the extra one is used to capture the trace
      }
    test.runToFuture
  }
  testAsync("captures async frames") { implicit s =>
    val task = Task.async[Int](_(Right(0))).flatMap(a => Task(a + 1)).flatMap(a => Task(a + 1))
    val test =
      for (r <- traced(task)) yield {
        assertEquals(r.captured, 7)
        assertEquals(
          r.events.collect { case e: TaskEvent.StackTrace => e }.count(_.stackTrace.exists(_.getMethodName == "async")),
          1)
      }
    test.runToFuture
  }
  testAsync("captures pure frames") { implicit s =>
    val task = Task.pure(0).flatMap(a => Task.pure(a + 1))
    val test =
      for (r <- traced(task)) yield {
        assertEquals(r.captured, 5)
        assertEquals(
          r.events.collect { case e: TaskEvent.StackTrace => e }.count(_.stackTrace.exists(_.getMethodName == "pure")),
          2)
      }
    test.runToFuture
  }
  testAsync("full stack tracing captures eval frames") { implicit s =>
    val task = Task(0).flatMap(a => Task(a + 1))
    val test =
      for (r <- traced(task)) yield {
        assertEquals(r.captured, 5)
        assertEquals(
          r.events.collect { case e: TaskEvent.StackTrace => e }.count(_.stackTrace.exists(_.getMethodName == "eval")),
          2)
      }
    test.runToFuture
  }
  testAsync("full stack tracing captures suspend frames") { implicit s =>
    val task = Task.suspend(Task(1)).flatMap(a => Task.suspend(Task(a + 1)))
    val test =
      for (r <- traced(task)) yield {
        assertEquals(r.captured, 7)
        assertEquals(
          r.events.collect { case e: TaskEvent.StackTrace => e }
            .count(_.stackTrace.exists(_.getMethodName == "suspend")),
          2)
      }
    test.runToFuture
  }
  testAsync("captures raiseError frames") { implicit s =>
    // The raised error is swallowed by onErrorHandleWith; only the tracing
    // side-effects are observed.
    val task = Task(0).flatMap(_ => Task.raiseError(new Throwable())).onErrorHandleWith(_ => Task.unit)
    val test =
      for (r <- traced(task)) yield {
        assertEquals(r.captured, 6)
        assertEquals(
          r.events.collect { case e: TaskEvent.StackTrace => e }
            .count(_.stackTrace.exists(_.getMethodName == "raiseError")),
          1)
      }
    test.runToFuture
  }
  testAsync("captures bracket frames") { implicit s =>
    val task = Task.unit.bracket(_ => Task.pure(10))(_ => Task.unit).flatMap(a => Task(a + 1)).flatMap(a => Task(a + 1))
    val test =
      for (r <- traced(task)) yield {
        assertEquals(r.captured, 13)
        assertEquals(
          r.events.collect { case e: TaskEvent.StackTrace => e }
            .count(_.stackTrace.exists(_.getMethodName == "bracket")),
          1)
      }
    test.runToFuture
  }
  testAsync("captures bracketCase frames") { implicit s =>
    val task =
      Task.unit.bracketCase(_ => Task.pure(10))((_, _) => Task.unit).flatMap(a => Task(a + 1)).flatMap(a => Task(a + 1))
    val test =
      for (r <- traced(task)) yield {
        assertEquals(r.captured, 13)
        assertEquals(
          r.events.collect { case e: TaskEvent.StackTrace => e }
            .count(_.stackTrace.exists(_.getMethodName == "bracketCase")),
          1)
      }
    test.runToFuture
  }
}
| monix/monix | tracingTests/src/fulltracing/scala/tracing/FullStackTracingSuite.scala | Scala | apache-2.0 | 4,306 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.lang.{Double => JDouble, Long => JLong}
import java.math.{BigDecimal => JBigDecimal}
import java.util.{Locale, TimeZone}
import scala.collection.mutable.ArrayBuffer
import scala.util.Try
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.Resolver
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.catalyst.expressions.{Cast, Literal}
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.SchemaUtils
// TODO: We should tighten up visibility of the classes here once we clean up Hive coupling.
object PartitionPath {
  /** Convenience factory that accepts the partition directory as a raw string. */
  def apply(values: InternalRow, path: String): PartitionPath = {
    val hadoopPath = new Path(path)
    apply(values, hadoopPath)
  }
}
/**
 * Holds a directory in a partitioned collection of files as well as the partition values
 * in the form of a Row. Before scanning, the files at `path` need to be enumerated.
 *
 * @param values the literal value of each partition column, in partition-column order
 * @param path   the directory containing this partition's files
 */
case class PartitionPath(values: InternalRow, path: Path)
/**
 * The discovered partitioning of a dataset: the schema of the partition columns plus one
 * `PartitionPath` per partition directory.
 */
case class PartitionSpec(
    partitionColumns: StructType,
    partitions: Seq[PartitionPath])
object PartitionSpec {
  // Spec used for non-partitioned datasets: no partition columns, no partitions.
  val emptySpec = PartitionSpec(StructType(Seq.empty[StructField]), Seq.empty[PartitionPath])
}
object PartitioningUtils {
  /**
   * The column names found in one partition path, paired with the literal parsed for each
   * column. Invariant: exactly one literal per column name.
   */
  private[datasources] case class PartitionValues(columnNames: Seq[String], literals: Seq[Literal])
  {
    require(columnNames.size == literals.size)
  }
  import org.apache.spark.sql.catalyst.catalog.ExternalCatalogUtils.DEFAULT_PARTITION_NAME
  import org.apache.spark.sql.catalyst.catalog.ExternalCatalogUtils.escapePathName
  import org.apache.spark.sql.catalyst.catalog.ExternalCatalogUtils.unescapePathName
  /**
   * Given a group of qualified paths, tries to parse them and returns a partition specification.
   * For example, given:
   * {{{
   *   hdfs://<host>:<port>/path/to/partition/a=1/b=hello/c=3.14
   *   hdfs://<host>:<port>/path/to/partition/a=2/b=world/c=6.28
   * }}}
   * it returns:
   * {{{
   *   PartitionSpec(
   *     partitionColumns = StructType(
   *       StructField(name = "a", dataType = IntegerType, nullable = true),
   *       StructField(name = "b", dataType = StringType, nullable = true),
   *       StructField(name = "c", dataType = DoubleType, nullable = true)),
   *     partitions = Seq(
   *       Partition(
   *         values = Row(1, "hello", 3.14),
   *         path = "hdfs://<host>:<port>/path/to/partition/a=1/b=hello/c=3.14"),
   *       Partition(
   *         values = Row(2, "world", 6.28),
   *         path = "hdfs://<host>:<port>/path/to/partition/a=2/b=world/c=6.28")))
   * }}}
   */
  private[datasources] def parsePartitions(
      paths: Seq[Path],
      typeInference: Boolean,
      basePaths: Set[Path],
      timeZoneId: String): PartitionSpec = {
    parsePartitions(paths, typeInference, basePaths, DateTimeUtils.getTimeZone(timeZoneId))
  }
  private[datasources] def parsePartitions(
      paths: Seq[Path],
      typeInference: Boolean,
      basePaths: Set[Path],
      timeZone: TimeZone): PartitionSpec = {
    // First, we need to parse every partition's path and see if we can find partition values.
    val (partitionValues, optDiscoveredBasePaths) = paths.map { path =>
      parsePartition(path, typeInference, basePaths, timeZone)
    }.unzip
    // We create pairs of (path -> path's partition value) here
    // If the corresponding partition value is None, the pair will be skipped
    val pathsWithPartitionValues = paths.zip(partitionValues).flatMap(x => x._2.map(x._1 -> _))
    if (pathsWithPartitionValues.isEmpty) {
      // This dataset is not partitioned.
      PartitionSpec.emptySpec
    } else {
      // This dataset is partitioned. We need to check whether all partitions have the same
      // partition columns and resolve potential type conflicts.
      // Check if there is conflicting directory structure.
      // For the paths such as:
      // var paths = Seq(
      //   "hdfs://host:9000/invalidPath",
      //   "hdfs://host:9000/path/a=10/b=20",
      //   "hdfs://host:9000/path/a=10.5/b=hello")
      // It will be recognised as conflicting directory structure:
      //   "hdfs://host:9000/invalidPath"
      //   "hdfs://host:9000/path"
      // TODO: Selective case sensitivity.
      val discoveredBasePaths = optDiscoveredBasePaths.flatten.map(_.toString.toLowerCase())
      assert(
        discoveredBasePaths.distinct.size == 1,
        // Fixed: this previously emitted a stray backspace escape ("\\b") instead of a
        // newline, garbling the error message.
        "Conflicting directory structures detected. Suspicious paths:\\n" +
          discoveredBasePaths.distinct.mkString("\\n\\t", "\\n\\t", "\\n\\n") +
          "If provided paths are partition directories, please set " +
            "\\"basePath\\" in the options of the data source to specify the " +
            "root directory of the table. If there are multiple root directories, " +
            "please load them separately and then union them.")
      val resolvedPartitionValues = resolvePartitions(pathsWithPartitionValues, timeZone)
      // Creates the StructType which represents the partition columns.
      val fields = {
        val PartitionValues(columnNames, literals) = resolvedPartitionValues.head
        columnNames.zip(literals).map { case (name, Literal(_, dataType)) =>
          // We always assume partition columns are nullable since we've no idea whether null values
          // will be appended in the future.
          StructField(name, dataType, nullable = true)
        }
      }
      // Finally, we create `Partition`s based on paths and resolved partition values.
      val partitions = resolvedPartitionValues.zip(pathsWithPartitionValues).map {
        case (PartitionValues(_, literals), (path, _)) =>
          PartitionPath(InternalRow.fromSeq(literals.map(_.value)), path)
      }
      PartitionSpec(StructType(fields), partitions)
    }
  }
  /**
   * Parses a single partition, returns column names and values of each partition column, also
   * the path when we stop partition discovery. For example, given:
   * {{{
   *   path = hdfs://<host>:<port>/path/to/partition/a=42/b=hello/c=3.14
   * }}}
   * it returns the partition:
   * {{{
   *   PartitionValues(
   *     Seq("a", "b", "c"),
   *     Seq(
   *       Literal.create(42, IntegerType),
   *       Literal.create("hello", StringType),
   *       Literal.create(3.14, DoubleType)))
   * }}}
   * and the path when we stop the discovery is:
   * {{{
   *   hdfs://<host>:<port>/path/to/partition
   * }}}
   */
  private[datasources] def parsePartition(
      path: Path,
      typeInference: Boolean,
      basePaths: Set[Path],
      timeZone: TimeZone): (Option[PartitionValues], Option[Path]) = {
    val columns = ArrayBuffer.empty[(String, Literal)]
    // Old Hadoop versions don't have `Path.isRoot`
    var finished = path.getParent == null
    // currentPath is the current path that we will use to parse partition column value.
    var currentPath: Path = path
    while (!finished) {
      // Sometimes (e.g., when speculative task is enabled), temporary directories may be left
      // uncleaned. Here we simply ignore them.
      if (currentPath.getName.toLowerCase(Locale.ROOT) == "_temporary") {
        return (None, None)
      }
      if (basePaths.contains(currentPath)) {
        // If the currentPath is one of base paths. We should stop.
        finished = true
      } else {
        // Let's say currentPath is a path of "/table/a=1/", currentPath.getName will give us a=1.
        // Once we get the string, we try to parse it and find the partition column and value.
        val maybeColumn =
          parsePartitionColumn(currentPath.getName, typeInference, timeZone)
        maybeColumn.foreach(columns += _)
        // Now, we determine if we should stop.
        // When we hit any of the following cases, we will stop:
        //  - In this iteration, we could not parse the value of partition column and value,
        //    i.e. maybeColumn is None, and columns is not empty. At here we check if columns is
        //    empty to handle cases like /table/a=1/_temporary/something (we need to find a=1 in
        //    this case).
        //  - After we get the new currentPath, this new currentPath represent the top level dir
        //    i.e. currentPath.getParent == null. For the example of "/table/a=1/",
        //    the top level dir is "/table".
        finished =
          (maybeColumn.isEmpty && !columns.isEmpty) || currentPath.getParent == null
        if (!finished) {
          // For the above example, currentPath will be "/table/".
          currentPath = currentPath.getParent
        }
      }
    }
    if (columns.isEmpty) {
      (None, Some(path))
    } else {
      // Columns were collected leaf-to-root; reverse to restore directory order.
      val (columnNames, values) = columns.reverse.unzip
      (Some(PartitionValues(columnNames, values)), Some(currentPath))
    }
  }
  /**
   * Parses a single `column=value` directory name into a column name and an inferred
   * [[Literal]] value. Returns None when `columnSpec` contains no '='.
   */
  private def parsePartitionColumn(
      columnSpec: String,
      typeInference: Boolean,
      timeZone: TimeZone): Option[(String, Literal)] = {
    val equalSignIndex = columnSpec.indexOf('=')
    if (equalSignIndex == -1) {
      None
    } else {
      val columnName = unescapePathName(columnSpec.take(equalSignIndex))
      assert(columnName.nonEmpty, s"Empty partition column name in '$columnSpec'")
      val rawColumnValue = columnSpec.drop(equalSignIndex + 1)
      assert(rawColumnValue.nonEmpty, s"Empty partition column value in '$columnSpec'")
      val literal = inferPartitionColumnValue(rawColumnValue, typeInference, timeZone)
      Some(columnName -> literal)
    }
  }
  /**
   * Given a partition path fragment, e.g. `fieldOne=1/fieldTwo=2`, returns a parsed spec
   * for that fragment as a `TablePartitionSpec`, e.g. `Map(("fieldOne", "1"), ("fieldTwo", "2"))`.
   */
  def parsePathFragment(pathFragment: String): TablePartitionSpec = {
    parsePathFragmentAsSeq(pathFragment).toMap
  }
  /**
   * Given a partition path fragment, e.g. `fieldOne=1/fieldTwo=2`, returns a parsed spec
   * for that fragment as a `Seq[(String, String)]`, e.g.
   * `Seq(("fieldOne", "1"), ("fieldTwo", "2"))`.
   */
  def parsePathFragmentAsSeq(pathFragment: String): Seq[(String, String)] = {
    pathFragment.split("/").map { kv =>
      val pair = kv.split("=", 2)
      (unescapePathName(pair(0)), unescapePathName(pair(1)))
    }
  }
  /**
   * This is the inverse of parsePathFragment().
   */
  def getPathFragment(spec: TablePartitionSpec, partitionSchema: StructType): String = {
    partitionSchema.map { field =>
      escapePathName(field.name) + "=" + escapePathName(spec(field.name))
    }.mkString("/")
  }
  /**
   * Normalize the column names in partition specification, w.r.t. the real partition column names
   * and case sensitivity. e.g., if the partition spec has a column named `monTh`, and there is a
   * partition column named `month`, and it's case insensitive, we will normalize `monTh` to
   * `month`.
   */
  def normalizePartitionSpec[T](
      partitionSpec: Map[String, T],
      partColNames: Seq[String],
      tblName: String,
      resolver: Resolver): Map[String, T] = {
    val normalizedPartSpec = partitionSpec.toSeq.map { case (key, value) =>
      val normalizedKey = partColNames.find(resolver(_, key)).getOrElse {
        throw new AnalysisException(s"$key is not a valid partition column in table $tblName.")
      }
      normalizedKey -> value
    }
    SchemaUtils.checkColumnNameDuplication(
      normalizedPartSpec.map(_._1), "in the partition schema", resolver)
    normalizedPartSpec.toMap
  }
  /**
   * Resolves possible type conflicts between partitions by up-casting "lower" types. The up-
   * casting order is:
   * {{{
   *   NullType ->
   *   IntegerType -> LongType ->
   *   FloatType -> DoubleType -> StringType
   * }}}
   */
  def resolvePartitions(
      pathsWithPartitionValues: Seq[(Path, PartitionValues)],
      timeZone: TimeZone): Seq[PartitionValues] = {
    if (pathsWithPartitionValues.isEmpty) {
      Seq.empty
    } else {
      // TODO: Selective case sensitivity.
      val distinctPartColNames =
        pathsWithPartitionValues.map(_._2.columnNames.map(_.toLowerCase())).distinct
      assert(
        distinctPartColNames.size == 1,
        listConflictingPartitionColumns(pathsWithPartitionValues))
      // Resolves possible type conflicts for each column
      val values = pathsWithPartitionValues.map(_._2)
      val columnCount = values.head.columnNames.size
      val resolvedValues = (0 until columnCount).map { i =>
        resolveTypeConflicts(values.map(_.literals(i)), timeZone)
      }
      // Fills resolved literals back to each partition
      values.zipWithIndex.map { case (d, index) =>
        d.copy(literals = resolvedValues.map(_(index)))
      }
    }
  }
  /**
   * Builds a human-readable error message describing paths whose partition column names
   * disagree with one another.
   */
  private[datasources] def listConflictingPartitionColumns(
      pathWithPartitionValues: Seq[(Path, PartitionValues)]): String = {
    val distinctPartColNames = pathWithPartitionValues.map(_._2.columnNames).distinct
    def groupByKey[K, V](seq: Seq[(K, V)]): Map[K, Iterable[V]] =
      seq.groupBy { case (key, _) => key }.mapValues(_.map { case (_, value) => value })
    val partColNamesToPaths = groupByKey(pathWithPartitionValues.map {
      case (path, partValues) => partValues.columnNames -> path
    })
    val distinctPartColLists = distinctPartColNames.map(_.mkString(", ")).zipWithIndex.map {
      case (names, index) =>
        s"Partition column name list #$index: $names"
    }
    // Lists out those non-leaf partition directories that also contain files
    val suspiciousPaths = distinctPartColNames.sortBy(_.length).flatMap(partColNamesToPaths)
    "Conflicting partition column names detected:\\n" +
      distinctPartColLists.mkString("\\n\\t", "\\n\\t", "\\n\\n") +
      "For partitioned table directories, data files should only live in leaf directories.\\n" +
      "And directories at the same level should have the same partition column name.\\n" +
      "Please check the following directories for unexpected files or " +
      "inconsistent partition column names:\\n" +
      suspiciousPaths.map("\\t" + _).mkString("\\n", "\\n", "")
  }
  /**
   * Converts a string to a [[Literal]] with automatic type inference. Currently only supports
   * [[IntegerType]], [[LongType]], [[DoubleType]], [[DecimalType]], [[DateType]]
   * [[TimestampType]], and [[StringType]].
   */
  private[datasources] def inferPartitionColumnValue(
      raw: String,
      typeInference: Boolean,
      timeZone: TimeZone): Literal = {
    val decimalTry = Try {
      // `BigDecimal` conversion can fail when the `field` is not a form of number.
      val bigDecimal = new JBigDecimal(raw)
      // It reduces the cases for decimals by disallowing values having scale (eg. `1.1`).
      require(bigDecimal.scale <= 0)
      // `DecimalType` conversion can fail when
      //   1. The precision is bigger than 38.
      //   2. scale is bigger than precision.
      Literal(bigDecimal)
    }
    if (typeInference) {
      // First tries integral types
      Try(Literal.create(Integer.parseInt(raw), IntegerType))
        .orElse(Try(Literal.create(JLong.parseLong(raw), LongType)))
        .orElse(decimalTry)
        // Then falls back to fractional types
        .orElse(Try(Literal.create(JDouble.parseDouble(raw), DoubleType)))
        // Then falls back to date/timestamp types
        .orElse(Try(
          Literal.create(
            DateTimeUtils.getThreadLocalTimestampFormat(timeZone)
              .parse(unescapePathName(raw)).getTime * 1000L,
            TimestampType)))
        .orElse(Try(
          Literal.create(
            DateTimeUtils.millisToDays(
              DateTimeUtils.getThreadLocalDateFormat.parse(raw).getTime),
            DateType)))
        // Then falls back to string
        .getOrElse {
          if (raw == DEFAULT_PARTITION_NAME) {
            Literal.create(null, NullType)
          } else {
            Literal.create(unescapePathName(raw), StringType)
          }
        }
    } else {
      if (raw == DEFAULT_PARTITION_NAME) {
        Literal.create(null, NullType)
      } else {
        Literal.create(unescapePathName(raw), StringType)
      }
    }
  }
  // Up-casting order used by `resolveTypeConflicts`, from "lowest" to "highest" type.
  private val upCastingOrder: Seq[DataType] =
    Seq(NullType, IntegerType, LongType, FloatType, DoubleType, StringType)
  /**
   * Validates that the given partition columns exist in `schema`, are of atomic types, and do
   * not cover every column of the schema.
   */
  def validatePartitionColumn(
      schema: StructType,
      partitionColumns: Seq[String],
      caseSensitive: Boolean): Unit = {
    partitionColumnsSchema(schema, partitionColumns, caseSensitive).foreach {
      field => field.dataType match {
        case _: AtomicType => // OK
        case _ => throw new AnalysisException(s"Cannot use ${field.dataType} for partition column")
      }
    }
    if (partitionColumns.nonEmpty && partitionColumns.size == schema.fields.length) {
      throw new AnalysisException(s"Cannot use all columns for partition columns")
    }
  }
  /**
   * Returns the (nullable) schema of the given partition columns, resolved against `schema`.
   */
  def partitionColumnsSchema(
      schema: StructType,
      partitionColumns: Seq[String],
      caseSensitive: Boolean): StructType = {
    val equality = columnNameEquality(caseSensitive)
    StructType(partitionColumns.map { col =>
      schema.find(f => equality(f.name, col)).getOrElse {
        throw new AnalysisException(s"Partition column $col not found in schema $schema")
      }
    }).asNullable
  }
  // Picks the analyzer's name-resolution function matching the desired case sensitivity.
  private def columnNameEquality(caseSensitive: Boolean): (String, String) => Boolean = {
    if (caseSensitive) {
      org.apache.spark.sql.catalyst.analysis.caseSensitiveResolution
    } else {
      org.apache.spark.sql.catalyst.analysis.caseInsensitiveResolution
    }
  }
  /**
   * Given a collection of [[Literal]]s, resolves possible type conflicts by up-casting "lower"
   * types.
   */
  private def resolveTypeConflicts(literals: Seq[Literal], timeZone: TimeZone): Seq[Literal] = {
    val desiredType = {
      val topType = literals.map(_.dataType).maxBy(upCastingOrder.indexOf(_))
      // Falls back to string if all values of this column are null or empty string
      if (topType == NullType) StringType else topType
    }
    literals.map { case l @ Literal(_, _) =>
      Literal.create(Cast(l, desiredType, Some(timeZone.getID)).eval(), desiredType)
    }
  }
}
| minixalpha/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala | Scala | apache-2.0 | 19,064 |
package com.twitter.finagle.memcached.integration
import java.net.{InetAddress, InetSocketAddress}
import com.twitter.common.io.FileUtils._
import com.twitter.common.quantity.{Time, Amount}
import com.twitter.common.zookeeper.{ZooKeeperUtils, ServerSets, ZooKeeperClient}
import com.twitter.conversions.time._
import com.twitter.finagle.MemcachedClient
import com.twitter.finagle.memcached.CachePoolConfig
import com.twitter.finagle.memcached.migration._
import com.twitter.finagle.memcached.util.ChannelBufferUtils._
import com.twitter.finagle.zookeeper.ZookeeperServerSetCluster
import com.twitter.io.Charsets
import com.twitter.util._
import java.io.ByteArrayOutputStream
import org.apache.zookeeper.server.persistence.FileTxnSnapLog
import org.apache.zookeeper.server.{NIOServerCnxn, ZooKeeperServer}
import org.junit.runner.RunWith
import org.scalatest.concurrent.{IntegrationPatience, Eventually}
import org.scalatest.junit.JUnitRunner
import org.scalatest.{BeforeAndAfter, BeforeAndAfterEach, FunSuite}
@RunWith(classOf[JUnitRunner])
class MigrationClientTest extends FunSuite with BeforeAndAfterEach with BeforeAndAfter with Eventually with IntegrationPatience {
  /**
   * Note: This integration test requires a real Memcached server to run.
   */
  // ZooKeeper paths: `basePath` carries the migration state, the two pool
  // paths carry the server sets of the old and new cache pools.
  val basePath = "/cache/test/silly-cache"
  val oldPoolPath = basePath + "/OldPool"
  val newPoolPath = basePath + "/NewPool"
  // Per-test infrastructure, (re)created in beforeEach and torn down in afterEach.
  var zookeeperServer: ZooKeeperServer = null
  var zookeeperServerPort: Int = 0
  var zookeeperClient: ZooKeeperClient = null
  var connectionFactory: NIOServerCnxn.Factory = null
  var testServers: List[TestMemcachedServer] = List()
  // Upper bound for all synchronous Await.result calls below.
  val TIMEOUT = 15.seconds
  override def beforeEach() {
    val loopback = InetAddress.getLoopbackAddress
    // start zookeeper server and create zookeeper client
    zookeeperServer = new ZooKeeperServer(
      new FileTxnSnapLog(createTempDir(), createTempDir()),
      new ZooKeeperServer.BasicDataTreeBuilder)
    connectionFactory = new NIOServerCnxn.Factory(new InetSocketAddress(loopback, 0))
    connectionFactory.startup(zookeeperServer)
    zookeeperServerPort = zookeeperServer.getClientPort
    zookeeperClient = new ZooKeeperClient(
      Amount.of(10, Time.MILLISECONDS),
      new InetSocketAddress(loopback, zookeeperServerPort))
    // set-up old pool
    val oldPoolCluster = new ZookeeperServerSetCluster(
      ServerSets.create(zookeeperClient, ZooKeeperUtils.OPEN_ACL_UNSAFE, oldPoolPath))
    (0 to 1) foreach { _ =>
      TestMemcachedServer.start() match {
        case Some(server) =>
          oldPoolCluster.join(server.address)
          testServers :+= server
        case None => fail("Cannot start memcached.")
      }
    }
    // set-up new pool
    val newPoolCluster = new ZookeeperServerSetCluster(
      ServerSets.create(zookeeperClient, ZooKeeperUtils.OPEN_ACL_UNSAFE, newPoolPath))
    (0 to 1) foreach { _ =>
      TestMemcachedServer.start() match {
        case Some(server) =>
          newPoolCluster.join(server.address)
          testServers :+= server
        case None => fail("Cannot start memcached.")
      }
    }
    // set config data
    val cachePoolConfig: CachePoolConfig = new CachePoolConfig(cachePoolSize = 2)
    val output: ByteArrayOutputStream = new ByteArrayOutputStream
    CachePoolConfig.jsonCodec.serialize(cachePoolConfig, output)
    zookeeperClient.get().setData(oldPoolPath, output.toByteArray, -1)
    zookeeperClient.get().setData(newPoolPath, output.toByteArray, -1)
    // Default migration state: "Pending" (tests overwrite this as needed).
    val migrationConfig = MigrationConstants.MigrationConfig("Pending", false, false)
    val migrationDataArray = MigrationConstants.jsonMapper.writeValueAsString(migrationConfig).getBytes()
    zookeeperClient.get().setData(basePath, migrationDataArray, -1)
  }
  override def afterEach() {
    zookeeperClient.close()
    connectionFactory.shutdown()
    // shutdown memcached server
    testServers foreach { _.stop() }
    testServers = List()
  }
  // "Pending" state: writes go only to the old pool; the new pool never sees the key.
  test("not migrating yet") {
    val client1 = MemcachedClient.newKetamaClient(
      dest = "twcache!localhost:"+zookeeperServerPort+"!"+oldPoolPath)
    val client2 = MemcachedClient.newKetamaClient(
      dest = "twcache!localhost:"+zookeeperServerPort+"!"+newPoolPath)
    val migrationClient = MigrationClient.newMigrationClient("localhost:"+zookeeperServerPort, basePath)
    migrationClient.loadZKData() // force loading the config to fully set-up the client
    eventually { Await.result(migrationClient.get("test")) }
    assert(Await.result(migrationClient.get("foo"), TIMEOUT) === None)
    Await.result(migrationClient.set("foo", "bar"), TIMEOUT)
    assert(Await.result(migrationClient.get("foo"), TIMEOUT).get.toString(Charsets.Utf8) === "bar")
    assert(Await.result(client1.get("foo"), TIMEOUT).get.toString(Charsets.Utf8) === "bar")
    eventually { assert(Await.result(client2.get("foo")) === None) }
  }
  // "Warming" state: writes are mirrored (dark traffic) to the new pool, so
  // client2 eventually observes the value as well.
  if (!sys.props.contains("SKIP_FLAKY")) {
    test("sending dark traffic") {
      val migrationConfig = MigrationConstants.MigrationConfig("Warming", false, false)
      val migrationDataArray = MigrationConstants.jsonMapper.writeValueAsString(migrationConfig)
      zookeeperClient.get().setData(basePath, migrationDataArray, -1)
      val client1 = MemcachedClient.newKetamaClient(
        dest = "twcache!localhost:"+zookeeperServerPort+"!"+oldPoolPath)
      val client2 = MemcachedClient.newKetamaClient(
        dest = "twcache!localhost:"+zookeeperServerPort+"!"+newPoolPath)
      val migrationClient = MigrationClient.newMigrationClient("localhost:"+zookeeperServerPort, basePath)
      migrationClient.loadZKData() // force loading the config to fully set-up the client
      eventually { Await.result(migrationClient.get("test")) }
      assert(Await.result(migrationClient.get("foo"), TIMEOUT) === None)
      Await.result(migrationClient.set("foo", "bar"), TIMEOUT)
      assert(Await.result(migrationClient.get("foo"), TIMEOUT).get.toString(Charsets.Utf8) === "bar")
      assert(Await.result(client1.get("foo"), TIMEOUT).get.toString(Charsets.Utf8) === "bar")
      eventually { assert(Await.result(client2.get("foo")).map(_.toString(Charsets.Utf8)) === Some("bar")) }
    }
  }
  // "Warming" with dark read + read repair: a value present only in the old
  // pool is copied into the new pool after a read through the migration client.
  test("dark read w/ read repair") {
    val migrationConfig = MigrationConstants.MigrationConfig("Warming", true, false)
    val migrationDataArray = MigrationConstants.jsonMapper.writeValueAsString(migrationConfig)
    zookeeperClient.get().setData(basePath, migrationDataArray, -1)
    val client1 = MemcachedClient.newKetamaClient(
      dest = "twcache!localhost:"+zookeeperServerPort+"!"+oldPoolPath)
    val client2 = MemcachedClient.newKetamaClient(
      dest = "twcache!localhost:"+zookeeperServerPort+"!"+newPoolPath)
    val migrationClient = MigrationClient.newMigrationClient("localhost:"+zookeeperServerPort, basePath)
    migrationClient.loadZKData() // force loading the config to fully set-up the client
    eventually { Await.result(migrationClient.get("test")) }
    Await.result(client1.set("foo", "bar"), TIMEOUT)
    assert(Await.result(client1.get("foo"), TIMEOUT).get.toString(Charsets.Utf8) === "bar")
    assert(Await.result(client2.get("foo"), TIMEOUT) === None)
    assert(Await.result(migrationClient.get("foo"), TIMEOUT).get.toString(Charsets.Utf8) === "bar")
    assert(Await.result(client1.get("foo"), TIMEOUT).get.toString(Charsets.Utf8) === "bar")
    eventually { assert(Await.result(client2.get("foo")).map(_.toString(Charsets.Utf8)) === Some("bar")) }
  }
  // "Verifying" state without repair: reads fall back from the new pool to the
  // old pool, and the new pool is left untouched (client2 keeps seeing None).
  test("use new pool with fallback to old pool") {
    val migrationConfig = MigrationConstants.MigrationConfig("Verifying", false, false)
    val migrationDataArray = MigrationConstants.jsonMapper.writeValueAsString(migrationConfig)
    zookeeperClient.get().setData(basePath, migrationDataArray, -1)
    val client1 = MemcachedClient.newKetamaClient(
      dest = "twcache!localhost:"+zookeeperServerPort+"!"+oldPoolPath)
    val client2 = MemcachedClient.newKetamaClient(
      dest = "twcache!localhost:"+zookeeperServerPort+"!"+newPoolPath)
    val migrationClient = MigrationClient.newMigrationClient("localhost:"+zookeeperServerPort, basePath)
    migrationClient.loadZKData() // force loading the config to fully set-up the client
    eventually { Await.result(migrationClient.get("test")) }
    Await.result(client1.set("foo", "bar"), TIMEOUT)
    assert(Await.result(client1.get("foo"), TIMEOUT).get.toString(Charsets.Utf8) === "bar")
    assert(Await.result(client2.get("foo"), TIMEOUT) === None)
    assert(Await.result(migrationClient.get("foo"), TIMEOUT).get.toString(Charsets.Utf8) === "bar")
    assert(Await.result(client1.get("foo"), TIMEOUT).get.toString(Charsets.Utf8) === "bar")
    eventually { assert(Await.result(client2.get("foo")) === None) }
  }
  // "Verifying" with read repair: the fallback read also back-fills the new
  // pool, so client2 eventually observes the value.
  test("use new pool with fallback to old pool and readrepair") {
    val migrationConfig = MigrationConstants.MigrationConfig("Verifying", false, true)
    val migrationDataArray = MigrationConstants.jsonMapper.writeValueAsString(migrationConfig)
    zookeeperClient.get().setData(basePath, migrationDataArray, -1)
    val client1 = MemcachedClient.newKetamaClient(
      dest = "twcache!localhost:"+zookeeperServerPort+"!"+oldPoolPath)
    val client2 = MemcachedClient.newKetamaClient(
      dest = "twcache!localhost:"+zookeeperServerPort+"!"+newPoolPath)
    val migrationClient = MigrationClient.newMigrationClient("localhost:"+zookeeperServerPort, basePath)
    migrationClient.loadZKData() // force loading the config to fully set-up the client
    eventually { Await.result(migrationClient.get("test")) }
    Await.result(client1.set("foo", "bar"), TIMEOUT)
    assert(Await.result(client1.get("foo"), TIMEOUT).get.toString(Charsets.Utf8) === "bar")
    assert(Await.result(client2.get("foo"), TIMEOUT) === None)
    assert(Await.result(migrationClient.get("foo"), TIMEOUT).get.toString(Charsets.Utf8) === "bar")
    assert(Await.result(client1.get("foo"), TIMEOUT).get.toString(Charsets.Utf8) === "bar")
    eventually { assert(Await.result(client2.get("foo")).map(_.toString(Charsets.Utf8)) === Some("bar")) }
  }
}
| kristofa/finagle | finagle-memcached/src/test/scala/com/twitter/finagle/memcached/integration/MigrationClientTest.scala | Scala | apache-2.0 | 10,077 |
package lila.memo
import com.typesafe.config.Config
import lila.db.Types._
final class Env(config: Config, db: lila.db.Env) {
  // Config key naming the MongoDB collection that backs the cache.
  private val CollectionCache = config.getString("collection.cache")
  // Builder for Mongo-backed caches; instantiated on first access.
  lazy val mongoCache: MongoCache.Builder = MongoCache(db(CollectionCache))
}
object Env {
  // Built lazily at boot time from the "memo" section of the application config.
  lazy val current = "[boot] memo" describes new Env(
    config = lila.common.PlayApp.loadConfig("memo"),
    db = lila.db.Env.current)
}
| Happy0/lila | modules/memo/src/main/Env.scala | Scala | mit | 415 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.history
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, File}
import java.net.URI
import java.nio.charset.StandardCharsets
import java.util.zip.{ZipInputStream, ZipOutputStream}
import com.google.common.io.{ByteStreams, Files}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.scalatest.BeforeAndAfter
import org.apache.spark.{LocalSparkContext, SparkConf, SparkFunSuite}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.deploy.history.EventLogTestHelper._
import org.apache.spark.deploy.history.RollingEventLogFilesWriter._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.io.CompressionCodec
import org.apache.spark.util.Utils
/**
 * Base suite verifying [[EventLogFileReader]] behavior against event logs produced
 * by a concrete [[EventLogFileWriter]] implementation supplied by subclasses.
 */
abstract class EventLogFileReadersSuite extends SparkFunSuite with LocalSparkContext
  with BeforeAndAfter with Logging {

  protected val fileSystem = Utils.getHadoopFileSystem("/", SparkHadoopUtil.get.conf)
  // Fresh scratch directory per test: created in `before`, removed in `after`.
  protected var testDir: File = _
  protected var testDirPath: Path = _

  before {
    testDir = Utils.createTempDir(namePrefix = s"event log")
    testDirPath = new Path(testDir.getAbsolutePath())
  }

  after {
    Utils.deleteRecursively(testDir)
  }

  test("Retrieve EventLogFileReader correctly") {
    // Asserts the factory returned (or deliberately did not return) a reader
    // of the expected implementation class.
    def assertInstanceOfEventLogReader(
        expectedClazz: Option[Class[_ <: EventLogFileReader]],
        actual: Option[EventLogFileReader]): Unit = {
      if (expectedClazz.isEmpty) {
        assert(actual.isEmpty, s"Expected no EventLogFileReader instance but was " +
          s"${actual.map(_.getClass).getOrElse("<None>")}")
      } else {
        assert(actual.isDefined, s"Expected an EventLogFileReader instance but was empty")
        assert(expectedClazz.get.isAssignableFrom(actual.get.getClass),
          s"Expected ${expectedClazz.get} but was ${actual.get.getClass}")
      }
    }

    // Creates a file or directory at `path`, then checks which reader the
    // factory picks both from the Path and from its FileStatus.
    def testCreateEventLogReaderWithPath(
        path: Path,
        isFile: Boolean,
        expectedClazz: Option[Class[_ <: EventLogFileReader]]): Unit = {
      if (isFile) {
        Utils.tryWithResource(fileSystem.create(path)) { is =>
          is.writeInt(10)
        }
      } else {
        fileSystem.mkdirs(path)
      }

      val reader = EventLogFileReader(fileSystem, path)
      assertInstanceOfEventLogReader(expectedClazz, reader)
      val reader2 = EventLogFileReader(fileSystem,
        fileSystem.getFileStatus(path))
      assertInstanceOfEventLogReader(expectedClazz, reader2)
    }

    // path with no last index - single event log
    val reader1 = EventLogFileReader(fileSystem, new Path(testDirPath, "aaa"),
      None)
    assertInstanceOfEventLogReader(Some(classOf[SingleFileEventLogFileReader]), Some(reader1))

    // path with last index - rolling event log
    val reader2 = EventLogFileReader(fileSystem,
      new Path(testDirPath, s"${EVENT_LOG_DIR_NAME_PREFIX}aaa"), Some(3))
    assertInstanceOfEventLogReader(Some(classOf[RollingEventLogFilesFileReader]), Some(reader2))

    // path - file (both path and FileStatus)
    val eventLogFile = new Path(testDirPath, "bbb")
    testCreateEventLogReaderWithPath(eventLogFile, isFile = true,
      Some(classOf[SingleFileEventLogFileReader]))

    // path - file starting with "." (hidden files must be ignored)
    val invalidEventLogFile = new Path(testDirPath, ".bbb")
    testCreateEventLogReaderWithPath(invalidEventLogFile, isFile = true, None)

    // path - directory with "eventlog_v2_" prefix
    val eventLogDir = new Path(testDirPath, s"${EVENT_LOG_DIR_NAME_PREFIX}ccc")
    testCreateEventLogReaderWithPath(eventLogDir, isFile = false,
      Some(classOf[RollingEventLogFilesFileReader]))

    // path - directory with no "eventlog_v2_" prefix
    val invalidEventLogDir = new Path(testDirPath, "ccc")
    testCreateEventLogReaderWithPath(invalidEventLogDir, isFile = false, None)
  }

  // None (uncompressed) plus every codec Spark ships, identified by short name.
  val allCodecs = Seq(None) ++
    CompressionCodec.ALL_COMPRESSION_CODECS.map { c => Some(CompressionCodec.getShortName(c)) }

  allCodecs.foreach { codecShortName =>
    test(s"get information, list event log files, zip log files - with codec $codecShortName") {
      val appId = getUniqueApplicationId
      val attemptId = None

      val conf = getLoggingConf(testDirPath, codecShortName)
      val hadoopConf = SparkHadoopUtil.get.newConfiguration(conf)

      val writer = createWriter(appId, attemptId, testDirPath.toUri, conf, hadoopConf)
      writer.start()

      // The test for writing events into EventLogFileWriter is covered to its own test suite.
      val dummyData = Seq("dummy1", "dummy2", "dummy3")
      dummyData.foreach(writer.writeEvent(_, flushLogger = true))

      // While the writer is still open the log is "in progress".
      val logPathIncompleted = getCurrentLogPath(writer.logPath, isCompleted = false)
      val readerOpt = EventLogFileReader(fileSystem, new Path(logPathIncompleted))
      assertAppropriateReader(readerOpt)
      val reader = readerOpt.get
      verifyReader(reader, new Path(logPathIncompleted), codecShortName, isCompleted = false)

      writer.stop()

      // After stop() the log is finalized; the reader must report completion.
      val logPathCompleted = getCurrentLogPath(writer.logPath, isCompleted = true)
      val readerOpt2 = EventLogFileReader(fileSystem, new Path(logPathCompleted))
      assertAppropriateReader(readerOpt2)
      val reader2 = readerOpt2.get
      verifyReader(reader2, new Path(logPathCompleted), codecShortName, isCompleted = true)
    }
  }

  // Factory for the writer implementation under test.
  protected def createWriter(
      appId: String,
      appAttemptId : Option[String],
      logBaseDir: URI,
      sparkConf: SparkConf,
      hadoopConf: Configuration): EventLogFileWriter

  // Maps the writer's logical log path to the physical path for the given state.
  protected def getCurrentLogPath(logPath: String, isCompleted: Boolean): String

  // Checks that the reader implementation matches the writer under test.
  protected def assertAppropriateReader(actualReader: Option[EventLogFileReader]): Unit

  // Verifies reader metadata (sizes, completion flag, listing, zip output) for `logPath`.
  protected def verifyReader(
      reader: EventLogFileReader,
      logPath: Path,
      compressionCodecShortName: Option[String],
      isCompleted: Boolean): Unit
}
/** Runs the shared reader tests against the single-file event log layout. */
class SingleFileEventLogFileReaderSuite extends EventLogFileReadersSuite {
  override protected def createWriter(
      appId: String,
      appAttemptId: Option[String],
      logBaseDir: URI,
      sparkConf: SparkConf,
      hadoopConf: Configuration): EventLogFileWriter = {
    new SingleEventLogFileWriter(appId, appAttemptId, logBaseDir, sparkConf, hadoopConf)
  }

  override protected def assertAppropriateReader(actualReader: Option[EventLogFileReader]): Unit = {
    assert(actualReader.isDefined, s"Expected an EventLogReader instance but was empty")
    assert(actualReader.get.isInstanceOf[SingleFileEventLogFileReader],
      s"Expected SingleFileEventLogReader but was ${actualReader.get.getClass}")
  }

  override protected def getCurrentLogPath(logPath: String, isCompleted: Boolean): String = {
    // In-progress single-file logs carry the ".inprogress" suffix.
    if (!isCompleted) logPath + EventLogFileWriter.IN_PROGRESS else logPath
  }

  override protected def verifyReader(
      reader: EventLogFileReader,
      logPath: Path,
      compressionCodecShortName: Option[String],
      isCompleted: Boolean): Unit = {
    val status = fileSystem.getFileStatus(logPath)
    assert(status.isFile)

    // A single-file log has no index and exactly one listed file.
    assert(reader.rootPath === fileSystem.makeQualified(logPath))
    assert(reader.lastIndex.isEmpty)
    assert(reader.fileSizeForLastIndex === status.getLen)
    assert(reader.completed === isCompleted)
    assert(reader.modificationTime === status.getModificationTime)
    assert(reader.listEventLogFiles.length === 1)
    assert(reader.listEventLogFiles.map(_.getPath.toUri.getPath) ===
      Seq(logPath.toUri.getPath))
    assert(reader.compressionCodec === compressionCodecShortName)
    assert(reader.totalSize === status.getLen)

    // Zip the log in memory and verify the archive holds exactly one entry
    // whose content matches the on-disk file byte-for-byte.
    val underlyingStream = new ByteArrayOutputStream()
    Utils.tryWithResource(new ZipOutputStream(underlyingStream)) { os =>
      reader.zipEventLogFiles(os)
    }

    Utils.tryWithResource(new ZipInputStream(
      new ByteArrayInputStream(underlyingStream.toByteArray))) { is =>
      var entry = is.getNextEntry
      assert(entry != null)
      val actual = new String(ByteStreams.toByteArray(is), StandardCharsets.UTF_8)
      val expected = Files.toString(new File(logPath.toString), StandardCharsets.UTF_8)
      assert(actual === expected)
      assert(is.getNextEntry === null)
    }
  }
}
/** Runs the shared reader tests against the rolling (multi-file) event log layout. */
class RollingEventLogFilesReaderSuite extends EventLogFileReadersSuite {
  allCodecs.foreach { codecShortName =>
    test(s"rolling event log files - codec $codecShortName") {
      val appId = getUniqueApplicationId
      val attemptId = None

      val conf = getLoggingConf(testDirPath, codecShortName)
      conf.set(EVENT_LOG_ENABLE_ROLLING, true)
      conf.set(EVENT_LOG_ROLLING_MAX_FILE_SIZE.key, "10m")

      val writer = createWriter(appId, attemptId, testDirPath.toUri, conf,
        SparkHadoopUtil.get.newConfiguration(conf))
      writer.start()

      // write log more than 20m (intended to roll over to 3 files)
      val dummyStr = "dummy" * 1024
      writeTestEvents(writer, dummyStr, 1024 * 1024 * 20)

      val logPathIncompleted = getCurrentLogPath(writer.logPath, isCompleted = false)
      val readerOpt = EventLogFileReader(fileSystem,
        new Path(logPathIncompleted))
      verifyReader(readerOpt.get, new Path(logPathIncompleted), codecShortName, isCompleted = false)
      assert(readerOpt.get.listEventLogFiles.length === 3)

      writer.stop()

      // Completion must not change the number of rolled files.
      val logPathCompleted = getCurrentLogPath(writer.logPath, isCompleted = true)
      val readerOpt2 = EventLogFileReader(fileSystem, new Path(logPathCompleted))
      verifyReader(readerOpt2.get, new Path(logPathCompleted), codecShortName, isCompleted = true)
      assert(readerOpt2.get.listEventLogFiles.length === 3)
    }
  }

  override protected def createWriter(
      appId: String,
      appAttemptId: Option[String],
      logBaseDir: URI,
      sparkConf: SparkConf,
      hadoopConf: Configuration): EventLogFileWriter = {
    new RollingEventLogFilesWriter(appId, appAttemptId, logBaseDir, sparkConf, hadoopConf)
  }

  override protected def assertAppropriateReader(actualReader: Option[EventLogFileReader]): Unit = {
    assert(actualReader.isDefined, s"Expected an EventLogReader instance but was empty")
    assert(actualReader.get.isInstanceOf[RollingEventLogFilesFileReader],
      s"Expected RollingEventLogFilesReader but was ${actualReader.get.getClass}")
  }

  // Rolling logs live in a directory whose path does not change on completion.
  override protected def getCurrentLogPath(logPath: String, isCompleted: Boolean): String = logPath

  override protected def verifyReader(
      reader: EventLogFileReader,
      logPath: Path,
      compressionCodecShortName: Option[String],
      isCompleted: Boolean): Unit = {
    import RollingEventLogFilesWriter._

    val status = fileSystem.getFileStatus(logPath)
    assert(status.isDirectory)

    // Event files are ordered by their rolling index embedded in the file name.
    val statusInDir = fileSystem.listStatus(logPath)
    val eventFiles = statusInDir.filter(isEventLogFile).sortBy { s => getIndex(s.getPath.getName) }
    assert(eventFiles.nonEmpty)
    val lastEventFile = eventFiles.last
    val allLen = eventFiles.map(_.getLen).sum

    assert(reader.rootPath === fileSystem.makeQualified(logPath))
    assert(reader.lastIndex === Some(getIndex(lastEventFile.getPath.getName)))
    assert(reader.fileSizeForLastIndex === lastEventFile.getLen)
    assert(reader.completed === isCompleted)
    assert(reader.modificationTime === lastEventFile.getModificationTime)
    assert(reader.listEventLogFiles.length === eventFiles.length)
    assert(reader.listEventLogFiles.map(_.getPath) === eventFiles.map(_.getPath))
    assert(reader.compressionCodec === compressionCodecShortName)
    assert(reader.totalSize === allLen)

    // Zip the whole log directory in memory and verify every file round-trips.
    val underlyingStream = new ByteArrayOutputStream()
    Utils.tryWithResource(new ZipOutputStream(underlyingStream)) { os =>
      reader.zipEventLogFiles(os)
    }

    Utils.tryWithResource(new ZipInputStream(
      new ByteArrayInputStream(underlyingStream.toByteArray))) { is =>
      val entry = is.getNextEntry
      assert(entry != null)

      // directory
      assert(entry.getName === logPath.getName + "/")

      val allFileNames = fileSystem.listStatus(logPath).map(_.getPath.getName).toSet

      // Walk the remaining entries; each must correspond to a file in the log
      // directory with identical content, and none may be missing or extra.
      var count = 0
      var noMoreEntry = false
      while (!noMoreEntry) {
        val entry = is.getNextEntry
        if (entry == null) {
          noMoreEntry = true
        } else {
          count += 1

          assert(entry.getName.startsWith(logPath.getName + "/"))
          val fileName = entry.getName.stripPrefix(logPath.getName + "/")
          assert(allFileNames.contains(fileName))

          val actual = new String(ByteStreams.toByteArray(is), StandardCharsets.UTF_8)
          val expected = Files.toString(new File(logPath.toString, fileName),
            StandardCharsets.UTF_8)
          assert(actual === expected)
        }
      }

      assert(count === allFileNames.size)
    }
  }
}
| jkbradley/spark | core/src/test/scala/org/apache/spark/deploy/history/EventLogFileReadersSuite.scala | Scala | apache-2.0 | 13,567 |
package slamdata.engine.physical.mongodb
import slamdata.engine._
import scalaz._
import Scalaz._
import collection.immutable.ListMap
import org.specs2.mutable._
/** Verifies that [[Selector]] expressions render to the expected BSON documents. */
class FindQuerySpec extends Specification {

  // Test-local sugar: allow plain Ints where Bson values are expected and
  // plain Strings where field names are expected.
  implicit def toBson(x: Int) = Bson.Int32(x)
  implicit def toField(name: String) = BsonField.Name(name)

  "SelectorExpr" should {

    import Selector._

    "render simple expr" in {
      Expr(Lt(10)).bson must_== Bson.Doc(ListMap("$lt" -> 10))
    }

    "render $not expr" in {
      NotExpr(Lt(10)).bson must_== Bson.Doc(ListMap("$not" -> Bson.Doc(ListMap("$lt" -> 10))))
    }

    "render simple selector" in {
      val sel = Doc(BsonField.Name("foo") -> Gt(10))
      sel.bson must_== Bson.Doc(ListMap("foo" -> Bson.Doc(ListMap("$gt" -> 10))))
    }

    "render simple selector with path" in {
      // Nested path segments render dotted: foo.3.bar
      val sel = Doc(
        BsonField.Name("foo") \\ BsonField.Index(3) \\ BsonField.Name("bar") -> Gt(10)
      )
      sel.bson must_== Bson.Doc(ListMap("foo.3.bar" -> Bson.Doc(ListMap("$gt" -> 10))))
    }

    "render flattened $and" in {
      val cs = And(
        Doc(BsonField.Name("foo") -> Gt(10)),
        And(
          Doc(BsonField.Name("foo") -> Lt(20)),
          Doc(BsonField.Name("foo") -> Neq(15))
        )
      )
      cs.bson must_==
        Bson.Doc(ListMap("$and" -> Bson.Arr(List(
          Bson.Doc(ListMap("foo" -> Bson.Doc(ListMap("$gt" -> 10)))),
          Bson.Doc(ListMap("foo" -> Bson.Doc(ListMap("$lt" -> 20)))),
          Bson.Doc(ListMap("foo" -> Bson.Doc(ListMap("$ne" -> 15))))
        ))))
    }

    "define nested $and and $or" in {
      val cs =
        Or(
          And(
            Doc(BsonField.Name("foo") -> Gt(10)),
            Doc(BsonField.Name("foo") -> Lt(20))
          ),
          And(
            Doc(BsonField.Name("bar") -> Gte(1)),
            Doc(BsonField.Name("bar") -> Lte(5))
          )
        )

      // Fixed: this test previously asserted nothing (`1 must_== 1`) and never
      // used `cs`. At minimum, rendering the nested selector must succeed; the
      // exact document shape is not pinned here (TODO: assert the expected
      // $or/$and rendering once it is specified).
      cs.bson must not(throwA[Exception])
    }
  }
}
| mossprescott/quasar | src/test/scala/slamdata/engine/physical/mongodb/findquery.scala | Scala | agpl-3.0 | 1,962 |
package com.github.j5ik2o.chatwork.infrastructure.api.my
import com.github.j5ik2o.chatwork.infrastructure.api.ClientFactory
import com.github.j5ik2o.chatwork.infrastructure.api.me.MeApiService
import org.specs2.mutable.Specification
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
/**
 * Integration spec for [[TaskApiService]].
 *
 * NOTE(review): this test talks to the live api.chatwork.com endpoint, so it
 * requires network access and valid credentials wired into [[ClientFactory]].
 */
class TaskApiServiceImplSpec extends Specification {

  // Fixed: the spec descriptions were empty strings, which render as blank
  // entries in specs2 reports and make failures impossible to identify.
  "TaskApiService" should {
    "list tasks for the authenticated account" in {
      val client = ClientFactory.create("api.chatwork.com")
      val meApi = MeApiService(client)
      val taskApi = TaskApiService(client)
      val f = for {
        me <- meApi.get
        tasks <- taskApi.list(TaskParams())
      } yield {
        println(me, tasks)
      }
      // Await.result re-throws any failure, so an API error fails the test here.
      Await.result(f, Duration.Inf)
      true must beTrue
    }
  }
}
| j5ik2o/chatwork-client | src/test/scala/com/github/j5ik2o/chatwork/infrastructure/api/my/TaskApiServiceImplSpec.scala | Scala | apache-2.0 | 799 |
package at.forsyte.apalache.tla.bmcmt.rewriter
/**
* Configuration options for SymbStateRewriter, see tuning.md.
*
* @author Igor Konnov
*/
class RewriterConfig {
  /**
    * If true, translate 'or' and 'and' into 'if-then-else'.
    */
  var shortCircuit = true

  /**
    * If true, for A /\ B, check satisfiability of A with SMT and only if it is true, rewrite B.
    */
  var lazyCircuit = false
}

object RewriterConfig {
  /**
    * Construct config from a map of string, e.g., produced by tuning.properties
    * @param options a map of strings
    * @return a new config
    */
  def apply(options: Map[String, String]): RewriterConfig = {
    // A flag is enabled only when the key is present and equals "true"
    // (case-insensitively); absent keys disable the flag.
    def enabled(key: String): Boolean =
      options.get(key).exists(_.toLowerCase == "true")

    val config = new RewriterConfig
    config.shortCircuit = enabled("rewriter.shortCircuit")
    config.lazyCircuit = enabled("rewriter.lazyCircuit")
    config
  }
}
/*
* Copyright 2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.core.data
import org.apache.accumulo.core.client.{BatchWriterConfig, Connector}
import org.apache.accumulo.core.data.{Mutation, Range, Value}
import org.apache.accumulo.core.security.ColumnVisibility
import org.apache.hadoop.io.Text
import org.locationtech.geomesa.core.data.AccumuloBackedMetadata._
import org.locationtech.geomesa.core.security.AuthorizationsProvider
import org.locationtech.geomesa.core.util.SelfClosingIterator
import scala.collection.JavaConversions._
import scala.collection.mutable
/**
* GeoMesa Metadata/Catalog abstraction using key/value String pairs storing
* them on a per-featurename basis
*/
trait GeoMesaMetadata {

  // Removes all metadata rows for the feature, using `numThreads` for the delete.
  def delete(featureName: String, numThreads: Int)

  // Writes a single key/value metadata pair for the feature.
  def insert(featureName: String, key: String, value: String)

  // Writes several key/value metadata pairs for the feature in one batch.
  def insert(featureName: String, kvPairs: Map[String, String])

  // Writes a key/value pair guarded by the given Accumulo column visibility.
  def insert(featureName: String, key: String, value: String, vis: String)

  // Reads a metadata value, possibly served from an implementation-level cache.
  def read(featureName: String, key: String): Option[String]

  // Like `read`, but fails (implementation-defined) when the key is absent.
  def readRequired(featureName: String, key: String): String

  // Reads a metadata value bypassing any cache.
  // NOTE(review): despite the "Required" in the name this returns an Option
  // and does not fail on absence - confirm whether a rename is warranted.
  def readRequiredNoCache(featureName: String, key: String): Option[String]

  // Drops any cached entries for the feature so later reads hit storage.
  def expireCache(featureName: String)

  // Lists the names of all feature types recorded in the catalog.
  def getFeatureTypes: Array[String]
}
/**
 * [[GeoMesaMetadata]] implementation backed by an Accumulo catalog table.
 * Reads are cached in memory per (featureName, key); writes go through a
 * single-threaded BatchWriter.
 *
 * NOTE(review): the `writeVisibilities` constructor parameter is not used by
 * any method in this class - confirm whether it should guard the 3-arg inserts.
 */
class AccumuloBackedMetadata(connector: Connector,
                             catalogTable: String,
                             writeVisibilities: String,
                             authorizationsProvider: AuthorizationsProvider) extends GeoMesaMetadata {

  // Thread-safe cache of metadata lookups, keyed by (featureName, key).
  // NOTE(review): mutable.SynchronizedMap is deprecated; left as-is here to
  // avoid changing locking semantics in this fix.
  private val metaDataCache = new mutable.HashMap[(String, String), Option[String]]()
    with mutable.SynchronizedMap[(String, String), Option[String]]

  // TODO memory should be configurable
  private val metadataBWConfig =
    new BatchWriterConfig().setMaxMemory(10000L).setMaxWriteThreads(1)

  /**
   * Handles creating a mutation for writing metadata
   *
   * @param featureName
   * @return
   */
  private def getMetadataMutation(featureName: String) = new Mutation(getMetadataRowKey(featureName))

  /**
   * Handles encoding metadata into a mutation.
   *
   * @param featureName
   * @param mutation
   * @param key
   * @param value
   */
  private def putMetadata(featureName: String,
                          mutation: Mutation,
                          key: String,
                          value: String) {
    mutation.put(new Text(key), EMPTY_COLQ, new Value(value.getBytes))
    // also pre-fetch into the cache
    if (!value.isEmpty) {
      metaDataCache.put((featureName, key), Some(value))
    }
  }

  /**
   * Handles writing mutations
   *
   * @param mutations
   */
  private def writeMutations(mutations: Mutation*): Unit = {
    val writer = connector.createBatchWriter(catalogTable, metadataBWConfig)
    for (mutation <- mutations) {
      writer.addMutation(mutation)
    }
    writer.flush()
    writer.close()
  }

  /**
   * Handles deleting metadata from the catalog by using the Range obtained from the METADATA_TAG and featureName
   * and setting that as the Range to be handled and deleted by Accumulo's BatchDeleter
   *
   * @param featureName the name of the table to query and delete from
   * @param numThreads the number of concurrent threads to spawn for querying
   */
  override def delete(featureName: String, numThreads: Int): Unit = {
    val range = new Range(getMetadataRowKey(featureName))
    val deleter = connector.createBatchDeleter(catalogTable,
                                               authorizationsProvider.getAuthorizations,
                                               numThreads,
                                               metadataBWConfig)
    deleter.setRanges(List(range))
    deleter.delete()
    deleter.close()
  }

  /**
   * Creates the row id for a metadata entry
   *
   * @param featureName
   * @return
   */
  private def getMetadataRowKey(featureName: String) = new Text(METADATA_TAG + "_" + featureName)

  /**
   * Reads metadata from cache or scans if not available
   *
   * @param featureName
   * @param key
   * @return
   */
  override def read(featureName: String, key: String): Option[String] =
    metaDataCache.getOrElseUpdate((featureName, key), readRequiredNoCache(featureName, key))

  override def readRequired(featureName: String, key: String): String =
    read(featureName, key)
      .getOrElse(throw new RuntimeException(s"Unable to find required metadata property for key $key"))

  /**
   * Gets metadata by scanning the table, without the local cache
   *
   * Read metadata using scheme: ~METADATA_featureName metadataFieldName: insertionTimestamp metadataValue
   *
   * @param featureName
   * @param key
   * @return
   */
  override def readRequiredNoCache(featureName: String, key: String): Option[String] = {
    val scanner = createCatalogScanner
    scanner.setRange(new Range(getMetadataRowKey(featureName)))
    scanner.fetchColumn(new Text(key), EMPTY_COLQ)

    SelfClosingIterator(scanner).map(_.getValue.toString).toList.headOption
  }

  /**
   * Create an Accumulo Scanner to the Catalog table to query Metadata for this store
   */
  private def createCatalogScanner = connector.createScanner(catalogTable, authorizationsProvider.getAuthorizations)

  override def expireCache(featureName: String) =
    metaDataCache.keys
      .filter { case (fn, cf) => fn == featureName }
      .foreach(metaDataCache.remove)

  override def insert(featureName: String, key: String, value: String) =
    insert(featureName, Map(key -> value))

  override def insert(featureName: String, kvPairs: Map[String, String]) = {
    val mutation = getMetadataMutation(featureName)
    kvPairs.foreach { case (k,v) =>
      putMetadata(featureName, mutation, k, v)
    }
    writeMutations(mutation)
  }

  override def insert(featureName: String, key: String, value: String, vis: String) = {
    val mutation = getMetadataMutation(featureName)
    // Fixed: previously this wrote `vis.getBytes` as the cell value, silently
    // dropping `value` and storing the visibility string instead.
    // NOTE(review): unlike putMetadata, this variant does not pre-populate the
    // cache - confirm whether that asymmetry is intentional.
    mutation.put(new Text(key), EMPTY_COLQ, new ColumnVisibility(vis), new Value(value.getBytes))
    writeMutations(mutation)
  }

  /**
   * Scans metadata rows and pulls out the different feature types in the table
   *
   * @return
   */
  override def getFeatureTypes: Array[String] = {
    val scanner = createCatalogScanner
    scanner.setRange(new Range(METADATA_TAG, METADATA_TAG_END))
    // restrict to just schema cf so we only get 1 hit per feature
    scanner.fetchColumnFamily(new Text(SCHEMA_KEY))

    // Iterator wrapper that closes the scanner once it is exhausted.
    val resultItr = new Iterator[String] {
      val src = scanner.iterator()

      def hasNext = {
        val next = src.hasNext
        if (!next) {
          scanner.close()
        }
        next
      }

      def next() = src.next().getKey.getRow.toString
    }
    resultItr.toArray.map(getFeatureNameFromMetadataRowKey)
  }

  /**
   * Reads the feature name from a given metadata row key
   *
   * @param rowKey
   * @return
   */
  private def getFeatureNameFromMetadataRowKey(rowKey: String): String = {
    val MetadataRowKeyRegex(featureName) = rowKey
    featureName
  }
}
object AccumuloBackedMetadata {
  // Extracts the feature name from a metadata row key of the form
  // "<METADATA_TAG>_<featureName>".
  val MetadataRowKeyRegex = s"${METADATA_TAG}_(.*)".r
}
| drmathochist/geomesa | geomesa-core/src/main/scala/org/locationtech/geomesa/core/data/GeoMesaMetadata.scala | Scala | apache-2.0 | 7,666 |
package scorex.settings
import java.net.{InetAddress, InetSocketAddress}
import play.api.libs.json.{JsObject, Json}
import scorex.crypto.Base58
import scorex.utils.ScorexLogging
import scala.util.Try
/**
* Changeable settings here
*/
trait Settings extends ScorexLogging {
  lazy val Port = 9084

  // Path of the JSON configuration file, supplied by the concrete application.
  val filename: String

  // Parsed settings: tries the file on disk first, then the bundled classpath
  // resource; exits the process (code 10) if neither can be read or parsed.
  lazy val settingsJSON: JsObject = Try {
    val jsonString = scala.io.Source.fromFile(filename).mkString
    Json.parse(jsonString).as[JsObject]
  }.recoverWith { case t =>
    Try {
      val jsonString = scala.io.Source.fromURL(getClass.getResource(s"/$filename")).mkString
      Json.parse(jsonString).as[JsObject]
    }
  }.getOrElse {
    log.error(s"Unable to read $filename, closing")
    //catch error?
    System.exit(10)
    Json.obj()
  }

  // Creates the directory (and parents) if needed, reporting whether it exists.
  private def directoryEnsuring(dirPath: String): Boolean = {
    val f = new java.io.File(dirPath)
    f.mkdirs()
    f.exists()
  }

  // Peers from config, excluding this host; all addressed on the default Port.
  lazy val knownPeers = Try {
    (settingsJSON \ "knownpeers").as[List[String]].flatMap { addr =>
      val inetAddress = InetAddress.getByName(addr)
      if (inetAddress == InetAddress.getLocalHost) None else Some(new InetSocketAddress(inetAddress, Port))
    }
  }.getOrElse(Seq[InetSocketAddress]())

  lazy val maxConnections = (settingsJSON \ "maxconnections").asOpt[Int].getOrElse(DefaultMaxConnections)
  lazy val connectionTimeout = (settingsJSON \ "connectiontimeout").asOpt[Int].getOrElse(DefaultConnectionTimeout)
  lazy val rpcPort = (settingsJSON \ "rpcport").asOpt[Int].getOrElse(DefaultRpcPort)

  // Hosts allowed to use the RPC API.
  // Fixed: the fallback previously used DefaultRpcAllowed.split(""), which
  // splits "127.0.0.1" into single characters; split(",") yields the intended
  // whitelist (and supports a comma-separated default).
  lazy val rpcAllowed: Seq[String] =
    (settingsJSON \ "rpcallowed").asOpt[List[String]].getOrElse(DefaultRpcAllowed.split(",").toSeq)

  lazy val pingInterval = (settingsJSON \ "pinginterval").asOpt[Int].getOrElse(DefaultPingInterval)
  lazy val offlineGeneration = (settingsJSON \ "offline-generation").asOpt[Boolean].getOrElse(false)
  lazy val bindAddress = (settingsJSON \ "bindAddress").asOpt[String].getOrElse(DefaultBindAddress)

  // Wallet directory is created on access when configured; `ensuring` aborts
  // if the directory cannot be created.
  lazy val walletDirOpt = (settingsJSON \ "walletdir").asOpt[String]
    .ensuring(pathOpt => pathOpt.map(directoryEnsuring).getOrElse(true))
  lazy val walletPassword = (settingsJSON \ "walletpassword").as[String]
  lazy val walletSeed = Base58.decode((settingsJSON \ "walletseed").as[String])

  //NETWORK
  private val DefaultMaxConnections = 20
  private val DefaultConnectionTimeout = 60
  private val DefaultPingInterval = 30000
  private val DefaultBindAddress = "127.0.0.1"

  val MaxBlocksChunks = 5

  //API
  private val DefaultRpcPort = 9085
  private val DefaultRpcAllowed = "127.0.0.1"
}
package net.tomasherman.specus.server.api.net
import org.specs2.mutable.Specification
import org.jboss.netty.buffer.ChannelBuffers.dynamicBuffer
import org.jboss.netty.buffer.ChannelBuffers.buffer
import net.tomasherman.specus.server.api.net.DecodingUtils._
import org.jboss.netty.util.CharsetUtil
import java.nio.charset.Charset
import org.jboss.netty.buffer.ChannelBuffer
import net.tomasherman.specus.common.api.net._
/**
* This file is part of Specus.
*
* Specus is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Specus is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with Specus. If not, see <http://www.gnu.org/licenses/>.
*
*/
/**
 * Verifies that DecodingUtils reads back exactly the primitive values,
 * strings, slots and entity metadata written into a netty ChannelBuffer.
 */
class DecodingUtilsSpec extends Specification {
  "DecodingUtils" should {
    "decode byte" in {
      val b = buffer(1)
      val v = 0x7A.toByte
      b.writeByte(v)
      decodeByte(b) must_== v
    }
    "decode short" in {
      val b = buffer(2)
      val v = 0x7A7A.toShort
      b.writeShort(v)
      decodeShort(b) must_== v
    }
    "decode int" in {
      val b = buffer(4)
      val v = 0x7A7A7A7A.toInt
      b.writeInt(v)
      decodeInt(b) must_== v
    }
    "decode long" in {
      val b = buffer(8)
      val v = 0x0011223344l.toLong
      b.writeLong(v)
      decodeLong(b) must_== v
    }
    // Fixed: test description typo "flaot" -> "float".
    "decode float" in {
      val b = buffer(4)
      val v = 0x00112233f.toFloat
      b.writeFloat(v)
      decodeFloat(b) must_== v
    }
    "decode double" in {
      val b = buffer(8)
      val v = 0x00112233d.toDouble
      b.writeDouble(v)
      decodeDouble(b) must_== v
    }
    // Only 0x00 and 0x01 are valid boolean encodings; anything else must fail.
    "decode Boolean" in {
      val b = buffer(3)
      b.writeByte(0x01)
      b.writeByte(0x00)
      b.writeByte(0x03)
      decodeBoolean(b) must_== true
      decodeBoolean(b) must_== false
      decodeBoolean(b) must throwA[DecodingErrorException]
    }
    "decode String 16" in {
      val v = String16("The cake is a lie!")
      val b = dynamicBuffer(2)
      writeString(v,b,CharsetUtil.UTF_16BE)
      decodeString16(b) must_== v
    }
    "decode String 8" in {
      val v = String8("You dangerous, mute lunetic!")
      val b = dynamicBuffer(2)
      writeString(v,b,CharsetUtil.UTF_8)
      decodeString8(b) must_== v
    }
    // Several values written back-to-back must decode in order.
    "decode multiple" in {
      val v1 = 0xAA.toByte
      val v2 = String16("Hello there, friend.")
      val v3 = 0xCAFEBABE.toDouble
      val b = dynamicBuffer(2)
      b.writeByte(v1)
      writeString(v2,b,CharsetUtil.UTF_16BE)
      b.writeDouble(v3)
      decodeByte(b) must_== v1
      decodeString16(b) must_== v2
      decodeDouble(b) must_== v3
    }
    // A slot id of -1 denotes an empty slot; otherwise id/count/damage follow.
    "decode slot" in {
      val b = dynamicBuffer(10)
      b.writeShort(-1)
      decodeSlot(b) must_== Empty
      val bb = dynamicBuffer(10)
      val x: Short = 123
      val y: Byte = 121
      val z: Short = 111
      bb.writeShort(x)
      bb.writeByte(y)
      bb.writeShort(z)
      decodeSlot(bb) must_== SimpleEntity(x,y,z)
    }
    "decode Metadata" in {
      val v0 = 0xAA.toByte
      val v1 = 0xAAAA.toShort
      val v2 = 0xAAAAAAAA
      val v3 = 0xABCDf
      val v4 = String16("And now for something completely different")
      val v5 = (0xCAFE.toShort,0xAA.toByte,0xBABE.toShort)
      val v6 = (0xAA,0xBB,0xCC)
      val b = dynamicBuffer(20)
      // metadata id numbers are so weird because the id is actually being bit-shifted 5 times to the right ... therefore 4 actually needs to be 0x8whatever, or
      // 0x9(whatever), depending on what is the fifth bit of the byte
      b.writeByte(0x0F)
      b.writeByte(v0)
      b.writeByte(0x2F)
      b.writeShort(v1)
      b.writeByte(0x4F)
      b.writeInt(v2)
      b.writeByte(0x6F)
      b.writeFloat(v3)
      b.writeByte(0x8F)
      writeString(v4,b,CharsetUtil.UTF_16BE)
      b.writeByte(0xAF)
      b.writeShort(v5._1);b.writeByte(v5._2);b.writeShort(v5._3)
      b.writeByte(0xCF)
      b.writeInt(v6._1);b.writeInt(v6._2);b.writeInt(v6._3)
      // 0x7F terminates the metadata stream.
      b.writeByte(0x7F)
      val res = decodeMetadata(b)
      res must_== List[Metadata](MByte(v0),MShort(v1),MInt(v2),MFloat(v3),MStringWrapper(v4),MSBS(v5._1,v5._2,v5._3),MIII(v6._1,v6._2,v6._3))
    }
  }

  // Mirrors the wire format the decoder expects: String16 is length-prefixed
  // UTF-16BE chars, String8 is length-prefixed raw bytes.
  private def writeString(s:StringWrapper,b:ChannelBuffer,c:Charset){
    s match {
      case String16(x) => {
        b.writeShort(x.length)
        x.foreach(b.writeChar(_))
      }
      case String8(x) => {
        b.writeShort(x.getBytes.length)
        b.writeBytes(x.getBytes)
      }
    }
  }
}
/*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.flaminem.flamy.parsing.hive
import com.flaminem.flamy.conf.FlamyContext
import com.flaminem.flamy.model.Variables
import com.flaminem.flamy.model.names.TableName
import com.flaminem.flamy.model.partitions.transformation._
import org.scalatest.FreeSpec
/**
* Created by fpin on 8/15/16.
*/
class AnnotationParser$Test extends FreeSpec {

  // Context pointing at an empty model directory: the parser under test does
  // not need real table definitions here.
  implicit val context = new FlamyContext("flamy.model.dir.paths" -> "src/test/resources/empty_test")

  "a text with IGNORE partition transformation should be OK" in {
    val text =
      """
        |@regen(
        |  IGNORE db1.source
        |  ;
        |)
        |INSERT OVERWRITE TABLE db2.dest PARTITION(part2)
        |SELECT id, part1
        |FROM db1.source
        |;
      """.stripMargin
    // Exactly one annotation is expected; the irrefutable Seq pattern asserts arity.
    val Seq(actual: Annotation) = AnnotationParser.parseText(text, new Variables(), isView = false)
    assert( actual.isInstanceOf[Ignore] )
    assert( actual.table === TableName("db1.source") )
  }

  "a text with IGNORE TIMESTAMP partition transformation should be OK" in {
    val text =
      """
        |@regen(
        |  IGNORE TIMESTAMP db1.source
        |  ;
        |)
        |INSERT OVERWRITE TABLE db2.dest PARTITION(part2)
        |SELECT id, part1
        |FROM db1.source
        |;
      """.stripMargin
    // Same shape as above but the TIMESTAMP keyword must yield IgnoreTimestamp.
    val Seq(actual: Annotation) = AnnotationParser.parseText(text, new Variables(), isView = false)
    assert( actual.isInstanceOf[IgnoreTimestamp] )
    assert( actual.table === TableName("db1.source") )
  }

}
| flaminem/flamy | src/test/scala/com/flaminem/flamy/parsing/hive/AnnotationParser$Test.scala | Scala | apache-2.0 | 2,069 |
package scife
package enumeration
package iterable
package testcase
import scife.{ enumeration => e }
import e.iterable._
import e.dependent._
import scife.util.logging._
import scife.util._
import org.scalatest._
import org.scalatest.prop._
import org.scalacheck.Gen
import scala.language.postfixOps
class BinarySearchTreeEnum extends FunSuite with Matchers
  with GeneratorDrivenPropertyChecks with HasLogger with ProfileLogger {

  import common._
  import enumdef.lazytest._
  import scife.util.structures._

  test("correctness of enumeration") {
    val enum = BinarySearchTreeEnum.constructEnumerator

    // Adapt the lazy-tree enumerator to regular BSTs so the shared
    // correctness helper can validate the enumerated structures.
    BinarySearchTreeTestHelper.testCorrectness( Depend.fin {
      in: (Int, Range) =>
        enum(in) map LazyBSTrees.toRegularBSTTree
    })
  }

  test("resetting enumerator in the middle") {
    val depEnum = BinarySearchTreeEnum.constructEnumerator

    val enum = depEnum(10, 1 to 10)

    // Materialize the first half of the enumeration...
    val halfOfTheEnum =
      for (ind <- 0 until enum.size/2)
        yield enum(ind)

    enum.reset

    // ...then verify that after reset the same indexes yield identical elements.
    for ((el, ind) <- halfOfTheEnum.zipWithIndex)
      enum(ind) should be (el)
  }

}
| kaptoxic/SciFe | src/test/scala/scife/enumeration/iterable/testcase/BinarySearchTreeEnum.scala | Scala | gpl-2.0 | 1,119 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import org.apache.spark.sql.catalyst.catalog.{CatalogTestUtils, ExternalCatalog, SessionCatalogSuite}
import org.apache.spark.sql.hive.test.TestHiveSingleton
/**
 * Re-runs the shared `SessionCatalogSuite` tests against a Hive-backed
 * `ExternalCatalog` (instead of the default in-memory catalog).
 */
class HiveExternalSessionCatalogSuite extends SessionCatalogSuite with TestHiveSingleton {
  // Tells the shared suite that assertions specific to the Hive catalog apply.
  protected override val isHiveExternalCatalog = true
  private val externalCatalog = {
    val catalog = spark.sharedState.externalCatalog
    // Reset the shared Hive client so state left over from previously-run
    // suites does not leak into these tests.
    catalog.asInstanceOf[HiveExternalCatalog].client.reset()
    catalog
  }
  // Fixture factory used by the parent suite; configured with Hive's
  // sequence-file formats and the "hive" provider.
  protected val utils = new CatalogTestUtils {
    override val tableInputFormat: String = "org.apache.hadoop.mapred.SequenceFileInputFormat"
    override val tableOutputFormat: String =
      "org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat"
    override val defaultProvider: String = "hive"
    override def newEmptyCatalog(): ExternalCatalog = externalCatalog
  }
}
}
| aokolnychyi/spark | sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalSessionCatalogSuite.scala | Scala | apache-2.0 | 1,686 |
package com.daxin
/**
 * Demonstrates primary-constructor parameter visibility in Scala: `name` and
 * `age` are declared with `var`, so the compiler generates public getters and
 * setters for them. `address` carries neither `val` nor `var`, which makes it
 * an effectively `private[this] val` field, reachable from outside only via
 * the explicit `getAddress` accessor.
 */
class Dog(var name: String, var age: Int, address: String) {

  /**
   * Auxiliary constructor taking only a name; age and address get fixed
   * defaults. Every auxiliary constructor must begin by delegating (directly
   * or indirectly) to the primary constructor.
   */
  def this(name: String) = this(name, 18, "China")

  /** Exposes the otherwise-inaccessible `address` constructor parameter. */
  def getAddress(): String = address

  /** No-argument auxiliary constructor with fixed default values. */
  def this() = this("Maomao", 18, "Liaoning")
}
/** Small demo entry point exercising the single-argument constructor. */
object Dog {
  def main(args: Array[String]): Unit = {
    val pet = new Dog("Maomao")
    println(pet.name + " " + pet.age)
    println(pet.getAddress())
  }
}
/*
* Copyright 2020 Lenses.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.lenses.streamreactor.connect.aws.s3.formats
import io.lenses.streamreactor.connect.aws.s3.model.{StringSinkData, StructSinkData}
import io.lenses.streamreactor.connect.aws.s3.sink.utils.TestSampleSchemaAndData._
import io.lenses.streamreactor.connect.aws.s3.storage.S3ByteArrayOutputStream
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
/** Unit tests for the plain-text S3 format writer. */
class TextFormatWriterTest extends AnyFlatSpec with Matchers {

  "convert" should "write byte output stream with text format for a single record" in {
    val stream = new S3ByteArrayOutputStream()
    val writer = new TextFormatWriter(() => stream)

    writer.write(None, StringSinkData("Sausages"), topic)

    stream.toString should be("Sausages\\n")
  }

  "convert" should "write byte output stream with json for multiple records" in {
    val stream = new S3ByteArrayOutputStream()
    val writer = new TextFormatWriter(() => stream)

    // Records are newline-terminated and must appear in write order.
    Seq("Sausages", "Mash", "Peas").foreach { value =>
      writer.write(None, StringSinkData(value), topic)
    }

    stream.toString should be("Sausages\\nMash\\nPeas\\n")
  }

  "convert" should "throw error when avro value is supplied" in {
    val stream = new S3ByteArrayOutputStream()
    val writer = new TextFormatWriter(() => stream)

    // Structured (Avro) values cannot be rendered as plain text.
    assertThrows[IllegalStateException] {
      writer.write(None, StructSinkData(users.head), topic)
    }
  }
}
| datamountaineer/stream-reactor | kafka-connect-aws-s3/src/test/scala/io/lenses/streamreactor/connect/aws/s3/formats/TextFormatWriterTest.scala | Scala | apache-2.0 | 2,167 |
package lettergenerator
package mediator
import formatter.{DocxMakerFormatter,Template,Details}
import generators._
import org.docx4j.openpackaging.io.SaveToZipFile
import org.docx4j.openpackaging.packages.WordprocessingMLPackage
import java.util.{HashMap => JHashMap}
/**
 * Coordinates the generation of .docx letters: formats each details tuple,
 * resolves duplicate file names through the validation mediator, reports
 * progress through the wizard, and delegates document creation to the
 * [[Generator]].
 */
class DocxMediator(gui: renderer.Wizard, template: Template,
  formatter: DocxMakerFormatter, generator: Generator) {

  // When the file-name column is also present in the template it must stay in
  // the substitution map; otherwise it is filtered out before formatting.
  private val columnNameToFilterOut: String =
    if (gui.fnAlsoInTemplate) "" else gui.fNameColumn

  /**
   * Auxiliary constructor that builds a default [[DocxGenerator]] from the
   * template. Bug fix: the supplied (or defaulted) `formatter` is now
   * forwarded to the primary constructor; previously it was silently
   * discarded and a fresh DocxMakerFormatter was always constructed instead.
   */
  def this(gui: renderer.Wizard, template: Template,
    formatter: DocxMakerFormatter = new DocxMakerFormatter) {
    this(gui, template, formatter, new DocxGenerator(template))
  }

  /** Generates one document per details tuple, sharing a single saver. */
  def generateDocx(details: Details, valMed: ValidationMediator)(
    saver: SaveToZipFile = new SaveToZipFile(template.docPack)): Unit = {
    details.tuples.foreach(generateDocx(_, valMed)(saver))
  }

  /**
   * Generates a single document from one details tuple. The target name is
   * derived from the configured file-name column and then passed through
   * `valMed.fileNameIfDuplicate` to avoid clashing with an existing file
   * (presumably by appending a discriminator — confirm in ValidationMediator).
   */
  def generateDocx(detailsTuple: Map[String,String],
    valMed: ValidationMediator)(saver: SaveToZipFile): Unit = {
    val detailsAsJMap = formatter.prepareMap(detailsTuple, columnNameToFilterOut)
    val tempFileName = formatter.fileName(detailsTuple, gui.fNameColumn)
    val finalFileName = valMed.fileNameIfDuplicate(tempFileName, ".docx")
    gui.message(s"Saving $finalFileName ...")
    generator.generate(detailsAsJMap, finalFileName)(saver)
  }
}
| claudiusbr/LetterGenerator | src/main/scala/lettergenerator/mediator/DocxMediator.scala | Scala | mit | 1,394 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.helptosavefrontend.config
import akka.stream.ActorMaterializer
import com.kenshoo.play.metrics.MetricsFilter
import play.api.Configuration
import play.api.mvc.EssentialFilter
import play.filters.csrf.CSRFFilter
import play.filters.headers.SecurityHeadersFilter
import uk.gov.hmrc.helptosavefrontend.controllers.ControllerSpecWithGuiceAppPerTest
import uk.gov.hmrc.integration.servicemanager.ServiceManagerClient.system
import uk.gov.hmrc.play.bootstrap.filters._
import uk.gov.hmrc.play.bootstrap.frontend.filters.crypto.SessionCookieCryptoFilter
import uk.gov.hmrc.play.bootstrap.frontend.filters.deviceid.DeviceIdFilter
import uk.gov.hmrc.play.bootstrap.frontend.filters.{FrontendFilters, _}
/**
 * Verifies that `Filters` installs the allowList filter exactly when the
 * `http-header-ip-allowlist` configuration entry is non-empty.
 */
class FiltersSpec extends ControllerSpecWithGuiceAppPerTest {
  // Can't use scalamock for CacheControlFilter since a logging statement during class
  // construction requires a parameter from the CacheControlConfig. Using scalamock
  // results in a NullPointerException since no CacheControlConfig is there.
  implicit val mat: ActorMaterializer = ActorMaterializer()
  val mockCacheControllerFilter = new CacheControlFilter(CacheControlConfig(), mat)
  val mockMDCFilter = new MDCFilter(fakeApplication.materializer, fakeApplication.configuration, "")
  val mockWhiteListFilter = mock[uk.gov.hmrc.play.bootstrap.frontend.filters.AllowlistFilter]
  val mockSessionIdFilter = mock[SessionIdFilter]
  // Concrete FrontendFilters whose own filter chain is emptied so that only
  // the behaviour of Filters (the class under test) is observed.
  class TestableFrontendFilters
      extends FrontendFilters(
        stub[Configuration],
        stub[LoggingFilter],
        stub[HeadersFilter],
        stub[SecurityHeadersFilter],
        stub[FrontendAuditFilter],
        stub[MetricsFilter],
        stub[DeviceIdFilter],
        stub[CSRFFilter],
        stub[SessionCookieCryptoFilter],
        stub[SessionTimeoutFilter],
        mockCacheControllerFilter,
        mockMDCFilter,
        mockWhiteListFilter,
        mockSessionIdFilter
      ) {
    lazy val enableSecurityHeaderFilter: Boolean = false
    override val filters: Seq[EssentialFilter] = Seq()
  }
  val frontendFilters = new TestableFrontendFilters
  val allowListFilter = mock[AllowListFilter]
  "Filters" must {
    "include the allowList filter if the allowList from config is non empty" in {
      val config = Configuration("http-header-ip-allowlist" → List("1.2.3"))
      val filters = new Filters(config, allowListFilter, frontendFilters)
      filters.filters shouldBe Seq(allowListFilter)
    }
    "not include the allowList filter if the allowList from config is empty" in {
      val config = Configuration("http-header-ip-allowlist" → List())
      val filters = new Filters(config, allowListFilter, frontendFilters)
      filters.filters shouldBe Seq()
    }
  }
}
| hmrc/help-to-save-frontend | test/uk/gov/hmrc/helptosavefrontend/config/FiltersSpec.scala | Scala | apache-2.0 | 3,341 |
package spatial.codegen.chiselgen
import argon.codegen.chiselgen.ChiselCodegen
import argon.core._
import spatial.aliases._
import spatial.nodes._
trait ChiselGenHostTransfer extends ChiselCodegen {
  // Host-transfer nodes are intentionally not emitted by the Chisel backend
  // ("does not belong in chisel"); the override below is kept disabled as a
  // record of the previous pass-through implementation.
  // override protected def emitNode(lhs: Sym[_], rhs: Op[_]): Unit = rhs match {
  //   case _ => super.emitNode(lhs, rhs)
  // }
}
| stanford-ppl/spatial-lang | spatial/core/src/spatial/codegen/chiselgen/ChiselGenHostTransfer.scala | Scala | mit | 368 |
import leon.lang._
import leon.annotation._
import leon.lang.synthesis._
import leon.collection._
import scala.reflect.runtime.universe._
import scala.reflect.api.{TypeCreator, Universe, Mirror}
import scala.collection.immutable.{List => ScalaList, Nil => ScalaNil}
/**
 * Leon synthesis benchmark modelling a tiny line editor driven by an oracle.
 * Lines and commands are represented as List[Int] of character codes; the
 * command stream itself is left as a synthesis hole (see getCommand).
 */
object Editor {
  // Editor mode: actively editing, or terminated by the Quit command.
  abstract class Mode
  case object Edit extends Mode
  case object Quitted extends Mode
  // Immutable editor state; the set*/add* helpers return updated copies.
  case class State(line: List[Int], cursor: Int, buffer: List[Int], actions: List[Action], mode: Mode) {
    def setLine(l: List[Int]) = State(l, cursor, buffer, actions, mode)
    def setCursor(c: Int) = State(line, c, buffer, actions, mode)
    def setBuffer(b: List[Int]) = State(line, cursor, b, actions, mode)
    // Prepends to the history, so actions.head is the most recent action.
    def addAction(a: Action) = State(line, cursor, buffer, Cons(a, actions), mode)
    def setMode(m: Mode) = State(line, cursor, buffer, actions, m)
  }
  // Commands understood by the editor; Content carries literal input text.
  sealed abstract class Action
  case object Unknown extends Action
  case object Write extends Action
  case object Quit extends Action
  case class MoveCursor(to: Int) extends Action
  case object Replace extends Action
  case object Erase extends Action
  case class Content(l: List[Int]) extends Action
  // Former interactive implementation, kept for reference:
  //@extern
  //def getCommand(): List[Int] = {
  //  print("> ")
  //  readLine().toList.map(_.toInt)
  //}
  // Synthesis hole: each command is supplied by the oracle.
  def getCommand()(implicit o: Oracle[List[Int]]): List[Int] = {
    ???
  }
  @extern
  def unknown() = {
    println("?")
  }
  // Debug helper (externally executed, invisible to verification).
  @extern
  def displayState(s: State) = {
    println(" | Line : "+listToString(s.line))
    println(" | Cursor : "+(" "*s.cursor)+"^")
    println(" | Buffer : "+listToString(s.buffer))
    println(" | A* : "+s.actions.collect {
      case Content(l) => "Content("+listToString(l)+")"
      case a => a.toString
    }.mkString(", "))
  }
  @extern
  def display(input: List[Int], a: Action, s: State) = {
    println(" | Input : "+listToString(input))
    println(" | Action : "+a)
    println(" ~~~~~~~~~~~~~~~~~~~")
    displayState(s)
  }
  // One REPL iteration: read a command from the oracle, decode it, apply it.
  // A quitted editor is a fixed point and ignores further steps.
  def replStep(state: State)(implicit o: Oracle[List[Int]]): State = {
    if (state.mode == Quitted) {
      state
    } else {
      val i = getCommand()
      val a = getAction(i, state)
      doAction(state, a)
    }
  }
  // Decodes raw input into an Action. Single-character commands use ASCII
  // codes: 113='q' quit, 100='d' erase, 94='^' to line start, 36='$' to line
  // end, 114='r' replace, 119='w' write. Longer input is literal Content.
  def getAction(input: List[Int], state: State): Action = {
    if (input == Cons(113, Nil())) {
      Quit
    } else if (input == Cons(100, Nil())) {
      Erase
    } else if (input == Cons(94, Nil())) {
      MoveCursor(0)
    } else if (input == Cons(36, Nil())) {
      MoveCursor(-1)
    } else if (input == Cons(114, Nil())) {
      Replace
    } else if (input == Cons(119, Nil())) {
      Write
    } else if (input.size > 1) {
      Content(input)
    } else {
      Unknown
    }
  }
  // Applies an action. Content is interpreted relative to the *previous*
  // action in the history: after Write it is inserted at the cursor, after
  // Replace it overwrites in place; in both cases the cursor moves past it.
  def doAction(state: State, action: Action): State = {
    val c = state.cursor
    val l = state.line
    val ns = (action, state.actions) match {
      case (Content(cnt), Cons(Write, _)) =>
        val nl = l.take(c) ++ cnt ++ l.drop(c)
        state.setLine(nl).setCursor(c + cnt.size)
      case (Content(cnt), Cons(Replace, _)) =>
        val nl = l.take(c) ++ cnt ++ l.drop(c+cnt.size)
        state.setLine(nl).setCursor(c + cnt.size)
      case (MoveCursor(i), _) =>
        // Negative positions count from the end of the line (-1 = past end).
        if (i < 0) {
          state.setCursor(state.line.size+1+i)
        } else {
          state.setCursor(i)
        }
      case (Erase, _) =>
        state.setLine(Nil()).setCursor(0)
      case (Quit, _) =>
        state.setMode(Quitted)
      case (Unknown, _) =>
        //unknown()
        state
      case _ =>
        state
    }
    // The action is always recorded, even when it had no effect.
    ns.addAction(action)
  }
  // Runs three oracle-driven steps from the empty state and requires the
  // result to be the line "aaa" (97 = 'a') in Quitted mode — the oracle must
  // be resolved so that this postcondition holds.
  def repl() = {
    val finalState = {
      withOracle { implicit o: Oracle[List[Int]] =>
        {
          val state = State(Nil(), 0, Nil(), Nil(), Edit)
          val res = replStep(replStep(replStep(state)(o.left))(o.right.left))(o.right.right.left)
          val tmp = displayState(res)
          res
        } ensuring {
          s => s.line == Cons(97, Cons(97, Cons(97, Nil()))) && s.mode == Quitted
        }
      }
    }
    finalState
  }
  // Interop helpers between Leon lists/strings and Scala collections; all
  // are @extern/@ignore so they only exist for the executable wrapper.
  @ignore
  @extern
  implicit def scalaToList[T](l: ScalaList[T]): List[T] = {
    l.foldRight[List[T]](Nil())( (e, l) => Cons(e, l) )
  }
  @ignore
  @extern
  implicit def listToScala[T](l: List[T]): ScalaList[T] = l match {
    case Nil() => ScalaNil
    case Cons(h, t) => h :: listToScala(t)
  }
  @ignore
  @extern
  implicit def listToString(l: List[Int]): String = {
    l.map(_.toChar).mkString("")
  }
  @ignore
  @extern
  def asList(l: String): List[Int] = {
    l.toList.map(_.toInt)
  }
}
| epfl-lara/leon | testcases/extern/EditorSimple.scala | Scala | gpl-3.0 | 4,500 |
/*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.flaminem.flamy.parsing.hive.ast
import com.flaminem.flamy.parsing.hive.HiveParserUtils._
import org.apache.hadoop.hive.ql.parse.ASTNode
/**
 * A [[Rule]] that recursively applies the given rule set to every child of
 * the node, returning the (single) transformed node.
 */
class DefaultTransformer[Context](ruleSet: => RuleSet[Context, Seq[ASTNode]]) extends Rule[Context, Seq[ASTNode]] {
  def apply(pt: ASTNode, context: Context): Seq[ASTNode] = {
    val transformed = pt.transformChildren { child => ruleSet(child, context) }
    Seq(transformed)
  }
}
| flaminem/flamy | src/main/scala/com/flaminem/flamy/parsing/hive/ast/DefaultTransformer.scala | Scala | apache-2.0 | 1,029 |
//-----------------------------------------------------------------------
// FILE : CertificateWriter.scala
// SUBJECT : An object that writes certificates.
// AUTHOR : (C) Copyright 2011 by Peter C. Chapin <PChapin@vtc.vsc.edu>
//
//-----------------------------------------------------------------------
package edu.uvm.sprocket
import java.io._
import edu.uvm.rtadmin
/**
 * Writes the generated certificate definitions into a specialized copy of
 * the Spkt_CertificatesC.nc template.
 */
object CertificateWriter {

  /**
   * Copies the template file into `outputFolder`, replacing each line that
   * contains the %CERTIFICATES% placeholder with the certificates printed by
   * `rtadmin.Main.printCertificates`.
   *
   * Bug fix: both streams are now closed via try/finally, so an I/O error
   * while copying no longer leaks the reader or the writer (the original
   * carried a "TODO: Make this more exception safe" for exactly this).
   */
  def writeAllCertificates(settings: ConfigurationSettings, outputFolder: File) {
    // Fails with a MatchError when the TemplateFolder setting is absent,
    // exactly as the original pattern binding did.
    val Some(templateFolderName) = settings("TemplateFolder")
    val templateFolder = new File(templateFolderName)
    val templateFileName = new File(templateFolder, "Spkt_CertificatesC.nc")
    val outputFileName = new File(outputFolder, "Spkt_CertificatesC.nc")

    val templateFile = new BufferedReader(new FileReader(templateFileName))
    try {
      val outputFile = new BufferedWriter(new FileWriter(outputFileName))
      try {
        var line: String = null
        while ({ line = templateFile.readLine(); line != null }) {
          if (line.contains("%CERTIFICATES%"))
            rtadmin.Main.printCertificates(outputFile)
          else {
            outputFile.write(line)
            outputFile.write("\\n")
          }
        }
      } finally {
        outputFile.close()
      }
    } finally {
      templateFile.close()
    }
  }
}
| pchapin/sprocket | src/edu/uvm/sprocket/CertificateWriter.scala | Scala | bsd-3-clause | 1,324 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters the dataset and retrieves a sample of code snippets matching specific criteria, offering a basic overview of the dataset's content without surfacing deeper insights.