code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
package json.schema.codegen
import json.schema.parser.JsonSchemaParser
import org.scalatest.{FlatSpec, Matchers}
import scalaz.\/-
/** Unit tests for the Scala code generator: each test feeds a JSON-schema document
 * through `JsonSchemaParser` / `ScalaModelGenerator` and asserts on the rendered
 * Scala source (case classes, enumerations, or type aliases). */
class ScalaGeneratorTest extends FlatSpec with Matchers with ScalaGenerator with ConsoleLogging {
// Parses a JSON schema document into the set of model types to generate.
def parse(s: String): SValidation[Set[LangType]] = JsonSchemaParser.parse(s).flatMap(ScalaModelGenerator(_))
// Renders every generated type declaration, newline-joined and trimmed, for easy comparison.
def gen(s: String): SValidation[String] =
parse(s) map { ts =>
ts.map(genTypeDeclaration).mkString("\n").trim
}
// Required properties map to plain fields; optional ones are wrapped in Option[...].
"ScalaGenerator" should "generate type with optional properties" in {
gen("""
|{
| "id": "http://some/product",
|"type":"object",
|"properties": {
|"a":{"type":"string"},
|"b":{"type":"number"}
|},
|"required":["a"]
|}
""".stripMargin) shouldBe \/-("""case class Product(a:String, b:Option[Double])""".stripMargin.trim)
}
// Array properties become List[...] fields.
it should "generate type with array properties" in {
gen("""
|{
| "id": "http://some/product",
|"type":"object",
|"properties": {
|"a":{"type":"array", "items":{"type":"string"}},
|"b":{"type":"array", "items":{"type":"number"}}
|},
|"required":["a"]
|}
""".stripMargin) shouldBe \/-("""case class Product(a:List[String], b:Option[List[Double]])""".stripMargin.trim)
}
// $ref'd definitions yield additional case classes, referenced by qualified name.
it should "generate type with nested types" in {
gen("""
|{
| "id": "http://some/product",
|"type":"object",
|"properties": {
|"a":{"type":"array", "items":{"$ref":"#/definitions/nested"}},
|"b":{"type":"array", "items":{"type":"number"}}
|},
|"required":["a"],
|"definitions": {
|"nested": {
|"id":"#/definitions/nested",
|"type":"object"
| }
|}
|
|}
""".stripMargin) shouldBe \/-("""
|case class Product(a:List[product.definitions.Nested], b:Option[List[Double]])
|case class Nested()
| """.stripMargin.trim)
}
// String and integer enums become scala.Enumeration objects with sanitized identifiers.
it should "generate enumeration with values " in {
gen("""
|{
| "id": "http://some/product",
|"type":"string",
|"enum":["a 1","b"]
|}
""".stripMargin) shouldBe \/-("""
|object Product extends Enumeration { val a_1 = Value("a 1")
|val b = Value("b") }""".stripMargin.trim)
gen("""
|{
| "id": "http://some/product",
|"type":"integer",
|"enum":[1,2]
|}
""".stripMargin).map(_.replaceAll("\\s", "")) shouldBe \/-("""
|object Product extends Enumeration { val v1 = Value(1)
|val v2 = Value(2) }""".stripMargin.trim.replaceAll("\\s", ""))
}
// additionalProperties are collected into a Map field under the reserved name _additional.
it should "generate type with additional properties in a map" in {
gen("""
|{
| "id": "http://some/product",
|"type":"object",
|"additionalProperties":{"$ref":"#/definitions/nested"},
|"definitions": {
|"nested": {
|"id":"#/definitions/nested",
|"type":"object"
| }
|}
|}
""".stripMargin) shouldBe
\/-("""
|case class Product(_additional:Option[Map[String, product.definitions.Nested]])
|case class Nested()
| """.stripMargin.trim)
}
// Property names that clash with keywords or contain spaces are escaped.
it should "generate type with escaped properties" in {
gen("""
|{
| "id": "http://some/product",
|"type":"object",
|"properties": {
|"type":{"type":"string"},
|"b":{"type":"number"}
|},
|"required":["type"]
|}
""".stripMargin) shouldBe \/-("""case class Product(_type:String, b:Option[Double])""".stripMargin.trim)
gen("""
|{
| "id": "http://some/product",
|"type":"object",
|"properties": {
|"big number":{"type":"number"}
|}
|}
""".stripMargin) shouldBe \/-("""case class Product(big_number:Option[Double])""".stripMargin.trim)
}
// Type names derived from the schema id are escaped/capitalized as well.
it should "generate type with escaped name" in {
gen("""
|{
| "id": "http://some/type",
|"type":"string",
|"enum":["a 1","b"]
|}
""".stripMargin) shouldBe \/-("""
|object Type extends Enumeration { val a_1 = Value("a 1")
|val b = Value("b") }""".stripMargin.trim)
}
// A definition may reference itself without the generator looping.
it should "generate recursive references to a single class" in {
gen("""
|{
| "id": "http://some/reference",
| "type": "object",
| "properties": {
| "a": {
| "$ref": "#/definitions/b"
| }
| },
| "definitions": {
| "b": {
| "type": "object",
| "required": ["us"],
| "properties": {
| "us": {
| "$ref": "#/definitions/b"
| }
| }
| }
| }
|}
|""".stripMargin.trim) shouldBe \/-("""
|case class Reference(a:Option[reference.definitions.B])
|case class B(us:reference.definitions.B)
|""".stripMargin.trim)
}
// Mutually recursive definitions (b -> c -> b) are also handled.
it should "generate recursive references through multiple classes" in {
gen("""
|{
| "id": "http://some/reference",
| "type": "object",
| "properties": {
| "a": {
| "$ref": "#/definitions/b"
| }
| },
| "definitions": {
| "b": {
| "type": "object",
| "required": ["next"],
| "properties": {
| "next": {
| "$ref": "#/definitions/c"
| }
| }
| },
| "c": {
| "type": "object",
| "required": ["next"],
| "properties": {
| "next": {
| "$ref": "#/definitions/b"
| }
| }
| }
| }
|}
|""".stripMargin.trim) shouldBe \/-("""
|case class Reference(a:Option[reference.definitions.B])
|case class C(next:reference.definitions.B)
|case class B(next:reference.definitions.C)
|""".stripMargin.trim)
}
// Top-level array schemas become type aliases to List[...].
it should "generate array of types" in {
gen("""
|{
| "id": "http://some/StringArray",
| "type": "array",
| "items": {
| "type": "string"
| }
|}
|""".stripMargin.trim) shouldBe \/-("""
|type StringArray = List[String]
|""".stripMargin.trim)
}
// Top-level primitive schemas become type aliases to the primitive type.
it should "generate simple types" in {
gen("""
|{
| "id": "http://some/MyString",
| "type": "string"
|}
|""".stripMargin.trim) shouldBe \/-("""
|type MyString = String
|""".stripMargin.trim)
}
}
| VoxSupplyChain/json-schema-codegen | codegen/src/test/scala/json/schema/codegen/ScalaGeneratorTest.scala | Scala | apache-2.0 | 6,584 |
package myplugins
import sbt._
import sbt.Keys._
// Declares the shared `checkedSettingSet` key; members of `autoImport` become
// automatically visible in builds once this AutoPlugin is on the classpath.
object PluginKeys extends AutoPlugin {
object autoImport {
lazy val checkedSettingSet =
Def.settingKey[Set[String]]("checked setting set")
}
}
import PluginKeys.autoImport._
// Tags projects with "jvm" so tests can check which plugins were applied.
object PluginForJVM extends AutoPlugin {
override def projectSettings: Seq[Setting[_]] = Def.settings(
checkedSettingSet += "jvm"
)
}
// Tags projects with "js" so tests can check which plugins were applied.
object PluginForJS extends AutoPlugin {
override def projectSettings: Seq[Setting[_]] = Def.settings(
checkedSettingSet += "js"
)
}
// Tags projects with "native" so tests can check which plugins were applied.
object PluginForNative extends AutoPlugin {
override def projectSettings: Seq[Setting[_]] = Def.settings(
checkedSettingSet += "native"
)
}
// Tags projects with "jvm and js" so tests can check which plugins were applied.
object PluginForJVMAndJS extends AutoPlugin {
override def projectSettings: Seq[Setting[_]] = Def.settings(
checkedSettingSet += "jvm and js"
)
}
// Tags projects with "all" so tests can check which plugins were applied.
object PluginForAll extends AutoPlugin {
override def projectSettings: Seq[Setting[_]] = Def.settings(
checkedSettingSet += "all"
)
}
| densh/sbt-cross-project | sbt-crossproject-test/src/sbt-test/new-api/plugins/project/MyPlugins.scala | Scala | bsd-3-clause | 971 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.tools
import org.scalatest._
import org.scalatest.events.Event
import org.scalatest.events.Ordinal
import org.scalatest.events.AlertProvided
import scala.collection.mutable
import java.util.regex.Pattern
import java.net.URL
import java.io.File
import org.scalatest.tools.Runner.deglobSuiteParams
/**
 * Tests for Runner helpers: glob expansion of suite parameters
 * (`deglobSuiteParams`) and alerting when a replayed memory file contains a
 * Memento that is not runnable.
 */
class RunnerSpec extends FunSpec with PrivateMethodTester {

  it("deglobSuiteParams should work correctly") {
    // Template suite parameter; each glob below is substituted in as the class name.
    val suiteParam =
      SuiteParam("", Array.empty[String], Array.empty[String],
        Array.empty[NestedSuiteParam])
    val classNames =
      List(
        "foo.FooSpec",
        "foo.tools.FooToolsSpec",
        "foo.tools.FooToolsSuite",
        "foo.events.EventsFooSuite")

    // Expands `globs` against `classNames` and asserts exactly `expecteds` match.
    def runDeglob(globs: List[String], expecteds: List[String]): Unit = {
      val globSuiteParams =
        globs.map(glob => suiteParam.copy(className = glob))
      val result = deglobSuiteParams(globSuiteParams, classNames.toSet)
      assert(result.map(_.className).toSet === expecteds.toSet)
    }

    runDeglob(List("org.*"), Nil)
    runDeglob(List("f?.*"), Nil)         // '?' matches exactly one character
    runDeglob(List("f??.*"), classNames)
    runDeglob(List("foo.tools.*"),
      List("foo.tools.FooToolsSpec",
        "foo.tools.FooToolsSuite"))
    runDeglob(List("*.Foo*"),
      List("foo.FooSpec",
        "foo.tools.FooToolsSpec",
        "foo.tools.FooToolsSuite"))
    runDeglob(List("*.Foo*ls*"),
      List("foo.tools.FooToolsSpec",
        "foo.tools.FooToolsSuite"))
    // character classes are supported as well
    runDeglob(List("*FooS[u]*"),
      List("foo.events.EventsFooSuite"))
    runDeglob(List("*FooS[up]*"),
      List("foo.FooSpec",
        "foo.events.EventsFooSuite"))
  }

  it("readMemoryFiles should issue alert if a Memento isn't runnable") {
    val events = mutable.Set.empty[Event]
    // `val` rather than `var`: the tracker is never reassigned.
    val tracker = new Tracker(new Ordinal(12))
    val reporter = new Reporter {
      def apply(event: Event): Unit = {
        events += event
      }
    }
    Runner.readMemoryFiles(
      List("scalatest-test/src/test/scala/org/scalatest/tools/memoryfile.eg"),
      reporter, tracker)
    // exactly one AlertProvided event should have been fired
    assert(1 === events.filter(_.isInstanceOf[AlertProvided]).size)
  }
}
| dotty-staging/scalatest | scalatest-test/src/test/scala/org/scalatest/tools/RunnerSpec.scala | Scala | apache-2.0 | 2,738 |
/*
* Copyright 2020 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.extra.hll.zetasketch.syntax
import com.spotify.scio.extra.hll.zetasketch.ZetaSketchHll.ZetaSketchHllAggregator
import com.spotify.scio.extra.hll.zetasketch.{HllPlus, ZetaSketchHll}
import com.spotify.scio.values.SCollection
/** Syntax enrichments adding ZetaSketch HyperLogLog++ operations to
 * [[SCollection]]s of plain values, key-value pairs, and sketches. */
trait SCollectionSyntax {
implicit final class ZetaSCollection[T](private val scol: SCollection[T]) {
/**
* Convert each element to [[ZetaSketchHll]]. Only support for Int, Long, String and ByteString
* types.
*
* @Example
* {{{
* val input: SCollection[T] = ...
* val zCol: SCollection[ZetaSketchHll[T]] = input.asZetaSketchHll
* val approxDistCount: SCollection[Long] = zCol.sumHll.approxDistinctCount
* }}}
*
* [[ZetaSketchHll]] has few extra methods to access precision, sparse precision.
*
* @return
* [[SCollection]] of [[ZetaSketchHll]]. This will have the exactly the same number of element
* as input [[SCollection]]
*/
def asZetaSketchHll(implicit hp: HllPlus[T]): SCollection[ZetaSketchHll[T]] =
scol.map(ZetaSketchHll.create[T](_))
/**
* Calculate the approximate distinct count using HyperLogLog++ algorithm. Only support for Int,
* Long, String and ByteString types.
*
* @Example
* {{{
* val input: SCollection[T] = ...
* val approxDistCount: SCollection[Long] = input.approxDistinctCountWithZetaHll
* }}}
*
* @return
* - [[SCollection]] with one [[Long]] value.
*/
def approxDistinctCountWithZetaHll(implicit hp: HllPlus[T]): SCollection[Long] =
scol.aggregate(ZetaSketchHllAggregator())
}
implicit final class PairedZetaSCollection[K, V](private val kvScol: SCollection[(K, V)]) {
/**
* Convert each value in key-value pair to [[ZetaSketchHll]]. Only support for Int, Long, String
* and ByteString value types.
*
* @Example
* {{{
* val input: SCollection[(K, V)] = ...
* val zCol: SCollection[(K, ZetaSketchHll[V])] = input.asZetaSketchHllByKey
* val approxDistCount: SCollection[(K, Long)] = zCol.sumHllByKey.approxDistinctCountByKey
* }}}
*
* [[ZetaSketchHll]] has few extra methods to access precision, sparse precision.
*
* @return
* key-value [[SCollection]] where value being [[ZetaSketchHll]]. This will have the similar
* number of elements as input [[SCollection]].
*/
def asZetaSketchHllByKey(implicit hp: HllPlus[V]): SCollection[(K, ZetaSketchHll[V])] =
kvScol.mapValues(ZetaSketchHll.create[V](_))
/**
* Calculate the approximate distinct count using HyperLogLog++ algorithm. Only support for Int,
* Long, String and ByteString value types.
*
* @Example
* {{{
* val input: SCollection[(K, V)] = ...
* val approxDistCount: SCollection[(K, Long)] = input.approxDistinctCountWithZetaHllByKey
* }}}
*
* @return
* - [[SCollection]] with one [[Long]] value per each unique key.
*/
def approxDistinctCountWithZetaHllByKey(implicit hp: HllPlus[V]): SCollection[(K, Long)] =
kvScol.aggregateByKey(ZetaSketchHllAggregator())
}
/** Operations on a collection of sketches: merge and distinct-count estimation. */
implicit final class ZetaSketchHllSCollection[T](
private val scol: SCollection[ZetaSketchHll[T]]
) {
/** Merges all sketches in the collection into a single combined sketch. */
def sumHll: SCollection[ZetaSketchHll[T]] = scol.reduce(_.merge(_))
/** Maps each sketch to its estimated distinct-element count. */
def approxDistinctCount: SCollection[Long] = scol.map(_.estimateSize())
}
/** Per-key variants of the sketch merge / distinct-count operations. */
implicit final class ZetaSketchHllSCollectionKV[K, V](
private val kvSCol: SCollection[(K, ZetaSketchHll[V])]
) {
/** Merges all sketches sharing a key into one combined sketch per key. */
def sumHllByKey: SCollection[(K, ZetaSketchHll[V])] = kvSCol.reduceByKey(_.merge(_))
/** Maps each key's sketch to its estimated distinct-element count. */
def approxDistinctCountByKey: SCollection[(K, Long)] = kvSCol.mapValues(_.estimateSize())
}
}
| spotify/scio | scio-extra/src/main/scala/com/spotify/scio/extra/hll/zetasketch/syntax/SCollectionSyntax.scala | Scala | apache-2.0 | 4,371 |
package org.jetbrains.plugins.scala.externalHighlighters
import com.intellij.lang.annotation.HighlightSeverity
import com.intellij.openapi.fileEditor.FileEditorManager
import com.intellij.openapi.util.TextRange
import com.intellij.openapi.vfs.VirtualFile
import org.jetbrains.plugins.scala.ScalaVersion
import org.jetbrains.plugins.scala.compilation.CompilerTestUtil.runWithErrorsFromCompiler
import org.jetbrains.plugins.scala.compiler.{CompilerEvent, CompilerEventListener}
import org.jetbrains.plugins.scala.extensions.invokeAndWait
import scala.concurrent.duration.DurationInt
import scala.concurrent.{Await, Promise}
import scala.util.Success
/** Base for worksheet compiler-highlighting tests: provides shared worksheet
 * content and a runner that waits for worksheet compilation to complete
 * before the highlighting assertions are checked. */
abstract class ScalaWorksheetCompilerHighlightingTestBase extends ScalaCompilerHighlightingTestBase {
// Worksheet source under test. It deliberately contains a non-exhaustive match,
// a call to an unknown function, and a duplicate `val x` definition so that
// subclasses can assert which of these are reported as errors.
protected val worksheetContent =
"""42
|val option: Option[Int] = Some(1)
|option match {
| case Some(_) =>
|}
|unknownFunction()
|val x = 23 //actually, in worksheets this should be treated as OK, but for now we just fix the behaviour in tests
|val x = 23
|""".stripMargin
// Opens the worksheet in an editor (which triggers compilation) and blocks
// until a CompilationFinished compiler event arrives, then delegates to
// runTestCase for the actual highlighting assertions.
protected def runTestCaseForWorksheet(
fileName: String,
content: String,
expectedResult: ExpectedResult
): Unit = runWithErrorsFromCompiler(getProject) {
val waitUntilFileIsHighlighted: VirtualFile => Unit = virtualFile => {
// Compilation is done on file opening (see RegisterCompilationListener.MyFileEditorManagerListener)
// There is no explicit compile worksheet action for now, like we have in Build with JPS.
// In order to detect the end of we wait until CompilationFinished event is generated
val promise = Promise[Unit]()
getProject.getMessageBus.connect().subscribe(CompilerEventListener.topic, new CompilerEventListener {
override def eventReceived(event: CompilerEvent): Unit = event match {
case CompilerEvent.CompilationFinished(_, _, files) =>
// todo (minor): we should also ensure that the file is actually the tested file
promise.complete(Success(()))
case _ =>
()
}
})
invokeAndWait {
FileEditorManager.getInstance(getProject).openFile(virtualFile, true)
}
val timeout = 60.seconds
Await.result(promise.future, timeout)
}
runTestCase(fileName, content, expectedResult, waitUntilFileIsHighlighted)
}
}
/** Scala 2.13 variant: only the unknown-function and duplicate-definition
 * errors are expected, with 2.13-style compiler messages. */
class ScalaWorksheetCompilerHighlightingTest_2_13 extends ScalaWorksheetCompilerHighlightingTestBase {
override protected def supportedIn(version: ScalaVersion): Boolean = version == ScalaVersion.Latest.Scala_2_13
def testOnlyErrorsAreExpectedInWorksheet(): Unit = runTestCaseForWorksheet(
fileName = "worksheet.sc",
content = worksheetContent.stripMargin,
expectedResult = expectedResult(
ExpectedHighlighting(
severity = HighlightSeverity.ERROR,
range = Some(new TextRange(72, 87)),
quickFixDescriptions = Nil,
msgPrefix = "not found: value unknownFunction"
),
ExpectedHighlighting(
severity = HighlightSeverity.ERROR,
range = Some(new TextRange(208, 209)),
quickFixDescriptions = Nil,
msgPrefix = "x is already defined as value x"
)
)
)
}
/** Scala 3.0 variant: expects Scala-3-style compiler messages and additionally
 * checks that the synthetic worksheet wrapper class name is replaced by the
 * worksheet file name in error messages. */
class ScalaWorksheetCompilerHighlightingTest_3_0 extends ScalaWorksheetCompilerHighlightingTestBase {
override protected def supportedIn(version: ScalaVersion): Boolean = version == ScalaVersion.Latest.Scala_3_0
/* see [[org.jetbrains.plugins.scala.worksheet.processor.WorksheetCompiler.WrappedWorksheetCompilerMessagesFixer]] */
def testOnlyErrorsAreExpectedInWorksheet(): Unit = runTestCaseForWorksheet(
fileName = "worksheet.sc",
content = worksheetContent.stripMargin,
expectedResult = expectedResult(
ExpectedHighlighting(
severity = HighlightSeverity.ERROR,
range = Some(new TextRange(72, 87)),
quickFixDescriptions = Nil,
msgPrefix = "Not found: unknownFunction"
),
ExpectedHighlighting(
severity = HighlightSeverity.ERROR,
range = Some(new TextRange(208, 209)),
quickFixDescriptions = Nil,
msgPrefix = "Double definition:\\nval x: Int in worksheet.sc at line 8 and\\nval x: Int in worksheet.sc at line 9"
)
)
)
// The error messages must refer to "worksheet.sc", not the generated wrapper class.
def testReplaceWrapperClassNameFromErrorMessages(): Unit = runTestCaseForWorksheet(
fileName = "worksheet.sc",
content =
"""object X {}
|X.foo()
|this.bar()""".stripMargin,
expectedResult = expectedResult(
ExpectedHighlighting(
severity = HighlightSeverity.ERROR,
range = Some(new TextRange(14, 17)),
quickFixDescriptions = Nil,
msgPrefix = "value foo is not a member of object X"
),
ExpectedHighlighting(
severity = HighlightSeverity.ERROR,
range = Some(new TextRange(25, 28)),
quickFixDescriptions = Nil,
msgPrefix = "value bar is not a member of worksheet.sc"
)
)
)
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/externalHighlighters/ScalaWorksheetCompilerHighlightingTest.scala | Scala | apache-2.0 | 4,968 |
/***********************************************************************
* Copyright (c) 2017 IBM
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.cassandra.index
import java.nio.charset.StandardCharsets
import com.google.common.primitives.Longs
import org.locationtech.geomesa.cassandra.{NamedColumn, RowValue}
import org.opengis.feature.simple.SimpleFeatureType
/** Row-key layout for the Cassandra Z2 (spatial curve) feature index: maps
 * between the serialized byte-array row key and the Cassandra columns
 * (shard, z value, feature id). */
trait CassandraZ2Layout extends CassandraFeatureIndex {
// shard byte is the partition key, distributing rows across nodes
private val Shard = NamedColumn("shard", 0, "tinyint", classOf[Byte], partition = true)
private val ZValue = NamedColumn("z", 1, "bigint", classOf[Long])
private val FeatureId = NamedColumn("fid", 2, "text", classOf[String])
override protected val columns: Seq[NamedColumn] = Seq(Shard, ZValue, FeatureId)
// * - 1 byte identifying the sft (OPTIONAL - only if table is shared)
// * - 1 byte shard
// * - 8 bytes z value
// * - n bytes feature ID
// Decodes a (possibly partial) row key into column values. Rows shorter than
// the full layout are allowed (e.g. prefixes used as scan bounds): missing
// z-value bytes are zero-padded via getOrElse, and absent columns stay null.
override protected def rowToColumns(sft: SimpleFeatureType, row: Array[Byte]): Seq[RowValue] = {
import CassandraFeatureIndex.RichByteArray
var shard: java.lang.Byte = null
var z: java.lang.Long = null
var fid: String = null
if (row.length > 0) {
shard = row(0)
if (row.length > 1) {
z = Longs.fromBytes(row(1), row.getOrElse(2, 0), row.getOrElse(3, 0), row.getOrElse(4, 0), row.getOrElse(5, 0),
row.getOrElse(6, 0), row.getOrElse(7, 0), row.getOrElse(8, 0))
if (row.length > 9) {
fid = new String(row, 9, row.length - 9, StandardCharsets.UTF_8)
}
}
}
Seq(RowValue(Shard, shard), RowValue(ZValue, z), RowValue(FeatureId, fid))
}
// Encodes column values back into a 9 + fid.length byte row key:
// [shard (1)] [z (8, big-endian)] [fid (UTF-8)]. Assumes all three values are set.
override protected def columnsToRow(columns: Seq[RowValue]): Array[Byte] = {
val shard = columns.head.value.asInstanceOf[Byte]
val z = Longs.toByteArray(columns(1).value.asInstanceOf[Long])
val fid = columns(2).value.asInstanceOf[String].getBytes(StandardCharsets.UTF_8)
val row = Array.ofDim[Byte](9 + fid.length)
row(0) = shard
System.arraycopy(z, 0, row, 1, 8)
System.arraycopy(fid, 0, row, 9, fid.length)
row
}
}
| MutahirKazmi/geomesa | geomesa-cassandra/geomesa-cassandra-datastore/src/main/scala/org/locationtech/geomesa/cassandra/index/CassandraZ2Layout.scala | Scala | apache-2.0 | 2,470 |
package net.ettinsmoor
import javax.xml.bind.DatatypeConverter
/** Builds the Base64 credential token used to authenticate against the Bing API. */
object KeyFormat {
  /** Returns the Base64 encoding of `"key:key"` — the account key is used as
    * both user name and password in the HTTP-Basic style token.
    *
    * Uses `java.util.Base64` (standard alphabet, padded, no line breaks) in
    * place of `javax.xml.bind.DatatypeConverter`, which produces identical
    * output but was removed from the JDK in Java 11 (JEP 320).
    *
    * @param key the Bing account key
    * @return Base64-encoded `"key:key"` token
    */
  def apply(key: String): String = {
    val credential = key + ":" + key
    java.util.Base64.getEncoder.encodeToString(credential.getBytes("UTF-8"))
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.status.api.v1
import java.util.Date
import scala.collection.mutable.LinkedHashMap
import org.apache.spark.SparkFunSuite
import org.apache.spark.scheduler.{StageInfo, TaskInfo, TaskLocality}
import org.apache.spark.ui.jobs.UIData.{StageUIData, TaskUIData}
/** Tests for AllStagesResource.stageUiToStageData, specifically the computation
 * of a stage's firstTaskLaunchedTime from its tasks' launch times. */
class AllStagesResourceSuite extends SparkFunSuite {
// Builds a stage containing one task per entry in `taskLaunchTimes` and returns
// the firstTaskLaunchedTime computed by stageUiToStageData. Negative launch
// times represent tasks that have not launched (see the tests below).
def getFirstTaskLaunchTime(taskLaunchTimes: Seq[Long]): Option[Date] = {
val tasks = new LinkedHashMap[Long, TaskUIData]
taskLaunchTimes.zipWithIndex.foreach { case (time, idx) =>
tasks(idx.toLong) = TaskUIData(
new TaskInfo(idx, idx, 1, time, "", "", TaskLocality.ANY, false), None)
}
val stageUiData = new StageUIData()
stageUiData.taskData = tasks
val status = StageStatus.ACTIVE
val stageInfo = new StageInfo(
1, 1, "stage 1", 10, Seq.empty, Seq.empty, "details abc")
val stageData = AllStagesResource.stageUiToStageData(status, stageInfo, stageUiData, false)
stageData.firstTaskLaunchedTime
}
test("firstTaskLaunchedTime when there are no tasks") {
val result = getFirstTaskLaunchTime(Seq())
assert(result == None)
}
test("firstTaskLaunchedTime when there are tasks but none launched") {
val result = getFirstTaskLaunchTime(Seq(-100L, -200L, -300L))
assert(result == None)
}
test("firstTaskLaunchedTime when there are tasks and some launched") {
val result = getFirstTaskLaunchTime(Seq(-100L, 1449255596000L, 1449255597000L))
assert(result == Some(new Date(1449255596000L)))
}
}
| Panos-Bletsos/spark-cost-model-optimizer | core/src/test/scala/org/apache/spark/status/api/v1/AllStagesResourceSuite.scala | Scala | apache-2.0 | 2,326 |
package org.scalatra
import java.io.{ ByteArrayOutputStream, ObjectOutputStream }
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{ BeforeAndAfterEach, FunSuite, Matchers }
@RunWith(classOf[JUnitRunner])
/**
 * Behavioural tests for `FlashMap`: an entry survives until the sweep after it
 * has been read; `now` scopes an entry to the current sweep; `keep` and `flag`
 * extend or shorten entry lifetimes.
 */
class FlashMapTest extends FunSuite with Matchers with BeforeAndAfterEach {
  var flash: FlashMap = _

  // Fresh map per test. Explicit `(): Unit` matches the trait's empty-paren signature.
  override def beforeEach(): Unit = flash = new FlashMap()

  test("values are visible immediately") {
    flash("foo") = "bar"
    flash.get("foo") should equal(Some("bar"))
  }

  test("gotten values are removed on sweep") {
    flash("foo") = "bar"
    flash.get("foo")
    flash.get("foo") should equal(Some("bar"))
    flash.sweep()
    flash.get("foo") should equal(None)
  }

  test("ungotten values are not removed on sweep") {
    flash("foo") = "bar"
    flash.sweep()
    flash.get("foo") should equal(Some("bar"))
  }

  test("values are overwritten immediately") {
    flash("foo") = "bar"
    flash.sweep()
    flash.get("foo") should equal(Some("bar"))
    flash("foo") = "baz"
    flash.get("foo") should equal(Some("baz"))
  }

  test("values overwritten since last gotten are not removed on sweep") {
    flash("foo") = "bar"
    flash.get("foo")
    flash("foo") = "baz"
    flash.sweep()
    flash.get("foo") should equal(Some("baz"))
  }

  test("gotten keys are not remembered across sweeps") {
    flash("foo") = "bar"
    flash.get("foo")
    flash.sweep()
    flash("foo") = "baz"
    flash.sweep()
    flash.get("foo") should equal(Some("baz"))
  }

  test("values are removed immediately") {
    flash("foo") = "bar"
    flash -= "foo"
    flash.get("foo") should equal(None)
  }

  test("iterates over previously and currently added keys") {
    flash("one") = 1
    flash("two") = 2
    flash.sweep()
    flash("three") = 3
    flash.toSet should equal(Set("one" -> 1, "two" -> 2, "three" -> 3))
  }

  test("iterated keys are removed on sweep") {
    val keys = Set("1", "2")
    keys foreach { k => flash(k) = true }
    // Iteration order is unspecified; next() marks one key as read.
    val (gottenKey, _) = flash.iterator.next()
    val ungottenKey = (keys - gottenKey).head
    flash.sweep()
    flash.get(gottenKey) should equal(None)
    flash.get(ungottenKey) should equal(Some(true))
  }

  test("keep without arguments retains used keys through one sweep") {
    flash("1") = "one"
    flash("2") = "two"
    flash.get("1")
    flash.keep()
    flash.sweep()
    flash.get("1") should equal(Some("one"))
    flash.get("2") should equal(Some("two"))
    flash.sweep()
    flash.get("1") should equal(None)
  }

  test("keep with an argument retains just those keys through one sweep") {
    flash("1") = "one"
    flash("2") = "two"
    flash("3") = "three"
    flash.get("1")
    flash.get("2")
    flash.get("3")
    flash.keep("1")
    flash.keep("3")
    flash.sweep()
    flash.get("1") should equal(Some("one"))
    flash.get("2") should equal(None)
    flash.get("3") should equal(Some("three"))
    flash.sweep()
    flash.get("1") should equal(None)
    flash.get("3") should equal(None)
  }

  test("values set with now are visible immediately") {
    flash.now("foo") = "baz"
    flash.get("foo") should equal(Some("baz"))
  }

  test("ungotten values set with now are removed on sweep") {
    flash.now("foo") = "baz"
    flash.sweep()
    flash.get("foo") should equal(None)
  }

  test("supports symbols as keys") {
    flash("foo") = "bar"
    flash.sweep()
    flash('foo) should equal("bar")
  }

  test("is serializable") {
    flash("foo") = "bar"
    val out = new ObjectOutputStream(new ByteArrayOutputStream)
    out.writeObject(flash)
  }

  test("flag marks all ungotten entries for sweeping") {
    flash("one") = 1
    flash.flag()
    flash.sweep()
    flash.get("one") should equal(None)
  }

  test("flag does not apply to entries added after flagging") {
    flash.flag()
    flash("one") = 1
    flash.sweep()
    flash.get("one") should equal(Some(1))
  }
}
| lightvector/scalatra | core/src/test/scala/org/scalatra/FlashMapTest.scala | Scala | bsd-2-clause | 3,944 |
package spotlight.app
import java.nio.file.{ Paths, WatchEvent }
import java.util.concurrent.atomic.AtomicLong
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{ Failure, Success }
import akka.{ Done, NotUsed }
import akka.actor.{ Actor, ActorRef, ActorSystem, DeadLetter, Props }
import akka.event.LoggingReceive
import akka.stream.scaladsl.{ FileIO, Flow, Framing, GraphDSL, Keep, Sink, Source }
import akka.stream._
import akka.util.ByteString
import cats.syntax.either._
import better.files.ThreadBackedFileMonitor
import net.ceedubs.ficus.Ficus._
import com.persist.logging._
import nl.grons.metrics.scala.MetricName
import org.joda.time.DateTime
import org.json4s._
import org.json4s.jackson.JsonMethods
import omnibus.akka.metrics.Instrumented
import omnibus.akka.stream.StreamMonitor
import demesne.BoundedContext
import omnibus.akka.stream.Limiter
import omnibus.commons.ErrorOr
import spotlight.{ Settings, Spotlight, SpotlightContext }
import spotlight.analysis.DetectFlow
import spotlight.model.outlier._
import spotlight.model.timeseries.{ DataPoint, ThresholdBoundary, TimeSeries }
import spotlight.protocol.GraphiteSerializationProtocol
/** Created by rolfsd on 11/17/16.
*/
object FileBatchExample extends Instrumented with ClassLogging {
// Number of records read from the source stream (for end-of-run reporting).
val inputCount: AtomicLong = new AtomicLong( 0L )
// Number of outlier results that reached the sink (for end-of-run reporting).
val resultCount: AtomicLong = new AtomicLong( 0L )
// Entry point: builds the SpotlightContext from the command-line args, wires a
// dead-letter listener onto the actor system's event stream, and starts the
// detection pipeline.
def main( args: Array[String] ): Unit = {
import scala.concurrent.ExecutionContext.Implicits.global
import SpotlightContext.{ Builder ⇒ B }
val context = {
SpotlightContext.Builder
.builder
.set( B.Arguments, args )
// .set( SpotlightContext.StartTasks, Set( /*SharedLeveldbStore.start(true), Spotlight.kamonStartTask*/ ) )
// .set( SpotlightContext.System, Some( system ) )
.build()
}
// implicit val actorSystem = ActorSystem( "Spotlight" )
// startLogging( actorSystem )
// log.info( "Starting Application Up" )
// log every dead letter so dropped messages are visible during runs
val deadListener = context.system.actorOf( DeadListenerActor.props, "dead-listener" )
context.system.eventStream.subscribe( deadListener, classOf[DeadLetter] )
start( context )
// .onComplete {
// case Success( results ) ⇒ {
// println( "\\n\\nAPP: ********************************************** " )
// println( s"\\nAPP:${count.get()} batch completed finding ${results.size} outliers:" )
// results.zipWithIndex foreach { case ( o, i ) ⇒ println( s"${i + 1}: ${o}" ) }
// println( "APP: **********************************************\\n\\n" )
// context.terminate()
// }
//
// case Failure( ex ) ⇒ {
// log.error( Map( "@msg" → "batch finished with error", "count" → count.get() ), ex )
// println( "\\n\\nAPP: ********************************************** " )
// println( s"\\nAPP: ${count.get()} batch completed with ERROR: ${ex}" )
// println( "APP: **********************************************\\n\\n" )
// context.terminate()
// }
// }
}
// case class OutlierInfo( metricName: String, metricWebId: String, metricSegment: String )
// A single outlier observation: the point's timestamp and value.
case class OutlierTimeSeriesObject( timeStamp: DateTime, value: Double )
// case class Threshold( timeStamp: DateTime, ceiling: Option[Double], expected: Option[Double], floor: Option[Double] )
// Flattened, publish-friendly view of a detection result for one topic:
// the algorithm used, the outlier points found, and the threshold boundaries.
case class SimpleFlattenedOutlier(
algorithm: String,
outliers: Seq[OutlierTimeSeriesObject],
threshold: Seq[ThresholdBoundary],
topic: String
)
/** Companion for [[DeadListenerActor]]. */
object DeadListenerActor {
  def props: Props = Props( new DeadListenerActor )
}

/** Subscribed to the actor system's event stream; logs each [[DeadLetter]] so
  * dropped messages are visible during runs. */
class DeadListenerActor extends Actor with ActorLogging {
  override def receive: Receive = LoggingReceive {
    case DeadLetter( m, s, r ) ⇒ {
      log.debug(
        Map(
          "@msg" → "dead letter received",
          // log the dead letter's original sender `s`, not `sender` (which is
          // the event-stream publisher, not the actor whose message was dropped)
          "sender" → s.path.name,
          "recipient" → r.path.name,
          "message" → m.toString
        )
      )
    }
  }
}
// Metrics are namespaced under this object's package and (safe) simple class name.
override lazy val metricBaseName: MetricName = {
import omnibus.commons.util._
MetricName( getClass.getPackage.getName, getClass.safeSimpleName )
}
// Labels identifying the monitored stages of the detection stream (see StreamMonitor.set in start()).
object WatchPoints {
val DataSource = 'source
val Intake = 'intake
val Rate = 'rate
val Scoring = 'scoring
val Publish = 'publish
val Results = 'results
}
//todo
// Boots Spotlight with the given context and, when this node hosts the scoring
// flow, runs the full pipeline: file source -> detection flow -> publish buffer
// -> logging sink. Returns a Future that completes when the stream finishes;
// completion/failure handlers print a summary and terminate the context.
def start( context: SpotlightContext ): Future[Done] = {
log.debug( "starting the detecting flow logic" )
import scala.concurrent.ExecutionContext.Implicits.global
Spotlight()
.run( context )
.map { e ⇒ log.debug( "bootstrapping process..." ); e }
.flatMap {
// no scoring flow on this node: nothing to stream, report started and finish
case ( boundedContext, ctx, None ) ⇒ {
log.info( Map( "@msg" → "spotlight node started", "role" → ctx.settings.role.entryName ) )
Future.successful( Done )
}
case ( boundedContext, ctx, Some( scoring ) ) ⇒
log.debug( "process bootstrapped. processing data..." )
import StreamMonitor._
import WatchPoints._
import spotlight.analysis.OutlierScoringModel.{ WatchPoints ⇒ OSM }
import spotlight.analysis.PlanCatalog.{ WatchPoints ⇒ C }
// register the stream stages to monitor (commented entries kept for debugging)
StreamMonitor.set(
DataSource,
Intake,
Rate,
// 'timeseries,
// 'blockPriors,
// 'preBroadcast,
// OSM.ScoringPlanned, // keep later?
// 'passPlanned,
// 'passUnrecognizedPreFilter,
// OSM.ScoringUnrecognized,
'regulator,
// OSM.PlanBuffer,
OSM.Catalog,
// C.Outlet,
// Publish,
// 'filterOutliers,
Results //,
// OSM.ScoringUnrecognized
)
implicit val system = ctx.system
implicit val materializer = ActorMaterializer( ActorMaterializerSettings( system ) )
// back-pressured buffer ahead of the sink, monitored under the Publish label
val publish = Flow[SimpleFlattenedOutlier].buffer( 10, OverflowStrategy.backpressure ).watchFlow( Publish )
// terminal stage: log each flattened outlier to the "results" log category
val sink = Sink.foreach[SimpleFlattenedOutlier] { o ⇒
log.alternative(
category = "results",
Map(
"topic" → o.topic,
"outliers-in-batch" → o.outliers.size,
"algorithm" → o.algorithm,
"outliers" → o.outliers.mkString( "[", ", ", "]" )
)
)
}
// materialize the stream: source file -> detection workflow -> publish -> sink
val process = sourceData( ctx.settings )
.via( Flow[String].watchSourced( DataSource ) )
// .via( Flow[String].buffer( 10, OverflowStrategy.backpressure ).watchSourced( Data ) )
.via( detectionWorkflow( boundedContext, ctx.settings, scoring ) )
.via( publish )
.map { o ⇒
log.warn( Map( "@msg" → "published result - BEFORE SINK", "result" → o.toString ) )
resultCount.incrementAndGet()
o
}
.runWith( sink )
process
.map { d ⇒
log.info(
Map(
"@msg" → "APP: Example processed records successfully and found result(s)",
"nr-records" → inputCount.get().toString,
"nr-results" → resultCount.get().toString
)
)
d
}
.onComplete {
case Success( _ ) ⇒ {
println( "\\n\\nAPP: ********************************************** " )
println( s"\\nAPP:${inputCount.get()} batch completed finding ${resultCount.get()} outliers:" )
// results.zipWithIndex foreach { case ( o, i ) ⇒ println( s"${i + 1}: ${o}" ) }
println( "APP: **********************************************\\n\\n" )
context.terminate()
}
case Failure( ex: akka.stream.AbruptTerminationException ) ⇒ {
println( "\\n\\nAPP: ********************************************** " )
println( s"\\nAPP:${inputCount.get()} batch completed with manual termination finding ${resultCount.get()} outliers:" )
// results.zipWithIndex foreach { case ( o, i ) ⇒ println( s"${i + 1}: ${o}" ) }
println( "APP: **********************************************\\n\\n" )
context.terminate()
}
case Failure( ex ) ⇒ {
log.error( Map( "@msg" → "batch finished with error", "count" → inputCount.get() ), ex )
println( "\\n\\nAPP: ********************************************** " )
println( s"\\nAPP: ${inputCount.get()} batch completed with ERROR - found ${resultCount.get()} outliers: ${ex}" )
println( "APP: **********************************************\\n\\n" )
context.terminate()
}
}
process
}
}
// def sourceData( settings: Settings ): Source[String, Future[IOResult]] = {
/**
 * Builds the stream of raw input records by watching the data path (last CLI arg, or
 * "source.txt") for newly created files and publishing each file's lines.
 *
 * NOTE(review): the file monitor is started but never stopped; it lives for the
 * process lifetime — confirm this is intended for the batch example.
 */
def sourceData( settings: Settings )( implicit materializer: Materializer ): Source[String, NotUsed] = {
  val dataPath = Paths.get( settings.args.lastOption getOrElse "source.txt" )
  log.info( Map( "@msg" → "using data file", "path" → dataPath.toString ) )

  // actor-backed publisher: the watcher callbacks below push Records into the stream
  val ( recordPublisherRef, recordPublisher ) = {
    Source
      .actorPublisher[RecordPublisher.Record]( RecordPublisher.props )
      .toMat( Sink.asPublisher( false ) )( Keep.both )
      .run()
  }

  import better.files._

  val watcher = new ThreadBackedFileMonitor( File( dataPath ), recursive = true ) {
    override def onCreate( file: File ): Unit = {
      // read the file exactly once: the previous version read file.lines twice (once
      // for the size log and once for the payload), duplicating I/O and risking the
      // two reads observing different contents
      val records = file.lines.to[scala.collection.immutable.Iterable]
      log.info(
        Map(
          "@msg" → "loading newly CREATED data file",
          "name" → file.path.toString,
          "records" → records.size
        )
      )
      recordPublisherRef ! RecordPublisher.Record( payload = records )
    }

    // modifications and deletions are observed and logged but not re-ingested
    override def onModify( file: File ): Unit = {
      log.warn(
        Map(
          "@msg" → "newly MODIFIED data file",
          "name" → file.path.toString,
          "records" → file.lines.size
        )
      )
    }

    override def onDelete( file: File ): Unit = {
      log.warn(
        Map(
          "@msg" → "newly DELETED data file",
          "name" → file.path.toString,
          "records" → file.lines.size
        )
      )
    }

    override def onUnknownEvent( event: WatchEvent[_] ): Unit = {
      log.error(
        Map(
          "@msg" → "UNKNOWN EVENT",
          "kind" → event.kind(),
          "count" → event.count(),
          "event" → event.toString
        )
      )
    }

    override def onException( exception: Throwable ): Unit = {
      log.error(
        Map(
          "@msg" → "EXCEPTION",
          "message" → exception.getMessage
        ),
        ex = exception
      )
    }
  }

  watcher.start()

  // flatten each published Record's payload into individual lines
  Source.fromPublisher( recordPublisher ).map( _.payload ).mapConcat { identity }
}
/**
 * Placeholder rate limiter: currently an identity pass-through. The `parallelism` and
 * `refreshPeriod` parameters are retained so callers need not change once a real
 * limiter (Limiter actor or `throttle`) is reinstated.
 */
def rateLimitFlow( parallelism: Int, refreshPeriod: FiniteDuration )( implicit system: ActorSystem ): Flow[TimeSeries, TimeSeries, NotUsed] = {
  Flow[TimeSeries].map { ts ⇒ ts }
}
/**
 * Assembles the detection graph:
 *   intake buffer → unmarshal JSON to TimeSeries → rate limiter → scoring →
 *   keep SeriesOutliers only → flatten → unwrap lists.
 * The resulting flow runs under [[workflowSupervision]] (restart on failure).
 */
def detectionWorkflow(
  context: BoundedContext,
  settings: Settings,
  scoring: DetectFlow
)(
  implicit
  system: ActorSystem,
  materializer: Materializer
): Flow[String, SimpleFlattenedOutlier, NotUsed] = {
  val graph = GraphDSL.create() { implicit b ⇒
    import GraphDSL.Implicits._
    import omnibus.akka.stream.StreamMonitor._

    // debug helper: log every element under `label` without altering the stream
    def watch[T]( label: String ): Flow[T, T, NotUsed] = Flow[T].map { e ⇒ log.info( Map( label → e.toString ) ); e }

    // backpressured entry buffer sized from settings, monitored as Intake
    val intakeBuffer = b.add(
      Flow[String]
        .buffer( settings.tcpInboundBufferSize, OverflowStrategy.backpressure )
        .watchFlow( WatchPoints.Intake )
    )

    // decode raw strings and count every successfully unmarshalled time series
    val timeSeries = b.add(
      Flow[String]
        .via( unmarshalTimeSeriesData )
        .map { ts ⇒ inputCount.incrementAndGet(); ts }
    )

    val limiter = b.add( rateLimitFlow( settings.parallelism, 25.milliseconds ).watchFlow( WatchPoints.Rate ) )
    val score = b.add( scoring )

    //todo remove after working
    //      val publishBuffer = b.add(
    //        Flow[Outliers]
    //          .buffer( 10, OverflowStrategy.backpressure )
    //          .watchFlow( WatchPoints.Publish )
    //      )

    // only SeriesOutliers continue downstream; other Outliers subtypes are dropped here
    val filterOutliers = b.add(
      Flow[Outliers]
        //        .buffer( 10, OverflowStrategy.backpressure ).watchFlow( 'filterOutliers )
        .collect { case s: SeriesOutliers ⇒ s } //.watchFlow( WatchPoints.Results )
    )

    // flatten each SeriesOutliers into per-algorithm records; a flatten failure is
    // rethrown so the supervision strategy can restart the stage
    val flatter = b.add(
      Flow[SeriesOutliers]
        .map { s ⇒
          flattenObject( s ) valueOr { ex ⇒
            log.error( Map( "@msg" → "Failure: flatter.flattenObject", "series-outlier" → s.toString ), ex )
            throw ex
          }
        }
    )

    val unwrap = b.add(
      Flow[List[SimpleFlattenedOutlier]]
        .mapConcat( identity )
      //      .map { o => logger.info( "RESULT: {}", o ); o }
    )

    intakeBuffer ~> timeSeries ~> limiter ~> score ~> /*publishBuffer ~>*/ filterOutliers ~> flatter ~> unwrap

    FlowShape( intakeBuffer.in, unwrap.out )
  }

  Flow.fromGraph( graph ).withAttributes( ActorAttributes.supervisionStrategy( workflowSupervision ) )
}
/** Stream supervision: restart the failing stage on any exception, logging the cause. */
val workflowSupervision: Supervision.Decider = {
  case ex ⇒ {
    // log at error level — a restart-triggering stream failure was previously logged
    // at info, making failures easy to miss; uses the log.error(Map, ex) form used
    // elsewhere in this file
    log.error( Map( "@msg" → "Error caught by Supervisor:" ), ex )
    Supervision.Restart
  }
}
// def flatten: Flow[SeriesOutliers , List[SimpleFlattenedOutlier],NotUsed] = {
// Flow[SeriesOutliers ]
// .map[List[SimpleFlattenedOutlier]] { so =>
// flattenObject( so ) match {
// case \\/-( f ) => f
// case -\\/( ex ) => {
// logger.error( s"Failure: flatten.flattenObject[${so}]:", ex )
// throw ex
// }
// }
// }
// }
/**
 * Flattens a [[SeriesOutliers]] into one [[SimpleFlattenedOutlier]] per detecting
 * algorithm. Algorithms without recorded threshold boundaries get empty boundaries,
 * one per source point. Non-fatal failures surface as Left.
 */
def flattenObject( outlier: SeriesOutliers ): ErrorOr[List[SimpleFlattenedOutlier]] = {
  Either catchNonFatal {
    // the outlier points are the same for every algorithm, so convert them once
    // instead of once per algorithm as before
    val flattenedPoints = parseOutlierObject( outlier.outliers )
    outlier.algorithms.toList.map { a ⇒
      val t = outlier.thresholdBoundaries.get( a ) getOrElse {
        outlier.source.points.map { dp ⇒ ThresholdBoundary.empty( dp.timestamp ) }
      }
      SimpleFlattenedOutlier( algorithm = a, outliers = flattenedPoints, threshold = t, topic = outlier.topic.toString )
    }
  }
}
// def parseThresholdBoundaries( thresholdBoundaries: Seq[ThresholdBoundary] ) : Seq[Threshold] = trace.briefBlock(s"parseThresholdBoundaries(${thresholdBoundaries})"){
// thresholdBoundaries map { a => Threshold(a.timestamp, a.ceiling, a.expected, a.floor ) }
// }
/** Converts raw data points into outlier time-series objects, preserving order. */
def parseOutlierObject( dataPoints: Seq[DataPoint] ): Seq[OutlierTimeSeriesObject] = {
  dataPoints.map( dp ⇒ OutlierTimeSeriesObject( dp.timestamp, dp.value ) )
}
// def parseTopic( topic: String ) : TryV[OutlierInfo] = trace.briefBlock( s"parseTopic(${topic})" ) {
// val result = \\/ fromTryCatchNonFatal {
// val splits = topic.split("""[.-]""")
// val metricType = splits(0)
// val webId = splits(1).concat( "." ).concat( splits(2).split("_")(0) )
// val segment = splits(2).split( "_" )(1)
// OutlierInfo( metricType, webId, segment )
// }
//
// result.leftMap( ex => logger.error( s"PARSE_TOPIC ERROR on [${topic}]", ex ))
//
// result
// }
/**
 * Flow decoding raw JSON strings into TimeSeries elements. A decode failure is logged
 * and rethrown so the Graphite serialization protocol's supervision decider can act.
 */
def unmarshalTimeSeriesData: Flow[String, TimeSeries, NotUsed] = {
  val parse = Flow[String] mapConcat { raw ⇒
    toTimeSeries( raw ) valueOr { ex ⇒
      log.error( Map( "@msg" → "Failure: unmarshalTimeSeries.toTimeSeries", "time-series" → raw.toString ), ex )
      throw ex
    }
  }
  parse.withAttributes( ActorAttributes.supervisionStrategy( GraphiteSerializationProtocol.decider ) )
}
/**
 * Parses a JSON payload of shape {"topic": <string>, "points": [{"timestamp": <int>,
 * "value": <double>}, ...]} into TimeSeries values (one per matching object).
 * Non-fatal parse errors surface as Left.
 * NOTE(review): the json4s for-comprehension silently skips points whose timestamp is
 * not a JSON integer or whose value is not a JSON double — confirm that is intended.
 */
def toTimeSeries( bytes: String ): ErrorOr[List[TimeSeries]] = {
  import spotlight.model.timeseries._

  Either catchNonFatal {
    for {
      JObject( obj ) ← JsonMethods parse bytes
      JField( "topic", JString( topic ) ) ← obj
      JField( "points", JArray( points ) ) ← obj
    } yield {
      val datapoints = for {
        JObject( point ) ← points
        JField( "timestamp", JInt( ts ) ) ← point
        JField( "value", JDouble( v ) ) ← point
      } yield DataPoint( new DateTime( ts.toLong ), v )
      TimeSeries.apply( topic, datapoints )
    }
  }
}
}
| dmrolfs/lineup | app-batch/src/main/scala/spotlight/app/FileBatchExample.scala | Scala | mit | 17,769 |
/*
* Copyright 2014 Databricks
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.databricks.spark
import org.apache.spark.sql.{SQLContext, SchemaRDD}
package object avro {

  /**
   * Pimps [[SQLContext]] with `avroFile`, which reads data stored in Avro into a
   * SchemaRDD.
   */
  implicit class AvroContext(sqlContext: SQLContext) {
    def avroFile(filePath: String) = {
      val relation = AvroRelation(filePath)(sqlContext)
      sqlContext.baseRelationToSchemaRDD(relation)
    }
  }

  // TODO: Implement me.
  implicit class AvroSchemaRDD(schemaRDD: SchemaRDD) {
    def saveAsAvroFile(path: String): Unit = ???
  }
}
| JoshRosen/spark-avro | src/main/scala/com/databricks/spark/avro/package.scala | Scala | apache-2.0 | 1,108 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.action.async.ws
import io.gatling.core.action.Action
import io.gatling.core.session._
import io.gatling.core.stats.StatsEngine
import io.gatling.core.util.NameGen
import io.gatling.http.action.async.ReconciliateAction
/**
 * WebSocket variant of [[ReconciliateAction]]: reconciles session state for the
 * WebSocket registered under `wsName`.
 */
class WsReconciliate(
    requestName: Expression[String],
    wsName:      String,
    statsEngine: StatsEngine,
    next:        Action
) extends ReconciliateAction(requestName, wsName, statsEngine, next) with WsAction with NameGen {

  override val name: String = genName("wsReconciliate")
}
| MykolaB/gatling | gatling-http/src/main/scala/io/gatling/http/action/async/ws/WsReconciliate.scala | Scala | apache-2.0 | 1,148 |
package spire
package tests
import org.scalatest.Matchers
import org.scalatest._
import prop._
/** Base trait for spire's ScalaCheck-driven property suites. */
trait SpireProperties extends PropSpec with Matchers with PropertyChecks {
  // disable scalatest ===
  // (presumably so scalatest's Equalizer cannot shadow spire's own === syntax; any
  //  accidental use fails loudly via ??? — TODO confirm rationale)
  override def convertToEqualizer[T](left: T): Equalizer[T] = ???
}
/** Base trait for spire's FunSuite-style unit tests. */
trait SpireTests extends FunSuite with Matchers {
  // disable scalatest ===
  // (same rationale as SpireProperties: ??? makes accidental use of scalatest's
  //  Equalizer fail loudly — TODO confirm)
  override def convertToEqualizer[T](left: T): Equalizer[T] = ???
}
| kevinmeredith/spire | tests/src/test/scala/spire/tests/package.scala | Scala | mit | 413 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.physical.stream
import com.google.common.collect.ImmutableList
import org.apache.calcite.plan._
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.Values
import org.apache.calcite.rex.RexLiteral
/**
* Stream physical RelNode for [[Values]].
*/
/**
 * Stream physical RelNode for [[Values]].
 */
class StreamExecValues(
    cluster: RelOptCluster,
    traitSet: RelTraitSet,
    tuples: ImmutableList[ImmutableList[RexLiteral]],
    outputRowType: RelDataType)
  extends Values(cluster, outputRowType, tuples, traitSet)
  with StreamPhysicalRel {

  // A literal Values source is static: it never emits updates or retractions,
  // consumes none, and needs no watermark.
  override def producesUpdates: Boolean = false

  override def producesRetractions: Boolean = false

  override def consumesRetractions: Boolean = false

  override def needsUpdatesAsRetraction(input: RelNode): Boolean = false

  override def requireWatermark: Boolean = false

  override def deriveRowType(): RelDataType = outputRowType

  override def copy(traitSet: RelTraitSet, inputs: java.util.List[RelNode]): RelNode =
    new StreamExecValues(cluster, traitSet, getTuples, outputRowType)
}
| ueshin/apache-flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/nodes/physical/stream/StreamExecValues.scala | Scala | apache-2.0 | 1,927 |
package com.nthportal.versions
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import org.scalatest.{Inside, OptionValues}
abstract class SimpleSpec extends AnyFlatSpec with Matchers with Inside with OptionValues
| NthPortal/versions | src/test/scala/com/nthportal/versions/SimpleSpec.scala | Scala | apache-2.0 | 255 |
package demy.mllib;
import demy.mllib.evaluation.{BinaryMetrics, HasBinaryMetrics}
import demy.mllib.util.log
import demy.mllib.params._
import demy.storage.Storage
import org.apache.spark.ml.{Transformer, Estimator, PipelineStage}
import org.apache.spark.ml.param.{Params}
import org.apache.spark.sql.{DataFrame, Row, Dataset}
import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema
import org.apache.spark.sql.types._
import org.apache.spark.sql.functions.{col}
import org.apache.spark.sql.SparkSession
import scala.collection.JavaConverters._
import java.net.{URLDecoder, URLEncoder}
/**
 * A named, versioned pipeline of [[ModelStep]]s over Spark DataFrames. Steps are
 * Transformers/Estimators executed in order; intermediate results can be cached,
 * snapshotted to `snapshotPath`, shown, logged, or shared between steps through
 * named inputs ("#name" for externally supplied frames, "$step[.param]" for
 * results of earlier steps).
 */
case class Model(project:String, model:String, modelGroup:String, steps:Seq[ModelStep], snapshotPath:Option[String]) {

  /** Builds a version containing only the named steps; fails fast on unknown step names. */
  def getVersion(steps:String*) = {
    val stepSet = steps.toSet
    val notFounds = stepSet.diff(this.steps.map(s => s.name).toSet)
    if(notFounds.size>0)
      throw new Exception(s"The step(s) ${notFounds.mkString(",")} cannot be found on model")
    ModelVersion(steps = this.steps.flatMap(s => if(stepSet.contains(s.name)) Some(s) else None), comment = "")
  }

  /** Version containing every declared step, duplicates included. */
  def fullVersion() = ModelVersion(steps = this.steps)

  /** Version keeping, for each (name, version) pair, only its first occurrence. */
  def defaultVersion() = {
    ModelVersion(steps = {
      val iSteps = this.steps.zipWithIndex
      iSteps.flatMap(p => p match {case(step, i) => if(iSteps.filter(pp => pp match{ case (sstep, ii)=> step.name == sstep.name && step.version == sstep.version && ii<i}).size == 0)
        Some(step)
      else
        None})
    })
  }

  def plan() = ModelPlan()

  /** Runs the default version with every step's "show" option enabled. */
  def show(source:DataFrame, namedDataFrames:Map[String,DataFrame]= Map[String, DataFrame]()):Unit = {
    this.run(source = source, base = ModelVersion(this.defaultVersion.steps.map(s => s.option("show"->"true"))), logOn = None, namedDataFrames = namedDataFrames )
  }

  def show(source:DataFrame, steps:String*):Unit = this.show(source, Map[String, DataFrame](), steps:_*)

  /** Runs the default version showing only the named steps. */
  def show(source:DataFrame, namedDataFrames:Map[String, DataFrame], steps:String*):Unit = {
    val stepSet = steps.toSet
    this.run(source = source, base = ModelVersion(this.defaultVersion.steps.map(s => s.option("show"-> (if(stepSet.contains(s.name)) "true" else "false"))))
      , logOn = None, namedDataFrames = namedDataFrames)
  }

  /**
   * Executes every version produced by `plan` on `base` (optionally truncated to
   * `maxVersions`), folding `source` through each version's steps. Returns, per
   * version, the final DataFrame plus the requested `outDataFrames` by name.
   * When `logOn` is set, one JSON row per version (steps, params, metrics) is
   * appended under that path, partitioned by modelGroup/project/model.
   * NOTE(review): `namedInputs` is mutated inside the fold — step order matters.
   */
  def run(source:DataFrame, plan:ModelPlan=ModelPlan(), base:ModelVersion=this.fullVersion, logOn:Option[String]=None
    , namedDataFrames:Map[String, DataFrame] = Map[String, DataFrame](), showSteps:Seq[String]=Seq[String](), stopAfter:Option[String]=None, maxVersions:Option[Int]=None
    , outDataFrames:Seq[String]=Seq[String]()) = {
    var i = 0
    val versions = plan.build(base, stopAfter) match {case vers => maxVersions match {case Some(max) => vers.take(max) case _ => vers}}
    val storage = Storage.getSparkStorage
    versions.map(modelVersion => {
      //modelVersion.printSchema()
      log.msg(s"(${i}/${versions.size}:${Math.round(100.0* i/versions.size)}%) Starting Version: ${modelVersion.comment}")
      i = i + 1
      var binaryMetrics:Option[BinaryMetrics] = None
      var execMetrics = scala.collection.mutable.Map[String, Double]()
      // registry of named frames: "#..." entries are supplied up-front, "$..." entries
      // are filled in as the producing steps execute
      var namedInputs = ((modelVersion.steps
        .flatMap(s => s.input match {case Some(sName) => Some(sName -> None.asInstanceOf[Option[DataFrame]]) case _ => None})
        .toMap) + ("#model"->Some(source))
        ++ modelVersion.steps
          .flatMap(s => s.paramInputs.filter(p => p._2.startsWith("$")).map(p => (p._2 -> None.asInstanceOf[Option[DataFrame]]))
          .toMap)
        ++ modelVersion.steps
          .flatMap(s => s.paramInputs.filter(p => p._2.startsWith("$")).map(p => (p._2 -> None.asInstanceOf[Option[DataFrame]]))
          .toMap)
        ++ namedDataFrames.map(p => p match {case (name, df) => ("#"+name, Some(df))})
      )
      val resdf = modelVersion.steps.foldLeft(source)((current, step) => {
        log.msg(s"Step ${step.name}")
        // a step either consumes the previous step's output or a named frame
        val stepSource = step.input match {
          case Some(stepName) =>
            namedInputs.get(stepName) match {
              case Some(Some(df)) => df
              case _ => throw new Exception(s"Step $stepName has not yet been executed so its result cannot be used for step ${step.name}")
            }
          case _ => current
        }
        // bind declared DataFrame parameters on the step's action before running it
        var theAction = step.paramInputs.foldLeft(step.action)((current, dfParam)
          => step.action.set(step.action.getParam(dfParam._1), namedInputs.get(dfParam._2) match {
            case Some(Some(df)) => df
            case _ => throw new Exception(s"Cannot find the specified dataframe ${dfParam._2}")
          })
        )
        // reuse a snapshot when available; otherwise transform (or fit-then-transform)
        val (outDF, executedStep, outputDFs) =
          (theAction, getStepSnapshot(modelVersion, step.name, source.sparkSession)) match {
            case (t, Some((snapshoted, snapshotParams))) => (snapshoted, t, snapshotParams)
            case (t:Transformer, _) => (t.transform(stepSource), t, step.paramOutputs.map( outputName => outputName -> this.getDFParam(t, outputName)).toMap)
            case (e:Estimator[_], _) => {
              val model = e.fit(stepSource)
              (model.transform(stepSource), model, step.paramOutputs.map( outputName => outputName -> this.getDFParam(model, outputName)).toMap)
            }
            case _ => throw new Exception("The current action type ${o.getClass.getName} is not supported @epi")
          }
        var df = if(step.select.size>0) outDF.select(step.select.map(s => col(s)):_*) else outDF
        df = if(step.drop.size>0) df.drop(step.drop:_*) else df
        df = step.renameCols.foldLeft(df)((current, p)=> current.drop(p._2).withColumnRenamed(p._1, p._2))
        df = if(step.repartitionInputAs == 0) df else df.repartition(step.repartitionInputAs)
        //Caching or snapshoting the results step result dataframe if set
        df = (
          if(step.snapshot) {
            this.setStepSnapshot(df, modelVersion, step.name, outputDFs)
          } else if(step.cache) {
            df.cache()
          } else {
            df
          })
        //Storing output params if used as named input on another step or model output
        outputDFs.foreach{ case(outputName, df) =>
          namedInputs.get("$"+step.name+"."+outputName) match {
            case Some(s) =>
              namedInputs = (namedInputs + (s"${"$"}${step.name}.$outputName" -> Some(df)))
            case _ => {}
          }
          if(outDataFrames.contains("$"+step.name+"."+outputName)) {
            namedInputs = (namedInputs + (s"${"$"}${step.name}.$outputName" -> Some(df)))
          }
        }
        //Storing result if used as named input on another step
        namedInputs.get("$"+step.name) match {
          case Some(s) => namedInputs = namedInputs + ("$"+step.name -> Some(df))
          case _ => {}
        }
        //Logging binary metrics uf set
        (logOn, executedStep) match {
          case (Some(path), binEvaluator:HasBinaryMetrics) => binaryMetrics = Some(binEvaluator.metrics)
          case _ =>{}
        }
        //Logging execution metrics uf set
        (logOn, executedStep) match {
          case (Some(path), metricStep:HasExecutionMetrics) => execMetrics ++= metricStep.metrics.filter(p => metricStep.getLogMetrics && (metricStep.getMetricsToLog.size == 0 || metricStep.getMetricsToLog.contains(p._1))).map(p => (step.name+"_"+p._1, p._2))
          case _ =>{}
        }
        //Showing results if set
        if(step.show || showSteps.contains(step.name))
          df.show
        df
      })
      logOn match {
        case Some(logPath) => {
          // build the log row: version info + optional binary metrics + execution metrics
          var execRow = this.toRow(modelVersion)
          execRow = binaryMetrics match {
            case Some(metrics) => {
              val mDF = source.sparkSession.createDataFrame(Seq(metrics))
              new GenericRowWithSchema((execRow.toSeq ++ mDF.first.toSeq).toArray, StructType(execRow.schema.fields ++ mDF.schema.fields))
            }
            case _ => execRow
          }
          execRow = execMetrics.size match {
            case 0 => execRow
            case _ => {
              val seq = execMetrics.toSeq
              val names = seq.map(p => p._1)
              val values = seq.map(p => p._2)
              new GenericRowWithSchema((execRow.toSeq ++ values).toArray, StructType(execRow.schema.fields ++ names.map(n => new StructField(name = n, dataType = DoubleType))))
            }
          }
          source.sparkSession.createDataFrame(List(execRow.asInstanceOf[Row]).asJava, execRow.schema).write.mode("append").partitionBy("modelGroup", "project", "model").json(logPath)
        }
        case _ =>{}
      }
      (resdf, outDataFrames.map(inputName => namedInputs.get(inputName) match {
        case Some(Some(df)) => (inputName -> df)
        case _ => throw new Exception(s"Cannot output dataframe ${inputName} since it has not been stored")
      }).toMap)
    })
  }

  /** Reads a DataFrame-valued parameter off a pipeline stage, failing if unset or not a Dataset. */
  def getDFParam(stage:PipelineStage, paramName:String) = {
    val param = stage.getParam(paramName)
    stage.get(param) match {
      case Some(v) => v match {
        case v:Dataset[_] => v.toDF
        case _ => throw new Exception(s"Output parameters are expected to be dataframes found ${v.getClass.getName} instead")
      }
      case _ => throw new Exception(s"The output paramater ${"$"+stage+"."+paramName} cannot be used as input because is not set")
    }
  }

  /** Base log row: project/model identity, execution timestamp, and a free-form comment. */
  def toRow(comment:String):GenericRowWithSchema = new GenericRowWithSchema(values = Array(project, model, modelGroup, new java.sql.Timestamp(System.currentTimeMillis()), comment)
    ,schema = StructType(fields = Array(StructField(name="project", dataType=StringType)
      ,StructField(name="model", dataType=StringType)
      ,StructField(name="modelGroup", dataType=StringType)
      ,StructField(name="executedOn", dataType=TimestampType)
      ,StructField(name="comment", dataType=StringType)
    )))

  /** Extends the base row with the version's loggable steps, their params, and custom logs. */
  def toRow(version:ModelVersion):GenericRowWithSchema = {
    val modelRow = this.toRow(version.comment)
    val stepsToLog = version.steps.filter(step => step.log)
    //        .flatMap(p => p match {case (step, vIndex) => if(step.log) Some(step.versions(vIndex)) else None})
    val stepsValAndTypes = stepsToLog
      .map(step => (step.version, StructField(name=step.family, dataType = StringType)))
    val paramsValAndTypes = stepsToLog.flatMap(step => step.structFieldAndValuesToLog().map(sv => sv match {case (structField, value) => (value, structField) } ))
    val customLogs = version.customLogs
    val allValAndTypes = stepsValAndTypes ++ paramsValAndTypes
    var values:Seq[Any] = modelRow.toSeq
    values = values ++ customLogs.toSeq
    values = values ++ allValAndTypes.map(_._1).toSeq
    var schema:Seq[StructField] = modelRow.schema.fields
    schema = schema ++ customLogs.schema.fields
    schema = schema ++ allValAndTypes.map(_._2)
    new GenericRowWithSchema(values = values.toArray ,schema = new StructType(schema.toArray))
  }

  /** Directory under snapshotPath where a step's snapshot parquet lives. */
  def stepSnapshotPath(version:ModelVersion, stepName:String) = {
    this.snapshotPath match {
      case Some(lPath)=> lPath+"/"+this.project+"/"+this.model+"/"+stepName
      case _ => throw new Exception("Cannot snapshot since snapshot folder is not set @epi")
    }
  }

  /**
   * Loads a step's snapshot (main frame + named output frames) when the step opted
   * into snapshot reuse and every parquet path exists; None otherwise.
   */
  def getStepSnapshot(version:ModelVersion, stepName:String, spark:SparkSession) = {
    val storage = Storage.getSparkStorage
    val theStep = version.steps.filter(s => s.name == stepName).head
    if(theStep.reuseSnapshot) {
      val snapPath = this.stepSnapshotPath(version, stepName)
      val namedOutputPaths = theStep.paramOutputs.map{paramName => (paramName, this.stepSnapshotPath(version, stepName)+"."+paramName)}.toMap
      if(storage.exists(storage.getNode(snapPath)) && namedOutputPaths.toSeq.forall{case(name, path) => storage.exists(storage.getNode(path))})
        Some((decodeCols(spark.read.parquet(snapPath)), namedOutputPaths.mapValues{path => decodeCols(spark.read.parquet(path))}))
      else None
    } else {
      None
    }
  }

  /**
   * Writes a step's snapshot (unless an existing one is being reused) and returns
   * the frame re-read from parquet so downstream work reads from disk.
   */
  def setStepSnapshot(df:DataFrame, version:ModelVersion, stepName:String, outDataFrames:Map[String, DataFrame]) = {
    val storage = Storage.getSparkStorage
    val theStep = version.steps.filter(s => s.name == stepName).head
    val snapPath = this.stepSnapshotPath(version, stepName)
    if(!theStep.reuseSnapshot || !storage.exists(storage.getNode(snapPath)))
      encodeCols(df).write.mode("overwrite").parquet(snapPath)
    outDataFrames.toSeq.foreach{case (outName, outDF) =>
      if(!theStep.reuseSnapshot || !storage.exists(storage.getNode(snapPath+"."+outName)))
        encodeCols(outDF).write.mode("overwrite").parquet(snapPath+"."+outName)
    }
    decodeCols(df.sparkSession.read.parquet(snapPath))
  }

  // URL-encode/decode column names so characters illegal in parquet column names survive snapshots
  def encodeCols(df:DataFrame) = df.select(df.columns.map(c => col(c).as(URLEncoder.encode(c, "UTF-8"))):_*)
  def decodeCols(df:DataFrame) = df.select(df.columns.map(c => col(c).as(URLDecoder.decode(c, "UTF-8"))):_*)

  // builder-style helpers: each returns a new Model with the step/path appended
  def step(step:ModelStep):Model = Model(project = this.project, model = this.model, modelGroup=this.modelGroup, steps = this.steps :+ step, snapshotPath = this.snapshotPath)
  def step(name:String, action:Params, options:(String, String)*):Model = this.step(ModelStep(name = name, action = action).option(options:_*))
  def step(name:String, version:String, action:Params, options:(String, String)*):Model = this.step(ModelStep(name = name, version= version, family = name , action = action).option(options:_*))
  def snapshotPath(snapshotPath:String):Model = Model(project = this.project, model = this.model, modelGroup=this.modelGroup, steps = this.steps, snapshotPath = Some(snapshotPath))
}
/** Convenience constructors for [[Model]] with progressively fewer identifiers. */
object Model {
  def apply(project:String, model:String, modelGroup:String):Model = Model(project = project, model = model, modelGroup = modelGroup, steps = Seq[ModelStep](), snapshotPath = None)
  def apply(project:String):Model = Model(project = project, model = project, modelGroup = "none", steps = Seq[ModelStep](), snapshotPath = None)
  def apply():Model = Model(project = "none", model = "none", modelGroup = "none", steps = Seq[ModelStep](), snapshotPath = None)
  // pre-labelled model group for text-classification pipelines
  def textClassifier(project:String, model:String):Model = Model(project = project, model = model, modelGroup = "Text Classification")
  def textClassifier(project:String):Model = Model(project = project, model = project, modelGroup = "Text Classification")
}
| forchard-epi/demy | mllib/src/main/scala/Model.scala | Scala | bsd-3-clause | 16,046 |
package agile.android
import sbt._
import scala.xml._
/**
 * Helpers for locating and editing Android project artefacts (manifest, model
 * sources) inside an sbt source tree.
 */
protected object Android
{
  /**
   * Locates AndroidManifest.xml anywhere under the given source directory.
   * @throws Exception when no manifest exists (i.e. not an Android project).
   */
  def findManifestPath(sourceDirectory: File): File =
  {
    // search for the first match instead of indexing and catching: the previous
    // version caught Throwable (including fatal errors) merely to detect an empty
    // PathFinder result
    (sourceDirectory ** "AndroidManifest.xml").get.headOption.getOrElse {
      throw new Exception("""Manifest file was not found!
                            |Is this an Android project?""".stripMargin)
    }
  }

  /** Reads the application package name from the manifest's `package` attribute. */
  def findPackageName(sourceDirectory: File): String =
  {
    val manifestFile = findManifestPath(sourceDirectory)
    XML.loadFile(manifestFile).attribute("package").get.head.text
  }

  /** Path of the `models` package directory derived from the manifest package name. */
  def getModelsPath(sourceDirectory: File): File =
  {
    new File(sourceDirectory.getPath + "/main/scala/" + findPackageName(sourceDirectory).replace('.', '/') + "/models/")
  }

  /** Model class names (file base names) in the models package; Nil when the directory is absent. */
  def getModels(sourceDirectory: File): Seq[String] =
  {
    val modelsPath = getModelsPath(sourceDirectory)
    if (modelsPath.isDirectory == false)
    {
      Nil
    }
    else
    {
      // listFiles returns null if the directory disappears between the isDirectory
      // check and the listing; Option guards that race
      Option(modelsPath.listFiles()).map(_.map(_.getName().split('.').head).toSeq).getOrElse(Nil)
    }
  }

  /** Permission names declared via `uses-permission` in the manifest. */
  def getManifestPermissions(sourceDirectory: File): Seq[String] =
  {
    val manifestFile = findManifestPath(sourceDirectory)
    val pemissionsXML = XML.loadFile(manifestFile).child.filter(_.label == "uses-permission")
    // missing android:name attributes yield empty strings rather than failing
    pemissionsXML.map(_.attribute("http://schemas.android.com/apk/res/android", "name").getOrElse(new Text("")).head.text)
  }

  /** Prepends `uses-permission` elements for each missing permission and rewrites the manifest. */
  def manifestAddPermissions(sourceDirectory: File, missingPermissions: Array[String]): Unit = {
    val manifestFile = findManifestPath(sourceDirectory)
    val manifest = XML.loadFile(manifestFile)
    val missingPermissionsXML = missingPermissions.map(permission => <uses-permission android:name={permission}></uses-permission>)
    val newManifest = manifest.copy(child = missingPermissionsXML ++ manifest.child)
    IO.write(manifestFile, newManifest.toString)
  }
}
package es.uvigo.ei.sing.biomsef
package database
import scala.concurrent.Future
import play.api.Play
import play.api.db.slick.{ DatabaseConfigProvider, HasDatabaseConfig }
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import slick.driver.JdbcProfile
import entity._
import util.Page
/**
 * Slick mapping for the `search_index` table: one row per (term, keyword, article)
 * with its tf/idf/tf-idf scores. Requires the articles and keywords components for
 * the foreign keys.
 */
trait SearchTermsComponent {
  self: ArticlesComponent with KeywordsComponent with HasDatabaseConfig[JdbcProfile] =>

  import driver.api._

  class SearchTerms(tag: Tag) extends Table[SearchTerm](tag, "search_index") {
    def term = column[String]("search_index_term")
    def tf = column[Double]("search_index_tf")
    def idf = column[Double]("search_index_idf")
    def tfidf = column[Double]("search_index_tfidf")
    def articleId = column[Article.ID]("article_id")
    def keywordId = column[Keyword.ID]("keyword_id")

    // composite key: a term appears at most once per (keyword, article) pair
    def pk = primaryKey("search_index_pk", (term, keywordId, articleId))

    // cascading FKs: deleting a keyword or article removes its index rows
    def keyword = foreignKey("search_index_keyword_fk", keywordId, keywords)(_.id, onUpdate = ForeignKeyAction.Cascade, onDelete = ForeignKeyAction.Cascade)
    def article = foreignKey("search_index_article_fk", articleId, articles)(_.id, onUpdate = ForeignKeyAction.Cascade, onDelete = ForeignKeyAction.Cascade)

    def * = (term, tf, idf, tfidf, articleId, keywordId) <> (SearchTerm.tupled, SearchTerm.unapply)
  }

  lazy val terms = TableQuery[SearchTerms]

}
/**
 * Data-access layer over the search index. Term filters use SQL LIKE patterns
 * (case-insensitive), so callers pass "%"-style wildcards.
 */
final class SearchTermsDAO extends SearchTermsComponent with ArticlesComponent with KeywordsComponent with HasDatabaseConfig[JdbcProfile] {

  import driver.api._

  protected val dbConfig = DatabaseConfigProvider.get[JdbcProfile](Play.current)

  /** Total number of index rows. */
  def count: Future[Int] =
    db.run(terms.length.result)

  /** Number of index rows whose term matches the LIKE pattern (case-insensitive). */
  def count(termFilter: String): Future[Int] =
    db.run {
      terms.filter(_.term.toLowerCase like termFilter.toLowerCase).length.result
    }

  /** Fetch one row by its composite (term, articleId, keywordId) key. */
  def get(id: SearchTerm.ID): Future[Option[SearchTerm]] =
    db.run {
      terms.filter(term =>
        (term.articleId === id._2) &&
        (term.keywordId === id._3) &&
        (term.term.toLowerCase === id._1.toLowerCase)
      ).result.headOption
    }

  /** Distinct keyword IDs whose terms match the filter. */
  def getKeywordIds(termFilter: String = "%"): Future[Set[Keyword.ID]] =
    db.run(terms.filter(
      _.term.toLowerCase like termFilter.toLowerCase
    ).groupBy(_.keywordId).map(_._1).result).map(_.toSet)

  /** Page of matching terms ordered by descending tf-idf. */
  def searchTerm(page: Int = 0, pageSize: Int = 10, termFilter: String = "%"): Future[Page[SearchTerm]] = {
    val offset = pageSize * page

    val query = terms.filter(
      _.term.toLowerCase like termFilter.toLowerCase
    ).sortBy(_.tfidf.desc).drop(offset).take(pageSize)

    for {
      total  <- count(termFilter)
      result <- db.run(query.result)
    } yield Page(result, page, offset, total)
  }

  // TODO: uglyness at its finest, clean up
  /**
   * Page of articles (within the year range) relevant to any of the given keywords,
   * ranked by the summed tf-idf of their matching terms; each article is returned
   * with its score and the set of keywords that matched it.
   * NOTE(review): the final keyword join is not year-filtered (see the commented-out
   * condition) — confirm whether that filter was dropped intentionally.
   */
  def searchKeywords(page: Int = 0, pageSize: Int = 10, keywordIds: Set[Keyword.ID], fromYear: Long, toYear: Long): Future[Page[(Article, Double, Set[Keyword])]] = {
    val offset = pageSize * page

    val total = (for {
      term    <- terms if term.keywordId inSet keywordIds
      article <- this.articles if article.id === term.articleId && article.year >= fromYear && article.year <= toYear
    } yield term).groupBy(_.articleId).map(_._1).length

    // top article IDs for this page, scored by the sum of their terms' tf-idf
    val articleIds = (for {
      term    <- terms if term.keywordId inSet keywordIds
      article <- this.articles if article.id === term.articleId && article.year >= fromYear && article.year <= toYear
    } yield term)
      .groupBy { _.articleId }
      .map { case (id, ts) => id -> ts.map(_.tfidf).sum }
      .sortBy { case (_, sum) => sum.desc }
      .drop { offset }
      .take { pageSize }

    val articles = articleIds flatMap { case (id, sum) =>
      this.articles
        .filter { _.id === id }
        .map { (_, sum) }
    }

    val found = for {
      term    <- terms
      article <- articles if article._1.id === term.articleId // && article._1.year >= fromYear && article._1.year <= toYear
      keyword <- keywords if keyword.id === term.keywordId
    } yield (article, keyword)

    // group the joined rows by article so each article carries its full keyword set
    val query = found.result map {
      _.groupBy(_._1).mapValues(_.map(_._2).toSet).toList
    }

    for {
      total  <- db.run(total.result)
      result <- db.run(query)
    } yield Page(
      result map { res => (res._1._1, res._1._2.get, res._2) } sortWith { _._2 > _._2 },
      page, offset, total
    )
  }

  def insert(term: SearchTerm): Future[SearchTerm] =
    db.run((terms += term)).map(_ => term)

  def insert(terms: SearchTerm*): Future[Seq[SearchTerm]] =
    db.run((this.terms ++= terms)).map(_ => terms)

  /** Deletes every index row in a single transaction. */
  def clear(): Future[Unit] =
    db.run(terms.delete.transactionally).map(_ => ())

}
| agjacome/biomsef | src/main/scala/database/SearchTermsDAO.scala | Scala | mit | 4,741 |
package dk.bayes.dsl.variable.gaussian.univariate

import dk.bayes.dsl.factor.SingleFactor
import dk.bayes.math.gaussian.Gaussian
import dk.bayes.dsl.variable.gaussian._

/**
 * Single factor attached to a univariate Gaussian variable.
 */
trait UnivariateGaussianFactor extends SingleFactor[Gaussian] {
  // Self-type: may only be mixed into a UnivariateGaussian, which supplies
  // the mean `m` and variance `v` used below.
  this: UnivariateGaussian =>

  // The downward message is the variable's own prior, Gaussian(m, v).
  val factorMsgDown: Gaussian = Gaussian(this.m, this.v)
} | danielkorzekwa/bayes-scala | src/main/scala/dk/bayes/dsl/variable/gaussian/univariate/UnivariateGaussianFactor.scala | Scala | bsd-2-clause | 325 |
package security
import helpers._
import models._
import play.api.mvc._
import scala.concurrent._
import scala.util._
/**
* @author zepeng.li@gmail.com
*/
/**
 * Mixes authentication into a controller-like component: resolves the user
 * for a request via the configured [[PAM]] chain, logging failures.
 */
trait Authentication extends PAMLogging {
  self: DefaultPlayExecutor =>

  /** User repository the PAM modules authenticate against. */
  def _users: Users

  /** The pluggable authentication module (or chain) to use. */
  def pam: PAM

  /**
   * Resolves the user for `req`. The returned future fails when no PAM in the
   * chain can authenticate; failures are logged with the PAM's name.
   */
  def authenticate(req: RequestHeader) =
    pam(_users)(req).andThen {
      loggingPAMExceptions { reason =>
        s"PAM[${pam.basicName}] auth failed, because $reason"
      }
    }
}
/**
 * Pluggable Authentication Module: a named function that, given a user
 * repository and a request, asynchronously resolves the authenticated user
 * (failing the future otherwise). PAMs compose into fallback chains via
 * [[thenTry]] / `>>`.
 */
trait PAM extends (Users => RequestHeader => Future[User])
  with CanonicalNamed {
  self: I18nLogging =>

  /** Builds a chain that falls back to `that` when this PAM fails. */
  def thenTry(that: PAM)(
    implicit ec: ExecutionContext
  ): PAM = new ThenTryPAM(self, that)

  /** Symbolic alias for [[thenTry]]. */
  def >>(that: PAM)(implicit ec: ExecutionContext) = thenTry(that)
}
/**
 * PAM chain of two modules: tries `first`, and on ANY failure (logged first
 * via `andThen`) recovers by delegating the whole request to `second`.
 * The chain reports `second`'s canonical name.
 */
class ThenTryPAM(first: PAM, second: PAM)(
  implicit
  val loggingMessages: LoggingMessages,
  val ec: ExecutionContext
) extends PAM
  with CanonicalNamed
  with I18nLogging
  with PAMLogging {

  def basicName = second.basicName

  def apply(v1: Users): (RequestHeader) => Future[User] = {
    // andThen only observes the outcome (for logging); recoverWith then
    // switches to the second PAM on any Throwable.
    req => first.apply(v1)(req).andThen {
      loggingPAMExceptions { reason =>
        s"PAM[${first.basicName}] auth failed, because $reason, then try PAM[${second.basicName}]"
      }
    }.recoverWith {
      case _: Throwable => second.apply(v1)(req)
    }
  }
}
/**
 * Shared logging for PAM outcomes. Maps each failure type to an appropriate
 * log level; successes are ignored (the partial function is undefined for
 * them). Case order matters: specific user errors first, then any
 * BaseException, then a catch-all for unexpected Throwables.
 */
trait PAMLogging extends I18nLogging {

  /** Builds a callback for `Future.andThen` that logs failures using `message`. */
  def loggingPAMExceptions(message: String => String): PartialFunction[Try[User], Unit] = {
    case Failure(e: User.NoCredentials)       => Logger.trace(s"${message(e.reason)}")
    case Failure(e: User.SessionIdNotMatch)   => Logger.debug(s"${message(e.reason)}")
    case Failure(e: User.AccessTokenNotMatch) => Logger.debug(s"${message(e.reason)}")
    case Failure(e: User.NotFound)            => Logger.error(s"${message(e.reason)}")
    case Failure(e: BaseException)            => Logger.debug(s"${message(e.reason)}", e)
    case Failure(e: Throwable)                => Logger.error(s"${message(e.getMessage)}", e)
  }
}
/** Component contract: provides a factory building a PAM from the Play API. */
trait PAMBuilderComponents {

  implicit def pamBuilder: BasicPlayApi => PAM
} | lizepeng/app.io | modules/security/app/security/Authentication.scala | Scala | apache-2.0 | 2,049 |
package com.olvind.crud
package frontend
import scalacss.Defaults._
import scalacss.ScalaCssReact._
import scalacss.ext.CssReset
import scalacss.mutable.{GlobalRegistry, StyleSheet}
/**
 * Global ScalaCSS stylesheet: normalises the page and sets base typography,
 * and registers all application stylesheets with the global registry.
 */
object Styles extends StyleSheet.Inline {
  import dsl._

  // Base <body> style: CSS reset plus default font/size/line-height.
  val body = style(
    CssReset.normaliseCss,
    fontFamily := "'Roboto', sans-serif",
    fontSize(13.px),
    lineHeight(20.px)
  )

  /**
   * Registers every stylesheet and attaches each to the document as soon as
   * it is registered. Call once at application startup.
   */
  def load() = {
    GlobalRegistry.register(
      this,
      EditorController.Style,
      TableStyle
    )
    GlobalRegistry.onRegistration(_.addToDocument())
  }
}
| skytteren/slick-crud | crud/js/src/main/scala/com/olvind/crud/frontend/Styles.scala | Scala | apache-2.0 | 549 |
// Copyright (c) 2016 Yuichiroh Matsubayashi
package yuima.nuimo.action
import yuima.nuimo.Nuimode
import yuima.nuimo.config.LedImage
/**
 * Actions controlling the macOS Music app (formerly iTunes) via AppleScript.
 *
 * Scripts are executed through [[Nuimode.runAppleScript]] (fire-and-forget)
 * or [[Nuimode.runAppleScriptSync]] (when the script's output is needed).
 * Some actions also draw feedback icons on the Nuimo LED matrix.
 */
object ItunesAction {

  /** Toggles play/pause without changing application focus. */
  def playpause() = {
    val cmd = """tell application "Music"
                | playpause
                |end tell
              """.stripMargin
    Nuimode.runAppleScript(cmd)
  }

  /** Brings the Music app to the foreground. */
  def activate() = {
    val cmd = """tell application "Music"
                | activate
                |end tell
              """.stripMargin
    Nuimode.runAppleScript(cmd)
  }

  /**
   * Activates Music, lets the user pick a playlist from a dialog, starts
   * shuffled playback, then restores focus to the previously active app.
   */
  def choosePlayListWithActivatingItunes() = {
    val cmd = """set prevApp to (path to frontmost application as text)
                |
                |tell application "Music"
                | activate
                | set shuffle enabled to true
                | play playlist (item 1 of (choose from list (get name of playlists as list)))
                |end tell
                |
                |tell application prevApp to activate
              """.stripMargin
    Nuimode.runAppleScript(cmd)
  }

  /**
   * Shows a playlist chooser (without stealing focus to Music) and plays the
   * selection, if any. Cancelling the dialog plays nothing.
   */
  def choosePlayList() = {
    val cmd = """tell application "Music"
                | set plists to (get name of playlists as list)
                |end tell
                |
                |activate
                |choose from list (plists) with prompt "Choose a playlist"
                |
                |if result is not false then
                | tell application "Music"
                |  play playlist (item 1 of result)
                | end tell
                |end if
              """.stripMargin
    Nuimode.runAppleScript(cmd)
  }

  /**
   * Toggles playback with a volume fade over roughly `duration` seconds,
   * showing a play/pause icon on the Nuimo. Runs synchronously so the fade
   * completes before returning; the original volume is restored afterwards.
   */
  def fadeInOut(client: Nuimode, uuid: String, duration: Double = 0.5) = {
    // Icon reflects the action about to happen (pause when playing, and
    // vice versa).
    if (isPlaying)
      client.writeLedImage(uuid, LedImage.pause)
    else
      client.writeLedImage(uuid, LedImage.play)

    // Fade resolution is capped so very low volumes still fade smoothly.
    val resolution = getSoundVolume min 40
    val cmd = s"""property resolution : $resolution
                |property delayIncr : ${ duration / resolution }
                |tell application "Music"
                | set originalVol to sound volume
                | set volIncr to originalVol div resolution
                | if player state is not playing then
                |  set sound volume to 0
                |  play
                |  -- Fade in
                |  repeat while (sound volume ≤ (originalVol - volIncr))
                |   set sound volume to (sound volume + volIncr)
                |   delay delayIncr
                |  end repeat
                | else
                |  -- Fade out
                |  repeat while (sound volume > 0)
                |   set sound volume to (sound volume - volIncr)
                |   delay delayIncr
                |  end repeat
                |  pause
                | end if
                | set sound volume to originalVol
                |end tell
              """.stripMargin
    Nuimode.runAppleScriptSync(cmd)
  }

  /** Current Music output volume (0-100). */
  def getSoundVolume = {
    Nuimode.runAppleScriptSync( """tell application "Music"
                | return sound volume
                |end tell
              """.stripMargin).toInt
  }

  /** True when Music is currently playing a track. */
  def isPlaying = {
    val cmd = """tell application "Music"
                | if player state is playing then
                |  return true
                | else
                |  return false
                | end if
                |end tell
              """.stripMargin
    Nuimode.runAppleScriptSync(cmd).toBoolean
  }

  /** Skips to the previous track, showing a backward icon on the Nuimo. */
  def prevTrack(client: Nuimode, uuid: String) = {
    client.writeLedImage(uuid, LedImage.backward)
    val cmd = """tell application "Music"
                | back track
                |end tell
              """.stripMargin
    Nuimode.runAppleScript(cmd)
  }

  /** Skips to the next track, showing a forward icon on the Nuimo. */
  def nextTrack(client: Nuimode, uuid: String) = {
    client.writeLedImage(uuid, LedImage.forward)
    val cmd = """tell application "Music"
                | next track
                |end tell
              """.stripMargin
    Nuimode.runAppleScript(cmd)
  }

  /**
   * Shows a macOS notification with the current track's name, artist, album
   * and playback position (formatted m:ss) over its total time.
   */
  def notifyCurrentTrack() = {
    val cmd = """tell application "Music"
                | set trackName to name of current track
                | set trackArtist to artist of current track
                | set trackAlbum to album of current track
                | set trackTime to time of current track
                | set trackPosition to player position
                |
                | set min to "0"
                | set sec to "00"
                | if 59 < trackPosition then
                |  set min to trackPosition div 60
                |  set sec to round (trackPosition mod 60)
                | else
                |  set min to "0"
                |  set sec to round (trackPosition)
                | end if
                |
                | if sec < 10 then
                |  set sec to "0" & sec
                | end if
                |
                | set currentTime to (min & ":" & sec as text)
                |
                | set str to trackArtist & " - " & trackAlbum & return & currentTime & " / " & trackTime
                | display notification str with title trackName
                |end tell
              """.stripMargin
    Nuimode.runAppleScript(cmd)
  }
}
| Yuichiroh/nuimode | src/main/scala/yuima/nuimo/action/ItunesAction.scala | Scala | mit | 5,335 |
package at.nonblocking.cliwix.integrationtest
import at.nonblocking.cliwix.core.ExecutionContext
import at.nonblocking.cliwix.core.command.{CompanyInsertCommand, PageSetReadCommand, SiteInsertCommand, _}
import at.nonblocking.cliwix.core.compare.LiferayEntityComparator
import at.nonblocking.cliwix.core.handler._
import at.nonblocking.cliwix.integrationtest.TestEntityFactory._
import org.junit.Assert._
import org.junit.Test
import org.junit.runner.RunWith
import scala.beans.BeanProperty
@RunWith(classOf[CliwixIntegrationTestRunner])
/**
 * Integration tests for page-set commands: page sets are created implicitly
 * with a site, so each test first inserts a company and a site, then reads,
 * updates or deletes the resulting page set.
 */
@RunWith(classOf[CliwixIntegrationTestRunner])
class PageSetHandlerIntegrationTest {

  @BeanProperty
  var dispatchHandler: DispatchHandler = _

  @BeanProperty
  var liferayEntityComparator: LiferayEntityComparator = _

  /**
   * Inserts a fresh test company and site and updates the execution context
   * accordingly. Returns the inserted site.
   */
  private def insertCompanyAndSite() = {
    val company = createTestCompany()
    val insertedCompany = this.dispatchHandler.execute(CompanyInsertCommand(company)).result
    ExecutionContext.updateCompanyContext(insertedCompany)

    val site = createTestSite()
    val insertedSite = this.dispatchHandler.execute(SiteInsertCommand(insertedCompany.getCompanyId, site)).result
    ExecutionContext.updateGroupContext(insertedSite.getSiteId)

    insertedSite
  }

  /**
   * Shared body of the public/private update tests: reads the implicitly
   * created page set, changes its theme, persists the update and verifies
   * both the command result and a fresh read match the modified page set.
   */
  private def assertPageSetUpdate(privatePages: Boolean) {
    val insertedSite = insertCompanyAndSite()

    // The page set is implicitly created by SiteInsertCommand
    val pageSet = this.dispatchHandler.execute(PageSetReadCommand(insertedSite.getSiteId, privatePages = privatePages)).result
    pageSet.setDefaultThemeId("classic")
    pageSet.setDefaultColorSchemeId("02")

    val updatedPageSet = this.dispatchHandler.execute(UpdateCommand(pageSet)).result
    assertTrue(this.liferayEntityComparator.equals(pageSet, updatedPageSet))

    val pageSetFromDb = this.dispatchHandler.execute(PageSetReadCommand(insertedSite.getSiteId, privatePages = privatePages)).result
    assertTrue(this.liferayEntityComparator.equals(pageSet, pageSetFromDb))
  }

  @Test
  @TransactionalRollback
  def updatePublicPageSetTest() {
    assertPageSetUpdate(privatePages = false)
  }

  @Test
  @TransactionalRollback
  def updatePrivatePageSetTest() {
    assertPageSetUpdate(privatePages = true)
  }

  @Test
  @TransactionalRollback
  def deletePrivatePageSetTest() {
    val insertedSite = insertCompanyAndSite()

    // The page set is implicitly created by SiteInsertCommand
    val privatePageSet = this.dispatchHandler.execute(PageSetReadCommand(insertedSite.getSiteId, privatePages = true)).result
    this.dispatchHandler.execute(DeleteCommand(privatePageSet))

    val ps = this.dispatchHandler.execute(PageSetReadCommand(insertedSite.getSiteId, privatePages = true)).result
    assertNull(ps)
  }
}
| nonblocking/cliwix | cliwix-test-integration/src/test/scala/at/nonblocking/cliwix/integrationtest/PageSetHandlerIntegrationTest.scala | Scala | agpl-3.0 | 3,924 |
package org.kokho.scheduling.multicritical.schedulers
import org.kokho.scheduling.ScheduledJob
import org.kokho.scheduling.multicritical.system.MulticriticalTask
/**
* @author: Mikhail Kokho
* @date 7/3/15.
*/
/**
 * Multicritical scheduler that drives one [[MulticriticalWorker]] per
 * processor partition.
 *
 * @author Mikhail Kokho
 * @date 7/3/15.
 */
class SwapScheduler(override val partition: Seq[Seq[MulticriticalTask]]) extends MulticriticalScheduler(partition) {
  self =>

  /**
   * Infinite iterator over a sequence of scheduled jobs: each step yields one
   * scheduled job per partition, produced by that partition's worker.
   */
  override def iterate(): Iterator[Seq[ScheduledJob]] = new Iterator[Seq[ScheduledJob]] {

    // One worker per processor partition, created once for the whole run.
    private val workers = self.partition.map(tasks => new MulticriticalWorker(tasks))

    // The schedule never ends.
    override def hasNext: Boolean = true

    override def next(): Seq[ScheduledJob] = {
      // NOTE: local slack release is currently disabled:
      //   workers.foreach(_.releaseLocally())
      workers.map(worker => worker.next())
    }
  }
}
| mkokho/dynoslack | src/main/scala/org/kokho/scheduling/multicritical/schedulers/SwapScheduler.scala | Scala | apache-2.0 | 793 |
/** *****************************************************************************
* Copyright 2017 Capital One Services, LLC and Bitwise, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ******************************************************************************/
package hydrograph.engine.spark.core.reusablerow
import java.util.{Date, LinkedHashSet}
import hydrograph.engine.transformation.userfunctions.base.ReusableRow
/**
* The Class OutputReusableRow.
*
* @author Bitwise
*
*/
/**
 * ReusableRow view over an output row's backing array. Field reads/writes are
 * translated to array positions through the index mappings computed by
 * [[RowToReusableMapper]], so one instance can be reused across rows via
 * [[setRow]].
 */
class OutputReusableRow(var outputRow: Array[Any], fieldsIndexMap: Map[String, Int], fieldsIndexList: Array[Int], fields: LinkedHashSet[String])
  extends ReusableRow(fields) with Serializable {

  /** Re-points this wrapper at a new backing array and returns itself. */
  def setRow(row: Array[Any]): OutputReusableRow = {
    outputRow = row; this
  }

  // Positional/named reads: translate the reusable-row index (or field name)
  // to the underlying array slot.
  def getFieldInternal(index: Int) = outputRow(fieldsIndexList(index)).asInstanceOf[Comparable[_]]

  def getFieldInternal(field: String) = outputRow(fieldsIndexMap(field)).asInstanceOf[Comparable[_]]

  // Positional/named writes: mutate the backing array in place.
  def setFieldInternal(index: Int, value: Comparable[_]) = {
    outputRow(fieldsIndexList(index)) = value
  }

  def setFieldInternal(field: String, value: Comparable[_]) = {
    outputRow(fieldsIndexMap(field)) = value
  }

  // Date setters convert java.util.Date to java.sql.Date (the type Spark SQL
  // rows expect); any other Comparable is stored unchanged.
  override def setDate(fieldName: String, value: Comparable[_]): Unit = {
    value match {
      case date: Date => super.setField(fieldName, new java.sql.Date(date.getTime))
      case _ => super.setField(fieldName, value)
    }
  }

  override def setDate(index: Int, value: Comparable[_]): Unit = {
    value match {
      case date: Date => super.setField(index, new java.sql.Date(date.getTime))
      case _ => super.setField(index, value)
    }
  }
}
/** Factory: builds an [[OutputReusableRow]] from a mapper's precomputed index structures. */
object OutputReusableRow {

  def apply(outputRow: Array[Any], mapper: RowToReusableMapper): OutputReusableRow = new OutputReusableRow(outputRow, mapper.fieldIndexMap, mapper.fieldIndexList, mapper.requiredFieldsSet)
} | capitalone/Hydrograph | hydrograph.engine/hydrograph.engine.spark/src/main/scala/hydrograph/engine/spark/core/reusablerow/OutputReusableRow.scala | Scala | apache-2.0 | 2,402 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.util.{List => JList}
import java.util.concurrent._
import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.reflect.ClassTag
import scala.util.DynamicVariable
import com.codahale.metrics.{Counter, MetricRegistry, Timer}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.metrics.MetricsSystem
import org.apache.spark.metrics.source.Source
/**
* Asynchronously passes SparkListenerEvents to registered SparkListeners.
*
* Until `start()` is called, all posted events are only buffered. Only after this listener bus
* has started will events be actually propagated to all attached listeners. This listener bus
* is stopped when `stop()` is called, and it will drop further events after stopping.
*/
private[spark] class LiveListenerBus(conf: SparkConf) {

  import LiveListenerBus._

  private var sparkContext: SparkContext = _

  private[spark] val metrics = new LiveListenerBusMetrics(conf)

  // Indicate if `start()` is called
  private val started = new AtomicBoolean(false)
  // Indicate if `stop()` is called
  private val stopped = new AtomicBoolean(false)

  /** A counter for dropped events. It will be reset every time we log it. */
  private val droppedEventsCounter = new AtomicLong(0L)

  /** When `droppedEventsCounter` was logged last time in milliseconds. */
  @volatile private var lastReportTimestamp = 0L

  // Each queue dispatches to its own listeners on its own thread.
  private val queues = new CopyOnWriteArrayList[AsyncEventQueue]()

  // Visible for testing.
  // Events posted before start() are buffered here; set to null once started.
  @volatile private[scheduler] var queuedEvents = new mutable.ListBuffer[SparkListenerEvent]()

  /** Add a listener to queue shared by all non-internal listeners. */
  def addToSharedQueue(listener: SparkListenerInterface): Unit = {
    addToQueue(listener, SHARED_QUEUE)
  }

  /** Add a listener to the executor management queue. */
  def addToManagementQueue(listener: SparkListenerInterface): Unit = {
    addToQueue(listener, EXECUTOR_MANAGEMENT_QUEUE)
  }

  /** Add a listener to the application status queue. */
  def addToStatusQueue(listener: SparkListenerInterface): Unit = {
    addToQueue(listener, APP_STATUS_QUEUE)
  }

  /** Add a listener to the event log queue. */
  def addToEventLogQueue(listener: SparkListenerInterface): Unit = {
    addToQueue(listener, EVENT_LOG_QUEUE)
  }

  /**
   * Add a listener to a specific queue, creating a new queue if needed. Queues are independent
   * of each other (each one uses a separate thread for delivering events), allowing slower
   * listeners to be somewhat isolated from others.
   */
  private[spark] def addToQueue(
      listener: SparkListenerInterface,
      queue: String): Unit = synchronized {
    if (stopped.get()) {
      throw new IllegalStateException("LiveListenerBus is stopped.")
    }

    queues.asScala.find(_.name == queue) match {
      case Some(queue) =>
        queue.addListener(listener)

      case None =>
        val newQueue = new AsyncEventQueue(queue, conf, metrics, this)
        newQueue.addListener(listener)
        // A queue created after the bus started must be started itself.
        if (started.get()) {
          newQueue.start(sparkContext)
        }
        queues.add(newQueue)
    }
  }

  def removeListener(listener: SparkListenerInterface): Unit = synchronized {
    // Remove listener from all queues it was added to, and stop queues that have become empty.
    queues.asScala
      .filter { queue =>
        queue.removeListener(listener)
        queue.listeners.isEmpty()
      }
      .foreach { toRemove =>
        if (started.get() && !stopped.get()) {
          toRemove.stop()
        }
        queues.remove(toRemove)
      }
  }

  /** Post an event to all queues. */
  def post(event: SparkListenerEvent): Unit = {
    if (stopped.get()) {
      return
    }

    metrics.numEventsPosted.inc()

    // If the event buffer is null, it means the bus has been started and we can avoid
    // synchronization and post events directly to the queues. This should be the most
    // common case during the life of the bus.
    if (queuedEvents == null) {
      postToQueues(event)
      return
    }

    // Otherwise, need to synchronize to check whether the bus is started, to make sure the thread
    // calling start() picks up the new event.
    synchronized {
      if (!started.get()) {
        queuedEvents += event
        return
      }
    }

    // If the bus was already started when the check above was made, just post directly to the
    // queues.
    postToQueues(event)
  }

  // Delivers one event to every active queue.
  private def postToQueues(event: SparkListenerEvent): Unit = {
    val it = queues.iterator()
    while (it.hasNext()) {
      it.next().post(event)
    }
  }

  /**
   * Start sending events to attached listeners.
   *
   * This first sends out all buffered events posted before this listener bus has started, then
   * listens for any additional events asynchronously while the listener bus is still running.
   * This should only be called once.
   *
   * @param sc Used to stop the SparkContext in case the listener thread dies.
   */
  def start(sc: SparkContext, metricsSystem: MetricsSystem): Unit = synchronized {
    if (!started.compareAndSet(false, true)) {
      throw new IllegalStateException("LiveListenerBus already started.")
    }

    this.sparkContext = sc
    // Flush the pre-start buffer into every queue, then disable buffering.
    queues.asScala.foreach { q =>
      q.start(sc)
      queuedEvents.foreach(q.post)
    }
    queuedEvents = null
    metricsSystem.registerSource(metrics)
  }

  /**
   * For testing only. Wait until there are no more events in the queue, or until the default
   * wait time has elapsed. Throw `TimeoutException` if the specified time elapsed before the queue
   * emptied.
   * Exposed for testing.
   */
  @throws(classOf[TimeoutException])
  private[spark] def waitUntilEmpty(): Unit = {
    waitUntilEmpty(TimeUnit.SECONDS.toMillis(10))
  }

  /**
   * For testing only. Wait until there are no more events in the queue, or until the specified
   * time has elapsed. Throw `TimeoutException` if the specified time elapsed before the queue
   * emptied.
   * Exposed for testing.
   */
  @throws(classOf[TimeoutException])
  def waitUntilEmpty(timeoutMillis: Long): Unit = {
    val deadline = System.currentTimeMillis + timeoutMillis
    queues.asScala.foreach { queue =>
      if (!queue.waitUntilEmpty(deadline)) {
        throw new TimeoutException(s"The event queue is not empty after $timeoutMillis ms.")
      }
    }
  }

  /**
   * Stop the listener bus. It will wait until the queued events have been processed, but drop the
   * new events after stopping.
   */
  def stop(): Unit = {
    if (!started.get()) {
      throw new IllegalStateException(s"Attempted to stop bus that has not yet started!")
    }

    if (!stopped.compareAndSet(false, true)) {
      return
    }

    queues.asScala.foreach(_.stop())
    queues.clear()
  }

  // For testing only.
  private[spark] def findListenersByClass[T <: SparkListenerInterface : ClassTag](): Seq[T] = {
    queues.asScala.flatMap { queue => queue.findListenersByClass[T]() }
  }

  // For testing only.
  private[spark] def listeners: JList[SparkListenerInterface] = {
    queues.asScala.flatMap(_.listeners.asScala).asJava
  }

  // For testing only.
  private[scheduler] def activeQueues(): Set[String] = {
    queues.asScala.map(_.name).toSet
  }

  // For testing only.
  private[scheduler] def getQueueCapacity(name: String): Option[Int] = {
    queues.asScala.find(_.name == name).map(_.capacity)
  }
}
private[spark] object LiveListenerBus {
  // Allows for Context to check whether stop() call is made within listener thread
  val withinListenerThread: DynamicVariable[Boolean] = new DynamicVariable[Boolean](false)

  // Well-known queue names used by addTo*Queue above.
  private[scheduler] val SHARED_QUEUE = "shared"

  private[scheduler] val APP_STATUS_QUEUE = "appStatus"

  private[scheduler] val EXECUTOR_MANAGEMENT_QUEUE = "executorManagement"

  private[scheduler] val EVENT_LOG_QUEUE = "eventLog"
}
private[spark] class LiveListenerBusMetrics(conf: SparkConf)
  extends Source with Logging {

  override val sourceName: String = "LiveListenerBus"
  override val metricRegistry: MetricRegistry = new MetricRegistry

  /**
   * The total number of events posted to the LiveListenerBus. This is a count of the total number
   * of events which have been produced by the application and sent to the listener bus, NOT a
   * count of the number of events which have been processed and delivered to listeners (or dropped
   * without being delivered).
   */
  val numEventsPosted: Counter = metricRegistry.counter(MetricRegistry.name("numEventsPosted"))

  // Guarded by synchronization.
  private val perListenerClassTimers = mutable.Map[String, Timer]()

  /**
   * Returns a timer tracking the processing time of the given listener class.
   * events processed by that listener. This method is thread-safe.
   */
  def getTimerForListenerClass(cls: Class[_ <: SparkListenerInterface]): Option[Timer] = {
    synchronized {
      val className = cls.getName
      // Cap the number of per-class timers to bound metric registry growth.
      val maxTimed = conf.get(LISTENER_BUS_METRICS_MAX_LISTENER_CLASSES_TIMED)
      perListenerClassTimers.get(className).orElse {
        if (perListenerClassTimers.size == maxTimed) {
          logError(s"Not measuring processing time for listener class $className because a " +
            s"maximum of $maxTimed listener classes are already timed.")
          None
        } else {
          perListenerClassTimers(className) =
            metricRegistry.timer(MetricRegistry.name("listenerProcessingTime", className))
          perListenerClassTimers.get(className)
        }
      }
    }
  }
}
| goldmedal/spark | core/src/main/scala/org/apache/spark/scheduler/LiveListenerBus.scala | Scala | apache-2.0 | 10,490 |
package dotty.tools.dotc
package printing
import core._
import Texts._, ast.Trees._
import Types.Type, Symbols.Symbol, Contexts.Context, Scopes.Scope, Constants.Constant,
Names.Name, Denotations._, Annotations.Annotation
/** The base class of all printers
*/
/** The base class of all printers
 */
abstract class Printer {

  private[this] var prec: Precedence = GlobalPrec

  /** The current precedence level */
  def currentPrecedence = prec

  /** Generate text using `op`, assuming a given precedence level `prec`. */
  def atPrec(prec: Precedence)(op: => Text): Text = {
    // Save and restore the surrounding precedence so nested calls compose.
    val outerPrec = this.prec
    this.prec = prec
    try op
    finally this.prec = outerPrec
  }

  /** Generate text using `op`, assuming a given precedence level `prec`.
   *  If new level `prec` is lower than previous level, put text in parentheses.
   */
  def changePrec(prec: Precedence)(op: => Text): Text =
    if (prec < this.prec) atPrec(prec) ("(" ~ op ~ ")") else atPrec(prec)(op)

  /** The name, possibly with a namespace suffix if debugNames is set:
   *  /L for local names, /V for other term names, /T for type names
   */
  def nameString(name: Name): String

  /** The name of the given symbol.
   *  If !settings.debug, the original name where
   *  expansions of operators are translated back to operator symbol.
   *  E.g. $eq => =.
   *  If settings.uniqid, adds id.
   */
  def nameString(sym: Symbol): String

  /** The fully qualified name of the symbol */
  def fullNameString(sym: Symbol): String

  /** The kind of the symbol */
  def kindString(sym: Symbol): String

  /** The name as a text */
  def toText(name: Name): Text

  /** Textual representation, including symbol's kind e.g., "class Foo", "method Bar".
   *  If hasMeaninglessName is true, uses the owner's name to disambiguate identity.
   */
  def toText(sym: Symbol): Text

  /** Textual representation of symbol's declaration */
  def dclText(sym: Symbol): Text

  /** Textual representation of single denotation's declaration */
  def dclText(sd: SingleDenotation): Text

  /** If symbol's owner is a printable class C, the text "in C", otherwise "" */
  def locationText(sym: Symbol): Text

  /** Textual representation of symbol and its location */
  def locatedText(sym: Symbol): Text

  /** Textual representation of denotation */
  def toText(denot: Denotation): Text

  /** Textual representation of constant */
  def toText(const: Constant): Text

  /** Textual representation of annotation */
  def toText(annot: Annotation): Text

  /** Textual representation of type */
  def toText(tp: Type): Text

  /** Textual representation of all symbols in given list,
   *  using `dclText` for displaying each.
   */
  def dclsText(syms: List[Symbol], sep: String = "\\n"): Text

  /** Textual representation of all definitions in a scope using `dclText` for each */
  def toText(sc: Scope): Text

  /** Textual representation of tree */
  def toText[T >: Untyped](tree: Tree[T]): Text

  /** Perform string or text-producing operation `op` so that only a
   *  summarized text with given recursion depth is shown
   */
  def summarized[T](depth: Int)(op: => T): T

  /** A plain printer without any embellishments */
  def plain: Printer
}
| yusuke2255/dotty | src/dotty/tools/dotc/printing/Printer.scala | Scala | bsd-3-clause | 3,192 |
/** **\\
** Copyright (c) 2012 Center for Organic and Medicinal Chemistry **
** Zurich University of Applied Sciences **
** Wädenswil, Switzerland **
\\** **/
import chemf.graph.{Edge, LGraph}
import scalaz._, Scalaz._
/**
* @author Stefan Höck
*/
package object chemf {

  /** A molecular formula: multiset of isotopes with their counts. */
  type Formula = Map[Isotope,Int]

  /** A molecule: labeled graph with atoms at vertices and bonds on edges. */
  type Molecule = LGraph[Bond,Atom]

  // Validation aliases: ValNel accumulates errors in a NonEmptyList;
  // ValRes fixes the error type to String; DisRes is the fail-fast disjunction.
  type ValNel[+E,+A] = Validation[NonEmptyList[E],A]

  type ValRes[+A] = ValNel[String,A]

  type DisRes[+A] = String \\/ A

  /**
   * Adjust all error messages (if any) in v by applying function f.
   */
  def mapErr[E,F,A](v: ValNel[E,A])(f: E ⇒ F): ValNel[F,A] =
    Bifunctor[Validation].leftMap(v)(_ map f)
}
// vim: set ts=2 sw=2 et:
| stefan-hoeck/chemf | src/main/scala/chemf/package.scala | Scala | gpl-3.0 | 885 |
/**
* Copyright (C) 2010-2011 LShift Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.lshift.diffa.agent.amqp
import collection.mutable.HashMap
import scala.util.control.NonFatal

import com.rabbitmq.client.ConnectionFactory
import net.lshift.accent.AccentConnection
import net.lshift.diffa.kernel.config.Endpoint
import net.lshift.diffa.kernel.frontend.Changes
import net.lshift.diffa.kernel.participants.InboundEndpointFactory
import org.slf4j.LoggerFactory
/**
 * Creates and manages AMQP receivers for inbound endpoints. One AMQP
 * connection is kept per distinct broker coordinates (host/port/credentials/
 * vhost); each connection fans out to one receiver per (domain, endpoint).
 */
class AmqpInboundEndpointFactory(changes: Changes)
  extends InboundEndpointFactory {

  // Identity of an AMQP connection: same key => same shared connection.
  case class ConnectionKey(host: String, port: Int, username: String, password: String, vHost: String)

  object ConnectionKey {
    def fromUrl(url: AmqpQueueUrl) =
      ConnectionKey(host = url.host, port = url.port, username = url.username, password = url.password, vHost = url.vHost)
  }

  // Identity of a receiver within a connection: one per (domain, endpoint).
  case class ReceiverKey(domain: String, endpoint: String)

  object ReceiverKey {
    def fromEndpoint(e: Endpoint) =
      ReceiverKey(domain = e.domain.name, endpoint = e.name)
  }

  // All receivers sharing one connection, keyed by (domain, endpoint).
  class Receivers(val connection: AccentConnection,
                  val connectionKey: ConnectionKey) extends HashMap[ReceiverKey, AccentReceiver]

  val log = LoggerFactory.getLogger(getClass)

  // One Receivers map per live broker connection.
  val receivers = new HashMap[ConnectionKey, Receivers]
  /** This factory handles inbound URLs with the amqp:// scheme. */
  def canHandleInboundEndpoint(inboundUrl: String) =
    inboundUrl.startsWith("amqp://")

  /**
   * Starts (or reuses) a connection for the endpoint's broker and registers a
   * receiver consuming the endpoint's queue.
   */
  def ensureEndpointReceiver(e: Endpoint) {
    log.info("Starting receiver for endpoint: %s".format(e))

    val amqpUrl = AmqpQueueUrl.parse(e.inboundUrl)
    val receiversForUrl = getReceiversByUrl(amqpUrl)
    val receiverKey = ReceiverKey.fromEndpoint(e)
    receiversForUrl.put(receiverKey,
      createReceiver(receiversForUrl.connection, amqpUrl.queue, receiverKey))
  }
def endpointGone(domain: String, endpoint: String) {
val key = ReceiverKey(domain, endpoint)
getReceiversByKey(key) match {
case None =>
log.error("No receivers for endpoint name: %s".format(endpoint))
case Some(rcv) =>
rcv.get(key) map { c =>
try {
c.close()
} catch {
case _ => log.error("Unable to shutdown receiver for endpoint name %s".format(endpoint))
}
}
rcv.remove(key)
// if there are no more receivers on the connection, close it
if (rcv.isEmpty) {
try {
rcv.connection.close()
} catch {
case _ => log.error("Unable to shutdown connection for endpoint name %s".format(endpoint))
}
receivers.remove(rcv.connectionKey)
}
}
}
protected def createConnectionFactory(url: AmqpQueueUrl) = {
val cf = new ConnectionFactory()
cf.setHost(url.host)
cf.setPort(url.port)
cf.setUsername(url.username)
cf.setPassword(url.password)
if (! url.isDefaultVHost) {
cf.setVirtualHost(url.vHost)
}
cf
}
protected def createConnection(cf: ConnectionFactory) =
new AccentConnection(cf, new AccentConnectionFailureHandler)
protected def createReceiver(connection: AccentConnection, queue: String, key: ReceiverKey) = {
val params = new ReceiverParameters(queue)
new AccentReceiver(connection,
params,
key.domain,
key.endpoint,
changes)
}
private def getReceiversByUrl(url: AmqpQueueUrl): Receivers = {
val connectionKey = ConnectionKey.fromUrl(url)
receivers.getOrElseUpdate(connectionKey, {
val connection = createConnection(createConnectionFactory(url))
new Receivers(connection, connectionKey)
})
}
private def getReceiversByKey(key: ReceiverKey): Option[Receivers] = {
receivers.values.find(_.contains(key))
}
} | aprescott/diffa | agent/src/main/scala/net/lshift/diffa/agent/amqp/AmqpInboundEndpointFactory.scala | Scala | apache-2.0 | 4,277 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.repl
import java.io._
import java.nio.file.Files
import org.apache.log4j.{Level, LogManager, PropertyConfigurator}
import org.scalatest.BeforeAndAfterAll
import org.apache.spark.SparkFunSuite
import org.apache.spark.internal.Logging
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.internal.StaticSQLConf.CATALOG_IMPLEMENTATION
/**
 * End-to-end tests for the Spark REPL: each test pipes a snippet of Scala
 * source into a freshly created interpreter and asserts on the captured
 * console output.
 */
class ReplSuite extends SparkFunSuite with BeforeAndAfterAll {

  // Context class loader in effect before this suite ran; restored in
  // afterAll because the REPL installs its own loader and never resets it.
  private var originalClassLoader: ClassLoader = null

  override def beforeAll(): Unit = {
    originalClassLoader = Thread.currentThread().getContextClassLoader
  }

  override def afterAll(): Unit = {
    if (originalClassLoader != null) {
      // Reset the class loader to not affect other suites. REPL will set its own class loader but
      // doesn't reset it.
      Thread.currentThread().setContextClassLoader(originalClassLoader)
    }
  }

  /**
   * Runs `input` through a fresh REPL bound to `master` and returns all
   * interpreter output. The executor-classpath system property is saved
   * and restored around the run.
   */
  def runInterpreter(master: String, input: String): String = {
    val CONF_EXECUTOR_CLASSPATH = "spark.executor.extraClassPath"

    val oldExecutorClasspath = System.getProperty(CONF_EXECUTOR_CLASSPATH)
    val classpath = System.getProperty("java.class.path")
    System.setProperty(CONF_EXECUTOR_CLASSPATH, classpath)

    Main.sparkContext = null
    Main.sparkSession = null // causes recreation of SparkContext for each test.
    Main.conf.set("spark.master", master)

    val in = new BufferedReader(new StringReader(input + "\\n"))
    val out = new StringWriter()
    Main.doMain(Array("-classpath", classpath), new SparkILoop(in, new PrintWriter(out)))

    if (oldExecutorClasspath != null) {
      System.setProperty(CONF_EXECUTOR_CLASSPATH, oldExecutorClasspath)
    } else {
      System.clearProperty(CONF_EXECUTOR_CLASSPATH)
    }

    out.toString
  }

  // Simulate the paste mode in Scala REPL.
  def runInterpreterInPasteMode(master: String, input: String): String =
    runInterpreter(master, ":paste\\n" + input + 4.toChar) // 4 is the ascii code of CTRL + D

  /** Asserts that the interpreter output contains `message`. */
  def assertContains(message: String, output: String): Unit = {
    val isContain = output.contains(message)
    assert(isContain,
      "Interpreter output did not contain '" + message + "':\\n" + output)
  }

  /** Asserts that the interpreter output does not contain `message`. */
  def assertDoesNotContain(message: String, output: String): Unit = {
    val isContain = output.contains(message)
    assert(!isContain,
      "Interpreter output contained '" + message + "':\\n" + output)
  }

  test("SPARK-15236: use Hive catalog") {
    // turn on the INFO log so that it is possible the code will dump INFO
    // entry for using "HiveMetastore"
    val rootLogger = LogManager.getRootLogger()
    val logLevel = rootLogger.getLevel
    rootLogger.setLevel(Level.INFO)
    try {
      Main.conf.set(CATALOG_IMPLEMENTATION.key, "hive")
      val output = runInterpreter("local",
        """
          |spark.sql("drop table if exists t_15236")
        """.stripMargin)
      assertDoesNotContain("error:", output)
      assertDoesNotContain("Exception", output)
      // only when the config is set to hive and
      // hive classes are built, we will use hive catalog.
      // Then log INFO entry will show things using HiveMetastore
      if (SparkSession.hiveClassesArePresent) {
        assertContains("HiveMetaStore", output)
      } else {
        // If hive classes are not built, in-memory catalog will be used
        assertDoesNotContain("HiveMetaStore", output)
      }
    } finally {
      // Restore the root logger level regardless of the test outcome.
      rootLogger.setLevel(logLevel)
    }
  }

  test("SPARK-15236: use in-memory catalog") {
    val rootLogger = LogManager.getRootLogger()
    val logLevel = rootLogger.getLevel
    rootLogger.setLevel(Level.INFO)
    try {
      Main.conf.set(CATALOG_IMPLEMENTATION.key, "in-memory")
      val output = runInterpreter("local",
        """
          |spark.sql("drop table if exists t_16236")
        """.stripMargin)
      assertDoesNotContain("error:", output)
      assertDoesNotContain("Exception", output)
      assertDoesNotContain("HiveMetaStore", output)
    } finally {
      rootLogger.setLevel(logLevel)
    }
  }

  test("broadcast vars") {
    // Test that the value that a broadcast var had when it was created is used,
    // even if that variable is then modified in the driver program
    // TODO: This doesn't actually work for arrays when we run in local mode!
    val output = runInterpreter("local",
      """
        |var array = new Array[Int](5)
        |val broadcastArray = sc.broadcast(array)
        |sc.parallelize(0 to 4).map(x => broadcastArray.value(x)).collect()
        |array(0) = 5
        |sc.parallelize(0 to 4).map(x => broadcastArray.value(x)).collect()
      """.stripMargin)
    assertDoesNotContain("error:", output)
    assertDoesNotContain("Exception", output)
    assertContains("res0: Array[Int] = Array(0, 0, 0, 0, 0)", output)
    assertContains("res2: Array[Int] = Array(5, 0, 0, 0, 0)", output)
  }

  // This test only runs when a Mesos native library is available on the host.
  if (System.getenv("MESOS_NATIVE_JAVA_LIBRARY") != null) {
    test("running on Mesos") {
      val output = runInterpreter("localquiet",
        """
          |var v = 7
          |def getV() = v
          |sc.parallelize(1 to 10).map(x => getV()).collect().reduceLeft(_+_)
          |v = 10
          |sc.parallelize(1 to 10).map(x => getV()).collect().reduceLeft(_+_)
          |var array = new Array[Int](5)
          |val broadcastArray = sc.broadcast(array)
          |sc.parallelize(0 to 4).map(x => broadcastArray.value(x)).collect()
          |array(0) = 5
          |sc.parallelize(0 to 4).map(x => broadcastArray.value(x)).collect()
        """.stripMargin)
      assertDoesNotContain("error:", output)
      assertDoesNotContain("Exception", output)
      assertContains("res0: Int = 70", output)
      assertContains("res1: Int = 100", output)
      assertContains("res2: Array[Int] = Array(0, 0, 0, 0, 0)", output)
      assertContains("res4: Array[Int] = Array(0, 0, 0, 0, 0)", output)
    }
  }

  test("line wrapper only initialized once when used as encoder outer scope") {
    val output = runInterpreter("local",
      """
        |val fileName = "repl-test-" + System.currentTimeMillis
        |val tmpDir = System.getProperty("java.io.tmpdir")
        |val file = new java.io.File(tmpDir, fileName)
        |def createFile(): Unit = file.createNewFile()
        |
        |createFile();case class TestCaseClass(value: Int)
        |sc.parallelize(1 to 10).map(x => TestCaseClass(x)).collect()
        |
        |file.delete()
      """.stripMargin)
    assertDoesNotContain("error:", output)
    assertDoesNotContain("Exception", output)
  }

  test("define case class and create Dataset together with paste mode") {
    val output = runInterpreterInPasteMode("local-cluster[1,1,1024]",
      """
        |import spark.implicits._
        |case class TestClass(value: Int)
        |Seq(TestClass(1)).toDS()
      """.stripMargin)
    assertDoesNotContain("error:", output)
    assertDoesNotContain("Exception", output)
  }

  test(":replay should work correctly") {
    val output = runInterpreter("local",
      """
        |sc
        |:replay
      """.stripMargin)
    assertDoesNotContain("error: not found: value sc", output)
  }

  test("spark-shell should find imported types in class constructors and extends clause") {
    val output = runInterpreter("local",
      """
        |import org.apache.spark.Partition
        |class P(p: Partition)
        |class P(val index: Int) extends Partition
      """.stripMargin)
    assertDoesNotContain("error: not found: type Partition", output)
  }

  test("spark-shell should shadow val/def definitions correctly") {
    val output1 = runInterpreter("local",
      """
        |def myMethod() = "first definition"
        |val tmp = myMethod(); val out = tmp
        |def myMethod() = "second definition"
        |val tmp = myMethod(); val out = s"$tmp aabbcc"
      """.stripMargin)
    assertContains("second definition aabbcc", output1)

    val output2 = runInterpreter("local",
      """
        |val a = 1
        |val b = a; val c = b;
        |val a = 2
        |val b = a; val c = b;
        |s"!!$b!!"
      """.stripMargin)
    assertContains("!!2!!", output2)
  }

  test("SPARK-26633: ExecutorClassLoader.getResourceAsStream find REPL classes") {
    val output = runInterpreterInPasteMode("local-cluster[1,1,1024]",
      """
        |case class TestClass(value: Int)
        |
        |sc.parallelize(1 to 1).map { _ =>
        |  val clz = classOf[TestClass]
        |  val name = clz.getName.replace('.', '/') + ".class";
        |  val stream = clz.getClassLoader.getResourceAsStream(name)
        |  if (stream == null) {
        |    "failed: stream is null"
        |  } else {
        |    val magic = new Array[Byte](4)
        |    try {
        |      stream.read(magic)
        |      // the magic number of a Java Class file
        |      val expected = Array[Byte](0xCA.toByte, 0xFE.toByte, 0xBA.toByte, 0xBE.toByte)
        |      if (magic sameElements expected) {
        |        "successful"
        |      } else {
        |        "failed: unexpected contents from stream"
        |      }
        |    } finally {
        |      stream.close()
        |    }
        |  }
        |}.collect()
      """.stripMargin)
    assertDoesNotContain("failed", output)
    assertContains("successful", output)
  }

  test("SPARK-30167: Log4j configuration for REPL should override root logger properly") {
    // Standalone log4j configuration applied only for the duration of this test.
    val testConfiguration =
      """
        |# Set everything to be logged to the console
        |log4j.rootCategory=INFO, console
        |log4j.appender.console=org.apache.log4j.ConsoleAppender
        |log4j.appender.console.target=System.err
        |log4j.appender.console.layout=org.apache.log4j.PatternLayout
        |log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n
        |
        |# Set the log level for this class to WARN same as the default setting.
        |log4j.logger.org.apache.spark.repl.Main=ERROR
        |""".stripMargin
    val log4jprops = Files.createTempFile("log4j.properties.d", "log4j.properties")
    Files.write(log4jprops, testConfiguration.getBytes)

    // Originals captured so they can be verified as restored at the end.
    val originalRootLogger = LogManager.getRootLogger
    val originalRootAppender = originalRootLogger.getAppender("file")
    val originalStderr = System.err
    val originalReplThresholdLevel = Logging.sparkShellThresholdLevel

    val replLoggerLogMessage = "Log level for REPL: "
    val warnLogMessage1 = "warnLogMessage1 should not be output"
    val errorLogMessage1 = "errorLogMessage1 should be output"
    val infoLogMessage1 = "infoLogMessage2 should be output"
    val infoLogMessage2 = "infoLogMessage3 should be output"

    val out = try {
      PropertyConfigurator.configure(log4jprops.toAbsolutePath.toString)
      // Re-initialization is needed to set SparkShellLoggingFilter to ConsoleAppender
      Main.initializeForcefully(true, false)
      runInterpreter("local",
        s"""
           |import java.io.{ByteArrayOutputStream, PrintStream}
           |
           |import org.apache.log4j.{ConsoleAppender, Level, LogManager}
           |
           |val replLogger = LogManager.getLogger("${Main.getClass.getName.stripSuffix("$")}")
           |
           |// Log level for REPL is expected to be ERROR
           |"$replLoggerLogMessage" + replLogger.getLevel()
           |
           |val bout = new ByteArrayOutputStream()
           |
           |// Configure stderr to let log messages output to ByteArrayOutputStream.
           |val defaultErrStream: PrintStream = System.err
           |try {
           |  System.setErr(new PrintStream(bout))
           |
           |  // Reconfigure ConsoleAppender to reflect the stderr setting.
           |  val consoleAppender =
           |    LogManager.getRootLogger.getAllAppenders.nextElement.asInstanceOf[ConsoleAppender]
           |  consoleAppender.activateOptions()
           |
           |  // customLogger1 is not explicitly configured neither its log level nor appender
           |  // so this inherits the settings of rootLogger
           |  // but ConsoleAppender can use a different log level.
           |  val customLogger1 = LogManager.getLogger("customLogger1")
           |  customLogger1.warn("$warnLogMessage1")
           |  customLogger1.error("$errorLogMessage1")
           |
           |  // customLogger2 is explicitly configured its log level as INFO
           |  // so info level messages logged via customLogger2 should be output.
           |  val customLogger2 = LogManager.getLogger("customLogger2")
           |  customLogger2.setLevel(Level.INFO)
           |  customLogger2.info("$infoLogMessage1")
           |
           |  // customLogger2 is explicitly configured its log level
           |  // so its child should inherit the settings.
           |  val customLogger3 = LogManager.getLogger("customLogger2.child")
           |  customLogger3.info("$infoLogMessage2")
           |
           |  // echo log messages
           |  bout.toString
           |} finally {
           |  System.setErr(defaultErrStream)
           |}
           |""".stripMargin)
    } finally {
      // Restore log4j settings for this suite
      val log4jproperties = Thread.currentThread()
        .getContextClassLoader.getResource("log4j.properties")
      LogManager.resetConfiguration()
      PropertyConfigurator.configure(log4jproperties)
      Logging.sparkShellThresholdLevel = originalReplThresholdLevel
    }

    // Ensure stderr configuration is successfully restored.
    assert(originalStderr eq System.err)

    // Ensure log4j settings are successfully restored.
    val restoredRootLogger = LogManager.getRootLogger
    val restoredRootAppender = restoredRootLogger.getAppender("file")
    assert(originalRootAppender.getClass == restoredRootAppender.getClass)
    assert(originalRootLogger.getLevel == restoredRootLogger.getLevel)

    // Ensure loggers added in this test case are successfully removed.
    assert(LogManager.getLogger("customLogger2").getLevel == null)
    assert(LogManager.getLogger("customLogger2.child").getLevel == null)

    // Ensure log level threshold for REPL is ERROR.
    assertContains(replLoggerLogMessage + "ERROR", out)

    assertDoesNotContain(warnLogMessage1, out)
    assertContains(errorLogMessage1, out)
    assertContains(infoLogMessage1, out)
    assertContains(infoLogMessage2, out)
  }
}
| wangmiao1981/spark | repl/src/test/scala/org/apache/spark/repl/ReplSuite.scala | Scala | apache-2.0 | 15,134 |
package defacto.crf
import breeze.linalg.{Vector, SparseVector}
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.reflect.runtime.universe._
/**
 * Base class for variable domains: the set of values a variable may take.
 *
 * Created by samanzaroot on 9/23/14.
 */
abstract class Domain[A](implicit val ev: TypeTag[A]) {
  // All concrete values in the domain (left empty by continuous domains).
  val values: ArrayBuffer[A]
  // Number of values in the domain.
  val length: Int
  // NOTE(review): this is the runtime class of the TypeTag instance itself,
  // not of A. If the class of A was intended, this looks like a bug -- confirm.
  val domainClass: Class[_] = ev.getClass
  type inner = A
  type valuetype = Value
}
/**
 * Domain over the integer interval [from, to), upper bound exclusive
 * (matching `until` semantics).
 */
class IntDomain(from: Int, to: Int) extends Domain[Int] {
  // Bug fix: `ArrayBuffer(from until to)` builds a one-element buffer holding
  // the Range object itself; the range must be expanded into individual Ints.
  val values = ArrayBuffer.range(from, to)
  val length = values.length
}
/**
 * Domain over a real-valued interval. A continuous interval cannot be
 * enumerated, so the value set stays empty and the length is zero.
 */
class RealDomain(from: Int, to: Int) extends Domain[Double] {
  // Bug fix: a bare `ArrayBuffer()` infers ArrayBuffer[Nothing], which does
  // not conform to the inherited member type ArrayBuffer[Double].
  val values = ArrayBuffer.empty[Double]
  val length = 0
}
/** A domain whose values are discrete; its inner representation is a vector. */
abstract class DiscreteDomain[A] extends Domain[A] {
  // NOTE(review): `Vector` here is breeze.linalg.Vector (imported above), used
  // without a type parameter, and it overrides the concrete `type inner = A`
  // from Domain -- confirm this compiles/behaves as intended.
  override type inner = Vector
}
/**
 * Discrete domain over features of type F. Each distinct feature gets a
 * stable integer index in insertion order; `reverseMap` provides the
 * feature -> index lookup. Once frozen, unseen features are silently ignored.
 */
class FeaturesDomain[F] extends DiscreteDomain[F] {
  override type valuetype = FeatureValue
  // Feature stored at position i has index i.
  val values = new ArrayBuffer[F]()
  def length = values.length
  // Inverse of `values`: feature -> its index.
  val reverseMap = new mutable.HashMap[F, Int]()
  // While true, += becomes a no-op for unknown features.
  var _frozen = false
  override type inner = SparseVector[Int]

  /** The feature stored at index i. */
  def apply(i: Int): F = {
    values(i)
  }

  /** The index of a previously registered feature (throws if absent). */
  def apply(f: F): Int = {
    reverseMap(f)
  }

  /** Stops accepting new features. */
  def freeze(): Unit = {
    _frozen = true
  }

  /** Resumes accepting new features. */
  def unfreeze(): Unit = {
    _frozen = false
  }

  /** Registers a feature, assigning the next free index (no-op if frozen or already known). */
  def +=(feature: F): Unit = {
    if (!_frozen) {
      if (!reverseMap.contains(feature)) {
        values += feature
        reverseMap(feature) = values.length - 1
      }
    }
  }

  /** Same as apply(f): index of a registered feature. */
  def featureIndex(feature: F): Int = {
    reverseMap(feature)
  }

  /** Same as apply(i): feature stored at an index. */
  def featureValue(index: Int): F = {
    values(index)
  }

  def size = values.size
}
/**
 * Feature domain that indexes feature names and their values separately:
 * each feature name gets an index, and each value observed for a feature
 * gets an index within that feature. The combined "feature+:+value" string
 * is also registered in the underlying FeaturesDomain.
 */
class DoubleIndexedFeatureDomain extends FeaturesDomain[String] {
  var features = new ArrayBuffer[String]
  // Parallel to `features`: the values observed for each feature.
  var featuresValues = new ArrayBuffer[ArrayBuffer[String]]()
  // Parallel to `features`: value -> value-index maps per feature.
  var reverseFeatureValuesMap = new ArrayBuffer[mutable.HashMap[String, Int]]()
  var reverseFeatureMap = new mutable.HashMap[String, Int]()

  /*def initialize(file : String): Unit = {
    for(line <- Source.fromFile(file).getLines()) {
      var split = line.split("->")
      features.append((split(0).toInt to split(1).toInt).toArray)
    }
  }*/

  /** Unindexed additions are not supported for this domain. */
  override def +=(feature: String) {
    System.err.println("Tried to to add unindexed feature into indexed feature domain")
    System.exit(1)
  }

  /** Registers a (feature, value) pair, assigning indices as needed. */
  def +=(feature: String, value: String): Unit = {
    if (!_frozen) {
      if (!reverseFeatureMap.contains(feature)) {
        features += feature
        reverseFeatureMap(feature) = features.length - 1
        // Bug fix: the parallel buffers must grow by appending. The previous
        // code assigned at index features.length - 1 on buffers that were
        // still shorter, which throws IndexOutOfBoundsException; appending
        // places the new entries at exactly that index.
        featuresValues += new ArrayBuffer[String]()
        reverseFeatureValuesMap += new mutable.HashMap[String, Int]()
      }
      val featureIndex = reverseFeatureMap(feature)
      if (!reverseFeatureValuesMap(featureIndex).contains(value)) {
        featuresValues(featureIndex) += value
        reverseFeatureValuesMap(featureIndex)(value) = featuresValues(featureIndex).length - 1
      }
    }
    super.+=(feature + "+:+" + value)
  }

  /*def featureIndex(featureIndex : Int, value : Int) : Int = {
    val start = features(featureIndex).head
    value - start
  }*/
}
// NOTE(review): the type parameter here is *named* `String` and shadows
// scala.String -- `FeaturesDomain[String]` below refers to the parameter,
// not to the standard String type (which is why the asInstanceOf cast is
// needed). This looks unintentional; confirm whether the parameter should
// simply be removed.
class StringFeaturesDomain[String] extends FeaturesDomain[String] {
  /** Registers the combined "feature:+:value" string as a single feature. */
  def +=(feature: String, value: String): Unit = {
    val s = feature + ":+:" + value
    super.+=(s.asInstanceOf[String]) //TODO: Is this a intellij problem or scala?
  }
}
/** Discrete domain of string labels, filled as "0" .. "till-1" via initialize. */
class LabelDomain extends DiscreteDomain[String] {
  override type valuetype = LabelValue
  val values = new ArrayBuffer[String]()
  // Alias: `labels` and `values` refer to the same buffer.
  val labels = values
  def until = labels.length
  def length = labels.length

  /** Fills the domain with the labels "0" through (till - 1) as strings. */
  def initialize(till: Int): Unit = {
    values ++= (0 until till).map(_.toString).toArray
  }
  override type inner = SparseVector[Double]
}
| samanz/DeFacto | src/main/scala/defacto/crf/Domain.scala | Scala | apache-2.0 | 3,667 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models.responsiblepeople
import jto.validation.forms._
import jto.validation.{From, Rule, Write}
import models.FormTypes._
import org.joda.time.{DateTimeFieldType, LocalDate}
import play.api.libs.json.Json
import play.api.libs.json.JodaWrites._
import play.api.libs.json.JodaReads._
/** A responsible person's date of birth, as captured from the form. */
case class DateOfBirth(dateOfBirth: LocalDate)
object DateOfBirth {

  // Form rule: reads the "dateOfBirth" field through the shared date rule,
  // supplying the field-specific message keys for the missing / before-1900 /
  // in-the-future / not-a-real-date validation failures.
  implicit val formRule: Rule[UrlFormEncoded, DateOfBirth] = From[UrlFormEncoded] { __ =>
    import jto.validation.forms.Rules._
    (__ \\ "dateOfBirth").read(newAllowedPastAndFutureDateRule("error.rp.dob.required.date",
      "error.rp.dob.invalid.date.after.1900",
      "error.rp.dob.invalid.date.future",
      "error.rp.dob.invalid.date.not.real")) map DateOfBirth.apply
  }

  // Form write: splits the date into the separate day/month/year form fields.
  implicit def formWrites = Write[DateOfBirth, UrlFormEncoded] { data =>
    Map(
      "dateOfBirth.day" -> Seq(data.dateOfBirth.get(DateTimeFieldType.dayOfMonth()).toString),
      "dateOfBirth.month" -> Seq(data.dateOfBirth.get(DateTimeFieldType.monthOfYear()).toString),
      "dateOfBirth.year" -> Seq(data.dateOfBirth.get(DateTimeFieldType.year()).toString)
    )
  }

  // JSON (de)serialization derived from the case class.
  implicit val format = Json.format[DateOfBirth]
}
| hmrc/amls-frontend | app/models/responsiblepeople/DateOfBirth.scala | Scala | apache-2.0 | 1,778 |
object power_table {
  /** Application entry point (implementation pending). */
  def main(args: Array[String]): Unit = {
    // Put code here
  }
}
| LoyolaChicagoBooks/introcs-scala-examples | power_table/power_table.scala | Scala | gpl-3.0 | 82 |
package org.jetbrains.plugins.scala.codeInsight.intention.types
import org.jetbrains.plugins.scala.codeInsight.intentions.ScalaIntentionTestBase
/**
 * Tests for the "convert Java collection to Scala" intention: checks where
 * the intention is offered (availability) and what code it produces
 * (action). `<caret>` marks the editor caret position in the fixtures.
 */
class ConvertJavaToScalaCollectionIntentionTest extends ScalaIntentionTestBase {
  def familyName: String = ConvertJavaToScalaCollectionIntention.getFamilyName

  // Offered directly on a Java collection constructor call.
  def testIntentionIsAvailable() {
    checkIntentionIsAvailable(
      """
      |class UsesJavaCollections {
      |  val list = new java.util.ArrayList<caret>[String]()
      |}
      """)
  }

  // Offered on a reference typed as java.lang.Iterable.
  def testIntentionIsAvailable_Iterable() {
    checkIntentionIsAvailable(
      """
      |class UsesJavaCollections {
      |  val list: java.lang.Iterable = new java.util.ArrayList[String]()
      |  val scalaList = lis<caret>t
      |}
      """
    )
  }

  // Offered on a reference typed as java.util.Collection.
  def testIntentionIsAvailable_Collection() {
    checkIntentionIsAvailable(
      """
      |class UsesJavaCollections {
      |  val list: java.util.Collection[String] = new java.util.ArrayList[String]()
      |  val scalaList = lis<caret>t
      |}
      """
    )
  }

  // Offered on a java.util.Iterator obtained from a Java collection.
  def testIntentionIsAvailable_Iterator() {
    checkIntentionIsAvailable(
      """
      |class UsesJavaCollections {
      |  val iter = new java.util.ArrayList[String]().itera<caret>tor
      |}
      """
    )
  }

  // Not offered when the value is already converted with .asScala.
  def testIntentionIsNotAvailable() {
    checkIntentionIsNotAvailable(
      """
      |import scala.collection.JavaConverters._
      |
      |class UsesJavaCollections {
      |  val list = new java.util.ArrayList<caret>[String]().asScala
      |}
      """)
  }

  // Applying the intention appends .asScala and adds the JavaConverters import.
  def testIntentionAction_Simple() {
    val text =
      """
      |class UsesJavaCollections {
      |  val list = new java.util.HashMap<caret>[String, Int]()
      |}
      """
    val resultText =
      """
      |import scala.collection.JavaConverters._
      |
      |class UsesJavaCollections {
      |  val list = new java.util.HashMap<caret>[String, Int]().asScala
      |}
      """

    doTest(text, resultText)
  }

  // The JavaConverters import is not duplicated when already present.
  def testIntentionAction_Import_Already_Exists() {
    val text =
      """
      |import java.util
      |import scala.collection.JavaConverters._
      |
      |class UsesJavaCollections {
      |  val list = new util.HashMap<caret>[String, Int]()
      |}
      """
    val resultText =
      """
      |import java.util
      |import scala.collection.JavaConverters._
      |
      |class UsesJavaCollections {
      |  val list = new util.HashMap<caret>[String, Int]().asScala
      |}
      """

    doTest(text, resultText)
  }
} | LPTK/intellij-scala | test/org/jetbrains/plugins/scala/codeInsight/intention/types/ConvertJavaToScalaCollectionIntentionTest.scala | Scala | apache-2.0 | 2,570 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.neoremind.kraps.rpc
/**
 * A callback that [[RpcEndpoint]] can use to send back a message or failure.
 * It's thread-safe and can be called from any thread.
 */
trait RpcCallContext {

  /**
   * Reply a message to the sender. If the sender is [[RpcEndpoint]], its
   * [[RpcEndpoint.receive]] will be called.
   *
   * @param response the reply payload delivered to the sender
   */
  def reply(response: Any): Unit

  /**
   * Report a failure to the sender.
   *
   * @param e the error to propagate back to the sender
   */
  def sendFailure(e: Throwable): Unit

  /**
   * The address of the sender of this message.
   */
  def senderAddress: RpcAddress
}
| neoremind/kraps-rpc | kraps-core/src/main/scala/com/neoremind/kraps/rpc/RpcCallContext.scala | Scala | apache-2.0 | 1,340 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.cfclerk.domain
import net.liftweb.common._
import org.slf4j.{Logger,LoggerFactory}
import scala.xml._
import com.normation.utils.HashcodeCaching
/**
 * Representation of a template qualified name.
 * A template is linked to a policy, so that two different policies can have
 * templates with identical names.
 *
 * The string form is, by convention, "technique id" / "template name".
 */
class Cf3PromisesFileTemplateId(val techniqueId: TechniqueId, val name: String) {

  override def toString() = s"${techniqueId}/${name}"

  override def equals(other: Any) = other match {
    case that: Cf3PromisesFileTemplateId =>
      (that.techniqueId == this.techniqueId) && (that.name == this.name)
    case _ =>
      false
  }

  // Cached: both components are immutable.
  override lazy val hashCode = 61 * name.hashCode + techniqueId.hashCode
}
object Cf3PromisesFileTemplateId {

  def apply(techniqueId: TechniqueId, name: String) = new Cf3PromisesFileTemplateId(techniqueId, name)

  /**
   * Parses "techniqueName/version/templateName" (whitespace is stripped from
   * each part). Returns the technique id and template name, or None when the
   * string does not have exactly three parts or the version part is empty.
   */
  def unapply(str: String): Option[(TechniqueId, String)] = {
    val parts = str.split("/").map(_.replaceAll("""\\s""", ""))
    // Wrap the result in an explicit tuple: relying on auto-tupling of the
    // two arguments to Some(...) is deprecated and easy to misread.
    if (parts.size == 3 && parts(1).size > 0) Some((TechniqueId(TechniqueName(parts(0)), TechniqueVersion(parts(1))), parts(2)))
    else None
  }
}
/**
 * The Tml class holds the representation of a template, containing the
 * template name, whether the file must be included, and where it should be
 * written.
 *
 * The way it's meant to be used:
 *  - the template is fetched from the StringTemplateGroup using the
 *    path/template information,
 *  - vars are replaced,
 *  - it is written at outPath/name.
 */
case class Cf3PromisesFileTemplate(
  /*
   * This is the template identifier of the file.
   * The path of the matching template will be derived from that name by adding
   * the template extension to the end of the name.
   * (by default, ".st")
   */
  id: Cf3PromisesFileTemplateId,

  included: Boolean, // by default, we include the template in the promises.cf

  /*
   * Path where to PUT the template (e.g. for resources for ips)
   * This path is relative to the "cf-engine" root directory on the
   * server.
   * It must be the full path, with the name of the cf-engine promise.
   * By default, it will be set to: ${POLICY NAME}/${template name}.cf
   */
  outPath: String
) extends HashcodeCaching
object Cf3PromisesFileTemplate {
  // File extension of template source files.
  val templateExtension = ".st"
  // File extension of generated cf-engine promise files.
  val promiseExtension = ".cf"
}
| fanf/cf-clerk | src/main/scala/com/normation/cfclerk/domain/Cf3PromisesFileTemplate.scala | Scala | agpl-3.0 | 4,138 |
package com.basho.riak.spark.rdd.failover
import java.util.concurrent.CountDownLatch
import com.basho.riak.spark._
import com.basho.riak.stub.{RequestBasedMessageAdapter, RiakMessageHandler}
import org.junit.rules.ExpectedException
import org.junit.{After, Rule, Test}
import shaded.com.basho.riak.protobuf.RiakKvPB._
import shaded.com.google.protobuf.ByteString
import scala.collection.JavaConverters._
/**
 * Verifies that a full-bucket read against a stub Riak node whose index
 * handler never answers is aborted by the JUnit test timeout.
 */
class RequestTimeoutTest extends AbstractFailoverOfflineTest {
  val _expectedException: ExpectedException = ExpectedException.none()

  @Rule
  def expectedException: ExpectedException = _expectedException

  // Stub message handler: coverage requests return one entry per partition,
  // spread evenly over the stub nodes; index requests block on `latch` to
  // simulate a request that never completes within the test timeout.
  override val riakMessageHandler: Option[RiakMessageHandler] = Some(new RequestBasedMessageAdapter {
    override def handleCoverageRequest(req: RpbCoverageReq): RpbCoverageResp = RpbCoverageResp.newBuilder()
      .addAllEntries(riakNodes
        .zip(distributeEvenly(COVERAGE_ENTRIES_COUNT, riakHosts))
        .flatMap {
          case ((a, _), partitionsPerNode) => (0 until partitionsPerNode).map {
            case partitionIndex: Int => RpbCoverageEntry.newBuilder()
              .setIp(ByteString.copyFromUtf8(a.getHost))
              .setPort(a.getPort)
              .setCoverContext(ByteString.copyFromUtf8(s"StubCoverageEntry-${a.toString}-$partitionIndex"))
              .setKeyspaceDesc(ByteString.copyFromUtf8(s"StubCoverageEntry-${a.toString}-$partitionIndex"))
              .build()
          }
        }.asJava)
      .build()

    override def handleIndexRequest(req: RpbIndexReq): RpbIndexResp = {
      logInfo("Index Request is going to stuck...")
      // Blocks until `after` releases the latch, forcing the test timeout.
      latch.await()
      logInfo("Timeout verified. Thread execution continued.")
      RpbIndexResp.newBuilder().build()
    }
  })

  // Released in `after` so the blocked handler thread can finish.
  val latch = new CountDownLatch(1)

  @Test(timeout = 5000) // scalastyle:ignore
  def fullBucketReadShouldFailWithTimeout(): Unit = {
    expectedException.expectMessage("test timed out after 5000 milliseconds")
    sc.riakBucket[String](NAMESPACE).queryAll().collect()
  }

  @After
  def after(): Unit = {
    latch.countDown()
  }
}
| basho/spark-riak-connector | connector/src/test/scala/com/basho/riak/spark/rdd/failover/RequestTimeoutTest.scala | Scala | apache-2.0 | 2,067 |
/*
* Copyright (C) 2016-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.internal.scaladsl.persistence.jdbc
import com.lightbend.lagom.internal.persistence.ReadSideConfig
import com.lightbend.lagom.internal.persistence.jdbc.AbstractSlickOffsetStoreConfiguration
import play.api.Configuration
import scala.concurrent.duration.FiniteDuration
/**
 * INTERNAL API
 *
 * Slick offset-store configuration whose backoff/timeout/role settings are
 * taken from the read-side configuration rather than from the raw config
 * object (which still supplies the table and schema names).
 */
class OffsetTableConfiguration(config: Configuration, readSideConfig: ReadSideConfig)
  extends AbstractSlickOffsetStoreConfiguration(config) {
  override def minBackoff: FiniteDuration = readSideConfig.minBackoff
  override def maxBackoff: FiniteDuration = readSideConfig.maxBackoff
  override def randomBackoffFactor: Double = readSideConfig.randomBackoffFactor
  override def globalPrepareTimeout: FiniteDuration = readSideConfig.globalPrepareTimeout
  override def role: Option[String] = readSideConfig.role
  override def toString: String = s"OffsetTableConfiguration($tableName,$schemaName)"
}
| edouardKaiser/lagom | persistence-jdbc/scaladsl/src/main/scala/com/lightbend/lagom/internal/scaladsl/persistence/jdbc/JdbcOffsetStore.scala | Scala | apache-2.0 | 999 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dbis.pig.op
import dbis.pig.plan.InvalidPlanException
import dbis.pig.schema._
import dbis.pig.expr.DerefTuple
import dbis.pig.expr.NamedField
import dbis.pig.expr.PositionalField
import dbis.pig.expr.Ref
/**
 * This operator is a pseudo operator used inside a nested FOREACH to construct a new bag from an expression.
 *
 * @param out the output pipe (relation).
 * @param refExpr a reference referring to an expression constructing a relation (bag).
 */
case class ConstructBag(
  private val out: Pipe,
  refExpr: Ref
) extends PigOperator(out) {
  // TODO: what do we need here?
  // Schema of the enclosing operator; set externally before constructSchema runs.
  var parentSchema: Option[Schema] = None

  // The enclosing operator; used to recognize references to its input pipe.
  var parentOp: Option[PigOperator] = None

  /**
   * Derives this operator's schema from the parent schema and refExpr.
   * refExpr must be a DerefTuple whose tuple part names either the parent's
   * input pipe or a complex-typed field, and whose component part selects a
   * component of that complex type; the resulting bag schema wraps that
   * component. Returns None when no parent schema is available.
   */
  override def constructSchema: Option[Schema] = {
    parentSchema match {
      case Some(s) => {
        // first, we determine the field in the schema
        val field = refExpr match {
          case DerefTuple(t, r) => t match {
            case nf@NamedField(n, _) => {
              // Either we refer to the input pipe (inputSchema) ...
              if (parentOp.isDefined && parentOp.get.inPipeName == n)
                // then we create a temporary pseudo field ...
                Field(n, s.element)
              else
                // ... or we refer to a real field of the schema
                s.field(nf)
            }
            case PositionalField(p) => s.field(p)
            case _ => throw new InvalidPlanException("unexpected expression in ConstructBag")
          }
          case _ => throw new InvalidPlanException("unexpected expression in ConstructBag")
        }
        // 2. we extract the type (which should be a BagType, MapType or TupleType)
        if (!field.fType.isInstanceOf[ComplexType])
          // NOTE(review): mixes `throw InvalidPlanException(...)` and
          // `throw new InvalidPlanException(...)` -- presumably a case class
          // with a companion apply; confirm and unify the style.
          throw InvalidPlanException("invalid expression in ConstructBag")
        val fieldType = field.fType.asInstanceOf[ComplexType]

        // Resolve the selected component's name (empty for positional refs) and type.
        val (componentName, componentType) = refExpr match {
          case DerefTuple(t, r) => r match {
            case NamedField(n, _) => (n, fieldType.typeOfComponent(n))
            case PositionalField(p) => ("", fieldType.typeOfComponent(p))
            case _ => throw InvalidPlanException("unexpected expression in ConstructBag")
          }
          case _ => throw InvalidPlanException("unexpected expression in ConstructBag")
        }
        // construct a schema from the component type
        // val resSchema = new Schema(new BagType(new TupleType(Array(Field(componentName, componentType))), outPipeName))
        val resSchema = Schema(if (componentType.isInstanceOf[BagType])
          // Already a bag: use it directly instead of wrapping it again.
          componentType.asInstanceOf[BagType]
        else
          BagType(TupleType(Array(Field(componentName, componentType)))))
        schema = Some(resSchema)
      }
      case None => None
    }
    schema
  }

  /** Pretty-prints this operator (pipes, schemas and the reference expression). */
  override def printOperator(tab: Int): Unit = {
    println(indent(tab) + s"CONSTRUCT_BAG { out = ${outPipeNames.mkString(",")} }")
    println(indent(tab + 2) + "inSchema = " + inputSchema)
    println(indent(tab + 2) + "outSchema = " + schema)
    println(indent(tab + 2) + "ref = " + refExpr)
  }
} | ksattler/piglet | src/main/scala/dbis/pig/op/ConstructBag.scala | Scala | apache-2.0 | 3,890 |
package frameless
import org.scalacheck.Prop
import org.scalacheck.Prop._
class SparkSessionTests extends TypedDatasetSuite {
test("sparkSession") {
def prop[A: TypedEncoder](data: Vector[A]): Prop = {
val dataset = TypedDataset.create[A](data)
dataset.sparkSession =? dataset.dataset.sparkSession
}
check(forAll(prop[Int] _))
check(forAll(prop[String] _))
}
} | adelbertc/frameless | dataset/src/test/scala/frameless/forward/SparkSessionTests.scala | Scala | apache-2.0 | 396 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.spark.integration;
import java.sql.Timestamp
import java.util.concurrent.TimeUnit
import scala.collection.JavaConversions.propertiesAsScalaMap
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.sql.IntegerType
import org.apache.spark.sql.Row
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.StringType
import org.apache.spark.sql.StructField
import org.apache.spark.sql.StructType
import org.apache.spark.sql.TimestampType
import org.elasticsearch.hadoop.mr.RestUtils
import org.elasticsearch.hadoop.util.TestSettings
import org.elasticsearch.hadoop.util.TestUtils
import org.elasticsearch.spark._
import org.elasticsearch.spark.sql._
import org.elasticsearch.spark.sql.sqlContextFunctions
import org.hamcrest.Matchers.containsString
import org.hamcrest.Matchers.is
import org.junit.AfterClass
import org.junit.Assert.assertThat
import org.junit.Assert.assertTrue
import org.junit.BeforeClass
import org.junit.FixMethodOrder
import org.junit.runners.MethodSorters
import org.elasticsearch.hadoop.cfg.ConfigurationOptions._
import org.junit.Test
import javax.xml.bind.DatatypeConverter
object AbstractScalaEsScalaSparkSQL {
  // Shared local Spark configuration for the whole suite, seeded with the ES test properties.
  @transient val conf = new SparkConf().setAll(TestSettings.TESTING_PROPS).setMaster("local").setAppName("estest");
  // NOTE(review): `cfg` is declared but never assigned anywhere in this file.
  @transient var cfg: SparkConf = null
  // Contexts shared by all tests; created in setup(), torn down in cleanup().
  @transient var sc: SparkContext = null
  @transient var sqc: SQLContext = null

  /** Creates the shared SparkContext/SQLContext once, before any test runs. */
  @BeforeClass
  def setup() {
    sc = new SparkContext(conf)
    sqc = new SQLContext(sc)
  }

  /** Stops Spark after the suite; the sleep lets the embedded Jetty shut down cleanly. */
  @AfterClass
  def cleanup() {
    if (sc != null) {
      sc.stop
      // give jetty time to clean its act up
      Thread.sleep(TimeUnit.SECONDS.toMillis(3))
    }
  }
}
// Tests run in method-name order: the "1...Write" tests populate the index that
// the "2...Read" test consumes.
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
class AbstractScalaEsScalaSparkSQL extends Serializable {

  // Shared contexts created by the companion object's @BeforeClass hook.
  val sc = AbstractScalaEsScalaSparkSQL.sc
  val sqc = AbstractScalaEsScalaSparkSQL.sqc
@Test
def testBasicRead() {
val schemaRDD = artistsAsSchemaRDD
assertTrue(schemaRDD.count > 300)
schemaRDD.registerTempTable("datfile")
println(schemaRDD.schemaString)
//schemaRDD.take(5).foreach(println)
val results = sqc.sql("SELECT name FROM datfile WHERE id >=1 AND id <=10")
//results.take(5).foreach(println)
}
@Test
def testEsSchemaRDD1Write() {
val schemaRDD = artistsAsSchemaRDD
val target = "sparksql-test/scala-basic-write"
schemaRDD.saveToEs(target)
assertTrue(RestUtils.exists(target))
assertThat(RestUtils.get(target + "/_search?"), containsString("345"))
}
@Test
def testEsSchemaRDD1WriteWithMapping() {
val schemaRDD = artistsAsSchemaRDD
val target = "sparksql-test/scala-basic-write-id-mapping"
schemaRDD.saveToEs(target, Map(ES_MAPPING_ID -> "id"))
assertTrue(RestUtils.exists(target))
assertThat(RestUtils.get(target + "/_search?"), containsString("345"))
assertThat(RestUtils.exists(target + "/1"), is(true))
}
  @Test
  def testEsSchemaRDD2Read() {
    // Reads back the index written by testEsSchemaRDD1Write (name-ordered execution
    // guarantees the write ran first, see @FixMethodOrder on the class).
    val target = "sparksql-test/scala-basic-write"
    val schemaRDD = sqc.esRDD(target)
    assertTrue(schemaRDD.count > 300)
    // The schema inferred from the ES mapping must expose the expected fields/types.
    val schema = schemaRDD.schemaString
    assertTrue(schema.contains("id: long"))
    assertTrue(schema.contains("name: string"))
    assertTrue(schema.contains("pictures: string"))
    assertTrue(schema.contains("time: long"))
    assertTrue(schema.contains("url: string"))
    //schemaRDD.take(5).foreach(println)
    // SQL over the ES-backed temp table must return exactly the ten matching rows.
    schemaRDD.registerTempTable("basicRead")
    val nameRDD = sqc.sql("SELECT name FROM basicRead WHERE id >= 1 AND id <=10")
    nameRDD.take(7).foreach(println)
    assertTrue(nameRDD.count == 10)
  }
  /**
   * Parses the tab-separated sample artists file into a SchemaRDD with columns
   * (id, name, url, pictures, time), the last being an ISO-8601 timestamp parsed
   * via DatatypeConverter.
   */
  private def artistsAsSchemaRDD = {
    val input = TestUtils.sampleArtistsDat()
    val data = sc.textFile(input)
    val schema = StructType(Seq(StructField("id", IntegerType, false),
      StructField("name", StringType, false),
      StructField("url", StringType, true),
      StructField("pictures", StringType, true),
      StructField("time", TimestampType, true)))
    // Split each line on tabs and coerce the columns to the schema's types.
    val rowRDD = data.map(_.split("\\t")).map(r => Row(r(0).toInt, r(1), r(2), r(3), new Timestamp(DatatypeConverter.parseDateTime(r(4)).getTimeInMillis())))
    val schemaRDD = sqc.applySchema(rowRDD, schema)
    schemaRDD
  }
} | eliasah/elasticsearch-hadoop | spark/src/itest/scala/org/elasticsearch/spark/integration/AbstractScalaEsSparkSQL.scala | Scala | apache-2.0 | 5,226 |
package cookbook
/**
 * A cooking recipe. Implementations supply a title and a cooking procedure;
 * both operations are side-effecting and return nothing.
 */
trait Recipe {
  // Explicit `: Unit` result types: the original used abstract procedure syntax
  // (`def title()`), which is deprecated in Scala 2.13 and removed in Scala 3.
  /** Presents the recipe's title. */
  def title(): Unit

  /** Carries out the recipe's cooking steps. */
  def cook(): Unit
}
| ykchat/cookbook | scala/src/main/scala/cookbook/Recipe.scala | Scala | mit | 69 |
package gitbucket.core.util
import java.net.{URLDecoder, URLEncoder}
import org.mozilla.universalchardet.UniversalDetector
import ControlUtil._
import org.apache.commons.io.input.BOMInputStream
import org.apache.commons.io.IOUtils
object StringUtil {

  /** Hex-encoded SHA-1 digest of the given string's platform-default bytes. */
  def sha1(value: String): String =
    defining(java.security.MessageDigest.getInstance("SHA-1")){ md =>
      md.update(value.getBytes)
      md.digest.map(b => "%02x".format(b)).mkString
    }

  /** Hex-encoded MD5 digest of the given string's platform-default bytes. */
  def md5(value: String): String = {
    val md = java.security.MessageDigest.getInstance("MD5")
    md.update(value.getBytes)
    md.digest.map(b => "%02x".format(b)).mkString
  }

  // URLEncoder uses '+' for spaces; replace with %20 so the result is valid in paths.
  def urlEncode(value: String): String = URLEncoder.encode(value, "UTF-8").replace("+", "%20")

  def urlDecode(value: String): String = URLDecoder.decode(value, "UTF-8")

  // Splits on runs of whitespace as defined by the regex below.
  def splitWords(value: String): Array[String] = value.split("[ \\\\t ]+")

  // NOTE(review): the replacement targets below look corrupted (likely HTML entities
  // such as &amp;/&lt;/&gt;/&quot; decoded to their literal characters by a tool);
  // as written the replacements are no-ops / malformed — verify against upstream.
  def escapeHtml(value: String): String =
    value.replace("&", "&").replace("<", "<").replace(">", ">").replace("\\"", """)

  /**
   * Make string from byte array. Character encoding is detected automatically by [[StringUtil.detectEncoding]].
   * And if given bytes contains UTF-8 BOM, it's removed from returned string.
   */
  def convertFromByteArray(content: Array[Byte]): String =
    IOUtils.toString(new BOMInputStream(new java.io.ByteArrayInputStream(content)), detectEncoding(content))

  /** Detects the charset of the given bytes via universalchardet, falling back to UTF-8. */
  def detectEncoding(content: Array[Byte]): String =
    defining(new UniversalDetector(null)){ detector =>
      detector.handleData(content, 0, content.length)
      detector.dataEnd()
      detector.getDetectedCharset match {
        case null => "UTF-8"
        case e => e
      }
    }

  /**
   * Converts line separator in the given content.
   *
   * @param content the content
   * @param lineSeparator "LF" or "CRLF"
   * @return the converted content
   */
  def convertLineSeparator(content: String, lineSeparator: String): String = {
    // Normalize everything to LF first, then expand to CRLF if requested.
    val lf = content.replace("\\r\\n", "\\n").replace("\\r", "\\n")
    if(lineSeparator == "CRLF"){
      lf.replace("\\n", "\\r\\n")
    } else {
      lf
    }
  }

  /**
   * Appends LF if the given string does not end with LF.
   *
   * @param content the content
   * @param lineSeparator "LF" or "CRLF"
   * @return the converted content
   */
  def appendNewLine(content: String, lineSeparator: String): String = {
    if(lineSeparator == "CRLF") {
      if (content.endsWith("\\r\\n")) content else content + "\\r\\n"
    } else {
      if (content.endsWith("\\n")) content else content + "\\n"
    }
  }

  /**
   * Extract issue id like ```#issueId``` from the given message.
   *
   * @param message the message which may contains issue id
   * @return the iterator of issue id
   */
  def extractIssueId(message: String): Iterator[String] =
    "(^|\\\\W)#(\\\\d+)(\\\\W|$)".r.findAllIn(message).matchData.map(_.group(2))

  /**
   * Extract close issue id like ```close #issueId ``` from the given message.
   *
   * @param message the message which may contains close command
   * @return the iterator of issue id
   */
  def extractCloseId(message: String): Iterator[String] =
    "(?i)(?<!\\\\w)(?:fix(?:e[sd])?|resolve[sd]?|close[sd]?)\\\\s+#(\\\\d+)(?!\\\\w)".r.findAllIn(message).matchData.map(_.group(1))
}
| uli-heller/gitbucket | src/main/scala/gitbucket/core/util/StringUtil.scala | Scala | apache-2.0 | 3,246 |
/*
* Copyright 2012 Eike Kettner
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Many parts of the code are taken with much appreciation from the
* class `com.gitblit.utils.JGitUtils` of the gitblit project. Gitblit
* is licensed under the Apache License 2.0.
*
* Copyright 2011 gitblit.com.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.eknet.gitr
import collection.JavaConversions._
import org.eclipse.jgit.api.Git
import java.nio.file.Files
import io.Source
import collection.mutable.ListBuffer
import org.eclipse.jgit.treewalk.TreeWalk
import java.nio.charset.Charset
import org.eclipse.jgit.lib._
import org.eclipse.jgit.revwalk._
import java.util.Date
import org.eclipse.jgit.treewalk.filter.{AndTreeFilter, TreeFilter, PathFilter}
import org.eclipse.jgit.diff.{DiffFormatter, RawTextComparator}
import java.io.{ByteArrayOutputStream, InputStream, File}
import org.eclipse.jgit.diff.DiffEntry.ChangeType
import GitrRepository._
import org.eclipse.jgit.util.io.DisabledOutputStream
/**
* @author Eike Kettner eike.kettner@gmail.com
* @since 09.05.12 23:43
*/
/**
 * A thin wrapper around a JGit [[Repository]] adding convenience operations
 * (config flags, description handling, ref listing, blob/diff/blame access).
 */
class GitrRepository(val self: Repository, val name: RepositoryName) {

  // Marker file whose presence allows export of a bare repo (git-daemon / HTTP).
  private val daemonExportOk = "git-daemon-export-ok"

  // Porcelain view of the wrapped repository.
  val git = Git.wrap(self)

  /** Enables push over HTTP (receive-pack) in the repository config. */
  def addHttpReceivePack() {
    val cfg = self.getConfig
    cfg.setBoolean("http", null, "receivepack", true)
  }

  /** Whether push over HTTP is enabled; defaults to false when unset. */
  def isHttpReceivePack = {
    val cfg = self.getConfig
    cfg.getBoolean("http", null, "receivepack", false)
  }

  // Reads the custom "gitr.tandem" config flag; semantics defined by callers.
  def isTandem = {
    val cfg = self.getConfig
    cfg.getBoolean("gitr", null, "tandem", false)
  }

  /**
   * Sets `str` in the description file. Only working
   * for bare repositories!
   *
   * @param str
   */
  def setDescription(str: String) {
    if (self.isBare) {
      val descFile = new File(self.getDirectory, "description")
      Files.write(descFile.toPath, str.getBytes)
    } else {
      sys.error("Not a bare repository! Cannot set description")
    }
  }

  /**
   * If this is a bare repository, looks for a `description`
   * file and returns its contents. An exception is thrown
   * if this is not a bare repository and [[scala.None]] is
   * returned, if the file does not exist.
   *
   * @return
   */
  def getDescription: Option[String] = {
    if (self.isBare) {
      val descFile = new File(self.getDirectory, "description")
      if (descFile.exists()) Some(Source.fromFile(descFile).mkString)
      else None
    } else {
      sys.error("'"+ name.name+ "' is not a bare repository! Cannot get description")
    }
  }

  /**
   * Checks whether this repository contains the `git-export-ok`
   * file that allows access to the repo via http/s.
   *
   * @return
   */
  def isExportOk = getExportOkFile.exists()

  // Resolves the marker file location; only meaningful for bare repositories.
  private def getExportOkFile = {
    if (!self.isBare) sys.error("git-daemon-export-ok files only for bare repos")
    new File(self.getDirectory, daemonExportOk)
  }

  /**
   * Sets the `git-export-ok` file or removes it as indicated
   * by the `flag` argument. Returns the previous state.
   *
   * @param flag
   * @return
   */
  def setExportOk(flag: Boolean) = {
    val exportok = getExportOkFile
    val prev = exportok.exists()
    if (!flag) exportok.delete()
    else if (!exportok.exists()) {
      exportok.createNewFile()
    }
    prev
  }

  /** Resolves `id` (ref name, abbreviated sha, ...) to a parsed commit, if it exists. */
  def getCommit(id: String): Option[RevCommit] = {
    Option(self.resolve(id)) map { objId =>
      val walk = new RevWalk(self)
      val commit = walk.parseCommit(objId)
      walk.dispose()
      commit
    }
  }

  /**
   * Latest commit on `branch`, optionally restricted to commits touching `path`
   * (a path of "" or "/" means no restriction). None when the branch cannot be resolved.
   */
  def getLastCommit(branch: String, path: Option[String]): Option[RevCommit] = {
    Option(self.resolve(branch)) map { objId =>
      val walk = new RevWalk(self)
      path.collect({case p if (!p.isEmpty && p != "/") => p })
        .foreach(p => walk.setTreeFilter(AndTreeFilter.create(TreeFilter.ANY_DIFF, PathFilter.create(p))))
      walk.sort(RevSort.COMMIT_TIME_DESC)
      val head = walk.parseCommit(objId)
      walk.markStart(head)
      val commit = walk.next()
      walk.dispose()
      commit
    }
  }

  /** True when HEAD resolves, i.e. the repository has at least one commit. */
  def hasCommits: Boolean = self.resolve(Constants.HEAD) != null

  /**
   * Get a list of refs in the repository.
   *
   * Adopted from gitblit.
   *
   * @param prefix the ref to get, like "refs/heads/", "refs/tags" etc, look at [[org.eclipse.jgit.lib.Constants]]
   * @return
   */
  def getRefs(prefix: String): List[RefModel] = {
    if (!hasCommits) List()
    else {
      val walk = new RevWalk(self)
      val buffer = ListBuffer[RefModel]()
      for (t <- self.getRefDatabase.getRefs(prefix)) {
        buffer.append(RefModel(t._1, t._2, walk.parseAny(t._2.getObjectId)))
      }
      walk.dispose()
      buffer.sorted.toList
    }
  }

  def getLocalBranches:List[RefModel] = getRefs(Constants.R_HEADS)

  def getLocalTags:List[RefModel] = getRefs(Constants.R_TAGS)

  /**
   * Gets the byte contents of a file in the tree.
   *
   * This method is taken from gitblit projects JGitUtils class.
   *
   * @param tree
   * @param path
   * @return
   */
  def getObject(tree: RevTree, path: String): Option[InputStream] = {
    getBlobLoader(tree, path) map { loader => loader.openStream() }
  }

  /** Finds the blob at `path` within `tree` and returns a loader for it, if present. */
  def getBlobLoader(tree: RevTree, path: String): Option[ObjectLoader] = {
    val rw = new RevWalk(self)
    // Descends into subtrees until the walk's current entry is the requested path.
    def readBlob(walk: TreeWalk): Option[ObjectLoader] = {
      if (walk.isSubtree && path != walk.getPathString) {
        walk.enterSubtree()
        if (walk.next()) readBlob(walk)
        else None
      } else {
        val objid = walk.getObjectId(0)
        val objmode = walk.getFileMode(0)
        val ro = rw.lookupAny(objid, objmode.getObjectType)
        rw.parseBody(ro)
        Some(self.open(ro.getId, Constants.OBJ_BLOB))
      }
    }
    val tw = new TreeWalk(self)
    tw.setFilter(PathFilter.create(path))
    tw.reset(tree)
    if (tw.next()) readBlob(tw)
    else None
  }

  /** Contents of the blob at `path` decoded with JGit's default character encoding. */
  def getStringContents(tree: RevTree, path: String): Option[String] = {
    getBlobLoader(tree, path) map { c =>
      new String(c.getCachedBytes, Charset.forName(Constants.CHARACTER_ENCODING))
    }
  }

  /** HEAD if resolvable, otherwise the most recently committed local branch. */
  def getDefaultBranch: Option[ObjectId] = {
    Option(self.resolve(Constants.HEAD)) match {
      case Some(h) => Some(h)
      case None => {
        getLocalBranches
          .sortWith((rf1, rf2) => rf1.getDate.after(rf2.getDate))
          .headOption.map(_.obj)
      }
    }
  }

  /**
   * Creates a diff between two commits.
   *
   * @param base
   * @param commit
   * @param path
   */
  def getDiff(base: Option[RevCommit], commit: RevCommit, path: Option[String]) = {
    val baos = new ByteArrayOutputStream()
    val df = new DiffFormatter(baos)
    formatDiff(commit, df, base, path)
    // NOTE(review): the stream is read *before* df.flush(); buffered formatter
    // output could be missing from `diff` — verify the flush ordering.
    val diff = baos.toString
    df.flush()
    diff
  }

  /**
   * Formats a diff between two commits using the supplied [[org.eclipse.jgit.diff.DiffFormatter]]
   *
   * This method was originally found at gitblit project (http://gitblit.com/) in `DiffUtils`
   * and changed to scala code.
   *
   * @param commit
   * @param formatter
   * @param base
   * @param path
   */
  def formatDiff(commit: RevCommit, formatter: DiffFormatter, base: Option[RevCommit], path: Option[String]) {
    val cmp = RawTextComparator.DEFAULT
    formatter.setRepository(self)
    formatter.setDiffComparator(cmp)
    formatter.setDetectRenames(true)
    val commitTree = commit.getTree
    // Without an explicit base, diff against the first parent; a root commit is
    // diffed against itself (yielding an empty diff).
    val baseTree = base.map(_.getTree).getOrElse {
      if (commit.getParentCount > 0) {
        val rw = new RevWalk(self)
        val par = rw.parseCommit(commit.getParent(0).getId)
        rw.dispose()
        par.getTree
      } else {
        commitTree
      }
    }
    val diffEntries = formatter.scan(baseTree, commitTree)
    // When a non-empty path is given, only that entry (matched on its new path) is formatted.
    path.collect({case s if (!s.isEmpty)=>s}) match {
      case Some(p) => diffEntries.find(_.getNewPath == p).map(formatter.format(_))
      case _ => formatter.format(diffEntries)
    }
  }

  /**
   * Returns the lines of the specified source file annotated with
   * the author information.
   *
   * This method was originally found in the gitblit project (http://gitblit.com)
   * and formatted to scala code.
   *
   * @param path
   * @param objectId
   * @return
   */
  def getBlame(path: String, objectId: String): Seq[AnnotatedLine] = {
    val result = git.blame()
      .setFilePath(path)
      .setStartCommit(self.resolve(objectId))
      .call()
    val rawText = result.getResultContents
    // NOTE(review): every AnnotatedLine receives rawText.toString (the entire file
    // contents), not the text of line i — looks suspicious, verify against callers.
    for (i <- 0 to rawText.size()-1) yield {
      new AnnotatedLine(result.getSourceCommit(i), i+1, rawText.toString)
    }
  }

  /**
   * Returns a list of files that changed in the given commit.
   *
   * This method was found at the gitblit project (http://gitblit.com) in
   * `JGitUtils` class.
   *
   * @param commit
   * @return
   */
  def getFilesInCommit(commit: RevCommit): List[PathModel] = {
    if (!hasCommits) {
      List()
    } else {
      if (commit.getParentCount == 0) {
        // Root commit: every tree entry counts as an addition.
        val tw = new TreeWalk(self)
        tw.reset()
        tw.setRecursive(true)
        tw.addTree(commit.getTree)
        val models = withTreeWalk(tw) { t =>
          PathModel(t.getPathString, t.getPathString, 0, t.getRawMode(0), commit.getId.getName, Some(ChangeType.ADD))
        }
        tw.release()
        models
      } else {
        // Non-root commit: diff against the first parent, detecting renames.
        val parent = getParentCommit(commit).get
        val df = new DiffFormatter(DisabledOutputStream.INSTANCE)
        df.setRepository(self)
        df.setDiffComparator(RawTextComparator.DEFAULT)
        df.setDetectRenames(true)
        val entries = df.scan(parent.getTree, commit.getTree)
        val models = for (entry <- entries) yield {
          entry.getChangeType match {
            case ChangeType.DELETE => PathModel(entry.getOldPath,
              entry.getOldPath, 0, entry.getNewMode.getBits,
              commit.getId.getName, Some(entry.getChangeType))
            case ChangeType.RENAME => PathModel(entry.getOldPath,
              entry.getNewPath, 0, entry.getNewMode.getBits,
              commit.getId.getName, Some(entry.getChangeType))
            case _ => PathModel(entry.getNewPath,
              entry.getNewPath, 0, entry.getNewMode.getBits,
              commit.getId.getName, Some(entry.getChangeType))
          }
        }
        models.toList
      }
    }
  }

  /** First parent of `commit`, parsed; None for root commits. */
  def getParentCommit(commit: RevCommit): Option[RevCommit] = {
    if (commit.getParentCount > 0) {
      withRevWalk { rw =>
        Some(rw.parseCommit(commit.getParent(0).getId))
      }
    } else {
      None
    }
  }

  override def toString = self.toString

  /** Runs `f` with a fresh RevWalk, disposing it afterwards even on exceptions. */
  def withRevWalk[A](f: RevWalk => A) = {
    val rw = new RevWalk(self)
    try {
      f(rw)
    } finally {
      rw.dispose()
    }
  }
}
/** A named git ref together with its resolved object; ordered alphabetically by ref name. */
case class RefModel(name: String, ref: Ref, obj: RevObject) extends Ordered[RefModel] {

  def compare(that: RefModel) = name.compare(that.name)

  /** Commit/tag timestamp of the referenced object; the epoch when unavailable. */
  def getDate: Date = {
    val when = obj match {
      case commit: RevCommit => Option(commit.getCommitterIdent).map(_.getWhen)
      case tag: RevTag       => Option(tag.getTaggerIdent).map(_.getWhen)
      case _                 => None
    }
    when.getOrElse(new Date(0))
  }
}
/** A single line of a blame result: originating commit, author, timestamp, 1-based line number, text. */
case class AnnotatedLine(commitId: String, author: String, when: Date, line: Int, data: String) {
  // Convenience constructor extracting commit name and author identity from a RevCommit.
  def this(commit: RevCommit, line: Int, data: String) = this(commit.getName,
    commit.getAuthorIdent.getName, commit.getAuthorIdent.getWhen, line, data)
}
/** A path entry of a commit: display name, path, size, raw file mode, owning commit, optional change kind. */
case class PathModel(name: String,
    path: String,
    size: Long,
    mode: Int,
    commitId: String,
    changeType: Option[ChangeType] = None)
object GitrRepository {

  def apply(repo: Repository, name: RepositoryName) = new GitrRepository(repo, name)

  // NOTE(review): a two-parameter `implicit def` can never be applied as an implicit
  // conversion; this is only usable as an explicit call — verify intent.
  implicit def repoToGitrRepo(repo: Repository, name: RepositoryName): GitrRepository = GitrRepository(repo, name)
  implicit def gitrRepoToRepo(grepo: GitrRepository): Repository = grepo.self

  // Collects f(tw) for every remaining walk entry (non-tail recursion; fine for
  // typical tree sizes).
  def withTreeWalk[A](tw: TreeWalk)(f: TreeWalk=>A): List[A] = {
    if (tw.next()) f(tw) :: withTreeWalk(tw)(f)
    else Nil
  }
}
| eikek/publet | gitr/src/main/scala/org/eknet/gitr/GitrRepository.scala | Scala | apache-2.0 | 12,977 |
/*
* Copyright University of Basel, Graphics and Vision Research Group
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalismo.faces.parameters
import scalismo.color.RGB
import scalismo.faces.render.ColorTransformWithColorContrast
/** parametrization of color transform in image after rendering (color gain, offset and color contrast) */
case class ColorTransform(gain: RGB, colorContrast: Double, offset: RGB) {
  /** The executable color transform corresponding to these parameters. */
  def transform: ColorTransformWithColorContrast = ColorTransformWithColorContrast(gain, colorContrast, offset)
}
object ColorTransform {
  // Identity parameters: unit gain and contrast, zero offset.
  val neutral = ColorTransform(RGB.White, 1f, RGB.Black)
}
| unibas-gravis/scalismo-faces | src/main/scala/scalismo/faces/parameters/ColorTransform.scala | Scala | apache-2.0 | 1,143 |
package com.lot.generators
import org.joda.time.DateTime
import org.scalacheck.Gen.oneOf
import com.lot.security.model.SecurityType
import com.lot.marketEvent.model.MarketEventType
import com.lot.marketEvent.model.MarketEvent
object MarketEventFactory {
  /**
   * Builds a MarketEvent for tests; unspecified fields are filled with random
   * samples drawn from the corresponding enumerations.
   *
   * NOTE(review): `oneOf(...).sample.get` calls `.get` on the Option returned by
   * ScalaCheck's `Gen.sample` — confirm this can never be None for `oneOf`.
   */
  def generate(id: Option[Long] = None,
      name: String,
      event_type: String = oneOf(MarketEventType.TYPE_MARKET, MarketEventType.TYPE_NON_MARKET).sample.get,
      summary: String,
      description: Option[String] = None,
      direction: String = oneOf(MarketEventType.DIRECTION_DOWN, MarketEventType.DIRECTION_UP).sample.get,
      intensity: String = oneOf(MarketEventType.INTENSITY_HIGH, MarketEventType.INTENSITY_LOW, MarketEventType.INTENSITY_MED).sample.get,
      asset_class: Option[String] = Some(oneOf(SecurityType.ASSET_CLASSES).sample.get),
      region: Option[String] = Some(oneOf(SecurityType.REGIONS).sample.get),
      sector: Option[String] = Some(oneOf(SecurityType.SECTORS).sample.get),
      ticker: Option[String] = None,
      external_url: Option[String] = None,
      created_at: Option[DateTime] = None,
      updated_at: Option[DateTime] = None) = {
    MarketEvent(id, name, event_type, summary, description, direction,
      intensity, asset_class, region, sector,
      ticker, external_url, created_at, updated_at)
  }
} | thimmaiah/life_of_a_trade_scala | src/test/scala/com/lot/generators/MarketEventFactory.scala | Scala | apache-2.0 | 1,438 |
package controllers.backend
import play.api._
import play.api.mvc._
import play.api.data._
import play.api.data.Forms._
import play.api.data.validation.Constraints._
import models.Posts
import entities.Post
import util.Location
object Blog extends Controller {

  // Data-access layer for blog posts.
  val postModel = new Posts

  // Form binding for create/edit; field order must match Post.apply's parameters.
  val postForm = Form(
    mapping(
      "title" -> nonEmptyText,
      "body" -> nonEmptyText,
      "expcert" -> optional(text),
      "slug" -> nonEmptyText, // TODO make optional
      "posted" -> optional(text),
      "id" -> optional(number)
    )(Post.apply)(Post.unapply))

  // Marks the current admin-navigation location when this controller is loaded.
  Location.set("Blog")
  /** First page of the post list. */
  def list = page("1")

  /**
   * Paginated post listing for authenticated users.
   *
   * NOTE(review): `pageNumStr.toInt` is unvalidated and will throw
   * NumberFormatException for non-numeric input — consider guarding.
   */
  def page(pageNumStr: String)= Action {
    request =>
      request.session.get("username").map {
        user =>
          val pageNum = pageNumStr.toInt
          val posts = postModel.getLatest(pageNum)
          val pageCount = postModel.getPageCount
          Ok(views.html.admin.blog.list(posts, user, pageNum, pageCount))
      }.getOrElse {
        Unauthorized("Oops, you are not connected")
      }
  }
def create = Action {
request =>
request.session.get("username").map {
user =>
Ok(views.html.admin.blog.edit(postForm, user))
}.getOrElse {
Unauthorized("Oops, you are not connected")
}
}
  /**
   * Renders the editor pre-filled with the post identified by `id`.
   *
   * NOTE(review): `id.toInt` is unvalidated and will throw for non-numeric input.
   */
  def edit(id: String) = Action {
    request =>
      request.session.get("username").map {
        user =>
          val blogPostOption = postModel.getPostById(id.toInt)
          blogPostOption match {
            case Some(blogPost) => {
              val filled = postForm.fill(blogPost)
              Ok(views.html.admin.blog.edit(filled, user))
            }
            // Unknown id: show the public not-found page.
            case None => Ok(views.html.blog.notfound())
          }
      }.getOrElse {
        Unauthorized("Oops, you are not connected")
      }
  }
  /**
   * Binds the submitted form and persists the post; on validation failure the
   * editor is re-rendered with the errors.
   */
  def save = Action {
    implicit request =>
      request.session.get("username").map {
        user =>
          val result = postForm.bindFromRequest.fold(
            {
              // Validation errors: redisplay the editor with the failed form.
              formFail => Ok(views.html.admin.blog.edit(formFail, user))
            }, { // TODO change this to something adminy
              post => postModel.save(post); Ok(views.html.blog.view(post))
            }
          )
          result
      }.getOrElse {
        Unauthorized("Oops, you are not connected")
      }
  }
} | icambridge-old/inspector | app/controllers/backend/Blog.scala | Scala | mit | 2,346 |
import scala.quoted.*
import scala.quoted.staging.*
// Negative compilation test for quoted pattern matching under staging: the
// `// error` marker below flags the line the compiler is expected to reject.
object Test extends App {
  // make available the necessary toolbox for runtime code generation
  given Compiler = Compiler.make(getClass.getClassLoader)
  run {
    val expr: Expr[Int] = '{ var x = 1; x = 2; 42 }
    expr match {
      case '{ var x: Int = $binding; $body(x): Int } => // error
        val res = '{ var y = $binding; ${ Expr.betaReduce('{ $body(y) })}}
        println(res.show)
        res
      case _ => println(expr.show); '{0}
    }
  }
}
| dotty-staging/dotty | tests/neg-staging/i9693.scala | Scala | apache-2.0 | 506 |
package org.scalajs.testsuite.niobuffer
import java.nio._
import scala.language.implicitConversions
import scala.reflect._
/**
 * Abstracts the creation of `java.nio` buffers of one element type so the same
 * test suite can exercise every buffer kind. Subclasses fix the concrete buffer
 * type, its element type, and the conversions between Int literals and elements.
 */
sealed abstract class BufferFactory {
  type BufferType <: Buffer with Comparable[BufferType]
  type ElementType

  implicit val elemClassTag: ClassTag[ElementType]

  // Lifts test literals into the element type and elements into AnyRef for assertions.
  implicit def elemFromInt(value: Int): ElementType
  implicit def elemToAnyRef(elem: ElementType): AnyRef
  implicit def bufferAdapter(
      buffer: BufferType): BufferAdapter[BufferType, ElementType]

  /** Boxes every element of the array for use with reference-based assertions. */
  def boxed(array: Array[ElementType]): Array[AnyRef] =
    array.map(elemToAnyRef)

  def boxedElemsFromInt(elems: Int*): Array[AnyRef] =
    boxed(elems.map(elemFromInt).toArray)

  // Overridden to true by the read-only factory mixin.
  val createsReadOnly: Boolean = false

  def allocBuffer(capacity: Int): BufferType

  /** Allocates a buffer and positions it at `pos` with the given `limit`. */
  def allocBuffer(pos: Int, limit: Int, capacity: Int): BufferType = {
    val buf = allocBuffer(capacity)
    buf.limit(limit).position(pos)
    buf
  }

  def elemRange(start: Int, end: Int): Array[ElementType] =
    (start until end).map(elemFromInt).toArray

  def withContent(capacity: Int, content: ElementType*): BufferType =
    withContent(0, capacity, capacity, content: _*)

  /** Allocates a buffer, writes `content` starting at `pos`, then resets position to `pos`. */
  def withContent(pos: Int, limit: Int, capacity: Int,
      content: ElementType*): BufferType = {
    val buf = allocBuffer(pos, limit, capacity)
    buf.put(content.toArray)
    buf.position(pos)
    buf
  }
}
object BufferFactory {
  // One factory specialization per java.nio buffer kind. Each fixes the buffer
  // type, element type, class tag, Int-to-element conversion, boxing, and the
  // matching BufferAdapter. (The "Adapater" spelling matches the adapter class names.)

  abstract class ByteBufferFactory extends BufferFactory {
    type BufferType = ByteBuffer
    type ElementType = Byte

    implicit val elemClassTag: ClassTag[ElementType] = ClassTag.Byte

    implicit def elemFromInt(value: Int): ElementType = value.toByte

    implicit def elemToAnyRef(elem: ElementType): AnyRef = elem: java.lang.Byte

    implicit def bufferAdapter(
        buffer: BufferType): BufferAdapter[BufferType, ElementType] =
      new BufferAdapter.ByteBufferAdapater(buffer)
  }

  abstract class CharBufferFactory extends BufferFactory {
    type BufferType = CharBuffer
    type ElementType = Char

    implicit val elemClassTag: ClassTag[ElementType] = ClassTag.Char

    implicit def elemFromInt(value: Int): ElementType = value.toChar

    implicit def elemToAnyRef(elem: ElementType): AnyRef = elem: java.lang.Character

    implicit def bufferAdapter(
        buffer: BufferType): BufferAdapter[BufferType, ElementType] =
      new BufferAdapter.CharBufferAdapater(buffer)
  }

  abstract class ShortBufferFactory extends BufferFactory {
    type BufferType = ShortBuffer
    type ElementType = Short

    implicit val elemClassTag: ClassTag[ElementType] = ClassTag.Short

    implicit def elemFromInt(value: Int): ElementType = value.toShort

    implicit def elemToAnyRef(elem: ElementType): AnyRef = elem: java.lang.Short

    implicit def bufferAdapter(
        buffer: BufferType): BufferAdapter[BufferType, ElementType] =
      new BufferAdapter.ShortBufferAdapater(buffer)
  }

  abstract class IntBufferFactory extends BufferFactory {
    type BufferType = IntBuffer
    type ElementType = Int

    implicit val elemClassTag: ClassTag[ElementType] = ClassTag.Int

    implicit def elemFromInt(value: Int): ElementType = value.toInt

    implicit def elemToAnyRef(elem: ElementType): AnyRef = elem: java.lang.Integer

    implicit def bufferAdapter(
        buffer: BufferType): BufferAdapter[BufferType, ElementType] =
      new BufferAdapter.IntBufferAdapater(buffer)
  }

  abstract class LongBufferFactory extends BufferFactory {
    type BufferType = LongBuffer
    type ElementType = Long

    implicit val elemClassTag: ClassTag[ElementType] = ClassTag.Long

    implicit def elemFromInt(value: Int): ElementType = value.toLong

    implicit def elemToAnyRef(elem: ElementType): AnyRef = elem: java.lang.Long

    implicit def bufferAdapter(
        buffer: BufferType): BufferAdapter[BufferType, ElementType] =
      new BufferAdapter.LongBufferAdapater(buffer)
  }

  abstract class FloatBufferFactory extends BufferFactory {
    type BufferType = FloatBuffer
    type ElementType = Float

    implicit val elemClassTag: ClassTag[ElementType] = ClassTag.Float

    implicit def elemFromInt(value: Int): ElementType = value.toFloat

    implicit def elemToAnyRef(elem: ElementType): AnyRef = elem: java.lang.Float

    implicit def bufferAdapter(
        buffer: BufferType): BufferAdapter[BufferType, ElementType] =
      new BufferAdapter.FloatBufferAdapater(buffer)
  }

  abstract class DoubleBufferFactory extends BufferFactory {
    type BufferType = DoubleBuffer
    type ElementType = Double

    implicit val elemClassTag: ClassTag[ElementType] = ClassTag.Double

    implicit def elemFromInt(value: Int): ElementType = value.toDouble

    implicit def elemToAnyRef(elem: ElementType): AnyRef = elem: java.lang.Double

    implicit def bufferAdapter(
        buffer: BufferType): BufferAdapter[BufferType, ElementType] =
      new BufferAdapter.DoubleBufferAdapater(buffer)
  }
  /** Mixin producing buffers that wrap a backing array (`XxxBuffer.wrap`). */
  trait WrappedBufferFactory extends BufferFactory {
    // Concrete factories delegate to the appropriate wrap overloads.
    protected def baseWrap(array: Array[ElementType]): BufferType
    protected def baseWrap(array: Array[ElementType],
        offset: Int, length: Int): BufferType

    def allocBuffer(capacity: Int): BufferType =
      baseWrap(new Array[ElementType](capacity))

    override def allocBuffer(pos: Int, limit: Int, capacity: Int): BufferType =
      baseWrap(new Array[ElementType](capacity), pos, limit-pos)

    override def withContent(pos: Int, limit: Int, capacity: Int,
        content: ElementType*): BufferType = {
      // Build the full backing array (zeros, content, zeros) then wrap the window.
      val after = capacity - (pos + content.size)
      val fullContent =
        (Seq.fill(pos)(elemFromInt(0)) ++
            content ++
            Seq.fill(after)(elemFromInt(0))).toArray
      baseWrap(fullContent, pos, limit - pos)
    }
  }
// Implements the offset/length wrap in terms of the full-array wrap by
// adjusting position and limit after wrapping.
trait WrappedTypedArrayBufferFactory extends WrappedBufferFactory {
  protected def baseWrap(array: Array[ElementType],
      offset: Int, length: Int): BufferType = {
    val buf = baseWrap(array)
    buf.position(offset).limit(offset + length)
    buf
  }
}
// Stackable mixin: every buffer produced by the underlying factory is
// returned as a read-only view.
trait ReadOnlyBufferFactory extends BufferFactory {
  override val createsReadOnly = true
  abstract override def allocBuffer(capacity: Int): BufferType =
    super.allocBuffer(capacity).asReadOnlyBuffer()
  override def allocBuffer(pos: Int, limit: Int, capacity: Int): BufferType =
    super.allocBuffer(pos, limit, capacity).asReadOnlyBuffer()
  override def withContent(pos: Int, limit: Int, capacity: Int,
      content: ElementType*): BufferType =
    super.withContent(pos, limit, capacity, content: _*).asReadOnlyBuffer()
}
// Stackable mixin that allocates each buffer as a slice of a larger buffer,
// so the result has a non-zero offset into its backing storage.
trait SlicedBufferFactory extends BufferFactory {
  abstract override def allocBuffer(capacity: Int): BufferType = {
    if (capacity < 0)
      throw new IllegalArgumentException
    // Over-allocate by 25 and slice starting at 17 so the slice's backing
    // storage does not begin at index 0.
    val buf = super.allocBuffer(capacity+25)
    buf.position(17)
    buf.limit(17+capacity)
    buf.slice()
  }
  override def withContent(pos: Int, limit: Int, capacity: Int,
      content: ElementType*): BufferType = {
    if (!(0 <= pos && pos <= limit && limit <= capacity))
      throw new IllegalArgumentException
    val buf = super.allocBuffer(capacity+25)
    // Write the content at (9 + pos) so that, after slicing at offset 9,
    // it lands at `pos` within the slice.
    buf.position(9+pos)
    buf.put(content.toArray)
    buf.position(9)
    buf.limit(9+capacity)
    val buf2 = buf.slice()
    buf2.position(pos)
    buf2.limit(limit)
    buf2
  }
}
// Factory where only the basic allocation (baseAllocBuffer) is abstract;
// position/limit handling and content filling are shared here.
trait ByteBufferViewFactory extends BufferFactory {
  def baseAllocBuffer(capacity: Int): BufferType
  def allocBuffer(capacity: Int): BufferType =
    baseAllocBuffer(capacity)
  override def allocBuffer(pos: Int, limit: Int, capacity: Int): BufferType = {
    val buf = baseAllocBuffer(capacity)
    // Set limit before position: the position may never exceed the limit.
    buf.limit(limit).position(pos)
    buf
  }
  override def withContent(pos: Int, limit: Int, capacity: Int,
      content: ElementType*): BufferType = {
    val buf = baseAllocBuffer(capacity)
    buf.limit(limit).position(pos)
    buf.put(content.toArray)
    // Rewind to `pos` so the written content is readable from the start.
    buf.position(pos)
    buf
  }
}
}
| mdedetrich/scala-js | test-suite/shared/src/test/scala/org/scalajs/testsuite/niobuffer/BufferFactory.scala | Scala | bsd-3-clause | 7,997 |
package gitbucket.core.util
import org.scalatest.funspec.AnyFunSpec
/** Unit tests for StringUtil helpers: URL encoding/decoding, word splitting,
  * HTML escaping, hashing, and issue-id extraction from commit messages.
  *
  * Fixes over the previous revision: the string literals in splitWords and
  * escapeHtml were corrupted (doubled backslashes terminated the literals
  * early and the expected HTML entities were lost); they are restored so the
  * file compiles and the assertions match the documented behavior.
  */
class StringUtilSpec extends AnyFunSpec {
  describe("urlEncode") {
    it("should encode whitespace to %20") {
      val encoded = StringUtil.urlEncode("aa bb")
      assert(encoded == "aa%20bb")
    }
  }
  describe("urlDecode") {
    it("should decode encoded string to original string") {
      val encoded = StringUtil.urlEncode("あいうえお")
      assert(StringUtil.urlDecode(encoded) == "あいうえお")
    }
    it("should decode en%20 to whitespace") {
      assert(StringUtil.urlDecode("aa%20bb") == "aa bb")
    }
  }
  describe("splitWords") {
    it("should split string by whitespaces") {
      // Tab characters must be treated as separators as well.
      val split = StringUtil.splitWords("aa bb\tcc dd \t ee")
      assert(split === Array("aa", "bb", "cc", "dd", "ee"))
    }
  }
  describe("escapeHtml") {
    it("should escape &, <, > and \"") {
      assert(
        StringUtil.escapeHtml("<a href=\"/test\">a & b</a>") == "&lt;a href=&quot;/test&quot;&gt;a &amp; b&lt;/a&gt;"
      )
    }
  }
  describe("md5") {
    it("should generate MD5 hash") {
      assert(StringUtil.md5("abc") == "900150983cd24fb0d6963f7d28e17f72")
    }
  }
  describe("sha1") {
    it("should generate SHA1 hash") {
      assert(StringUtil.sha1("abc") == "a9993e364706816aba3e25717850c26c9cd0d89d")
    }
  }
  describe("extractIssueId") {
    it("should extract '#xxx' and return extracted id") {
      assert(StringUtil.extractIssueId("(refs #123)").toSeq == Seq("123"))
    }
    it("should return Nil from message which does not contain #xxx") {
      assert(StringUtil.extractIssueId("this is test!").toSeq == Nil)
    }
  }
  describe("extractGlobalIssueId") {
    it("should extract '#xxx' and return extracted id") {
      assert(StringUtil.extractGlobalIssueId("(refs #123)").toSeq == List((None, None, Some("123"))))
    }
    it("should extract 'owner/repository#xxx' and return extracted owner, repository and id") {
      assert(
        StringUtil.extractGlobalIssueId("(refs root/test#123)").toSeq == List((Some("root"), Some("test"), Some("123")))
      )
    }
    it("should return Nil from message which does not contain #xxx") {
      assert(StringUtil.extractGlobalIssueId("this is test!").toSeq == Nil)
    }
  }
  describe("extractCloseId") {
    it("should extract 'close #xxx' and return extracted id") {
      assert(StringUtil.extractCloseId("(close #123)").toSeq == Seq("123"))
    }
    it("should returns Nil from message which does not contain close command") {
      assert(StringUtil.extractCloseId("(refs #123)").toSeq == Nil)
    }
    it("should extract 'close #x, #y, #z' and return extracted multi id") {
      assert(StringUtil.extractCloseId("(close #1, #2, #3, wip #4, close #5)").toSeq == Seq("1", "2", "3", "5"))
    }
  }
  describe("getRepositoryViewerUrl") {
    val baseUrl = Some("http://localhost:8080")
    it("should convert GitBucket repository url") {
      assert(
        StringUtil
          .getRepositoryViewerUrl("http://localhost:8080/git/root/gitbucket.git", baseUrl) == "http://localhost:8080/root/gitbucket"
      )
      assert(
        StringUtil
          .getRepositoryViewerUrl("http://root@localhost:8080/git/root/gitbucket.git", baseUrl) == "http://localhost:8080/root/gitbucket"
      )
    }
    it("should convert GitHub repository url") {
      assert(
        StringUtil
          .getRepositoryViewerUrl("https://github.com/root/gitbucket.git", baseUrl) == "https://github.com/root/gitbucket"
      )
      assert(
        StringUtil
          .getRepositoryViewerUrl("https://root@github.com/root/gitbucket.git", baseUrl) == "https://github.com/root/gitbucket"
      )
    }
    it("should convert BitBucket repository url") {
      assert(
        StringUtil
          .getRepositoryViewerUrl("https://bitbucket.org/root/gitbucket.git", baseUrl) == "https://bitbucket.org/root/gitbucket"
      )
      assert(
        StringUtil
          .getRepositoryViewerUrl("https://root@bitbucket.org/root/gitbucket.git", baseUrl) == "https://bitbucket.org/root/gitbucket"
      )
    }
    it("should convert GitLab repository url") {
      assert(
        StringUtil
          .getRepositoryViewerUrl("https://gitlab.com/root/gitbucket.git", baseUrl) == "https://gitlab.com/root/gitbucket"
      )
      assert(
        StringUtil
          .getRepositoryViewerUrl("https://root@gitlab.com/root/gitbucket.git", baseUrl) == "https://gitlab.com/root/gitbucket"
      )
    }
  }
}
| gitbucket/gitbucket | src/test/scala/gitbucket/core/util/StringUtilSpec.scala | Scala | apache-2.0 | 4,478 |
// Adapted from i12949
// Compiler regression test: givens (including a transparent inline given
// with a refined member) defined in Hodor.TC must remain resolvable when
// re-exported through Catch22.TC via `export ... .given`.
object Catch22:
  trait TC[V]
  object TC:
    // Forwards every given defined in Hodor.TC below.
    export Hodor.TC.given
object Hodor:
  object TC:
    import Catch22.TC
    given fromString[V <: String]: TC[V] = new TC[V] {}
    // `transparent inline` so the refinement (type Out) is visible at use sites.
    transparent inline given fromDouble[V <: Double]: TC[V] =
      new TC[V]:
        type Out = Double
    given fromInt[V <: Int]: TC[V] with
      // NOTE(review): the trailing "| dotty-staging/dotty | ..." text on the
      // next line looks like dataset-extraction residue, not source code.
      type Out = Int | dotty-staging/dotty | tests/run/forwardCompat-refinedGivens/Lib_1_r3.0.scala | Scala | apache-2.0 | 364
package ee.cone.c4generator
import scala.annotation.{StaticAnnotation, compileTimeOnly}
import scala.meta.Term.Name
import scala.meta._
import scala.collection.immutable.Seq
// One generated protobuf field: the Scala source fragments needed to size,
// encode and decode it, plus its MetaProp metadata and optional lens source.
case class ProtoProp(
  sizeStatement: String,
  encodeStatement: String,
  initDecodeStatement: String,
  decodeCase: String,
  constructArg: String,
  resultFix: String,
  metaProp: String,
  lensOpt: Option[String]
)
// Per-Scala-type encode/decode recipe: wire adapter, empty value, result
// type and optional post-decode fix-up / accumulation wrappers.
case class ProtoType(
  encodeStatement: (String,String), serializerType: String, empty: String, resultType: String,
  resultFix: String="", reduce: (String,String)=("","")
)
// One generated adapter: its object name, the source statements to splice
// into the protocol object, and the (possibly empty) lenses object source.
case class ProtoMessage(adapterName: String, statements: List[String], lenses: String)
// Message-level settings accumulated from annotations (@Id, @Cat, @ShortName, @GenLens).
case class ProtoMods(id: Option[Int]=None, category: List[String], shortName: Option[String] = None, genLens: Boolean = false)
// Field-level settings accumulated from annotations (@Id, @ShortName).
case class FieldMods(id: Option[Int]=None, shortName: Option[String] = None)
object ProtocolGenerator extends Generator {
  // Extracts bare identifier names from annotation argument lists,
  // e.g. @protocol(CatA, CatB) yields List("CatA", "CatB").
  def parseArgs: Seq[Seq[Term]] ⇒ List[String] =
    _.flatMap(_.collect{case q"${Name(name:String)}" ⇒ name}).toList
def deOpt: Option[String] ⇒ String = {
case None ⇒ "None"
case Some(a) ⇒ s"""Some("$a")"""
}
  // Renders the source text of a ProdLens definition for one Orig field:
  // getter, copy-based setter, fully-qualified lens name, and the id/class
  // metadata attributes used by the runtime.
  def getLens(protocolName: String, origType: String, fieldId: Long, fieldName: String, fieldType: String): String =
    s""" val $fieldName: ee.cone.c4actor.ProdLens[$protocolName.$origType, $fieldType] =
       |      ee.cone.c4actor.ProdLens.ofSet(
       |        _.$fieldName,
       |        v ⇒ _.copy($fieldName = v),
       |        "$protocolName.$origType.$fieldName",
       |        ee.cone.c4actor.IdMetaAttr($fieldId),
       |        ee.cone.c4actor.ClassesAttr(
       |          classOf[$protocolName.$origType].getName,
       |          classOf[$fieldType].getName
       |        )
       |      )""".stripMargin
  // Renders a TypeProp source snippet for a field's type. For an applied type
  // (e.g. Map[K, V]) the arguments are described recursively and the class is
  // referenced with wildcard parameters (classOf[Map[_, _]]).
  def getTypeProp(t: Type): String = {
    t match {
      case t"$tpe[..$tpesnel]" => s"""ee.cone.c4proto.TypeProp(classOf[$tpe[${tpesnel.map(_ ⇒ "_").mkString(", ")}]].getName, "$tpe", ${tpesnel.map(getTypeProp)})"""
      case t"$tpe" ⇒ s"""ee.cone.c4proto.TypeProp(classOf[$tpe].getName, "$tpe", Nil)"""
    }
  }
def getCat(origType: String, isSys: Boolean): String = {
if (isSys)
"ee.cone.c4proto.S_Cat"
else if (origType.charAt(1) == '_') {
s"ee.cone.c4proto.${origType.charAt(0)}_Cat"
}
else
throw new Exception(s"Invalid name for Orig: $origType, should start with 'W_' or unsupported orig type")
}
  // Main generator entry point: matches an object annotated with @protocol
  // and rewrites it into a Protocol object containing, for every case class
  // inside, a squareup-wire ProtoAdapter plus (optionally) a lenses object.
  def get: Get = { case code@q"@protocol(...$exprss) object ${objectNameNode@Term.Name(objectName)} extends ..$ext { ..$stats }" ⇒ Util.unBase(objectName,objectNameNode.pos.end){ objectName ⇒
    //println(t.structure)
    // Categories passed to @protocol(...) become the default for all messages.
    val args = parseArgs(exprss)
    val messages: List[ProtoMessage] = stats.flatMap{
      case q"import ..$i" ⇒ None
      case q"..$mods case class ${Type.Name(messageName)} ( ..$params )" =>
        // Fold the class-level annotations into message-level settings.
        val protoMods = mods./:(ProtoMods(category = args))((pMods,mod)⇒ mod match {
          case mod"@Cat(...$exprss)" ⇒
            val old = pMods.category
            pMods.copy(category = parseArgs(exprss) ::: old)
          case mod"@Id(${Lit(id:Int)})" if pMods.id.isEmpty ⇒
            pMods.copy(id=Option(id))
          case mod"@ShortName(${Lit(shortName:String)})" if pMods.shortName.isEmpty ⇒
            pMods.copy(shortName=Option(shortName))
          case mod"@GenLens" ⇒
            pMods.copy(genLens = true)
          case mod"@deprecated(...$notes)" ⇒ pMods
        })
        // A "SysX" class generates type X with factory XFactory and S_Cat.
        val Sys = "Sys(.*)".r
        val (resultType, factoryName, isSys) = messageName match {
          case Sys(v) ⇒ (v, s"${v}Factory", true)
          case v ⇒ (v, v, false)
        }
        val doGenLens = protoMods.genLens
        // Maps a field's Scala type name to the wire adapter expression.
        val adapterOf: String=>String = {
          case "Int" ⇒ "com.squareup.wire.ProtoAdapter.SINT32"
          case "Long" ⇒ "com.squareup.wire.ProtoAdapter.SINT64"
          case "Boolean" ⇒ "com.squareup.wire.ProtoAdapter.BOOL"
          case "okio.ByteString" ⇒ "com.squareup.wire.ProtoAdapter.BYTES"
          case "String" ⇒ "com.squareup.wire.ProtoAdapter.STRING"
          case name ⇒ s"${name}ProtoAdapter"
        }
        // Build the per-field code fragments (size/encode/decode/meta/lens).
        val props: List[ProtoProp] = params.map{
          case param"..$mods ${Term.Name(propName)}: $tpeopt = $v" ⇒
            val fieldProps = mods./:(FieldMods())((fMods, mod) ⇒ mod match {
              case mod"@Id(${Lit(id:Int)})" ⇒
                fMods.copy(id = Option(id))
              case mod"@ShortName(${Lit(shortName:String)})" ⇒
                fMods.copy(shortName = Option(shortName))
              case mod"@deprecated(...$notes)" ⇒
                fMods
            })
            val tp = tpeopt.asInstanceOf[Option[Type]].get
            /*
            val (tp,meta) = tpe.get match {
              case t"$tp @meta(..$ann)" ⇒ (tp,ann)
              case a ⇒ (a,Nil)
            }
            println(meta,meta.map(_.getClass))*/
            // Select the encode/decode recipe for the field's Scala type.
            val pt: ProtoType = tp match {
              case t"Int" ⇒
                val name = "Int"
                ProtoType(
                  encodeStatement = (s"if(prep_$propName != 0)", s"prep_$propName)"),
                  serializerType = adapterOf(name),
                  empty = "0",
                  resultType = name
                )
              case t"Long" ⇒
                val name = "Long"
                ProtoType(
                  encodeStatement = (s"if(prep_$propName != 0L)", s"prep_$propName)"),
                  serializerType = adapterOf(name),
                  empty = "0",
                  resultType = name
                )
              case t"Boolean" ⇒
                val name = "Boolean"
                ProtoType(
                  encodeStatement = (s"if(prep_$propName)", s"prep_$propName)"),
                  serializerType = adapterOf(name),
                  empty = "false",
                  resultType = name
                )
              case t"okio.ByteString" ⇒
                val name = "okio.ByteString"
                ProtoType(
                  encodeStatement = (s"if(prep_$propName.size > 0)", s"prep_$propName)"),
                  serializerType = adapterOf(name),
                  empty = "okio.ByteString.EMPTY",
                  resultType = name
                )
              case t"String" ⇒
                val name = "String"
                ProtoType(
                  encodeStatement = (s"if(prep_$propName.nonEmpty)", s"prep_$propName)"),
                  serializerType = adapterOf(name),
                  empty = "\"\"",
                  resultType = name
                )
              case Type.Name(name) ⇒
                ProtoType(
                  encodeStatement = (s"if(prep_$propName != ${name}Empty)", s"prep_$propName)"),
                  serializerType = adapterOf(name),
                  empty = s"${name}Empty",
                  resultType = name
                )
              case t"Option[${Type.Name(name)}]" ⇒
                ProtoType(
                  encodeStatement = (s"if(prep_$propName.nonEmpty)", s"prep_$propName.get)"),
                  serializerType = adapterOf(name),
                  empty = "None",
                  resultType = s"Option[$name]",
                  reduce=("Option(", ")")
                )
              case t"List[${Type.Name(name)}]" ⇒
                // Lists are decoded by prepending, then reversed in resultFix.
                ProtoType(
                  encodeStatement = (s"prep_$propName.foreach(item => ","item))"),
                  serializerType = adapterOf(name),
                  empty = "Nil",
                  resultType = s"List[$name]",
                  resultFix = s"prep_$propName.reverse",
                  reduce = ("", s":: prep_$propName")
                )
              /*
              //ProtoType("com.squareup.wire.ProtoAdapter.BOOL", "\"\"", "String")
              //String, Option[Boolean], Option[Int], Option[BigDecimal], Option[Instant], Option[$]
              */
            }
            val id = fieldProps.id.get
            ProtoProp(
              sizeStatement = s"${pt.encodeStatement._1} res += ${pt.serializerType}.encodedSizeWithTag($id, ${pt.encodeStatement._2}",
              encodeStatement = s"${pt.encodeStatement._1} ${pt.serializerType}.encodeWithTag(writer, $id, ${pt.encodeStatement._2}",
              initDecodeStatement = s"var prep_$propName: ${pt.resultType} = ${pt.empty}",
              decodeCase = s"case $id => prep_$propName = ${pt.reduce._1} ${pt.serializerType}.decode(reader) ${pt.reduce._2}",
              constructArg = s"prep_$propName",
              resultFix = if(pt.resultFix.nonEmpty) s"prep_$propName = ${pt.resultFix}" else "",
              metaProp = s"""ee.cone.c4proto.MetaProp($id,"$propName",${deOpt(fieldProps.shortName)},"${pt.resultType}", ${getTypeProp(tp)})""",
              if (doGenLens) Some(getLens(objectName, resultType, id, propName, pt.resultType)) else None
            )
        }.toList
        // Render the ProtoAdapter object source for this message.
        val struct = s"""${factoryName}(${props.map(_.constructArg).mkString(",")})"""
        val statements = List(
          s"""type ${resultType} = ${objectName}Base.${resultType}""",
          s"""val ${factoryName} = ${objectName}Base.${factoryName}""",
          s"""
      object ${resultType}ProtoAdapter extends com.squareup.wire.ProtoAdapter[$resultType](
        com.squareup.wire.FieldEncoding.LENGTH_DELIMITED,
        classOf[$resultType]
      ) with ee.cone.c4proto.HasId {
        def id = ${protoMods.id.getOrElse("throw new Exception")}
        def hasId = ${protoMods.id.nonEmpty}
        val ${messageName}_categories = List(${(getCat(resultType, isSys) :: protoMods.category).mkString(", ")}).distinct
        def categories = ${messageName}_categories
        def className = classOf[$resultType].getName
        def cl = classOf[$resultType]
        def shortName = ${deOpt(protoMods.shortName)}
        def encodedSize(value: $resultType): Int = {
          val $struct = value
          var res = 0;
          ${props.map(_.sizeStatement).mkString("\n")}
          res
        }
        def encode(writer: com.squareup.wire.ProtoWriter, value: $resultType) = {
          val $struct = value
          ${props.map(_.encodeStatement).mkString("\n")}
        }
        def decode(reader: com.squareup.wire.ProtoReader) = {
          ${props.map(_.initDecodeStatement).mkString("\n")};
          val token = reader.beginMessage();
          var done = false;
          while(!done) reader.nextTag() match {
            case -1 => done = true
            ${props.map(_.decodeCase).mkString("\n")}
            case _ => reader.peekFieldEncoding.rawProtoAdapter.decode(reader)
          }
          reader.endMessage(token)
          ${props.map(_.resultFix).mkString("\n")};
          $struct
        }
        def props = List(${props.map(_.metaProp).mkString(",")})
      }
    """)
        val regAdapter = s"${resultType}ProtoAdapter"
        // Collect lens sources (only present when @GenLens was given).
        val lensesLines = props.flatMap(_.lensOpt)
        val lenses =
          if (lensesLines.nonEmpty)
            s"""object ${resultType}Lenses {
               |  ${lensesLines.mkString("\n")}
               |}
             """.stripMargin
          else
            ""
        ProtoMessage(regAdapter, statements, lenses) :: Nil
    }.toList
    // Rebuild the protocol object: keep imports, splice generated statements
    // and register every generated adapter.
    val imports = stats.collect{ case s@q"import ..$i" ⇒ s }
    val res = q"""
      object ${Term.Name(objectName)} extends Protocol {
        ..$imports;
        ..${messages.flatMap(_.statements).map(_.parse[Stat].get)};
        override def adapters = List(..${messages.map(_.adapterName).filter(_.nonEmpty).map(_.parse[Term].get)})
      }"""
    //println(res)
    //Util.comment(code)(cont) +
    GeneratedCode(res.syntax) :: messages.map(_.lenses).map(GeneratedCode.apply)
  }}
}
| wregs/c4proto | generator/src/main/scala/ee/cone/c4generator/Protocol.scala | Scala | apache-2.0 | 11,836 |
package ai.akka.exception
import akka.actor.ActorRef
/**
 * Object with exceptions
 *
 * NOTE(review): the case classes below extend Throwable directly (not
 * Exception); catch clauses must match these concrete types or Throwable.
 */
object Exception {
  /**
   * Trait with information and reference to actor which waiting http response
   */
  trait OAuthServiceException {
    val httpResponseActor: ActorRef
    val msg: String
  }
  /**
   * Exception when parsing authorization request
   * @param httpResponseActor reference to actor which waiting http response
   * @param msg readable message
   */
  case class OAuthParseRequestException(httpResponseActor: ActorRef, msg: String) extends Throwable with OAuthServiceException {
    // getMessage is overridden because Throwable's no-arg constructor is used.
    override def getMessage: String = msg
  }
  /**
   * Exception when unsupported response type
   * @param httpResponseActor reference to actor which waiting http response
   * @param msg readable message
   */
  case class UnsupportedResponseTypeException(httpResponseActor: ActorRef, msg: String) extends Throwable with OAuthServiceException {
    override def getMessage: String = msg
  }
  /**
   * Exception when client identity is invalid
   * @param httpResponseActor reference to actor which waiting http response
   * @param msg readable message
   */
  case class InvalidClientException(httpResponseActor: ActorRef, msg: String) extends Throwable with OAuthServiceException {
    override def getMessage: String = msg
  }
  /**
   * Exception when authentication is invalid
   * @param httpResponseActor reference to actor which waiting http response
   * @param msg readable message
   */
  case class InvalidAuthenticationException(httpResponseActor: ActorRef, msg: String) extends Throwable with OAuthServiceException {
    override def getMessage: String = msg
  }
}
| andrew--i/spray-oauth2 | src/main/scala/ai/akka/exception/Exception.scala | Scala | apache-2.0 | 1,683 |
package spire
package macros
import spire.algebra.{Field, Ring}
import spire.macros.compat.Context
import spire.math.{Rational, UByte, UShort, UInt, ULong}
object Macros {
  // Helper around the macro Context: extracts the raw string of the literal
  // at the interpolator call site (c.prefix is the StringContext application).
  case class LiteralUtil(c: Context) {
    import c.universe._
    def getString: String = {
      // Irrefutable pattern: throws a MatchError if the interpolator is not a
      // single-part literal (e.g. contains interpolated arguments).
      val Apply(_, List(Apply(_, List(Literal(Constant(s: String)))))) = c.prefix.tree
      s
    }
  }
  // Reads the literal string at the macro call site and range-checks it.
  def parseContext(c: Context, lower: BigInt, upper: BigInt): Either[String, BigInt] =
    parseNumber(LiteralUtil(c).getString, lower, upper)
def parseNumber(s: String, lower: BigInt, upper: BigInt): Either[String, BigInt] =
try {
val n = BigInt(s)
if (n < lower || n > upper) Left("illegal constant: %s" format s) else Right(n)
} catch {
case _: Exception => Left("illegal constant: %s" format s)
}
  // Whole-number literal macros: each validates the literal at compile time
  // and expands to the checked constant. The upper bounds allow the unsigned
  // notation of each width (e.g. b"255"); the value wraps via toByte/toShort.
  def byte(c: Context)(): c.Expr[Byte] = {
    import c.universe._
    parseContext(c, BigInt(-128), BigInt(255)) match {
      case Right(n) => c.Expr(q"${n.toByte}")
      case Left(s) => throw new NumberFormatException(s)
    }
  }
  def ubyte(c: Context)(): c.Expr[UByte] = {
    import c.universe._
    parseContext(c, BigInt(0), BigInt(255)) match {
      case Right(n) => c.Expr(q"spire.math.UByte(${n.toByte})")
      case Left(s) => throw new NumberFormatException(s)
    }
  }
  def short(c: Context)(): c.Expr[Short] = {
    import c.universe._
    parseContext(c, BigInt(-32768), BigInt(65535)) match {
      case Right(n) => c.Expr(q"${n.toShort}")
      case Left(s) => throw new NumberFormatException(s)
    }
  }
  def ushort(c: Context)(): c.Expr[UShort] = {
    import c.universe._
    parseContext(c, BigInt(0), BigInt(65535)) match {
      case Right(n) => c.Expr(q"spire.math.UShort(${n.toShort})")
      case Left(s) => throw new NumberFormatException(s)
    }
  }
  def uint(c: Context)(): c.Expr[UInt] = {
    import c.universe._
    parseContext(c, BigInt(0), BigInt(4294967295L)) match {
      case Right(n) => c.Expr(q"spire.math.UInt(${n.toInt})")
      case Left(s) => throw new NumberFormatException(s)
    }
  }
  def ulong(c: Context)(): c.Expr[ULong] = {
    import c.universe._
    parseContext(c, BigInt(0), BigInt("18446744073709551615")) match {
      case Right(n) => c.Expr(q"spire.math.ULong(${n.toLong})")
      case Left(s) => throw new NumberFormatException(s)
    }
  }
  // Rational literal: parsed at compile time; emits the Long-based factory
  // when numerator and denominator both fit in a Long, else the BigInt form.
  def rational(c: Context)(): c.Expr[Rational] = {
    import c.universe._
    val Apply(_, List(Apply(_, List(Literal(Constant(s:String)))))) = c.prefix.tree
    val r = Rational(s)
    val (n, d) = (r.numerator, r.denominator)
    if (n.isValidLong && d.isValidLong)
      c.Expr(q"spire.math.Rational(${n.toLong}, ${d.toLong})")
    else
      c.Expr(q"spire.math.Rational(BigInt(${n.toString}), BigInt(${d.toString}))")
  }
  // Validates a grouped whole-number literal (groups of three digits
  // separated by `sep`) and returns it with the separators stripped.
  // Reports a compile error at the call site when the format is invalid.
  def formatWhole(c: Context, sep: String): String = {
    val regex = "0|-?[1-9][0-9]{0,2}(%s[0-9]{3})*" format sep
    import c.universe._
    val Apply(_, List(Apply(_, List(Literal(Constant(s:String)))))) = c.prefix.tree
    if (!s.matches(regex)) c.error(c.enclosingPosition, "invalid whole number")
    s.replace(sep, "")
  }
  // Same as formatWhole but allows an optional fractional part introduced by
  // `dec`; normalizes the decimal separator to '.' for parsing.
  def formatDecimal(c: Context, sep: String, dec: String): String = {
    val regex = "0|-?[1-9][0-9]{0,2}(%s[0-9]{3})*(%s[0-9]+)?" format (sep, dec)
    import c.universe._
    val Apply(_, List(Apply(_, List(Literal(Constant(s:String)))))) = c.prefix.tree
    if (!s.matches(regex)) c.error(c.enclosingPosition, "invalid decimal number")
    s.replace(sep, "").replace(dec, ".")
  }
  // Shared helpers for the locale-specific literal macros below: `name` is
  // the locale label ("SI", "US", "EU") used only in error messages, `sep`
  // the grouping separator. Each parses the normalized literal eagerly so
  // invalid constants fail at compile time.
  def handleInt(c: Context, name: String, sep: String): c.Expr[Int] = {
    import c.universe._
    try {
      c.Expr[Int](Literal(Constant(formatWhole(c, sep).toInt)))
    } catch {
      case e: Exception =>
        throw new NumberFormatException("illegal %s Int constant" format name)
    }
  }
  def handleLong(c: Context, name: String, sep: String): c.Expr[Long] = {
    import c.universe._
    try {
      c.Expr[Long](Literal(Constant(formatWhole(c, sep).toLong)))
    } catch {
      case e: Exception =>
        throw new NumberFormatException("illegal %s Long constant" format name)
    }
  }
  def handleBigInt(c: Context, name: String, sep: String): c.Expr[BigInt] = {
    import c.universe._
    try {
      val s = formatWhole(c, sep)
      val b = BigInt(s) // make sure it's ok
      c.Expr[BigInt](Apply(q"scala.math.BigInt.apply", List(Literal(Constant(s)))))
    } catch {
      case e: Exception =>
        throw new NumberFormatException("illegal %s BigInt constant" format name)
    }
  }
def handleBigDecimal(c: Context, name: String, sep: String, dec: String): c.Expr[BigDecimal] = {
import c.universe._
try {
val s = formatDecimal(c, sep, dec)
val b = BigDecimal(s) // make sure it's ok
c.Expr[BigDecimal](Apply(q"scala.math.BigDecimal.apply", List(Literal(Constant(s)))))
} catch {
case e: Exception =>
throw new NumberFormatException("illegal %s BigInt constant" format name)
}
}
  // Locale-specific literal entry points: SI groups with a space, US with a
  // comma (decimal point '.'), EU with a dot (decimal comma ',').
  def siInt(c: Context)(): c.Expr[Int] = handleInt(c, "SI", " ")
  def siLong(c: Context)(): c.Expr[Long] = handleLong(c, "SI", " ")
  def siBigInt(c: Context)(): c.Expr[BigInt] = handleBigInt(c, "SI", " ")
  def siBigDecimal(c: Context)(): c.Expr[BigDecimal] = handleBigDecimal(c, "SI", " ", ".")
  def usInt(c: Context)(): c.Expr[Int] = handleInt(c, "US", ",")
  def usLong(c: Context)(): c.Expr[Long] = handleLong(c, "US", ",")
  def usBigInt(c: Context)(): c.Expr[BigInt] = handleBigInt(c, "US", ",")
  def usBigDecimal(c: Context)(): c.Expr[BigDecimal] = handleBigDecimal(c, "US", ",", ".")
  def euInt(c: Context)(): c.Expr[Int] = handleInt(c, "EU", ".")
  def euLong(c: Context)(): c.Expr[Long] = handleLong(c, "EU", ".")
  def euBigInt(c: Context)(): c.Expr[BigInt] = handleBigInt(c, "EU", ".")
  def euBigDecimal(c: Context)(): c.Expr[BigDecimal] = handleBigDecimal(c, "EU", ".", ",")
  // Radix literal macro: the base is encoded in the macro's own name
  // (e.g. x2, x16 -> substring after the first character); accepts 2..36
  // and expands to the parsed Int constant.
  def radix(c: Context)(): c.Expr[Int] = {
    import c.universe._
    val Apply(_, List(Apply(_, List(Literal(Constant(s:String)))))) = c.prefix.tree
    val name = c.macroApplication.symbol.name.toString
    val base = name.substring(1).toInt
    if (base < 2 || 36 < base)
      throw new NumberFormatException("invalid radix: %s" format base)
    val n = java.lang.Integer.parseInt(s, base)
    c.Expr[Int](Literal(Constant(n)))
  }
  // Converts an Int literal into a value of A via the Ring evidence;
  // 0 and 1 short-circuit to ev.zero / ev.one, everything else uses fromInt.
  def intAs[A : c.WeakTypeTag](c:Context)(ev : c.Expr[Ring[A]]):c.Expr[A] = {
    import c.universe._
    c.Expr[A](c.prefix.tree match {
      case Apply((_, List(Literal(Constant(0))))) => q"$ev.zero"
      case Apply((_, List(Literal(Constant(1))))) => q"$ev.one"
      case Apply((_, List(n))) => q"$ev.fromInt($n)"
    })
  }
  // Same for Double literals, using the Field evidence and fromDouble.
  def dblAs[A : c.WeakTypeTag](c:Context)(ev : c.Expr[Field[A]]):c.Expr[A]= {
    import c.universe._
    c.Expr[A](c.prefix.tree match {
      case Apply((_, List(Literal(Constant(0.0))))) => q"$ev.zero"
      case Apply((_, List(Literal(Constant(1.0))))) => q"$ev.one"
      case Apply((_, List(n))) => q"$ev.fromDouble($n)"
    })
  }
}
| tixxit/spire | core/shared/src/main/scala/spire/macros/Macros.scala | Scala | mit | 6,955 |
object test {
class AAA[T, S <: T](i: Int)
}
object test2 {
import test.{AAA => BBB}
val x = new BBB[/*caret*/]
}
//T, S <: T | whorbowicz/intellij-scala | testdata/parameterInfo/typeParameterInfo/SimpleTests/AliasedClassTypeParams.scala | Scala | apache-2.0 | 133 |
package org.bitcoins.script.stack
import org.scalatest.{MustMatchers, FlatSpec}
/**
* Created by chris on 1/8/16.
*/
class StackOperationFactoryTest extends FlatSpec with MustMatchers {
"StackOperationFactory" must "match correct operations with their strings" in {
StackOperation.fromString("OP_DUP") must be (Some(OP_DUP))
StackOperation.fromString("OP_FROMALTSTACK") must be (Some(OP_FROMALTSTACK))
StackOperation.fromString("RANDOM_OP") must be (None)
StackOperation.fromString("OP_IFDUP") must be (Some(OP_IFDUP))
}
}
| Christewart/scalacoin | src/test/scala/org/bitcoins/script/stack/StackOperationFactoryTest.scala | Scala | mit | 548 |
package com.sksamuel.elastic4s.fields.builders
import com.sksamuel.elastic4s.fields.RankFeatureField
import com.sksamuel.elastic4s.json.{XContentBuilder, XContentFactory}
object RankFeatureFieldBuilderFn {
def build(field: RankFeatureField): XContentBuilder = {
val builder = XContentFactory.jsonBuilder()
builder.field("type", field.`type`)
field.positiveScoreImpact.foreach(builder.field("positive_score_impact", _))
builder.endObject()
}
}
| stringbean/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/fields/builders/RankFeatureFieldBuilderFn.scala | Scala | apache-2.0 | 466 |
package filters
import play.api.mvc.{RequestHeader, Filter}
import scala.concurrent.Future
import play.api.mvc.SimpleResult
import play.api.libs.concurrent.Execution.Implicits.defaultContext
object VersionFilter extends Filter {
override def apply(f: (RequestHeader) => Future[SimpleResult])(rh: RequestHeader) = {
f(rh).map(_.withHeaders(VERSION_HEADER -> release.CurrentVersion()))
}
}
| tsechov/shoehorn | app/filters/VersionFilter.scala | Scala | apache-2.0 | 400 |
package modules
import com.google.inject.{ AbstractModule, Provides }
import com.mohiva.play.silhouette.api.repositories.AuthInfoRepository
import com.mohiva.play.silhouette.api.services._
import com.mohiva.play.silhouette.api.util._
import com.mohiva.play.silhouette.api.{ Environment, EventBus }
import com.mohiva.play.silhouette.impl.authenticators._
import com.mohiva.play.silhouette.impl.daos.DelegableAuthInfoDAO
import com.mohiva.play.silhouette.impl.providers._
import com.mohiva.play.silhouette.impl.providers.oauth1._
import com.mohiva.play.silhouette.impl.providers.oauth1.secrets.{ CookieSecretProvider, CookieSecretSettings }
import com.mohiva.play.silhouette.impl.providers.oauth1.services.PlayOAuth1Service
import com.mohiva.play.silhouette.impl.providers.oauth2._
import com.mohiva.play.silhouette.impl.providers.oauth2.state.{ CookieStateProvider, CookieStateSettings, DummyStateProvider }
import com.mohiva.play.silhouette.impl.providers.openid.YahooProvider
import com.mohiva.play.silhouette.impl.providers.openid.services.PlayOpenIDService
import com.mohiva.play.silhouette.impl.repositories.DelegableAuthInfoRepository
import com.mohiva.play.silhouette.impl.services._
import com.mohiva.play.silhouette.impl.util._
import models.User
import models.daos._
import models.services.{ UserService, UserServiceImpl }
import net.ceedubs.ficus.Ficus._
import net.ceedubs.ficus.readers.ArbitraryTypeReader._
import net.codingwell.scalaguice.ScalaModule
import play.api.Configuration
import play.api.libs.concurrent.Execution.Implicits._
import play.api.libs.openid.OpenIdClient
import play.api.libs.ws.WSClient
/**
* The Guice module which wires all Silhouette dependencies.
*/
class SilhouetteModule extends AbstractModule with ScalaModule {
  /**
   * Configures the module.
   */
  def configure() {
    // Domain services and persistence DAOs.
    bind[UserService].to[UserServiceImpl]
    bind[UserDAO].to[UserDAOImpl]
    // One delegable DAO per supported auth-info type; they are aggregated
    // into a single repository by provideAuthInfoRepository below.
    bind[DelegableAuthInfoDAO[PasswordInfo]].to[PasswordInfoDAO]
    bind[DelegableAuthInfoDAO[OAuth1Info]].to[OAuth1InfoDAO]
    bind[DelegableAuthInfoDAO[OAuth2Info]].to[OAuth2InfoDAO]
    bind[DelegableAuthInfoDAO[OpenIDInfo]].to[OpenIDInfoDAO]
    // Silhouette utility singletons (cache, crypto, fingerprinting, events).
    bind[CacheLayer].to[PlayCacheLayer]
    bind[IDGenerator].toInstance(new SecureRandomIDGenerator())
    bind[PasswordHasher].toInstance(new BCryptPasswordHasher)
    bind[FingerprintGenerator].toInstance(new DefaultFingerprintGenerator(false))
    bind[EventBus].toInstance(EventBus())
    bind[Clock].toInstance(Clock())
  }
  /**
   * Provides the HTTP layer implementation.
   *
   * @param client Play's WS client.
   * @return The HTTP layer implementation.
   */
  @Provides
  def provideHTTPLayer(client: WSClient): HTTPLayer = new PlayHTTPLayer(client)
  /**
   * Provides the Silhouette environment.
   *
   * @param userService The user service implementation.
   * @param authenticatorService The authentication service implementation.
   * @param eventBus The event bus instance.
   * @return The Silhouette environment.
   */
  @Provides
  def provideEnvironment(
    userService: UserService,
    authenticatorService: AuthenticatorService[CookieAuthenticator],
    eventBus: EventBus): Environment[User, CookieAuthenticator] = {
    Environment[User, CookieAuthenticator](
      userService,
      authenticatorService,
      Seq(), // no additional request providers are configured
      eventBus
    )
  }
  /**
   * Provides the social provider registry.
   *
   * @param facebookProvider The Facebook provider implementation.
   * @param googleProvider The Google provider implementation.
   * @param vkProvider The VK provider implementation.
   * @param clefProvider The Clef provider implementation.
   * @param twitterProvider The Twitter provider implementation.
   * @param xingProvider The Xing provider implementation.
   * @param yahooProvider The Yahoo provider implementation.
   * @return The Silhouette environment.
   */
  @Provides
  def provideSocialProviderRegistry(
    facebookProvider: FacebookProvider,
    googleProvider: GoogleProvider,
    vkProvider: VKProvider,
    clefProvider: ClefProvider,
    twitterProvider: TwitterProvider,
    xingProvider: XingProvider,
    yahooProvider: YahooProvider): SocialProviderRegistry = {
    // All configured providers, collected into one registry instance.
    SocialProviderRegistry(Seq(
      googleProvider,
      facebookProvider,
      twitterProvider,
      vkProvider,
      xingProvider,
      yahooProvider,
      clefProvider
    ))
  }
  /**
   * Provides the authenticator service.
   *
   * @param fingerprintGenerator The fingerprint generator implementation.
   * @param idGenerator The ID generator implementation.
   * @param configuration The Play configuration.
   * @param clock The clock instance.
   * @return The authenticator service.
   */
  @Provides
  def provideAuthenticatorService(
    fingerprintGenerator: FingerprintGenerator,
    idGenerator: IDGenerator,
    configuration: Configuration,
    clock: Clock): AuthenticatorService[CookieAuthenticator] = {
    // Cookie settings come from the "silhouette.authenticator" config block.
    val config = configuration.underlying.as[CookieAuthenticatorSettings]("silhouette.authenticator")
    new CookieAuthenticatorService(config, None, fingerprintGenerator, idGenerator, clock)
  }
  /**
   * Provides the auth info repository.
   *
   * @param passwordInfoDAO The implementation of the delegable password auth info DAO.
   * @param oauth1InfoDAO The implementation of the delegable OAuth1 auth info DAO.
   * @param oauth2InfoDAO The implementation of the delegable OAuth2 auth info DAO.
   * @param openIDInfoDAO The implementation of the delegable OpenID auth info DAO.
   * @return The auth info repository instance.
   */
  @Provides
  def provideAuthInfoRepository(
    passwordInfoDAO: DelegableAuthInfoDAO[PasswordInfo],
    oauth1InfoDAO: DelegableAuthInfoDAO[OAuth1Info],
    oauth2InfoDAO: DelegableAuthInfoDAO[OAuth2Info],
    openIDInfoDAO: DelegableAuthInfoDAO[OpenIDInfo]): AuthInfoRepository = {
    new DelegableAuthInfoRepository(passwordInfoDAO, oauth1InfoDAO, oauth2InfoDAO, openIDInfoDAO)
  }
/**
* Provides the avatar service.
*
* @param httpLayer The HTTP layer implementation.
* @return The avatar service implementation.
*/
@Provides
def provideAvatarService(httpLayer: HTTPLayer): AvatarService = new GravatarService(httpLayer)
/**
 * Provides the OAuth1 token secret provider.
 *
 * Reads the cookie-secret settings from `silhouette.oauth1TokenSecretProvider`.
 *
 * @param configuration The Play configuration.
 * @param clock The clock instance.
 * @return The OAuth1 token secret provider implementation.
 */
@Provides
def provideOAuth1TokenSecretProvider(configuration: Configuration, clock: Clock): OAuth1TokenSecretProvider =
  new CookieSecretProvider(
    configuration.underlying.as[CookieSecretSettings]("silhouette.oauth1TokenSecretProvider"),
    clock)
/**
 * Provides the OAuth2 state provider.
 *
 * Reads the cookie-state settings from `silhouette.oauth2StateProvider`.
 *
 * @param idGenerator The ID generator implementation.
 * @param configuration The Play configuration.
 * @param clock The clock instance.
 * @return The OAuth2 state provider implementation.
 */
@Provides
def provideOAuth2StateProvider(idGenerator: IDGenerator, configuration: Configuration, clock: Clock): OAuth2StateProvider =
  new CookieStateProvider(
    configuration.underlying.as[CookieStateSettings]("silhouette.oauth2StateProvider"),
    idGenerator,
    clock)
/**
 * Provides the credentials provider.
 *
 * @param authInfoRepository The auth info repository implementation.
 * @param passwordHasher The default password hasher implementation.
 * @return The credentials provider.
 */
@Provides
def provideCredentialsProvider(
  authInfoRepository: AuthInfoRepository,
  passwordHasher: PasswordHasher): CredentialsProvider = {
  // The default hasher is also the only entry in the list of supported hashers.
  val supportedHashers = Seq(passwordHasher)
  new CredentialsProvider(authInfoRepository, passwordHasher, supportedHashers)
}
/**
 * Provides the Facebook provider, configured from `silhouette.facebook`.
 *
 * @param httpLayer The HTTP layer implementation.
 * @param stateProvider The OAuth2 state provider implementation.
 * @param configuration The Play configuration.
 * @return The Facebook provider.
 */
@Provides
def provideFacebookProvider(
  httpLayer: HTTPLayer,
  stateProvider: OAuth2StateProvider,
  configuration: Configuration): FacebookProvider = {
  val oauth2Settings = configuration.underlying.as[OAuth2Settings]("silhouette.facebook")
  new FacebookProvider(httpLayer, stateProvider, oauth2Settings)
}
/**
 * Provides the Google provider, configured from `silhouette.google`.
 *
 * @param httpLayer The HTTP layer implementation.
 * @param stateProvider The OAuth2 state provider implementation.
 * @param configuration The Play configuration.
 * @return The Google provider.
 */
@Provides
def provideGoogleProvider(
  httpLayer: HTTPLayer,
  stateProvider: OAuth2StateProvider,
  configuration: Configuration): GoogleProvider = {
  val oauth2Settings = configuration.underlying.as[OAuth2Settings]("silhouette.google")
  new GoogleProvider(httpLayer, stateProvider, oauth2Settings)
}
/**
 * Provides the VK provider, configured from `silhouette.vk`.
 *
 * @param httpLayer The HTTP layer implementation.
 * @param stateProvider The OAuth2 state provider implementation.
 * @param configuration The Play configuration.
 * @return The VK provider.
 */
@Provides
def provideVKProvider(
  httpLayer: HTTPLayer,
  stateProvider: OAuth2StateProvider,
  configuration: Configuration): VKProvider = {
  val oauth2Settings = configuration.underlying.as[OAuth2Settings]("silhouette.vk")
  new VKProvider(httpLayer, stateProvider, oauth2Settings)
}
/**
 * Provides the Clef provider, configured from `silhouette.clef`.
 *
 * @param httpLayer The HTTP layer implementation.
 * @param configuration The Play configuration.
 * @return The Clef provider.
 */
@Provides
def provideClefProvider(httpLayer: HTTPLayer, configuration: Configuration): ClefProvider = {
  val oauth2Settings = configuration.underlying.as[OAuth2Settings]("silhouette.clef")
  // A DummyStateProvider is wired here, matching the original configuration.
  new ClefProvider(httpLayer, new DummyStateProvider, oauth2Settings)
}
/**
 * Provides the Twitter provider, configured from `silhouette.twitter`.
 *
 * @param httpLayer The HTTP layer implementation.
 * @param tokenSecretProvider The token secret provider implementation.
 * @param configuration The Play configuration.
 * @return The Twitter provider.
 */
@Provides
def provideTwitterProvider(
  httpLayer: HTTPLayer,
  tokenSecretProvider: OAuth1TokenSecretProvider,
  configuration: Configuration): TwitterProvider = {
  val oauth1Settings = configuration.underlying.as[OAuth1Settings]("silhouette.twitter")
  val oauth1Service = new PlayOAuth1Service(oauth1Settings)
  new TwitterProvider(httpLayer, oauth1Service, tokenSecretProvider, oauth1Settings)
}
/**
 * Provides the Xing provider, configured from `silhouette.xing`.
 *
 * @param httpLayer The HTTP layer implementation.
 * @param tokenSecretProvider The token secret provider implementation.
 * @param configuration The Play configuration.
 * @return The Xing provider.
 */
@Provides
def provideXingProvider(
  httpLayer: HTTPLayer,
  tokenSecretProvider: OAuth1TokenSecretProvider,
  configuration: Configuration): XingProvider = {
  val oauth1Settings = configuration.underlying.as[OAuth1Settings]("silhouette.xing")
  val oauth1Service = new PlayOAuth1Service(oauth1Settings)
  new XingProvider(httpLayer, oauth1Service, tokenSecretProvider, oauth1Settings)
}
/**
 * Provides the Yahoo provider, configured from `silhouette.yahoo`.
 *
 * @param cacheLayer The cache layer implementation.
 * @param httpLayer The HTTP layer implementation.
 * @param client The OpenID client implementation.
 * @param configuration The Play configuration.
 * @return The Yahoo provider.
 */
@Provides
def provideYahooProvider(
  cacheLayer: CacheLayer,
  httpLayer: HTTPLayer,
  client: OpenIdClient,
  configuration: Configuration): YahooProvider = {
  // NOTE(review): cacheLayer is injected but never referenced below — confirm
  // whether it can be removed from the signature without breaking DI bindings.
  val openIdSettings = configuration.underlying.as[OpenIDSettings]("silhouette.yahoo")
  val openIdService = new PlayOpenIDService(client, openIdSettings)
  new YahooProvider(httpLayer, openIdService, openIdSettings)
}
}
| sbrunk/play-silhouette-slick-seed | app/modules/SilhouetteModule.scala | Scala | apache-2.0 | 11,655 |
/**
* Copyright (C) 2011 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.portlet
import liferay.FormRunnerRequestFilter
import org.scalatest.junit.AssertionsForJUnit
import org.orbeon.oxf.test.ResourceManagerTestBase
import org.junit.Test
import java.lang.String
import collection.JavaConversions._
import org.scalatest.mock.MockitoSugar
import javax.portlet.PortletRequest
import javax.portlet.filter.PortletRequestWrapper
import org.mockito.Mockito
import com.liferay.portal.model.{Group, Role, User}
import java.util.Arrays
import collection.immutable.TreeMap
import org.orbeon.oxf.fr.FormRunnerPermissions._
class FormRunnerRequestFilterTest extends ResourceManagerTestBase with AssertionsForJUnit with MockitoSugar {
/**
 * Verifies that the Liferay request filter adds the Orbeon user/group/role
 * properties derived from the Liferay `User` to the portlet request, while
 * keeping the properties the request already carried.
 */
@Test def amendPortletRequest() {
  // Properties present on the request before the filter runs
  val initialProperties = Map("p1" → Seq("v1a", "v1b"))
  // Request wrapper exposing the initial properties through the PortletRequest property API
  val mockRequest = new PortletRequestWrapper(mock[PortletRequest]) {
    override def getProperty(name: String) = initialProperties.get(name) map (_.head) orNull
    override def getProperties(name: String) =
      asJavaEnumeration(initialProperties.get(name) map (_.iterator) getOrElse Iterator.empty)
    override def getPropertyNames = initialProperties.keysIterator
  }
  // Liferay user fixture: two roles, one group, basic identity fields
  val mockRoleManager = mock[Role]
  Mockito when mockRoleManager.getName thenReturn "manager"
  val mockRoleEmployee = mock[Role]
  Mockito when mockRoleEmployee.getName thenReturn "employee"
  val mockGroup = mock[Group]
  Mockito when mockGroup.getGroupId thenReturn 42
  Mockito when mockGroup.getName thenReturn "universe"
  val mockUser = mock[User]
  Mockito when mockUser.getUserId thenReturn 123
  Mockito when mockUser.getScreenName thenReturn "jsmith"
  Mockito when mockUser.getFullName thenReturn "John Smith"
  Mockito when mockUser.getEmailAddress thenReturn "test@orbeon.com"
  Mockito when mockUser.getRoles thenReturn Arrays.asList(mockRoleManager, mockRoleEmployee)
  Mockito when mockUser.getGroup thenReturn mockGroup
  // Run the filter under test
  val amendedRequest = (new FormRunnerRequestFilter).amendRequest(mockRequest, mockUser)
  // NOTE: Use Seq or List but not Array for comparison, because Array's == doesn't work as expected in Scala
  val expectedProperties = initialProperties ++ Map(
    "orbeon-liferay-user-id" → Seq("123"),
    "orbeon-liferay-user-screen-name" → Seq("jsmith"),
    "orbeon-liferay-user-full-name" → Seq("John Smith"),
    "orbeon-liferay-user-email" → Seq("test@orbeon.com"),
    "orbeon-liferay-user-group-id" → Seq("42"),
    "orbeon-liferay-user-group-name" → Seq("universe"),
    "orbeon-liferay-user-roles" → Seq("manager", "employee"),
    OrbeonUsernameHeaderName → Seq("test@orbeon.com"),
    OrbeonGroupHeaderName → Seq("universe"),
    OrbeonRolesHeaderName → Seq("manager", "employee")
  )
  // Collect every property exposed by the amended request into a Map for comparison
  val actualProperties = amendedRequest.getPropertyNames map (n ⇒ n → amendedRequest.getProperties(n).toList) toMap
  // Compare using TreeMap to get a reliable order
  def toTreeMap[K, V](map: Map[K, V])(implicit ord: Ordering[K]) = TreeMap[K, V]() ++ map
  assert(toTreeMap(expectedProperties) === toTreeMap(actualProperties))
}
} | martinluther/orbeon-forms | src/test/scala/org/orbeon/oxf/portlet/FormRunnerRequestFilterTest.scala | Scala | lgpl-2.1 | 4,128 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ml.dmlc.mxnet.optimizer
import ml.dmlc.mxnet.util.SerializerUtils
import ml.dmlc.mxnet.{NDArray, Optimizer, LRScheduler}
import ml.dmlc.mxnet.NDArrayConversions._
/**
 * RMSProp optimizer as described in Tieleman & Hinton, 2012.
 * http://arxiv.org/pdf/1308.0850v5.pdf Eq(38) - Eq(45) by Alex Graves, 2013.
 *
 * @param learningRate Float, Step size.
 * @param gamma1 Float, decay factor of moving average for gradient, gradient^^2.
 * @param gamma2 Float, momentum factor of moving average for gradient.
 * @param rescaleGradient Float, rescaling factor of gradient.
 * @param wd Float, L2 regularization coefficient add to all the weights
 * @param clipGradient Float, clip gradient in range [-clip_gradient, clip_gradient]
 * @param lrScheduler The learning rate scheduler
 */
class RMSProp(val learningRate: Float = 0.002f, rescaleGradient: Float = 1.0f,
    gamma1: Float = 0.95f, gamma2: Float = 0.9f, wd: Float = 0.0f,
    lrScheduler: LRScheduler = null, clipGradient: Float = 0f) extends Optimizer {

  /**
   * Update the parameters.
   *
   * Implements Graves (2013) Eq(38)-Eq(45): running averages of the gradient
   * (g) and squared gradient (n), and a momentum-smoothed step (delta).
   * Intermediate NDArrays are explicitly disposed after each `set` to avoid
   * native-memory leaks; the statement order here is load-bearing.
   *
   * @param index An unique integer key used to index the parameters
   * @param weight weight ndarray
   * @param grad grad ndarray
   * @param state NDArray or other objects returned by initState
   * The auxiliary state used in optimization.
   */
  override def update(index: Int, weight: NDArray, grad: NDArray, state: AnyRef): Unit = {
    val lr = this.learningRate * lrScale.getOrElse(index, 1f)
    val (n, g, delta) = state.asInstanceOf[(NDArray, NDArray, NDArray)]
    val wd = getWd(index, this.wd)
    // NOTE(review): the constructor parameter `rescaleGradient` is never read;
    // the rescale factor used here is `this.rescaleGrad` inherited from
    // Optimizer — confirm the base field is set from the parameter elsewhere.
    var resdGrad = grad * this.rescaleGrad
    if (clipGradient != 0f) {
      val oldResdGrad = resdGrad
      resdGrad = NDArray.clip(resdGrad, -clipGradient, clipGradient)
      oldResdGrad.dispose()
    }
    // n_t = (1 - gamma1) * g_t^2 + gamma1 * n_{t-1}
    val nUpdated = ((1 - this.gamma1) * (resdGrad * resdGrad) + this.gamma1 * n)
      .disposeDepsExcept(resdGrad, n)
    n.set(nUpdated)
    nUpdated.dispose()
    // g_t = (1 - gamma1) * grad_t + gamma1 * g_{t-1}
    val gUpdated = ((1 - this.gamma1) * resdGrad + this.gamma1 * g)
      .disposeDepsExcept(resdGrad, g)
    g.set(gUpdated)
    gUpdated.dispose()
    // delta_t = gamma2 * delta_{t-1} - lr * (grad_t / sqrt(n_t - g_t^2 + eps) + wd * w)
    val deltaUpdated =
      (this.gamma2 * delta - lr * (resdGrad / NDArray.sqrt(n - g * g + 1e-4f) + wd * weight))
        .disposeDepsExcept(delta, resdGrad, n, g, weight)
    delta.set(deltaUpdated)
    deltaUpdated.dispose()
    weight += delta
    resdGrad.dispose()
  }

  /** Creates the (n, g, delta) state triple, all zero-initialized on the weight's context. */
  override def createState(index: Int, weight: NDArray): (NDArray, NDArray, NDArray) = {
    (NDArray.zeros(weight.shape, weight.context), // n
      NDArray.zeros(weight.shape, weight.context), // g
      NDArray.zeros(weight.shape, weight.context)) // delta
  }

  // Dispose the state it created
  override def disposeState(state: AnyRef): Unit = {
    if (state != null) {
      val (n, g, delta) = state.asInstanceOf[(NDArray, NDArray, NDArray)]
      n.dispose()
      g.dispose()
      delta.dispose()
    }
  }

  /** Serializes the (n, g, delta) triple to bytes; returns null for null state. */
  override def serializeState(state: AnyRef): Array[Byte] = {
    if (state != null) {
      val (n, g, delta) = state.asInstanceOf[(NDArray, NDArray, NDArray)]
      SerializerUtils.serializeNDArrays(n, g, delta)
    } else {
      null
    }
  }

  /** Restores the (n, g, delta) triple from bytes; returns null for null input. */
  override def deserializeState(bytes: Array[Byte]): AnyRef = {
    if (bytes != null) {
      val ndArrays = SerializerUtils.deserializeNDArrays(bytes)
      require(ndArrays.size == 3, s"Got ${ndArrays.size} arrays, expected 3.")
      val state = (ndArrays(0), ndArrays(1), ndArrays(2))
      state.asInstanceOf[AnyRef]
    } else {
      null
    }
  }
}
| rishita/mxnet | scala-package/core/src/main/scala/ml/dmlc/mxnet/optimizer/RMSProp.scala | Scala | apache-2.0 | 4,330 |
package com.xuanyuansen.algo
import breeze.linalg._
import breeze.numerics.{ sqrt, exp, tanh, sigmoid }
import com.typesafe.scalalogging.slf4j.Logger
import org.slf4j.LoggerFactory
import scala.collection.mutable.ArrayBuffer
/**
* Created by wangshuai on 16/7/14.
* basic lstm network
*/
/**
 * Weights, biases, accumulated gradients and per-parameter optimizer state
 * for one LSTM layer. Supports three update rules: SGD (with optional
 * momentum), RMSprop and AdaDelta.
 *
 * @param input_dim dimensionality of the layer input x_t
 * @param out_dim   dimensionality of the hidden/output state h_t
 */
case class LSTMLayerParam(val input_dim: Int, val out_dim: Int) {
  /**
   * for simple case, 1 step lstm, hidden_dim is output_dim
   */
  // Gates consume the concatenation [x_t ; h_{t-1}], hence input_dim + out_dim columns.
  val concat_len = input_dim + out_dim
  /**
   * f: forget gate
   * g: cell gate
   * o: output gate
   * i: input gate
   */
  var Wo = DenseMatrix.rand[Double](out_dim, concat_len)
  var Wf = DenseMatrix.rand[Double](out_dim, concat_len)
  var Wi = DenseMatrix.rand[Double](out_dim, concat_len)
  var Wg = DenseMatrix.rand[Double](out_dim, concat_len)
  var Bo = DenseMatrix.rand[Double](out_dim, 1)
  var Bf = DenseMatrix.rand[Double](out_dim, 1)
  var Bi = DenseMatrix.rand[Double](out_dim, 1)
  var Bg = DenseMatrix.rand[Double](out_dim, 1)
  /**
   * Previous update steps, consumed only by update_param when momentum is on.
   */
  var Wo_theta_pre = DenseMatrix.zeros[Double](out_dim, concat_len)
  var Wf_theta_pre = DenseMatrix.zeros[Double](out_dim, concat_len)
  var Wi_theta_pre = DenseMatrix.zeros[Double](out_dim, concat_len)
  var Wg_theta_pre = DenseMatrix.zeros[Double](out_dim, concat_len)
  var Bo_theta_pre = DenseMatrix.zeros[Double](out_dim, 1)
  var Bf_theta_pre = DenseMatrix.zeros[Double](out_dim, 1)
  var Bi_theta_pre = DenseMatrix.zeros[Double](out_dim, 1)
  var Bg_theta_pre = DenseMatrix.zeros[Double](out_dim, 1)
  /**
   * Gradient accumulators, filled by LSTMLayerNode.backward and cleared by reset_diff().
   */
  var wo_diff = DenseMatrix.zeros[Double](out_dim, concat_len)
  var wf_diff = DenseMatrix.zeros[Double](out_dim, concat_len)
  var wi_diff = DenseMatrix.zeros[Double](out_dim, concat_len)
  var wg_diff = DenseMatrix.zeros[Double](out_dim, concat_len)
  var bo_diff = DenseMatrix.zeros[Double](out_dim, 1)
  var bf_diff = DenseMatrix.zeros[Double](out_dim, 1)
  var bi_diff = DenseMatrix.zeros[Double](out_dim, 1)
  var bg_diff = DenseMatrix.zeros[Double](out_dim, 1)
  /**
   * Per-parameter optimizer state, ordered o, f, i, g weights then o, f, i, g
   * biases: Eg2_w_ofig = running E[g^2], X2_w_ofig = running E[dx^2],
   * detla_w_ofig (sic, name kept for compatibility) = last deltas.
   * http://arxiv.org/pdf/1212.5701v1.pdf
   */
  var Eg2_w_ofig = Seq(
    DenseMatrix.zeros[Double](out_dim, concat_len),
    DenseMatrix.zeros[Double](out_dim, concat_len),
    DenseMatrix.zeros[Double](out_dim, concat_len),
    DenseMatrix.zeros[Double](out_dim, concat_len),
    DenseMatrix.zeros[Double](out_dim, 1),
    DenseMatrix.zeros[Double](out_dim, 1),
    DenseMatrix.zeros[Double](out_dim, 1),
    DenseMatrix.zeros[Double](out_dim, 1)
  )
  var X2_w_ofig = Seq(
    DenseMatrix.zeros[Double](out_dim, concat_len),
    DenseMatrix.zeros[Double](out_dim, concat_len),
    DenseMatrix.zeros[Double](out_dim, concat_len),
    DenseMatrix.zeros[Double](out_dim, concat_len),
    DenseMatrix.zeros[Double](out_dim, 1),
    DenseMatrix.zeros[Double](out_dim, 1),
    DenseMatrix.zeros[Double](out_dim, 1),
    DenseMatrix.zeros[Double](out_dim, 1)
  )
  var detla_w_ofig = Seq(
    DenseMatrix.zeros[Double](out_dim, concat_len),
    DenseMatrix.zeros[Double](out_dim, concat_len),
    DenseMatrix.zeros[Double](out_dim, concat_len),
    DenseMatrix.zeros[Double](out_dim, concat_len),
    DenseMatrix.zeros[Double](out_dim, 1),
    DenseMatrix.zeros[Double](out_dim, 1),
    DenseMatrix.zeros[Double](out_dim, 1),
    DenseMatrix.zeros[Double](out_dim, 1)
  )
  /**
   * RMSprop update (http://sebastianruder.com/optimizing-gradient-descent/):
   *   E[g^2]_t = rho * E[g^2]_{t-1} + (1 - rho) * g_t^2
   *   theta   -= lr * g_t / sqrt(E[g^2]_t + epsilon)
   */
  def update_param_rmsprop(
    lr: Double = 0.001,
    rho: Double = 0.9,
    epsilon: Double = 1e-8
  ): Unit = {
    val gradient_t = Seq(
      this.wo_diff,
      this.wf_diff,
      this.wi_diff,
      this.wg_diff,
      this.bo_diff,
      this.bf_diff,
      this.bi_diff,
      this.bg_diff
    )
    this.Eg2_w_ofig = this.Eg2_w_ofig.zip(gradient_t).map {
      r =>
        // BUG FIX: the previous code squared the accumulator itself
        // (rho * (Eg2 :* Eg2)); the decayed average must keep the previous
        // accumulator linear, exactly as update_param_adadelta does below.
        val Eg2_t = rho * r._1 + (1.0 - rho) * (r._2 :* r._2)
        Eg2_t.asInstanceOf[DenseMatrix[Double]]
    }
    this.detla_w_ofig = gradient_t.zip(this.Eg2_w_ofig).map {
      r =>
        val delta = lr * (r._1 :/ sqrt(r._2 + epsilon))
        delta.asInstanceOf[DenseMatrix[Double]]
    }
    this.Wo -= this.detla_w_ofig.head
    this.Wf -= this.detla_w_ofig.apply(1)
    this.Wi -= this.detla_w_ofig.apply(2)
    this.Wg -= this.detla_w_ofig.apply(3)
    this.Bo -= this.detla_w_ofig.apply(4)
    this.Bf -= this.detla_w_ofig.apply(5)
    this.Bi -= this.detla_w_ofig.apply(6)
    this.Bg -= this.detla_w_ofig.apply(7)
    this.reset_diff()
  }
  /**
   * AdaDelta update (Zeiler 2012, http://arxiv.org/pdf/1212.5701v1.pdf):
   * the step size is sqrt(E[dx^2] + eps) / sqrt(E[g^2] + eps), so no
   * global learning rate is needed.
   *
   * @param decay_rate decay of both running averages (rho in the paper)
   */
  def update_param_adadelta(decay_rate: Double): Unit = {
    val gradient_t = Seq(
      this.wo_diff,
      this.wf_diff,
      this.wi_diff,
      this.wg_diff,
      this.bo_diff,
      this.bf_diff,
      this.bi_diff,
      this.bg_diff
    )
    // E[g^2]_t = rho * E[g^2]_{t-1} + (1 - rho) * g_t^2
    this.Eg2_w_ofig = this.Eg2_w_ofig.zip(gradient_t).map { r =>
      val Eg2_t = decay_rate * r._1 + (1.0 - decay_rate) * (r._2 :* r._2)
      Eg2_t.asInstanceOf[DenseMatrix[Double]]
    }
    /**
     * adaptive learning rate, using W and delta_W
     */
    // delta_t = -(sqrt(E[dx^2] + eps) / sqrt(E[g^2] + eps)) :* g_t
    this.detla_w_ofig = this.X2_w_ofig.zip(this.Eg2_w_ofig).map {
      r =>
        val gra_theta_t = -1.0 * (sqrt(r._1 + 1e-6).asInstanceOf[DenseMatrix[Double]] :/ sqrt(r._2 + 1e-6).asInstanceOf[DenseMatrix[Double]])
        gra_theta_t.asInstanceOf[DenseMatrix[Double]]
    }.zip(gradient_t).map {
      r =>
        r._1 :* r._2
    }
    // E[dx^2]_t = rho * E[dx^2]_{t-1} + (1 - rho) * delta_t^2
    this.X2_w_ofig = this.X2_w_ofig.zip(this.detla_w_ofig).map {
      r =>
        val X2_theta_t = decay_rate * r._1 + (1.0 - decay_rate) * (r._2 :* r._2)
        X2_theta_t.asInstanceOf[DenseMatrix[Double]]
    }
    // deltas are negative, so they are added
    this.Wo += this.detla_w_ofig.head
    this.Wf += this.detla_w_ofig.apply(1)
    this.Wi += this.detla_w_ofig.apply(2)
    this.Wg += this.detla_w_ofig.apply(3)
    this.Bo += this.detla_w_ofig.apply(4)
    this.Bf += this.detla_w_ofig.apply(5)
    this.Bi += this.detla_w_ofig.apply(6)
    this.Bg += this.detla_w_ofig.apply(7)
    this.reset_diff()
  }
  /**
   * Plain SGD step, optionally with classical momentum:
   * theta_t = momentum_p * theta_{t-1} - lr * grad; W += theta_t.
   *
   * @param lr         learning rate
   * @param momentum_p momentum coefficient (used only when momentum = true)
   * @param momentum   whether to apply momentum
   */
  def update_param(lr: Double, momentum_p: Double = 0.5, momentum: Boolean = false): Unit = {
    if (momentum) {
      val theta_wo = (this.Wo_theta_pre * momentum_p).asInstanceOf[DenseMatrix[Double]] - (this.wo_diff * lr).asInstanceOf[DenseMatrix[Double]]
      val theta_wf = (this.Wf_theta_pre * momentum_p).asInstanceOf[DenseMatrix[Double]] - (this.wf_diff * lr).asInstanceOf[DenseMatrix[Double]]
      val theta_wi = (this.Wi_theta_pre * momentum_p).asInstanceOf[DenseMatrix[Double]] - (this.wi_diff * lr).asInstanceOf[DenseMatrix[Double]]
      val theta_wg = (this.Wg_theta_pre * momentum_p).asInstanceOf[DenseMatrix[Double]] - (this.wg_diff * lr).asInstanceOf[DenseMatrix[Double]]
      val theta_bo = (this.Bo_theta_pre * momentum_p).asInstanceOf[DenseMatrix[Double]] - (this.bo_diff * lr).asInstanceOf[DenseMatrix[Double]]
      val theta_bf = (this.Bf_theta_pre * momentum_p).asInstanceOf[DenseMatrix[Double]] - (this.bf_diff * lr).asInstanceOf[DenseMatrix[Double]]
      val theta_bi = (this.Bi_theta_pre * momentum_p).asInstanceOf[DenseMatrix[Double]] - (this.bi_diff * lr).asInstanceOf[DenseMatrix[Double]]
      val theta_bg = (this.Bg_theta_pre * momentum_p).asInstanceOf[DenseMatrix[Double]] - (this.bg_diff * lr).asInstanceOf[DenseMatrix[Double]]
      // remember this step so the next call can apply momentum to it
      this.retain_param(theta_wo, theta_wf, theta_wi, theta_wg, theta_bo, theta_bf, theta_bi, theta_bg)
      this.Wo += theta_wo
      this.Wf += theta_wf
      this.Wi += theta_wi
      this.Wg += theta_wg
      this.Bo += theta_bo
      this.Bf += theta_bf
      this.Bi += theta_bi
      this.Bg += theta_bg
    } else {
      this.Wo -= this.wo_diff * lr
      this.Wf -= this.wf_diff * lr
      this.Wi -= this.wi_diff * lr
      this.Wg -= this.wg_diff * lr
      this.Bo -= this.bo_diff * lr
      this.Bf -= this.bf_diff * lr
      this.Bi -= this.bi_diff * lr
      this.Bg -= this.bg_diff * lr
    }
    this.reset_diff()
  }
  /** Zeroes all gradient accumulators; called after every parameter update. */
  def reset_diff(): Unit = {
    this.wo_diff = DenseMatrix.zeros[Double](out_dim, concat_len)
    this.wf_diff = DenseMatrix.zeros[Double](out_dim, concat_len)
    this.wi_diff = DenseMatrix.zeros[Double](out_dim, concat_len)
    this.wg_diff = DenseMatrix.zeros[Double](out_dim, concat_len)
    this.bo_diff = DenseMatrix.zeros[Double](out_dim, 1)
    this.bf_diff = DenseMatrix.zeros[Double](out_dim, 1)
    this.bi_diff = DenseMatrix.zeros[Double](out_dim, 1)
    this.bg_diff = DenseMatrix.zeros[Double](out_dim, 1)
  }
  /** Stores the last momentum step for each parameter. */
  def retain_param(theta_wo: DenseMatrix[Double], theta_wf: DenseMatrix[Double], theta_wi: DenseMatrix[Double], theta_wg: DenseMatrix[Double],
    theta_bo: DenseMatrix[Double], theta_bf: DenseMatrix[Double], theta_bi: DenseMatrix[Double], theta_bg: DenseMatrix[Double]): Unit = {
    /**
     * retain pre status
     */
    this.Wo_theta_pre = theta_wo
    this.Wf_theta_pre = theta_wf
    this.Wi_theta_pre = theta_wi
    this.Wg_theta_pre = theta_wg
    this.Bo_theta_pre = theta_bo
    this.Bf_theta_pre = theta_bf
    this.Bi_theta_pre = theta_bi
    this.Bg_theta_pre = theta_bg
  }
}
/**
 * One LSTM cell unrolled at a single time step: caches the gate activations
 * and states from forward() so that backward() can compute gradients.
 *
 * @param input_dim dimensionality of x_t
 * @param out_dim   dimensionality of h_t / cell state
 */
class LSTMLayerNode(val input_dim: Int, val out_dim: Int) {
  /**
   * one state for each time,
   * we do not need to save these states in forward propagation,
   * but we need them in backward propagation.
   */
  // Gate activations (f = forget, o = output, i = input, g = cell candidate)
  var f = DenseMatrix.zeros[Double](out_dim, 1)
  var o = DenseMatrix.zeros[Double](out_dim, 1)
  var i = DenseMatrix.zeros[Double](out_dim, 1)
  var g = DenseMatrix.zeros[Double](out_dim, 1)
  // Cell state c_t and hidden state h_t
  var state_cell = DenseMatrix.zeros[Double](out_dim, 1)
  var state_h = DenseMatrix.zeros[Double](out_dim, 1)
  // Gradients produced by backward(), read by the node for time t-1
  var diff_cell_t = DenseMatrix.zeros[Double](out_dim, 1)
  var bottom_diff_h_t_minus_1 = DenseMatrix.zeros[Double](out_dim, 1)
  var bottom_diff_cell_t_minus_1 = DenseMatrix.zeros[Double](out_dim, 1)
  var bottom_diff_x_t_minus_1 = DenseMatrix.zeros[Double](input_dim, 1)
  // Inputs cached by forward()
  var cell_prev_t_minus_1 = DenseMatrix.zeros[Double](out_dim, 1)
  var h_prev_t_minus_1 = DenseMatrix.zeros[Double](out_dim, 1)
  var xt = DenseMatrix.zeros[Double](input_dim, 1)
  // Concatenation [x_t ; h_{t-1}], the common input of all four gates
  var xc = DenseMatrix.zeros[Double](input_dim + out_dim, 1)
  /**
   * forward propagation
   *
   * Standard LSTM equations: f/i/o = sigmoid(W [x;h] + b), g = tanh(W [x;h] + b),
   * c_t = g :* i + c_{t-1} :* f, h_t = o :* tanh(c_t).
   *
   * @param xt        input at time t
   * @param cell_prev cell state c_{t-1}
   * @param h_prev    hidden state h_{t-1}
   * @param param     shared layer parameters
   */
  def forward(xt: DenseMatrix[Double], cell_prev: DenseMatrix[Double], h_prev: DenseMatrix[Double], param: LSTMLayerParam): Unit = {
    this.cell_prev_t_minus_1 = cell_prev
    this.h_prev_t_minus_1 = h_prev
    this.xt = xt
    this.xc = DenseMatrix.vertcat(this.xt, this.h_prev_t_minus_1)
    this.f = sigmoid((param.Wf * this.xc).asInstanceOf[DenseMatrix[Double]] + param.Bf)
    this.i = sigmoid((param.Wi * this.xc).asInstanceOf[DenseMatrix[Double]] + param.Bi)
    this.o = sigmoid((param.Wo * this.xc).asInstanceOf[DenseMatrix[Double]] + param.Bo)
    this.g = tanh((param.Wg * this.xc).asInstanceOf[DenseMatrix[Double]] + param.Bg)
    /**
     * :* means element wise
     */
    this.state_cell = this.g :* this.i + this.cell_prev_t_minus_1 :* this.f
    this.state_h = this.o :* tanh(this.state_cell)
  }
  /**
   * backward propagation
   *
   * Accumulates weight/bias gradients into `param` and stores the gradients
   * to pass to the node at time t-1.
   *
   * NOTE(review): dc_t uses (1 - g^2) :* g, i.e. the derivative is taken
   * through the cell-candidate gate g rather than tanh(state_cell) which
   * forward() actually applies to h_t — verify this is intentional.
   * NOTE(review): the two bottom_diff assignments at the end look swapped
   * relative to their names: diff_cell_t :* f is the usual cell gradient
   * dL/dc_{t-1}, while the h-slice of dxc is the usual dL/dh_{t-1}. Callers
   * in LstmNeuralNetwork consume them under the swapped names — confirm
   * against the backward recursion before changing either side.
   *
   * @param top_diff_H_t all lose after time t, dH(t) = dh(t) + dH(t+1)
   * @param top_diff_cell_t_plus_1 cell loss at t+1
   */
  def backward(top_diff_H_t: DenseMatrix[Double], top_diff_cell_t_plus_1: DenseMatrix[Double], param: LSTMLayerParam): Unit = {
    this.diff_cell_t = this.o :* ((1.0 - this.g :* this.g) :* this.g) :* top_diff_H_t + top_diff_cell_t_plus_1
    val diff_o = tanh(this.state_cell) :* top_diff_H_t
    val diff_f = this.cell_prev_t_minus_1 :* this.diff_cell_t
    val diff_i = this.g :* this.diff_cell_t
    val diff_g = this.i :* this.diff_cell_t
    /**
     * diffs w.r.t. vector inside sigma / tanh function
     */
    val do_input = (1.0 - this.o) :* this.o :* diff_o
    val df_input = (1.0 - this.f) :* this.f :* diff_f
    val di_input = (1.0 - this.i) :* this.i :* diff_i
    val dg_input = (1.0 - this.g :* this.g) :* diff_g
    /**
     * diffs w.r.t. inputs
     */
    param.wi_diff += LSTM.concatDiff(di_input, this.xt, this.h_prev_t_minus_1)
    param.wf_diff += LSTM.concatDiff(df_input, this.xt, this.h_prev_t_minus_1)
    param.wo_diff += LSTM.concatDiff(do_input, this.xt, this.h_prev_t_minus_1)
    param.wg_diff += LSTM.concatDiff(dg_input, this.xt, this.h_prev_t_minus_1)
    param.bi_diff += di_input
    param.bf_diff += df_input
    param.bo_diff += do_input
    param.bg_diff += dg_input
    // Gradient w.r.t. the concatenated input [x_t ; h_{t-1}], summed over gates
    var dxc = DenseMatrix.zeros[Double](param.concat_len, 1)
    dxc += param.Wo.t * do_input
    dxc += param.Wf.t * df_input
    dxc += param.Wi.t * di_input
    dxc += param.Wg.t * dg_input
    bottom_diff_h_t_minus_1 = this.diff_cell_t :* this.f
    bottom_diff_cell_t_minus_1 = dxc(param.input_dim to -1, ::)
    //bottom_diff_x_t_minus_1 = dxc( 0 to param.input_dim, ::)
    bottom_diff_x_t_minus_1 = dxc(0 until param.input_dim, ::)
  }
}
/**
 * Base loss layer; subclasses override both methods. The defaults are inert
 * placeholders (zero loss, null gradient).
 * good paper: http://freemind.pluskid.org/machine-learning/softmax-vs-softmax-loss-numerical-stability/
 */
class LossLayer extends Serializable {
  /** Scalar loss for a (label, prediction) pair; base implementation returns 0. */
  def negative_log_likelihood(label: DenseMatrix[Double], pred: DenseMatrix[Double]): Double = 0.0

  /** Gradient of the loss w.r.t. the prediction; base implementation returns null. */
  def diff(label: DenseMatrix[Double], pred: DenseMatrix[Double]): DenseMatrix[Double] = null
}
/** Softmax negative-log-likelihood loss over a column of logits. */
class softMaxLossLayer extends LossLayer {
  /**
   * Returns log(sum(exp(pred))) minus the label entry at the argmax of the
   * prediction (for one-hot labels this subtracts 1 when the predicted class
   * is correct, 0 otherwise).
   *
   * NOTE(review): exp(pred) is taken without subtracting max(pred), so large
   * logits can overflow — see the numerical-stability reference on the base
   * class. Also, the true NLL would subtract pred(true class), not
   * label(argmax(pred)); confirm this monitoring quantity is intended.
   */
  override def negative_log_likelihood(label: DenseMatrix[Double], pred: DenseMatrix[Double]): Double = {
    val pre_exp = exp(pred)
    val pre = argmax(pre_exp)
    val pre_label = label(pre)
    scala.math.log(sum(pre_exp)) - pre_label
  }
  /** Gradient of softmax + NLL w.r.t. the logits: softmax(pred) - label. */
  override def diff(label: DenseMatrix[Double], pred: DenseMatrix[Double]): DenseMatrix[Double] = {
    val pre_exp = exp(pred)
    val softmax_pre = (pre_exp / sum(pre_exp)).asInstanceOf[DenseMatrix[Double]]
    softmax_pre - label
  }
}
/**
 * Sum-of-squares loss: L = sum((pred - label)^2), dL/dpred = 2 * (pred - label).
 */
class simpleLossLayer extends LossLayer {
  /** Squared-error loss between prediction and label. */
  override def negative_log_likelihood(label: DenseMatrix[Double], pred: DenseMatrix[Double]): Double = {
    val residual = pred - label
    sum(residual :* residual)
  }

  /** Gradient of the squared-error loss: 2 * (pred - label). */
  override def diff(label: DenseMatrix[Double], pred: DenseMatrix[Double]): DenseMatrix[Double] = {
    val residual = pred - label
    residual :*= 2.0
  }
}
/**
 * Stacked LSTM network: `layer_size` LSTM layers whose output dimensions are
 * given by `hidden_dims`, topped by a pluggable loss layer. Forward passes
 * cache per-timestep nodes so the matching backward pass can run BPTT.
 *
 * @param input_dim   dimensionality of each input vector x_t
 * @param hidden_dims output dimension of each layer (length must equal layer_size)
 * @param layer_size  number of stacked LSTM layers (>= 1)
 * @param lossLayer   loss applied to the top layer's outputs
 */
class LstmNeuralNetwork(val input_dim: Int, val hidden_dims: Seq[Int], val layer_size: Int = 1, val lossLayer: LossLayer) {
  @transient lazy protected val logger = Logger(LoggerFactory.getLogger(this.getClass))
  assert(this.hidden_dims.length == this.layer_size && layer_size >= 1)
  // One parameter set per layer; layer k maps hidden_dims(k-1) -> hidden_dims(k)
  val LstmParams = new ArrayBuffer[LSTMLayerParam]()
  // Outputs of the single-layer forward_propagation path
  val y_out = new ArrayBuffer[DenseMatrix[Double]]()
  // Per-layer output sequences of the multilayer path (y_out_seq(k) = layer k's h_t sequence)
  val y_out_seq = new ArrayBuffer[ArrayBuffer[DenseMatrix[Double]]]()
  // Per-layer node sequences cached by the multilayer forward pass
  val node_seq = new ArrayBuffer[Seq[LSTMLayerNode]]()
  LstmParams.append(LSTMLayerParam(this.input_dim, hidden_dims.head))
  for (idx <- 1 until hidden_dims.length) {
    LstmParams.append(LSTMLayerParam(hidden_dims.apply(idx - 1), hidden_dims.apply(idx)))
  }
  /**
   * Runs the full stack forward over the input sequence, feeding each layer's
   * h_t sequence into the next layer. Results are cached in y_out_seq and
   * node_seq; previous caches are cleared first.
   *
   * @param x_input sequence of input column vectors x_0..x_T
   */
  def multilayer_forward_propagation(x_input: Seq[DenseMatrix[Double]]): Unit = {
    this.y_out_seq.clear()
    this.node_seq.clear()
    // NOTE: the loop variable below shadows this outer layer counter inside the closure
    var idx = 0
    this.LstmParams.foreach {
      r =>
        val y_temp = new ArrayBuffer[DenseMatrix[Double]]()
        val nodes = new ArrayBuffer[LSTMLayerNode]()
        // Layer 0 consumes the raw input; deeper layers consume the previous layer's outputs
        val input_t = if (idx == 0) x_input else y_out_seq.apply(idx - 1)
        // First timestep starts from zero cell/hidden state
        val first_node = new LSTMLayerNode(r.input_dim, r.out_dim)
        first_node.forward(input_t.head, DenseMatrix.zeros[Double](r.out_dim, 1), DenseMatrix.zeros[Double](r.out_dim, 1), r)
        nodes.append(first_node)
        y_temp.append(first_node.state_h)
        for (idx <- 1 until input_t.size) {
          val cell_pre = nodes.apply(idx - 1).state_cell
          val h_pre = nodes.apply(idx - 1).state_h
          val cur_node = new LSTMLayerNode(r.input_dim, r.out_dim)
          cur_node.forward(input_t.apply(idx), cell_pre, h_pre, r)
          nodes.append(cur_node)
          y_temp.append(cur_node.state_h)
        }
        this.y_out_seq.append(y_temp)
        this.node_seq.append(nodes)
        idx += 1
    }
  }
  /**
   * Single-layer forward pass over the input sequence using only the first
   * parameter set; outputs are cached in y_out.
   *
   * @param x_input sequence of input column vectors
   * @return one node per timestep, holding the cached states
   */
  def forward_propagation(x_input: Seq[DenseMatrix[Double]]): Seq[LSTMLayerNode] = {
    this.y_out.clear()
    val nodes = new ArrayBuffer[LSTMLayerNode]()
    val first_node = new LSTMLayerNode(input_dim, hidden_dims.head)
    first_node.forward(x_input.head, DenseMatrix.zeros[Double](hidden_dims.head, 1), DenseMatrix.zeros[Double](hidden_dims.head, 1), LstmParams.head)
    nodes.append(first_node)
    this.y_out.append(first_node.state_h)
    for (idx <- 1 until x_input.size) {
      val cell_pre = nodes.apply(idx - 1).state_cell
      val h_pre = nodes.apply(idx - 1).state_h
      val cur_node = new LSTMLayerNode(input_dim, hidden_dims.head)
      cur_node.forward(x_input.apply(idx), cell_pre, h_pre, LstmParams.head)
      nodes.append(cur_node)
      this.y_out.append(cur_node.state_h)
    }
    nodes
  }
  /**
   * Full-stack BPTT: runs the multilayer forward pass, then backpropagates
   * from the last timestep of the top layer down through time and layers,
   * accumulating gradients into each layer's LSTMLayerParam.
   *
   * NOTE(review): next_diff is appended last-timestep-first for the top
   * layer, but the sub-layer loop reads it via next_diff.last /
   * next_diff.apply(node_idx) as if it were in time order — verify the
   * intended indexing before relying on multi-layer gradients.
   *
   * @param x_input input sequence
   * @param labels  one label per timestep (applied to the top layer only)
   * @return per-layer loss values, bottom layer first
   */
  def multilayer_backward_propagation(
    x_input: Seq[DenseMatrix[Double]],
    labels: Seq[DenseMatrix[Double]]
  ): Seq[Double] = {
    this.multilayer_forward_propagation(x_input)
    // Gradients w.r.t. each timestep's input, handed down to the layer below
    val next_diff = new ArrayBuffer[DenseMatrix[Double]]()
    val losses = new ArrayBuffer[Double]()
    assert(x_input.length == this.node_seq.last.length)
    /**
     * last layer last node
     */
    var loss = lossLayer.negative_log_likelihood(labels.last, this.node_seq.last.last.state_h)
    val diff_h = lossLayer.diff(labels.last, this.node_seq.last.last.state_h)
    val diff_cell = DenseMatrix.zeros[Double](hidden_dims.last, 1)
    this.node_seq.last.last.backward(diff_h, diff_cell, LstmParams.last)
    next_diff.append(this.node_seq.last.last.bottom_diff_x_t_minus_1)
    /**
     * last layer, other nodes
     */
    for (idx <- (0 until this.node_seq.last.length - 1).reverse) {
      loss += lossLayer.negative_log_likelihood(labels.apply(idx), this.node_seq.last.apply(idx).state_h)
      var diff_h = lossLayer.diff(labels.apply(idx), this.node_seq.last.apply(idx).state_h)
      diff_h += this.node_seq.last.apply(idx + 1).bottom_diff_h_t_minus_1
      val diff_cell = this.node_seq.last.apply(idx + 1).bottom_diff_cell_t_minus_1
      this.node_seq.last.apply(idx).backward(diff_h, diff_cell, LstmParams.last)
      next_diff.append(this.node_seq.last.apply(idx).bottom_diff_x_t_minus_1)
    }
    losses.append(loss)
    /**
     * sub lstm layers
     */
    for (layer_idx <- (0 until layer_size - 1).reverse) {
      // "Loss" of a hidden layer is the squared norm of the diffs flowing into it
      losses.append(next_diff.map { k => sum(k :* k) }.sum)
      /**
       * last node
       */
      val diff_h = next_diff.last
      val diff_cell = DenseMatrix.zeros[Double](hidden_dims.apply(layer_idx), 1)
      this.node_seq.apply(layer_idx).last.backward(diff_h, diff_cell, LstmParams.apply(layer_idx))
      next_diff.update(next_diff.length - 1, this.node_seq.apply(layer_idx).last.bottom_diff_x_t_minus_1)
      for (node_idx <- (0 until this.node_seq.apply(layer_idx).length - 1).reverse) {
        var diff_h = next_diff.apply(node_idx)
        diff_h += this.node_seq.apply(layer_idx).apply(node_idx + 1).bottom_diff_h_t_minus_1
        val diff_cell = this.node_seq.apply(layer_idx).apply(node_idx + 1).bottom_diff_cell_t_minus_1
        this.node_seq.apply(layer_idx).apply(node_idx).backward(diff_h, diff_cell, LstmParams.apply(layer_idx))
        next_diff.update(node_idx, this.node_seq.apply(layer_idx).apply(node_idx).bottom_diff_x_t_minus_1)
      }
    }
    losses.reverse
  }
  /**
   * Single-layer BPTT over the input sequence: forward pass, then backward
   * from the last timestep, accumulating gradients into LstmParams.head.
   *
   * @param x_input input sequence
   * @param labels  one label per timestep
   * @return total loss summed over timesteps
   */
  def backward_propagation(
    x_input: Seq[DenseMatrix[Double]],
    labels: Seq[DenseMatrix[Double]]
  ): Double = {
    val nodes = this.forward_propagation(x_input)
    val last_node = x_input.length - 1
    assert(x_input.length == nodes.length)
    var loss = lossLayer.negative_log_likelihood(labels.apply(last_node), nodes.apply(last_node).state_h)
    val diff_h = lossLayer.diff(labels.apply(last_node), nodes.apply(last_node).state_h)
    val diff_cell = DenseMatrix.zeros[Double](hidden_dims.head, 1)
    nodes.apply(last_node).backward(diff_h, diff_cell, LstmParams.head)
    for (idx <- (0 until nodes.length - 1).reverse) {
      loss += lossLayer.negative_log_likelihood(labels.apply(idx), nodes.apply(idx).state_h)
      var diff_h = lossLayer.diff(labels.apply(idx), nodes.apply(idx).state_h)
      diff_h += nodes.apply(idx + 1).bottom_diff_h_t_minus_1
      val diff_cell = nodes.apply(idx + 1).bottom_diff_cell_t_minus_1
      nodes.apply(idx).backward(diff_h, diff_cell, LstmParams.head)
    }
    loss
  }
}
object LSTM {
/**
 * Builds the weight-gradient contribution for one gate: the outer products of
 * the gate's input-side diff with x_t and with h_{t-1}, concatenated
 * horizontally to match the [W_x | W_h] layout of the gate weight matrix.
 */
def concatDiff(diff: DenseMatrix[Double], xt: DenseMatrix[Double], ht_minus_1: DenseMatrix[Double]): DenseMatrix[Double] = {
  val xPart = (diff * xt.t).asInstanceOf[DenseMatrix[Double]]
  val hPart = (diff * ht_minus_1.t).asInstanceOf[DenseMatrix[Double]]
  DenseMatrix.horzcat(xPart, hPart)
}
/*
implicit def +=(x : DenseMatrix[Double],y: DenseMatrix[Double]): DenseMatrix[Double] = {
(x + y).asInstanceOf[DenseMatrix[Double]]
}
*/
def main(args: Array[String]) {
val data = Seq(
DenseMatrix.rand[Double](5, 1),
DenseMatrix.rand[Double](5, 1),
DenseMatrix.rand[Double](5, 1),
DenseMatrix.rand[Double](5, 1),
DenseMatrix.rand[Double](5, 1),
DenseMatrix.rand[Double](5, 1)
)
val labels = Seq(
DenseMatrix((0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)).t,
DenseMatrix((0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0)).t,
DenseMatrix((0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0)).t,
DenseMatrix((0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0)).t,
DenseMatrix((0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0)).t,
DenseMatrix((0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0)).t
)
val simpleLSTM = new LstmNeuralNetwork(5, Seq(6, 7, 8), 3, new simpleLossLayer)
//simpleLSTM.multilayer_forward_propagation(data)
for (idx <- 0 to 500) {
val loss = simpleLSTM.multilayer_backward_propagation(data, labels)
println(loss)
simpleLSTM.LstmParams.foreach {
k => k.update_param_adadelta(0.95)
}
}
val out = simpleLSTM.y_out_seq.last
for (idx <- out.indices) {
val outnode = out.apply(idx)
println(outnode)
println("------")
val pre = DenseMatrix.zeros[Double](outnode.rows, outnode.cols)
pre(argmax(outnode)) = 1.0
println(pre)
println("------")
}
/*
simpleLSTM.forward_propagation(data)
var loss_old = 1000.0
var diverse_cnt = 0
for(idx<- 0 to 4000){
val loss_new = simpleLSTM.backward_propagation(data, labels)
println("loss: " + loss_new.toString)
//simpleLSTM.LstmParams.head.update_param(0.1, 0.5, true)
simpleLSTM.LstmParams.head.update_param_adadelta(0.95)
//simpleLSTM.LstmParams.head.update_param_rmsprop(0.001)
if (loss_new > loss_old){
diverse_cnt += 1
}
if (diverse_cnt>10) {
println("breaking out, because of getting diverse")
val out = simpleLSTM.y_out
for(idx<- out.indices){
val outnode = out.apply(idx)
println( outnode )
println("------")
val pre = DenseMatrix.zeros[Double](outnode.rows, outnode.cols)
pre( argmax(outnode) ) = 1.0
println( pre )
println("------")
}
System.exit(0)
}
loss_old = loss_new
}
val out = simpleLSTM.y_out
for(idx<- out.indices){
val outnode = out.apply(idx)
println( outnode )
println("------")
val pre = DenseMatrix.zeros[Double](outnode.rows, outnode.cols)
pre( argmax(outnode) ) = 1.0
println( pre )
println("------")
}
*/
}
}
| xuanyuansen/scalaLSTM | src/main/scala/com/xuanyuansen/algo/LSTM.scala | Scala | apache-2.0 | 23,785 |
package pureconfig.module.magnolia
import scala.collection.JavaConverters._
import scala.language.higherKinds
import com.typesafe.config.{ConfigFactory, ConfigRenderOptions, ConfigValueFactory}
import org.scalacheck.Arbitrary
import pureconfig.ConfigConvert.catchReadError
import pureconfig._
import pureconfig.error.{KeyNotFound, WrongType}
import pureconfig.module.magnolia.auto.reader._
import pureconfig.module.magnolia.auto.writer._
/**
 * Tests for Magnolia-derived product (case class) ConfigReader/ConfigWriter
 * instances: round-tripping, missing/extra keys, default arguments, custom
 * instance overrides, and recursive types.
 *
 * NOTE(review): the order of the implicit vals below matters — each
 * `checkArbitrary` picks up whatever instances are in scope at that point.
 */
class ProductConvertersSuite extends BaseSuite {

  behavior of "ConfigConvert"

  /* A configuration with only simple values and `Option` */
  case class FlatConfig(b: Boolean, d: Double, f: Float, i: Int, l: Long, s: String, o: Option[String])

  /* A configuration with a field of a type that is unknown to `ConfigConvert` */
  class MyType(myField: String) {
    def getMyField: String = myField

    // Structural equality on the wrapped field so round-trip assertions work.
    override def equals(obj: Any): Boolean =
      obj match {
        case mt: MyType => myField.equals(mt.getMyField)
        case _ => false
      }
  }

  case class ConfigWithUnknownType(d: MyType)

  // Recursive product type — checks that derivation terminates.
  case class RecType(ls: List[RecType])

  implicit val arbFlatConfig: Arbitrary[FlatConfig] = Arbitrary {
    Arbitrary.arbitrary[(Boolean, Double, Float, Int, Long, String, Option[String])].map((FlatConfig.apply _).tupled)
  }

  implicit val arbMyType: Arbitrary[MyType] = Arbitrary {
    Arbitrary.arbitrary[String].map(new MyType(_))
  }

  implicit val arbConfigWithUnknownType: Arbitrary[ConfigWithUnknownType] = Arbitrary {
    Arbitrary.arbitrary[MyType].map(ConfigWithUnknownType.apply)
  }

  // tests
  checkArbitrary[FlatConfig]

  // A hand-written converter makes the otherwise-unknown MyType usable in a product.
  implicit val myTypeConvert = ConfigConvert.viaString[MyType](catchReadError(new MyType(_)), _.getMyField)
  checkArbitrary[ConfigWithUnknownType]

  it should s"be able to override all of the ConfigConvert instances used to parse ${classOf[FlatConfig]}" in forAll {
    (config: FlatConfig) =>
      // Each primitive reader is replaced by one that ignores its input and
      // returns a fixed value — the derived product reader must use them.
      implicit val readBoolean = ConfigReader.fromString[Boolean](catchReadError(_ => false))
      implicit val readDouble = ConfigReader.fromString[Double](catchReadError(_ => 1d))
      implicit val readFloat = ConfigReader.fromString[Float](catchReadError(_ => 2f))
      implicit val readInt = ConfigReader.fromString[Int](catchReadError(_ => 3))
      implicit val readLong = ConfigReader.fromString[Long](catchReadError(_ => 4L))
      implicit val readString = ConfigReader.fromString[String](catchReadError(_ => "foobar"))
      implicit val readOption = ConfigConvert.viaString[Option[String]](catchReadError(_ => None), _ => " ")
      val cc = ConfigConvert[FlatConfig]
      cc.from(cc.to(config)) shouldBe Right(FlatConfig(false, 1d, 2f, 3, 4L, "foobar", None))
  }

  val emptyConf = ConfigFactory.empty().root()

  it should s"return a ${classOf[KeyNotFound]} when a key is not in the configuration" in {
    case class Foo(i: Int)
    ConfigConvert[Foo].from(emptyConf) should failWith(KeyNotFound("i"))
  }

  it should s"return a ${classOf[KeyNotFound]} when a custom convert is used and when a key is not in the configuration" in {
    case class InnerConf(v: Int)
    case class EnclosingConf(conf: InnerConf)

    // The inner convert never fails, but the enclosing product must still
    // report the missing "conf" key before invoking it.
    implicit val conv = new ConfigConvert[InnerConf] {
      def from(cv: ConfigCursor) = Right(InnerConf(42))
      def to(conf: InnerConf) = ConfigFactory.parseString(s"{ v: ${conf.v} }").root()
    }

    ConfigConvert[EnclosingConf].from(emptyConf) should failWith(KeyNotFound("conf"))
  }

  it should "allow custom ConfigReaders to handle missing keys" in {
    case class Conf(a: Int, b: Int)
    val conf = ConfigFactory.parseString("""{ a: 1 }""").root()
    ConfigReader[Conf].from(conf) should failWith(KeyNotFound("b"))

    // A reader mixing in ReadsMissingKeys is consulted even when the key is
    // absent; here the missing key defaults to 42.
    implicit val defaultInt = new ConfigReader[Int] with ReadsMissingKeys {
      def from(cur: ConfigCursor) =
        cur.asConfigValue.fold(
          _ => Right(42),
          v => {
            val s = v.render(ConfigRenderOptions.concise)
            cur.scopeFailure(catchReadError(_.toInt)(implicitly)(s))
          }
        )
    }
    ConfigReader[Conf].from(conf).value shouldBe Conf(1, 42)
  }

  it should "allow custom ConfigWriters to handle missing keys" in {
    case class Conf(a: Int, b: Int)
    ConfigWriter[Conf].to(Conf(0, 3)) shouldBe ConfigFactory.parseString("""{ a: 0, b: 3 }""").root()

    // A writer mixing in WritesMissingKeys may omit a key entirely (here,
    // zero values are dropped from the output).
    implicit val nonZeroInt = new ConfigWriter[Int] with WritesMissingKeys[Int] {
      def to(v: Int) = ConfigValueFactory.fromAnyRef(v)
      def toOpt(a: Int) = if (a == 0) None else Some(to(a))
    }
    ConfigWriter[Conf].to(Conf(0, 3)) shouldBe ConfigFactory.parseString("""{ b: 3 }""").root()
  }

  it should "not write empty option fields" in {
    case class Conf(a: Int, b: Option[Int])
    ConfigConvert[Conf].to(Conf(42, Some(1))) shouldBe ConfigFactory.parseString("""{ a: 42, b: 1 }""").root()
    ConfigConvert[Conf].to(Conf(42, None)) shouldBe ConfigFactory.parseString("""{ a: 42 }""").root()
  }

  it should s"return a ${classOf[WrongType]} when a key has a wrong type" in {
    case class Foo(i: Int)
    case class Bar(foo: Foo)
    case class FooBar(foo: Foo, bar: Bar)
    // "bar.foo" is a string where an object is expected.
    val conf = ConfigFactory.parseMap(Map("foo.i" -> 1, "bar.foo" -> "").asJava).root()
    ConfigConvert[FooBar].from(conf) should failWithReason[WrongType]
  }

  it should "consider default arguments by default" in {
    case class InnerConf(e: Int, g: Int)
    case class Conf(
        a: Int,
        b: String = "default",
        c: Int = 42,
        d: InnerConf = InnerConf(43, 44),
        e: Option[Int] = Some(45)
    )

    // Missing keys with defaults fall back to the defaults...
    val conf1 = ConfigFactory.parseMap(Map("a" -> 2).asJava).root()
    ConfigConvert[Conf].from(conf1).value shouldBe Conf(2, "default", 42, InnerConf(43, 44), Some(45))

    val conf2 = ConfigFactory.parseMap(Map("a" -> 2, "c" -> 50).asJava).root()
    ConfigConvert[Conf].from(conf2).value shouldBe Conf(2, "default", 50, InnerConf(43, 44), Some(45))

    // ...but a key without a default is still required.
    val conf3 = ConfigFactory.parseMap(Map("c" -> 50).asJava).root()
    ConfigConvert[Conf].from(conf3) should failWith(KeyNotFound("a"))

    // A partially-specified nested object does NOT merge with its default:
    // the whole nested value is read, so "d.g" is reported missing.
    val conf4 = ConfigFactory.parseMap(Map("a" -> 2, "d.e" -> 5).asJava).root()
    ConfigConvert[Conf].from(conf4) should failWith(KeyNotFound("g"), "d", emptyConfigOrigin)

    val conf5 = ConfigFactory.parseMap(Map("a" -> 2, "d.e" -> 5, "d.g" -> 6).asJava).root()
    ConfigConvert[Conf].from(conf5).value shouldBe Conf(2, "default", 42, InnerConf(5, 6), Some(45))

    val conf6 = ConfigFactory.parseMap(Map("a" -> 2, "d" -> "notAnInnerConf").asJava).root()
    ConfigConvert[Conf].from(conf6) should failWithReason[WrongType]

    val conf7 = ConfigFactory.parseMap(Map("a" -> 2, "c" -> 50, "e" -> 1).asJava).root()
    ConfigConvert[Conf].from(conf7).value shouldBe Conf(2, "default", 50, InnerConf(43, 44), Some(1))

    // An explicit null beats the Some(45) default and reads as None.
    val conf8 = ConfigFactory.parseMap(Map("a" -> 2, "c" -> 50, "e" -> null).asJava).root()
    ConfigConvert[Conf].from(conf8).value shouldBe Conf(2, "default", 50, InnerConf(43, 44), None)
  }

  it should s"work properly with recursively defined product types" in {
    val conf = ConfigFactory.parseString("ls = [{ ls = [] }, { ls = [{ ls = [] }] }]").root()
    ConfigConvert[RecType].from(conf).value shouldBe RecType(List(RecType(Nil), RecType(List(RecType(Nil)))))
  }
}
| pureconfig/pureconfig | modules/magnolia/src/test/scala/pureconfig/module/magnolia/ProductConvertersSuite.scala | Scala | mpl-2.0 | 7,190 |
package nodes.learning
import org.scalatest.FunSuite
import java.io._
import breeze.linalg._
import breeze.stats.distributions.Rand
import org.apache.spark.rdd.RDD
import org.apache.spark.SparkContext
import pipelines._
import utils.{Stats, MatrixUtils, TestUtils}
/**
 * Verifies the block-weighted least-squares solver by checking the first-order
 * optimality condition: the gradient of the weighted objective, evaluated at
 * the returned solution, should be (approximately) zero.
 */
class BlockWeightedLeastSquaresSuite extends FunSuite with Logging with LocalSparkContext {

  /**
   * Computes the gradient of the class-weighted least-squares objective
   * (with L2 regularization `lambda`) at the model (x, b).
   *
   * NOTE(review): the per-example weights assume every partition holds
   * examples of a single class — the class index is taken from the FIRST row
   * of each partition and applied to the whole partition. Confirm callers
   * partition the data that way.
   *
   * @param trainingFeatures feature row-vectors, one RDD element per example
   * @param trainingLabels   one-hot label row-vectors, aligned with features
   * @param lambda           L2 regularization strength
   * @param mixtureWeight    weight given to the positive class vs. the rest
   * @param x                model weight matrix being checked
   * @param b                model bias vector being checked
   * @return the gradient matrix (same shape as x)
   */
  def computeGradient(
      trainingFeatures: RDD[DenseVector[Double]],
      trainingLabels: RDD[DenseVector[Double]],
      lambda: Double,
      mixtureWeight: Double,
      x: DenseMatrix[Double],
      b: DenseVector[Double]): DenseMatrix[Double] = {

    val nTrain = trainingLabels.count

    // Pack each partition's row-vectors into one matrix for block operations.
    val trainingLabelsMat = trainingLabels.mapPartitions(part =>
      Iterator.single(MatrixUtils.rowsToMatrix(part)))
    val trainingFeaturesMat = trainingFeatures.mapPartitions(part =>
      Iterator.single(MatrixUtils.rowsToMatrix(part)))

    // Per-partition weight matrix: every entry gets the "negative" weight,
    // except the column of the partition's class, which gets the boosted
    // "positive" weight (mixtureWeight spread over this partition's examples).
    val weights = trainingLabelsMat.map { mat =>
      val numPosEx = mat.rows
      val firstLabel = mat(0, ::).t.toArray
      val classIdx = firstLabel.indexOf(firstLabel.max)
      val negWt = (1.0 - mixtureWeight) / nTrain.toDouble
      val posWt = negWt + (mixtureWeight / numPosEx.toDouble)
      val out = DenseMatrix.fill(mat.rows, mat.cols)(negWt)
      out(::, classIdx) := posWt
      out
    }

    val modelBroadcast = trainingFeatures.context.broadcast(x)
    val bBroadcast = trainingFeatures.context.broadcast(b)

    // Compute the gradient!
    // Per block: feats^T * (weights .* (feats * x + b - labels)), summed
    // across partitions. The := / :*= ops mutate `out` in place.
    val matOut = trainingFeaturesMat.zip(trainingLabelsMat.zip(weights)).map { part =>
      val feats = part._1
      val labels = part._2._1
      val wts = part._2._2
      val out = feats * modelBroadcast.value
      out(*, ::) :+= bBroadcast.value
      out -= labels
      out :*= wts
      feats.t * out
    }.reduce((a: DenseMatrix[Double], b: DenseMatrix[Double]) => a += b)

    // Add the L2 regularization term.
    val gradW = matOut + modelBroadcast.value * lambda
    gradW
  }

  test("BlockWeighted solver solution should have zero gradient") {
    val blockSize = 4
    val numIter = 10
    val lambda = 0.1
    val mixtureWeight = 0.3
    val numParts = 3

    // Fixture matrices checked into test resources.
    val aMat = csvread(new File(TestUtils.getTestResourceFileName("aMat.csv")))
    val bMat = csvread(new File(TestUtils.getTestResourceFileName("bMat.csv")))

    sc = new SparkContext("local", "test")

    val fullARDD = sc.parallelize(MatrixUtils.matrixToRowArray(aMat), numParts).cache()
    val bRDD = sc.parallelize(MatrixUtils.matrixToRowArray(bMat), numParts).cache()

    val wsq = new BlockWeightedLeastSquaresEstimator(blockSize, numIter, lambda,
      mixtureWeight).fit(fullARDD, bRDD)

    // Stack the per-block solutions back into the full model matrix.
    val finalFullModel = wsq.xs.reduceLeft { (a, b) =>
      DenseMatrix.vertcat(a, b)
    }

    // norm(gradient) should be close to zero
    val gradient = computeGradient(fullARDD, bRDD, lambda, mixtureWeight, finalFullModel,
      wsq.bOpt.get)
    println("norm of gradient is " + norm(gradient.toDenseVector))
    assert(Stats.aboutEq(norm(gradient.toDenseVector), 0, 1e-2))
  }
}
| etrain/keystone-old | src/test/scala/nodes/learning/BlockWeightedLeastSquaresSuite.scala | Scala | apache-2.0 | 2,996 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util.Properties
import kafka.utils.{Utils, ZKConfig}
import kafka.message.Message
/**
 * Configuration settings for the kafka server.
 *
 * All values are read eagerly from the given [[Properties]] at construction
 * time; ZooKeeper-related settings come from the [[ZKConfig]] superclass.
 * Settings without a default (`brokerid`, `log.dir`) are presumably mandatory
 * and fail construction when absent — TODO confirm against Utils.getInt/getString.
 */
class KafkaConfig(props: Properties) extends ZKConfig(props) {
  /* the port to listen and accept connections on */
  val port: Int = Utils.getInt(props, "port", 6667)

  /* hostname of broker. If not set, will pick up from the value returned from getLocalHost. If there are multiple interfaces getLocalHost may not be what you want. */
  val hostName: String = Utils.getString(props, "hostname", null)

  /* the broker id for this server — no default, must be configured */
  val brokerId: Int = Utils.getInt(props, "brokerid")

  /* the SO_SNDBUFF buffer of the socket server sockets */
  val socketSendBuffer: Int = Utils.getInt(props, "socket.send.buffer", 100*1024)

  /* the SO_RCVBUFF buffer of the socket server sockets */
  val socketReceiveBuffer: Int = Utils.getInt(props, "socket.receive.buffer", 100*1024)

  /* the maximum number of bytes in a socket request */
  val maxSocketRequestSize: Int = Utils.getIntInRange(props, "max.socket.request.bytes", 100*1024*1024, (1, Int.MaxValue))

  /* the number of worker threads that the server uses for handling all client requests; defaults to the number of CPUs */
  val numThreads = Utils.getIntInRange(props, "num.threads", Runtime.getRuntime().availableProcessors, (1, Int.MaxValue))

  /* the interval in which to measure performance statistics */
  val monitoringPeriodSecs = Utils.getIntInRange(props, "monitoring.period.secs", 600, (1, Int.MaxValue))

  /* the default number of log partitions per topic */
  val numPartitions = Utils.getIntInRange(props, "num.partitions", 1, (1, Int.MaxValue))

  /* the directory in which the log data is kept — no default, must be configured */
  val logDir = Utils.getString(props, "log.dir")

  /* the maximum size of a single log file */
  val logFileSize = Utils.getIntInRange(props, "log.file.size", 1*1024*1024*1024, (Message.MinHeaderSize, Int.MaxValue))

  /* the number of messages accumulated on a log partition before messages are flushed to disk */
  val flushInterval = Utils.getIntInRange(props, "log.flush.interval", 500, (1, Int.MaxValue))

  /* the number of hours to keep a log file before deleting it */
  val logRetentionHours = Utils.getIntInRange(props, "log.retention.hours", 24 * 7, (1, Int.MaxValue))

  /* the maximum size of the log before deleting it; -1 disables size-based retention */
  val logRetentionSize = Utils.getLong(props, "log.retention.size", -1)

  /* the number of hours to keep a log file before deleting it for some specific topic */
  val logRetentionHoursMap = Utils.getTopicRentionHours(Utils.getString(props, "topic.log.retention.hours", ""))

  /* the frequency in minutes that the log cleaner checks whether any log is eligible for deletion */
  val logCleanupIntervalMinutes = Utils.getIntInRange(props, "log.cleanup.interval.mins", 10, (1, Int.MaxValue))

  /* enable zookeeper registration in the server */
  val enableZookeeper = Utils.getBoolean(props, "enable.zookeeper", true)

  /* the maximum time in ms that a message in selected topics is kept in memory before flushed to disk, e.g., topic1:3000,topic2: 6000 */
  val flushIntervalMap = Utils.getTopicFlushIntervals(Utils.getString(props, "topic.flush.intervals.ms", ""))

  /* the frequency in ms that the log flusher checks whether any log needs to be flushed to disk */
  val flushSchedulerThreadRate = Utils.getInt(props, "log.default.flush.scheduler.interval.ms", 3000)

  /* the maximum time in ms that a message in any topic is kept in memory before flushed to disk; defaults to the scheduler rate above */
  val defaultFlushIntervalMs = Utils.getInt(props, "log.default.flush.interval.ms", flushSchedulerThreadRate)

  /* the number of partitions for selected topics, e.g., topic1:8,topic2:16 */
  val topicPartitionsMap = Utils.getTopicPartitions(Utils.getString(props, "topic.partition.count.map", ""))
}
| tnachen/kafka | core/src/main/scala/kafka/server/KafkaConfig.scala | Scala | apache-2.0 | 4,658 |
package org.gbougeard.model.changes
/**
 * Payload for posting a review on a Gerrit change: corresponds to the
 * `ReviewInput` entity of the Gerrit REST API.
 */
// Fields: an optional cover message, label votes (label name -> value),
// inline comments grouped per file, and flags controlling label validation,
// draft handling and notification behavior.
case class ReviewInput(message: Option[String],
                       labels: Option[Map[String, String]],
                       comments: Option[List[CommentInput]],
                       strict_labels: Option[Boolean],
                       drafts: Option[String],
                       notif: Option[String])

object ReviewInput {
  import play.api.libs.json.Json
  import play.api.libs.functional.syntax._
  import CommentInput._

  // Play-JSON (de)serializer derived via macro from the case class;
  // relies on CommentInput's implicit format being in scope.
  implicit val format = Json.format[ReviewInput]
}
| gbougeard/gas | src/main/scala/org/gbougeard/model/changes/ReviewInput.scala | Scala | apache-2.0 | 686 |
package dispatch.github
import dispatch._
import Defaults._
import net.liftweb.json._
import java.util.Date
// Minimal projections of GitHub API JSON entities; field names match the
// JSON keys so lift-json can extract them directly.
case class GhContributor(id: Int, login: String, avatar_url: String)
// Author info embedded in a commit (name/date/email trio).
case class GhAuthorSummary(name:String, date:Date, email:String)
case class GhAuthor(avatar_url: String, url: String, login: String, gravatar_id: String, id: Int)
// `type` is backquote-free here only because it is a plain field name in JSON.
case class GhUser(id: Int, login: String, name: String, email: String, avatar_url: String, `type`: String)
case class GhOwner(id:Int, login: String)
object GhUser {
  implicit val formats = DefaultFormats

  /**
   * Fetches the user associated with the given OAuth token
   * (GET /user?access_token=...), asynchronously.
   */
  def get_authenticated_user(access_token: String) = {
    val request = (GitHub.api_host / "user").secure <<? Map("access_token" -> access_token)
    Http(request OK as.lift.Json).map(_.extract[GhUser])
  }

  /**
   * Fetches the public profile of the named user (GET /users/:username),
   * asynchronously.
   */
  def get_user(username: String) = {
    val request = (GitHub.api_host / "users" / username).secure
    Http(request OK as.lift.Json).map(_.extract[GhUser])
  }
}
package mesosphere.marathon.api.v2
import java.net.{ HttpURLConnection, URL, URLConnection }
import javax.validation.ConstraintViolation
import mesosphere.marathon.api.v2.BeanValidation._
import mesosphere.marathon.api.v2.json.{ V2AppDefinition, V2AppUpdate, V2Group, V2GroupUpdate }
import mesosphere.marathon.state._
import scala.reflect.ClassTag
import scala.util.{ Failure, Success, Try }
/**
* Specific validation helper for specific model classes.
*/
object ModelValidation {
//scalastyle:off null
/**
* This regular expression is used to validate each path segment of an ID.
*
* If you change this, please also change "pathType" in AppDefinition.json and
* notify the maintainers of the DCOS CLI.
*/
private[this] val ID_PATH_SEGMENT_PATTERN =
"^(([a-z0-9]|[a-z0-9][a-z0-9\\\\-]*[a-z0-9])\\\\.)*([a-z0-9]|[a-z0-9][a-z0-9\\\\-]*[a-z0-9])|(\\\\.|\\\\.\\\\.)$".r
// TODO: Re-implement this method on its own terms
/** Validates a core-model [[Group]] by first converting it to its API representation. */
def checkGroup(
    group: Group,
    path: String,
    parent: PathId): Iterable[ConstraintViolation[V2Group]] =
  checkGroup(V2Group(group), path, parent)
/**
 * Validates a [[V2Group]] and, recursively, everything inside it: id syntax,
 * parent/child path consistency, all apps, all sub-groups, app/group name
 * clashes, and dependency cycles.
 *
 * @param group  the group to validate
 * @param path   prefix used when reporting violation property paths
 * @param parent canonical path of the enclosing group (empty for the root)
 */
def checkGroup(
    group: V2Group,
    path: String = "",
    parent: PathId = PathId.empty): Iterable[ConstraintViolation[V2Group]] = {
  // All nested ids are resolved relative to this group's canonical path.
  val base = group.id.canonicalPath(parent)
  validate(group,
    idErrors(group, base, group.id, "id"),
    checkPath(group, parent, group.id, path + "id"),
    checkApps(group.apps, path + "apps", base),
    checkGroups(group.groups, path + "groups", base),
    noAppsAndGroupsWithSameName(group, path + "apps", group.apps, group.groups),
    noCyclicDependencies(group, path + "dependencies")
  )
}
/**
 * Validates a [[V2GroupUpdate]]. Three mutually exclusive shapes are accepted:
 *  - a null group is rejected outright;
 *  - a "pointer" update carrying `version` or `scaleBy` must carry nothing else;
 *  - otherwise a structural update is validated recursively (id, apps, groups).
 *
 * @param needsId whether the update must specify an id
 */
def checkGroupUpdate(
    group: V2GroupUpdate,
    needsId: Boolean,
    path: String = "",
    parent: PathId = PathId.empty): Iterable[ConstraintViolation[V2GroupUpdate]] = {
  if (group == null) {
    Seq(violation(group, null, "", "Given group is empty!"))
  }
  else if (group.version.isDefined || group.scaleBy.isDefined) {
    // version/scaleBy updates must not be combined with any other property.
    validate(group,
      defined(
        group,
        group.version,
        "version",
        (b: V2GroupUpdate, t: Timestamp, i: String) => hasOnlyOneDefinedOption(b, t, i),
        mandatory = false
      ),
      defined(
        group,
        group.scaleBy,
        "scaleBy",
        (b: V2GroupUpdate, t: Double, i: String) => hasOnlyOneDefinedOption(b, t, i),
        mandatory = false
      )
    )
  }
  else {
    // Structural update: resolve nested ids against this update's canonical
    // path (falling back to the parent when no id is given).
    val base = group.id.map(_.canonicalPath(parent)).getOrElse(parent)
    validate(group,
      defined(
        group,
        group.id,
        "id",
        (b: V2GroupUpdate, p: PathId, i: String) => idErrors(b, group.groupId.canonicalPath(parent), p, i),
        mandatory = needsId
      ),
      group.id.map(checkPath(group, parent, _, path + "id")).getOrElse(Nil),
      group.apps.map(checkApps(_, path + "apps", base)).getOrElse(Nil),
      group.groups.map(checkGroupUpdates(_, path + "groups", base)).getOrElse(Nil)
    )
  }
}
/** Fails unless exactly one of the product's Option fields is defined. */
private[this] def hasOnlyOneDefinedOption[A <: Product: ClassTag, B](product: A, prop: B, path: String) = {
  // Count how many constructor fields of the case class are defined options.
  val definedCount = product.productIterator.count(_.isInstanceOf[Some[_]])
  isTrue(product, prop, path, "not allowed in conjunction with other properties", definedCount == 1)
}
/**
 * Fails when an app and a sub-group inside the same group share an identifier,
 * listing every clashing id in the message.
 */
def noAppsAndGroupsWithSameName[T](
    t: T,
    path: String,
    apps: Set[V2AppDefinition],
    groups: Set[V2Group])(implicit ct: ClassTag[T]): Iterable[ConstraintViolation[_]] = {
  val groupIds = groups.map(_.id)
  val clashingIds = apps.map(_.id).intersect(groupIds)
  isTrue(
    t,
    apps,
    path,
    s"Groups and Applications may not have the same identifier: ${clashingIds.mkString(", ")}",
    clashingIds.isEmpty
  )
}

/**
 * Fails when the group's dependency graph (after conversion to the core
 * model) contains a cycle.
 */
def noCyclicDependencies(
    group: V2Group,
    path: String): Iterable[ConstraintViolation[V2Group]] = {
  isTrue(
    group,
    group.dependencies,
    path,
    "Dependency graph has cyclic dependencies",
    group.toGroup().hasNonCyclicDependencies)
}
/** Validates every group update, reporting violations under `path[i].`. */
def checkGroupUpdates(
    groups: Iterable[V2GroupUpdate],
    path: String = "res",
    parent: PathId = PathId.empty): Iterable[ConstraintViolation[V2GroupUpdate]] =
  for {
    (group, pos) <- groups.zipWithIndex
    error <- checkGroupUpdate(group, needsId = true, s"$path[$pos].", parent)
  } yield error

/** Validates every group, reporting violations under `path[i].`. */
def checkGroups(
    groups: Iterable[V2Group],
    path: String = "res",
    parent: PathId = PathId.empty): Iterable[ConstraintViolation[V2Group]] =
  for {
    (group, pos) <- groups.zipWithIndex
    error <- checkGroup(group, s"$path[$pos].", parent)
  } yield error

/** Validates every app update, reporting violations under `path[i].`. */
def checkUpdates(
    apps: Iterable[V2AppUpdate],
    path: String = "res"): Iterable[ConstraintViolation[V2AppUpdate]] =
  for {
    (app, pos) <- apps.zipWithIndex
    error <- checkUpdate(app, s"$path[$pos].")
  } yield error
/**
 * Fails unless `child`, resolved against `parent`, is a direct child of
 * `parent`. No check is performed when `parent` is the root (empty) path.
 */
def checkPath[T: ClassTag](
    t: T,
    parent: PathId,
    child: PathId,
    path: String): Iterable[ConstraintViolation[T]] = {
  val isDirectChild = child.canonicalPath(parent).parent == parent
  if (parent == PathId.empty || isDirectChild) Nil
  else List(violation(t, child, path, s"identifier $child is not child of $parent. Hint: use relative paths."))
}

/** Validates every app definition, reporting violations under `path[i].`. */
def checkApps(
    apps: Iterable[V2AppDefinition],
    path: String = "res",
    parent: PathId = PathId.empty): Iterable[ConstraintViolation[V2AppDefinition]] =
  for {
    (app, pos) <- apps.zipWithIndex
    error <- checkAppConstraints(app, parent, s"$path[$pos].")
  } yield error
/**
 * Validates an app update: id syntax (when present or required), upgrade
 * strategy bounds, dependency ids, and resolvability of store URLs.
 *
 * @param needsId whether the update must carry an id
 */
def checkUpdate(
    app: V2AppUpdate,
    path: String = "",
    needsId: Boolean = false): Iterable[ConstraintViolation[V2AppUpdate]] = {
  validate(app,
    defined(
      app,
      app.id,
      "id",
      (b: V2AppUpdate, p: PathId, i: String) => idErrors(b, PathId.empty, p, i),
      needsId
    ),
    defined(
      app,
      app.upgradeStrategy,
      "upgradeStrategy",
      (b: V2AppUpdate, p: UpgradeStrategy, i: String) => upgradeStrategyErrors(b, p, i)
    ),
    defined(
      app,
      app.dependencies,
      "dependencies",
      (b: V2AppUpdate, p: Set[PathId], i: String) => dependencyErrors(b, PathId.empty, p, i)
    ),
    defined(
      app,
      app.storeUrls,
      "storeUrls",
      // NOTE(review): performs blocking network I/O per URL.
      (b: V2AppUpdate, p: Seq[String], i: String) => urlsCanBeResolved(b, p, i)
    )
  )
}
/**
 * Validates a single app definition: id syntax, parent/child path
 * consistency, upgrade strategy bounds, dependency ids, and store URLs
 * (the last one performs blocking network I/O).
 */
def checkAppConstraints(app: V2AppDefinition, parent: PathId,
                        path: String = ""): Iterable[ConstraintViolation[V2AppDefinition]] =
  validate(app,
    idErrors(app, parent, app.id, path + "id"),
    checkPath(app, parent, app.id, path + "id"),
    upgradeStrategyErrors(app, app.upgradeStrategy, path + "upgradeStrategy"),
    dependencyErrors(app, parent, app.dependencies, path + "dependencies"),
    urlsCanBeResolved(app, app.storeUrls, path + "storeUrls")
  )
/**
 * Checks that every URL can actually be resolved, reporting a violation per
 * unresolvable one. HTTP(S) URLs are probed with a HEAD request; any other
 * scheme is probed by opening (and closing) its stream.
 *
 * NOTE: performs blocking network I/O for each URL.
 */
def urlsCanBeResolved[T: ClassTag](t: T, urls: Seq[String], path: String): Iterable[ConstraintViolation[T]] = {
  def urlIsValid(url: String): Boolean = Try {
    new URL(url).openConnection() match {
      case http: HttpURLConnection =>
        try {
          http.setRequestMethod("HEAD")
          http.getResponseCode == HttpURLConnection.HTTP_OK
        } finally http.disconnect() // fixed: the connection was never released before
      case other: URLConnection =>
        // Opening the stream proves the URL resolves; close it so the
        // underlying connection is not leaked (it was left open before).
        val in = other.getInputStream
        try true finally in.close()
    }
  }.getOrElse(false)

  urls.toList
    .zipWithIndex
    .collect {
      case (url, pos) if !urlIsValid(url) =>
        violation(t, urls, s"$path[$pos]", s"Can not resolve url $url")
    }
}
/**
 * Validates a path id: every segment must match the allowed pattern, and the
 * id must be resolvable (canonicalizable) against `base`.
 *
 * Bug fix: the result of the `Try(...) match` below was previously computed
 * and then DISCARDED (the method unconditionally returned `errors`), so the
 * "canonical path can not be computed" violation was never reported. The
 * match result is now the return value.
 */
def idErrors[T: ClassTag](t: T, base: PathId, id: PathId, path: String): Iterable[ConstraintViolation[T]] = {
  val valid = id.path.forall(ID_PATH_SEGMENT_PATTERN.pattern.matcher(_).matches())
  val errors =
    if (!valid)
      List(
        violation(
          t,
          id,
          path,
          "path contains invalid characters (allowed: lowercase letters, digits, hyphens, \\".\\", \\"..\\")"
        )
      )
    else Nil

  Try(id.canonicalPath(base)) match {
    case Success(_) => errors
    case Failure(_) => violation(t, id, path, s"canonical path can not be computed for $id") :: errors
  }
}
/** Validates every dependency id against `base`, reporting under `path[i]`. */
def dependencyErrors[T: ClassTag](
    t: T,
    base: PathId,
    set: Set[PathId],
    path: String): Iterable[ConstraintViolation[T]] =
  set.zipWithIndex.flatMap{ case (id, pos) => idErrors(t, base, id, s"$path[$pos]") }

/**
 * Checks that both upgrade-strategy capacities lie in [0, 1].
 *
 * Note: at most ONE violation is produced per call — a minimumHealthCapacity
 * violation takes precedence over a maximumOverCapacity one (the Option is
 * implicitly widened to the declared Iterable return type).
 */
def upgradeStrategyErrors[T: ClassTag](
    t: T,
    upgradeStrategy: UpgradeStrategy,
    path: String): Iterable[ConstraintViolation[T]] = {
  if (upgradeStrategy.minimumHealthCapacity < 0) Some("is less than 0")
  else if (upgradeStrategy.minimumHealthCapacity > 1) Some("is greater than 1")
  else None
}.map { violation(t, upgradeStrategy, path + ".minimumHealthCapacity", _) }
  .orElse({
    if (upgradeStrategy.maximumOverCapacity < 0) Some("is less than 0")
    else if (upgradeStrategy.maximumOverCapacity > 1) Some("is greater than 1")
    else None
  }.map { violation(t, upgradeStrategy, path + ".maximumOverCapacity", _) })
/**
 * Returns a non-empty list of validation messages if the given app definition
 * will conflict with existing apps.
 *
 * Only apps with container service ports are checked (the Option is
 * flattened away when no ports are defined).
 */
def checkAppConflicts(app: AppDefinition, root: Group): Seq[String] = {
  app.containerServicePorts.toSeq.flatMap { servicePorts =>
    checkServicePortConflicts(app.id, servicePorts, root)
  }
}

/**
 * Returns a non-empty list of validations messages if the given app definition has service ports
 * that will conflict with service ports in other applications.
 *
 * Does not compare the app definition's service ports with the same deployed app's service ports, as app updates
 * may simply restate the existing service ports.
 */
private def checkServicePortConflicts(appId: PathId, requestedServicePorts: Seq[Int],
                                      root: Group): Seq[String] = {
  for {
    existingApp <- root.transitiveApps.toList
    if existingApp.id != appId // in case of an update, do not compare the app against itself
    existingServicePort <- existingApp.portMappings.toList.flatten.map(_.servicePort)
    if existingServicePort != 0 // ignore zero ports, which will be chosen at random
    if requestedServicePorts contains existingServicePort
  } yield s"Requested service port $existingServicePort conflicts with a service port in app ${existingApp.id}"
}
| ayouwei/marathon | src/main/scala/mesosphere/marathon/api/v2/ModelValidation.scala | Scala | apache-2.0 | 10,597 |
package mesosphere.marathon
package stream
import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
import akka.stream.scaladsl.Source
import akka.util.ByteString
import mesosphere.AkkaUnitTest
import org.apache.commons.compress.archivers.tar.{TarArchiveInputStream, TarArchiveOutputStream, TarConstants}
import scala.annotation.tailrec
/**
 * Round-trip tests for TarFlow's streaming tar writer/reader, including
 * cross-checks against Apache Commons Compress for both directions.
 */
class TarFlowTest extends AkkaUnitTest {
  import TarFlow._

  val sampleData = ByteString("daterbase")

  // One entry with a > 100-character name (exercises long-name handling),
  // one small entry, and one multi-KiB entry.
  val tarEntries = List(
    TarEntry(
      "1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/long.txt",
      ByteString("> 100 char file name. Look out!")
    ),
    TarEntry("/path/to/file-2.txt", sampleData),
    TarEntry("/path/to/file.txt", (1 to 1024).map(_ => sampleData).reduce(_ ++ _))
  )

  // Reference tar bytes produced once by our own writer.
  val tarredBytes =
    Source(tarEntries).via(TarFlow.writer).runReduce(_ ++ _).futureValue

  // Re-read the same bytes in several chunk sizes so headers and entry data
  // end up straddling chunk boundaries in different ways.
  List(1, 13, 512, Int.MaxValue).foreach { n =>
    s"it can roundtrip tar and untar with ${n} sized byte boundaries" in {
      val untarredItems =
        Source(tarredBytes.grouped(n).toList). // we vary the chunk sizes to make sure we handle boundaries properly
          via(TarFlow.reader).runFold(List.empty[TarEntry]) { _ :+ _ }.futureValue
      untarredItems.map(_.header.getName) shouldBe tarEntries.map(_.header.getName)
      untarredItems.map(_.data) shouldBe tarEntries.map(_.data)
    }
  }

  "it generates valid tar data that can be read by apache commons TarArchiveInputStream" in {
    val bytes = new ByteArrayInputStream(tarredBytes.toArray)
    val tar = new TarArchiveInputStream(bytes)

    // Drain the commons-compress stream entry by entry.
    @tailrec def readEntries(tar: TarArchiveInputStream, entries: List[TarEntry] = Nil): List[TarEntry] = {
      val entry = tar.getNextTarEntry
      if (entry == null)
        entries
      else {
        val data = Array.ofDim[Byte](entry.getSize.toInt)
        tar.read(data)
        readEntries(tar, entries :+ TarEntry(entry, ByteString(data)))
      }
    }
    val untarredItems = readEntries(tar)
    untarredItems.map(_.header.getName) shouldBe tarEntries.map(_.header.getName)
    untarredItems.map(_.data) shouldBe tarEntries.map(_.data)
  }

  "it reads tar data generated by apache commons TarArchiveOutputStream" in {
    val bos = new ByteArrayOutputStream
    val tarOut = new TarArchiveOutputStream(bos)
    // Old-GNU format matches the long-file-name handling used above.
    tarOut.setLongFileMode(TarConstants.FORMAT_OLDGNU)
    tarEntries.foreach { entry =>
      tarOut.putArchiveEntry(entry.header)
      tarOut.write(entry.data.toArray, 0, entry.data.size)
      tarOut.closeArchiveEntry()
    }
    tarOut.finish()

    val untarredItems =
      Source(ByteString(bos.toByteArray()).grouped(1024).toList).via(TarFlow.reader).runFold(List.empty[TarEntry]) { _ :+ _ }.futureValue
    untarredItems.map(_.header.getName) shouldBe tarEntries.map(_.header.getName)
    untarredItems.map(_.data) shouldBe tarEntries.map(_.data)
  }
}
| mesosphere/marathon | src/test/scala/mesosphere/marathon/stream/TarFlowTest.scala | Scala | apache-2.0 | 2,904 |
/*
* Copyright (c) 2016. Fengguo (Hugo) Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.cit.intellij.jawa.lang.psi.api.base
import com.intellij.psi.PsiParameterList
import org.argus.cit.intellij.jawa.lang.psi.JawaPsiElement
/**
* @author <a href="mailto:fgwei521@gmail.com">Fengguo Wei</a>
*/
// Marker trait: a Jawa PSI element that also fulfils IntelliJ's
// PsiParameterList contract; declares no additional members of its own.
trait JawaParameterList extends JawaPsiElement with PsiParameterList {
}
/*
* Shadowsocks - A shadowsocks client for Android
* Copyright (C) 2014 <max.c.lv@gmail.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
* ___====-_ _-====___
* _--^^^#####// \\\\#####^^^--_
* _-^##########// ( ) \\\\##########^-_
* -############// |\\^^/| \\\\############-
* _/############// (@::@) \\\\############\\_
* /#############(( \\\\// ))#############\\
* -###############\\\\ (oo) //###############-
* -#################\\\\ / VV \\ //#################-
* -###################\\\\/ \\//###################-
* _#/|##########/\\######( /\\ )######/\\##########|\\#_
* |/ |#/\\#/\\#/\\/ \\#/\\##\\ | | /##/\\#/ \\/\\#/\\#/\\#| \\|
* ` |/ V V ` V \\#\\| | | |/#/ V ' V V \\| '
* ` ` ` ` / | | | | \\ ' ' ' '
* ( | | | | )
* __\\ | | | | /__
* (vvv(VVV)(VVV)vvv)
*
* HERE BE DRAGONS
*
*/
package com.github.shadowsocks.database
import android.content.SharedPreferences
import android.util.Log
import com.github.shadowsocks._
import com.github.shadowsocks.utils.Key
/**
 * Persists [[Profile]] rows through the ORMLite DAO exposed by [[DBHelper]] and
 * mirrors the currently loaded profile into [[SharedPreferences]] so that the
 * rest of the app can read the active configuration.
 *
 * DAO failures are never thrown to callers: they are logged and reported via
 * the Boolean / Option results instead.
 */
class ProfileManager(settings: SharedPreferences, dbHelper: DBHelper) {

  /**
   * Inserts `profile`, or updates the existing row with the same id.
   * @return true on success, false when the DAO operation failed.
   */
  def createOrUpdateProfile(profile: Profile): Boolean = {
    try {
      dbHelper.profileDao.createOrUpdate(profile)
      true
    } catch {
      case ex: Exception =>
        // Fix: was logged under the stale tag "addProfile", which pointed at a
        // method that no longer exists and made log triage misleading.
        Log.e(Shadowsocks.TAG, "createOrUpdateProfile", ex)
        false
    }
  }

  /**
   * Updates an existing profile row.
   * @return true on success, false when the DAO operation failed.
   */
  def updateProfile(profile: Profile): Boolean = {
    try {
      dbHelper.profileDao.update(profile)
      true
    } catch {
      case ex: Exception =>
        // Fix: was logged as "addProfile" — the wrong method name.
        Log.e(Shadowsocks.TAG, "updateProfile", ex)
        false
    }
  }

  /**
   * Looks up a profile by id.
   * @return None when the row does not exist or the DAO query failed.
   */
  def getProfile(id: Int): Option[Profile] = {
    try {
      dbHelper.profileDao.queryForId(id) match {
        case profile: Profile => Option(profile)
        case _ => None
      }
    } catch {
      case ex: Exception =>
        Log.e(Shadowsocks.TAG, "getProfile", ex)
        None
    }
  }

  /**
   * Deletes the profile with the given id.
   * @return true on success, false when the DAO operation failed.
   */
  def delProfile(id: Int): Boolean = {
    try {
      dbHelper.profileDao.deleteById(id)
      true
    } catch {
      case ex: Exception =>
        Log.e(Shadowsocks.TAG, "delProfile", ex)
        false
    }
  }

  /**
   * @return every stored profile, or None when the DAO query failed.
   */
  def getAllProfiles: Option[List[Profile]] = {
    try {
      import scala.collection.JavaConversions._
      Option(dbHelper.profileDao.queryForAll().toList)
    } catch {
      case ex: Exception =>
        Log.e(Shadowsocks.TAG, "getAllProfiles", ex)
        None
    }
  }

  /** Saves the preference-backed profile, then loads profile `id` into preferences. */
  def reload(id: Int): Profile = {
    save()
    load(id)
  }

  /**
   * Loads profile `id` from the database — creating and persisting a fresh
   * default profile when it does not exist — and copies every field into
   * [[SharedPreferences]].
   */
  def load(id: Int): Profile = {
    val profile = getProfile(id) getOrElse {
      val p = new Profile()
      createOrUpdateProfile(p)
      p
    }
    val edit = settings.edit()
    edit.putBoolean(Key.isGlobalProxy, profile.global)
    edit.putBoolean(Key.isGFWList, profile.chnroute)
    edit.putBoolean(Key.isBypassApps, profile.bypass)
    edit.putBoolean(Key.isTrafficStat, profile.traffic)
    edit.putBoolean(Key.isUdpDns, profile.udpdns)
    edit.putBoolean(Key.isAuth, profile.auth)
    edit.putBoolean(Key.isIpv6, profile.ipv6)
    edit.putString(Key.profileName, profile.name)
    edit.putString(Key.proxy, profile.host)
    edit.putString(Key.sitekey, profile.password)
    edit.putString(Key.encMethod, profile.method)
    edit.putString(Key.remotePort, profile.remotePort.toString)
    edit.putString(Key.localPort, profile.localPort.toString)
    edit.putString(Key.proxied, profile.individual)
    edit.putInt(Key.profileId, profile.id)
    edit.putString(Key.route, profile.route)
    edit.commit()
    profile
  }

  /**
   * Rebuilds a [[Profile]] from the values currently held in preferences.
   * Port strings that fail to parse fall back to 1984.
   */
  private def loadFromPreferences: Profile = {
    val profile = new Profile()
    profile.id = settings.getInt(Key.profileId, -1)
    profile.global = settings.getBoolean(Key.isGlobalProxy, false)
    profile.chnroute = settings.getBoolean(Key.isGFWList, false)
    profile.bypass = settings.getBoolean(Key.isBypassApps, false)
    profile.traffic = settings.getBoolean(Key.isTrafficStat, false)
    profile.udpdns = settings.getBoolean(Key.isUdpDns, false)
    profile.auth = settings.getBoolean(Key.isAuth, false)
    profile.ipv6 = settings.getBoolean(Key.isIpv6, false)
    profile.name = settings.getString(Key.profileName, "default")
    profile.host = settings.getString(Key.proxy, "127.0.0.1")
    profile.password = settings.getString(Key.sitekey, "default")
    profile.method = settings.getString(Key.encMethod, "table")
    profile.route = settings.getString(Key.route, "all")
    profile.remotePort = try {
      Integer.valueOf(settings.getString(Key.remotePort, "1984"))
    } catch {
      case ex: NumberFormatException =>
        1984
    }
    profile.localPort = try {
      Integer.valueOf(settings.getString(Key.localPort, "1984"))
    } catch {
      case ex: NumberFormatException =>
        1984
    }
    profile.individual = settings.getString(Key.proxied, "")
    profile
  }

  /** Persists (updates) the preference-backed profile and returns it. */
  def save(): Profile = {
    val profile = loadFromPreferences
    updateProfile(profile)
    profile
  }

  /** Persists (insert-or-update) the preference-backed profile and returns it. */
  def create(): Profile = {
    val profile = loadFromPreferences
    createOrUpdateProfile(profile)
    profile
  }
}
| ray26/shadowsocks-android | src/main/scala/com/github/shadowsocks/database/ProfileManager.scala | Scala | gpl-3.0 | 5,947 |
/*
* Licensed to The Apereo Foundation under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* The Apereo Foundation licenses this file to you under the Apache License,
* Version 2.0, (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.tle.core.settings
import com.tle.legacy.LegacyGuice
import io.circe.parser._
import io.circe.syntax._
import io.circe.{Decoder, Encoder}
/** Helpers for reading and writing JSON-encoded per-user preferences. */
object UserPrefs {

  /**
   * Reads preference `key` and decodes it as an `A`.
   * @return None when the preference is absent, is not valid JSON, or does
   *         not decode to an `A`.
   */
  def jsonPref[A](key: String)(implicit d: Decoder[A]): Option[A] =
    for {
      raw     <- Option(LegacyGuice.userPreferenceService.getPreference(key))
      json    <- parse(raw).toOption
      decoded <- d.decodeJson(json).toOption
    } yield decoded

  /** Serialises `a` as pretty-printed JSON and stores it under key `k`. */
  def setJsonPref[A: Encoder](k: String, a: A): Unit =
    LegacyGuice.userPreferenceService.setPreference(k, a.asJson.spaces2)
}
| equella/Equella | Source/Plugins/Core/com.equella.core/scalasrc/com/tle/core/settings/UserPrefs.scala | Scala | apache-2.0 | 1,319 |
package webui
import cz.kamenitxan.jakon.core.model.Page
import org.openqa.selenium.{By, WebDriver}
import org.scalatest.DoNotDiscover
import test.TestBase
import scala.jdk.CollectionConverters._
/**
* Created by TPa on 2019-03-19.
*/
@DoNotDiscover
class ObjectControllerTest extends TestBase {

  /** Heuristic "did the admin page render" check: the navbar brand element exists. */
  private def checkPageLoad(driver: WebDriver) = {
    driver.findElements(By.cssSelector(".navbar-brand")).get(0) != null
  }

  // Submits the reset-password form and expects the Czech confirmation message.
  test("resetPassword") { f =>
    val url = adminHost + "resetPassword"
    f.driver.get(url)
    //assert(checkPageLoad(f.driver))
    val emailInput = f.driver.findElement(By.cssSelector("input[type=email]"))
    emailInput.sendKeys("admin@admin.cz")
    val submit = f.driver.findElement(By.cssSelector(".btn.btn-lg.btn-success"))
    submit.click()
    assert(f.driver.getPageSource.contains("Na váš email byl odeslán email pro změnu hesla"))
  }

  // Profile page renders and survives a round-trip submit.
  test("user settings") { f =>
    val url = adminHost + "profile"
    f.driver.get(url)
    assert(checkPageLoad(f.driver))
    assert(f.driver.getPageSource.contains("admin"))
    val submit = f.driver.findElement(By.cssSelector(".btn.btn-primary"))
    submit.click()
    assert(checkPageLoad(f.driver))
    assert(f.driver.getPageSource.contains("admin"))
  }

  // A fully-populated filter query string must not break the list view.
  test("test list filter") { f =>
    val url = adminHost + "object/JakonUser?filter_id=2&filter_published=true&filter_enabled=&filter_lastName=Admin&filter_firstName=Adm*&filter_emailConfirmed=&filter_email=&filter_username="
    f.driver.get(url)
    assert(checkPageLoad(f.driver))
  }

  // Creates three pages, moves the first one down, and verifies it now sits
  // in the second row with order 2.
  test("test move") { f =>
    implicit val driver: WebDriver = f.driver
    val p1 = new Page()
    p1.title = "page1"
    p1.create()
    val p2 = new Page()
    p2.title = "page2"
    p2.create()
    val p3 = new Page()
    p3.title = "page3"
    p3.create()
    val url = adminHost + "object/Page"
    f.driver.get(url)
    assert(checkPageLoad(f.driver))
    val objects = getAdminTableRows()
    assert(objects.nonEmpty)
    val first = objects.head
    val firstElements = first.findElements(By.cssSelector("td")).asScala
    val firstId = firstElements.head.getText
    f.driver.get(adminHost + s"object/moveDown/Page/$firstId?currentOrder=1")
    f.driver.get(url)
    assert(checkPageLoad(f.driver))
    val objects2 = findElements("#dataTables-example tbody tr")
    assert(objects2.nonEmpty)
    val second = objects2.tail.head
    val secondElements = second.findElements(By.cssSelector("td")).asScala
    val secondId = secondElements.head.getText
    val secondOrder = secondElements.tail.head.getText
    assert(firstId == secondId)
    assert("2" == secondOrder)
  }

  // Moving an entity type that has no ordering must surface a site message.
  test("test move not ordered") { f =>
    implicit val driver: WebDriver = f.driver
    f.driver.get(adminHost + "object/moveDown/JakonUser/4?currentOrder=1")
    checkSiteMessage("OBJECT_NOT_ORDERED")
  }

  // Deletes the first row and verifies the list no longer starts with that id.
  test("delete item") { f =>
    implicit val driver: WebDriver = f.driver
    val url = adminHost + "object/Page"
    f.driver.get(url)
    assert(checkPageLoad(f.driver))
    val objects = getAdminTableRows()
    assert(objects.nonEmpty)
    val first = objects.head
    val firstElements = first.findElements(By.cssSelector("td")).asScala
    val firstId = firstElements.head.getText
    f.driver.get(adminHost + s"object/delete/Page/$firstId")
    f.driver.get(url)
    assert(checkPageLoad(f.driver))
    val objects2 = findElements("#dataTables-example tbody tr")
    assert(objects2.nonEmpty)
    val second = objects2.head
    val secondElements = second.findElements(By.cssSelector("td")).asScala
    val secondId = secondElements.head.getText
    assert(firstId != secondId)
  }

  // Requesting an unknown entity type should land on a 404 page.
  test("test list non existent") { f =>
    val url = adminHost + "object/invalid"
    f.driver.get(url)
    assert(checkPageLoad(f.driver))
    // Fix: the contains(...) result was previously discarded, so this test
    // asserted nothing about the 404 content.
    assert(f.driver.getPageSource.contains("404"))
  }

  test("TestObject List") { f =>
    f.driver.get(adminHost + "object/TestObject")
    assert(checkPageLoad(f.driver))
  }

  // Submitting the empty create form must yield a row with a non-zero id.
  test("TestObject create") { f =>
    implicit val driver: WebDriver = f.driver
    f.driver.get(adminHost + "object/create/TestObject")
    assert(checkPageLoad(f.driver))
    val submits = findElements("form input[type=\\"submit\\"]")
    assert(submits.nonEmpty)
    submits.head.click()
    assert(checkPageLoad(f.driver))
    val id = findElements("tbody td").headOption
    assert(id.nonEmpty)
    assert(id.get.getText != "0")
  }
}
| kamenitxan/Jakon | modules/backend/src/test/scala/webui/ObjectControllerTest.scala | Scala | bsd-3-clause | 4,219 |
package com.magmanics.licensing.ui.content.customer
import com.magmanics.licensing.model.Customer
import com.magmanics.vaadin.component.TableWithCheckboxes
import scala.collection.JavaConverters._
/**
* @author James Baxter - 05/09/2014.
*/
class CustomerSelectionTable(customers: Set[Customer]) extends TableWithCheckboxes {

  // Configure a multi-select, immediately-updating table showing 5 rows.
  setSelectable(true)
  setMultiSelect(true)
  setImmediate(true)
  setPageLength(5)

  // Single "customer" column holding the customer's name.
  override def containerProperties = List(
    ("customer", classOf[String], "", "Customer", null, null)
  )

  // One row per customer, alphabetically by name; the row id is the name itself.
  override def itemRows = {
    val ordered = customers.toList.sortBy(_.name)
    ordered.map(customer => Array(customer.name) -> customer.name)
  }

  // Marks the given customers as selected by setting the table's value to
  // the (Java) set of their names.
  def setCustomers(customers: Set[Customer]) =
    setValue(customers.map(_.name).asJava)
}
| manicmonkey/licensing | Licensing-UI-Vaadin/src/main/scala/com/magmanics/licensing/ui/content/customer/CustomerSelectionTable.scala | Scala | gpl-3.0 | 761 |
package pl.touk.nussknacker.engine.flink.util.transformer.aggregate
import org.apache.flink.annotation.PublicEvolving
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.windowing.assigners.{EventTimeSessionWindows, TumblingEventTimeWindows}
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.triggers.EventTimeTrigger
import org.apache.flink.streaming.api.windowing.windows.TimeWindow
import pl.touk.nussknacker.engine.api.context.ContextTransformation
import pl.touk.nussknacker.engine.api.{Context => NkContext, _}
import pl.touk.nussknacker.engine.flink.api.compat.ExplicitUidInOperatorsSupport
import pl.touk.nussknacker.engine.flink.api.process._
import pl.touk.nussknacker.engine.flink.util.transformer.aggregate.triggers.{ClosingEndEventTrigger, FireOnEachEvent}
import pl.touk.nussknacker.engine.flink.util.richflink._
import pl.touk.nussknacker.engine.api.NodeId
import pl.touk.nussknacker.engine.util.KeyedValue
import scala.collection.immutable.SortedMap
import scala.concurrent.duration.Duration
//TODO: think about merging these with TransformStateFunction and/or PreviousValueFunction
@PublicEvolving // will be only one version for each method, with explicitUidInStatefulOperators = true
// in the future - see ExplicitUidInOperatorsCompat for more info
object transformers {

  /**
   * Sliding-window aggregation with default settings: results are emitted as
   * events arrive (`emitWhenEventLeft = false`) and uids follow the planner
   * default. Delegates to the full overload below.
   */
  def slidingTransformer(groupBy: LazyParameter[CharSequence],
                         aggregateBy: LazyParameter[AnyRef],
                         aggregator: Aggregator,
                         windowLength: Duration,
                         variableName: String)(implicit nodeId: NodeId): ContextTransformation =
    slidingTransformer(groupBy, aggregateBy, aggregator, windowLength, variableName, emitWhenEventLeft = false,
      ExplicitUidInOperatorsSupport.defaultExplicitUidInStatefulOperators)

  /**
   * Sliding-window aggregation keyed by `groupBy`, folding `aggregateBy`
   * values with `aggregator` over a window of `windowLength`.
   *
   * When `emitWhenEventLeft` is true, results are additionally emitted when an
   * event leaves the window (and the variable context is not forwarded).
   */
  def slidingTransformer(groupBy: LazyParameter[CharSequence],
                         aggregateBy: LazyParameter[AnyRef],
                         aggregator: Aggregator,
                         windowLength: Duration,
                         variableName: String,
                         emitWhenEventLeft: Boolean,
                         explicitUidInStatefulOperators: FlinkCustomNodeContext => Boolean
                        )(implicit nodeId: NodeId): ContextTransformation = {
    ContextTransformation.definedBy(aggregator.toContextTransformation(variableName, !emitWhenEventLeft, aggregateBy))
      .implementedBy(
        FlinkCustomStreamTransformation((start: DataStream[NkContext], ctx: FlinkCustomNodeContext) => {
          implicit val fctx: FlinkCustomNodeContext = ctx
          val typeInfos = AggregatorTypeInformations(ctx, aggregator, aggregateBy)

          // Two process-function variants differing only in when they emit.
          val aggregatorFunction =
            if (emitWhenEventLeft)
              new EmitWhenEventLeftAggregatorFunction[SortedMap](aggregator, windowLength.toMillis, nodeId, aggregateBy.returnType, typeInfos.storedTypeInfo, fctx.convertToEngineRuntimeContext)
            else
              new AggregatorFunction[SortedMap](aggregator, windowLength.toMillis, nodeId, aggregateBy.returnType, typeInfos.storedTypeInfo, fctx.convertToEngineRuntimeContext)

          start
            .groupByWithValue(groupBy, aggregateBy)
            .process(aggregatorFunction)
            .setUidWithName(ctx, explicitUidInStatefulOperators)
        }))
  }

  /** Tumbling-window aggregation with default trigger (emit on window end) and uids. */
  def tumblingTransformer(groupBy: LazyParameter[CharSequence],
                          aggregateBy: LazyParameter[AnyRef],
                          aggregator: Aggregator,
                          windowLength: Duration,
                          variableName: String)(implicit nodeId: NodeId): ContextTransformation = {
    tumblingTransformer(groupBy, aggregateBy, aggregator, windowLength, variableName, TumblingWindowTrigger.OnEnd,
      ExplicitUidInOperatorsSupport.defaultExplicitUidInStatefulOperators)
  }

  /**
   * Tumbling-window aggregation. The `tumblingWindowTrigger` selects between
   * emitting on each event, on window end, or on window end with an extra
   * window emitted for keys that received no data.
   */
  def tumblingTransformer(groupBy: LazyParameter[CharSequence],
                          aggregateBy: LazyParameter[AnyRef],
                          aggregator: Aggregator,
                          windowLength: Duration,
                          variableName: String,
                          tumblingWindowTrigger: TumblingWindowTrigger,
                          explicitUidInStatefulOperators: FlinkCustomNodeContext => Boolean
                         )(implicit nodeId: NodeId): ContextTransformation =
    // TODO: to be consistent with sliding window we should probably forward context of variables for tumblingWindowTrigger == TumblingWindowTrigger.OnEvent
    ContextTransformation.definedBy(aggregator.toContextTransformation(variableName, emitContext = false, aggregateBy))
      .implementedBy(
        FlinkCustomStreamTransformation((start: DataStream[NkContext], ctx: FlinkCustomNodeContext) => {
          implicit val fctx: FlinkCustomNodeContext = ctx
          val typeInfos = AggregatorTypeInformations(ctx, aggregator, aggregateBy)

          val keyedStream = start
            .groupByWithValue(groupBy, aggregateBy)
          (tumblingWindowTrigger match {
            case TumblingWindowTrigger.OnEvent =>
              keyedStream
                .window(TumblingEventTimeWindows.of(Time.milliseconds(windowLength.toMillis)))
                .trigger(FireOnEachEvent[AnyRef, TimeWindow](EventTimeTrigger.create()))
                .aggregate(
                  new UnwrappingAggregateFunction[AnyRef](aggregator, aggregateBy.returnType, identity),
                  EnrichingWithKeyFunction(fctx))(typeInfos.storedTypeInfo, typeInfos.returnTypeInfo, typeInfos.returnedValueTypeInfo)
            case TumblingWindowTrigger.OnEnd =>
              keyedStream
                .window(TumblingEventTimeWindows.of(Time.milliseconds(windowLength.toMillis)))
                .aggregate(
                  new UnwrappingAggregateFunction[AnyRef](aggregator, aggregateBy.returnType, identity),
                  EnrichingWithKeyFunction(fctx))(typeInfos.storedTypeInfo, typeInfos.returnTypeInfo, typeInfos.returnedValueTypeInfo)
            case TumblingWindowTrigger.OnEndWithExtraWindow =>
              keyedStream
                //TODO: alignment??
                .process(new EmitExtraWindowWhenNoDataTumblingAggregatorFunction[SortedMap](aggregator, windowLength.toMillis, nodeId, aggregateBy.returnType, typeInfos.storedTypeInfo, fctx.convertToEngineRuntimeContext))
          }).setUidWithName(ctx, explicitUidInStatefulOperators)
        }))

  //Experimental component, API may change in the future
  /**
   * Session-window aggregation: the session closes after `sessionTimeout` of
   * inactivity or immediately when `endSessionCondition` evaluates to true.
   */
  def sessionWindowTransformer(groupBy: LazyParameter[CharSequence],
                               aggregateBy: LazyParameter[AnyRef],
                               aggregator: Aggregator,
                               sessionTimeout: Duration,
                               endSessionCondition: LazyParameter[java.lang.Boolean],
                               sessionWindowTrigger: SessionWindowTrigger,
                               variableName: String
                              )(implicit nodeId: NodeId): ContextTransformation =
    // TODO: to be consistent with sliding window we should probably forward context of variables for tumblingWindowTrigger == SessionWindowTrigger.OnEnd
    ContextTransformation.definedBy(aggregator.toContextTransformation(variableName, emitContext = false, aggregateBy))
      .implementedBy(
        FlinkCustomStreamTransformation((start: DataStream[NkContext], ctx: FlinkCustomNodeContext) => {
          implicit val fctx: FlinkCustomNodeContext = ctx
          val typeInfos = AggregatorTypeInformations(ctx, aggregator, aggregateBy)

          // The base trigger closes the window when the paired Boolean
          // (endSessionCondition) is true; OnEvent additionally fires per event.
          val baseTrigger =
            ClosingEndEventTrigger[ValueWithContext[KeyedValue[String, (AnyRef, java.lang.Boolean)]], TimeWindow](EventTimeTrigger.create(), _.value.value._2)
          val trigger = sessionWindowTrigger match {
            case SessionWindowTrigger.OnEvent => FireOnEachEvent(baseTrigger)
            case SessionWindowTrigger.OnEnd => baseTrigger
          }
          start
            .groupByWithValue(groupBy, aggregateBy.product(endSessionCondition))
            .window(EventTimeSessionWindows.withGap(Time.milliseconds(sessionTimeout.toMillis)))
            .trigger(trigger)
            .aggregate(
              new UnwrappingAggregateFunction[(AnyRef, java.lang.Boolean)](aggregator, aggregateBy.returnType, _._1),
              EnrichingWithKeyFunction(fctx))(typeInfos.storedTypeInfo, typeInfos.returnTypeInfo, typeInfos.returnedValueTypeInfo)
            .setUidWithName(ctx, ExplicitUidInOperatorsSupport.defaultExplicitUidInStatefulOperators)
        }))

  /**
   * Lazily derives the TypeInformation needed by the aggregate operators
   * (stored accumulator type, result type, and result-with-context type)
   * from the aggregator and the aggregated expression's return type.
   */
  case class AggregatorTypeInformations(ctx: FlinkCustomNodeContext, aggregator: Aggregator, aggregateBy: LazyParameter[AnyRef]) {

    private val detection = ctx.typeInformationDetection
    // NOTE(review): assumes the validation context is always a Left here —
    // .left.get will throw otherwise; confirm this invariant upstream.
    private val vctx = ctx.validationContext.left.get

    // Output/stored types were already validated during node compilation, so a
    // failure here indicates a planner bug rather than a user error.
    private val returnType = aggregator.computeOutputType(aggregateBy.returnType)
      .valueOr(e => throw new IllegalArgumentException(s"Validation error should have happened, got $e"))
    private val storedType = aggregator.computeStoredType(aggregateBy.returnType)
      .valueOr(e => throw new IllegalArgumentException(s"Validation error should have happened, got $e"))

    lazy val storedTypeInfo: TypeInformation[AnyRef] = detection.forType(storedType)
    lazy val returnTypeInfo: TypeInformation[AnyRef] = detection.forType(returnType)
    lazy val contextTypeInfo: TypeInformation[NkContext] = detection.forContext(vctx)
    lazy val returnedValueTypeInfo: TypeInformation[ValueWithContext[AnyRef]] = detection.forValueWithContext(vctx, returnType)
  }
}
| TouK/nussknacker | engine/flink/components/base/src/main/scala/pl/touk/nussknacker/engine/flink/util/transformer/aggregate/transformers.scala | Scala | apache-2.0 | 9,747 |
import controllers.PowerLevelsController
import org.junit.runner._
import org.specs2.mutable._
import org.specs2.runner._
import play.api.test.Helpers._
import play.api.test._
@RunWith(classOf[JUnitRunner])
// specs2 spec for the power-levels endpoint, run inside a fake Play
// application with a mocked GitHub backend (WithApplicationAndMockGitHub).
class PowerLevelsControllerSpec extends Specification {
  PowerLevelsController.getClass.getName should {
    "show hello's power level" in new WithApplicationAndMockGitHub {
      // Route the request through the real router rather than calling the
      // controller action directly.
      val hello = route(FakeRequest(GET, "/powerLevels/hello")).get
      status(hello) must equalTo(OK)
      // TODO: JSON matcher required
      contentType(hello) must beSome.which(_ == "application/json")
      contentAsString(hello) must contain("userName")
    }
  }
}
| saturday06/github-scouter.net | test/PowerLevelsControllerSpec.scala | Scala | mit | 660 |
package engine
import org.lwjgl.LWJGLException
import org.lwjgl.opengl.Display
import org.lwjgl.opengl.DisplayMode
import org.lwjgl.opengl.GLContext
import org.lwjgl.opengl.ContextCapabilities
import org.lwjgl.input.Keyboard
import org.lwjgl.input.Mouse
import org.lwjgl.util.Timer
import scala.collection.mutable.HashMap
import utils._
/** Per-frame callbacks driven by [[Kernel.mainLoop]]. */
trait FrameListener {
  /** Draws the current frame. Called once per loop iteration, after move(). */
  def render ()
  /** Advances the simulation. `elapsedTime` is the LWJGL timer reading since
    * the last reset — presumably seconds; confirm against LWJGL Timer docs. */
  def move (elapsedTime: Float)
}
/**
 * Application kernel: owns the LWJGL display/input lifecycle and the main
 * game loop. Initialization order (Display before Keyboard/Mouse before GL
 * capability checks) is mandated by LWJGL, so the call sequence in
 * `initialize` must not be reordered.
 */
object Kernel {
  // Loop flag: set to false (externally) to terminate mainLoop.
  var loop = true
  var width: Int = 0
  var height: Int = 0
  val timer = new Timer

  /** Creates the window, input devices and renderer. `args` is currently unused. */
  def initialize (args: Array[String], width: Int, height: Int) {
    this.width = width
    this.height = height
    val caption = "Scalissor !";
    try {
      Display.setDisplayMode(new DisplayMode(width, height))
      Display.setTitle(caption)
      Display.create
      Keyboard.create
      Mouse.create
      Mouse.setGrabbed(true)
      Keyboard.enableRepeatEvents(false)
      checkCapabilities(GLContext.getCapabilities())
      Renderer.initialize(width, height)
    } catch {
      case e:LWJGLException => System.out.println("LWJGL Initialization error : ")
        e.printStackTrace
    }
  }

  /** Logs which OpenGL versions the current context supports. */
  def checkCapabilities (cap : ContextCapabilities) {
    Console.println("Context supported OpenGL versions")
    Console.println("GL 1.1 : " + cap.OpenGL11)
    Console.println("GL 1.2 : " + cap.OpenGL12)
    Console.println("GL 1.3 : " + cap.OpenGL13)
    Console.println("GL 1.4 : " + cap.OpenGL14)
    Console.println("GL 1.5 : " + cap.OpenGL15)
    Console.println("GL 2.0 : " + cap.OpenGL20)
    Console.println("GL 2.1 : " + cap.OpenGL21)
    Console.println("GL 3.0 : " + cap.OpenGL30)
    Console.println("GL 3.1 : " + cap.OpenGL31)
    Console.println("GL 3.2 : " + cap.OpenGL32)
    Console.println("GL 3.3 : " + cap.OpenGL33)
    Console.println("GL 4.0 : " + cap.OpenGL40)
    Console.println("GL 4.1 : " + cap.OpenGL41)
  }

  // Main loop. The frame listener is created via the factory only after
  // initialization is complete, so its constructor may safely touch GL state.
  // Loop body per frame: tick timer -> handle events -> move -> render -> swap.
  def mainLoop(createFrameListener: Unit => FrameListener) = {
    val frameListener = createFrameListener()
    timer.reset
    while(loop) {
      Timer.tick
      EventsManager.handleEvents
      Renderer.preRender
      frameListener.move(timer.getTime)
      //FIXME: should we let the timer run instead ?
      timer.reset
      frameListener.render
      Renderer.postRender
      Display.update
    }
    // Tear down native resources once the loop flag is cleared.
    Keyboard.destroy
    Display.destroy
  }
}
| julienr/scalamd5 | src/main/scala/engine/kernel.scala | Scala | bsd-2-clause | 2,442 |
package de.htwg.zeta.server.model.modelValidator.generator
import de.htwg.zeta.common.models.project.concept.Concept
import de.htwg.zeta.server.model.modelValidator.generator.consistencyRules.ConsistencyRules
/**
 * Checks a [[Concept]] against every registered consistency rule and reports
 * the first rule that fails.
 */
class ConceptConsistencyChecker(concept: Concept) {

  /**
   * Runs the consistency rules in order.
   *
   * @return a valid result when every rule passes; otherwise an invalid result
   *         carrying the first failing rule. Like the original fold-with-flag
   *         implementation, rules after the first failure are not evaluated —
   *         `find` simply expresses that short-circuit directly.
   */
  def checkConsistency(): ConsistencyCheckResult =
    ConsistencyRules.rules.find(rule => !rule.check(concept)) match {
      case Some(failedRule) =>
        ConsistencyCheckResult().copy(valid = false, failedRule = Some(failedRule))
      case None =>
        ConsistencyCheckResult()
    }
}
| Zeta-Project/zeta | api/server/app/de/htwg/zeta/server/model/modelValidator/generator/ConceptConsistencyChecker.scala | Scala | bsd-2-clause | 578 |
package gapt.examples
import gapt.expr._
import gapt.proofs.Sequent
import gapt.proofs.context.Context
import gapt.proofs.context.update.InductiveType
import gapt.proofs.context.update.{ PrimitiveRecursiveFunction => PrimRecFun }
import gapt.proofs.context.update.ProofNameDeclaration
import gapt.proofs.context.update.Sort
import gapt.proofs.gaptic._
object FirstSchema4 extends TacticsProof {
  //Type: natural numbers (inductive) and an uninterpreted individual sort i.
  ctx += InductiveType( "nat", hoc"0 : nat", hoc"s : nat>nat" )
  ctx += Sort( "i" )
  //Term Constants
  ctx += hoc"z:i"
  ctx += hoc"g:i>i"
  ctx += hoc"f:i>nat"
  ctx += hoc"max:i>i>i"
  //Predicate Constants
  ctx += hoc"E: nat>nat>o"
  ctx += hoc"LEQ: i>i>o"
  ctx += hoc"LE: i>i>o"
  //Theory Axioms
  ctx += "efef" -> hos"E(f(p),n),E(f(q),n) :- E(f(p),f(q))"
  ctx += "leq_refl" -> hos" :- LEQ(p,p)"
  ctx += "leq_g" -> hos"LEQ(g(p),q):- LE(p,q)"
  ctx += "leq_max1" -> hos"LEQ(max(a, b), c) :- LEQ(a, c)"
  ctx += "leq_max2" -> hos"LEQ(max(a, b), c) :- LEQ(b, c)"
  //Proof Names
  ctx += hoc"omega: nat>nat"
  ctx += hoc"phi: nat>nat"
  ctx += hoc"chi: nat>i>nat"
  //Primitive Recursive Definitions
  ctx += PrimRecFun( hoc"POR:nat>i>o", "POR 0 x = E (f x) 0", "POR (s y) x = (E (f x) (s y) ∨ POR y x)" )
  //Proof End Sequents (one per proof name)
  val esOmega = Sequent( Seq( hof"!x POR(n,x)" ), Seq( hof"?p?q (LE(p,q) & E(f(p),f(q)))" ) )
  val esphi = Sequent( Seq( hof"!x?y (LEQ(x,y) & POR(n,y) )" ), Seq( hof"?p?q (LE(p,q) & E(f(p),f(q)))" ) )
  val eschi = Sequent( Seq( hof" POR(n,a) " ), Seq( hof"POR(n,a)" ) )
  //Proof Declarations: associate each proof name (with its parameters) with its end sequent.
  ctx += ProofNameDeclaration( le"omega n", esOmega )
  ctx += ProofNameDeclaration( le"phi n", esphi )
  ctx += ProofNameDeclaration( le"chi n a", eschi )
  //We start by proving the basecase of chi. At this point it is safe to assume that each proof schema component
  // has at most one stepcase and one basecase. The system can handle more, but that algorithms associated with
  // proof schema only work for the above mentioned case.
  // To work with the base case we need to take the sequent from the proof name declaration and instantiate
  // it in the proper way, i.e. n-> 0 and a-> a
  val esChiBc = Sequent( Seq( "Ant_0" -> hof" POR(0,a)" ), Seq( "Suc_0" -> hof"POR(0,a)" ) )
  //notice that we associated a name with each formula this type. The propose of this naming is to
  //refer to them in the tactic proof. we construct a tactic proof with the follow command. Try to run the following
  //in gapt by typing FirstSchema.chiBc after loading the file and see what happens:
  val chiBc = Lemma( esChiBc ) {
    unfold( "POR" ) atMost 1 in "Suc_0"
  }
  //You should get the following:
  /*
gapt> FirstSchema.chiBc
gapt.proofs.gaptic.QedFailureException: Proof not completed. There are still 1 open sub goals:
Ant_0: POR(0, a)
:-
Suc_0: E(f(a), 0)
at gapt.proofs.gaptic.LemmaMacros$.finish(language.scala:45)
at gapt.proofs.gaptic.LemmaMacros$.finishLemma(language.scala:55)
... 28 elided
   */
  //The Tactic unfold( "POR" ) atMost 1 in "Suc_0" unfolds the PR symbol "POR" at most one time
  // in the formula "Suc_0". If it is not unfoldable than it does not, otherwise it unfolds it once
  //notice that it tells us that there is still an open goal which we must close to prove the lemma.
  //go to FirstSchema5.scala to get the next step
}
| gapt/gapt | examples/schema/Schema Tutorial/FirstSchema4.scala | Scala | gpl-3.0 | 3,309 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.physical.stream
import org.apache.flink.table.api.config.ExecutionConfigOptions
import org.apache.flink.table.planner.calcite.{FlinkContext, FlinkTypeFactory}
import org.apache.flink.table.planner.plan.`trait`.{FlinkRelDistribution, FlinkRelDistributionTraitDef, ModifyKindSetTrait, UpdateKindTrait}
import org.apache.flink.table.planner.plan.metadata.FlinkRelMetadataQuery
import org.apache.flink.table.planner.plan.nodes.FlinkConventions
import org.apache.flink.table.planner.plan.nodes.physical.stream._
import org.apache.flink.table.planner.plan.rules.physical.FlinkExpandConversionRule._
import org.apache.flink.table.planner.plan.utils.{AggregateUtil, ChangelogPlanUtils}
import org.apache.flink.table.planner.utils.AggregatePhaseStrategy
import org.apache.flink.table.planner.utils.TableConfigUtils.getAggPhaseStrategy
import org.apache.calcite.plan.RelOptRule.{any, operand}
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall}
import org.apache.calcite.rel.RelNode
import java.util
/**
* Rule that matches [[StreamPhysicalGroupAggregate]] on [[StreamPhysicalExchange]]
* with the following condition:
* 1. mini-batch is enabled in given TableConfig,
* 2. two-phase aggregation is enabled in given TableConfig,
* 3. all aggregate functions are mergeable,
* 4. the input of exchange does not satisfy the shuffle distribution,
*
* and converts them to
* {{{
* StreamPhysicalGlobalGroupAggregate
* +- StreamPhysicalExchange
* +- StreamPhysicalLocalGroupAggregate
* +- input of exchange
* }}}
*/
class TwoStageOptimizedAggregateRule extends RelOptRule(
  operand(classOf[StreamPhysicalGroupAggregate],
    operand(classOf[StreamPhysicalExchange],
      operand(classOf[RelNode], any))),
  "TwoStageOptimizedAggregateRule") {

  /**
   * The rule fires only when mini-batch and two-phase aggregation are enabled,
   * every aggregate function supports partial merge, and the exchange's input
   * is not already hash-distributed on the grouping keys (otherwise the local
   * aggregate would bring no benefit).
   */
  override def matches(call: RelOptRuleCall): Boolean = {
    val tableConfig = call.getPlanner.getContext.unwrap(classOf[FlinkContext]).getTableConfig
    val agg: StreamPhysicalGroupAggregate = call.rel(0)
    val realInput: RelNode = call.rel(2)

    // Retraction handling depends on whether the input stream is insert-only.
    val needRetraction = !ChangelogPlanUtils.isInsertOnly(
      realInput.asInstanceOf[StreamPhysicalRel])
    val fmq = FlinkRelMetadataQuery.reuseOrCreate(call.getMetadataQuery)
    val monotonicity = fmq.getRelModifiedMonotonicity(agg)
    val needRetractionArray = AggregateUtil.deriveAggCallNeedRetractions(
      agg.grouping.length, agg.aggCalls, needRetraction, monotonicity)
    val aggInfoList = AggregateUtil.transformToStreamAggregateInfoList(
      FlinkTypeFactory.toLogicalRowType( agg.getInput.getRowType),
      agg.aggCalls,
      needRetractionArray,
      needRetraction,
      isStateBackendDataViews = true)

    val isMiniBatchEnabled = tableConfig.getConfiguration.getBoolean(
      ExecutionConfigOptions.TABLE_EXEC_MINIBATCH_ENABLED)
    val isTwoPhaseEnabled = getAggPhaseStrategy(tableConfig) != AggregatePhaseStrategy.ONE_PHASE

    isMiniBatchEnabled && isTwoPhaseEnabled &&
      AggregateUtil.doAllSupportPartialMerge(aggInfoList.aggInfos) &&
      !isInputSatisfyRequiredDistribution(realInput, agg.grouping)
  }

  /** True when `input` is already distributed by a hash on `keys`. */
  private def isInputSatisfyRequiredDistribution(input: RelNode, keys: Array[Int]): Boolean = {
    val requiredDistribution = createDistribution(keys)
    val inputDistribution = input.getTraitSet.getTrait(FlinkRelDistributionTraitDef.INSTANCE)
    inputDistribution.satisfies(requiredDistribution)
  }

  /**
   * Rewrites agg(exchange(input)) into global-agg(exchange(local-agg(input))):
   * the local aggregate pre-aggregates on the input's current distribution,
   * the exchange shuffles the partial results, and the global aggregate merges.
   */
  override def onMatch(call: RelOptRuleCall): Unit = {
    val originalAgg: StreamPhysicalGroupAggregate = call.rel(0)
    val realInput: RelNode = call.rel(2)
    val needRetraction = !ChangelogPlanUtils.isInsertOnly(
      realInput.asInstanceOf[StreamPhysicalRel])
    val fmq = FlinkRelMetadataQuery.reuseOrCreate(call.getMetadataQuery)
    val monotonicity = fmq.getRelModifiedMonotonicity(originalAgg)
    val aggCallNeedRetractions = AggregateUtil.deriveAggCallNeedRetractions(
      originalAgg.grouping.length, originalAgg.aggCalls, needRetraction, monotonicity)

    // local agg shouldn't produce insert only messages
    val localAggTraitSet = realInput.getTraitSet
      .plus(ModifyKindSetTrait.INSERT_ONLY)
      .plus(UpdateKindTrait.NONE)
    val localHashAgg = new StreamPhysicalLocalGroupAggregate(
      originalAgg.getCluster,
      localAggTraitSet,
      realInput,
      originalAgg.grouping,
      originalAgg.aggCalls,
      aggCallNeedRetractions,
      needRetraction,
      originalAgg.partialFinalType)

    // grouping keys is forwarded by local agg, use indices instead of groupings
    val globalGrouping = originalAgg.grouping.indices.toArray
    val globalDistribution = createDistribution(globalGrouping)
    // create exchange if needed
    val newInput = satisfyDistribution(
      FlinkConventions.STREAM_PHYSICAL, localHashAgg, globalDistribution)
    val globalAggProvidedTraitSet = originalAgg.getTraitSet

    val globalAgg = new StreamPhysicalGlobalGroupAggregate(
      originalAgg.getCluster,
      globalAggProvidedTraitSet,
      newInput,
      originalAgg.getRowType,
      globalGrouping,
      originalAgg.aggCalls,
      aggCallNeedRetractions,
      realInput.getRowType,
      needRetraction,
      originalAgg.partialFinalType)

    call.transformTo(globalAgg)
  }

  /** Hash distribution on `keys`, or SINGLETON for a global (keyless) aggregate. */
  private def createDistribution(keys: Array[Int]): FlinkRelDistribution = {
    if (keys.nonEmpty) {
      val fields = new util.ArrayList[Integer]()
      keys.foreach(fields.add(_))
      FlinkRelDistribution.hash(fields)
    } else {
      FlinkRelDistribution.SINGLETON
    }
  }
}
object TwoStageOptimizedAggregateRule {
  // Shared singleton registered with the planner's rule set.
  val INSTANCE: RelOptRule = new TwoStageOptimizedAggregateRule
}
| apache/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/stream/TwoStageOptimizedAggregateRule.scala | Scala | apache-2.0 | 6,465 |
package fpinscala.datastructures
/** Singly linked list ADT, covariant in its element type `A`. */
sealed trait List[+A]
// `List` data type, parameterized on a type, `A`
/** The empty list; typed `List[Nothing]` so it is a subtype of every `List[A]`. */
case object Nil extends List[Nothing]
// A `List` data constructor representing the empty list
/* Another data constructor, representing nonempty lists. Note that `tail` is another `List[A]`,
which may be `Nil` or another `Cons`.
 */
case class Cons[+A](head: A, tail: List[A]) extends List[A]
object List {
// `List` companion object. Contains functions for creating and working with lists.
/** Sums a list of integers by structural recursion (not tail-recursive). */
def sum(ints: List[Int]): Int = ints match {
  case Nil => 0 // The sum of the empty list is 0.
  case Cons(x, xs) => x + sum(xs) // The sum of a list starting with `x` is `x` plus the sum of the rest of the list.
}

/** Multiplies a list of doubles; short-circuits to 0.0 on the first 0.0 seen. */
def product(ds: List[Double]): Double = ds match {
  case Nil => 1.0 // multiplicative identity for the empty list
  case Cons(0.0, _) => 0.0
  case Cons(x, xs) => x * product(xs)
}
def apply[A](as: A*): List[A] = // Variadic function syntax
if (as.isEmpty) Nil
else Cons(as.head, apply(as.tail: _*))
val x = List(1, 2, 3, 4, 5) match {
case Cons(x, Cons(2, Cons(4, _))) => x
case Nil => 42
case Cons(x, Cons(y, Cons(3, Cons(4, _)))) => x + y
case Cons(h, t) => h + sum(t)
case _ => 101
}
def append[A](a1: List[A], a2: List[A]): List[A] =
a1 match {
case Nil => a2
case Cons(h, t) => Cons(h, append(t, a2))
}
def foldRight[A, B](as: List[A], z: B)(f: (A, B) => B): B = // Utility functions
as match {
case Nil => z
case Cons(x, xs) => f(x, foldRight(xs, z)(f))
}
def sum2(ns: List[Int]) =
foldRight(ns, 0)((x, y) => x + y)
def product2(ns: List[Double]) =
foldRight(ns, 1.0)(_ * _) // `_ * _` is more concise notation for `(x,y) => x * y`; see sidebar
def tail[A](l: List[A]): List[A] =
l match {
case Cons(_, t) => t
case _ => Nil
}
def setHead[A](l: List[A], h: A): List[A] =
l match {
case Cons(_, t) => Cons(h, t)
case _ => Nil
}
def drop[A](l: List[A], n: Int): List[A] =
l match {
case Cons(_, t) if (n > 0) => drop(t, n - 1)
case _ => l
}
def dropWhile[A](l: List[A], f: A => Boolean): List[A] =
l match {
case Cons(h, t) if (f(h)) => dropWhile(t, f)
case _ => l
}
def init[A](l: List[A]): List[A] =
l match {
case Cons(h, Nil) => Nil
case Cons(h, t) => Cons(h, init(t))
case _ => Nil
}
def length[A](l: List[A]): Int =
foldRight(l, 0)((_, n) => n + 1)
def foldLeft[A, B](l: List[A], z: B)(f: (B, A) => B): B =
l match {
case Nil => z
case Cons(h, t) => foldLeft(t, f(z, h))(f)
}
def sumViaFoldLeft(l: List[Int]) = foldLeft(l, 0)(_ + _)
def productViaFoldLeft(l: List[Int]) = foldLeft(l, 1)(_ * _)
def lengthViaFoldLeft[A](l: List[A]) = foldLeft[A, Int](l, 0)((l, _) => l + 1)
def reverse[A](l: List[A]) = foldLeft(l, Nil: List[A])((l, v) => Cons(v, l))
def foldRightViaFoldLeft[A, B](as: List[A], z: B)(f: (A, B) => B): B =
foldLeft(reverse(as), z)((a, b) => f(b, a))
def map[A, B](as: List[A])(f: A => B): List[B] =
List.foldRight(as, Nil: List[B])( (h,t) => Cons(f(h),t))
def appendViaFolding[A](l: List[A], k: List[A]): List[A] =
List.foldRight(l, k)(Cons(_, _))
def flatten[A](l: List[List[A]]): List[A] =
List.foldRight(l, Nil: List[A])(append)
def plusone(l: List[Int]): List[Int] =
List.foldRight(l, Nil: List[Int])((h, t) => Cons(h + 1, t))
def mapToString(l: List[Double]) =
List.foldRight(l, Nil: List[String])((h, t) => Cons(h.toString, t))
def filter[A](l: List[A])(f: A => Boolean): List[A] =
List.foldRight(l, Nil: List[A])( (h,t) => if (f(h)) Cons(h,t) else t )
def flatMap[A,B](as:List[A])(f: A => List[B]): List[B] =
List.flatten(List.map(as)(f))
def filterViaFlatmap[A](l: List[A])(f: A => Boolean): List[A] =
List.flatMap(l)(i => if (f(i)) List(i) else Nil )
def addLists(l: List[Int], r:List[Int]) : List[Int] =
(l,r) match {
case (Cons(hl, tl), Cons(hr, tr)) => Cons(hl + hr, addLists(tl, tr))
case _ => Nil
}
def zipWith[A,B,C]( l: List[A], r: List[B] ) (f: (A,B) => C ): List[C] =
(l,r) match {
case (Cons(hl, tl), Cons(hr, tr)) => Cons(f(hl,hr), zipWith(tl,tr)(f))
case _ => Nil
}
@annotation.tailrec
def startsWith[A](l:List[A], sub:List[A]):Boolean =
(l, sub) match {
case (Cons(hl,_), Cons(hr,_)) if(hl != hr) => false
case (Cons(_,tl), Cons(_,tr)) => startsWith(tl,tr)
case _ => true
}
def hasSubsequence[A](sup: List[A], sub: List[A]): Boolean =
sup match {
case Nil => false
case Cons(_,t) => startsWith(sup, sub) || hasSubsequence(t, sub)
}
}
| Tillaert/fpinscala | exercises/src/main/scala/fpinscala/datastructures/List.scala | Scala | mit | 4,778 |
package com.eharmony.aloha.models.reg
import com.eharmony.aloha.dataset.density.Sparse
import com.eharmony.aloha.semantics.func.GenAggFunc
import scala.collection.{immutable => sci, mutable => scm}
/**
* A helper trait for sparse regression models with String keys. This trait exposes the ''constructFeatures''
* method which applies the ''featureFunctions'' to the input data and keeps track of missing features.
* @author R M Deak
*/
// TODO: merge this with com.eharmony.aloha.dataset.FeatureExtractorFunction
trait RegressionFeatures[-A] {

  /**
   * Parallel to featureFunctions.
   */
  protected[this] val featureNames: sci.IndexedSeq[String]

  /**
   * Parallel to featureNames. This is the sequence of functions that extract data from the input value.
   */
  protected[this] val featureFunctions: sci.IndexedSeq[GenAggFunc[A, Sparse]]

  /**
   * A threshold dictating how many missing features to allow before making the prediction fail. None means
   * the threshold is ∞. If, when mapping featureFunctions over the input, the resulting sequence
   * contains more than ''numMissingThreshold'' values that are empty Iterable values, then the
   * ''Features.missingOk'' value returned by ''constructFeatures'' will be '''false'''; otherwise, it will
   * be '''true'''.
   */
  protected[this] val numMissingThreshold: Option[Int]

  /**
   * Container for information returned by [[RegressionFeatures.constructFeatures]]. Note that as is,
   * this declaration will cause a compiler warning:
   *
   * "The outer reference in this type test cannot be checked at run time."
   *
   * This is a known issue and is a scala bug. See:
   * - https://issues.scala-lang.org/browse/SI-4440
   * - http://stackoverflow.com/questions/16450008/typesafe-swing-events-the-outer-reference-in-this-type-test-cannot-be-checked-a
   *
   * A solution that would remove the warning is to make the class not ''final''. Not doing this just to remove a
   * warning.
   * @tparam F type of the extracted feature container (e.g. IndexedSeq[Sparse]).
   * @param features features that were extracted from an input value.
   * @param missing map from feature name to variables in the feature function that were missing.
   * @param missingOk whether the number of missing features is within ''numMissingThreshold''.
   */
  protected[this] case class Features[F](features: F,
                                         missing: scm.Map[String, Seq[String]] = scm.Map.empty,
                                         missingOk: Boolean = true)

  /**
   * Extract the features from the raw data by mapping ''featureFunctions'' over the input. If
   * ''numMissingThreshold'' is not None and the number of resulting empty Iterables exceeds the
   * ''numMissingThreshold'' value, then the resulting ''Features.missingOk'' value is '''false''';
   * otherwise, it will be '''true'''. If ''Features.missingOk'' is '''false''', then go back and
   * check all feature functions for missing values and add findings to the ''Features.missing''
   * map. This ''Features.missing'' is a mapping from the feature specification to the list of
   * variable names whose associated values are missing from the input.
   *
   * @param a raw input data of the model input type.
   * @return a Features instance with the following:
   *           1 the transformed input vector
   *           1 the map of bad features to the missing values in the raw data that were needed to compute the feature
   *           1 whether the amount of missing data is acceptable to still continue
   */
  protected[this] final def constructFeatures(a: A): Features[IndexedSeq[Sparse]] = {
    // NOTE: Since this function is at the center of the regression process and will be called many times, it
    //       needs to be efficient. Therefore, it uses some things that are not idiomatic scala. For instance,
    //       there are mutable variables, while loops instead of for comprehensions or Range.foreach, etc.
    val missing = scm.Map.empty[String, Seq[String]]
    // featureNames and featureFunctions are documented as parallel, so one length serves both.
    val n = featureNames.size
    val f = new Array[Iterable[(String, Double)]](n)
    var i = 0
    while (i < n) {
      // Use concat based on
      //  http://stackoverflow.com/questions/5076740/whats-the-fastest-way-to-concatenate-two-strings-in-java
      //  http://stackoverflow.com/questions/47605/string-concatenation-concat-vs-operator
      // Each emitted key is prefixed with the feature's name.
      f(i) = featureFunctions(i)(a).map(p => (featureNames(i).concat(p._1), p._2))

      // If the feature is empty, it can't contribute to the inner product.  If it can't contribute to the
      // inner product but appears in the inner product specification, there are two possibilities:
      //
      //   1) The specifier doesn't care about performance and needlessly added a NoOp.
      //   2) The feature could emit a value because data necessary to do so is missing.
      //
      // In either case, we take those opportunities to check for missing data and assume the performance
      // hit is acceptable.
      if (f(i).isEmpty)
        missing += (featureFunctions(i).specification -> featureFunctions(i).accessorOutputMissing(a))

      i += 1
    }

    // forall on None yields true, so a None threshold never fails the check (threshold = ∞).
    val numMissingOk = numMissingThreshold.forall(t => missing.size <= t)

    // If we are going to err out, allow a linear scan (with repeated work so that we can get richer error
    // diagnostics.  Only include the values where the list of missing accessors variables is not empty.
    // This could have been done with a for comprehension but this is a little faster.
    if (!numMissingOk) {
      0 until n foreach { i =>
        val miss = featureFunctions(i).accessorOutputMissing(a)
        if (miss.nonEmpty)
          missing += featureFunctions(i).specification -> miss
      }
    }

    // WrappedArray.ofRef wraps the array without copying it.
    Features(new collection.mutable.WrappedArray.ofRef(f), missing, numMissingOk)
  }
}
| eHarmony/aloha | aloha-core/src/main/scala/com/eharmony/aloha/models/reg/RegressionFeatures.scala | Scala | mit | 6,025 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.util.concurrent.TimeUnit
import scala.concurrent.duration.Duration
import org.apache.commons.lang3.StringUtils
import org.apache.spark.annotation.InterfaceStability
import org.apache.spark.unsafe.types.CalendarInterval
/**
* A trigger that runs a query periodically based on the processing time. If `interval` is 0,
* the query will run as fast as possible.
*
* Scala Example:
* {{{
* df.writeStream.trigger(ProcessingTime("10 seconds"))
*
* import scala.concurrent.duration._
* df.writeStream.trigger(ProcessingTime(10.seconds))
* }}}
*
* Java Example:
* {{{
* df.writeStream.trigger(ProcessingTime.create("10 seconds"))
*
* import java.util.concurrent.TimeUnit
* df.writeStream.trigger(ProcessingTime.create(10, TimeUnit.SECONDS))
* }}}
*
* @since 2.0.0
*/
@InterfaceStability.Evolving
@deprecated("use Trigger.ProcessingTime(intervalMs)", "2.2.0")
case class ProcessingTime(intervalMs: Long) extends Trigger {
  // A negative period is meaningless; 0 means "run as fast as possible" (see class scaladoc).
  require(intervalMs >= 0, "the interval of trigger should not be negative")
}
/**
* Used to create [[ProcessingTime]] triggers for [[StreamingQuery]]s.
*
* @since 2.0.0
*/
@InterfaceStability.Evolving
@deprecated("use Trigger.ProcessingTime(intervalMs)", "2.2.0")
object ProcessingTime {

  /**
   * Create a [[ProcessingTime]]. If `interval` is 0, the query will run as fast as possible.
   *
   * Example:
   * {{{
   *   df.writeStream.trigger(ProcessingTime("10 seconds"))
   * }}}
   *
   * @since 2.0.0
   * @deprecated use Trigger.ProcessingTime(interval)
   */
  @deprecated("use Trigger.ProcessingTime(interval)", "2.2.0")
  def apply(interval: String): ProcessingTime = {
    if (StringUtils.isBlank(interval)) {
      throw new IllegalArgumentException(
        "interval cannot be null or blank.")
    }
    // Accept both the full "interval 10 seconds" form and the bare "10 seconds" form.
    val cal = if (interval.startsWith("interval")) {
      CalendarInterval.fromString(interval)
    } else {
      CalendarInterval.fromString("interval " + interval)
    }
    // A null result signals that the string could not be parsed as an interval.
    if (cal == null) {
      throw new IllegalArgumentException(s"Invalid interval: $interval")
    }
    // Months/years have no fixed millisecond length, so they cannot define a trigger period.
    if (cal.months > 0) {
      throw new IllegalArgumentException(s"Doesn't support month or year interval: $interval")
    }
    // Integer division truncates any sub-millisecond component.
    new ProcessingTime(cal.microseconds / 1000)
  }

  /**
   * Create a [[ProcessingTime]]. If `interval` is 0, the query will run as fast as possible.
   *
   * Example:
   * {{{
   *   import scala.concurrent.duration._
   *   df.writeStream.trigger(ProcessingTime(10.seconds))
   * }}}
   *
   * @since 2.0.0
   * @deprecated use Trigger.ProcessingTime(interval)
   */
  @deprecated("use Trigger.ProcessingTime(interval)", "2.2.0")
  def apply(interval: Duration): ProcessingTime = {
    new ProcessingTime(interval.toMillis)
  }

  /**
   * Create a [[ProcessingTime]]. If `interval` is 0, the query will run as fast as possible.
   * Java-friendly equivalent of `apply(String)`.
   *
   * Example:
   * {{{
   *   df.writeStream.trigger(ProcessingTime.create("10 seconds"))
   * }}}
   *
   * @since 2.0.0
   * @deprecated use Trigger.ProcessingTime(interval)
   */
  @deprecated("use Trigger.ProcessingTime(interval)", "2.2.0")
  def create(interval: String): ProcessingTime = {
    apply(interval)
  }

  /**
   * Create a [[ProcessingTime]]. If `interval` is 0, the query will run as fast as possible.
   * Java-friendly variant taking an explicit [[TimeUnit]].
   *
   * Example:
   * {{{
   *   import java.util.concurrent.TimeUnit
   *   df.writeStream.trigger(ProcessingTime.create(10, TimeUnit.SECONDS))
   * }}}
   *
   * @since 2.0.0
   * @deprecated use Trigger.ProcessingTime(interval, unit)
   */
  @deprecated("use Trigger.ProcessingTime(interval, unit)", "2.2.0")
  def create(interval: Long, unit: TimeUnit): ProcessingTime = {
    new ProcessingTime(unit.toMillis(interval))
  }
}
| sahilTakiar/spark | sql/core/src/main/scala/org/apache/spark/sql/streaming/ProcessingTime.scala | Scala | apache-2.0 | 4,523 |
package relational.attributes
import relational.comparissions._
import relational.comparissions.None
// DSL mixin providing SQL-style comparison operators that build Comparission AST nodes.
trait Comparable extends AttributeLike {
  // Aliases for equality.
  // NOTE(review): these delegate via `this == other`; `Any.==` is final, so the
  // by-name `==` below is an overload, not an override. The aliases are assumed
  // to resolve to that by-name overload — confirm against call sites/tests.
  def ===(other: => Any ) = this == other
  def ->(other: => Any ) = this == other
  def ==(other: => Any ) = newEquality(Equality.Equals, other)
  def !=(other: => Any ) = newEquality(Equality.Diferent, other)

  // Ordering comparisons.
  def <=(other: Any) = newEquality(Equality.LtE, other)
  def <(other: Any) = newEquality(Equality.Lt, other)
  def >=(other: Any) = newEquality(Equality.GtE, other)
  def >(other: Any) = newEquality(Equality.Gt, other)

  // SQL LIKE / NOT LIKE, with =~ / !~ shorthands.
  def =~(other: Any) = like(other)
  def like(other: Any) = newEquality(Equality.Like, other)
  def !~(other: Any) = notLike(other)
  def notLike(other: Any) = newEquality(Equality.NotLike, other)

  // Comparing against the sentinel comparissions.None collapses to None
  // (the comparison is dropped) instead of building an Equality node.
  protected def newEquality(kind: Equality.Comparission, other: Any): Comparission = other match {
    case None => None
    case _ => new Equality(kind, this, Attribute.wrap(other))
  }

  def in(list: Seq[Any]): Comparission = new In(this, list)
  def notIn(list: Seq[Any]): Comparission = new NotIn(this, list)
  def isNull: Comparission = new IsNull(this)
  def notNull: Comparission = new NotNull(this)
}
| mauricioszabo/relational-scala | src/main/scala/relational/attributes/Comparable.scala | Scala | artistic-2.0 | 1,186 |
package de.choffmeister.microserviceutils.auth.models
import java.security.MessageDigest
import java.time.Instant
import java.util.Base64
import de.choffmeister.microserviceutils.auth.utils.ConstantTimeCompare
/**
 * An issued authorization code plus the data needed to validate its exchange.
 *
 * @param code            the code value itself
 * @param state           optional client-supplied state associated with the request
 * @param scopes          scopes associated with this code
 * @param resourceOwnerId id of the resource owner (user) the code was issued for
 * @param clientId        id of the client the code was issued to
 * @param challenge       optional PKCE challenge bound to the code
 * @param redirectUri     redirect URI the code was issued against
 * @param expiresAt       instant after which the code is no longer valid
 * @param used            whether the code has already been consumed
 */
final case class AuthorizationCode(
  code: String,
  state: Option[String],
  scopes: Set[String],
  resourceOwnerId: String,
  clientId: String,
  challenge: Option[AuthorizationCodeChallenge],
  redirectUri: String,
  expiresAt: Instant,
  used: Boolean
)
/** A PKCE challenge value together with the method used to derive it from the verifier. */
final case class AuthorizationCodeChallenge(challenge: String, method: AuthorizationCodeChallengeMethod)
/** A strategy for deriving a code challenge from a code verifier (PKCE). */
sealed trait AuthorizationCodeChallengeMethod {
  /** Derives the challenge string for the given verifier. */
  def derive(verifier: String): String

  /** Checks a stored challenge against a presented verifier via ConstantTimeCompare. */
  def verify(challenge: String, verifier: String): Boolean = {
    ConstantTimeCompare.compare(challenge, derive(verifier))
  }
}
object AuthorizationCodeChallengeMethod {

  /** `plain` method: the challenge is the verifier itself. */
  case object Plain extends AuthorizationCodeChallengeMethod {
    override def derive(verifier: String): String = verifier
  }

  /** `S256` method: base64url(SHA-256(ASCII(verifier))) without padding (RFC 7636 §4.2). */
  case object S256 extends AuthorizationCodeChallengeMethod {
    def derive(verifier: String): String = {
      val digest = MessageDigest.getInstance("SHA-256")
      digest.update(verifier.getBytes("ASCII"))
      // Use the padding-free encoder rather than stripSuffix("="): stripSuffix
      // removes at most ONE trailing '=', which only happens to be enough for
      // 32-byte SHA-256 digests (one pad char). withoutPadding() is correct for
      // any digest length and yields identical output here.
      Base64.getUrlEncoder.withoutPadding().encodeToString(digest.digest())
    }
  }
}
| choffmeister/microservice-utils | microservice-utils-auth/src/main/scala/de/choffmeister/microserviceutils/auth/models/AuthorizationCode.scala | Scala | mit | 1,265 |
/*
*
* Copyright 2014 David Hall
*
* Licensed under the Apache License, Version 2.0 (the "License")
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* /
*/
package breeze.stats.distributions
import org.scalacheck.{Arbitrary, Prop}
import org.scalatest.FunSuite
import org.scalatest.prop.Checkers
/**
 * Shared property tests for distributions exposing a CDF: the analytic
 * `probability`/`cdf` values must agree (within an absolute tolerance of 2e-2)
 * with empirical fractions computed from 10000 draws of the sampler.
 *
 * @author dlwh
 **/
trait HasCdfTestBase extends FunSuite with Checkers {
  // The distribution under test must be a density, a sampler, and have a CDF.
  type Distr <: Density[Double] with Rand[Double] with HasCdf

  implicit def arbDistr: Arbitrary[Distr]

  test("probability gets the same fraction of things as the sampler") {
    check(Prop.forAll { (distr: Distr) =>
      val samples = distr.sample(10000)
      // Use the first two samples themselves as the (low, high) probe interval.
      val (low, high) = {
        if(samples(0) < samples(1)) (samples(0), samples(1))
        else (samples(1), samples(0))
      }
      val inRange = samples.count(x => x >= low && x <= high) / (samples.length * 1.0)
      val prob = distr.probability(low, high)
      if(prob >= 0 && math.abs(inRange - prob) <= 2E-2) {
        true
      } else {
        // Print the mismatching pair to aid debugging before failing the property.
        println(inRange, prob)
        false
      }
    })
  }

  test("cdf gets the same fraction of things as the sampler") {
    check(Prop.forAll { (distr: Distr) =>
      val samples = distr.sample(10000)
      // Probe the CDF at the first sample.
      val high = samples(0)
      val inRange = samples.count(x => x <= high) / (samples.length * 1.0)
      val prob = distr.cdf(high)
      if(prob >= 0 && math.abs(inRange - prob) <= 2E-2) {
        true
      } else {
        println(inRange, prob)
        false
      }
    })
  }
}
| wstcpyt/breeze | math/src/test/scala/breeze/stats/distributions/HasCdfTestBase.scala | Scala | apache-2.0 | 1,953 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.scheduler.rate
import scala.util.Random
import org.scalatest.Inspectors.forAll
import org.scalatest.Matchers
import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.streaming.Seconds
/**
 * Spark Streaming's backpressure feature can throttle the ingestion rate.
 * PIDRateEstimator estimates how fast an InputDStream can consume data, based
 * on the relationship between batch size and processing time, and the receiving
 * rate is raised or lowered accordingly.
 */
class PIDRateEstimatorSuite extends SparkFunSuite with Matchers {

  test("the right estimator is created") {
    val conf = new SparkConf
    conf.set("spark.streaming.backpressure.rateEstimator", "pid")
    val pid = RateEstimator.create(conf, Seconds(1))
    pid.getClass should equal(classOf[PIDRateEstimator])
  }

  test("estimator checks ranges") {
    // Every constructor parameter outside its legal range must be rejected.
    intercept[IllegalArgumentException] {
      new PIDRateEstimator(batchIntervalMillis = 0, 1, 2, 3, 10)
    }
    intercept[IllegalArgumentException] {
      new PIDRateEstimator(100, proportional = -1, 2, 3, 10)
    }
    intercept[IllegalArgumentException] {
      new PIDRateEstimator(100, 0, integral = -1, 3, 10)
    }
    intercept[IllegalArgumentException] {
      new PIDRateEstimator(100, 0, 0, derivative = -1, 10)
    }
    intercept[IllegalArgumentException] {
      new PIDRateEstimator(100, 0, 0, 0, minRate = 0)
    }
    intercept[IllegalArgumentException] {
      new PIDRateEstimator(100, 0, 0, 0, minRate = -10)
    }
  }

  test("first estimate is None") {
    val p = createDefaultEstimator()
    // compute(time, numElements, processingDelay, schedulingDelay)
    p.compute(0, 10, 10, 0) should equal(None)
  }

  test("second estimate is not None") {
    val p = createDefaultEstimator()
    p.compute(0, 10, 10, 0)
    // 1000 elements / s
    p.compute(10, 10, 10, 0) should equal(Some(1000))
  }

  test("no estimate when no time difference between successive calls") {
    val p = createDefaultEstimator()
    p.compute(0, 10, 10, 0)
    p.compute(time = 10, 10, 10, 0) shouldNot equal(None)
    // Same timestamp as the previous call: no new information, so no estimate.
    p.compute(time = 10, 10, 10, 0) should equal(None)
  }

  test("no estimate when no records in previous batch") {
    val p = createDefaultEstimator()
    p.compute(0, 10, 10, 0)
    p.compute(10, numElements = 0, 10, 0) should equal(None)
    p.compute(20, numElements = -10, 10, 0) should equal(None)
  }

  test("no estimate when there is no processing delay") {
    val p = createDefaultEstimator()
    p.compute(0, 10, 10, 0)
    p.compute(10, 10, processingDelay = 0, 0) should equal(None)
    p.compute(20, 10, processingDelay = -10, 0) should equal(None)
  }

  test("estimate is never less than min rate") {
    val minRate = 5D
    val p = new PIDRateEstimator(20, 1D, 1D, 0D, minRate)
    // prepare a series of batch updates, one every 20ms, 0 processed elements, 2ms of processing
    // this might point the estimator to try and decrease the bound, but we test it never
    // goes below the min rate, which would be nonsensical.
    val times = List.tabulate(50)(x => x * 20) // every 20ms
    val elements = List.fill(50)(1) // no processing
    val proc = List.fill(50)(20) // 20ms of processing
    val sched = List.fill(50)(100) // strictly positive accumulation
    val res = for (i <- List.range(0, 50)) yield p.compute(times(i), elements(i), proc(i), sched(i))
    res.head should equal(None)
    res.tail should equal(List.fill(49)(Some(minRate)))
  }

  test("with no accumulated or positive error, |I| > 0, follow the processing speed") {
    val p = new PIDRateEstimator(20, 1D, 1D, 0D, 10)
    // prepare a series of batch updates, one every 20ms with an increasing number of processed
    // elements in each batch, but constant processing time, and no accumulated error. Even though
    // the integral part is non-zero, the estimated rate should follow only the proportional term
    val times = List.tabulate(50)(x => x * 20) // every 20ms
    val elements = List.tabulate(50)(x => (x + 1) * 20) // increasing
    val proc = List.fill(50)(20) // 20ms of processing
    val sched = List.fill(50)(0)
    val res = for (i <- List.range(0, 50)) yield p.compute(times(i), elements(i), proc(i), sched(i))
    res.head should equal(None)
    res.tail should equal(List.tabulate(50)(x => Some((x + 1) * 1000D)).tail)
  }

  test("with no accumulated but some positive error, |I| > 0, follow the processing speed") {
    val p = new PIDRateEstimator(20, 1D, 1D, 0D, 10)
    // prepare a series of batch updates, one every 20ms with an decreasing number of processed
    // elements in each batch, but constant processing time, and no accumulated error. Even though
    // the integral part is non-zero, the estimated rate should follow only the proportional term,
    // asking for less and less elements
    val times = List.tabulate(50)(x => x * 20) // every 20ms
    val elements = List.tabulate(50)(x => (50 - x) * 20) // decreasing
    val proc = List.fill(50)(20) // 20ms of processing
    val sched = List.fill(50)(0)
    val res = for (i <- List.range(0, 50)) yield p.compute(times(i), elements(i), proc(i), sched(i))
    res.head should equal(None)
    res.tail should equal(List.tabulate(50)(x => Some((50 - x) * 1000D)).tail)
  }

  test("with some accumulated and some positive error, |I| > 0, stay below the processing speed") {
    val minRate = 10D
    val p = new PIDRateEstimator(20, 1D, .01D, 0D, minRate)
    val times = List.tabulate(50)(x => x * 20) // every 20ms
    val rng = new Random()
    val elements = List.tabulate(50)(x => rng.nextInt(1000) + 1000)
    val procDelayMs = 20
    val proc = List.fill(50)(procDelayMs) // 20ms of processing
    val sched = List.tabulate(50)(x => rng.nextInt(19) + 1) // random wait
    val speeds = elements map ((x) => x.toDouble / procDelayMs * 1000)
    val res = for (i <- List.range(0, 50)) yield p.compute(times(i), elements(i), proc(i), sched(i))
    res.head should equal(None)
    forAll(List.range(1, 50)) { (n) =>
      res(n) should not be None
      if (res(n).get > 0 && sched(n) > 0) {
        res(n).get should be < speeds(n)
        res(n).get should be >= minRate
      }
    }
  }

  // Default estimator under test: 20ms batch interval, purely proportional control.
  private def createDefaultEstimator(): PIDRateEstimator = {
    new PIDRateEstimator(20, 1D, 0D, 0D, 10)
  }
}
| tophua/spark1.52 | streaming/src/test/scala/org/apache/spark/streaming/scheduler/rate/PIDRateEstimatorSuite.scala | Scala | apache-2.0 | 7,869 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.scala.dsl
import builder.RouteBuilder
import org.junit.Test
/**
 * Test case for the Splitter EIP using the tokenizeXML expression: the payload
 * is split on &lt;person&gt; elements and each token is routed individually.
 */
class SplitterTokenizeXMLTest extends ScalaTestSupport {

  @Test
  def testSplitterTokenizeXML = {
    val mock = getMockEndpoint("mock:b")
    // Each <person> element should arrive at mock:b as its own message body.
    mock.expectedBodiesReceived("<person>Claus</person>", "<person>James</person>", "<person>Willem</person>")

    val xml: String = "<persons><person>Claus</person><person>James</person><person>Willem</person></persons>"
    template.sendBody("direct:b", xml)

    assertMockEndpointsSatisfied()
  }

  val builder =
    //START SNIPPET: e1
    new RouteBuilder {
      "direct:b" ==> {
        // Split on <person> tokens; every token goes to both the log and the mock endpoint.
        split(tokenizeXML("person")) {
          to("log:b")
          to("mock:b")
        }
      }
    }
    //END SNIPPET: e1
}
| engagepoint/camel | components/camel-scala/src/test/scala/org/apache/camel/scala/dsl/SplitterTokenizeXMLTest.scala | Scala | apache-2.0 | 1,596 |
/**
* Copyright 2015 Ram Sriharsha
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package magellan.mapreduce
import com.google.common.base.Stopwatch
import magellan.io.{ShapeKey, ShapeWritable}
import org.apache.commons.logging.LogFactory
import org.apache.hadoop.fs.{LocatedFileStatus, Path}
import org.apache.hadoop.mapreduce.lib.input._
import org.apache.hadoop.mapreduce.{InputSplit, JobContext, TaskAttemptContext}
import scala.collection.JavaConversions._
import scala.collection.mutable.ListBuffer
// Hadoop InputFormat for ESRI shapefiles. Split boundaries cannot be found by
// scanning bytes, so precomputed record offsets are handed over via SplitInfos.
private[magellan] class ShapeInputFormat
  extends FileInputFormat[ShapeKey, ShapeWritable] {

  private val log = LogFactory.getLog(classOf[ShapeInputFormat])

  override def createRecordReader(inputSplit: InputSplit,
    taskAttemptContext: TaskAttemptContext) = {
    new ShapefileReader
  }

  // Splittable because record offsets are supplied externally (see getSplits).
  override def isSplitable(context: JobContext, filename: Path): Boolean = true

  override def getSplits(job: JobContext): java.util.List[InputSplit] = {
    // Per-file split offsets passed thread-locally by the caller (see SplitInfos).
    val splitInfos = SplitInfos.SPLIT_INFO_MAP.get()
    computeSplits(job, splitInfos)
  }

  // Builds one split per (start, end) offset pair for each input file; falls back
  // to a single whole-file split when no offset info is available for that file.
  private def computeSplits(
       job: JobContext,
       splitInfos: scala.collection.Map[String, Array[Long]]) = {

    val sw = new Stopwatch().start
    val splits = ListBuffer[InputSplit]()
    val files = listStatus(job)
    for (file <- files) {
      val path = file.getPath
      val length = file.getLen
      // LocatedFileStatus already carries block locations; otherwise ask the FS.
      val blkLocations = if (file.isInstanceOf[LocatedFileStatus]) {
        file.asInstanceOf[LocatedFileStatus].getBlockLocations
      } else {
        val fs = path.getFileSystem(job.getConfiguration)
        fs.getFileBlockLocations(file, 0, length)
      }
      // Key into splitInfos by the file's base name: "states.shp" -> "states".
      val key = path.getName.split("\\.shp$")(0)
      if (splitInfos == null || !splitInfos.containsKey(key)) {
        // No offset info for this file: one split covering the whole file.
        val blkIndex = getBlockIndex(blkLocations, 0)
        splits.+= (makeSplit(path, 0, length, blkLocations(blkIndex).getHosts,
          blkLocations(blkIndex).getCachedHosts))
      } else {
        // Offsets mark split starts; pair each with the next offset (or EOF) as its end.
        val s = splitInfos(key).toSeq
        val start = s
        val end = s.drop(1) ++ Seq(length)
        start.zip(end).foreach { case (startOffset: Long, endOffset: Long) =>
          val blkIndex = getBlockIndex(blkLocations, startOffset)
          splits.+=(makeSplit(path, startOffset, endOffset - startOffset, blkLocations(blkIndex).getHosts,
            blkLocations(blkIndex).getCachedHosts))
        }
      }
    }
    sw.stop
    if (log.isDebugEnabled) {
      log.debug("Total # of splits generated by getSplits: " + splits.size + ", TimeTaken: " + sw.elapsedMillis)
    }
    splits
  }
}
object SplitInfos {
  // Thread-local hand-off of precomputed split offsets, keyed by shapefile base
  // name, from the driver code to getSplits.
  // TODO: Can we get rid of this hack to pass split calculation to the Shapefile Reader?
  val SPLIT_INFO_MAP = new ThreadLocal[scala.collection.Map[String, Array[Long]]]
}
package backpresurre
import scala.concurrent.duration._
import java.util.Date
/**
 * A leaky-bucket rate limiter: at most `rate` drops may sit in the bucket, and
 * one drop leaks out every `perDuration / 1` interval (i.e. `perDuration.toMillis`
 * milliseconds per drop). `dropToBucket()` returns true if the new drop was
 * accepted (under the limit) and false if it must be rejected.
 */
class LeakyBucket(var rate: Int, var perDuration: FiniteDuration) {
  // Current fill level: drops added but not yet leaked away.
  var numDropsInBucket: Int = 0
  // When we last leaked; null until the first drop arrives.
  var timeOfLastDropLeak: Date = null
  // Milliseconds it takes for a single drop to leak out.
  var msDropLeaks = perDuration.toMillis

  /**
   * Attempts to add one drop. Thread-safe (synchronized on this instance).
   * @return true if the drop fit in the bucket, false if the rate limit is hit.
   */
  def dropToBucket(): Boolean = {
    synchronized {
      val now = new Date()
      if (timeOfLastDropLeak != null) {
        val deltaT = now.getTime() - timeOfLastDropLeak.getTime()
        // Whole drops that have leaked out since the last leak.
        val numberToLeak: Long = deltaT / msDropLeaks
        if (numberToLeak > 0) {
          // BUG FIX: the original condition was inverted. It subtracted when
          // numDropsInBucket <= numberToLeak (driving the level negative and
          // permitting bursts over the limit) and zeroed the bucket otherwise
          // (leaking everything when only `numberToLeak` drops should go).
          if (numDropsInBucket > numberToLeak) {
            numDropsInBucket -= numberToLeak.toInt
          } else {
            numDropsInBucket = 0
          }
          timeOfLastDropLeak = now
        }
      } else {
        timeOfLastDropLeak = now
      }
      if (numDropsInBucket < rate) {
        numDropsInBucket += 1
        true
      } else {
        false
      }
    }
  }
}
| tnddn/iv-web | portal/rest-portal/app/backpresurre/LeakyBucket.scala | Scala | apache-2.0 | 956 |
package sgl.android
/** Provides Android implementation for games services
*
* Games services are features that request the use of a server to
* sync data between players. The most common example is the Google
* games services which provide out-of-the-box many useful services such
* as leaderboards, achievements, and saved games. But leaderboard could
* be provided by an alternative service as well, or maybe your own
* custom implementation.
*
* Note that in theory achievements don't need a shared server, so maybe
* this should not be part of services.
*/
package object services {
  // Intentionally empty: exists to anchor the package-level scaladoc above.
}
| regb/scala-game-library | android/core/src/main/scala/sgl/android/services/package.scala | Scala | mit | 618 |
package japgolly.microlibs.recursion
import cats.free.Free
import cats.{Functor, Monad}
/** Extension methods for a plain F-algebra `F[A] => A`. */
final class FAlgebraOps[F[_], A](private val self: FAlgebra[F, A]) extends AnyVal {

  /** Lifts this pure algebra into one whose result is wrapped in a monad `M`. */
  def toFAlgebraM[M[_]](implicit M: Monad[M]): FAlgebraM[M, F, A] =
    fa => M.point(self(fa))

  /** Views this as an R-algebra by ignoring the extra subtree in each `(_, A)` pair. */
  def toRAlgebra(implicit F: Functor[F]): RAlgebra[F, A] =
    ffa => self(F.map(ffa)(_._2))

  /** Views this as a CV-algebra by reading only the head (most recent) annotation. */
  def toCVAlgebra(implicit F: Functor[F]): CVAlgebra[F, A] =
    fa => self(F.map(fa)(_.head))

  /** Runs this algebra and `that` side by side, producing both results in one pass. */
  def zip[B](that: FAlgebra[F, B])(implicit F: Functor[F]): FAlgebra[F, (A, B)] =
    fab => {
      val a = self(F.map(fab)(_._1))
      val b = that(F.map(fab)(_._2))
      (a, b)
    }
}
/** Extension methods for a plain F-coalgebra `A => F[A]`. */
final class FCoalgebraOps[F[_], A](private val self: FCoalgebra[F, A]) extends AnyVal {

  /** Lifts this pure coalgebra into one whose result is wrapped in a monad `M`. */
  def toFCoalgebraM[M[_]](implicit M: Monad[M]): FCoalgebraM[M, F, A] =
    a => M.point(self(a))

  /** Views this as an R-coalgebra that always chooses to keep unfolding (Right). */
  def toRCoalgebra(implicit F: Functor[F]): RCoalgebra[F, A] =
    a => F.map(self(a))(Right(_))

  /** Views this as a CV-coalgebra whose children are single seeds (pure Free values). */
  def toCVCoalgebra(implicit F: Functor[F]): CVCoalgebra[F, A] =
    a => F.map(self(a))(Free.pure)

  /** Unfolds from either of two seed types, tagging children with the side they came from. */
  def cozip[B](that: FCoalgebra[F, B])(implicit F: Functor[F]): FCoalgebra[F, Either[A, B]] = {
    case Left (a) => F.map(self(a))(Left(_))
    case Right(b) => F.map(that(b))(Right(_))
  }
}
| japgolly/microlibs-scala | recursion/shared/src/main/scala/japgolly/microlibs/recursion/Algebras.scala | Scala | apache-2.0 | 1,233 |
/*
* Copyright (C) 2015-2016 Paulo Angelo Alves Resende <pa@pauloangelo.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License Version 2 as
* published by the Free Software Foundation. You may not use, modify or
* distribute this program under any other version of the GNU General
* Public License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.hogzilla.hbase.HogHBaseRDD
import org.hogzilla.initiate.HogInitiate
import org.hogzilla.prepare.HogPrepare
import org.hogzilla.sflow._
import org.hogzilla.http.HogHTTP
import org.hogzilla.auth.HogAuth
import org.hogzilla.dns.HogDNS
import org.hogzilla.snort.HogSnort
/**
*
* Keep it useful, simple, robust, and scalable.
*
*
*/
object Hogzilla {

  /** Entry point: connects to HBase, runs every analysis module, and always
    * releases the Spark context and HBase connection, even when a module fails.
    */
  def main(args: Array[String])
  {
    val sparkConf = new SparkConf()
      .setAppName("Hogzilla")
      .set("spark.executor.memory", "1g")
      .set("spark.default.parallelism", "160") // 160

    val spark = new SparkContext(sparkConf)

    try {
      // Get the HBase RDD
      val HogRDD = HogHBaseRDD.connect(spark)

      // Initiate HogZilla
      HogInitiate.initiate(spark)

      // Prepare the data
      HogPrepare.prepare(HogRDD)

      // General module
      HogSnort.run(HogRDD, spark)

      // Run algorithms for DNS protocol
      HogDNS.run(HogRDD, spark)

      // Run algorithms for HTTP protocol
      HogHTTP.run(HogRDD, spark)

      // Run algorithms for SMTP protocol
      //HogSMTP.run(HogRDD);

      // ============================ Run algorithms for SFlows ============================
      val HogRDDSFlow = HogHBaseRDD.connectSFlow(spark)
      HogSFlow.run(HogRDDSFlow, spark)

      val HogRDDHistograms = HogHBaseRDD.connectHistograms(spark)
      HogSFlowHistograms.run(HogRDDHistograms, spark)

      // Use continuous mode
      //val HogRDDAuth = HogHBaseRDD.connectAuth(spark);
      //HogAuth.run(HogRDDAuth,spark);
    } finally {
      // Previously these ran only on success, leaking the Spark context and the
      // HBase connection when any module threw. Stop Spark first, then close HBase.
      spark.stop()
      HogHBaseRDD.close()
    }
  }
} | pauloangelo/hogzilla | src/Hogzilla.scala | Scala | gpl-2.0 | 2,659 |
package org.orbroker
import JdbcCloser._
import org.orbroker.exception._
import java.sql.{ SQLException, ResultSet, Connection, PreparedStatement }
/**
* The session encapsulating the a connection
* from the data source.
* @author Nils Kilden-Pedersen
*/
private[orbroker] abstract class Session(
  isolationLevel: Option[Int],
  broker: Broker,
  private val extConn: Option[Connection]) {
  // Lazily-opened internal connection. Stays null while an external
  // connection (extConn) is used, or until the first statement runs.
  private var conn: Connection = null
  // Commit/rollback affect only the internally managed connection; an
  // externally supplied connection remains the caller's responsibility.
  protected def commit() = if (conn != null) conn.commit()
  protected def rollback() = if (conn != null) conn.rollback()
  private[orbroker] def alwaysPrepare = broker.alwaysPrepare
  // Statement lookup is delegated to the broker; the Mod/Call variants
  // downcast to the statement subtype the caller expects.
  protected def getStatement(token: Token[_]) = token.getStatement(broker)
  protected def getModStatement(token: Token[_]) = token.getStatement(broker).asInstanceOf[ModifyStatement]
  protected def getCallStatement(token: Token[_]) = token.getStatement(broker).asInstanceOf[CallStatement]
  implicit protected def callback = broker.callback
  // Supplied by concrete subclasses: whether this session is read-only, and
  // whether it currently holds changes that have not been committed.
  protected val readOnly: Boolean
  protected def hasUncommittedChanges: Boolean
  /**
   * Timeout in seconds. Will cause a [[org.orbroker.exception.TimeoutException]]
   * if an execution takes longer than the given time.
   * 0 means no limit.
   * @see java.sql.Statement#setQueryTimeout(int)
   */
  var timeout = broker.timeout
  /**
   * Fetch size in rows.
   * @see java.sql.Statement.setFetchSize(int)
   */
  var fetchSize = broker.fetchSize
  // Returns the external connection when one was supplied; otherwise lazily
  // opens (and caches) a new connection from the broker, aligning its
  // read-only flag with this session's.
  protected[orbroker] final def connection = extConn match {
    case Some(ec) => ec
    case None => {
      if (conn == null) {
        conn = broker.newConnection(isolationLevel)
        if (conn.isReadOnly != readOnly) conn.setReadOnly(readOnly)
      }
      conn
    }
  }
  // Close and forget the internal connection, swallowing close failures.
  // Presumably invoked after errors when the connection state is suspect --
  // TODO confirm against callers.
  private[orbroker] def discardConnection() {
    if (conn != null) {
      try { conn.close() } catch { case _: Exception => /* Ignore */ }
      conn = null
    }
  }
  // Close the internal connection. Rolls back any implicit transaction first,
  // and signals via RollbackException when uncommitted changes were discarded.
  // The connection is closed in all cases (finally).
  private[orbroker] def close() {
    if (conn != null) {
      try {
        // End potential implicit transaction for drivers that require it before closing
        conn.rollback()
        if (hasUncommittedChanges) {
          throw new RollbackException
        }
      } finally {
        conn.checkAndClose()
      }
    }
  }
  // Build the parameter map: start from the broker's defaults, then add the
  // given args. Traversable values are materialized into arrays.
  protected final def toMap(args: Iterable[(String, _)]): Map[String, Any] = {
    var map: Map[String, Any] = broker.defaultParms
    args foreach { case arg @ (key, value) =>
      if (value.isInstanceOf[Traversable[_]]) {
        val array = value.asInstanceOf[Traversable[Any]].toArray
        map += (key -> array)
      } else {
        map += arg
      }
    }
    map
  }
  // Translate a raw SQLException into a domain-specific exception using the
  // broker's adapter; returns the original exception when unrecognized.
  protected final def evaluate(id: Symbol, e: SQLException) = {
    if (broker.adapter.isConstraint(e)) {
      new ConstraintException(id, e, broker.adapter.constraintName(e))
    } else if (broker.adapter.isTimeout(e)) {
      new TimeoutException(id, e)
    } else if (broker.adapter.isDeadlock(e)) {
      new DeadlockException(e)
    } else {
      e
    }
  }
}
| nilskp/orbroker | src/main/scala/org/orbroker/Session.scala | Scala | mit | 2,974 |
package is.hail.asm4s
import java.io._
import java.nio.charset.StandardCharsets
import is.hail.expr.ir.EmitCodeBuilder
import is.hail.lir
import is.hail.utils._
import org.apache.spark.TaskContext
import org.objectweb.asm.ClassReader
import org.objectweb.asm.Opcodes._
import org.objectweb.asm.tree._
import org.objectweb.asm.util.{Textifier, TraceClassVisitor}
import scala.collection.mutable
/** A JVM instance field of type `T` declared on the class being built. */
class Field[T: TypeInfo](classBuilder: ClassBuilder[_], val name: String) {
  val ti: TypeInfo[T] = implicitly
  // Low-level IR handle for the field, registered on the owning class.
  val lf: lir.Field = classBuilder.lclass.newField(name, typeInfo[T])
  // Read the field from the object produced by `obj`.
  def get(obj: Code[_]): Code[T] = Code(obj, lir.getField(lf))
  def get(obj: Value[_]): Value[T] = new Value[T] {
    override def get: Code[T] = Code(obj, lir.getField(lf))
  }
  def putAny(obj: Code[_], v: Code[_]): Code[Unit] = put(obj, coerce[T](v))
  // Store `v` into the field on `obj`: sequence obj's code before v's, append
  // the putField instruction, then clear both inputs (Code values are single-use).
  def put(obj: Code[_], v: Code[T]): Code[Unit] = {
    obj.end.append(lir.goto(v.start))
    v.end.append(lir.putField(lf, obj.v, v.v))
    val newC = new VCode(obj.start, v.end, null)
    obj.clear()
    v.clear()
    newC
  }
}
/** A static JVM field of type `T` declared on the class being built. */
class StaticField[T: TypeInfo](classBuilder: ClassBuilder[_], val name: String) {
  val ti: TypeInfo[T] = implicitly
  // Low-level IR handle for the static field, registered on the owning class.
  val lf: lir.StaticField = classBuilder.lclass.newStaticField(name, typeInfo[T])
  def get(): Code[T] = Code(lir.getStaticField(lf))
  // Store `v` into the static field; `v` is cleared because Code is single-use.
  def put(v: Code[T]): Code[Unit] = {
    v.end.append(lir.putStaticField(lf, v.v))
    val newC = new VCode(v.start, v.end, null)
    v.clear()
    newC
  }
}
/** Serializable bundle of compiled (class name, bytecode) pairs, shipped to
  * workers and defined in a class loader at most once per instance. */
class ClassesBytes(classesBytes: Array[(String, Array[Byte])]) extends Serializable {
  // Not serialized: each JVM that deserializes this starts with loaded = false.
  @transient @volatile var loaded: Boolean = false
  // Define every class in `hcl`. Double-checked locking (volatile flag plus
  // synchronized re-check) ensures the definitions happen at most once even
  // under concurrent callers.
  def load(hcl: HailClassLoader): Unit = {
    if (!loaded) {
      synchronized {
        if (!loaded) {
          classesBytes.foreach { case (n, bytes) =>
            try {
              hcl.loadOrDefineClass(n, bytes)
            } catch {
              case e: Exception =>
                // On failure, log a human-readable disassembly of the offending
                // bytecode to aid debugging, then rethrow.
                val buffer = new ByteArrayOutputStream()
                FunctionBuilder.bytesToBytecodeString(bytes, buffer)
                val classJVMByteCodeAsEscapedStr = buffer.toString(StandardCharsets.UTF_8.name())
                log.error(s"Failed to load bytecode ${e}:\\n" + classJVMByteCodeAsEscapedStr)
                throw e
            }
          }
        }
        loaded = true
      }
    }
  }
}
/** A generated tuple-like class: its builder, its element fields, and its constructor. */
class AsmTuple[C](val cb: ClassBuilder[C], val fields: IndexedSeq[Field[_]], val ctor: MethodBuilder[C]) {
  val ti: TypeInfo[_] = cb.ti
  // Instantiate the tuple class from the given element codes.
  def newTuple(elems: IndexedSeq[Code[_]]): Code[C] = Code.newInstance(cb, ctor, elems)
  // Load all element fields from an untyped reference (cast to C first).
  def loadElementsAny(t: Value[_]): IndexedSeq[Value[_]] = fields.map(_.get(coerce[C](t) ))
  def loadElements(t: Value[C]): IndexedSeq[Value[_]] = fields.map(_.get(t))
}
/** Mixin that forwards module-level operations to an underlying [[ModuleBuilder]]. */
trait WrappedModuleBuilder {
  def modb: ModuleBuilder
  def newClass[C](name: String)(implicit cti: TypeInfo[C]): ClassBuilder[C] = modb.newClass[C](name)
  def genClass[C](baseName: String)(implicit cti: TypeInfo[C]): ClassBuilder[C] = modb.genClass[C](baseName)
  def classesBytes(print: Option[PrintWriter] = None): ClassesBytes = modb.classesBytes(print)
}
/** Accumulates the classes generated for one compilation unit and serializes
  * them all to bytecode on demand. */
class ModuleBuilder() {
  val classes = new mutable.ArrayBuffer[ClassBuilder[_]]()
  // Create a new class in this module. When `cti` is a real (non-Unit) type,
  // the class is declared to implement that type's interface.
  def newClass[C](name: String, sourceFile: Option[String] = None)(implicit cti: TypeInfo[C]): ClassBuilder[C] = {
    val c = new ClassBuilder[C](this, name, sourceFile)
    if (cti != UnitInfo)
      c.addInterface(cti.iname)
    classes += c
    c
  }
  // One generated tuple class per distinct field-type signature.
  private val tuples = mutable.Map[IndexedSeq[TypeInfo[_]], AsmTuple[_]]()
  // Get (or generate and memoize) a tuple-like class whose fields have the
  // given types. The generated constructor calls Object.<init> and then
  // assigns each constructor argument to its field.
  def tupleClass(fieldTypes: IndexedSeq[TypeInfo[_]]): AsmTuple[_] = {
    tuples.getOrElseUpdate(fieldTypes, {
      val kb = genClass[Unit](s"Tuple${fieldTypes.length}")
      val fields = fieldTypes.zipWithIndex.map { case (ti, i) =>
        kb.newField(s"_$i")(ti)
      }
      val ctor = kb.newMethod("<init>", fieldTypes, UnitInfo)
      ctor.emitWithBuilder { cb =>
        // FIXME, maybe a more elegant way to do this?
        val L = new lir.Block()
        L.append(
          lir.methodStmt(INVOKESPECIAL,
            "java/lang/Object",
            "<init>",
            "()V",
            false,
            UnitInfo,
            FastIndexedSeq(lir.load(ctor._this.asInstanceOf[LocalRef[_]].l))))
        cb += new VCode(L, L, null)
        fields.zipWithIndex.foreach { case (f, i) =>
          cb += f.putAny(ctor._this, ctor.getArg(i + 1)(f.ti).get)
        }
        Code._empty
      }
      new AsmTuple(kb, fields, ctor)
    })
  }
  def genClass[C](baseName: String)(implicit cti: TypeInfo[C]): ClassBuilder[C] = newClass[C](genName("C", baseName))
  // Cached result of serializing all classes; computed at most once.
  var classesBytes: ClassesBytes = _
  def classesBytes(print: Option[PrintWriter] = None): ClassesBytes = {
    if (classesBytes == null) {
      classesBytes = new ClassesBytes(
        classes
          .iterator
          .flatMap(c => c.classBytes(print))
          .toArray)
    }
    classesBytes
  }
}
/** Mixin that forwards class-level operations to an underlying [[ClassBuilder]]. */
trait WrappedClassBuilder[C] extends WrappedModuleBuilder {
  def cb: ClassBuilder[C]
  def modb: ModuleBuilder = cb.modb
  def className: String = cb.className
  def ti: TypeInfo[_] = cb.ti
  def addInterface(name: String): Unit = cb.addInterface(name)
  def emitInit(c: Code[Unit]): Unit = cb.emitInit(c)
  def emitClinit(c: Code[Unit]): Unit = cb.emitClinit(c)
  def newField[T: TypeInfo](name: String): Field[T] = cb.newField[T](name)
  def newStaticField[T: TypeInfo](name: String): StaticField[T] = cb.newStaticField[T](name)
  def newStaticField[T: TypeInfo](name: String, init: Code[T]): StaticField[T] = cb.newStaticField[T](name, init)
  def genField[T: TypeInfo](baseName: String): Field[T] = cb.genField(baseName)
  def genFieldThisRef[T: TypeInfo](name: String = null): ThisFieldRef[T] = cb.genFieldThisRef[T](name)
  def genLazyFieldThisRef[T: TypeInfo](setup: Code[T], name: String = null): Value[T] = cb.genLazyFieldThisRef(setup, name)
  def getOrDefineLazyField[T: TypeInfo](setup: Code[T], id: Any): Value[T] = cb.getOrDefineLazyField(setup, id)
  def fieldBuilder: SettableBuilder = cb.fieldBuilder
  def newMethod(name: String, parameterTypeInfo: IndexedSeq[TypeInfo[_]], returnTypeInfo: TypeInfo[_]): MethodBuilder[C] =
    cb.newMethod(name, parameterTypeInfo, returnTypeInfo)
  def newMethod(name: String,
    maybeGenericParameterTypeInfo: IndexedSeq[MaybeGenericTypeInfo[_]],
    maybeGenericReturnTypeInfo: MaybeGenericTypeInfo[_]): MethodBuilder[C] =
    cb.newMethod(name, maybeGenericParameterTypeInfo, maybeGenericReturnTypeInfo)
  def newStaticMethod(name: String, parameterTypeInfo: IndexedSeq[TypeInfo[_]], returnTypeInfo: TypeInfo[_]): MethodBuilder[C] =
    cb.newStaticMethod(name, parameterTypeInfo, returnTypeInfo)
  def getOrGenMethod(
    baseName: String, key: Any, argsInfo: IndexedSeq[TypeInfo[_]], returnInfo: TypeInfo[_]
  )(body: MethodBuilder[C] => Unit): MethodBuilder[C] =
    cb.getOrGenMethod(baseName, key, argsInfo, returnInfo)(body)
  def result(print: Option[PrintWriter] = None): (HailClassLoader) => C = cb.result(print)
  def _this: Value[C] = cb._this
  def genMethod(baseName: String, argsInfo: IndexedSeq[TypeInfo[_]], returnInfo: TypeInfo[_]): MethodBuilder[C] =
    cb.genMethod(baseName, argsInfo, returnInfo)
  def genMethod[R: TypeInfo](baseName: String): MethodBuilder[C] = cb.genMethod[R](baseName)
  def genMethod[A: TypeInfo, R: TypeInfo](baseName: String): MethodBuilder[C] = cb.genMethod[A, R](baseName)
  def genMethod[A1: TypeInfo, A2: TypeInfo, R: TypeInfo](baseName: String): MethodBuilder[C] = cb.genMethod[A1, A2, R](baseName)
  def genMethod[A1: TypeInfo, A2: TypeInfo, A3: TypeInfo, R: TypeInfo](baseName: String): MethodBuilder[C] = cb.genMethod[A1, A2, A3, R](baseName)
  def genMethod[A1: TypeInfo, A2: TypeInfo, A3: TypeInfo, A4: TypeInfo, R: TypeInfo](baseName: String): MethodBuilder[C] = cb.genMethod[A1, A2, A3, A4, R](baseName)
  def genMethod[A1: TypeInfo, A2: TypeInfo, A3: TypeInfo, A4: TypeInfo, A5: TypeInfo, R: TypeInfo](baseName: String): MethodBuilder[C] = cb.genMethod[A1, A2, A3, A4, A5, R](baseName)
  def genStaticMethod(name: String, parameterTypeInfo: IndexedSeq[TypeInfo[_]], returnTypeInfo: TypeInfo[_]): MethodBuilder[C] =
    cb.genStaticMethod(name, parameterTypeInfo, returnTypeInfo)
}
/** Builds a single JVM class: its fields, methods, constructor and static
  * initializer, in terms of the low-level `lir` representation. */
class ClassBuilder[C](
  val modb: ModuleBuilder,
  val className: String,
  val sourceFile: Option[String]
) extends WrappedModuleBuilder {
  val ti: ClassInfo[C] = new ClassInfo[C](className)
  // Low-level class node; every generated class extends java/lang/Object.
  val lclass = new lir.Classx[C](className, "java/lang/Object", sourceFile)
  val methods: mutable.ArrayBuffer[MethodBuilder[C]] = new mutable.ArrayBuffer[MethodBuilder[C]](16)
  val fields: mutable.ArrayBuffer[FieldNode] = new mutable.ArrayBuffer[FieldNode](16)
  // Memo used by getOrDefineLazyField, keyed by caller-supplied ids.
  val lazyFieldMemo: mutable.Map[Any, Value[_]] = mutable.Map.empty
  val lInit = lclass.newMethod("<init>", FastIndexedSeq(), UnitInfo)
  // Default constructor body: invoke Object.<init> on `this`, then return.
  // emitInit appends additional initialization code to this.
  var initBody: Code[Unit] = {
    val L = new lir.Block()
    L.append(
      lir.methodStmt(INVOKESPECIAL,
        "java/lang/Object",
        "<init>",
        "()V",
        false,
        UnitInfo,
        FastIndexedSeq(lir.load(lInit.getParam(0)))))
    L.append(lir.returnx())
    new VCode(L, L, null)
  }
  // Static initializer (<clinit>); created lazily on the first emitClinit call.
  private var lClinit: lir.Method = _
  var clinitBody: Option[Code[Unit]] = None
  def emitInit(c: Code[Unit]): Unit = {
    initBody = Code(initBody, c)
  }
  def emitClinit(c: Code[Unit]): Unit = {
    clinitBody match {
      case None =>
        lClinit = lclass.newMethod("<clinit>", FastIndexedSeq(), UnitInfo, isStatic = true)
        clinitBody = Some(c)
      case Some(body) =>
        clinitBody = Some(Code(body, c))
    }
  }
  def addInterface(name: String): Unit = lclass.addInterface(name)
  def newMethod(name: String, parameterTypeInfo: IndexedSeq[TypeInfo[_]], returnTypeInfo: TypeInfo[_]): MethodBuilder[C] = {
    val mb = new MethodBuilder[C](this, name, parameterTypeInfo, returnTypeInfo)
    methods.append(mb)
    mb
  }
  // As above, but when any parameter or the return is "generic" (boxed), also
  // emits a bridge method with the generic signature that unboxes arguments,
  // delegates to the specific method, and boxes the result.
  def newMethod(name: String,
    maybeGenericParameterTypeInfo: IndexedSeq[MaybeGenericTypeInfo[_]],
    maybeGenericReturnTypeInfo: MaybeGenericTypeInfo[_]): MethodBuilder[C] = {
    val parameterTypeInfo: IndexedSeq[TypeInfo[_]] = maybeGenericParameterTypeInfo.map(_.base)
    val returnTypeInfo: TypeInfo[_] = maybeGenericReturnTypeInfo.base
    val m = newMethod(name, parameterTypeInfo, returnTypeInfo)
    if (maybeGenericParameterTypeInfo.exists(_.isGeneric) || maybeGenericReturnTypeInfo.isGeneric) {
      val generic = newMethod(name, maybeGenericParameterTypeInfo.map(_.generic), maybeGenericReturnTypeInfo.generic)
      generic.emitWithBuilder { cb =>
        maybeGenericReturnTypeInfo.castToGeneric(cb,
          m.invoke(cb, maybeGenericParameterTypeInfo.zipWithIndex.map { case (ti, i) =>
            ti.castFromGeneric(cb, generic.getArg(i + 1)(ti.generic))
          }: _*))
      }
    }
    m
  }
  def newStaticMethod(name: String, parameterTypeInfo: IndexedSeq[TypeInfo[_]], returnTypeInfo: TypeInfo[_]): MethodBuilder[C] = {
    val mb = new MethodBuilder[C](this, name, parameterTypeInfo, returnTypeInfo, isStatic = true)
    methods.append(mb)
    mb
  }
  def newField[T: TypeInfo](name: String): Field[T] = new Field[T](this, name)
  def newStaticField[T: TypeInfo](name: String): StaticField[T] = new StaticField[T](this, name)
  // Static field whose initial value is assigned in <clinit>.
  def newStaticField[T: TypeInfo](name: String, init: Code[T]): StaticField[T] = {
    val f = new StaticField[T](this, name)
    emitClinit(f.put(init))
    f
  }
  def genField[T: TypeInfo](baseName: String): Field[T] = newField(genName("f", baseName))
  // Memoizes generated methods by caller-supplied key (see getOrGenMethod).
  private[this] val methodMemo: mutable.Map[Any, MethodBuilder[C]] = mutable.HashMap.empty
  def getOrGenMethod(baseName: String, key: Any, argsInfo: IndexedSeq[TypeInfo[_]], returnInfo: TypeInfo[_])
    (f: MethodBuilder[C] => Unit): MethodBuilder[C] = {
    methodMemo.get(key) match {
      case Some(mb) => mb
      case None =>
        val mb = newMethod(genName("M", baseName), argsInfo, returnInfo)
        f(mb)
        methodMemo(key) = mb
        mb
    }
  }
  // Finalize the constructor and static-initializer entry points, then
  // serialize this class (and any classes lir splits it into) to bytecode.
  def classBytes(print: Option[PrintWriter] = None): Array[(String, Array[Byte])] = {
    assert(initBody.start != null)
    lInit.setEntry(initBody.start)
    clinitBody match {
      case None => // do nothing
      case Some(body) =>
        assert(body.start != null)
        body.end.append(lir.returnx())
        val nbody = new VCode(body.start, body.end, null)
        body.clear()
        lClinit.setEntry(nbody.start)
    }
    lclass.asBytes(print)
  }
  // Returns a serializable factory that lazily loads the module's bytecode
  // (double-checked) and instantiates this class in the given class loader.
  // NOTE(review): Class.newInstance is deprecated since Java 9 and requires a
  // public no-arg constructor; getDeclaredConstructor().newInstance() is the
  // modern replacement (it wraps constructor exceptions differently).
  def result(print: Option[PrintWriter] = None): (HailClassLoader) => C = {
    val n = className.replace("/", ".")
    val classesBytes = modb.classesBytes()
    assert(TaskContext.get() == null,
      "FunctionBuilder emission should happen on master, but happened on worker")
    new ((HailClassLoader) => C) with java.io.Serializable {
      @transient @volatile private var theClass: Class[_] = null
      def apply(hcl: HailClassLoader): C = {
        if (theClass == null) {
          this.synchronized {
            if (theClass == null) {
              classesBytes.load(hcl)
              theClass = loadClass(hcl, n)
            }
          }
        }
        theClass.newInstance().asInstanceOf[C]
      }
    }
  }
  // A Value referencing `this` (JVM local slot 0 of instance methods).
  def _this: Value[C] = new LocalRef[C](new lir.Parameter(null, 0, ti))
  val fieldBuilder: SettableBuilder = new SettableBuilder {
    def newSettable[T](name: String)(implicit tti: TypeInfo[T]): Settable[T] = genFieldThisRef[T](name)
  }
  def genFieldThisRef[T: TypeInfo](name: String = null): ThisFieldRef[T] =
    new ThisFieldRef[T](this, genField[T](name))
  def genLazyFieldThisRef[T: TypeInfo](setup: Code[T], name: String = null): Value[T] =
    new ThisLazyFieldRef[T](this, name, setup)
  def getOrDefineLazyField[T: TypeInfo](setup: Code[T], id: Any): Value[T] = {
    lazyFieldMemo.getOrElseUpdate(id, genLazyFieldThisRef[T](setup)).asInstanceOf[ThisLazyFieldRef[T]]
  }
  def genMethod(baseName: String, argsInfo: IndexedSeq[TypeInfo[_]], returnInfo: TypeInfo[_]): MethodBuilder[C] =
    newMethod(genName("m", baseName), argsInfo, returnInfo)
  def genMethod[R: TypeInfo](baseName: String): MethodBuilder[C] =
    genMethod(baseName, FastIndexedSeq[TypeInfo[_]](), typeInfo[R])
  def genMethod[A: TypeInfo, R: TypeInfo](baseName: String): MethodBuilder[C] =
    genMethod(baseName, FastIndexedSeq[TypeInfo[_]](typeInfo[A]), typeInfo[R])
  def genMethod[A1: TypeInfo, A2: TypeInfo, R: TypeInfo](baseName: String): MethodBuilder[C] =
    genMethod(baseName, FastIndexedSeq[TypeInfo[_]](typeInfo[A1], typeInfo[A2]), typeInfo[R])
  def genMethod[A1: TypeInfo, A2: TypeInfo, A3: TypeInfo, R: TypeInfo](baseName: String): MethodBuilder[C] =
    genMethod(baseName, FastIndexedSeq[TypeInfo[_]](typeInfo[A1], typeInfo[A2], typeInfo[A3]), typeInfo[R])
  def genMethod[A1: TypeInfo, A2: TypeInfo, A3: TypeInfo, A4: TypeInfo, R: TypeInfo](baseName: String): MethodBuilder[C] =
    genMethod(baseName, FastIndexedSeq[TypeInfo[_]](typeInfo[A1], typeInfo[A2], typeInfo[A3], typeInfo[A4]), typeInfo[R])
  def genMethod[A1: TypeInfo, A2: TypeInfo, A3: TypeInfo, A4: TypeInfo, A5: TypeInfo, R: TypeInfo](baseName: String): MethodBuilder[C] =
    genMethod(baseName, FastIndexedSeq[TypeInfo[_]](typeInfo[A1], typeInfo[A2], typeInfo[A3], typeInfo[A4], typeInfo[A5]), typeInfo[R])
  def genStaticMethod(baseName: String, argsInfo: IndexedSeq[TypeInfo[_]], returnInfo: TypeInfo[_]): MethodBuilder[C] =
    newStaticMethod(genName("sm", baseName), argsInfo, returnInfo)
}
object FunctionBuilder {
  /** Writes a human-readable (Textifier) disassembly of JVM class `bytes` to `out`. */
  def bytesToBytecodeString(bytes: Array[Byte], out: OutputStream) {
    val tcv = new TraceClassVisitor(null, new Textifier, new PrintWriter(out))
    new ClassReader(bytes).accept(tcv, 0)
  }
  // Create a fresh module with one generated class implementing F, whose
  // single entry point is an `apply` method with the given signature.
  def apply[F](
    baseName: String,
    argInfo: IndexedSeq[MaybeGenericTypeInfo[_]],
    returnInfo: MaybeGenericTypeInfo[_]
  )(implicit fti: TypeInfo[F]): FunctionBuilder[F] = {
    val modb: ModuleBuilder = new ModuleBuilder()
    val cb: ClassBuilder[F] = modb.genClass[F](baseName)
    val apply = cb.newMethod("apply", argInfo, returnInfo)
    new FunctionBuilder[F](apply)
  }
  // Arity-specific conveniences over the general factory above.
  def apply[R: TypeInfo](baseName: String): FunctionBuilder[AsmFunction0[R]] =
    apply[AsmFunction0[R]](baseName, FastIndexedSeq.empty[MaybeGenericTypeInfo[_]], GenericTypeInfo[R])
  def apply[A1: TypeInfo, R: TypeInfo](baseName: String): FunctionBuilder[AsmFunction1[A1, R]] =
    apply[AsmFunction1[A1, R]](baseName, Array(GenericTypeInfo[A1]), GenericTypeInfo[R])
  def apply[A1: TypeInfo, A2: TypeInfo, R: TypeInfo](baseName: String): FunctionBuilder[AsmFunction2[A1, A2, R]] =
    apply[AsmFunction2[A1, A2, R]](baseName, Array(GenericTypeInfo[A1], GenericTypeInfo[A2]), GenericTypeInfo[R])
  def apply[A1: TypeInfo, A2: TypeInfo, A3: TypeInfo, R: TypeInfo](baseName: String): FunctionBuilder[AsmFunction3[A1, A2, A3, R]] =
    apply[AsmFunction3[A1, A2, A3, R]](baseName, Array(GenericTypeInfo[A1], GenericTypeInfo[A2], GenericTypeInfo[A3]), GenericTypeInfo[R])
  def apply[A1: TypeInfo, A2: TypeInfo, A3: TypeInfo, A4: TypeInfo, R: TypeInfo](baseName: String): FunctionBuilder[AsmFunction4[A1, A2, A3, A4, R]] =
    apply[AsmFunction4[A1, A2, A3, A4, R]](baseName, Array(GenericTypeInfo[A1], GenericTypeInfo[A2], GenericTypeInfo[A3], GenericTypeInfo[A4]), GenericTypeInfo[R])
}
/** Mixin that forwards method-level operations to an underlying [[MethodBuilder]]. */
trait WrappedMethodBuilder[C] extends WrappedClassBuilder[C] {
  def mb: MethodBuilder[C]
  def cb: ClassBuilder[C] = mb.cb
  def methodName: String = mb.methodName
  def parameterTypeInfo: IndexedSeq[TypeInfo[_]] = mb.parameterTypeInfo
  def returnTypeInfo: TypeInfo[_] = mb.returnTypeInfo
  def newLocal[T: TypeInfo](name: String = null): LocalRef[T] = mb.newLocal(name)
  def localBuilder: SettableBuilder = mb.localBuilder
  def getArg[T: TypeInfo](i: Int): LocalRef[T] = mb.getArg[T](i)
  def emitStartup(c: Code[Unit]): Unit = mb.emitStartup(c)
  def emit(body: Code[_]): Unit = mb.emit(body)
  def emitWithBuilder[T](f: (CodeBuilder) => Code[T]): Unit = mb.emitWithBuilder(f)
  def invoke[T](cb: EmitCodeBuilder, args: Value[_]*): Value[T] = mb.invoke(cb, args: _*)
}
/** Builds one JVM method (instance or static) on `cb`'s class: locals,
  * argument access, body emission, and call-site generation. */
class MethodBuilder[C](
  val cb: ClassBuilder[C], _mname: String,
  val parameterTypeInfo: IndexedSeq[TypeInfo[_]],
  val returnTypeInfo: TypeInfo[_],
  val isStatic: Boolean = false
) extends WrappedClassBuilder[C] {
  // very long method names, repeated hundreds of thousands of times can cause memory issues.
  // If necessary to find the name of a method precisely, this can be set to around the constant
  // limit of 65535 characters, but usually, this can be much smaller.
  val methodName: String = _mname.substring(0, scala.math.min(_mname.length, 2000 /* 65535 */))
  if (methodName != "<init>" && !isJavaIdentifier(methodName))
    throw new IllegalArgumentException(s"Illegal method name, not Java identifier: $methodName")
  val lmethod: lir.Method = cb.lclass.newMethod(methodName, parameterTypeInfo, returnTypeInfo, isStatic)
  val localBuilder: SettableBuilder = new SettableBuilder {
    def newSettable[T](name: String)(implicit tti: TypeInfo[T]): Settable[T] = newLocal[T](name)
  }
  def newLocal[T: TypeInfo](name: String = null): LocalRef[T] =
    new LocalRef[T](lmethod.newLocal(name, typeInfo[T]))
  // Access parameter `i`. For instance methods, index 0 is `this` and declared
  // parameters start at 1; for static methods they start at 0. The assertions
  // check the requested type against the declared signature.
  def getArg[T: TypeInfo](i: Int): LocalRef[T] = {
    val ti = implicitly[TypeInfo[T]]
    if (i == 0 && !isStatic)
      assert(ti == cb.ti, s"$ti != ${ cb.ti }")
    else {
      val static = (!isStatic).toInt
      assert(ti == parameterTypeInfo(i - static),
        s"$ti != ${ parameterTypeInfo(i - static) }\\n params: $parameterTypeInfo")
    }
    new LocalRef(lmethod.getParam(i))
  }
  // A method body may be emitted exactly once; startup code can be prepended
  // (via emitStartup) any number of times before that.
  private var emitted = false
  private var startup: Code[Unit] = Code._empty
  def emitStartup(c: Code[Unit]): Unit = {
    assert(!emitted)
    startup = Code(startup, c)
  }
  def emitWithBuilder[T](f: (CodeBuilder) => Code[T]): Unit = emit(CodeBuilder.scopedCode[T](this)(f))
  // Wire startup -> body -> return and set the method's entry block.
  // The body Code is cleared afterwards (single-use).
  def emit(body: Code[_]): Unit = {
    assert(!emitted)
    emitted = true
    val start = startup.start
    startup.end.append(lir.goto(body.start))
    body.end.append(
      if (body.v != null)
        lir.returnx(body.v)
      else
        lir.returnx())
    assert(start != null)
    lmethod.setEntry(start)
    body.clear()
  }
  // Build a call to this method as a raw Code value. Instance calls load
  // `this` from slot 0 ahead of the arguments. (Kept in sync with `invoke`
  // below, which additionally memoizes the result through a CodeBuilder.)
  def invokeCode[T](args: Value[_]*): Code[T] = {
    val (start, end, argvs) = Code.sequenceValues(args.toFastIndexedSeq.map(_.get))
    if (returnTypeInfo eq UnitInfo) {
      if (isStatic) {
        end.append(lir.methodStmt(INVOKESTATIC, lmethod, argvs))
      } else {
        end.append(
          lir.methodStmt(INVOKEVIRTUAL, lmethod,
            lir.load(new lir.Parameter(null, 0, cb.ti)) +: argvs))
      }
      new VCode(start, end, null)
    } else {
      val value = if (isStatic) {
        lir.methodInsn(INVOKESTATIC, lmethod, argvs)
      } else {
        lir.methodInsn(INVOKEVIRTUAL, lmethod,
          lir.load(new lir.Parameter(null, 0, cb.ti)) +: argvs)
      }
      new VCode(start, end, value)
    }
  }
  // Build a call to this method within `codeBuilder`, returning a memoized
  // Value (or an empty Code coerced to T for Unit-returning methods).
  def invoke[T](codeBuilder: CodeBuilderLike, args: Value[_]*): Value[T] = {
    val (start, end, argvs) = Code.sequenceValues(args.toFastIndexedSeq.map(_.get))
    if (returnTypeInfo eq UnitInfo) {
      if (isStatic) {
        end.append(lir.methodStmt(INVOKESTATIC, lmethod, argvs))
      } else {
        end.append(
          lir.methodStmt(INVOKEVIRTUAL, lmethod,
            lir.load(new lir.Parameter(null, 0, cb.ti)) +: argvs))
      }
      codeBuilder.append(new VCode(start, end, null))
      coerce[T](Code._empty)
    } else {
      val value = if (isStatic) {
        lir.methodInsn(INVOKESTATIC, lmethod, argvs)
      } else {
        lir.methodInsn(INVOKEVIRTUAL, lmethod,
          lir.load(new lir.Parameter(null, 0, cb.ti)) +: argvs)
      }
      coerce[T](codeBuilder.memoizeAny(new VCode(start, end, value), returnTypeInfo))
    }
  }
}
/** Wraps the `apply` method of a generated function class; all method-level
  * operations delegate to that method's builder. */
class FunctionBuilder[F](
  val apply_method: MethodBuilder[F]
) extends WrappedMethodBuilder[F] {
  val mb: MethodBuilder[F] = apply_method
}
| hail-is/hail | hail/src/main/scala/is/hail/asm4s/ClassBuilder.scala | Scala | mit | 21,925 |
package japgolly.microlibs.nonempty
import utest._
/** Tests NonEmpty construction from standard collections: empty inputs yield
  * None; non-empty inputs yield the corresponding NonEmpty wrapper. */
object NonEmptyTest extends TestSuite {
  // Opaque key/value types used to exercise NonEmpty over maps.
  case class X()
  case class Y()
  override def tests = Tests {
    "vector" - {
      "empty" - assert(NonEmpty(Vector()).isEmpty)
      "one" - assert(NonEmpty(Vector(1)) == Some(NonEmptyVector(1)))
      "two" - assert(NonEmpty(Vector(1, 2)) == Some(NonEmptyVector(1, 2)))
    }
    "set" - {
      "empty" - assert(NonEmpty(Set.empty[Int]).isEmpty)
      "one" - assert(NonEmpty(Set(1)) == Some(NonEmptySet(1)))
      "two" - assert(NonEmpty(Set(1, 2)) == Some(NonEmptySet(1, 2)))
    }
    "map" - {
      "empty" - assert(NonEmpty(Map.empty[X, Y]).isEmpty)
      val ne = Map(X() -> Y())
      // NonEmpty.force wraps without checking; equality with the checked path
      // verifies NonEmpty(ne) succeeds on a non-empty map.
      "nonEmpty" - assert(NonEmpty(ne) == Some(NonEmpty.force(ne)))
    }
  }
}
| japgolly/microlibs-scala | nonempty/shared/src/test/scala/japgolly/microlibs/nonempty/NonEmptyTest.scala | Scala | apache-2.0 | 770 |
package tmeval
/** A trained topic model: per-topic weights plus per-topic word distributions. */
trait TopicModel {

  // Weight for each topic (length = number of topics).
  def topicweights: Array[Double]

  // For each topic, a map from word index to that word's probability in the topic.
  def topics: Array[Map[Int,Double]]

  // Cache of word index -> per-topic probability profile, to minimize repeated
  // hash lookups across all topics for frequently queried words.
  lazy val cachedTypeProfiles = collection.mutable.HashMap[Int,Array[Double]]()

  /** Probability of word `index` under each topic, computed once and cached.
    * (`getOrElseUpdate` replaces the previous match-on-Option + manual insert.) */
  def typeProfile(index: Int): Array[Double] =
    cachedTypeProfiles.getOrElseUpdate(index, topics.map(_(index)))
}
/** A randomly generated topic model for simulation experiments: topic weights
  * are exp(Gaussian(0,1)) draws, and each topic's word distribution is drawn
  * from a symmetric Dirichlet(0.1). */
class SimulatedTopicModel(val numtopics: Int = 50, val numwords: Int = 1000)
extends TopicModel {
  import breeze.stats.distributions.{Gaussian, Dirichlet, Multinomial}
  val topicweights = new Gaussian(0, 1).sample(numtopics).map(math.exp).toArray
  // One multinomial per topic over the vocabulary.
  val topicDistributions =
    (1 to numtopics).map(k => Multinomial(Dirichlet.sym(0.1,numwords).draw)).toArray
  // Dense topic-word maps extracted from each multinomial's parameter vector
  // (word probability keyed by word index).
  val topics = topicDistributions.map(d=>d.params.data.zipWithIndex.map(_.swap).toMap)
  /** Sample a document of `docsize` tokens: draw a document-specific topic
    * mixture, then for each token draw a topic and a word from that topic. */
  def generateDocument(docsize: Int = 500) = {
    val topicMultinomial = Multinomial(Dirichlet(topicweights).draw)
    (1 to docsize).map(i => topicDistributions(topicMultinomial.draw).draw).toArray
  }
}
/** Adapts a trained MALLET LDA model (its raw packed count arrays) to the
  * TopicModel interface, recovering beta-smoothed topic-word distributions. */
class MalletLdaModel (
  numtopics: Int,
  alpha: Array[Double],
  alphaSum: Double,
  beta: Double,
  typeTopicCounts: Array[Array[Int]],
  tokensPerTopic: Array[Int]
) extends TopicModel {
  // MALLET packs (count, topic) pairs into single ints: the low `topicBits`
  // bits hold the topic id, the high bits hold the count. The mask is
  // numtopics-1 when numtopics is a power of two; otherwise one extra bit.
  val topicMask =
    if (Integer.bitCount(numtopics) == 1)
      numtopics - 1
    else // otherwise add an extra bit
      Integer.highestOneBit(numtopics) * 2 - 1
  val topicBits = Integer.bitCount(topicMask)
  val topicweights = alpha
  val topics = {
    // Unpack per-word packed entries into per-topic word-count maps.
    val topicTypeCounts =
      (0 until numtopics).map(i => collection.mutable.HashMap[Int,Double]().withDefault(x=>0.0))
    typeTopicCounts.zipWithIndex.map { case(currentTypeTopicCounts,typeIndex) => {
      var index = 0
      // MALLET stores nonzero entries first; a zero entry terminates the list.
      while (index < currentTypeTopicCounts.length && currentTypeTopicCounts(index) > 0) {
        val currentTopic = currentTypeTopicCounts(index) & topicMask
        val currentValue = currentTypeTopicCounts(index) >> topicBits
        topicTypeCounts(currentTopic)(typeIndex) += currentValue
        index += 1
      }
    }}
    val numTypes = typeTopicCounts.length
    val pseudoCounts = numTypes*beta
    // Compute the topic distributions, with appropriate default for words
    // not seen with the topic.
    topicTypeCounts.toArray.map { currentCounts => {
      // Note: currentCounts.values.sum is also available via (and is the
      // same as) tokensPerTopic.
      val denominator = currentCounts.values.sum + pseudoCounts
      currentCounts
        .mapValues(v => (v+beta)/denominator)
        .toMap
        .withDefault(x=>beta/denominator)
    }}
  }
}
/** Helpers for importing text into MALLET. */
object MalletUtil {
  /** Builds the standard import pipeline: label targets, retain the source,
    * read as character data, tokenize on alphabetic runs, lowercase, drop
    * stopwords, and convert tokens to feature sequences. */
  def createPipeline() = {
    import cc.mallet.pipe._
    import cc.mallet.util.CharSequenceLexer
    val pipeList = new java.util.ArrayList[Pipe]()
    pipeList.add(new Target2Label)
    pipeList.add(new SaveDataInSource)
    pipeList.add(new Input2CharSequence(java.nio.charset.Charset.defaultCharset.displayName))
    pipeList.add(new CharSequence2TokenSequence(CharSequenceLexer.LEX_ALPHA))
    pipeList.add(new TokenSequenceLowercase)
    pipeList.add(new TokenSequenceRemoveStopwords(false, false))
    pipeList.add(new TokenSequence2FeatureSequence)
    new SerialPipes(pipeList)
  }
}
object OutputTopics {
  import java.io._
  import cc.mallet.topics._
  import cc.mallet.types._
  import cc.mallet.pipe.iterator._
  import scala.collection.JavaConversions._

  /** Entry point: trains an LDA model for each requested dataset and writes
    * each model's top topic words to the configured output (stdout or a file).
    */
  def main (args: Array[String]) {
    val extractDir = new File(Constants.TMEVAL_DIR, "data/extracted")

    // Parse and get the command-line options
    val opts = CorpusExperimentOpts(args)
    val numTopics = opts.numTopics()

    // Get the datasets: one can either specify a single dataset, or grab all
    // the datasets in the data/extracted directory.
    val datasets = opts.dataset() match {
      case "all" => extractDir.listFiles.map(_.getName)
      case singleDataset => Array(singleDataset)
    }

    // Set up the output writer for producing the final CSV formatted results
    val outputWriter = opts.output() match {
      case "stdout" => new PrintWriter(System.out)
      case file => new PrintWriter(new BufferedWriter(new FileWriter(new File(file))))
    }

    // Close the writer even when training fails, so buffered output is not
    // lost and file handles are released (previously it leaked on exception).
    try {
      for (dataset <- datasets) {
        val datasetDir = new File(extractDir, dataset)

        // Get the instances
        val allInstances = new InstanceList(MalletUtil.createPipeline)
        val allFiles = new FileIterator(Array(datasetDir), FileIterator.STARTING_DIRECTORIES, true)
        allInstances.addThruPipe(allFiles)

        val lda = new ParallelTopicModel(numTopics, numTopics/10, 0.01)
        lda.printLogLikelihood = false
        lda.setTopicDisplay(500, 10)
        lda.addInstances(allInstances)
        lda.setNumThreads(4)
        lda.numIterations = 1000
        lda.estimate

        outputWriter.write("\\n# Topics for " + dataset + "\\n")
        outputWriter.write("```\\n")
        outputWriter.write(lda.displayTopWords(20, false))
        outputWriter.write("```\\n\\n")
      }
    } finally {
      outputWriter.close()
    }
  }
}
| utcompling/topicmodel-eval | src/main/scala/tmeval/Model.scala | Scala | apache-2.0 | 5,189 |
package sampler.example
import java.nio.file.{Files, Paths}
import org.apache.commons.io.FileUtils
import play.api.libs.json.JsObject
import play.api.libs.json.Json
import play.api.libs.json.Json._
import sampler._
import sampler.distribution.Distribution
import sampler.io.Rounding.Roundable
import sampler.maths.Random
import sampler.r.script.RScript
import scala.collection.immutable
/*
* Bootstrap on observations to determine the power of a sampling
* experiment in terms of observing a difference between treatment
* groups.
*
* Additionally, compare the power of rank (Mann–Whitney U test)
* with a sum statistic
*/
object Randomisation extends App {
  // Shared RNG for all Distribution sampling in this experiment.
  implicit val r = Random
  // All output artefacts are written under results/Randomisation.
  val wd = Paths.get("results", "Randomisation").toAbsolutePath()
  Files.createDirectories(wd)
trait Group
case object Control extends Group
case object Treatment extends Group
case class Response(val value: Double, val group: Group)
object Response{
implicit def orderByValue = Ordering.by{r: Response => r.value}
}
val controlObs = IndexedSeq(-2.563111554, 0.137602944, -3.833588568, -4.999003784, -5.010599635, 1.913070835, 2.448825376, -4.257028083, -3.712720119, -1.582605178, 0.610041145, 4.13967034, 1.631807722, -0.42796491, 0.035666579, -5.029770934, 3.246954255, 0.223542528, 3.803082094, -5.342843905, 8.858159774, -6.315339586, 9.584523137, 4.692787541, -0.980056662, -0.175106954, 3.570247137, 0.626695503, -0.411770106, -0.192063359, -1.222091598, -9.05367629, -3.474498646, 4.360297922, -0.664044765, 3.642900198, -2.266528981, 2.503933899, 3.159185662, 5.879731056, 2.691961071, -2.467236093, 1.942253135, -3.086385876, 8.191174117, -8.370596641, -2.455481745)
.map{Response(_, Control)}
val treatmentObs = IndexedSeq(13.65608672, -9.771070031, -1.201278583, -5.516289538, -18.59506765, -1.1918794, -0.684629634, -6.65098211, -0.708664358, -5.507504279, 6.453622365, -8.97723389, -5.360219431, -0.270222351, 1.104715823, -0.021076194, -7.144171315)
.map{Response(_, Treatment)}
object SampleSize{
val control = 16
val treatment = 20
val total = control + treatment
}
val controlDist = Distribution.uniform(controlObs)
val treatmentDist = Distribution.uniform(treatmentObs)
val combinedDist = Distribution.uniform(controlObs ++: treatmentObs)
def rankStatistic(responses: Seq[Response]) = {
val sorted = responses.sorted.zipWithIndex
val controlRankSum = sorted.collect{case (Response(_, Control), idx) => idx + 1}.sum
val uControl = controlRankSum - (SampleSize.control*(SampleSize.control + 1.0) / 2.0)
val uTreatment = SampleSize.control * SampleSize.treatment - uControl
math.max(uControl, uTreatment)
}
def sumStatistic(responses: Seq[Response]) =
math.abs(responses.foldLeft(0.0){
case (acc, Response(value, Control)) => acc + value
case (acc, Response(value, Treatment)) => acc - value
})
def buildSamplingDistribution(
control: Distribution[Response],
treatment: Distribution[Response],
statistic: Seq[Response] => Double) = {
{for{
controlOutcomes <- control.until(_.size == SampleSize.control)
treatmentOutcomes <- treatment.until(_.size == SampleSize.treatment)
} yield { controlOutcomes ++: treatmentOutcomes }}
.map(statistic)
}
case class Results(
statisticName: String,
nullObs: Seq[Double],
experimentalObs: Seq[Double]
){
val nullEmpirical = nullObs.toEmpirical
def powerAtConfidence(confSeq: Seq[Double]): Seq[Double] = {
nullEmpirical.percentile(confSeq)
.map{criticalRightValue =>
experimentalObs.count{e => e > criticalRightValue} / experimentalObs.size.toDouble
}
}
}
val results: Seq[Results] = Map(
"Rank" -> rankStatistic _ ,
"Sum" -> sumStatistic _
)
.map{case (statName,statFun) =>
val experimentalDist = buildSamplingDistribution(
controlDist,
treatmentDist,
statFun)
val nullDist = buildSamplingDistribution(
combinedDist.map(_.copy(group = Control)),
combinedDist.map(_.copy(group = Treatment)),
statFun
)
val nullObs = (1 to 100000).par.map(_ => nullDist.sample).seq
val experimentObs = (1 to 100000).par.map(_ => experimentalDist.sample).seq
Results(statName, nullObs, experimentObs)
}
.toSeq
val confidence = 0.8 until 1 by 0.0002
val jsons: Seq[JsObject] = results.map{ r => obj(
"stat_name" -> r.statisticName,
"observations" -> Json.obj(
"null" -> r.nullObs.map(_.decimalPlaces(3)),
"experimental" -> r.experimentalObs.map(_.decimalPlaces(3))
),
"powers" -> Json.obj(
"confidence" -> confidence.map(_.decimalPlaces(3)),
"power" -> r.powerAtConfidence(confidence).map(_.decimalPlaces(3))
)
)}
FileUtils.writeStringToFile(
wd.resolve("json.json").toFile(),
Json.prettyPrint(Json.arr(jsons))
)
RScript("""
library(ggplot2)
library(rjson)
jsonData = fromJSON(file = "json.json")
drawPlots = function(statName){
statData = Filter(function(x) x$stat_name == statName, jsonData)[[1]]
obsData = rbind(
data.frame(Variable = "null", Statistic = statData$observations$null),
data.frame(Variable = "experimental", Statistic = statData$observations$experimental)
)
ggplot(obsData, aes(x=Statistic, colour = Variable)) +
geom_density() +
ggtitle(paste(statName, "Statistic Density"))
}
pdf("plot.pdf", width=8.26, height=2.91)
drawPlots("Rank")
drawPlots("Sum")
rankPowers = Filter(function(x) x$stat_name == "Rank",jsonData)[[1]]$powers
sumPowers = Filter(function(x) x$stat_name == "Sum",jsonData)[[1]]$powers
powerData = rbind(
data.frame(rankPowers, Statistic = "Rank"),
data.frame(sumPowers, Statistic = "Sum")
)
ggplot(powerData, aes(x=confidence, y=power, colour=Statistic)) +
geom_line() +
ggtitle("Power at Confidence Levels")
dev.off()
""",
wd.resolve("rscript")
)
}
| tearne/Sampler | sampler-examples/src/main/scala/sampler/example/Randomisation.scala | Scala | apache-2.0 | 5,932 |
package pl.project13.scala.akka.raft
import pl.project13.scala.akka.raft.config.RaftConfig
import pl.project13.scala.akka.raft.model._
import pl.project13.scala.akka.raft.protocol._
import scala.annotation.tailrec
import scala.collection.immutable
private[raft] trait Follower {
  this: RaftActor =>

  protected def raftConfig: RaftConfig

  /**
   * FSM behaviour while this member is in the Follower state:
   * redirect clients to the most recently seen Leader, vote in elections,
   * and accept replicated log entries / heartbeats from the Leader.
   */
  val followerBehavior: StateFunction = {
    case Event(msg: ClientMessage[Command], m: Meta) =>
      // Followers do not take writes; point the client at the last Leader we heard from.
      log.info("Follower got {} from client; Respond with last Leader that took write from: {}", msg, recentlyContactedByLeader)
      sender() ! LeaderIs(recentlyContactedByLeader, Some(msg))
      stay()

    // start of election

    // Stale-term RequestVote: decline without updating our term.
    // NOTE(review): `candidate` is not bound by this pattern — presumably an
    // alias for sender() provided elsewhere (RaftActor/protocol); confirm.
    case Event(msg: RequestVote, m: Meta) if msg.term < m.currentTerm =>
      log.info("Rejecting RequestVote msg by {} in {}. Received stale {}.", candidate, m.currentTerm, msg.term)
      candidate ! DeclineCandidate(m.currentTerm)
      stay()

    // Current-or-newer-term RequestVote: vote at most once per term.
    case Event(msg: RequestVote, m: Meta) if msg.term >= m.currentTerm =>
      val msgTerm = msg.term
      if (m.canVoteIn(msgTerm)) {
        log.info("Voting for {} in {}", candidate, msgTerm)
        candidate ! VoteCandidate(msgTerm)
        stay() using m.withVote(msgTerm, candidate)
      } else {
        // NOTE(review): 3 placeholders but 4 arguments here — the last argument
        // (m.votes.get(msgTerm)) is never rendered; a '{}' is likely missing.
        log.info("Rejecting RequestVote msg by {} in {}. Already voted for {}", candidate, msgTerm, m.currentTerm, m.votes.get(msgTerm))
        sender ! DeclineCandidate(msgTerm)
        stay() using m.withTerm(msgTerm)
      }

    // end of election

    // take writes
    case Event(msg: AppendEntries[Command], m: Meta) =>
      senderIsCurrentLeader()
      appendEntries(msg, m)
    // end of take writes

    // timeout, may need to start an election
    case Event(ElectionTimeout, m: Meta) =>
      if (electionDeadline.isOverdue()) beginElection(m)
      else stay()

    case Event(AskForState, _) =>
      sender() ! IAmInState(Follower)
      stay()
  }

  /**
   * Handles an AppendEntries message: rejects it when the sender's term is
   * stale, treats heartbeats as deadline resets, and otherwise appends the
   * entries, commits up to the leader's commit index, and applies any
   * configuration entries contained in the batch.
   */
  def appendEntries(msg: AppendEntries[Command], m: Meta): State = {
    implicit val self = m.clusterSelf // todo this is getting pretty crap, revert to having Cluster awareness a trait IMO

    if (leaderIsLagging(msg, m)) {
      if (msg.isNotHeartbeat) {
        log.info("Rejecting write (Leader is lagging) of: " + msg + "; " + replicatedLog)
        leader ! AppendRejected(m.currentTerm, replicatedLog.lastIndex) // no need to respond if only heartbeat
      }
      stay()
    } else if (msg.isHeartbeat) {
      acceptHeartbeat()
    } else {
      log.debug("Appending: " + msg.entries)
      leader ! append(msg.entries, m)
      replicatedLog = commitUntilLeadersIndex(m, msg)

      // Configuration entries take effect as soon as they are appended,
      // even before they are committed (see maybeUpdateConfiguration).
      val meta = maybeUpdateConfiguration(m, msg.entries.map(_.command))
      val metaWithUpdatedTerm = meta.copy(currentTerm = replicatedLog.lastTerm)
      acceptHeartbeat() using metaWithUpdatedTerm
    }
  }

  /** True when the message's term is older than our current term. */
  def leaderIsLagging(msg: AppendEntries[Command], m: Meta): Boolean =
    msg.term < m.currentTerm

  /**
   * Appends the entries to the replicated log at the smallest index in the
   * batch and returns the acknowledgement to send back to the Leader.
   */
  def append(entries: immutable.Seq[Entry[Command]], m: Meta): AppendSuccessful = {
    val atIndex = entries.map(_.index).min
    log.debug("executing: replicatedLog = replicatedLog.append({}, {})", entries, atIndex)

    replicatedLog = replicatedLog.append(entries, atIndex)
//    log.debug("log after append: " + replicatedLog.entries)

    AppendSuccessful(replicatedLog.lastTerm, replicatedLog.lastIndex)
  }

  /**
   * Configurations must be used by each node right away when they get appended to their logs (doesn't matter if not committed).
   * This method updates the Meta object if a configuration change is discovered.
   */
  @tailrec final def maybeUpdateConfiguration(meta: Meta, entries: Seq[Command]): Meta = entries match {
    case Nil =>
      meta

    case (newConfig: ClusterConfiguration) :: moreEntries if newConfig.isNewerThan(meta.config) =>
      log.info("Appended new configuration (seq: {}), will start using it now: {}", newConfig.sequenceNumber, newConfig)
      maybeUpdateConfiguration(meta.withConfig(newConfig), moreEntries)

    case _ :: moreEntries =>
      maybeUpdateConfiguration(meta, moreEntries)
  }

  /**
   * Commits every entry between our committed index and the Leader's commit
   * index, applying each command to the state machine as it is committed.
   */
  def commitUntilLeadersIndex(m: Meta, msg: AppendEntries[Command]): ReplicatedLog[Command] = {
    val entries = replicatedLog.between(replicatedLog.committedIndex, msg.leaderCommitId)

    entries.foldLeft(replicatedLog) { case (repLog, entry) =>
      log.debug("committing entry {} on Follower, leader is committed until [{}]", entry, msg.leaderCommitId)

      handleCommitIfSpecialEntry.applyOrElse(entry, handleNormalEntry)

      repLog.commit(entry.index)
    }
  }

  // Remember the Leader that last contacted us, for redirecting clients.
  private def senderIsCurrentLeader(): Unit =
    recentlyContactedByLeader = Some(sender())

  // Ordinary entries are applied to the client state machine.
  private val handleNormalEntry: PartialFunction[Any, Unit] = {
    case entry: Entry[Command] => apply(entry.command)
  }

  private val handleCommitIfSpecialEntry: PartialFunction[Any, Unit] = {
    case Entry(jointConfig: ClusterConfiguration, _, _, _) =>
      // simply ignore applying cluster configurations onto the client state machine,
      // it's an internal thing and the client does not care about cluster config change.
  }

}
| colin-scott/akka-raft | src/main/scala/pl/project13/scala/akka/raft/Follower.scala | Scala | apache-2.0 | 5,085 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.impl
import org.apache.hadoop.fs.Path
import org.apache.spark.{SparkContext, SparkFunSuite}
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.Utils
class PeriodicRDDCheckpointerSuite extends SparkFunSuite with MLlibTestSparkContext {

  import PeriodicRDDCheckpointerSuite._

  test("Persisting") {
    // First RDD goes through the checkpointer before the loop, so its
    // SparkContext can seed the checkpointer — mirroring real usage.
    val firstRdd = createRDD(sc)
    val checkpointer = new PeriodicRDDCheckpointer[Double](10, firstRdd.sparkContext)
    checkpointer.update(firstRdd)
    var tracked = Seq(RDDToCheck(firstRdd, 1))
    checkPersistence(tracked, 1)

    // Push seven more RDDs (iterations 2 through 8), re-verifying the
    // persistence state of every RDD seen so far after each update.
    for (iter <- 2 to 8) {
      val rdd = createRDD(sc)
      checkpointer.update(rdd)
      tracked = tracked :+ RDDToCheck(rdd, iter)
      checkPersistence(tracked, iter)
    }
  }

  test("Checkpointing") {
    val tempDir = Utils.createTempDir()
    val checkpointPath = tempDir.toURI.toString
    val checkpointInterval = 2
    sc.setCheckpointDir(checkpointPath)

    val firstRdd = createRDD(sc)
    val checkpointer =
      new PeriodicRDDCheckpointer[Double](checkpointInterval, firstRdd.sparkContext)
    checkpointer.update(firstRdd)
    firstRdd.count() // materialize so the checkpoint can actually be written
    var tracked = Seq(RDDToCheck(firstRdd, 1))
    checkCheckpoint(tracked, 1, checkpointInterval)

    for (iter <- 2 to 8) {
      val rdd = createRDD(sc)
      checkpointer.update(rdd)
      rdd.count()
      tracked = tracked :+ RDDToCheck(rdd, iter)
      checkCheckpoint(tracked, iter, checkpointInterval)
    }

    // Once all checkpoints are removed, no tracked RDD may leave files behind.
    checkpointer.deleteAllCheckpoints()
    tracked.foreach(toCheck => confirmCheckpointRemoved(toCheck.rdd))

    Utils.deleteRecursively(tempDir)
  }
}
private object PeriodicRDDCheckpointerSuite {

  /** Pairs an RDD with the 1-based order in which it was fed to the checkpointer. */
  case class RDDToCheck(rdd: RDD[Double], gIndex: Int)

  /** Creates a tiny deterministic RDD to use as checkpointer input. */
  def createRDD(sc: SparkContext): RDD[Double] = {
    sc.parallelize(Seq(0.0, 1.0, 2.0, 3.0))
  }

  /** Checks the persistence state of every tracked RDD. */
  def checkPersistence(rdds: Seq[RDDToCheck], iteration: Int): Unit = {
    rdds.foreach { g =>
      checkPersistence(g.rdd, g.gIndex, iteration)
    }
  }

  /**
   * Check storage level of rdd.
   * @param gIndex  Index of rdd in order inserted into checkpointer (from 1).
   * @param iteration  Total number of rdds inserted into checkpointer.
   */
  def checkPersistence(rdd: RDD[_], gIndex: Int, iteration: Int): Unit = {
    try {
      // Only the two most recently inserted RDDs should remain persisted.
      if (gIndex + 2 < iteration) {
        assert(rdd.getStorageLevel == StorageLevel.NONE)
      } else {
        assert(rdd.getStorageLevel != StorageLevel.NONE)
      }
    } catch {
      // Re-wrap with diagnostic context, preserving the original assertion
      // message (consistent with checkCheckpoint below, which already did).
      case e: AssertionError =>
        throw new Exception(s"PeriodicRDDCheckpointerSuite.checkPersistence failed with:\n" +
          s"\t gIndex = $gIndex\n" +
          s"\t iteration = $iteration\n" +
          s"\t rdd.getStorageLevel = ${rdd.getStorageLevel}\n" +
          s"  AssertionError message: ${e.getMessage}")
    }
  }

  /** Checks the checkpoint state of every tracked RDD, newest first. */
  def checkCheckpoint(rdds: Seq[RDDToCheck], iteration: Int, checkpointInterval: Int): Unit = {
    rdds.reverse.foreach { g =>
      checkCheckpoint(g.rdd, g.gIndex, iteration, checkpointInterval)
    }
  }

  /** Asserts that the RDD's checkpoint files no longer exist on disk. */
  def confirmCheckpointRemoved(rdd: RDD[_]): Unit = {
    // Note: We cannot check rdd.isCheckpointed since that value is never updated.
    // Instead, we check for the presence of the checkpoint files.
    // This test should continue to work even after this rdd.isCheckpointed issue
    // is fixed (though it can then be simplified and not look for the files).
    val hadoopConf = rdd.sparkContext.hadoopConfiguration
    rdd.getCheckpointFile.foreach { checkpointFile =>
      val path = new Path(checkpointFile)
      val fs = path.getFileSystem(hadoopConf)
      assert(!fs.exists(path), "RDD checkpoint file should have been removed")
    }
  }

  /**
   * Check checkpointed status of rdd.
   * @param gIndex  Index of rdd in order inserted into checkpointer (from 1).
   * @param iteration  Total number of rdds inserted into checkpointer.
   */
  def checkCheckpoint(
      rdd: RDD[_],
      gIndex: Int,
      iteration: Int,
      checkpointInterval: Int): Unit = {
    try {
      if (gIndex % checkpointInterval == 0) {
        // We allow 2 checkpoint intervals since we perform an action (checkpointing a second rdd)
        // only AFTER PeriodicRDDCheckpointer decides whether to remove the previous checkpoint.
        if (iteration - 2 * checkpointInterval < gIndex && gIndex <= iteration) {
          assert(rdd.isCheckpointed, "RDD should be checkpointed")
          // Fix: an RDD has at most ONE checkpoint file (getCheckpointFile is an
          // Option); the old message "should have 2 checkpoint files" was a
          // copy-paste from the Graph checkpointer suite (vertices + edges).
          assert(rdd.getCheckpointFile.nonEmpty, "RDD should have a checkpoint file")
        } else {
          confirmCheckpointRemoved(rdd)
        }
      } else {
        // RDD should never be checkpointed
        assert(!rdd.isCheckpointed, "RDD should never have been checkpointed")
        assert(rdd.getCheckpointFile.isEmpty, "RDD should not have any checkpoint files")
      }
    } catch {
      case e: AssertionError =>
        throw new Exception(s"PeriodicRDDCheckpointerSuite.checkCheckpoint failed with:\n" +
          s"\t gIndex = $gIndex\n" +
          s"\t iteration = $iteration\n" +
          s"\t checkpointInterval = $checkpointInterval\n" +
          s"\t rdd.isCheckpointed = ${rdd.isCheckpointed}\n" +
          s"\t rdd.getCheckpointFile = ${rdd.getCheckpointFile.mkString(", ")}\n" +
          s"  AssertionError message: ${e.getMessage}")
    }
  }
}
| javalovelinux/SparkGroovyScript | mllib/src/test/scala/org/apache/spark/mllib/impl/PeriodicRDDCheckpointerSuite.scala | Scala | apache-2.0 | 6,325 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.