code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Copyright 2010 Sanjiv Sahayam
* Licensed under the Apache License, Version 2.0
*/
package shortbread
import sbt._
import org.openqa.selenium.remote.RemoteWebDriver
import scala.Option
trait ShortbreadPlugin extends ShortBreadProperties with PluginSupport with ConsolePrinter { this:DefaultWebProject =>
//IO context where all things IO are run.
// Runs every configured page against every configured webdriver. Each driver is
// acquired via open(nd) and released via close(exitOnCompletion) through
// runSafelyWithResource. NOTE(review): the declared result is Option[String] but
// the last expression is driverSeq.map(...) — confirm driverSeq's type makes this
// line up with PluginSupport's contract.
override def runTestScripts: Option[String] = {
printScriptLocation(testScriptPath)
// one page-loading function per configured URL
val pages:Seq[(RemoteWebDriver) => Unit] = getUrls map (loadPage(_))
driverSeq.map(nd => runSafelyWithResource[RemoteWebDriver, Unit, Unit]{
driver => {
printDriver(nd.name)
pages.map { p =>
p(driver)
// collect and print the test summary produced by loading this page
val summary = getSummary(driver)
printResults(summary)
failOnTestError { summary.hasFailures }
}
}}{open(nd)}{close(exitOnCompletion)})
}
}
| ssanj/Shortbread | src/main/scala/ShortbreadPlugin.scala | Scala | apache-2.0 | 868 |
package org.fusesource.camel.tooling.util
import java.io.File
import org.junit.Assert._
/**
* Verifies that a blueprint XML route whose attribute value contains the
* literal character '>' is still parsed into exactly one endpoint URI.
*/
class XmlAttributeParseTest extends RouteXmlTestSupport {
test("parses valid XML file with > in xml attribute") {
val x = assertRoutes(new File(baseDir, "src/test/resources/blueprintWithGreaterThanInAttribute.xml"), 1, CamelNamespaces.blueprintNS)
val uris = x.endpointUris
// the failure message includes the actual URIs for easier diagnosis
expect(1, "endpoint uris " + uris){ uris.size }
assertTrue(uris.contains("seda:myConfiguredEndpoint"))
}
}
/*
* Copyright (C) 2016-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.internal.scaladsl.broker.kafka
import akka.util.ByteString
import com.lightbend.lagom.scaladsl.api.deser.MessageSerializer.{ NegotiatedDeserializer, NegotiatedSerializer }
import org.apache.kafka.common.serialization.{ Deserializer, Serializer }
/**
 * Bridges a Lagom `NegotiatedDeserializer` to Kafka's `Deserializer` so that
 * records read from Kafka are decoded into the application's message type.
 */
private[lagom] class ScaladslKafkaDeserializer[T](deserializer: NegotiatedDeserializer[T, ByteString]) extends Deserializer[T] {

  // Stateless adapter: Kafka's configuration hook is a no-op here.
  override def configure(configs: java.util.Map[String, _], isKey: Boolean): Unit = ()

  override def deserialize(topic: String, data: Array[Byte]): T = {
    val payload = ByteString(data)
    deserializer.deserialize(payload)
  }

  // No underlying resources to release.
  override def close(): Unit = ()
}
/**
 * Bridges a Lagom `NegotiatedSerializer` to Kafka's `Serializer` so that
 * application messages can be encoded to byte arrays and published to Kafka.
 */
private[lagom] class ScaladslKafkaSerializer[T](serializer: NegotiatedSerializer[T, ByteString]) extends Serializer[T] {

  // Stateless adapter: Kafka's configuration hook is a no-op here.
  override def configure(configs: java.util.Map[String, _], isKey: Boolean): Unit = ()

  override def serialize(topic: String, data: T): Array[Byte] = {
    val bytes = serializer.serialize(data)
    bytes.toArray
  }

  // No underlying resources to release.
  override def close(): Unit = ()
}
| edouardKaiser/lagom | service/scaladsl/kafka/client/src/main/scala/com/lightbend/lagom/internal/scaladsl/broker/kafka/KafkaSerializers.scala | Scala | apache-2.0 | 1,484 |
/*
* Create a `Process[IO,O]` from the lines of a file, using
* the `resource` combinator above to ensure the file is closed
* when processing the stream of lines is finished.
*/
def lines(filename: String): Process[IO,String] =
resource
{ IO(io.Source.fromFile(filename)) } // acquire
{ src =>
lazy val iter = src.getLines // a stateful iterator
// pull one line from the iterator, None at end-of-file
def step = if (iter.hasNext) Some(iter.next) else None
// recursively emit lines until `step` yields None, then halt normally
lazy val lines: Process[IO,String] = eval(IO(step)).flatMap {
case None => Halt(End)
case Some(line) => Emit(line, lines)
}
lines
}
{ src => eval_ { IO(src.close) } } // release: close the source for its effect only
/* Exercise 11: Implement `eval`, `eval_`, and use these to implement `lines`. */
// Promote an effect `F[A]` to a Process that emits the single result of
// running the effect and then halts normally; errors halt the process.
def eval[F[_],A](a: F[A]): Process[F,A] =
await[F,A,A](a) {
case Left(err) => Halt(err)
case Right(a) => Emit(a, Halt(End))
}
/* Evaluate the action purely for its effects. */
// Like `eval`, but the result is discarded: the resulting process emits nothing.
def eval_[F[_],A,B](a: F[A]): Process[F,B] =
eval[F,A](a).drain[B]
| ud3sh/coursework | functional-programming-in-scala-textbook/answerkey/streamingio/11.answer.scala | Scala | unlicense | 982 |
package com.anphoenix.demo
import java.util.Random
import scala.math.exp
import breeze.linalg.{Vector, DenseVector}
import org.apache.spark._
/**
* Logistic regression based classification, trained with full-batch
* gradient descent over a Spark RDD of labelled points.
*/
object CorrectSparkLR {
val D = 3 // Number of dimensions
val ITERATIONS = 500
// x: feature vector of length D, y: label
case class DataPoint(x: Vector[Double], y: Double)
// Parses one whitespace-separated line of doubles into a dense vector.
def parseVector(line: String): Vector[Double] = {
DenseVector(line.split(' ').map(_.toDouble))
}
// Loads "lr.data.label": first D columns are features, column D is the label.
def getDataFromFile(sc: SparkContext) = {
val lines = sc.textFile("lr.data.label")
val data = lines.map(parseVector _)
val mappedData=data.map(p=>DataPoint(p(0 to 2),p(3)))
mappedData
}
def main(args: Array[String]) {
val sc = new SparkContext("local", "SparkLR",
System.getenv("SPARK_HOME"),List("target/scala-2.10/correct-spark-lr-project_2.10-1.0.jar"))
val points = getDataFromFile(sc)
// Initialize w to all ones (despite the original comment, this is not random)
var w = DenseVector.ones[Double](D)
println("Initial w: " + w)
for (i <- 1 to ITERATIONS) {
println("On iteration " + i)
// earlier (incorrect for this label encoding) gradient kept for reference:
/** val gradient = points.map { p =>
p.x * (1 / (1 + exp(-p.y * (w.dot(p.x)))) - 1) * p.y
}.reduce(_ + _)*/
val gradient = points.map { p => p.x * (1 / (1 + exp(-w.dot(p.x)))-p.y)}.reduce(_ + _)
// 2000.0 is presumably the training-set size (mean-gradient step) — TODO confirm
w -= (gradient/2000.0)
}
println("Final w: " + w)
sc.stop()
}
}
| anphoenix/demo_spark | correctSparkLR/correctSparkLR.scala | Scala | apache-2.0 | 1,332 |
import leon.annotation._
import leon.lang._
import leon.collection._
import leon.lang._
object BatchedQueue {
// Leon verification benchmark: a batched (two-list) FIFO queue.
// The `ensuring`/`require` clauses are contracts checked by the Leon verifier.
sealed abstract class List
case class Cons(head: Int, tail: List) extends List
case object Nil extends List
// Set of elements contained in a list (abstraction function for contracts).
def content(l: List): Set[Int] = l match {
case Nil => Set.empty
case Cons(head, tail) => Set(head) ++ content(tail)
}
def content(p: Queue): Set[Int] =
content(p.f) ++ content(p.r)
// Invariant maintained by checkf: the queue is empty iff the front list is empty.
def isEmpty(p: Queue): Boolean = p.f == Nil
// f: front list (dequeue end), r: rear list stored reversed (enqueue end).
case class Queue(f: List, r: List)
def rev_append(aList: List, bList: List): List = (aList match {
case Nil => bList
case Cons(x, xs) => rev_append(xs, Cons(x, bList))
}) ensuring (content(_) == content(aList) ++ content(bList))
def reverse(list: List) = rev_append(list, Nil) ensuring (content(_) == content(list))
// Re-establishes the invariant: when the front empties, the rear is reversed into it.
def checkf(f: List, r: List): Queue = (f match {
case Nil => Queue(reverse(r), Nil)
case _ => Queue(f, r)
}) ensuring {
res => content(res) == content(f) ++ content(r)
}
// Returns the front element as a (zero- or one-element) set; empty set on an empty queue.
def head(p: Queue): Set[Int] = (
p.f match {
case Nil => Set[Int]()
case Cons(x, xs) => Set(x)
}) ensuring (
res =>
if (isEmpty(p)) true
else content(p) == res ++ content(tail(p)))
def tail(p: Queue): Queue = {
require(!isEmpty(p))
p.f match {
case Nil => p
case Cons(_, xs) => checkf(xs, p.r)
}
}
//
// def last(p: Queue): Int = {
// require(!isEmpty(p))
// p.r match {
// case Nil => reverse(p.f).asInstanceOf[Cons].head
// case Cons(x, _) => x
// }
// }
// Enqueue x. NOTE(review): the second conjunct of the postcondition asserts that
// x is in content(tail(res)) — confirm this is the intended (set-based) property.
def snoc(p: Queue, x: Int): Queue =
checkf(p.f, Cons(x, p.r)) ensuring (
res =>
content(res) == content(p) ++ Set(x) &&
(if (isEmpty(p)) true
else content(tail(res)) ++ Set(x) == content(tail(res))))
@ignore
def main(args: Array[String]): Unit = {
val pair = Queue(Cons(4, Nil), Cons(3, Nil))
println(head(pair))
println(content(pair) == head(pair) ++ content(tail(pair)))
println(head(Queue(Nil, Nil)))
}
}
| regb/leon | testcases/synthesis/condabd/benchmarks/BatchedQueue/BatchedQueue.scala | Scala | gpl-3.0 | 2,048 |
package utils
import enums.LevelEnum
import models.conf._
import play.api.{Logger, Play}
import play.api.libs.json.{JsObject, Json}
import scala.language.postfixOps
/**
* Created by jinwei on 1/7/14.
*/
object TaskTools {
// JSON formats for the task-metadata case classes (dependent formats declared first).
implicit val hostFormat = Json.format[Host_v]
implicit val envFormat = Json.format[Environment_v]
implicit val projectFormat = Json.format[Project_v]
implicit val versionFormat = Json.format[Version_v]
implicit val taskFormat = Json.format[ProjectTask_v]
/**
* Strips surrounding double quotes (and whitespace) from a string.
* @param s raw string, possibly wrapped in double quotes
* @return the trimmed, unquoted string
*/
def trimQuotes(s: String): String = {
s.trim.stripPrefix("\"").stripSuffix("\"").trim
}
/**
* Direct dependencies of a project.
* @param pid project id
* @return map of dependency project id -> optional alias
*/
def findDependencies(pid: Int): Map[Int, Option[String]] = {
ProjectDependencyHelper.findByProjectId(pid).map( t => t.dependencyId -> t.alias).toMap
}
// Unix-epoch seconds used as a unique-ish configuration file name.
def getFileName() = {
val timestamp: Long = System.currentTimeMillis / 1000
s"${timestamp}"
}
/**
* Resolves the merged properties for a project in a given environment,
* scoped to the script version associated with that environment.
* @param envId environment id
* @param projectId project id
* @return name -> value map; variables override attributes on key collision
*/
def getProperties(envId: Int, projectId: Int, templateId: Int, realVersion: String): Map[String, String] = {
// template attributes declared for this project's template and script version
val tempAttrs = TemplateItemHelper.findByTemplateId_ScriptVersion(templateId, realVersion).map(_.itemName)
val attrMap = AttributeHelper.findByProjectId(projectId).filter(a => tempAttrs.contains(a.name)).map { a => a.name -> a.value.getOrElse("")}.toMap
// environment variables scoped by envId + projectId; "t_" names must match a template item
val varMap = VariableHelper.findByEnvId_ProjectId(envId, projectId).filter(v => !v.name.startsWith("t_") || tempAttrs.contains(v.name)).map { v => v.name -> v.value}.toMap
// attributes merged with variables (variables win)
attrMap ++ varMap
}
// Hosts bound to the project in the given environment.
def findHosts(envId: Int, projectId: Int): Seq[Host] = {
HostHelper.findByEnvId_ProjectId(envId, projectId)
}
// Builds the Project_v view (hosts, merged properties, aliases, member lists).
def findProject(envId: Int, projectId: Int, realVersion: String): Project_v = {
val project = ProjectHelper.findById(projectId).get
val hosts = findHosts(envId, projectId).map(c => Host_v(c.name, c.ip, Some(c.globalVariable.map(v => v.name -> v.value).toMap),c.spiritId))
val attrs = getProperties(envId, projectId, project.templateId, realVersion)
val aliases = findAlias(project.templateId, realVersion)
val users = ProjectMemberHelper.findByProjectId(projectId)
val leaders = users.filter(p => p.level == LevelEnum.safe).map(_.jobNo)
val members = users.filter(p => p.level == LevelEnum.unsafe).map(_.jobNo)
Project_v(s"$projectId", s"${project.templateId}", project.name, hosts, Some(attrs), aliases, leaders, members)
}
// Builds Project_v views for every dependency, keyed by alias (or project name).
def findDependencies_v(envId: Int, projectId: Int, realVersion: String): Map[String, Project_v] = {
val map = findDependencies(projectId)
map.keySet.map {
pid =>
val project = ProjectHelper.findById(pid).get
// dependency hosts carry no variables and no spirit id
val hosts = findHosts(envId, project.id.get).map(c => Host_v(c.name, c.ip, None, -1))
// val attrs = getProperties(envId, project.id.get, project.templateId, realVersion).filter { t => t._1.startsWith("t_")}
val attrs = getProperties(envId, project.id.get, project.templateId, realVersion)
val aliases = findAlias(project.templateId, realVersion)
map.get(pid).get match {
case Some(aliasName) =>
aliasName -> Project_v(s"$projectId", s"${project.templateId}", aliasName, hosts, Option(attrs), aliases, Seq.empty, Seq.empty)
case _ =>
project.name -> Project_v(s"$projectId", s"${project.templateId}", project.name, hosts, Option(attrs), aliases, Seq.empty, Seq.empty)
}
}.toMap
}
def findEnvironment_v(envId: Int): Environment_v = {
val env = EnvironmentHelper.findById(envId).get
// "master" is mapped to "base": that is how the branch is named in gitfs
val scriptVersion = env.scriptVersion match {
case ScriptVersionHelper.Master => "base"
case x => x
}
Environment_v(s"$envId", env.name, scriptVersion, env.scriptVersion, env.level.toString)
}
// Template aliases for a given template and script version, as name -> value.
def findAlias(templateId: Int, scriptVersion: String): Map[String, String] = {
TemplateAliasHelper.findByTemplateId_Version(templateId, scriptVersion).map { x => x.name -> x.value}.toMap
}
/**
* Builds the task metadata object consumed by the deployment machinery.
* @param taskId task id
* @param envId environment id
* @param projectId project id
* @param versionId optional version id to deploy
* @return the assembled ProjectTask_v
*/
def generateTaskObject(taskId: Int, envId: Int, projectId: Int, versionId: Option[Int]): ProjectTask_v = {
val version: Option[Version_v] = versionId match {
case Some(id) =>
VersionHelper.findById(id).map { vs =>
Version_v(vs.id.get.toString, vs.vs)
}
case None => None
}
val env = findEnvironment_v(envId)
val project = findProject(envId, projectId, env.realVersion)
val d = findDependencies_v(envId, projectId, env.realVersion)
// JSON object of componentName -> md5sum for the script version, empty on lookup failure
val saltComponent = ScriptVersionHelper.findByName(env.realVersion) match {
case Some(sv) =>
var json = Json.obj()
ComponentMd5sumHelper.findByScriptVersionId(sv.id.get).foreach {
t =>
json ++= Json.obj(t.componentName -> t.md5sum)
}
json
case _ =>
Logger.error(s"${env.realVersion} 找不到相应的版本")
Json.parse("{}").as[JsObject]
}
new ProjectTask_v(project, d, env, s"$taskId", version, s"${getFileName()}", None, ConfHelp.system, saltComponent = saltComponent)
}
// Host lookup by name; throws IndexOutOfBoundsException when no host matches.
def generateCurrent(machine: String, task: ProjectTask_v): Host_v = {
task.hosts.filter { t => t.name == machine}(0)
}
// Host lookup by position in the task's host list.
def generateCurrent(num: Int, task: ProjectTask_v): Host_v = {
task.hosts(num)
}
// Returns a JSON string of completion attributes for the code editor,
// or an error payload when evaluation fails / no hosts are bound.
def generateCodeCompleter(envId: Int, projectId: Int, versionId: Int) = {
val task = generateTaskObject(0, envId, projectId, Some(versionId))
if (task.hosts nonEmpty) {
val (ret, result) = new ScriptEngineUtil(task, None).getAttrs()
if (ret) {
result
} else {
s"""{"$result", "error"}"""
}
} else {
s"""{"没有关联机器!":"error"}"""
}
}
}
// Lazily-read application configuration values (Play configuration keys),
// with defaults suitable for local development.
object ConfHelp {
val app = Play.current
lazy val logPath = app.configuration.getString("salt.log.dir").getOrElse("target/saltlogs")
lazy val confPath: String = app.configuration.getString("salt.file.pkgs").getOrElse("target/pkgs")
lazy val componentIgnore: Seq[String] = app.configuration.getStringSeq("git.formulas.componentIgnore").getOrElse(Seq.empty[String])
// All "bugatti.system.*" keys, re-keyed without the prefix.
lazy val system: Map[String, String] = {
app.configuration.keys.filter(_.startsWith("bugatti.system.")).map { key =>
key.replace("bugatti.system.", "") -> app.configuration.getString(key).getOrElse("")
}.toMap
}
lazy val catalinaWSUrl = app.configuration.getString("bugatti.catalina.websocketUrl").getOrElse("http://0.0.0.0:3232/")
}
// Host view: optional per-host variables and the spirit (agent) id.
case class Host_v(name: String, ip: String, attrs: Option[Map[String, String]], spiritId: Int)
// Environment view: scriptVersion is the gitfs branch, realVersion the configured one.
case class Environment_v(id: String, name: String, scriptVersion: String, realVersion: String, level: String)
// Project view with hosts, merged properties, aliases and member job numbers.
case class Project_v(id: String, templateId: String, name: String, hosts: Seq[Host_v], attrs: Option[Map[String, String]], alias: Map[String, String], leaders: Seq[String], members: Seq[String])
case class Version_v(id: String, name: String)
// Full task metadata handed to the deployment engine: project fields flattened
// together with dependencies, environment, version and salt component checksums.
case class ProjectTask_v(id: String, templateId: String, name: String, hosts: Seq[Host_v],
attrs: Option[Map[String, String]], alias: Map[String, String],
leaders: Seq[String], members: Seq[String],
dependence: Map[String, Project_v], env: Environment_v,
taskId: String, version: Option[Version_v], confFileName: String,
cHost: Option[Host_v], system: Map[String, String],
taskName: String,
grains: JsObject = Json.parse("{}").as[JsObject],
saltComponent: JsObject =Json.parse("{}").as[JsObject]) {
// Convenience constructor that flattens a Project_v; taskName defaults to ""
// and grains to an empty JSON object.
def this(project: Project_v, dependence: Map[String, Project_v], env: Environment_v,
taskId: String, version: Option[Version_v], confFileName: String,
cHost: Option[Host_v], system: Map[String, String], saltComponent: JsObject) =
this(project.id, project.templateId, project.name, project.hosts,
project.attrs, project.alias,
project.leaders, project.members,
dependence, env,
taskId, version, confFileName,
cHost, system, "", Json.parse("{}").as[JsObject], saltComponent)
}
| sdgdsffdsfff/bugatti | app/utils/TaskTools.scala | Scala | bsd-2-clause | 8,521 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import org.apache.kafka.common.record.Record
/**
* This class contains the different Kafka versions.
* Right now, we use them for upgrades - users can configure the version of the API brokers will use to communicate between themselves.
* This is only for inter-broker communications - when communicating with clients, the client decides on the API version.
*
* Note that the ID we initialize for each version is important.
* We consider a version newer than another, if it has a higher ID (to avoid depending on lexicographic order)
*
* Since the api protocol may change more than once within the same release and to facilitate people deploying code from
* trunk, we have the concept of internal versions (first introduced during the 0.10.0 development cycle). For example,
* the first time we introduce a version change in a release, say 0.10.0, we will add a config value "0.10.0-IV0" and a
* corresponding case object KAFKA_0_10_0-IV0. We will also add a config value "0.10.0" that will be mapped to the
* latest internal version object, which is KAFKA_0_10_0-IV0. When we change the protocol a second time while developing
* 0.10.0, we will add a new config value "0.10.0-IV1" and a corresponding case object KAFKA_0_10_0-IV1. We will change
* the config value "0.10.0" to map to the latest internal version object KAFKA_0_10_0-IV1. The config value of
* "0.10.0-IV0" is still mapped to KAFKA_0_10_0-IV0. This way, if people are deploying from trunk, they can use
* "0.10.0-IV0" and "0.10.0-IV1" to upgrade one internal version at a time. For most people who just want to use
* released version, they can use "0.10.0" when upgrading to the 0.10.0 release.
*/
object ApiVersion {
  // This implicit is necessary due to: https://issues.scala-lang.org/browse/SI-8541
  implicit def orderingByVersion[A <: ApiVersion]: Ordering[A] = Ordering.by(_.id)

  // Every accepted config string (including release shorthands like "0.10.0")
  // mapped to its version object.
  private val versionNameMap = Map(
    "0.8.0" -> KAFKA_0_8_0,
    "0.8.1" -> KAFKA_0_8_1,
    "0.8.2" -> KAFKA_0_8_2,
    "0.9.0" -> KAFKA_0_9_0,
    // 0.10.0-IV0 is introduced for KIP-31/32 which changes the message format.
    "0.10.0-IV0" -> KAFKA_0_10_0_IV0,
    // 0.10.0-IV1 is introduced for KIP-36(rack awareness) and KIP-43(SASL handshake).
    "0.10.0-IV1" -> KAFKA_0_10_0_IV1,
    "0.10.0" -> KAFKA_0_10_0_IV1,
    // introduced for JoinGroup protocol change in KIP-62
    "0.10.1-IV0" -> KAFKA_0_10_1_IV0,
    // 0.10.1-IV1 is introduced for KIP-74(fetch response size limit).
    "0.10.1-IV1" -> KAFKA_0_10_1_IV1,
    // introduced ListOffsetRequest v1 in KIP-79
    "0.10.1-IV2" -> KAFKA_0_10_1_IV2,
    "0.10.1" -> KAFKA_0_10_1_IV2
  )

  private val versionPattern = "\\\\.".r

  /**
   * Resolves a version config string to an ApiVersion, truncating longer
   * strings (e.g. "0.10.0.1") to their first three dot-separated components.
   * @throws IllegalArgumentException if the version string is unknown
   */
  def apply(version: String): ApiVersion =
    versionNameMap.getOrElse(versionPattern.split(version).slice(0, 3).mkString("."),
      throw new IllegalArgumentException(s"Version `$version` is not a valid version"))

  /** The most recent known version (highest id). Explicit result type: public API. */
  def latestVersion: ApiVersion = versionNameMap.values.max
}
/**
 * A single inter-broker protocol version. Instances are ordered by `id`,
 * which must increase strictly with protocol age (see object comment above).
 */
sealed trait ApiVersion extends Ordered[ApiVersion] {
// human-readable version string used in configs and toString
val version: String
// on-disk/wire message format magic value associated with this version
val messageFormatVersion: Byte
// monotonically increasing ordinal; the sole basis for ordering
val id: Int
override def compare(that: ApiVersion): Int =
ApiVersion.orderingByVersion.compare(this, that)
override def toString: String = version
}
// Keep the IDs in order of versions: `id` is the ordering key used by
// ApiVersion.compare, so new versions must append with a strictly larger id.
case object KAFKA_0_8_0 extends ApiVersion {
val version: String = "0.8.0.X"
val messageFormatVersion: Byte = Record.MAGIC_VALUE_V0
val id: Int = 0
}
case object KAFKA_0_8_1 extends ApiVersion {
val version: String = "0.8.1.X"
val messageFormatVersion: Byte = Record.MAGIC_VALUE_V0
val id: Int = 1
}
case object KAFKA_0_8_2 extends ApiVersion {
val version: String = "0.8.2.X"
val messageFormatVersion: Byte = Record.MAGIC_VALUE_V0
val id: Int = 2
}
case object KAFKA_0_9_0 extends ApiVersion {
val version: String = "0.9.0.X"
val messageFormatVersion: Byte = Record.MAGIC_VALUE_V0
val id: Int = 3
}
// The message format magic bumps to V1 starting at 0.10.0 (KIP-31/32).
case object KAFKA_0_10_0_IV0 extends ApiVersion {
val version: String = "0.10.0-IV0"
val messageFormatVersion: Byte = Record.MAGIC_VALUE_V1
val id: Int = 4
}
case object KAFKA_0_10_0_IV1 extends ApiVersion {
val version: String = "0.10.0-IV1"
val messageFormatVersion: Byte = Record.MAGIC_VALUE_V1
val id: Int = 5
}
case object KAFKA_0_10_1_IV0 extends ApiVersion {
val version: String = "0.10.1-IV0"
val messageFormatVersion: Byte = Record.MAGIC_VALUE_V1
val id: Int = 6
}
case object KAFKA_0_10_1_IV1 extends ApiVersion {
val version: String = "0.10.1-IV1"
val messageFormatVersion: Byte = Record.MAGIC_VALUE_V1
val id: Int = 7
}
case object KAFKA_0_10_1_IV2 extends ApiVersion {
val version: String = "0.10.1-IV2"
val messageFormatVersion: Byte = Record.MAGIC_VALUE_V1
val id: Int = 8
}
| eribeiro/kafka | core/src/main/scala/kafka/api/ApiVersion.scala | Scala | apache-2.0 | 5,522 |
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.commons.util
object JavaRuntime {
  // Detects a Java 8 runtime from the "java.version" system property ("1.8.x").
  val IsJava8: Boolean = {
    val javaVersion = System.getProperty("java.version")
    javaVersion.startsWith("1.8")
  }
}
| wiacekm/gatling | gatling-commons/src/main/scala/io/gatling/commons/util/JavaRuntime.scala | Scala | apache-2.0 | 744 |
/*
* MIT License
*
* Copyright (c) 2016 Gonçalo Marques
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.byteslounge.slickrepo.test.oracle
import com.byteslounge.slickrepo.test.{JodaTimeVersionedRepositoryTest, OracleConfig}
// Runs the shared JodaTime versioned-repository test suite against an Oracle database.
class OracleJodaTimeVersionedRepositoryTest extends JodaTimeVersionedRepositoryTest(OracleConfig.config)
| gonmarques/slick-repo | src/test/scala/com/byteslounge/slickrepo/test/oracle/OracleJodaTimeVersionedRepositoryTest.scala | Scala | mit | 1,379 |
package com.codahale.jerkson.tests
import com.codahale.jerkson.Json._
import com.codahale.jerkson.ParsingException
import java.io.ByteArrayInputStream
import org.scalatest.FlatSpec
import org.scalatest.Matchers
// Edge cases of the Jerkson JSON parser: builder reuse, nulls in arrays,
// and the exact error messages produced for malformed/invalid/empty input.
class EdgeCaseSpec extends FlatSpec with Matchers {
behavior of "Deserializing lists"
it should "doesn't cache Seq builders" in {
// parsing twice must give the same result (guards against stateful builders)
parse[List[Int]]("[1,2,3,4]") shouldBe List(1, 2, 3, 4)
parse[List[Int]]("[1,2,3,4]") shouldBe List(1, 2, 3, 4)
}
behavior of "Parsing a JSON array of ints with nulls"
it should "should be readable as a List[Option[Int]]" in {
parse[List[Option[Int]]]("[1,2,null,4]") shouldBe List(Some(1), Some(2), None, Some(4))
}
behavior of "Deserializing maps"
it should "doesn't cache Map builders" in {
parse[Map[String, Int]](""" {"one":1, "two": 2} """) shouldBe Map("one" -> 1, "two" -> 2)
parse[Map[String, Int]](""" {"one":1, "two": 2} """) shouldBe Map("one" -> 1, "two" -> 2)
}
behavior of "Parsing malformed JSON"
it should "should throw a ParsingException with an informative message" in {
the [ParsingException] thrownBy parse[Boolean]("jjf8;09") should have message
"Malformed JSON. Unrecognized token 'jjf8': was expecting ('true', 'false' or 'null') at character offset 4."
the [ParsingException] thrownBy parse[CaseClass]("{\"ye\":1") should have message
"Malformed JSON. Unexpected end-of-input: expected close marker for " +
"OBJECT at character offset 21."
}
behavior of "Parsing invalid JSON"
it should "should throw a ParsingException with an informative message" in {
val thrown = the [ParsingException] thrownBy parse[CaseClass]("900")
// regex match because the message embeds a non-deterministic object hash
thrown.getMessage should fullyMatch
("""Can not deserialize instance of com.codahale.jerkson.tests.CaseClass out of VALUE_NUMBER_INT token\n""" +
""" at \[Source: java.io.StringReader@[0-9a-f]+; line: 1, column: 1\]""").r
the [ParsingException] thrownBy parse[CaseClass]("{\"woo\": 1}") should have message "Invalid JSON. Needed [id, name], but found [woo]."
}
behavior of "Parsing an empty document"
it should "should throw a ParsingException with an informative message" in {
val input = new ByteArrayInputStream(Array.empty)
val thrown = the [ParsingException] thrownBy parse[CaseClass](input)
thrown.getMessage should startWith("No content to map due to end-of-input")
}
}
| gilt/jerkson | src/test/scala/com/codahale/jerkson/tests/EdgeCaseSpec.scala | Scala | mit | 2,389 |
/*
*
* * Copyright 2015 Skymind,Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package org.dhira.core.optimize.listeners
import org.dhira.core.nnet.api.Model
import org.dhira.core.optimize.api.IterationListener
import org.slf4j.Logger
import org.slf4j.LoggerFactory
/**
* Score iteration listener
*
* @author Adam Gibson
*/
// Companion holding the shared logger instance for the listener class below.
object ScoreIterationListener {
private val log: Logger = LoggerFactory.getLogger(classOf[ScoreIterationListener])
}
class ScoreIterationListener extends IterationListener {
// how often (in iterations) to log the score; clamped to >= 1 in iterationDone
private var printIterations: Int = 10
// set to true the first time a score is actually logged
private var isInvoked: Boolean = false
// total iterations observed so far; mutable state, so instances should not be
// shared across concurrent training runs — TODO confirm single-threaded use
private var iterCount: Long = 0
/**
* @param printIterations frequency with which to print scores (i.e., every printIterations parameter updates)
*/
def this(printIterations: Int) {
this()
this.printIterations = printIterations
}
/** Default constructor printing every 10 iterations */
// def this() {
// this()
// }
// Whether invoke has been called at least once.
def invoked: Boolean = {
return isInvoked
}
def invoke {
this.isInvoked = true
}
// Logs the model's score every `printIterations` iterations (including iteration 0).
def iterationDone(model: Model, iteration: Int) {
if (printIterations <= 0) printIterations = 1
if (iterCount % printIterations == 0) {
invoke
val result: Double = model.score
ScoreIterationListener.log.info("Score at iteration " + iterCount + " is " + result)
}
iterCount += 1
}
}
package ozmi.lambda_core
package lib
import org.kiama.rewriting.Strategy
import org.kiama.rewriting.Rewriter._
// Registry of built-in type classes/instances and the combined Kiama rewrite
// strategy used to evaluate expressions.
object Library {
val typeClasses : Seq[TypeClass] = Seq (Eq, Num, Ord, Row)
val typeInstances : Seq[TypeInstance] = Seq (Bool, Coll, Decimal, Integer)
// All evaluation rules combined with `+` (left-biased choice), applied to a
// fixed point via Kiama's `reduce`.
lazy val evalRules : Strategy =
reduce ((typeClasses map {_.evalRules}) ++ (typeInstances map {_.evalRules}) reduceLeft {_ + _})
// Rewrites an expression to normal form using the library's evaluation rules.
def eval (expr : Expr) : Expr =
rewrite (Library.evalRules) (expr)
}
package scalariform.formatter
import scalariform.parser._
import scalariform.formatter.preferences._
// format: OFF
// Each fixture pair is `input ==> expected`: the formatter must turn the left
// string into the right one under AllowParamGroupsOnNewlines + forced dangling parens.
class ParamGroupsOnNewlineTest extends AbstractFormatterTest {
{
implicit val formattingPreferences: FormattingPreferences = FormattingPreferences.
setPreference(AllowParamGroupsOnNewlines, true).
setPreference(DanglingCloseParenthesis, Force)
"""private[this]
|def bar
|[A <: Foo: Bar, B: Baz: Bam]
|(x: A, y: B)
|(implicit z: Int)
|: Foo
|= {
|
|val a = x
|}""" ==>
"""private[this] def bar
|  [A <: Foo: Bar, B: Baz: Bam]
|  (x: A, y: B)
|  (implicit z: Int): Foo = {
|
|  val a = x
|}"""
"""class Func[T]
|extends (Int => Int) {
|
|val body = ???
|}""" ==>
"""class Func[T]
|  extends (Int => Int) {
|
|  val body = ???
|}"""
"""def method
|(a: A)(b: B)
|(c: C)
|(implicit d: D) = {
|
|body(a, b, c)
|}""" ==>
"""def method
|  (a: A)(b: B)
|  (c: C)
|  (implicit d: D) = {
|
|  body(a, b, c)
|}"""
"""class X
|[T]
|(a: A)
|(b: B, c: C)""" ==>
"""class X
|  [T]
|  (a: A)
|  (b: B, c: C)"""
"""object InlineComment {
|
|def add
|(x: Int)
|// comment
|(y: Int) = x + y
|}""" ==>
"""object InlineComment {
|
|  def add
|    (x: Int)
|    // comment
|    (y: Int) = x + y
|}"""
"""class PreserveInline (a: A)(b: B)""" ==>
"""class PreserveInline(a: A)(b: B)"""
// ensure standalone lambda retains indent level of sibling(s)
//
"""val lambdaParenFollowsBrace = {
|val foo = {1}
|(x: Int) => 42
|}""" ==>
"""val lambdaParenFollowsBrace = {
|  val foo = { 1 }
|  (x: Int) => 42
|}"""
"""val lambdaParenFollowsParen = {
|val foo = (1)
|(x: Int) => 42
|}""" ==>
"""val lambdaParenFollowsParen = {
|  val foo = (1)
|  (x: Int) => 42
|}"""
"""val lambdaParenFollowsMethodParen = {
|println("foo")
|(x: Int) => 42
|}""" ==>
"""val lambdaParenFollowsMethodParen = {
|  println("foo")
|  (x: Int) => 42
|}"""
"""class Outer[T]
|(a: A)
|(b: B, c: C) {
|
|class InnerA
|(a: A)
|(b: B, c: C) {
|
|class InnerA1
|(a: A)
|(b: B, c: C)
|}
|
|class InnerB
|(a: A)
|(b: B, c: C) {
|
|class InnerB1
|(a: A)
|(b: B, c: C)
|}
|}""" ==>
"""class Outer[T]
|  (a: A)
|  (b: B, c: C) {
|
|  class InnerA
|    (a: A)
|    (b: B, c: C) {
|
|    class InnerA1
|      (a: A)
|      (b: B, c: C)
|  }
|
|  class InnerB
|    (a: A)
|    (b: B, c: C) {
|
|    class InnerB1
|      (a: A)
|      (b: B, c: C)
|  }
|}"""
}
// format: ON
override val debug = false
// Test-harness plumbing: parse a full definition and format it at indent level 0.
def parse(parser: ScalaParser): Result = parser.nonLocalDefOrDcl()
type Result = FullDefOrDcl
def format(formatter: ScalaFormatter, result: Result): FormatResult =
formatter.format(result)(FormatterState(indentLevel = 0))
}
| mdr/scalariform | scalariform/src/test/scala/scalariform/formatter/ParamGroupsOnNewlineTest.scala | Scala | mit | 3,121 |
package devnull.sessions
import java.util.UUID
import devnull.UuidFromString
import org.json4s.native.JsonMethods.parse
import org.scalatest.{FunSpec, Matchers}
// NOTE(review): the first two tests hit the live endpoint below and therefore
// require network access; only the JSON-parsing tests run offline.
class SleepingPillHttpSessionClientSpec extends FunSpec with Matchers {
describe("SleepingPillHttpSessionClient") {
val baseUrl = "https://test-sleepingpill.javazone.no"
it("should be able to fetch session") {
// compact (no-dash) UUID form accepted via UuidFromString
val eventId =
UuidFromString("30d5c2f1cb214fc8b0649a44fdf3b4bf").map(EventId.apply).right.get
val sessionId =
UuidFromString("32e6aca94d934fc1bc05073c68bc681e").map(SessionId.apply).right.get
val client = new SleepingPillHttpSessionClient(baseUrl)
val session = client.session(eventId, sessionId)
session should not be empty
session.get.eventId should be(eventId)
}
it("should be able to fetch session for new api") {
// canonical dashed UUID form for the newer API
val eventId =
EventId(UUID.fromString("30d5c2f1-cb21-4fc8-b064-9a44fdf3b4bf"), false)
val sessionId =
SessionId(UUID.fromString("32e6aca9-4d93-4fc1-bc05-073c68bc681e"), false)
val client = new SleepingPillHttpSessionClient(baseUrl)
val session = client.session(eventId, sessionId)
session should not be empty
session.get.eventId should be(eventId)
}
it("parse response with time slots") {
val sid = "ed8ef197-f256-443f-b907-34725cf4b038"
val eid = "3baa25d3-9cca-459a-90d7-9fc349209289"
val startTime = "2016-09-07T08:20:00Z"
val endTime = "2016-09-07T09:20:00Z"
val input = parse(s"""{
| "sessions": [{
| "sessionId": "$sid",
| "conferenceId": "$eid",
| "startTimeZulu": "$startTime",
| "endTimeZulu": "$endTime",
| "format": "workshop"
| }]
|} """.stripMargin)
val res: List[Session] = SessionJson.parse(input)
res should have length 1
}
it("parse response and ignore session without timeslots") {
val eid = "3baa25d3-9cca-459a-90d7-9fc349209289"
val sid1 = "ed8ef197-f256-443f-b907-34725cf4b038"
val sid2 = "ed8ef197-f256-443f-b907-34725cf4b039"
val startTime = "2016-09-07T08:20:00Z"
val endTime = "2016-09-07T09:20:00Z"
val input = parse(s"""{
| "sessions": [{
| "sessionId": "$sid1",
| "conferenceId": "$eid",
| "startTimeZulu": "$startTime",
| "endTimeZulu": "$endTime",
| "format": "presentation"
| },
| {
| "sessionId": "$sid2",
| "conferenceId": "$eid",
| "format": "presentation"
| }]
|} """.stripMargin)
// the second session has no start/end time and must be dropped
val sessions: List[Session] = SessionJson.parse(input)
sessions should have length 1
}
}
}
| javaBin/devnull | src/test/scala/devnull/sessions/SleepingPillHttpSessionClientSpec.scala | Scala | apache-2.0 | 2,839 |
package amphip.stoch
import scala.language.implicitConversions
import scala.collection.mutable.ListBuffer
import scalaz.std.option.optionSyntax._
import scalaz.syntax.show._
import scalaz.syntax.std.boolean._
import scalaz.Scalaz.stringInstance
//import scalaz.std.list.listSyntax._
import cats.syntax.list._
import spire.math._
import spire.implicits._
import amphip.base._
import amphip.base.implicits._
import amphip.model.ast._
import amphip.model.dsl._
import amphip.data.dsl._
import amphip.data._
import amphip.data.ModelData._
import StochData._
/** Immutable description of the stochastic structure of a model: the ordered
  * stages, a tree of basic/custom scenarios with (normalized) probabilities,
  * parameter data attached to scenarios, and lazily derived views used when
  * emitting model data (balanced tree, final scenarios, non-anticipativity
  * link data).  Every "mutator" returns an updated copy.
  */
case class StochData private (
    stages: List[Stage],
    basicScenarios  : LinkedMap[Stage   , LinkedMap[BasicScenario, Rational]],
    customScenarios : LinkedMap[Scenario, LinkedMap[BasicScenario, Rational]],
    deletedScenarios: LinkedMap[Scenario, Set[BasicScenario]],
    defaults    : LinkedMap[ParamStat, ParamStatData],
    basicData   : LinkedMap[Stage   , LinkedMap[BasicScenario, LinkedMap[ParamStat, ParamStatData]]],
    scenarioData: LinkedMap[Scenario , LinkedMap[ParamStat   , ParamStatData]],
    separated: Boolean) {
  // Replaces the ordered list of stages.
  def stages(ts: Stage*): StochData = copy(stages = ts.toList)
  /* always normalizes probabilities to 1 */
  def basicScenarios(t: Stage, bss: (BasicScenario, Rational)*): StochData = {
    val bssMap = LinkedMap(bss: _*)
    val totProb = bssMap.unzip._2.qsum
    require(totProb != 0, "sum of probabilities equals zero")
    copy(basicScenarios = basicScenarios + (t -> normalize(bssMap)))
  }
  /*
   Replaces the `BasicScenario`s at `history` with the specified `replacement`.
   The basic scenarios not included in the replacement list are marked as deleted.
   The probabilities are always normalized to 1.
   */
  def customScenarios(history: Scenario, replacement: (BasicScenario, Rational)*): StochData = {
    val replacementMap = LinkedMap(replacement: _*)
    require(replacementMap.unzip._2.qsum != 0, "sum of probabilities equals zero")
    // Basic scenarios at the stage following `history` that are not mentioned
    // in the replacement become "deleted" for this history.
    val deletedBS =
      (for {
        t <- stages.lift(history.size)
        bss <- basicScenarios.get(t)
        cbssBS = replacementMap.unzip._1.toList
        dbss = bss.unzip._1.filter { !cbssBS.contains(_) }.toSet
      } yield {
        dbss
      }) | Set.empty
    val currDS = deletedScenarios.getOrElse(history, Set.empty)
    val normalized = normalize(replacementMap)
    copy(
      customScenarios = customScenarios + (history -> normalized),
      deletedScenarios = deletedScenarios + (history -> (currDS ++ deletedBS)))
  }
  // Registers fallback data for `p`, used when no scenario/basic data exists.
  def default(p: ParamStat, values: ParamStatData): StochData = {
    copy(defaults = defaults + (p -> values))
  }
  // Attaches parameter data to a (stage, basic-scenario) pair.
  def basicData(t: Stage, bs: BasicScenario, p: ParamStat, values: ParamStatData): StochData = {
    val valuesByStage = basicData.getOrElse(t, LinkedMap.empty)
    val valuesByBS = valuesByStage.getOrElse(bs, LinkedMap.empty)
    val newValuesByBS = valuesByBS + (p -> values)
    val newValuesByStage = valuesByStage + (bs -> newValuesByBS)
    val newBasicData = basicData + (t -> newValuesByStage)
    copy(basicData = newBasicData)
  }
  // Attaches parameter data to a specific scenario (overrides basic data).
  def scenarioData(scen: Scenario, p: ParamStat, values: ParamStatData): StochData = {
    val valuesByScen = scenarioData.getOrElse(scen, LinkedMap.empty)
    val newValuesByScen = valuesByScen + (p -> values)
    val newScenarioData = scenarioData + (scen -> newValuesByScen)
    copy(scenarioData = newScenarioData)
  }
  // Bulk variant of `scenarioData` for several parameters at once.
  def scenarioData(scen: Scenario, paramData: Iterable[(ParamStat, ParamStatData)]): StochData = {
    paramData.foldLeft(this) {
      case (model, (p, values)) => model.scenarioData(scen, p, values)
    }
  }
  def separated(x: Boolean): StochData = copy(separated = x)
  lazy private[this] val balancedTreeIdent: List[List[(BasicScenario, Rational)]] = balancedTree(List.empty, identity)
  lazy val balancedTree: List[Scenario] = balancedTreeIdent.map(_.map(_._1)) //balancedTree(List.empty, _._1)
  // Every scenario prefix explicitly marked as deleted via `customScenarios`.
  lazy val deletedScenariosList: List[Scenario] =
    for {
      (history, bss) <- deletedScenarios.toList
      deleted <- bss.map(bs => history :+ bs)
    } yield {
      deleted
    }
  // Cartesian expansion of the basic scenarios of the stages remaining after
  // `seed`; `ext` projects each (BasicScenario, probability) pair.
  def balancedTree[T](seed: List[T], ext: ((BasicScenario, Rational)) => T): List[List[T]] = {
    val rstages = stages.drop(seed.size)
    val basis =
      basicScenarios
        .filterKeys(rstages.contains)
        .mapValues(_.map(ext).toList)
    val zero = seed.toNel.toList.flatMap(x => List(x.toList.reverse))
    val tree = basis.keys.foldLeft(zero) { (data, t) =>
      val bss = basis(t)
      if (data.isEmpty)
        bss.map(List(_))
      else
        for {
          s <- data
          bs <- bss
        } yield {
          bs :: s
        }
    }
    tree.map(_.reverse)
  }
  // Final scenarios paired element-wise with their per-stage probabilities.
  lazy val finalScenariosIdent: List[List[(BasicScenario, Rational)]] = {
    for {
      (s,p) <- finalScenarios.zip(finalProbabilities)
    } yield {
      s.zip(p)
    }
  }
  // Leaves of the scenario tree: balanced tree plus custom scenarios, minus
  // deleted prefixes, sorted by basic-scenario declaration order.
  lazy val finalScenarios: List[Scenario] = {
    val customTree =
      for {
        (history, bss) <- customScenarios.toList
        scen <- bss.map(p => history :+ p._1)
        scenTree <- balancedTree(scen, _._1)
        // avoids generating scenarios shorter than `stages.size` if there are
        // no basic scenarios, ie, `balancedTree` generates nothing.
        if scenTree.size == stages.size
      } yield {
        scenTree
      }
    val base = (customTree ::: balancedTree).distinct
    val target = base.filter { ss =>
      deletedScenariosList.forall(!ss.startsWith(_))
    }
    // TODO check why scala is faster than using spire sorting ...
    import Ordering.Implicits._
    target.sortBy(bsIndex)
  }
  // Groups scenarios by stage; for each stage, maps each history (prefix) to
  // the distinct elements appearing at that stage.
  def byStage[T](scenarios: List[List[T]]): LinkedMap[Stage, LinkedMap[List[T], List[T]]] = {
    LinkedMap() ++ stages.map(x => x -> onStage(scenarios, x))
  }
  def onStage[T](scenarios: List[List[T]], t: Stage): LinkedMap[List[T], List[T]] = {
    val ind = stages.indexOf(t)
    val group = scenarios.groupByLinked(_.take(ind))
    group.map { case (k, v) => k -> v.map(x => x(ind)).distinct }
  }
  // All (possibly non-final) scenarios that exist at each stage.
  lazy val scenariosByStage: LinkedMap[Stage, List[Scenario]] =
    for {
      (stage, values) <- byStage(finalScenarios)
      scenarios = values.flatMap { case (history, bss) => bss.map(history :+ _) }.toList
    } yield {
      stage -> scenarios
    }
  lazy val scenarios: List[Scenario] = scenariosByStage.values.flatten.toList
  lazy val numScenarios: Int = finalScenarios.size
  lazy val numStages : Int = stages.size
  // 1-based index ranges used when emitting model data.
  def TData: Range = 1 to numStages
  def SData: Range = 1 to numScenarios
  // For each stage (1-based) the range of scenario indices present there.
  def STData: List[(Int, Range)] = {
    for {
      (stage, t_) <- stages.zipWithIndex
      sts <- scenariosByStage.get(stage)
    } yield {
      (t_ + 1) -> (1 to sts.size)
    }
  }
  // (t, s) -> index of the predecessor scenario at stage t-1.
  lazy val predecessorsData: List[(List[Int], Int)] = {
    for {
      (stage, t_) <- stages.zipWithIndex
      sts <- scenariosByStage.get(stage).toList
      (scen, s_) <- sts.zipWithIndex
      t = t_ + 1
      s = s_ + 1
      sPred <- predecesor(t, s)
    } yield {
      List(t, s) -> sPred
    }
  }
  // 1-based index, in stage t-1, of the prefix of scenario s at stage t;
  // None at the first stage.
  def predecesor(t: Int, s: Int): Option[Int] =
    for {
      stage <- stages.lift(t - 1)
      sts <- scenariosByStage.get(stage)
      (scen, _) <- sts.zipWithIndex
        .find {
          case (_, s_) => s_ == s - 1
        }
      pred = scen.take(t - 1)
      stagePred <- stages.lift(t - 2)
      stsPred <- scenariosByStage.get(stagePred)
      (_, sPred_) <- stsPred.zipWithIndex
        .find {
          case (scen, _) => scen == pred
        }
    } yield {
      sPred_ + 1
    }
  // Per-stage probability of each final scenario.  Custom probabilities take
  // precedence; balanced-tree probabilities are restricted to non-deleted
  // scenarios first.
  lazy val finalProbabilities: List[List[Rational]] = {
    /* probabilities of basic scenarios tree taking into account deleted scenarios */
    val balancedProbs = balancedTreeIdent
    val balancedProbsByStage =
      for {
        (stage, bTree) <- byStage(balancedProbs)
      } yield {
        stage ->
          (for {
            (historyP, bss) <- bTree
            history = historyP.unzip._1
            usedBS =
              bss.filter { case (bs, _) =>
                val ss = history :+ bs
                deletedScenariosList.forall(!ss.startsWith(_))
              }
            if usedBS.nonEmpty
          } yield {
            history -> (LinkedMap() ++ usedBS)
          }).toMap
      }
    val probs =
      for {
        scen <- finalScenarios
      } yield {
        for {
          p <- scen.zipWithIndex
          history = scen.take(p._2)
          customProb =
            for {
              cbss <- customScenarios.get(history)
              cprob <- cbss.get(p._1)
            } yield {
              cprob
            }
          basicProb =
            for {
              t <- stages.lift(p._2)
              bTree <- balancedProbsByStage.get(t)
              bss <- bTree.get(history)
              prob <- bss.get(p._1)
            } yield {
              prob
            }
          prob <- customProb.orElse(basicProb)
        } yield {
          prob
        }
      }
    probs
  }
  // Path probability of each final scenario, as Double (product per path).
  lazy val probabilityData: List[Double] = {
    for {
      path <- finalProbabilities
    } yield {
      path.map(_.toDouble).product
    }
  }
  // Variant multiplying exactly (Rational) before converting to Double.
  def probabilityData2: List[Double] = {
    for {
      path <- finalProbabilities
    } yield {
      //path.map(_.toDouble).product
      path.qproduct.toDouble
    }
  }
  /**
    Exact (Rational) version of `probabilityData`. Currently not used.
   */
  def probabilityDataExact: List[Rational] = {
    for {
      path <- finalProbabilities
    } yield {
      path.qproduct
    }
  }
  // Non-anticipativity handling
  /*
   Scenarios (including non-final) corresponding to each pair of final
   scenario index and stage index, with separated scenarios.
   */
  lazy private[stoch] val prefix: Array[Array[Scenario]] = {
    val res = Array.ofDim[Scenario](numScenarios, numStages)
    for {
      (scen, s) <- finalScenarios.zipWithIndex
    } {
      cfor(0)(_ < numStages, _ + 1) { t =>
        res(s)(t) = scen.take(t + 1)
      }
    }
    res
  }
  // Forces eager construction of the prefix cache.
  def initPrefixes(): Unit = {prefix; ()}
  private[this] val offset = separated.fold(1, 0)
  // True when scenarios s1 and s2 share the same history up to stage t
  // (1-based); used to emit non-anticipativity constraints.
  def samePrefix(s1: Int, s2: Int, t: Int): Boolean = {
    val t_ = t - 1 - offset
    if (t_ == -1) {
      /* if it's a separated model (offset == 1), on the first stage
      all the scenarios are the same */
      true
    } else {
      val s1_ = s1 - 1
      val s2_ = s2 - 1
      prefix(s1_)(t_) == prefix(s2_)(t_)
    }
  }
  // Dense 0/1 link matrix over (s1, s2, t).
  lazy val linkDataFull: List[(List[Int], Int)] = {
    (for {
      s1 <- SData
      s2 <- SData
      t <- TData
    } yield {
      List(s1, s2, t) -> samePrefix(s1, s2, t).fold(1, 0)
    }).toList
  }
  // Same as `linkDataFull` but computes each symmetric pair only once.
  lazy val linkDataFullDiagonal: List[(List[Int], Int)] = {
    (for {
      s1 <- SData
      s2 <- s1 to numScenarios
      t <- TData
      same = samePrefix(s1, s2, t)
      value = same.fold(1, 0)
      data <- if (s1 == s2) List(List(s1, s2, t) -> value)
      else List(List(s1, s2, t) -> value, List(s2, s1, t) -> value)
    } yield {
      data
    }).toList
  }
  // Sparse variant: lists only the 1-entries (absent entries default to 0).
  lazy val linkDataWithDefault: List[(List[Int], Int)] = {
    (for {
      s1 <- SData
      s2 <- SData
      t <- TData
      if samePrefix(s1, s2, t)
    } yield {
      List(s1, s2, t) -> 1
    }).toList
  }
  lazy val linkDataWithDefaultDiagonal: List[(List[Int], Int)] = {
    (for {
      s1 <- SData
      s2 <- s1 to numScenarios
      t <- TData
      if samePrefix(s1, s2, t)
      data <- if (s1 == s2) List(List(s1, s2, t) -> 1)
      else List(List(s1, s2, t) -> 1, List(s2, s1, t) -> 1)
    } yield {
      data
    }).toList
  }
  def linkData: List[(List[Int], Int)] = linkDataFull
  /*
   BasicScenario corresponding to each combination of final scenario index
   and stage index, with separated scenarios.
   */
  lazy private[stoch] val matrix: Array[Array[BasicScenario]] = {
    val res = Array.ofDim[BasicScenario](numScenarios, numStages)
    for {
      (scen, s) <- finalScenarios.zipWithIndex
      (bs, t) <- scen.zipWithIndex
    } {
      res(s)(t) = bs
    }
    res
  }
  // Forces eager construction of the matrix cache.
  def initMatrix(): Unit = {matrix; ()}
  // TODO take into account already separated models
  def linkSetDataBounds: List[(Int, List[(Int, Int)])] =
    linkSetDataBounds(false)
  // For each stage, ranges [ini, end] of consecutive scenario indices sharing
  // the same prefix; single-element ranges are included only when requested.
  def linkSetDataBounds(includeSingleRanges: Boolean): List[(Int, List[(Int, Int)])] = {
    val minDiff = includeSingleRanges.fold(0, 1)
    val res = ListBuffer.empty[(Int, List[(Int, Int)])]
    if (numScenarios > 0) {
      cfor(0)(_ < numStages, _ + 1) { t_ =>
        val t = t_ + 1
        val stageData = ListBuffer.empty[(Int, Int)]
        //var lastBS: BasicScenario = matrix(0)(t_)
        var ini = 1
        var end = 1
        cfor(1)(_ < numScenarios, _ + 1) { s_ =>
          val s = s_ + 1
          if (samePrefix(ini, s, t)) {
            end = s
          } else {
            if (end - ini >= minDiff) stageData += ini -> end
            ini = s
            end = s
          }
          /*if (matrix(s_)(t_) == lastBS) {
            end = s
          } else {
            if (end - ini >= minDiff) stageData += ini -> end
            ini = s
            end = s
          }
          lastBS = matrix(s_)(t_)
          */
        }
        if (end - ini >= minDiff) stageData += ini -> end
        res += t -> stageData.toList
      }
    }
    res.toList
  }
  // Parameters
  // Every parameter that has defaults, basic data or scenario data attached.
  def parameters: List[ParamStat] = {
    val defP = defaults.keys
    val basicP =
      for {
        v1 <- basicData.values
        v2 <- v1.values
        p <- v2.keys
      } yield p
    val scenP =
      for {
        v <- scenarioData.values
        p <- v.keys
      } yield p
    (defP ++ basicP ++ scenP).toList.distinct
  }
  def parametersData: List[(ParamStat, List[(List[SimpleData], SimpleData)])] =
    parameters.map(p => p -> paramData(p))
  // Data for `param` keyed by (t, s, original subscript); resolution order is
  // scenario data, then basic data, then defaults.
  def paramData(param: ParamStat): List[(List[SimpleData], SimpleData)] = {
    val scenariosData =
      for {
        s <- SData
        t <- TData
        s_ = s - 1
        t_ = t - 1
        scenPrefix = prefix(s_)(t_)
        bs = matrix(s_)(t_)
        cData = for {
          cbss <- scenarioData.get(scenPrefix)
          data <- cbss.get(param)
        } yield {
          data
        }
        bData = for {
          stage <- stages.lift(t_)
          bss <- basicData.get(stage)
          ds <- bss.get(bs)
          data <- ds.get(param)
        } yield {
          data
        }
        defaultData = defaults.get(param)
        pData <- cData.orElse(bData).orElse(defaultData).toList
        (key, value) <- pData
      } yield {
        (List[SimpleData](t, s) ::: key.subscript) -> value
      }
    scenariosData.toList
  }
  // As `paramData` but driven by `scenariosByStage` (per-stage scenario
  // indices) instead of the final-scenario prefix/matrix caches.
  def paramDataST(param: ParamStat): List[(List[SimpleData], SimpleData)] = {
    val scenariosData =
      for {
        (stage, t_) <- stages.zipWithIndex
        sts <- scenariosByStage.get(stage).toList
        (scen, s_) <- sts.zipWithIndex
        cData =
          for {
            cbss <- scenarioData.get(scen)
            data <- cbss.get(param)
          } yield {
            data
          }
        bs <- scen.lastOption.toList
        bData =
          for {
            bss <- basicData.get(stage)
            ds <- bss.get(bs)
            data <- ds.get(param)
          } yield {
            data
          }
        defaultData = defaults.get(param)
        pData <- cData.orElse(bData).orElse(defaultData).toList
        t = t_ + 1
        s = s_ + 1
        (key, value) <- pData
      } yield {
        (List[SimpleData](t, s) ::: key.subscript) -> value
      }
    scenariosData.toList
  }
  def parametersDataBounds: List[(ParamStat, List[(List[SimpleData], SimpleData)])] =
    parameters.map(p => p -> paramDataBounds(p))
  // Data keyed by (t, s1, s2) using the consecutive-scenario ranges computed
  // by `linkSetDataBounds`; the range representative is its first scenario.
  def paramDataBounds(param: ParamStat): List[(List[SimpleData], SimpleData)] = {
    val scenariosData =
      for {
        (t, tup) <- linkSetDataBounds(includeSingleRanges = true)
        (s1, s2) <- tup
        t_ = t - 1
        s_ = s1 - 1
        scenPrefix = prefix(s_)(t_)
        bs = matrix(s_)(t_)
        cData = for {
          cbss <- scenarioData.get(scenPrefix)
          data <- cbss.get(param)
        } yield {
          data
        }
        bData = for {
          stage <- stages.lift(t_)
          bss <- basicData.get(stage)
          ds <- bss.get(bs)
          data <- ds.get(param)
        } yield {
          data
        }
        defaultData = defaults.get(param)
        pData <- cData.orElse(bData).orElse(defaultData).toList
        (key, value) <- pData
      } yield {
        (List[SimpleData](t, s1, s2) ::: key.subscript) -> value
      }
    scenariosData
  }
  // All parameter data applicable to one concrete scenario, same resolution
  // order as `paramData(param)`.
  def paramData(scen: Scenario): LinkedMap[ParamStat, ParamStatData] = {
    val paramsData =
      for {
        stage <- stages.lift(scen.size - 1).toList
        bs <- scen.lastOption.toList
        param <- parameters
        cData =
          for {
            cbss <- scenarioData.get(scen)
            data <- cbss.get(param)
          } yield {
            data
          }
        bData =
          for {
            bss <- basicData.get(stage)
            ds <- bss.get(bs)
            data <- ds.get(param)
          } yield {
            data
          }
        defaultData = defaults.get(param)
        pData <- cData.orElse(bData).orElse(defaultData).toList
      } yield {
        param -> pData
      }
    LinkedMap() ++ paramsData
  }
  //// AUX
  // Rescales the probabilities in `map` so they sum to `target` (default 1).
  private[this] def normalize(map: LinkedMap[BasicScenario, Rational], target: Rational = 1): LinkedMap[BasicScenario, Rational] = {
    val sum = map.unzip._2.qsum
    require(sum != 0 || target == 0, "sum of probabilities equals zero")
    val coef = if (target == 0) Rational.zero else sum / target
    val updFunc: Rational => Rational = if (coef == 0) { _ => Rational.zero } else { _ / coef }
    map.map { case (k, v) => k -> updFunc(v) }
  }
  // Sort key: for each position, the declaration index of the basic scenario
  // at that stage (unknown scenarios sort last).
  private[this] def bsIndex(scen: Scenario): List[Int] = {
    scen.zipWithIndex.map {
      case (bs, i) => basicScenarios.toList.lift(i).cata(x =>
        {
          val ind = x._2.unzip._1.toList.indexOf(bs)
          if (ind == -1) Int.MaxValue else ind
        },
        Int.MaxValue)
    }
  }
}
object StochData {
  // XXX should be `amphip.stoch.Scenario`
  type Scenario = List[BasicScenario]
  /** Empty stochastic description: no stages, no scenarios, not separated. */
  def apply(): StochData = new StochData(
    stages = Nil,
    basicScenarios = LinkedMap.empty,
    customScenarios = LinkedMap.empty,
    deletedScenarios = LinkedMap.empty,
    defaults = LinkedMap.empty,
    basicData = LinkedMap.empty,
    scenarioData = LinkedMap.empty,
    separated = false)
  // Keeps only the data of the parameter with the same name as `p`.
  def filter(modelData: ModelData, p: ParamStat): ModelData =
    modelData.filterParams(_.name == p.name)
  // Keeps only the data of the set with the same name as `s`.
  def filter(modelData: ModelData, s: SetStat): ModelData =
    modelData.filterSets(_.name == s.name)
  /** Fails with a descriptive message unless `p`'s indexing starts with the
    * model's stochastic indices (see `isStochastic`).
    */
  def requireStochastic(p: ParamStat, model: StochModel): Unit = {
    require(isStochastic(p, model), {
      val stochIndExpr = IndExpr(stochIndices(model))
      val pIndex = p.domain.cata(_.shows, "not-indexed")
      s"Index of parameter `${p.name}' must start with ${stochIndExpr.shows} (index: $pIndex)"
    })
  }
  // stricter requirement for stochastic parameters: the leading entries of the
  // parameter's indexing must equal the model's stochastic index entries.
  def isStochastic(p: ParamStat, model: StochModel): Boolean = p match {
    case ParamStat(_, _, Some(indexing), _) =>
      val sind = stochIndices(model)
      val psind = indexing.entries.take(sind.size).map(_.set: IndEntry)
      psind == sind
    case _ => false
  }
  // Index entries every stochastic parameter must lead with: (S) for
  // two-stage models, (T, S) for multistage models.
  def stochIndices(model: StochModel): List[IndEntry] =
    model match {
      case m: TwoStageStochModel => List(m.S())
      case m: MultiStageStochModel => List(m.T(), m.S())
    }
  /**
   * Converts `p` into a deterministic parameter by removing the entries
   * corresponding to `m.T()` and `m.S()` (where applicable).
   *
   * Important:
   * - Assumes that the sets to be removed are at the start of the entries
   * list.
   * - Removes any predicate of the indexing expression.
   */
  def asDet(p: ParamStat, model: StochModel): ParamStat = p match {
    case ParamStat(_, _, Some(indexing), _) =>
      val newEntries = indexing.entries.drop(stochIndices(model).size)
      p.copy(domain = indexing.copy(
        entries = newEntries,
        predicate = None).some)
    case _ => p
  }
}
/** A decision stage of the stochastic model, identified by name. */
case class Stage(name: String /*, duration? */ )
object Stage {
  // Allows plain strings to be used wherever a Stage is expected.
  implicit def StringToStage(str: String): Stage = Stage(str)
}
/** One branch label of the scenario tree at a given stage. */
case class BasicScenario(name: String) {
  override def toString = name.shows
}
object BasicScenario {
  // Allows plain strings to be used wherever a BasicScenario is expected.
  implicit def StringToBasicScenario(str: String): BasicScenario = BasicScenario(str)
}
| gerferra/amphip | core/src/main/scala/amphip/stoch/StochData.scala | Scala | mpl-2.0 | 20,865 |
/*
* Copyright 2014 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s
package client
import org.http4s.Uri.Authority
import org.http4s.Uri.Scheme
/** Represents a key for requests that can conceivably share a [[Connection]]. */
final case class RequestKey(scheme: Scheme, authority: Authority) {
  // Renders as "scheme://authority", e.g. "http://example.com:8080".
  override def toString = s"${scheme.value}://${authority}"
}
object RequestKey {
  /** Builds the pooling key for `request`, defaulting to the `http` scheme
    * and an empty authority when the request URI omits them.
    */
  def fromRequest[F[_]](request: Request[F]): RequestKey = {
    val uri = request.uri
    val scheme = uri.scheme.getOrElse(Scheme.http)
    val authority = uri.authority.getOrElse(Authority())
    RequestKey(scheme, authority)
  }
}
| http4s/http4s | client/shared/src/main/scala/org/http4s/client/RequestKey.scala | Scala | apache-2.0 | 1,106 |
//
// Taranos Cloud Sonification Framework: Service Core
// Copyright (C) 2018 David Hinson, Netrogen Blue LLC (dhinson@netrogenblue.com)
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package org.taranos.mc.trunk.intraprocess
import org.taranos.mc.Cell
/** Factory/registry ("plant") for [[SignalSink]] elements of a trunk.
  * Sinks are stored keyed by (trunk key, sink key); creation and destruction
  * follow the numbered bind/unbind protocol visible in the step comments —
  * the ordering of those steps must be preserved.
  */
class SignalSinkPlant
    (implicit protected val _trunkModel: TrunkModel)
    extends TrunkElementPlant
{
    import scala.collection.mutable
    // Registry of all sinks, keyed by (owning trunk, sink key).
    private
    val _sinks = mutable.HashMap.empty[(Trunk.Key, SignalSink.Key), SignalSink]
    /** Creates a sink from `constructor`, registers it and binds it to
      * `trunk`; returns the new sink.
      */
    def CreateSignalSink (
        trunk: Trunk,
        constructor: SignalSink.Constructor,
        listenerOpt: Option[ListenerElement]): SignalSink =
    {
        // Create sink element:
        val sink = new SignalSink(
            new SignalSink.Meta(
                TrunkElement.MakeUniqueKey(constructor._tag),
                constructor._tag,
                constructor._badgeOpt,
                constructor._nameOpt,
                constructor._descriptionOpt,
                constructor._mode),
            new SignalSink.Attrs(),
            new SignalSink.Refs(trunk.GetKey),
            new SignalSink.State(),
            listenerOpt)
        // 1: Add element to store:
        _sinks += (trunk.GetKey, sink.GetKey) -> sink
        // 2: Bind with trunk:
        trunk.BindSignalSink(sink.GetKey)
        // 3: Bind with parent:
        // N/A
        // 4: Bind with peers:
        // N/A
        // 5: Bind with children:
        // N/A
        // Return sink:
        sink
    }
    /** Unbinds the sink named by `destructor` from its links, tap and trunk,
      * then removes it from the store; returns the destroyed sink's key.
      * Throws a TrunkException if the key is unknown or of the wrong type.
      */
    def DestroySignalSink (
        trunk: Trunk,
        destructor: SignalSink.Destructor): SignalSink.Key =
    {
        destructor._key match
        {
            case key: SignalSink.Key =>
                _sinks.get((trunk.GetKey, key)) match
                {
                    case Some(sink) =>
                        // 1: Unbind with children:
                        // N/A
                        // 2: Unbind with peers:
                        // Unbind with links (clone: unbinding mutates the key set):
                        sink.GetLinkKeys.clone().foreach(pair =>
                        {
                            _trunkModel.GetSignalLinkOpt(trunk.GetKey, pair._2) match
                            {
                                case Some(link) => link.UnbindSink(isReciprocal = true)
                                case None => // We don't care...
                            }
                        })
                        // Unbind with tap:
                        sink.GetTapKeyOpt.foreach(tapKey =>
                        {
                            _trunkModel.GetSignalTapOpt(trunk.GetKey, tapKey) match
                            {
                                case Some(tap) => tap.UnbindSink(isReciprocal = true)
                                case None => // We don't care...
                            }
                        })
                        // 3: Unbind with parent:
                        // N/A
                        // 4: Unbind with trunk:
                        trunk.UnbindSignalSink(sink.GetKey)
                        // 5: Destroy children:
                        // N/A
                        // 6: Remove element from store:
                        _sinks -= ((trunk.GetKey, sink.GetKey))
                    case None => throw TrunkException(Cell.ErrorCodes.SignalSinkUnknown)
                }
            case _ => throw TrunkException(Cell.ErrorCodes.SignalSinkInvalid)
        }
        // Return sink key:
        destructor._key
    }
    /** Destroys every sink owned by `trunk` via `DestroySignalSink`. */
    def DestroyAllSignalSinks (trunk: Trunk): Unit =
    {
        val trunkKey = trunk.GetKey
        // Destroy each sink of trunk:
        _sinks.filter(_._1._1 == trunkKey).foreach(sinkPair =>
        {
            val ((_, pairSinkKey), _) = sinkPair
            val sinkDestructor = SignalSink.Destructor(pairSinkKey)
            DestroySignalSink(trunk, sinkDestructor)
        })
    }
    /** Looks up a sink; when `isRequired`, a missing sink raises
      * SignalSinkUnknown instead of returning None.
      */
    def GetSignalSinkOpt (
        trunk: Trunk,
        key: SignalSink.Key,
        isRequired: Boolean = true): Option[SignalSink] =
    {
        // Lookup sink key:
        key match
        {
            case _: SignalSink.Key =>
                val opt = _sinks.get((trunk.GetKey, key))
                if (isRequired && opt.isEmpty)
                    throw TrunkException(Cell.ErrorCodes.SignalSinkUnknown)
                opt
            case _ => throw TrunkException(Cell.ErrorCodes.SignalSinkKeyInvalid)
        }
    }
    /** All sinks owned by `trunk`. */
    def GetSignalSinks (trunk: Trunk): Vector[SignalSink] =
    {
        val trunkKey = trunk.GetKey
        // Return sinks vector:
        _sinks.filter(_._1._1 == trunkKey).values.toVector
    }
    /** Keys of all sinks owned by `trunk`. */
    def GetSignalSinkKeys (trunk: Trunk): Vector[SignalSink.Key] =
    {
        val trunkKey = trunk.GetKey
        // Return sink keys vector:
        _sinks.filter(_._1._1 == trunkKey).keys.map(_._2).toVector
    }
    /** Number of sinks currently registered for the given trunk. */
    def GetElementCount (trunkKey: Trunk.Key): Int =
        _sinks.count(_._1._1 == trunkKey)
}
| taranos/taranoscsf-core | src/main/scala/org/taranos/mc/trunk/intraprocess/SignalSinkPlant.scala | Scala | agpl-3.0 | 5,776 |
package providers
import error.NotificationsError
/** A [[error.NotificationsError]] raised by a concrete notification provider;
  * carries the provider's name so callers can tell which provider failed.
  */
trait ProviderError extends NotificationsError {
  // Name of the provider that produced this error.
  def providerName: String
}
| guardian/mobile-n10n | common/src/main/scala/providers/ProviderError.scala | Scala | apache-2.0 | 130 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.concurrent
/** A `SyncChannel` allows one to exchange data synchronously between
 * a reader and a writer thread. The writer thread is blocked until the
 * data to be written has been read by a corresponding reader thread.
 */
@deprecated("Use `java.util.concurrent.Exchanger` instead.", since = "2.13.0")
class SyncChannel[A] {
  // Writers parked with their payload; completing the SyncVar releases one.
  private[this] var pendingWrites = List[(A, SyncVar[Boolean])]()
  // Readers parked waiting for a payload.
  private[this] var pendingReads = List[SyncVar[A]]()
  /** Hands `data` to a reader; blocks the caller until a reader has taken it. */
  def write(data: A): Unit = {
    // create write request
    val writeReq = new SyncVar[Boolean]
    this.synchronized {
      // check whether there is a reader waiting
      if (pendingReads.nonEmpty) {
        val readReq = pendingReads.head
        pendingReads = pendingReads.tail
        // let reader continue
        readReq put data
        // resolve write request
        writeReq put true
      }
      else {
        // enqueue write request (O(n) append; queues are expected to be short)
        pendingWrites = pendingWrites ::: List((data, writeReq))
      }
    }
    // Block outside the monitor until a reader completes the request.
    writeReq.get
  }
  /** Takes one value from a writer; blocks until a writer has supplied one. */
  def read: A = {
    // create read request
    val readReq = new SyncVar[A]
    this.synchronized {
      // check whether there is a writer waiting
      if (pendingWrites.nonEmpty) {
        // read data
        val (data, writeReq) = pendingWrites.head
        pendingWrites = pendingWrites.tail
        // let writer continue
        writeReq.put(true)
        // resolve read request
        readReq.put (data)
      }
      else {
        // enqueue read request (O(n) append; queues are expected to be short)
        pendingReads = pendingReads ::: List(readReq)
      }
    }
    // Block outside the monitor until a writer completes the request.
    readReq.get
  }
}
| lrytz/scala | src/library/scala/concurrent/SyncChannel.scala | Scala | apache-2.0 | 1,887 |
package polystyrene.torus
import scala.collection.mutable.LinkedHashSet
import peersim.config.Configuration
import peersim.core.Control
import peersim.core.Network
import peersim.core.Node
import peersim.core.CommonState
import polystyrene.core.DataPoint
/** PeerSim [[Control]] that can seed every node's [[TorusPolystyrene]]
  * protocol with one [[TorusDataPoint]] laid out on a width x height grid.
  *
  * Configuration keys read (relative to `prefix`): "protocol", "width"
  * (default 40) and "height" (default 20).  The two dimensions are swapped if
  * necessary so that `width >= height`.  Seeding is currently disabled in
  * `execute` (the call to `createDataPoints` is commented out).
  */
class TorusPolyInitializer(prefix : String) extends Control {
  val PAR_PROT : String = "protocol"
  val PAR_WIDTH : String = "width"
  val PAR_HEIGHT : String = "height"
  val PAR_VIEW : String = "view"
  var pid : Int = Configuration.getPid(prefix + "." + PAR_PROT)
  var width : Int = Configuration.getInt(prefix + "." + PAR_WIDTH, 40)
  var height : Int = Configuration.getInt(prefix + "." + PAR_HEIGHT, 20)
  // Normalize the orientation: always keep the larger dimension in `width`.
  if (width < height) {
    val tmp = height
    height = width
    width = tmp
  }
  /** Invoked by the simulator each cycle.  Currently a no-op (seeding is
    * disabled); returns false so the simulation keeps running.
    */
  def execute : Boolean = {
    // println("WIDTH: " + width + ", HEIGHT: " + height)
    // createDataPoints
    false
  }
  /** Assigns one data point per network node, walking the grid column by
    * column; node i receives the i-th (x, y) coordinate.
    */
  def createDataPoints {
    var total = 0
    for (x <- 0 until width; y <- 0 until height) {
      val n = Network.get(total)
      val prot = n.getProtocol(pid)
        .asInstanceOf[TorusPolystyrene]
      prot.addSingleGuest(new TorusDataPoint(x, y))
      total += 1
    }
    println("Data Point ajouté: " + total)
  }
}
| HKervadec/Polystyrene | src/polystyrene/torus/TorusPolyInitializer.scala | Scala | gpl-2.0 | 1,427 |
package components
import models._
import play.api.Logger
import models.TransactionOrBalance
import models.PurseBalance
import models.BalanceOnly
import models.BalanceDiff
import java.util.Currency
/** Folds a chronological sequence of transaction/balance rows, tracking the
  * running balance of each purse and inserting [[BalanceDiff]] rows wherever
  * a recorded balance disagrees with the tracked one.
  *
  * NOTE(review): `generateAndUpdateBalances` is an unimplemented stub (its
  * body is only a comment while a 3-tuple return type is declared), so this
  * class does not compile as-is — TODO implement.
  */
class BalanceCalculator {
  // Running balances keyed by purse id.
  type BalanceMap = Map[Long, PurseBalance]
  // NOTE(review): the default `= Map` is the companion object, not an empty
  // map; `Map.empty` was probably intended — TODO confirm.
  def calculateBalances(rows: Seq[TransactionOrBalance], initialBalances: BalanceMap = Map): Seq[TransactionOrBalance] = {
    rows.foldLeft(FoldResult(Seq(), initialBalances)) {
      (previousResult: FoldResult, row: TransactionOrBalance) =>
        row match {
          // A standalone recorded balance: emit any diff, then the row itself.
          case balanceRow: BalanceOnly =>
            val (newBalances, balanceDiffSeq) = updateBalances(previousResult.balances, balanceRow.balance)
            FoldResult(
              previousResult.resultRows ++ balanceDiffSeq ++ Seq(balanceRow),
              newBalances
            )
          // A transaction: reconcile (or generate) src and dst balances.
          case transactionRow: TransactionWithBalances =>
            var newBalances = previousResult.balances
            var newRows = previousResult.resultRows
            var newTransactionRow = transactionRow
            if (transactionRow.srcBalance.isDefined) {
              updateBalances(newBalances, transactionRow.srcBalance.get) match {
                case (balances, balanceDiffSeq) =>
                  newBalances = balances
                  newRows ++= balanceDiffSeq
              }
            } else {
              val t = transactionRow.transaction
              generateAndUpdateBalances(newBalances, t.srcPurseId, t.amount, t.currency) match {
                case (balances, balanceDiffSeq, balanceOpt) =>
                  newBalances = balances
                  newRows ++= balanceDiffSeq
                  newTransactionRow = newTransactionRow.copy(srcBalance = balanceOpt)
              }
            }
            if (transactionRow.dstBalance.isDefined) {
              updateBalances(newBalances, transactionRow.dstBalance.get) match {
                case (balances, balanceDiffSeq) =>
                  newBalances = balances
                  newRows ++= balanceDiffSeq
              }
            } else if (transactionRow.transaction.dstPurseId.isDefined){
              val t = transactionRow.transaction
              // Note the sign flip: the destination purse receives the amount.
              generateAndUpdateBalances(newBalances, t.dstPurseId.get, -t.amount, t.currency) match {
                case (balances, balanceDiffSeq, balanceOpt) =>
                  newBalances = balances
                  newRows ++= balanceDiffSeq
                  newTransactionRow = newTransactionRow.copy(dstBalance = balanceOpt)
              }
            }
            FoldResult(
              newRows ++ Seq(newTransactionRow),
              newBalances
            )
        }
    }.resultRows
  }
  // Records `newBalance` and, when a previous balance for the same purse
  // exists and differs in amount, also returns the corresponding diff row.
  private def updateBalances(previousBalances: BalanceMap, newBalance: PurseBalance): (BalanceMap, Seq[BalanceDiff]) = {
    val newBalances = previousBalances.updated(newBalance.purseId, newBalance)
    if (
      previousBalances.contains(newBalance.purseId)
      &&
      balanceDiffers(previousBalances(newBalance.purseId), newBalance)
    ) {
      val balanceDiff =
        BalanceDiff(
          newBalance.purseId,
          newBalance.amount - previousBalances(newBalance.purseId).amount
        )
      (newBalances, Seq(balanceDiff))
    } else {
      (newBalances, Seq())
    }
  }
  private def generateAndUpdateBalances(newBalances: BalanceMap, purseId: Long, amount: Long, currency: Currency): (BalanceMap, Seq[BalanceDiff], Option[PurseBalance]) = {
    // Generate a new balance based on the existing one (if any) and return the new entities
    // NOTE(review): unimplemented — the empty body returns Unit, which does
    // not match the declared tuple return type; this will not compile.
  }
  // True when amounts differ for the same purse/currency.  A currency
  // mismatch is logged and treated as "no difference"; a purse-id mismatch
  // is a programming error and throws.
  private def balanceDiffers(currentBalance: PurseBalance, newBalance: PurseBalance): Boolean = {
    if (currentBalance.purseId != newBalance.purseId) {
      throw new RuntimeException("Balance purses does not match")
    } else if (currentBalance.currency != newBalance.currency) {
      Logger.error(s"Balance currency mismatch: found ${newBalance.currency}, expected ${currentBalance.currency}")
      return false
    } else {
      return currentBalance.amount != newBalance.amount
    }
  }
  // Accumulator for the fold: rows produced so far plus running balances.
  case class FoldResult(resultRows: Seq[TransactionOrBalance], balances: BalanceMap)
}
// Default, stateless instance.
object BalanceCalculator extends BalanceCalculator
| vatt2001/finprocessor | app/components/BalanceCalculator.scala | Scala | mit | 4,281 |
package cook.config.dsl
import cook.ref.DirRef
import cook.ref.FileRef
import cook.target.Target
import cook.target.TargetResult
import scala.collection.mutable
class ConfigContext(val cookFileRef: FileRef) {
private [config] val targets = mutable.Map[String, Target[TargetResult]]()
def dir = cookFileRef.dir
private [dsl] def addTarget(t: Target[TargetResult]) {
targets += (t.refName -> t)
}
}
| timgreen/cook | src/cook/config/dsl/ConfigContext.scala | Scala | apache-2.0 | 414 |
package acceptance.support
import com.mongodb.casbah.Imports._
import org.bson.types.ObjectId
import org.joda.time.DateTime
/** Thin Casbah-based helper for test setup/teardown against one database. */
trait Mongo {
  // Handle to the target database; supplied by the concrete instance.
  val mongoDB: MongoDB
  // Returns a handle to the named collection (created lazily by MongoDB).
  def createCollection(name: String): MongoCollection = mongoDB(name)
  def dropCollection(coll: MongoCollection) = coll.drop()
  // Removes every document but keeps the collection itself.
  def removeCollection(coll: MongoCollection) = coll.remove(MongoDBObject())
  def insert(coll: MongoCollection, c: DBObject) = coll.insert(c)
}
object Mongo {
def apply(db: String) = new Mongo {
override val mongoDB: MongoDB = MongoClient()(db)
}
}
object FilmBuilder {
  // Builds a broadcast document for a film; `id` must be a 24-char hex
  // ObjectId string, and `provider` is wrapped in a one-element sequence.
  def apply(channel: String, provider: String, start: DateTime, end: DateTime,
            title: String, rating: Double, imdbId: String, posterImdb: String, id: String) =
    MongoDBObject(
      "channel" -> channel,
      "provider" -> Seq(provider),
      "start" -> start.toDate,
      "end" -> end.toDate,
      "rating" -> rating,
      "film" -> MongoDBObject(
        "title" -> title,
        "imdbId" -> imdbId,
        "posterImdb" -> posterImdb
      ),
      "_id" -> new ObjectId(id)
    )
}
object SeriesBuilder {
  // Builds a broadcast document for a series episode; season/episode numbers
  // are stored as strings, and `id` must be a 24-char hex ObjectId string.
  def apply(channel: String, provider: String, start: DateTime, end: DateTime, title: String, epiosdeTitle: String,
            season: String, episode: String, rating: Double, imdbId: String, posterImdb: String, id: String) =
    MongoDBObject(
      "channel" -> channel,
      "provider" -> Seq(provider),
      "start" -> start.toDate,
      "end" -> end.toDate,
      "rating" -> rating,
      "series" -> MongoDBObject(
        "serieTitle" -> title,
        "episode" -> MongoDBObject(
          "episodeTitle" -> epiosdeTitle,
          "seasonNumber" -> season,
          "episodeNumber"-> episode
        ),
        "imdbId" -> imdbId,
        "posterImdb" -> posterImdb
      ),
      "_id" -> new ObjectId(id)
    )
}
object ProgramBuilder {
  // Builds a broadcast document for a generic (non-film, non-series) program;
  // `id` must be a 24-char hex ObjectId string.
  def apply(channel: String, provider: String, start: DateTime, end: DateTime, title: String, id: String) =
    MongoDBObject(
      "channel" -> channel,
      "provider" -> Seq(provider),
      "start" -> start.toDate,
      "end" -> end.toDate,
      "program" -> MongoDBObject("title" -> title),
      "_id" -> new ObjectId(id)
    )
}
| tvlive/tv-api | test/acceptance/support/Mongo.scala | Scala | apache-2.0 | 2,220 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.regression
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.tree.impl.TreeTests
import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTestingUtils}
import org.apache.spark.mllib.regression.{LabeledPoint => OldLabeledPoint}
import org.apache.spark.mllib.tree.{EnsembleTestHelper, GradientBoostedTrees => OldGBT}
import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo}
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.DataFrame
import org.apache.spark.util.Utils
/**
* Test suite for [[GBTRegressor]].
*/
class GBTRegressorSuite extends SparkFunSuite with MLlibTestSparkContext
with DefaultReadWriteTest {
import GBTRegressorSuite.compareAPIs
import testImplicits._
// Combinations for estimators, learning rates and subsamplingRate
private val testCombinations =
Array((10, 1.0, 1.0), (10, 0.1, 1.0), (10, 0.5, 0.75), (10, 0.1, 0.75))
// Shared RDDs built once in beforeAll(): `data` has 10 features; the
// train/validation pair has 20.
private var data: RDD[LabeledPoint] = _
private var trainData: RDD[LabeledPoint] = _
private var validationData: RDD[LabeledPoint] = _
override def beforeAll() {
super.beforeAll()
// 100 / 120 / 80 ordered labeled points, 2 partitions each, converted to ml vectors.
data = sc.parallelize(EnsembleTestHelper.generateOrderedLabeledPoints(numFeatures = 10, 100), 2)
.map(_.asML)
trainData =
sc.parallelize(EnsembleTestHelper.generateOrderedLabeledPoints(numFeatures = 20, 120), 2)
.map(_.asML)
validationData =
sc.parallelize(EnsembleTestHelper.generateOrderedLabeledPoints(numFeatures = 20, 80), 2)
.map(_.asML)
}
// Cross-checks the ml GBTRegressor against the old mllib GBT for every
// supported loss type and parameter combination.
test("Regression with continuous features") {
val categoricalFeatures = Map.empty[Int, Int]
GBTRegressor.supportedLossTypes.foreach { loss =>
testCombinations.foreach {
case (maxIter, learningRate, subsamplingRate) =>
val gbt = new GBTRegressor()
.setMaxDepth(2)
.setSubsamplingRate(subsamplingRate)
.setLossType(loss)
.setMaxIter(maxIter)
.setStepSize(learningRate)
.setSeed(123)
compareAPIs(data, None, gbt, categoricalFeatures)
}
}
}
// Small hand-built dataset; verifies the predictions look like regression output.
test("GBTRegressor behaves reasonably on toy data") {
val df = Seq(
LabeledPoint(10, Vectors.dense(1, 2, 3, 4)),
LabeledPoint(-5, Vectors.dense(6, 3, 2, 1)),
LabeledPoint(11, Vectors.dense(2, 2, 3, 4)),
LabeledPoint(-6, Vectors.dense(6, 4, 2, 1)),
LabeledPoint(9, Vectors.dense(1, 2, 6, 4)),
LabeledPoint(-4, Vectors.dense(6, 3, 2, 2))
).toDF()
val gbt = new GBTRegressor()
.setMaxDepth(2)
.setMaxIter(2)
val model = gbt.fit(df)
MLTestingUtils.checkCopyAndUids(gbt, model)
val preds = model.transform(df)
val predictions = preds.select("prediction").rdd.map(_.getDouble(0))
// Checks based on SPARK-8736 (to ensure it is not doing classification)
assert(predictions.max() > 2)
assert(predictions.min() < -1)
}
// Fitting with a checkpoint interval configured must succeed; the temp
// checkpoint directory is removed afterwards.
test("Checkpointing") {
val tempDir = Utils.createTempDir()
val path = tempDir.toURI.toString
sc.setCheckpointDir(path)
val df = data.toDF()
val gbt = new GBTRegressor()
.setMaxDepth(2)
.setMaxIter(5)
.setStepSize(0.1)
.setCheckpointInterval(2)
.setSeed(123)
// The fitted model is not inspected; fitting without error is the point here.
val model = gbt.fit(df)
sc.checkpointDir = None
Utils.deleteRecursively(tempDir)
}
// Label column type coverage: all NumericType labels accepted, others rejected.
test("should support all NumericType labels and not support other types") {
val gbt = new GBTRegressor().setMaxDepth(1)
MLTestingUtils.checkNumericTypes[GBTRegressionModel, GBTRegressor](
gbt, spark, isClassification = false) { (expected, actual) =>
TreeTests.checkEqual(expected, actual)
}
}
// TODO: Reinstate test once runWithValidation is implemented SPARK-7132
/*
test("runWithValidation stops early and performs better on a validation dataset") {
val categoricalFeatures = Map.empty[Int, Int]
// Set maxIter large enough so that it stops early.
val maxIter = 20
GBTRegressor.supportedLossTypes.foreach { loss =>
val gbt = new GBTRegressor()
.setMaxIter(maxIter)
.setMaxDepth(2)
.setLossType(loss)
.setValidationTol(0.0)
compareAPIs(trainData, None, gbt, categoricalFeatures)
compareAPIs(trainData, Some(validationData), gbt, categoricalFeatures)
}
}
*/
/////////////////////////////////////////////////////////////////////////////
// Tests of feature importance
/////////////////////////////////////////////////////////////////////////////
test("Feature importance with toy data") {
val gbt = new GBTRegressor()
.setMaxDepth(3)
.setMaxIter(5)
.setSubsamplingRate(1.0)
.setStepSize(0.5)
.setSeed(123)
// In this data, feature 1 is very important.
val data: RDD[LabeledPoint] = TreeTests.featureImportanceData(sc)
val categoricalFeatures = Map.empty[Int, Int]
val df: DataFrame = TreeTests.setMetadata(data, categoricalFeatures, 0)
val importances = gbt.fit(df).featureImportances
val mostImportantFeature = importances.argmax
assert(mostImportantFeature === 1)
// Importances must form a normalised, non-negative distribution.
assert(importances.toArray.sum === 1.0)
assert(importances.toArray.forall(_ >= 0.0))
}
/////////////////////////////////////////////////////////////////////////////
// Tests of model save/load
/////////////////////////////////////////////////////////////////////////////
test("model save/load") {
def checkModelData(
model: GBTRegressionModel,
model2: GBTRegressionModel): Unit = {
TreeTests.checkEqual(model, model2)
assert(model.numFeatures === model2.numFeatures)
}
val gbt = new GBTRegressor()
val rdd = TreeTests.getTreeReadWriteData(sc)
val allParamSettings = TreeTests.allParamSettings ++ Map("lossType" -> "squared")
val continuousData: DataFrame =
TreeTests.setMetadata(rdd, Map.empty[Int, Int], numClasses = 0)
testEstimatorAndModelReadWrite(gbt, continuousData, allParamSettings,
allParamSettings, checkModelData)
}
}
private object GBTRegressorSuite extends SparkFunSuite {
/**
* Train 2 models on the given dataset, one using the old API and one using the new API.
* Convert the old model to the new format, compare them, and fail if they are not exactly equal.
*/
def compareAPIs(
data: RDD[LabeledPoint],
// NOTE(review): validationData is currently unused here — validation runs are
// pending (see the commented-out SPARK-7132 test in the suite above).
validationData: Option[RDD[LabeledPoint]],
gbt: GBTRegressor,
categoricalFeatures: Map[Int, Int]): Unit = {
val numFeatures = data.first().features.size
val oldBoostingStrategy = gbt.getOldBoostingStrategy(categoricalFeatures, OldAlgo.Regression)
val oldGBT = new OldGBT(oldBoostingStrategy, gbt.getSeed.toInt)
val oldModel = oldGBT.run(data.map(OldLabeledPoint.fromML))
val newData: DataFrame = TreeTests.setMetadata(data, categoricalFeatures, numClasses = 0)
val newModel = gbt.fit(newData)
// Use parent from newTree since this is not checked anyways.
val oldModelAsNew = GBTRegressionModel.fromOld(
oldModel, newModel.parent.asInstanceOf[GBTRegressor], categoricalFeatures, numFeatures)
TreeTests.checkEqual(oldModelAsNew, newModel)
assert(newModel.numFeatures === numFeatures)
assert(oldModelAsNew.numFeatures === numFeatures)
}
}
| akopich/spark | mllib/src/test/scala/org/apache/spark/ml/regression/GBTRegressorSuite.scala | Scala | apache-2.0 | 8,134 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.spark.jts.udaf
import com.vividsolutions.jts.geom.Geometry
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.Row
import org.apache.spark.sql.jts.JTSTypes
/** Spark SQL aggregate that folds a column of JTS geometries into their
  * combined convex hull. The running hull is kept in a single nullable
  * geometry buffer slot (null until the first geometry is seen).
  */
class ConvexHull extends UserDefinedAggregateFunction {
  import org.apache.spark.sql.types.{DataTypes => DT}

  // One nullable geometry input column.
  override val inputSchema = DT.createStructType(Array(DT.createStructField("inputGeom", JTSTypes.GeometryTypeInstance, true)))

  // Aggregation buffer: the partial convex hull computed so far.
  override val bufferSchema = DT.createStructType(Array(DT.createStructField("convexHull", JTSTypes.GeometryTypeInstance, true)))

  override val dataType = DT.createStructType(Array(DT.createStructField("convexHull", JTSTypes.GeometryTypeInstance, true)))

  override val deterministic = true

  override def initialize(buffer: MutableAggregationBuffer): Unit = {
    buffer.update(0, null)
  }

  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    val geom = input.get(0).asInstanceOf[Geometry]
    // Fix: skip null input geometries. The input column is declared nullable,
    // and `start.union(geom)` would throw on a null argument; merge() below
    // already tolerates null buffer slots in the same way.
    if (geom != null) {
      val start = buffer.get(0)
      if (start == null) {
        buffer.update(0, geom)
      } else {
        val ch = start.asInstanceOf[Geometry].union(geom).convexHull()
        buffer.update(0, ch)
      }
    }
  }

  override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    // Combine two partial hulls; either side may still be null (e.g. an empty
    // partition), in which case the non-null side wins unchanged.
    val ch =
      (buffer1.isNullAt(0), buffer2.isNullAt(0)) match {
        case (true, true) => Option.empty[Geometry]
        case (false, false) => Some(buffer1.getAs[Geometry](0).union(buffer2.getAs[Geometry](0)).convexHull())
        case (false, true) => Some(buffer1.getAs[Geometry](0).convexHull())
        case (true, false) => Some(buffer2.getAs[Geometry](0).convexHull())
      }
    ch.foreach { g => buffer1.update(0, g) }
  }

  // The final buffer row (a single-geometry struct) is the aggregate result.
  override def evaluate(buffer: Row): Any = buffer
}
| jahhulbert-ccri/geomesa | geomesa-spark/geomesa-spark-jts/src/main/scala/org/locationtech/geomesa/spark/jts/udaf/ConvexHull.scala | Scala | apache-2.0 | 2,285 |
package x7c1.wheat.splicer.core.logger
import ch.qos.logback.classic.encoder.PatternLayoutEncoder
import ch.qos.logback.classic.spi.ILoggingEvent
import ch.qos.logback.core.FileAppender
import ch.qos.logback.core.rolling.{RollingFileAppender, TimeBasedRollingPolicy}
import x7c1.wheat.splicer.core.logger.Tap.implicits.Provider
/** Settings for a logback time-based rolling file appender.
  *
  * @param encoderPattern  log-line layout pattern, passed to PatternLayoutEncoder.setPattern
  * @param fileName        path of the active log file (FileAppender.setFile)
  * @param fileNamePattern naming pattern for rolled-over files (TimeBasedRollingPolicy.setFileNamePattern)
  * @param maxHistory      number of archived files to retain (TimeBasedRollingPolicy.setMaxHistory)
  */
case class RollingFileSetting(
encoderPattern: String,
fileName: String,
fileNamePattern: String,
maxHistory: Int
)
object RollingFileSetting {
// Appender factory: given a RollingFileSetting and a logback context, wires
// encoder -> rolling policy -> rolling appender. Via Tap, each component has
// its properties and context set first and is start()ed last, in order.
implicit def createAppender: Appender.From[RollingFileSetting] = {
setting => context =>
val encoder = new PatternLayoutEncoder().tap(
_ setPattern setting.encoderPattern,
_ setContext context,
_ start()
)
val policy = new TimeBasedRollingPolicy[ILoggingEvent]().tap(
_ setFileNamePattern setting.fileNamePattern,
_ setMaxHistory setting.maxHistory,
_ setContext context,
// NOTE(review): the throwaway FileAppender parent appears to exist only to
// carry the active file name to the policy — confirm against logback docs.
_ setParent new FileAppender[ILoggingEvent]().tap(
_ setFile setting.fileName
),
_ start()
)
new RollingFileAppender[ILoggingEvent]().tap(
_ setEncoder encoder,
_ setContext context,
_ setRollingPolicy policy,
_ start()
)
}
}
| x7c1/Wheat | wheat-splicer/src/test/scala/x7c1/wheat/splicer/core/logger/RollingFileSetting.scala | Scala | mit | 1,240 |
package sangria.validation.rules
import sangria.util.{Pos, ValidationSupport}
import org.scalatest.wordspec.AnyWordSpec
class VariablesInAllowedPositionSpec extends AnyWordSpec with ValidationSupport {
// Only the VariablesInAllowedPosition validation rule is enabled, so every
// expected error below comes from that rule alone.
override val defaultRule = Some(new VariablesInAllowedPosition)
"Validate: Variables are in allowed positions" should {
// Passing cases: the declared variable type may legally flow into the
// argument / directive position where the variable is used.
"Boolean => Boolean" in expectPasses("""
query Query($booleanArg: Boolean)
{
complicatedArgs {
booleanArgField(booleanArg: $booleanArg)
}
}
""")
"Boolean => Boolean within fragment" in expectPasses("""
fragment booleanArgFrag on ComplicatedArgs {
booleanArgField(booleanArg: $booleanArg)
}
query Query($booleanArg: Boolean)
{
complicatedArgs {
...booleanArgFrag
}
}
""")
"Boolean => Boolean within fragment (bonus)" in expectPasses("""
query Query($booleanArg: Boolean)
{
complicatedArgs {
...booleanArgFrag
}
}
fragment booleanArgFrag on ComplicatedArgs {
booleanArgField(booleanArg: $booleanArg)
}
""")
"Boolean! => Boolean" in expectPasses("""
query Query($nonNullBooleanArg: Boolean!)
{
complicatedArgs {
booleanArgField(booleanArg: $nonNullBooleanArg)
}
}
""")
"Boolean! => Boolean within fragment" in expectPasses("""
fragment booleanArgFrag on ComplicatedArgs {
booleanArgField(booleanArg: $nonNullBooleanArg)
}
query Query($nonNullBooleanArg: Boolean!)
{
complicatedArgs {
...booleanArgFrag
}
}
""")
"[String] => [String]" in expectPasses("""
query Query($stringListVar: [String])
{
complicatedArgs {
stringListArgField(stringListArg: $stringListVar)
}
}
""")
"[String!] => [String]" in expectPasses("""
query Query($stringListVar: [String!])
{
complicatedArgs {
stringListArgField(stringListArg: $stringListVar)
}
}
""")
"String => [String] in item position" in expectPasses("""
query Query($stringVar: String)
{
complicatedArgs {
stringListArgField(stringListArg: [$stringVar])
}
}
""")
"String! => [String] in item position" in expectPasses("""
query Query($stringVar: String!)
{
complicatedArgs {
stringListArgField(stringListArg: [$stringVar])
}
}
""")
"ComplexInput => ComplexInput" in expectPasses("""
query Query($complexVar: ComplexInput)
{
complicatedArgs {
complexArgField(complexArg: $complexVar)
}
}
""")
"ComplexInput => ComplexInput in field position" in expectPasses("""
query Query($boolVar: Boolean = false)
{
complicatedArgs {
complexArgField(complexArg: {requiredArg: $boolVar})
}
}
""")
"Boolean! => Boolean! in directive" in expectPasses("""
query Query($boolVar: Boolean!)
{
dog @include(if: $boolVar)
}
""")
// Failing cases: the variable's declared type cannot flow into the usage
// position. Each expected error carries two positions: the variable
// definition and the offending usage.
"Int => Int!" in expectFailsPosList(
"""
query Query($intArg: Int)
{
complicatedArgs {
nonNullIntArgField(nonNullIntArg: $intArg)
}
}
""",
List(
"Variable '$intArg' of type 'Int' used in position expecting type 'Int!'." -> List(
Pos(2, 21),
Pos(5, 47))
)
)
"Int => Int! within fragment" in expectFailsPosList(
"""
fragment nonNullIntArgFieldFrag on ComplicatedArgs {
nonNullIntArgField(nonNullIntArg: $intArg)
}
query Query($intArg: Int)
{
complicatedArgs {
...nonNullIntArgFieldFrag
}
}
""",
List(
"Variable '$intArg' of type 'Int' used in position expecting type 'Int!'." -> List(
Pos(6, 21),
Pos(3, 45))
)
)
"Int => Int! within nested fragment" in expectFailsPosList(
"""
fragment outerFrag on ComplicatedArgs {
...nonNullIntArgFieldFrag
}
fragment nonNullIntArgFieldFrag on ComplicatedArgs {
nonNullIntArgField(nonNullIntArg: $intArg)
}
query Query($intArg: Int)
{
complicatedArgs {
...outerFrag
}
}
""",
List(
"Variable '$intArg' of type 'Int' used in position expecting type 'Int!'." -> List(
Pos(10, 21),
Pos(7, 45))
)
)
"String over Boolean" in expectFailsPosList(
"""
query Query($stringVar: String)
{
complicatedArgs {
booleanArgField(booleanArg: $stringVar)
}
}
""",
List(
"Variable '$stringVar' of type 'String' used in position expecting type 'Boolean'." -> List(
Pos(2, 21),
Pos(5, 41))
)
)
"String => [String]" in expectFailsPosList(
"""
query Query($stringVar: String)
{
complicatedArgs {
stringListArgField(stringListArg: $stringVar)
}
}
""",
List(
"Variable '$stringVar' of type 'String' used in position expecting type '[String]'." -> List(
Pos(2, 21),
Pos(5, 47))
)
)
"Boolean => Boolean! in directive" in expectFailsPosList(
"""
query Query($boolVar: Boolean)
{
dog @include(if: $boolVar)
}
""",
List(
"Variable '$boolVar' of type 'Boolean' used in position expecting type 'Boolean!'." -> List(
Pos(2, 21),
Pos(4, 28))
)
)
"String => Boolean! in directive" in expectFailsPosList(
"""
query Query($stringVar: String)
{
dog @include(if: $stringVar)
}
""",
List(
"Variable '$stringVar' of type 'String' used in position expecting type 'Boolean!'." -> List(
Pos(2, 21),
Pos(4, 28))
)
)
// An explicit `= null` default does not make a nullable variable usable in a
// non-null position.
"Allows optional (nullable) variables with default values" in expectFailsSimple(
"""
query Query($intVar: Int = null) {
complicatedArgs {
nonNullIntArgField(nonNullIntArg: $intVar)
}
}
""",
"Variable '$intVar' of type 'Int' used in position expecting type 'Int!'." -> Seq(
Pos(2, 21),
Pos(4, 47))
)
// A non-null default value (or a defaulted argument) does make it legal.
"Int => Int! when variable provides non-null default value" in expectPasses("""
query Query($intVar: Int = 1) {
complicatedArgs {
nonNullIntArgField(nonNullIntArg: $intVar)
}
}
""")
"Int => Int! when optional argument provides default value" in expectPasses("""
query Query($intVar: Int) {
complicatedArgs {
nonNullFieldWithDefault(nonNullIntArg: $intVar)
}
}
""")
"Boolean => Boolean! in directive with default value with option" in expectPasses("""
query Query($boolVar: Boolean = false) {
dog @include(if: $boolVar)
}
""")
}
}
| OlegIlyenko/sangria | modules/core/src/test/scala/sangria/validation/rules/VariablesInAllowedPositionSpec.scala | Scala | apache-2.0 | 7,366 |
/*
* Copyright (c) 2013 Bridgewater Associates, LP
*
* Distributed under the terms of the Modified BSD License. The full license is in
* the file COPYING, distributed as part of this software.
*/
package notebook.kernel.remote
import java.io.File
import notebook.kernel.pfork.ProcessFork
import notebook.kernel.ConfigUtils._
import com.typesafe.config.Config
import notebook.kernel.ConfigUtils
class Subprocess[A : Manifest](config: Config) extends ProcessFork[A] {
// Forked kernel process whose classpath, working directory, and JVM settings
// are all overridable through the supplied Config; anything not configured
// falls back to the ProcessFork defaults.
// Make sure the custom classpath comes first, so that child processes can override this process' libs (might be cleaner to load the bare minimum of JARs)
override lazy val classPathString =
(config getArray("kernel.classpath") getOrElse(Nil) :+ super.classPathString)
.mkString(File.pathSeparator)
// Working directory for the forked process; defaults to the current
// directory when "kernel.dir" is absent.
override lazy val workingDirectory =
config get "kernel.dir" match {
case None => new File(".")
case Some(f) => new File(f)
}
// JVM memory settings, each individually overridable via config.
override def heap = config.getMem("heap") getOrElse super.heap
override def permGen = config.getMem("permGen") getOrElse super.permGen
override def stack = config.getMem("stack") getOrElse super.stack
override def reservedCodeCache = config.getMem("reservedCodeCache") getOrElse super.reservedCodeCache
// NOTE(review): `_.toBoolean` throws for any value other than "true"/"false".
override def server = config get "server" map { _.toBoolean } getOrElse super.server
// Config-supplied VM arguments are prepended to the defaults.
override def jvmArgs = (config.getArray("vmArgs").getOrElse(Nil).toIndexedSeq) ++ super.jvmArgs
}
| bigdatagenomics/mango-notebook | modules/kernel/src/main/scala/notebook/kernel/remote/Subprocess.scala | Scala | apache-2.0 | 1,438 |
package autolift.test.algebird
import org.scalatest.FlatSpec
import com.twitter.algebird.{Monad, Functor}
case class Foo[A](a: A)

object Foo{
  // Algebird Monad instance for Foo: `apply` wraps a value, `flatMap`
  // unwraps the single element and applies the function.
  implicit val bind: Monad[Foo] = new Monad[Foo] {
    override def apply[T](t: T): Foo[T] = Foo(t)
    override def flatMap[T, U](foo: Foo[T])(fn: T => Foo[U]): Foo[U] = fn(foo.a)
  }
}
case class Bar[A](a: A)

object Bar{
  // Algebird Functor instance for Bar: maps over the wrapped value.
  implicit val fun: Functor[Bar] = new Functor[Bar] {
    override def map[T, U](m: Bar[T])(fn: T => U): Bar[U] = Bar(fn(m.a))
  }
}
trait BaseSpec extends FlatSpec{
// Shorthand equality assertion shared by the autolift-algebird specs.
def same[A](x: A, y: A) = assert(x == y)
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.param
import org.apache.spark.SparkFunSuite
class ParamsSuite extends SparkFunSuite {
// Basic Param metadata (name/doc/parent), validation, and explainParam output.
test("param") {
val solver = new TestParams()
val uid = solver.uid
import solver.{maxIter, inputCol}
assert(maxIter.name === "maxIter")
assert(maxIter.doc === "maximum number of iterations (>= 0)")
assert(maxIter.parent === uid)
assert(maxIter.toString === s"${uid}__maxIter")
assert(!maxIter.isValid(-1))
assert(maxIter.isValid(0))
assert(maxIter.isValid(1))
solver.setMaxIter(5)
assert(solver.explainParam(maxIter) ===
"maxIter: maximum number of iterations (>= 0) (default: 10, current: 5)")
assert(inputCol.toString === s"${uid}__inputCol")
intercept[IllegalArgumentException] {
solver.setMaxIter(-1)
}
}
// The three ways of constructing a ParamPair are equivalent, and invalid
// values are rejected at construction time.
test("param pair") {
val solver = new TestParams()
import solver.maxIter
val pair0 = maxIter -> 5
val pair1 = maxIter.w(5)
val pair2 = ParamPair(maxIter, 5)
for (pair <- Seq(pair0, pair1, pair2)) {
assert(pair.param.eq(maxIter))
assert(pair.value === 5)
}
intercept[IllegalArgumentException] {
// Construction itself must throw for an invalid value.
val pair = maxIter -> -1
}
}
// ParamMap mutation, lookup, copying, and the ++ / ++= combinators.
test("param map") {
val solver = new TestParams()
import solver.{maxIter, inputCol}
val map0 = ParamMap.empty
assert(!map0.contains(maxIter))
map0.put(maxIter, 10)
assert(map0.contains(maxIter))
assert(map0(maxIter) === 10)
intercept[IllegalArgumentException] {
map0.put(maxIter, -1)
}
assert(!map0.contains(inputCol))
intercept[NoSuchElementException] {
map0(inputCol)
}
map0.put(inputCol -> "input")
assert(map0.contains(inputCol))
assert(map0(inputCol) === "input")
// All five construction routes must yield an equivalent map.
val map1 = map0.copy
val map2 = ParamMap(maxIter -> 10, inputCol -> "input")
val map3 = new ParamMap()
.put(maxIter, 10)
.put(inputCol, "input")
val map4 = ParamMap.empty ++ map0
val map5 = ParamMap.empty
map5 ++= map0
for (m <- Seq(map1, map2, map3, map4, map5)) {
assert(m.contains(maxIter))
assert(m(maxIter) === 10)
assert(m.contains(inputCol))
assert(m(inputCol) === "input")
}
}
// Params trait behavior: ordering, set/default state, explain*, getParam,
// validation, and copy.
test("params") {
val solver = new TestParams()
import solver.{maxIter, inputCol}
val params = solver.params
assert(params.length === 2)
assert(params(0).eq(inputCol), "params must be ordered by name")
assert(params(1).eq(maxIter))
assert(!solver.isSet(maxIter))
assert(solver.isDefined(maxIter))
assert(solver.getMaxIter === 10)
solver.setMaxIter(100)
assert(solver.isSet(maxIter))
assert(solver.getMaxIter === 100)
assert(!solver.isSet(inputCol))
assert(!solver.isDefined(inputCol))
intercept[NoSuchElementException](solver.getInputCol)
assert(solver.explainParam(maxIter) ===
"maxIter: maximum number of iterations (>= 0) (default: 10, current: 100)")
assert(solver.explainParams() ===
Seq(inputCol, maxIter).map(solver.explainParam).mkString("\\n"))
assert(solver.getParam("inputCol").eq(inputCol))
assert(solver.getParam("maxIter").eq(maxIter))
assert(solver.hasParam("inputCol"))
assert(!solver.hasParam("abc"))
intercept[NoSuchElementException] {
solver.getParam("abc")
}
// validateParams fails while inputCol is unset, succeeds once it is provided
// (either via copy or via the setter).
intercept[IllegalArgumentException] {
solver.validateParams()
}
solver.copy(ParamMap(inputCol -> "input")).validateParams()
solver.setInputCol("input")
assert(solver.isSet(inputCol))
assert(solver.isDefined(inputCol))
assert(solver.getInputCol === "input")
solver.validateParams()
intercept[IllegalArgumentException] {
ParamMap(maxIter -> -10)
}
intercept[IllegalArgumentException] {
solver.setMaxIter(-10)
}
solver.clearMaxIter()
assert(!solver.isSet(maxIter))
// copy(extra) must preserve the uid and apply the extra values.
val copied = solver.copy(ParamMap(solver.maxIter -> 50))
assert(copied.uid === solver.uid)
assert(copied.getInputCol === solver.getInputCol)
assert(copied.getMaxIter === 50)
}
// Each ParamValidators factory, exercised just inside and outside its bound.
test("ParamValidate") {
val alwaysTrue = ParamValidators.alwaysTrue[Int]
assert(alwaysTrue(1))
val gt1Int = ParamValidators.gt[Int](1)
assert(!gt1Int(1) && gt1Int(2))
val gt1Double = ParamValidators.gt[Double](1)
assert(!gt1Double(1.0) && gt1Double(1.1))
val gtEq1Int = ParamValidators.gtEq[Int](1)
assert(!gtEq1Int(0) && gtEq1Int(1))
val gtEq1Double = ParamValidators.gtEq[Double](1)
assert(!gtEq1Double(0.9) && gtEq1Double(1.0))
val lt1Int = ParamValidators.lt[Int](1)
assert(lt1Int(0) && !lt1Int(1))
val lt1Double = ParamValidators.lt[Double](1)
assert(lt1Double(0.9) && !lt1Double(1.0))
val ltEq1Int = ParamValidators.ltEq[Int](1)
assert(ltEq1Int(1) && !ltEq1Int(2))
val ltEq1Double = ParamValidators.ltEq[Double](1)
assert(ltEq1Double(1.0) && !ltEq1Double(1.1))
val inRange02IntInclusive = ParamValidators.inRange[Int](0, 2)
assert(inRange02IntInclusive(0) && inRange02IntInclusive(1) && inRange02IntInclusive(2) &&
!inRange02IntInclusive(-1) && !inRange02IntInclusive(3))
val inRange02IntExclusive =
ParamValidators.inRange[Int](0, 2, lowerInclusive = false, upperInclusive = false)
assert(!inRange02IntExclusive(0) && inRange02IntExclusive(1) && !inRange02IntExclusive(2))
val inRange02DoubleInclusive = ParamValidators.inRange[Double](0, 2)
assert(inRange02DoubleInclusive(0) && inRange02DoubleInclusive(1) &&
inRange02DoubleInclusive(2) &&
!inRange02DoubleInclusive(-0.1) && !inRange02DoubleInclusive(2.1))
val inRange02DoubleExclusive =
ParamValidators.inRange[Double](0, 2, lowerInclusive = false, upperInclusive = false)
assert(!inRange02DoubleExclusive(0) && inRange02DoubleExclusive(1) &&
!inRange02DoubleExclusive(2))
val inArray = ParamValidators.inArray[Int](Array(1, 2))
assert(inArray(1) && inArray(2) && !inArray(0))
val arrayLengthGt = ParamValidators.arrayLengthGt[Int](2.0)
assert(arrayLengthGt(Array(0, 1, 2)) && !arrayLengthGt(Array(0, 1)))
}
// copy() only carries over values that are actually set in the extra map.
test("Params.copyValues") {
val t = new TestParams()
val t2 = t.copy(ParamMap.empty)
assert(!t2.isSet(t2.maxIter))
val t3 = t.copy(ParamMap(t.maxIter -> 20))
assert(t3.isSet(t3.maxIter))
}
}
object ParamsSuite extends SparkFunSuite {
/**
* Checks common requirements for [[Params.params]]:
* - params are ordered by names
* - param parent has the same UID as the object's UID
* - param name is the same as the param method name
* - obj.copy should return the same type as the obj
*/
def checkParams(obj: Params): Unit = {
val clazz = obj.getClass
val params = obj.params
val paramNames = params.map(_.name)
require(paramNames === paramNames.sorted, "params must be ordered by names")
params.foreach { p =>
assert(p.parent === obj.uid)
assert(obj.getParam(p.name) === p)
// TODO: Check that setters return self, which needs special handling for generic types.
}
// copy(ParamMap) must be covariantly overridden to return the concrete class.
val copyMethod = clazz.getMethod("copy", classOf[ParamMap])
val copyReturnType = copyMethod.getReturnType
require(copyReturnType === obj.getClass,
s"${clazz.getName}.copy should return ${clazz.getName} instead of ${copyReturnType.getName}.")
}
}
| practice-vishnoi/dev-spark-1 | mllib/src/test/scala/org/apache/spark/ml/param/ParamsSuite.scala | Scala | apache-2.0 | 8,087 |
package org.openapitools.server.model
/**
* @param durationInMillis for example: ''null''
* @param estimatedDurationInMillis for example: ''null''
* @param enQueueTime for example: ''null''
* @param endTime for example: ''null''
* @param id for example: ''null''
* @param organization for example: ''null''
* @param pipeline for example: ''null''
* @param result for example: ''null''
* @param runSummary for example: ''null''
* @param startTime for example: ''null''
* @param state for example: ''null''
* @param `type` for example: ''null''
* @param commitId for example: ''null''
* @param `class` for example: ''null''
*/
final case class PipelineBranchesitemlatestRun (
durationInMillis: Option[Int],
estimatedDurationInMillis: Option[Int],
enQueueTime: Option[String],
endTime: Option[String],
id: Option[String],
organization: Option[String],
pipeline: Option[String],
result: Option[String],
runSummary: Option[String],
startTime: Option[String],
state: Option[String],
`type`: Option[String],
commitId: Option[String],
`class`: Option[String]
)
| cliffano/swaggy-jenkins | clients/scala-akka-http-server/generated/src/main/scala/org/openapitools/server/model/PipelineBranchesitemlatestRun.scala | Scala | mit | 1,112 |
package com.eevolution.context.dictionary.infrastructure.repository
import com.eevolution.context.dictionary.domain.model.WorkflowProcess
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: emeris.hernandez@e-evolution.com, http://www.e-evolution.com , http://github.com/EmerisScala
* Created by emeris.hernandez@e-evolution.com , www.e-evolution.com on 31/10/17.
*/
/**
* Workflow Process Mapping
*/
trait WorkflowProcessMapping {
// Quill schema mapping: binds the WorkflowProcess case class to the
// AD_WF_Process table, renaming each Scala field to its database column.
val queryWorkflowProcess = quote {
querySchema[WorkflowProcess]("AD_WF_Process",
_.workflowProcessId-> "AD_WF_Process_ID",
_.tenantId-> "AD_Client_ID",
_.organizationId -> "AD_Org_ID" ,
_.isActive-> "IsActive",
_.created-> "Created",
_.createdBy-> "CreatedBy",
_.updated-> "Updated",
_.updatedBy-> "UpdatedBy",
_.workFlowId-> "AD_Workflow_ID",
_.workflowResponsiveId-> "AD_WF_Responsible_ID",
_.userId-> "AD_User_ID",
_.workflowState-> "WFState",
_.messageId-> "AD_Message_ID",
_.processing-> "Processing",
_.processed-> "Processed",
_.textMsg-> "TextMsg",
_.entityId-> "AD_Table_ID",
_.recordId-> "Record_ID",
_.priority-> "Priority",
_.uuid-> "UUID")
}
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/WorkflowProcessMapping.scala | Scala | gpl-3.0 | 2,000 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.util.Properties
import org.apache.spark.TaskContext
import org.apache.spark.util.CallSite
/**
* Tracks information about an active job in the DAGScheduler.
* An ActiveJob instance represents one currently-active job.
*/
private[spark] class ActiveJob(
val jobId: Int,// unique ID assigned to each job
val finalStage: ResultStage,// the final stage of this job
val func: (TaskContext, Iterator[_]) => _,// function applied on the final stage's partitions
val partitions: Array[Int],// which partitions of the final stage to compute
val callSite: CallSite,
val listener: JobListener,// listener notified of job events
val properties: Properties) {
// Number of partitions this job computes.
val numPartitions = partitions.length
// Whether the task for each partition has finished.
val finished = Array.fill[Boolean](numPartitions)(false)
// Count of partitions finished so far.
var numFinished = 0
}
| tophua/spark1.52 | core/src/main/scala/org/apache/spark/scheduler/ActiveJob.scala | Scala | apache-2.0 | 1,736 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic
import org.scalatest._
import scala.collection.GenSeq
import scala.collection.GenMap
import scala.collection.GenSet
import scala.collection.GenIterable
import scala.collection.GenTraversable
import scala.collection.GenTraversableOnce
/**
 * Exercises the three equality policies behind Scalactic's custom `===` operator:
 *
 *  - `UncheckedEquality`: compares values of any two types,
 *  - `TypeCheckedTripleEquals`: requires a subtype/supertype relationship at compile time,
 *  - `ConversionCheckedTripleEquals`: additionally admits types related by an implicit conversion.
 *
 * Each policy is also verified to be locally overridable by the other two, both when the
 * outer policy is brought into scope via `import` and when it is mixed in. Compile-time
 * rejections are checked with `assertTypeError`; runtime behavior with plain `assert`.
 *
 * Note: in the "mixed in" variants the policy is wrapped in a local `object O`; Scala
 * objects are initialized lazily, so each such method ends with `locally(O)` to force
 * initialization and actually execute the runtime assertions.
 */
class TripleEqualsSpec extends Spec with NonImplicitAssertions {

  // Minimal class hierarchy used to exercise sub/supertype comparisons.
  case class Super(size: Int)
  class Sub(sz: Int) extends Super(sz)

  val super1: Super = new Super(1)
  val sub1: Sub = new Sub(1)
  val super2: Super = new Super(2)
  val sub2: Sub = new Sub(2)
  val nullSuper: Super = null

  object `the custom equality === operator` {

    object `with UncheckedEquality` {

      def `should compare anything with anything` {

        new UncheckedEquality {

          assert(1 === 1)
          assert(!(1 !== 1))

          assert(1 === 1L)
          assert(!(1 !== 1L))

          assert(1L === 1)
          assert(!(1L !== 1))

          assert("1" !== 1)
          assert(!("1" === 1))

          assert(1 !== "1")
          assert(!(1 === "1"))

          assert(super1 !== super2)
          assert(super1 !== sub2)
          assert(sub2 !== super1)
          assert(super1 === super1)
          assert(super1 === sub1)
          assert(sub1 === super1)

          // null handling: a null reference only equals null itself
          assert(!(super1 === null))
          assert(super1 !== null)
          assert(nullSuper === null)
          assert(!(nullSuper !== null))
          assert(!(nullSuper === super1))
          assert(nullSuper !== super1)
        }
      }

      def `should be overridable with TypeCheckedTripleEquals locally when UncheckedEquality imported` {

        object O extends UncheckedEquality
        import O._

        new TypeCheckedTripleEquals {

          class Fruit { override def equals(o: Any) = o.isInstanceOf[Fruit] }
          trait Crunchy
          class Apple extends Fruit with Crunchy

          val fr: Fruit = new Apple
          val cr: Crunchy = new Apple
          val ap: Apple = new Apple

          assert(1 === 1)
          assert(!(1 !== 1))

          assert(ap === fr)
          assert(fr === ap)
          assert(ap === cr)
          assert(cr === ap)

          assert(super1 !== super2)
          assert(super1 !== sub2)
          assert(sub2 !== super1)
          assert(super1 === super1)
          assert(super1 === sub1)
          assert(sub1 === super1)

          // The rest should not compile
          assertTypeError("1 === 1L")
          assertTypeError("1L === 1")
          assertTypeError("1 !== 2L")
          assertTypeError("1L !== 2")

          assertTypeError("\\"1\\" === 1")
          assertTypeError("1 === \\"1\\"")
          assertTypeError("\\"1\\" !== 1")
          assertTypeError("1 !== \\"1\\"")

          assertTypeError("fr === cr")
          assertTypeError("cr === fr")
        }
      }

      def `should be overridable with TypeCheckedTripleEquals locally when UncheckedEquality mixed in` {

        object O extends UncheckedEquality {

          new TypeCheckedTripleEquals {

            class Fruit { override def equals(o: Any) = o.isInstanceOf[Fruit] }
            trait Crunchy
            class Apple extends Fruit with Crunchy

            val fr: Fruit = new Apple
            val cr: Crunchy = new Apple
            val ap: Apple = new Apple

            assert(1 === 1)
            assert(!(1 !== 1))

            assert(ap === fr)
            assert(fr === ap)
            assert(ap === cr)
            assert(cr === ap)

            assert(super1 !== super2)
            assert(super1 !== sub2)
            assert(sub2 !== super1)
            assert(super1 === super1)
            assert(super1 === sub1)
            assert(sub1 === super1)

            // The rest should not compile
            assertTypeError("1 === 1L")
            assertTypeError("1L === 1")
            assertTypeError("1 !== 1L")
            assertTypeError("1L !== 1")

            assertTypeError("\\"1\\" === 1")
            assertTypeError("1 === \\"1\\"")
            assertTypeError("\\"1\\" !== 1")
            assertTypeError("1 !== \\"1\\"")

            assertTypeError("fr === cr")
            assertTypeError("cr === fr")
          }
        }
        locally(O) // force O's initialization so the runtime assertions above actually execute
      }

      def `should be overridable with ConversionCheckedTripleEquals locally when UncheckedEquality imported` {

        object O extends UncheckedEquality
        import O._

        new ConversionCheckedTripleEquals {

          class Fruit { override def equals(o: Any) = o.isInstanceOf[Fruit] }
          trait Crunchy
          class Apple extends Fruit with Crunchy

          val fr: Fruit = new Apple
          val cr: Crunchy = new Apple
          val ap: Apple = new Apple

          assert(1 === 1)
          assert(!(1 !== 1))

          assert(ap === fr)
          assert(fr === ap)
          assert(ap === cr)
          assert(cr === ap)

          assert(super1 !== super2)
          assert(super1 !== sub2)
          assert(sub2 !== super1)
          assert(super1 === super1)
          assert(super1 === sub1)
          assert(sub1 === super1)

          // These should work with implicit conversions
          assert(1 === 1L)
          assert(1L === 1)
          assert(!(1 !== 1L))
          assert(!(1L !== 1))

          // The rest should not compile
          assertTypeError("\\"1\\" === 1")
          assertTypeError("1 === \\"1\\"")
          assertTypeError("\\"1\\" !== 1")
          assertTypeError("1 !== \\"1\\"")

          assertTypeError("fr === cr")
          assertTypeError("cr === fr")
        }
      }

      def `should be overridable with ConversionCheckedTripleEquals locally when UncheckedEquality mixed in` {

        object O extends UncheckedEquality {

          new ConversionCheckedTripleEquals {

            class Fruit { override def equals(o: Any) = o.isInstanceOf[Fruit] }
            trait Crunchy
            class Apple extends Fruit with Crunchy

            val fr: Fruit = new Apple
            val cr: Crunchy = new Apple
            val ap: Apple = new Apple

            assert(1 === 1)
            assert(!(1 !== 1))

            assert(ap === fr)
            assert(fr === ap)
            assert(ap === cr)
            assert(cr === ap)

            assert(super1 !== super2)
            assert(super1 !== sub2)
            assert(sub2 !== super1)
            assert(super1 === super1)
            assert(super1 === sub1)
            assert(sub1 === super1)

            // These should work with implicit conversions
            assert(1 === 1L)
            assert(1L === 1)
            assert(!(1 !== 1L))
            assert(!(1L !== 1))

            // The rest should not compile
            assertTypeError("\\"1\\" === 1")
            assertTypeError("1 === \\"1\\"")
            assertTypeError("\\"1\\" !== 1")
            assertTypeError("1 !== \\"1\\"")

            assertTypeError("fr === cr")
            assertTypeError("cr === fr")
          }
        }
        locally(O) // force O's initialization so the runtime assertions above actually execute
      }
    }

    object `with TypeCheckedTripleEquals` {

      def `should compare supertypes with subtypes on either side` {

        new TypeCheckedTripleEquals {

          class Fruit { override def equals(o: Any) = o.isInstanceOf[Fruit] }
          trait Crunchy
          class Apple extends Fruit with Crunchy

          val fr: Fruit = new Apple
          val cr: Crunchy = new Apple
          val ap: Apple = new Apple

          assert(1 === 1)
          assert(!(1 !== 1))

          assert(ap === fr)
          assert(fr === ap)
          assert(ap === cr)
          assert(cr === ap)

          assert(super1 !== super2)
          assert(super1 !== sub2)
          assert(sub2 !== super1)
          assert(super1 === super1)
          assert(super1 === sub1)
          assert(sub1 === super1)

          // null handling: a null reference only equals null itself
          assert(!(super1 === null))
          assert(super1 !== null)
          assert(nullSuper === null)
          assert(!(nullSuper !== null))
          assert(!(nullSuper === super1))
          assert(nullSuper !== super1)

          // The rest should not compile
          assertTypeError("1 === 1L")
          assertTypeError("1L === 1")
          assertTypeError("1 !== 2L")
          assertTypeError("1L !== 2")

          assertTypeError("\\"1\\" === 1")
          assertTypeError("1 === \\"1\\"")
          assertTypeError("\\"1\\" !== 1")
          assertTypeError("1 !== \\"1\\"")

          assertTypeError("fr === cr")
          assertTypeError("cr === fr")
        }
      } // TODO: Do this kind of thing for CheckedEquality

      def `should be overridable with UncheckedEquality locally when TypeCheckedTripleEquals imported` {

        object O extends TypeCheckedTripleEquals
        import O._

        new UncheckedEquality {

          assert(1 === 1)
          assert(!(1 !== 1))

          assert(1 === 1L)
          assert(!(1 !== 1L))

          assert(1L === 1)
          assert(!(1L !== 1))

          assert("1" !== 1)
          assert(!("1" === 1))

          assert(1 !== "1")
          assert(!(1 === "1"))

          assert(super1 !== super2)
          assert(super1 !== sub2)
          assert(sub2 !== super1) // compiles on 2.10 but not 2.9
          assert(super1 === super1)
          assert(super1 === sub1)
          assert(sub1 === super1) // compiles on 2.10 but not 2.9
        }
      }

      def `should be overridable with UncheckedEquality locally when TypeCheckedTripleEquals mixed in` {

        object O extends TypeCheckedTripleEquals {

          new UncheckedEquality {

            assert(1 === 1)
            assert(!(1 !== 1))

            assert(1 === 1L)
            assert(!(1 !== 1L))

            assert(1L === 1)
            assert(!(1L !== 1))

            assert("1" !== 1)
            assert(!("1" === 1))

            assert(1 !== "1")
            assert(!(1 === "1"))

            assert(super1 !== super2)
            assert(super1 !== sub2)
            assert(sub2 !== super1) // compiles on 2.10 but not 2.9
            assert(super1 === super1)
            assert(super1 === sub1)
            assert(sub1 === super1) // compiles on 2.10 but not 2.9
          }
        }
        locally(O) // force O's initialization so the runtime assertions above actually execute
      }

      def `should be overridable with ConversionCheckedTripleEquals locally when TypeCheckedTripleEquals imported` {

        object O extends TypeCheckedTripleEquals
        import O._

        new ConversionCheckedTripleEquals {

          class Fruit { override def equals(o: Any) = o.isInstanceOf[Fruit] }
          trait Crunchy
          class Apple extends Fruit with Crunchy

          val fr: Fruit = new Apple
          val cr: Crunchy = new Apple
          val ap: Apple = new Apple

          assert(1 === 1) // compiles on 2.10 but not 2.9
          assert(!(1 !== 1)) // compiles on 2.10 but not 2.9

          assert(ap === fr) // compiles on 2.10 but not 2.9
          assert(fr === ap) // compiles on 2.10 but not 2.9
          assert(ap === cr) // compiles on 2.10 but not 2.9
          assert(cr === ap) // compiles on 2.10 but not 2.9

          assert(super1 !== super2) // compiles on 2.10 but not 2.9
          assert(super1 !== sub2) // compiles on 2.10 but not 2.9
          assert(sub2 !== super1) // compiles on 2.10 but not 2.9
          assert(super1 === super1) // compiles on 2.10 but not 2.9
          assert(super1 === sub1) // compiles on 2.10 but not 2.9
          assert(sub1 === super1) // compiles on 2.10 but not 2.9

          // These should work with implicit conversions
          assert(1 === 1L)
          assert(1L === 1)
          assert(!(1 !== 1L))
          assert(!(1L !== 1))

          // The rest should not compile
          assertTypeError("\\"1\\" === 1")
          assertTypeError("1 === \\"1\\"")
          assertTypeError("\\"1\\" !== 1")
          assertTypeError("1 !== \\"1\\"")

          assertTypeError("fr === cr")
          assertTypeError("cr === fr")
        }
      }

      def `should be overridable with ConversionCheckedTripleEquals locally when TypeCheckedTripleEquals mixed in` {

        object O extends TypeCheckedTripleEquals {

          new ConversionCheckedTripleEquals {

            class Fruit { override def equals(o: Any) = o.isInstanceOf[Fruit] }
            trait Crunchy
            class Apple extends Fruit with Crunchy

            val fr: Fruit = new Apple
            val cr: Crunchy = new Apple
            val ap: Apple = new Apple

            assert(1 === 1) // compiles on 2.10 but not 2.9
            assert(!(1 !== 1)) // compiles on 2.10 but not 2.9

            assert(ap === fr) // compiles on 2.10 but not 2.9
            assert(fr === ap) // compiles on 2.10 but not 2.9
            assert(ap === cr) // compiles on 2.10 but not 2.9
            assert(cr === ap) // compiles on 2.10 but not 2.9

            assert(super1 !== super2) // compiles on 2.10 but not 2.9
            assert(super1 !== sub2) // compiles on 2.10 but not 2.9
            assert(sub2 !== super1) // compiles on 2.10 but not 2.9
            assert(super1 === super1) // compiles on 2.10 but not 2.9
            assert(super1 === sub1) // compiles on 2.10 but not 2.9
            assert(sub1 === super1) // compiles on 2.10 but not 2.9

            // These should work with implicit conversions
            assert(1 === 1L)
            assert(1L === 1)
            assert(!(1 !== 1L))
            assert(!(1L !== 1))

            // The rest should not compile
            assertTypeError("\\"1\\" === 1")
            assertTypeError("1 === \\"1\\"")
            assertTypeError("\\"1\\" !== 1")
            assertTypeError("1 !== \\"1\\"")

            assertTypeError("fr === cr")
            assertTypeError("cr === fr")
          }
        }
        locally(O) // force O's initialization so the runtime assertions above actually execute
      }
    }

    object `with ConversionCheckedTripleEquals` {

      def `should compare supertypes with subtypes on either side as well as types with implicit conversions in either direction` {

        new ConversionCheckedTripleEquals {

          class Fruit { override def equals(o: Any) = o.isInstanceOf[Fruit] }
          trait Crunchy
          class Apple extends Fruit with Crunchy

          val fr: Fruit = new Apple
          val cr: Crunchy = new Apple
          val ap: Apple = new Apple

          assert(1 === 1)
          assert(!(1 !== 1))

          assert(ap === fr)
          assert(fr === ap)
          assert(ap === cr)
          assert(cr === ap)

          assert(super1 !== super2)
          assert(super1 !== sub2)
          assert(sub2 !== super1)
          assert(super1 === super1)
          assert(super1 === sub1)
          assert(sub1 === super1)

          // These should work with implicit conversions
          assert(1 === 1L)
          assert(1L === 1)
          assert(!(1 !== 1L))
          assert(!(1L !== 1))

          // Should work sensibly with nulls
          assert(!(super1 === null))
          assert(super1 !== null)
          assert(nullSuper === null)
          assert(!(nullSuper !== null))
          assert(!(nullSuper === super1))
          assert(nullSuper !== super1)

          // The rest should not compile
          assertTypeError("\\"1\\" === 1")
          assertTypeError("1 === \\"1\\"")
          assertTypeError("\\"1\\" !== 1")
          assertTypeError("1 !== \\"1\\"")

          assertTypeError("fr === cr")
          assertTypeError("cr === fr")
        }
      }

      def `should be overridable with UncheckedEquality locally when ConversionCheckedTripleEquals imported` {

        object O extends ConversionCheckedTripleEquals
        import O._

        new UncheckedEquality {

          assert(1 === 1)
          assert(!(1 !== 1))

          assert(1 === 1L) // compiles on 2.10 but not 2.9
          assert(!(1 !== 1L)) // compiles on 2.10 but not 2.9

          assert(1L === 1)
          assert(!(1L !== 1))

          assert("1" !== 1)
          assert(!("1" === 1))

          assert(1 !== "1")
          assert(!(1 === "1"))

          assert(super1 !== super2)
          assert(super1 !== sub2)
          assert(sub2 !== super1) // compiles on 2.10 but not 2.9
          assert(super1 === super1)
          assert(super1 === sub1)
          assert(sub1 === super1) // compiles on 2.10 but not 2.9
        }
      }

      def `should be overridable with UncheckedEquality locally when ConversionCheckedTripleEquals mixed in` {

        object O extends ConversionCheckedTripleEquals {

          new UncheckedEquality {

            assert(1 === 1)
            assert(!(1 !== 1))

            assert(1 === 1L) // compiles on 2.10 but not 2.9
            assert(!(1 !== 1L)) // compiles on 2.10 but not 2.9

            assert(1L === 1)
            assert(!(1L !== 1))

            assert("1" !== 1)
            assert(!("1" === 1))

            assert(1 !== "1")
            assert(!(1 === "1"))

            assert(super1 !== super2)
            assert(super1 !== sub2)
            assert(sub2 !== super1) // compiles on 2.10 but not 2.9
            assert(super1 === super1)
            assert(super1 === sub1)
            assert(sub1 === super1) // compiles on 2.10 but not 2.9
          }
        }
        locally(O) // force O's initialization so the runtime assertions above actually execute
      }

      def `should be overridable with TypeCheckedTripleEquals locally when ConversionCheckedTripleEquals imported` {

        object O extends ConversionCheckedTripleEquals
        import O._

        new TypeCheckedTripleEquals {

          class Fruit { override def equals(o: Any) = o.isInstanceOf[Fruit] }
          trait Crunchy
          class Apple extends Fruit with Crunchy

          val fr: Fruit = new Apple
          val cr: Crunchy = new Apple
          val ap: Apple = new Apple

          assert(1 === 1) // compiles on 2.10 but not 2.9
          assert(!(1 !== 1)) // compiles on 2.10 but not 2.9

          assert(ap === fr) // compiles on 2.10 but not 2.9
          assert(fr === ap) // compiles on 2.10 but not 2.9
          assert(ap === cr) // compiles on 2.10 but not 2.9
          assert(cr === ap) // compiles on 2.10 but not 2.9

          assert(super1 !== super2) // compiles on 2.10 but not 2.9
          assert(super1 !== sub2) // compiles on 2.10 but not 2.9
          assert(sub2 !== super1) // compiles on 2.10 but not 2.9
          assert(super1 === super1) // compiles on 2.10 but not 2.9
          assert(super1 === sub1) // compiles on 2.10 but not 2.9
          assert(sub1 === super1) // compiles on 2.10 but not 2.9

          // The rest should not compile
          assertTypeError("1 === 1L")
          assertTypeError("1L === 1")
          assertTypeError("1 !== 2L")
          assertTypeError("1L !== 2")

          assertTypeError("\\"1\\" === 1")
          assertTypeError("1 === \\"1\\"")
          assertTypeError("\\"1\\" !== 1")
          assertTypeError("1 !== \\"1\\"")

          assertTypeError("fr === cr")
          assertTypeError("cr === fr")
        }
      }

      def `should be overridable with TypeCheckedTripleEquals locally when ConversionCheckedTripleEquals mixed in` {

        object O extends ConversionCheckedTripleEquals {

          new TypeCheckedTripleEquals {

            class Fruit { override def equals(o: Any) = o.isInstanceOf[Fruit] }
            trait Crunchy
            class Apple extends Fruit with Crunchy

            val fr: Fruit = new Apple
            val cr: Crunchy = new Apple
            val ap: Apple = new Apple

            assert(1 === 1) // compiles on 2.10 but not 2.9
            assert(!(1 !== 1)) // compiles on 2.10 but not 2.9

            assert(ap === fr) // compiles on 2.10 but not 2.9
            assert(fr === ap) // compiles on 2.10 but not 2.9
            assert(ap === cr) // compiles on 2.10 but not 2.9
            assert(cr === ap) // compiles on 2.10 but not 2.9

            assert(super1 !== super2) // compiles on 2.10 but not 2.9
            assert(super1 !== sub2) // compiles on 2.10 but not 2.9
            assert(sub2 !== super1) // compiles on 2.10 but not 2.9
            assert(super1 === super1) // compiles on 2.10 but not 2.9
            assert(super1 === sub1) // compiles on 2.10 but not 2.9
            assert(sub1 === super1) // compiles on 2.10 but not 2.9

            // The rest should not compile
            assertTypeError("1 === 1L")
            assertTypeError("1L === 1")
            assertTypeError("1 !== 1L")
            assertTypeError("1L !== 1")

            assertTypeError("\\"1\\" === 1")
            assertTypeError("1 === \\"1\\"")
            assertTypeError("\\"1\\" !== 1")
            assertTypeError("1 !== \\"1\\"")

            assertTypeError("fr === cr")
            assertTypeError("cr === fr")
          }
        }
        locally(O) // force O's initialization so the runtime assertions above actually execute
      }
    }
  }

  object `TripleEqualsInvocation ` {
    import EqualityPolicy.TripleEqualsInvocation
    def `should have pretty toString` {
      // toString mirrors the operator that produced the invocation
      val result1 = new TripleEqualsInvocation("Bob", true)
      assert(result1.toString == "=== \\"Bob\\"")

      val result2 = new TripleEqualsInvocation("Bob", false)
      assert(result2.toString == "!== \\"Bob\\"")
    }
  }
}
| travisbrown/scalatest | src/test/scala/org/scalactic/UncheckedEqualitySpec.scala | Scala | apache-2.0 | 21,501 |
package co.pjrt.stags.cli
import java.io._
import java.nio.file._
import java.util.zip._
import scala.util.Random
import scala.io.Source
import org.scalatest._
import co.pjrt.stags.paths.AbsolutePath
/**
 * End-to-end tests for the stags CLI: generates tags for scala files and
 * `-sources.jar` archives created in temporary directories, then reads the tags
 * file back and checks that file paths were relativized (or kept absolute) correctly.
 */
final class CliTest extends FreeSpec with BeforeAndAfter {

  import Matchers._

  // Creates a fresh temporary directory to act as the working directory of a test.
  private def mkTempDir: Path = {
    Files.createTempDirectory("dir")
  }

  // Runs a test body with a brand-new temp directory as the "cwd".
  private def runTest(t: AbsolutePath => Assertion): Assertion = {
    val cwd = AbsolutePath.forceAbsolute(mkTempDir)
    t(cwd)
  }

  // Writes a tiny scala source file with a random name under `cwd` and returns its path.
  private def mkFile(cwd: AbsolutePath): Path = {
    val name = Random.nextInt.abs.toString
    val p = Files.createTempFile(cwd.path, name, ".scala")
    val pw = new PrintWriter(p.toFile)
    val fileContent =
      """|package a.b.c
         |object X""".stripMargin
    pw.write(fileContent)
    pw.close
    p
  }

  // Creates a sources jar containing two small scala entries under `cwd`.
  // Returns the jar path together with the entries written into it.
  private def mkJar(cwd: AbsolutePath): (Path, List[ZipEntry]) = {
    val name = Random.nextInt.abs.toString
    val p = Files.createTempFile(cwd.path, name, "sources.jar")
    val baos = new FileOutputStream(p.toFile)
    val zos = new ZipOutputStream(baos)
    val entry1 = new ZipEntry("file1.scala")
    val entry2 = new ZipEntry("file2.scala")
    val fileContent1 =
      """|package a.b.c
         |object X""".stripMargin
    val fileContent2 =
      """|package a.b.k
         |object Y""".stripMargin
    zos.putNextEntry(entry1)
    zos.write(fileContent1.getBytes())
    zos.closeEntry()
    zos.putNextEntry(entry2)
    zos.write(fileContent2.getBytes())
    zos.closeEntry()
    zos.close()
    (p, List(entry1, entry2))
  }

  // Creates a randomly-named subdirectory of `cwd`.
  private def mkDir(cwd: AbsolutePath): AbsolutePath = {
    val name = Random.nextInt.abs.toString
    AbsolutePath.fromPath(cwd, Files.createTempDirectory(cwd.path, name))
  }

  // Parses a generated tags file, skipping "!_TAG_" header lines, and returns the
  // file path column (second tab-separated field) of every tag entry.
  private def readTags(p: AbsolutePath): List[Path] = {
    val op = for {
      line <- Source.fromFile(p.toFile).getLines
      if !line.startsWith("!_TAG_")
    } yield {
      line.split('\\t').toList match {
        case _ :: file :: _ => Paths.get(file)
        case otherwise => fail(s"Did not get a proper tag. Got $otherwise")
      }
    }
    op.toList
  }

  // All file types the CLI knows how to process.
  private val allTypes = List(FileType.SourcesJar, FileType.Scala)

  // Order-insensitive equality assertion for two lists.
  private def sameElements[A](a: List[A], b: List[A]): Assertion =
    a should contain theSameElementsAs b

  "should capture all scala files in passed" in {
    runTest { cwd =>
      val f1 = mkFile(cwd)
      val up = mkDir(cwd)
      val f2 = mkFile(up)
      val files = List(f1, f2)
      val config = Config(files, None, false, allTypes, 0)
      Cli.run_(cwd, config)
      val tagLoc = AbsolutePath.fromPath(cwd, Paths.get("tags"))
      val tags = readTags(tagLoc)
      // Paths in the tags file should be relative to the tags file location
      val relativizedFiles =
        files.map(AbsolutePath.unsafeAbsolute).map(_.relativeAgainst(tagLoc))
      tags shouldBe relativizedFiles
    }
  }

  "should capture all source jars files in passed" in {
    runTest { cwd =>
      val (f1, entry1) = mkJar(cwd)
      val up = mkDir(cwd)
      val (f2, entry2) = mkJar(up)
      val files = List(f1, f2)
      val config = Config(files, None, false, allTypes, 0)
      Cli.run_(cwd, config)
      val tagLoc = AbsolutePath.fromPath(cwd, Paths.get("tags"))
      val tags = readTags(tagLoc)
      // Jar entries are reported with the "zipfile:<jar>::<entry>" scheme
      val relativizedFiles =
        List((f1, entry1), (f2, entry2)).flatMap { case (f, es) =>
          val rel = AbsolutePath.unsafeAbsolute(f).relativeAgainst(tagLoc)
          es.map(e => Paths.get(s"zipfile:$rel::${e.getName()}"))
        }
      tags should contain theSameElementsAs (relativizedFiles)
    }
  }

  "should correctly relativize against a tag above" in {
    runTest { cwd =>
      val f1 = mkFile(cwd)
      val files = List(f1)
      val up = mkDir(cwd)
      // The tags file lives in a subdirectory of cwd
      val tagLoc = AbsolutePath.fromPath(up, Paths.get("tags"))
      val config = Config(files, Some(tagLoc.path), false, allTypes, 0)
      Cli.run_(cwd, config)
      val tags = readTags(tagLoc)
      val relativizedFiles =
        files.map(AbsolutePath.unsafeAbsolute).map(_.relativeAgainst(tagLoc))
      tags shouldBe relativizedFiles
    }
  }

  "should correctly relativize against a tag below" in {
    runTest { cwd =>
      val f1 = mkFile(cwd)
      val files = List(f1)
      // The tags file lives in a sibling directory of cwd
      val down = mkDir(cwd.parent)
      val tagLoc = AbsolutePath.fromPath(down, Paths.get("tags"))
      val config = Config(files, Some(tagLoc.path), false, allTypes, 0)
      Cli.run_(cwd, config)
      val tags = readTags(tagLoc)
      val relativizedFiles =
        files.map(AbsolutePath.unsafeAbsolute).map(_.relativeAgainst(tagLoc))
      tags shouldBe relativizedFiles
    }
  }

  "should make the paths absolute if the absolute tag is passed" in {
    runTest { cwd =>
      val f1 = mkFile(cwd)
      val up = mkDir(cwd)
      val f2 = mkFile(up)
      val files = List(f1, f2)
      // absolute = true: tags must contain the original absolute paths unchanged
      val config = Config(files, None, true, allTypes, 0)
      Cli.run_(cwd, config)
      val tagLoc = AbsolutePath.fromPath(cwd, Paths.get("tags"))
      val tags = readTags(tagLoc)
      tags shouldBe files
    }
  }

  "should only pick up the files that are specified in fileTypes" in {
    // Run the same scenario once per file type and check filtering in isolation
    allTypes.foreach { t =>
      runTest { cwd =>
        val (f1, entry1) = mkJar(cwd)
        val sf1 = mkFile(cwd)
        val up = mkDir(cwd)
        val (f2, entry2) = mkJar(up)
        val sf2 = mkFile(up)
        val files = List(f1, f2, sf1, sf2)
        val config = Config(files, None, false, List(t), 0)
        Cli.run_(cwd, config)
        val tagLoc = AbsolutePath.fromPath(cwd, Paths.get("tags"))
        val tags = readTags(tagLoc)
        val jarFiles =
          List((f1, entry1), (f2, entry2)).flatMap { case (f, es) =>
            val rel = AbsolutePath.unsafeAbsolute(f).relativeAgainst(tagLoc)
            es.map(e => Paths.get(s"zipfile:$rel::${e.getName()}"))
          }
        val scalaFiles =
          List(sf1, sf2)
            .map(AbsolutePath.unsafeAbsolute)
            .map(_.relativeAgainst(tagLoc))
        t match {
          case FileType.SourcesJar =>
            sameElements(tags, jarFiles)
          case FileType.Scala =>
            sameElements(tags, scalaFiles)
        }
      }
    }
  }
}
| pjrt/stags | cli/src/test/scala/co/pjrt/stags/cli/CliTest.scala | Scala | mit | 6,112 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples.mllib
import scopt.OptionParser
import org.apache.spark.mllib.fpm.FPGrowth
import org.apache.spark.{SparkConf, SparkContext}
/**
* Example for mining frequent itemsets using FP-growth.
* Example usage: ./bin/run-example mllib.FPGrowthExample \\
* --minSupport 0.8 --numPartition 2 ./data/mllib/sample_fpgrowth.txt
*/
object FPGrowthExample {

  /**
   * Command line parameters.
   *
   * @param input        path to the transaction data set (one transaction per line,
   *                     items separated by spaces)
   * @param minSupport   minimal support level for an itemset to be considered frequent
   * @param numPartition number of partitions used by FP-growth; -1 keeps the input's
   *                     partitioning
   */
  case class Params(
      input: String = null,
      minSupport: Double = 0.3,
      numPartition: Int = -1) extends AbstractParams[Params]

  def main(args: Array[String]) {
    val defaultParams = Params()

    val parser = new OptionParser[Params]("FPGrowthExample") {
      head("FPGrowth: an example FP-growth app.")
      opt[Double]("minSupport")
        .text(s"minimal support level, default: ${defaultParams.minSupport}")
        .action((x, c) => c.copy(minSupport = x))
      opt[Int]("numPartition")
        .text(s"number of partition, default: ${defaultParams.numPartition}")
        .action((x, c) => c.copy(numPartition = x))
      arg[String]("<input>")
        .text("input paths to input data set, whose file format is that each line " +
          "contains a transaction with each item in String and separated by a space")
        .required()
        .action((x, c) => c.copy(input = x))
    }

    // Pattern match instead of map/getOrElse: the branches are side-effecting,
    // so an explicit match reads more clearly and avoids a discarded Option.
    parser.parse(args, defaultParams) match {
      case Some(params) => run(params)
      case None => sys.exit(1)
    }
  }

  /** Mines frequent itemsets from the transactions in `params.input` and prints them. */
  def run(params: Params) {
    val conf = new SparkConf().setAppName(s"FPGrowthExample with $params")
    val sc = new SparkContext(conf)
    try {
      val transactions = sc.textFile(params.input).map(_.split(" ")).cache()

      println(s"Number of transactions: ${transactions.count()}")

      val model = new FPGrowth()
        .setMinSupport(params.minSupport)
        .setNumPartitions(params.numPartition)
        .run(transactions)

      println(s"Number of frequent itemsets: ${model.freqItemsets.count()}")
      model.freqItemsets.collect().foreach { itemset =>
        println(itemset.items.mkString("[", ",", "]") + ", " + itemset.freq)
      }
    } finally {
      // Always release cluster resources, even if the job fails.
      sc.stop()
    }
  }
}
// scalastyle:on println
| practice-vishnoi/dev-spark-1 | examples/src/main/scala/org/apache/spark/examples/mllib/FPGrowthExample.scala | Scala | apache-2.0 | 2,923 |
/*
* Copyright 2017 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.featran.transformers
import com.spotify.featran.{FeatureBuilder, FeatureRejection, FlatReader, FlatWriter}
import com.twitter.algebird.{Aggregator, Max}
/**
* Transform features by rescaling each feature to range [-1, 1] by dividing through the maximum
* absolute value in each feature.
*
* Missing values are transformed to 0.0.
*
* When using aggregated feature summary from a previous session, out of bound values are truncated
* to -1.0 or 1.0 and [[FeatureRejection.OutOfBound]] rejections are reported.
*/
object MaxAbsScaler extends SettingsBuilder {

  /** Creates a new [[MaxAbsScaler]] transformer for the feature called `name`. */
  def apply(name: String): Transformer[Double, Max[Double], Double] =
    new MaxAbsScaler(name)

  /**
   * Recreates a [[MaxAbsScaler]] from a previously exported settings object.
   *
   * @param setting
   *   Settings object
   */
  def fromSettings(setting: Settings): Transformer[Double, Max[Double], Double] =
    apply(setting.name)
}
// Scales each value into [-1, 1] by dividing by the maximum absolute value
// observed during aggregation. Missing values are skipped (encoded as 0.0 by
// the feature builder); values outside the aggregated bound are truncated and
// reported as OutOfBound rejections.
private[featran] class MaxAbsScaler(name: String)
    extends OneDimensional[Double, Max[Double], Double](name) {
  // Aggregates the maximum of |x| over all observed values.
  override val aggregator: Aggregator[Double, Max[Double], Double] =
    Aggregators.from[Double](x => math.abs(x)).to(_.get)
  override def buildFeatures(a: Option[Double], c: Double, fb: FeatureBuilder[_]): Unit = a match {
    case Some(x) =>
      // truncate x to [-max, max]
      // NOTE(review): if all aggregated values were 0.0 then c == 0.0 and the
      // division below yields NaN — presumably inputs are assumed non-degenerate;
      // TODO confirm upstream.
      val truncated = math.min(math.abs(x), c) * math.signum(x)
      fb.add(name, truncated / c)
      if (math.abs(x) > c) {
        fb.reject(this, FeatureRejection.OutOfBound(-c, c, x))
      }
    case None => fb.skip()
  }
  // The aggregated bound is persisted as its plain string representation.
  override def encodeAggregator(c: Double): String = c.toString
  override def decodeAggregator(s: String): Double = s.toDouble
  override def flatRead[T: FlatReader]: T => Option[Any] = FlatReader[T].readDouble(name)
  override def flatWriter[T](implicit fw: FlatWriter[T]): Option[Double] => fw.IF =
    fw.writeDouble(name)
}
| spotify/featran | core/src/main/scala/com/spotify/featran/transformers/MaxAbsScaler.scala | Scala | apache-2.0 | 2,524 |
package com.nelly.monitor
import java.nio.file.{StandardWatchEventKinds => EventType}
import akka.actor.{ActorLogging, ActorSystem}
import better.files.FileWatcher._
import better.files._
import com.nelly.core.domain.LogEntry
// Watches a log file for modifications, parses newly appended lines into
// LogEntry values, and broadcasts each entry to all observer actors registered
// under `changeObserversRootPath`.
class LogMonitor(logFile: File, changeObserversRootPath: String)(
  implicit logEntryParser: LogEntryParser[LogEntry], actorSystem: ActorSystem
)
  extends FileWatcher(logFile, false) with ActorLogging {

  // Index of the next unread line in the log file.
  // NOTE(review): the file is re-streamed from the start on every change and
  // already-seen lines are skipped via drop(logPointer) — O(n) per event, but
  // avoids holding the file in memory; confirm this is acceptable for large logs.
  private var logPointer = 0

  this.log.info(s"... logfile ${logFile} Monitoring begins...")

  sendNotifications(logFile) //On start up, read whatever is in the log file and send out notifications

  /*
     listen to log file change event and supply a callback
   */
  self ! when(events = EventType.ENTRY_MODIFY) {
    case (EventType.ENTRY_MODIFY, file) if !file.isDirectory => sendNotifications(file)
    case _ => //ignore
  }

  // Fans a parsed entry out to every actor below the observers' root path.
  private[this] def sendNotifications(logEntry: LogEntry):Unit = {
    context.actorSelection(s"/user/${changeObserversRootPath}/*") ! logEntry
  }

  // Reads lines appended since the last notification, parses them, and notifies
  // observers. Synchronized because file-change events may arrive concurrently
  // with the start-up read, and both mutate logPointer.
  private[this] def sendNotifications(file: File) : Unit = synchronized {
    val startingPointer = logPointer
    /* stream to prevent loading the entire file in memory (to prevent possible out of memory exception) */
    val lines = file.lineIterator
    val iterator = lines.drop(logPointer)

    for (str <- iterator) {
      logEntryParser.parse(str) match {
        case Some(logEntry) => sendNotifications(logEntry)
        case None => this.log.error(s"Failed to parse log entry ${str}")
      }
      logPointer +=1
    }
    //if the log file gets truncated, reset pointer
    // (pointer unchanged after a change event implies the file shrank below it)
    logPointer = if(0 != logPointer && startingPointer == logPointer) file.lineIterator.length else logPointer
  }
}
| ezinelony/http-log-monitor | monitor/src/main/scala/com/nelly/monitor/LogMonitor.scala | Scala | mit | 1,731 |
package ml.sparkling.graph.operators.measures.vertex.betweenness.hua.struct
import ml.sparkling.graph.operators.measures.vertex.betweenness.hua.struct.messages.DFSPointer
import org.apache.spark.graphx.VertexId
/**
* Created by mth on 5/6/17.
*/
/**
 * Immutable per-vertex state for the Hua et al. betweenness computation: the
 * vertex's view of concurrently running BFS traversals (`bfsMap`), its position
 * in the DFS spanning structure (`pred`/`succ`/`dfsPointer`) and the
 * betweenness value accumulated so far (`bc`).
 */
class NOVertex(val vertexId: VertexId,
               val bfsMap: Map[VertexId, NOBFSVertex],
               val pred: Option[VertexId],
               val succ: Option[Array[VertexId]],
               val dfsPointer: Option[DFSPointer],
               val bc: Double) extends Serializable {

  /** Copy of this vertex with `idParent` recorded as its predecessor. */
  def setParent(idParent: VertexId): NOVertex =
    NOVertex(vertexId, bfsMap, pred = Some(idParent), succ = succ, dfsPointer = dfsPointer, bc = bc)

  /** Copy of this vertex with both predecessor and successors replaced. */
  def setPredecessorAndSuccessors(newPred: Option[VertexId], newSucc: Option[Array[VertexId]]): NOVertex =
    NOVertex(vertexId, bfsMap, pred = newPred, succ = newSucc, dfsPointer = dfsPointer, bc = bc)

  // A vertex is complete once both its predecessor and successor set are known.
  val isCompleted: Boolean = pred.isDefined && succ.isDefined

  // Leaf = no successor set assigned yet.
  val leaf: Boolean = succ.isEmpty

  // Whether this vertex is the root of its own BFS traversal.
  lazy val bfsRoot: Boolean = bfsMap.contains(vertexId)

  // Smallest successor id, if any successors exist.
  lazy val lowestSucc: Option[VertexId] = succ.flatMap(_.sorted.headOption)

  // Greatest BFS distance recorded in bfsMap (0 when no traversal is known).
  lazy val eccentricity = if (bfsMap.isEmpty) 0 else bfsMap.valuesIterator.map(_.distance).max

  /** Copy of this vertex carrying the given DFS pointer. */
  def withDfsPointer(pointer: Option[DFSPointer]): NOVertex =
    NOVertex(vertexId, bfsMap, pred = pred, succ = succ, dfsPointer = pointer, bc = bc)

  /** Copy with selectively replaced fields; `bcInc` is added onto the current `bc`. */
  def update(bfsMap: Map[VertexId, NOBFSVertex] = bfsMap, succ: Option[Array[VertexId]] = succ, dfsPointer: Option[DFSPointer] = dfsPointer, bcInc: Double = 0): NOVertex =
    NOVertex(vertexId, bfsMap, pred = pred, succ = succ, dfsPointer = dfsPointer, bc = bc + bcInc)
}
object NOVertex extends Serializable {
  // Factory with defaults for a freshly discovered vertex: no BFS state,
  // no predecessor/successors, no DFS pointer, zero betweenness.
  def apply(vertexId: VertexId,
            bfsMap: Map[VertexId, NOBFSVertex] = Map.empty,
            pred: Option[VertexId] = None,
            succ: Option[Array[VertexId]] = None,
            dfsPointer: Option[DFSPointer] = None,
            bc: Double = .0): NOVertex = new NOVertex(vertexId, bfsMap, pred, succ, dfsPointer, bc)
}
| sparkling-graph/sparkling-graph | operators/src/main/scala/ml/sparkling/graph/operators/measures/vertex/betweenness/hua/struct/NOVertex.scala | Scala | bsd-2-clause | 1,827 |
package org.http4s.blazecore
package websocket
import fs2.Stream
import fs2.concurrent.{Queue, SignallingRef}
import cats.effect.IO
import cats.implicits._
import java.util.concurrent.atomic.AtomicBoolean
import org.http4s.Http4sSpec
import org.http4s.blaze.pipeline.LeafBuilder
import org.http4s.websocket.{WebSocket, WebSocketFrame}
import org.http4s.websocket.WebSocketFrame._
import org.http4s.blaze.pipeline.Command
import scala.concurrent.ExecutionContext
/**
 * Tests the Http4sWSStage websocket pipeline stage by wiring it to a WSTestHead
 * and driving frames through both directions: `sendInbound` simulates frames
 * arriving from the network, `sendWSOutbound` enqueues frames the application
 * wants to send, and `pollOutbound` observes what the stage actually writes.
 */
class Http4sWSStageSpec extends Http4sSpec {
  override implicit def testExecutionContext: ExecutionContext =
    ExecutionContext.global

  // Test harness pairing the application-side queue with the network-side head
  // and exposing the close-hook flag set when the websocket is torn down.
  class TestWebsocketStage(
      outQ: Queue[IO, WebSocketFrame],
      head: WSTestHead,
      closeHook: AtomicBoolean) {

    // Enqueues frames as if the application had emitted them.
    def sendWSOutbound(w: WebSocketFrame*): IO[Unit] =
      Stream
        .emits(w)
        .covary[IO]
        .through(outQ.enqueue)
        .compile
        .drain

    // Pushes frames into the stage as if they arrived from the peer.
    def sendInbound(w: WebSocketFrame*): IO[Unit] =
      w.toList.traverse(head.put).void

    // Waits up to `timeoutSeconds` for the next frame written by the stage.
    def pollOutbound(timeoutSeconds: Long = 4L): IO[Option[WebSocketFrame]] =
      head.poll(timeoutSeconds)

    // Collects up to `batchSize` written frames within the timeout.
    def pollBatchOutputbound(batchSize: Int, timeoutSeconds: Long = 4L): IO[List[WebSocketFrame]] =
      head.pollBatch(batchSize, timeoutSeconds)

    // True once the stage has invoked the websocket's onClose callback.
    def wasCloseHookCalled(): IO[Boolean] =
      IO(closeHook.get())
  }

  object TestWebsocketStage {
    // Builds the full pipeline (WSTestHead -> Http4sWSStage) and signals
    // Connected so the stage starts processing.
    def apply(): IO[TestWebsocketStage] =
      for {
        outQ <- Queue.unbounded[IO, WebSocketFrame]
        closeHook = new AtomicBoolean(false)
        ws = WebSocket[IO](outQ.dequeue, _.drain, IO(closeHook.set(true)))
        deadSignal <- SignallingRef[IO, Boolean](false)
        wsHead <- WSTestHead()
        head = LeafBuilder(new Http4sWSStage[IO](ws, closeHook, deadSignal)).base(wsHead)
        _ <- IO(head.sendInboundCommand(Command.Connected))
      } yield new TestWebsocketStage(outQ, head, closeHook)
  }

  "Http4sWSStage" should {
    "reply with pong immediately after ping" in (for {
      socket <- TestWebsocketStage()
      _ <- socket.sendInbound(Ping())
      _ <- socket.pollOutbound(2).map(_ must beSome[WebSocketFrame](Pong()))
      _ <- socket.sendInbound(Close())
    } yield ok)

    "not write any more frames after close frame sent" in (for {
      socket <- TestWebsocketStage()
      _ <- socket.sendWSOutbound(Text("hi"), Close(), Text("lol"))
      _ <- socket.pollOutbound().map(_ must_=== Some(Text("hi")))
      _ <- socket.pollOutbound().map(_ must_=== Some(Close()))
      _ <- socket.pollOutbound().map(_ must_=== None)
      _ <- socket.sendInbound(Close())
    } yield ok)

    "send a close frame back and call the on close handler upon receiving a close frame" in (for {
      socket <- TestWebsocketStage()
      _ <- socket.sendInbound(Close())
      _ <- socket.pollBatchOutputbound(2, 2).map(_ must_=== List(Close()))
      _ <- socket.wasCloseHookCalled().map(_ must_=== true)
    } yield ok)

    "not send two close frames " in (for {
      socket <- TestWebsocketStage()
      _ <- socket.sendWSOutbound(Close())
      _ <- socket.sendInbound(Close())
      _ <- socket.pollBatchOutputbound(2).map(_ must_=== List(Close()))
      _ <- socket.wasCloseHookCalled().map(_ must_=== true)
    } yield ok)

    "ignore pong frames" in (for {
      socket <- TestWebsocketStage()
      _ <- socket.sendInbound(Pong())
      _ <- socket.pollOutbound().map(_ must_=== None)
      _ <- socket.sendInbound(Close())
    } yield ok)
  }
}
| ChristopherDavenport/http4s | blaze-core/src/test/scala/org/http4s/blazecore/websocket/Http4sWSStageSpec.scala | Scala | apache-2.0 | 3,444 |
// Copyright 2012 Brennan Saeta
//
// This file is part of Axess
//
// Axess is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Axess is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Axess. If not, see <http://www.gnu.org/licenses/>.
package models
import org.specs2.mutable._
import play.api.test._
import play.api.test.Helpers._
import org.openqa.selenium._
import org.openqa.selenium.htmlunit.HtmlUnitDriver
/**
 * Integration-style specs for [[models.BasicSite]]: verifies that a configured
 * site can be visited over HTTP and HTTPS with HtmlUnit, and that the class
 * can be instantiated reflectively (as the site-loading machinery does).
 */
class BasicSiteSpec extends Specification {
  "BasicSite" should {
    "get the Stanford homepage" in {
      val browser = new HtmlUnitDriver
      val site = new BasicSite
      site.configure(
        Site(1,
          "stanford-home",
          "", // username
          "", // password
          "http://www.stanford.edu/",
          "models.BasicSite",
          ""))
      site.login(browser)
      browser.getCurrentUrl() must startWith("http://www.stanford.edu/")
    }
    "get the Stanford homepage over SSL" in {
      val browser = new HtmlUnitDriver
      val site = new BasicSite
      site.configure(
        Site(1,
          "stanford-ssl",
          "",
          "",
          "https://www.stanford.edu/",
          "models.BasicSite",
          ""))
      site.login(browser)
      browser.getCurrentUrl() must startWith("https://www.stanford.edu/")
    }
    "be created by reflection" in {
      val name = "models.BasicSite"
      val clazz = Class.forName(name)
      // Instantiate once and reuse the result; the previous version bound the
      // instance to an unused local and then reflectively constructed a
      // second, throwaway instance inside the assertion.
      val instance = clazz.getConstructors()(0).newInstance()
      (clazz != null) === (true) and
        (instance != null) === (true)
    }
  }
}
| saeta/axess | test/models/BasicSiteSpec.scala | Scala | agpl-3.0 | 2,062 |
// Compiler "pos" test: this file only needs to type-check, it is never run
// (note the unimplemented `c`).  The pattern `case pc: Min[_]` forces a
// subtype test against Min's F-bounded parameter (S <: B with A[S]), whose
// self-referential bound could plausibly send a naive subtyping algorithm
// into an infinite loop -- hence the object's name.  Do not "simplify" the
// types here; their exact shape is the point of the test.
object InfiniteSubtypingLoopPossibility {
  trait A[X]
  trait B extends A[B]
  // Self-referential bound: S must be a B that is an A of itself.
  trait Min[+S <: B with A[S]]
  def c: Any = ???
  c match {
    case pc: Min[_] =>
  }
}
| lampepfl/dotty | tests/pos/infinite-loop-potential.scala | Scala | apache-2.0 | 170 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mxnet
import org.apache.mxnet.init.Base._
import org.apache.mxnet.utils.CToScalaUtils
import java.io._
import java.security.MessageDigest
import scala.collection.mutable.{ArrayBuffer, ListBuffer}
/**
* This object will generate the Scala documentation of the new Scala API
* Two file namely: SymbolAPIBase.scala and NDArrayAPIBase.scala
* The code will be executed during Macros stage and file live in Core stage
*/
/**
 * Generates the documented Scala API base classes (SymbolAPIBase.scala,
 * NDArrayAPIBase.scala, SymbolBase.scala, NDArrayBase.scala) by querying the
 * MXNet backend for its operator metadata.  Runs during the macro build stage;
 * the generated files live in the core module.
 */
private[mxnet] object APIDocGenerator{
  // One backend operator argument: name, Scala type, doc text, optionality.
  case class absClassArg(argName : String, argType : String, argDesc : String, isOptional : Boolean)
  // One backend operator: name, doc text, arguments and fully-qualified return type.
  case class absClassFunction(name : String, desc : String,
                         listOfArgs: List[absClassArg], returnType : String)

  // Apache license header emitted at the top of every generated file.  Kept as
  // a single shared constant (it was previously duplicated verbatim in
  // absClassGen and nonTypeSafeClassGen, which risked the two copies drifting).
  // scalastyle:off
  private val apacheLicence = "/*\n* Licensed to the Apache Software Foundation (ASF) under one or more\n* contributor license agreements. See the NOTICE file distributed with\n* this work for additional information regarding copyright ownership.\n* The ASF licenses this file to You under the Apache License, Version 2.0\n* (the \"License\"); you may not use this file except in compliance with\n* the License. You may obtain a copy of the License at\n*\n* http://www.apache.org/licenses/LICENSE-2.0\n*\n* Unless required by applicable law or agreed to in writing, software\n* distributed under the License is distributed on an \"AS IS\" BASIS,\n* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n* See the License for the specific language governing permissions and\n* limitations under the License.\n*/\n"
  // scalastyle:on

  /**
   * Build entry point.  args(0) is the directory the generated sources are
   * written into.  The MD5 hashes of the generated files are collected but
   * currently not used beyond this method.
   */
  def main(args: Array[String]) : Unit = {
    val FILE_PATH = args(0)
    val hashCollector = ListBuffer[String]()
    hashCollector += absClassGen(FILE_PATH, true)
    hashCollector += absClassGen(FILE_PATH, false)
    hashCollector += nonTypeSafeClassGen(FILE_PATH, true)
    hashCollector += nonTypeSafeClassGen(FILE_PATH, false)
    val finalHash = hashCollector.mkString("\n")
  }

  /** Base64-url-encoded MD5 digest of `input`; fingerprints a generated file. */
  def MD5Generator(input : String) : String = {
    val md = MessageDigest.getInstance("MD5")
    md.update(input.getBytes("UTF-8"))
    val digest = md.digest()
    org.apache.commons.codec.binary.Base64.encodeBase64URLSafeString(digest)
  }

  /**
   * Generates the type-safe abstract API class (SymbolAPIBase or
   * NDArrayAPIBase), writes it to FILE_PATH and returns its MD5 hash.
   */
  def absClassGen(FILE_PATH : String, isSymbol : Boolean) : String = {
    // scalastyle:off
    val absClassFunctions = getSymbolNDArrayMethods(isSymbol)
    // Defines Operators that should not generated
    val notGenerated = Set("Custom")
    // TODO: Add Filter to the same location in case of refactor
    val absFuncs = absClassFunctions.filterNot(_.name.startsWith("_"))
      .filterNot(ele => notGenerated.contains(ele.name))
      .map(absClassFunction => {
      val scalaDoc = generateAPIDocFromBackend(absClassFunction)
      val defBody = generateAPISignature(absClassFunction, isSymbol)
      s"$scalaDoc\n$defBody"
    })
    val packageName = if (isSymbol) "SymbolAPIBase" else "NDArrayAPIBase"
    val scalaStyle = "// scalastyle:off"
    val packageDef = "package org.apache.mxnet"
    val imports = "import org.apache.mxnet.annotation.Experimental"
    val absClassDef = s"abstract class $packageName"
    val finalStr = s"$apacheLicence\n$scalaStyle\n$packageDef\n$imports\n$absClassDef {\n${absFuncs.mkString("\n")}\n}"
    val pw = new PrintWriter(new File(FILE_PATH + s"$packageName.scala"))
    pw.write(finalStr)
    pw.close()
    MD5Generator(finalStr)
  }

  /**
   * Generates the non-type-safe abstract class (SymbolBase or NDArrayBase)
   * whose methods take untyped varargs/kwargs, writes it to FILE_PATH and
   * returns its MD5 hash.
   */
  def nonTypeSafeClassGen(FILE_PATH : String, isSymbol : Boolean) : String = {
    // scalastyle:off
    val absClassFunctions = getSymbolNDArrayMethods(isSymbol)
    val absFuncs = absClassFunctions.map(absClassFunction => {
      val scalaDoc = generateAPIDocFromBackend(absClassFunction, false)
      if (isSymbol) {
        val defBody = s"def ${absClassFunction.name}(name : String = null, attr : Map[String, String] = null)(args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): org.apache.mxnet.Symbol"
        s"$scalaDoc\n$defBody"
      } else {
        val defBodyWithKwargs = s"def ${absClassFunction.name}(kwargs: Map[String, Any] = null)(args: Any*) : org.apache.mxnet.NDArrayFuncReturn"
        val defBody = s"def ${absClassFunction.name}(args: Any*) : org.apache.mxnet.NDArrayFuncReturn"
        s"$scalaDoc\n$defBodyWithKwargs\n$scalaDoc\n$defBody"
      }
    })
    val packageName = if (isSymbol) "SymbolBase" else "NDArrayBase"
    val scalaStyle = "// scalastyle:off"
    val packageDef = "package org.apache.mxnet"
    val imports = "import org.apache.mxnet.annotation.Experimental"
    val absClassDef = s"abstract class $packageName"
    val finalStr = s"$apacheLicence\n$scalaStyle\n$packageDef\n$imports\n$absClassDef {\n${absFuncs.mkString("\n")}\n}"
    val pw = new PrintWriter(new File(FILE_PATH + s"$packageName.scala"))
    pw.write(finalStr)
    pw.close()
    MD5Generator(finalStr)
  }

  /** Renders the ScalaDoc comment for one operator (optionally with @param tags). */
  def generateAPIDocFromBackend(func : absClassFunction, withParam : Boolean = true) : String = {
    val desc = ArrayBuffer[String]()
    desc += " * <pre>"
    func.desc.split("\n").foreach({ currStr =>
      desc += s" * $currStr"
    })
    desc += " * </pre>"
    val params = func.listOfArgs.map({ absClassArg =>
      // "var" and "type" are Scala keywords; rename them in the generated API.
      val currArgName = absClassArg.argName match {
        case "var" => "vari"
        case "type" => "typeOf"
        case _ => absClassArg.argName
      }
      s" * @param $currArgName\t\t${absClassArg.argDesc}"
    })
    val returnType = s" * @return ${func.returnType}"
    if (withParam) {
      s"  /**\n${desc.mkString("\n")}\n${params.mkString("\n")}\n$returnType\n  */"
    } else {
      s"  /**\n${desc.mkString("\n")}\n$returnType\n  */"
    }
  }

  /** Renders the typed method signature for one operator. */
  def generateAPISignature(func : absClassFunction, isSymbol : Boolean) : String = {
    var argDef = ListBuffer[String]()
    func.listOfArgs.foreach(absClassArg => {
      val currArgName = absClassArg.argName match {
        case "var" => "vari"
        case "type" => "typeOf"
        case _ => absClassArg.argName
      }
      if (absClassArg.isOptional) {
        argDef += s"$currArgName : Option[${absClassArg.argType}] = None"
      }
      else {
        argDef += s"$currArgName : ${absClassArg.argType}"
      }
    })
    var returnType = func.returnType
    if (isSymbol) {
      argDef += "name : String = null"
      argDef += "attr : Map[String, String] = null"
    } else {
      argDef += "out : Option[NDArray] = None"
      returnType = "org.apache.mxnet.NDArrayFuncReturn"
    }
    val experimentalTag = "@Experimental"
    s"$experimentalTag\ndef ${func.name} (${argDef.mkString(", ")}) : $returnType"
  }

  // List and add all the atomic symbol functions to current module.
  private def getSymbolNDArrayMethods(isSymbol : Boolean): List[absClassFunction] = {
    val opNames = ListBuffer.empty[String]
    val returnType = if (isSymbol) "Symbol" else "NDArray"
    _LIB.mxListAllOpNames(opNames)
    // TODO: Add '_linalg_', '_sparse_', '_image_' support
    // TODO: Add Filter to the same location in case of refactor
    opNames.map(opName => {
      val opHandle = new RefLong
      _LIB.nnGetOpHandle(opName, opHandle)
      makeAtomicSymbolFunction(opHandle.value, opName, "org.apache.mxnet." + returnType)
    }).toList.filterNot(_.name.startsWith("_"))
  }

  // Create an atomic symbol function by handle and function name.
  private def makeAtomicSymbolFunction(handle: SymbolHandle, aliasName: String, returnType : String)
    : absClassFunction = {
    val name = new RefString
    val desc = new RefString
    val keyVarNumArgs = new RefString
    val numArgs = new RefInt
    val argNames = ListBuffer.empty[String]
    val argTypes = ListBuffer.empty[String]
    val argDescs = ListBuffer.empty[String]
    _LIB.mxSymbolGetAtomicSymbolInfo(
      handle, name, desc, numArgs, argNames, argTypes, argDescs, keyVarNumArgs)
    val argList = argNames zip argTypes zip argDescs map { case ((argName, argType), argDesc) =>
      val typeAndOption = CToScalaUtils.argumentCleaner(argName, argType, returnType)
      new absClassArg(argName, typeAndOption._1, argDesc, typeAndOption._2)
    }
    new absClassFunction(aliasName, desc.value, argList.toList, returnType)
  }
}
| rahul003/mxnet | scala-package/macros/src/main/scala/org/apache/mxnet/APIDocGenerator.scala | Scala | apache-2.0 | 9,826 |
/*
* TimelineFrame.scala
* (Mellite)
*
* Copyright (c) 2012-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.mellite
import de.sciss.lucre.synth.Txn
import de.sciss.lucre.{Txn => LTxn}
import de.sciss.mellite.impl.timeline.{TimelineFrameImpl => Impl}
import de.sciss.proc.Timeline
object TimelineFrame extends WorkspaceWindow.Key {
  /** Creates the timeline editor window for `obj`, delegating to the impl module. */
  def apply[T <: Txn[T]](obj: Timeline[T])
                        (implicit tx: T, handler: UniverseHandler[T]): TimelineFrame[T] =
    Impl(obj)

  // Window representation type required by the WorkspaceWindow.Key contract.
  type Repr[T <: LTxn[T]] = TimelineFrame[T]
}
/** A workspace window hosting a [[TimelineView]]. */
trait TimelineFrame[T <: LTxn[T]] extends WorkspaceWindow[T] {
  // Self-representation: the frame in any transaction type is again a TimelineFrame.
  type Repr[~ <: LTxn[~]] = TimelineFrame[~]

  /** The timeline view displayed by this window. */
  override def view: TimelineView[T]
} | Sciss/Mellite | app/src/main/scala/de/sciss/mellite/TimelineFrame.scala | Scala | agpl-3.0 | 875 |
package org.scaladebugger.api.profiles.java.info
import com.sun.jdi.{ReferenceType, StackFrame, ThreadReference, VirtualMachine}
import org.scaladebugger.api.profiles.traits.info.{FrameInfo, ThreadStatusInfo}
import org.scaladebugger.api.virtualmachines.ScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalamock.scalatest.MockFactory
import org.scalatest.{FunSpec, Matchers, ParallelTestExecution}
/**
 * Unit tests for JavaThreadStatusInfo: each predicate/accessor must delegate
 * directly to the underlying JDI ThreadReference.  The "false" cases use the
 * bitwise complement (~CONSTANT) of a status constant, which is guaranteed to
 * differ from the constant itself.
 */
class JavaThreadStatusInfoSpec extends ParallelMockFunSpec
{
  private val mockThreadReference = mock[ThreadReference]
  private val javaThreadStatusInfoProfile =
    new JavaThreadStatusInfo(mockThreadReference)

  describe("JavaThreadStatusInfo") {
    describe("#statusCode") {
      it("should return the status code of the underlying thread") {
        val expected = 999

        (mockThreadReference.status _).expects().returning(expected).once()

        val actual = javaThreadStatusInfoProfile.statusCode

        actual should be (expected)
      }
    }

    describe("#isUnknown") {
      it("should return true if the status of the thread reference is unknown") {
        (mockThreadReference.status _).expects()
          .returning(ThreadReference.THREAD_STATUS_UNKNOWN)

        javaThreadStatusInfoProfile.isUnknown should be (true)
      }

      it("should return false if the status of the thread reference is not unknown") {
        (mockThreadReference.status _).expects()
          .returning(~ThreadReference.THREAD_STATUS_UNKNOWN)

        javaThreadStatusInfoProfile.isUnknown should be (false)
      }
    }

    describe("#isZombie") {
      it("should return true if the thread is a zombie") {
        (mockThreadReference.status _).expects()
          .returning(ThreadReference.THREAD_STATUS_ZOMBIE)

        javaThreadStatusInfoProfile.isZombie should be (true)
      }

      it("should return false if the thread is not a zombie") {
        (mockThreadReference.status _).expects()
          .returning(~ThreadReference.THREAD_STATUS_ZOMBIE)

        javaThreadStatusInfoProfile.isZombie should be (false)
      }
    }

    describe("#isRunning") {
      it("should return true if the thread is running") {
        (mockThreadReference.status _).expects()
          .returning(ThreadReference.THREAD_STATUS_RUNNING)

        javaThreadStatusInfoProfile.isRunning should be (true)
      }

      it("should return false if the thread is not running") {
        (mockThreadReference.status _).expects()
          .returning(~ThreadReference.THREAD_STATUS_RUNNING)

        javaThreadStatusInfoProfile.isRunning should be (false)
      }
    }

    describe("#isSleeping") {
      it("should return true if the thread is sleeping") {
        (mockThreadReference.status _).expects()
          .returning(ThreadReference.THREAD_STATUS_SLEEPING)

        javaThreadStatusInfoProfile.isSleeping should be (true)
      }

      it("should return false if the thread is not sleeping") {
        (mockThreadReference.status _).expects()
          .returning(~ThreadReference.THREAD_STATUS_SLEEPING)

        javaThreadStatusInfoProfile.isSleeping should be (false)
      }
    }

    describe("#isMonitor") {
      it("should return true if the thread is monitoring") {
        (mockThreadReference.status _).expects()
          .returning(ThreadReference.THREAD_STATUS_MONITOR)

        javaThreadStatusInfoProfile.isMonitor should be (true)
      }

      it("should return false if the thread is not monitoring") {
        (mockThreadReference.status _).expects()
          .returning(~ThreadReference.THREAD_STATUS_MONITOR)

        javaThreadStatusInfoProfile.isMonitor should be (false)
      }
    }

    describe("#isWait") {
      it("should return true if the thread is waiting") {
        (mockThreadReference.status _).expects()
          .returning(ThreadReference.THREAD_STATUS_WAIT)

        javaThreadStatusInfoProfile.isWait should be (true)
      }

      it("should return false if the thread is not waiting") {
        (mockThreadReference.status _).expects()
          .returning(~ThreadReference.THREAD_STATUS_WAIT)

        javaThreadStatusInfoProfile.isWait should be (false)
      }
    }

    describe("#isNotStarted") {
      it("should return true if the thread has not been started") {
        (mockThreadReference.status _).expects()
          .returning(ThreadReference.THREAD_STATUS_NOT_STARTED)

        javaThreadStatusInfoProfile.isNotStarted should be (true)
      }

      it("should return false if the thread has been started") {
        (mockThreadReference.status _).expects()
          .returning(~ThreadReference.THREAD_STATUS_NOT_STARTED)

        javaThreadStatusInfoProfile.isNotStarted should be (false)
      }
    }

    describe("#isAtBreakpoint") {
      it("should return true if the thread is suspended at a breakpoint") {
        val expected = true

        (mockThreadReference.isAtBreakpoint _).expects()
          .returning(expected).once()

        val actual = javaThreadStatusInfoProfile.isAtBreakpoint

        actual should be (expected)
      }

      it("should return false if the thread is not suspended at a breakpoint") {
        val expected = false

        (mockThreadReference.isAtBreakpoint _).expects()
          .returning(expected).once()

        val actual = javaThreadStatusInfoProfile.isAtBreakpoint

        actual should be (expected)
      }
    }

    describe("#isSuspended") {
      it("should return true if the thread is suspended") {
        val expected = true

        (mockThreadReference.isSuspended _).expects()
          .returning(expected).once()

        val actual = javaThreadStatusInfoProfile.isSuspended

        actual should be (expected)
      }

      it("should return false if the thread is not suspended") {
        val expected = false

        (mockThreadReference.isSuspended _).expects()
          .returning(expected).once()

        val actual = javaThreadStatusInfoProfile.isSuspended

        actual should be (expected)
      }
    }

    describe("#suspendCount") {
      it("should return the suspend count of the underlying thread") {
        val expected = 999

        (mockThreadReference.suspendCount _).expects()
          .returning(expected).once()

        val actual = javaThreadStatusInfoProfile.suspendCount

        actual should be (expected)
      }
    }
  }
}
| ensime/scala-debugger | scala-debugger-api/src/test/scala/org/scaladebugger/api/profiles/java/info/JavaThreadStatusInfoSpec.scala | Scala | apache-2.0 | 6,366 |
package org.salgo.geometry.structures
import org.salgo.common.{Comparison, MathUtils}
/**
 * An immutable point in 3-space.  Note that `+` and `-` return a [[Vector3D]]
 * (displacement), not another point.
 */
case class Point3D(x: Double, y: Double, z: Double) {

  /** Component-wise sum, as a displacement vector. */
  def + (toAdd: Point3D) : Vector3D = {
    Vector3D(this.x + toAdd.x, this.y + toAdd.y, this.z + toAdd.z)
  }

  /** Component-wise difference, as a displacement vector. */
  def - (subtrahend: Point3D) : Vector3D = {
    Vector3D(this.x - subtrahend.x, this.y - subtrahend.y, this.z - subtrahend.z)
  }

  /** The position vector of this point. */
  def toVector : Vector3D = {
    Vector3D(this.x, this.y, this.z)
  }

  /**
   * True if this point lies inside (or on, within `epsilon`) the tetrahedron
   * with vertices a, b, c, d.  The bounding-box pre-check is currently
   * disabled -- the determinant test alone decides.
   */
  def isInTetrahedra(a: Point3D, b: Point3D, c: Point3D, d: Point3D, epsilon: Double) : Boolean = {
    //if (!this.isInTetrahedraBoundingBox(a, b, c, d, epsilon)) false
    //else
    if (this.isInTetrahedraByDotProduct(a, b, c, d, epsilon)) true
    else false
  }

  /** Cheap rejection test: is this point within the (epsilon-padded) AABB of the four vertices? */
  def isInTetrahedraBoundingBox(a: Point3D, b: Point3D, c: Point3D, d: Point3D, epsilon: Double) : Boolean = {
    if (this.x < MathUtils.min(a.x, b.x, c.x, d.x).getOrElse(0.0) - epsilon) false
    else if (this.x > MathUtils.max(a.x, b.x, c.x, d.x).getOrElse(0.0) + epsilon) false
    else if (this.y < MathUtils.min(a.y, b.y, c.y, d.y).getOrElse(0.0) - epsilon) false
    else if (this.y > MathUtils.max(a.y, b.y, c.y, d.y).getOrElse(0.0) + epsilon) false
    else if (this.z < MathUtils.min(a.z, b.z, c.z, d.z).getOrElse(0.0) - epsilon) false
    else if (this.z > MathUtils.max(a.z, b.z, c.z, d.z).getOrElse(0.0) + epsilon) false
    else true
  }

  /**
   * Standard determinant-sign containment test: the point is inside iff the
   * five 4x4 homogeneous determinants (d0 for the tetrahedron itself, d1..d4
   * with this point substituted for each vertex in turn) all share d0's sign.
   * NOTE(review): `epsilon` is accepted but not used here; the tolerance comes
   * from Comparison's approximate comparators -- confirm this is intended.
   */
  def isInTetrahedraByDotProduct(a: Point3D, b: Point3D, c: Point3D, d: Point3D, epsilon: Double) : Boolean = {
    val d0 = this.getDeterminant(a, b, c, d)
    val d1 = this.getDeterminant(this, b, c, d)
    val d2 = this.getDeterminant(a, this, c, d)
    val d3 = this.getDeterminant(a, b, this, d)
    val d4 = this.getDeterminant(a, b, c, this)
    if (Comparison.isApproximatelyEqualOrGreater(d0, 0d)) {
      if (!Comparison.isApproximatelyEqualOrGreater(d1, 0d)) false
      else if (!Comparison.isApproximatelyEqualOrGreater(d2, 0d)) false
      else if (!Comparison.isApproximatelyEqualOrGreater(d3, 0d)) false
      else if (!Comparison.isApproximatelyEqualOrGreater(d4, 0d)) false
      else true
    } else {
      if (!Comparison.isApproximatelyEqualOrSmaller(d1, 0d)) false
      else if (!Comparison.isApproximatelyEqualOrSmaller(d2, 0d)) false
      else if (!Comparison.isApproximatelyEqualOrSmaller(d3, 0d)) false
      else if (!Comparison.isApproximatelyEqualOrSmaller(d4, 0d)) false
      else true
    }
  }

  // Determinant of the 4x4 homogeneous matrix of the four points; `default`
  // is returned if the matrix implementation cannot compute one.
  private def getDeterminant(a: Point3D, b: Point3D, c: Point3D, d: Point3D, default: Double = 0.0) : Double = {
    DoubleMatrix(4, Seq(a.x, a.y, a.z, 1), Seq(b.x, b.y, b.z, 1), Seq(c.x, c.y, c.z, 1), Seq(d.x, d.y, d.z, 1)).determinant().getOrElse(default)
  }
}
object Point3D {

  /** Builds one [[Point3D]] per (x, y, z) triple, preserving order. */
  def apply(coordinates: (Double, Double, Double)*) : Traversable[Point3D] = {
    // A single linear map; the previous foldLeft appended with :+ to a Seq
    // (backed by List), which is O(n) per append and O(n^2) overall.
    coordinates.map { case (x, y, z) => Point3D(x, y, z) }
  }

  /** Euclidean distance between the two points. */
  def getDistance(p1: Point3D, p2: Point3D) : Double = {
    (p2 - p1).magnitude()
  }
} | ascensio/salgo | src/org.salgo/geometry/structures/Point3D.scala | Scala | apache-2.0 | 2,957 |
package org.jetbrains.plugins.scala
package format
import com.intellij.psi.PsiElement
import lang.psi.api.base.ScInterpolatedStringLiteral
import lang.psi.api.expr.{ScBlockExpr, ScExpression}
import lang.lexer.ScalaTokenTypes
import com.intellij.openapi.util.text.StringUtil
/**
* Pavel Fatin
*/
/**
 * Parses a Scala interpolated string literal (s"", f"") from the PSI tree into
 * a sequence of [[StringPart]]s: literal text, escapes, and injected
 * expressions (with their format specifiers when the literal is f-interpolated).
 */
object InterpolatedStringParser extends StringParser {
  // Matches a java.util.Formatter specifier (e.g. "%2$-8.3f") at the start of a string.
  private val FormatSpecifierPattern = "^%(\\\\d+\\\\$)?([-#+ 0,(\\\\<]*)?(\\\\d+)?(\\\\.\\\\d+)?([tT])?([a-zA-Z%])".r

  def parse(element: PsiElement) = parse(element, checkStripMargin = true)

  def parse(element: PsiElement, checkStripMargin: Boolean): Option[Seq[StringPart]] = {
    // Literals followed by .stripMargin are handled by the dedicated parser.
    if (checkStripMargin) element match {
      case WithStrippedMargin(_, _) => return StripMarginParser.parse(element)
      case _ =>
    }
    Some(element) collect {
      case literal: ScInterpolatedStringLiteral =>
        val formatted = literal.firstChild.exists(_.getText == "f")

        // Pair each child with its successor so an injection can peek at the
        // following text element for a format specifier.  The right list is
        // one shorter, so the last pair is filled with None; the `null` left
        // fill can never be used.
        val pairs = {
          val elements = literal.children.toList.drop(1)  // drop the interpolator id ("s"/"f")
          elements.zipAll(elements.drop(1).map(Some(_)), null, None)
        }

        val parts = pairs.collect {
          case (expression: ScExpression, next) =>
            // Unwrap a single-expression block: ${x} behaves like $x.
            val actualExpression = expression match {
              case block: ScBlockExpr => if (block.exprs.length > 1) block else block.exprs.headOption.getOrElse(block)
              case it => it
            }
            // In an f-literal, a specifier directly after the injection binds to it.
            val specifier = if (!formatted) None else next match {
              case Some(e) if isTextElement(e) =>
                FormatSpecifierPattern.findFirstIn(textIn(e)).map(format => Specifier(Span(e, 0, format.length), format))
              case _ => None
            }
            Injection(actualExpression, specifier)
          case (e, _) if isTextElement(e) =>
            // Strip a leading specifier that was already consumed by the
            // preceding injection.
            val text = {
              val s = textIn(e)
              if (!formatted) s else
                FormatSpecifierPattern.findFirstIn(s).map(format => s.substring(format.length)).getOrElse(s)
            }
            Text(text)
          case (e, _) if e.getNode.getElementType == ScalaTokenTypes.tINTERPOLATED_STRING_ESCAPE =>
            Text(e.getText.drop(1))
        }

        // The first text part still carries the opening quote(s); drop them.
        (parts match {
          case (Text(s) :: t) =>
            val edgeLength = if (literal.isMultiLineString) 3 else 1
            Text(s.drop(edgeLength)) :: t
          case it => it
        }) flatMap {
          case t: Text => t.withEscapedPercent(element.getManager)
          case part => List(part)
        } filter {
          case Text("") => false
          case _ => true
        }
    }
  }

  // True for the token types that hold literal interpolated-string text.
  private def isTextElement(e: PsiElement) = {
    val elementType = e.getNode.getElementType
    elementType == ScalaTokenTypes.tINTERPOLATED_STRING ||
      elementType == ScalaTokenTypes.tINTERPOLATED_MULTILINE_STRING
  }

  // Raw text of a text token; single-line tokens are unescaped, multi-line
  // (triple-quoted) tokens are taken verbatim.
  private def textIn(e: PsiElement) = {
    val elementType = e.getNode.getElementType
    val text = e.getText
    elementType match {
      case ScalaTokenTypes.tINTERPOLATED_STRING => StringUtil.unescapeStringCharacters(text)
      case ScalaTokenTypes.tINTERPOLATED_MULTILINE_STRING => text
    }
  }
}
| consulo/consulo-scala | src/org/jetbrains/plugins/scala/format/InterpolatedStringParser.scala | Scala | apache-2.0 | 3,099 |
package net.hearthstats.util
import java.io.File
import java.io.FileWriter
import java.io.BufferedWriter
import org.scalatest._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class FileObserverSpec extends FlatSpec with Matchers with OneInstancePerTest {

  // Disabled: relies on the observer's polling delay, so it is timing-sensitive.
  ignore should "notify observers of content changed" in {
    val watched = File.createTempFile("prefa", "aazeae")
    val observer = new FileObserver
    observer.start(watched)

    var received = ""
    observer.addReceive {
      case line: String => received = line
    }

    val out = new BufferedWriter(new FileWriter(watched))
    out.write("Димотариус\\n")
    out.close()

    // Give the observer several polling cycles to pick up the change.
    Thread.sleep(4 * observer.DEFAULT_DELAY_MS)
    received shouldBe "Димотариус"
  }
} | HearthStats/HearthStats.net-Uploader | companion/src/test/scala/net/hearthstats/util/FileObserverSpec.scala | Scala | bsd-3-clause | 755 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import cats.laws._
import cats.laws.discipline._
import monix.eval.Task
import monix.reactive.Observable
import monix.execution.exceptions.DummyException
import scala.concurrent.duration._
import scala.concurrent.duration.Duration.Zero
import scala.util.Failure
/**
 * Operator-suite tests for Observable.flatScan / flatScan0.  Each source
 * element produces three repetitions of the running sum, hence the
 * `* 3` factors in the expected counts and sums below.
 */
object FlatScanSuite extends BaseOperatorSuite {
  def createObservable(sourceCount: Int) = Some {
    val o = Observable.range(0, sourceCount)
      .flatScan(1L)((acc, elem) => Observable.repeat(acc + elem).take(3))

    // For element x the emitted value is seed(1) + sum(1..x), three times each.
    val sum = (0 until sourceCount).map(x => (1 to x).sum + 1L).sum * 3
    Sample(o, sourceCount * 3, sum, Zero, Zero)
  }

  def observableInError(sourceCount: Int, ex: Throwable) =
    if (sourceCount == 1) None else Some {
      val o = Observable.range(0, sourceCount).endWithError(ex)
        .flatScan(1L)((acc, elem) => Observable.fromIterable(Seq(1L,1L,1L)))

      Sample(o, sourceCount * 3, sourceCount * 3, Zero, Zero)
    }

  def brokenUserCodeObservable(sourceCount: Int, ex: Throwable) = Some {
    // The scan function itself throws on the final element.
    val o = Observable.range(0, sourceCount+1).flatScan(1L) { (acc, elem) =>
      if (elem == sourceCount) throw ex else
        Observable.repeat(acc + elem).take(3)
    }

    val sum = (0 until sourceCount).map(x => (1 to x).sum + 1L).sum * 3
    Sample(o, sourceCount * 3, sum, Zero, Zero)
  }

  override def cancelableObservables() = {
    val sample1 = Observable.range(0, 10)
      .flatScan(1L)((acc,e) => Observable.now(acc+e).delayExecution(1.second))
    val sample2 = Observable.range(0, 10).delayOnNext(1.second)
      .flatScan(1L)((acc,e) => Observable.now(acc+e).delayExecution(1.second))

    Seq(
      Sample(sample1,0,0,0.seconds,0.seconds),
      Sample(sample2,0,0,0.seconds,0.seconds)
    )
  }

  test("should trigger error if the initial state triggers errors") { implicit s =>
    val ex = DummyException("dummy")
    val obs = Observable(1,2,3,4).flatScan[Int](throw ex)((_,e) => Observable(e))
    val f = obs.runAsyncGetFirst; s.tick()
    assertEquals(f.value, Some(Failure(ex)))
  }

  test("flatScan0.drop(1) <-> flatScan") { implicit s =>
    check2 { (obs: Observable[Int], seed: Long) =>
      obs.flatScan0(seed)((a, b) => Observable(a, b)).drop(1) <->
        obs.flatScan(seed)((a, b) => Observable(a, b))
    }
  }

  test("flatScan0.headL <-> Task.pure(seed)") { implicit s =>
    check2 { (obs: Observable[Int], seed: Int) =>
      obs.flatScan0(seed)((_, _) => Observable.empty).headL <-> Task.pure(seed)
    }
  }
}
| Wogan/monix | monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/FlatScanSuite.scala | Scala | apache-2.0 | 3,174 |
package ml.wolfe.nlp.syntax
import ml.wolfe.nlp.Token
import ml.wolfe.nlp.io.ConstituentTreeFactory
import scala.collection.mutable.HashMap // , Map => MMap}
/**
* Created by narad on 12/5/14.
*/
/**
* A constituent tree.
*/
case class ConstituentTree(node: ConstituentNode, children : List[ConstituentTree] = List()) {
  def label: String = node.label  // label of this (sub)tree's root node
  def start: Int = node.start     // index of the first token covered
  def end: Int = node.end         // index one past the last token covered
  def isPreterminal = node.isPreterminal  // node directly dominates a word
  def isNonterminal = node.isNonterminal

  // NOTE(review): `leaves` is defined outside this excerpt; assumed to iterate
  // the preterminal nodes left-to-right -- confirm.
  def tags: Iterator[String] = leaves.collect { case l: PreterminalNode => l.label }

  def words: Iterator[String] = leaves.collect { case l: PreterminalNode => l.word }

  // Offsets are not available from the tree alone, hence `offsets = null`.
  def tokens: Iterator[Token] = leaves.collect { case l: PreterminalNode => Token(word = l.word, offsets = null, posTag = l.label) }

  // All constituent labels spanning tokens [i, j); `spans` is defined outside
  // this excerpt (presumably the index built by indexViaHash) -- confirm.
  def labelsOfSpan(i: Int, j: Int): Iterator[String] = {
    spans((i,j)).view.map(_.label).iterator
  }
  /**
   * Recursively replaces every label with its coarse form (see `coarsen`).
   * Children are always processed; only the node itself is rebuilt when its
   * label actually changes.  Preterminals keep their children untouched
   * (they are assumed to have none -- confirm).
   */
  def coarsenLabels: ConstituentTree = {
    val coarsed = coarsen(node.label)
    if (node.label == coarsed) {
      new ConstituentTree(node, children.map(_.coarsenLabels))
    }
    else {
      node match {
        case nt: NonterminalNode => copy(node = nt.copy(label = coarsed), children = children.map(_.coarsenLabels))
        case pt: PreterminalNode => copy(node = pt.copy(label = coarsed))
      }
    }
  }
def coarsen(ll: String): String = {
if (ll == "-DFL-") return ll
var l = ll
while (l.startsWith("^")) {
l = l.substring(1)
}
if (l.contains("|"))
l = l.substring(0, l.indexOf("|"))
if (l.contains("-"))
l = l.substring(0, l.indexOf("-"))
if (l.contains("="))
l = l.substring(0, l.indexOf("="))
if (l.contains("^"))
l = l.substring(0, l.indexOf("^"))
return l
}
  /**
   * Rebuilds the tree with a new yield: the k-th leaf (left-to-right) receives
   * words(offset + k) and tags(offset + k).  `tally` accumulates how many
   * leaves each processed child covers, so the next sibling starts at the
   * right offset -- note this relies on `map` evaluating children in order.
   * NOTE(review): assumes `width` is the number of leaves under a subtree --
   * confirm (defined outside this excerpt).
   */
  def setYield(words: Array[String], tags: Array[String], offset: Int = 0): ConstituentTree = {
    var tally = 0
    node match {
      case nt: NonterminalNode => {
        new ConstituentTree(node, children.map{ c =>
          val child = c.setYield(words, tags, offset + tally)
          tally += c.width
          child
        })
      }
      case pt: PreterminalNode => {
        new ConstituentTree(node = pt.copy(label = tags(offset), word = words(offset)))
      }
    }
  }
  /**
   * Builds a span index mapping token ranges (i, j) to the constituents that
   * cover exactly those tokens.  Traversal is leaf-first (`leafFirstSearch`,
   * defined outside this excerpt), so when a nonterminal is visited its
   * yield's leaves have already been counted; `height` records how many
   * constituents over the same span were indexed before it (unary-chain depth).
   */
  def indexViaHash: collection.mutable.Map[(Int,Int), List[ConstituentSpan]] = {
    val index = new collection.mutable.HashMap[(Int,Int), List[ConstituentSpan]].withDefaultValue(List())
    var numLeaves = 0
    for (t <- leafFirstSearch) {
      t.node match {
        case nt: NonterminalNode => {
          val len = t.length
          val height = index(numLeaves-len, numLeaves).size
          index((numLeaves-len, numLeaves)) = index((numLeaves-len, numLeaves)) ++ List(new ConstituentSpan(numLeaves-len, numLeaves, t.label, height = height, headInfo = nt.headInfo))
        }
        case leaf: PreterminalNode => {
          // Leaves get a width-1 span; headIdx is 0 within the span itself.
          index((numLeaves, numLeaves + 1)) = index((numLeaves, numLeaves + 1)) ++ List(new ConstituentSpan(numLeaves, numLeaves + 1, t.label, height = 0, headInfo = Some(HeadInfo(headWord = leaf.word, headIdx = 0, tokenIdx = leaf.start))))
          numLeaves += 1
        }
      }
    }
    index
  }
def indexParents(tree: ConstituentTree): Map[ConstituentTree, ConstituentTree] = {
((tree.children map (c => c -> tree)) ++ (tree.children map (c => indexParents(c))).flatten.toMap).toMap
}
  /** Immediate parent of `tree`, if `tree` occurs inside this tree. */
  def parentOf(tree: ConstituentTree): Option[ConstituentTree] = parents.get(tree)

  // NOTE(review): despite the name, this returns the preterminal *at* token i
  // (not its parent); searchUpFrom(i) relies on exactly that -- confirm before
  // renaming.  `depthFirstSearch` is defined outside this excerpt.
  def parentOf(i: Int): Option[ConstituentTree] = {
    depthFirstSearch.find( t => t.isPreterminal && t.start == i)
  }

  // Child -> parent index, built lazily once per tree instance.
  lazy val parents = indexParents(this)
  /**
   * Ancestors of the preterminal at token i, nearest first.  The preterminal
   * itself is *not* included (parentOf(i) returns it, and the tree overload
   * below starts from its parent).
   */
  def searchUpFrom(i: Int): Iterator[ConstituentTree] = {
    val parent = parentOf(i)
    parent match {
      case Some(t) => searchUpFrom(t)
      case None => Iterator.empty
    }
  }

  /** Ancestors of `tree` in this tree, nearest first (excluding `tree` itself). */
  def searchUpFrom(tree: ConstituentTree): Iterator[ConstituentTree] = {
    parents.get(tree) match {
      case Some(parent) => Iterator.single(parent) ++ searchUpFrom(parent)
      case None => Iterator.empty
    }
  }
def headwordOf(i: Int, j: Int): Option[String] = {
if (i < 0 || j < 0) return None
if (i > length || j > length) return None
if (spans((i,j)).isEmpty) return None
Some(spans((i,j)).head.headInfo.get.headWord)
// spans((i,j)).collectFirst{ case x => x.headInfo.get.headWord }
}
  /**
   * Head information of the lowest constituent spanning tokens [i, j), or
   * None for out-of-range indices or uncovered spans.  The `contains` guard
   * avoids relying on the index's default value.
   */
  def headOf(i: Int, j: Int): Option[HeadInfo] = {
    if (i < 0 || j < 0) return None
    if (i > length || j > length) return None
    if (spans.contains((i,j)) && spans((i,j)).nonEmpty) {
      spans((i,j)).head.headInfo
    } else None
  }
def covers(i: Int): Boolean = {
start <= i && end > i
}
  /**
   * Converts this head-annotated constituent tree to a dependency tree: for
   * each token i, walk up from its preterminal to the first ancestor whose
   * head is a *different* token; that token becomes i's governor.  The root
   * token (no such ancestor) gets no arc.
   * NOTE(review): uses `headInfo.get` -- will throw if any nonterminal on the
   * path lacks head information; confirm all trees here are head-annotated.
   */
  def toDependencyTree: DependencyTree = {
    val arcs = (0 until length).map{ i =>
      val gc = searchUpFrom(i).find { t =>
        t.isNonterminal && t.node.asInstanceOf[NonterminalNode].headInfo.get.tokenIdx != i
      }
      gc match {
        case Some(t) => Some(Arc(child = i, parent = t.node.asInstanceOf[NonterminalNode].headInfo.get.tokenIdx))
        case None => None
      }
    }.flatten
    DependencyTree(tokens = tokens.toIndexedSeq, arcs = arcs)
  }
  /**
   * Extracts the subtree over tokens [i, j): keeps the multi-word spans fully
   * inside the window (width-1 spans are rebuilt from the word/tag yield),
   * shifts their indices by -i, and reconstructs a tree from them.
   * NOTE(review): `toSpans` is defined outside this excerpt.
   */
  def slice(i: Int, j: Int): ConstituentTree = {
    val ospans = toSpans.toArray
    val fspans = ospans.filter{ s => s.start >= i && s.end <= j && s.width > 1} // && (span.end-span.start > 1 || span.isUnary)}
    val ss2 = fspans.map{span => ConstituentSpan(span.start-i, span.end-i, span.label, span.height)}
    val t = ConstituentTreeFactory.constructFromSpans(ss2, j-i, words.slice(i, j).toArray, tags.slice(i, j).toArray)
    t
  }
  /** Binarizes the tree using the given markovization scheme.
    *
    * Currently unimplemented (throws NotImplementedError); the commented-out
    * body below sketches the intended left/right 0-markov and single-symbol
    * strategies and is kept for reference.
    */
  def binarize(mode: String = "RIGHT_0MARKOV"): ConstituentTree = {
    ???
//    println("-- " + children.map(_.label).mkString(", "))
//    if (children.size > 2) {
//      //val grandchildren = children.slice(1, children.size)
//      mode match {
//        case "RIGHT_0MARKOV" => {
//          println("right 0 markov")
//          val blabel = if (node.label.startsWith("@")) node.label else "@%s".format(node.label)
//          return new ConstituentTree(node, List[ConstituentTree](
//            children.head.binarize(mode),
//            new ConstituentTree(new NonterminalNode(blabel), children.slice(1, children.size)).binarize(mode)))
//        }
//        case "LEFT_0MARKOV" => {
//          println("left 0 markov")
//          val blabel = if (node.label.startsWith("@")) node.label else "@%s".format(node.label)
//          return new ConstituentTree(node, List[ConstituentTree](
//            new ConstituentTree(new NonterminalNode(blabel), children.slice(0, children.size-1)).binarize(mode),
//            children.last.binarize(mode)))
//        }
//        case "RIGHT_SINGLE" => {
//          println("right single")
//          return new ConstituentTree(node, List[ConstituentTree](
//            children(0).binarize(mode),
//            new ConstituentTree(new NonterminalNode("@"), children.slice(1, children.size)).binarize(mode)))
//        }
//        case "LEFT_SINGLE" => {
//          println("left single")
//          return new ConstituentTree(node, List[ConstituentTree](
//            new ConstituentTree(new NonterminalNode("@"), children.slice(0, children.size-1)).binarize(mode),
//            children.last.binarize(mode)))
//        }
//      }
//    }
//    else{
//      return new ConstituentTree(node, children.map(_.binarize(mode)))
//    }
  }
  // Binarization introduces "@"-prefixed intermediate labels, so their presence marks a binarized tree.
  def isBinarized: Boolean = node.label.contains("@")
def removeUnaryChains(): ConstituentTree = {
new ConstituentTree(node,
if (children.size == 1) {
val uh = unaryHelper()
unaryHelper().map(_.removeUnaryChains())
}
else {
children.map(_.removeUnaryChains())
})
}
def unaryHelper(): List[ConstituentTree] = {
if (children.size == 0) {
return List(this)
}
if (children.size == 1) {
children(0).unaryHelper()
}
else {
children
}
}
  /** Strips -NONE-/-RRB-/-LRB- nodes and any internal node left childless.
    *
    * NOTE(review): uses null as a "delete me" sentinel that the recursive
    * filter step removes; callers must be prepared for a null result when the
    * whole tree is pruned away.
    */
  def removeNones(): ConstituentTree = {
    val nchildren = children.map(_.removeNones()).filter(_ != null.asInstanceOf[ConstituentTree])
    if (label == "-NONE-" || label == "-RRB-" || label == "-LRB-" || (children.size > 0 && nchildren.size == 0)) {
      null.asInstanceOf[ConstituentTree]
    }
    else {
      new ConstituentTree(node, nchildren)
    }
  }
def removeTopNode: ConstituentTree = {
assert(children.size == 1, "Attempted to remove top node on a tree that would become multi-rooted.\\n" + toString)
children(0)
}
def nodemap(f: (ConstituentNode) => ConstituentNode): ConstituentTree = {
new ConstituentTree(f(node), children.map(_.nodemap(f)))
}
  // Cached leaf count; length returns this without re-walking the tree.
  private lazy val len = leaves.size
  // NOTE(review): this is subtree-grouped rather than a strict level-order
  // traversal — each child's descendants are exhausted before the next
  // child's deeper levels interleave. Confirm callers only need "root first,
  // then children first" ordering. ("Seach" typo in the helper name is
  // preserved because it is referenced below.)
  def breadthFirstSearch: Iterator[ConstituentTree] = {
    Iterator.single(this) ++ children.iterator ++ children.flatMap(_.breadthFirstSeachHelper)
  }
  private def breadthFirstSeachHelper: Iterator[ConstituentTree] = {
    children.iterator ++ children.flatMap(_.breadthFirstSeachHelper)
  }
  /** Pre-order traversal: node before its children, left to right. */
  def depthFirstSearch: Iterator[ConstituentTree] = {
    Iterator.single(this) ++ children.flatMap(_.depthFirstSearch)
  }
  /** Post-order traversal: all descendants before the node itself. */
  def leafFirstSearch: Iterator[ConstituentTree] = {
    children.view.flatMap(_.leafFirstSearch).iterator ++ Iterator.single(this)
  }
  /** Height of the tree; a leaf has height 0. */
  def height: Int = {
    if (isLeaf) 0
    else children.map(_.height).max + 1
  }
  def isLeaf: Boolean = {
    children.size == 0
  }
  /** Leaf nodes in left-to-right order. */
  def leaves: Iterator[ConstituentNode] = {
    depthFirstSearch.collect { case x: ConstituentTree if x.isLeaf => x.node }
  }
  /** Number of leaves (cached). */
  def length: Int = len
  // Recomputed on each call, unlike length; result is the same leaf count.
  def width: Int = {
    leaves.size
  }
override def toString: String = {
toTreebankString
}
def toTreebankString: String = {
node match {
case x: NonterminalNode => "(" + x.label + " " + children.map(_.toString).mkString(" ") + ")"
case x: PreterminalNode => "(" + x.label + " " + x.word + ")"
case _ => "empty"
}
}
def toHeadedTreebankString: String = {
node match {
case x: NonterminalNode => "(" + x.label + "-" + x.headInfo.get.headWord + " " + children.map(_.toHeadedTreebankString).mkString(" ") + ")"
case x: PreterminalNode => "(" + x.label + " " + x.word + ")"
case _ => "empty"
}
}
  // Indexing Methods
  // (start, end) -> spans covering exactly that token range; built lazily on first query.
  lazy private val spans = indexViaHash
def containsSpan(i: Int, j: Int): Boolean = {
if (i < 0 || j < 0) return false
if (i > length || j > length) return false
spans((i,j)).nonEmpty
// !spans(i)(j).isEmpty
}
def containsSpan(i: Int, j: Int, l: String): Boolean = {
if (!containsSpan(i, j)) return false
spans((i,j)).exists(_.label == l)
// return spans(i)(j).exists(_.label == l)
}
def containsUnarySpan(i: Int, j: Int): Boolean = {
if (i < 0 || j < 0) return false
if (i > length || j > length) return false
spans((i,j)).exists(_.isUnary)
// spans(i)(j).exists(_.isUnary)
}
def containsUnarySpan(i: Int, j: Int, l: String): Boolean = {
if (i < 0 || j < 0) return false
if (i > length || j > length) return false
spans((i,j)).exists(s => s.isUnary && s.label == l)
// spans(i)(j).exists(s => s.isUnary && s.label == l)
}
def containsUnarySpan(i: Int, j: Int, l: String, h: Int): Boolean = {
if (i < 0 || j < 0) return false
if (i > length || j > length) return false
spans((i,j)).exists(s => s.isUnary && s.label == l && s.height == h)
// spans(i)(j).exists(s => s.isUnary && s.label == l && s.height == h)
}
def containsLabel(i: Int, j: Int, l: String): Boolean = {
if (i < 0 || j < 0) return false
if (i > length || j > length) return false
// spans(i)(j).exists(s => s.label == l)
spans((i,j)).exists(s => s.label == l)
}
  /** All indexed spans of the tree, in no guaranteed order. */
  lazy val toSpans: Iterable[ConstituentSpan] = {
//    (for (i <- 0 until length; j <- 1 to length if spans.contains((i,j))) yield spans((i,j))).flatten
    spans.values.flatten
  }
  /** Spans corresponding to nonterminal constituents: width > 1 or nonzero unary height. */
  def toNonterminalSpans: Iterable[ConstituentSpan] = {
    toSpans.filter(s => s.width > 1 || s.height > 0)
  }
  // NOTE(review): unlike the contains* queries, this applies the index directly
  // and will throw when no entry exists for (i, j) — confirm callers guard.
  def spansAt(i: Int, j: Int): Iterable[ConstituentSpan] = spans((i,j)) //spans(i)(j).toIterable
}
/**
* Companion object for the ConstituentTree class.
*/
object ConstituentTree {
  // Regexes for parsing Penn-Treebank style bracketed strings:
  // "(TAG word)" token expressions, "(LABEL ..." constituents, bare "( ..."
  // and leading double parentheses. Used by the (commented-out) parser below.
  private val TOKEN_PATTERN = """ *\\( *([^ \\(]+) ([^ \\)]+) *\\).*""".r
  private val CONSTITUENT_PATTERN = """ *\\( *([^ ]+) .*""".r
  private val EMPTY_PATTERN = """ *\\( .*""".r
  private val DOUBLE_PAREN_PATTERN = """ *\\( *\\(+.*""".r
  // Canonical empty tree: no node, no children.
  val empty = ConstituentTree(node=null, children=List())
}
/*
// val gc: NonterminalNode = depthFirstSearch.toArray.filter(n => n.width > 1 && n.covers(i)).map(_.node).collect { case nt: NonterminalNode => nt }.sortBy(_.width).head
def governingConstituent(i: Int): ConstituentTree = {
depthFirstSearch.toArray.filter(c => c.width > 1 && c.covers(i)).sortBy(_.width).head
}
def tokenIndexOfHead: Option[Int] = {
node match {
case nt: NonterminalNode => {
nt.headIdx match {
case None => None
case Some(h) => children(h).tokenIndexOfHead
}
}
case pt: PreterminalNode => Some(pt.start)
}
}
*/
/*
//
// def stringToTree(str: String, leftMost: Int = 0): ConstituentTree = {
// str match {
// case DOUBLE_PAREN_PATTERN() => {
// val children = findChildren(subexpressions(str), leftMost = leftMost)
// ConstituentTreeFactory.buildTree(start = leftMost, end = children.last.end, children=children)
// }
// case TOKEN_PATTERN(tag, word) => {
// ConstituentTreeFactory.buildTree(start = leftMost, end = leftMost + 1, label=tag, word = Some(word))
// }
// case CONSTITUENT_PATTERN(label) => {
// val children = findChildren(subexpressions(str), leftMost = leftMost)
// ConstituentTreeFactory.buildTree(start = leftMost, end = children.last.end, label=label, children=children)
// }
//// case EMPTY_PATTERN() => {
//// val children = findChildren(subexpressions(str), leftMost = leftMost)
//// ConstituentTreeFactory.buildTree(start = leftMost, end = children.last.end, label=options.DEFAULT_LABEL, children=children)
//// }
// }
// }
def findChildren(strs: List[String], leftMost: Int): List[ConstituentTree] = {
var tmpLeftMost = leftMost
strs.map { s =>
val child = stringToTree(s, leftMost = tmpLeftMost)
tmpLeftMost = child.end
child
}
}
def subexpressions(str: String, ldelim: Char='(', rdelim: Char=')'): List[String] = {
val subs = new ArrayBuffer[String]
var count = -1; var start = 0
str.zipWithIndex.foreach { case(letter, index) =>
if (letter == ldelim) {
count += 1
if (count == 1)
start = index
}
else if (letter == rdelim) {
count -= 1
if (count == 0)
subs += str.substring(start, index+1)
}
}
subs.toList
}
*/
/*
// if (isNonterminal) {
// new ConstituentTree(new NonterminalNode(coarsed), children.map(_.coarsenLabels))
// }
// else {
// new ConstituentTree(new PreterminalNode(coarsed, node.asInstanceOf[PreterminalNode].word), children.map(_.coarsenLabels))
// }
*/
// def headword: String = {
// node match {
// case nt: NonterminalNode => children(nt.head).headword
// case pt: PreterminalNode => pt.word
// }
// }
// def headOf(i: Int, j: Int): Option[String] = {
// spans((i,j)).collectFirst{ case i if i}
// }
/*
def indexViaArray: Array[Array[ArrayBuffer[ConstituentSpan]]] = {
val ispans = Array.fill(length+1,length+1)(new ArrayBuffer[ConstituentSpan])
var numLeaves = 0
for (t <- leafFirstSearch) {
t.node match {
case nt: NonterminalNode => {
val len = t.length
val height = ispans(numLeaves-len)(numLeaves).size
val head = nt.head
ispans(numLeaves-len)(numLeaves) += new ConstituentSpan(numLeaves-len, numLeaves, t.label, height, head = nt.head + numLeaves-len)
}
case leaf: PreterminalNode => {
ispans(numLeaves)(numLeaves+1) += new ConstituentSpan(numLeaves, numLeaves+1, t.label, 0, head = numLeaves)
numLeaves += 1
}
}
}
ispans
}
*/
// if (t.isLeaf) {
// index((numLeaves, numLeaves+1)) = index((numLeaves, numLeaves+1)) ++ List(new ConstituentSpan(numLeaves, numLeaves+1, t.label, 0))
// numLeaves += 1
// }
// else {
// val len = t.length
// val height = index(numLeaves-len, numLeaves).size
// index((numLeaves-len, numLeaves)) = index((numLeaves-len, numLeaves)) ++ List(new ConstituentSpan(numLeaves-len, numLeaves, t.label, height, head = head))
// }
// def highestUnarySpan(i: Int, j: Int): String = {
// if (i < 0 || j < 0) return "none"
// if (i > length || j > length) return "none"
// spans((i,j)).filter()
// if (spans(i)(j).filter(_.isUnary).size > 0) {
// spans(i)(j).filter(_.isUnary).sortBy(_.height * -1).head.label
// }
// else {
// "none"
// }
// }
//
// def toDependencyTree: DependencyTree = {
//
// } | wolfe-pack/wolfe | wolfe-nlp/src/main/scala/ml/wolfe/nlp/syntax/ConstituentTree.scala | Scala | apache-2.0 | 17,059 |
package core.filter
import com.lvxingpai.yunkai.Userservice.{ FinagledClient => YunkaiClient }
import core.Implicits.TwitterConverter._
import core.exception.GroupMemberException
import models.Message
import play.api.Play
import play.api.Play.current
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
/**
* Created by pengyt on 2015/8/17.
*/
class GroupMemberFilter extends Filter {

  // User-service (yunkai) client, resolved lazily from Play's injector.
  lazy val yunkai = Play.application.injector instanceOf classOf[YunkaiClient]

  /**
   * Checks whether the given user is a member of the given chat group.
   *
   * @param userId      id of the user to check
   * @param chatGroupId id of the chat group
   * @return a future completing with true iff the user is a group member
   */
  private def member(userId: Long, chatGroupId: Long): Future[Boolean] = yunkai.isMember(userId, chatGroupId)

  /**
   * Permission check. For group messages, the returned future fails with a
   * GroupMemberException when the sender is not a member of the receiving
   * group, terminating the message-filter pipeline; otherwise the message is
   * passed through unchanged.
   *
   * @param message the message to validate
   * @return the validated message
   */
  private def validate(message: Message): Future[Message] = {
    if (message.chatType == "group") {
      member(message.senderId, message.receiverId) map { isMember =>
        if (isMember) message
        else throw GroupMemberException("您还不是群成员")
      }
    } else {
      // Non-group chats need no membership check; Future.successful avoids
      // scheduling a task just to wrap an already-computed value.
      Future.successful(message)
    }
  }

  /**
   * Group-membership filter step: validates both bare messages and futures
   * of messages.
   */
  override val doFilter: PartialFunction[AnyRef, AnyRef] = {
    // NOTE(review): the Future[Message] type test is unchecked due to erasure
    // (any Future matches); preserved as in the original — confirm the
    // pipeline only ever carries Future[Message] values here.
    case futureMsg: Future[Message] => futureMsg.flatMap(validate)
    case msg: Message               => validate(msg)
  }
}
| Lvxingpai/Hedylogos-Server | app/core/filter/GroupMemberFilter.scala | Scala | apache-2.0 | 1,601 |
package org.sbuild.plugins.aether.impl
import org.scalatest.FreeSpec
import java.io.File
import org.sbuild.plugins.aether.Repository
import org.eclipse.aether.graph.Dependency
import org.eclipse.aether.graph.Exclusion
import org.eclipse.aether.collection.CollectRequest
import org.sbuild.plugins.aether.ArtifactDependency
import org.eclipse.aether.repository.RemoteRepository
import org.eclipse.aether.resolution.DependencyRequest
import org.eclipse.aether.util.graph.visitor.PreorderNodeListGenerator
import scala.collection.JavaConverters._
import org.eclipse.aether.artifact.DefaultArtifact
class AetherTests extends FreeSpec {

  "Plain Aether" - {

    // Throw-away local repository: createTempFile gives a unique path, which
    // is then replaced by a directory so resolution never touches ~/.m2.
    val repoDir = File.createTempFile("repo", "")
    repoDir.delete()
    repoDir.mkdirs()

    val remoteRepo = Repository.Central
    val worker = new AetherSchemeHandlerWorkerImpl(repoDir, Seq(remoteRepo))
    lazy val repoSystem = worker.newRepositorySystem()
    lazy val session = worker.newSession(repoSystem)

    /**
     * Resolves the transitive closure of `deps` against the remote repository
     * and returns the local files of all resolved artifacts.
     */
    def resolve(deps: Seq[Dependency]): Seq[File] = {
      println("About to resolve deps: " + deps)
      val collectRequest = new CollectRequest()
      deps.foreach { d => collectRequest.addDependency(d) }
      collectRequest.addRepository(
        new RemoteRepository.Builder(remoteRepo.name, remoteRepo.layout, remoteRepo.url).build())
      val node = repoSystem.collectDependencies(session, collectRequest).getRoot()
      val dependencyRequest = new DependencyRequest()
      dependencyRequest.setRoot(node)
      repoSystem.resolveDependencies(session, dependencyRequest)
      val nlg = new PreorderNodeListGenerator()
      node.accept(nlg)
      println("Resolved deps: ")
      // zipWithIndex replaces the original mutable counter, and an Option
      // chain replaces the null-propagating map-then-filter; printed output
      // and the returned files are unchanged.
      nlg.getNodes().asScala.toSeq.zipWithIndex.flatMap { case (n, idx) =>
        val dep = n.getDependency()
        println((idx + 1) + ". " + dep)
        for {
          d <- Option(dep)
          artifact <- Option(d.getArtifact())
          file <- Option(artifact.getFile())
        } yield file.getAbsoluteFile()
      }
    }

    "should resolve testng:6.8" in {
      val result = resolve(Seq(
        new Dependency(
          new DefaultArtifact("org.testng", "testng", "jar", "6.8"),
          "compile", false,
          null
        )
      ))
      val expected = Set(
        "testng-6.8.jar",
        "junit-4.10.jar",
        "hamcrest-core-1.1.jar",
        "bsh-2.0b4.jar",
        "jcommander-1.27.jar",
        "snakeyaml-1.6.jar")
      assert(result.map(_.getName()).toSet === expected)
    }

    "should resolve testng:6.8 excluding jcommander" in {
      val result = resolve(Seq(
        new Dependency(
          new DefaultArtifact("org.testng", "testng", "jar", "6.8"),
          "compile", false,
          Seq(new Exclusion("com.beust", "jcommander", "*", "*")).asJava
        )
      ))
      val expected = Set(
        "testng-6.8.jar",
        "junit-4.10.jar",
        "hamcrest-core-1.1.jar",
        "bsh-2.0b4.jar",
        "snakeyaml-1.6.jar")
      assert(result.map(_.getName()).toSet === expected)
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.json
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, Path}
import org.apache.hadoop.mapreduce.{Job, TaskAttemptContext}
import org.apache.spark.sql.{AnalysisException, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.ExprUtils
import org.apache.spark.sql.catalyst.json._
import org.apache.spark.sql.catalyst.util.CompressionCodecs
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types._
import org.apache.spark.util.SerializableConfiguration
/**
 * Spark SQL file-based data source for JSON, registered under the short name
 * "json". Reading and schema inference are delegated to the concrete
 * JsonDataSource (line-delimited vs. multiLine) selected by the parsed options.
 */
class JsonFileFormat extends TextBasedFileFormat with DataSourceRegister {
  override val shortName: String = "json"

  // A JSON file is only splittable across tasks when the concrete data source
  // (line-delimited vs. multiLine) allows it, in addition to the generic
  // text-format checks (e.g. compression codec splittability).
  override def isSplitable(
      sparkSession: SparkSession,
      options: Map[String, String],
      path: Path): Boolean = {
    val parsedOptions = new JSONOptionsInRead(
      options,
      sparkSession.sessionState.conf.sessionLocalTimeZone,
      sparkSession.sessionState.conf.columnNameOfCorruptRecord)
    val jsonDataSource = JsonDataSource(parsedOptions)
    jsonDataSource.isSplitable && super.isSplitable(sparkSession, options, path)
  }

  // Infers the schema by sampling the given files; returns None when nothing
  // can be inferred.
  override def inferSchema(
      sparkSession: SparkSession,
      options: Map[String, String],
      files: Seq[FileStatus]): Option[StructType] = {
    val parsedOptions = new JSONOptionsInRead(
      options,
      sparkSession.sessionState.conf.sessionLocalTimeZone,
      sparkSession.sessionState.conf.columnNameOfCorruptRecord)
    JsonDataSource(parsedOptions).inferSchema(
      sparkSession, files, parsedOptions)
  }

  // Configures the Hadoop job for writing (compression codec) and returns a
  // factory producing one JsonOutputWriter per output task.
  override def prepareWrite(
      sparkSession: SparkSession,
      job: Job,
      options: Map[String, String],
      dataSchema: StructType): OutputWriterFactory = {
    val conf = job.getConfiguration
    val parsedOptions = new JSONOptions(
      options,
      sparkSession.sessionState.conf.sessionLocalTimeZone,
      sparkSession.sessionState.conf.columnNameOfCorruptRecord)
    parsedOptions.compressionCodec.foreach { codec =>
      CompressionCodecs.setCodecConfiguration(conf, codec)
    }

    new OutputWriterFactory {
      override def newInstance(
          path: String,
          dataSchema: StructType,
          context: TaskAttemptContext): OutputWriter = {
        new JsonOutputWriter(path, parsedOptions, dataSchema, context)
      }

      override def getFileExtension(context: TaskAttemptContext): String = {
        ".json" + CodecStreams.getCompressionExtension(context)
      }
    }
  }

  // Builds the per-partition read function: each PartitionedFile is parsed
  // with Jackson into InternalRows matching requiredSchema.
  override def buildReader(
      sparkSession: SparkSession,
      dataSchema: StructType,
      partitionSchema: StructType,
      requiredSchema: StructType,
      filters: Seq[Filter],
      options: Map[String, String],
      hadoopConf: Configuration): PartitionedFile => Iterator[InternalRow] = {
    // Broadcast the Hadoop conf once instead of serializing it per task.
    val broadcastedHadoopConf =
      sparkSession.sparkContext.broadcast(new SerializableConfiguration(hadoopConf))

    val parsedOptions = new JSONOptionsInRead(
      options,
      sparkSession.sessionState.conf.sessionLocalTimeZone,
      sparkSession.sessionState.conf.columnNameOfCorruptRecord)

    // The corrupt-record column is synthesized by the parser, so it is
    // excluded from the schema handed to Jackson.
    val actualSchema =
      StructType(requiredSchema.filterNot(_.name == parsedOptions.columnNameOfCorruptRecord))
    // Check a field requirement for corrupt records here to throw an exception in a driver side
    ExprUtils.verifyColumnNameOfCorruptRecord(dataSchema, parsedOptions.columnNameOfCorruptRecord)

    // Selecting only the corrupt-record column is rejected: the parser cannot
    // populate it without also parsing real columns.
    if (requiredSchema.length == 1 &&
      requiredSchema.head.name == parsedOptions.columnNameOfCorruptRecord) {
      throw new AnalysisException(
        "Since Spark 2.3, the queries from raw JSON/CSV files are disallowed when the\\n" +
          "referenced columns only include the internal corrupt record column\\n" +
          s"(named _corrupt_record by default). For example:\\n" +
          "spark.read.schema(schema).json(file).filter($\\"_corrupt_record\\".isNotNull).count()\\n" +
          "and spark.read.schema(schema).json(file).select(\\"_corrupt_record\\").show().\\n" +
          "Instead, you can cache or save the parsed results and then send the same query.\\n" +
          "For example, val df = spark.read.schema(schema).json(file).cache() and then\\n" +
          "df.filter($\\"_corrupt_record\\".isNotNull).count()."
      )
    }

    (file: PartitionedFile) => {
      val parser = new JacksonParser(
        actualSchema,
        parsedOptions,
        allowArrayAsStructs = true,
        filters)
      JsonDataSource(parsedOptions).readFile(
        broadcastedHadoopConf.value.value,
        file,
        parser,
        requiredSchema)
    }
  }

  override def toString: String = "JSON"

  // All JsonFileFormat instances are interchangeable, so equality is by class.
  override def hashCode(): Int = getClass.hashCode()

  override def equals(other: Any): Boolean = other.isInstanceOf[JsonFileFormat]

  // Restricts which Catalyst types this source can read/write; recursive over
  // nested struct/array/map/UDT types.
  override def supportDataType(dataType: DataType): Boolean = dataType match {
    case _: AtomicType => true

    case st: StructType => st.forall { f => supportDataType(f.dataType) }

    case ArrayType(elementType, _) => supportDataType(elementType)

    case MapType(keyType, valueType, _) =>
      supportDataType(keyType) && supportDataType(valueType)

    case udt: UserDefinedType[_] => supportDataType(udt.sqlType)

    case _: NullType => true

    case _ => false
  }
}
| maropu/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JsonFileFormat.scala | Scala | apache-2.0 | 6,132 |
import sbt._
import sbt.Keys._
import sbtassembly.Plugin._
import AssemblyKeys._
/**
* Build settings for MLlib. To build against a specific Spark version (e.g., 1.1.0), use
* {{{
* sbt -Dspark.version=1.1.0 clean ...
* }}}
*/
object MLlibTestsBuild extends Build {

  // Spark version under test; overridable with -Dspark.version=... (see header comment).
  val sparkVersion = settingKey[String]("Spark version to test against.")

  lazy val commonSettings = Seq(
    organization := "org.spark-project",
    version := "0.1",
    scalaVersion := "2.10.4",
    sparkVersion := sys.props.get("spark.version").getOrElse("1.2.1"),
    libraryDependencies ++= Seq(
      "net.sf.jopt-simple" % "jopt-simple" % "4.6",
      "org.scalatest" %% "scalatest" % "2.2.1" % "test",
      "org.slf4j" % "slf4j-log4j12" % "1.7.2",
      "org.json4s" %% "json4s-native" % "3.2.9",
      // "provided": the Spark jars come from the cluster at run time and are
      // excluded from the assembly below.
      "org.apache.spark" %% "spark-mllib" % sparkVersion.value % "provided"
    )
  )

  lazy val root = Project(
    "mllib-perf",
    file("."),
    settings = assemblySettings ++ commonSettings ++ Seq(
      // Each supported Spark minor version has its own source tree; pick it
      // from the configured version, failing fast on unsupported versions.
      scalaSource in Compile := {
        val targetFolder = sparkVersion.value match {
          case v if v.startsWith("1.0.") => "v1p0"
          case v if v.startsWith("1.1.") => "v1p1"
          case v if v.startsWith("1.2.") => "v1p2"
          case v if v.startsWith("1.3.") => "v1p3"
          case _ => throw new IllegalArgumentException(s"Do not support Spark $sparkVersion.")
        }
        baseDirectory.value / targetFolder / "src" / "main" / "scala"
      },
      // Skip tests when building the assembly jar.
      test in assembly := {},
      outputPath in assembly := file("target/mllib-perf-tests-assembly.jar"),
      assemblyOption in assembly ~= { _.copy(includeScala = false) },
      // Resolve duplicate-file conflicts when merging dependency jars.
      mergeStrategy in assembly := {
        case PathList("META-INF", xs@_*) =>
          (xs.map(_.toLowerCase)) match {
            case ("manifest.mf" :: Nil) => MergeStrategy.discard
            // Note(harvey): this to get Shark perf test assembly working.
            case ("license" :: _) => MergeStrategy.discard
            case ps@(x :: xs) if ps.last.endsWith(".sf") => MergeStrategy.discard
            case _ => MergeStrategy.first
          }
        case PathList("reference.conf", xs@_*) => MergeStrategy.concat
        case "log4j.properties" => MergeStrategy.discard
        case PathList("application.conf", xs@_*) => MergeStrategy.concat
        case _ => MergeStrategy.first
      }
    ))
}
| nchammas/spark-perf | mllib-tests/project/MLlibTestsBuild.scala | Scala | apache-2.0 | 2,356 |
object safe_number_input_stub {
  /** Program entry point for the exercise stub; implementation goes here.
    * Fix: uses an explicit `: Unit =` result instead of the deprecated
    * Scala procedure syntax `def main(args: ...) { }`.
    */
  def main(args: Array[String]): Unit = {
    // Put code here
  }
}
| LoyolaChicagoBooks/introcs-scala-examples | safe_number_input_stub/safe_number_input_stub.scala | Scala | gpl-3.0 | 93 |
package provingground.library
import provingground._
import HoTT._
import induction._
import implicits._
import shapeless._
import Fold._
object uliftInd {
  // Machine-generated (Lean-export style) encoding of the inductive structure
  // of `ulift`: a function from a type "$b" to the inductive definition of
  // ulift $b with its single constructor `ulift.up`. The "'c_586902701"
  // identifier is a generated fresh variable name — do not rename by hand.
  lazy val value = Subst.Lambda(
    "$b" :: Type,
    ConstructorSeqTL(
      ConstructorSeqDom.Cons(
        ApplnSym(
          "ulift.up" :: piDefn("'c_586902701" :: Type)(
            FuncTyp("'c_586902701" :: Type,
                    ("ulift" :: FuncTyp(Type, Type))("'c_586902701" :: Type))),
          "$b" :: Type),
        ConstructorShape.CnstFuncConsShape(
          "$b" :: Type,
          ConstructorShape.IdShape.byTyp(
            ("ulift" :: FuncTyp(Type, Type))("$b" :: Type))),
        ConstructorSeqDom.Empty.byTyp(
          ("ulift" :: FuncTyp(Type, Type))("$b" :: Type))
      ),
      ("ulift" :: FuncTyp(Type, Type))("$b" :: Type)
    )
  )
}
| siddhartha-gadgil/ProvingGround | leanlib/src/main/scala/provingground/library/inductive-types/uliftInd.scala | Scala | mit | 825 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.collection
package immutable
import scala.collection.generic.{BitOperations, DefaultSerializationProxy}
import scala.collection.mutable.{Builder, ImmutableBuilder}
import scala.annotation.tailrec
import scala.annotation.unchecked.uncheckedVariance
import scala.language.implicitConversions
/** Utility class for integer maps.
*/
private[immutable] object IntMapUtils extends BitOperations.Int {
  /** Mask selecting the highest bit at which the two prefixes differ. */
  def branchMask(i: Int, j: Int) = highestOneBit(i ^ j)

  /** Joins two trees with incompatible prefixes under a fresh branching node,
    * ordered by the value of the branching bit in p1.
    */
  def join[T](p1: Int, t1: IntMap[T], p2: Int, t2: IntMap[T]): IntMap[T] = {
    val branchBit = branchMask(p1, p2)
    val prefix = mask(p1, branchBit)
    if (zero(p1, branchBit)) IntMap.Bin(prefix, branchBit, t1, t2)
    else IntMap.Bin(prefix, branchBit, t2, t1)
  }

  /** Smart Bin constructor that collapses away empty subtrees. IntMap.Nil is
    * the unique empty IntMap, so reference checks are sufficient here.
    */
  def bin[T](prefix: Int, mask: Int, left: IntMap[T], right: IntMap[T]): IntMap[T] =
    if (right eq IntMap.Nil) left
    else if (left eq IntMap.Nil) right
    else IntMap.Bin(prefix, mask, left, right)
}
import IntMapUtils._
/** A companion object for integer maps.
*
* @define Coll `IntMap`
*/
object IntMap {
  /** The empty integer map. */
  def empty[T] : IntMap[T]  = IntMap.Nil

  /** A map containing the single binding key -> value. */
  def singleton[T](key: Int, value: T): IntMap[T] = IntMap.Tip(key, value)

  def apply[T](elems: (Int, T)*): IntMap[T] =
    elems.foldLeft(empty[T])((x, y) => x.updated(y._1, y._2))

  def from[V](coll: IterableOnce[(Int, V)]): IntMap[V] =
    newBuilder[V].addAll(coll).result()

  private[immutable] case object Nil extends IntMap[Nothing] {
    // Important! Without this equals method in place, an infinite
    // loop from Map.equals => size => pattern-match-on-Nil => equals
    // develops.  Case objects and custom equality don't mix without
    // careful handling.
    override def equals(that : Any) = that match {
      case _: this.type => true
      case _: IntMap[_] => false // The only empty IntMaps are eq Nil
      case _ => super.equals(that)
    }
  }

  // Leaf node: a single key/value binding.
  private[immutable] case class Tip[+T](key: Int, value: T) extends IntMap[T]{
    // Reuses this node when the new value is reference-identical, avoiding allocation.
    def withValue[S](s: S) =
      if (s.asInstanceOf[AnyRef] eq value.asInstanceOf[AnyRef]) this.asInstanceOf[IntMap.Tip[S]]
      else IntMap.Tip(key, s)
  }

  // Inner node: keys in `left` have a zero at the `mask` bit, keys in `right` a one.
  private[immutable] case class Bin[+T](prefix: Int, mask: Int, left: IntMap[T], right: IntMap[T]) extends IntMap[T] {
    // Reuses this node when both subtrees are reference-identical.
    def bin[S](left: IntMap[S], right: IntMap[S]): IntMap[S] = {
      if ((this.left eq left) && (this.right eq right)) this.asInstanceOf[IntMap.Bin[S]]
      else IntMap.Bin[S](prefix, mask, left, right)
    }
  }

  def newBuilder[V]: Builder[(Int, V), IntMap[V]] =
    new ImmutableBuilder[(Int, V), IntMap[V]](empty) {
      def addOne(elem: (Int, V)): this.type = { elems = elems + elem; this }
    }

  // Factory/BuildFrom plumbing so generic collection operations can target
  // IntMap; the untyped singletons are cast per value type to avoid one
  // allocation per call.
  implicit def toFactory[V](dummy: IntMap.type): Factory[(Int, V), IntMap[V]] = ToFactory.asInstanceOf[Factory[(Int, V), IntMap[V]]]

  @SerialVersionUID(3L)
  private[this] object ToFactory extends Factory[(Int, AnyRef), IntMap[AnyRef]] with Serializable {
    def fromSpecific(it: IterableOnce[(Int, AnyRef)]): IntMap[AnyRef] = IntMap.from[AnyRef](it)
    def newBuilder: Builder[(Int, AnyRef), IntMap[AnyRef]] = IntMap.newBuilder[AnyRef]
  }

  implicit def toBuildFrom[V](factory: IntMap.type): BuildFrom[Any, (Int, V), IntMap[V]] = ToBuildFrom.asInstanceOf[BuildFrom[Any, (Int, V), IntMap[V]]]
  private[this] object ToBuildFrom extends BuildFrom[Any, (Int, AnyRef), IntMap[AnyRef]] {
    def fromSpecific(from: Any)(it: IterableOnce[(Int, AnyRef)]) = IntMap.from(it)
    def newBuilder(from: Any) = IntMap.newBuilder[AnyRef]
  }

  implicit def iterableFactory[V]: Factory[(Int, V), IntMap[V]] = toFactory(this)
  implicit def buildFromIntMap[V]: BuildFrom[IntMap[_], (Int, V), IntMap[V]] = toBuildFrom(this)
}
// Iterator over a non-empty IntMap.
// Iterator over a non-empty IntMap.
private[immutable] abstract class IntMapIterator[V, T](it: IntMap[V]) extends AbstractIterator[T] {

  // Basically this uses a simple stack to emulate the recursion over the tree. However
  // because we know that Ints are at least 32 bits we can have at most 32 IntMap.Bins and
  // one IntMap.Tip sitting on the tree at any point. Therefore we know the maximum stack
  // depth is 33, which bounds the buffer size below.
  var index = 0
  var buffer = new Array[AnyRef](33)

  def pop = {
    index -= 1
    buffer(index).asInstanceOf[IntMap[V]]
  }

  def push(x: IntMap[V]): Unit = {
    buffer(index) = x.asInstanceOf[AnyRef]
    index += 1
  }
  push(it)

  /**
   * What value do we assign to a tip?
   */
  def valueOf(tip: IntMap.Tip[V]): T

  def hasNext = index != 0
  // Descends left-first, pushing right siblings, so leaves are emitted in
  // unsigned key order; the Tip-left fast path avoids one push/pop pair.
  @tailrec
  final def next(): T =
    pop match {
      case IntMap.Bin(_,_, t@IntMap.Tip(_, _), right) => {
        push(right)
        valueOf(t)
      }
      case IntMap.Bin(_, _, left, right) => {
        push(right)
        push(left)
        next()
      }
      case t@IntMap.Tip(_, _) => valueOf(t)
      // This should never happen. We don't allow IntMap.Nil in subtrees of the IntMap
      // and don't return an IntMapIterator for IntMap.Nil.
      case IntMap.Nil => throw new IllegalStateException("Empty maps not allowed as subtrees")
    }
}
private[immutable] class IntMapEntryIterator[V](it: IntMap[V]) extends IntMapIterator[V, (Int, V)](it) {
  /** Emits each leaf as a (key, value) pair. */
  def valueOf(tip: IntMap.Tip[V]): (Int, V) = tip.key -> tip.value
}

private[immutable] class IntMapValueIterator[V](it: IntMap[V]) extends IntMapIterator[V, V](it) {
  /** Emits only each leaf's value. */
  def valueOf(tip: IntMap.Tip[V]): V = tip.value
}

private[immutable] class IntMapKeyIterator[V](it: IntMap[V]) extends IntMapIterator[V, Int](it) {
  /** Emits only each leaf's key. */
  def valueOf(tip: IntMap.Tip[V]): Int = tip.key
}
import IntMap._
/** Specialised immutable map structure for integer keys, based on
* [[https://ittc.ku.edu/~andygill/papers/IntMap98.pdf Fast Mergeable Integer Maps]]
* by Okasaki and Gill. Essentially a trie based on binary digits of the integers.
*
* '''Note:''' This class is as of 2.8 largely superseded by HashMap.
*
* @tparam T type of the values associated with integer keys.
*
* @define Coll `immutable.IntMap`
* @define coll immutable integer map
* @define mayNotTerminateInf
* @define willNotTerminateInf
*/
sealed abstract class IntMap[+T] extends AbstractMap[Int, T]
  with StrictOptimizedMapOps[Int, T, Map, IntMap[T]]
  with Serializable {

  // Rebuild an IntMap from an arbitrary (Int, T) source; required by the
  // strict optimized map operations.
  override protected def fromSpecific(coll: scala.collection.IterableOnce[(Int, T) @uncheckedVariance]): IntMap[T] =
    intMapFrom[T](coll)
  protected def intMapFrom[V2](coll: scala.collection.IterableOnce[(Int, V2)]): IntMap[V2] = {
    val b = IntMap.newBuilder[V2]
    b.sizeHint(coll)
    b.addAll(coll)
    b.result()
  }
  override protected def newSpecificBuilder: Builder[(Int, T), IntMap[T]] @uncheckedVariance =
    new ImmutableBuilder[(Int, T), IntMap[T]](empty) {
      def addOne(elem: (Int, T)): this.type = { elems = elems + elem; this }
    }

  override def empty: IntMap[T] = IntMap.Nil

  // Collects via a buffer so the list is built in traversal (unsigned key)
  // order without quadratic appends.
  override def toList = {
    val buffer = new scala.collection.mutable.ListBuffer[(Int, T)]
    foreach(buffer += _)
    buffer.toList
  }

  /**
   * Iterator over key, value pairs of the map in unsigned order of the keys.
   *
   * @return an iterator over pairs of integer keys and corresponding values.
   */
  def iterator: Iterator[(Int, T)] = this match {
    case IntMap.Nil => Iterator.empty
    case _ => new IntMapEntryIterator(this)
  }

  /**
   * Loops over the key, value pairs of the map in unsigned order of the keys.
   */
  override final def foreach[U](f: ((Int, T)) => U): Unit = this match {
    case IntMap.Bin(_, _, left, right) => { left.foreach(f); right.foreach(f) }
    case IntMap.Tip(key, value) => f((key, value))
    case IntMap.Nil =>
  }

  // Two-argument variant of foreach; avoids allocating a tuple per entry.
  override def foreachEntry[U](f: (IntMapUtils.Int, T) => U): Unit = this match {
    case IntMap.Bin(_, _, left, right) => { left.foreachEntry(f); right.foreachEntry(f) }
    case IntMap.Tip(key, value) => f(key, value)
    case IntMap.Nil =>
  }
  // Keys in unsigned order.
  override def keysIterator: Iterator[Int] = this match {
    case IntMap.Nil => Iterator.empty
    case _ => new IntMapKeyIterator(this)
  }

  /**
   * Loop over the keys of the map. The same as `keys.foreach(f)`, but may
   * be more efficient.
   *
   * @param f The loop body
   */
  final def foreachKey[U](f: Int => U): Unit = this match {
    case IntMap.Bin(_, _, left, right) => { left.foreachKey(f); right.foreachKey(f) }
    case IntMap.Tip(key, _) => f(key)
    case IntMap.Nil =>
  }

  // Values in unsigned key order.
  override def valuesIterator: Iterator[T] = this match {
    case IntMap.Nil => Iterator.empty
    case _ => new IntMapValueIterator(this)
  }

  /**
   * Loop over the values of the map. The same as `values.foreach(f)`, but may
   * be more efficient.
   *
   * @param f The loop body
   */
  final def foreachValue[U](f: T => U): Unit = this match {
    case IntMap.Bin(_, _, left, right) => { left.foreachValue(f); right.foreachValue(f) }
    case IntMap.Tip(_, value) => f(value)
    case IntMap.Nil =>
  }
override protected[this] def className = "IntMap"
override def isEmpty = this eq IntMap.Nil
override def knownSize: Int = if (isEmpty) 0 else super.knownSize
// Keeps only entries satisfying `f`; preserves structural sharing by
// returning `this` when neither subtree changed.
override def filter(f: ((Int, T)) => Boolean): IntMap[T] = this match {
  case IntMap.Bin(prefix, mask, left, right) => {
    val (newleft, newright) = (left.filter(f), right.filter(f))
    if ((left eq newleft) && (right eq newright)) this
    else bin(prefix, mask, newleft, newright)
  }
  case IntMap.Tip(key, value) =>
    if (f((key, value))) this
    else IntMap.Nil
  case IntMap.Nil => IntMap.Nil
}

// Maps every value through `f`, reusing the existing trie structure.
override def transform[S](f: (Int, T) => S): IntMap[S] = this match {
  case b@IntMap.Bin(prefix, mask, left, right) => b.bin(left.transform(f), right.transform(f))
  case t@IntMap.Tip(key, value) => t.withValue(f(key, value))
  case IntMap.Nil => IntMap.Nil
}

// O(n): size is not cached, the whole trie is traversed.
final override def size: Int = this match {
  case IntMap.Nil => 0
  case IntMap.Tip(_, _) => 1
  case IntMap.Bin(_, _, left, right) => left.size + right.size
}

// Tail-recursive lookup descending one branch per level.
@tailrec
final def get(key: Int): Option[T] = this match {
  case IntMap.Bin(prefix, mask, left, right) => if (zero(key, mask)) left.get(key) else right.get(key)
  case IntMap.Tip(key2, value) => if (key == key2) Some(value) else None
  case IntMap.Nil => None
}

// Like `get` but avoids the Option allocation; `default` is by-name and only
// evaluated when the key is absent.
@tailrec
final override def getOrElse[S >: T](key: Int, default: => S): S = this match {
  case IntMap.Nil => default
  case IntMap.Tip(key2, value) => if (key == key2) value else default
  case IntMap.Bin(prefix, mask, left, right) =>
    if (zero(key, mask)) left.getOrElse(key, default) else right.getOrElse(key, default)
}
/**
 * Looks up the value bound to `key`, descending one branch per level.
 *
 * @param key the key to look up
 * @return the value associated with `key`
 * @throws IllegalArgumentException if the key is not present
 */
@tailrec
final override def apply(key: Int): T = this match {
  case IntMap.Bin(prefix, mask, left, right) => if (zero(key, mask)) left(key) else right(key)
  // Both failure branches use the same message for consistency (previously
  // "Key not found" here vs "key not found" below).
  case IntMap.Tip(key2, value) => if (key == key2) value else throw new IllegalArgumentException("key not found")
  case IntMap.Nil => throw new IllegalArgumentException("key not found")
}
// Alias for `updated`, required by the immutable Map interface.
override def + [S >: T] (kv: (Int, S)): IntMap[S] = updated(kv._1, kv._2)

// Inserts or replaces a binding, rebuilding only the path from root to key.
override def updated[S >: T](key: Int, value: S): IntMap[S] = this match {
  case IntMap.Bin(prefix, mask, left, right) =>
    if (!hasMatch(key, prefix, mask)) join(key, IntMap.Tip(key, value), prefix, this)
    else if (zero(key, mask)) IntMap.Bin(prefix, mask, left.updated(key, value), right)
    else IntMap.Bin(prefix, mask, left, right.updated(key, value))
  case IntMap.Tip(key2, value2) =>
    if (key == key2) IntMap.Tip(key, value)
    else join(key, IntMap.Tip(key, value), key2, this)
  case IntMap.Nil => IntMap.Tip(key, value)
}

// Strict map/flatMap over entries, rebuilding an IntMap from a view.
def map[V2](f: ((Int, T)) => (Int, V2)): IntMap[V2] = intMapFrom(new View.Map(toIterable, f))

def flatMap[V2](f: ((Int, T)) => IterableOnce[(Int, V2)]): IntMap[V2] = intMapFrom(new View.FlatMap(toIterable, f))

override def concat[V1 >: T](that: collection.IterableOnce[(Int, V1)]): IntMap[V1] =
  super.concat(that).asInstanceOf[IntMap[V1]] // Already has correct type but not declared as such

override def ++ [V1 >: T](that: collection.IterableOnce[(Int, V1)]): IntMap[V1] = concat(that)

def collect[V2](pf: PartialFunction[(Int, T), (Int, V2)]): IntMap[V2] =
  strictOptimizedCollect(IntMap.newBuilder[V2], pf)
/**
 * Updates the map, using the provided function to resolve conflicts if the key is already present.
 *
 * Equivalent to:
 * {{{
 *   this.get(key) match {
 *     case None => this.update(key, value)
 *     case Some(oldvalue) => this.update(key, f(oldvalue, value))
 *   }
 * }}}
 *
 * @tparam S    The supertype of values in this `IntMap`.
 * @param key   The key to update
 * @param value The value to use if there is no conflict
 * @param f     The function used to resolve conflicts.
 * @return      The updated map.
 */
def updateWith[S >: T](key: Int, value: S, f: (T, S) => S): IntMap[S] = this match {
  case IntMap.Bin(prefix, mask, left, right) =>
    if (!hasMatch(key, prefix, mask)) join(key, IntMap.Tip(key, value), prefix, this)
    else if (zero(key, mask)) IntMap.Bin(prefix, mask, left.updateWith(key, value, f), right)
    else IntMap.Bin(prefix, mask, left, right.updateWith(key, value, f))
  case IntMap.Tip(key2, value2) =>
    if (key == key2) IntMap.Tip(key, f(value2, value))
    else join(key, IntMap.Tip(key, value), key2, this)
  case IntMap.Nil => IntMap.Tip(key, value)
}

// Removes a binding if present; `bin` collapses single-child branches so the
// trie stays in canonical form.
def removed (key: Int): IntMap[T] = this match {
  case IntMap.Bin(prefix, mask, left, right) =>
    if (!hasMatch(key, prefix, mask)) this
    else if (zero(key, mask)) bin(prefix, mask, left - key, right)
    else bin(prefix, mask, left, right - key)
  case IntMap.Tip(key2, _) =>
    if (key == key2) IntMap.Nil
    else this
  case IntMap.Nil => IntMap.Nil
}
/**
 * A combined transform and filter function. Returns an `IntMap` such that
 * for each `(key, value)` mapping in this map, if `f(key, value) == None`
 * the map contains no mapping for key, and if `f(key, value) == Some(x)` the
 * map contains `(key, x)`.
 *
 * @tparam S  The type of the values in the resulting `IntMap`.
 * @param f   The transforming function.
 * @return    The modified map.
 */
def modifyOrRemove[S](f: (Int, T) => Option[S]): IntMap[S] = this match {
  case IntMap.Bin(prefix, mask, left, right) =>
    val newleft = left.modifyOrRemove(f)
    val newright = right.modifyOrRemove(f)
    // Preserve sharing when nothing changed in either subtree.
    if ((left eq newleft) && (right eq newright)) this.asInstanceOf[IntMap[S]]
    else bin(prefix, mask, newleft, newright)
  case IntMap.Tip(key, value) => f(key, value) match {
    case None =>
      IntMap.Nil
    case Some(value2) =>
      //hack to preserve sharing
      if (value.asInstanceOf[AnyRef] eq value2.asInstanceOf[AnyRef]) this.asInstanceOf[IntMap[S]]
      else IntMap.Tip(key, value2)
  }
  case IntMap.Nil =>
    IntMap.Nil
}
/**
 * Forms a union map with that map, using the combining function to resolve conflicts.
 *
 * @tparam S    The type of values in `that`, a supertype of values in `this`.
 * @param that  The map to form a union with.
 * @param f     The function used to resolve conflicts between two mappings.
 * @return      Union of `this` and `that`, with identical key conflicts resolved using the function `f`.
 */
def unionWith[S >: T](that: IntMap[S], f: (Int, S, S) => S): IntMap[S] = (this, that) match{
  // When one prefix covers a larger range (shorter mask), the other tree is
  // merged into the matching branch; otherwise the trees are joined.
  case (IntMap.Bin(p1, m1, l1, r1), that@(IntMap.Bin(p2, m2, l2, r2))) =>
    if (shorter(m1, m2)) {
      if (!hasMatch(p2, p1, m1)) join(p1, this, p2, that)
      else if (zero(p2, m1)) IntMap.Bin(p1, m1, l1.unionWith(that, f), r1)
      else IntMap.Bin(p1, m1, l1, r1.unionWith(that, f))
    } else if (shorter(m2, m1)){
      if (!hasMatch(p1, p2, m2)) join(p1, this, p2, that)
      else if (zero(p1, m2)) IntMap.Bin(p2, m2, this.unionWith(l2, f), r2)
      else IntMap.Bin(p2, m2, l2, this.unionWith(r2, f))
    }
    else {
      if (p1 == p2) IntMap.Bin(p1, m1, l1.unionWith(l2,f), r1.unionWith(r2, f))
      else join(p1, this, p2, that)
    }
  // Argument order of `f` stays (key, valueFromThis, valueFromThat) in both cases.
  case (IntMap.Tip(key, value), x) => x.updateWith[S](key, value, (x, y) => f(key, y, x))
  case (x, IntMap.Tip(key, value)) => x.updateWith[S](key, value, (x, y) => f(key, x, y))
  case (IntMap.Nil, x) => x
  case (x, IntMap.Nil) => x
}
/**
 * Forms the intersection of these two maps with a combining function. The
 * resulting map is a map that has only keys present in both maps and has
 * values produced from the original mappings by combining them with `f`.
 *
 * @tparam S    The type of values in `that`.
 * @tparam R    The type of values in the resulting `IntMap`.
 * @param that  The map to intersect with.
 * @param f     The combining function.
 * @return      Intersection of `this` and `that`, with values for identical keys produced by function `f`.
 */
def intersectionWith[S, R](that: IntMap[S], f: (Int, T, S) => R): IntMap[R] = (this, that) match {
  case (IntMap.Bin(p1, m1, l1, r1), that@IntMap.Bin(p2, m2, l2, r2)) =>
    if (shorter(m1, m2)) {
      if (!hasMatch(p2, p1, m1)) IntMap.Nil
      else if (zero(p2, m1)) l1.intersectionWith(that, f)
      else r1.intersectionWith(that, f)
    } else if (m1 == m2) bin(p1, m1, l1.intersectionWith(l2, f), r1.intersectionWith(r2, f))
    else {
      if (!hasMatch(p1, p2, m2)) IntMap.Nil
      else if (zero(p1, m2)) this.intersectionWith(l2, f)
      else this.intersectionWith(r2, f)
    }
  case (IntMap.Tip(key, value), that) => that.get(key) match {
    case None => IntMap.Nil
    case Some(value2) => IntMap.Tip(key, f(key, value, value2))
  }
  case (_, IntMap.Tip(key, value)) => this.get(key) match {
    case None => IntMap.Nil
    case Some(value2) => IntMap.Tip(key, f(key, value2, value))
  }
  case (_, _) => IntMap.Nil
}
/**
 * Left biased intersection. Returns the map that has all the same mappings
 * as this but only for keys which are present in the other map.
 *
 * @tparam R    The type of values in `that`.
 * @param that  The map to intersect with.
 * @return      A map with all the keys both in `this` and `that`, mapped to corresponding values from `this`.
 */
def intersection[R](that: IntMap[R]): IntMap[T] =
  this.intersectionWith(that, (key: Int, value: T, value2: R) => value)

// Union keeping the value from `that` on key collisions.
def ++[S >: T](that: IntMap[S]) =
  this.unionWith[S](that, (key, x, y) => y)

/**
 * The entry with the lowest key value considered in unsigned order.
 */
@tailrec
final def firstKey: Int = this match {
  case Bin(_, _, l, r) => l.firstKey
  case Tip(k, v) => k
  case IntMap.Nil => throw new IllegalStateException("Empty set")
}

/**
 * The entry with the highest key value considered in unsigned order.
 */
@tailrec
final def lastKey: Int = this match {
  case Bin(_, _, l, r) => r.lastKey
  case Tip(k, v) => k
  case IntMap.Nil => throw new IllegalStateException("Empty set")
}

// Serialization proxy: the map is rebuilt through the IntMap factory on read.
protected[this] def writeReplace(): AnyRef = new DefaultSerializationProxy(IntMap.toFactory[T](IntMap), this)
}
| lrytz/scala | src/library/scala/collection/immutable/IntMap.scala | Scala | apache-2.0 | 19,313 |
package org.openurp.edu.eams.teach.grade.course.service.impl
import org.beangle.data.model.dao.EntityDao
import org.beangle.commons.entity.metadata.Model
import org.openurp.edu.teach.code.GradeType
import org.openurp.edu.eams.teach.code.industry.ScoreMarkStyle
import org.openurp.edu.eams.teach.grade.course.service.MarkStyleStrategy
import org.openurp.edu.eams.teach.grade.model.CourseGradeSetting
import org.openurp.edu.eams.teach.grade.service.CourseGradeSettings
import org.openurp.edu.teach.grade.model.CourseGradeState
import org.openurp.edu.teach.grade.model.ExamGradeState
import org.openurp.edu.eams.teach.lesson.GradeTypeConstants
/**
 * Default strategy deciding which score mark style (percent scale vs. other
 * styles) applies to a course grade state and to each of its exam-grade
 * component states. Missing styles are filled in and the state is persisted.
 */
class DefaultMarkStyleStrategy extends MarkStyleStrategy() {

  private var entityDao: EntityDao = _

  private var settings: CourseGradeSettings = _

  /** A style counts as "default" when it is unset or the percent style. */
  private def isDefault(style: ScoreMarkStyle): Boolean = {
    null == style || style.id == ScoreMarkStyle.PERCENT
  }

  /**
   * Fills in missing mark styles on the course grade state and on the state
   * of every given grade type, then persists the updated grade state.
   *
   * @param gradeState the course grade state to configure
   * @param gradeTypes grade types whose states must carry a mark style
   */
  def configMarkStyle(gradeState: CourseGradeState, gradeTypes: List[GradeType]): Unit = {
    val setting = settings.getSetting(gradeState.getLesson.getProject)
    if (isDefault(gradeState.getScoreMarkStyle)) {
      gradeState.setScoreMarkStyle(getDefaultCourseGradeMarkStyle(gradeState, setting))
    }
    for (gradeType <- gradeTypes) {
      val typeState = getState(gradeState, gradeType)
      if (null == typeState.getScoreMarkStyle) {
        typeState.setScoreMarkStyle(getDefaultExamGradeMarkStyle(typeState, setting))
      }
    }
    entityDao.saveOrUpdate(gradeState)
  }

  /** Course-level default: the course's own style, falling back to percent. */
  protected def getDefaultCourseGradeMarkStyle(state: CourseGradeState, setting: CourseGradeSetting): ScoreMarkStyle = {
    // Option(...) maps a null course style to the percent style fetched by id.
    Option(state.getLesson.getCourse.getMarkStyle)
      .getOrElse(entityDao.get(classOf[ScoreMarkStyle], ScoreMarkStyle.PERCENT))
  }

  /**
   * Exam-level default: final-candidate grade types inherit the course style;
   * a delayed exam reuses the end-exam style when one exists; everything else
   * defaults to percent.
   */
  protected def getDefaultExamGradeMarkStyle(typeState: ExamGradeState, setting: CourseGradeSetting): ScoreMarkStyle = {
    if (setting.getFinalCandinateTypes.contains(typeState.gradeType)) {
      typeState.gradeState.getScoreMarkStyle
    } else if (typeState.gradeType.id == GradeTypeConstants.DELAY_ID) {
      val endGradeState = typeState.gradeState.getState(new GradeType(GradeTypeConstants.END_ID))
      if (null == endGradeState) typeState.gradeState.getScoreMarkStyle else endGradeState.getScoreMarkStyle
    } else {
      entityDao.get(classOf[ScoreMarkStyle], ScoreMarkStyle.PERCENT)
    }
  }

  /** Returns the exam state for `gradeType`, creating and attaching it when absent. */
  private def getState(gradeState: CourseGradeState, gradeType: GradeType): ExamGradeState = {
    var gradeTypeState = gradeState.getState(gradeType)
    if (null == gradeTypeState) {
      gradeTypeState = Model.newInstance(classOf[ExamGradeState]).asInstanceOf[ExamGradeState]
      gradeTypeState.setGradeType(gradeType)
      gradeTypeState.setGradeState(gradeState)
      gradeState.getStates.add(gradeTypeState)
    }
    gradeTypeState
  }

  /** Setter injection for the entity DAO. */
  def setEntityDao(entityDao: EntityDao): Unit = {
    this.entityDao = entityDao
  }

  /** Setter injection for the course grade settings service. */
  def setSettings(settings: CourseGradeSettings): Unit = {
    this.settings = settings
  }
}
| openurp/edu-eams-webapp | grade/src/main/scala/org/openurp/edu/eams/teach/grade/course/service/impl/DefaultMarkStyleStrategy.scala | Scala | gpl-3.0 | 3,064 |
package dsl.reactive.syntaxops
import scala.virtualization.lms.common.{Base, EffectExp, ScalaGenFunctions, FunctionsExp}
import dsl.reactive.phantom._
import language.implicitConversions
/** Defines Signal syntax, methods and code generators. */
trait SignalSyntax extends Base {

  /* Enabling signal.map(_ + 1) via implicit class*/
  implicit def toBehaviorOps[A:Manifest](s: Rep[Behavior[A]]) = new BehaviorOps(s)

  // Wrapper providing `map` on staged behaviors.
  class BehaviorOps[A:Manifest](s: Rep[Behavior[A]]) {
    def map[B:Manifest](f: Rep[A] => Rep[B]) = mapping_behavior(s,f)
  }

  // Abstract staged operation: lifts a staged function over a behavior.
  def mapping_behavior[A:Manifest,B:Manifest](sig: Rep[Behavior[A]],
    f: Rep[A] => Rep[B]): Rep[Behavior[B]]

  object Signal {
    /* The Signal expression factory method */
    def apply[A:Manifest](dhs: Rep[DepHolder]*)(f: => Rep[A]) =
      new_behavior(dhs, f)
  }

  // Abstract staged constructor: creates a behavior depending on `dhs`
  // whose value is computed by the (by-name) body `f`.
  def new_behavior[A:Manifest](
    dhs: Seq[Rep[DepHolder]], f: => Rep[A]): Rep[Behavior[A]]
}
// IR node definitions implementing the Signal syntax in the LMS expression world.
trait SignalOps extends EffectExp with FunctionsExp {
  this: SignalSyntax =>

  // IR node for a `Signal(deps) { body }` expression; `body` is the reified block.
  case class SignalCreation[A:Manifest](
    dhs: Seq[Exp[DepHolder]],
    body: Block[A]
  ) extends Def[Behavior[A]]

  override def new_behavior[A:Manifest](
    dhs: Seq[Exp[DepHolder]],
    f: => Exp[A]
  ): Exp[Behavior[A]] = SignalCreation(dhs, reifyEffects(f))

  // IR node for `behavior.map(f)`; the function is staged via doLambda.
  case class MappedBehavior[A:Manifest,B:Manifest](
    sig: Exp[Behavior[A]],
    f: Rep[A => B]
  ) extends Def[Behavior[B]]

  override def mapping_behavior[A:Manifest,B:Manifest](sig: Exp[Behavior[A]],
    f: Exp[A] => Exp[B]): Exp[Behavior[B]] = MappedBehavior(sig,doLambda(f))

  // Symbols bound inside the signal body must not escape its scope during
  // scheduling, so report the body's effect symbols as bound.
  override def boundSyms(e: Any): List[Sym[Any]] = e match {
    case SignalCreation(dhs,body) => effectSyms(body)
    case _ => super.boundSyms(e)
  }
}
// Scala code generation for the Signal IR nodes.
trait ScalaGenSignals extends ScalaGenReactiveBase with ScalaGenFunctions {
  val IR: SignalOps
  import IR._

  override def emitNode(sym: Sym[Any], node: Def[Any]): Unit = node match {
    /* emit the stored block at the inside of the Signal expression */
    case SignalCreation(dhs,f) => emitValDef(sym,
      simpleReactivePkg + "Signal(" + dhs.map(quote).mkString(", ") + ") { ")
      emitBlock(f)
      stream.println(quote(getBlockResult(f)) + "\\n")
      stream.println("}")
    /* mapping is provided by the underlying framework */
    case MappedBehavior(s,f) => emitValDef(sym, quote(s) + ".map(" + quote(f) + ")")
    case _ => super.emitNode(sym,node)
  }
}
| markus1189/OptiReactive | src/main/scala/dsl/reactive/syntaxops/Signals.scala | Scala | gpl-3.0 | 2,386 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn.ops
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import scala.reflect.ClassTag
/**
 * Operation computing the log-gamma function of every element of the input
 * tensor. `T` is the numeric type of the model, `D` the numeric type of the
 * input/output tensors.
 */
class Lgamma[T: ClassTag, D: ClassTag]()(implicit ev: TensorNumeric[T], ev2: TensorNumeric[D])
  extends Operation[Tensor[D], Tensor[D], T] {

  // Reused output buffer, resized to match each input.
  output = Tensor[D]()

  // Copies the input into the cached output tensor and applies logGamma in place.
  override def updateOutput(input: Tensor[D]): Tensor[D] = {
    output.resizeAs(input).copy(input).logGamma()
    output
  }

  // Exposes both numeric type tags (model and tensor) for serialization.
  override def getClassTagNumerics() : (Array[ClassTag[_]], Array[TensorNumeric[_]]) = {
    (Array(scala.reflect.classTag[T], scala.reflect.classTag[D]), Array(ev, ev2))
  }
}

/** Factory for [[Lgamma]]. */
object Lgamma {
  def apply[T: ClassTag, D: ClassTag]()(
    implicit ev: TensorNumeric[T], ev2: TensorNumeric[D]): Lgamma[T, D] = new Lgamma()
}
| qiuxin2012/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/ops/Lgamma.scala | Scala | apache-2.0 | 1,412 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import org.apache.spark.sql.{CarbonEnv, SparkSession}
import org.apache.carbondata.api.CarbonStore
/**
* clean files api
*/
// scalastyle:off
object CleanFiles {

  /**
   * Deletes obsolete segment files of `dbName.tableName` under `storePath`.
   * Validates that the table exists before delegating to [[CarbonStore]].
   */
  def cleanFiles(spark: SparkSession, dbName: String, tableName: String,
      storePath: String): Unit = {
    TableAPIUtil.validateTableExists(spark, dbName, tableName)
    // `orNull` keeps the historical contract: CarbonStore tolerates a null table.
    val carbonTable = CarbonEnv.getInstance(spark).carbonMetastore
      .getTableFromMetadata(dbName, tableName).map(_.carbonTable).orNull
    CarbonStore.cleanFiles(dbName, tableName, storePath, carbonTable)
  }

  /**
   * Command-line entry point.
   * Usage: CleanFiles <store path> <table name>, where the table name may be
   * qualified as `db.table`.
   */
  def main(args: Array[String]): Unit = {
    if (args.length < 2) {
      System.err.println("Usage: CleanFiles <store path> <table name>")
      System.exit(1)
    }
    val storePath = TableAPIUtil.escape(args(0))
    val (dbName, tableName) = TableAPIUtil.parseSchemaName(TableAPIUtil.escape(args(1)))
    val spark = TableAPIUtil.spark(storePath, s"CleanFiles: $dbName.$tableName")
    // Reload table metadata in case schemas changed since the session started.
    CarbonEnv.getInstance(spark).carbonMetastore.checkSchemasModifiedTimeAndReloadTables()
    cleanFiles(spark, dbName, tableName, storePath)
  }
}
| ksimar/incubator-carbondata | integration/spark2/src/main/scala/org/apache/spark/util/CleanFiles.scala | Scala | apache-2.0 | 1,934 |
/*
* Copyright (C) 2013 Alcatel-Lucent.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Licensed to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package molecule.utils
/**
 * Reflection helpers to access a type's companion object and to obtain its
 * `apply`/`unapply` methods as plain Scala functions.
 *
 * The `apply` lookups use the erased signature (`AnyRef` parameters), which
 * resolves to the bridge methods generated for case-class companions through
 * their `FunctionN` parents.
 */
object Reflect {

  /**
   * Access companion object
   */
  def singleton[T](implicit man: ClassManifest[T]): T = {
    val name = man.erasure.getName()
    // Scala singletons are compiled to a class whose name ends with "$".
    assert(name endsWith "$", "Not an object: " + name)
    val clazz = java.lang.Class.forName(name)
    clazz.getField("MODULE$").get(clazz).asInstanceOf[T]
  }

  /** Returns the companion object of `T` (the generated `T$` singleton). */
  def companion[T](implicit man: ClassManifest[T]): AnyRef = {
    val name = man.erasure.getName()
    val clazz = java.lang.Class.forName(name + "$")
    clazz.getField("MODULE$").get(clazz)
  }

  /** The companion's `unapply` as a function, for a 1-field type. */
  def companionUnapply1[T <: AnyRef, A](implicit man: ClassManifest[T]): T => Option[A] = {
    val comp = companion[T]
    val unapp = comp.getClass.getDeclaredMethod("unapply", man.erasure)
    { t: T => unapp.invoke(comp, t).asInstanceOf[Option[A]] }
  }

  /** The companion's `apply` and `unapply` as functions, for a 1-field type. */
  def companionApplyUnapply1[T <: AnyRef, A](implicit man: ClassManifest[T]): (A => T, T => Option[A]) = {
    val comp = companion[T]
    val unapp = comp.getClass.getDeclaredMethod("unapply", man.erasure)
    val app = comp.getClass.getDeclaredMethod("apply", classOf[AnyRef])
    (
      { a: A => app.invoke(comp, a.asInstanceOf[AnyRef]).asInstanceOf[T] },
      { t: T => unapp.invoke(comp, t).asInstanceOf[Option[A]] }
    )
  }

  /** The companion's `unapply` as a function, for a 2-field type. */
  def companionUnapply2[T <: AnyRef, A, B](implicit man: ClassManifest[T]): T => Option[(A, B)] = {
    val comp = companion[T]
    val unapp = comp.getClass.getDeclaredMethod("unapply", man.erasure)
    { t: T => unapp.invoke(comp, t).asInstanceOf[Option[(A, B)]] }
  }

  /** The companion's `apply` and `unapply` as functions, for a 2-field type. */
  def companionApplyUnapply2[T <: AnyRef, A, B](implicit man: ClassManifest[T]): ((A, B) => T, T => Option[(A, B)]) = {
    val comp = companion[T]
    val unapp = comp.getClass.getDeclaredMethod("unapply", man.erasure)
    val app = comp.getClass.getDeclaredMethod("apply", classOf[AnyRef], classOf[AnyRef])
    (
      { (a: A, b: B) => app.invoke(comp, a.asInstanceOf[AnyRef], b.asInstanceOf[AnyRef]).asInstanceOf[T] },
      { t: T => unapp.invoke(comp, t).asInstanceOf[Option[(A, B)]] }
    )
  }

  /** The companion's `unapply` as a function, for a 3-field type. */
  def companionUnapply3[T <: AnyRef, A, B, C](implicit man: ClassManifest[T]): T => Option[(A, B, C)] = {
    val comp = companion[T]
    val unapp = comp.getClass.getDeclaredMethod("unapply", man.erasure)
    { t: T => unapp.invoke(comp, t).asInstanceOf[Option[(A, B, C)]] }
  }

  /** The companion's `apply` and `unapply` as functions, for a 3-field type. */
  def companionApplyUnapply3[T <: AnyRef, A, B, C](implicit man: ClassManifest[T]): ((A, B, C) => T, T => Option[(A, B, C)]) = {
    val comp = companion[T]
    val unapp = comp.getClass.getDeclaredMethod("unapply", man.erasure)
    val app = comp.getClass.getDeclaredMethod("apply", classOf[AnyRef], classOf[AnyRef], classOf[AnyRef])
    (
      { (a: A, b: B, c: C) => app.invoke(comp, a.asInstanceOf[AnyRef], b.asInstanceOf[AnyRef], c.asInstanceOf[AnyRef]).asInstanceOf[T] },
      { t: T => unapp.invoke(comp, t).asInstanceOf[Option[(A, B, C)]] }
    )
  }

  /** The companion's `unapply` as a function, for a 4-field type. */
  def companionUnapply4[T <: AnyRef, A, B, C, D](implicit man: ClassManifest[T]): T => Option[(A, B, C, D)] = {
    val comp = companion[T]
    val unapp = comp.getClass.getDeclaredMethod("unapply", man.erasure)
    { t: T => unapp.invoke(comp, t).asInstanceOf[Option[(A, B, C, D)]] }
  }

  /** The companion's `apply` and `unapply` as functions, for a 4-field type. */
  def companionApplyUnapply4[T <: AnyRef, A, B, C, D](implicit man: ClassManifest[T]): ((A, B, C, D) => T, T => Option[(A, B, C, D)]) = {
    val comp = companion[T]
    val unapp = comp.getClass.getDeclaredMethod("unapply", man.erasure)
    val app = comp.getClass.getDeclaredMethod("apply", classOf[AnyRef], classOf[AnyRef], classOf[AnyRef], classOf[AnyRef])
    (
      { (a: A, b: B, c: C, d: D) => app.invoke(comp, a.asInstanceOf[AnyRef], b.asInstanceOf[AnyRef], c.asInstanceOf[AnyRef], d.asInstanceOf[AnyRef]).asInstanceOf[T] },
      { t: T => unapp.invoke(comp, t).asInstanceOf[Option[(A, B, C, D)]] }
    )
  }

  // Sample case class used by the demo `main`.
  case class Person(name: String, age: Int)

  // Demo entry point exercising the reflective apply/unapply lookups.
  def main(args: Array[String]): Unit = {
    val s = companion[Person]
    val app = s.getClass.getDeclaredMethod("apply", classOf[AnyRef], classOf[AnyRef])
    println(app.invoke(s, "hi", new java.lang.Integer(12)))
    println(s.getClass.getDeclaredMethods.mkString)
    val unapp = s.getClass.getDeclaredMethod("unapply", classOf[Person])
    println(unapp.invoke(s, Person("hi", 12)))
  }
}
} | molecule-labs/molecule | molecule-core/src/main/scala/molecule/utils/Reflect.scala | Scala | apache-2.0 | 5,080 |
/*
* Copyright 2010-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package http
package rest
import xml._
import org.specs2.mutable.Specification
import org.specs2.matcher.Matcher
import common._
import util.ControlHelpers.tryo
/**
* System under specification for XMLApi.
*/
object XmlApiSpec extends Specification {
  "XMLApi Specification".title

  // Example API implementation acting as the system under test.
  object XMLApiExample extends XMLApiHelper {

    // Define our root tag
    def createTag(contents : NodeSeq) : Elem = <api>{contents}</api>

    // This method exists to test the non-XML implicit conversions on XMLApiHelper
    def produce (in : Any) : LiftResponse = in match {
      // Tests boolToResponse
      case "true" => true
      case "false" => false
      // Tests canBoolToResponse
      case s : String => tryo[Boolean] { s.toInt > 5 }
      // Tests pairToResponse
      case i : Int if i == 42 => (true,"But what is the question?")
      // These test the listElemToResponse conversion
      case f : Float if f == 42f => (<float>perfect</float> : Elem)
      case f : Float if f == 0f => (<float>zero</float> : Node)
      case f : Float if f > 0f => (<float>positive</float> : NodeSeq)
      case f : Float if f < 0f => (<float>negative</float> : Seq[Node])
    }

    // This method tests the XML implicit conversions on XMLApiHelper
    def calculator : LiftRules.DispatchPF = {
      case r @ Req(List("api","sum"), _, GetRequest) => () => doSum(r)
      case r @ Req(List("api","product"), _, GetRequest) => () => doProduct(r)
      case r @ Req(List("api","max"), _, GetRequest) => () => doMax(r)
      case r @ Req(List("api","min"), _, GetRequest) => () => doMin(r)
      // Tests putResponseInBox
      case Req("api" :: _, _, _) => () => BadResponse()
    }

    // ===== Handler methods =====
    // Parses the comma-separated "args" request parameter and folds it with `operation`;
    // any parse failure surfaces as a Failure via tryo.
    def reduceOp (operation : (Int,Int) => Int)(r : Req) : Box[Elem] = tryo {
      (r.param("args").map {
        args => <result>{args.split(",").map(_.toInt).reduceLeft(operation)}</result>
      }) ?~ "Missing args"
    } match {
      case Full(x) => x
      case f : Failure => f
      case Empty => Empty
    }

    // We specify the LiftResponse return type to force use of the implicit
    // canNodeToResponse conversion
    def doSum (r : Req) : LiftResponse = reduceOp(_ + _)(r)
    def doProduct (r : Req) : LiftResponse = (reduceOp(_ * _)(r) : Box[Node])
    def doMax (r : Req) : LiftResponse = (reduceOp(_ max _)(r) : Box[NodeSeq])
    def doMin (r : Req) : LiftResponse = (reduceOp(_ min _)(r) : Box[Node])
    //def doMin (r : Req) : LiftResponse = (reduceOp(_ min _)(r) : Box[Seq[Node]])
  }

  // A helper to simplify the specs matching
  case class matchXmlResponse(expected : Node) extends Matcher[LiftResponse] {
    def apply[T <: LiftResponse](response : org.specs2.matcher.Expectable[T]) = response.value match {
      case x : XmlResponse => {
        /* For some reason, the UnprefixedAttributes that Lift uses to merge in
         * new attributes makes comparison fail. Instead, we simply stringify and
         * reparse the response contents and that seems to fix the issue. */
        val converted = XML.loadString(x.xml.toString)
        result(converted == expected,
          "%s matches %s".format(converted,expected),
          "%s does not match %s".format(converted, expected),
          response)
      }
      case other => result(false,"matches","not an XmlResponse", response)
    }
  }

  "XMLApiHelper" should {
    import XMLApiExample.produce

    /* In all of these tests we include the <xml:group/> since that's what Lift
     * inserts for content in non-content responses.
     */
    "Convert booleans to LiftResponses" in {
      produce("true") must matchXmlResponse(<api success="true"><xml:group/></api>)
      produce("false") must matchXmlResponse(<api success="false"><xml:group/></api>)
    }

    "Convert Boxed booleans to LiftResponses" in {
      produce("42") must matchXmlResponse(<api success="true"><xml:group/></api>)
      produce("1") must matchXmlResponse(<api success="false"><xml:group/></api>)
      val failure = produce("invalidInt")
      failure must haveClass[XmlResponse]
      failure match {
        case x : XmlResponse => {
          x.xml.attribute("success").map(_.text) must_== Some("false")
          x.xml.attribute("msg").isDefined must_== true
        }
      }
    }

    "Convert Pairs to responses" in {
      produce(42) must matchXmlResponse(<api success="true" msg="But what is the question?"><xml:group/></api>)
    }

    "Convert various XML types to a response" in {
      produce(0f) must matchXmlResponse(<api success="true"><float>zero</float></api>)
      produce(-1f) must matchXmlResponse(<api success="true"><float>negative</float></api>)
      produce(1f) must matchXmlResponse(<api success="true"><float>positive</float></api>)
      produce(42f) must matchXmlResponse(<api success="true"><float>perfect</float></api>)
    }
  }
}
| pbrant/framework | web/webkit/src/test/scala/net/liftweb/http/rest/XMLApiSpec.scala | Scala | apache-2.0 | 5,505 |
/*
* Accio is a platform to launch computer science experiments.
* Copyright (C) 2016-2018 Vincent Primault <v.primault@ucl.ac.uk>
*
* Accio is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Accio is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Accio. If not, see <http://www.gnu.org/licenses/>.
*/
package fr.cnrs.liris.accio.discovery.libraries
import fr.cnrs.liris.accio.domain.{Attribute, Operator}
import fr.cnrs.liris.accio.sdk.{OpMetadata, ScalaLibrary}
import fr.cnrs.liris.lumos.domain.{DataType, RemoteFile}
/** Scala operator library exposing a single fake operator, used by discovery tests. */
object TestOps1 extends ScalaLibrary {

  // Placeholder class tag attached to the generated metadata.
  private[this] val fakeOpClazz = classOf[Operator]

  // Declares one operator "ThirdSimple" taking two datasets and producing one.
  override def ops: Seq[OpMetadata] = Seq(
    new OpMetadata(Operator(
      name = "ThirdSimple",
      executable = RemoteFile("."),
      inputs = Seq(
        Attribute("data1", DataType.Dataset),
        Attribute("data2", DataType.Dataset)),
      outputs = Seq(Attribute("data", DataType.Dataset))),
      fakeOpClazz))
}
| privamov/accio | accio/javatests/fr/cnrs/liris/accio/discovery/libraries/TestOps1.scala | Scala | gpl-3.0 | 1,409 |
package gitbucket.core.controller
import gitbucket.core.model.Account
import gitbucket.core.service.RepositoryService.RepositoryInfo
import gitbucket.core.service.{AccountService, RepositoryService}
import gitbucket.core.servlet.Database
import gitbucket.core.util._
import gitbucket.core.util.SyntaxSugars._
import gitbucket.core.util.Directory._
import gitbucket.core.util.Implicits._
import org.eclipse.jgit.api.Git
import org.eclipse.jgit.dircache.DirCache
import org.eclipse.jgit.lib.{Constants, FileMode}
import org.scalatra._
import org.scalatra.servlet.{FileItem, FileUploadSupport, MultipartConfig}
import org.apache.commons.io.{FileUtils, IOUtils}
/**
* Provides Ajax based file upload functionality.
*
* This servlet saves uploaded file.
*/
class FileUploadController extends ScalatraServlet with FileUploadSupport with RepositoryService with AccountService {

  // Reject uploads larger than 3 MB.
  configureMultipartHandling(MultipartConfig(maxFileSize = Some(3 * 1024 * 1024)))

  // Saves an uploaded image into the session-scoped temporary directory and
  // records the original file name in the session under the generated id.
  post("/image"){
    execute({ (file, fileId) =>
      FileUtils.writeByteArrayToFile(new java.io.File(getTemporaryDir(session.getId), fileId), file.get)
      session += Keys.Session.Upload(fileId) -> file.name
    }, FileUtil.isImage)
  }

  // Saves an uploaded attachment under the repository's attachment directory,
  // keeping the original file extension.
  post("/file/:owner/:repository"){
    execute({ (file, fileId) =>
      FileUtils.writeByteArrayToFile(new java.io.File(
        getAttachedDir(params("owner"), params("repository")),
        fileId + "." + FileUtil.getExtension(file.getName)), file.get)
    }, FileUtil.isUploadableType)
  }

  // Commits an uploaded file into the wiki Git repository of the project.
  post("/wiki/:owner/:repository"){
    // Don't accept not logged-in users
    session.get(Keys.Session.LoginAccount).collect { case loginAccount: Account =>
      val owner = params("owner")
      val repository = params("repository")
      // Check whether logged-in user is collaborator
      onlyWikiEditable(owner, repository, loginAccount){
        execute({ (file, fileId) =>
          val fileName = file.getName
          // Serialize concurrent writes to the same wiki repository.
          LockUtil.lock(s"${owner}/${repository}/wiki") {
            using(Git.open(Directory.getWikiRepositoryDir(owner, repository))) { git =>
              val builder = DirCache.newInCore.builder()
              val inserter = git.getRepository.newObjectInserter()
              val headId = git.getRepository.resolve(Constants.HEAD + "^{commit}")
              if(headId != null){
                // Re-add every existing tree entry except an older file of the same name.
                JGitUtil.processTree(git, headId){ (path, tree) =>
                  if(path != fileName){
                    builder.add(JGitUtil.createDirCacheEntry(path, tree.getEntryFileMode, tree.getEntryObjectId))
                  }
                }
              }
              val bytes = IOUtils.toByteArray(file.getInputStream)
              builder.add(JGitUtil.createDirCacheEntry(fileName, FileMode.REGULAR_FILE, inserter.insert(Constants.OBJ_BLOB, bytes)))
              builder.finish()
              // Commit the new tree on HEAD, authored by the logged-in user.
              val newHeadId = JGitUtil.createNewCommit(git, inserter, headId, builder.getDirCache.writeTree(inserter),
                Constants.HEAD, loginAccount.userName, loginAccount.mailAddress, s"Uploaded ${fileName}")
              fileName
            }
          }
        }, FileUtil.isUploadableType)
      }
    } getOrElse BadRequest()
  }

  // Imports an uploaded SQL dump into the database; admin accounts only.
  post("/import") {
    import JDBCUtil._
    session.get(Keys.Session.LoginAccount).collect { case loginAccount: Account if loginAccount.isAdmin =>
      execute({ (file, fileId) =>
        request2Session(request).conn.importAsSQL(file.getInputStream)
      }, _ => true)
    }
    redirect("/admin/data")
  }

  // Runs `action` only when the wiki of owner/repository is editable by
  // `loginAccount`, according to the repository's wiki option
  // (ALL / PUBLIC / PRIVATE); otherwise answers 400.
  private def onlyWikiEditable(owner: String, repository: String, loginAccount: Account)(action: => Any): Any = {
    implicit val session = Database.getSession(request)
    getRepository(owner, repository) match {
      case Some(x) => x.repository.options.wikiOption match {
        case "ALL" if !x.repository.isPrivate => action
        case "PUBLIC" if hasGuestRole(owner, repository, Some(loginAccount)) => action
        case "PRIVATE" if hasDeveloperRole(owner, repository, Some(loginAccount)) => action
        case _ => BadRequest()
      }
      case None => BadRequest()
    }
  }

  // Applies `f` to the uploaded "file" parameter when its name passes the
  // MIME-type check, responding with the generated file id; otherwise 400.
  // NOTE(review): parameter name "mimeTypeChcker" is a typo for "mimeTypeChecker";
  // left unchanged here to keep the code byte-identical.
  private def execute(f: (FileItem, String) => Unit, mimeTypeChcker: (String) => Boolean) = fileParams.get("file") match {
    case Some(file) if(mimeTypeChcker(file.name)) =>
      defining(FileUtil.generateFileId){ fileId =>
        f(file, fileId)
        Ok(fileId)
      }
    case _ => BadRequest()
  }
}
| nobusugi246/gitbucket | src/main/scala/gitbucket/core/controller/FileUploadController.scala | Scala | apache-2.0 | 4,408 |
/*
* Copyright 2001-2015 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.examples.asyncfreespec.ignoreall
import org.scalatest.AsyncFreeSpec
import scala.concurrent.Future
import org.scalatest.Ignore
@Ignore
class AddSpec extends AsyncFreeSpec {

  // Asynchronous addition: wraps the sum in a Future.
  def addSoon(addends: Int*): Future[Int] = Future { addends.sum }

  "addSoon" - {
    "will eventually compute a sum of passed Ints" in {
      // Map the assertion over the Future and hand the resulting
      // Future[Assertion] back to ScalaTest.
      addSoon(1, 2) map { result => assert(result == 3) }
    }
  }

  // Synchronous addition for comparison.
  def addNow(addends: Int*): Int = addends.sum

  "addNow" - {
    "will immediately compute a sum of passed Ints" in {
      // Synchronous test bodies must evaluate to an Assertion.
      val result: Int = addNow(1, 2)
      assert(result == 3)
    }
  }
}
| dotty-staging/scalatest | examples/src/test/scala/org/scalatest/examples/asyncfreespec/ignoreall/AddSpec.scala | Scala | apache-2.0 | 1,449 |
package org.http4s
package server
package middleware
import cats.~>
import cats.arrow.FunctionK
import cats.implicits._
import cats.data.OptionT
import cats.effect.{Bracket, Concurrent, Sync}
import cats.effect.Sync._
import fs2.Stream
import org.http4s.util.CaseInsensitiveString
import org.log4s.getLogger
/**
 * Simple Middleware for Logging All Requests and Responses
 */
object Logger {
  private[this] val logger = getLogger
  // Wraps `http` so every request and its response are logged.
  // `fk` lifts the logging effect in F into the surrounding context G;
  // `logAction` defaults to logging via log4s at INFO level.
  def apply[G[_]: Bracket[?[_], Throwable], F[_]: Concurrent](
      logHeaders: Boolean,
      logBody: Boolean,
      fk: F ~> G,
      redactHeadersWhen: CaseInsensitiveString => Boolean = Headers.SensitiveHeaders.contains,
      logAction: Option[String => F[Unit]] = None
  )(http: Http[G, F]): Http[G, F] = {
    val log: String => F[Unit] = logAction.getOrElse { s =>
      Sync[F].delay(logger.info(s))
    }
    // Request logging wraps innermost so the request is logged before the response.
    ResponseLogger(logHeaders, logBody, fk, redactHeadersWhen, log.pure[Option])(
      RequestLogger(logHeaders, logBody, fk, redactHeadersWhen, log.pure[Option])(http)
    )
  }
  // Convenience for total HttpApp values (G = F).
  def httpApp[F[_]: Concurrent](
      logHeaders: Boolean,
      logBody: Boolean,
      redactHeadersWhen: CaseInsensitiveString => Boolean = Headers.SensitiveHeaders.contains,
      logAction: Option[String => F[Unit]] = None
  )(httpApp: HttpApp[F]): HttpApp[F] =
    apply(logHeaders, logBody, FunctionK.id[F], redactHeadersWhen, logAction)(httpApp)
  // Convenience for partial HttpRoutes values (G = OptionT over F).
  def httpRoutes[F[_]: Concurrent](
      logHeaders: Boolean,
      logBody: Boolean,
      redactHeadersWhen: CaseInsensitiveString => Boolean = Headers.SensitiveHeaders.contains,
      logAction: Option[String => F[Unit]] = None
  )(httpRoutes: HttpRoutes[F]): HttpRoutes[F] =
    apply(logHeaders, logBody, OptionT.liftK[F], redactHeadersWhen, logAction)(httpRoutes)
  // Renders a single request or response as "<prelude> Headers(...) body=..."
  // and hands the string to `log`. Note that when `logBody` is set the whole
  // body stream is compiled into memory to build the log line.
  def logMessage[F[_], A <: Message[F]](message: A)(
      logHeaders: Boolean,
      logBody: Boolean,
      redactHeadersWhen: CaseInsensitiveString => Boolean = Headers.SensitiveHeaders.contains)(
      log: String => F[Unit])(implicit F: Sync[F]): F[Unit] = {
    val charset = message.charset
    val isBinary = message.contentType.exists(_.mediaType.binary)
    // JSON is rendered as text even if its media type is flagged binary.
    val isJson = message.contentType.exists(mT =>
      mT.mediaType == MediaType.application.json || mT.mediaType == MediaType.application.`vnd.hal+json`)
    val isText = !isBinary || isJson
    def prelude = message match {
      case Request(method, uri, httpVersion, _, _, _) =>
        s"$httpVersion $method $uri"
      case Response(status, httpVersion, _, _, _) =>
        s"$httpVersion $status"
    }
    val headers =
      if (logHeaders)
        message.headers.redactSensitive(redactHeadersWhen).toList.mkString("Headers(", ", ", ")")
      else ""
    val bodyStream = if (logBody && isText) {
      message.bodyAsText(charset.getOrElse(Charset.`UTF-8`))
    } else if (logBody) {
      // Binary body: render each byte in hex.
      message.body
        .map(b => java.lang.Integer.toHexString(b & 0xff))
    } else {
      Stream.empty.covary[F]
    }
    val bodyText = if (logBody) {
      bodyStream.compile.string
        .map(text => s"""body="$text"""")
    } else {
      F.pure("")
    }
    def spaced(x: String): String = if (x.isEmpty) x else s" $x"
    bodyText
      .map(body => s"$prelude${spaced(headers)}${spaced(body)}")
      .flatMap(log)
  }
}
| ChristopherDavenport/http4s | server/src/main/scala/org/http4s/server/middleware/Logger.scala | Scala | apache-2.0 | 3,271 |
package com.stulsoft.ysps.plambda
/**
* Created by Yuriy Stul on 9/13/2016.
*/
/** Demonstrates defining and invoking a lambda (function literal). */
object Lambda {
  def main(args: Array[String]): Unit = {
    println("==>main")
    lambda1()
    println("<==main")
  }

  /** Defines a two-argument lambda and prints its results. */
  def lambda1(): Unit = {
    println("==>lambda1")
    // Function literal selecting the larger of two Ints.
    val max = (m: Int, n: Int) => if (m > n) m else n
    List(max(1, 2), max(3, 2)).foreach(println)
    println("<==lambda1")
  }
}
| ysden123/ysps | src/main/scala/com/stulsoft/ysps/plambda/Lambda.scala | Scala | mit | 410 |
/** **\\
** Copyright (c) 2012 Center for Organic and Medicinal Chemistry **
** Zurich University of Applied Sciences **
** Wädenswil, Switzerland **
\\** **/
package chemf
import scalaz._, Scalaz._, std.indexedSeq._
import org.scalacheck._
/**
* @author Stefan Höck
*/
/** Property tests for `Element` (scalaz unicode syntax: `∀` forall, `≟` typesafe equals). */
object ElementTest extends Properties("Element") {
  // Round trip: looking an element up by atomic number yields the element.
  property("orderNr") = Element.values.∀ (e ⇒ (Element fromNr e.atomicNr) ≟ e)
  // Round trip: looking an element up by its symbol yields Some(element).
  property("symbol") = Element.values.∀ (e ⇒ (Element fromSymbol e.symbol) ≟ e.some)
  // Elements are listed in atomic-number order (list index == atomicNr).
  property("listOrder") =
    Element.values.zipWithIndex.∀ (p ⇒ p._1.atomicNr ≟ p._2)
  // NOTE(review): this property shares the name "isotopes" with the next one;
  // consider distinct labels so failures are distinguishable in reports.
  property("isotopes") =
    Element.values ∀ (e ⇒ e.isotopes.size ≟ IsotopeData.isotopes(e).size)
  property("isotopes") =
    Element.values ∀ (e ⇒ e.isotopes ∀ (_.element ≟ e))
  // Each isotope-distribution entry refers back to its element and is ≤ 1.0.
  property("massDist") =
    Element.values ∀ (e ⇒
      e.isotopeDist ∀ (
        p ⇒ (p._1.element ≟ e) && p._2 <= 1.0
      )
    )
}
// vim: set ts=2 sw=2 et:
| stefan-hoeck/chemf | src/test/scala/chemf/ElementTest.scala | Scala | gpl-3.0 | 1,173 |
// Copyright 2012 Brennan Saeta
//
// This file is part of Axess
//
// Axess is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Axess is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Axess. If not, see <http://www.gnu.org/licenses/>.
package axess
import akka.actor.Actor
import scala.collection.immutable.Stack
import akka.routing.RoundRobinRouter
import akka.actor.Props
import org.openqa.selenium.htmlunit.HtmlUnitDriver
import org.openqa.selenium.WebDriver
import org.openqa.selenium.WebElement
import org.openqa.selenium.By
import models.CourseraSite
import models.SiteType
import scala.collection.JavaConversions._
import models.Site
import axess.checkers._
/**
 * Actor that scans one page at a time: it loads a URL in a headless browser,
 * reports newly discovered in-site links, and runs a fixed set of
 * accessibility checkers, sending results back to the requesting actor.
 */
class Worker extends Actor {
  import scala.util.control.NonFatal

  // Headless browser reused across scans.
  var browser: WebDriver = new HtmlUnitDriver
  // Site strategy (login + URL membership); initialised by a NewSite message.
  var siteType: SiteType = _

  // Accessibility checks applied to every scanned page.
  val checkers = List(new AltText(),
    new FormLabel(),
    new PageTitle(),
    new Blink(),
    new LinkText() /*,
      new ValidLinks()*/ ) // ValidLinks disabled due to performance concerns

  /**
   * Loads `url` and, on success, reports discovered links and checker
   * results. On a load failure a usability note and an empty link set are
   * reported instead, so the coordinator can still make progress.
   */
  def scanPage(url: String) = {
    var problemLoading = false
    try {
      println("Getting: " + url)
      browser.get(url)
      println("Got: " + url)
    } catch {
      // Report only recoverable failures; fatal JVM errors
      // (OutOfMemoryError, InterruptedException, ...) must propagate.
      case NonFatal(e) =>
        problemLoading = true
        sender ! PageScanResult(url, url, List(PageNote("Usability", e.getMessage())))
        sender ! NewUrls(url, Set())
    }
    if (!problemLoading) {
      findAllUrls(url)
      checkPage(url)
    }
  }

  /** Collects all in-site anchor targets on the current page and reports them. */
  def findAllUrls(url: String) = {
    if (siteType == null) throw new RuntimeException("Help!")
    val f = browser.findElements(By.tagName("a")).toSet.map {
      e: WebElement => e.getAttribute("href")
    }.map(
      s => makeCanonical(url, s)).filter(
      s => siteType.inSite(s))
    sender ! NewUrls(url, f)
  }

  /** Runs every checker against the current page and reports the notes. */
  def checkPage(url: String) = {
    val list = for {
      chk <- checkers
      msgs <- chk.checkPage(browser)
    } yield {
      PageNote(chk.category, msgs)
    }
    sender ! PageScanResult(url, browser.getTitle(), list)
  }

  /** Resets browser cookies, builds the configured SiteType, and logs in. */
  def login(site: Site) = {
    browser.manage().deleteAllCookies()
    val s = Class.forName(site.stype).newInstance().asInstanceOf[SiteType]
    s.configure(site)
    siteType = s
    siteType.login(browser)
  }

  // TODO: make it work on relative links!
  /** Strips any fragment ("#...") from a link; null links stay null. */
  def makeCanonical(curPage: String, linkText: String): String = {
    if (linkText == null) null
    else if (linkText.contains("#")) {
      linkText.split("#")(0)
    } else {
      linkText
    }
  }

  def receive = {
    case ScanPage(url) =>
      scanPage(url)
    case NewSite(site) =>
      login(site)
  }
}
// Copyright: 2010 - 2017 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.util
import java.io.{ File => JFile, _ }
import java.nio.charset.Charset
import java.util.regex.Pattern
import java.nio.file.Files
import scala.collection.JavaConverters._
import scala.util.Try
import com.google.common.io.{ Files => GFiles }
import org.ensime.api.deprecating
/**
* Decorate `java.io.File` with functionality from common utility
* packages, which would otherwise be verbose/ugly to call directly.
*
* Its nicer to put conveniences for working with `File` here
* instead of using static accessors from J2SE or Guava.
*
* NOTE: prefer NIO via the path utilities.
*/
package object file {
  import scala.util.control.NonFatal

  type File = JFile
  /**
   * Convenience for creating `File`s (which we do a lot), but has the
   * caveat that static methods on `java.io.File` can no longer be
   * accessed, so it must be imported like:
   *
   * `java.io.{ File => JFile }`
   */
  def File(name: String): File = new File(name)
  /** Runs `a` against a fresh temporary directory, deleting it afterwards (best effort). */
  def withTempDir[T](a: File => T): T = {
    val dir = Files.createTempDirectory("ensime").toFile.canon
    import path._
    try a(dir)
    finally Try(dir.toPath.deleteDirRecursively())
  }
  /** Runs `a` against a fresh temporary file, deleting it afterwards (best effort). */
  def withTempFile[T](a: File => T): T = {
    val file = Files.createTempFile("ensime-", ".tmp").toFile.canon
    try a(file)
    finally Try(file.delete())
  }
  /** Enriches `java.io.File` with convenience operations. */
  implicit class RichFile(val file: File) extends AnyVal {
    /** Resolves `sub` as a child of this file. */
    def /(sub: String): File = new File(file, sub)
    def isScala: Boolean = file.getName.toLowerCase.endsWith(".scala")
    def isJava: Boolean = file.getName.toLowerCase.endsWith(".java")
    def isClassfile: Boolean = file.getName.toLowerCase.endsWith(".class")
    def isJar: Boolean = file.getName.toLowerCase.endsWith(".jar")
    /** Path components, with empty and "." segments removed. */
    def parts: List[String] =
      file.getPath.split(
        Pattern.quote(JFile.separator)
      ).toList.filterNot(Set("", "."))
    def outputStream(): OutputStream = new FileOutputStream(file)
    /** Creates the file, creating any missing parent directories first. */
    def createWithParents(): Boolean = {
      GFiles.createParentDirs(file)
      file.createNewFile()
    }
    def readLines()(implicit cs: Charset): List[String] = {
      GFiles.readLines(file, cs).asScala.toList
    }
    def writeLines(lines: List[String])(implicit cs: Charset): Unit = {
      GFiles.write(lines.mkString("", "\\n", "\\n"), file, cs)
    }
    def writeString(contents: String)(implicit cs: Charset): Unit = {
      GFiles.write(contents, file, cs)
    }
    @deprecating("prefer path")
    def readString()(implicit cs: Charset): String = {
      import path._
      file.toPath.readString
    }
    /**
     * @return the file and its descendent family tree (if it is a directory).
     */
    @deprecating("prefer path approaches")
    def tree: Stream[File] = {
      file #:: GFiles.fileTreeTraverser().breadthFirstTraversal(file).asScala.toStream
    }
    /**
     * Non-recursive children of the file.
     */
    def children: Stream[File] =
      Option(file.listFiles()).map(_.toStream).getOrElse(Stream.empty)
    /**
     * Helps to resolve ambiguity surrounding files in symbolically
     * linked directories, which are common on operating systems that
     * use a symbolically linked temporary directory (OS X I'm looking
     * at you).
     *
     * @return the canonical form of `file`, falling back to the absolute file.
     */
    def canon =
      try file.getCanonicalFile
      catch {
        // Fall back only on recoverable failures (e.g. IOException,
        // SecurityException); fatal JVM errors must propagate.
        case NonFatal(_) => file.getAbsoluteFile
      }
  }
}
| pascr/ensime-server | util/src/main/scala/org/ensime/util/file.scala | Scala | gpl-3.0 | 3,528 |
package org.jetbrains.plugins.scala.codeInspection.typeLambdaSimplify
import com.intellij.codeInspection.{LocalQuickFix, ProblemDescriptor, ProblemsHolder}
import com.intellij.openapi.project.Project
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.codeInspection.typeLambdaSimplify.KindProjectorUseCorrectLambdaKeywordInspection._
import org.jetbrains.plugins.scala.codeInspection.{AbstractFixOnPsiElement, AbstractInspection}
import org.jetbrains.plugins.scala.extensions.PsiElementExt
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.base.types.{ScParameterizedTypeElement, ScSimpleTypeElement}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory.createTypeElementFromText
/**
* Author: Svyatoslav Ilinskiy
* Date: 6/25/15
*/
class KindProjectorUseCorrectLambdaKeywordInspection extends AbstractInspection(inspectionId, inspectionName) {
  override def actionFor(implicit holder: ProblemsHolder): PartialFunction[PsiElement, Any] = {
    // Only inspect parameterized type elements when the kind-projector plugin is enabled.
    case param: ScParameterizedTypeElement if ScalaPsiUtil.kindProjectorPluginEnabled(param) =>
      // Code-style flag: true means the Greek letter λ is preferred over the word "Lambda".
      val useGreekLambda = ScalaCodeStyleSettings.getInstance(param.getProject).REPLACE_LAMBDA_WITH_GREEK_LETTER
      param.children.foreach {
        case simple: ScSimpleTypeElement =>
          simple.getText match {
            case "Lambda" if useGreekLambda =>
              // Offer to rewrite the keyword, or alternatively flip the code-style setting.
              val changeKeywordFix = new KindProjectorUseCorrectLambdaKeywordQuickFix(simple, "λ")
              holder.registerProblem(simple, "Kind Projector: Replace Lambda with λ", changeKeywordFix)
              val changeSettingsFix = new ChangeLambdaCodeStyleSetting(!useGreekLambda)
              holder.registerProblem(simple, codeStyleSettingUseWordLambda, changeSettingsFix)
            case "λ" if !useGreekLambda =>
              val changeKeywordFix = new KindProjectorUseCorrectLambdaKeywordQuickFix(simple, "Lambda")
              holder.registerProblem(simple, "Kind Projector: Replace λ with Lambda", changeKeywordFix)
              val changeSettingsFix = new ChangeLambdaCodeStyleSetting(!useGreekLambda)
              holder.registerProblem(simple, codeStyleSettingUseGreekLambda, changeSettingsFix)
            case _ =>
          }
        case _ =>
      }
  }
}
/** Quick fix that replaces a type-lambda keyword element with `replacement`. */
class KindProjectorUseCorrectLambdaKeywordQuickFix(e: PsiElement, replacement: String) extends AbstractFixOnPsiElement(inspectionName, e) {
  override def doApplyFix(project: Project): Unit = {
    val target = getElement
    // Skip elements that became invalid between inspection and fix application.
    if (target.isValid) {
      target.replace(createTypeElementFromText(replacement)(target.getManager))
    }
  }
}
/** Quick fix that flips the project's lambda-keyword code-style setting. */
class ChangeLambdaCodeStyleSetting(useGreekLambda: Boolean) extends LocalQuickFix {
  override def getFamilyName: String = getName

  // The label advertises the setting value this fix will switch to.
  override def getName: String =
    if (useGreekLambda) codeStyleSettingUseGreekLambda else codeStyleSettingUseWordLambda

  override def applyFix(project: Project, d: ProblemDescriptor): Unit =
    ScalaCodeStyleSettings.getInstance(project).REPLACE_LAMBDA_WITH_GREEK_LETTER = useGreekLambda
}
object KindProjectorUseCorrectLambdaKeywordInspection {
  // Display name and short id used to register the inspection.
  val inspectionName = "Kind Projector: Use correct lambda keyword"
  val inspectionId = "KindProjectorUseCorrectLambdaKeyword"
  // Quick-fix labels for flipping the REPLACE_LAMBDA_WITH_GREEK_LETTER setting.
  val codeStyleSettingUseGreekLambda = "Kind Projector: Change code style setting: use λ instead of Lambda"
  val codeStyleSettingUseWordLambda = "Kind Projector: Change code style setting: use Lambda instead of λ"
}
| loskutov/intellij-scala | src/org/jetbrains/plugins/scala/codeInspection/typeLambdaSimplify/KindProjectorUseCorrectLambdaKeywordInspection.scala | Scala | apache-2.0 | 3,554 |
// Copyright (c) 2013-2020 Rob Norris and Contributors
// This software is licensed under the MIT License (MIT).
// For more information see LICENSE or https://opensource.org/licenses/MIT
package doobie.hi
import doobie.enumerated.JdbcType
import doobie.util.{ Get, Put }
import doobie.enumerated.ColumnNullable
import doobie.enumerated.ParameterNullable
import doobie.enumerated.ParameterMode
import doobie.enumerated.Holdability
import doobie.enumerated.Nullability.NullabilityKnown
import doobie.enumerated.FetchDirection
import doobie.enumerated.ResultSetConcurrency
import doobie.enumerated.ResultSetType
import doobie.util.{ Read, Write }
import doobie.util.analysis._
import doobie.util.stream.repeatEvalChunks
import doobie.syntax.align._
import java.sql.{ ParameterMetaData, ResultSetMetaData, SQLWarning }
import scala.Predef.{ intArrayOps, intWrapper }
import cats.Foldable
import cats.syntax.all._
import cats.data.Ior
import cats.effect.kernel.syntax.monadCancel._
import fs2.Stream
import fs2.Stream.bracket
/**
* Module of high-level constructors for `PreparedStatementIO` actions. Batching operations are not
* provided; see the `statement` module for this functionality.
* @group Modules
*/
object preparedstatement {
  import implicits._
  // fs2 handler, not public: pulls rows from `rs` in chunks of `chunkSize`.
  private def unrolled[A: Read](rs: java.sql.ResultSet, chunkSize: Int): Stream[PreparedStatementIO, A] =
    repeatEvalChunks(FPS.embed(rs, resultset.getNextChunk[A](chunkSize)))
  /** Execute the query and stream the results, closing the ResultSet on completion. @group Execution */
  def stream[A: Read](chunkSize: Int): Stream[PreparedStatementIO, A] =
    bracket(FPS.executeQuery)(FPS.embed(_, FRS.close)).flatMap(unrolled[A](_, chunkSize))
  /**
   * Non-strict unit for capturing effects.
   * @group Constructors (Lifting)
   */
  def delay[A](a: => A): PreparedStatementIO[A] =
    FPS.delay(a)
  /** @group Batching */
  val executeBatch: PreparedStatementIO[List[Int]] =
    FPS.executeBatch.map(_.toIndexedSeq.toList) // intArrayOps does not have `toList` in 2.13
  /** @group Batching */
  val addBatch: PreparedStatementIO[Unit] =
    FPS.addBatch
  /**
   * Add many sets of parameters and execute as a batch update, returning total rows updated. Note
   * that failed updates are not reported (see https://github.com/tpolecat/doobie/issues/706). This
   * API is likely to change.
   * @group Batching
   */
  def addBatchesAndExecute[F[_]: Foldable, A: Write](fa: F[A]): PreparedStatementIO[Int] =
    fa.toList
      .foldRight(executeBatch)((a, b) => set(a) *> addBatch *> b)
      .map(_.foldLeft(0)((acc, n) => acc + (n max 0))) // treat negatives (failures) as no rows updated
  /**
   * Add many sets of parameters.
   * @group Batching
   */
  def addBatches[F[_]: Foldable, A: Write](fa: F[A]): PreparedStatementIO[Unit] =
    fa.toList.foldRight(().pure[PreparedStatementIO])((a, b) => set(a) *> addBatch *> b)
  /** Execute the query and run `k` on the ResultSet, which is closed afterwards. @group Execution */
  def executeQuery[A](k: ResultSetIO[A]): PreparedStatementIO[A] =
    FPS.executeQuery.bracket(s => FPS.embed(s, k))(s => FPS.embed(s, FRS.close))
  /** @group Execution */
  val executeUpdate: PreparedStatementIO[Int] =
    FPS.executeUpdate
  /** @group Execution */
  def executeUpdateWithUniqueGeneratedKeys[A: Read]: PreparedStatementIO[A] =
    executeUpdate.flatMap(_ => getUniqueGeneratedKeys[A])
  /** @group Execution */
  def executeUpdateWithGeneratedKeys[A: Read](chunkSize: Int): Stream[PreparedStatementIO, A] =
    bracket(FPS.executeUpdate *> FPS.getGeneratedKeys)(FPS.embed(_, FRS.close)).flatMap(unrolled[A](_, chunkSize))
  /**
   * Compute the column `JdbcMeta` list for this `PreparedStatement`.
   * @group Metadata
   */
  def getColumnJdbcMeta: PreparedStatementIO[List[ColumnMeta]] =
    FPS.getMetaData.flatMap {
      case null => FPS.pure(Nil) // https://github.com/tpolecat/doobie/issues/262
      case md =>
        (1 to md.getColumnCount).toList.traverse { i =>
          for {
            n <- ColumnNullable.fromIntF[PreparedStatementIO](md.isNullable(i))
          } yield {
            val j = JdbcType.fromInt(md.getColumnType(i))
            val s = md.getColumnTypeName(i)
            val c = md.getColumnName(i)
            ColumnMeta(j, s, n.toNullability, c)
          }
        }
    }
  /**
   * Compute the column mappings for this `PreparedStatement` by aligning its `JdbcMeta`
   * with the `JdbcMeta` provided by a `Read` instance.
   * @group Metadata
   */
  def getColumnMappings[A](implicit A: Read[A]): PreparedStatementIO[List[(Get[_], NullabilityKnown) Ior ColumnMeta]] =
    getColumnJdbcMeta.map(m => A.gets align m)
  /** @group Properties */
  val getFetchDirection: PreparedStatementIO[FetchDirection] =
    FPS.getFetchDirection.flatMap(FetchDirection.fromIntF[PreparedStatementIO])
  /** @group Properties */
  val getFetchSize: PreparedStatementIO[Int] =
    FPS.getFetchSize
  /** Run `k` on the generated-keys ResultSet, which is closed afterwards. @group Results */
  def getGeneratedKeys[A](k: ResultSetIO[A]): PreparedStatementIO[A] =
    FPS.getGeneratedKeys.bracket(s => FPS.embed(s, k))(s => FPS.embed(s, FRS.close))
  /** @group Results */
  def getUniqueGeneratedKeys[A: Read]: PreparedStatementIO[A] =
    getGeneratedKeys(resultset.getUnique[A])
  /**
   * Compute the parameter `JdbcMeta` list for this `PreparedStatement`.
   * @group Metadata
   */
  def getParameterJdbcMeta: PreparedStatementIO[List[ParameterMeta]] =
    FPS.getParameterMetaData.flatMap { md =>
      (1 to md.getParameterCount).toList.traverse { i =>
        for {
          n <- ParameterNullable.fromIntF[PreparedStatementIO](md.isNullable(i))
          m <- ParameterMode.fromIntF[PreparedStatementIO](md.getParameterMode(i))
        } yield {
          val j = JdbcType.fromInt(md.getParameterType(i))
          val s = md.getParameterTypeName(i)
          ParameterMeta(j, s, n.toNullability, m)
        }
      }
    }
  /**
   * Compute the parameter mappings for this `PreparedStatement` by aligning its `JdbcMeta`
   * with the `JdbcMeta` provided by a `Write` instance.
   * @group Metadata
   */
  def getParameterMappings[A](implicit A: Write[A]): PreparedStatementIO[List[(Put[_], NullabilityKnown) Ior ParameterMeta]] =
    getParameterJdbcMeta.map(m => A.puts align m)
  /** @group Properties */
  val getMaxFieldSize: PreparedStatementIO[Int] =
    FPS.getMaxFieldSize
  /** @group Properties */
  val getMaxRows: PreparedStatementIO[Int] =
    FPS.getMaxRows
  /** @group MetaData */
  val getMetaData: PreparedStatementIO[ResultSetMetaData] =
    FPS.getMetaData
  /** @group MetaData */
  val getParameterMetaData: PreparedStatementIO[ParameterMetaData] =
    FPS.getParameterMetaData
  /** @group Properties */
  val getQueryTimeout: PreparedStatementIO[Int] =
    FPS.getQueryTimeout
  /** @group Properties */
  val getResultSetConcurrency: PreparedStatementIO[ResultSetConcurrency] =
    FPS.getResultSetConcurrency.flatMap(ResultSetConcurrency.fromIntF[PreparedStatementIO])
  /** @group Properties */
  val getResultSetHoldability: PreparedStatementIO[Holdability] =
    FPS.getResultSetHoldability.flatMap(Holdability.fromIntF[PreparedStatementIO])
  /** @group Properties */
  val getResultSetType: PreparedStatementIO[ResultSetType] =
    FPS.getResultSetType.flatMap(ResultSetType.fromIntF[PreparedStatementIO])
  /** @group Results */
  val getWarnings: PreparedStatementIO[SQLWarning] =
    FPS.getWarnings
  /**
   * Set the given writable value, starting at column `n`.
   * @group Parameters
   */
  def set[A](n: Int, a: A)(implicit A: Write[A]): PreparedStatementIO[Unit] =
    A.set(n, a)
  /**
   * Set the given writable value, starting at column `1`.
   * @group Parameters
   */
  def set[A](a: A)(implicit A: Write[A]): PreparedStatementIO[Unit] =
    A.set(1, a)
  /** @group Properties */
  def setCursorName(name: String): PreparedStatementIO[Unit] =
    FPS.setCursorName(name)
  /** @group Properties */
  def setEscapeProcessing(a: Boolean): PreparedStatementIO[Unit] =
    FPS.setEscapeProcessing(a)
  /** @group Properties */
  def setFetchDirection(fd: FetchDirection): PreparedStatementIO[Unit] =
    FPS.setFetchDirection(fd.toInt)
  /** @group Properties */
  def setFetchSize(n: Int): PreparedStatementIO[Unit] =
    FPS.setFetchSize(n)
  /** @group Properties */
  def setMaxFieldSize(n: Int): PreparedStatementIO[Unit] =
    FPS.setMaxFieldSize(n)
  /** @group Properties */
  def setMaxRows(n: Int): PreparedStatementIO[Unit] =
    FPS.setMaxRows(n)
  /** @group Properties */
  def setQueryTimeout(a: Int): PreparedStatementIO[Unit] =
    FPS.setQueryTimeout(a)
}
| tpolecat/doobie | modules/core/src/main/scala/doobie/hi/preparedstatement.scala | Scala | mit | 8,528 |
package scala
/** A base trait of all enum classes */
trait Enum {
  /** A number uniquely identifying a case of an enum */
  def ordinal: Int
  // Backing member for `ordinal`; presumably implemented by compiler-generated
  // code for each enum case — not intended for user code. TODO confirm.
  protected def $ordinal: Int
}
| som-snytt/dotty | library/src/scala/Enum.scala | Scala | apache-2.0 | 177 |
package uk.co.morleydev.ghosthunt.controller.impl.game
import uk.co.morleydev.ghosthunt.controller.Controller
import uk.co.morleydev.ghosthunt.model.net.{AcceptJoinGameRequest, NetworkMessage, ClientId, game}
import uk.co.morleydev.ghosthunt.model.GameTime
import uk.co.morleydev.ghosthunt.data.store.{Maze, EntityComponentStore}
import uk.co.morleydev.ghosthunt.model.component.game.{Actor, Ghost, Player, Remote}
import uk.co.morleydev.ghosthunt.data.net.Server
import uk.co.morleydev.ghosthunt.model.event.sys
import uk.co.morleydev.ghosthunt.model.event
import uk.co.morleydev.ghosthunt.model.store.EntityId
import uk.co.morleydev.ghosthunt.model.component.menu.{MenuOption, Text}
import org.jsfml.system.Vector2f
import java.util.concurrent.ConcurrentLinkedDeque
import uk.co.morleydev.ghosthunt.data.event.EventQueue
/**
* The server lobby controller is responsible for waiting for players to join the game, as well as noticing when waiting
* players disconnect. Finally, it is responsible for starting the game when the appropriate menu option is selected.
*
* @param entities
* @param server
* @param events
* @param maze
*/
class ServerLobbyController(entities : EntityComponentStore, server : Server, events : EventQueue, maze : Maze)
  extends Controller(messages = Seq[String](game.JoinGameRequest.name, game.Disconnected.name)) {
  maze.pellets.reset()
  // One status label per slot: -1 is the (single) player slot, 0..2 are ghost slots.
  private val textWaiting = Map[Int, EntityId](-1 -> entities.createEntity(),
    0 -> entities.createEntity(),
    1 -> entities.createEntity(),
    2 -> entities.createEntity())
  // Free slot ids handed out to joining clients (player slot -1 first).
  private val entityQueue = new ConcurrentLinkedDeque[Int]()
  entityQueue.add(-1)
  entityQueue.add(0)
  entityQueue.add(1)
  entityQueue.add(2)
  private val startGameButton = entities.createEntity()
  // Label slots belonging to already-connected remotes as "Connected!".
  // NOTE(review): the next statement re-links ALL slots as "Waiting..." right
  // after — if `link` replaces components this overwrites the labels above;
  // confirm the intended ordering.
  (entities.get("Remote", "Ghost").map(s => s._2("Ghost").asInstanceOf[Ghost].id) ++ entities.get("Remote", "Player").map(s => -1))
    .foreach(id => entities.link(textWaiting(id), "Text", new Text(new Vector2f(10.0f, 40.0f * id + 50.0f), 36.0f, "Connected!")))
  (-1 to 2).foreach(i => entities.link(textWaiting(i), "Text", new Text(new Vector2f(10.0f, 40.0f * i + 50.0f), 36.0f, "Waiting...")))
  // When the "Start Game" menu option is activated: tear down the lobby UI,
  // broadcast StartGame to all remotes, hand over to the in-game controller.
  override def update(gameTime: GameTime): Unit = {
    entities.get(startGameButton).filter(_._1 == "MenuOption").map(_._2.asInstanceOf[MenuOption].active).filter(_ > -1).foreach(menu => {
      entities.removeEntity(startGameButton)
      textWaiting.map(_._2).foreach(id => entities.removeEntity(id))
      entities.get("Remote")
        .map(s => s._2("Remote").asInstanceOf[Remote].id)
        .foreach(s => server.send(s, game.StartGame(gameTime)))
      events.enqueue(event.game.ShowScore)
      events.enqueue(sys.CreateController(() => new ServerGameController(entities, events, server, maze)))
      kill()
    })
  }
  protected override def onServerMessage(client : ClientId, message : NetworkMessage, gameTime : GameTime) = {
    message.name match {
      // A lobby member left: free its slot, restore the "Waiting..." label,
      // inform everyone else, and disable Start Game if a side became empty.
      case game.Disconnected.name => synchronized {
        entities.get("Remote")
          .map(s => (s._1, s._2("Remote").asInstanceOf[Remote].id))
          .filter(s => s._2 == client)
          .foreach(s => {
            if (entities.has(s._1, "Ghost")) {
              val id = entities.get(s._1)("Ghost").asInstanceOf[Ghost].id
              entityQueue.addLast(id)
              entities.link(textWaiting(id), "Text", new Text(new Vector2f(10.0f, 40.0f * id + 50.0f), 36.0f, "Waiting..."))
            }
            else {
              entities.link(textWaiting(-1), "Text", new Text(new Vector2f(10.0f, 40.0f * -1 + 50.0f), 36.0f, "Waiting..."))
              entityQueue.addFirst(-1)
            }
            entities.removeEntity(s._1)
            entities.get("Remote")
              .map(s => s._2("Remote").asInstanceOf[Remote].id)
              .foreach(r => { println("Server (" + r.toString + "): " + s._2.toString()); server.send(r, game.InformLeftGame(s._2.value.toString, gameTime)) })
            if (entities.get("Ghost").size == 0 || entities.get("Player").size == 0)
              entities.unlink(startGameButton, "MenuOption")
          })
      }
      // A client asked to join: assign the next free slot (player if -1,
      // otherwise a ghost), acknowledge, and exchange join notifications.
      case game.JoinGameRequest.name => synchronized {
        val remote = entities.createEntity()
        entities.link(remote, "Remote", new Remote(client))
        val connectedId = entityQueue.poll()
        // Link the player or ghost
        if (connectedId == -1) {
          entities.link(remote, "Player", new Player)
          entities.link(remote, "Actor", new Actor(new Vector2f(336.0f, 48.0f)))
        } else {
          entities.link(startGameButton, "MenuOption", new MenuOption(new Vector2f(400.0f, 400.0f), new Vector2f(100.0f, 32.0f), Seq("Start Game")))
          entities.link(remote, "Ghost", new Ghost(connectedId))
          entities.link(remote, "Actor", new Actor(new Vector2f(336.0f, 272.0f)))
        }
        // Response with an accept join game request
        val tuple = new AcceptJoinGameRequest(connectedId, message.time.toNanos)
        server.send(client, game.AcceptJoinGameRequest(tuple, gameTime))
        // Get the entities that are not of this
        val otherEntities = entities.get("Remote", "Ghost")
          .map(s => (s._2("Remote").asInstanceOf[Remote].id, s._2("Ghost").asInstanceOf[Ghost].id))
          .filter(s => s._1 != client) ++ entities.get("Remote", "Player")
          .map(s => (s._2("Remote").asInstanceOf[Remote].id, -1))
          .filter(s => s._1 != client)
        // Tell other players this player joined
        otherEntities.foreach(s => server.send(s._1, game.InformJoinedGame((connectedId, client.value.toString), gameTime)))
        // Tell this player what other players exist
        otherEntities.foreach(s => server.send(client, game.InformJoinedGame((s._2, s._1.value.toString), gameTime)))
        // Record player as connected
        entities.link(textWaiting(connectedId), "Text", new Text(new Vector2f(10.0f, 40.0f * connectedId + 50.0f), 36.0f, "Connected!"))
      }
    }
  }
}
| MorleyDev/GhostHunt | src/main/scala/uk/co/morleydev/ghosthunt/controller/impl/game/ServerLobbyController.scala | Scala | mit | 5,952 |
/*
* Copyright (c) 2014-2015 by its authors. Some rights reserved.
* See the project homepage at: http://www.monifu.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monifu.reactive.internals.builders
import minitest.TestSuite
import monifu.concurrent.schedulers.TestScheduler
import monifu.reactive.Ack.Continue
import monifu.reactive.{Ack, Observer, Observable}
import scala.concurrent.Future
object FromTaskSuite extends TestSuite[TestScheduler] {
  def setup() = TestScheduler()

  def tearDown(s: TestScheduler): Unit = {
    assert(s.state.get.tasks.isEmpty,
      "TestScheduler should have no pending tasks")
  }

  test("should work") { implicit s =>
    var wasCompleted = 0
    var received = 0
    var i = 0
    // The task increments `i` on each evaluation, so every subscription
    // observes a fresh value.
    val obs = Observable.fromTask { i += 1; i }

    // Subscribes a fresh observer that accumulates elements into `received`
    // and counts completions in `wasCompleted`.
    def subscribe(): Unit =
      obs.onSubscribe(new Observer[Int] {
        def onNext(elem: Int): Future[Ack] = {
          received += elem
          Continue
        }
        def onError(ex: Throwable): Unit = ()
        def onComplete(): Unit = wasCompleted += 1
      })

    subscribe()
    s.tickOne()
    assertEquals(wasCompleted, 1)
    assertEquals(received, 1)

    subscribe()
    s.tickOne()
    assertEquals(wasCompleted, 2)
    assertEquals(received, 3)
  }
}
| virtualirfan/monifu | monifu/shared/src/test/scala/monifu/reactive/internals/builders/FromTaskSuite.scala | Scala | apache-2.0 | 1,940 |
package net.bmjames.opts.types
import scalaz.Functor
import scalaz.syntax.applicative._
import CReader._
/** A reader for a single option, flag, positional argument, or subcommand. */
sealed trait OptReader[A] {
  /** The option names this reader responds to; empty for arguments and commands. */
  final def names: List[OptName] =
    this match {
      case OptionReader(ns, _, _) => ns
      case FlagReader(ns, _) => ns
      case _ => Nil
    }
}
}
// An option taking an argument parsed by `cr`; `e` is used when the argument is missing/invalid.
case class OptionReader[A](ns: List[OptName], cr: CReader[A], e: ParseError) extends OptReader[A]
// A flag (no argument) producing the fixed value `a`.
case class FlagReader[A](ns: List[OptName], a: A) extends OptReader[A]
// A positional argument parsed by `cr`.
case class ArgReader[A](cr: CReader[A]) extends OptReader[A]
// A subcommand: `ns` are the command names, `f` resolves a name to its sub-parser.
case class CmdReader[A](ns: List[String], f: String => Option[ParserInfo[A]]) extends OptReader[A]
object OptReader {
  // Functor instance: maps over the produced value while leaving names and
  // error information untouched; for subcommands, maps the resulting ParserInfo.
  implicit val optReaderFunctor: Functor[OptReader] =
    new Functor[OptReader] {
      def map[A, B](fa: OptReader[A])(f: A => B): OptReader[B] =
        fa match {
          case OptionReader(ns, cr, e) => OptionReader(ns, cr.map(f), e)
          case FlagReader(ns, a) => FlagReader(ns, f(a))
          case ArgReader(cr) => ArgReader(cr.map(f))
          case CmdReader(ns, g) => CmdReader(ns, g.andThen(_.map(_.map(f))))
        }
    }
}
/** The name of an option: a short form (`-x`) or a long form (`--xyz`). */
sealed trait OptName
case class OptShort(name: Char) extends OptName
case class OptLong(name: String) extends OptName

object OptName {
  /** Orders all short names before all long names; names of the same kind
    * compare by their underlying value. */
  implicit val optNameOrdering: Ordering[OptName] =
    Ordering.fromLessThan { (a, b) =>
      (a, b) match {
        case (OptShort(x), OptShort(y)) => x < y
        case (OptLong(x), OptLong(y))   => x < y
        case (OptShort(_), _)           => true
        case (OptLong(_), _)            => false
      }
    }
}
| bmjames/scala-optparse-applicative | src/main/scala/net/bmjames/opts/types/OptReader.scala | Scala | bsd-3-clause | 1,542 |
import akka.actor.ActorSystem
import language.postfixOps
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.Uri.Query
import akka.http.scaladsl.model.{Uri, HttpRequest}
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import akka.http.scaladsl.model.headers._
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.ActorMaterializer
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import spray.json._
import sangria.execution.Executor
import sangria.parser.QueryParser
import sangria.marshalling.sprayJson._
import scala.concurrent.{Future, Await}
import scala.util.{Failure, Success}
import scala.concurrent.duration._
/** Minimal akka-http server exposing a sangria GraphQL endpoint with
  * token-based authentication, followed by a set of example client requests
  * that exercise it. Each example documents, in comments, the JSON it is
  * expected to print. */
object Server extends App {
  implicit val system = ActorSystem("sangria")
  implicit val materializer = ActorMaterializer()

  import system.dispatcher

  /** Binds a `GET /graphql` route on 0.0.0.0:8080. The GraphQL query arrives
    * in the `query`/`variables`/`operation` request parameters; the auth
    * token, if any, in the `SecurityToken` header. */
  def startServer() = {
    val userRepo = new Data.UserRepo
    val colorRepo = new Data.ColorRepo

    val schema = Schema.middlewareBased.schema

    // Alternative approach
    // val schema = Schema.resolveBased.schema

    val route: Route =
      (get & path("graphql")) {
        parameters('query, 'variables.?, 'operation.?) { (query, variables, operation) ⇒
          optionalHeaderValueByName("SecurityToken") { token ⇒
            QueryParser.parse(query) match {

              // query parsed successfully, time to execute it!
              case Success(queryAst) ⇒
                complete(Executor.execute(schema, queryAst,
                  userContext = new Data.SecureContext(token, userRepo, colorRepo),
                  exceptionHandler = Data.errorHandler,
                  middleware = Schema.middlewareBased.SecurityEnforcer :: Nil,
                  operationName = operation,
                  variables = variables map (_.parseJson) getOrElse JsObject.empty))

              // can't parse GraphQL query, return error
              case Failure(error) ⇒
                complete(BadRequest, JsObject("error" → JsString(error.getMessage)))
            }
          }
        }
      }

    Http().bindAndHandle(route, "0.0.0.0", 8080)
  }

  /** Fires a series of example requests against the server started above,
    * printing each query together with its response. */
  def clientExampleRequests() = {
    // Blocks (up to 5 s) for the response and prints query + pretty JSON.
    def printResult(query: String, result: Future[JsValue]) =
      println(s"Query:\n\n$query\n\nResult:\n\n${Await.result(result, 5 seconds).prettyPrint}\n\n")

    // invalid token
    {
      val query =
        """
         query ShowEverything {
           me {userName, permissions}
           colors
         }
        """

      val result = Http().singleRequest(HttpRequest(uri = Uri("http://localhost:8080/graphql").withQuery(Query("query" → query)))
        .withHeaders(RawHeader("SecurityToken", "some invalid token")))

      printResult(query, result.flatMap(Unmarshal(_).to[JsValue]))

      // Prints:
      //
      // {
      //   "data":{
      //     "me":null,
      //     "colors":null
      //   },
      //   "errors":[{
      //     "message":"Invalid token",
      //     "field":"me",
      //     "locations":[{
      //       "line":3,
      //       "column":11
      //     }]
      //   },{
      //     "message":"Invalid token",
      //     "field":"colors",
      //     "locations":[{
      //       "line":4,
      //       "column":11
      //     }]
      //   }]
      // }
    }

    // admin login with subsequent fetch. Please note, that admin is allowed to view his permissions, so he sees them.
    {
      val loginQuery =
        """
         mutation JustLogin {
           login(userName: "admin", password: "secret")
         }
        """

      val loginResult = Http().singleRequest(HttpRequest(uri = Uri("http://localhost:8080/graphql").withQuery(Query("query" → loginQuery))))
        .flatMap(Unmarshal(_).to[JsValue])

      printResult(loginQuery, loginResult)

      // Prints:
      //
      // {
      //   "data":{
      //     "login":"a4d7fc91-e490-446e-9d4c-90b5bb22e51d",
      //   }
      // }

      // Extract the session token from the login response and reuse it below.
      val JsString(token) = Await.result(loginResult, 5 seconds).asJsObject.fields("data").asJsObject.fields("login")

      val query =
        """
         query ShowEverything {
           me {userName, permissions}
           colors
         }
        """

      val result = Http().singleRequest(HttpRequest(uri = Uri("http://localhost:8080/graphql").withQuery(Query("query" → query)))
        .withHeaders(RawHeader("SecurityToken", token)))

      printResult(query, result.flatMap(Unmarshal(_).to[JsValue]))

      // Prints:
      //
      // {
      //   "data":{
      //     "me":{
      //       "userName":"admin",
      //       "permissions":["VIEW_PERMISSIONS","EDIT_COLORS","VIEW_COLORS"]
      //     },
      //     "colors":["red","green","blue"]
      //   }
      // }
    }

    // normal user login with subsequent fetch. Please note, that normal users are not allowed to view their permissions.
    {
      val loginQuery =
        """
         mutation JustLogin {
           login(userName: "john", password: "apples")
         }
        """

      val loginResult = Http().singleRequest(HttpRequest(uri = Uri("http://localhost:8080/graphql").withQuery(Query("query" → loginQuery))))
        .flatMap(Unmarshal(_).to[JsValue])

      printResult(loginQuery, loginResult)

      // Prints:
      //
      // {
      //   "data":{
      //     "login":"a4d7fc91-e490-446e-9d4c-90b5bb22e51d",
      //   }
      // }

      val JsString(token) = Await.result(loginResult, 5 seconds).asJsObject.fields("data").asJsObject.fields("login")

      val query =
        """
         query ShowEverything {
           me {userName, permissions}
           colors
         }
        """

      val result = Http().singleRequest(HttpRequest(uri = Uri("http://localhost:8080/graphql").withQuery(Query("query" → query)))
        .withHeaders(RawHeader("SecurityToken", token)))

      printResult(query, result.flatMap(Unmarshal(_).to[JsValue]))

      // Prints:
      //
      // {
      //   "data":{
      //     "me":{
      //       "userName":"john",
      //       "permissions":null
      //     },
      //     "colors":["red","green","blue"]
      //   },
      //   "errors":[{
      //     "message":"You do not have permission to do this operation",
      //     "field":"me.permissions",
      //     "locations":[{
      //       "line":3,
      //       "column":25
      //     }]
      //   }]
      // }
    }

    // login and immediately add colors. It's possible because `UpdateCtx` action updated the context
    // for other mutation properties. Since mutation is strictly sequential, `addColor` gets updated context with the token
    {
      val query =
        """
         mutation LoginAndMutate {
           login(userName: "admin", password: "secret")

           withMagenta: addColor(color: "magenta")
           withOrange: addColor(color: "orange")
         }
        """

      val result = Http().singleRequest(HttpRequest(uri = Uri("http://localhost:8080/graphql").withQuery(Query("query" → query))))

      printResult(query, result.flatMap(Unmarshal(_).to[JsValue]))

      // Prints:
      //
      // {
      //   "data":{
      //     "login":"a4d7fc91-e490-446e-9d4c-90b5bb22e51d",
      //     "withMagenta":["red","green","blue","magenta"],
      //     "withOrange":["red","green","blue","magenta","orange"]
      //   }
      // }
    }

    // failed login. Caused all fields to produce errors. Since fields are optional, they will be `null` in JSON and no updates will take place
    {
      val query =
        """
         mutation MutationWithWrongPassword {
           login(userName: "admin", password: "please let me in")

           withMagenta: addColor(color: "magenta")
           withOrange: addColor(color: "orange")
         }
        """

      val result = Http().singleRequest(HttpRequest(uri = Uri("http://localhost:8080/graphql").withQuery(Query("query" → query))))

      printResult(query, result.flatMap(Unmarshal(_).to[JsValue]))

      // Prints:
      //
      // {
      //   "data":{
      //     "login":null,
      //     "withMagenta":null,
      //     "withOrange":null
      //   },
      //   "errors":[{
      //     "message":"UserName or password is incorrect",
      //     "field":"login",
      //     "locations":[{
      //       "line":3,
      //       "column":11
      //     }]
      //   },{
      //     "message":"Invalid token",
      //     "field":"withMagenta",
      //     "locations":[{
      //       "line":5,
      //       "column":11
      //     }]
      //   },{
      //     "message":"Invalid token",
      //     "field":"withOrange",
      //     "locations":[{
      //       "line":6,
      //       "column":11
      //     }]
      //   }]
      // }
    }

    // Login is successful, but user does not have permission to change the colors
    {
      val query =
        """
         mutation MutationWithWrongPermissions {
           login(userName: "john", password: "apples")

           withMagenta: addColor(color: "magenta")
           withOrange: addColor(color: "orange")
         }
        """

      val result = Http().singleRequest(HttpRequest(uri = Uri("http://localhost:8080/graphql").withQuery(Query("query" → query))))

      printResult(query, result.flatMap(Unmarshal(_).to[JsValue]))

      // Prints:
      //
      // {
      //   "data":{
      //     "login":"f40ea6b0-4479-4552-9c76-51bb4c6f9d29",
      //     "withMagenta":null,
      //     "withOrange":null
      //   },
      //   "errors":[{
      //     "message":"You do not have permission to do this operation",
      //     "field":"withMagenta",
      //     "locations":[{
      //       "line":5,
      //       "column":11
      //     }]
      //   },{
      //     "message":"You do not have permission to do this operation",
      //     "field":"withOrange",
      //     "locations":[{
      //       "line":6,
      //       "column":11
      //     }]
      //   }]
      // }
    }
  }

  startServer()
  clientExampleRequests()
}
| OlegIlyenko/sangria-auth-example | src/main/scala/Server.scala | Scala | apache-2.0 | 10,039 |
package warsztat
import scala.concurrent.duration.{DurationInt, FiniteDuration}
class Implicits {
  // Default timeout supplied implicitly to `execute`. Written as `2.seconds`
  // (via DurationInt) instead of postfix `2 seconds`, which requires the
  // `scala.language.postfixOps` feature import and fails under -Xfatal-warnings.
  implicit val TIMEOUT: FiniteDuration = 2.seconds

  /** Executes a lookup for the given id within the (implicit) timeout.
    *
    * @param id      identifier to look up
    * @param timeout maximum duration of the operation; defaults to [[TIMEOUT]]
    * @return the result, if any — currently unimplemented (`???`)
    */
  def execute(id: String)
             (implicit timeout: FiniteDuration): Option[String] = ???

  // NOTE(review): evaluated eagerly during construction, so instantiating
  // this class currently throws NotImplementedError from `execute`.
  val result = execute("1")
}
| bjankie1/warsztat-scala | src/main/scala/warsztat/Implicits.scala | Scala | apache-2.0 | 281 |
package com.nabijaczleweli.minecrasmer.block
import com.nabijaczleweli.minecrasmer.entity.tile.TileEntityOverclocker
import net.minecraft.block.ITileEntityProvider
import net.minecraft.world.World
/** The "overclocker" accessory block. Mixes in [[ITileEntityProvider]] so
  * Minecraft attaches a [[TileEntityOverclocker]] to every placed instance. */
object BlockAccessoryOverclocker extends AccessoryGeneric("overclocker") with ITileEntityProvider {
  /** Supplies the tile entity backing a newly placed block. */
  override def createNewTileEntity(world: World, meta: Int): TileEntityOverclocker =
    new TileEntityOverclocker
}
| nabijaczleweli/ASMifier | src/main/scala/com/nabijaczleweli/minecrasmer/block/BlockAccessoryOverclocker.scala | Scala | mit | 390 |
package net.danielkza.http2.hpack
import scalaz._
import scalaz.std.AllInstances._
import scalaz.syntax.traverse._
import org.specs2.matcher.DataTables
import org.specs2.mutable.Specification
import org.specs2.specification.Scope
import akka.util.ByteString
import net.danielkza.http2.TestHelpers
import net.danielkza.http2.api.Header
import net.danielkza.http2.hpack.coders.{HeaderBlockCoder, HeaderCoder}
/** Round-trip specification for [[HeaderBlockCoder]]: a fixed header list is
  * paired with the HPACK representations the coder is expected to produce,
  * and encoding/decoding are checked against their concatenated wire form. */
class HeaderBlockCoderTest extends Specification with DataTables with TestHelpers {
  import HeaderRepr._
  import Header.{plain, secure}

  // Converts a position in the dynamic table to an absolute index: dynamic
  // entries are addressed after the static table.
  def dynTablePos(x: Int) = StaticTable.default.length + x

  // Headers in encoding order, each paired with its expected representation.
  // The inline comments track how the dynamic table (dt) grows.
  val (headers, reprs) = List(
    // fully indexed from static table
    plain (":status", "200" ) -> Indexed(8),
    // name indexed from static table, dt-size = 1
    plain (":status", "999" ) -> IncrementalLiteralWithIndexedName(14, bs"999"),
    // new literal, dt-size = 2
    plain ("fruit", "banana") -> IncrementalLiteral(bs"fruit", bs"banana"),
    // new literal, dt-size = 3
    plain ("color", "yellow") -> IncrementalLiteral(bs"color", bs"yellow"),
    // repeat, fully indexed from dynamic table
    plain ("fruit", "banana") -> Indexed(dynTablePos(2)),
    // name indexed from dynamic table, dt-size = 4
    plain ("fruit", "apple" ) -> IncrementalLiteralWithIndexedName(dynTablePos(2), bs"apple"),
    // repeat, fully indexed from dynamic table
    plain ("fruit", "apple" ) -> Indexed(dynTablePos(1)),
    // repeat, fully indexed from dynamic table
    plain ("color", "yellow") -> Indexed(dynTablePos(2)),
    // literal never indexed
    secure("drink", "soda"  ) -> NeverIndexed(bs"drink", bs"soda"),
    // repeat literal never indexed, must not be in dynamic table
    secure("drink", "soda"  ) -> NeverIndexed(bs"drink", bs"soda"),
    // literal never indexed, name indexed from dynamic table
    secure("color", "blue"  ) -> NeverIndexedWithIndexedName(dynTablePos(2), bs"blue")
  ).unzip

  // No Huffman compression, so the expected wire bytes are predictable.
  val headerCoder = new HeaderCoder(HeaderCoder.compress.Never)

  // Expected wire form: each representation encoded independently, then
  // concatenated (failing the test setup if any encoding step errors).
  val encoded = {
    val parts: \\/[HeaderError, List[ByteString]] = reprs.map(headerCoder.encode).sequenceU
    parts.map(_.reduce(_ ++ _)).getOrElse(throw new AssertionError)
  }

  // Fresh coder per example so dynamic-table state never leaks between tests.
  trait Context extends Scope {
    val coder = new HeaderBlockCoder(headerCoder = headerCoder)
  }

  "HeaderBlockCoderTest" should {
    "encode" in {
      "a sequence of headers correctly" >> new Context {
        coder.encode(headers) must_== \\/-(encoded)
      }
    }

    "decode" in {
      "a sequence of headers correctly" >> new Context {
        coder.decode(encoded) must_== \\/-((headers, encoded.length))
      }
    }

    // TODO(review): placeholder — dynamic table resizing is not tested yet.
    "withDynamicTableCapacity" in {
      ok
    }
  }
}
| danielkza/h2scala | core/src/test/scala/net/danielkza/http2/hpack/HeaderBlockCoderTest.scala | Scala | apache-2.0 | 2,706 |
package com.mattrjacobs.rxscala.slides
import rx.lang.scala.Observable
/** Slide example: for a user's first 10 videos, fetch metadata, bookmark and
  * rating concurrently and merge the three maps into one record per video. */
trait Slide141 extends App {

  // Abstract service interface the example composes against.
  trait VideoService {
    trait Video {
      val id: Long
      def getMetadata(): Observable[Map[String, Object]]
    }
    def getVideos(userId: Long): Observable[Video]
    def getBookmark(video: Video, userId: Long): Observable[Map[String, Object]]
    def getRating(video: Video, userId: Long): Observable[Map[String, Object]]
  }

  // Supplied by whoever mixes in this trait.
  val videoService: VideoService

  /** Streams one merged attribute map per video (at most 10 videos). */
  def getVideos(userId: Long): Observable[Map[String, Any]] =
    videoService.getVideos(userId)
      .take(10)
      .flatMap(video => {
        // Three independent streams, one per attribute source, each shaped
        // into a single-purpose map.
        val metadata = video.getMetadata.map(md =>
          Map("title" -> md.get("title"),
            "length" -> md.get("duration")))

        val bookmark = videoService.getBookmark(video, userId).map(b =>
          Map("position" -> b.get("position")))

        val rating = videoService.getRating(video, userId).map(r =>
          Map("rating" -> r.get("rating")))

        // Zip the three streams; each zipped emission is a 3-element list
        // (metadata, bookmark, rating) merged into one record with the id.
        Observable.zip(Observable(List(metadata, bookmark, rating): _*)).map {
          case m :: b :: r :: Nil =>
            Map("id" -> video.id) ++ m ++ b ++ r
        }
      })
}
| mattrjacobs/RxScalaDemo | src/main/scala/slides/Slide141.scala | Scala | apache-2.0 | 1,173 |
package com.typesafe.slick.testkit.tests
import org.junit.Assert._
import com.typesafe.slick.testkit.util.{TestkitTest, TestDB}
/** Testkit suite exercising implicit joins, inner joins and outer joins over
  * a small categories/posts schema, checking both generated SQL execution and
  * null handling on the outer side. */
class JoinTest(val tdb: TestDB) extends TestkitTest {
  import tdb.profile.simple._

  object Categories extends Table[(Int, String)]("categories") {
    def id = column[Int]("id")
    def name = column[String]("name")
    def * = id ~ name
  }

  object Posts extends Table[(Int, String, Int)]("posts") {
    def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
    def title = column[String]("title")
    def category = column[Int]("category")
    def * = id ~ title ~ category
  }

  def test {
    (Categories.ddl ++ Posts.ddl).create

    Categories insertAll (
      (1, "Scala"),
      (2, "ScalaQuery"),
      (3, "Windows"),
      (4, "Software")
    )
    // "Test Post" has category -1, so it only shows up in right/outer joins;
    // category 4 ("Software") has no posts, so it only shows up on the left.
    Posts.title ~ Posts.category insertAll (
      ("Test Post", -1),
      ("Formal Language Processing in Scala, Part 5", 1),
      ("Efficient Parameterized Queries in ScalaQuery", 2),
      ("Removing Libraries and HomeGroup icons from the Windows 7 desktop", 3),
      ("A ScalaQuery Update", 2)
    )

    // Implicit (cross + where) join.
    val q1 = (for {
      c <- Categories
      p <- Posts if c.id is p.category
    } yield p.id ~ c.id ~ c.name ~ p.title).sortBy(_._1)
    println("Implicit join: "+q1.selectStatement)
    q1.foreach(x => println("  "+x))
    assertEquals(List((2,1), (3,2), (4,3), (5,2)), q1.map(p => p._1 ~ p._2).list)

    // Explicit inner join: same rows as the implicit join.
    val q2 = (for {
      (c,p) <- Categories innerJoin Posts on (_.id is _.category)
    } yield p.id ~ c.id ~ c.name ~ p.title).sortBy(_._1)
    println("Explicit inner join: "+q2.selectStatement)
    q2.foreach(x => println("  "+x))
    assertEquals(List((2,1), (3,2), (4,3), (5,2)), q2.map(p => p._1 ~ p._2).list)

    // Left outer join: unmatched category yields NULL post columns, read
    // safely via `.?` + getOrElse.
    val q3 = (for {
      (c,p) <- Categories leftJoin Posts on (_.id is _.category)
    } yield (p.id, p.id.?.getOrElse(0) ~ c.id ~ c.name ~ p.title.?.getOrElse(""))).sortBy(_._1.nullsFirst).map(_._2)
    println("Left outer join (nulls first): "+q3.selectStatement)
    q3.foreach(x => println("  "+x))
    assertEquals(List((0,4), (2,1), (3,2), (4,3), (5,2)), q3.map(p => p._1 ~ p._2).list)

    // Same join without `.?`: must fail at read time on the NULL row.
    val q3a = (for {
      (c,p) <- Categories leftJoin Posts on (_.id is _.category)
    } yield p.id ~ c.id ~ c.name ~ p.title).sortBy(_._1.nullsFirst)
    assertFail(q3a.list) // reads NULL from non-nullable column

    val q3b = (for {
      (c,p) <- Categories leftJoin Posts on (_.id is _.category)
    } yield (p.id, p.id.?.getOrElse(0) ~ c.id ~ c.name ~ p.title.?.getOrElse(""))).sortBy(_._1.nullsLast).map(_._2)
    println("Left outer join (nulls last): "+q3b.selectStatement)
    q3b.foreach(x => println("  "+x))
    assertEquals(List((2,1), (3,2), (4,3), (5,2), (0,4)), q3b.map(p => p._1 ~ p._2).list)

    // Right outer join — only on back-ends that support it.
    ifCap(scap.joinRight) {
      val q4 = (for {
        (c,p) <- Categories rightJoin Posts on (_.id is _.category)
      } yield p.id ~ c.id.?.getOrElse(0) ~ c.name.?.getOrElse("") ~ p.title).sortBy(_._1)
      println("Right outer join: "+q4.selectStatement)
      q4.foreach(x => println("  "+x))
      assertEquals(List((1,0), (2,1), (3,2), (4,3), (5,2)), q4.map(p => p._1 ~ p._2).list)
    }
  }
}
| boldradius/slick | slick-testkit/src/main/scala/com/typesafe/slick/testkit/tests/JoinTest.scala | Scala | bsd-2-clause | 3,143 |
/**
* Copyright 2014 Frank Austin Nothaft
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.fnothaft.snark.rdd
import org.apache.spark.SparkContext._
import org.apache.spark.{ Logging, SparkContext }
import org.apache.spark.rdd.RDD
import net.fnothaft.snark.SnarkContext._
import net.fnothaft.snark.{
ArrayStructure,
DenseArrayStructure,
NestedIndex,
SparseArrayStructure
}
import scala.annotation.tailrec
import scala.math.{ log => mathLog, pow }
import scala.reflect.ClassTag
object NestedRDD {
  /**
   * Creates a flat index RDD with n values. Package private.
   *
   * @param sc SparkContext to use for creating this RDD.
   * @param n Number of values to have in RDD.
   * @return Returns an RDD containing indices from 0 to n.
   */
  private[rdd] def index(sc: SparkContext, n: Int): RDD[NestedIndex] = {
    // Largest power of two <= n; the fill below doubles the RDD at each step
    // by offsetting existing indices with decreasing powers of two, then
    // trims the overshoot back to [0, n).
    var step = pow(2, (mathLog(n.toDouble) / mathLog(2.0)).toInt).toInt
    var rdd: RDD[NestedIndex] = sc.parallelize(Seq(NestedIndex(0, 0)))

    @tailrec def fillIn(step: Int, rdd: RDD[NestedIndex]): RDD[NestedIndex] = {
      if (step < 1) {
        rdd
      } else {
        fillIn(step / 2, rdd.flatMap(i => Seq(i, NestedIndex(0, i.idx + step))))
      }
    }

    fillIn(step, rdd).filter(_.idx < n)
  }

  /**
   * Builds a new nested RDD out of a currently available RDD. Package private.
   *
   * @param rdd RDD to build from.
   * @param structure Structure of this RDD.
   * @return Returns a nested RDD.
   */
  private[snark] def apply[T](rdd: RDD[(NestedIndex, T)],
                              structure: ArrayStructure,
                              strategy: PartitioningStrategy.Strategy = PartitioningStrategy.Auto)(implicit tTag: ClassTag[T]): NestedRDD[T] = strategy match {
    case PartitioningStrategy.Auto => {
      // NOTE(review): automatic strategy selection is not implemented yet;
      // calling apply with the default (Auto) strategy currently throws
      // NotImplementedError.
      ???
    }
    case _ => {
      new NestedRDD[T](rdd, structure, strategy).repartition()
    }
  }
}
/**
 * A distributed nested (segmented) array. Every element is addressed by a
 * [[NestedIndex]] (nest id plus position inside the nest); the overall shape
 * is tracked separately in `structure`, and `strategy` records how the
 * backing RDD is (or should be) partitioned.
 */
class NestedRDD[T] private[snark] (private[snark] val rdd: RDD[(NestedIndex, T)],
                                   private[snark] val structure: ArrayStructure,
                                   val strategy: PartitioningStrategy.Strategy = PartitioningStrategy.None) extends Serializable with Logging {

  /**
   * Maps a function to every element of this RDD.
   *
   * @param op A function to map to every element of this RDD.
   * @return A new nested RDD with element type U. The RDD will have the same structure
   * as the RDD the map is called on.
   *
   * @see mapWithIndex
   */
  def map[U](op: T => U)(implicit uTag: ClassTag[U]): NestedRDD[U] = {
    NestedRDD[U](rdd.map(kv => {
      val (idx, v) = kv
      (idx, op(v))
    }), structure, strategy)
  }

  /**
   * Maps a function to every element of this RDD, along with the index of each element.
   *
   * @param op A function that maps the value of a point, as well as it's index, to a new value.
   * @return A new nested RDD with element type U. This RDD will have the same structure as
   * the RDD the map is called on. Additionally, each point will retain it's index.
   *
   * @see map
   */
  def mapWithIndex[U](op: (T, NestedIndex) => U)(implicit uTag: ClassTag[U]): NestedRDD[U] = {
    NestedRDD[U](rdd.map(kv => {
      val (idx, v) = kv
      (idx, op(v, idx))
    }), structure, strategy)
  }

  /**
   * Executes a function on every element of this RDD. Does not create a new RDD. May be
   * called to cause side effects.
   *
   * @param op Function to run on every element.
   */
  def foreach(op: T => Unit) {
    rdd.foreach(kv => op(kv._2))
  }

  /**
   * Executes a function on every element of this RDD, along with the index of the element.
   * Does not create a new RDD. May be called to cause side effects.
   *
   * @param op Function to run on every element.
   */
  def foreach(op: (NestedIndex, T) => Unit) {
    rdd.foreach(kv => op(kv._1, kv._2))
  }

  /**
   * Performs a reduction operation on this RDD.
   *
   * @param op The reducing function to use. This function should be associative and commutative.
   * @return Returns a single value derived by running the reduction function on all RDD elements.
   */
  def reduce(op: (T, T) => T)(implicit tTag: ClassTag[T]): T = {
    rdd.map(kv => {
      val (idx, v) = kv
      v
    }).reduce(op)
  }

  /**
   * Returns a count of the number of elements in this array.
   *
   * @note This method uses internal state and does not trigger any side execution. For behavior
   * similar to org.apache.spark.rdd.RDD.count, use countWithSideEffects
   *
   * @return The number of elements in this RDD.
   *
   * @see countWithSideEffects
   */
  def count: Long = structure.elements

  /**
   * Returns a count of the number of elements in this array.
   *
   * @note This method does not use internal state and behaves as org.apache.spark.rdd.RDD.count
   * does, at the cost of speed.
   *
   * @return The number of elements in this RDD.
   *
   * @see count
   */
  def countWithSideEffects: Long = {
    val cRdd = rdd.count
    val cStr = count

    // Sanity-check the cached structure against the actual RDD contents.
    assert(cStr == cRdd,
      "Array structure count (" + cStr + ") and RDD count (" + cRdd + ") disagree.")

    cRdd
  }

  /**
   * Flattens the nested structure of this RDD into a nested RDD with a single level of hierarchy.
   *
   * @return Returns a flat nested RDD.
   */
  def flatten()(implicit tTag: ClassTag[T]): NestedRDD[T] = {
    // Re-key all values into nest 0, positions 0 until count.
    // NOTE(review): zip requires both RDDs to have identical partitioning
    // and element counts per partition — assumed to hold here.
    val idxRdd = NestedRDD.index(rdd.context, count.toInt)

    NestedRDD[T](idxRdd.zip(rdd.map(kv => kv._2)), structure, strategy)
  }

  /**
   * Performs an index-based combining operation.
   *
   * @param op Binary combining operation.
   * @param index Nested index RDD to use. Must have same structure as this nested RDD.
   */
  def combine(op: (T, T) => T)(index: NestedRDD[NestedIndex])(
    implicit tTag: ClassTag[T]): NestedRDD[T] = {
    assert(structure.equals(index.structure),
      "Cannot do a combine on two nested arrays with different sizes.")

    // Re-key each value by the target index supplied in `index`, then merge
    // collisions with `op`.
    NestedRDD[T](rdd.zip(index.rdd)
      .map(kvk => {
        val ((_, v), (_, k)) = kvk
        (k, v)
      }).groupByKey()
      .map(kv => {
        val (k, s) = kv
        (k, s.reduce(op))
      }), structure, strategy)
  }

  // Derives an ArrayStructure from the indices actually present in `pRdd`:
  // dense when every nest's max index is below its element count (i.e. the
  // indices look contiguous from 0), sparse otherwise.
  protected def calculatePartitions[U](pRdd: RDD[(NestedIndex, U)]): ArrayStructure = {
    val numSparsePartitions = pRdd.context.accumulator(0)

    val structure = pRdd.map(kv => (kv._1.nest, kv._1.idx))
      .groupByKey()
      .map(kv => {
        val (nest, indices) = kv

        // collect length and max val
        val l = indices.size
        val m = indices.reduce(_ max _)

        // if we have a higher max value than length, we are sparse
        if (l < m) {
          numSparsePartitions += 1
        }

        (nest, l)
      }).collect()

    if (numSparsePartitions.value == 0) {
      new DenseArrayStructure(structure.toSeq
        .sortBy(kv => kv._1)
        .map(kv => kv._2.toLong))
    } else {
      new SparseArrayStructure(structure.map(kv => (kv._1, kv._2.toLong)).toMap)
    }
  }

  /**
   * Scatters each element to zero or more target indices and merges
   * collisions, recomputing the array structure from the produced indices.
   *
   * @param op Maps one indexed element to the indexed elements it produces.
   * @param combineOp Merge function for values that land on the same index;
   *                  should be associative and commutative.
   */
  def multiScatter(op: ((NestedIndex, T)) => Iterable[(NestedIndex, T)])(
    combineOp: (T, T) => T)(
      implicit tTag: ClassTag[T]): NestedRDD[T] = {
    // flat map to remap values
    val multiMappedRdd = rdd.flatMap(op)
      .groupByKey()
      .map(kv => (kv._1, kv._2.reduce(combineOp)))

    // cache multimapped rdd
    multiMappedRdd.cache()

    // calculate index
    val newIdx = calculatePartitions(multiMappedRdd)

    NestedRDD[T](multiMappedRdd, newIdx, strategy)
  }

  // Tail-recursive exclusive scan over an index-sorted iterator: pairs each
  // index with the running value *before* its element is folded in. The
  // returned list is in reverse encounter order — callers re-key by index,
  // so ordering is irrelevant. Also returns the final accumulated value.
  @tailrec protected final def doScan[U](scanOp: (U, T) => U,
                                         iter: Iterator[(NestedIndex, T)],
                                         runningValue: U,
                                         l: List[(NestedIndex, U)] = List()): (List[(NestedIndex, U)], U) = {
    if (!iter.hasNext) {
      (l, runningValue)
    } else {
      val (currentIndex, value) = iter.next

      val nextL = (currentIndex, runningValue) :: l
      val nextVal = scanOp(runningValue, value)

      doScan(scanOp, iter, nextVal, nextL)
    }
  }

  /**
   * Applies a prefix scan over the RDD. The scan proceeds in order given by the
   * indices of all elements in the RDD.
   *
   * @param zero The zero value for the scan.
   * @param op The function to apply during the scan.
   * @return Returns a scanned RDD.
   */
  def scan(zero: T)(op: (T, T) => T)(implicit tTag: ClassTag[T]): NestedRDD[T] = {
    scan[T](zero, zero)(op, op)
  }

  /**
   * Applies a prefix scan over the RDD. The scan proceeds in order given by the
   * indices of all elements in the RDD.
   *
   * Implemented as a per-nest scan followed by a driver-side pass over the
   * per-nest totals, which are then broadcast back into each nest via
   * `updateOp`.
   *
   * @param scanZero The zero value for the scan.
   * @param updateZero The zero value for the update pass.
   * @param scanOp The function to apply during the scan.
   * @param updateOp The function to apply during the update pass.
   * @return Returns a scanned RDD.
   */
  def scan[U](scanZero: U,
              updateZero: U)(scanOp: (U, T) => U,
                             updateOp: (U, U) => U)(implicit tTag: ClassTag[T],
                                                    uTag: ClassTag[U]): NestedRDD[U] = {
    // do the first scan pass
    val firstPass = rdd.groupBy(kv => kv._1.nest)
      .map(kv => {
        val (nest, nestValues) = kv

        // sort nest values
        val sortedNest = nestValues.toSeq
          .sortBy(p => p._1)
          .toIterator

        // scan
        val (newValues, propegate) = doScan(scanOp, sortedNest, scanZero)

        (nest, newValues, propegate)
      })

    // cache the first pass
    firstPass.cache()

    // collect the propegated values
    val collectedPropegates = firstPass.map(kv => (kv._1, kv._3))
      .collect
      .toSeq
      .sortBy(kv => kv._1)
      .map(kv => kv._2)
      .toArray

    // do scan update...
    var runningValue = updateZero
    (0 until collectedPropegates.length).foreach(i => {
      val currentValue = collectedPropegates(i)

      // update in place
      collectedPropegates(i) = runningValue

      // calculate new running value
      runningValue = updateOp(runningValue, currentValue)
    })

    // map and do update
    val finalScanRDD = firstPass.flatMap(kv => kv._2)
      .map(kv => {
        val (idx, value) = kv

        // look up update
        val update = collectedPropegates(idx.nest)

        // update and return
        (idx, updateOp(value, update))
      })

    // unpersist cached rdd
    firstPass.unpersist()

    NestedRDD[U](finalScanRDD, structure, strategy)
  }

  /**
   * Executes an elemental operation across two nested RDDs. The two nested RDDs must have the
   * same structure. In this operation, both elements at an index have a function applied to them.
   *
   * @param op Function to apply.
   * @param r Other nested RDD to perform P operation on. Must have the same structure as this RDD.
   */
  def p[U, V](op: (T, U) => V)(r: NestedRDD[U])(implicit uTag: ClassTag[U], vTag: ClassTag[V]): NestedRDD[V] = {
    assert(structure.equals(r.structure),
      "Cannot do a p-operation on two nested arrays with different sizes: " +
        structure + ", " + r.structure)

    // NOTE(review): relies on both RDDs being identically partitioned and
    // sorted so that zip pairs matching indices; the assert below verifies it.
    NestedRDD[V](rdd.zip(r.rdd)
      .map(kvp => {
        val ((idx, t), (idx2, u)): ((NestedIndex, T), (NestedIndex, U)) = kvp
        assert(idx == idx2)
        (idx, op(t, u))
      }), structure, strategy)
  }

  // Pairwise merge with another nested RDD of the same element type. Indices
  // present on only one side are kept (as a 1-element group) when
  // `preserveUnpaired` is set, and dropped otherwise.
  // NOTE(review): the println calls below look like leftover debug output.
  def p(op: (T, T) => T, preserveUnpaired: Boolean)(r: NestedRDD[T])(implicit tTag: ClassTag[T]): NestedRDD[T] = {
    println("nested p")
    NestedRDD[T]((rdd ++ r.rdd).groupByKey().flatMap(kv => {
      val (idx, seq) = kv

      if (seq.size == 2 || preserveUnpaired) {
        println("reducing " + idx + ", " + seq + " to " + seq.reduce(op))
        Some((idx, seq.reduce(op)))
      } else {
        None
      }
    }), structure, strategy)
  }

  /**
   * Applies a reduce within each nested segment. This operates on all nested segments.
   *
   * @param op Reduction function to apply.
   * @return Returns a map, which maps each nested segment ID to the reduction value.
   */
  def segmentedReduce(op: (T, T) => T)(implicit tTag: ClassTag[T]): Map[Int, T] = {
    segmentedReduceToRdd(op).collect.toMap
  }

  /**
   * Applies a reduce within each nested segment. This operates on all nested segments.
   *
   * @param op Reduction function to apply.
   * @return Returns an RDD of (segment ID, reduction value) pairs.
   */
  def segmentedReduceToRdd(op: (T, T) => T)(implicit tTag: ClassTag[T]): RDD[(Int, T)] = {
    rdd.map(kv => (kv._1.nest, kv._2))
      .groupByKey()
      .map(ks => {
        val (k, s): (Int, Iterable[T]) = ks
        (k, s.reduce(op))
      })
  }

  /** Per-segment scan with a single zero value and one operator. */
  def segmentedScan(zero: T)(op: (T, T) => T)(implicit tTag: ClassTag[T]): NestedRDD[T] = {
    segmentedScan[T](zero)(op, op)
  }

  /** Per-segment scan with one zero value per segment and one operator. */
  def segmentedScan(zeros: Seq[T])(op: (T, T) => T)(implicit tTag: ClassTag[T]): NestedRDD[T] = {
    segmentedScan[T](zeros)(op, op)
  }

  /**
   * Performs a scan on all of the segments of this RDD.
   *
   * @param zero Zero value to use in every segment.
   * @param scanOp Function to use for the scan within a segment.
   * @param updateOp Currently unused in this overload's implementation.
   * @return New RDD where each segment has been operated on by a scan.
   */
  def segmentedScan[U](zero: U)(scanOp: (U, T) => U,
                                updateOp: (U, U) => U)(implicit uTag: ClassTag[U]): NestedRDD[U] = {
    segmentedScan((0 until structure.nests).map(i => zero))(scanOp, updateOp)
  }

  /**
   * Performs a scan on all of the segments of this RDD, with a different zero value
   * per each segment. The scan is exclusive: `scanLeft`'s final value is
   * dropped, so each position holds the fold of all earlier positions.
   *
   * @param zeros Sequence of zero values, one per segment.
   * @param scanOp Function to use for the scan within a segment.
   * @param updateOp Currently unused in this overload's implementation.
   * @return New RDD where each segment has been operated on by a scan.
   */
  def segmentedScan[U](zeros: Seq[U])(scanOp: (U, T) => U,
                                      updateOp: (U, U) => U)(implicit uTag: ClassTag[U]): NestedRDD[U] = {
    assert(zeros.length == structure.nests,
      "Zeros must match to structure of RDD.")

    NestedRDD[U](rdd.keyBy(kv => kv._1.nest)
      .groupByKey()
      .flatMap(ns => {
        val (n, s) = ns
        val zero = zeros(n)

        val sorted = s.toSeq.sortBy(kv => kv._1)

        val idx = sorted.map(kv => kv._1)
        val vals = sorted.map(kv => kv._2)
          .scanLeft(zero)(scanOp)
          .dropRight(1)

        idx.zip(vals)
      }), structure, strategy)
  }

  /**
   * Returns the value at a certain nested index.
   * Triggers a full filter + collect; asserts exactly one element carries
   * the requested index.
   */
  def get(idx: NestedIndex)(implicit tTag: ClassTag[T]): T = {
    val collected = rdd.filter(kv => kv._1.equals(idx))
      .collect

    assert(collected.length != 0, "Value with index " + idx + " not found.")
    assert(collected.length == 1, "Cannot have more than one value with index " + idx)

    collected.head._2
  }

  /** Drops the indices, yielding a plain RDD of values. */
  def toRDD()(implicit tTag: ClassTag[T]): RDD[T] = {
    rdd.map(kv => kv._2)
  }

  // Sorts each partition's elements by index. Materializes every partition
  // in memory as a list, so partitions are assumed to fit on one executor.
  private def sortPartitions(repartitionedRdd: RDD[(NestedIndex, T)]): RDD[(NestedIndex, T)] = {
    repartitionedRdd.mapPartitions(iter => {
      iter.toList.sortBy(kv => kv._1).toIterator
    })
  }

  /**
   * Repartitions this RDD to mirror the concrete partitioning of `otherRdd`
   * (segmented or uniform); returns this RDD unchanged for any other layout.
   */
  def matchPartitioning[U](otherRdd: NestedRDD[U])(implicit tTag: ClassTag[T]): NestedRDD[T] = otherRdd match {
    case srdd: SegmentedRDD[U] => {
      new SegmentedRDD[T](sortPartitions(rdd.partitionBy(new SegmentPartitioner(structure))),
        structure,
        strategy)
    }
    case urdd: UniformRDD[U] => {
      new UniformRDD[T](sortPartitions(rdd.partitionBy(new UniformPartitioner(urdd.structure.asInstanceOf[DenseArrayStructure],
        urdd.rdd.partitions.length))),
        structure,
        strategy)
    }
    case _ => {
      // no-op
      this
    }
  }

  /** Persists the backing RDD. */
  def cache() = rdd.cache()

  /** Unpersists the backing RDD. */
  def unpersist() = rdd.unpersist()

  // Repartitions according to this RDD's own declared strategy.
  protected def repartition()(implicit tTag: ClassTag[T]): NestedRDD[T] = repartition(strategy)

  // Repartitions according to `newStrategy`. Uniform partitioning requires a
  // dense structure; sparse structures fall back to segmented partitioning.
  protected def repartition(newStrategy: PartitioningStrategy.Strategy)(implicit tTag: ClassTag[T]): NestedRDD[T] = newStrategy match {
    case PartitioningStrategy.Segmented => {
      new SegmentedRDD[T](sortPartitions(rdd.partitionBy(new SegmentPartitioner(structure))),
        structure,
        strategy)
    }
    case PartitioningStrategy.Uniform => {
      structure match {
        case dense: DenseArrayStructure => {
          new UniformRDD[T](sortPartitions(rdd.partitionBy(new UniformPartitioner(dense,
            rdd.partitions.length))),
            structure,
            strategy)
        }
        case _ => {
          log.warn("Cannot uniformly partition sparse nested RDD. Falling back to segmented structure...")
          new SegmentedRDD[T](sortPartitions(rdd.partitionBy(new SegmentPartitioner(structure))),
            structure,
            strategy)
        }
      }
    }
    case _ => {
      // no-op
      this
    }
  }

  /**
   * Collects the nested RDD on the master.
   *
   * @return The RDD in an array.
   */
  def collect(): Array[(NestedIndex, T)] = {
    rdd.collect()
  }
}
| fnothaft/snark | snark-core/src/main/scala/net/fnothaft/snark/rdd/NestedRDD.scala | Scala | apache-2.0 | 17,554 |
// Reads three partial grades plus final-exam and final-project grades from
// stdin, then prints the weighted course grade:
//   55% average of the three partials, 30% final exam, 15% final project.
// Fixes from the original: consistent variable names (the originals mixed
// "clalificacion"/"calificacion" and referenced undefined identifiers),
// `val Trabajo Final` (invalid identifier with a space) and `val final`
// (reserved keyword), and the removed bare Predef `readFloat()`.

/** Weighted course grade computed from the individual scores. */
def calificacionFinal(c1: Double, c2: Double, c3: Double,
                      examenFinal: Double, trabajoFinal: Double): Double = {
  val promedio = (c1 + c2 + c3) / 3
  promedio * .55 + examenFinal * .3 + trabajoFinal * .15
}

println("Primera claificacion")
val calificacion1 = scala.io.StdIn.readFloat()
println("Segunda claificacion")
val calificacion2 = scala.io.StdIn.readFloat()
println("Tercera claificacion")
val calificacion3 = scala.io.StdIn.readFloat()
println("Examen Final")
val calificacionExamenFinal = scala.io.StdIn.readFloat()
println("Trabajo Final")
val trabajoFinal = scala.io.StdIn.readFloat()

val calificacionCurso = calificacionFinal(calificacion1, calificacion2, calificacion3,
  calificacionExamenFinal, trabajoFinal)
println("Calificacion" + calificacionCurso)
| ReneFloresG/poo1-1 | Ej1-1.scala | Scala | mit | 484 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.builders
import monix.execution.{Ack, Cancelable}
import monix.execution.Ack.{Continue, Stop}
import monix.execution.cancelables.CompositeCancelable
import scala.util.control.NonFatal
import monix.reactive.Observable
import monix.reactive.observers.Subscriber
import scala.concurrent.Future
import scala.util.Success
/** Combines the latest values of three source observables.
  *
  * Once every source has emitted at least one value, each subsequent emission
  * from any source produces `f(a1, a2, a3)` from the most recent values.
  * The stream errors on the first upstream error and completes only after all
  * three sources have completed. All mutable state below is guarded by the
  * `self` lock.
  */
private[reactive] final
class CombineLatest3Observable[A1,A2,A3,+R]
  (obsA1: Observable[A1], obsA2: Observable[A2], obsA3: Observable[A3])
  (f: (A1,A2,A3) => R)
  extends Observable[R] { self =>

  def unsafeSubscribeFn(out: Subscriber[R]): Cancelable = {
    import out.scheduler

    var isDone = false
    // MUST BE synchronized by `self`
    var lastAck = Continue : Future[Ack]
    // MUST BE synchronized by `self`
    var elemA1: A1 = null.asInstanceOf[A1]
    // MUST BE synchronized by `self`
    var hasElemA1 = false
    // MUST BE synchronized by `self`
    var elemA2: A2 = null.asInstanceOf[A2]
    // MUST BE synchronized by `self`
    var hasElemA2 = false
    // MUST BE synchronized by `self`
    var elemA3: A3 = null.asInstanceOf[A3]
    // MUST BE synchronized by `self`
    var hasElemA3 = false
    // MUST BE synchronized by `self`
    var completedCount = 0

    // Applies `f` and pushes the result downstream; exceptions thrown by `f`
    // (and only those — note the `streamError` guard) terminate the stream.
    // MUST BE synchronized by `self`
    def rawOnNext(a1: A1, a2: A2, a3: A3): Future[Ack] =
      if (isDone) Stop else {
        var streamError = true
        try {
          val c = f(a1,a2,a3)
          streamError = false
          out.onNext(c)
        } catch {
          case NonFatal(ex) if streamError =>
            isDone = true
            out.onError(ex)
            Stop
        }
      }

    // Serializes emissions by chaining each one after the previous ack.
    // MUST BE synchronized by `self`
    def signalOnNext(a1: A1, a2: A2, a3: A3): Future[Ack] = {
      lastAck = lastAck match {
        case Continue => rawOnNext(a1,a2,a3)
        case Stop => Stop
        case async =>
          async.flatMap {
            // async execution, we have to re-sync
            case Continue => self.synchronized(rawOnNext(a1,a2,a3))
            case Stop => Stop
          }
      }
      lastAck
    }

    def signalOnError(ex: Throwable): Unit = self.synchronized {
      if (!isDone) {
        isDone = true
        out.onError(ex)
        lastAck = Stop
      }
    }

    // Called on each source's completion; completes downstream only once all
    // three sources are done, after the pending ack (if any) resolves.
    def signalOnComplete(): Unit = self.synchronized {
      completedCount += 1

      if (completedCount == 3 && !isDone) {
        lastAck match {
          case Continue =>
            isDone = true
            out.onComplete()
          case Stop =>
            () // do nothing
          case async =>
            async.onComplete {
              case Success(Continue) =>
                self.synchronized {
                  if (!isDone) {
                    isDone = true
                    out.onComplete()
                  }
                }
              case _ =>
                () // do nothing
            }
        }

        lastAck = Stop
      }
    }

    // Subscribe to all three sources; each subscriber records its latest
    // element under the `self` lock and emits once every source has a value.
    val composite = CompositeCancelable()

    composite += obsA1.unsafeSubscribeFn(new Subscriber[A1] {
      implicit val scheduler = out.scheduler

      def onNext(elem: A1): Future[Ack] = self.synchronized {
        if (isDone) Stop else {
          elemA1 = elem
          if (!hasElemA1) hasElemA1 = true

          if (hasElemA2 && hasElemA3)
            signalOnNext(elemA1, elemA2, elemA3)
          else
            Continue
        }
      }

      def onError(ex: Throwable): Unit =
        signalOnError(ex)
      def onComplete(): Unit =
        signalOnComplete()
    })

    composite += obsA2.unsafeSubscribeFn(new Subscriber[A2] {
      implicit val scheduler = out.scheduler

      def onNext(elem: A2): Future[Ack] = self.synchronized {
        if (isDone) Stop else {
          elemA2 = elem
          if (!hasElemA2) hasElemA2 = true

          if (hasElemA1 && hasElemA3)
            signalOnNext(elemA1, elemA2, elemA3)
          else
            Continue
        }
      }

      def onError(ex: Throwable): Unit =
        signalOnError(ex)
      def onComplete(): Unit =
        signalOnComplete()
    })

    composite += obsA3.unsafeSubscribeFn(new Subscriber[A3] {
      implicit val scheduler = out.scheduler

      def onNext(elem: A3): Future[Ack] = self.synchronized {
        if (isDone) Stop else {
          elemA3 = elem
          if (!hasElemA3) hasElemA3 = true

          if (hasElemA1 && hasElemA2)
            signalOnNext(elemA1, elemA2, elemA3)
          else
            Continue
        }
      }

      def onError(ex: Throwable): Unit =
        signalOnError(ex)
      def onComplete(): Unit =
        signalOnComplete()
    })

    composite
  }
} | Wogan/monix | monix-reactive/shared/src/main/scala/monix/reactive/internal/builders/CombineLatest3Observable.scala | Scala | apache-2.0 | 5,310 |
package org.retistruen.instrument
import org.retistruen._
/** A simple [[org.retistruen.Source]] [[org.retistruen.Emitter]] to be used as entry point.
  *
  * @param name the identifier of this source within the instrument tree
  * @tparam T the type of the values emitted
  */
class SourceEmitter[T](val name: String)
  extends Source[T] with CachingEmitter[T]
/** An [[org.retistruen.OpenSource]] [[org.retistruen.Emitter]] to be used as entry point.
  *
  * @param name the identifier of this source within the instrument tree
  * @tparam T the type of the values emitted; requires a `ReadableFromString[T]`
  *           instance so values can be parsed from their textual representation
  */
class OpenSourceEmitter[T: ReadableFromString](val name: String)
  extends OpenSource[T] with CachingEmitter[T]
| plalloni/retistruen | src/main/scala/org/retistruen/instrument/SourceEmitter.scala | Scala | mit | 445 |
package com.criteo.dev.cluster.docker
import com.criteo.dev.cluster.aws.AwsUtilities.NodeRole
import com.criteo.dev.cluster._
import com.criteo.dev.cluster.config.GlobalConfig
import org.slf4j.LoggerFactory
import scala.sys.process.Process
/**
* Start docker container
*/
@Public object StartLocalCliAction extends CliAction[Unit] {

  private val logger = LoggerFactory.getLogger(StartLocalCliAction.getClass)

  override def command: String = "start-local"

  override def usageArgs: List[Any] = List(Option("instanceId"))

  override def help: String = "Starts a local cluster docker container. If instanceId specified, " +
    "only start that one container, else starts them all."

  // Starts each matching container via `docker start`, waits until it is
  // reachable over SSH, starts its services, and prints the container info.
  override def applyInternal(args: List[String], config: GlobalConfig): Unit = {
    val conf = config.backCompat

    //instance id is optional
    val instanceId = if (args.length == 1) Some(args(0)) else None

    val dockerMetas = DockerUtilities.getDockerContainerMetadata(
      DockerConstants.localClusterContainerLabel,
      instanceId)

    dockerMetas.foreach(d => {
      val command = s"docker start ${d.id}"
      // `!!` throws if the command exits with a non-zero status (scala.sys.process
      // semantics — presumably DevClusterProcess.process wraps a ProcessBuilder;
      // TODO confirm).
      DevClusterProcess.process (command).!!

      //add other required confs needed by the setup action (target ip, port)
      val dockerCluster = NodeFactory.getDockerNode(config.target.local, d)
      DockerUtilities.blockOnSsh(dockerCluster)

      StartServiceAction(dockerCluster, NodeRole.Master)

      //print out new docker container info.
      // NOTE(review): this inner `dockerMetas` shadows the outer one and is
      // re-fetched — so the full cluster info is printed once per started
      // container. Confirm this repetition is intended.
      val dockerMetas = DockerUtilities.getDockerContainerMetadata(
        DockerConstants.localClusterContainerLabel,
        instanceId)
      DockerUtilities.printClusterDockerContainerInfo(conf, dockerMetas)
    })
  }
}
| criteo/berilia | src/main/scala/com/criteo/dev/cluster/docker/StartLocalCliAction.scala | Scala | apache-2.0 | 1,693 |
package tastytest
// A parameterless marker annotation. Extending `scala.annotation.StaticAnnotation`
// makes it visible to the compiler (and persisted in signatures) but it is not
// retained for runtime reflection.
final class annot extends scala.annotation.StaticAnnotation
| lrytz/scala | test/tasty/neg-isolated/src-3-A/annot.scala | Scala | apache-2.0 | 79 |
//
// $Id$
//
// Wiggle - a 2D game development library - http://code.google.com/p/wiggle/
// Copyright 2008-2010 Michael Bayne
// Distributed under the "Simplified BSD License" in LICENSE.txt
package wiggle.util
/**
 * A trait used by things that wish to maintain a list of tasks and tick those tasks every frame.
 *
 * Additions and removals are queued and applied at the start of the next
 * {@link #tick}, so mutating the task list from within a task is safe.
 * NOTE(review): no synchronization — assumes all calls happen on a single
 * (render/game-loop) thread; confirm before using from multiple threads.
 */
trait Taskable
{
  /** Returns a view of our tasks. */
  def tasks :Seq[Task] = _tasks

  /** Adds a task to this taskable. The task will be initialized and ticked on the next call to
    * {@link #tick}. If we're in the middle of ticking, this task won't participate in this tick. */
  def add (task :Task) {
    _newTasks = task :: _newTasks
  }

  /** Removes a task from this taskable. The task will be removed on the next call to {@link #tick}.
    * If we're in the middle of ticking this task will not be removed until the next call. */
  def remove (task :Task) = {
    _deadTasks += task
  }

  /** Adds newly registered tasks, removes pending deletions and ticks all active tasks. */
  def tick (time :Float) {
    // remove any dead tasks from our list
    if (!_deadTasks.isEmpty) {
      _tasks = _tasks.filterNot(_deadTasks.contains)
      _deadTasks = Set()
    }

    // add any new tasks to our current tasks list
    // (_newTasks is a prepend list, so within one batch the most recently
    // added task is initialized and placed first after the existing tasks)
    if (!_newTasks.isEmpty) {
      val olen = _tasks.length
      val tasks = new Array[Task](olen + _newTasks.length)
      System.arraycopy(_tasks, 0, tasks, 0, olen)
      var idx = olen; while (_newTasks.length > 0) {
        _newTasks.head.init(time)
        tasks(idx) = _newTasks.head
        _newTasks = _newTasks.tail
        idx = idx+1
      }
      _tasks = tasks
    }

    // and tick all of our registered tasks (this is ugly because it's optimized)
    // a task whose tick() returns true is scheduled for removal on the next tick
    val tasks = _tasks
    val len = tasks.length
    var idx = 0; while (idx < len) {
      val t = tasks(idx)
      if (t.tick(time)) remove(t)
      idx = idx+1
    }
  }

  // active tasks; replaced wholesale when queued additions are applied
  private[this] var _tasks :Array[Task] = new Array(0)
  // tasks queued by add(), applied at the start of the next tick
  private[this] var _newTasks :List[Task] = Nil
  // tasks queued by remove(), filtered out at the start of the next tick
  private[this] var _deadTasks :Set[Task] = Set()
}
| zdevzee/wiggle | src/main/scala/wiggle/util/Taskable.scala | Scala | bsd-3-clause | 2,046 |
import java.io.File
import avrohugger._
import avrohugger.format.Scavro
import org.specs2._
class ScavroGeneratorSpec extends mutable.Specification {

  "a ScavroGenerator" should {

    "correctly generate a case class definition in a package" in {
      val infile = new java.io.File("avrohugger-core/src/test/avro/mail.avpr")
      val gen = new Generator(Scavro)
      val outDir = gen.defaultOutputDir + "/scavro/"
      // Generate Scala sources from the Avro protocol file into outDir.
      gen.fileToFile(infile, outDir)

      // Read back the generated file and compare it with the expected Scavro
      // output verbatim (whitespace included).
      val source = scala.io.Source.fromFile(s"$outDir/example/proto/model/Message.scala").mkString
      source ===
      """|/** MACHINE-GENERATED FROM AVRO SCHEMA. DO NOT EDIT DIRECTLY */
         |package example.proto.model
         |
         |import org.apache.avro.Schema
         |
         |import com.oysterbooks.scavro.{AvroMetadata, AvroReader, AvroSerializeable}
         |
         |import example.proto.{Message => JMessage}
         |
         |case class Message(to: String, from: String, body: String) extends AvroSerializeable {
         |  type J = JMessage
         |  override def toAvro: JMessage = {
         |    new JMessage(to, from, body)
         |  }
         |}
         |
         |object Message {
         |  implicit def reader = new AvroReader[Message] {
         |    override type J = JMessage
         |  }
         |  implicit val metadata: AvroMetadata[Message, JMessage] = new AvroMetadata[Message, JMessage] {
         |    override val avroClass: Class[JMessage] = classOf[JMessage]
         |    override val schema: Schema = JMessage.getClassSchema()
         |    override val fromAvro: (JMessage) => Message = {
         |      (j: JMessage) => Message(j.getTo.toString, j.getFrom.toString, j.getBody.toString)
         |    }
         |  }
         |}""".stripMargin
    }

  }

}
| ppearcy/avrohugger | avrohugger-core/src/test/scala/scavro/ScavroGeneratorSpec.scala | Scala | apache-2.0 | 1,877 |
package filodb.stress
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.util.Try
import com.opencsv.CSVReader
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{SaveMode, SparkSession}
import org.apache.spark.streaming.{Milliseconds, StreamingContext}
import org.joda.time.DateTime
import filodb.core.DatasetRef
import filodb.spark._
/**
* Continuous streaming ingestion + queries stress test - basically the real-time ingestion use case.
* Tests for accuracy of data thus far ingested plus performance.
*
* To prepare, download the first month's worth of data from http://www.andresmh.com/nyctaxitrips/
* Also, run this to initialize the filo-stress keyspace:
* `filo-cli --database filostress --command init`
*
* Recommended to run this with the first million rows only as a first run to make sure everything works.
* Test at different memory settings - but recommend minimum 4G.
*
* Also, if you run this locally, run it using local-cluster to test clustering effects.
*/
object StreamingStress extends App {
  val taxiCsv = args(0)
  val numRuns = 250 // Make this higher when doing performance profiling

  def puts(s: String): Unit = {
    // scalastyle:off
    println(s)
    // scalastyle:on
  }

  // Setup SparkContext, etc.
  val sess = SparkSession.builder.appName("FiloDB StreamingStress")
    .config("spark.filodb.cassandra.keyspace", "filostress")
    .config("spark.sql.shuffle.partitions", "4")
    .config("spark.scheduler.mode", "FAIR")
    .getOrCreate
  val sc = sess.sparkContext
  // 1-second micro-batches for the streaming context.
  val ssc = new StreamingContext(sc, Milliseconds(1000))
  val ref = DatasetRef("taxi_streaming")

  FiloDriver.init(sc)

  implicit val ec = FiloDriver.ec

  puts(s"Truncating dataset (if it exists already)...")
  try {
    FiloDriver.client.truncateDataset(ref)
  } catch {
    case filodb.coordinator.client.ClientException(e) => puts(s"Ignoring error $e")
  }

  // One row of the NYC taxi trip CSV; field order matches the CSV columns.
  case class TaxiRecord(medallion: String,
                        hack_license: String,
                        vendor_id: String,
                        rate_code: Int,
                        store_and_fwd_flag: String,
                        pickup_datetime: Long,
                        dropoff_datetime: Long,
                        passenger_count: Int,
                        trip_time_in_secs: Int,
                        trip_distance: Double,
                        pickup_longitude: Double,
                        pickup_latitude: Double,
                        dropoff_longitude: Double,
                        dropoff_latitude: Double)

  import sess.implicits._

  // Converts a "yyyy-MM-dd HH:mm:ss"-style timestamp to epoch millis by making
  // it ISO-8601 (space -> 'T') — assumes that input format; TODO confirm
  // against the downloaded data.
  private def toTimeMsLong(dtString: String): Long = {
    val dt = DateTime.parse(dtString.replace(" ", "T"))
    dt.getMillis
  }

  // Batches of parsed records are pushed into this queue by the CSV reader
  // below and drained by the streaming context once per interval.
  val queue = collection.mutable.Queue[RDD[TaxiRecord]]()

  ssc.queueStream(queue)
     .foreachRDD { rdd =>
       if (rdd.isEmpty) { puts(" XXXX:  EMPTY RDD!!!  ") }
       else {
         rdd.toDF.write.format("filodb.spark").
             option("dataset", "taxi_streaming").
             option("row_keys", "pickup_datetime,hack_license").
             option("partition_columns", "medallion:string").
             // Flushing after each small batch would be very inefficient...
             option("flush_after_write", "false").
             mode(SaveMode.Append).save()
       }
     }

  ssc.start()

  // Parses the CSV off the main thread and enqueues 3000-row batches.
  val csvIngestThread = Future {
    import collection.JavaConverters._

    val reader = new CSVReader(new java.io.FileReader(taxiCsv))
    val columns = reader.readNext.toSeq
    reader.iterator.asScala
          .map { parts =>
            TaxiRecord(parts(0), parts(1), parts(2), parts(3).toInt,
                       parts(4), toTimeMsLong(parts(5)), toTimeMsLong(parts(6)), parts(7).toInt,
                       parts(8).toInt, parts(9).toDouble, parts(10).toDouble,
                       parts(11).toDouble, parts(12).toDouble, parts(13).toDouble)
          }.grouped(3000)
          .foreach { records =>
            val rdd = ssc.sparkContext.parallelize(records)
            queue += rdd
          }
  }

  // Concurrently polls the row count and ingestion stats while ingestion runs,
  // roughly every 700ms, numRuns times.
  val readingThread = Future {
    val taxiData = sess.filoDataset("taxi_streaming")
    (0 until numRuns).foreach { i =>
      val numRecords = taxiData.count()
      puts(s"Taxi dataset now has ===> $numRecords records!")
      val stats = Try(FiloDriver.client.ingestionStats(ref)).getOrElse("Oops, dataset not there yet")
      puts(s"  ==> Ingestion stats: $stats")
      Thread sleep 700
    }
  }

  Await.result(readingThread, 160000.seconds)
  ssc.awaitTerminationOrTimeout(160000)

  // clean up!
  FiloDriver.shutdown()
  sc.stop()
} | filodb/FiloDB | stress/src/main/scala/filodb.stress/StreamingStress.scala | Scala | apache-2.0 | 4,848 |
package ee.cone.c4actor_branch
import ee.cone.c4actor.ArgTypes.LazyList
import ee.cone.c4actor._
import scala.collection.immutable.Seq
import ee.cone.c4actor_branch.BranchProtocol.S_BranchResult
import ee.cone.c4actor.Types.SrcId
import ee.cone.c4actor_branch.BranchTypes.BranchKey
import ee.cone.c4proto._
/** Type aliases shared by the branch API. */
object BranchTypes {
  /** Key identifying a branch; an alias of [[SrcId]]. */
  type BranchKey = SrcId
}
/** A message delivered to a branch for handling.
  * NOTE(review): `method`/`header` mirror an HTTP-like transport — confirm
  * against the actual sender.
  */
trait BranchMessage extends Product {
  /** Transport method of the message. */
  def method: String
  /** Header value lookup by header name. */
  def header: String=>String
  /** Raw message payload. */
  def body: okio.ByteString
  /** Events associated with this message to be deleted when it is consumed — TODO confirm. */
  def deletes: Seq[LEvent[Product]]
}
/** Handles messages addressed to a single branch. */
trait BranchHandler extends Product {
  /** The branch this handler serves. */
  def branchKey: SrcId
  /** Processes one incoming message, returning the updated context. */
  def exchange: BranchMessage => Context => Context
  /** Branch seeds derived from the current context. */
  def seeds: Context => List[S_BranchResult]
}
/** Persists exceptions raised while handling a branch, returning the updated context. */
trait BranchErrorSaver {
  def saveErrors(
    local: Context,
    branchKey: BranchKey,
    sessionKeys: List[SrcId],
    exceptions: List[Exception]
  ): Context
}
/** A unit of work associated with one branch. */
trait BranchTask extends Product {
  def branchKey: SrcId
  /** The domain value this task operates on. */
  def product: Product
  /** Relations to sessions reachable from this branch; `visited` presumably guards
    * against cycles during traversal — TODO confirm. */
  def sessionKeys(visited: Set[SrcId] = Set.empty): Context => Set[BranchRel]
  /** Optional sender taking (event type, data) and updating the context. */
  type Send = Option[(String,String) => Context => Context]
  // NOTE(review): the meaning of the two Send slots is not visible here — confirm.
  def sending: Context => (Send,Send)
  /** Moves this branch to the given location. */
  def relocate(to: String): Context => Context
}
/** Conversions between domain values and branch seeds / parent relations. */
trait BranchOperations {
  def toSeed(value: Product): S_BranchResult
  def toRel(seed: S_BranchResult, parentSrcId: SrcId, parentIsSession: Boolean): (SrcId,BranchRel)
}
/** Relation between a branch seed and its parent (a session or another branch). */
case class BranchRel(srcId: SrcId, seed: S_BranchResult, parentSrcId: SrcId, parentIsSession: Boolean)
/** Wire protocol for branch state; the @Id values are stable field tags. */
@protocol("BranchApp") object BranchProtocol {
  /** A branch seed/result: a hashed serialized value plus nested children. */
  @Id(0x0040) case class S_BranchResult(
    @Id(0x0041) hash: String,
    @Id(0x0042) valueTypeId: Long,
    @Id(0x0043) value: okio.ByteString,
    @Id(0x0044) children: LazyList[S_BranchResult],
    @Id(0x0045) position: String
  )

  @Id(0x004B) case class U_Redraw(
    @Id(0x004C) srcId: String,
    @Id(0x004D) branchKey: String
  )

  // NOTE(review): tag 0x004D is reused for branchKey here (same tag as in
  // U_Redraw) — confirm tags only need to be unique within one message type.
  @Id(0x004E) case class N_RestPeriod(
    @Id(0x004D) branchKey: String,
    @Id(0x004F) value: Long
  )
}
/** Sends an event of the given type and payload to the listed sessions. */
trait ToAlienSender {
  def send(sessionKeys: Seq[String], evType: String, data: String): Context=>Context
}
/** An error that can render a human-readable message for a given context. */
trait BranchError {
  def message(local: Context): String
} | conecenter/c4proto | base_lib/src/main/scala/ee/cone/c4actor_branch/BranchApi.scala | Scala | apache-2.0 | 2,096 |
package org.higherstate.jameson
import org.scalatest.{MustMatchers, WordSpec}
import org.higherstate.jameson.DefaultRegistry._
import org.higherstate.jameson.Dsl._
import org.higherstate.jameson.parsers.Parser
import org.higherstate.jameson.failures.Success
class NestedStructuresSpec extends WordSpec with MustMatchers {

  "list with matches" should {
    // Parses {"parents":[...]} where each list element is dispatched on its "type" field.
    val listParser = as [ListParents]("parents" -> asList(matchAs("type", as [Child1], as [Child2])))
    "handle multiple elements" in {
      val json = """{"int":3,"parents":[{"type":"Child1", "tInt":3},{"tInt":3, "type":"Child1"}, {"tBool":false, "type":"Child2"}]}"""
      val r = listParser(json)
      r.isRight mustEqual (true)
    }

    // Both match arms reuse the same parser; only the dispatch key differs.
    val matchParser = matchAs("pType", "one" -> listParser, "two" -> listParser)
    "inside a match" in {
      val json = """{"int":3,"parents":[{"type":"Child1", "tInt":3},{"tInt":3, "type":"Child1"}, {"tBool":false, "type":"Child2"}], "pType":"two"}"""
      val r = matchParser(json)
      r.isRight mustEqual (true)
    }

    val doubleMatchParser = matchAs("dType", "one" -> matchParser, "two" -> matchParser)
    "with a double match" in {
      val json = """{"int":3,"dType":"one","parents":[{"type":"Child1", "tInt":3},{"tInt":3, "type":"Child1"},{"tBool":false, "type":"Child2"}], "pType":"two"}"""
      val r = doubleMatchParser(json)
      r.isRight mustEqual (true)
    }
  }

  "self referencing json validation" should {
    "handle self reference in selector" in {
      // `lazy val` + `self(parser)` ties the recursive knot so the parser can reference itself.
      lazy val parser:Parser[ParentContainer] = as [ParentContainer]("parent" -> asOption(self (parser)))
      parser.parse("""{"parent":{"parent":{"parent":{}}}}""") mustEqual Success(ParentContainer(Some(ParentContainer(Some(ParentContainer(Some(ParentContainer(None))))))))
    }
    "parse with no recursing in a two level recursion" in {
      lazy val parser:Parser[RecursiveChild1] = as [RecursiveChild1] ("child" -> as [RecursiveChild2] ("child" -> asOption(self(parser))))
      parser("""{"value":1,"child":{"value":"two"}}""") mustEqual Success(RecursiveChild1(1, RecursiveChild2("two", None)))
    }
    "parse with recursion in a two level recursion" in {
      lazy val parser:Parser[RecursiveChild2] = as [RecursiveChild2] ("child" -> asOption [RecursiveChild1] ("child" -> self(parser)))
      parser("""{"value":"one","child":{"value":2,"child":{"value":"three"}}}""") mustEqual Success(RecursiveChild2("one", Some(RecursiveChild1(2, RecursiveChild2("three",None)))))
    }
  }
}
| HigherState/jameson | src/test/scala/org/higherstate/jameson/NestedStructuresSpec.scala | Scala | apache-2.0 | 2,487 |
package edu.umass.ciir.kbbridge.search
import collection.mutable
import edu.umass.ciir.kbbridge.util.ConfInfo
import edu.umass.ciir.kbbridge.data.DocumentProvider
import org.lemurproject.galago.tupleflow.Parameters
/**
* User: jdalton
* Date: 6/10/13
*/
object DocumentBridgeMap extends DocumentProvider {

  // todo add collection flag to the lookup, e.g. (identifier, wiki)
  def getDocument(identifier: String, params: Option[Parameters]) = {
    getDefaultDocumentProvider.getDocument(identifier, params)
  }

  /** Fetches the document and converts it to the bridge representation. */
  def getBridgeDocument(identifier: String, params: Option[Parameters]) = DocumentProvider.convertToBridgeDocument(identifier, getDocument(identifier, params))

  def getFieldTermCount(cleanTerm: String, field: String) = getDefaultDocumentProvider.getFieldTermCount(cleanTerm, field)

  // Cache of lazily created retrieval backends keyed by searcher name.
  // NOTE(review): mutable.HashMap + getOrElseUpdate is not thread-safe —
  // confirm this object is only used from a single thread.
  val searcherMap: mutable.Map[String, GalagoRetrieval] = new mutable.HashMap[String, GalagoRetrieval]()

  // Lazily creates (and caches) the knowledge-base retrieval backend.
  def getKbRetrieval:GalagoRetrieval= {
    searcherMap.getOrElseUpdate("kb", {
      new GalagoRetrieval(ConfInfo.galagoKbJsonParameterFile, ConfInfo.galagoUseLocalIndex, ConfInfo.galagoKbSrv, ConfInfo.galagoKbPort)
    })
  }

  // Lazily creates (and caches) the default retrieval backend.
  def getDefaultRetrieval: GalagoRetrieval= {
    searcherMap.getOrElseUpdate("default", {
      new GalagoRetrieval(ConfInfo.galagoDefaultJsonParameterFile, ConfInfo.galagoUseLocalIndex, ConfInfo.galagoDefaultSrv, ConfInfo.galagoDefaultPort)
    })
  }

  def getKbDocumentProvider:DocumentProvider = {
    getKbRetrieval
  }

  def getDefaultDocumentProvider: DocumentProvider = {
    getDefaultRetrieval
  }

  def fakeTokenize(text: String) = getDefaultRetrieval.fakeTokenize(text)

  // NOTE(review): this private overload appears unused within this object — confirm before relying on it.
  private def getProvider(searcherName: String): Option[DocumentProvider] = {
    searcherMap.get(searcherName)
  }

  // Creates (or returns the cached) named retrieval backend.
  // NOTE(review): candidateQueryType and resultLogFileName are accepted but never used here.
  def getProvider(searcherName: String, jsonConfigFile: String, galagoUseLocalIndex: Boolean, galagoSrv: String, galagoPort: String, candidateQueryType: String, resultLogFileName: String): DocumentProvider = {
    searcherMap.getOrElseUpdate(searcherName, {
      new GalagoRetrieval(jsonConfigFile, galagoUseLocalIndex, galagoSrv, galagoPort)
    })
  }
}
| daltonj/KbBridge | src/main/scala/edu/umass/ciir/kbbridge/search/DocumentBridgeMap.scala | Scala | apache-2.0 | 2,107 |
package ch16
// Walks an inline XML document and prints, for every anchor element, its
// trimmed text and its href attribute.
object ex06 extends App {
  val xml = <html>
              <head>
                <title> My list of jokes </title>
              </head>
              <body>
                <div class="otherprojects">
                  <div class="otherprojects-item">
                    <a href="http://www.wiktionary.org/">
                      <span class="icon">
                        <img src="//upload.wikimedia.org/wikipedia/meta/3/3b/Wiktionary-logo_sister_1x.png" width="35" height="35" alt=""/>
                      </span>
                      Wiktionary
                    </a>
                  </div>
                  <div class="otherprojects-item">
                    <a href="http://www.wikivoyage.org/">
                      <span class="icon">
                        <img src="//upload.wikimedia.org/wikipedia/meta/7/74/Wikivoyage-logo_sister_1x.png" width="35" height="35" alt=""/>
                      </span>
                      Wikivoyage
                    </a>
                  </div>
                  <div class="otherprojects-item">
                    <a href="http://commons.wikimedia.org/">
                      <span class="icon">
                        <img src="//upload.wikimedia.org/wikipedia/meta/9/90/Commons-logo_sister_1x.png" width="35" height="47" alt="" style="vertical-align: top;"/>
                      </span>
                      Commons
                    </a>
                  </div>
                </div>
              </body>
            </html>
  // Select every <a> at any depth and print "text => href".
  // NOTE(review): `f` is Unit — the trailing foreach returns no value, so the
  // val exists only to force evaluation. `.attribute("href").get` throws if an
  // anchor has no href.
  val f = (xml \\\\ "a") map { x => (x.text.trim(), x.attribute("href").get) } foreach (a => println(s"${a._1} => ${a._2}"))
}
| tuxdna/scala-for-the-impatient-exercises | src/main/scala/ch16/ex06.scala | Scala | apache-2.0 | 1,648 |
package pl.newicom.dddd.messaging.event
import org.joda.time.DateTime
import pl.newicom.dddd.aggregate.DomainEvent
import pl.newicom.dddd.messaging.MetaData.CorrelationId
import pl.newicom.dddd.messaging.{EntityMessage, Message}
import pl.newicom.dddd.utils.UUIDSupport._
/** Extractor exposing an event message as its (id, event) pair. */
object EventMessage {
  def unapply(em: EventMessage): Option[(String, DomainEvent)] =
    Some((em.id, em.event))
}
/** An event message wrapping a [[DomainEvent]] with an id and creation time.
  *
  * @param event     the wrapped domain event
  * @param id        unique message id (random UUID by default)
  * @param timestamp creation time (now by default)
  */
class EventMessage(
    val event: DomainEvent,
    val id: String = uuid,
    val timestamp: DateTime = new DateTime)
  extends Message with EntityMessage {

  type MessageImpl = EventMessage

  // Resolved from the CorrelationId meta attribute; null when it is absent.
  override def entityId = tryGetMetaAttribute[String](CorrelationId).orNull
  override def payload = event

  override def toString: String = {
    val msgClass = getClass.getSimpleName
    s"$msgClass(event = $event, id = $id, timestamp = $timestamp, metaData = $metadata)"
  }
} | ahjohannessen/akka-ddd | akka-ddd-messaging/src/main/scala/pl/newicom/dddd/messaging/event/EventMessage.scala | Scala | mit | 870 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar
import org.scalacheck.Gen
import scalaz.Monad
import scalaz.Monoid
/** Bridges specs2's private Monad/Monoid type classes to their scalaz equivalents. */
trait ScalazSpecs2Instances extends org.specs2.scalacheck.GenInstances {
  implicit def scalazGenMonad: Monad[Gen] = specs2ToScalazMonad(genMonad)

  // We cannot make this public implicit function since then it will conflict with e.g. `scalaz.idInstance`
  private def specs2ToScalazMonad[F[_]](specsMonad: org.specs2.fp.Monad[F]): Monad[F] = new Monad[F] {
    def point[A](a: => A): F[A] = specsMonad.point(a)
    def bind[A, B](fa: F[A])(f: A => F[B]): F[B] = specsMonad.bind(fa)(f)
  }

  // Implicit-to-implicit adapter: picks up a specs2 Monoid from implicit scope.
  implicit def specs2ToScalazMonoid[A](implicit specsMonoid: org.specs2.fp.Monoid[A]): Monoid[A] = {
    specs2ToScalazMonoidExplicit(specsMonoid)
  }

  implicit def specs2ToScalazMonoidExplicit[A](specsMonoid: org.specs2.fp.Monoid[A]): Monoid[A] = new Monoid[A] {
    override def zero: A = specsMonoid.zero
    override def append(f1: A, f2: => A): A = specsMonoid.append(f1, f2)
  }
}
| quasar-analytics/quasar | foundation/src/test/scala/quasar/ScalazSpecs2Instances.scala | Scala | apache-2.0 | 1,562 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.fuberlin.wiwiss.silk.workspace.modules.output
import de.fuberlin.wiwiss.silk.util.Identifier
import java.util.logging.Logger
/**
 * Output module which holds all outputs in memory.
 *
 * All access to the task map is synchronized on this instance.
 */
class MemoryOutputModule extends OutputModule
{
  private val log = Logger.getLogger(classOf[MemoryOutputModule].getName)

  // Guarded by `synchronized` on this instance.
  private var outputsTasks = Map[Identifier, OutputTask]()

  def config = OutputConfig()

  // Configuration is ignored: this module is purely in-memory.
  def config_=(c: OutputConfig) { }

  override def tasks = synchronized { outputsTasks.values }

  override def update(task : OutputTask) = synchronized
  {
    outputsTasks += (task.name -> task)
    // Fixed: the opening quote around the task name was never closed.
    log.info("Updated output '" + task.name + "'")
  }

  override def remove(taskId : Identifier) = synchronized
  {
    outputsTasks -= taskId
    log.info("Removed output '" + taskId + "'")
  }
}
| fusepoolP3/p3-silk | silk-workspace/src/main/scala/de/fuberlin/wiwiss/silk/workspace/modules/output/MemoryOutputModule.scala | Scala | apache-2.0 | 1,362 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and returns a sample of code snippets that match specific criteria, giving a quick overview of the dataset's contents without deeper analysis.