code
stringlengths
5
1M
repo_name
stringlengths
5
109
path
stringlengths
6
208
language
stringclasses
1 value
license
stringclasses
15 values
size
int64
5
1M
package scala.meta.tests package trees import munit._ import scala.meta._ class TreeSuite extends FunSuite { test("Name.unapply") { assert(Name.unapply(q"a").contains("a")) assert(Name.unapply(t"a").contains("a")) } }
scalameta/scalameta
tests/shared/src/test/scala/scala/meta/tests/trees/TreeSuite.scala
Scala
bsd-3-clause
232
package io.scalac.amqp.impl import com.google.common.primitives.Ints.saturatedCast import com.rabbitmq.client._ import io.scalac.amqp.Delivery import org.reactivestreams.{Subscriber, Subscription} import scala.annotation.tailrec import scala.collection.immutable.Queue import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future import scala.concurrent.stm.{Ref, atomic} import scala.util.control.NonFatal private[amqp] class QueueSubscription(channel: Channel, queue: String, subscriber: Subscriber[_ >: Delivery]) extends DefaultConsumer(channel) with Subscription { val demand = Ref(0L) /** Number of messages stored in this buffer is limited by channel QOS. */ val buffer = Ref(Queue[Delivery]()) val running = Ref(false) var closeRequested = Ref(false) override def finalize(): Unit = { try closeChannel() finally super.finalize() } override def handleCancel(consumerTag: String) = try { subscriber.onComplete() } catch { case NonFatal(exception) β‡’ subscriber.onError(new IllegalStateException("Rule 2.13: onComplete threw an exception", exception)) } override def handleShutdownSignal(consumerTag: String, sig: ShutdownSignalException) = sig match { case sig if !sig.isInitiatedByApplication β‡’ subscriber.onError(sig) case _ β‡’ // shutdown initiated by us } override def handleDelivery(consumerTag: String, envelope: Envelope, properties: AMQP.BasicProperties, body: Array[Byte]) = { val delivery = Conversions.toDelivery(envelope, properties, body) buffer.single.transform(_ :+ delivery) deliverRequested() } @tailrec private def deliverRequested(): Unit = { val go = atomic { implicit txn β‡’ if (demand() > 0 && buffer().nonEmpty) running.transformAndExtract(r => (true, !r)) else false } if (go) { //buffer and demand could only grow since last check atomic { implicit txn β‡’ buffer().splitAt(saturatedCast(demand())) match { case (ready, left) β‡’ buffer() = left demand -= ready.size ready } }.foreach(deliver) running.single.set(false) 
deliverRequested() } } def deliver(delivery: Delivery): Unit = try { if(channel.isOpen()) { channel.basicAck(delivery.deliveryTag.underlying, false) subscriber.onNext(delivery) } } catch { case NonFatal(exception) β‡’ // 2.13: exception from onNext cancels subscription try closeChannel() catch { case NonFatal(_) β‡’ // mute } subscriber.onError(exception) } def closeChannel(): Unit = synchronized { if (closeRequested.single.compareAndSet(false, true) && channel.isOpen) { try { channel.close() } catch { case NonFatal(_) => } } } override def request(n: Long) = n match { case n if n <= 0 β‡’ try closeChannel() catch { case NonFatal(_) β‡’ // mute } subscriber.onError(new IllegalArgumentException("Rule 3.9: n <= 0")) case n if channel.isOpen β‡’ val newDemand = demand.single.transformAndGet(_ + n) newDemand match { case d if d < 0 β‡’ // 3.17: overflow try closeChannel() catch { case NonFatal(_) β‡’ // mute } subscriber.onError(new IllegalStateException("Rule 3.17: Pending + n > Long.MaxValue")) case d β‡’ Future(deliverRequested()) } case _ β‡’ // 3.6: nop } override def cancel() = try { closeChannel() } catch { case _: AlreadyClosedException β‡’ // 3.7: nop case NonFatal(exception) β‡’ subscriber.onError(exception) } override def toString() = atomic { implicit txn β‡’ s"QueueSubscription(channel=$channel, queue=$queue, subscriber=$subscriber, demand=${demand()}, " + s"buffer.size=${buffer().size})" } }
ScalaConsultants/reactive-rabbit
src/main/scala/io/scalac/amqp/impl/QueueSubscription.scala
Scala
apache-2.0
4,097
package org.jetbrains.plugins.scala.extensions import com.intellij.psi.PsiElement /** * Pavel Fatin */ object Children { def unapplySeq(e: PsiElement): Some[Seq[PsiElement]] = Some(e.getChildren.toSeq) }
ilinum/intellij-scala
src/org/jetbrains/plugins/scala/extensions/Children.scala
Scala
apache-2.0
209
package mesosphere.marathon package core.task.jobs import java.time.Clock import mesosphere.marathon.core.leadership.LeadershipModule import mesosphere.marathon.core.task.jobs.impl.{ ExpungeOverdueLostTasksActor, OverdueTasksActor } import mesosphere.marathon.core.task.termination.KillService import mesosphere.marathon.core.task.tracker.{ InstanceStateOpProcessor, InstanceTracker } import mesosphere.marathon.MarathonConf /** * This module contains periodically running jobs interacting with the task tracker. */ class TaskJobsModule(config: MarathonConf, leadershipModule: LeadershipModule, clock: Clock) { def handleOverdueTasks( taskTracker: InstanceTracker, stateOpProcessor: InstanceStateOpProcessor, killService: KillService): Unit = { leadershipModule.startWhenLeader( OverdueTasksActor.props( config, taskTracker, stateOpProcessor, killService, clock ), "killOverdueStagedTasks") } def expungeOverdueLostTasks(taskTracker: InstanceTracker, stateOpProcessor: InstanceStateOpProcessor): Unit = { leadershipModule.startWhenLeader( ExpungeOverdueLostTasksActor.props(clock, config, taskTracker, stateOpProcessor), "expungeOverdueLostTasks" ) } }
guenter/marathon
src/main/scala/mesosphere/marathon/core/task/jobs/TaskJobsModule.scala
Scala
apache-2.0
1,263
package knot.core.dispatch import scala.concurrent.{ExecutionContext, ExecutionContextExecutorService} object ForkJoinDispatcher { def apply(): Dispatcher = { implicit val ec: ExecutionContextExecutorService = ExecutionContext.fromExecutorService(null) new AbstractDispatcher() {} } }
defvar/knot
knot-core/src/main/scala/knot/core/dispatch/ForkJoinDispatcher.scala
Scala
mit
299
package com.github.novamage.svalidator.validation.simple.internals case class SimpleValidationRuleStructureContainer[A, B](validationExpression: (B, A) => Boolean, conditionalValidation: Option[A => Boolean], errorMessageKey: Option[String], errorMessageFormatValues: Option[B => List[Any]], metadata: Map[String, List[Any]]) { }
NovaMage/SValidator
src/main/scala/com/github/novamage/svalidator/validation/simple/internals/SimpleValidationRuleStructureContainer.scala
Scala
mit
556
/* * Copyright 2017 PayPal * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.squbs.unicomplex import akka.actor.ActorSystem import akka.testkit.{ImplicitSender, TestKit} import com.typesafe.config.ConfigFactory import org.squbs.lifecycle.GracefulStop import Timeouts._ import org.scalatest.{BeforeAndAfterAll, SequentialNestedSuiteExecution} import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpecLike import scala.language.postfixOps object MultipleUnicomplexTest { val dummyJarsDir = getClass.getClassLoader.getResource("classpaths").getPath val classPaths = Array( "InitBlockCube", "InitCubeA", "InitCubeB", "InitFailCube" ) map (dummyJarsDir + "/" + _) val config = ConfigFactory.parseString( s""" |squbs { | actorsystem-name = MultipleUnicomplexTest1 | ${JMX.prefixConfig} = true |} |default-listener.bind-port = 0 """.stripMargin ) val config2 = ConfigFactory.parseString( s""" |squbs { | actorsystem-name = MultipleUnicomplexTest2 | ${JMX.prefixConfig} = true |} |default-listener.bind-port = 0 """.stripMargin ) val boot = UnicomplexBoot(config) .createUsing {(name, config) => ActorSystem(name, config)} .scanComponents(classPaths) .initExtensions.start(startupTimeout) val boot2 = UnicomplexBoot(config2) .createUsing {(name, config) => ActorSystem(name, config)} .scanComponents(classPaths) .initExtensions.start(startupTimeout) // We know this test will never finish initializing. So don't waste time on timeouts. 
} class MultipleUnicomplexTest extends TestKit(MultipleUnicomplexTest.boot.actorSystem) with ImplicitSender with AnyWordSpecLike with Matchers with BeforeAndAfterAll with SequentialNestedSuiteExecution { val sys1 = system val sys2 = MultipleUnicomplexTest.boot2.actorSystem override def beforeAll(): Unit = { sys.addShutdownHook { Unicomplex(sys2).uniActor ! GracefulStop Unicomplex(sys1).uniActor ! GracefulStop } } override def afterAll(): Unit = { Unicomplex(sys2).uniActor ! GracefulStop Unicomplex(sys1).uniActor ! GracefulStop } "UniComplex" must { "get cube init reports" in { Unicomplex(sys1).uniActor ! ReportStatus val StatusReport(systemState, cubes, _) = expectMsgType[StatusReport] systemState should be(Failed) val cubeAReport = cubes.values.find(_._1.info.name == "CubeA").flatMap(_._2) cubeAReport should not be None assert(cubeAReport.get.state == Active) val cubeBReport = cubes.values.find(_._1.info.name == "CubeB").flatMap(_._2) cubeBReport should not be None cubeBReport.get.state should be(Active) val initFailReport = cubes.values.find(_._1.info.name == "InitFail").flatMap(_._2) initFailReport should not be None initFailReport.get.state should be(Failed) val initBlockReport = cubes.values.find(_._1.info.name == "InitBlock").flatMap(_._2) initBlockReport should not be None initBlockReport.get.state should be(Initializing) Unicomplex(sys2).uniActor ! 
ReportStatus val StatusReport(systemState2, cubes2, _) = expectMsgType[StatusReport] systemState2 should be(Failed) val cubeAReport2 = cubes2.values.find(_._1.info.name == "CubeA").flatMap(_._2) cubeAReport2 should not be None assert(cubeAReport2.get.state == Active) val cubeBReport2 = cubes2.values.find(_._1.info.name == "CubeB").flatMap(_._2) cubeBReport2 should not be None cubeBReport2.get.state should be(Active) val initFailReport2 = cubes.values.find(_._1.info.name == "InitFail").flatMap(_._2) initFailReport2 should not be None initFailReport2.get.state should be(Failed) val initBlockReport2 = cubes.values.find(_._1.info.name == "InitBlock").flatMap(_._2) initBlockReport2 should not be None initBlockReport2.get.state should be(Initializing) } } }
paypal/squbs
squbs-unicomplex/src/test/scala/org/squbs/unicomplex/MultipleUnicomplexTest.scala
Scala
apache-2.0
4,540
package org.bowlerframework.view.scuery import org.bowlerframework.view.scalate.ClasspathTemplateResolver import xml.{XML, NodeSeq} import java.io.{IOException, StringReader} import java.util.concurrent.ConcurrentHashMap import org.bowlerframework.{RequestResolver, RequestScope} /** * Created by IntelliJ IDEA. * User: wfaler * Date: 12/03/2011 * Time: 18:48 * To change this template use File | Settings | File Templates. */ trait MarkupContainer { def render: NodeSeq = MarkupContainer.getTemplate(this.getClass, MarkupContainer.localisationPreferences) } object MarkupContainer { private val templateCache = new ConcurrentHashMap[Class[_], ConcurrentHashMap[String, Option[NodeSeq]]] private val templateResolver = new ClasspathTemplateResolver val types = List(".html", ".xhtml", ".xml") var requestResolver: RequestResolver = new RequestResolver { def request = RequestScope.request } private def uri(cls: Class[_]): String = "/" + cls.getName.replace(".", "/") private def localisationPreferences: List[String] = { if (requestResolver.request != null && requestResolver.request.getLocales != null) return requestResolver.request.getLocales else return Nil } private def getTemplate(cls: Class[_], locales: List[String]): NodeSeq = { if (templateCache.get(cls) == null) templateCache.put(cls, new ConcurrentHashMap[String, Option[NodeSeq]]) val map = templateCache.get(cls) try { if (locales == Nil) { val option = map.get("default") if (option == null) throw new NoSuchElementException if (option == None) return getTemplate(cls.getSuperclass, localisationPreferences) else return option.get } else { val option = map.get(locales.head) if (option == null) throw new NoSuchElementException if (option == None) { val newLocales = locales.drop(1) return getTemplate(cls, newLocales) } else return option.get } } catch { case e: NoSuchElementException => { try { val nodeSeq = XML.load(new StringReader(templateResolver.resolveResource(uri(cls), MarkupContainer.types, { if (locales != Nil) 
List(locales.head); else Nil }).template)).asInstanceOf[NodeSeq] map.put({ if (locales != Nil) locales.head; else "default" }, Some(nodeSeq)) return nodeSeq } catch { case e: IOException => { map.put({ if (locales != Nil) locales.head; else "default" }, None) if (locales != Nil) { val newLocales = locales.drop(1) getTemplate(cls, newLocales) } else if (cls.getSuperclass != null && classOf[MarkupContainer].isAssignableFrom(cls.getSuperclass) && locales == Nil) { getTemplate(cls.getSuperclass, localisationPreferences) } else throw new IOException("Can't find any markup for Component of type " + cls.getName) } } } } } }
rkpandey/Bowler
core/src/main/scala/org/bowlerframework/view/scuery/MarkupContainer.scala
Scala
bsd-3-clause
3,084
/* * SPDX-License-Identifier: Apache-2.0 * * Copyright 2015-2021 Andre White. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.truthencode.ddo.model.effect import enumeratum.EnumEntry import io.truthencode.ddo.enhancement.BonusType import io.truthencode.ddo.support.numbers.Numbers import scala.util.{Success, Try} case class EffectParameterList(modifiers: Seq[ParameterModifier[_]]) class EffectParameterBuilder[T <: EffectParameterBuilder.EffectParams] protected ( ingredients: Seq[ParameterModifier[_]]) { import EffectParameterBuilder.EffectParams._ /** * Determines when the given effect is triggered on. 
Required * @param toggleOn * A valid Trigger Event * @return * applied trigger modifier to builder */ def toggleOnValue(toggleOn: TriggerEvent*): EffectParameterBuilder[T with ToggleOnParam] = { val npm = toggleOn.map { t => { new ParameterModifier[TriggerEvent] { override protected[this] val parameterToModify: TriggerEvent = t override lazy val parameter: Try[EffectParameter] = Success(EffectParameter.Trigger(t)) } } } val allGoodStuff: Seq[ParameterModifier[TriggerEvent]] = ingredients.collect(filterByType[TriggerEvent]) ++ npm EffectParameterBuilder(ingredients ++ allGoodStuff.toSet.toSeq) } def filterByType[U <: EnumEntry]: PartialFunction[ParameterModifier[_], ParameterModifier[U]] = new PartialFunction[ParameterModifier[_], ParameterModifier[U]] { override def isDefinedAt(x: ParameterModifier[_]): Boolean = x match { case x: ParameterModifier[U] => true } override def apply(v1: ParameterModifier[_]): ParameterModifier[U] = v1.asInstanceOf[ParameterModifier[U]] } /** * Determines when the given effect is triggered off. Required * @param toggleOff * A valid Trigger Event * @return * applied trigger modifier to builder */ def toggleOffValue(toggleOff: TriggerEvent*): EffectParameterBuilder[T with ToggleOffParam] = { val npm = toggleOff.map { t => { new ParameterModifier[TriggerEvent] { override protected[this] val parameterToModify: TriggerEvent = t override lazy val parameter: Try[EffectParameter] = Success(EffectParameter.Trigger(t)) } } } val pp: EffectParameterBuilder[T with ToggleOffParam] = EffectParameterBuilder( ingredients ++ npm) EffectParameterBuilder(ingredients ++ npm) } /** * Optional value that flags this effect as using magnitude * @return * applied magnitude to builder. 
*/ def addMagnitude(): EffectParameterBuilder[T] = { val npm = new ParameterModifier[Numbers] { override protected[this] val parameterToModify: Numbers = Numbers.Magnitude override lazy val parameter: Try[EffectParameter] = Success(EffectParameter.Magnitude) } EffectParameterBuilder(ingredients :+ npm) } /** * Denotes this is a DC check, effect or condition * @return * applied DC to the builder */ def addDifficultyCheck(): EffectParameterBuilder[T] = { val npm = new ParameterModifier[Numbers] { override protected[this] val parameterToModify: Numbers = Numbers.DifficultyCheck override lazy val parameter: Try[EffectParameter] = Success(EffectParameter.DifficultyCheck) } EffectParameterBuilder(ingredients :+ npm) } /** * Required. Sets the BonusType of the effect for purposes of stacking. * @param bt * A valid BonusType * @return * applied BonusType to the builder */ def addBonusType(bt: BonusType): EffectParameterBuilder[T with BonusTypeParam] = { val npm = new ParameterModifier[BonusType] { override protected[this] val parameterToModify: BonusType = bt override lazy val parameter: Try[EffectParameter] = Success(EffectParameter.BonusType(bt)) } EffectParameterBuilder(ingredients :+ npm) } /** * Builds the ParameterModifier set if all required parameters are set. * @param ev * validation evidence for the builder. * @return * A valid EffectParameterList containing all ParameterModifiers. 
*/ def build(implicit ev: T =:= FullPizza): EffectParameterList = EffectParameterList(ingredients) } object EffectParameterBuilder { def apply(): EffectParameterBuilder[EffectParams.EmptyParameters] = apply[EffectParams.EmptyParameters](Seq()) def apply[T <: EffectParams](ingredients: Seq[ParameterModifier[_]]): EffectParameterBuilder[T] = new EffectParameterBuilder[T](ingredients) sealed trait EffectParams object EffectParams { type FullPizza = EmptyParameters with ToggleOnParam with ToggleOffParam with BonusTypeParam sealed trait EmptyParameters extends EffectParams sealed trait ToggleOnParam extends EffectParams sealed trait ToggleOffParam extends EffectParams sealed trait BonusTypeParam extends EffectParams } // def makePizzaWithOptional: EffectParameterList[Int, EffectParameter] = // EffectParameterBuilder[Int, EffectParameter]() // .toggleOnValue("mozzarella") // .addBonusType // .addMagnitude("Some Option") // .toggleOffValue("olives") // .build }
adarro/ddo-calc
subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/effect/EffectParameterBuilder.scala
Scala
apache-2.0
5,734
package lcs import leon._ import mem._ import lang._ import annotation._ import instrumentation._ import invariant._ import collection._ /** * A memoized implementation of computing the length of the * longest common subsequence between two sequences. Here, * the sequences are represented as integer arrays * whose elements can be looked up in unit time and zero memory allocations. * The lookup function is not verified by the algorithm (and so is marked as @extern), * as it uses mutable variables and arrays. * Rest of the implementation for computing the optimal length using a recurrence * relation is purely functional and uses memoization. **/ object LongestCommonSubsequence { @ignore var xstring = Array[BigInt]() @ignore var ystring = Array[BigInt]() @extern def lookup(i: BigInt, j: BigInt) = { (xstring(i.toInt), ystring(j.toInt)) } ensuring (_ => steps <= 1) // deps and it's lemmas def deps(i: BigInt, j: BigInt): Boolean = { require(i >= 0 && j >= 0) cached(lcs(i, j)) && (if (i <= 0 && j <= 0) true else if (i <= 0) deps(i, j - 1) else if (j <= 0) deps(i - 1, j) else deps(i - 1, j) && deps(i, j - 1)) } @invisibleBody @traceInduct def depsMono(i: BigInt, j: BigInt, st1: Set[Fun[BigInt]], st2: Set[Fun[BigInt]]) = { require(i >= 0 && j >= 0) (st1.subsetOf(st2) && (deps(i, j) in st1)) ==> (deps(i, j) in st2) } holds @traceInduct def depsLem(i: BigInt, j: BigInt, m: BigInt, n: BigInt) = { require(i >= 0 && j >= 0 && m >= 0 && n >= 0) (i <= m && j <= n && deps(m, n)) ==> deps(i, j) } holds @invstate @memoize @invisibleBody def lcs(i: BigInt, j: BigInt): BigInt = { require((i >=0 && j >= 0) && (i == 0 || deps(i - 1, j)) && (j == 0 || deps(i, j-1))) if (i == 0 || j == 0) BigInt(0) else { val (xi, yj) = lookup(i, j) if (xi == yj) lcs(i - 1, j - 1) + 1 else { val s1 = lcs(i - 1, j) val s2 = lcs(i, j - 1) if (s1 >= s2) s1 else s2 } } } ensuring (_ => steps <= ?) 
@invisibleBody def invoke(i: BigInt, j: BigInt, n: BigInt) = { require((i >=0 && j >= 0 && n >= j) && (i == 0 || deps(i - 1, j)) && (j == 0 || deps(i, j-1))) lcs(i, j) } ensuring (res => { val in = inSt[BigInt] val out = outSt[BigInt] (i == 0 || (depsMono(i - 1, j, in, out) && depsMono(i - 1, n, in, out))) && (j == 0 || depsMono(i, j - 1, in, out)) && deps(i, j) && steps <= ? }) /** * Given a m x n DP problem, the following function solves the subproblems by traversing the problem space * from right to left, and bottom to top. * @param m - number of rows remaining * @param n - max. number of columns * @param j - number of columns remaining (initially set to n) * @result returns a list of solutions for each sub-problem (the size of the resulting list will be quadratic) */ def bottomup(m: BigInt, j: BigInt, n: BigInt): List[BigInt] = { require(0 <= m && 0 <= j && j <= n) if (m == 0 && j == 0) { Cons(invoke(m, j, n), Nil[BigInt]()) } else if(j == 0) { val tail = bottomup(m - 1, n, n) Cons(invoke(m, j, n), tail) } else { val tail = bottomup(m, j - 1, n) Cons(invoke(m, j, n), tail) } } ensuring {_ => bottomUpPost(m, j, n) && steps <= ? * (m * n) + ? * m + ? * j + ? } @invisibleBody def bottomUpPost(m: BigInt, j: BigInt, n: BigInt): Boolean = { require(m >= 0 && n >= j && j >= 0) (m == 0 || (deps(m - 1, n) && (j == n || depsLem(m - 1, j + 1, m - 1, n)))) && deps(m, j) && depsLem(m, 0, m, j) } /** * The returned list has the solution to all the sub-problems of the dynammic progamming * algrithm. Its size if quadratic in this case. * The length of the longest common subsequence between the sequences: xstring of length m and * ystring of length n is given by first entry of the returned list. **/ def lcsSols(m: BigInt, n: BigInt): List[BigInt] = { require(0 <= m && 0 <= n) bottomup(m, n, n) } ensuring(_ => steps <= ? * (m * n) + ? * m + ? * n + ?) }
regb/leon
testcases/benchmarks/steps/LongestCommonSubsequence.scala
Scala
gpl-3.0
4,137
package com.akka.ui.chat.server class ChatServer { }
DuCalixte/LearningScalaWithNetworking
SimpleUIClientServerChat/src/com/akka/ui/chat/server/ChatServer.scala
Scala
mit
54
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.admin import scala.collection._ import org.I0Itec.zkclient.ZkClient import org.I0Itec.zkclient.exception.ZkNodeExistsException import org.apache.zookeeper.data.ACL import joptsimple.OptionParser import kafka.common.AdminCommandFailedException import kafka.common.TopicAndPartition import kafka.utils._ import org.apache.zookeeper.server.auth.DigestAuthenticationProvider import org.apache.zookeeper.data.Id import java.util.ArrayList import org.apache.zookeeper.ZooDefs object PreferredReplicaLeaderElectionCommand extends Logging { def main(args: Array[String]): Unit = { val parser = new OptionParser val jsonFileOpt = parser.accepts("path-to-json-file", "The JSON file with the list of partitions " + "for which preferred replica leader election should be done, in the following format - \\n" + "{\\"partitions\\":\\n\\t[{\\"topic\\": \\"foo\\", \\"partition\\": 1},\\n\\t {\\"topic\\": \\"foobar\\", \\"partition\\": 2}]\\n}\\n" + "Defaults to all existing partitions") .withRequiredArg .describedAs("list of partitions for which preferred replica leader election needs to be triggered") .ofType(classOf[String]) val zkConnectOpt = parser.accepts("zookeeper", "REQUIRED: The connection string for 
the zookeeper connection in the " + "form host:port. Multiple URLS can be given to allow fail-over.") .withRequiredArg .describedAs("urls") .ofType(classOf[String]) if(args.length == 0) CommandLineUtils.printUsageAndDie(parser, "This tool causes leadership for each partition to be transferred back to the 'preferred replica'," + " it can be used to balance leadership among the servers.") val options = parser.parse(args : _*) CommandLineUtils.checkRequiredArgs(parser, options, zkConnectOpt) val zkConnect = options.valueOf(zkConnectOpt) var zkClient: ZkClient = null try { zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer) zkClient.addAuthInfo("digest", "bonc:bonc12@$".getBytes) val acl = new ArrayList[ACL]() val idAdmin = new Id("digest",DigestAuthenticationProvider.generateDigest("bonc:bonc12@$")) acl.add(new ACL(ZooDefs.Perms.ALL, idAdmin)) val partitionsForPreferredReplicaElection = if (!options.has(jsonFileOpt)) ZkUtils.getAllPartitions(zkClient) else parsePreferredReplicaElectionData(Utils.readFileAsString(options.valueOf(jsonFileOpt))) val preferredReplicaElectionCommand = new PreferredReplicaLeaderElectionCommand(zkClient, partitionsForPreferredReplicaElection ,acl) preferredReplicaElectionCommand.moveLeaderToPreferredReplica() println("Successfully started preferred replica election for partitions %s".format(partitionsForPreferredReplicaElection)) } catch { case e: Throwable => println("Failed to start preferred replica election") println(Utils.stackTrace(e)) } finally { if (zkClient != null) zkClient.close() } } def parsePreferredReplicaElectionData(jsonString: String): immutable.Set[TopicAndPartition] = { Json.parseFull(jsonString) match { case Some(m) => m.asInstanceOf[Map[String, Any]].get("partitions") match { case Some(partitionsList) => val partitionsRaw = partitionsList.asInstanceOf[List[Map[String, Any]]] val partitions = partitionsRaw.map { p => val topic = p.get("topic").get.asInstanceOf[String] val partition = 
p.get("partition").get.asInstanceOf[Int] TopicAndPartition(topic, partition) } val duplicatePartitions = Utils.duplicates(partitions) val partitionsSet = partitions.toSet if (duplicatePartitions.nonEmpty) throw new AdminOperationException("Preferred replica election data contains duplicate partitions: %s".format(duplicatePartitions.mkString(","))) partitionsSet case None => throw new AdminOperationException("Preferred replica election data is empty") } case None => throw new AdminOperationException("Preferred replica election data is empty") } } def writePreferredReplicaElectionData(zkClient: ZkClient, partitionsUndergoingPreferredReplicaElection: scala.collection.Set[TopicAndPartition], acl: java.util.List[ACL]) { val zkPath = ZkUtils.PreferredReplicaLeaderElectionPath val partitionsList = partitionsUndergoingPreferredReplicaElection.map(e => Map("topic" -> e.topic, "partition" -> e.partition)) val jsonData = Json.encode(Map("version" -> 1, "partitions" -> partitionsList)) try { ZkUtils.createPersistentPath(zkClient, zkPath, jsonData, acl) info("Created preferred replica election path with %s".format(jsonData)) } catch { case nee: ZkNodeExistsException => val partitionsUndergoingPreferredReplicaElection = PreferredReplicaLeaderElectionCommand.parsePreferredReplicaElectionData(ZkUtils.readData(zkClient, zkPath)._1) throw new AdminOperationException("Preferred replica leader election currently in progress for " + "%s. 
Aborting operation".format(partitionsUndergoingPreferredReplicaElection)) case e2: Throwable => throw new AdminOperationException(e2.toString) } } } class PreferredReplicaLeaderElectionCommand(zkClient: ZkClient, partitions: scala.collection.Set[TopicAndPartition], acl: java.util.List[ACL]) extends Logging { def moveLeaderToPreferredReplica() = { try { val validPartitions = partitions.filter(p => validatePartition(zkClient, p.topic, p.partition)) PreferredReplicaLeaderElectionCommand.writePreferredReplicaElectionData(zkClient, validPartitions, acl) } catch { case e: Throwable => throw new AdminCommandFailedException("Admin command failed", e) } } def validatePartition(zkClient: ZkClient, topic: String, partition: Int): Boolean = { // check if partition exists val partitionsOpt = ZkUtils.getPartitionsForTopics(zkClient, List(topic)).get(topic) partitionsOpt match { case Some(partitions) => if(partitions.contains(partition)) { true } else { error("Skipping preferred replica leader election for partition [%s,%d] ".format(topic, partition) + "since it doesn't exist") false } case None => error("Skipping preferred replica leader election for partition " + "[%s,%d] since topic %s doesn't exist".format(topic, partition, topic)) false } } }
roadboy/KafkaACL
core/src/main/scala/kafka/admin/PreferredReplicaLeaderElectionCommand.scala
Scala
apache-2.0
7,479
object Test { private case object FooA def main(argv : Array[String]) : Unit = { Console.println(FooA) } }
som-snytt/dotty
tests/run/t601.scala
Scala
apache-2.0
118
/*********************************************************************** * Copyright (c) 2013-2016 Commonwealth Computer Research, Inc. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Apache License, Version 2.0 * which accompanies this distribution and is available at * http://www.opensource.org/licenses/apache2.0.php. *************************************************************************/ package org.locationtech.geomesa.tools.export import java.io.{StringReader, StringWriter} import org.geotools.data.DataUtilities import org.geotools.data.collection.ListFeatureCollection import org.geotools.data.simple.SimpleFeatureCollection import org.junit.runner.RunWith import org.locationtech.geomesa.features.ScalaSimpleFeature import org.locationtech.geomesa.tools.export.formats.DelimitedExporter import org.locationtech.geomesa.tools.ingest.AutoIngestDelimited import org.locationtech.geomesa.tools.utils.DataFormats import org.locationtech.geomesa.tools.utils.DataFormats._ import org.locationtech.geomesa.utils.geotools.{GeoToolsDateFormat, SimpleFeatureTypes} import org.locationtech.geomesa.utils.text.WKTUtils import org.specs2.mutable.Specification import org.specs2.runner.JUnitRunner import scala.collection.JavaConversions._ import scala.collection.JavaConverters._ @RunWith(classOf[JUnitRunner]) class DelimitedExportImportTest extends Specification { val dt1 = java.util.Date.from(java.time.LocalDateTime.parse("2016-01-01T00:00:00.000Z", GeoToolsDateFormat).toInstant(java.time.ZoneOffset.UTC)) val dt2 = java.util.Date.from(java.time.LocalDateTime.parse("2016-01-02T00:00:00.000Z", GeoToolsDateFormat).toInstant(java.time.ZoneOffset.UTC)) val pt1 = WKTUtils.read("POINT(1 0)") val pt2 = WKTUtils.read("POINT(0 2)") def export(features: SimpleFeatureCollection, format: DataFormat): String = { val writer = new StringWriter() val export = new DelimitedExporter(writer, format, None, true) export.export(features) 
export.close() writer.toString } "Delimited export import" should { "export and import simple schemas" >> { val sft = SimpleFeatureTypes.createType("tools", "name:String,dtg:Date,*geom:Point:srid=4326") val features = List( new ScalaSimpleFeature("1", sft, Array("name1", dt1, pt1)), new ScalaSimpleFeature("2", sft, Array("name2", dt2, pt2)) ) val fc = new ListFeatureCollection(sft, features) "in tsv" >> { val format = DataFormats.Tsv val results = export(fc, format) val reader = AutoIngestDelimited.getCsvFormat(format).parse(new StringReader(results)) try { val (newSft, newFeatures) = AutoIngestDelimited.createSimpleFeatures("tools", reader.iterator()) SimpleFeatureTypes.encodeType(newSft) mustEqual SimpleFeatureTypes.encodeType(sft) newFeatures.map(DataUtilities.encodeFeature).toList mustEqual features.map(DataUtilities.encodeFeature) } finally { reader.close() } } "in csv" >> { val format = DataFormats.Csv val results = export(fc, format) val reader = AutoIngestDelimited.getCsvFormat(format).parse(new StringReader(results)) try { val (newSft, newFeatures) = AutoIngestDelimited.createSimpleFeatures("tools", reader.iterator()) SimpleFeatureTypes.encodeType(newSft) mustEqual SimpleFeatureTypes.encodeType(sft) newFeatures.map(DataUtilities.encodeFeature).toList mustEqual features.map(DataUtilities.encodeFeature) } finally { reader.close() } } } "export and import lists and maps" >> { val sft = SimpleFeatureTypes.createType("tools", "name:String,fingers:List[Int],toes:Map[String,Int],dtg:Date,*geom:Point:srid=4326") val features = List( new ScalaSimpleFeature("1", sft, Array("name1", List(1, 2).asJava, Map("one" -> 1, "1" -> 0).asJava, dt1, pt1)), new ScalaSimpleFeature("2", sft, Array("name2", List(2, 1).asJava, Map("two" -> 2, "2" -> 0).asJava, dt1, pt1)) ) val fc = new ListFeatureCollection(sft, features) "in tsv" >> { val format = DataFormats.Tsv val results = export(fc, format) val reader = AutoIngestDelimited.getCsvFormat(format).parse(new StringReader(results)) 
val (newSft, newFeatures) = try { val (newSft, newFeatures) = AutoIngestDelimited.createSimpleFeatures("tools", reader.iterator()) (newSft, newFeatures.toList) } finally { reader.close() } SimpleFeatureTypes.encodeType(newSft) mustEqual SimpleFeatureTypes.encodeType(sft) forall(0 until sft.getAttributeCount) { i => newFeatures.map(_.getAttribute(i)) mustEqual features.map(_.getAttribute(i)) } } "in csv" >> { val format = DataFormats.Csv val results = export(fc, format) val reader = AutoIngestDelimited.getCsvFormat(format).parse(new StringReader(results)) val (newSft, newFeatures) = try { val (newSft, newFeatures) = AutoIngestDelimited.createSimpleFeatures("tools", reader.iterator()) (newSft, newFeatures.toList) } finally { reader.close() } SimpleFeatureTypes.encodeType(newSft) mustEqual SimpleFeatureTypes.encodeType(sft) forall(0 until sft.getAttributeCount) { i => newFeatures.map(_.getAttribute(i)) mustEqual features.map(_.getAttribute(i)) } } } } }
MutahirKazmi/geomesa
geomesa-tools/src/test/scala/org/locationtech/geomesa/tools/export/DelimitedExportImportTest.scala
Scala
apache-2.0
5,574
package it.codingjam.lagioconda.config

import it.codingjam.lagioconda.{ChromosomeOps, GeneMapping}
import it.codingjam.lagioconda.ChromosomeOps.CombineChromosome
import it.codingjam.lagioconda.ga.{CrossoverPointLike, MutationPointLike, RandomCrossoverPoint, RandomMutationPoint}
import it.codingjam.lagioconda.population.Population
import it.codingjam.lagioconda.selection.{SelectionFunction, WheelSelection}

/** Sizing of one GA population: total individuals, how many elites are carried
  * over unchanged, genes per chromosome, and the bit-offset mapping of a gene. */
case class PopulationConfig(size: Int, eliteCount: Int, numberOfGenes: Int, geneMapping: GeneMapping)

/** Mutation settings.
  * `chance` is an integer rate (presumably a percent chance — TODO confirm against the
  * mutation operator), applied with the given point-selection `strategy`,
  * mutating `size` genes, `times` times per application. */
case class MutationConfig(chance: Int, strategy: MutationPointLike, size: Int, times: Int)

/** Genetic-operator bundle: mutation settings, crossover-point picker and the
  * chromosome-combination function used for crossover. */
case class AlgorithmConfig(mutation: MutationConfig, crossoverPoint: CrossoverPointLike, crossover: CombineChromosome)

/** Top-level run configuration. `alpha` is the 0-255 alpha channel used when
  * rendering genes as translucent shapes. */
case class Config(population: PopulationConfig,
                  alpha: Int,
                  algorithm: AlgorithmConfig,
                  selection: SelectionFunction,
                  hillClimb: HillClimbConfig)

/** Hill-climbing phase settings. A climb is triggered from fitness-slope
  * observations (`slopeHeight` over a window of `slopeSize` generations —
  * TODO confirm against the hill-climb implementation). */
case class HillClimbConfig(active: Boolean,
                           slopeHeight: Double,
                           slopeSize: Int,
                           addGene: Boolean,
                           fullGeneHillClimbChance: Int,
                           randomGene: Boolean,
                           lastGene: Boolean)

/** Preset gene bit-layouts. Offsets are cumulative bit positions for
  * x, y, radius, red, green, blue within a gene. */
case object GeneMappingConfig {
  val Default = GeneMapping(mX = 8, mY = 16, mR = 24, mRed = 32, mGreen = 40, mBlue = 48)
  // Fewer bits for the radius field (4 instead of 8), shrinking the colour offsets accordingly.
  val SmallRadius = GeneMapping(mX = 8, mY = 16, mR = 20, mRed = 28, mGreen = 36, mBlue = 44)
}

/** Preset population sizings. */
case object PopulationConfig {
  // Classic GA: many genes per chromosome.
  val Ga = PopulationConfig(size = Population.Size, eliteCount = Population.EliteCount, numberOfGenes = 250, geneMapping = GeneMappingConfig.SmallRadius)
  // VecGen-style: start from a single gene (genes are added during hill climbing).
  val VecGen = PopulationConfig(size = Population.Size, eliteCount = Population.EliteCount, numberOfGenes = 1, geneMapping = GeneMappingConfig.Default)
}

/** Preset operator bundles; both use random crossover points and uniform crossover. */
case object AlgorithmConfig {
  val Default = AlgorithmConfig(mutation = MutationConfig.Default, crossoverPoint = new RandomCrossoverPoint, crossover = ChromosomeOps.uniformCrossover)
  val GaWithHillClimb = AlgorithmConfig(mutation = MutationConfig.GaWithHillClimb, crossoverPoint = new RandomCrossoverPoint, crossover = ChromosomeOps.uniformCrossover)
}

/** Preset mutation settings; the hill-climb variant mutates one time fewer. */
case object MutationConfig {
  val Default = MutationConfig(chance = 5, strategy = new RandomMutationPoint, size = 1, times = 4)
  val GaWithHillClimb = MutationConfig(chance = 5, strategy = new RandomMutationPoint, size = 1, times = 3)
}

/** Preset hill-climb settings, derived from `Default` via `copy`. */
case object HillClimb {
  val Default = HillClimbConfig(active = true,
                                slopeHeight = 0.0001,
                                slopeSize = 200,
                                addGene = false,
                                fullGeneHillClimbChance = 0,
                                randomGene = true,
                                lastGene = false)
  val Off = Default.copy(active = false)
  // Grows chromosomes over time and always climbs on the most recently added gene.
  val VecGenLike = Default.copy(addGene = true, slopeHeight = 0.0001, slopeSize = 500, fullGeneHillClimbChance = 5, randomGene = false, lastGene = true)
}

/** Ready-made top-level configurations. */
case object Config {
  // Plain GA, no hill climbing, fully opaque shapes.
  val VanillaGa = Config(population = PopulationConfig.Ga,
                         alpha = 255,
                         algorithm = AlgorithmConfig.Default,
                         selection = new WheelSelection,
                         hillClimb = HillClimb.Off)
  // GA plus default hill climbing.
  val GaWithHillClimb = Config(population = PopulationConfig.Ga,
                               alpha = 255,
                               algorithm = AlgorithmConfig.GaWithHillClimb,
                               selection = new WheelSelection,
                               hillClimb = HillClimb.Default)
  // Single-gene start with gene-adding hill climb and translucent shapes.
  val VecGenLike = Config(population = PopulationConfig.VecGen,
                          alpha = 200,
                          algorithm = AlgorithmConfig.Default,
                          selection = new WheelSelection,
                          hillClimb = HillClimb.VecGenLike)
}
coding-jam/lagioconda
common/src/main/scala/it/codingjam/lagioconda/Config.scala
Scala
apache-2.0
4,168
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.util.random

import java.util.Random

import cern.jet.random.Poisson
import cern.jet.random.engine.DRand

/**
 * A pseudorandom sampler. It is possible to change the sampled item type. For example, we might
 * want to add weights for stratified sampling or importance sampling. Should only use
 * transformations that are tied to the sampler and cannot be applied after sampling.
 *
 * @tparam T item type
 * @tparam U sampled item type
 */
trait RandomSampler[T, U] extends Pseudorandom with Cloneable with Serializable {

  /** take a random sample */
  def sample(items: Iterator[T]): Iterator[U]

  override def clone: RandomSampler[T, U] =
    throw new NotImplementedError("clone() is not implemented.")
}

/**
 * A sampler based on Bernoulli trials.
 *
 * @param lb lower bound of the acceptance range
 * @param ub upper bound of the acceptance range
 * @param complement whether to use the complement of the range specified, default to false
 * @tparam T item type
 */
class BernoulliSampler[T](lb: Double, ub: Double, complement: Boolean = false)
    (implicit random: Random = new XORShiftRandom)
  extends RandomSampler[T, T] {

  /** Convenience constructor sampling the range [0, ratio). */
  def this(ratio: Double)(implicit random: Random = new XORShiftRandom) = this(0.0d, ratio)(random)

  override def setSeed(seed: Long) = random.setSeed(seed)

  override def sample(items: Iterator[T]): Iterator[T] = {
    items.filter { item =>
      val x = random.nextDouble()
      // Keep items whose draw falls in [lb, ub); XOR flips the decision when sampling
      // the complement of the range.
      (x >= lb && x < ub) ^ complement
    }
  }

  // Bug fix: previously `complement` was dropped here, so cloning a complemented
  // sampler produced one that accepted the opposite set of items.
  override def clone = new BernoulliSampler[T](lb, ub, complement)
}

/**
 * A sampler based on values drawn from Poisson distribution.
 *
 * @param mean mean of the Poisson distribution (expected number of times each item is emitted)
 * @tparam T item type
 */
class PoissonSampler[T](mean: Double)
    (implicit var poisson: Poisson = new Poisson(mean, new DRand)) extends RandomSampler[T, T] {

  // DRand only accepts an Int seed, so the Long seed is truncated.
  override def setSeed(seed: Long) {
    poisson = new Poisson(mean, new DRand(seed.toInt))
  }

  override def sample(items: Iterator[T]): Iterator[T] = {
    // Emit each item `count` times, where `count` is a Poisson draw (sampling with replacement).
    items.flatMap { item =>
      val count = poisson.nextInt()
      if (count == 0) {
        Iterator.empty
      } else {
        Iterator.fill(count)(item)
      }
    }
  }

  override def clone = new PoissonSampler[T](mean)
}
cloudera/spark
core/src/main/scala/org/apache/spark/util/random/RandomSampler.scala
Scala
apache-2.0
3,033
package dregex

import dregex.impl.RegexTree
import dregex.impl.CharInterval
import dregex.impl.Normalization
import scala.collection.immutable.Seq

/**
 * The purpose of this class is to enforce that set operation between regular expressions are only done when it is
 * legal to do so, that is, when the regex are compatible.
 *
 * The way this is enforced is that every compiled regular expression contains a reference to a [[Universe]], and
 * only expressions with the same universe are allowed to mix in set operation.
 *
 * The same [[Universe]] ensures the same "alphabet" and [[Normalization]] rules. Regular expressions compiled as a
 * group will always have the same universe.
 *
 * In general, dealing with this class or calling the constructor is not necessary; a call to one of the `compile`
 * methods is simpler and more direct. However, there are cases in which the intermediate [[ParsedRegex]]s are
 * needed. Most notably, when caching [[CompiledRegex]] instances (which are in general more expensive to create).
 */
class Universe(parsedTrees: Seq[RegexTree.Node], val normalization: Normalization) {

  import RegexTree._

  // Shared alphabet: every range appearing in any of the parsed trees, decomposed into
  // non-overlapping intervals so set operations across the universe are well-defined.
  private[dregex] val alphabet: Map[AbstractRange, Seq[CharInterval]] = {
    CharInterval.calculateNonOverlapping(parsedTrees.flatMap(t => collect(t)))
  }

  /**
   * Regular expressions can have character classes and wildcards. In order to produce a NFA, they should be expanded
   * to disjunctions. As the base alphabet is Unicode, just adding a wildcard implies a disjunction of more than one
   * million code points. Same happens with negated character classes or normal classes with large ranges.
   *
   * To prevent this, the sets are not expanded to all characters individually, but only to disjoint intervals.
   *
   * Example:
   *
   * [abc]     -> a-c
   * [^efg]    -> 0-c|h-MAX
   * mno[^efg] -> def(0-c|h-l|m|n|o|p-MAX)
   * .         -> 0-MAX
   *
   * Care must be taken when the regex is meant to be used for an operation with another regex (such as intersection
   * or difference). In this case, the sets must be disjoint across all the "universe"
   *
   * This method collects the interval, so they can then be made disjoint.
   */
  private[dregex] def collect(ast: Node): Seq[AbstractRange] = ast match {
    // Lookaround is also a ComplexPart, order important.
    // A lookaround additionally contributes Wildcard, since its condition may match anything.
    case Lookaround(dir, cond, value) => collect(value) :+ Wildcard
    case complex: ComplexPart => complex.values.flatMap(collect)
    case range: AbstractRange => Seq(range)
    // NOTE(review): this case is reachable only if CharSet is neither a ComplexPart nor an
    // AbstractRange — confirm against the RegexTree hierarchy, otherwise it is dead code.
    case CharSet(ranges) => ranges
  }
}

object Universe {
  // A universe with no trees and no normalization; compatible only with itself.
  val Empty = new Universe(Seq(), Normalization.NoNormalization)
}
marianobarrios/dregex
src/main/scala/dregex/Universe.scala
Scala
bsd-2-clause
2,701
package org.jetbrains.sbt
package project.data.service

import java.io.File
import java.util

import com.intellij.openapi.externalSystem.model.DataNode
import com.intellij.openapi.externalSystem.model.project.ProjectData
import com.intellij.openapi.externalSystem.service.project.IdeModifiableModelsProvider
import com.intellij.openapi.module.Module
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.io.FileUtil
import com.intellij.util.SystemProperties
import org.jetbrains.android.facet.{AndroidFacet, AndroidFacetType, AndroidRootUtil}
import org.jetbrains.plugins.scala.project.external.{AbstractDataService, AbstractImporter, Importer}
import org.jetbrains.sbt.project.data.AndroidFacetData

/**
 * External-system data service that imports [[AndroidFacetData]] nodes produced by the
 * sbt project resolver, creating/updating an IntelliJ [[AndroidFacet]] per module.
 *
 * @author Nikolay Obedin
 * @since 8/12/14.
 */
class AndroidFacetDataService extends AbstractDataService[AndroidFacetData, AndroidFacet](AndroidFacetData.Key) {
  override def createImporter(toImport: Seq[DataNode[AndroidFacetData]],
                              projectData: ProjectData,
                              project: Project,
                              modelsProvider: IdeModifiableModelsProvider): Importer[AndroidFacetData] =
    new AndroidFacetDataService.Importer(toImport, projectData, project, modelsProvider)
}

object AndroidFacetDataService {
  private class Importer(dataToImport: Seq[DataNode[AndroidFacetData]],
                         projectData: ProjectData,
                         project: Project,
                         modelsProvider: IdeModifiableModelsProvider)
    extends AbstractImporter[AndroidFacetData](dataToImport, projectData, project, modelsProvider) {

    // For every facet node that maps to an IDE module, ensure a facet exists and configure it.
    override def importData(): Unit =
      dataToImport.foreach { facetNode =>
        for {
          module <- getIdeModuleByNode(facetNode)
          facet = getOrCreateFacet(module)
        } {
          configureFacet(module, facet, facetNode.getData)
        }
      }

    // Reuse an existing Android facet on the module if present; otherwise create one.
    private def getOrCreateFacet(module: Module): AndroidFacet =
      Option(getModifiableFacetModel(module).getFacetByType(AndroidFacet.ID)).getOrElse(createFacet(module))

    // Creates a new facet named "Android" with the default configuration and registers it
    // on the module's modifiable facet model (committed later by the importer framework —
    // TODO confirm commit happens in AbstractImporter).
    private def createFacet(module: Module): AndroidFacet = {
      val model = getModifiableFacetModel(module)
      val facetType = new AndroidFacetType
      val facet = facetType.createFacet(module, "Android", facetType.createDefaultConfiguration(), null)
      model.addFacet(facet)
      facet
    }

    // Translates the resolved sbt paths into module-relative paths expected by the
    // Android facet configuration, and materialises the proguard config (if any) to disk.
    private def configureFacet(module: Module, facet: AndroidFacet, data: AndroidFacetData): Unit = {
      val configuration = facet.getConfiguration.getState

      val base = AndroidRootUtil.getModuleDirPath(module)
      // NOTE(review): FileUtil.getRelativePath may return null when no relative path
      // exists (e.g. different roots), which would yield the string "/null" — confirm
      // data paths are always under the module dir.
      def getRelativePath(f: File) =
        "/" + FileUtil.getRelativePath(base, FileUtil.toSystemIndependentName(f.getAbsolutePath), '/')

      configuration.GEN_FOLDER_RELATIVE_PATH_APT = getRelativePath(data.gen)
      configuration.GEN_FOLDER_RELATIVE_PATH_AIDL = getRelativePath(data.gen)
      configuration.MANIFEST_FILE_RELATIVE_PATH = getRelativePath(data.manifest)
      configuration.RES_FOLDER_RELATIVE_PATH = getRelativePath(data.res)
      configuration.ASSETS_FOLDER_RELATIVE_PATH = getRelativePath(data.assets)
      configuration.LIBS_FOLDER_RELATIVE_PATH = getRelativePath(data.libs)
      configuration.APK_PATH = getRelativePath(data.apk)

      // Reset the proguard file list before (re-)importing so stale entries do not accumulate.
      configuration.myProGuardCfgFiles = new util.ArrayList[String]()

      if (data.proguardConfig.nonEmpty) {
        // Write the sbt-provided proguard rules to a file under the project base dir
        // and enable proguard for this facet.
        val proguardFile = new File(module.getProject.getBasePath) / "proguard-sbt.txt"
        FileUtil.writeToFile(proguardFile, data.proguardConfig.mkString(SystemProperties.getLineSeparator))
        configuration.myProGuardCfgFiles.add(proguardFile.getCanonicalPath)
        configuration.RUN_PROGUARD = true
      }
    }
  }
}
jastice/intellij-scala
scala/integration/android/src/org/jetbrains/sbt/project/data/service/AndroidFacetDataService.scala
Scala
apache-2.0
3,718
package artisanal.pickle.maker

import models._
import parser._
import org.specs2._
import mutable._
import specification._
import scala.reflect.internal.pickling.ByteCodecs
import scala.tools.scalap.scalax.rules.scalasig._
import com.novus.salat.annotations.util._
import scala.reflect.ScalaSignature

/** Verifies that a hand-built ScalaSig for a case class with a single `Nothing`-typed
  * member parses identically to the compiler-generated signature. */
class NothingSpec extends mutable.Specification {

  "a ScalaSig for case class MyRecord_Nothing(l: Nothing)" should {
    "have the correct string" in {
      // Hand-construct the signature for: case class MyRecord_Nothing(l: Nothing)
      val generatedSig = new ScalaSig(List("case class"), List("models", "MyRecord_Nothing"), List(("l", "Nothing")))

      // Parse the compiler-produced signature (from the class annotation) and ours.
      val expected = SigParserHelper.parseByteCodeFromAnnotation(classOf[MyRecord_Nothing]).map(ScalaSigAttributeParsers.parse).get
      val actual = SigParserHelper.parseByteCodeFromMySig(generatedSig).map(ScalaSigAttributeParsers.parse).get

      // Compare textual renderings of the two parsed signatures.
      expected.toString === actual.toString
    }
  }
}
julianpeeters/artisanal-pickle-maker
src/test/scala/singleValueMember/NothingSpec.scala
Scala
apache-2.0
894
/*
 * Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
 */
package play.docs

import java.io.File
import java.util.concurrent.Callable

import play.api._
import play.api.http.FileMimeTypes
import play.api.mvc._
import play.api.routing.Router
import play.core._
import play.core.server._

import scala.concurrent.Future
import scala.util.Success

/**
 * Used to start the documentation server.
 */
class DocServerStart {

  /**
   * Builds a minimal Play application (empty router, no filters), starts it, and
   * launches a server whose web-command handler serves the documentation pages.
   *
   * Side effects: calls `Play.start` on the freshly created application before the
   * server is constructed.
   *
   * @param projectPath            root directory of the documentation project
   * @param buildDocHandler        handler that serves documentation page requests
   * @param translationReport      produces the translation report file on demand
   * @param forceTranslationReport regenerates the translation report file
   * @param port                   HTTP port to bind
   * @return the running, reloadable server
   */
  def start(projectPath: File, buildDocHandler: BuildDocHandler, translationReport: Callable[File],
    forceTranslationReport: Callable[File], port: java.lang.Integer): ReloadableServer = {

    // Minimal application: empty router, no HTTP filters, Test mode.
    val application: Application = {
      val environment = Environment(projectPath, this.getClass.getClassLoader, Mode.Test)
      val context = ApplicationLoader.createContext(environment)
      val components = new BuiltInComponentsFromContext(context) with NoHttpFiltersComponents {
        lazy val router = Router.empty
      }
      components.application
    }
    Play.start(application)

    val applicationProvider = new ApplicationProvider {

      implicit val ec = application.actorSystem.dispatcher
      implicit val fileMimeTypes = application.injector.instanceOf[FileMimeTypes]

      override def get = Success(application)

      // Request routing, in order of precedence:
      //   1. documentation pages handled by buildDocHandler,
      //   2. /@report (with ?force regenerating the report, then redirecting back),
      //   3. everything else redirects to /@documentation (the final orElse always
      //      produces Some, so no request falls through to the empty router).
      override def handleWebCommand(request: RequestHeader) =
        buildDocHandler.maybeHandleDocRequest(request).asInstanceOf[Option[Result]].orElse(
          if (request.path == "/@report") {
            if (request.getQueryString("force").isDefined) {
              forceTranslationReport.call()
              Some(Results.Redirect("/@report"))
            } else {
              Some(Results.Ok.sendFile(translationReport.call(), inline = true, fileName = _ => "report.html"))
            }
          } else None
        ).orElse(
          Some(Results.Redirect("/@documentation"))
        )
    }

    val config = ServerConfig(
      rootDir = projectPath,
      port = Some(port),
      mode = Mode.Test,
      properties = System.getProperties
    )
    val serverProvider: ServerProvider = ServerProvider.fromConfiguration(getClass.getClassLoader, config.configuration)
    val context = ServerProvider.Context(config, applicationProvider, application.actorSystem,
      application.materializer, stopHook = () => Future.successful(()))
    serverProvider.createServer(context)
  }

}
ktoso/playframework
framework/src/play-docs/src/main/scala/play/docs/DocServerStart.scala
Scala
apache-2.0
2,408
/*                     __                                               *\
**     ________ ___   / /  ___     Scala API                            **
**    / __/ __// _ | / /  / _ |    (c) 2006-2013, LAMP/EPFL             **
**  __\ \/ /__/ __ |/ /__/ __ |    http://scala-lang.org/               **
** /____/\___/_/ |_/____/_/ | |                                         **
**                          |/                                          **
\*                                                                      */

package scala
package collection
package generic

import scala.language.higherKinds

/** Companion-object factory base for ordered traversable collections: provides a
  * `CanBuildFrom` whose builders require an `Ordering` on the element type.
  *
  * @tparam CC the collection type constructor (e.g. a sorted sequence type)
  */
abstract class OrderedTraversableFactory[CC[X] <: Traversable[X] with GenericOrderedTraversableTemplate[X, CC]]
  extends GenericOrderedCompanion[CC] {

  /** A `CanBuildFrom` for `CC` that threads the implicit element ordering into the builder. */
  class GenericCanBuildFrom[A](implicit ord: Ordering[A]) extends CanBuildFrom[CC[_], A, CC[A]] {
    // Builder derived from the source collection (keeps the ordered-builder machinery).
    def apply(from: CC[_]) = from.genericOrderedBuilder[A]
    // Builder with no source collection.
    def apply = newBuilder[A]
  }

}
felixmulder/scala
src/library/scala/collection/generic/OrderedTraversableFactory.scala
Scala
bsd-3-clause
957
/*
 * Grammar of Graphics in Scala
 * Copyright (c) 2011, ggscala.org
 */
package org.ggscala.model

import scala.reflect.ClassTag
import org.ggscala.model.TypeCode._
import org.ggscala.model.Factor._

object DataColumn {

  /**
   * Associates a name (id) and type code with a DataVector. Provides
   * column metadata + data for MultiColumnSources.
   */
  trait DataColumn {
    var id : String
    val _type : TypeCode
    var data : DataVector[Any]
  }

  /**
   * An iterable sequence of arbitrary type backing each of the columns
   * provided by a MultiColumnSource
   */
  trait DataVector[+T <: Any] extends Seq[T] {
    // Concrete element type of this vector; refined by each implementation.
    type DataType
    /** Concatenate a data vector with this data vector. */
    def cbind( data:DataVector[DataType] )( implicit ev:ClassTag[DataType] ) : DataVector[DataType]
  }

  /** A DataVector which wraps a Seq */
  class IterableDataVector[T]( protected val values:Seq[T] ) extends DataVector[T] {
    override type DataType = T
    // Subclass hook: build a vector of the same concrete type from a raw Seq.
    protected def factory( vals:Seq[DataType] ) : DataVector[DataType] = new IterableDataVector(vals)
    override def iterator = values.iterator
    override def apply( idx:Int ) = values(idx)
    override def length = values.length
    // Concatenation: the cast is a no-op at runtime since DataType = T here.
    def cbind( data:DataVector[DataType] )( implicit ev:ClassTag[DataType] ) =
      factory( values.map(_.asInstanceOf[DataType]) ++ data.iterator )
  }

  /** A DataVector which is backed by a fully instantiated Array */
  class ArrayDataVector[T]( protected val values: Array[T] ) extends DataVector[T] {
    override type DataType = T
    // Subclass hook: build a vector of the same concrete type from a raw Array.
    protected def factory( vals:Array[DataType] ) : DataVector[DataType] = new ArrayDataVector(vals)
    override def iterator = values.iterator
    override def apply( idx:Int ) = values(idx)
    override def length = values.length
    def toArray = values
    // I'm positive this horrific bit of casting can be avoided, but I haven't been patient
    // enough to work out the right type signatures
    // but what this means in the end is that a StringVector can cbind a StringVector and produce a StringVector
    // ...and no new code is written
    // NOTE(review): the second cast assumes `data` is an ArrayDataVector — a non-array
    // DataVector argument would throw ClassCastException at runtime; confirm callers.
    def cbind( data:DataVector[DataType] )( implicit ev:ClassTag[DataType] ) =
      factory( Array.concat( values.asInstanceOf[Array[DataType]], data.asInstanceOf[ArrayDataVector[DataType]].values ) )
  }

  /** A specialization of DataVector for String. */
  class StringVector( values: Array[String] ) extends ArrayDataVector[String](values) {
    override type DataType = String
    // Overridden so cbind on a StringVector yields a StringVector, not a plain ArrayDataVector.
    protected override def factory( vals:Array[DataType] ) : DataVector[DataType] = new StringVector(vals)
  }

  /** A specialization of DataVector for Double. */
  class DoubleVector( values: Array[Double] ) extends ArrayDataVector[Double](values) {
    override type DataType = Double
    // Overridden so cbind on a DoubleVector yields a DoubleVector.
    protected override def factory( vals:Array[DataType] ) : DataVector[DataType] = new DoubleVector(vals)
  }

  /** Attempts to unmarshal a string array into a DataVector of the requested type.
    * NOTE: DoubleTypeCode parses each string with `toDouble` and will throw
    * NumberFormatException on non-numeric input. */
  def stringArrayToDataVector( values:Array[String], _type:TypeCode ) = _type match {
    case StringTypeCode => new StringVector( values )
    case DoubleTypeCode => new DoubleVector( values.map( _.toDouble ) )
    case FactorTypeCode => new FactorVector( values )
    case AnyTypeCode => new IterableDataVector[Any]( values )
  }

  /** Unmarshals an array of objects into a DataVector of the requested type.
    * The casts are unchecked: a mismatched _type vs. runtime array type fails at runtime. */
  def anyArrayToDataVector[_ <: Any]( values:Array[_], _type:TypeCode ) = _type match {
    case StringTypeCode => new StringVector( values.asInstanceOf[Array[String]] )
    case DoubleTypeCode => new DoubleVector( values.asInstanceOf[Array[Double]] )
    case FactorTypeCode => new FactorVector( values.asInstanceOf[Array[String]].map(_.toString) )
    case AnyTypeCode => new IterableDataVector[Any]( values )
  }
}
drkeoni/ggscala
src/main/scala/org/ggscala/model/DataColumn.scala
Scala
mit
3,818
package com.socrata.tileserver
package util

import scala.collection.JavaConverters._
import scala.language.implicitConversions

import com.rojoma.json.v3.io.JsonReader.fromString
import com.vividsolutions.jts.geom.{Coordinate, Geometry, GeometryFactory}
import no.ecc.vectortile.VectorTileDecoder
import org.apache.commons.codec.binary.Base64
import org.mockito.Matchers
import org.mockito.Mockito.{verify, when}
import org.scalatest.mockito.MockitoSugar
import org.scalatest.prop.PropertyChecks
import org.scalatest.{FunSuite, MustMatchers}

import TileEncoder.Feature

// scalastyle:off import.grouping
/** Property tests for [[TileEncoder]]: valid features round-trip through the vector-tile
  * encoding; invalid ones are dropped. Helpers `feature`/`encode` and the `ValidPoint`/
  * `InvalidPoint` generators come from TestBase / gen.Points (defined elsewhere). */
class TileEncoderTest extends TestBase with MockitoSugar {
  // Convenience widening of a (Byte, Byte) point to (Int, Int).
  implicit def byteToInt(pt: (Byte, Byte)): (Int, Int) = pt match {
    case (x: Byte, y: Byte) => (x.toInt, y.toInt)
  }

  // Converts a decoded vectortile feature into this project's (geometry, attributes) pair,
  // re-parsing attribute values as JSON for comparison.
  def convert(feature: VectorTileDecoder.Feature): Feature = {
    val geom = feature.getGeometry
    val attrs = feature.getAttributes.asScala.toMap mapValues { o: Object =>
      fromString(o.toString)
    }
    (geom, attrs)
  }

  test("Features are encoded as bytes only if they are valid") {
    import gen.Points._

    forAll { (pt0: ValidPoint, pt1: ValidPoint, pt2: InvalidPoint,
              attr0: (String, String), attr1: (String, String)) =>
      val decoder = new VectorTileDecoder
      val valid = Set(feature(pt0, 1, Map(attr0)), feature(pt1, 2, Map(attr1)))
      val invalid = Set(feature(pt2))

      // Encode a mix of valid and invalid features and decode the raw bytes back.
      val bytes = TileEncoder(invalid ++ valid).bytes
      val decoded = decoder.decode(bytes)
      decoded.getLayerNames must equal (Set("main").asJava)

      // Only the valid features must survive the round trip.
      val features = decoded.asScala.map(convert)
      features must have size (valid.size)
      valid foreach { features must contain (_) }
    }
  }

  test("Features are encoded as base64 bytes only if they are valid") {
    import gen.Points._

    forAll { (pt0: ValidPoint, pt1: ValidPoint, pt2: InvalidPoint,
              attr0: (String, String), attr1: (String, String)) =>
      val decoder = new VectorTileDecoder
      val valid = Set(feature(pt0, 1, Map(attr0)), feature(pt1, 2, Map(attr1)))
      val invalid = Set(feature(pt2))

      // Same as above, but going through the base64 representation first.
      val base64 = TileEncoder(invalid ++ valid).base64
      val bytes = Base64.decodeBase64(base64)
      val decoded = decoder.decode(bytes)
      decoded.getLayerNames must equal (Set("main").asJava)

      val features = decoded.asScala.map(convert)
      features must have size (valid.size)
      valid foreach { features must contain (_)}
    }
  }

  // Behavior is undefined for invalid features.
  test("toString includes all valid features") {
    import gen.Points._

    forAll { (pt0: ValidPoint, pt1: ValidPoint, pt2: ValidPoint,
              attr0: (String, String), attr1: (String, String)) =>
      val (k0, v0) = attr0
      val (k1, v1) = attr1
      val features = Set(feature(pt0, 1, Map(attr0)),
                         feature(pt1, 2, Map(attr1)),
                         feature(pt2, 1))

      val str = TileEncoder(features).toString

      // Every geometry and every (encoded) attribute key/value must appear in the rendering.
      features foreach { case (geom, _) =>
        str must include (geom.toString)
      }

      str must include (encode(k0))
      str must include (encode(v0))
      str must include (encode(k1))
      str must include (encode(v1))
    }
  }

  test("Polygons are encoded on separate layers") {
    import gen.Points._

    val factory = new GeometryFactory()

    forAll { (top: ValidPoint, pt: ValidPoint) =>
      val decoder = new VectorTileDecoder
      // A small triangle (first and last coordinates coincide to close the ring).
      val poly = factory.createPolygon(Array(new Coordinate(top.x, top.y),
                                             new Coordinate(top.x + 32, top.y - 32),
                                             new Coordinate(top.x - 32, top.y - 32),
                                             new Coordinate(top.x, top.y)))

      // Points land on "main", polygons on a dedicated "polygon" layer.
      val bytes: Array[Byte] = TileEncoder(Set(feature(pt), poly -> Map.empty)).bytes
      val decoded = decoder.decode(bytes)
      decoded.getLayerNames must equal (Set("main", "polygon").asJava)

      val features = decoded.asScala.map(convert)
      features must have size (2)
    }
  }
}
socrata-platform/tileserver
src/test/scala/com.socrata.tileserver/util/TileEncoderTest.scala
Scala
apache-2.0
4,276
/*
 * EtherNet/IP
 * Copyright (C) 2014 Kevin Herron
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

package com.digitalpetri.ethernetip.encapsulation.commands

import org.scalatest.FunSuite

/** Checks that an UnRegisterSession command survives an encode/decode round trip. */
class UnRegisterSessionTest extends FunSuite {

  test("UnRegisterSession is round-trip encodable/decodable") {
    val original = UnRegisterSession()

    // Encode, then decode the resulting buffer.
    val encoded = UnRegisterSession.encode(original)
    val roundTripped = UnRegisterSession.decode(encoded)

    // The decoded command must equal the one we started with.
    assert(roundTripped == original)
  }
}
digitalpetri/scala-ethernet-ip
enip-core/src/test/scala/com/digitalpetri/ethernetip/encapsulation/commands/UnRegisterSessionTest.scala
Scala
apache-2.0
1,086
/*
 * Copyright 2001-2015 Artima, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.scalactic.equalities

import org.scalactic.Equality

import scala.annotation.tailrec
import scala.collection.{GenSeq, mutable}
import scala.language.{higherKinds, implicitConversions}

/**
 * An [[Equality]] that allows the comparison of values nested in [[Map]]s using whatever Equality is
 * in scope for the contained key and value types.
 */
trait RecursiveMapEquality {
  implicit def recursiveMapEquality[K, V, MAP[k, v] <: collection.GenMap[k, v]](implicit eqK: Equality[K], eqV: Equality[V]): Equality[MAP[K, V]] =
    new Equality[MAP[K, V]] {
      def areEqual(mapA: MAP[K, V], other: Any): Boolean = {
        // Multiset-style matching: for each (k, v) of mapA, find and remove one entry of
        // mapB that is equal under the custom key and value Equalities. Removal prevents
        // one entry of mapB matching two entries of mapA. Quadratic, but order-insensitive
        // and correct when custom Equalities make several keys "equal".
        @tailrec
        def nextElement(seqA: GenSeq[(K,V)], mapB: mutable.Buffer[(_,_)]): Boolean = (seqA, mapB) match {
          // Both exhausted at the same time: every entry was matched exactly once.
          case (a, b) if a.isEmpty && b.isEmpty => true
          // One side exhausted first: sizes (after matching) differ.
          case (a, b) if a.isEmpty || b.isEmpty => false
          case (a, b) =>
            val elemA = a.head
            val index = b.indexWhere(kv => eqK.areEqual(elemA._1, kv._1) && eqV.areEqual(elemA._2, kv._2))
            if (index < 0) {
              false
            } else {
              // Consume the matched entry so it cannot be matched again.
              b.remove(index)
              nextElement(a.tail, b)
            }
        }

        other match {
          // Only another GenMap can compare equal; its buffer copy is mutated during matching.
          case mapB: collection.GenMap[_, _] => nextElement(mapA.toSeq, mapB.toBuffer)
          case _ => false
        }
      }
    }
}

object RecursiveMapEquality extends RecursiveMapEquality
SRGOM/scalatest
scalactic/src/main/scala/org/scalactic/equalities/RecursiveMapEquality.scala
Scala
apache-2.0
2,008
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution import org.apache.spark.rdd.RDD import org.apache.spark.sql.{execution, DataFrame, Row} import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans._ import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan, Range, Repartition, Sort, Union} import org.apache.spark.sql.catalyst.plans.physical._ import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper import org.apache.spark.sql.execution.columnar.{InMemoryRelation, InMemoryTableScanExec} import org.apache.spark.sql.execution.exchange.{EnsureRequirements, ReusedExchangeExec, ReuseExchange, ShuffleExchangeExec} import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, SortMergeJoinExec} import org.apache.spark.sql.functions._ import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types._ class PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper { import testImplicits._ setupTestData() private def testPartialAggregationPlan(query: LogicalPlan): Unit = { val planner = spark.sessionState.planner import 
planner._ val plannedOption = Aggregation(query).headOption val planned = plannedOption.getOrElse( fail(s"Could query play aggregation query $query. Is it an aggregation query?")) val aggregations = planned.collect { case n if n.nodeName contains "Aggregate" => n } // For the new aggregation code path, there will be four aggregate operator for // distinct aggregations. assert( aggregations.size == 2 || aggregations.size == 4, s"The plan of query $query does not have partial aggregations.") } test("count is partially aggregated") { val query = testData.groupBy('value).agg(count('key)).queryExecution.analyzed testPartialAggregationPlan(query) } test("count distinct is partially aggregated") { val query = testData.groupBy('value).agg(countDistinct('key)).queryExecution.analyzed testPartialAggregationPlan(query) } test("mixed aggregates are partially aggregated") { val query = testData.groupBy('value).agg(count('value), countDistinct('key)).queryExecution.analyzed testPartialAggregationPlan(query) } test("mixed aggregates with same distinct columns") { def assertNoExpand(plan: SparkPlan): Unit = { assert(plan.collect { case e: ExpandExec => e }.isEmpty) } withTempView("v") { Seq((1, 1.0, 1.0), (1, 2.0, 2.0)).toDF("i", "j", "k").createTempView("v") // one distinct column val query1 = sql("SELECT sum(DISTINCT j), max(DISTINCT j) FROM v GROUP BY i") assertNoExpand(query1.queryExecution.executedPlan) // 2 distinct columns val query2 = sql("SELECT corr(DISTINCT j, k), count(DISTINCT j, k) FROM v GROUP BY i") assertNoExpand(query2.queryExecution.executedPlan) // 2 distinct columns with different order val query3 = sql("SELECT corr(DISTINCT j, k), count(DISTINCT k, j) FROM v GROUP BY i") assertNoExpand(query3.queryExecution.executedPlan) } } test("sizeInBytes estimation of limit operator for broadcast hash join optimization") { def checkPlan(fieldTypes: Seq[DataType]): Unit = { withTempView("testLimit") { val fields = fieldTypes.zipWithIndex.map { case (dataType, index) => 
StructField(s"c${index}", dataType, true) } :+ StructField("key", IntegerType, true) val schema = StructType(fields) val row = Row.fromSeq(Seq.fill(fields.size)(null)) val rowRDD = sparkContext.parallelize(row :: Nil) spark.createDataFrame(rowRDD, schema).createOrReplaceTempView("testLimit") val planned = sql( """ |SELECT l.a, l.b |FROM testData2 l JOIN (SELECT * FROM testLimit LIMIT 1) r ON (l.a = r.key) """.stripMargin).queryExecution.sparkPlan val broadcastHashJoins = planned.collect { case join: BroadcastHashJoinExec => join } val sortMergeJoins = planned.collect { case join: SortMergeJoinExec => join } assert(broadcastHashJoins.size === 1, "Should use broadcast hash join") assert(sortMergeJoins.isEmpty, "Should not use sort merge join") } } val simpleTypes = NullType :: BooleanType :: ByteType :: ShortType :: IntegerType :: LongType :: FloatType :: DoubleType :: DecimalType(10, 5) :: DecimalType.SYSTEM_DEFAULT :: DateType :: TimestampType :: StringType :: BinaryType :: Nil withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "16434") { checkPlan(simpleTypes) } val complexTypes = ArrayType(DoubleType, true) :: ArrayType(StringType, false) :: MapType(IntegerType, StringType, true) :: MapType(IntegerType, ArrayType(DoubleType), false) :: StructType(Seq( StructField("a", IntegerType, nullable = true), StructField("b", ArrayType(DoubleType), nullable = false), StructField("c", DoubleType, nullable = false))) :: Nil withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "901617") { checkPlan(complexTypes) } } test("InMemoryRelation statistics propagation") { withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "81920") { withTempView("tiny") { testData.limit(3).createOrReplaceTempView("tiny") sql("CACHE TABLE tiny") val a = testData.as("a") val b = spark.table("tiny").as("b") val planned = a.join(b, $"a.key" === $"b.key").queryExecution.sparkPlan val broadcastHashJoins = planned.collect { case join: BroadcastHashJoinExec => join } val sortMergeJoins = 
planned.collect { case join: SortMergeJoinExec => join } assert(broadcastHashJoins.size === 1, "Should use broadcast hash join") assert(sortMergeJoins.isEmpty, "Should not use shuffled hash join") spark.catalog.clearCache() } } } test("SPARK-11390 explain should print PushedFilters of PhysicalRDD") { withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> "parquet") { withTempPath { file => val path = file.getCanonicalPath testData.write.parquet(path) val df = spark.read.parquet(path) df.createOrReplaceTempView("testPushed") withTempView("testPushed") { val exp = sql("select * from testPushed where key = 15").queryExecution.sparkPlan assert(exp.toString.contains("PushedFilters: [IsNotNull(key), EqualTo(key,15)]")) } } } } test("efficient terminal limit -> sort should use TakeOrderedAndProject") { val query = testData.select('key, 'value).sort('key).limit(2) val planned = query.queryExecution.executedPlan assert(planned.isInstanceOf[execution.TakeOrderedAndProjectExec]) assert(planned.output === testData.select('key, 'value).logicalPlan.output) } test("terminal limit -> project -> sort should use TakeOrderedAndProject") { val query = testData.select('key, 'value).sort('key).select('value, 'key).limit(2) val planned = query.queryExecution.executedPlan assert(planned.isInstanceOf[execution.TakeOrderedAndProjectExec]) assert(planned.output === testData.select('value, 'key).logicalPlan.output) } test("terminal limits that are not handled by TakeOrderedAndProject should use CollectLimit") { val query = testData.select('value).limit(2) val planned = query.queryExecution.sparkPlan assert(planned.isInstanceOf[CollectLimitExec]) assert(planned.output === testData.select('value).logicalPlan.output) } test("TakeOrderedAndProject can appear in the middle of plans") { val query = testData.select('key, 'value).sort('key).limit(2).filter('key === 3) val planned = query.queryExecution.executedPlan assert(planned.find(_.isInstanceOf[TakeOrderedAndProjectExec]).isDefined) } test("CollectLimit 
can appear in the middle of a plan when caching is used") { val query = testData.select('key, 'value).limit(2).cache() val planned = query.queryExecution.optimizedPlan.asInstanceOf[InMemoryRelation] assert(planned.cachedPlan.isInstanceOf[CollectLimitExec]) } test("TakeOrderedAndProjectExec appears only when number of limit is below the threshold.") { withSQLConf(SQLConf.TOP_K_SORT_FALLBACK_THRESHOLD.key -> "1000") { val query0 = testData.select('value).orderBy('key).limit(100) val planned0 = query0.queryExecution.executedPlan assert(planned0.find(_.isInstanceOf[TakeOrderedAndProjectExec]).isDefined) val query1 = testData.select('value).orderBy('key).limit(2000) val planned1 = query1.queryExecution.executedPlan assert(planned1.find(_.isInstanceOf[TakeOrderedAndProjectExec]).isEmpty) } } test("SPARK-23375: Cached sorted data doesn't need to be re-sorted") { val query = testData.select('key, 'value).sort('key.desc).cache() assert(query.queryExecution.optimizedPlan.isInstanceOf[InMemoryRelation]) val resorted = query.sort('key.desc) assert(resorted.queryExecution.optimizedPlan.collect { case s: Sort => s}.isEmpty) assert(resorted.select('key).collect().map(_.getInt(0)).toSeq == (1 to 100).reverse) // with a different order, the sort is needed val sortedAsc = query.sort('key) assert(sortedAsc.queryExecution.optimizedPlan.collect { case s: Sort => s}.size == 1) assert(sortedAsc.select('key).collect().map(_.getInt(0)).toSeq == (1 to 100)) } test("PartitioningCollection") { withTempView("normal", "small", "tiny") { testData.createOrReplaceTempView("normal") testData.limit(10).createOrReplaceTempView("small") testData.limit(3).createOrReplaceTempView("tiny") // Disable broadcast join withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { { val plan = sql( """ |SELECT * |FROM | normal JOIN small ON (normal.key = small.key) | JOIN tiny ON (small.key = tiny.key) """.stripMargin ).queryExecution.executedPlan val numExchanges = collect(plan) { case exchange: 
ShuffleExchangeExec => exchange }.length assert(numExchanges === 5) } { val plan = sql( """ |SELECT * |FROM | normal JOIN small ON (normal.key = small.key) | JOIN tiny ON (normal.key = tiny.key) """.stripMargin ).queryExecution.executedPlan // This second query joins on different keys: val numExchanges = collect(plan) { case exchange: ShuffleExchangeExec => exchange }.length assert(numExchanges === 5) } } } } test("collapse adjacent repartitions") { val doubleRepartitioned = testData.repartition(10).repartition(20).coalesce(5) def countRepartitions(plan: LogicalPlan): Int = plan.collect { case r: Repartition => r }.length assert(countRepartitions(doubleRepartitioned.queryExecution.analyzed) === 3) assert(countRepartitions(doubleRepartitioned.queryExecution.optimizedPlan) === 2) doubleRepartitioned.queryExecution.optimizedPlan match { case Repartition (numPartitions, shuffle, Repartition(_, shuffleChild, _)) => assert(numPartitions === 5) assert(shuffle === false) assert(shuffleChild) } } /////////////////////////////////////////////////////////////////////////// // Unit tests of EnsureRequirements for Exchange /////////////////////////////////////////////////////////////////////////// // When it comes to testing whether EnsureRequirements properly ensures distribution requirements, // there two dimensions that need to be considered: are the child partitionings compatible and // do they satisfy the distribution requirements? As a result, we need at least four test cases. 
private def assertDistributionRequirementsAreSatisfied(outputPlan: SparkPlan): Unit = { if (outputPlan.children.length > 1) { val childPartitionings = outputPlan.children.zip(outputPlan.requiredChildDistribution) .filter { case (_, UnspecifiedDistribution) => false case (_, _: BroadcastDistribution) => false case _ => true }.map(_._1.outputPartitioning) if (childPartitionings.map(_.numPartitions).toSet.size > 1) { fail(s"Partitionings doesn't have same number of partitions: $childPartitionings") } } outputPlan.children.zip(outputPlan.requiredChildDistribution).foreach { case (child, requiredDist) => assert(child.outputPartitioning.satisfies(requiredDist), s"$child output partitioning does not satisfy $requiredDist:\\n$outputPlan") } } test("EnsureRequirements with child partitionings with different numbers of output partitions") { val clustering = Literal(1) :: Nil val distribution = ClusteredDistribution(clustering) val inputPlan = DummySparkPlan( children = Seq( DummySparkPlan(outputPartitioning = HashPartitioning(clustering, 1)), DummySparkPlan(outputPartitioning = HashPartitioning(clustering, 2)) ), requiredChildDistribution = Seq(distribution, distribution), requiredChildOrdering = Seq(Seq.empty, Seq.empty) ) val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan) assertDistributionRequirementsAreSatisfied(outputPlan) } test("EnsureRequirements with compatible child partitionings that do not satisfy distribution") { val distribution = ClusteredDistribution(Literal(1) :: Nil) // The left and right inputs have compatible partitionings but they do not satisfy the // distribution because they are clustered on different columns. Thus, we need to shuffle. 
val childPartitioning = HashPartitioning(Literal(2) :: Nil, 1) assert(!childPartitioning.satisfies(distribution)) val inputPlan = DummySparkPlan( children = Seq( DummySparkPlan(outputPartitioning = childPartitioning), DummySparkPlan(outputPartitioning = childPartitioning) ), requiredChildDistribution = Seq(distribution, distribution), requiredChildOrdering = Seq(Seq.empty, Seq.empty) ) val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan) assertDistributionRequirementsAreSatisfied(outputPlan) if (outputPlan.collect { case e: ShuffleExchangeExec => true }.isEmpty) { fail(s"Exchange should have been added:\\n$outputPlan") } } test("EnsureRequirements with compatible child partitionings that satisfy distribution") { // In this case, all requirements are satisfied and no exchange should be added. val distribution = ClusteredDistribution(Literal(1) :: Nil) val childPartitioning = HashPartitioning(Literal(1) :: Nil, 5) assert(childPartitioning.satisfies(distribution)) val inputPlan = DummySparkPlan( children = Seq( DummySparkPlan(outputPartitioning = childPartitioning), DummySparkPlan(outputPartitioning = childPartitioning) ), requiredChildDistribution = Seq(distribution, distribution), requiredChildOrdering = Seq(Seq.empty, Seq.empty) ) val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan) assertDistributionRequirementsAreSatisfied(outputPlan) if (outputPlan.collect { case e: ShuffleExchangeExec => true }.nonEmpty) { fail(s"Exchange should not have been added:\\n$outputPlan") } } // This is a regression test for SPARK-9703 test("EnsureRequirements should not repartition if only ordering requirement is unsatisfied") { // Consider an operator that imposes both output distribution and ordering requirements on its // children, such as sort merge join. 
If the distribution requirements are satisfied but // the output ordering requirements are unsatisfied, then the planner should only add sorts and // should not need to add additional shuffles / exchanges. val outputOrdering = Seq(SortOrder(Literal(1), Ascending)) val distribution = ClusteredDistribution(Literal(1) :: Nil) val inputPlan = DummySparkPlan( children = Seq( DummySparkPlan(outputPartitioning = SinglePartition), DummySparkPlan(outputPartitioning = SinglePartition) ), requiredChildDistribution = Seq(distribution, distribution), requiredChildOrdering = Seq(outputOrdering, outputOrdering) ) val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan) assertDistributionRequirementsAreSatisfied(outputPlan) if (outputPlan.collect { case e: ShuffleExchangeExec => true }.nonEmpty) { fail(s"No Exchanges should have been added:\\n$outputPlan") } } test("EnsureRequirements eliminates Exchange if child has same partitioning") { val distribution = ClusteredDistribution(Literal(1) :: Nil) val partitioning = HashPartitioning(Literal(1) :: Nil, 5) assert(partitioning.satisfies(distribution)) val inputPlan = ShuffleExchangeExec( partitioning, DummySparkPlan(outputPartitioning = partitioning)) val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan) assertDistributionRequirementsAreSatisfied(outputPlan) if (outputPlan.collect { case e: ShuffleExchangeExec => true }.size == 2) { fail(s"Topmost Exchange should have been eliminated:\\n$outputPlan") } } test("SPARK-30036: Remove unnecessary RoundRobinPartitioning " + "if SortExec is followed by RoundRobinPartitioning") { val distribution = OrderedDistribution(SortOrder(Literal(1), Ascending) :: Nil) val partitioning = RoundRobinPartitioning(5) assert(!partitioning.satisfies(distribution)) val inputPlan = SortExec(SortOrder(Literal(1), Ascending) :: Nil, global = true, child = ShuffleExchangeExec( partitioning, DummySparkPlan(outputPartitioning = partitioning))) val outputPlan = 
EnsureRequirements(spark.sessionState.conf).apply(inputPlan) assert(outputPlan.find { case ShuffleExchangeExec(_: RoundRobinPartitioning, _, _) => true case _ => false }.isEmpty, "RoundRobinPartitioning should be changed to RangePartitioning") withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") { // when enable AQE, the post partiiton number is changed. val query = testData.select('key, 'value).repartition(2).sort('key.asc) assert(query.rdd.getNumPartitions == 2) assert(query.rdd.collectPartitions()(0).map(_.get(0)).toSeq == (1 to 50)) } } test("SPARK-30036: Remove unnecessary HashPartitioning " + "if SortExec is followed by HashPartitioning") { val distribution = OrderedDistribution(SortOrder(Literal(1), Ascending) :: Nil) val partitioning = HashPartitioning(Literal(1) :: Nil, 5) assert(!partitioning.satisfies(distribution)) val inputPlan = SortExec(SortOrder(Literal(1), Ascending) :: Nil, global = true, child = ShuffleExchangeExec( partitioning, DummySparkPlan(outputPartitioning = partitioning))) val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan) assert(outputPlan.find { case ShuffleExchangeExec(_: HashPartitioning, _, _) => true case _ => false }.isEmpty, "HashPartitioning should be changed to RangePartitioning") withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") { // when enable AQE, the post partiiton number is changed. 
val query = testData.select('key, 'value).repartition(5, 'key).sort('key.asc) assert(query.rdd.getNumPartitions == 5) assert(query.rdd.collectPartitions()(0).map(_.get(0)).toSeq == (1 to 20)) } } test("EnsureRequirements does not eliminate Exchange with different partitioning") { val distribution = ClusteredDistribution(Literal(1) :: Nil) val partitioning = HashPartitioning(Literal(2) :: Nil, 5) assert(!partitioning.satisfies(distribution)) val inputPlan = ShuffleExchangeExec( partitioning, DummySparkPlan(outputPartitioning = partitioning)) val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan) assertDistributionRequirementsAreSatisfied(outputPlan) if (outputPlan.collect { case e: ShuffleExchangeExec => true }.size == 1) { fail(s"Topmost Exchange should not have been eliminated:\\n$outputPlan") } } test("EnsureRequirements should respect ClusteredDistribution's num partitioning") { val distribution = ClusteredDistribution(Literal(1) :: Nil, Some(13)) // Number of partitions differ val finalPartitioning = HashPartitioning(Literal(1) :: Nil, 13) val childPartitioning = HashPartitioning(Literal(1) :: Nil, 5) assert(!childPartitioning.satisfies(distribution)) val inputPlan = DummySparkPlan( children = DummySparkPlan(outputPartitioning = childPartitioning) :: Nil, requiredChildDistribution = Seq(distribution), requiredChildOrdering = Seq(Seq.empty)) val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan) val shuffle = outputPlan.collect { case e: ShuffleExchangeExec => e } assert(shuffle.size === 1) assert(shuffle.head.outputPartitioning === finalPartitioning) } test("Reuse exchanges") { val distribution = ClusteredDistribution(Literal(1) :: Nil) val finalPartitioning = HashPartitioning(Literal(1) :: Nil, 5) val childPartitioning = HashPartitioning(Literal(2) :: Nil, 5) assert(!childPartitioning.satisfies(distribution)) val shuffle = ShuffleExchangeExec(finalPartitioning, DummySparkPlan( children = 
DummySparkPlan(outputPartitioning = childPartitioning) :: Nil, requiredChildDistribution = Seq(distribution), requiredChildOrdering = Seq(Seq.empty))) val inputPlan = SortMergeJoinExec( Literal(1) :: Nil, Literal(1) :: Nil, Inner, None, shuffle, shuffle) val outputPlan = ReuseExchange(spark.sessionState.conf).apply(inputPlan) if (outputPlan.collect { case e: ReusedExchangeExec => true }.size != 1) { fail(s"Should re-use the shuffle:\\n$outputPlan") } if (outputPlan.collect { case e: ShuffleExchangeExec => true }.size != 1) { fail(s"Should have only one shuffle:\\n$outputPlan") } // nested exchanges val inputPlan2 = SortMergeJoinExec( Literal(1) :: Nil, Literal(1) :: Nil, Inner, None, ShuffleExchangeExec(finalPartitioning, inputPlan), ShuffleExchangeExec(finalPartitioning, inputPlan)) val outputPlan2 = ReuseExchange(spark.sessionState.conf).apply(inputPlan2) if (outputPlan2.collect { case e: ReusedExchangeExec => true }.size != 2) { fail(s"Should re-use the two shuffles:\\n$outputPlan2") } if (outputPlan2.collect { case e: ShuffleExchangeExec => true }.size != 2) { fail(s"Should have only two shuffles:\\n$outputPlan") } } /////////////////////////////////////////////////////////////////////////// // Unit tests of EnsureRequirements for Sort /////////////////////////////////////////////////////////////////////////// private val exprA = Literal(1) private val exprB = Literal(2) private val exprC = Literal(3) private val orderingA = SortOrder(exprA, Ascending) private val orderingB = SortOrder(exprB, Ascending) private val orderingC = SortOrder(exprC, Ascending) private val planA = DummySparkPlan(outputOrdering = Seq(orderingA), outputPartitioning = HashPartitioning(exprA :: Nil, 5)) private val planB = DummySparkPlan(outputOrdering = Seq(orderingB), outputPartitioning = HashPartitioning(exprB :: Nil, 5)) private val planC = DummySparkPlan(outputOrdering = Seq(orderingC), outputPartitioning = HashPartitioning(exprC :: Nil, 5)) assert(orderingA != orderingB && orderingA 
!= orderingC && orderingB != orderingC) private def assertSortRequirementsAreSatisfied( childPlan: SparkPlan, requiredOrdering: Seq[SortOrder], shouldHaveSort: Boolean): Unit = { val inputPlan = DummySparkPlan( children = childPlan :: Nil, requiredChildOrdering = Seq(requiredOrdering), requiredChildDistribution = Seq(UnspecifiedDistribution) ) val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan) assertDistributionRequirementsAreSatisfied(outputPlan) if (shouldHaveSort) { if (outputPlan.collect { case s: SortExec => true }.isEmpty) { fail(s"Sort should have been added:\\n$outputPlan") } } else { if (outputPlan.collect { case s: SortExec => true }.nonEmpty) { fail(s"No sorts should have been added:\\n$outputPlan") } } } test("EnsureRequirements skips sort when either side of join keys is required after inner SMJ") { Seq(Inner, Cross).foreach { joinType => val innerSmj = SortMergeJoinExec(exprA :: Nil, exprB :: Nil, joinType, None, planA, planB) // Both left and right keys should be sorted after the SMJ. Seq(orderingA, orderingB).foreach { ordering => assertSortRequirementsAreSatisfied( childPlan = innerSmj, requiredOrdering = Seq(ordering), shouldHaveSort = false) } } } test("EnsureRequirements skips sort when key order of a parent SMJ is propagated from its " + "child SMJ") { Seq(Inner, Cross).foreach { joinType => val childSmj = SortMergeJoinExec(exprA :: Nil, exprB :: Nil, joinType, None, planA, planB) val parentSmj = SortMergeJoinExec(exprB :: Nil, exprC :: Nil, joinType, None, childSmj, planC) // After the second SMJ, exprA, exprB and exprC should all be sorted. Seq(orderingA, orderingB, orderingC).foreach { ordering => assertSortRequirementsAreSatisfied( childPlan = parentSmj, requiredOrdering = Seq(ordering), shouldHaveSort = false) } } } test("EnsureRequirements for sort operator after left outer sort merge join") { // Only left key is sorted after left outer SMJ (thus doesn't need a sort). 
val leftSmj = SortMergeJoinExec(exprA :: Nil, exprB :: Nil, LeftOuter, None, planA, planB) Seq((orderingA, false), (orderingB, true)).foreach { case (ordering, needSort) => assertSortRequirementsAreSatisfied( childPlan = leftSmj, requiredOrdering = Seq(ordering), shouldHaveSort = needSort) } } test("EnsureRequirements for sort operator after right outer sort merge join") { // Only right key is sorted after right outer SMJ (thus doesn't need a sort). val rightSmj = SortMergeJoinExec(exprA :: Nil, exprB :: Nil, RightOuter, None, planA, planB) Seq((orderingA, true), (orderingB, false)).foreach { case (ordering, needSort) => assertSortRequirementsAreSatisfied( childPlan = rightSmj, requiredOrdering = Seq(ordering), shouldHaveSort = needSort) } } test("EnsureRequirements adds sort after full outer sort merge join") { // Neither keys is sorted after full outer SMJ, so they both need sorts. val fullSmj = SortMergeJoinExec(exprA :: Nil, exprB :: Nil, FullOuter, None, planA, planB) Seq(orderingA, orderingB).foreach { ordering => assertSortRequirementsAreSatisfied( childPlan = fullSmj, requiredOrdering = Seq(ordering), shouldHaveSort = true) } } test("EnsureRequirements adds sort when there is no existing ordering") { assertSortRequirementsAreSatisfied( childPlan = DummySparkPlan(outputOrdering = Seq.empty), requiredOrdering = Seq(orderingB), shouldHaveSort = true) } test("EnsureRequirements skips sort when required ordering is prefix of existing ordering") { assertSortRequirementsAreSatisfied( childPlan = DummySparkPlan(outputOrdering = Seq(orderingA, orderingB)), requiredOrdering = Seq(orderingA), shouldHaveSort = false) } test("EnsureRequirements skips sort when required ordering is semantically equal to " + "existing ordering") { val exprId: ExprId = NamedExpression.newExprId val attribute1 = AttributeReference( name = "col1", dataType = LongType, nullable = false ) (exprId = exprId, qualifier = Seq("col1_qualifier") ) val attribute2 = AttributeReference( name = "col1", 
dataType = LongType, nullable = false ) (exprId = exprId) val orderingA1 = SortOrder(attribute1, Ascending) val orderingA2 = SortOrder(attribute2, Ascending) assert(orderingA1 != orderingA2, s"$orderingA1 should NOT equal to $orderingA2") assert(orderingA1.semanticEquals(orderingA2), s"$orderingA1 should be semantically equal to $orderingA2") assertSortRequirementsAreSatisfied( childPlan = DummySparkPlan(outputOrdering = Seq(orderingA1)), requiredOrdering = Seq(orderingA2), shouldHaveSort = false) } // This is a regression test for SPARK-11135 test("EnsureRequirements adds sort when required ordering isn't a prefix of existing ordering") { assertSortRequirementsAreSatisfied( childPlan = DummySparkPlan(outputOrdering = Seq(orderingA)), requiredOrdering = Seq(orderingA, orderingB), shouldHaveSort = true) } test("SPARK-24242: RangeExec should have correct output ordering and partitioning") { val df = spark.range(10) val rangeExec = df.queryExecution.executedPlan.collect { case r: RangeExec => r } val range = df.queryExecution.optimizedPlan.collect { case r: Range => r } assert(rangeExec.head.outputOrdering == range.head.outputOrdering) assert(rangeExec.head.outputPartitioning == RangePartitioning(rangeExec.head.outputOrdering, df.rdd.getNumPartitions)) val rangeInOnePartition = spark.range(1, 10, 1, 1) val rangeExecInOnePartition = rangeInOnePartition.queryExecution.executedPlan.collect { case r: RangeExec => r } assert(rangeExecInOnePartition.head.outputPartitioning == SinglePartition) val rangeInZeroPartition = spark.range(-10, -9, -20, 1) val rangeExecInZeroPartition = rangeInZeroPartition.queryExecution.executedPlan.collect { case r: RangeExec => r } assert(rangeExecInZeroPartition.head.outputPartitioning == UnknownPartitioning(0)) } test("SPARK-24495: EnsureRequirements can return wrong plan when reusing the same key in join") { val plan1 = DummySparkPlan(outputOrdering = Seq(orderingA), outputPartitioning = HashPartitioning(exprA :: exprA :: Nil, 5)) val plan2 = 
DummySparkPlan(outputOrdering = Seq(orderingB), outputPartitioning = HashPartitioning(exprB :: Nil, 5)) val smjExec = SortMergeJoinExec( exprA :: exprA :: Nil, exprB :: exprC :: Nil, Inner, None, plan1, plan2) val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(smjExec) outputPlan match { case SortMergeJoinExec(leftKeys, rightKeys, _, _, _, _) => assert(leftKeys == Seq(exprA, exprA)) assert(rightKeys == Seq(exprB, exprC)) case _ => fail() } } test("SPARK-27485: EnsureRequirements.reorder should handle duplicate expressions") { val plan1 = DummySparkPlan( outputPartitioning = HashPartitioning(exprA :: exprB :: exprA :: Nil, 5)) val plan2 = DummySparkPlan() val smjExec = SortMergeJoinExec( leftKeys = exprA :: exprB :: exprB :: Nil, rightKeys = exprA :: exprC :: exprC :: Nil, joinType = Inner, condition = None, left = plan1, right = plan2) val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(smjExec) outputPlan match { case SortMergeJoinExec(leftKeys, rightKeys, _, _, SortExec(_, _, ShuffleExchangeExec(HashPartitioning(leftPartitioningExpressions, _), _, _), _), SortExec(_, _, ShuffleExchangeExec(HashPartitioning(rightPartitioningExpressions, _), _, _), _)) => assert(leftKeys === smjExec.leftKeys) assert(rightKeys === smjExec.rightKeys) assert(leftKeys === leftPartitioningExpressions) assert(rightKeys === rightPartitioningExpressions) case _ => fail(outputPlan.toString) } } test("SPARK-24500: create union with stream of children") { val df = Union(Stream( Range(1, 1, 1, 1), Range(1, 2, 1, 1))) df.queryExecution.executedPlan.execute() } test("SPARK-25278: physical nodes should be different instances for same logical nodes") { val range = Range(1, 1, 1, 1) val df = Union(range, range) val ranges = df.queryExecution.optimizedPlan.collect { case r: Range => r } assert(ranges.length == 2) // Ensure the two Range instances are equal according to their equal method assert(ranges.head == ranges.last) val execRanges = 
df.queryExecution.sparkPlan.collect { case r: RangeExec => r } assert(execRanges.length == 2) // Ensure the two RangeExec instances are different instances assert(!execRanges.head.eq(execRanges.last)) } test("SPARK-24556: always rewrite output partitioning in ReusedExchangeExec " + "and InMemoryTableScanExec") { def checkOutputPartitioningRewrite( plans: Seq[SparkPlan], expectedPartitioningClass: Class[_]): Unit = { assert(plans.size == 1) val plan = plans.head val partitioning = plan.outputPartitioning assert(partitioning.getClass == expectedPartitioningClass) val partitionedAttrs = partitioning.asInstanceOf[Expression].references assert(partitionedAttrs.subsetOf(plan.outputSet)) } def checkReusedExchangeOutputPartitioningRewrite( df: DataFrame, expectedPartitioningClass: Class[_]): Unit = { val reusedExchange = collect(df.queryExecution.executedPlan) { case r: ReusedExchangeExec => r } checkOutputPartitioningRewrite(reusedExchange, expectedPartitioningClass) } def checkInMemoryTableScanOutputPartitioningRewrite( df: DataFrame, expectedPartitioningClass: Class[_]): Unit = { val inMemoryScan = collect(df.queryExecution.executedPlan) { case m: InMemoryTableScanExec => m } checkOutputPartitioningRewrite(inMemoryScan, expectedPartitioningClass) } // when enable AQE, the reusedExchange is inserted when executed. 
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1", SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") { // ReusedExchange is HashPartitioning val df1 = Seq(1 -> "a").toDF("i", "j").repartition($"i") val df2 = Seq(1 -> "a").toDF("i", "j").repartition($"i") checkReusedExchangeOutputPartitioningRewrite(df1.union(df2), classOf[HashPartitioning]) // ReusedExchange is RangePartitioning val df3 = Seq(1 -> "a").toDF("i", "j").orderBy($"i") val df4 = Seq(1 -> "a").toDF("i", "j").orderBy($"i") checkReusedExchangeOutputPartitioningRewrite(df3.union(df4), classOf[RangePartitioning]) // InMemoryTableScan is HashPartitioning Seq(1 -> "a").toDF("i", "j").repartition($"i").persist() checkInMemoryTableScanOutputPartitioningRewrite( Seq(1 -> "a").toDF("i", "j").repartition($"i"), classOf[HashPartitioning]) // InMemoryTableScan is RangePartitioning spark.range(1, 100, 1, 10).toDF().persist() checkInMemoryTableScanOutputPartitioningRewrite( spark.range(1, 100, 1, 10).toDF(), classOf[RangePartitioning]) } // InMemoryTableScan is PartitioningCollection withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { Seq(1 -> "a").toDF("i", "j").join(Seq(1 -> "a").toDF("m", "n"), $"i" === $"m").persist() checkInMemoryTableScanOutputPartitioningRewrite( Seq(1 -> "a").toDF("i", "j").join(Seq(1 -> "a").toDF("m", "n"), $"i" === $"m"), classOf[PartitioningCollection]) } } test("SPARK-26812: wrong nullability for complex datatypes in union") { def testUnionOutputType(input1: DataType, input2: DataType, output: DataType): Unit = { val query = Union( LocalRelation(StructField("a", input1)), LocalRelation(StructField("a", input2))) assert(query.output.head.dataType == output) } // Map testUnionOutputType( MapType(StringType, StringType, valueContainsNull = false), MapType(StringType, StringType, valueContainsNull = true), MapType(StringType, StringType, valueContainsNull = true)) testUnionOutputType( MapType(StringType, StringType, valueContainsNull = true), MapType(StringType, 
StringType, valueContainsNull = false), MapType(StringType, StringType, valueContainsNull = true)) testUnionOutputType( MapType(StringType, StringType, valueContainsNull = false), MapType(StringType, StringType, valueContainsNull = false), MapType(StringType, StringType, valueContainsNull = false)) // Array testUnionOutputType( ArrayType(StringType, containsNull = false), ArrayType(StringType, containsNull = true), ArrayType(StringType, containsNull = true)) testUnionOutputType( ArrayType(StringType, containsNull = true), ArrayType(StringType, containsNull = false), ArrayType(StringType, containsNull = true)) testUnionOutputType( ArrayType(StringType, containsNull = false), ArrayType(StringType, containsNull = false), ArrayType(StringType, containsNull = false)) // Struct testUnionOutputType( StructType(Seq( StructField("f1", StringType, nullable = false), StructField("f2", StringType, nullable = true), StructField("f3", StringType, nullable = false))), StructType(Seq( StructField("f1", StringType, nullable = true), StructField("f2", StringType, nullable = false), StructField("f3", StringType, nullable = false))), StructType(Seq( StructField("f1", StringType, nullable = true), StructField("f2", StringType, nullable = true), StructField("f3", StringType, nullable = false)))) } test("Do not analyze subqueries twice") { // Analyzing the subquery twice will result in stacked // CheckOverflow & PromotePrecision expressions. 
val df = sql( """ |SELECT id, | (SELECT 1.3000000 * AVG(CAST(id AS DECIMAL(10, 3))) FROM range(13)) AS ref |FROM range(5) |""".stripMargin) val Seq(subquery) = stripAQEPlan(df.queryExecution.executedPlan).subqueriesAll subquery.foreach { node => node.expressions.foreach { expression => expression.foreach { case PromotePrecision(_: PromotePrecision) => fail(s"$expression contains stacked PromotePrecision expressions.") case CheckOverflow(_: CheckOverflow, _, _) => fail(s"$expression contains stacked CheckOverflow expressions.") case _ => // Ok } } } } } // Used for unit-testing EnsureRequirements private case class DummySparkPlan( override val children: Seq[SparkPlan] = Nil, override val outputOrdering: Seq[SortOrder] = Nil, override val outputPartitioning: Partitioning = UnknownPartitioning(0), override val requiredChildDistribution: Seq[Distribution] = Nil, override val requiredChildOrdering: Seq[Seq[SortOrder]] = Nil ) extends SparkPlan { override protected def doExecute(): RDD[InternalRow] = throw new UnsupportedOperationException override def output: Seq[Attribute] = Seq.empty }
ptkool/spark
sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala
Scala
apache-2.0
40,579
/***
 * Copyright 2014 Rackspace US, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.rackspace.com.papi.components.checker.step

import javax.xml.namespace.QName
import javax.xml.validation.{Schema, ValidatorHandler}

import com.rackspace.com.papi.components.checker.util.ValidatorHandlerPool._

import org.xml.sax.{Attributes, ErrorHandler, Locator, SAXException, SAXParseException}
import org.xml.sax.helpers.AttributesImpl

/**
 * Validates a plain string against an XSD simple type by synthesizing a
 * minimal SAX event stream — a single element carrying an
 * {@code xsi:type} attribute with the string as its text content — and
 * feeding it to a ValidatorHandler borrowed from ValidatorHandlerPool.
 *
 * The class also implements [[Locator]] so it can supply the handler a
 * dummy document locator (all positions are constants; there is no real
 * document).
 *
 * @param simpleType  qualified name of the XSD simple type to validate
 *                    against; its prefix/namespace are declared via
 *                    startPrefixMapping below, so they must match `schema`
 * @param schema      the compiled schema that declares `simpleType`
 * @param elementName local name of the synthetic element wrapping the value
 */
class XSDStringValidator(val simpleType : QName, val schema : Schema, val elementName : String) extends Locator {

  // Attribute set reused for every validation call:
  // xsi:type="<prefix>:<localPart>" forces the validator to check the
  // element's character content against the simple type.
  lazy val attributes : Attributes = {
    val ah = new AttributesImpl()
    ah.addAttribute ("http://www.w3.org/2001/XMLSchema-instance", "type", "xsi:type", "",
                     simpleType.getPrefix+":"+simpleType.getLocalPart)
    ah
  }

  /**
   * Validates `in` against the simple type.
   *
   * @param in the raw string value to check
   * @return None if the string is valid, otherwise Some(first validation
   *         error reported by the schema validator)
   */
  def validate (in : String) : Option[SAXParseException] = {
    val capture = new ErrorCapture
    var handler : ValidatorHandler = null

    try {
      val inArray = in.toCharArray()

      handler = borrowValidatorHandler(schema)
      handler.setErrorHandler(capture)
      handler.setDocumentLocator (this)
      // Synthesize the document:
      //   <elementName xsi:type="prefix:localPart">in</elementName>
      // with both the simple type's prefix and "xsi" mapped before the
      // element starts, mirrored by endPrefixMapping afterwards.
      handler.startDocument
      handler.startPrefixMapping(simpleType.getPrefix, simpleType.getNamespaceURI)
      handler.startPrefixMapping("xsi", "http://www.w3.org/2001/XMLSchema-instance")
      handler.startElement("", elementName, elementName, attributes)
      handler.characters(inArray, 0, inArray.length)
      handler.endElement("", elementName, elementName)
      handler.endPrefixMapping(simpleType.getPrefix)
      handler.endPrefixMapping("xsi")
      handler.endDocument
    } catch {
      // ErrorCapture rethrows the first error it records, which aborts the
      // event stream early; the details are read from capture.error below.
      case e : SAXException => /* Ignore here, the error is reported by capture */
    } finally {
      // Always return the handler to the pool, even after a validation
      // failure part-way through the event stream.
      if (handler != null) {
        returnValidatorHandler(schema, handler)
      }
    }
    capture.error
  }

  //
  //  Locator calls — dummy values, since there is no backing document.
  //
  def getPublicId = ""
  def getSystemId = ""
  def getLineNumber = 1
  def getColumnNumber = 1
}

//
//  An error handler that simply captures the first error it sees. It
//  ignores warnings.
//
//  Note: it rethrows the captured exception so the SAX pipeline stops at
//  the first error instead of continuing validation.
//
private class ErrorCapture extends ErrorHandler {
  // First (fatal) error seen, if any; read by XSDStringValidator.validate.
  var error : Option[SAXParseException] = None

  def error(exception : SAXParseException) : Unit = {
    if (error.isEmpty) {
      error = Some(exception)
      throw exception
    }
  }

  def fatalError(exception : SAXParseException) : Unit = {
    if (error.isEmpty) {
      error = Some(exception)
      throw exception
    }
  }

  def warning(exception : SAXParseException) : Unit = {
    //Log?
  }
}
wdschei/api-checker
core/src/main/scala/com/rackspace/com/papi/components/checker/step/XSDStringValidator.scala
Scala
apache-2.0
3,058
package org.smackscalahelpers import org.jivesoftware._ import smack._ import smackx._ import smackx.pubsub._ import smackx.pubsub.FormType import smackx.pubsub.LeafNode import smackx.packet._ import smackx.packet.DataForm import scala.collection.JavaConversions._ class StorageNode( val name:String, val leafNode:LeafNode){ import scala.xml._ /* PRIVATE METHODS*/ private def updateConfig(priv:Boolean){ val cfg = new ConfigureForm( leafNode.getNodeConfiguration.createAnswerForm ) cfg.setPersistentItems(true) cfg.setDeliverPayloads(true) cfg.setMaxItems(0) // 0 => unlimited if (priv == false) cfg.setAccessModel(AccessModel.open) else cfg.setAccessModel(AccessModel.whitelist) leafNode.sendConfigurationForm(cfg) } private def setToNodeMap[T]( set:Set[T] ):Map[String,Node] = set.asInstanceOf[Set[Item]] .map( x => x.getId() -> x.toXML) .map( x => x._1 -> XML.loadString(x._2).asInstanceOf[Node] ) .map( x => x._1 -> x._2.child.head ) .toMap private def createItem(content:Node, id:String):Node = Utility.trim(<item id={id} >{ content }</item>) /* PUBLIC METHODS */ def set(item:Node){ set(item, "") } def set(item:Node, id:String){ leafNode.publish(new Item{ override def toXML():String = createItem(item, id).toString })} def get():Map[String,Node] = setToNodeMap( leafNode.getItems().toSet ) def get(id:String):Option[Node] = { val items = get(Set(id)) if (items.contains(id)) Some(items(id)) else None } def get(ids:Set[String]):Map[String,Node] = setToNodeMap( leafNode.getItems(ids).toSet ) def delete(id:String){ leafNode.deleteItem(id) } def delete(ids:Set[String]){ leafNode.deleteItem(ids) } def deleteAll(){ leafNode.deleteAllItems } def setToPrivateNode(priv:Boolean = true){ updateConfig(priv) } } class PubSubStorage (conn:XMPPConnection){ private val ACCESS_MODEL = "pubsub#access_model" private val PERSIST_ITEMS = "pubsub#persist_items" private val cfg = new ConfigureForm(FormType.submit) private val pubSubManager = new PubSubManager(conn) cfg.setPersistentItems(true) 
cfg.setDeliverPayloads(false) cfg.setAccessModel(AccessModel.whitelist) private def castNodeToStorageNode(node:Node):StorageNode = node match{ case l:LeafNode => new StorageNode(node.getId,l) case _ => throw new Exception("could not create node") } def createNode(name:String):Option[StorageNode] = { try{ Some( castNodeToStorageNode( pubSubManager.createNode(name, cfg))) }catch{ case _ => None } } def getNode(name:String):Option[StorageNode] = { try{ Some(castNodeToStorageNode(pubSubManager.getNode(name))) }catch{ case _ => None } } def deleteNode(name:String){ pubSubManager.deleteNode(name) } }
flosse/smackScalaHelpers
src/main/scala/org/smackscalahelpers/PubSubStorage.scala
Scala
mit
2,870
package roles import models.{User, UserScope} import play.api.libs.concurrent.Execution.Implicits.defaultContext import scalikejdbc.async.AsyncDBSession import utils.Route import utils.exceptions.{AccessForbidden, ResourceNotFound} import scala.concurrent.Future trait AuthorizedUser { this: User => def checkScope(route: Route)(implicit s: AsyncDBSession): Future[Unit] = { def handleNotFound[U]: PartialFunction[Throwable, U] = { case e: ResourceNotFound => throw new AccessForbidden } UserScope.findScope(userType, route.method, route.uri) map (_ => ()) recover handleNotFound } }
KIWIKIGMBH/kiwierp
kiwierp-backend/app/roles/AuthorizedUser.scala
Scala
mpl-2.0
615
package domino.service_providing import domino.capsule.CapsuleContext import org.osgi.framework.BundleContext import domino.OsgiContext /** * A class that mixes in the [[ServiceProviding]] trait. Use this if you want to use a class instead of a trait. */ class SimpleServiceProviding( protected val capsuleContext: CapsuleContext, protected val bundleContext: BundleContext) extends ServiceProviding { def this(osgiContext: OsgiContext) = this(osgiContext, osgiContext.bundleContext) }
helgoboss/domino
src/main/scala/domino/service_providing/SimpleServiceProviding.scala
Scala
mit
501
package com.zobot.client.packet.definitions.clientbound.login import com.zobot.client.packet.Packet case class LoginSuccess(uuid: Any, username: String) extends Packet { override lazy val packetId = 0x02 override lazy val packetData: Array[Byte] = fromAny(uuid) ++ fromVarString(username) }
BecauseNoReason/zobot
src/main/scala/com/zobot/client/packet/definitions/clientbound/login/LoginSuccess.scala
Scala
mit
306
package taczombie.model import scala.Array.canBuildFrom import scala.collection.immutable.HashSet import scala.io.Source import scala.collection.mutable.HashMap import taczombie.model.util.LevelCreator import scala.concurrent.Lock object GameFactory { var counter : Int = 1 val generateIdLock : Lock = new Lock() val readFileLock : Lock = new Lock() def generateId : Int = { generateIdLock.acquire counter = counter + 1 val newId = counter generateIdLock.release newId } val defaultFile = getClass().getResource("/TestLevel_correct") def newGame(random : Boolean = false, file : String = defaultFile.toString(), humans: Int = defaults.defaultHumans, zombies: Int= defaults.defaultZombies) : Game = { val (gameField, playerMap) = { if(random == false) createGameFieldAndPlayerMap(humans, zombies, file) else { val level = (new LevelCreator()).create( defaults.defaultHeight, defaults.defaultWidth, humans) val array = scala.collection.mutable.ArrayBuffer[String]() for(line <- level) { val lineConcatString = line.foldLeft("")((result, element) => result concat element) array += lineConcatString } for(s <- array) println(s) createGameFieldAndPlayerMap( humans, zombies, level = array.toArray[String], name = array.hashCode().toString) } } if(gameField == null || playerMap == null) return null else new Game(generateId, gameField, playerMap, GameState.InGame) } private def createGameFieldAndPlayerMap( humanTokenCount : Int, zombieTokenCount : Int, file : String = null, level : Array[String] = null, name : String = null) : (GameField, Players) = { readFileLock.acquire val (mapStringArray, gameName) = { if(file != null) { (Source.fromFile(file).getLines.toArray, file.split("/").last) } else (level, name) } readFileLock.release if((mapStringArray.foldLeft(Set[Int]()){(set, line) => set + line.length }.size) > 1) { println("level has different line lengths") return (null, null) } val levelWidth = (mapStringArray apply 0).size val levelHeight = mapStringArray.size var humanBase : (Int,Int) 
= (0,0) var zombieBase : (Int,Int) = (0,0) var coinsPlaced : Int = 0 // collect tokens val humanTokenIds = scala.collection.mutable.ListBuffer[Int]() val zombieTokenIds = scala.collection.mutable.ListBuffer[Int]() val gameFieldCells = scala.collection.mutable.HashMap[(Int,Int), GameFieldCell]() for (row <- 0 until levelHeight) yield { for (col <- 0 until levelWidth) yield { val tuple = (row,col) var tmpGameFieldCell = new GameFieldCell((tuple), HashSet[GameObject]()) var validCharacterRead = true mapStringArray(row)(col) match { case '#' => tmpGameFieldCell = tmpGameFieldCell.addHere(new Wall(0,(tuple))) case '.' => tmpGameFieldCell = tmpGameFieldCell.addHere(new Coin(this.generateId,(tuple))) coinsPlaced += 1 case ';' => tmpGameFieldCell = tmpGameFieldCell.addHere(new Powerup(this.generateId, (tuple))) case 'H' => humanBase = (tuple) val humanToken = new HumanToken(this.generateId, (tuple)) humanTokenIds.+=(humanToken.id) tmpGameFieldCell = tmpGameFieldCell.addHere(humanToken) case 'Z' => zombieBase = (tuple) for(i <- 0 until zombieTokenCount) { val zombieToken = new ZombieToken(this.generateId, (tuple)) zombieTokenIds.+=(zombieToken.id) tmpGameFieldCell = tmpGameFieldCell.addHere(zombieToken) } case c : Char => { println("unkown char: " + c.toByte) return (null, null) } } if(validCharacterRead) { gameFieldCells.+=((tuple,tmpGameFieldCell)) } } } // check if we enough human bases for humans val missingHumans = humanTokenCount-humanTokenIds.size for(i <- (0 until missingHumans)) { val humanToken = new HumanToken(this.generateId, (humanBase)) humanTokenIds.+=(humanToken.id) gameFieldCells.update(humanBase, gameFieldCells.apply(humanBase) .addHere(humanToken)) } // Create the player map with a human and a zombie player with tokens // TODO: make this scalable for more players var players : Players = new Players() val humanId = defaults.defaultHumanName + this.generateId players = players.updatedWithNewPlayer(new Human(humanId, humanTokenIds.toList)) val zombieId = 
defaults.defaultZombieName + this.generateId players = players.updatedWithNewPlayer(new Zombie(zombieId, zombieTokenIds.toList)) val gameField = new GameField(gameName, gameFieldCells.toMap, levelWidth, levelHeight, humanBase, zombieBase, coinsPlaced) (gameField, players) } }
mahieke/TacZombie
model/src/main/scala/taczombie/model/GameFactory.scala
Scala
gpl-2.0
5,470
package org.jetbrains.plugins.scala.editor.todo /** tests [[ScalaIndexPatternBuilder]] */ class ScalaTodoIndexerTest extends ScalaTodoItemsTestBase { def testTodo_LineComment(): Unit = testTodos( s"""// ${start}TODO: do something$end |// unrelated comment line |val x = 42 |""".stripMargin ) def testTodo_LineComment_1(): Unit = testTodos( s"""// ${start}TODO: do something$end | |// unrelated comment line |val x = 42 |""".stripMargin ) def testTodo_LineComment_MultilineTodo(): Unit = testTodos( s"""// ${start}TODO: do something$end |// ${start}todo description continue$end |val x = 42 |""".stripMargin ) def testTodo_BlockComment(): Unit = testTodos( s"""/* | * ${start}TODO: do something$end | * unrelated comment line | */ |val x = 42 |""".stripMargin ) def testTodo_BlockComment_1(): Unit = testTodos( s"""/* | * ${start}TODO: do something$end | * | * unrelated comment line | */ |val x = 42 |""".stripMargin ) def testTodo_BlockComment_MultilineTodo(): Unit = testTodos( s"""/* | * ${start}TODO: do something$end | * ${start}todo description continue$end | */ |val x = 42 |""".stripMargin ) def testTodo_ScaladocComment(): Unit = testTodos( s"""/** | * ${start}TODO: do something$end | * unrelated comment line | */ |val x = 42 |""".stripMargin ) def testTodo_ScaladocComment_1(): Unit = testTodos( s"""/** | * ${start}TODO: do something$end | * | * unrelated comment line | */ |val x = 42 |""".stripMargin ) def testTodo_ScaladocComment_AndSomeTodoOfAnotherType(): Unit = testTodos( s"""/** | * ${start}TODO: do something$end | * | * unrelated comment line | */ |val x = 42 | |//${start}TODO: do somthing else$end |""".stripMargin ) def testTodo_ScaladocComment_MultilineTodo(): Unit = testTodos( s"""/** | * ${start}TODO: do something$end | * ${start}todo description continue$end | */ |val x = 42 |""".stripMargin ) def testTodo_ScaladocComment_WithOtherFields(): Unit = testTodos( s"""/** | * ${start}TODO: do something$end | * @param x some description | * @returs something | */ |def 
foo(x: String) = 42 |""".stripMargin ) def testTodo_ScaladocComment_WithOtherFields_MultilineTodo(): Unit = testTodos( s"""/** | * ${start}TODO: do something$end | * ${start}todo description continue$end | * @param x some description | * @returs something | */ |def foo(x: String) = 42 |""".stripMargin ) def testTodo_ScaladocComment_TagTodo(): Unit = testTodos( s"""/** | * @param x some description | * @${start}todo do something$end | * @returns something | */ |def foo(x: String) = 42 |""".stripMargin ) def testTodo_ScaladocComment_InWorksheet_HealthCheck(): Unit = testTodos( s"""/** | * ${start}TODO: do something$end | * unrelated comment line | */ |val x = 42 |""".stripMargin, fileType = "sc" ) def testTodo_ScaladocComment_InSbt_HealthCheck(): Unit = testTodos( s"""/** | * ${start}TODO: do something$end | * unrelated comment line | */ |val x = 42 |""".stripMargin, fileType = "sbt" ) }
JetBrains/intellij-scala
scala/scala-impl/test/org/jetbrains/plugins/scala/editor/todo/ScalaTodoIndexerTest.scala
Scala
apache-2.0
3,614
package org.jetbrains.jps.incremental.scala package local import java.io.File import org.jetbrains.jps.incremental.ModuleLevelBuilder.ExitCode import org.jetbrains.jps.incremental.scala.data._ import xsbti.compile.AnalysisStore import sbt.internal.inc.FileAnalysisStore /** * @author Pavel Fatin */ class LocalServer extends Server { private var cachedCompilerFactory: Option[CompilerFactory] = None private val lock = new Object() def compile(sbtData: SbtData, compilerData: CompilerData, compilationData: CompilationData, client: Client): ExitCode = { val compiler = try lock.synchronized { val compilerFactory = compilerFactoryFrom(sbtData) client.progress("Instantiating compiler...") compilerFactory.createCompiler(compilerData, client, LocalServer.createAnalysisStore) } catch { case e: Throwable => compilationData.sources.foreach(f => client.sourceStarted(f.toString)) throw e } if (!client.isCanceled) { compiler.compile(compilationData, client) } client.compilationEnd() ExitCode.OK } private def compilerFactoryFrom(sbtData: SbtData): CompilerFactory = cachedCompilerFactory.getOrElse { val factory = new CachingFactory(new CompilerFactoryImpl(sbtData), 10, 600, 10) cachedCompilerFactory = Some(factory) factory } } object LocalServer { private def createAnalysisStore(cacheFile: File): AnalysisStore = { val store = FileAnalysisStore.binary(cacheFile) AnalysisStore.getThreadSafeStore(AnalysisStore.getCachedStore(store)) } }
triplequote/intellij-scala
scala/compiler-jps/src/org/jetbrains/jps/incremental/scala/local/LocalServer.scala
Scala
apache-2.0
1,558
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.openwhisk.core.containerpool import scala.concurrent.Future import spray.json._ import org.apache.openwhisk.common.TransactionId import org.apache.openwhisk.core.entity.ActivationResponse.ContainerHttpError import org.apache.openwhisk.core.entity.ActivationResponse._ trait ContainerClient { def post(endpoint: String, body: JsValue, retry: Boolean)( implicit tid: TransactionId): Future[Either[ContainerHttpError, ContainerResponse]] def close(): Future[Unit] }
starpit/openwhisk
common/scala/src/main/scala/org/apache/openwhisk/core/containerpool/ContainerClient.scala
Scala
apache-2.0
1,295
package toguru.toggles import play.api.mvc.{Result, Results} import toguru.toggles.ToggleActor.{PersistFailed, ToggleDoesNotExist} trait ToggleActorResponses extends Results with JsonResponses { type ResponseHandler = Any => Result type HandlerDecorator = ResponseHandler => ResponseHandler def both(left: HandlerDecorator, right: HandlerDecorator): HandlerDecorator = left.andThen(right) def whenToggleExists(handler: ResponseHandler)(implicit actionId: String): ResponseHandler = { case ToggleDoesNotExist(id) => publishFailure(actionId, id, "reason" -> "toggle does not exist") NotFound(errorJson("Not found", s"A toggle with id $id does not exist", "Provide an existing toggle id")) case r => handler(r) } def whenPersisted(handler: ResponseHandler)(implicit actionId: String): ResponseHandler = { case PersistFailed(toggleId, cause) => publishFailure(actionId, cause, "toggleId" -> toggleId) InternalServerError(errorJson("Internal server error", cause.getMessage)) case r => handler(r) } }
andreas-schroeder/toguru
app/toguru/toggles/ToggleActorResponses.scala
Scala
mit
1,059
/* * Copyright (c) 2013 Aviat Networks. * This file is part of DocReg+Web. Please refer to the NOTICE.txt file for license details. */ package vvv.docreg.backend import org.specs2.mutable._ import org.specs2.mock._ import vvv.docreg.db.{TestDbScope, TestDbVendor} import vvv.docreg.model.{UserLookupProvider, Subscription} import vvv.docreg.agent.SubscriberInfo import net.liftweb.common.Full import org.mockito.Matchers class SubscriptionReconcileTest extends Specification with Mockito { sequential "SubscritionReconcile" should { "Do nothing for no subscriptions" >> new TestDbScope { import org.squeryl.PrimitiveTypeMode._ transaction{ val (p1, _, _) = db.createProjects val (u1, u2) = db.createUsers val (d, _, _, _) = db.createDocument(p1, u1) val lookup = mock[UserLookupProvider] val x = new SubscriptionReconcile { val userLookup = lookup } x.reconcileSubscriptions(d, Nil) Subscription.forDocument(d) must beEmpty } } "Parse options to either notification or bookmark" >> new TestDbScope { import org.squeryl.PrimitiveTypeMode._ transaction{ val (p1, _, _) = db.createProjects val (u1, u2) = db.createUsers val (d, _, _, _) = db.createDocument(p1, u1) val lookup = mock[UserLookupProvider] lookup.lookup(Matchers.eq(Some("jroads")), Matchers.eq(Some("j@f.com")), Matchers.eq(None), Matchers.anyString()) returns(Full(u1)) val x = new SubscriptionReconcile { val userLookup = lookup } "always option is notification" >> { Subscription.unsubscribe(d, u1) Subscription.forDocument(d) must beEmpty x.reconcileSubscriptions(d, SubscriberInfo("jroads", "j@f.com", "always") :: Nil) val ss = Subscription.forDocument(d) ss must haveSize(1) ss(0).userId must be_==(1) ss(0).documentId must be_==(1) ss(0).notification must be_==(true) ss(0).bookmark must be_==(false) } "bookmark option is bookmark" >> { Subscription.unsubscribe(d, u1) Subscription.forDocument(d) must beEmpty x.reconcileSubscriptions(d, SubscriberInfo("jroads", "j@f.com", "bookmark") :: Nil) val ss = 
Subscription.forDocument(d) ss must haveSize(1) ss(0).userId must be_==(1) ss(0).documentId must be_==(1) ss(0).notification must be_==(false) ss(0).bookmark must be_==(true) } "both options are possible" >> { Subscription.unsubscribe(d, u1) Subscription.forDocument(d) must beEmpty x.reconcileSubscriptions(d, SubscriberInfo("jroads", "j@f.com", "bookmark always") :: Nil) val ss = Subscription.forDocument(d) ss must haveSize(1) ss(0).userId must be_==(1) ss(0).documentId must be_==(1) ss(0).notification must be_==(true) ss(0).bookmark must be_==(true) } "even when other junk exists in options" >> { Subscription.unsubscribe(d, u1) Subscription.forDocument(d) must beEmpty x.reconcileSubscriptions(d, SubscriberInfo("jroads", "j@f.com", "stuff always foo bar") :: Nil) val s = Subscription.forDocument(d).apply(0) s.notification must be_==(true) s.bookmark must be_==(false) } "currently the mail files have no space separation in options" >> { Subscription.unsubscribe(d, u1) Subscription.forDocument(d) must beEmpty x.reconcileSubscriptions(d, SubscriberInfo("jroads", "j@f.com", "alwaysalwaysalways") :: Nil) val s = Subscription.forDocument(d).apply(0) s.notification must be_==(true) s.bookmark must be_==(false) } "currently the mail files have no space separation in options" >> { Subscription.unsubscribe(d, u1) Subscription.forDocument(d) must beEmpty x.reconcileSubscriptions(d, SubscriberInfo("jroads", "j@f.com", "alwaysnomailbookmarkalwaysalways") :: Nil) val s = Subscription.forDocument(d).apply(0) s.notification must be_==(true) s.bookmark must be_==(true) } } } "add multiple subscriptions, ignoring duplicates" >> new TestDbScope { import org.squeryl.PrimitiveTypeMode._ transaction{ val (p1, _, _) = db.createProjects val (u1, u2) = db.createUsers val (d, _, _, _) = db.createDocument(p1, u1) val lookup = mock[UserLookupProvider] lookup.lookup(Matchers.eq(Some("Asutherl")), Matchers.eq(Some("alan.sutherland@hstx.com")), Matchers.eq(None), Matchers.anyString()) 
returns(Full(u2)) lookup.lookup(Matchers.eq(Some("Sabernethy")), Matchers.eq(Some("scott_abernethy@stratexnet.com")), Matchers.eq(None), Matchers.anyString()) returns(Full(u1)) lookup.lookup(Matchers.eq(Some("scott.abernethy@aviatnet.com")), Matchers.eq(Some("scott.abernethy@Aviatnet.com")), Matchers.eq(None), Matchers.anyString()) returns(Full(u1)) val x = new SubscriptionReconcile { val userLookup = lookup } val subsA = SubscriberInfo("Asutherl","alan.sutherland@hstx.com","always") val subsB = SubscriberInfo("Sabernethy","scott_abernethy@stratexnet.com","always") val subsC = SubscriberInfo("scott.abernethy@aviatnet.com","scott.abernethy@Aviatnet.com","always bookmark") x.reconcileSubscriptions(d, List(subsA, subsB, subsC)) val ss = Subscription.forDocument(d) ss must haveSize(2) ss(0).userId must be_==(2) ss(0).documentId must be_==(1) ss(0).notification must be_==(true) ss(0).bookmark must be_==(false) ss(1).userId must be_==(1) ss(1).documentId must be_==(1) ss(1).notification must be_==(true) ss(1).bookmark must be_==(true) } } "No change" >> new TestDbScope { import org.squeryl.PrimitiveTypeMode._ transaction{ val (p1, _, _) = db.createProjects val (u1, u2) = db.createUsers val (d, _, _, _) = db.createDocument(p1, u1) val lookup = mock[UserLookupProvider] lookup.lookup(Matchers.eq(Some("fg")), Matchers.eq(Some("fg@google.com")), Matchers.eq(None), Matchers.anyString()) returns(Full(u2)) val x = new SubscriptionReconcile { val userLookup = lookup } "for subsequent reconciles" >> { x.reconcileSubscriptions(d, SubscriberInfo("fg", "fg@google.com", "always") :: Nil) x.reconcileSubscriptions(d, SubscriberInfo("fg", "fg@google.com", "always") :: Nil) x.reconcileSubscriptions(d, SubscriberInfo("fg", "fg@google.com", "always") :: Nil) val ss = Subscription.forDocument(d) ss must haveSize(1) ss(0).userId must be_==(2) ss(0).documentId must be_==(1) ss(0).notification must be_==(true) ss(0).bookmark must be_==(false) } "unless the represent a change in subscription 
option" >> { x.reconcileSubscriptions(d, SubscriberInfo("fg", "fg@google.com", "bookmark") :: Nil) val ss = Subscription.forDocument(d) ss must haveSize(1) ss(0).userId must be_==(2) ss(0).documentId must be_==(1) ss(0).notification must be_==(false) ss(0).bookmark must be_==(true) } } } "Remove subscriptions if no longer there" >> new TestDbScope { import org.squeryl.PrimitiveTypeMode._ transaction{ val (p1, _, _) = db.createProjects val (u1, u2) = db.createUsers val (d, _, _, _) = db.createDocument(p1, u1) val lookup = mock[UserLookupProvider] lookup.lookup(Matchers.eq(Some("jroads")), Matchers.eq(Some("j@f.com")), Matchers.eq(None), Matchers.anyString()) returns(Full(u1)) val x = new SubscriptionReconcile { val userLookup = lookup } "when no subscriptions should exist" >> { Subscription.subscribe(d, u1) Subscription.subscribe(d, u2) Subscription.forDocument(d) must haveSize(2) x.reconcileSubscriptions(d, Nil) Subscription.forDocument(d) must beEmpty } "when only some of the existing subscriptions should now exist" >> { Subscription.subscribe(d, u1) Subscription.subscribe(d, u2) Subscription.forDocument(d) must haveSize(2) x.reconcileSubscriptions(d, SubscriberInfo("jroads", "j@f.com", "always") :: Nil) val ss = Subscription.forDocument(d) ss must haveSize(1) ss(0).userId must be_==(1) ss(0).documentId must be_==(1) ss(0).notification must be_==(true) ss(0).bookmark must be_==(false) } } } } }
scott-abernethy/docreg-web
src/test/scala/vvv/docreg/backend/SubscriptionReconcileTest.scala
Scala
gpl-3.0
8,950
package org.jetbrains.plugins.scala.project.template import com.intellij.facet.impl.ui.libraries.LibraryOptionsPanel import com.intellij.framework.library.FrameworkLibraryVersionFilter import com.intellij.openapi.roots.ui.configuration.projectRoot.LibrariesContainer import com.intellij.ui.components.{ComponentsKt, JBTextField} import com.intellij.util.ui.UI import org.jetbrains.annotations.Nls import org.jetbrains.plugins.scala.ScalaBundle import org.jetbrains.plugins.scala.project.ScalaLibraryType import javax.swing.{JLabel, JPanel} trait ScalaSDKStepLike extends PackagePrefixStepLike { protected def librariesContainer: LibrariesContainer //noinspection ScalaExtractStringToBundle @Nls protected val scalaSdkLabelText: String = "Scala S\\u001BDK:" protected lazy val libraryPanel = new LibraryOptionsPanel( ScalaLibraryType.Description, "", FrameworkLibraryVersionFilter.ALL, librariesContainer, false ) } trait PackagePrefixStepLike { protected val packagePrefixTextField: JBTextField = { val tf = new JBTextField() tf.getEmptyText.setText(ScalaBundle.message("package.prefix.example")) tf } protected val packagePrefixHelpText: String = ScalaBundle.message("package.prefix.help") protected val packagePrefixPanelWithTooltip: JPanel = UI.PanelFactory .panel(packagePrefixTextField) .withTooltip(packagePrefixHelpText) .createPanel() protected val packagePrefixLabelText: String = ScalaBundle.message("package.prefix.label") /** * In NewProjectWizard we can't use `prefixPanel` created with `UI.PanelFactory.panel.withTooltip` * because it adds some strange indent to the left of the panel, which looks ugly.<br> * I didn't find a nice way to fix this, so we set tooltip on a label when using NPW */ protected val packagePrefixLabel: JLabel = { val label = ComponentsKt.Label(packagePrefixLabelText, null, null, false, null) label.setToolTipText(packagePrefixHelpText) label } }
JetBrains/intellij-scala
scala/scala-impl/src/org/jetbrains/plugins/scala/project/template/ScalaSDKStepLike.scala
Scala
apache-2.0
1,996
/* * Copyright (C) 2014 - 2017 Contributors as noted in the AUTHORS.md file * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as * published by the Free Software Foundation, either version 3 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package com.wegtam.tensei.agent import akka.actor.{ Actor, ActorLogging, ActorRef, Props } import com.wegtam.tensei.agent.DummyActor.DummyActorRelay object DummyActor { def props(): Props = Props(classOf[DummyActor]) case class DummyActorRelay(msg: Any, receiver: ActorRef) } /** * A dummy actor for testing. * * It can relay messages that has been sent to it. */ class DummyActor extends Actor with ActorLogging { override def receive: Receive = { case DummyActorRelay(msg, receiver) => receiver ! msg case msg => log.debug("DummyActor got an unhandled message: {}", msg) } }
Tensei-Data/tensei-agent
src/test/scala/com/wegtam/tensei/agent/DummyActor.scala
Scala
agpl-3.0
1,370
package edu.gemini.model.p1.immutable import edu.gemini.model.p1.{ mutable => M } object GmosSBlueprintLongslit { def apply(m: M.GmosSBlueprintLongslit) = new GmosSBlueprintLongslit( m.getDisperser, m.getFilter, m.getFpu) } case class GmosSBlueprintLongslit(disperser: GmosSDisperser, filter: GmosSFilter, fpu: GmosSFpu) extends GmosSBlueprintSpectrosopyBase { def name = "GMOS-S Longslit %s %s %s".format(disperser.value, filter.value, fpu.value) def mutable(n:Namer) = { val m = Factory.createGmosSBlueprintLongslit m.setId(n.nameOf(this)) m.setName(name) m.setDisperser(disperser) m.setFilter(filter) m.setFpu(fpu) m } def toChoice(n:Namer) = { val c = Factory.createGmosSBlueprintChoice c.setLongslit(mutable(n)) c.setRegime(M.GmosSWavelengthRegime.OPTICAL) c } }
arturog8m/ocs
bundle/edu.gemini.model.p1/src/main/scala/edu/gemini/model/p1/immutable/GmosSBlueprintLongslit.scala
Scala
bsd-3-clause
843
/* * ApplyFactory.scala * Description needed * * Created By: Glenn Takata (gtakata@cra.com) * Creation Date: Dec 15, 2014 * * Copyright 2014 Avrom J. Pfeffer and Charles River Analytics, Inc. * See http://www.cra.com or email figaro@cra.com for information. * * See http://www.github.com/p2t2/figaro for a copy of the software license. */ package com.cra.figaro.algorithm.factored.factors.factory import com.cra.figaro.algorithm.PointMapper import com.cra.figaro.algorithm.factored.factors._ import com.cra.figaro.algorithm.lazyfactored._ import com.cra.figaro.language._ /** * A Sub-Factory for Apply Elements */ object ApplyFactory { /** * Factor constructor for an Apply Element that has one input */ def makeFactors[T, U](apply: Apply1[T, U])(implicit mapper: PointMapper[U]): List[Factor[Double]] = { val applyMap: scala.collection.mutable.Map[T, U] = LazyValues(apply.universe).getMap(apply) val arg1Var = Variable(apply.arg1) val resultVar = Variable(apply) val applyValues = LazyValues(apply.universe).storedValues(apply) val factor = new SparseFactor[Double](List(arg1Var), List(resultVar)) val arg1Indices = arg1Var.range.zipWithIndex for { (arg1Val, arg1Index) <- arg1Indices //(resultVal, resultIndex) <- resultIndices } { // See logic in makeCares if (arg1Val.isRegular) { // arg1Val.value should have been placed in applyMap at the time the values of this apply were computed. // By using applyMap, we can make sure that any contained elements in the result of the apply are the same // now as they were when values were computed. 
val resultVal = mapper.map(applyMap(arg1Val.value), applyValues.regularValues) val resultIndex = resultVar.range.indexWhere(_.value == resultVal) factor.set(List(arg1Index, resultIndex), 1.0) } else if (!arg1Val.isRegular && resultVar.range.exists(!_.isRegular)) { val resultIndex = resultVar.range.indexWhere(!_.isRegular) factor.set(List(arg1Index, resultIndex), 1.0) } } List(factor) } /** * Factor constructor for an Apply Element that has two inputs */ def makeFactors[T1, T2, U](apply: Apply2[T1, T2, U])(implicit mapper: PointMapper[U]): List[Factor[Double]] = { val applyMap: scala.collection.mutable.Map[(T1, T2), U] = LazyValues(apply.universe).getMap(apply) val arg1Var = Variable(apply.arg1) val arg2Var = Variable(apply.arg2) val resultVar = Variable(apply) val applyValues = LazyValues(apply.universe).storedValues(apply) val factor = new SparseFactor[Double](List(arg1Var, arg2Var), List(resultVar)) val arg1Indices = arg1Var.range.zipWithIndex val arg2Indices = arg2Var.range.zipWithIndex val resultIndices = resultVar.range.zipWithIndex for { (arg1Val, arg1Index) <- arg1Indices (arg2Val, arg2Index) <- arg2Indices //(resultVal, resultIndex) <- resultIndices } { if (arg1Val.isRegular && arg2Val.isRegular) { // arg1Val.value should have been placed in applyMap at the time the values of this apply were computed. // By using applyMap, we can make sure that any contained elements in the result of the apply are the same now as they were when values were computed. 
val resultVal = mapper.map(applyMap((arg1Val.value, arg2Val.value)), applyValues.regularValues) val resultIndex = resultVar.range.indexWhere(_.value == resultVal) factor.set(List(arg1Index, arg2Index, resultIndex), 1.0) } else if ((!arg1Val.isRegular || !arg2Val.isRegular) && resultVar.range.exists(!_.isRegular)) { val resultIndex = resultVar.range.indexWhere(!_.isRegular) factor.set(List(arg1Index, arg2Index, resultIndex), 1.0) } } List(factor) } /** * Factor constructor for an Apply Element that has three inputs */ def makeFactors[T1, T2, T3, U](apply: Apply3[T1, T2, T3, U])(implicit mapper: PointMapper[U]): List[Factor[Double]] = { val applyMap: scala.collection.mutable.Map[(T1, T2, T3), U] = LazyValues(apply.universe).getMap(apply) val arg1Var = Variable(apply.arg1) val arg2Var = Variable(apply.arg2) val arg3Var = Variable(apply.arg3) val resultVar = Variable(apply) val applyValues = LazyValues(apply.universe).storedValues(apply) val factor = new SparseFactor[Double](List(arg1Var, arg2Var, arg3Var), List(resultVar)) val arg1Indices = arg1Var.range.zipWithIndex val arg2Indices = arg2Var.range.zipWithIndex val arg3Indices = arg3Var.range.zipWithIndex //val resultIndices = resultVar.range.zipWithIndex for { (arg1Val, arg1Index) <- arg1Indices (arg2Val, arg2Index) <- arg2Indices (arg3Val, arg3Index) <- arg3Indices //(resultVal, resultIndex) <- resultIndices } { if (arg1Val.isRegular && arg2Val.isRegular && arg3Val.isRegular) { // arg1Val.value should have been placed in applyMap at the time the values of this apply were computed. // By using applyMap, we can make sure that any contained elements in the result of the apply are the same now as they were when values were computed. 
val resultVal = mapper.map(applyMap((arg1Val.value, arg2Val.value, arg3Val.value)), applyValues.regularValues) val resultIndex = resultVar.range.indexWhere(_.value == resultVal) factor.set(List(arg1Index, arg2Index, arg3Index, resultIndex), 1.0) } else if ((!arg1Val.isRegular || !arg2Val.isRegular || !arg3Val.isRegular) && resultVar.range.exists(!_.isRegular)) { val resultIndex = resultVar.range.indexWhere(!_.isRegular) factor.set(List(arg1Index, arg2Index, arg3Index, resultIndex), 1.0) } } List(factor) } /** * Factor constructor for an Apply Element that has four inputs */ def makeFactors[T1, T2, T3, T4, U](apply: Apply4[T1, T2, T3, T4, U])(implicit mapper: PointMapper[U]): List[Factor[Double]] = { val applyMap: scala.collection.mutable.Map[(T1, T2, T3, T4), U] = LazyValues(apply.universe).getMap(apply) val arg1Var = Variable(apply.arg1) val arg2Var = Variable(apply.arg2) val arg3Var = Variable(apply.arg3) val arg4Var = Variable(apply.arg4) val resultVar = Variable(apply) val applyValues = LazyValues(apply.universe).storedValues(apply) val factor = new SparseFactor[Double](List(arg1Var, arg2Var, arg3Var, arg4Var), List(resultVar)) val arg1Indices = arg1Var.range.zipWithIndex val arg2Indices = arg2Var.range.zipWithIndex val arg3Indices = arg3Var.range.zipWithIndex val arg4Indices = arg4Var.range.zipWithIndex //val resultIndices = resultVar.range.zipWithIndex for { (arg1Val, arg1Index) <- arg1Indices (arg2Val, arg2Index) <- arg2Indices (arg3Val, arg3Index) <- arg3Indices (arg4Val, arg4Index) <- arg4Indices //(resultVal, resultIndex) <- resultIndices } { if (arg1Val.isRegular && arg2Val.isRegular && arg3Val.isRegular && arg4Val.isRegular) { // arg1Val.value should have been placed in applyMap at the time the values of this apply were computed. // By using applyMap, we can make sure that any contained elements in the result of the apply are the same now as they were when values were computed. 
val resultVal = mapper.map(applyMap((arg1Val.value, arg2Val.value, arg3Val.value, arg4Val.value)), applyValues.regularValues) val resultIndex = resultVar.range.indexWhere(_.value == resultVal) factor.set(List(arg1Index, arg2Index, arg3Index, arg4Index, resultIndex), 1.0) } else if ((!arg1Val.isRegular || !arg2Val.isRegular || !arg3Val.isRegular || !arg4Val.isRegular) && resultVar.range.exists(!_.isRegular)) { val resultIndex = resultVar.range.indexWhere(!_.isRegular) factor.set(List(arg1Index, arg2Index, arg3Index, arg4Index, resultIndex), 1.0) } } List(factor) } /** * Factor constructor for an Apply Element that has five inputs */ def makeFactors[T1, T2, T3, T4, T5, U](apply: Apply5[T1, T2, T3, T4, T5, U])(implicit mapper: PointMapper[U]): List[Factor[Double]] = { val applyMap: scala.collection.mutable.Map[(T1, T2, T3, T4, T5), U] = LazyValues(apply.universe).getMap(apply) val arg1Var = Variable(apply.arg1) val arg2Var = Variable(apply.arg2) val arg3Var = Variable(apply.arg3) val arg4Var = Variable(apply.arg4) val arg5Var = Variable(apply.arg5) val resultVar = Variable(apply) val applyValues = LazyValues(apply.universe).storedValues(apply) val factor = new SparseFactor[Double](List(arg1Var, arg2Var, arg3Var, arg4Var, arg5Var), List(resultVar)) val arg1Indices = arg1Var.range.zipWithIndex val arg2Indices = arg2Var.range.zipWithIndex val arg3Indices = arg3Var.range.zipWithIndex val arg4Indices = arg4Var.range.zipWithIndex val arg5Indices = arg5Var.range.zipWithIndex // val resultIndices = resultVar.range.zipWithIndex for { (arg1Val, arg1Index) <- arg1Indices (arg2Val, arg2Index) <- arg2Indices (arg3Val, arg3Index) <- arg3Indices (arg4Val, arg4Index) <- arg4Indices (arg5Val, arg5Index) <- arg5Indices // (resultVal, resultIndex) <- resultIndices } { if (arg1Val.isRegular && arg2Val.isRegular && arg3Val.isRegular && arg4Val.isRegular && arg5Val.isRegular) { // arg1Val.value should have been placed in applyMap at the time the values of this apply were computed. 
// By using applyMap, we can make sure that any contained elements in the result of the apply are the same now as they were when values were computed. val resultVal = mapper.map(applyMap((arg1Val.value, arg2Val.value, arg3Val.value, arg4Val.value, arg5Val.value)), applyValues.regularValues) val resultIndex = resultVar.range.indexWhere(_.value == resultVal) factor.set(List(arg1Index, arg2Index, arg3Index, arg4Index, arg5Index, resultIndex), 1.0) } else if ((!arg1Val.isRegular || !arg2Val.isRegular || !arg3Val.isRegular || !arg4Val.isRegular || !arg5Val.isRegular) && resultVar.range.exists(!_.isRegular)) { val resultIndex = resultVar.range.indexWhere(!_.isRegular) factor.set(List(arg1Index, arg2Index, arg3Index, arg4Index, arg5Index, resultIndex), 1.0) } } List(factor) } }
jyuhuan/figaro
Figaro/src/main/scala/com/cra/figaro/algorithm/factored/factors/factory/ApplyFactory.scala
Scala
bsd-3-clause
10,398
/* * Copyright 2015 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package uk.gov.hmrc.ct.ct600a.v3.formats import play.api.libs.json.Json import uk.gov.hmrc.ct.ct600a.v3.LoansToParticipators object LoansFormatter { import uk.gov.hmrc.ct.ct600a.v3.formats._ def LoansFromJsonString(json: String): LoansToParticipators = Json.fromJson[LoansToParticipators](Json.parse(json)).get def toJsonString(loans2p: LoansToParticipators): String = Json.toJson(loans2p).toString() def asBoxString(loans2p: LoansToParticipators): Option[String] = Some(toJsonString(loans2p)) }
keithhall/ct-calculations
src/main/scala/uk/gov/hmrc/ct/ct600a/v3/formats/Loans.scala
Scala
apache-2.0
1,117
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * */ package io.github.tailhq.dynaml.examples import breeze.linalg.DenseVector import io.github.tailhq.dynaml.graphics.charts.Highcharts._ import io.github.tailhq.dynaml.pipes._ import org.apache.log4j.Logger import io.github.tailhq.dynaml.DynaMLPipe import io.github.tailhq.dynaml.evaluation.RegressionMetrics import io.github.tailhq.dynaml.graph.FFNeuralGraph import io.github.tailhq.dynaml.models.neuralnets.FeedForwardNetwork import scala.collection.mutable.{MutableList => ML} /** * Created by mandar on 15/12/15. 
*/ object AbottPowerPlantNN { private val logger = Logger.getLogger(this.getClass) implicit val transform = DataPipe( (d: Stream[(DenseVector[Double], DenseVector[Double])]) => d ) def apply( delta: Int, hidden: Int = 2, nCounts: List[Int] = List(), acts: List[String], num_training: Int = 2000, num_test: Int = 1000, stepSize: Double = 0.01, maxIt: Int = 300, mini: Double = 1.0, alpha: Double = 0.0, regularization: Double = 0.5 ): Unit = runExperiment( hidden, nCounts, acts, num_training, num_test, delta, Map( "tolerance" -> "0.0001", "step" -> stepSize.toString, "maxIterations" -> maxIt.toString, "miniBatchFraction" -> mini.toString, "momentum" -> alpha.toString, "regularization" -> regularization.toString ) ) def runExperiment( hidden: Int = 2, nCounts: List[Int] = List(), act: List[String], num_training: Int = 200, num_test: Int, deltaT: Int = 2, opt: Map[String, String] ): Unit = { val names = Map( 5 -> "Drum pressure PSI", 6 -> "Excess Oxygen", 7 -> "Water level in Drum", 8 -> "Steam Flow kg/s" ) val modelTrainTest = (trainTest: ( ( Iterable[(DenseVector[Double], DenseVector[Double])], Iterable[(DenseVector[Double], DenseVector[Double])] ), (DenseVector[Double], DenseVector[Double]) )) => { logger.info("Number of Inputs: " + trainTest._1._1.head._1.length) logger.info("Number of Outputs: " + trainTest._1._1.head._2.length) val gr = FFNeuralGraph(trainTest._1._1.head._1.length, 4, hidden, act, nCounts) val model = new FeedForwardNetwork[ Stream[(DenseVector[Double], DenseVector[Double])] ](trainTest._1._1.toStream, gr)(transform) model .setLearningRate(opt("step").toDouble) .setMaxIterations(opt("maxIterations").toInt) .setBatchFraction(opt("miniBatchFraction").toDouble) .setMomentum(opt("momentum").toDouble) .setRegParam(opt("regularization").toDouble) .learn() val res = model.test(trainTest._1._2.toStream) val l = trainTest._1._1.head._1.length + trainTest._1._1.head._2.length val means = trainTest._2._1((l - 4) until l) val stdDevs = trainTest._2._2((l - 4) 
until l) val scoresAndLabelsPipe1 = DataPipe( (res: Seq[(DenseVector[Double], DenseVector[Double])]) => { res.map(r => ((r._1 *:* stdDevs) + means, (r._2 *:* stdDevs) + means)) } ) > DataPipe((res: Seq[(DenseVector[Double], DenseVector[Double])]) => { val num_outputs = res.head._1.length val outputAcc: List[ML[(Double, Double)]] = List(ML(), ML(), ML(), ML()) res.foreach(r => { (0 until num_outputs).foreach(output => { outputAcc(output) ++= ML((r._1(output), r._2(output))) }) }) outputAcc.map(_.toList) }) val scoresAndLabels = scoresAndLabelsPipe1.run(res) var index = 5 scoresAndLabels.foreach((output) => { val metrics = new RegressionMetrics(output, output.length) metrics.setName(names(index)) metrics.print() metrics.generateFitPlot() //Plotting time series prediction comparisons line((1 to output.length).toList, output.map(_._2)) hold() line((1 to output.length).toList, output.map(_._1)) legend( List( names(index), "Predicted " + names(index) + " (one hour ahead)" ) ) title( "Steam Generator; Abbott Power Plant, Champaign IL: " + names(index) ) unhold() index += 1 }) } //Load Abott power plant data into a stream //Extract the time and Dst values //separate data into training and test //pipe training data to model and then generate test predictions //create RegressionMetrics instance and produce plots val preProcessPipe = DynaMLPipe.fileToStream > DynaMLPipe.trimLines > DynaMLPipe.replaceWhiteSpaces > DynaMLPipe.extractTrainingFeatures( List(0, 5, 6, 7, 8, 1, 2, 3, 4), Map() ) > DynaMLPipe.removeMissingLines > IterableDataPipe((line: String) => { val splits = line.split(",") val timestamp = splits.head.toDouble val feat = DenseVector(splits.tail.map(_.toDouble)) (timestamp, feat) }) > DataPipe( (lines: Iterable[(Double, DenseVector[Double])]) => lines.toList .sliding(deltaT + 1) .map((history) => { val hist = history.take(history.length - 1).map(_._2) val featuresAcc: ML[Double] = ML() (0 until hist.head.length).foreach((dimension) => { //for each 
dimension/regressor take points t to t-order featuresAcc ++= hist.map(vec => vec(dimension)) }) val features = DenseVector(featuresAcc.toArray) //assert(history.length == deltaT + 1, "Check one") //assert(features.length == deltaT, "Check two") (features, history.last._2(0 to 3)) }) .toStream ) val trainTestPipe = DynaMLPipe.duplicate(preProcessPipe) > DataPipe( (data: ( Stream[(DenseVector[Double], DenseVector[Double])], Stream[(DenseVector[Double], DenseVector[Double])] )) => { (data._1.take(num_training), data._2.takeRight(num_test)) } ) > DynaMLPipe.trainTestGaussianStandardizationMO > DataPipe(modelTrainTest) val dataFile = dataDir + "/steamgen.csv" trainTestPipe((dataFile, dataFile)) } }
mandar2812/DynaML
dynaml-examples/src/main/scala/io/github/tailhq/dynaml/examples/AbottPowerPlantNN.scala
Scala
apache-2.0
7,373
package de.vorb.vision.binarization import javax.imageio.ImageIO import java.io.File import java.awt.image.BufferedImage import java.awt.image.BufferedImageOp import java.awt.image.ColorConvertOp object BinarizationTest extends App { print("Reading image... "); val color = ImageIO.read(new File("src/test/resources/color.png")) println("done.") val gray = new BufferedImage(color.getWidth, color.getHeight, BufferedImage.TYPE_BYTE_GRAY) // conversion val conv: BufferedImageOp = new ColorConvertOp(color.getColorModel().getColorSpace(), gray.getColorModel().getColorSpace(), null); print("Converting image to grayscale... "); conv.filter(color, gray); println("done.") print("Saving image... ") ImageIO.write(gray, "png", new File("src/test/resources/gray.png")) println("done.") print("Image binarization... ") val bin = Sauvola.binarize(gray, 0.2, 5); println("done.") print("Saving image... ") ImageIO.write(bin, "png", new File("src/test/resources/sauvola.png")) println("done.") println("Exit.") }
pvorb/image-binarization
src/test/scala/de/vorb/vision/binarization/BinarizationTest.scala
Scala
mit
1,076
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io.File import java.lang.management.ManagementFactory import java.nio.file.{Files, Paths} import scala.language.postfixOps import com.scalapenos.sbt.prompt.SbtPrompt.autoImport._ import com.sun.management.OperatingSystemMXBean import com.typesafe.tools.mima.plugin.MimaKeys._ import com.typesafe.tools.mima.plugin.MimaPlugin._ import sbt.Keys._ import sbt.Tests._ import sbt._ import sbtassembly.AssemblyKeys._ import sbtassembly.AssemblyPlugin._ import sbtassembly._ import sbtrelease.ReleasePlugin._ import sbtsparkpackage.SparkPackagePlugin.autoImport._ object Settings extends Build { import BuildUtil._ val versionStatus = settingKey[Unit]("The Scala version used in cross-build reapply for '+ package', '+ publish'.") val cassandraServerClasspath = taskKey[String]("Cassandra server classpath") val mavenLocalResolver = BuildUtil.mavenLocalResolver // Travis has limited quota, so we cannot use many C* instances simultaneously val isTravis = sys.props.getOrElse("travis", "false").toBoolean val osmxBean = ManagementFactory.getOperatingSystemMXBean.asInstanceOf[OperatingSystemMXBean] val sysMemoryInMB = osmxBean.getTotalPhysicalMemorySize >> 20 val singleRunRequiredMem = 3 * 1024 + 512 val 
parallelTasks = if (isTravis) 1 else Math.max(1, ((sysMemoryInMB - 1550) / singleRunRequiredMem).toInt) // Due to lack of entrophy on virtual machines we want to use /dev/urandom instead of /dev/random val useURandom = Files.exists(Paths.get("/dev/urandom")) val uRandomParams = if (useURandom) Seq("-Djava.security.egd=file:/dev/./urandom") else Seq.empty lazy val mainDir = { val dir = new File(".") IO.delete(new File(dir, "target/ports")) dir } val cassandraTestVersion = sys.props.get("test.cassandra.version").getOrElse(Versions.Cassandra) lazy val TEST_JAVA_OPTS = Seq( "-Xmx512m", s"-Dtest.cassandra.version=$cassandraTestVersion", "-Dsun.io.serialization.extendedDebugInfo=true", s"-DbaseDir=${mainDir.getAbsolutePath}") ++ uRandomParams var TEST_ENV: Option[Map[String, String]] = None val asfSnapshotsResolver = "ASF Snapshots" at "https://repository.apache.org/content/groups/snapshots" val asfStagingResolver = "ASF Staging" at "https://repository.apache.org/content/groups/staging" def currentCommitSha = ("git rev-parse --short HEAD" !!).split('\n').head.trim def versionSuffix = { sys.props.get("publish.version.type").map(_.toLowerCase) match { case Some("release") => "" case Some("commit-release") => s"-$currentCommitSha" case _ => "-SNAPSHOT" } } def currentVersion = ("git describe --tags --match v*" !!).trim.substring(1) lazy val buildSettings = Seq( organization := "com.datastax.spark", version in ThisBuild := currentVersion, scalaVersion := Versions.scalaVersion, crossScalaVersions := Versions.crossScala, crossVersion := CrossVersion.binary, versionStatus := Versions.status(scalaVersion.value, scalaBinaryVersion.value) ) lazy val sparkPackageSettings = Seq( spName := "datastax/spark-cassandra-connector", sparkVersion := Versions.Spark, spAppendScalaVersion := true, spIncludeMaven := true, spIgnoreProvided := true, spShade := true, credentials += Credentials(Path.userHome / ".ivy2" / ".credentials") ) override lazy val settings = super.settings ++ buildSettings 
++ Seq( normalizedName := "spark-cassandra-connector", name := "DataStax Apache Cassandra connector for Apache Spark", organization := "com.datastax.spark", description := """ |A library that exposes Cassandra tables as Spark RDDs, writes Spark RDDs to |Cassandra tables, and executes CQL queries in Spark applications.""".stringPrefix, homepage := Some(url("https://github.com/datastax/spark-cassandra-connector")), licenses := Seq(("Apache License 2.0", url("http://www.apache.org/licenses/LICENSE-2.0"))), promptTheme := ScalapenosTheme ) val parentSettings = noPublish ++ Seq( managedSourceDirectories := Nil, (unmanagedSourceDirectories in Compile) := Nil, (unmanagedSourceDirectories in Test) := Nil ) lazy val noPublish = Seq( publish := {}, publishLocal := {}, publishArtifact := false ) val encoding = Seq("-encoding", "UTF-8") val installSparkTask = taskKey[Unit]("Optionally install Spark from Git to local Maven repository") lazy val projectSettings = Seq( concurrentRestrictions in Global += Tags.limit(Tags.Test, parallelTasks), aggregate in update := false, incOptions := incOptions.value.withNameHashing(true), ivyScala := ivyScala.value map { _.copy(overrideScalaVersion = true) }, // when sbt-release enabled: enableCrossBuild = true, /* Can not use -Xfatal-warnings until this known issue fixed: org.apache.cassandra.io.util.DataOutputPlus not found - continuing with a stub. 
*/ scalacOptions ++= encoding ++ Seq( s"-target:jvm-${Versions.JDK}", "-deprecation", "-feature", "-language:_", "-unchecked", "-Xlint"), scalacOptions in ThisBuild ++= Seq("-deprecation", "-feature"), // 2.11 javacOptions ++= encoding ++ Seq( "-source", Versions.JDK, "-target", Versions.JDK, "-Xlint:unchecked", "-Xlint:deprecation" ), scalacOptions in (Compile, doc) ++= Seq( "-implicits", "-doc-root-content", "rootdoc.txt" ), javacOptions in (Compile, doc) := encoding ++ Seq( "-source", Versions.JDK ), evictionWarningOptions in update := EvictionWarningOptions.default .withWarnTransitiveEvictions(false) .withWarnDirectEvictions(false) .withWarnScalaVersionEviction(false), cleanKeepFiles ++= Seq("resolution-cache", "streams", "spark-archives").map(target.value / _), updateOptions := updateOptions.value.withCachedResolution(cachedResoluton = true), ivyLoggingLevel in ThisBuild := UpdateLogging.Quiet, parallelExecution in ThisBuild := true, parallelExecution in Global := true, apiMappings ++= DocumentationMapping.mapJarToDocURL( (managedClasspath in (Compile, doc)).value, Dependencies.documentationMappings), installSparkTask := { val dir = new File(".").toPath SparkInstaller(scalaBinaryVersion.value, dir) }, resolvers ++= Seq(mavenLocalResolver, asfStagingResolver, asfSnapshotsResolver), update <<= (installSparkTask, update) map {(_, out) => out} ) lazy val mimaSettings = mimaDefaultSettings ++ Seq( previousArtifact := None, //Some("a" % "b_2.10.4" % "1.2"), binaryIssueFilters ++= Seq.empty ) lazy val defaultSettings = projectSettings ++ mimaSettings ++ releaseSettings ++ testSettings lazy val rootSettings = Seq( cleanKeepFiles ++= Seq("resolution-cache", "streams", "spark-archives").map(target.value / _), updateOptions := updateOptions.value.withCachedResolution(true) ) lazy val demoSettings = projectSettings ++ noPublish ++ Seq( publishArtifact in (Test,packageBin) := false, javaOptions in run ++= Seq("-Djava.library.path=./sigar","-Xms128m", "-Xmx1024m", 
"-XX:+UseConcMarkSweepGC") ) val testConfigs = inConfig(Test)(Defaults.testTasks) ++ inConfig(IntegrationTest)(Defaults.itSettings) val pureTestClasspath = taskKey[Set[String]]("Show classpath which is obtained as (test:fullClasspath + it:fullClasspath) - compile:fullClasspath") lazy val customTasks = Seq( pureTestClasspath := { val testDeps = (fullClasspath in Test value) map (_.data.getAbsolutePath) toSet val itDeps = (fullClasspath in IntegrationTest value) map (_.data.getAbsolutePath) toSet val compileDeps = (fullClasspath in Compile value) map (_.data.getAbsolutePath) toSet val cp = (testDeps ++ itDeps) -- compileDeps println("TEST_CLASSPATH=" + cp.mkString(File.pathSeparator)) cp } ) lazy val assembledSettings = defaultSettings ++ customTasks ++ sbtAssemblySettings ++ sparkPackageSettings val testOptionSettings = Seq( Tests.Argument(TestFrameworks.ScalaTest, "-oDF"), Tests.Argument(TestFrameworks.JUnit, "-oDF", "-v", "-a") ) lazy val testArtifacts = Seq( artifactName in (Test,packageBin) := { (sv: ScalaVersion, module: ModuleID, artifact: Artifact) => baseDirectory.value.name + "-test_" + sv.binary + "-" + module.revision + "." + artifact.extension }, artifactName in (IntegrationTest,packageBin) := { (sv: ScalaVersion, module: ModuleID, artifact: Artifact) => baseDirectory.value.name + "-it_" + sv.binary + "-" + module.revision + "." 
+ artifact.extension }, publishArtifact in Test := false, publishArtifact in (Test,packageBin) := true, publishArtifact in (IntegrationTest,packageBin) := true, publish in (Test,packageBin) := (), publish in (IntegrationTest,packageBin) := () ) def makeTestGroups(tests: Seq[TestDefinition]): Seq[Group] = { // if we have many C* instances and we can run multiple tests in parallel, then group by package name // additional groups for auth and ssl is just an optimisation def multiCInstanceGroupingFunction(test: TestDefinition): String = { if (test.name.toLowerCase.contains("auth")) "auth" else if (test.name.toLowerCase.contains("ssl")) "ssl" else if (test.name.contains("CustomFromDriverSpec")) "customdriverspec" else if (test.name.contains("CETSpec") || test.name.contains("CETTest")) "cetspec" else if (test.name.contains("PSTSpec") || test.name.contains("PSTTest")) "pstspec" else test.name.reverse.dropWhile(_ != '.').reverse } // if we have a single C* create as little groups as possible to avoid restarting C* // the minimum - we need to run REPL and streaming tests in separate processes // additional groups for auth and ssl is just an optimisation // A new group is made for CustomFromDriverSpec because the ColumnType needs to be // Initilized afresh def singleCInstanceGroupingFunction(test: TestDefinition): String = { val pkgName = test.name.reverse.dropWhile(_ != '.').reverse if (test.name.toLowerCase.contains("authenticate")) "auth" else if (test.name.toLowerCase.contains("ssl")) "ssl" else if (pkgName.contains(".repl")) "repl" else if (pkgName.contains(".streaming")) "streaming" else if (test.name.contains("CustomFromDriverSpec")) "customdriverspec" else if (test.name.contains("CETSpec") || test.name.contains("CETTest")) "cetspec" else if (test.name.contains("PSTSpec") || test.name.contains("PSTTest")) "pstspec" else "other" } val groupingFunction = if (parallelTasks == 1) singleCInstanceGroupingFunction _ else multiCInstanceGroupingFunction _ 
tests.groupBy(groupingFunction).map { case (pkg, testsSeq) => new Group( name = pkg, tests = testsSeq, runPolicy = SubProcess(ForkOptions( runJVMOptions = TEST_JAVA_OPTS, envVars = TEST_ENV.getOrElse(sys.env), outputStrategy = Some(StdoutOutput)))) }.toSeq } lazy val testSettings = testConfigs ++ testArtifacts ++ Seq( parallelExecution in Test := true, parallelExecution in IntegrationTest := true, javaOptions in IntegrationTest ++= TEST_JAVA_OPTS, testOptions in Test ++= testOptionSettings, testOptions in IntegrationTest ++= testOptionSettings, testGrouping in IntegrationTest <<= definedTests in IntegrationTest map makeTestGroups, fork in Test := true, fork in IntegrationTest := true, managedSourceDirectories in Test := Nil, (compile in IntegrationTest) <<= (compile in Test, compile in IntegrationTest) map { (_, c) => c }, (internalDependencyClasspath in IntegrationTest) <<= Classpaths.concat( internalDependencyClasspath in IntegrationTest, exportedProducts in Test) ) lazy val pureCassandraSettings = Seq( test in IntegrationTest <<= ( cassandraServerClasspath in CassandraSparkBuild.cassandraServerProject in IntegrationTest, envVars in IntegrationTest, test in IntegrationTest) { case (cassandraServerClasspathTask, envVarsTask, testTask) => cassandraServerClasspathTask.flatMap(_ => envVarsTask).flatMap(_ => testTask) }, envVars in IntegrationTest := { val env = sys.env + ("CASSANDRA_CLASSPATH" -> (cassandraServerClasspath in CassandraSparkBuild.cassandraServerProject in IntegrationTest).value) + ("SPARK_LOCAL_IP" -> "127.0.0.1") TEST_ENV = Some(env) env } ) lazy val sbtAssemblySettings = assemblySettings ++ Seq( parallelExecution in assembly := false, assemblyJarName in assembly <<= (baseDirectory, version) map { (dir, version) => s"${dir.name}-assembly-$version.jar" }, run in Compile <<= Defaults.runTask(fullClasspath in Compile, mainClass in (Compile, run), runner in (Compile, run)), assemblyOption in assembly ~= { _.copy(includeScala = false) }, 
assemblyMergeStrategy in assembly <<= (assemblyMergeStrategy in assembly) { (old) => { case PathList("META-INF", "MANIFEST.MF") => MergeStrategy.discard case PathList("META-INF", xs @ _*) => MergeStrategy.last case x => old(x) } }, assemblyShadeRules in assembly := { val shadePackage = "shade.com.datastax.spark.connector" Seq( ShadeRule.rename("com.google.common.**" -> s"$shadePackage.google.common.@1").inAll ) } ) }
christobill/spark-cassandra-connector
project/Settings.scala
Scala
apache-2.0
14,345
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.predictionio.tools import org.apache.predictionio.tools.console.Console import org.apache.predictionio.tools.Common._ import org.apache.predictionio.tools.ReturnTypes._ import org.apache.predictionio.workflow.JsonExtractorOption import org.apache.predictionio.workflow.JsonExtractorOption.JsonExtractorOption import java.io.File import grizzled.slf4j.Logging import scala.sys.process._ case class WorkflowArgs( batch: String = "", variantJson: Option[File] = None, verbosity: Int = 0, engineParamsKey: Option[String] = None, engineFactory: Option[String] = None, evaluation: Option[String] = None, engineParamsGenerator: Option[String] = None, stopAfterRead: Boolean = false, stopAfterPrepare: Boolean = false, skipSanityCheck: Boolean = false, jsonExtractor: JsonExtractorOption = JsonExtractorOption.Both) object RunWorkflow extends Logging { def runWorkflow( wa: WorkflowArgs, sa: SparkArgs, pioHome: String, engineDirPath: String, verbose: Boolean = false): Expected[(Process, () => Unit)] = { val jarFiles = jarFilesForScala(engineDirPath).map(_.toURI) val variantJson = wa.variantJson.getOrElse(new File(engineDirPath, "engine.json")) val ei = Console.getEngineInfo( variantJson, 
engineDirPath) val args = Seq( "--engine-id", ei.engineId, "--engine-version", ei.engineVersion, "--engine-variant", variantJson.toURI.toString, "--verbosity", wa.verbosity.toString) ++ wa.engineFactory.map( x => Seq("--engine-factory", x)).getOrElse(Seq()) ++ wa.engineParamsKey.map( x => Seq("--engine-params-key", x)).getOrElse(Seq()) ++ (if (wa.batch != "") Seq("--batch", wa.batch) else Seq()) ++ (if (verbose) Seq("--verbose") else Seq()) ++ (if (wa.skipSanityCheck) Seq("--skip-sanity-check") else Seq()) ++ (if (wa.stopAfterRead) Seq("--stop-after-read") else Seq()) ++ (if (wa.stopAfterPrepare) { Seq("--stop-after-prepare") } else { Seq() }) ++ wa.evaluation.map(x => Seq("--evaluation-class", x)). getOrElse(Seq()) ++ // If engineParamsGenerator is specified, it overrides the evaluation. wa.engineParamsGenerator.orElse(wa.evaluation) .map(x => Seq("--engine-params-generator-class", x)) .getOrElse(Seq()) ++ (if (wa.batch != "") Seq("--batch", wa.batch) else Seq()) ++ Seq("--json-extractor", wa.jsonExtractor.toString) Runner.runOnSpark( "org.apache.predictionio.workflow.CreateWorkflow", args, sa, jarFiles, pioHome, verbose) } }
himanshudhami/PredictionIO
tools/src/main/scala/org/apache/predictionio/tools/RunWorkflow.scala
Scala
apache-2.0
3,461
/*
 * Copyright 2015 ligaDATA
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.ligadata.pmml.udfs

import org.scalatest.FlatSpec

/**
  * Unit tests for the [[CustomUdfs]] string helpers `Concat`,
  * `matchTermsetBoolean` and `getMatchingTokens`.
  *
  * The suite pins down two contracts in particular:
  *   - null arguments are tolerated (treated as empty / "no match"), never thrown on;
  *   - term-set matching is case-insensitive.
  */
class TestCustomUdfs extends FlatSpec {

  "Concat" should "return one String with no space" in {
    val left: String = "textOne"
    val right: String = "textTwo"
    val expected: String = "textOnetextTwo"
    val actual: String = CustomUdfs.Concat(left, right)
    assert(expected === actual)
  }

  it should "concat Ints" in {
    // Non-string operands are rendered to their string form before concatenation.
    val expected: String = "15"
    val actual: String = CustomUdfs.Concat(1, 5)
    assert(expected === actual)
  }

  it should "replace null values with empty string" in {
    val expected: String = "textOne"
    val actual: String = CustomUdfs.Concat("textOne", null)
    assert(expected === actual)
  }

  "matchTermsetBoolean" should "ignore cases" in {
    // lower-case "ligadata" must match the upper-case "LIGADATA" term
    val inputString = "ligadata company"
    val context = Array("Apple", "Google", "LIGADATA")
    val degree = 1
    assert(CustomUdfs.matchTermsetBoolean(inputString, context, degree))
  }

  it should "return false if inputString is null" in {
    val context = Array("Apple", "Google", "LIGADATA")
    assertResult(false)(CustomUdfs.matchTermsetBoolean(null, context, 1))
  }

  it should "return false if context is null" in {
    assertResult(false)(CustomUdfs.matchTermsetBoolean("ligadata", null, 1))
  }

  "getMatchingTokens" should "matched string delimited by ." in {
    val inputString = "ligadata company"
    val context = Array("Apple", "Google", "LIGADATA")
    val expected = ".LIGADATA"
    val actual = CustomUdfs.getMatchingTokens(inputString, context)
    assert(expected === actual)
  }

  // NOTE: the next two test descriptions were swapped in the original (and
  // claimed a Boolean result); each name now matches the null argument that
  // the body actually passes, and the String result actually asserted.
  it should "return empty string if context is null" in {
    val expected = ""
    val actual = CustomUdfs.getMatchingTokens("ligadata", null)
    assert(expected === actual)
  }

  it should "return empty string if inputString is null" in {
    val context = Array("Apple", "Google", "LIGADATA")
    val expected = ""
    val actual = CustomUdfs.getMatchingTokens(null, context)
    assert(expected === actual)
  }
}
traytonwhite/Kamanja
trunk/Pmml/PmmlUdfs/src/test/scala/com/ligadata/pmml/udfs/TestCustomUdfs.scala
Scala
apache-2.0
3,579
/*
 * Copyright 2011 TomTom International BV
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package tomtom.splitter.layer7

import java.io.File
import java.util.concurrent.{ExecutorService, Executors, Semaphore}

import com.typesafe.scalalogging.Logger
import org.jboss.netty.handler.codec.http.{HttpRequest, HttpResponseStatus}
import org.scalatest.{BeforeAndAfterEach, Matchers, WordSpec}
import org.slf4j.LoggerFactory
import tomtom.splitter.config.Config
import tomtom.splitter.layer7.DataType._
import tomtom.splitter.layer7.SourceType._

/**
 * Load test for the splitting proxy: `testThreads` concurrent clients fire
 * `requests` HTTP requests at the proxy, and the test then verifies that every
 * captured fixture carries the same X-Request-Id on its reference and shadow
 * responses — i.e. the proxy never cross-wires request/response pairs under load.
 */
class LoadTest extends WordSpec with Matchers with BeforeAndAfterEach {

  // bring up a reference server that can accept commands to either
  // respond normally, respond slowly, or return an error
  implicit val executor: ExecutorService = Executors.newCachedThreadPool
  val log = Logger(LoggerFactory.getLogger(getClass))

  import tomtom.splitter.layer7.PortFactory.findPort

  // Three free ports: one for the proxy under test, two for its backends.
  val proxyPort = findPort()
  val referencePort = findPort()
  val shadowPort = findPort()
  val referenceServer = new CommandableServer("reference", referencePort)
  val shadowServer = new CommandableServer("shadow", shadowPort)
  var proxyConfig: FixtureConfig = _
  val testThreads = 20
  val requests = 10000

  // Keys into a fixture's message map; presumably (source, data-type) pairs
  // from the DataType/SourceType imports above.
  val refKey = (Reference, Response)
  val shadKey = (Shadow, Response)
  val requestKey = (Reference, Request)

  // Released as work completes (once per fixture in notifier(), once per worker
  // thread); the test body blocks until enough permits come back.
  @volatile var mutex = None: Option[Semaphore]
  var _dataSunk: List[FixtureSink] = Nil
  val _seenIds = collection.mutable.Map[Int, HttpRequest]()
  val _dups = collection.mutable.Map[Int, HttpRequest]()

  /**
   * Callback invoked by the proxy fixture once both backend responses for a
   * request have been captured. Records the fixture, tracks duplicated request
   * ids for post-mortem logging, and releases one permit on `mutex`.
   */
  def notifier(testSink: FixtureSink) {
    val refRequestId = testSink.messages(refKey).headers.get("X-Request-Id")
    val shadRequestId = testSink.messages(shadKey).headers.get("X-Request-Id")
    require(refRequestId == shadRequestId)
    //log.warn("testSink: " + testSink.messages(requestKey).asInstanceOf[HttpRequest].getUri + " -> " + refRequestId)
    val requestId = refRequestId.toInt
    // Guard all three mutable containers with the test instance's monitor.
    LoadTest.this synchronized {
      _dataSunk ::= testSink
      if (_seenIds.contains(requestId)) {
        log.warn(s"Already seen $requestId")
        _dups += (requestId -> testSink.messages(requestKey).asInstanceOf[HttpRequest])
      } else {
        _seenIds += (requestId -> testSink.messages(requestKey).asInstanceOf[HttpRequest])
      }
    }
    mutex.map(_.release())
  }

  // Synchronized snapshot of the fixtures collected so far.
  def dataSunk = LoadTest.this synchronized {
    _dataSunk
  }

  Config.loadFile(new File("src/test/resources/test.config"))
  Config.config.configOpt("audit").foreach(Logging.config(_))

  override def beforeEach() {
    referenceServer.start()
    shadowServer.start()

    import tomtom.splitter.layer7.FixtureConfig._ // implicit port-to-ProxiedServer

    proxyConfig = FixtureConfig(proxyPort, referencePort, shadowPort, notifier)
    proxyConfig.start()
    // Reset per-test state so runs don't bleed into each other.
    this synchronized {
      _dataSunk = Nil
      _seenIds.clear()
    }
  }

  override def afterEach() {
    referenceServer.stop()
    shadowServer.stop()
    proxyConfig.stop()
    // Dump any duplicated request ids alongside their original for diagnosis.
    for (id <- _dups.keys) {
      log.warn("--------DUP----------")
      log.warn(_dups(id).toString)
      log.warn("--------ORIG---------")
      log.warn(_seenIds(id).toString)
    }
  }

  "A proxy server under load" should {
    "maintain coherence" in {
      val threadPool = Executors.newFixedThreadPool(testThreads)
      // Shared iterator of request numbers, drained cooperatively by workers.
      val countdown = (1 to requests).iterator
      mutex = Some(new Semaphore(requests))
      // Drain every permit up front; completions release them back.
      mutex.foreach(_.acquire(requests))
      val clients = for (i <- 1 to testThreads) yield {
        val client = HttpClient(port = proxyPort)
        threadPool.submit(new Runnable {
          override def run() {
            var request = 0
            // The while-condition atomically claims the next request number
            // under the iterator's lock, so no two workers send the same id.
            while (countdown synchronized {
              if (countdown.hasNext) {
                request = countdown.next()
                request match {
                  case x if x % 100 == 0 =>
                  // println(x)
                  case _ =>
                }
                true
              } else {
                false
              }
            }) {
              val path = "/request=" + request
              // log.warn("Submitting {}", path)
              client << (path, {
                case (r, _) => assert(r.getStatus === HttpResponseStatus.OK)
              })
            }
            client.close()
            mutex.foreach(_.release())
          }
        })
        client
      }
      threadPool.shutdown()
      clients foreach {
        _.assertOk()
      }
      // println("Trying to acquire " + requests)
      // Block until the expected number of completions have released permits.
      mutex.foreach(_.acquire(requests))
      // println("Acquired " + requests)
      threadPool.shutdownNow
      // Every captured fixture must carry matching ids on both backends.
      dataSunk foreach { fixtureSink =>
        fixtureSink.messages.get(refKey) match {
          case Some(response) =>
            val requestId = response.headers.get("X-Request-Id")
            fixtureSink.messages.get(shadKey) match {
              case Some(shadResponse) =>
                val shadRequestId = shadResponse.headers.get("X-Request-Id")
                assert(requestId === shadRequestId, "shadResponse = " + shadResponse)
              case None => log.warn(s"fixture $fixtureSink doesn't contain $shadKey")
            }
          case None => log.warn(s"fixture $fixtureSink doesn't contain $refKey")
        }
      }
    }
  }

  // NOTE(review): purpose unclear — an operator-named type alias with no visible
  // use; confirm it is intentional before removing.
  type ? = this.type
}
ebowman/splitter
src/test/scala/tomtom/splitter/layer7/LoadTest.scala
Scala
apache-2.0
5,886
package cn.gridx.scala.lang.generics.TypeParameterization /** * Created by tao on 11/23/15. */ class Apple extends Fruit { def name: String = "apple" }
TaoXiao/Scala
lang/src/main/scala/cn/gridx/scala/lang/generics/TypeParameterization/Apple.scala
Scala
apache-2.0
159
package com.sksamuel.elastic4s

import java.util.UUID

import com.sksamuel.elastic4s.admin._
import com.sksamuel.elastic4s.mappings.FieldType._
import com.sksamuel.elastic4s.mappings._

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

/** @author Stephen Samuel */
// Central entry point of the elastic4s DSL: mixes every operation-specific DSL trait together
// and adds the "keyword" case objects (add, create, delete, get, ...) that allow the infix
// style `create index "foo"` alongside the plain method style `createIndex("foo")`.
// Each keyword object typically returns an "Expects*" builder that gathers the rest of the
// request in subsequent calls.
trait ElasticDsl
  extends IndexDsl
  with AliasesDsl
  with BulkDsl
  with ClusterDsl
  with CountDsl
  with CreateIndexDsl
  with DeleteIndexDsl
  with DeleteDsl
  with FacetDsl
  with FieldStatsDsl
  with ExplainDsl
  with GetDsl
  with IndexAdminDsl
  with IndexRecoveryDsl
  with IndexStatusDsl
  with MappingDsl
  with MoreLikeThisDsl
  with MultiGetDsl
  with OptimizeDsl
  with PercolateDsl
  with SearchDsl
  with SettingsDsl
  with ScoreDsl
  with ScrollDsl
  with SnapshotDsl
  with TemplateDsl
  with UpdateDsl
  with ValidateDsl
  with ElasticImplicits {

  // ---- alias management ----

  case object add {
    def alias(alias: String): AddAliasExpectsIndex = {
      require(alias.nonEmpty, "alias name must not be null or empty")
      new AddAliasExpectsIndex(alias)
    }
  }
  def addAlias(name: String): AddAliasExpectsIndex = add alias name

  @deprecated("use `add/remove/get alias` instead of `aliases add` for a more readable dsl", "1.4.0.Beta2")
  case object aliases {
    @deprecated("use `add alias` instead of `aliases add` for a more readable dsl", "1.4.0.Beta2")
    def add(alias: String) = new AddAliasExpectsIndex(alias)
    @deprecated("use `remove alias` instead of `aliases remove` for a more readable dsl", "1.4.0.Beta2")
    def remove(alias: String) = new RemoveAliasExpectsIndex(alias)
    @deprecated("use `get alias` instead of `aliases get` for a more readable dsl", "1.4.0.Beta2")
    def get(aliases: String*) = new GetAliasDefinition(aliases)
  }

  // varargs overload delegates to the Iterable overload (a varargs Seq is an Iterable,
  // so this is overload dispatch, not recursion)
  def aliases(aliasMutations: MutateAliasDefinition*): IndicesAliasesRequestDefinition = aliases(aliasMutations)
  def aliases(aliasMutations: Iterable[MutateAliasDefinition]): IndicesAliasesRequestDefinition = {
    new IndicesAliasesRequestDefinition(aliasMutations.toSeq: _*)
  }

  // ---- aggregations ----

  def agg = aggregation
  // One factory per supported aggregation type; each returns the corresponding builder.
  case object aggregation {
    def avg(name: String) = new AvgAggregationDefinition(name)
    def children(name: String) = new ChildrenAggregationDefinition(name)
    def count(name: String) = new ValueCountAggregationDefinition(name)
    def cardinality(name: String) = new CardinalityAggregationDefinition(name)
    def datehistogram(name: String) = new DateHistogramAggregation(name)
    def daterange(name: String) = new DateRangeAggregation(name)
    def extendedstats(name: String) = new ExtendedStatsAggregationDefinition(name)
    def filter(name: String) = new FilterAggregationDefinition(name)
    def filters(name: String) = new FiltersAggregationDefinition(name)
    def geobounds(name: String) = new GeoBoundsAggregationDefinition(name)
    def geodistance(name: String) = new GeoDistanceAggregationDefinition(name)
    def geohash(name:String) = new GeoHashGridAggregationDefinition(name)
    def global(name: String) = new GlobalAggregationDefinition(name)
    def histogram(name: String) = new HistogramAggregation(name)
    def ipRange(name: String) = new IpRangeAggregationDefinition(name)
    def max(name: String) = new MaxAggregationDefinition(name)
    def min(name: String) = new MinAggregationDefinition(name)
    def missing(name: String) = new MissingAggregationDefinition(name)
    def nested(name: String) = new NestedAggregationDefinition(name)
    def reverseNested(name: String) = new ReverseNestedAggregationDefinition(name)
    def percentiles(name: String) = new PercentilesAggregationDefinition(name)
    def percentileranks(name: String) = new PercentileRanksAggregationDefinition(name)
    def range(name: String) = new RangeAggregationDefinition(name)
    def sigTerms(name: String) = new SigTermsAggregationDefinition(name)
    def stats(name: String) = new StatsAggregationDefinition(name)
    def sum(name: String) = new SumAggregationDefinition(name)
    def terms(name: String) = new TermAggregationDefinition(name)
    def topHits(name: String) = new TopHitsAggregationDefinition(name)
  }

  // ---- typed-field shortcuts (delegate to `field(name).typed(...)`) ----

  def attachmentField(name: String) = field(name).typed(AttachmentType)
  def binaryField(name: String) = field(name).typed(BinaryType)
  def booleanField(name: String) = field(name).typed(BooleanType)
  def byteField(name: String) = field(name).typed(ByteType)

  // ---- sorting (deprecated `by` keyword) ----

  @deprecated("use score sort, geo sort, field sort or script sort", "1.6.0")
  case object by {
    // ElasticDsl.<keyword> is used to disambiguate from this object's own members
    def score: ScoreSortDefinition = ElasticDsl.score.sort
    def geo(field: String): GeoDistanceSortDefinition = ElasticDsl.geo sort field
    def field(field: String): FieldSortDefinition = ElasticDsl.field.sort(field)
    def script(script: String) = ElasticDsl.script.sort(script)
  }

  // ---- cache / scroll clearing ----

  case object clear {
    def cache(indexes: Iterable[String]): ClearCacheDefinition = new ClearCacheDefinition(indexes.toSeq)
    def cache(indexes: String*): ClearCacheDefinition = new ClearCacheDefinition(indexes)
    def scroll(id: String, ids: String*): ClearScrollDefinition = clearScroll(id +: ids)
    def scroll(ids: Iterable[String]): ClearScrollDefinition = clearScroll(ids)
  }
  def clearCache(indexes: String*): ClearCacheDefinition = new ClearCacheDefinition(indexes)
  def clearCache(indexes: Iterable[String]): ClearCacheDefinition = new ClearCacheDefinition(indexes.toSeq)
  // clearIndex is an alias that builds the same clear-cache request as clearCache
  def clearIndex(indexes: String*): ClearCacheDefinition = new ClearCacheDefinition(indexes)
  def clearIndex(indexes: Iterable[String]): ClearCacheDefinition = new ClearCacheDefinition(indexes.toSeq)
  def clearScroll(id: String, ids: String*): ClearScrollDefinition = ClearScrollDefinition(id +: ids)
  def clearScroll(ids: Iterable[String]): ClearScrollDefinition = ClearScrollDefinition(ids.toSeq)

  // ---- index open/close ----

  case object close {
    def index(index: String): CloseIndexDefinition = new CloseIndexDefinition(index)
  }
  def closeIndex(index: String): CloseIndexDefinition = close index index

  // ---- cluster admin ----

  case object cluster {
    def persistentSettings(settings: Map[String, String]) = ClusterSettingsDefinition(settings, Map.empty)
    def transientSettings(settings: Map[String, String]) = ClusterSettingsDefinition(Map.empty, settings)
  }
  def clusterPersistentSettings(settings: Map[String, String]) = cluster persistentSettings settings
  def clusterTransientSettings(settings: Map[String, String]) = cluster transientSettings settings

  def clusterState = new ClusterStateDefinition
  def clusterHealth = new ClusterHealthDefinition()
  def clusterStats = new ClusterStatsDefinition
  @deprecated("use clusterStats", "1.6.1")
  def clusterStatus = new ClusterStatsDefinition
  def clusterHealth(indices: String*) = new ClusterHealthDefinition(indices: _*)

  // ---- analysis token filters (common grams) ----

  case object commonGrams {
    def tokenfilter(name: String): CommonGramsTokenFilter = CommonGramsTokenFilter(name)
  }
  def commonGramsTokenFilter(name: String) = CommonGramsTokenFilter(name)

  // ---- suggesters (completion) ----

  case object completion {
    def suggestion(name: String) = new CompletionSuggestionDefinition(name)
  }
  // no-arg variant generates a random suggestion name
  def completionSuggestion: CompletionSuggestionDefinition = completion suggestion UUID.randomUUID.toString
  def completionSuggestion(name: String): CompletionSuggestionDefinition = completion suggestion name

  // ---- count ----

  case object count {
    def from(indexType: IndexType): CountDefinition = from(IndexesTypes(indexType))
    def from(indexesTypes: IndexesTypes): CountDefinition = new CountDefinition(indexesTypes)
    def from(indexes: Iterable[String]): CountDefinition = from(IndexesTypes(indexes))
    def from(indexes: String*): CountDefinition = from(IndexesTypes(indexes))
  }
  @deprecated("use countFrom", "1.6.0")
  def count(indexesTypes: IndexesTypes): CountDefinition = new CountDefinition(indexesTypes)
  @deprecated("use countFrom", "1.6.0")
  def count(indexes: String*): CountDefinition = new CountDefinition(IndexesTypes(indexes))
  // NOTE(review): the tuple overload presumably relies on an implicit (String, String) => IndexType
  // conversion from ElasticImplicits — confirm against that trait
  def countFrom(index: (String, String)): CountDefinition = count from index
  def countFrom(indexes: String*): CountDefinition = count from indexes
  def countFrom(indexes: IndexType): CountDefinition = count from indexes

  // ---- create (index / snapshot / repository / template) ----

  case object create {
    def index(name: String) = {
      require(name.nonEmpty, "index name must not be null or empty")
      new CreateIndexDefinition(name)
    }
    def snapshot(name: String) = {
      require(name.nonEmpty, "snapshot name must not be null or empty")
      new CreateSnapshotExpectsIn(name)
    }
    def repository(name: String): CreateRepositoryExpectsType = {
      require(name.nonEmpty, "repository name must not be null or empty")
      new CreateRepositoryExpectsType(name)
    }
    def template(name: String): CreateIndexTemplateExpectsPattern = {
      require(name.nonEmpty, "template name must not be null or empty")
      new CreateIndexTemplateExpectsPattern(name)
    }
  }
  def createIndex(name: String) = create index name
  def createSnapshot(name: String) = create snapshot name
  def createRepository(name: String) = create repository name
  def createTemplate(name: String) = create template name

  def dateField(name: String) = field(name).typed(DateType)
  def doubleField(name: String) = field(name).typed(DoubleType)

  // ---- delete (by id, by query [deprecated], index, snapshot, template, mapping) ----

  case object delete {
    def id(id: Any): DeleteByIdExpectsFrom = new DeleteByIdExpectsFrom(id)
    @deprecated(
      "Delete by query will be removed in 2.0. Instead, use the scroll/scan API to find all matching IDs and then issue a bulk req",
      "1.6.0")
    def from(indexesTypes: IndexesTypes): DeleteByQueryExpectsClause = new DeleteByQueryExpectsClause(indexesTypes)
    @deprecated(
      "Delete by query will be removed in 2.0. Instead, use the scroll/scan API to find all matching IDs and then issue a bulk req",
      "1.6.0")
    def from(indexType: IndexType): DeleteByQueryExpectsClause = from(IndexesTypes(indexType))
    @deprecated(
      "Delete by query will be removed in 2.0. Instead, use the scroll/scan API to find all matching IDs and then issue a bulk req",
      "1.6.0")
    def from(index: String): DeleteByQueryExpectsClause = from(IndexesTypes(index))
    @deprecated(
      "Delete by query will be removed in 2.0. Instead, use the scroll/scan API to find all matching IDs and then issue a bulk req",
      "1.6.0")
    def from(indexes: String*): DeleteByQueryExpectsType = from(indexes)
    @deprecated(
      "Delete by query will be removed in 2.0. Instead, use the scroll/scan API to find all matching IDs and then issue a bulk req",
      "1.6.0")
    def from(indexes: Iterable[String]): DeleteByQueryExpectsType = new DeleteByQueryExpectsType(indexes.toSeq)
    def index(indexes: String*): DeleteIndexDefinition = new DeleteIndexDefinition(indexes: _*)
    def index(indexes: Iterable[String]): DeleteIndexDefinition = new DeleteIndexDefinition(indexes.toSeq: _*)
    def snapshot(name: String): DeleteSnapshotExpectsIn = new DeleteSnapshotExpectsIn(name)
    def template(name: String) = new DeleteIndexTemplateDefinition(name)
    def mapping(indexes: String*) = DeleteMappingDefinition(indexes)
    def mapping(indexType: IndexType) = DeleteMappingDefinition(List(indexType.index)).types(indexType.`type`)
  }
  def delete(id: Any): DeleteByIdExpectsFrom = new DeleteByIdExpectsFrom(id)
  def deleteIndex(indexes: String*): DeleteIndexDefinition = new DeleteIndexDefinition(indexes: _*)
  def deleteIndex(indexes: Iterable[String]): DeleteIndexDefinition = new DeleteIndexDefinition(indexes.toSeq: _*)
  def deleteSnapshot(name: String): DeleteSnapshotExpectsIn = delete snapshot name
  def deleteTemplate(name: String): DeleteIndexTemplateDefinition = delete template name
  def deleteMapping(indexes: String*) = DeleteMappingDefinition(indexes)
  def deleteMapping(indexType: IndexType) = DeleteMappingDefinition(List(indexType.index)).types(indexType.`type`)

  // ---- explain ----

  case object explain {
    def id(id: Any): ExplainExpectsIndex = new ExplainExpectsIndex(id)
  }
  // `explain id id` is infix for `explain.id(id)` on the case object above
  def explain(id: Any): ExplainExpectsIndex = explain id id

  // ---- fields & field stats ----

  case object field extends TypeableFields {
    val name = ""
    def name(name: String): FieldDefinition = new FieldDefinition(name)
    def sort(field: String): FieldSortDefinition = new FieldSortDefinition(field)
    def stats(fields: String*): FieldStatsDefinition = new FieldStatsDefinition(fields = fields)
    def stats(fields: Iterable[String]): FieldStatsDefinition = new FieldStatsDefinition(fields = fields.toSeq)
  }
  // infix for `field.name(name)` on the case object above
  def field(name: String): FieldDefinition = field name name
  def fieldStats(fields: String*): FieldStatsDefinition = new FieldStatsDefinition(fields = fields)
  def fieldStats(fields: Iterable[String]): FieldStatsDefinition = new FieldStatsDefinition(fields = fields.toSeq)
  def fieldSort(name: String) = field sort name

  // ---- flush ----

  case object flush {
    def index(indexes: Iterable[String]): FlushIndexDefinition = new FlushIndexDefinition(indexes.toSeq)
    def index(indexes: String*): FlushIndexDefinition = new FlushIndexDefinition(indexes)
  }
  def flushIndex(indexes: Iterable[String]): FlushIndexDefinition = flush index indexes
  def flushIndex(indexes: String*): FlushIndexDefinition = flush index indexes

  // ---- suggesters (fuzzy completion) ----

  case object fuzzyCompletion {
    def suggestion(name: String) = new FuzzyCompletionSuggestionDefinition(name)
  }
  def fuzzyCompletionSuggestion: FuzzyCompletionSuggestionDefinition = fuzzyCompletionSuggestion(UUID.randomUUID.toString)
  def fuzzyCompletionSuggestion(name: String): FuzzyCompletionSuggestionDefinition = fuzzyCompletion suggestion name

  // ---- geo sort ----

  case object geo {
    def sort(field: String): GeoDistanceSortDefinition = new GeoDistanceSortDefinition(field)
  }
  def geoSort(name: String): GeoDistanceSortDefinition = geo sort name

  def completionField(name: String) = field(name).typed(CompletionType)
  def floatField(name: String) = field(name).typed(FloatType)
  def multiField(name: String) = field(name).typed(MultiFieldType)
  def geopointField(name: String) = field(name).typed(GeoPointType)
  def geoshapeField(name: String) = field(name).typed(GeoShapeType)

  // ---- get (document, alias, cluster info, mapping, segments, settings, template, snapshot) ----

  case object get {
    def id(id: Any) = {
      require(id.toString.nonEmpty, "id must not be null or empty")
      new GetWithIdExpectsFrom(id.toString)
    }
    def alias(aliases: String*): GetAliasDefinition = new GetAliasDefinition(aliases)
    // the StatsKeyword/HealthKeyword parameters exist only to select the overload,
    // enabling `get cluster stats` / `get cluster health`
    def cluster(stats: StatsKeyword): ClusterStatsDefinition = new ClusterStatsDefinition
    def cluster(health: HealthKeyword): ClusterHealthDefinition = new ClusterHealthDefinition
    def mapping(ixTp: IndexType): GetMappingDefinition = new GetMappingDefinition(List(ixTp.index)).types(ixTp.`type`)
    def mapping(indexes: Iterable[String]): GetMappingDefinition = new GetMappingDefinition(indexes)
    def mapping(indexes: String*): GetMappingDefinition = mapping(indexes)
    def segments(indexes: String*): GetSegmentsDefinition = new GetSegmentsDefinition(indexes)
    def segments(indexes: Iterable[String]): GetSegmentsDefinition = new GetSegmentsDefinition(indexes.toSeq)
    def settings(indexes: String*): GetSettingsDefinition = new GetSettingsDefinition(indexes)
    def settings(indexes: Iterable[String]): GetSettingsDefinition = new GetSettingsDefinition(indexes.toSeq)
    def template(name: String): GetTemplateDefinition = new GetTemplateDefinition(name)
    def snapshot(names: Iterable[String]): GetSnapshotsExpectsFrom = new GetSnapshotsExpectsFrom(names.toSeq)
    def snapshot(names: String*): GetSnapshotsExpectsFrom = snapshot(names)
  }
  def get(id: Any): GetWithIdExpectsFrom = new GetWithIdExpectsFrom(id.toString)
  def getAlias(aliases: String*): GetAliasDefinition = new GetAliasDefinition(aliases)
  def getMapping(ixTp: IndexType): GetMappingDefinition = new GetMappingDefinition(List(ixTp.index)).types(ixTp.`type`)
  def getSegments(indexes: String*): GetSegmentsDefinition = get segments (indexes.toSeq: _*)
  def getSettings(indexes: String*): GetSettingsDefinition = get settings indexes
  def getSnapshot(names: Iterable[String]): GetSnapshotsExpectsFrom = get snapshot names
  def getSnapshot(names: String*): GetSnapshotsExpectsFrom = get snapshot names
  def getTemplate(name: String): GetTemplateDefinition = get template name

  def intField(name: String) = field(name).typed(IntegerType)
  def ipField(name: String) = field(name).typed(IpType)

  // marker keywords used for overload selection in `get cluster ...`
  trait HealthKeyword
  case object health extends HealthKeyword

  // ---- highlighting ----

  case object highlight {
    def field(name: String): HighlightDefinition = new HighlightDefinition(name)
  }
  def highlight(field: String): HighlightDefinition = new HighlightDefinition(field)

  trait StatsKeyword
  case object stats extends StatsKeyword

  // ---- indexing ----

  @deprecated("use index keyword", "1.4.0")
  def insert: index.type = index
  case object index {
    def exists(indexes: Iterable[String]): IndexExistsDefinition = new IndexExistsDefinition(indexes.toSeq)
    def exists(indexes: String*): IndexExistsDefinition = new IndexExistsDefinition(indexes)
    // accepts "index/type" as a single string and splits on '/'
    def into(index: String): IndexDefinition = {
      require(index.nonEmpty, "index must not be null or empty")
      into(index.split("/").head, index.split("/").last)
    }
    def into(index: String, `type`: String): IndexDefinition = {
      require(index.nonEmpty, "index must not be null or empty")
      new IndexDefinition(index, `type`)
    }
    def into(kv: (String, String)): IndexDefinition = {
      into(kv._1, kv._2)
    }
    def into(indexType: IndexType): IndexDefinition = {
      require(indexType != null, "indexType must not be null or empty")
      new IndexDefinition(indexType.index, indexType.`type`)
    }
    def stats(indexes: Iterable[String]): IndicesStatsDefinition = new IndicesStatsDefinition(indexes.toSeq)
    def stats(indexes: String*): IndicesStatsDefinition = new IndicesStatsDefinition(indexes)
  }
  def indexExists(indexes: Iterable[String]): IndexExistsDefinition = new IndexExistsDefinition(indexes.toSeq)
  def indexExists(indexes: String*): IndexExistsDefinition = new IndexExistsDefinition(indexes)
  def indexInto(indexType: IndexType): IndexDefinition = {
    require(indexType != null, "indexType must not be null or empty")
    new IndexDefinition(indexType.index, indexType.`type`)
  }
  def indexInto(index: String, `type`: String): IndexDefinition = {
    require(index.nonEmpty, "index must not be null or empty")
    new IndexDefinition(index, `type`)
  }
  def indexStats(indexes: Iterable[String]): IndicesStatsDefinition = new IndicesStatsDefinition(indexes.toSeq)
  def indexStats(indexes: String*): IndicesStatsDefinition = new IndicesStatsDefinition(indexes)

  // ---- inner hits ----

  case object inner {
    def hits(name: String): QueryInnerHitsDefinition = new QueryInnerHitsDefinition(name)
    def hit(name: String): InnerHitDefinition = new InnerHitDefinition(name)
  }
  def innerHit(name: String): InnerHitDefinition = inner hit name
  def innerHits(name: String): QueryInnerHitsDefinition = inner hits name

  def longField(name: String) = field(name).typed(LongType)

  // ---- mappings ----

  case object mapping {
    def name(name: String): MappingDefinition = {
      require(name.nonEmpty, "mapping name must not be null or empty")
      new MappingDefinition(name)
    }
  }
  // infix for `mapping.name(name)` on the case object above
  def mapping(name: String): MappingDefinition = mapping name name

  // ---- more-like-this (deprecated API) ----

  @deprecated("The More Like This API will be removed in 2.0. Instead, use the More Like This Query", "1.6.0")
  case object more {
    def like(id: Any): MltExpectsIndex = {
      require(id.toString.nonEmpty, "id must not be null or empty")
      new MltExpectsIndex(id.toString)
    }
  }
  @deprecated("The More Like This API will be removed in 2.0. Instead, use the More Like This Query", "1.6.0")
  def mlt: morelike.type = morelike
  @deprecated("The More Like This API will be removed in 2.0. Instead, use the More Like This Query", "1.6.0")
  case object morelike {
    def id(id: Any): MltExpectsIndex = {
      require(id.toString.nonEmpty, "id must not be null or empty")
      new MltExpectsIndex(id.toString)
    }
  }

  // ---- multi-get ----

  def multiget(gets: Iterable[GetDefinition]): MultiGetDefinition = new MultiGetDefinition(gets)
  def multiget(gets: GetDefinition*): MultiGetDefinition = new MultiGetDefinition(gets)

  def nestedField(name: String): NestedFieldDefinition = field(name).typed(NestedType)

  // ---- analysis token filters (ngram / edge ngram) ----

  case object ngram {
    def tokenfilter(name: String): NGramTokenFilter = NGramTokenFilter(name)
  }
  def ngramTokenFilter(name: String): NGramTokenFilter = NGramTokenFilter(name)

  def objectField(name: String): ObjectFieldDefinition = field(name).typed(ObjectType)

  case object edgeNGram {
    def tokenfilter(name: String): EdgeNGramTokenFilter = EdgeNGramTokenFilter(name)
  }
  def edgeNGramTokenfilter(name: String): EdgeNGramTokenFilter = EdgeNGramTokenFilter(name)

  // ---- open / optimize ----

  case object open {
    def index(index: String): OpenIndexDefinition = new OpenIndexDefinition(index)
  }
  def openIndex(index: String): OpenIndexDefinition = open index index

  case object optimize {
    def index(indexes: Iterable[String]): OptimizeDefinition = new OptimizeDefinition(indexes.toSeq: _*)
    def index(indexes: String*): OptimizeDefinition = index(indexes)
  }
  @deprecated("use optimizeIndex", "1.6.2")
  def optimize(indexes: String*): OptimizeDefinition = new OptimizeDefinition(indexes: _*)
  def optimizeIndex(indexes: String*): OptimizeDefinition = optimize index indexes
  def optimizeIndex(indexes: Iterable[String]): OptimizeDefinition = optimize index indexes

  // ---- percolate ----

  case object percolate {
    def in(index: String): PercolateDefinition = {
      require(index.nonEmpty, "index must not be null or empty")
      new PercolateDefinition(index)
    }
    def in(indexType: IndexType): PercolateDefinition = new PercolateDefinition(IndexesTypes(indexType))
  }
  def percolateIn(index: String): PercolateDefinition = percolate in index
  def percolateIn(indexType: IndexType): PercolateDefinition = percolate in indexType

  // ---- suggesters (phrase) ----

  case object phrase {
    def suggestion(name: String): PhraseSuggestionDefinition = new PhraseSuggestionDefinition(name)
  }
  def phraseSuggestion: PhraseSuggestionDefinition = phrase suggestion UUID.randomUUID.toString
  def phraseSuggestion(name: String): PhraseSuggestionDefinition = phrase suggestion name

  // ---- put mapping ----

  case object put {
    def mapping(indexType: IndexType): PutMappingDefinition = new PutMappingDefinition(indexType)
  }
  def putMapping(indexType: IndexType): PutMappingDefinition = new PutMappingDefinition(indexType)

  // ---- recovery / refresh ----

  case object recover {
    def index(indexes: Iterable[String]): IndexRecoveryDefinition = new IndexRecoveryDefinition(indexes.toSeq)
    def index(indexes: String*): IndexRecoveryDefinition = new IndexRecoveryDefinition(indexes)
  }
  def recoverIndex(indexes: String*): IndexRecoveryDefinition = recover index indexes
  def recoverIndex(indexes: Iterable[String]): IndexRecoveryDefinition = recover index indexes

  // NOTE(review): `refresh` delegates to `recover` and returns IndexRecoveryDefinition,
  // so `refresh index ...` builds a recovery request, not a refresh request — looks
  // unintentional; verify against a dedicated RefreshIndexDefinition if one exists
  case object refresh {
    def index(indexes: Iterable[String]): IndexRecoveryDefinition = recover index indexes
    def index(indexes: String*): IndexRecoveryDefinition = recover index indexes
  }
  def refreshIndex(indexes: Iterable[String]): IndexRecoveryDefinition = refresh index indexes
  def refreshIndex(indexes: String*): IndexRecoveryDefinition = refresh index indexes

  // ---- alias removal / percolator registration ----

  case object remove {
    def alias(alias: String): RemoveAliasExpectsIndex = {
      require(alias.nonEmpty, "alias must not be null or empty")
      new RemoveAliasExpectsIndex(alias)
    }
  }
  def removeAlias(alias: String): RemoveAliasExpectsIndex = remove alias alias

  case object register {
    def id(id: Any): RegisterExpectsIndex = {
      require(id.toString.nonEmpty, "id must not be null or empty")
      new RegisterExpectsIndex(id.toString)
    }
  }
  // infix for `register.id(id)`; the bare `register` resolves to the case object
  def register(id: Any): RegisterExpectsIndex = register id id

  // ---- snapshots / repositories ----

  case object repository {
    @deprecated("use `create repository` instead of `repository create` for a more readable dsl", "1.4.0.Beta2")
    def create(name: String): CreateRepositoryExpectsType = new CreateRepositoryExpectsType(name)
  }

  case object restore {
    def snapshot(name: String): RestoreSnapshotExpectsFrom = {
      require(name.nonEmpty, "snapshot name must not be null or empty")
      new RestoreSnapshotExpectsFrom(name)
    }
  }
  def restoreSnapshot(name: String): RestoreSnapshotExpectsFrom = restore snapshot name

  // ---- sorting (score / script) ----

  case object score {
    def sort: ScoreSortDefinition = new ScoreSortDefinition
  }
  def scoreSort: ScoreSortDefinition = score.sort

  case object script {
    def sort(script: String): ScriptSortDefinition = new ScriptSortDefinition(script)
    def field(n: String): ExpectsScript = ExpectsScript(field = n)
  }
  def scriptSort(scripttext: String): ScriptSortDefinition = script sort scripttext
  def scriptField(n: String): ExpectsScript = script field n

  // ---- search ----

  @deprecated("use search keyword", "1.4.0.Beta2")
  def select: search.type = search
  case object search {
    def in(indexes: String*): SearchDefinition = in(IndexesTypes(indexes))
    def in(tuple: (String, String)): SearchDefinition = in(IndexesTypes(tuple))
    def in(indexesTypes: IndexesTypes): SearchDefinition = new SearchDefinition(indexesTypes)
    def in(indexType: IndexType): SearchDefinition = new SearchDefinition(IndexesTypes(indexType))
    def scroll(id: String): SearchScrollDefinition = new SearchScrollDefinition(id)
  }
  @deprecated("use search", "1.6.0")
  def select(indexes: String*): SearchDefinition = search(indexes: _*)
  def search(indexType: IndexType): SearchDefinition = search in indexType
  def search(indexes: String*): SearchDefinition = new SearchDefinition(IndexesTypes(indexes))
  def searchScroll(id: String): SearchScrollDefinition = new SearchScrollDefinition(id)

  // ---- analysis token filters (shingle / snowball / stemmer) ----

  case object shingle {
    def tokenfilter(name: String): ShingleTokenFilter = ShingleTokenFilter(name)
  }
  def shingleTokenFilter(name: String): ShingleTokenFilter = ShingleTokenFilter(name)

  def shortField(name: String) = field(name).typed(ShortType)

  case object snapshot {
    @deprecated("use `create snapshot` instead of `snapshot create` for a more readable dsl", "1.4.0.Beta2")
    def create(name: String): CreateSnapshotExpectsIn = new CreateSnapshotExpectsIn(name)
    @deprecated("use `restore snapshot` instead of `snapshot restore` for a more readable dsl", "1.4.0.Beta2")
    def restore(name: String): RestoreSnapshotExpectsFrom = new RestoreSnapshotExpectsFrom(name)
    @deprecated("use `delete snapshot` instead of `snapshot delete` for a more readable dsl", "1.4.0.Beta2")
    def delete(name: String): DeleteSnapshotExpectsIn = new DeleteSnapshotExpectsIn(name)
  }

  case object snowball {
    def tokenfilter(name: String): SnowballTokenFilter = SnowballTokenFilter(name)
  }
  def snowballTokenFilter(name: String): SnowballTokenFilter = SnowballTokenFilter(name)

  @deprecated("use sort by <type>", "1.6.1")
  case object sortby {
    def score: ScoreSortDefinition = new ScoreSortDefinition
    def geo(field: String): GeoDistanceSortDefinition = new GeoDistanceSortDefinition(field)
    def field(field: String): FieldSortDefinition = new FieldSortDefinition(field)
    def script(script: String): ScriptSortDefinition = new ScriptSortDefinition(script)
  }

  case object stemmer {
    def tokenfilter(name: String): StemmerTokenFilter = StemmerTokenFilter(name)
  }
  def stemmerTokenFilter(name: String): StemmerTokenFilter = StemmerTokenFilter(name)

  def stringField(name: String): StringFieldDefinition = field(name).typed(StringType)

  // ---- suggestions wrapper ----

  def suggestions(suggestions: SuggestionDefinition*): SuggestDefinition = SuggestDefinition(suggestions)
  def suggestions(suggestions: Iterable[SuggestionDefinition]): SuggestDefinition = SuggestDefinition(suggestions.toSeq)

  // ---- templates ----

  case object template {
    @deprecated("use `create template` instead of `template create` for a more readable dsl", "1.4.0.Beta2")
    def create(name: String): CreateIndexTemplateExpectsPattern = new CreateIndexTemplateExpectsPattern(name)
    @deprecated("use `delete template` instead of `template delete` for a more readable dsl", "1.4.0.Beta2")
    def delete(name: String): DeleteIndexTemplateDefinition = new DeleteIndexTemplateDefinition(name)
    def name(name: String): DynamicTemplateDefinition = new DynamicTemplateDefinition(name)
  }
  def template(name: String): DynamicTemplateDefinition = template name name

  // ---- suggesters (term) ----

  case object term {
    def suggestion(name: String): TermSuggestionDefinition = new TermSuggestionDefinition(name)
  }
  def termSuggestion: TermSuggestionDefinition = term suggestion UUID.randomUUID.toString
  def termSuggestion(name: String): TermSuggestionDefinition = term suggestion name

  // ---- _timestamp mapping ----

  case object timestamp {
    def enabled(en: Boolean): TimestampDefinition = TimestampDefinition(en)
  }
  def timestamp(en: Boolean): TimestampDefinition = TimestampDefinition(en)

  def tokenCountField(name: String) = field(name).typed(TokenCountType)

  // ---- types-exist check ----

  // intermediate builder: holds the type names until `in(indexes)` supplies the indexes
  class TypesExistExpectsIn(types: Seq[String]) {
    def in(indexes: String*): TypesExistsDefinition = new TypesExistsDefinition(indexes, types)
  }
  case object types {
    def exist(types: String*): TypesExistExpectsIn = new TypesExistExpectsIn(types)
  }
  def typesExist(types: String*): TypesExistExpectsIn = new TypesExistExpectsIn(types)

  // ---- update ----

  case object update {
    def id(id: Any): UpdateExpectsIndex = {
      require(id.toString.nonEmpty, "id must not be null or empty")
      new UpdateExpectsIndex(id.toString)
    }
    def settings(index: String): UpdateSettingsDefinition = new UpdateSettingsDefinition(index)
  }
  def update(id: Any): UpdateExpectsIndex = new UpdateExpectsIndex(id.toString)

  // ---- validate ----

  case object validate {
    def in(indexType: IndexType): ValidateDefinition = new ValidateDefinition(indexType.index, indexType.`type`)
    // accepts "index/type" as a single string; the Seq overload assumes at least two
    // elements (head and index 1) — a bare index with no '/' would fail at runtime
    def in(value: String): ValidateDefinition = {
      require(value.nonEmpty, "value must not be null or empty")
      in(value.split("/").toSeq)
    }
    def in(value: Seq[String]): ValidateDefinition = in((value.head, value(1)))
    def in(tuple: (String, String)): ValidateDefinition = new ValidateDefinition(tuple._1, tuple._2)
  }
  def validateIn(indexType: IndexType): ValidateDefinition = validate in indexType
  def validateIn(value: String): ValidateDefinition = validate in value

  // blocking helper for tests/scripts; default timeout is 10 seconds
  implicit class RichFuture[T](future: Future[T]) {
    def await(implicit duration: Duration = 10.seconds): T = Await.result(future, duration)
  }
}

object ElasticDsl extends ElasticDsl

//mapping char filter
//
//htmlStrip char filter
//
//patternReplace char filter
alexander-svendsen/elastic4s
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/ElasticDsl.scala
Scala
apache-2.0
30,368
/***********************************************************************
 * Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Apache License, Version 2.0
 * which accompanies this distribution and is available at
 * http://www.opensource.org/licenses/apache2.0.php.
 ***********************************************************************/

package org.locationtech.geomesa.accumulo.tools.status

import com.beust.jcommander.Parameters
import org.locationtech.geomesa.accumulo.data.AccumuloDataStore
import org.locationtech.geomesa.accumulo.tools.status.AccumuloVersionRemoteCommand.AccumuloVersionParams
import org.locationtech.geomesa.accumulo.tools.{AccumuloDataStoreCommand, AccumuloDataStoreParams}
import org.locationtech.geomesa.tools.status.VersionRemoteCommand

// CLI command that specializes the generic VersionRemoteCommand for Accumulo-backed
// data stores; the actual version lookup is inherited from VersionRemoteCommand.
class AccumuloVersionRemoteCommand extends VersionRemoteCommand[AccumuloDataStore] with AccumuloDataStoreCommand {
  // JCommander parameter object describing the Accumulo connection options for this command
  override val params = new AccumuloVersionParams
}

object AccumuloVersionRemoteCommand {
  // Adds the command description shown in CLI help; all connection parameters
  // come from AccumuloDataStoreParams.
  @Parameters(commandDescription = "Display the GeoMesa version installed on a cluster")
  class AccumuloVersionParams extends AccumuloDataStoreParams
}
ronq/geomesa
geomesa-accumulo/geomesa-accumulo-tools/src/main/scala/org/locationtech/geomesa/accumulo/tools/status/AccumuloVersionRemoteCommand.scala
Scala
apache-2.0
1,254
/*
 * Copyright 2013 The SIRIS Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * The SIRIS Project is a cooperation between Beuth University, Berlin and the
 * HCI Group at the University of WΓΌrzburg. The project is funded by the German
 * Federal Ministry of Education and Research (grant no. 17N4409).
 */
package simx.components.ai.feature

import simx.core.entity.typeconversion.ConvertibleTrait
import simx.core.entity.description.{SValSet, NamedSValSet, EntityAspect}
import simx.core.ontology.types._
import simx.core.ontology.EntityDescription
import simx.core.ontology.GroundedSymbol
import simx.core.ontology.types
import simx.components.ai.feature.collection.BufferedSValSet
import simx.core.worldinterface.naming.NameIt
import simx.core.entity.Entity
import simx.core.entity.component.EntityCreationHandling

/**
 * Created by IntelliJ IDEA.
 * User: martin
 * Date: 9/3/13
 * Time: 12:05 PM
 */
// Implicit helpers that lift a plain value into the matching side of an Either.
object ImplicitEitherConversion {
  implicit def left2Either[A,B](a:A):Either[A,B] = Left(a)
  implicit def right2Either[A,B](b:B):Either[A,B] = Right(b)
}

// Contract for a computed feature: what it produces (description), what inputs it
// needs (requirements), and how to compute a value from buffered input.
trait FeatureBase extends Serializable {
  val description: ConvertibleTrait[_]
  def requirements: List[ConvertibleTrait[_]]
  // path-dependent return type: the produced value matches the description's data type
  def production(input : BufferedSValSet) : description.dataType
}

// No-op feature: produces a constant Symbol and requires no inputs.
class NullFeature() extends FeatureBase {
  val description = NullType.as(local.Symbols.feature).withType(classOf[Symbol])
  def production(requiredInput: BufferedSValSet) = 'NullFeature
  val requirements = Nil
}

/**
 * targetDescription has to provide types.Transformation (e.g. 'simx.components.vrpn.devices.TrackingTarget')
 */
// Contract for an input feature fed from an external source entity
// (either still a description or an already-realized entity).
trait InputFeatureBase extends Serializable {
  val description: ConvertibleTrait[_]
  def source: Either[EntityDescription, Entity]
  def annotations: Seq[GroundedSymbol]
  def relativeTo: Option[Seq[GroundedSymbol]]
}

// No-op input feature with an empty source description and no annotations.
class InputNullFeature() extends InputFeatureBase {
  val description = NullType.as(local.Symbols.feature).withType(classOf[Symbol])
  def source = Left(new EntityDescription())
  def annotations = Seq()
  def relativeTo = None
}

// Something that can be turned into a live entity; `h` is invoked with the
// realized entity once creation completes.
trait Realizable{
  def realize(h : Entity => Any = _ => {})( implicit entityCreationContext : EntityCreationHandling )
}

// Base class for concrete (computed) features; wraps the feature description in an
// entity aspect and realizes it as an entity named "Feat[...]".
abstract class Feature[T](val featureDescription: ConvertibleTrait[T]) extends FeatureBase with Realizable with Serializable {
  thisFeatureDescription =>

  //Just renaming
  val description = featureDescription

  // aspect advertising this feature (plus a Record flag defaulting to false)
  def featureAspect = new EntityAspect(local.Symbols.feature, local.Symbols.feature) {
    def getCreateParams = addCVars(SValSet(local.Feature(thisFeatureDescription), local.Record(false)))
    def getProvidings = Set(description, local.Record)
    def getFeatures = Set(description, local.Record)
  }

  def aspects: List[EntityAspect] = List(featureAspect, NameIt("Feat[" + FeatureEventDescription.stringRepFor(featureDescription) + "]"))

  def realize(h : Entity => Any = _ => {})( implicit entityCreationContext : EntityCreationHandling )
  {new EntityDescription(aspects).realize(h)}
}

// Base class for input features; annotations distinguish multiple inputs of the
// same type. Transformation-typed inputs are exposed as Position + Orientation.
abstract class InputFeature[T](val featureDescription: ConvertibleTrait[T], annotation: GroundedSymbol, additionalAnnotations: GroundedSymbol*) extends InputFeatureBase with Realizable with Serializable {
  thisFeatureDescription =>

  //Just renaming
  val description = featureDescription

  def annotations = Seq(annotation) ++ additionalAnnotations

  def inputFeatureAspect = new EntityAspect(local.Symbols.feature, local.Symbols.inputFeature) {
    def getCreateParams = addCVar(local.InputFeature(thisFeatureDescription))
    // Transformations are split into annotated Position and Orientation providings;
    // every other type is provided directly with the annotations attached
    def getProvidings =
      if(description.semantics == types.Transformation.semantics)
        Set(types.Position.withAnnotations(annotations:_*), types.Orientation.withAnnotations(annotations:_*))
      else
        Set(description.addAnnotations(annotations:_*))
    def getFeatures = getProvidings
  }

  def aspects: List[EntityAspect] = List(inputFeatureAspect, NameIt("InpF[" + annotations.map(_.value.toString).mkString(",") + "]"))

  def realize(h : Entity => Any = _ => {})( implicit entityCreationContext : EntityCreationHandling )
  {new EntityDescription(aspects).realize(h)}
}

// Entity description that fakes a data source providing `sourceDescription`,
// e.g. for testing without real input hardware; optional entityName is appended
// to the generated entity name.
case class FakeSource[T](sourceDescription: ConvertibleTrait[T], entityName: Option[String] = None) extends EntityDescription (
  new EntityAspect(local.Symbols.feature, local.Symbols.fakeSource) {
    override def getCreateParams = NamedSValSet(aspectType)
    override def getFeatures = Set(sourceDescription)
    override def getProvidings = getFeatures
  },
  NameIt("FakeSource" + entityName.fold("")("[" + _ + "]"))
)
simulator-x/feature
src/simx/components/ai/feature/FeatureBase.scala
Scala
apache-2.0
5,093
package main

import java.awt.geom.{Ellipse2D, Line2D, Path2D}
import java.awt.{BasicStroke, Color, Graphics2D, RenderingHints}

import utilities.{CubicCurve, MyMath, Vec2}

/**
 * Created by weijiayi on 2/29/16.
 *
 * Draws cubic BΓ©zier strokes with varying width onto a Graphics2D.
 *
 * @param g2d            target graphics context (mutated by every draw call)
 * @param pointTransform maps model-space points to device-space before drawing
 * @param widthScale     scale applied to dot radii and plain line widths
 * @param dotsPerUnit    sampling density along a curve (samples per unit length)
 * @param thicknessScale extra scale applied to stroke thickness only
 */
class CurveDrawer(g2d: Graphics2D, pointTransform: Vec2 => Vec2, widthScale: Double,
                  dotsPerUnit: Double = 20.0, thicknessScale: Double = 1.0) {
  // Enable anti-aliasing once for all subsequent drawing on this context.
  g2d.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON)

  // Sets the current paint color for subsequent draw calls (side effect on g2d).
  def setColor(color: Color) = g2d.setColor(color)

  /** Draws `curve` as a chain of filled quads.
    * @param wF width as a function of curve parameter t in [0,1)
    * @param cF optional color as a function of t; when None the current color is kept */
  def drawColorfulCurve(curve: CubicCurve, wF: Double => Double, cF: Option[Double => Color]): Unit = {
    val points = curve.samples(dotsPerUnit)
    val tangents = curve.sampleTangents(dotsPerUnit)
    val dots = points.length
    val dt = 1.0/dots
    for(i <- 0 until dots-1){
      cF.foreach{f =>
        setColor(f(i*dt))
      }
      val r0 = wF(i*dt)
      val r1 = wF((i+1)*dt)
      val p0 = points(i)
      val p1 = points(i + 1)
      val (t0,t1) = (tangents(i),tangents(i+1))
      drawThicknessLine(p0, p1, t0, t1, r0*thicknessScale, r1*thicknessScale)
    }
  }

  /** Like drawColorfulCurve, but feeds each segment's "cost"
    * (length scaled up by local curvature) to `timeUsed` before drawing it.
    * Returns true and stops early as soon as `timeUsed` returns true
    * (i.e. the time budget is exhausted); false when the whole curve was drawn. */
  def drawCurveWithTimeUsed(curve: CubicCurve, wF: Double => Double, timeUsed: Double => Boolean = (_) => false): Boolean = {
    val points = curve.samples(dotsPerUnit)
    val tangents = curve.sampleTangents(dotsPerUnit)
    val dots = points.length
    val dt = 1.0/dots
    for(i <- 0 until dots-1){
      val r0 = wF(i*dt)
      val r1 = wF((i+1)*dt)
      val p0 = points(i)
      val p1 = points(i + 1)
      val (t0,t1) = (tangents(i),tangents(i+1))
      val length: Double = (p0 - p1).length
      // Curvature estimate from the tangent change over this segment.
      val curvature = (t0 - t1).length / length
      // Curved segments "cost" more time than straight ones.
      val slower = math.sqrt(1.0+ curvature)
      if(timeUsed(length*slower))
        return true
      drawThicknessLine(p0, p1, t0, t1, r0*thicknessScale, r1*thicknessScale)
    }
    false
  }

  /** Draws the two control-point handles (p1-p0 and p2-p3) of a letter segment.
    * NOTE(review): startColor/endColor are currently unused — the dot-drawing
    * code that used them is commented out below. */
  def drawCurveControlPoints(inkCurve: LetterSeg, startColor: Color, endColor: Color,
                             controlLineColor: Color, lineWidth: Double): Unit = inkCurve match{
    case LetterSeg(curve, start, end, _, _) =>
//      setColor(endpointColor)
//      drawDot(curve.p0, start)
//      drawDot(curve.p3, end)
//      val controlR = (start+end)/2
//      setColor(startColor)
//      drawDot(curve.p1, controlR)
//
//      setColor(endColor)
//      drawDot(curve.p2, controlR)

      setColor(controlLineColor)
      drawLine(curve.p1,curve.p0,lineWidth, noWidthScale = true)
      drawLine(curve.p2,curve.p3,lineWidth, noWidthScale = true)
  }

  /** Draws a straight line from p0 to p1 (model space).
    * @param noWidthScale when true, `width` is used as-is instead of being
    *                     multiplied by widthScale
    * @param dashed       optional (dash, gap) pattern; solid when None */
  def drawLine(p0: Vec2, p1: Vec2, width: Double, noWidthScale: Boolean = false,
               dashed: Option[(Float,Float)] = None): Unit ={
    val w = width * (if(noWidthScale) 1.0 else widthScale)
    val stroke = dashed match{
      case Some((a,b)) =>
        new BasicStroke(w.toFloat, BasicStroke.CAP_BUTT, BasicStroke.JOIN_BEVEL, 0, Array(a,b), 0)
      case None =>
        new BasicStroke(w.toFloat)
    }
    g2d.setStroke(stroke)
    val line = new Line2D.Double(pointTransform(p0), pointTransform(p1))
    g2d.draw(line)
  }

  /** Fills one quad of a variable-width stroke: the segment p0->p1 widened
    * perpendicular to the tangents, with width interpolated start->end. */
  def drawThicknessLine(p0: Vec2, p1: Vec2, tangent0: Vec2, tangent1: Vec2,
                        startWidth: Double, endWidth: Double): Unit = {
    // Normals: tangents rotated by -90 degrees.
    val n0 = Vec2(tangent0.y, -tangent0.x)
    val n1 = Vec2(tangent1.y, -tangent1.x)
    val v0 = pointTransform(p0 + n0 * startWidth/2)
    val v1 = pointTransform(p0 - n0 * startWidth/2)
    val v2 = pointTransform(p1 - n1 * endWidth/2)
    val v3 = pointTransform(p1 + n1 * endWidth/2)
    val path = new Path2D.Double()
    path.moveTo(v0.x, v0.y)
    path.lineTo(v1.x, v1.y)
    path.lineTo(v2.x, v2.y)
    path.lineTo(v3.x, v3.y)
    path.closePath()
    g2d.fill(path)
  }

  /** Fills a dot at `center` with the given model-space radius. */
  def drawDot(center: Vec2, radius: Double): Unit = {
    val c = pointTransform(center)
    val r = radius * widthScale
    val dot = new Ellipse2D.Double(c.x-r, c.y-r, 2*r, 2*r)
    g2d.fill(dot)
  }

  /** Draws a letter: non-highlighted segments first in mainStrokeColor,
    * then highlighted segments (indices in `highlights`) on top with a
    * start->end color gradient. */
  def drawLetter(letter: MuseChar, mainStrokeColor: Color, highlights: Seq[Int],
                 highlightStart: Color, highlightEnd: Color) = {
    letter.segs.zipWithIndex.foreach{case (s, i) =>
      if(!(highlights contains i)){
        setColor(mainStrokeColor)
        drawColorfulCurve(s.curve, MyMath.linearInterpolate(s.startWidth, s.endWidth), cF = None)
      }
    }
    letter.segs.zipWithIndex.foreach{case (s, i) =>
      if(highlights contains i){
        drawColorfulCurve(s.curve, MyMath.linearInterpolate(s.startWidth, s.endWidth),
          cF = Some(MyMath.linearInterpolate(highlightStart, highlightEnd)))
      }
    }
  }
}

object CurveDrawer{
  /** Returns `c` with its alpha channel set to `double` (expected in [0,1]). */
  def colorWithAlpha(c: Color, double: Double): Color = {
    val a = (double * 255).toInt
    new Color(c.getRed, c.getGreen, c.getBlue, a)
  }
}
MrVPlusOne/Muse-CGH
src/main/CurveDrawer.scala
Scala
mit
4,599
package com.virdis import java.util.regex.Pattern /** * Created by sandeep on 11/3/15. */ trait TweetCleanUp { val nonAscii = "[^\\p{ASCII}]+" val newLineOrTab = "\\\n|\\\t" val backSlash = "\\\\" val p = Pattern.compile(nonAscii) private var unicodeCounter: Long = 0L /* Check non ascii characters */ def checkNonAsciiCharacters(input:String): Boolean = { p.matcher(input).find() } /* Check if input contains non ascii characters replace newline/tab with space and remove unicode and backslash */ def cleanAndProcessInput(input: String): String = { if (checkNonAsciiCharacters(input)) { unicodeCounter = unicodeCounter + 1 input.replaceAll(newLineOrTab, " ").replaceAll(nonAscii + "|" + backSlash, "") } else { input.replaceAll(newLineOrTab, " ").replaceAll(backSlash, "") } } def unicodeCharacterCount = unicodeCounter } object tweetCleaner extends TweetCleanUp
virdis/data-challenge
src/main/scala/com/virdis/TweetCleanUp.scala
Scala
apache-2.0
957
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.table.planner.plan.nodes.physical.stream

import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.plan.nodes.exec.stream.StreamExecOverAggregate
import org.apache.flink.table.planner.plan.nodes.exec.{InputProperty, ExecNode}
import org.apache.flink.table.planner.plan.utils.OverAggregateUtil

import org.apache.calcite.plan.{RelOptCluster, RelTraitSet}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.Window

import java.util

/**
 * Stream physical RelNode for time-based over [[Window]].
 */
class StreamPhysicalOverAggregate(
    cluster: RelOptCluster,
    traitSet: RelTraitSet,
    inputRel: RelNode,
    outputRowType: RelDataType,
    val logicWindow: Window)
  extends StreamPhysicalOverAggregateBase(
    cluster,
    traitSet,
    inputRel,
    outputRowType,
    logicWindow) {

  // Calcite contract: produce an equivalent node with new traits/inputs.
  // Only the first input is used; over-aggregate is a single-input node.
  override def copy(traitSet: RelTraitSet, inputs: util.List[RelNode]): RelNode = {
    new StreamPhysicalOverAggregate(
      cluster,
      traitSet,
      inputs.get(0),
      outputRowType,
      logicWindow
    )
  }

  // Converts this physical node into its exec-graph counterpart, deriving the
  // over-window spec from the logical window and the output row type from
  // this node's RelDataType.
  override def translateToExecNode(): ExecNode[_] = {
    new StreamExecOverAggregate(
      OverAggregateUtil.createOverSpec(logicWindow),
      InputProperty.DEFAULT,
      FlinkTypeFactory.toLogicalRowType(getRowType),
      getRelDetailedDescription
    )
  }
}
lincoln-lil/flink
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/stream/StreamPhysicalOverAggregate.scala
Scala
apache-2.0
2,230
package models

import play.api.db._
import play.api.Play.current
import play.api.i18n.Lang

import anorm._
import anorm.SqlParser._

import java.util.Date

import play.Logger

import scala.util.control.NonFatal

// Row of the `gallery` table (an image attached to an exhibition node).
case class Gallerys(
  id: Pk[Long]
  , filename: Option[String]
  , default: Boolean
  , position: Long
  , cat_id: Long
  , caption: Option[String]
)

// Row of `museum.maps`: an annotated shape drawn on an exhibition map.
case class Map(
  id: Pk[Long]
  , annotation: Option[String]
  , shape: String
  , shape_id: String
)

case class mapDimensions(
  id: Pk[Long]
  , width: String
  , height: String
)

case class mapAnnotation(
  annotation: String
)

case class mapShapes(
  shape: String
)

// Tour navigation data stored on `arborescence` rows.
case class Tours(
  id: Pk[Long]
  , prev: Option[Long]
  , next: Option[Long]
  , nextcomment: Option[String]
  , tournumber: Option[Int]
)

// Editable subset of a gallery row (caption only).
case class Gallery_e(
  caption: Option[String]
)

// Row of the `arborescence` tree: one exhibition node.
// `level` is the depth, `sub` the parent id, `position` the global ordering.
case class Exhibition(
  id: Pk[Long]
  , name: Option[String]
  , name_en: Option[String]
  , level: Long
  , sub: Option[Long]
  , position: Long
  , date_begin: Option[Date]
  , date_end: Option[Date]
  , comment: Option[String]
  , comment_en: Option[String]
  , status_id: Boolean
  , number: Option[Long]
  , type_id: Option[Long]
  , file: Option[String]
  , file2: Option[String]
  , file3: Option[String]
  , width: Option[String]
  , height: Option[String]
)

case class ExhibitionCat(
  id: Pk[Long]
  , name: Option[String]
  , name_en: Option[String]
  , sub: Option[Long]
  , type_id: Option[Long]
  , number: Option[Long]
)

// Row of `museum.quiz`.
case class Quiz(
  question_id: Option[String]
  , id: Option[String]
  , title: Option[String]
  , question: Option[String]
  , answer_a: Option[String]
  , answer_b: Option[String]
  , answer_c: Option[String]
  , answer_d: Option[String]
)

case class Simple(id: Pk[Long])
case class Search(number: Long)

/**
 * Data access for the exhibition tree (`arborescence`) and its satellites
 * (tours, maps, galleries, attached files, quiz questions).
 *
 * The tree is stored flattened: `position` is a single global ordering,
 * `level` the depth, `sub` the parent id. Moving nodes shifts position
 * ranges; see `move`.
 */
object Exhibition{

  // (type_id, label, glyphicon) for every supported node type.
  val types:Seq[(Option[Long], String, Option[String])] = Seq(
    (None, "menu", None)
    , (Some(1),"text", Some("list-alt"))
    , (Some(3), "images", Some("picture"))
    , (Some(4),"audio", Some("volume-up"))
    , (Some(5),"video", Some("film"))
    , (Some(6),"map", Some("map"))
    , (Some(7),"quiz", Some("question-sign"))
  )

  // Same list shaped for HTML select helpers: (id-or-empty, label).
  val typesSelect:Seq[(String, String)] =
    types.map(a => ({if(a._1.isDefined){a._1.get.toString}else{""}} ->a._2))

  val table = "arborescence"

  // Base SELECT shared by all read queries below.
  val query:String = """
    SELECT id, name, name_en, level, sub, position
    , date_begin, date_end, comment, comment_en, file1, file2, file3
    , status_id, number, type_id, prev, next, nextcomment, tournumber, height, width
    FROM """+table+"""
    """

  /** Returns the whole ascending branch from node `id` up to the root,
    * sorted by level when `sort` is true. */
  def branch(id: Long, sort: Boolean = true) : List[Exhibition] = {
    var sub:Option[Long] = Some(id)
    var exhibition:List[Exhibition] = List()
    while(sub.isDefined){
      val a = detail(sub.get)
      Logger.info(a.toString)
      if(a.isDefined){
        sub = a.get.sub
        exhibition = exhibition :+ a.get
      }
      else{
        sub = None
      }
    }
    // sort by level
    if(sort){
      exhibition.sortBy(_.level)
    }
    else{
      exhibition
    }
  }

  /** Lists nodes ordered by position.
    * id = None: all nodes; Some(0): root nodes (sub IS NULL); Some(x): children of x. */
  def list(id: Option[Long] = None): List[Exhibition] = {
    val q = query+" WHERE 1"+
      {if(id.isDefined){if(id.get==0){" AND sub IS NULL"}else{" AND sub={sub}"}}else{""}}+
      " ORDER BY position"
    DB.withConnection{implicit c =>
      SQL(q)
        .on(
          'sub -> {if(id.isDefined){id.get}else{0}}
        )
        .as(parser *)
    }
  }

  /** Returns every descendant of `id` (nodes following it in position order
    * with a strictly greater level, until the level drops back). */
  def listSub(id: Long): List[Exhibition] = {
    var sub:Option[Long] = Some(id)
    var a: List[Exhibition] = List()
    val b = detail(sub.get)
    Logger.info(b.toString)
    val c = b.get.sub
    var level:Option[Long] = None
    list().map{l=>
      // leaving the subtree: same or shallower level ends collection
      if(level.isDefined && l.level<=level.get){
        level = None
      }
      if(level.isDefined){
        a= a :+ l
      }
      // start collecting right after the anchor node itself
      if(l.id.get==id){
        level = Some(l.level)
      }
    }
    a
  }

  /** Fetches a single node by id. */
  def detail(id: Long): Option[Exhibition] = {
    DB.withConnection{implicit c =>
      SQL(query+" WHERE id={id}")
        .on('id -> id)
        .as(parser singleOpt)
    }
  }

  /** Fetches a single node by its global position. */
  def detailFromPosition(position: Long): Option[Exhibition] = {
    DB.withConnection{implicit c =>
      SQL(query+" WHERE position={position}")
        .on('position -> position)
        .as(parser singleOpt)
    }
  }

  /** Ids strictly between the positions of id1 and id2 (exclusive of both);
    * when id2 == 0, everything after id1. */
  def listBetweenIds(id1: Long, id2: Long = 0): List[Long] = {
    var q_next:String = """SELECT id FROM """+table+"""
      WHERE id NOT IN({id1},{id2})
      AND position """
    if(id2 == 0){
      q_next = q_next + """ > (SELECT position FROM """+table+""" WHERE id={id1})"""
    }
    else{
      q_next = q_next + """
        BETWEEN (SELECT position FROM """+table+""" WHERE id={id1})
        AND (SELECT position FROM """+table+""" WHERE id={id2})
        """
    }
    DB.withConnection{implicit c =>
      val rows = SQL(q_next)
        .on(
          'id1 -> id1,
          'id2 -> id2
        )
        .apply()
        .map(row =>
          row[Long]("id")
        )
        .toList
      rows
    }
  }

  /** Ids of deeper-level nodes before/after `id` in position order
    * (direction true = after). */
  def listSubAfterId(id: Long, direction: Boolean): List[Long] = {
    val q:String = """SELECT id FROM """+table+"""
      WHERE 1
      AND position """+getDirectionString(direction, 1)+""" (SELECT position FROM """+table+""" WHERE id={id})
      AND level > (SELECT level FROM """+table+""" WHERE id={id})
      """
    DB.withConnection{implicit c =>
      val rows = SQL(q)
        .on(
          'id -> id
        )
        .apply()
        .map(row =>
          row[Long]("id")
        )
        .toList
      rows
    }
  }

  /** Renders ids as an SQL tuple literal "(a,b,c)"; None for an empty list. */
  def listToSQL(l: List[Long]): Option[String] = {
    if(l.length==0){
      return None
    }
    var r:String="("
    l.map{a=>
      r=r+a.toString+","
    }
    r = r.dropRight(1)+")"
    Some(r)
  }

  // Row parser; file columns are prefixed with the public URL base.
  val parser = {
    get[Pk[Long]]("id")~
    get[Option[String]]("name")~
    get[Option[String]]("name_en")~
    get[Long]("level")~
    get[Option[Long]]("sub")~
    get[Long]("position")~
    get[Option[Date]]("date_begin")~
    get[Option[Date]]("date_end")~
    get[Option[String]]("comment")~
    get[Option[String]]("comment_en")~
    get[Boolean]("status_id")~
    get[Option[Long]]("number")~
    get[Option[Long]]("type_id")~
    get[Option[String]]("file1")~
    get[Option[String]]("file2")~
    get[Option[String]]("file3")~
    get[Option[String]]("width")~
    get[Option[String]]("height") map {
      case id~name~name_en~level~sub~position~date_begin~date_end~comment~comment_en~status_id~number~type_id~file~file2~file3~width~height =>
        Exhibition(id, name, name_en, level, sub, position, date_begin, date_end,
          comment, comment_en, status_id, number, type_id,
          if(file.isDefined){Some(Utils.url+file.get)}else{None},
          if(file2.isDefined){Some(Utils.url+file2.get)}else{None},
          if(file3.isDefined){Some(Utils.url+file3.get)}else{None},
          width, height)
    }
  }

  /** Last node by position; when id > 0, last element of that subcategory. */
  def last(id:Long = 0):Option[Exhibition] = {
    val q:String = query+
      "WHERE "+{if(id>0){"sub={id}"}else{"1"}}+
      " ORDER BY position DESC"+
      " LIMIT 0,1"
    //Logger.info(q)
    DB.withConnection{implicit c =>
      SQL(q)
        .on('id -> id)
        .as(parser singleOpt)
    }
  }

  /**********
   *
   * alter
   * todo:
   *  add: add without caring of where, find right position (catch element), move
   *  idea => do not touch attribute position anywhere but in method move!! <- avoids a lot of errors
   *
   ***********/

  /** Inserts (id == 0) or updates (id > 0) a node and returns the last id of
    * the table. On insert, the new node is placed at the end of its parent's
    * subtree and all following positions are shifted. */
  def insertOrUpdate(data: Exhibition, id: Long = 0): Long = {
    Logger.info(data.comment.toString)
    // get info about sub
    var sub:Option[Exhibition] = None
    var level:Long = 1
    var position:Long = 1
    if(id==0){
      if(data.sub.isDefined){
        sub = detail(data.sub.get)
        level = sub.get.level + 1
        // get position: slot right before the next sibling of the parent
        val last_sub:Option[Exhibition] = detailNextPosition(sub.get)
        if(last_sub.isDefined){
          position = last_sub.get.position
        }
        // no previous entry in that level
        else{
          position = last().get.position + 1
        }
      }
      else{
        // get last position
        val lastElement = last()
        if(lastElement.isDefined){
          position = lastElement.get.position + 1
        }
      }
      // shift positions of everything at/after the insertion point
      DB.withConnection{implicit c =>
        SQL("UPDATE "+table+" SET position=position+1 WHERE position>={position}")
          .on('position -> position)
          .executeUpdate
      }
    }
    // prepare query
    val query = {if(id>0){"UPDATE "}else{"INSERT INTO "}}+table+
      """ SET name={name}
      , name_en={name_en}
      , date_begin={date_begin}
      , date_end={date_end}
      , comment={comment}
      , comment_en={comment_en}
      , status_id={status_id}
      , number={number}
      , type_id={type_id}
      , width={width}
      , height={height}
      """+
      {
        if(id>0){" WHERE id={id}"}
        else{
          """
          , level={level}, sub={sub}, position={position}
          , date_added=NOW()
          """
        }
      }
    // insert/update entry
    DB.withConnection{implicit c =>
      SQL(query)
        .on(
          'name -> data.name, 'name_en -> data.name_en,
          'sub -> data.sub, 'level -> level, 'position -> position,
          'date_begin -> data.date_begin, 'date_end -> data.date_end,
          'comment -> data.comment, 'comment_en -> data.comment_en,
          'status_id -> data.status_id, 'number -> data.number,
          'type_id -> data.type_id, 'width -> data.width, 'height -> data.height,
          'id -> id
        )
        .executeUpdate
    }
    Utils.getLastId(table).getOrElse(0)
  }

  // todo: allow changing of sub: update sub, level and position! -> only one function : move(a,b)
  /** Renames a node (name only). */
  def update(data: Exhibition, id: Long){
    // prepare query
    val query:String = "UPDATE "+ table+ " SET name={name} WHERE id={id}"
    // insert/update entry
    DB.withConnection{implicit c =>
      SQL(query)
        .on(
          'name -> data.name,
          'id -> id
        )
        .executeUpdate
    }
  }

  /** Returns the number of direct children of `id`; None on DB failure. */
  def nSubs(id: Long): Option[Long] = {
    val qb:String = "SELECT COUNT(id) as c FROM "+table+" WHERE sub={sub}"
    try{
      DB.withConnection{implicit c=>
        val row = SQL(qb)
          .on('sub -> id)
          .apply().head
        Some(row[Long]("c"))
      }
    }
    catch{
      // NonFatal instead of Throwable so fatal VM errors still propagate
      case NonFatal(_) => None
    }
  }

  /** Translates a direction flag into SQL fragments:
    * t = 1: comparison operator, t = 2: ORDER BY keyword, otherwise: sign. */
  def getDirectionString(direction:Boolean, t:Long = 0):String = {
    var r:List[String] = List()
    t match{
      case 1 => {r = List(">","<")}
      case 2 => {r = List("ASC","DESC")}
      case _ => {r = List("+","-")}
    }
    if(direction){
      r(0)
    }
    else{
      r(1)
    }
  }

  /** Next node relative to `a` in position order (direction true = forward).
    * With boundary, the next node of a *shallower* level (subtree boundary);
    * otherwise the next node of the same level. */
  def detailNextPosition(a: Exhibition, direction: Boolean = true, boundary: Boolean = true): Option[Exhibition] = {
    val q_next:String = query+"""
      WHERE 1
      AND position"""+getDirectionString(direction, 1)+"""{position}
      AND level"""+{if(boundary){"<"}else{""}}+"""={level}
      ORDER BY position """+getDirectionString(direction, 2)+"""
      LIMIT 0,1
      """
    // //level={level}
    DB.withConnection{implicit c =>
      SQL(q_next)
        .on(
          'position -> a.position,
          'level -> a.level,
          'sub -> a.sub
        )
        .as(parser singleOpt)
    }
  }

  /** First parent element of `id` (closest shallower node before it). */
  def getParent(id: Long): Option[Exhibition] ={
    val a = detail(id)
    if(a.isDefined){
      val q:String = query+" WHERE position<{position} AND level<{level} LIMIT 0,1"
      DB.withConnection{implicit c =>
        SQL(q)
          .on(
            'position -> a.get.position,
            'level -> a.get.level
          )
          .as(parser singleOpt)
      }
    }
    else{
      None
    }
  }

  /** Moves subtree `a` next to subtree `b` by shifting the position ranges of
    * both tails against each other.
    * @param a origin vertex/node
    * @param b target node */
  def move(a: Exhibition, b:Exhibition){
    val direction:Boolean = {
      if(a.position>b.position){
        false
      }
      else{
        true
      }
    }
    // end of tail and end of tail target
    val c = detailNextPosition(a, true, true)
    val d = detailNextPosition(b, true, true)
    // get tails
    var ids:List[Long] = List()
    var idsA:List[Long] = List()
    if(c.isDefined){
      ids = listBetweenIds(a.id.get, c.get.id.get) :+ a.id.get
      Logger.info("origin (A) "+a.id.get+" and end of tail (C) "+c.get.id.get)
    }
    else{
      ids = listBetweenIds(a.id.get, 0) :+ a.id.get
    }
    if(d.isDefined){
      idsA = listBetweenIds(b.id.get, d.get.id.get) :+ b.id.get
      Logger.info("target (B) "+b.id.get+" and end of tail (D) "+d.get.id.get)
    }
    else{
      idsA = listBetweenIds(b.id.get, 0) :+ b.id.get
    }
    // end tails
    // summary of operations
    Logger.info("tail (A - C) ids: "+ids.toString+", dpos: "+ids.size.toString)
    Logger.info("tail (B - D) "+idsA.toString+", dposA: "+idsA.size.toString)
    Logger.info("move tail of: "+getDirectionString(direction)+idsA.size.toString)
    Logger.info("move target tail of "+getDirectionString(!direction)+ids.size)
    // prepare queries (ids come from our own queries, rendered via listToSQL)
    val q1:String = "UPDATE "+table+" SET position=position"+getDirectionString(direction)+"{dposA} WHERE id IN"+listToSQL(ids).get+""
    val q2:String = "UPDATE "+table+" SET position=position"+getDirectionString(!direction)+"{dpos} WHERE id IN"+listToSQL(idsA).get+""
    // execute queries
    DB.withConnection{implicit c =>
      SQL(q1)
        .on(
          'dposA -> idsA.size
        )
        .executeUpdate
      SQL(q2)
        .on(
          'dpos -> ids.size
        )
        .executeUpdate
    }
  }

  /**
   * Moves an item one step up or down within the same category.
   * @param direction indicates in which direction to move the object
   *                  (false: down, true: up)
   * todo: when it is at the end (b undefined)
   */
  def moveOneStep(id: Long, direction: Boolean){
    val a = detail(id)
    if(a.isDefined){
      // target
      val b = detailNextPosition(a.get, direction)
      if(b.isDefined && b.get.level == a.get.level){
        move(a.get, b.get)
      }
    }
  }

  /** Deletes a node, but only when it exists and has no children; closes the
    * position gap afterwards. */
  def delete(id: Long){
    val a = detail(id)
    // if entry exists and no associated subs
    val nSub = nSubs(id)
    Logger.info("nsub:"+nSub.toString)
    if(a.isDefined && nSub.isDefined && nSub.get==0){
      val qs:String = "UPDATE "+table+" SET position=position-1 WHERE position>{position}"
      DB.withConnection{implicit c =>
        SQL(qs)
          .on('position -> a.get.position)
          .executeUpdate
      }
      val q:String = "DELETE FROM "+table+" WHERE id={id}"
      DB.withConnection{implicit c =>
        SQL(q)
          .on('id -> id)
          .executeUpdate
      }
    }
  }

  /** Select-box options: ("","-") plus one (id, ">"*level + name) per node. */
  def subSelect(id: Option[Long] = None): Seq[(String, String)] = {
    var sub: Seq[(String, String)] = Seq(("","-"))
    list(id).map{l =>
      sub = sub :+ (l.id.get.toString,{">" * l.level.toInt}+" "+l.name )
    }
    sub
  }

  /** Resolves a visitor-facing number to a node id, searching only the
    * subtrees of active (status_id) root exhibitions. */
  def getIdFromNumber(number: Long): Option[Long] ={
    val activeExh = list(Some(0)).filter((a: Exhibition) => a.status_id)
    var activeCat:List[Exhibition] = List()
    activeExh.map{a=>
      activeCat = activeCat ::: listSub(a.id.get)
    }
    val interestCat = activeCat.filter((a:Exhibition) => (a.number.isDefined && a.number.get==number))
    if(interestCat.size>0){
      Some(interestCat.head.id.get)
    }
    else{
      None
    }
  }

  // QUIZ

  /** Quiz questions attached to node `id`. */
  def serveQuestions(id: Long):List[Quiz] = {
    DB.withConnection{implicit c =>
      SQL("SELECT * FROM museum.quiz WHERE parent_id={id} ORDER BY question_id ASC")
        .on('id -> id)
        .as(quizParser *)
    }
  }

  val quizParser = {
    get[Option[String]]("question_id")~
    get[Option[String]]("parent_id")~
    get[Option[String]]("title")~
    get[Option[String]]("question")~
    get[Option[String]]("answer_a")~
    get[Option[String]]("answer_b")~
    get[Option[String]]("answer_c")~
    get[Option[String]]("answer_d") map {
      case question_id~id~title~question~answer_a~answer_b~answer_c~answer_d=>
        Quiz(question_id,id, title, question, answer_a, answer_b, answer_c, answer_d)
    }
  }

  /** Links quiz question `question_id` to node `id`.
    * Now uses bound parameters instead of string-concatenated SQL. */
  def addQuestion(id: Long, question_id: Long){
    Logger.info("Adding Question to Database...")
    DB.withConnection{implicit c =>
      SQL("INSERT INTO museum.quiz SET parent_id={parent_id}, question_id={question_id}")
        .on('parent_id -> id, 'question_id -> question_id)
        .executeUpdate
      Logger.info("Done.")
    }
  }

  /** Updates one column of a quiz question.
    * SECURITY NOTE(review): `field` is interpolated as a column name and
    * cannot be bound as a parameter — callers must pass only trusted,
    * whitelisted column names. The value is bound. */
  def updateQuestion(id: Long, question_id: String, field: String, value: String){
    Logger.info("Updating Question, field: "+field+"...")
    DB.withConnection{implicit c =>
      SQL("UPDATE museum.quiz SET parent_id={parent_id}, "+field+"={value} WHERE question_ID={question_id}")
        .on('parent_id -> id, 'value -> value, 'question_id -> question_id)
        .executeUpdate
      Logger.info("Done.")
    }
  }

  /** Tour navigation (prev/next/comment) stored on `arborescence` rows. */
  object Tour{
    val table = "arborescence"

    def list(id: Long):List[Tours] = {
      DB.withConnection{implicit c =>
        SQL("SELECT * FROM "+table+" WHERE id={id} ORDER BY position ASC")
          .on('id -> id)
          .as(parser *)
      }
    }

    def detail(id: Long):Tours = {
      DB.withConnection{implicit c =>
        SQL("SELECT * FROM "+table+" WHERE id={id}")
          .on('id -> id)
          .as(parser single)
      }
    }

    val parser = {
      get[Pk[Long]]("id")~
      get[Option[Long]]("prev")~
      get[Option[Long]]("next")~
      get[Option[String]]("nextcomment")~
      get[Option[Int]]("tournumber") map {
        case id~prev~next~nextcomment~tournumber=>
          Tours(id, prev, next, nextcomment, tournumber)
      }
    }
  }

  /** Shapes drawn on exhibition maps (`museum.maps`).
    * All queries now bind user-supplied values as parameters. */
  object Maps{
    val table = "maps"

    def upload(id: Long, shape_id: String, shape: String){
      DB.withConnection{implicit c =>
        SQL("INSERT INTO museum.maps SET arborescence_id={id}, shape_id={shape_id}, shape={shape}, date_added=NOW()")
          .on('id -> id, 'shape_id -> shape_id, 'shape -> shape)
          .executeUpdate
      }
    }

    // Same behavior as `upload`; kept to preserve the existing call sites.
    def insert(id: Long, shape_id: String, shape: String){
      DB.withConnection{implicit c =>
        SQL("INSERT INTO museum.maps SET arborescence_id={id}, shape_id={shape_id}, shape={shape}, date_added=NOW()")
          .on('id -> id, 'shape_id -> shape_id, 'shape -> shape)
          .executeUpdate
      }
    }

    def update(id: Long, shape_id: String, shape: String){
      DB.withConnection{implicit c =>
        SQL("UPDATE museum.maps SET shape={shape} WHERE shape_id={shape_id}")
          .on('shape -> shape, 'shape_id -> shape_id)
          .executeUpdate
      }
    }

    def annotate(shape_id: String, annotation: String){
      DB.withConnection{implicit c =>
        SQL("UPDATE museum.maps SET annotation={annotation} WHERE shape_id={shape_id}")
          .on('annotation -> annotation, 'shape_id -> shape_id)
          .executeUpdate
      }
    }

    def delete(shape_id: String){
      DB.withConnection{implicit c =>
        SQL("DELETE FROM museum.maps WHERE shape_id={shape_id}")
          .on('shape_id -> shape_id)
          .executeUpdate
      }
    }

    /** All shapes attached to node `id`. */
    def serve(id: Long) = {
      DB.withConnection{implicit c =>
        SQL("SELECT * FROM "+table+" WHERE arborescence_id={id}")
          .on('id -> id)
          .as(parseShapes *)
      }
    }

    /** Single shape by its shape_id. */
    def serveSingle(id: Long, shape_id: String) = {
      DB.withConnection{implicit c =>
        SQL("SELECT shape FROM "+table+" WHERE shape_id={shape_id}")
          .on('id -> id, 'shape_id -> shape_id)
          .as(parseShapes *)
      }
    }

    val parseShapes = {
      get[String]("shape") map {
        case shape=> shape
      }
    }

    val parseAnnotation = {
      get[String]("annotation") map {
        case annotation=> annotation
      }
    }

    val parser = {
      get[Pk[Long]]("arborescence_id")~
      get[Option[String]]("annotation")~
      get[String]("shape")~
      get[String]("shape_id") map {
        case id~annotation~shape~shape_id=>
          Map(id, annotation, shape, shape_id)
      }
    }
  }

  /** Images attached to exhibition nodes (`gallery` table). */
  object Gallery{
    val table = "gallery"

    /**
     * Moves an element up or down within its gallery.
     * @param id       element id that needs to be moved
     * @param sid      associated (category) id
     * @param upOrDown { (true, up, left, -1), (false, down, right, +1) }
     */
    def move(id: Long, sid: Long, upOrDown: Boolean){
      // retrieve element info
      val d = detail(id)
      // retrieve size of sub list
      val slist = list(sid)
      // evaluate cases where nothing needs to be done (already first/last)
      if(
        !(!upOrDown && d.position>=slist.size) &&
        !(upOrDown && d.position<=1)
      ){
        val new_position = d.position + {if(upOrDown){-1}else{1}}
        // get element currently holding the new position
        val elem = slist.find{a => a.position==new_position}
        // swap positions
        if(elem.isDefined){
          changePosition(d.id.get, new_position)
          changePosition(elem.get.id.get, d.position)
        }
      }
    }

    def changePosition(id: Long, position: Long){
      DB.withConnection{implicit c=>
        SQL("UPDATE "+table+" SET position={position} WHERE id={id}")
          .on('id -> id, 'position -> position)
          .executeUpdate
      }
    }

    /** Stores the uploaded file name on a gallery row.
      * FIX(review): the SET clause read `filename=(unknown)` — a corrupted
      * literal — while 'filename was already bound; restored the
      * `{filename}` placeholder. */
    def insertFileName(id: Long, filename: Option[String]){
      DB.withConnection{implicit c=>
        SQL("UPDATE "+table+" SET filename={filename} WHERE id={id}")
          .on('id -> id, 'filename -> filename)
          .executeUpdate
      }
    }

    /** Creates a new gallery row at the end of category `id`;
      * returns the new row id. */
    def insert(id: Long): Option[Long] = {
      import concurrent._
      import ExecutionContext.Implicits._
      val r = new scala.util.Random
      val a = r.nextInt(500)
      // todo add pause of random duration (to overcome concurrency ...)
      //future{blocking(
      Thread.sleep(a)
      //)}
      // only a problem for very small image .. should actually work
      // problem with concurrency!!
      // include position
      // 1 retrieve latest position
      val new_position = list(id).size + 1
      val query:String = "INSERT INTO "+table+" SET cat_id={id}, position={position}"
      DB.withConnection{implicit c =>
        SQL(query)
          .on('id -> id, 'position -> new_position)
          .executeUpdate
      }
      if(new_position>=list(id).size+2){
        Logger.info("see!")
      }
      Utils.getLastId(table)
    }

    def list(id: Long):List[Gallerys] = {
      DB.withConnection{implicit c =>
        SQL("SELECT * FROM "+table+" WHERE cat_id={id} ORDER BY position ASC")
          .on('id -> id)
          .as(parser *)
      }
    }

    def detail(id: Long):Gallerys = {
      DB.withConnection{implicit c =>
        SQL("SELECT * FROM "+table+" WHERE id={id}")
          .on('id -> id)
          .as(parser single)
      }
    }

    /** Deletes a gallery row and closes the position gap in its category. */
    def delete(id: Long){
      //get position + associated id --> update positions
      val d = detail(id)
      DB.withConnection{implicit c =>
        SQL("UPDATE "+table+" SET position=position-1 WHERE position>{position} AND cat_id={cat_id}")
          .on('position -> d.position, 'cat_id -> d.cat_id)
          .executeUpdate
        SQL("DELETE FROM "+table+" WHERE id={id}")
          .on('id -> id)
          .executeUpdate
      }
    }

    val parser = {
      get[Pk[Long]]("id")~
      get[Option[String]]("filename")~
      get[Long]("position")~
      get[Long]("cat_id")~
      get[Option[String]]("caption") map {
        case id~filename~position~cat_id~caption =>
          Gallerys(id, filename, false, position, cat_id, caption)
      }
    }

    /** Updates the caption of a gallery row. */
    def update(id: Long, data: Gallery_e){
      import play.Logger
      Logger.info(data.toString)
      DB.withConnection{implicit c =>
        SQL("""
          UPDATE """+table+"""
          SET caption={caption}
          WHERE id={id}
          """)
          .on(
            'caption -> data.caption,
            'id -> id
          )
          .executeUpdate
      }
    }
  }

  /** Attached files (file1/file2/file3 columns on `arborescence`). */
  object File{
    /** Stores `filename` in the nr-th file column (1..3); the type_id is
      * only updated for the primary file (nr == 1).
      * FIX(review): the SET clause read `=(unknown)` — a corrupted literal —
      * while 'filename was already bound; restored the `{filename}`
      * placeholder. */
    def insert(id: Long, filename: String, type_id: Option[Long], nr:Long=1){
      val file_field:String= {if(nr==1){"file1"} else if (nr==2){"file2"} else{"file3"}}
      val q :String = "UPDATE "+table+" SET "+file_field+"={filename} "+{if(nr==1){", type_id={type_id}"}else{""}}+" WHERE id={id}"
      DB.withConnection{implicit c=>
        SQL(q)
          .on('id -> id, 'filename -> filename, 'type_id -> type_id)
          .executeUpdate
      }
    }

    /** Clears the nr-th file column (1..3). */
    def delete(id: Long, nr: Long = 1){
      val file_field:String= {if(nr==1){"file1"} else if(nr==2){"file2"} else{"file3"}}
      val q :String = "UPDATE "+table+" SET "+file_field+"=NULL WHERE id={id}"
      DB.withConnection{implicit c=>
        SQL(q)
          .on('id -> id)
          .executeUpdate
      }
    }
  }
}

/*
CREATE TABLE `gallery` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `cat_id` int(11) NOT NULL,
  `caption` varchar(256) DEFAULT NULL,
  `position` int(11) NOT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=71 DEFAULT CHARSET=utf8
*/
musethno/MGS
app/models/Exhibition.scala
Scala
mit
24,732
/*
 * Scala (https://www.scala-lang.org)
 *
 * Copyright EPFL and Lightbend, Inc.
 *
 * Licensed under Apache License 2.0
 * (http://www.apache.org/licenses/LICENSE-2.0).
 *
 * See the NOTICE file distributed with this work for
 * additional information regarding copyright ownership.
 */

package scala
package tools.nsc
package symtab
package classfile

import java.io.IOException
import java.lang.Integer.toHexString
import scala.collection.{immutable, mutable}
import scala.collection.mutable.{ArrayBuffer, ListBuffer}
import scala.annotation.switch
import scala.reflect.internal.JavaAccFlags
import scala.reflect.internal.pickling.ByteCodecs
import scala.reflect.internal.util.ReusableInstance
import scala.reflect.io.NoAbstractFile
import scala.tools.nsc.util.ClassPath
import scala.tools.nsc.io.AbstractFile
import scala.util.control.NonFatal

/** This abstract class implements a class file parser.
 *
 *  @author Martin Odersky
 */
abstract class ClassfileParser(reader: ReusableInstance[ReusableDataReader]) {
  val symbolTable: SymbolTable {
    def settings: Settings
  }
  val loaders: SymbolLoaders {
    val symbolTable: ClassfileParser.this.symbolTable.type
  }

  import symbolTable._
  /**
   * If typer phase is defined then perform member lookup of a symbol
   * `sym` at typer phase. This method results from refactoring. The
   * original author of the logic that uses typer phase didn't explain
   * why we need to force infos at that phase specifically. It only mentioned
   * that ClassfileParser can be called late (e.g. at flatten phase) and
   * we want to make sure we handle such situation properly.
   */
  protected def lookupMemberAtTyperPhaseIfPossible(sym: Symbol, name: Name): Symbol

  /** The compiler classpath.
   */
  def classPath: ClassPath

  import definitions._
  import scala.reflect.internal.ClassfileConstants._
  import Flags._

  // Mutable parser state, reset per classfile by `parse`. Not thread-safe; a
  // single parser instance processes one classfile at a time.
  protected var file: AbstractFile = _            // the class file
  protected var in: DataReader = _                // the class file reader
  protected var clazz: ClassSymbol = _            // the class symbol containing dynamic members
  protected var staticModule: ModuleSymbol = _    // the module symbol containing static members
  protected var instanceScope: Scope = _          // the scope of all instance definitions
  protected var staticScope: Scope = _            // the scope of all static definitions
  protected var pool: ConstantPool = _            // the classfile's constant pool
  protected var isScala: Boolean = _              // does class file describe a scala class?
  protected var isScalaRaw: Boolean = _           // this class file is a scala class with no pickled info
  protected var busy: Symbol = _                  // lock to detect recursive reads
  protected var currentClass: String = _          // JVM name of the current class
  protected var classTParams = Map[Name,Symbol]() // type params of the current class and its enclosing classes, by name
  protected var srcfile0 : Option[AbstractFile] = None
  protected def moduleClass: Symbol = staticModule.moduleClass
  private var sawPrivateConstructor = false       // set by parseMethod; read by parseClass to decide on a synthetic ctor

  // Static members go on the module class, instance members on the class itself.
  private def ownerForFlags(jflags: JavaAccFlags) = if (jflags.isStatic) moduleClass else clazz

  def srcfile = srcfile0

  // u1, u2, and u4 are what these data types are called in the JVM spec.
  // They are an unsigned byte, unsigned char, and unsigned int respectively.
  // We bitmask u1 into an Int to make sure it's 0-255 (and u1 isn't used
  // for much beyond tags) but leave u2 alone as it's already unsigned.
  protected final def u1(): Int = in.nextByte & 0xFF
  protected final def u2(): Int = in.nextChar.toInt
  protected final def u4(): Int = in.nextInt

  protected final def s1(): Int = in.nextByte.toInt // sign-extend the byte to int
  protected final def s2(): Int = (in.nextByte.toInt << 8) | u1 // sign-extend and shift the first byte, or with the unsigned second byte

  // Readers for the flag words of the various classfile structures; each
  // consumes a u2 from the input and decodes it via JavaAccFlags.
  private def readInnerClassFlags() = readClassFlags()
  private def readClassFlags()      = JavaAccFlags classFlags u2
  private def readMethodFlags()     = JavaAccFlags methodFlags u2
  private def readFieldFlags()      = JavaAccFlags fieldFlags u2
  private def readTypeName()        = readName().toTypeName
  private def readName()            = pool.getName(u2).name
  @annotation.unused
  private def readType()            = pool getType u2

  // Unpickler for the Scala signature bytes embedded in .sig files / annotations.
  private object unpickler extends scala.reflect.internal.pickling.UnPickler {
    val symbolTable: ClassfileParser.this.symbolTable.type = ClassfileParser.this.symbolTable
  }

  // Error adapters: translate internal failures into IOException so callers
  // (the symbol loaders) see a uniform failure mode for broken classfiles.
  private def handleMissing(e: MissingRequirementError) = {
    if (settings.debug) e.printStackTrace
    throw new IOException(s"Missing dependency '${e.req}', required by $file")
  }
  private def handleError(e: Exception) = {
    if (settings.debug) e.printStackTrace()
    throw new IOException(s"class file '$file' is broken\\n(${e.getClass}/${e.getMessage})")
  }
  private def mismatchError(c: Symbol) = {
    throw new IOException(s"class file '$file' has location not matching its contents: contains $c")
  }

  private def parseErrorHandler[T]: PartialFunction[Throwable, T] = {
    case e: MissingRequirementError => handleMissing(e)
    case e: RuntimeException        => handleError(e)
  }

  /** Run `body` while holding the `busy` lock on `sym`; detects recursive
   *  and cross-dependent classfile reads, which would corrupt parser state.
   */
  @inline private def pushBusy[T](sym: Symbol)(body: => T): T = {
    if (busy eq sym)
      throw new IOException(s"unsatisfiable cyclic dependency in '$sym'")
    else if ((busy ne null) && (busy ne NoSymbol))
      throw new IOException(s"illegal class file dependency between '$sym' and '$busy'")

    busy = sym
    try body
    catch parseErrorHandler
    finally busy = NoSymbol
  }

  /**
   * `clazz` and `module` are the class and module symbols corresponding to the classfile being
   * parsed. Note that the ClassfileLoader unconditionally creates both of these symbols, they
   * may get invalidated later on (.exists).
   *
   * Note that using `companionModule` / `companionClass` does not always work to navigate between
   * those two symbols, namely when they are shadowed by a type / value in a package object
   * (scala-dev#248).
   */
  def parse(file: AbstractFile, clazz: ClassSymbol, module: ModuleSymbol): Unit = {
    this.file = file
    pushBusy(clazz) {
      reader.using { reader =>
        this.in = reader.reset(file)
        this.clazz = clazz
        this.staticModule = module
        this.isScala = false
        val magic = in.getInt(in.bp)
        if (magic != JAVA_MAGIC && file.name.endsWith(".sig")) {
          // Not a classfile at all: a bare Scala signature file. Unpickle it directly.
          currentClass = clazz.javaClassName
          isScala = true
          unpickler.unpickle(in.buf.take(file.sizeOption.get), 0, clazz, staticModule, file.name)
        } else {
          parseHeader()
          this.pool = new ConstantPool
          parseClass()
          pool = null
        }
      }
      in = null
    }
  }

  /** Check the classfile magic number and version. The classfile layout stores
   *  minor_version before major_version, hence the read order below.
   */
  private def parseHeader(): Unit = {
    val magic = u4
    if (magic != JAVA_MAGIC)
      abort(s"class file ${file} has wrong magic number 0x${toHexString(magic)}")

    val minor, major = u2

    if (major < JAVA_MAJOR_VERSION || major == JAVA_MAJOR_VERSION && minor < JAVA_MINOR_VERSION)
      abort(s"class file ${file} has unknown version $major.$minor, should be at least $JAVA_MAJOR_VERSION.$JAVA_MINOR_VERSION")
  }

  /** A constant-pool UTF8 entry, with the Name interned lazily on first use. */
  protected class NameOrString(val value: String) {
    private var _name: Name = null
    def name: Name = {
      if (_name eq null) _name = TermName(value)
      _name
    }
  }

  def getClassSymbol(name: String): Symbol = {
    name match {
      case name if name.endsWith(nme.MODULE_SUFFIX_STRING) => rootMirror getModuleByName newTermName(name).dropModule
      case name                                            => classNameToSymbol(name)
    }
  }

  /**
   * Constructor of this class should not be called directly, use `newConstantPool` instead.
   */
  protected class ConstantPool {
    protected val len          = u2                            // number of pool slots (entry indices are 1-based, < len)
    protected val starts       = new Array[Int](len)           // byte offset of each entry in the classfile
    protected val values       = new Array[AnyRef](len)        // memoized decoded values (NameOrString / Symbol / Type / Constant / bytes)
    protected val internalized = new Array[NameOrString](len)  // memoized external ('.'-separated) names

    val initBp = in.bp

    // First pass: record the start offset of every entry and skip over its
    // payload. Long and Double entries occupy two slots per the JVM spec.
    { var i = 1
      while (i < starts.length) {
        starts(i) = in.bp
        i += 1
        (u1: @switch) match {
          case CONSTANT_UTF8 | CONSTANT_UNICODE                                => in skip u2
          case CONSTANT_CLASS | CONSTANT_STRING | CONSTANT_METHODTYPE          => in skip 2
          case CONSTANT_MODULE | CONSTANT_PACKAGE                              => in skip 2
          case CONSTANT_METHODHANDLE                                           => in skip 3
          case CONSTANT_FIELDREF | CONSTANT_METHODREF | CONSTANT_INTFMETHODREF => in skip 4
          case CONSTANT_NAMEANDTYPE | CONSTANT_INTEGER | CONSTANT_FLOAT        => in skip 4
          case CONSTANT_INVOKEDYNAMIC                                         => in skip 4
          case CONSTANT_LONG | CONSTANT_DOUBLE                                 => in skip 8 ; i += 1
          case _                                                               => errorBadTag(in.bp - 1)
        }
      }
    }
    val endBp = in.bp

    /** Memoize `value` for pool slot `idx` and return it. */
    def recordAtIndex[T <: AnyRef](value: T, idx: Int): T = {
      values(idx) = value
      value
    }

    /** Return the offset of entry `index`'s payload, checking its tag byte. */
    def firstExpecting(index: Int, expected: Int): Int = {
      val start = starts(index)
      val first = in.getByte(start).toInt
      if (first == expected) start + 1 else this errorBadTag start
    }

    /** Return the name found at given index. */
    def getName(index: Int): NameOrString = (
      if (index <= 0 || len <= index) errorBadIndex(index)
      else values(index) match {
        case name: NameOrString => name
        case _          =>
          val start = firstExpecting(index, CONSTANT_UTF8)
          val len   = in.getChar(start).toInt
          recordAtIndex(new NameOrString(in.getUTF(start, len + 2)), index)
      }
    )

    /** Return the name found at given index in the constant pool, with '/' replaced by '.'. */
    def getExternalName(index: Int): NameOrString = {
      if (index <= 0 || len <= index) errorBadIndex(index)

      if (internalized(index) == null)
        internalized(index) = new NameOrString(getName(index).value.replace('/', '.'))

      internalized(index)
    }

    /** Resolve (and memoize) the class symbol referenced by pool entry `index`. */
    def getClassSymbol(index: Int): Symbol = {
      if (index <= 0 || len <= index) errorBadIndex(index)
      values(index) match {
        case sym: Symbol => sym
        case _           =>
          val result = ClassfileParser.this.getClassSymbol(getClassName(index).value)
          recordAtIndex(result, index)
      }
    }

    /** Return the external name of the class info structure found at 'index'.
     *  Use 'getClassSymbol' if the class is sure to be a top-level class.
     */
    def getClassName(index: Int): NameOrString = {
      val start = firstExpecting(index, CONSTANT_CLASS)
      getExternalName((in.getChar(start)).toInt)
    }

    /** Return a name and a type at the given index. If the type is a method
     *  type, a dummy symbol is created in `ownerTpe`, which is used as the
     *  owner of its value parameters. This might lead to inconsistencies,
     *  if a symbol of the given name already exists, and has a different
     *  type.
     */
    protected def getNameAndType(index: Int, ownerTpe: Type): (Name, Type) = {
      if (index <= 0 || len <= index) errorBadIndex(index)
      values(index) match {
        case p: ((Name @unchecked, Type @unchecked)) => p
        case _ =>
          val start = firstExpecting(index, CONSTANT_NAMEANDTYPE)
          val name = getName(in.getChar(start).toInt)
          // create a dummy symbol for method types
          val dummy = ownerTpe.typeSymbol.newMethod(name.name.toTermName, ownerTpe.typeSymbol.pos)
          val tpe   = getType(dummy, in.getChar(start + 2).toInt)
          // fix the return type, which is blindly set to the class currently parsed
          val restpe = tpe match {
            case MethodType(formals, _) if name.name == nme.CONSTRUCTOR => MethodType(formals, ownerTpe)
            case _ => tpe
          }
          ((name.name, restpe))
      }
    }

    /** Return the type of a class constant entry. Since
     *  arrays are considered to be class types, they might
     *  appear as entries in 'newarray' or 'cast' opcodes.
     */
    def getClassOrArrayType(index: Int): Type = {
      if (index <= 0 || len <= index) errorBadIndex(index)
      else values(index) match {
        case tp: Type    => tp
        case cls: Symbol => cls.tpe_*
        case _           =>
          val name = getClassName(index)
          name.value.charAt(0) match {
            case ARRAY_TAG => recordAtIndex(sigToType(null, name.value), index)
            case _         => recordAtIndex(classNameToSymbol(name.value), index).tpe_*
          }
      }
    }

    def getType(index: Int): Type              = getType(null, index)
    def getType(sym: Symbol, index: Int): Type = sigToType(sym, getExternalName(index).value)
    def getSuperClassName(index: Int): NameOrString = if (index == 0) null else getClassName(index) // the only classfile that is allowed to have `0` in the super_class is java/lang/Object (see jvm spec)

    /** Decode a loadable constant entry into a Constant; offsets are payload
     *  offsets (start + 1 skips the tag byte).
     */
    private def createConstant(index: Int): Constant = {
      val start = starts(index)
      Constant((in.getByte(start).toInt: @switch) match {
        case CONSTANT_STRING  => getName(in.getChar(start + 1).toInt).value
        case CONSTANT_INTEGER => in.getInt(start + 1)
        case CONSTANT_FLOAT   => in.getFloat(start + 1)
        case CONSTANT_LONG    => in.getLong(start + 1)
        case CONSTANT_DOUBLE  => in.getDouble(start + 1)
        case CONSTANT_CLASS   => getClassOrArrayType(index).typeSymbol.tpe_* // !!! Is this necessary or desirable?
        case _                => errorBadTag(start)
      })
    }
    def getConstant(index: Char): Constant = getConstant(index.toInt)
    def getConstant(index: Int): Constant = (
      if (index <= 0 || len <= index) errorBadIndex(index)
      else values(index) match {
        case const: Constant => const
        case sym: Symbol     => Constant(sym.tpe_*)
        case tpe: Type       => Constant(tpe)
        case _               => recordAtIndex(createConstant(index), index)
      }
    )

    // Decode ByteCodecs-encoded bytes in place and truncate to the decoded length.
    private def getSubArray(bytes: Array[Byte]): Array[Byte] = {
      val decodedLength = ByteCodecs.decode(bytes)
      val arr           = new Array[Byte](decodedLength)
      System.arraycopy(bytes, 0, arr, 0, decodedLength)
      arr
    }

    /**
     * Get an array of bytes stored in the classfile as a string. The data is encoded in the format
     * described in object [[scala.reflect.internal.pickling.ByteCodecs]]. Used for the
     * ScalaSignature annotation argument.
     */
    def getBytes(index: Int): Array[Byte] = {
      if (index <= 0 || len <= index) errorBadIndex(index)
      else values(index) match {
        case xs: Array[Byte] => xs
        case _               =>
          val start = firstExpecting(index, CONSTANT_UTF8)
          val len   = (in getChar start).toInt
          val bytes = new Array[Byte](len)
          in.getBytes(start + 2, bytes)
          recordAtIndex(getSubArray(bytes), index)
      }
    }

    /**
     * Get an array of bytes stored in the classfile as an array of strings. The data is encoded in
     * the format described in object [[scala.reflect.internal.pickling.ByteCodecs]]. Used for the
     * ScalaLongSignature annotation argument.
     */
    def getBytes(indices: List[Int]): Array[Byte] = {
      val head = indices.head
      values(head) match {
        case xs: Array[Byte] => xs
        case _               =>
          // Concatenate the raw UTF8 payloads of all indices, then decode once.
          val arr: Array[Byte] = indices.toArray flatMap { index =>
            if (index <= 0 || ConstantPool.this.len <= index) errorBadIndex(index)
            val start = firstExpecting(index, CONSTANT_UTF8)
            val len   = (in getChar start).toInt
            val s = start + 2
            val result = new Array[Byte](len)
            in.getBytes(s, result)
            result
          }
          recordAtIndex(getSubArray(arr), head)
      }
    }

    /** Throws an exception signaling a bad constant index. */
    protected def errorBadIndex(index: Int) =
      abort(s"bad constant pool index: $index at pos: ${in.bp}")

    /** Throws an exception signaling a bad tag at given address. */
    protected def errorBadTag(start: Int) =
      abort(s"bad constant pool tag ${in.getByte(start)} at byte $start")
  }

  def stubClassSymbol(name: Name): Symbol = {
    // scala/bug#5593 Scaladoc's current strategy is to visit all packages in search of user code that can be documented
    // therefore, it will rummage through the classpath triggering errors whenever it encounters package objects
    // that are not in their correct place (see bug for details)

    // TODO More consistency with use of stub symbols in `Unpickler`
    //   - better owner than `NoSymbol`
    //   - remove eager warning
    val msg = s"Class $name not found - continuing with a stub."
if ((!settings.isScaladoc) && (settings.verbose || settings.developer)) warning(msg) NoSymbol.newStubSymbol(name.toTypeName, msg) } private def lookupClass(name: String) = try { def lookupTopLevel = { if (name contains '.') rootMirror getClassByName name else // FIXME - we shouldn't be doing ad hoc lookups in the empty package, getClassByName should return the class definitions.getMember(rootMirror.EmptyPackageClass, newTypeName(name)) } // For inner classes we usually don't get here: `classNameToSymbol` already returns the symbol // of the inner class based on the InnerClass table. However, if the classfile is missing the // InnerClass entry for `name`, it might still be that there exists an inner symbol (because // some other classfile _does_ have an InnerClass entry for `name`). In this case, we want to // return the actual inner symbol (C.D, with owner C), not the top-level symbol C$D. This is // what the logic below is for (see PR #5822 / scala/bug#9937). val split = if (isScalaRaw) -1 else name.lastIndexOf('$') if (split > 0 && split < name.length) { val outerName = name.substring(0, split) val innerName = name.substring(split + 1, name.length) val outerSym = classNameToSymbol(outerName) // If the outer class C cannot be found, look for a top-level class C$D if (outerSym.isInstanceOf[StubSymbol]) lookupTopLevel else { val innerNameAsName = newTypeName(innerName) // We have a java-defined class name C$D and look for a member D of C. But we don't know if // D is declared static or not, so we have to search both in class C and its companion. 
val r = if (outerSym == clazz) staticScope.lookup(innerNameAsName) orElse instanceScope.lookup(innerNameAsName) else lookupMemberAtTyperPhaseIfPossible(outerSym, innerNameAsName) orElse lookupMemberAtTyperPhaseIfPossible(outerSym.companionModule, innerNameAsName) r orElse lookupTopLevel } } else lookupTopLevel } catch { // The handler // - prevents crashes with deficient InnerClassAttributes (scala/bug#2464, 0ce0ad5) // - was referenced in the bugfix commit for scala/bug#3756 (4fb0d53), not sure why // - covers the case when a type alias in a package object shadows a class symbol, // getClassByName throws a MissingRequirementError (scala-dev#248) case ex: FatalError => // getClassByName can throw a MissingRequirementError (which extends FatalError) // definitions.getMember can throw a FatalError, for example in pos/t5165b if (settings.debug) ex.printStackTrace() stubClassSymbol(newTypeName(name)) } /** Return the class symbol of the given name. */ def classNameToSymbol(name: String): Symbol = { if (innerClasses contains name) innerClasses innerSymbol name else lookupClass(name) } def parseClass(): Unit = { unpickleOrParseInnerClasses() val jflags = readClassFlags() val classNameIndex = u2 currentClass = pool.getClassName(classNameIndex).value // Ensure that (top-level) classfiles are in the correct directory val isTopLevel = !(currentClass contains '$') // Java class name; *don't* try to to use Scala name decoding (scala/bug#7532) if (isTopLevel) { val c = pool.getClassSymbol(classNameIndex) // scala-dev#248: when a type alias (in a package object) shadows a class symbol, getClassSymbol returns a stub // TODO: this also prevents the error when it would be useful (`mv a/C.class .`) if (!c.isInstanceOf[StubSymbol] && c != clazz) mismatchError(c) } if (isScala) { () // We're done } else if (isScalaRaw) { val decls = clazz.enclosingPackage.info.decls for (c <- List(clazz, staticModule, staticModule.moduleClass)) { c.setInfo(NoType) decls.unlink(c) } } else { val sflags 
= jflags.toScalaFlags // includes JAVA addEnclosingTParams(clazz) // Create scopes before calling `enterOwnInnerClasses` instanceScope = newScope staticScope = newScope val staticInfo = ClassInfoType(List(), staticScope, moduleClass) val parentIndex = u2 val parentName = if (parentIndex == 0) null else pool.getClassName(parentIndex) val ifaceCount = u2 val ifaces = for (i <- List.range(0, ifaceCount)) yield pool.getSuperClassName(u2) val completer = new ClassTypeCompleter(clazz.name, jflags, parentName, ifaces) enterOwnInnerClasses() clazz setInfo completer clazz setFlag sflags moduleClass setInfo staticInfo moduleClass setFlag JAVA staticModule setInfo moduleClass.tpe staticModule setFlag JAVA propagatePackageBoundary(jflags, clazz, staticModule, moduleClass) val fieldsStartBp = in.bp skipMembers() // fields skipMembers() // methods parseAttributes(clazz, completer) in.bp = fieldsStartBp 0 until u2 foreach (_ => parseField()) sawPrivateConstructor = false 0 until u2 foreach (_ => parseMethod()) val needsConstructor = ( !sawPrivateConstructor && !(instanceScope containsName nme.CONSTRUCTOR) && ((sflags & INTERFACE) == 0 || (sflags | JAVA_ANNOTATION) != 0) ) if (needsConstructor) instanceScope enter clazz.newClassConstructor(NoPosition) // we could avoid this if we eagerly created class type param symbols here to expose through the // ClassTypeCompleter to satisfy the calls to rawInfo.typeParams from Symbol.typeParams. That would // require a refactor of `sigToType`. // // We would also need to make sure that clazzTParams is populated before member type completers called sig2type. 
clazz.initialize } } /** Add type parameters of enclosing classes */ def addEnclosingTParams(clazz: Symbol): Unit = { var sym = clazz.owner while (sym.isClass && !sym.isModuleClass) { for (t <- sym.tpe.typeArgs) classTParams = classTParams + (t.typeSymbol.name -> t.typeSymbol) sym = sym.owner } } def parseField(): Unit = { val jflags = readFieldFlags() val sflags = jflags.toScalaFlags if ((sflags & PRIVATE) != 0L) { in.skip(4); skipAttributes() } else { val name = readName() val lazyInfo = new MemberTypeCompleter(name, jflags, pool.getExternalName(u2).value) val sym = ownerForFlags(jflags).newValue(name.toTermName, NoPosition, sflags) // Note: the info may be overwritten later with a generic signature // parsed from SignatureATTR sym setInfo { if (jflags.isEnum) ConstantType(Constant(sym)) else lazyInfo } propagatePackageBoundary(jflags, sym) parseAttributes(sym, lazyInfo) addJavaFlagsAnnotations(sym, jflags) getScope(jflags) enter sym // sealed java enums if (jflags.isEnum) { val enumClass = sym.owner.linkedClassOfClass enumClass match { case NoSymbol => devWarning(s"no linked class for java enum $sym in ${sym.owner}. A referencing class file might be missing an InnerClasses entry.") case linked => if (!linked.isSealed) // Marking the enum class SEALED | ABSTRACT enables exhaustiveness checking. See also JavaParsers. // This is a bit of a hack and requires excluding the ABSTRACT flag in the backend, see method javaClassfileFlags. 
              linked setFlag (SEALED | ABSTRACT)
            linked addChild sym
        }
      }
    }
  }

  /** Parse one method_info structure: enter a symbol for it (unless private)
   *  and parse its attributes. Private constructors are still noted via
   *  `sawPrivateConstructor` so parseClass can decide on a synthetic ctor.
   */
  def parseMethod(): Unit = {
    val jflags = readMethodFlags()
    val sflags = jflags.toScalaFlags
    if (jflags.isPrivate) {
      val isConstructor = pool.getName(u2).value == "<init>" // opt avoid interning a Name for private methods we're about to discard
      if (isConstructor)
        sawPrivateConstructor = true
      in.skip(2); skipAttributes()
    } else {
      if ((sflags & PRIVATE) != 0L) {
        in.skip(4); skipAttributes()
      } else {
        val name = readName()
        val sym = ownerForFlags(jflags).newMethod(name.toTermName, NoPosition, sflags)
        // Note: the info may be overwritten later with a generic signature
        // parsed from SignatureATTR
        val lazyInfo = new MemberTypeCompleter(name, jflags, pool.getExternalName(u2).value)
        sym.info = lazyInfo
        propagatePackageBoundary(jflags, sym)
        parseAttributes(sym, lazyInfo)
        addJavaFlagsAnnotations(sym, jflags)
        getScope(jflags) enter sym
      }
    }
  }

  /** Convert a JVM (generic) signature string `sig` into a Type. `sym` is the
   *  symbol the signature belongs to (a method symbol for method signatures,
   *  a class symbol for class signatures, or null for standalone type sigs);
   *  it owns the value/type parameters created here. Parsing is driven by a
   *  mutable cursor `index` over `sig`.
   */
  private def sigToType(sym: Symbol, sig: String): Type = {
    val sigChars = sig.toCharArray
    var index = 0
    val end = sig.length
    // Consume `ch` at the cursor or fail the assertion.
    def accept(ch: Char): Unit = {
      assert(sig.charAt(index) == ch, (sig.charAt(index), ch))
      index += 1
    }
    // Consume and return characters up to (excluding) the first delimiter.
    def subName(isDelimiter: Char => Boolean): String = {
      val start = index
      while (!isDelimiter(sig.charAt(index))) { index += 1 }
      new String(sigChars, start, index - start)
    }
    // Parse one type at the cursor. `skiptvs` replaces type variables with Any
    // (used during the first pass over type-parameter bounds).
    def sig2type(tparams: immutable.Map[Name,Symbol], skiptvs: Boolean): Type = {
      val tag = sig.charAt(index); index += 1
      tag match {
        case BYTE_TAG   => ByteTpe
        case CHAR_TAG   => CharTpe
        case DOUBLE_TAG => DoubleTpe
        case FLOAT_TAG  => FloatTpe
        case INT_TAG    => IntTpe
        case LONG_TAG   => LongTpe
        case SHORT_TAG  => ShortTpe
        case VOID_TAG   => UnitTpe
        case BOOL_TAG   => BooleanTpe
        case 'L' =>
          // Rebuild the prefix chain for non-static inner classes.
          def processInner(tp: Type): Type = tp match {
            case TypeRef(pre, sym, args) if (!sym.isStatic) =>
              typeRef(processInner(pre.widen), sym, args)
            case _ =>
              tp
          }
          // Parse the (possibly wildcard) type arguments following a class name.
          def processClassType(tp: Type): Type = tp match {
            case TypeRef(pre, classSym, args) =>
              val existentials = new ListBuffer[Symbol]()
              if (sig.charAt(index) == '<') {
                accept('<')
                val xs = new ListBuffer[Type]()
                var i = 0
                while (sig.charAt(index) != '>') {
                  sig.charAt(index) match {
                    case variance @ ('+' | '-' | '*') =>
                      // Wildcard argument: represent it as a fresh existential with the parsed bounds.
                      index += 1
                      val bounds = variance match {
                        case '+' => TypeBounds.upper(sig2type(tparams, skiptvs))
                        case '-' =>
                          val tp = sig2type(tparams, skiptvs)
                          // Interpret `sig2type` returning `Any` as "no bounds";
                          // morally equivalent to TypeBounds.empty, but we're representing Java code, so use ObjectTpeJava for AnyTpe.
                          if (tp.typeSymbol == AnyClass) TypeBounds.upper(definitions.ObjectTpeJava)
                          else TypeBounds(tp, definitions.ObjectTpeJava)
                        case '*' => TypeBounds.upper(definitions.ObjectTpeJava)
                      }
                      val newtparam = sym.newExistential(newTypeName("?"+i), sym.pos) setInfo bounds
                      existentials += newtparam
                      xs += newtparam.tpeHK
                      i += 1
                    case _ =>
                      xs += sig2type(tparams, skiptvs)
                  }
                }
                accept('>')
                assert(xs.length > 0, tp)
                debuglogResult("new existential")(newExistentialType(existentials.toList, typeRef(pre, classSym, xs.toList)))
              }
              // isMonomorphicType is false if the info is incomplete, as it usually is here
              // so have to check unsafeTypeParams.isEmpty before worrying about raw type case below,
              // or we'll create a boatload of needless existentials.
              else if (classSym.isMonomorphicType || classSym.unsafeTypeParams.isEmpty) tp
              else debuglogResult(s"raw type from $classSym") {
                // raw type - existentially quantify all type parameters
                classExistentialType(pre, classSym)
              }
            case tp =>
              assert(sig.charAt(index) != '<', s"sig=$sig, index=$index, tp=$tp")
              tp
          }

          val classSym = classNameToSymbol(subName(c => c == ';' || c == '<'))
          assert(!classSym.isOverloaded, classSym.alternatives)
          val classTpe = if (classSym eq ObjectClass) ObjectTpeJava else classSym.tpe_*
          var tpe = processClassType(processInner(classTpe))
          // '.'-separated suffixes select nested classes of the parsed class.
          while (sig.charAt(index) == '.') {
            accept('.')
            val name = newTypeName(subName(c => c == ';' || c == '<' || c == '.'))
            val clazz = tpe.member(name)
            val dummyArgs = Nil // the actual arguments are added in processClassType
            val inner = typeRef(pre = tpe, sym = clazz, args = dummyArgs)
            tpe = processClassType(inner)
          }
          accept(';')
          tpe
        case ARRAY_TAG =>
          // skip a leading size (produced by some obfuscators); it carries no type info
          while ('0' <= sig.charAt(index) && sig.charAt(index) <= '9') index += 1
          var elemtp = sig2type(tparams, skiptvs)
          // make unbounded Array[T] where T is a type variable into Array[T with Object]
          // (this is necessary because such arrays have a representation which is incompatible
          // with arrays of primitive types.
          // see also RestrictJavaArraysMap (when compiling java sources directly)
          if (elemtp.typeSymbol.isAbstractType && elemtp.upperBound =:= ObjectTpe) {
            elemtp = intersectionType(List(elemtp, ObjectTpe))
          }
          arrayType(elemtp)
        case '(' =>
          // we need a method symbol. given in line 486 by calling getType(methodSym, ..)
          assert(sym ne null, sig)
          val paramtypes = new ListBuffer[Type]()
          while (sig.charAt(index) != ')') {
            paramtypes += sig2type(tparams, skiptvs)
          }
          index += 1
          val restype = if (sym != null && sym.isClassConstructor) {
            accept('V')
            clazz.tpe_*
          }
          else
            sig2type(tparams, skiptvs)
          MethodType(sym.newSyntheticValueParams(paramtypes.toList), restype)
        case 'T' =>
          val n = newTypeName(subName(';'.==))
          index += 1
          if (skiptvs) AnyTpe
          else tparams(n).typeConstructor
      }
    } // sig2type(tparams, skiptvs)

    // Parse the ':'-separated class/interface bounds of one type parameter.
    def sig2typeBounds(tparams: immutable.Map[Name, Symbol], skiptvs: Boolean): Type = {
      val ts = new ListBuffer[Type]
      while (sig.charAt(index) == ':') {
        index += 1
        if (sig.charAt(index) != ':') // guard against empty class bound
          ts += sig2type(tparams, skiptvs)
      }
      TypeBounds.upper(intersectionType(ts.toList, sym))
    }

    var tparams = classTParams
    val newTParams = new ListBuffer[Symbol]()
    if (sig.charAt(index) == '<') {
      assert(sym != null, sig)
      index += 1
      // Two passes over the type-parameter section: the first creates all the
      // symbols (skipping type variables in bounds so forward references work),
      // the second re-parses the bounds with the full tparams map in scope.
      val start = index
      while (sig.charAt(index) != '>') {
        val tpname = newTypeName(subName(':'.==))
        val s = sym.newTypeParameter(tpname)
        tparams = tparams + (tpname -> s)
        sig2typeBounds(tparams, skiptvs = true)
        newTParams += s
      }
      index = start
      while (sig.charAt(index) != '>') {
        val tpname = newTypeName(subName(':'.==))
        val s = tparams(tpname)
        s.setInfo(sig2typeBounds(tparams, skiptvs = false))
      }
      accept('>')
    }
    val ownTypeParams = newTParams.toList
    if (!ownTypeParams.isEmpty)
      sym.setInfo(new TypeParamsType(ownTypeParams))
    val tpe =
      if ((sym eq null) || !sym.isClass)
        sig2type(tparams, skiptvs = false)
      else {
        // Class signature: the remainder of `sig` is the list of parent types.
        classTParams = tparams
        val parents = new ListBuffer[Type]()
        while (index < end) {
          val parent = sig2type(tparams, skiptvs = false) // here the variance doesn't matter
          parents += (if (parent == ObjectTpeJava) ObjectTpe else parent)
        }
        ClassInfoType(parents.toList, instanceScope, sym)
      }
    GenPolyType(ownTypeParams, tpe)
  } // sigToType

  /**
   * Only invoked for java classfiles.
   */
  private def parseAttributes(sym: symbolTable.Symbol, completer: JavaTypeCompleter): Unit = {
    // Parse a single attribute. Every branch must consume exactly `attrLen`
    // bytes (either by decoding them or via in.skip) to keep the reader aligned.
    def parseAttribute(): Unit = {
      val attrName = readTypeName()
      val attrLen = u4
      attrName match {
        case tpnme.SignatureATTR =>
          val sigIndex = u2
          val sig = pool.getExternalName(sigIndex)
          assert(sym.rawInfo == completer, sym)
          completer.sig = sig.value
        case tpnme.SyntheticATTR =>
          sym.setFlag(SYNTHETIC | ARTIFACT)
          in.skip(attrLen)
        case tpnme.BridgeATTR =>
          sym.setFlag(BRIDGE | ARTIFACT)
          in.skip(attrLen)
        case tpnme.DeprecatedATTR =>
          val arg = Literal(Constant("see corresponding Javadoc for more information."))
          sym.addAnnotation(DeprecatedAttr, arg, Literal(Constant("")))
          in.skip(attrLen)
        case tpnme.ConstantValueATTR =>
          completer.constant = pool.getConstant(u2)
        case tpnme.MethodParametersATTR =>
          def readParamNames(): Unit = {
            val paramCount = u1
            val paramNames = new Array[NameOrString](paramCount)
            val paramNameAccess = new Array[Int](paramCount)
            var i = 0
            while (i < paramCount) {
              paramNames(i) = pool.getExternalName(u2)
              paramNameAccess(i) = u2
              i += 1
            }
            completer.paramNames = new ParamNames(paramNames, paramNameAccess)
          }
          readParamNames()
        case tpnme.AnnotationDefaultATTR => // Methods of java annotation classes that have a default
          sym.addAnnotation(AnnotationDefaultAttr)
          in.skip(attrLen)
        case tpnme.RuntimeAnnotationATTR =>
          val numAnnots = u2
          for (n <- 0 until numAnnots; annot <- parseAnnotation(u2))
            sym.addAnnotation(annot)

        // TODO 1: parse runtime visible annotations on parameters
        // case tpnme.RuntimeParamAnnotationATTR

        // TODO 2: also parse RuntimeInvisibleAnnotation / RuntimeInvisibleParamAnnotation,
        // i.e. java annotations with RetentionPolicy.CLASS?

        case tpnme.ExceptionsATTR =>
          parseExceptions(attrLen, completer)

        case tpnme.SourceFileATTR =>
          /*
          if (forInteractive) {
            // opt: disable this code in the batch compiler for performance reasons.
            // it appears to be looking for the .java source file mentioned in this attribute
            // in the output directories of scalac.
            //
            // References:
            // https://issues.scala-lang.org/browse/SI-2689
            // https://github.com/scala/scala/commit/7315339782f6e19ddd6199768352a91ef66eb27d
            // https://github.com/scala-ide/scala-ide/commit/786ea5d4dc44065379a05eb3ac65d37f8948c05d
            //
            // TODO: Does Scala-IDE actually intermingle source and classfiles in a way that this could ever find something?
            // If they want to use this, they'll need to enable the new setting -Ypresentation-locate-source-file.
            val srcfileLeaf = readName().toString.trim
            val srcpath = sym.enclosingPackage match {
              case NoSymbol => srcfileLeaf
              case rootMirror.EmptyPackage => srcfileLeaf
              case pkg => pkg.fullName(File.separatorChar)+File.separator+srcfileLeaf
            }
            srcfile0 = settings.outputDirs.srcFilesFor(file, srcpath).find(_.exists)
          } else in.skip(attrLen)
          */
          in.skip(attrLen)

        case tpnme.CodeATTR =>
          // A Code attribute on an interface member means a JDK8+ default method.
          if (sym.owner.isInterface) {
            sym setFlag JAVA_DEFAULTMETHOD
            log(s"$sym in ${sym.owner} is a java8+ default method.")
          }
          in.skip(attrLen)

        case _ =>
          in.skip(attrLen)
      }
    }

    /*
     * Parse the "Exceptions" attribute which denotes the exceptions
     * thrown by a method.
     */
    def parseExceptions(len: Int, completer: JavaTypeCompleter): Unit = {
      val nClasses = u2
      for (n <- 0 until nClasses) {
        // FIXME: this performs an equivalent of getExceptionTypes instead of getGenericExceptionTypes (scala/bug#7065)
        val cls = pool.getClassName(u2)
        completer.exceptions ::= cls
      }
    }

    // begin parseAttributes
    for (i <- 0 until u2) parseAttribute()
  }

  /** Parse one element_value of a runtime annotation; returns None when the
   *  value (or any nested value) is malformed or unresolvable.
   */
  def parseAnnotArg(): Option[ClassfileAnnotArg] = {
    val tag = u1
    // For ARRAY_TAG this u2 is the element count, not a pool index.
    val index = u2
    tag match {
      case STRING_TAG =>
        Some(LiteralAnnotArg(Constant(pool.getName(index).value)))
      case BOOL_TAG | BYTE_TAG | CHAR_TAG | SHORT_TAG | INT_TAG | LONG_TAG | FLOAT_TAG | DOUBLE_TAG =>
        Some(LiteralAnnotArg(pool.getConstant(index)))
      case CLASS_TAG  =>
        Some(LiteralAnnotArg(Constant(pool.getType(index))))
      case ENUM_TAG   =>
        val t = pool.getType(index)
        val n = readName()
        val module = t.typeSymbol.companionModule
        val s = module.info.decls.lookup(n)
        if (s != NoSymbol) Some(LiteralAnnotArg(Constant(s)))
        else {
          warning(
            sm"""While parsing annotations in ${file}, could not find $n in enum ${module.nameString}.
                |This is likely due to an implementation restriction: an annotation argument cannot refer to a member of the annotated class (scala/bug#7014).""")
          None
        }

      case ARRAY_TAG  =>
        val arr = new ArrayBuffer[ClassfileAnnotArg]()
        var hasError = false
        for (i <- 0 until index)
          parseAnnotArg() match {
            case Some(c) => arr += c
            case None => hasError = true
          }
        if (hasError) None
        else Some(ArrayAnnotArg(arr.toArray))
      case ANNOTATION_TAG =>
        parseAnnotation(index) map (NestedAnnotArg(_))
    }
  }

  // TODO scala/bug#9296 duplicated code, refactor
  /**
   * Parse and return a single annotation. If it is malformed, return None.
*/ def parseAnnotation(attrNameIndex: Int): Option[AnnotationInfo] = try { val attrType = pool.getType(attrNameIndex) val nargs = u2 val nvpairs = new ListBuffer[(Name, ClassfileAnnotArg)] var hasError = false for (i <- 0 until nargs) { val name = readName() parseAnnotArg() match { case Some(c) => nvpairs += ((name, c)) case None => hasError = true } } if (hasError) None else Some(AnnotationInfo(attrType, List(), nvpairs.toList)) } catch { case f: FatalError => throw f // don't eat fatal errors, they mean a class was not found case NonFatal(ex) => // We want to be robust when annotations are unavailable, so the very least // we can do is warn the user about the exception // There was a reference to ticket 1135, but that is outdated: a reference to a class not on // the classpath would *not* end up here. A class not found is signaled // with a `FatalError` exception, handled above. Here you'd end up after a NPE (for example), // and that should never be swallowed silently. warning(s"Caught: $ex while parsing annotations in ${file}") if (settings.debug) ex.printStackTrace() None // ignore malformed annotations } /** Apply `@native`/`@transient`/`@volatile` annotations to `sym`, * if the corresponding flag is set in `flags`. */ def addJavaFlagsAnnotations(sym: Symbol, flags: JavaAccFlags): Unit = flags.toScalaAnnotations(symbolTable) foreach (ann => sym.addAnnotation(ann)) /** Enter own inner classes in the right scope. It needs the scopes to be set up, * and implicitly current class' superclasses. 
*/ private def enterOwnInnerClasses(): Unit = { def className(name: String): String = name.substring(name.lastIndexOf('.') + 1, name.length) def enterClassAndModule(entry: InnerClassEntry, file: AbstractFile): Unit = { def jflags = entry.jflags val name = entry.originalName val sflags = jflags.toScalaFlags val owner = ownerForFlags(jflags) val scope = getScope(jflags) def newStub(name: Name) = { val stub = owner.newStubSymbol(name, s"Class file for ${entry.externalName} not found") stub.setPos(owner.pos) stub.setFlag(JAVA) } val (innerClass, innerModule) = if (file == NoAbstractFile) { (newStub(name.toTypeName), newStub(name.toTermName)) } else { val cls = owner.newClass(name.toTypeName, NoPosition, sflags) val mod = owner.newModule(name.toTermName, NoPosition, sflags) val completer = new loaders.ClassfileLoader(file, cls, mod) cls setInfo completer mod setInfo completer mod.moduleClass setInfo loaders.moduleClassLoader cls.associatedFile = file mod.moduleClass.associatedFile = file /** * need to set privateWithin here because the classfile of a nested protected class is public in bytecode, * so propagatePackageBoundary will not set it when the symbols are completed */ if (jflags.isProtected) { cls.privateWithin = cls.enclosingPackage mod.privateWithin = cls.enclosingPackage } (cls, mod) } scope enter innerClass scope enter innerModule val decls = innerClass.enclosingPackage.info.decls def unlinkIfPresent(name: Name) = { val e = decls lookupEntry name if (e ne null) decls unlink e } val cName = newTermName(className(entry.externalName)) unlinkIfPresent(cName) unlinkIfPresent(cName.toTypeName) } for (entry <- innerClasses.entries) { // create a new class member for immediate inner classes if (entry.outerName == currentClass) { val file = classPath.findClassFile(entry.externalName.toString) enterClassAndModule(entry, file.getOrElse(NoAbstractFile)) } } } /** * Either * - set `isScala` and invoke the unpickler, or * - set `isScalaRaw`, or * - parse inner classes (for 
Java classfiles) * * Expects `in.bp` to point to the `access_flags` entry, restores the old `bp`. */ def unpickleOrParseInnerClasses(): Unit = { val oldbp = in.bp in.skip(4) // access_flags, this_class skipSuperclasses() skipMembers() // fields skipMembers() // methods var innersStart = -1 var runtimeAnnotStart = -1 val numAttrs = u2 var i = 0 while (i < numAttrs) { val attrName = readTypeName() val attrLen = u4 attrName match { case tpnme.ScalaSignatureATTR => isScala = true if (runtimeAnnotStart != -1) i = numAttrs case tpnme.ScalaATTR => isScalaRaw = true i = numAttrs case tpnme.InnerClassesATTR => innersStart = in.bp case tpnme.RuntimeAnnotationATTR => runtimeAnnotStart = in.bp if (isScala) i = numAttrs case _ => } in.skip(attrLen) i += 1 } if (isScala) { def parseScalaSigBytes(): Array[Byte] = { val tag = u1 assert(tag == STRING_TAG, tag) pool.getBytes(u2) } def parseScalaLongSigBytes(): Array[Byte] = { val tag = u1 assert(tag == ARRAY_TAG, tag) val stringCount = u2 val entries = for (i <- 0 until stringCount) yield { val stag = u1 assert(stag == STRING_TAG, stag) u2 } pool.getBytes(entries.toList) } def checkScalaSigAnnotArg() = { val numArgs = u2 assert(numArgs == 1, s"ScalaSignature has $numArgs arguments") val name = readName() assert(name == nme.bytes, s"ScalaSignature argument has name $name") } def skipAnnotArg(): Unit = u1 match { case STRING_TAG | BOOL_TAG | BYTE_TAG | CHAR_TAG | SHORT_TAG | INT_TAG | LONG_TAG | FLOAT_TAG | DOUBLE_TAG | CLASS_TAG => in.skip(2) case ENUM_TAG => in.skip(4) case ARRAY_TAG => val num = u2 for (i <- 0 until num) skipAnnotArg() case ANNOTATION_TAG => in.skip(2) // type skipAnnotArgs() } def skipAnnotArgs() = { val numArgs = u2 for (i <- 0 until numArgs) { in.skip(2) skipAnnotArg() } } val SigTpe = ScalaSignatureAnnotation.tpe val LongSigTpe = ScalaLongSignatureAnnotation.tpe assert(runtimeAnnotStart != -1, s"No RuntimeVisibleAnnotations in classfile with ScalaSignature attribute: $clazz") in.bp = runtimeAnnotStart val 
numAnnots = u2 var i = 0 var bytes: Array[Byte] = null while (i < numAnnots && bytes == null) { pool.getType(u2) match { case SigTpe => checkScalaSigAnnotArg() bytes = parseScalaSigBytes() case LongSigTpe => checkScalaSigAnnotArg() bytes = parseScalaLongSigBytes() case t => skipAnnotArgs() } i += 1 } AnyRefClass // Force scala.AnyRef, otherwise we get "error: Symbol AnyRef is missing from the classpath" assert(bytes != null, s"No Scala(Long)Signature annotation in classfile with ScalaSignature attribute: $clazz") unpickler.unpickle(bytes, 0, clazz, staticModule, file.name) } else if (!isScalaRaw && innersStart != -1) { in.bp = innersStart val entries = u2 for (i <- 0 until entries) { val innerIndex, outerIndex, nameIndex = u2 val jflags = readInnerClassFlags() if (innerIndex != 0 && outerIndex != 0 && nameIndex != 0) innerClasses add InnerClassEntry(pool.getClassName(innerIndex), pool.getClassName(outerIndex), pool.getName(nameIndex), jflags) } } in.bp = oldbp } /** An entry in the InnerClasses attribute of this class file. */ case class InnerClassEntry(external: NameOrString, outer: NameOrString, name: NameOrString, jflags: JavaAccFlags) { def externalName = external.value def outerName = outer.value def originalName = name.name def isModule = originalName.isTermName def scope = if (jflags.isStatic) staticScope else instanceScope def enclosing = if (jflags.isStatic) enclModule else enclClass // The name of the outer class, without its trailing $ if it has one. private def strippedOuter = outerName.stripSuffix(nme.MODULE_SUFFIX_STRING) private def isInner = innerClasses contains strippedOuter private def enclClass = if (isInner) innerClasses innerSymbol strippedOuter else classNameToSymbol(strippedOuter) private def enclModule = enclClass.companionModule } /** Return the class symbol for the given name. It looks it up in its outer class. * Forces all outer class symbols to be completed. 
* * If the given name is not an inner class, it returns the symbol found in `definitions`. */ object innerClasses { private val inners = mutable.HashMap[String, InnerClassEntry]() def contains(name: String) = inners contains name def getEntry(name: String) = inners get name def entries = inners.values def add(entry: InnerClassEntry): Unit = { devWarningIf(inners contains entry.externalName) { val existing = inners(entry.externalName) s"Overwriting inner class entry! Was $existing, now $entry" } inners(entry.externalName) = entry } def innerSymbol(externalName: String): Symbol = this getEntry externalName match { case Some(entry) => innerSymbol(entry) case _ => NoSymbol } private def innerSymbol(entry: InnerClassEntry): Symbol = { val name = entry.originalName.toTypeName val enclosing = entry.enclosing val member = { if (enclosing == clazz) entry.scope lookup name else lookupMemberAtTyperPhaseIfPossible(enclosing, name) } def newStub = { enclosing .newStubSymbol(name, s"Unable to locate class corresponding to inner class entry for $name in owner ${entry.outerName}") .setPos(enclosing.pos) } member.orElse(newStub) } } class TypeParamsType(override val typeParams: List[Symbol]) extends LazyType with FlagAgnosticCompleter { override def complete(sym: Symbol): Unit = { throw new AssertionError("cyclic type dereferencing") } } class LazyAliasType(alias: Symbol) extends LazyType with FlagAgnosticCompleter { override def complete(sym: Symbol): Unit = { sym setInfo createFromClonedSymbols(alias.initialize.typeParams, alias.tpe)(typeFun) } } private class ParamNames(val names: Array[NameOrString], val access: Array[Int]) { assert(names.length == access.length, "Require as many names as access") def length = names.length } private abstract class JavaTypeCompleter extends LazyType { var constant: Constant = _ var sig: String = _ var paramNames: ParamNames = _ var exceptions: List[NameOrString] = Nil } private final class ClassTypeCompleter(name: Name, jflags: JavaAccFlags, 
parent: NameOrString, ifaces: List[NameOrString]) extends JavaTypeCompleter { override def complete(sym: symbolTable.Symbol): Unit = { val info = if (sig != null) sigToType(sym, sig) else { val superTpe = if (parent == null) definitions.AnyClass.tpe_* else getClassSymbol(parent.value).tpe_* val superTpe1 = if (superTpe == ObjectTpeJava) ObjectTpe else superTpe val ifacesTypes = ifaces.filterNot(_ eq null).map(x => getClassSymbol(x.value).tpe_*) ClassInfoType(superTpe1 :: ifacesTypes, instanceScope, clazz) } sym.setInfo(info) } } private final class MemberTypeCompleter(name: Name, jflags: JavaAccFlags, descriptor: String) extends JavaTypeCompleter { override def isJavaVarargsMethod: Boolean = jflags.isVarargs override def javaThrownExceptions: List[Symbol] = exceptions.map(e => classNameToSymbol(e.value)) override def complete(sym: symbolTable.Symbol): Unit = { def descriptorInfo = sigToType(sym, descriptor) val hasOuterParam = (name == nme.CONSTRUCTOR) && (descriptorInfo match { case MethodType(params, restpe) => // if this is a non-static inner class, remove the explicit outer parameter innerClasses getEntry currentClass match { case Some(entry) if !entry.jflags.isStatic => /* About `clazz.owner.hasPackageFlag` below: scala/bug#5957 * For every nested java class A$B, there are two symbols in the scala compiler. * 1. created by SymbolLoader, because of the existence of the A$B.class file, owner: package * 2. created by ClassfileParser of A when reading the inner classes, owner: A * If symbol 1 gets completed (e.g. because the compiled source mentions `A$B`, not `A#B`), the * ClassfileParser for 1 executes, and clazz.owner is the package. 
*/ assert(params.head.tpe.typeSymbol == clazz.owner || clazz.owner.hasPackageFlag, "" + params.head.tpe.typeSymbol + ": " + clazz.owner) true case _ => false } case _ => false }) val info = if (sig != null) { sigToType(sym, sig) } else if (name == nme.CONSTRUCTOR) { descriptorInfo match { case MethodType(params, restpe) => val paramsNoOuter = if (hasOuterParam) params.tail else params val newParams = paramsNoOuter match { case (init :+ tail) if jflags.isSynthetic => // scala/bug#7455 strip trailing dummy argument ("access constructor tag") from synthetic constructors which // are added when an inner class needs to access a private constructor. init case _ => paramsNoOuter } MethodType(newParams, clazz.tpe) case info => info } } else { descriptorInfo } if (constant != null) { val c1 = convertTo(constant, info.resultType) if (c1 ne null) sym.setInfo(ConstantType(c1)) else { devWarning(s"failure to convert $constant to ${info.resultType}") sym.setInfo(info) } } else { sym.setInfo(if (sym.isMethod && jflags.isVarargs) arrayToRepeated(info) else info) } for (e <- exceptions) { // we call initialize due to the fact that we call Symbol.isMonomorphicType in addThrowsAnnotation // and that method requires Symbol to be forced to give the right answers, see scala/bug#7107 for details val cls = getClassSymbol(e.value) sym withAnnotation AnnotationInfo.lazily { val throwableTpe = cls.tpe_* AnnotationInfo(appliedType(ThrowsClass, throwableTpe), List(Literal(Constant(throwableTpe))), Nil) } } // Note: the info may be overwritten later with a generic signature // parsed from SignatureATTR if (paramNames != null) { import scala.tools.asm.Opcodes.ACC_SYNTHETIC if (sym.hasRawInfo && sym.isMethod) { val paramNamesNoOuter = (if (hasOuterParam) 1 else 0) to paramNames.length val params = sym.rawInfo.params foreach2(paramNamesNoOuter.toList, params) { case (i, param) => val isSynthetic = (paramNames.access(i) & ACC_SYNTHETIC) != 0 if (!isSynthetic) { param.name = 
paramNames.names(i).name.toTermName.encode param.resetFlag(SYNTHETIC) } } // there's not anything we can do, but it's slightly worrisome devWarningIf(!sameLength(paramNamesNoOuter.toList, params)) { sm"""MethodParameters length mismatch while parsing $sym: | rawInfo.params: ${sym.rawInfo.params}""" } } } } private def convertTo(c: Constant, pt: Type): Constant = { if (pt.typeSymbol == BooleanClass && c.tag == IntTag) Constant(c.value != 0) else c convertTo pt } } def skipAttributes(): Unit = { var attrCount: Int = u2 while (attrCount > 0) { in skip 2 in skip u4 attrCount -= 1 } } def skipMembers(): Unit = { var memberCount: Int = u2 while (memberCount > 0) { in skip 6 skipAttributes() memberCount -= 1 } } def skipSuperclasses(): Unit = { in.skip(2) // superclass val ifaces = u2 in.skip(2 * ifaces) } protected def getScope(flags: JavaAccFlags): Scope = if (flags.isStatic) staticScope else instanceScope }
martijnhoekstra/scala
src/compiler/scala/tools/nsc/symtab/classfile/ClassfileParser.scala
Scala
apache-2.0
56,118
package edu.gemini.phase2.skeleton.auxfile import java.io.File object FileError { def apply(file: File, msg: String): FileError = FileError(file, new RuntimeException(msg)) } case class FileError(file: File, exception: Exception)
spakzad/ocs
bundle/edu.gemini.phase2.skeleton.servlet/src/main/scala/edu/gemini/phase2/skeleton/auxfile/FileError.scala
Scala
bsd-3-clause
238
// tells Scala where to find the TMT classes import scalanlp.io._; import scalanlp.stage._; import scalanlp.stage.text._; import scalanlp.text.tokenize._; import scalanlp.pipes.Pipes.global._; import edu.stanford.nlp.tmt.stage._; import edu.stanford.nlp.tmt.model.lda._; if ( args.length < 2 ) { System.err.println( "Arguments: inputFile outputPath [numTopics] [numIters] [termSmoothing] [topicSmoothing]" ); System.err.println( " inputFile: tab-delimited file containing the training corpus" ); System.err.println( " (first column = docID, second column = text)" ); System.err.println( " outputPath: path for saving output model data" ); System.err.println( " numOfTopics: number of topics to train [default=20]" ); System.err.println( " maxIters: number of iterations to execute [default=1000]" ); System.err.println( " termSmoothing: [default=0.01]" ); System.err.println( "topicSmoothing: [default=0.01]" ); System.exit( -1 ); } val inputFile = args(0); val outputPath = args(1); val indexColumn = 1; val textColumn = 2; val numOfTopics = if ( args.length > 2 ) { args(2).toInt } else { 20 }; val maxIters = if ( args.length > 3 ) { args(3).toInt } else { 1000 }; val termSmoothing = if ( args.length > 4 ) { args(4).toDouble } else { 0.01 }; val topicSmoothing = if ( args.length > 5 ) { args(5).toDouble } else { 0.01 }; System.err.println( "LDA Learning Parameters..." ); System.err.println( " inputFile = " + inputFile ); System.err.println( " outputPath = " + outputPath ); System.err.println( " numOfTopics = " + numOfTopics ); System.err.println( " maxIters = " + maxIters ); System.err.println( " termSmoothing = " + termSmoothing ); System.err.println( "topicSmoothing = " + topicSmoothing ); System.err.println(); val alphabetsOnly = { RegexSearchTokenizer( "[0-9A-Za-z_]*[A-Za-z_]+[0-9A-Za-z_]*" ) ~> // keep tokens with alphabets CaseFolder() ~> // fold to lower case StopWordFilter( "en" ) // remove common English words } System.err.println( "Loading source text..." 
); val source = TSVFile( inputFile ) ~> IDColumn( indexColumn ); val text = source ~> Column( textColumn ) ~> TokenizeWith( alphabetsOnly ) ~> TermCounter(); System.err.println( "Defining dataset and model..." ); val dataset = LDADataset( text ); val modelParams = LDAModelParams( numTopics=numOfTopics, dataset=dataset, topicSmoothing=topicSmoothing, termSmoothing=termSmoothing ); val modelPath = file( outputPath ); System.err.println( "Learning LDA topics..." ); val model = TrainCVB0LDA( modelParams, dataset, output=modelPath, maxIterations=maxIters ); val perDocTopicDistributions = InferCVB0DocumentTopicDistributions( model, dataset ); System.err.println( "Writing term counts to disk..." ); val termCounts = text.meta[ TermCounts ]; CSVFile( file( outputPath + "/term-counts.csv" ) ).write( { for ( term <- termCounts.index.iterator ) yield ( term, termCounts.getTF( term ), termCounts.getDF( term ) ) } ); //System.err.println( "Writing topics per doc..." ) //CSVFile( file( outputPath + "/topics-per-doc.csv" ) ).write( perDocTopicDistributions );
StanfordHCI/termite
pipeline/stmt/lda-learn.scala
Scala
bsd-3-clause
3,232
package com.ignition.frame.mllib import scala.xml.{ Elem, Node } import org.apache.spark.mllib.stat.Statistics import org.apache.spark.rdd.RDD.rddToPairRDDFunctions import org.apache.spark.sql.{ DataFrame, Row } import org.apache.spark.sql.types.StructType import org.json4s.JValue import org.json4s.JsonDSL._ import org.json4s.jvalue2monadic import com.ignition.frame.{ FrameTransformer, SparkRuntime } import com.ignition.types.double import com.ignition.util.JsonUtils.RichJValue import com.ignition.util.XmlUtils.RichNodeSeq import CorrelationMethod.{ CorrelationMethod, PEARSON } /** * Correlation methods. */ object CorrelationMethod extends Enumeration { type CorrelationMethod = Value val PEARSON = Value("pearson") val SPEARMAN = Value("spearman") } /** * Computes the correlation between data series using MLLib library. * * @author Vlad Orzhekhovskiy */ case class Correlation(dataFields: Iterable[String], groupFields: Iterable[String] = Nil, method: CorrelationMethod = PEARSON) extends FrameTransformer with MLFunctions { import Correlation._ def add(fields: String*) = copy(dataFields = dataFields ++ fields) def %(fields: String*) = add(fields: _*) def groupBy(fields: String*) = copy(groupFields = fields) def method(method: CorrelationMethod) = copy(method = method) protected def compute(arg: DataFrame)(implicit runtime: SparkRuntime): DataFrame = { val df = optLimit(arg, runtime.previewMode) val rdd = toVectors(df, dataFields, groupFields) rdd.persist val keys = rdd.keys.distinct.collect val rows = keys map { key => val slice = rdd filter (_._1 == key) values val matrix = Statistics.corr(slice) val data = for { rowIdx <- 0 until dataFields.size colIdx <- rowIdx + 1 until dataFields.size } yield matrix(rowIdx, colIdx) Row.fromSeq(key.toSeq ++ data) } val targetRDD = ctx.sparkContext.parallelize(rows) val fieldSeq = dataFields.toSeq val targetFields = (groupFields map df.schema.apply toSeq) ++ (for { rowIdx <- 0 until dataFields.size colIdx <- rowIdx + 1 until 
dataFields.size } yield double(s"corr_${fieldSeq(rowIdx)}_${fieldSeq(colIdx)}")) val schema = StructType(targetFields) ctx.createDataFrame(targetRDD, schema) } def toXml: Elem = <node method={ method.toString }> <aggregate> { dataFields map { name => <field name={ name }/> } } </aggregate> { if (!groupFields.isEmpty) <group-by> { groupFields map (f => <field name={ f }/>) } </group-by> } </node>.copy(label = tag) def toJson: org.json4s.JValue = { val groupBy = if (groupFields.isEmpty) None else Some(groupFields) val aggregate = dataFields map (_.toString) ("tag" -> tag) ~ ("method" -> method.toString) ~ ("groupBy" -> groupBy) ~ ("aggregate" -> aggregate) } } /** * Correlation companion object. */ object Correlation { val tag = "correlation" def apply(dataFields: String*): Correlation = apply(dataFields, Nil) def fromXml(xml: Node) = { val dataFields = (xml \\ "aggregate" \\ "field") map { _ \\ "@name" asString } val groupFields = (xml \\ "group-by" \\ "field") map (_ \\ "@name" asString) val method = CorrelationMethod.withName(xml \\ "@method" asString) apply(dataFields, groupFields, method) } def fromJson(json: JValue) = { val dataFields = (json \\ "aggregate" asArray) map (_ asString) val groupFields = (json \\ "groupBy" asArray) map (_ asString) val method = CorrelationMethod.withName(json \\ "method" asString) apply(dataFields, groupFields, method) } }
uralian/ignition
src/main/scala/com/ignition/frame/mllib/Correlation.scala
Scala
apache-2.0
3,699
package com.ftchinese.jobs.common /** * Task case class * Created by wanbo on 16/3/23. * * @param production: Default is true, if false the task just for testing. * @param createTime: The time millis when task was created. */ case class TaskMessage(message: String = "", sound: String = "", action: String = "", label: String = "", production: Boolean = true, createTime: Long = System.currentTimeMillis())
FTChinese/push
src/main/scala/com/ftchinese/jobs/common/TaskMessage.scala
Scala
mit
414
/** * Licensed to Big Data Genomics (BDG) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The BDG licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.bdgenomics.adam.rdd import org.apache.spark.SparkContext import org.apache.spark.rdd.RDD import org.bdgenomics.adam.models.{ ReferenceRegion, SequenceDictionary } import org.bdgenomics.adam.rdd.PairingRDD._ import scala.math._ /** * A base is 'covered' by a region set if any region in the set contains the base itself. * * The 'coverage regions' of a region set are the unique, disjoint, non-adjacent, * minimal set of regions which contain every covered base, and no bases which are not covered. * * The Coverage class calculates the coverage regions for a given region set. * * @param window A parameter (which should be a positive number) that determines the parallelism * which Coverage uses to calculate the coverage regions -- larger window sizes * indicate less parallelism, but also fewer subsequent passes. */ class Coverage(val window: Long) extends Serializable { require(window > 0) type Region = ReferenceRegion /** * Calling findCoverageRegions calculates (as an RDD) the coverage regions for a given * RDD of input regions. * * The primary method. 
* * @param coveringRegions The input regions whose coverage regions are to be calculated * @return an RDD containing the ReferenceRegions corresponding to the coverage regions * of the input set 'coveringRegions' */ def findCoverageRegions(coveringRegions: RDD[ReferenceRegion]): RDD[ReferenceRegion] = { // First, map each input region to a window val windowKeyedRegions: RDD[(Region, Region)] = coveringRegions.flatMap(regionToWindows) // Then, within each window, calculate the coverage regions. This complete list // might contain pairs of regions that are adjacent (i.e. adjacent at the window // boundaries), therefore we ... val possiblyAdjacent: RDD[Region] = windowKeyedRegions.groupByKey().flatMap { case (window: Region, cRegions: Iterable[Region]) => calculateCoverageRegions(cRegions) } // ... collapse the adjacent regions down into single contiguous regions. collapseAdjacent(possiblyAdjacent) } /** * Uses the fixed window-width to key each Region by the corresponding window Region * to which it belongs (through overlap). Since a Region can overlap several windows, * there may be >1 value in the resulting Seq. * * @param region An input Region which is to be keyed to 1 or more windows. 
* @return A Seq of Region pairs, where the first element of each pair is one of the windows * (of fixed-width) and the second element is the input Region */ def regionToWindows(region: ReferenceRegion): Seq[(Region, Region)] = { val windowStart = region.start / window val windowEnd = region.end / window (windowStart to windowEnd).map { case (widx: Long) => val wstart = widx * window val wend = wstart + window val wRegion = ReferenceRegion(region.referenceName, wstart, wend) val clippedRegion = ReferenceRegion(region.referenceName, max(wstart, region.start), min(wend, region.end)) (wRegion, clippedRegion) } } def optionOrdering(or1: Option[Region], or2: Option[Region]): Int = (or1, or2) match { case (None, None) => 0 case (None, Some(r2)) => -1 case (Some(r1), None) => 1 case (Some(r1), Some(r2)) => r1.compareTo(r2) } /** * This is a helper function for findCoverageRegions -- basically, it takes a set * of input ReferenceRegions, it finds all pairs of regions that are adjacent to each * other (i.e. pairs (r1, r2) where r1.end == r2.start and r1.referenceName == r2.referenceName), * and it collapses all such adjacent regions into single contiguous regions. * * @param regions The input regions set; we assume that this input set is non-overlapping * (that no two regions in the input set overlap each other) * @return The collapsed set of regions -- no two regions in the returned RDD should be * adjacent, all should be at least one base-pair apart (or on separate * chromosomes). 
*/ def collapseAdjacent(regions: RDD[Region]): RDD[Region] = { val pairs = regions.sortBy(p => p).pairWithEnds() val points: RDD[OrientedPoint] = pairs.flatMap { case (None, Some(region)) => Seq(OrientedPoint(region.referenceName, region.start, true)) case (Some(region), None) => Seq(OrientedPoint(region.referenceName, region.end, false)) case (Some(r1), Some(r2)) => if (r1.isAdjacent(r2)) { Seq() } else { Seq( OrientedPoint(r1.referenceName, r1.end, false), OrientedPoint(r2.referenceName, r2.start, true) ) } case _ => Seq() } val paired = points.pair() val pairedAndFiltered = paired.filter(p => p._1.chrom == p._2.chrom && p._1.polarity && p._2.pos - p._1.pos >= 0) pairedAndFiltered.map { case (p1: OrientedPoint, p2: OrientedPoint) => ReferenceRegion(p1.chrom, p1.pos, p2.pos) } } def getAllWindows(sc: SparkContext, dict: SequenceDictionary): RDD[ReferenceRegion] = { val chromRegions: RDD[ReferenceRegion] = sc.parallelize( dict.records.toSeq.map { case seqRecord => ReferenceRegion(seqRecord.name, 0, seqRecord.length) } ) val windowRegions: RDD[ReferenceRegion] = chromRegions.flatMap { case chromRegion => (0 until chromRegion.length().toInt by window.toInt).map { start => ReferenceRegion(chromRegion.referenceName, start, start + window) } } windowRegions } def calculateCoverageRegions(regions: Iterable[ReferenceRegion]): Iterator[ReferenceRegion] = calculateCoverageRegions(regions.iterator) /** * Calculates the coverage regions for an input set -- note that this input set is an * Iterable, not an RDD. This is the method which we call on each individual partition * of the RDD, in order to calculate an initial set of disjoint-but-possibly-adjacent * regions within the partition. 
* * @param regions The input set of ReferenceRegion objects * @return The 'coverage regions' of the input set */ def calculateCoverageRegions(regions: Iterator[ReferenceRegion]): Iterator[ReferenceRegion] = { if (regions.isEmpty) { Iterator() } else { val sregions = regions.toArray.sorted if (sregions.size == 1) { return sregions.iterator } // We're calculating the 'coverage regions' here. // We do this in a few steps: // 1. sort the regions in lexicographic (seq-start-end) order -- this happened above. // let the conceptual variables STARTS and ENDS be two arrays, each of len(regions), // which contain the .start and .end fields of the (ordered) regions. // 2. Next, we calculate an array of length len(regions), called MAXENDS, where // MAXENDS(i) = max(ENDS[0:i-1]) // 3. Now, for any index i, if STARTS(i) > MAXENDS(i), then we call region i a // 'split' region -- a region that doesn't overlap any region that came before it, // and which _starts_ a 'coverage region.' We calculate the set // SPLITS = { i : STARTS(i) > MAXENDS(i) } // 4. Finally, we pair the splits -- each pair of splits corresponds to a single, // contiguous coverage region. // TODO: // Calculating the MAXENDS and SPLITS sets in two passes here, although we could probably // do it in one if we really thought about it... 
val maxEnds: Array[Long] = sregions.map(_.end).scanLeft(0L)(max) val splitIndices: Seq[Int] = 0 +: (1 until sregions.size).filter(i => sregions(i).start > maxEnds(i)) :+ sregions.size // splitIndices.sliding(2).map { case Vector(i1, i2) => ReferenceRegion(sregions(i1).referenceName, sregions(i1).start, maxEnds(i2)) }.toIterator } } } case class OrientedPoint(chrom: String, pos: Long, polarity: Boolean) extends Ordered[OrientedPoint] with Serializable { override def compare(that: OrientedPoint): Int = { if (chrom != that.chrom) { chrom.compare(that.chrom) } else { val c1 = pos.compare(that.pos) if (c1 != 0) { c1 } else { // we actually want the *reverse* ordering from the Java Boolean.compareTo // function! // c.f. https://docs.oracle.com/javase/7/docs/api/java/lang/Boolean.html#compareTo(java.lang.Boolean) -polarity.compare(that.polarity) } } } }
erictu/adam
adam-core/src/main/scala/org/bdgenomics/adam/rdd/Coverage.scala
Scala
apache-2.0
9,435
package org.shelmet.heap

import org.scalatest._

/** Verifies that the command-line parser applies default options when only a dump file is given. */
class CommandLineTest extends FlatSpec {

  "A command line parser" should "parse default options" in {
    val parsed = Main.parser.parse(Seq("test.bin"), new Config())
    parsed match {
      case Some(config) =>
        // With no flags supplied, the HTTP port must fall back to its default.
        assert(config.port === Config.DEFAULT_HTTP_PORT)
        assert(config.dumpFile.getName === "test.bin")
      case None =>
        // arguments are bad, usage message will have been displayed
        fail("Should not reach here")
    }
  }
}
rorygraves/shelmet
src/test/scala/org/shelmet/heap/CommandLineTest.scala
Scala
gpl-2.0
480
/* * Copyright 2001-2008 Artima, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.scalatest.junit import org.scalatest._ import scala.collection.immutable.TreeSet import org.scalatest.junit.junit4helpers._ import org.junit.Test import org.junit.Ignore class JUnitSuiteSpec extends FunSpec with SharedHelpers { describe("A JUnitSuite") { it("should return the test names in alphabetical order from testNames") { val a = new JUnitSuite { @Test def doThis() {} @Test def doThat() {} } assertResult(List("doThat", "doThis")) { a.testNames.iterator.toList } val b = new JUnitSuite {} assertResult(List[String]()) { b.testNames.iterator.toList } val c = new JUnitSuite { @Test def doThat() {} @Test def doThis() {} } assertResult(List("doThat", "doThis")) { c.testNames.iterator.toList } } it("should return the proper testNames for test methods whether or not they take an Informer") { val a = new JUnitSuite { @Test def doThis() = () @Test def doThat(info: Informer) = () } assert(a.testNames === TreeSet("doThis")) val b = new JUnitSuite {} assert(b.testNames === TreeSet[String]()) } it("should return names of methods that are annotated with Test, take no params, but have a return type " + "other than Unit from testNames") { val a = new TestWithNonUnitMethod assert(a.testNames === TreeSet("doThat", "doTheOtherThing", "doThis")) } it("should return a tags map from the tags method that contains only methods marked with org.junit.Ignore") { val a = new JUnitSuite { @Ignore @Test def 
testThis() = () @Test def testThat() = () } assert(a.tags === Map("testThis" -> Set("org.scalatest.Ignore"))) val b = new JUnitSuite { @Test def testThis() = () @Ignore @Test def testThat() = () } assert(b.tags === Map("testThat" -> Set("org.scalatest.Ignore"))) val c = new JUnitSuite { @Ignore @Test def testThis() = () @Ignore @Test def testThat() = () } assert(c.tags === Map("testThis" -> Set("org.scalatest.Ignore"), "testThat" -> Set("org.scalatest.Ignore"))) val d = new JUnitSuite { @SlowAsMolasses @Test def testThis() = () @SlowAsMolasses @Ignore @Test def testThat() = () } assert(d.tags === Map("testThat" -> Set("org.scalatest.Ignore"))) val e = new JUnitSuite {} assert(e.tags === Map()) } it("should execute all tests when run is called with testName None") { TestWasCalledSuite.reinitialize() val b = new TestWasCalledSuite b.run(None, Args(SilentReporter)) assert(TestWasCalledSuite.theDoThisCalled) assert(TestWasCalledSuite.theDoThatCalled) } it("should execute one test when run is called with a defined testName") { TestWasCalledSuite.reinitialize() val a = new TestWasCalledSuite a.run(Some("doThis"), Args(SilentReporter)) assert(TestWasCalledSuite.theDoThisCalled) assert(!TestWasCalledSuite.theDoThatCalled) } it("should throw IllegalArgumentException if run is passed a testName that does not exist") { val a = new TestWasCalledSuite intercept[IllegalArgumentException] { // Here, they forgot that the name is actually doThis(Fixture) a.run(Some("misspelled"), Args(SilentReporter)) } } it("should run no tests if tags to include is non-empty") { TestWasCalledSuite.reinitialize() val a = new TestWasCalledSuite a.run(None, Args(SilentReporter, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set()), ConfigMap.empty, None, new Tracker, Set.empty)) assert(!TestWasCalledSuite.theDoThisCalled) assert(!TestWasCalledSuite.theDoThatCalled) } it("should return the correct test count from its expectedTestCount method") { val a = new ASuite 
assert(a.expectedTestCount(Filter()) === 1) val b = new BSuite assert(b.expectedTestCount(Filter()) === 0) val c = new org.scalatest.junit.junit4helpers.CSuite assert(c.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 0) assert(c.expectedTestCount(Filter(None, Set("org.scalatest.FastAsLight"))) === 1) val d = new DSuite assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 0) assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) === 0) assert(d.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) === 6) assert(d.expectedTestCount(Filter()) === 6) val e = new ESuite assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 0) assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) === 0) assert(e.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) === 1) assert(e.expectedTestCount(Filter()) === 1) } it("should generate a test failure if a Throwable, or an Error other than direct Error subtypes " + "known in JDK 1.5, excluding AssertionError") { val a = new ShouldFailSuite val rep = new EventRecordingReporter a.run(None, Args(rep)) val tf = rep.testFailedEventsReceived assert(tf.size === 3) } } }
svn2github/scalatest
src/test/scala/org/scalatest/junit/JUnitSuiteSpec.scala
Scala
apache-2.0
6,145
package net.slozzer.babel

/** A translated value paired with the locale it belongs to.
  *
  * @param locale the locale this value was translated for
  * @param value  the translated payload
  */
final case class Translation[+A](locale: Locale, value: A) {

  /** Transforms the payload while keeping the locale. */
  def map[B](f: A => B): Translation[B] = Translation(locale, f(value))

  /** Replaces the payload entirely, keeping the locale. */
  def as[B](value: B): Translation[B] = Translation(locale, value)

  /** Transforms the payload with access to the locale. */
  def mapWithLocale[B](f: (Locale, A) => B): Translation[B] = Translation(locale, f(locale, value))

  /** Converts to a plain (locale, value) pair. */
  def toTuple: (Locale, A) = (locale, value)

  override def toString: String = s"$locale -> $value"
}
Taig/lokal
modules/core/src/main/scala/net/slozzer/babel/Translation.scala
Scala
mit
408
/* * This file is part of AckCord, licensed under the MIT License (MIT). * * Copyright (c) 2019 Katrix * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ package ackcord.cachehandlers import java.time.Instant import scala.collection.mutable import ackcord.CacheSnapshot.BotUser import ackcord.data._ import ackcord.{CacheSnapshotWithMaps, MemoryCacheSnapshot, SnowflakeMap} import shapeless.tag._ /** * A mutable builder for creating a new snapshot */ class CacheSnapshotBuilder( var botUser: User @@ BotUser, var dmChannelMap: mutable.Map[ChannelId, DMChannel], var groupDmChannelMap: mutable.Map[ChannelId, GroupDMChannel], var unavailableGuildMap: mutable.Map[GuildId, UnavailableGuild], var guildMap: mutable.Map[GuildId, Guild], var messageMap: mutable.Map[ChannelId, mutable.Map[MessageId, Message]], var lastTypedMap: mutable.Map[ChannelId, mutable.Map[UserId, Instant]], var userMap: mutable.Map[UserId, User], var banMap: mutable.Map[GuildId, mutable.Map[UserId, Ban]] ) extends CacheSnapshotWithMaps { override type MapType[K, V] = mutable.Map[SnowflakeType[K], V] def toImmutable: MemoryCacheSnapshot = { def convertNested[K1, K2, V]( map: mutable.Map[SnowflakeType[K1], mutable.Map[SnowflakeType[K2], V]] ): SnowflakeMap[K1, SnowflakeMap[K2, V]] = SnowflakeMap.from(map.map { case (k, v) => k -> SnowflakeMap.from(v) }) MemoryCacheSnapshot( botUser = botUser, dmChannelMap = SnowflakeMap.from(dmChannelMap), groupDmChannelMap = SnowflakeMap.from(groupDmChannelMap), unavailableGuildMap = SnowflakeMap.from(unavailableGuildMap), guildMap = SnowflakeMap.from(guildMap), messageMap = convertNested(messageMap), lastTypedMap = convertNested(lastTypedMap), userMap = SnowflakeMap.from(userMap), banMap = convertNested(banMap) ) } override def getChannelMessages(channelId: ChannelId): mutable.Map[SnowflakeType[Message], Message] = messageMap.getOrElseUpdate(channelId, mutable.Map.empty) override def getChannelLastTyped(channelId: ChannelId): mutable.Map[SnowflakeType[User], Instant] = lastTypedMap.getOrElseUpdate(channelId, mutable.Map.empty) override def getGuildBans(id: GuildId): mutable.Map[SnowflakeType[User], Ban] = 
banMap.getOrElseUpdate(id, mutable.Map.empty) } object CacheSnapshotBuilder { def apply(snapshot: MemoryCacheSnapshot): CacheSnapshotBuilder = { def toMutableMap[K, V](map: SnowflakeMap[K, V]): mutable.Map[SnowflakeType[K], V] = { val builder = mutable.Map.newBuilder[SnowflakeType[K], V] builder.sizeHint(map) builder ++= map builder.result() } def toMutableMapNested[K1, K2, V](map: SnowflakeMap[K1, SnowflakeMap[K2, V]]) = toMutableMap(map.map { case (k, v) => k -> toMutableMap(v) }) new CacheSnapshotBuilder( botUser = snapshot.botUser, dmChannelMap = toMutableMap(snapshot.dmChannelMap), groupDmChannelMap = toMutableMap(snapshot.groupDmChannelMap), unavailableGuildMap = toMutableMap(snapshot.unavailableGuildMap), guildMap = toMutableMap(snapshot.guildMap), messageMap = toMutableMapNested(snapshot.messageMap), lastTypedMap = toMutableMapNested(snapshot.lastTypedMap), userMap = toMutableMap(snapshot.userMap), banMap = toMutableMapNested(snapshot.banMap) ) } }
Katrix-/AckCord
core/src/main/scala/ackcord/cachehandlers/CacheSnapshotBuilder.scala
Scala
mit
4,398
package com.realizationtime.btdogg

import akka.actor.{Actor, ActorLogging, ActorRef, DeadLetter, Props}
import com.realizationtime.btdogg.RootActor._
import com.realizationtime.btdogg.dhtmanager.DhtsManager
import com.realizationtime.btdogg.dhtmanager.DhtsManager.{NodeReady, Shutdown}
import com.realizationtime.btdogg.hashessource.SourcesHub
import com.realizationtime.btdogg.hashessource.SourcesHub.AddWorkers
import com.realizationtime.btdogg.scraping.ScrapersHub
import com.realizationtime.btdogg.scraping.ScrapersHub.AddScrapers
import com.realizationtime.btdogg.utils.DeadLetterActor

import scala.language.postfixOps

/**
 * Top-level supervisor of the application's actor tree. It spawns the DHT
 * manager plus the two hubs, forwards ready DHT nodes to the hubs, and
 * coordinates a graceful shutdown through three states:
 * running (`receive`) -> `stopping` -> `stopped`.
 */
class RootActor extends Actor with ActorLogging {

  import context._

  // Child actors: the DHT lifecycle manager and the two work-distribution hubs.
  private val dhtsManager = actorOf(Props[DhtsManager], "DhtsManager")
  private val sourcesHub = actorOf(Props[SourcesHub], "SourcesHub")
  private val scrapersHub = actorOf(Props[ScrapersHub], "ScrapersHub")

  // Subscribes a logging actor to the system event stream so dead letters are not lost silently.
  private def registerDeadLetterActor = {
    val deadLetterActor = system.actorOf(Props.create(classOf[DeadLetterActor]))
    system.eventStream.subscribe(deadLetterActor, classOf[DeadLetter])
  }

  override def preStart(): Unit = {
    registerDeadLetterActor
    // Kick off DHT bootstrapping as soon as this actor starts.
    dhtsManager ! DhtsManager.Boot
  }

  // Normal running state.
  override def receive: Receive = {
    case node: NodeReady =>
      // A DHT node finished booting: hand its source and scraper to the hubs.
      sourcesHub ! AddWorkers(Set(node.hashesSource))
      scrapersHub ! AddScrapers(Map(node.key.prefix -> node.scraping))
    case m: Message => m match {
      case SubscribePublisher(p) =>
        sourcesHub ! SourcesHub.SubscribePublisher(p)
      case GetScrapersHub =>
        sender() ! scrapersHub
      case UnsubscribePublisher(sub, msg) =>
        sourcesHub ! SourcesHub.UnsubscribePublisher(sub, msg)
      case ShutdownDHTs =>
        // Begin graceful shutdown; remember who asked so they can be notified.
        dhtsManager ! Shutdown
        become(stopping(List(sender())))
    }
  }

  // Shutdown in progress; additional ShutdownDHTs callers are queued for notification.
  private def stopping(shutdownCallers: List[ActorRef]): Receive = {
    case DhtsManager.ShutdownCompleted =>
      shutdownCallers.foreach(_ ! ShutdownComplete)
      become(stopped)
    case ShutdownDHTs =>
      become(stopping(sender() :: shutdownCallers))
  }

  // Terminal state: any further shutdown request is acknowledged immediately.
  private val stopped: Receive = {
    case ShutdownDHTs => sender() ! ShutdownComplete
  }
}

object RootActor {

  /** Protocol understood by [[RootActor]] in its running state. */
  sealed trait Message
  final case class SubscribePublisher(publisher: ActorRef) extends Message
  final case class UnsubscribePublisher(subscriber: ActorRef, endMessage: Option[Any]) extends Message
  case object GetScrapersHub extends Message
  case object ShutdownDHTs extends Message
  // Reply sent to shutdown requesters; deliberately not part of the inbound Message ADT.
  case object ShutdownComplete
}
bwrega/btdogg
src/main/scala/com/realizationtime/btdogg/RootActor.scala
Scala
mit
2,499
package com.rasterfoundry.database import java.util.UUID import com.rasterfoundry.common.ast.MapAlgebraAST import com.rasterfoundry.common.ast.MapAlgebraAST.{LayerRaster, ProjectRaster} import com.rasterfoundry.common.ast.codec.MapAlgebraCodec import com.rasterfoundry.datamodel._ import com.rasterfoundry.common.Generators.Implicits._ import io.circe.syntax._ import doobie.implicits._ import org.scalatest._ import org.scalatestplus.scalacheck.Checkers import org.scalacheck.Prop.forAll class ExportDaoSpec extends FunSuite with Matchers with Checkers with DBTestConfig with PropTestHelpers with MapAlgebraCodec { test("types") { ExportDao.query.list.transact(xa).unsafeRunSync.length should be >= 0 } test("can create an export definition for project export") { check { forAll { (user: User.Create, org: Organization.Create, platform: Platform, projCreate: Project.Create, sceneCreate: Scene.Create, exportCreate: Export.Create) => { val projectInsertIO = for { (dbUser, _, _, dbProject) <- insertUserOrgPlatProject(user, org, platform, projCreate) randomDatasource <- unsafeGetRandomDatasource dbScene <- { val scene = fixupSceneCreate(dbUser, randomDatasource, sceneCreate) SceneDao.insert(scene, dbUser) } _ <- ProjectDao.addScenesToProject(List(dbScene.id), dbProject.id, dbProject.defaultLayerId) dbExport <- { val export = exportCreate.toExport(dbUser) ExportDao.insert(export.copy(projectId = Some(dbProject.id)), dbUser) } exportDefinition <- { ExportDao.getExportDefinition(dbExport) } } yield exportDefinition projectInsertIO.transact(xa).unsafeRunSync true } } } } test("can create an export definition for project _layer_ export") { check { forAll { (user: User.Create, org: Organization.Create, platform: Platform, projCreate: Project.Create, sceneCreate: Scene.Create, exportCreate: Export.Create) => { val projectInsertIO = for { (dbUser, _, _, dbProject) <- insertUserOrgPlatProject(user, org, platform, projCreate) randomDatasource <- unsafeGetRandomDatasource dbScene <- { val 
scene = fixupSceneCreate(dbUser, randomDatasource, sceneCreate) SceneDao.insert(scene, dbUser) } _ <- ProjectDao.addScenesToProject(List(dbScene.id), dbProject.id, dbProject.defaultLayerId) dbExport <- { val export = exportCreate.toExport(dbUser) ExportDao.insert(export.copy(projectId = Some(dbProject.id), projectLayerId = Some(dbProject.defaultLayerId)), dbUser) } exportDefinition <- { ExportDao.getExportDefinition(dbExport) } } yield exportDefinition projectInsertIO.transact(xa).unsafeRunSync true } } } } test( "can create an export definition for tool run with layer and project sources") { check { forAll { (user: User.Create, org: Organization.Create, platform: Platform, projCreate: Project.Create, sceneCreate: Scene.Create, exportCreate: Export.Create, toolRunCreate: ToolRun.Create) => { val projectInsertIO = for { (dbUser, _, _, dbProject) <- insertUserOrgPlatProject(user, org, platform, projCreate) randomDatasource <- unsafeGetRandomDatasource dbScene <- { val scene = fixupSceneCreate(dbUser, randomDatasource, sceneCreate) SceneDao.insert(scene, dbUser) } _ <- ProjectDao.addScenesToProject(List(dbScene.id), dbProject.id, dbProject.defaultLayerId) dbToolRun <- { val projectRaster: ProjectRaster = ProjectRaster(UUID.randomUUID(), dbProject.id, Some(2), None, None) val layerRaster: LayerRaster = LayerRaster(UUID.randomUUID(), dbProject.defaultLayerId, Some(1), None, None) val ast = MapAlgebraAST .Addition(List(projectRaster, layerRaster), UUID.randomUUID(), None) val toolRun = toolRunCreate.copy(executionParameters = ast.asJson) ToolRunDao.insertToolRun(toolRun, dbUser) } dbExport <- { val export = exportCreate.toExport(dbUser) ExportDao.insert(export.copy(toolRunId = Some(dbToolRun.id)), dbUser) } exportDefinition <- ExportDao.getExportDefinition(dbExport) } yield exportDefinition projectInsertIO.transact(xa).unsafeRunSync true } } } } }
azavea/raster-foundry
app-backend/db/src/test/scala/com/azavea/rf/database/ExportDaoSpec.scala
Scala
apache-2.0
6,612
//package xi.armatweet.nlp // //import edu.stanford.nlp.ling.{CoreAnnotations, CoreLabel} //import edu.stanford.nlp.simple.Document // //import scala.collection.JavaConversions._ // ///** // * Created by alberto on 16/08/16. // */ //object SparkOpenIE { // // def getOffsets(tweet: Tweet, comp: Seq[CoreLabel]) = { // // if (comp.nonEmpty) { // val cs = comp.head.get(classOf[CoreAnnotations.CharacterOffsetBeginAnnotation]) // val ce = comp.last.get(classOf[CoreAnnotations.CharacterOffsetEndAnnotation]) // tweet.toOriginalStringOffset_roundedTokens(cs, ce).getOrElse((-1, -1)) // } else (-1, -1) // } //getOffsets // // // def annotateTweet_simple(cleanedTweet_str: String): String = { // // (originalText: String, tokenizedText: String, // // tweetNLPos: String): String = { // // val tweet = Tweet(originalText, tokenizedText, tweetNLPos) // // val cleanedTweet = tweet.clean() // // val cleanedTweet_str = cleanedTweet.text // // try { // val doc: Document = new Document(cleanedTweet_str) // // if (cleanedTweet_str.trim.isEmpty) return "" // // for (s <- doc.sentences()) { // // val x = s.nerTags() // val y = s.openie() // } // doc.jsonMinified() // } catch { // case e: Throwable => // "failed\\n" + e.getMessage // } // try-catch // // } //annotateTweet // // // def cleanTweetSerialize(originalText: String, tokenizedText: String, // tweetNLPos: String) = { // // val tweet = Tweet(originalText, tokenizedText, tweetNLPos) // val cleanedTweet = tweet.clean() // cleanedTweet.serializeStr // } //cleanTweetSerialize // // // def cleanTweetString(originalText: String, tokenizedText: String, // tweetNLPos: String) = { // // val tweet = Tweet(originalText, tokenizedText, tweetNLPos) // val cleanedTweet = tweet.clean() // cleanedTweet.text // } //cleanTweetSerialize // // // def main(args: Array[String]) { // val conf = new SparkConf() // .setAppName("SparkOpenIE") // .set("spark.sql.parquet.compression.codec", "snappy") //// .setMaster("local[1]") // // val sc = new 
SparkContext(conf) // val sqlContext = new SQLContext(sc) // // val inputFile = args(0) // val outputFile = args(1) // // // val inputFile = "/Users/alberto/Documents/Projects/ArmaSuisse/tmp/bastard.json" //args(0) // // val inputFile = "/Users/alberto/Documents/Projects/ArmaSuisse/tmp/bastard_openie.json" //args(1) // // val outputFile = "/Users/alberto/Documents/Projects/ArmaSuisse/tmp/bastard_openie2.json" //args(1) // // val fcleanSer = udf((x: String, y: String, z: String) => cleanTweetSerialize(x, y, z)) // val fcleanString = udf((x: String, y: String, z: String) => cleanTweetString(x, y, z)) // val fopenIe = udf((x: String) => annotateTweet_simple(x)) // // val tweetsAnnotated = sqlContext.read.parquet(inputFile). // withColumn("cleanedTweet", fcleanSer(col("text"), col("tweetNLPTokens"), col("tweetNLPPosTags"))). // withColumn("cleanedTweet_str", fcleanString(col("text"), col("tweetNLPTokens"), col("tweetNLPPosTags"))). // withColumn("openIEjson", fopenIe(col("cleanedTweet_str"))) // // tweetsAnnotated.write //// .mode(SaveMode.Overwrite) // .parquet(outputFile) // } //main // //}
eXascaleInfolab/2016-armatweet
NLP/src/main/scala/xi/armatweet/nlp/SparkOpenIE.scala
Scala
mit
3,362
package org.greencheek.web.filter.memcached.servlets

import javax.servlet.http.{HttpServletResponse, HttpServletRequest, HttpServlet}
import java.io.IOException
import javax.servlet.ServletException

/** Integration-test fixture: a servlet that answers every request with a plain-text "hello". */
class SayHelloServletIT extends HttpServlet {

  @throws(classOf[IOException])
  @throws(classOf[ServletException])
  override protected def service(req: HttpServletRequest, resp: HttpServletResponse): Unit = {
    // Content type must be set before the writer is used so the response headers are correct.
    resp.setContentType("text/plain")
    val out = resp.getWriter
    out.println("hello")
  }
}
tootedom/tomcat-memcached-response-filter
src/integration-test/scala/org/greencheek/web/filter/memcached/servlets/SayHelloServletIT.scala
Scala
apache-2.0
546
package openscad

/** Minimal vector algebra shared by [[Vec2]] and [[Vec3]]: scaling, addition, subtraction. */
trait Vec[Self] { this: Self =>
  def *(t: Double): Self
  def +(other: Self): Self
  def -(other: Self): Self
}

/** An immutable 3-component double-precision vector. */
case class Vec3(x: Double, y: Double, z: Double) extends Vec[Vec3] {

  /** Component-wise radians-to-degrees conversion. */
  def toDegrees: Vec3 = Vec3(math.toDegrees(x), math.toDegrees(y), math.toDegrees(z))

  /** Euclidean norm. */
  def length: Double = math.sqrt(x * x + y * y + z * z)

  /** This vector scaled to unit length. NOTE(review): divides by zero for the zero vector. */
  def normalized(): Vec3 = {
    val len = length
    Vec3(x / len, y / len, z / len)
  }

  /** Orientation angles (radians) of this vector's direction. */
  def angle: Vec3 = normalized().angleOfNormalizedVector

  private def angleOfNormalizedVector = {
    // Length of the projection onto the XY plane.
    def planar(a: Double, b: Double) = math.sqrt(a * a + b * b)
    Vec3(0, -math.atan2(z, planar(x, y)), math.atan2(y, x))
  }

  def +(other: Vec3): Vec3 = Vec3(x + other.x, y + other.y, z + other.z)

  def -(other: Vec3): Vec3 = Vec3(x - other.x, y - other.y, z - other.z)

  def *(t: Double): Vec3 = Vec3(x * t, y * t, z * t)

  override def toString = s"[$x, $y, $z]"
}

/** An immutable 2-component double-precision vector. */
case class Vec2(x: Double, y: Double) extends Vec[Vec2] {

  /** Embeds this vector into 3D with the given z coordinate (default 0). */
  def to3(z: Double = 0): Vec3 = Vec3(x, y, z)

  def +(other: Vec2): Vec2 = Vec2(x + other.x, y + other.y)

  def -(other: Vec2): Vec2 = Vec2(x - other.x, y - other.y)

  def *(t: Double): Vec2 = Vec2(x * t, y * t)

  /** Dot product. */
  def dot(other: Vec2): Double = x * other.x + y * other.y

  /** This vector rotated 90 degrees counter-clockwise. */
  def perpendicular(): Vec2 = Vec2(-y, x)

  /** Euclidean norm. */
  def length: Double = math.sqrt(x * x + y * y)

  /** This vector scaled to unit length. NOTE(review): divides by zero for the zero vector. */
  def normalized(): Vec2 = {
    val len = length
    Vec2(x / len, y / len)
  }

  /** Euclidean distance to another point. */
  def distance(other: Vec2): Double = (this - other).length

  /** Reflection of this vector across the hyperplane whose normal is `d`. */
  def mirror(d: Vec2): Vec2 = {
    val unit = d.normalized()
    this - unit * 2 * (unit dot this)
  }
}
mikea/3d-bookmark
src/main/scala/openscad/vectors.scala
Scala
apache-2.0
1,486
/*
 * Part of NDLA learningpath-api.
 * Copyright (C) 2018 NDLA
 *
 * See LICENSE
 */

package db.migration

import no.ndla.learningpathapi.UnitSuite

// Verifies the V7 migration's JSON rewrite: learningpaths verified as
// EXTERNAL move from PUBLISHED to UNLISTED, while every other combination
// of status and verificationStatus is left unchanged.
class V7__MovePublishedExternToUnlistedTest extends UnitSuite {

  val migration = new V7__MovePublishedExternToUnlisted()

  test("extern learningpaths with PUBLISHED should be moved to UNLISTED") {
    val before = """{"duration":0,"status":"PUBLISHED","owner":"hmm","verificationStatus":"EXTERNAL"}"""
    val expected = """{"duration":0,"status":"UNLISTED","owner":"hmm","verificationStatus":"EXTERNAL"}"""
    migration.updateStatus(before) should equal(expected)
  }

  test("extern learningpaths with PRIVATE should stay PRIVATE") {
    // Only the PUBLISHED status triggers the rewrite; PRIVATE passes through untouched.
    val before = """{"duration":0,"status":"PRIVATE","owner":"hmm","verificationStatus":"EXTERNAL"}"""
    val expected = """{"duration":0,"status":"PRIVATE","owner":"hmm","verificationStatus":"EXTERNAL"}"""
    migration.updateStatus(before) should equal(expected)
  }

  test("CREATED_BY_NDLA learningpaths should stay PUBLISHED") {
    // The rewrite targets external content only, so NDLA-created paths keep PUBLISHED.
    val before = """{"duration":0,"status":"PUBLISHED","owner":"hmm","verificationStatus":"CREATED_BY_NDLA"}"""
    val expected = """{"duration":0,"status":"PUBLISHED","owner":"hmm","verificationStatus":"CREATED_BY_NDLA"}"""
    migration.updateStatus(before) should equal(expected)
  }
}
NDLANO/learningpath-api
src/test/scala/db/migration/V7__MovePublishedExternToUnlistedTest.scala
Scala
gpl-3.0
1,319
/*
 * Copyright (c) 2014, Cloudera, Inc. All Rights Reserved.
 *
 * Cloudera, Inc. licenses this file to you under the Apache License,
 * Version 2.0 (the "License"). You may not use this file except in
 * compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * This software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for
 * the specific language governing permissions and limitations under the
 * License.
 */

package com.cloudera.oryx.api.serving

import com.cloudera.oryx.api.KeyMessage
import com.typesafe.config.Config
import org.apache.hadoop.conf.Configuration

/**
 * Scala counterpart to Java ServingModelManager.
 *
 * @tparam U type of update message read/written
 */
trait ScalaServingModelManager[U] {

  /**
   * Called by the framework to initiate a continuous process of reading models, and reading
   * from the input topic and updating model state in memory, and issuing updates to the
   * update topic. This will be executed asynchronously and may block.
   *
   * @param updateIterator iterator to read models from
   * @param hadoopConf Hadoop context, which may be required for reading from HDFS
   */
  def consume(updateIterator: Iterator[KeyMessage[String,U]], hadoopConf: Configuration): Unit

  /**
   * @return configuration for the serving layer
   */
  def getConfig: Config

  /**
   * @return in-memory model representation
   */
  def getModel: AnyRef

  /**
   * Closes this manager when it is no longer needed, so implementations can
   * release any resources they hold.
   */
  def close(): Unit

}
SevenYoung/oryx
framework/oryx-api/src/main/scala/com/cloudera/oryx/api/serving/ScalaServingModelManager.scala
Scala
apache-2.0
1,569
/*
 * This file is part of Kiama.
 *
 * Copyright (C) 2013-2015 Anthony M Sloane, Macquarie University.
 *
 * Kiama is free software: you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the
 * Free Software Foundation, either version 3 of the License, or (at your
 * option) any later version.
 *
 * Kiama is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
 * more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Kiama.  (See files COPYING and COPYING.LESSER.)  If not, see
 * <http://www.gnu.org/licenses/>.
 */

package org.kiama
package util

/**
 * A thread-safe integer counter, useful for generating sequences of values
 * such as unique names for generated entities. All accesses synchronise on
 * the instance, so it can be shared between threads.
 *
 * `init` is the initial value of the counter (default: -1).
 */
class Counter (init : Int = -1) {

    /**
     * The most recently stored value; starts at `init`.
     */
    private[this] var _value = init

    /**
     * The current value of the counter.
     */
    def value : Int =
        synchronized {
            _value
        }

    /**
     * Advance the counter by `inc` (default: 1) and return the new value.
     * With the default initial value and increment, the first call returns
     * zero.
     */
    def next (inc : Int = 1) : Int =
        synchronized {
            _value += inc
            _value
        }

    /**
     * Set the counter back to `to`, which defaults to the initial value.
     */
    def reset (to : Int = init) : Unit =
        synchronized {
            _value = to
        }

}
adeze/kiama
library/src/org/kiama/util/Counter.scala
Scala
gpl-3.0
2,095
/*
 * Copyright 2019 Spotify AB.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package com.spotify.scio.bigquery

import scala.annotation.StaticAnnotation

package object types {

  /**
   * Case class field annotation for BigQuery field description.
   *
   * To be used with case class fields annotated with [[BigQueryType.toTable]], For example:
   *
   * {{{
   * @BigQueryType.toTable
   * case class User(@description("user name") name: String,
   *                 @description("user age") age: Int)
   * }}}
   *
   * @param value the description text for the annotated field
   */
  final class description(value: String) extends StaticAnnotation with Serializable

  /**
   * Case class to serve as raw type for Geography instances to distinguish them from Strings.
   *
   * See also https://cloud.google.com/bigquery/docs/gis-data
   *
   * @param wkt
   *   Well Known Text formatted string that BigQuery displays for Geography
   */
  case class Geography(wkt: String)
}
spotify/scio
scio-google-cloud-platform/src/main/scala/com/spotify/scio/bigquery/types/package.scala
Scala
apache-2.0
1,438
/* Copyright 2014, 2015 Richard Wiedenhöft <richard@wiedenhoeft.xyz>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package xyz.wiedenhoeft.scalacrypt.util

import java.security.MessageDigest

import xyz.wiedenhoeft.scalacrypt._
import scala.util.{ Try, Success, Failure }

/**
 * Implements an easy way to hash passwords using PBKDF2.
 *
 * Blob layout produced by `apply` and consumed by `verify`:
 * [algorithm: 1 byte][iterations: 4 bytes][salt length: 4 bytes][salt]
 * [hash length: 4 bytes][hash]. The verification process is backwards
 * compatible with previously stored blobs.
 */
object PBKDF2Easy {

  /** Supported keyed-hash algorithms, keyed by the identifier byte stored in the blob. */
  lazy val algoMap = Map[Byte, KeyedHash[Key]](
    1.toByte -> khash.HmacSHA256)

  lazy val defaultAlgorithm = 1.toByte

  val defaultSaltLength = 32
  val defaultHashLength = 32

  lazy val defaultSaltLengthBytes = java.nio.ByteBuffer.allocate(4).putInt(defaultSaltLength).array.toList
  lazy val defaultHashLengthBytes = java.nio.ByteBuffer.allocate(4).putInt(defaultHashLength).array.toList

  /**
   * Hashes a password with PBKDF2 and a fresh random salt, returning the
   * self-describing blob documented on the object.
   *
   * @param password   the password bytes to hash
   * @param iterations PBKDF2 iteration count (default: 20000)
   */
  def apply(password: Seq[Byte], iterations: Int = 20000): Try[Seq[Byte]] = {
    val key = password.toKey[SymmetricKeyArbitrary].get
    val iterationsBytes = java.nio.ByteBuffer.allocate(4).putInt(iterations).array.toList
    val pbkdf2 = khash.PBKDF2(algoMap(defaultAlgorithm), iterations, defaultHashLength)
    // Fix: use the declared salt length rather than a hard-coded 32, so the
    // serialized length prefix can never disagree with the actual salt size.
    val salt = Random.nextBytes(defaultSaltLength).toList
    pbkdf2(key, salt) map { _.toList } match {
      case Success(hash) ⇒ Success(defaultAlgorithm :: iterationsBytes ::: defaultSaltLengthBytes ::: salt ::: defaultHashLengthBytes ::: hash)
      case Failure(f) ⇒ Failure(f)
    }
  }

  /**
   * Verifies a password against a blob previously produced by `apply`.
   * Malformed or truncated blobs yield `Success(false)` rather than a failure.
   *
   * @param password the candidate password bytes
   * @param hash     the stored blob to verify against
   */
  def verify(password: Seq[Byte], hash: Seq[Byte]): Try[Boolean] = {
    // Reject blobs too short to contain the header, or with an unknown algorithm byte.
    if (hash.length < 9 || !algoMap.contains(hash(0))) return Success(false)
    val key = password.toKey[SymmetricKeyArbitrary].get
    val algorithm = algoMap(hash(0))
    val iterations = java.nio.ByteBuffer.allocate(4).put(hash.slice(1, 5).toArray).getInt(0)
    val saltLength = java.nio.ByteBuffer.allocate(4).put(hash.slice(5, 9).toArray).getInt(0)
    val slice1 = hash.slice(9, hash.length)
    if (slice1.length < saltLength) return Success(false)
    val salt = slice1.slice(0, saltLength)
    val slice2 = slice1.slice(saltLength, slice1.length)
    if (slice2.length < 4) return Success(false)
    val hashLength = java.nio.ByteBuffer.allocate(4).put(slice2.slice(0, 4).toArray).getInt(0)
    val realHash = slice2.slice(4, slice2.length)
    if (realHash.length != hashLength) return Success(false)
    val pbkdf2 = khash.PBKDF2(algorithm, iterations, hashLength)
    // Fix: constant-time comparison (MessageDigest.isEqual) instead of
    // Seq equality, to avoid leaking how many leading bytes match via timing.
    pbkdf2(key, salt) map { computed ⇒ MessageDigest.isEqual(computed.toArray, realHash.toArray) }
  }
}
Richard-W/scalacrypt
src/main/scala/util/PBKDF2Easy.scala
Scala
apache-2.0
2,908
// Positional compiler regression test (t439): nesting an abstract class and an
// object extending it inside another object must simply compile. The abstract
// class has no unimplemented members, so `object o` is a legal concrete subclass.
object test { abstract class Foo; object o extends Foo }
yusuke2255/dotty
tests/untried/pos/t439.scala
Scala
bsd-3-clause
61
import scala.language.implicitConversions

class A
class B extends A

/** Something that can render itself as a string. */
trait Y { def value: String }

// X is contravariant in T, so an X[A] is usable wherever an X[B] is expected.
trait X[-T] { def y(t: T): Y }

object XA extends X[A] { def y(a: A) = new Y { def value = s"${a.getClass}: AValue" } }
object XB extends X[B] { def y(b: B) = new Y { def value = s"${b.getClass}: BValue" } }

object Test {
  // Implicit view T => Y, driven by whichever X[T] instance implicit search finds.
  implicit def f[T](t: T)(implicit x: X[T]): Y = x.y(t)
  implicit val xa: X[A] = XA
  implicit val xb: X[B] = XB

  def main(argv: Array[String]): Unit = {
    val a = new A
    val b = new B
    // Contravariance makes xa applicable to `b` as well; the test exercises
    // which instance implicit search selects for each static type.
    // NOTE(review): the expected selection is defined by the suite's check
    // file, which is not visible here.
    println(s"A: ${a.value}")
    println(s"B: ${b.value}")
  }
}
martijnhoekstra/scala
test/files/run/t2509-1.scala
Scala
apache-2.0
592
package models.product

import play.api.libs.json.{ Json, OFormat }

/**
 * A selectable option of a product.
 *
 * @param name        display name of the option
 * @param description optional free-text description
 * @param priceInCent price in cents — an integer, avoiding floating-point
 *                    rounding issues with monetary values
 */
case class ProductOption(
  name: String,
  description: Option[String],
  priceInCent: Int
)

object ProductOption {
  // Fix: public implicits should carry an explicit type annotation — inferred
  // implicit types are fragile across refactorings and are disallowed in
  // Scala 3. Json.format on a case class yields an OFormat (reads + writes).
  implicit val jsonFormat: OFormat[ProductOption] = Json.format[ProductOption]
}
leanovate/microzon-web
app/models/product/ProductOption.scala
Scala
mit
328
package at.fh.swengb.resifo_android

import android.content.Intent
import android.os.Bundle
import android.support.v7.app.AppCompatActivity
import android.view.View
import android.widget.{EditText, Toast}

/**
 * Form page 8 of the multi-page registration wizard (pages Formular05 …
 * Formular09 are referenced from this class): collects the address at which
 * the main residence ("Hauptwohnsitz") remains, validates the mandatory
 * fields, and forwards the accumulated form data to [[Formular09Activity]].
 */
class Formular08Activity extends AppCompatActivity {

  // Bundle accumulating the data gathered by the previous form pages.
  var bundle: Bundle = _
  // Intent this activity was started with; carries the bundle from page 5.
  var intent: Intent = _

  override protected def onCreate(savedInstanceState: Bundle) {
    super.onCreate(savedInstanceState)
    setContentView(R.layout.activity_formular08)
    intent = getIntent();
  }

  /*
  def getPrev(view: View): Unit = {
    val prevView = new Intent(this, classOf[Formular05Activity])
    startActivity(prevView)
  }
  */

  // Back navigation: delegates to the platform's back handling.
  def Prev(view : View) : Unit = onBackPressed()

  // Reads the address fields, packs them into the shared bundle and starts
  // the next form page — but only when all mandatory fields are non-blank.
  def getNext(view: View): Unit = {
    val prevView = new Intent(this, classOf[Formular09Activity])

    // Read the form data from the edit-text widgets.
    val land: String = findViewById(R.id.editText_land).asInstanceOf[EditText].getText.toString
    val plz: String = findViewById(R.id.editText_plz).asInstanceOf[EditText].getText.toString
    val ort: String = findViewById(R.id.editText_ort).asInstanceOf[EditText].getText.toString
    val straße: String = findViewById(R.id.editText_straße).asInstanceOf[EditText].getText.toString
    val tuer: String = findViewById(R.id.editText_tuer).asInstanceOf[EditText].getText.toString
    val hausnummer: String = findViewById(R.id.editText_hausnummer).asInstanceOf[EditText].getText.toString
    val stiege: String = findViewById(R.id.editText_stiege).asInstanceOf[EditText].getText.toString

    val hauptwohnsitzBleibtIn: HauptwohnsitzBleibt = HauptwohnsitzBleibt(land, plz, ort, straße, tuer, hausnummer, stiege)

    // NOTE(review): the bundle is unpacked, extended and attached to the
    // intent even when the validation below fails — harmless but wasted work
    // in the failure case.
    bundle = intent.getExtras().getBundle("bundleFormular05Activity")
    bundle.putSerializable("intentFormular08Activity_hauptwohnsitzBleibtIn", hauptwohnsitzBleibtIn)
    prevView.putExtra("bundleFormular08Activity", bundle)

    // Mandatory fields: country, postal code, city, street, house number.
    // Door ("tuer") and staircase ("stiege") are allowed to stay empty.
    if( land.toString().trim().equals("") || plz.toString().trim().equals("") || ort.toString().trim().equals("") || straße.toString().trim().equals("") || hausnummer.toString().trim().equals("")){
      Toast.makeText(getApplicationContext, "Bitte alle Pflichtfelder ausfüllen!", Toast.LENGTH_SHORT).show()
    } else{
      startActivity(prevView);
    }
  }

  /*
  def saveObject(view: View): Unit = {
    val land: String = findViewById(R.id.editText_land).asInstanceOf[EditText].getText.toString
    val plz: String = findViewById(R.id.editText_plz).asInstanceOf[EditText].getText.toString
    val ort: String = findViewById(R.id.editText_ort).asInstanceOf[EditText].getText.toString
    val straße: String = findViewById(R.id.editText_straße).asInstanceOf[EditText].getText.toString
    val tuer: String = findViewById(R.id.editText_tuer).asInstanceOf[EditText].getText.toString
    val hausnummer: String = findViewById(R.id.editText_hausnummer).asInstanceOf[EditText].getText.toString
    val stiege: String = findViewById(R.id.editText_stiege).asInstanceOf[EditText].getText.toString
  }*/
}
FlorianReinprecht/resifo-android
app/src/main/scala/at/fh/swengb/resifo_android/Formular08Activity.scala
Scala
gpl-3.0
2,971
package goggles.macros.interpret

/**
 * Metadata describing one optic in an interpreted chain: a human-readable
 * label, the source and target types it maps between, its own optic kind,
 * and the optic kind of the composition up to this point.
 */
case class OpticInfo[+Type](
  label: String,
  sourceType: Type,
  targetType: Type,
  opticType: OpticType,
  compositeOpticType: OpticType) {

  /** Transforms the source and target types; all other fields are kept as-is. */
  def map[U](f: Type => U): OpticInfo[U] =
    OpticInfo(label, f(sourceType), f(targetType), opticType, compositeOpticType)
}
kenbot/goggles
macros/src/main/scala/goggles/macros/interpret/OpticInfo.scala
Scala
mit
302
/*
 * Copyright 2016 Dennis Vriend
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package akka.persistence.jdbc.util

import scala.concurrent.duration.{ FiniteDuration, _ }
import scala.concurrent.{ Await, Future }

/**
 * Blocking helpers for futures, meant for the very edge of the program
 * (specs, demos). Production code should compose futures instead of
 * awaiting them.
 */
object BlockingOps {

  implicit class BlockingFutureImplicits[T](val that: Future[T]) extends AnyVal {

    /** Blocks the calling thread until `that` completes, then returns its value. */
    def futureValue(implicit awaitDuration: FiniteDuration = 24.hour): T =
      Await.result(that, awaitDuration)

    /** Blocks until `that` completes and prints the resulting value to stdout. */
    def printFutureValue(implicit awaitDuration: FiniteDuration = 24.hour): Unit = {
      val resolved = Await.result(that, awaitDuration)
      println(resolved)
    }
  }
}
gavares/akka-persistence-jdbc
src/main/scala/akka/persistence/jdbc/util/BlockingOps.scala
Scala
apache-2.0
1,071
package io.rout.example.incomplete

import scala.collection.mutable

/**
 * A single todo item.
 *
 * @param id             unique identifier, also the storage key
 * @param title          short description
 * @param completed      whether the item is done
 * @param order          optional sort position
 * @param daysToComplete time budget in days
 * @param relatedTodos   optional ids of related items
 * @param advisor        optional name of an advisor
 */
case class Todo(id: Int, title: String, completed: Boolean, order: Option[Int], daysToComplete: Int, relatedTodos: Option[Seq[Int]], advisor: Option[String])

/** In-memory store for todos; every access is coarsely synchronized. */
object Todo {

  private[this] val store: mutable.Map[Int, Todo] = mutable.Map.empty[Int, Todo]

  /** Looks up a todo by id. */
  def get(id: Int): Option[Todo] = synchronized { store.get(id) }

  /** Snapshot of all stored todos (iteration order unspecified). */
  def list(): List[Todo] = synchronized { store.valuesIterator.toList }

  /** Inserts or replaces the todo under its own id. */
  def save(t: Todo): Unit = synchronized { store.update(t.id, t) }

  /** Removes the todo with the given id; a no-op when absent. */
  def delete(id: Int): Unit = synchronized { store.remove(id); () }
}

/** Signals that no todo exists under the given id. */
case class TodoNotFound(id: Int) extends Exception {
  override def getMessage: String = s"Todo(${id.toString}) not found."
}
teodimoff/rOut
examples/src/io/rout/incomplete/Todo.scala
Scala
apache-2.0
708
/*
 * Copyright 2021 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package uk.gov.hmrc.ct.ct600e.v2

import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.ct600e.v2.retriever.CT600EBoxRetriever

/**
 * CT600E (v2) box E13: "Income from other sources".
 * An optional integer input; validation only requires the value, when
 * present, to be zero or positive.
 */
case class E13(value: Option[Int]) extends CtBoxIdentifier("Income from other sources") with CtOptionalInteger with Input with ValidatableBox[CT600EBoxRetriever] {

  override def validate(boxRetriever: CT600EBoxRetriever): Set[CtValidation] = {
    validateZeroOrPositiveInteger(this)
  }
}
hmrc/ct-calculations
src/main/scala/uk/gov/hmrc/ct/ct600e/v2/E13.scala
Scala
apache-2.0
1,020
/*
 * Copyright 2018 Aman Mehara
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/** Demonstrates building and traversing an immutable list. */
object CreateList {

  // A list with 4 items: `::` prepends each element, `Nil` terminates.
  val numberList = "One" :: "Two" :: "Three" :: "Four" :: Nil

  def main(args: Array[String]): Unit = {
    // Print all 4 items of the list.
    // Fix: pass `println` directly instead of the old `(x: Any) => println(x)`
    // lambda, which needlessly widened the element type from String to Any.
    numberList.foreach(println)
  }
}
amanmehara/programming-app-data
Scala/Create List/CreateList.scala
Scala
apache-2.0
834
/*
 * Copyright 2001-2013 Stephen Colebourne
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.joda.time.chrono

import java.util.Locale

/**
 * Provides time calculations for the month of the year component of time,
 * layering locale-aware month names (via GJLocaleSymbols) on top of the
 * numeric behaviour inherited from BasicMonthOfYearDateTimeField.
 *
 * Fix: the original Java-to-Scala conversion produced an auxiliary
 * constructor that called `this()` followed by a bogus `` `super` `` call
 * expression, which is not valid Scala. The superclass arguments are now
 * passed through the primary constructor instead. The literal 2 mirrors the
 * original super call; presumably it identifies February as the leap month —
 * confirm against BasicMonthOfYearDateTimeField.
 *
 * @author Guy Allard
 * @author Stephen Colebourne
 * @author Brian S O'Neill
 * @since 1.0
 */
@SerialVersionUID(-4748157875845286249L)
final class GJMonthOfYearDateTimeField private[chrono] (chronology: BasicChronology)
    extends BasicMonthOfYearDateTimeField(chronology, 2) {

  /** Full month name for the given month number, in the given locale. */
  override def getAsText(fieldValue: Int, locale: Locale): String =
    GJLocaleSymbols.forLocale(locale).monthOfYearValueToText(fieldValue)

  /** Abbreviated month name for the given month number, in the given locale. */
  override def getAsShortText(fieldValue: Int, locale: Locale): String =
    GJLocaleSymbols.forLocale(locale).monthOfYearValueToShortText(fieldValue)

  /** Parses a (possibly abbreviated) month name back to its month number. */
  protected override def convertText(text: String, locale: Locale): Int =
    GJLocaleSymbols.forLocale(locale).monthOfYearTextToValue(text)

  override def getMaximumTextLength(locale: Locale): Int =
    GJLocaleSymbols.forLocale(locale).getMonthMaxTextLength

  override def getMaximumShortTextLength(locale: Locale): Int =
    GJLocaleSymbols.forLocale(locale).getMonthMaxShortTextLength
}
aparo/scalajs-joda
src/main/scala/org/joda/time/chrono/GJMonthOfYearDateTimeField.scala
Scala
apache-2.0
1,863
/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2014 yetu AG
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
package com.yetu.siren

/**
 * The model package, containing a complete model of Siren entities in terms of
 * algebraic data types.
 */
package object model {

  import collection.immutable
  import immutable.{ Seq ⇒ ImmutableSeq }

  /** The properties of a Siren entity: an immutable sequence of [[Property]]. */
  type Properties = ImmutableSeq[Property]

  /**
   * Companion object for the property type.
   */
  object Property {

    /** Sum type for property values */
    sealed trait Value

    /**
     * A property value that is string-typed.
     * @param value the string value
     */
    case class StringValue(value: String) extends Value

    /**
     * A property value that is number-typed.
     * @param value the number value
     */
    case class NumberValue(value: BigDecimal) extends Value

    /**
     * A property value that is boolean-typed.
     * @param value the boolean value
     */
    case class BooleanValue(value: Boolean) extends Value

    /**
     * A property value which has a type JSON object.
     * @param value Sequence of key(String) - value(siren property value) pairs
     */
    case class JsObjectValue(value: Seq[(String, Value)]) extends Value

    /**
     * A property value which has a type JSON array.
     * @param value Sequence of Siren property values
     */
    case class JsArrayValue(value: Seq[Value]) extends Value

    /**
     * The property value that represents a non-existing value.
     */
    case object NullValue extends Value
  }

  /**
   * Representation of a property of a Siren entity.
   * @param name the name of the property
   * @param value the value of the property
   */
  case class Property(name: String, value: Property.Value)

  /**
   * Companion object of the [[Action]] type.
   */
  object Action {

    /**
     * A sum type that represents a method that can be specified for an action.
     * The available methods are a subset of the HTTP verbs.
     */
    sealed trait Method extends Enum.Val[Method]

    /** Companion object of the [[Method]] trait. */
    object Method extends Enum[Method] {
      /** The HTTP GET method */
      case object GET extends Method
      /** The HTTP PUT method */
      case object PUT extends Method
      /** The HTTP POST method */
      case object POST extends Method
      /** The HTTP DELETE method */
      case object DELETE extends Method
      /** The HTTP PATCH method */
      case object PATCH extends Method
      override val values = List(GET, PUT, POST, DELETE, PATCH)
    }

    /**
     * Sum type that encompasses all the supported encodings for actions in Siren.
     */
    sealed trait Encoding extends Enum.Val[Encoding]

    /** Companion object for the [[Encoding]] trait. */
    object Encoding extends Enum[Encoding] {
      /** The application/x-www-form-urlencoded encoding for an action's payload. */
      case object `application/x-www-form-urlencoded` extends Encoding
      /** The application/json encoding for an action's payload. */
      case object `application/json` extends Encoding
      override val values = List(`application/json`, `application/x-www-form-urlencoded`)
    }

    /**
     * Companion object for the [[Field]] type.
     */
    object Field {

      /**
       * A sum type for all possible types of a field.
       * The backticked names mirror the HTML input type attribute values.
       */
      sealed trait Type extends Enum.Val[Type]

      /**
       * Companion object for the [[Type]] trait.
       */
      object Type extends Enum[Type] {
        case object `hidden` extends Type
        case object `text` extends Type
        case object `search` extends Type
        case object `tel` extends Type
        case object `url` extends Type
        case object `email` extends Type
        case object `password` extends Type
        case object `datetime` extends Type
        case object `date` extends Type
        case object `month` extends Type
        case object `week` extends Type
        case object `time` extends Type
        case object `datetime-local` extends Type
        case object `number` extends Type
        case object `range` extends Type
        case object `color` extends Type
        case object `checkbox` extends Type
        case object `radio` extends Type
        case object `file` extends Type
        case object `image` extends Type
        case object `reset` extends Type
        case object `button` extends Type
        override val values = List(
          `hidden`, `text`, `search`, `tel`, `url`, `email`, `password`,
          `datetime`, `date`, `month`, `week`, `time`, `datetime-local`,
          `number`, `range`, `color`, `checkbox`, `radio`, `file`, `image`,
          `reset`, `button`)
      }
    }

    /**
     * A field that specifies part of the payload for an action.
     * @param name the name of the field
     * @param `type` the type of the field
     * @param value the optional value of the field; only makes sense for certain types
     *              of fields
     * @param title an optional textual annotation for the field
     */
    case class Field(
      name: String,
      `type`: Field.Type,
      value: Option[String] = None,
      title: Option[String] = None)
  }

  /**
   * An action that can be specified for an entity in Siren.
   * @param name the name of the action
   * @param href the URL to be used for executing the action
   * @param classes the optional classes of the action
   * @param title optional descriptive text about the action
   * @param method the HTTP method to be used when executing the action
   * @param `type` the encoding to be used for the payload when sending the request to the
   *               URL of this action
   * @param fields the fields specified for this action
   */
  case class Action(
    name: String,
    href: String,
    classes: Option[ImmutableSeq[String]] = None,
    title: Option[String] = None,
    method: Option[Action.Method] = None,
    `type`: Option[Action.Encoding] = None,
    fields: Option[ImmutableSeq[Action.Field]] = None)

  /**
   * A navigational link that can be specified for an entity in Siren.
   * @param rel the relationship of this link to the entity
   * @param href the URL of the linked resource
   * @param title an optional text describing the nature of the link
   */
  case class Link(rel: ImmutableSeq[String], href: String, title: Option[String] = None)

  /**
   * A Siren entity.
   */
  sealed trait Entity {
    /** the optional classes of this entity */
    def classes: Option[ImmutableSeq[String]]
  }

  /**
   * An embedded entity, i.e. a sub entity of a [[Entity.RootEntity]].
   */
  sealed trait EmbeddedEntity extends Entity {
    /** the relationship between the parent entity and this sub entity */
    def rel: ImmutableSeq[String]
  }

  /**
   * A fully represented entity.
   */
  sealed trait EntityRepresentation extends Entity {
    /** the optional properties of this entity */
    def properties: Option[Properties]
    /** the optional actions specified for this entity */
    def actions: Option[ImmutableSeq[Action]]
    /** the optional links specified for this entity */
    def links: Option[ImmutableSeq[Link]]
    /** an optional descriptive text about this entity */
    def title: Option[String]
  }

  /**
   * Companion object for the [[Entity]] trait.
   */
  object Entity {

    /**
     * A root, i.e. top-level, Siren entity.
     */
    case class RootEntity(
      classes: Option[ImmutableSeq[String]] = None,
      properties: Option[Properties] = None,
      entities: Option[ImmutableSeq[EmbeddedEntity]] = None,
      actions: Option[ImmutableSeq[Action]] = None,
      links: Option[ImmutableSeq[Link]] = None,
      title: Option[String] = None) extends EntityRepresentation

    /**
     * A sub entity that is only an embedded link, not a full representation of the
     * sub entity.
     * @param rel the relationship between the parent entity and this sub entity
     * @param href the URL of the linked sub entity
     * @param classes the optional classes of this entity
     */
    case class EmbeddedLink(
      rel: ImmutableSeq[String],
      href: String,
      classes: Option[ImmutableSeq[String]] = None) extends EmbeddedEntity

    /**
     * A full representation of an embedded sub entity.
     */
    case class EmbeddedRepresentation(
      rel: ImmutableSeq[String],
      classes: Option[ImmutableSeq[String]] = None,
      properties: Option[Properties] = None,
      entities: Option[ImmutableSeq[EmbeddedEntity]] = None,
      actions: Option[ImmutableSeq[Action]] = None,
      links: Option[ImmutableSeq[Link]] = None,
      title: Option[String] = None) extends EmbeddedEntity with EntityRepresentation
  }

  /**
   * Base type for enumerations.
   * @tparam A the type of the enumerated values
   */
  trait Enum[A <: Enum.Val[A]] {

    /**
     * All values of this enumeration in order.
     */
    def values: List[A]

    /**
     * Returns the enumeration value with the specified name as a [[Some]], or [[None]] if no
     * enumeration value with that name exists.
     * @param name the name for which a corresponding enumeration value is to be returned
     */
    def forName(name: String): Option[A] = values find (_.name == name)

    /** Extractor form of [[forName]], enabling `case Method(m)`-style matches. */
    def unapply(name: String): Option[A] = forName(name)
  }

  /**
   * Companion object of the [[Enum]] trait.
   */
  object Enum {
    /**
     * Base trait for enumerated values.
     * @tparam A the type of the enumerated values
     */
    trait Val[A] {
      /**
       * the name of the enumeration value (defaults to the case object's toString)
       */
      def name: String = toString
    }
  }
}
yetu/siren-scala
src/main/scala/com/yetu/siren/model/package.scala
Scala
mit
10,650
package es.juanc.katas.mocking

// Entry point for the mocking kata: applies Greeting to a FileOutput and
// sends it a single message ("eoo!!!" is throwaway sample text).
object Main extends App {
  Greeting {
    new FileOutput
  }.message("eoo!!!")
}
juancsch/katas
scala/SimpleKatas/src/test/scala/es/juanc/katas/mocking/Main.scala
Scala
unlicense
111
/*
 * Copyright (c) 2012 Roberto Tyley
 *
 * This file is part of 'BFG Repo-Cleaner' - a tool for removing large
 * or troublesome blobs from Git repositories.
 *
 * BFG Repo-Cleaner is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * BFG Repo-Cleaner is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see http://www.gnu.org/licenses/ .
 */

package com.madgag.git.bfg.cleaner

import com.madgag.git._
import com.madgag.git.bfg.GitUtil._
import com.madgag.git.bfg.cleaner.ObjectIdSubstitutor._
import com.madgag.git.bfg.cleaner.protection.ProtectedObjectCensus
import com.madgag.git.bfg.model.{FileName, RegularFile, TreeBlobEntry}
import com.madgag.git.test._
import com.madgag.textmatching._
import org.apache.commons.io.FilenameUtils
import org.eclipse.jgit.lib.ObjectId
import org.eclipse.jgit.revwalk.RevWalk
import org.eclipse.jgit.util.RawParseUtils
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

import java.io.StringReader
import java.net.URLEncoder
import java.util.Properties
import java.util.regex.Pattern._

import scala.PartialFunction.condOpt
import scala.jdk.CollectionConverters._

/**
 * End-to-end tests that run RepoRewriter over small fixture repositories
 * (zipped bare repos under /sample-repos) and assert on the rewritten
 * history: blob removal, commit-message cleaning, password redaction and
 * charset-preserving text replacement.
 */
class RepoRewriteSpec extends AnyFlatSpec with Matchers {

  "Git repo" should "not explode" in {
    implicit val repo = unpackRepo("/sample-repos/example.git.zip")
    implicit val reader = repo.newObjectReader

    hasBeenProcessedByBFGBefore(repo) shouldBe false

    val blobsToRemove = Set(abbrId("06d740"))
    RepoRewriter.rewrite(repo, ObjectIdCleaner.Config(ProtectedObjectCensus(Set("HEAD")), OldIdsPublic, Seq(FormerCommitFooter), treeBlobsCleaners = Seq(new BlobRemover(blobsToRemove))))

    val allCommits = repo.git.log.all.call.asScala.toSeq

    // Collect any commits that can still reach a blob we asked to remove;
    // after the rewrite there must be none.
    val unwantedBlobsByCommit = allCommits.flatMap(commit => {
      val unwantedBlobs = allBlobsReachableFrom(commit).intersect(blobsToRemove).map(_.shortName)
      if (!unwantedBlobs.isEmpty) Some(commit.shortName -> unwantedBlobs) else None
    }).toMap

    unwantedBlobsByCommit shouldBe empty

    // Rewritten commits carry a footer pointing at their former id.
    allCommits.head.getFullMessage should include(FormerCommitFooter.Key)

    hasBeenProcessedByBFGBefore(repo) shouldBe true
  }

  "Repo rewriter" should "clean commit messages even on clean branches, because commit messages may reference commits from dirty ones" in {
    implicit val repo = unpackRepo("/sample-repos/taleOfTwoBranches.git.zip")
    implicit val revWalk = new RevWalk(repo)

    def commitMessageForRev(rev: String) = repo.resolve(rev).asRevCommit.getFullMessage

    // The 'pure' branch initially references a commit id from the dirty branch.
    commitMessageForRev("pure") should include("6e76960ede2addbbe7e")

    RepoRewriter.rewrite(repo, ObjectIdCleaner.Config(ProtectedObjectCensus.None, OldIdsPrivate, Seq(new CommitMessageObjectIdsUpdater(OldIdsPrivate)), treeBlobsCleaners = Seq(new FileDeleter(Literal("sin")))))

    // After the rewrite the stale id must no longer appear in the message.
    commitMessageForRev("pure") should not include "6e76960ede2addbbe7e"
  }

  it should "remove passwords" in {
    implicit val repo = unpackRepo("/sample-repos/example.git.zip")
    implicit val (revWalk, reader) = repo.singleThreadedReaderTuple

    // Parses java.util.Properties out of a file's textual contents.
    def propertiesIn(contents: String) = {
      val p = new Properties()
      p.load(new StringReader(contents))
      p
    }

    // Reads folder/secret-passwords.txt as it exists in the given commit.
    def passwordFileContentsIn(id: ObjectId) = {
      val cleanedPasswordFile = repo.resolve(id.name + ":folder/secret-passwords.txt")
      RawParseUtils.decode(reader.open(cleanedPasswordFile).getCachedBytes)
    }

    // Extractor for a file name's extension.
    object FileExt {
      def unapply(fileName: String) = Option(FilenameUtils.getExtension(fileName))
    }

    // Redacts everything after 'password=' in txt and scala files.
    val blobTextModifier = new BlobTextModifier {
      override def lineCleanerFor(entry: TreeBlobEntry) = condOpt(entry.filename.string) {
        case FileExt("txt") | FileExt("scala") => """(\\.password=).*""".r --> (_.group(1) + "*** PASSWORD ***")
      }

      val threadLocalObjectDBResources = repo.getObjectDatabase.threadLocalResources
    }
    val cleanedObjectMap = RepoRewriter.rewrite(repo, ObjectIdCleaner.Config(ProtectedObjectCensus(Set("HEAD")), treeBlobsCleaners = Seq(blobTextModifier)))

    val oldCommitContainingPasswords = abbrId("37bcc89")
    val cleanedCommitWithPasswordsRemoved = cleanedObjectMap(oldCommitContainingPasswords).asRevCommit

    val originalContents = passwordFileContentsIn(oldCommitContainingPasswords)
    val cleanedContents = passwordFileContentsIn(cleanedCommitWithPasswordsRemoved)

    // Keys and unrelated values survive; only the secret value is redacted.
    cleanedContents should (include("science") and include("database.password="))
    originalContents should include("correcthorse")
    cleanedContents should not include "correcthorse"

    // Redaction must not add or drop properties.
    propertiesIn(cleanedContents).asScala.toMap should have size propertiesIn(originalContents).size
  }

  // Runs a text replacement over the encodings fixture repo and checks the
  // cleaned *-ORIGINAL file matches the pre-committed *-MODIFIED expectation,
  // byte for byte (which also checks the original encoding was preserved).
  def textReplacementOf(parentPath: String, fileNamePrefix: String, fileNamePostfix: String, before: String, after: String) = {
    implicit val repo = unpackRepo("/sample-repos/encodings.git.zip")
    val beforeAndAfter = Seq(before, after).map(URLEncoder.encode(_, "UTF-8")).mkString("-")
    val filename = s"$fileNamePrefix-ORIGINAL.$fileNamePostfix"
    val beforeFile = s"$parentPath/$filename"
    val afterFile = s"$parentPath/$fileNamePrefix-MODIFIED-$beforeAndAfter.$fileNamePostfix"

    // val dirtyFile = repo.resolve(s"master:$beforeFile")

    val blobTextModifier = new BlobTextModifier {
      def lineCleanerFor(entry: TreeBlobEntry) = Some(quote(before).r --> (_ => after))

      val threadLocalObjectDBResources = repo.getObjectDatabase.threadLocalResources
    }
    RepoRewriter.rewrite(repo, ObjectIdCleaner.Config(ProtectedObjectCensus.None, treeBlobsCleaners = Seq(blobTextModifier)))

    val cleanedFile = repo.resolve(s"master:$beforeFile")
    val expectedFile = repo.resolve(s"master:$afterFile")
    expectedFile should not be null

    implicit val threadLocalObjectReader = repo.getObjectDatabase.threadLocalResources.reader()

    // val dirty = dirtyFile.open.getBytes
    val cleaned = cleanedFile.open.getBytes
    val expected = expectedFile.open.getBytes

    // val dirtyStr = new String(dirty)
    val cleanedStr = new String(cleaned)
    val expectedStr = new String(expected)

    cleanedStr shouldBe expectedStr

    cleanedFile shouldBe expectedFile
  }

  "Text modifier" should "handle the short UTF-8" in textReplacementOf("UTF-8", "bushhidthefacts", "txt", "facts", "toffee")

  it should "handle the long UTF-8" in textReplacementOf("UTF-8", "big", "scala", "good", "blessed")

  it should "handle ASCII in SHIFT JIS" in textReplacementOf("SHIFT-JIS", "japanese", "txt", "EUC", "BOOM")

  it should "handle ASCII in ISO-8859-1" in textReplacementOf("ISO-8859-1", "laparabla", "txt", "palpitando", "buscando")

  it should "handle converting Windows newlines to Unix" in textReplacementOf("newlines", "windows", "txt", "\\r\\n", "\\n")
}
rtyley/bfg-repo-cleaner
bfg-library/src/test/scala/com/madgag/git/bfg/cleaner/RepoRewriteSpec.scala
Scala
gpl-3.0
7,179
/*
 * Copyright (c) 2013 original authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.eigengo.monitor.example.spray

/**
 * Placeholder entry point for the Spray monitoring example.
 *
 * Run with the AspectJ load-time weaver attached, e.g.:
 *   -javaagent:$HOME/.m2/repository/org/aspectj/aspectjweaver/1.7.3/aspectjweaver-1.7.3.jar
 * (on the original author's machine:
 *   -javaagent:/Users/janmachacek/.m2/repository/org/aspectj/aspectjweaver/1.7.3/aspectjweaver-1.7.3.jar)
 */
object Main {
  // Explicit main instead of the App trait; the body is intentionally empty.
  def main(args: Array[String]): Unit = ()
}
eigengo/monitor
example-spray/src/main/scala/org/eigengo/monitor/example/spray/Main.scala
Scala
apache-2.0
891
package org.daydev.scala.bb.rewrite.config

import org.scalatest.FunSuite
import org.daydev.scala.bb.model.BbTag
import scala.xml.Text
import scala.xml.NodeSeq

/**
 * Behavioural tests for HtmlRewriteConfig: BB-code tags are rewritten to
 * HTML via a rule map, and the configuration can be extended, edited and
 * shrunk at runtime.
 */
class HtmlRewriteConfigSpec extends FunSuite {

  // Minimal rule map: wrap already-rewritten content in <a> / <b> elements.
  val dummyRuleMap = Map[String, RewriteRule[NodeSeq]](
    "a" -> { (_, content) => <a>{ content }</a> },
    "b" -> { (_, content) => <b>{ content }</b> }
  )

  val config = HtmlRewriteConfig(dummyRuleMap)

  test("html rewrite config should rewrite unknown tag in it's original form") {
    val tag = BbTag("a", "text")
    val tagA = BbTag("a", "b", "text")
    // The default rule re-emits the original BB syntax, including the
    // attribute form [a=b]...[/a].
    assert(config.defaultRule(tag, Seq(Text("text"))).mkString === "[a]text[/a]")
    assert(config.defaultRule(tagA, Seq(Text("text"))).mkString === "[a=b]text[/a]")
  }

  test("html rewrite config should be exstensible") {
    val html = <c>text</c>
    // addRule introduces a rule for a previously unknown tag.
    val extendedConfig = config.addRule("c")((_, _) => html)
    assert(extendedConfig("c")(BbTag("c"), Nil).mkString === html.toString)
  }

  test("html rewrite config should be editable") {
    val html = <c>text</c>
    // updateRule replaces the existing rule registered for "a".
    val updatedConfig = config.updateRule("a")((_, _) => <c>text</c>)
    assert(updatedConfig("a")(BbTag("a"), Nil).mkString === html.toString)
  }

  test("html rewrite config should be able to drop rule") {
    // Once dropped, the tag falls back to the default (original BB) rendering.
    val restrictedConfig = config.dropRule("a")
    assert(restrictedConfig("a")(BbTag("a"), Seq(Text("text"))).mkString === "[a]text[/a]")
  }
}
daydev/bbcode-scala
src/test/scala/org/daydev/scala/bb/rewrite/config/HtmlRewriteConfigSpec.scala
Scala
mit
1,421
import bio._
import org.scalatest.FlatSpec
import org.scalatest.matchers.ShouldMatchers

package bio.test {

  import bio.DNA._

  /**
   * Checks that SparseAlignment.removeSparseColumns drops gap-dominated
   * alignment columns and reports the removed column indices.
   * NOTE(review): the exact meaning of the numeric threshold argument is
   * inferred from the expected indices below — confirm against the
   * SparseAlignment implementation.
   */
  class RemoveSparseSpec extends FlatSpec with ShouldMatchers {
    "An alignment" should "remove sparse columns" in {
      // this also tests removeSparseRows and transpose
      val s1 = new GappedSequence("agc--taacg---")
      val s2 = new GappedSequence("agc---aaca---")
      val s3 = new GappedSequence("agc----aca---")
      // The alignment as a list of rows.
      val m = List(s1.toList, s2.toList, s3.toList)
      // Threshold 1: the all-gap columns (3, 4) and trailing gaps go away.
      val (m1, log1) = SparseAlignment.removeSparseColumns(m, 1)
      log1 should equal (List(3, 4, 10, 11, 12))
      m1.head.mkString should equal ("agctaacg")
      // Threshold 2 additionally removes column 5 (only one non-gap symbol).
      val (m2, log2) = SparseAlignment.removeSparseColumns(m, 2)
      log2 should equal (List(3, 4, 5, 10, 11, 12))
      m2.head.mkString should equal ("agcaacg")
      m2(1).mkString should equal ("agcaaca")
    }
  }
}
shamim8888/bioscala
src/test/scala/bio/alignment/actions/sparsealignment_spec.scala
Scala
bsd-2-clause
881
package com.aslepakurov.spark.maxnumber

import com.aslepakurov.spark.common.CommonSparkContext
import com.aslepakurov.spark.maxnumber.MaxNumberService._

import scala.util.control.NonFatal

/**
 * Spark job that reads numbers from an input file, finds the maximum and
 * writes it back out via MaxNumberService.
 */
object MaxNumberJob {

  def main(args: Array[String]): Unit = {
    // Build and configure the job context from the CLI arguments.
    val context = new MaxNumberContext(args.toList)
      .builder
      .initContext
      .initSQL
      .enableS3Support
      .disableSuccessFile
      .withDriverMemory("1g")
      .withOverhead("1g")
      .withExecutorMemory("1g")
      .withSerializer(CommonSparkContext.DEFAULT_SERIALIZER)
      .get
      .asInstanceOf[MaxNumberContext]

    try {
      context.validateArgs()

      //1. Read numbers from input file
      val numbers = readNumbers(context)

      //2. Get max number
      val maxNumber = getMaxNumber(context, numbers)

      //3. Output max number
      flushMaxNumber(context, maxNumber)
    } catch {
      // Fix: intercept only recoverable errors. The previous version caught
      // Throwable, which would also swallow-and-wrap fatal JVM errors such
      // as OutOfMemoryError; those now propagate unchanged.
      case NonFatal(e) =>
        println(e.getMessage)
        throw new RuntimeException(e)
    } finally {
      // Always release the context, whatever happened above.
      context.close()
    }
  }
}
aslepakurov/common-spark-template
src/main/java/com/aslepakurov/spark/maxnumber/MaxNumberJob.scala
Scala
mit
996
package mimir.algebra

import java.io.File

import org.specs2.mutable._
import org.specs2.specification._

import mimir.parser._
import mimir.algebra._
import mimir.algebra.function.FunctionRegistry
import mimir.optimizer._
import mimir.optimizer.expression._
import mimir.test._

/**
 * Regression test for date handling on the query backend: adding an
 * ORDER BY over a date column must not change the returned tuples.
 */
object DateSpec extends SQLTestSpecification("Dates") with BeforeAll {

  // Load the CSV fixture once before any example runs.
  def beforeAll() {
    loadCSV("test/data/DetectSeriesTest2.csv")
  }

  "Dates on the query backend" should {
    "Not be messed up by order-by" >> {
      // Same selection with and without ORDER BY; the tuples must agree.
      // NOTE(review): direct equality of the tuple lists tolerates
      // reordering only if the predicate yields a single row — confirm
      // against the fixture data.
      val noOrderBy = db.query(db.sqlToRA(MimirSQL.Select("SELECT DOB FROM DetectSeriesTest2 WHERE Rank = 1;"))) { _.tuples }
      val withOrderBy = db.query(db.sqlToRA(MimirSQL.Select("SELECT DOB FROM DetectSeriesTest2 WHERE Rank = 1 ORDER BY DOB;"))) { _.tuples }
      withOrderBy must be equalTo(noOrderBy)
    }
  }
}
UBOdin/mimir
src/test/scala/mimir/algebra/DateSpec.scala
Scala
apache-2.0
854
/**
 * MergeSort.scala --- Recursive merge sort with pattern matching
 *
 * Copyright (C) 2014 Aaron S. Hawley
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Commentary:
 *
 * Based on Sedgewick, Robert. Algorithms in C, Parts 1-4:
 * Fundamentals, Data Structures, Sorting, Searching, 3rd
 * Edition. Addison-Wesley. 1998. Page 262.
 */
package org.ninthfloor.users.ashawley.algs.sort

import scala.annotation.tailrec
import scala.util.Try

import org.ninthfloor.users.ashawley.algs.util.WordReader

/**
 * Singleton object providing a merge sort method.
 *
 * == Integers ==
 * Example interactive session:
 * {{{
 * scala> MergeSort.sort(List(2, 3, 1))
 * res1: List[Int] = List(1, 2, 3)
 * }}}
 *
 * == Command-line ==
 * Compilation:
 * {{{
 * $ scalac ./org/ninthfloor/users/ashawley/algs/sort/MergeSort.scala
 * }}}
 *
 * Usage:
 * {{{
 * $ scala org.ninthfloor.users.ashawley.algs.sort.MergeSort < sort.txt
 * }}}
 */
object MergeSort extends WordReader {

  /**
   * Sort list of integers `a` in ascending order.
   *
   * Fix: the original `merge` recursed once per output element in non-tail
   * position, so sorting large lists overflowed the stack. The merge is now
   * tail-recursive with an accumulator; ties still take the element of the
   * second list first, exactly as before, so merge order is unchanged.
   *
   * @return Sorted list
   */
  def sort(a: List[Int]): List[Int] = {
    // Merge two sorted lists into one sorted list (tail-recursively).
    @tailrec
    def merge(a: List[Int], b: List[Int], acc: List[Int]): List[Int] = (a, b) match {
      case (Nil, rest) => acc reverse_::: rest
      case (rest, Nil) => acc reverse_::: rest
      case (x :: xs, y :: ys) =>
        if (x < y) merge(xs, y :: ys, x :: acc)
        else merge(x :: xs, ys, y :: acc)
    }
    a match {
      case Nil => Nil
      case _ :: Nil => a
      case _ => {
        // Split in half, sort each half, merge the sorted halves.
        val half = a.length / 2
        val (a1, a2) = a.splitAt(half)
        merge(sort(a1), sort(a2), Nil)
      }
    }
  }

  /**
   * Sort numbers on standard input, printing one per line.
   * Words that do not parse as integers are silently skipped.
   * (The unused `codec` val from the original was removed.)
   */
  def main(args: Array[String]): Unit = {
    import scala.io.Source
    val input = Source.fromInputStream(System.in)
    val words = new WordIterator(input)
    val nums = {
      for {
        word <- words
        num <- Try(word.trim.toInt).toOption
      } yield (num)
    }
    sort(nums.toList).foreach(println)
  }
}
ashawley/algs
src/main/scala/org/ninthfloor/users/ashawley/algs/sort/MergeSort.scala
Scala
gpl-3.0
2,559
package knot.core.stream.dsl import java.nio.file.StandardOpenOption.{CREATE, TRUNCATE_EXISTING, WRITE} import java.nio.file.{OpenOption, Path} import knot.core.Done import knot.core.stream.graphs.{GraphMaterializer, SourceGraph} import knot.core.stream.{Flow, Sink} import knot.data.ByteNode trait ByteNodeSourceSyntax { implicit final def toByteNodeSourceOps[M](source: SourceGraph[ByteNode, M]): ByteNodeSourceOps[M] = new ByteNodeSourceOps(source) } final class ByteNodeSourceOps[M](val source: SourceGraph[ByteNode, M]) extends AnyVal { def delimiter(delimiter: String, maxBytes: Int, allowTruncation: Boolean): SourceGraph[ByteNode, M] = source.to(Flow.delimiter(delimiter, maxBytes, allowTruncation)) def toFile(path: Path, options: Set[OpenOption] = Set(WRITE, CREATE, TRUNCATE_EXISTING)): GraphMaterializer[Done] = source.materializeWithSink(Sink.file(path, options)) }
defvar/knot
knot-core/src/main/scala/knot/core/stream/dsl/ByteNodeSourceSyntax.scala
Scala
mit
899
package scalajsclient.jswidgets

import org.scalajs.dom
import scala.scalajs.js
import scala.scalajs.js.timers._
import scala.scalajs.js.Dynamic.global
import org.singlespaced.d3js.d3
import org.singlespaced.d3js.Ops._

import scalajsclient._
import scalajsclient.ScalaJSUtils._

/**
 * A combo box (drop-down selector) widget rendered via d3 DOM
 * manipulation on top of the JSSubWidget infrastructure.
 *
 * Two visual states are handled by `draw`: closed (a selection label
 * plus an open button) and open (one clickable row per option).
 *
 * @param setid            DOM id prefix for the widget's elements
 * @param options          the selectable items, in display order
 * @param selected         initially selected item; defaults to the
 *                         first option when absent and options exist
 * @param width            total widget width (logical pixels)
 * @param height           height of the closed widget / of each row
 * @param selectedcallback invoked with the chosen item on selection
 */
class JSCombo(
  setid: String,
  options: List[String],
  selected: Option[String] = None,
  width: Double = 150.0,
  height: Double = 25.0,
  selectedcallback: (String) => Unit = (s: String) => {}
) extends JSSubWidget {
  id = setid

  override def TOTAL_WIDTH = width
  override def TOTAL_HEIGHT = height

  // Currently selected item; null until an initial value is chosen
  // below.  NOTE(review): getselected guards against the null case.
  var current: String = null

  // Initial selection: explicit `selected` wins, otherwise the first
  // option; stays null only when options is empty and selected is None.
  if (!selected.isEmpty) current = selected.get
  else if (options.length > 0) current = options(0)

  fixedratio = true

  /** Returns the current selection, falling back to the initial
    * choice (or "" when there is nothing to select) if `current`
    * was never set. */
  def getselected: String = {
    if (current == null)
      if (selected.isEmpty)
        if (options.length > 0) options(0) else ""
      else selected.get
    else current
  }

  /** Programmatically selects `sel`, closes the drop-down and
    * redraws.  Does NOT invoke selectedcallback (only user clicks do). */
  def setselected(sel: String) {
    current = sel
    open = false
    draw
  }

  // DOM id of the label showing the current selection (closed state).
  def labelid = id + "label"

  // Geometry: margins and the square open-button are derived from the
  // row height; the selection area takes the remaining width.
  val buttonmargin = height / 12.5
  val buttonheight = height - buttonmargin * 2
  val buttonwidth = buttonheight
  def selwidth = width - buttonwidth - 3 * buttonmargin
  def buttonid = id + "button"

  // Whether the drop-down list is currently expanded.
  var open = false

  def optwidth = selwidth
  // DOM id of the i-th option row.
  def optid(i: Int) = id + "opt" + i

  // Toggles the drop-down when the open/close button is pressed.
  val buttonmousedownhandler: MouseHandler = (e: dom.MouseEvent) => {
    open = !open
    draw
  }

  // Closes the drop-down when the pointer truly leaves the widget
  // (mouseout also fires when moving between child elements, hence
  // the containment check via isincrid).
  val openmouseouthandler: MouseHandler = (e: dom.MouseEvent) => {
    if (!isincrid(e.clientX, e.clientY, contentid)) {
      open = false
      draw
    }
  }

  // Intentionally a no-op: highlight is driven solely by mouseenter
  // of the next row (see optmouseinhandler / coloropts).
  val optmouseouthandler: MouseHandler = (e: dom.MouseEvent) => {
  }

  /** Repaints every option row, highlighting index `seli` with the
    * selection color and all others with the hover background. */
  def coloropts(seli: Int) {
    var i = 0
    for (option <- options) {
      d3.select("#" + optid(i)).
        style("background-color", if (i == seli) selcolor else "#dfffaf")
      i += 1
    }
  }

  // Highlights row `seli` while the pointer is over it.
  def optmouseinhandler(seli: Int): MouseHandler = (e: dom.MouseEvent) => {
    coloropts(seli)
  }

  // Commits the clicked option, closes the list, notifies the
  // caller, then redraws in the closed state.
  def optmousedownhandler(seli: Int): MouseHandler = (e: dom.MouseEvent) => {
    current = options(seli)
    open = false
    selectedcallback(current)
    draw
  }

  /** Index of `sel` within options, or -1 when absent. */
  def getselectedindex(sel: String): Int = {
    options.indexOf(sel)
  }

  override def render {
    renderhelper
  }

  // Palette: selection highlight, button (idle/pressed) and the
  // option-list background (idle/pressed).
  val selcolor = "#bfbfff"
  val buttoncolor = "#bfffbf"
  val buttoncolorpressed = "#9fff9f"
  val optscolor = "#ffffaf"
  val optscolorpressed = "#ffff9f"

  // Font metrics derived from the button height.
  def FONT_MARGIN = buttonheight / 8.0
  def FONT_SIZE = FONT_MARGIN * 6.0

  val border = buttonheight / 15.0

  /**
   * (Re)builds the widget's DOM.  The open branch renders the full
   * option list with an opening scale animation; the closed branch
   * renders the selection label plus the drop-down button.  Style
   * calls are order-sensitive; do not reorder.
   */
  override def draw {
    parentdown
    initdraw
    if (open) {
      parentup
      // Expanded container: one row of `height` per option, raised
      // z-index so it floats above sibling widgets.
      d3content.
        style("width", scaledpx(width)).
        style("height", scaledpx(options.length * height)).
        style("background-color", optscolorpressed).
        style("position", "relative").
        style("opacity", "1.0").
        style("z-index", "55")
      // Pressed-state close button in the top-right corner.
      d3content.append("div").attr("id", buttonid).
        style("width", scaledpx(buttonwidth - 4 * buttonmargin)).
        style("height", scaledpx(buttonheight - 4 * buttonmargin)).
        style("position", "absolute").
        style("left", scaledpx(selwidth + 5 * buttonmargin - border)).
        style("top", scaledpx(2 * buttonmargin)).
        style("border-style", "solid").
        style("border-width", scaledpx(border)).
        style("cursor", "pointer").
        style("background-color", buttoncolorpressed).
        style("opacity", "1.0").
        style("z-index", "55")
      dgebid(buttonid).onmousedown = buttonmousedownhandler
      dgebid(contentid).onmouseout = openmouseouthandler
      // One absolutely-positioned, centered, clickable row per option.
      var i = 0
      for (option <- options) {
        d3content.append("div").attr("id", optid(i)).
          style("width", scaledpx(optwidth)).
          style("height", scaledpx(buttonheight)).
          style("position", "absolute").
          style("top", scaledpx(i * height + buttonmargin)).
          style("left", scaledpx(buttonmargin)).
          style("opacity", "1.0").
          style("z-index", "60").
          append("label").attr("id", id + "label" + i).
          style("position", "absolute").
          style("top", scaledpx(FONT_MARGIN)).
          style("cursor", "pointer").
          style("font-size", scaledpx(FONT_SIZE)).
          style("z-index", "55").
          html(options(i))
        // Center the label horizontally after it has been laid out
        // (its rendered width is only known once it is in the DOM).
        val lid = id + "label" + i
        d3.select("#" + lid).
          style("left", px(aligncenter(scaled(selwidth), gbcrwidthbyid(lid))))
        dgebid(optid(i)).onmouseout = optmouseouthandler
        dgebid(optid(i)).onmouseenter = optmouseinhandler(i)
        dgebid(optid(i)).onmousedown = optmousedownhandler(i)
        i += 1
      }
      // Pre-highlight the row matching the current selection.
      coloropts(getselectedindex(current))
      // Opening animation: start shrunk, transition to full size.
      d3content.style("transform", "scale(0.3,0.3)")
      d3content.transition().
        style("transform", "scale(1,1)")
    } else {
      // Closed container at base z-index.
      d3content.
        style("width", scaledpx(width)).
        style("height", scaledpx(height)).
        style("position", "relative").
        style("background-color", optscolor).
        style("opacity", "0.99").
        style("z-index", "50")
      // Idle drop-down button with a grey border.
      d3content.append("div").attr("id", buttonid).
        style("width", scaledpx(buttonwidth - buttonmargin - border)).
        style("height", scaledpx(buttonheight - buttonmargin - border)).
        style("position", "absolute").
        style("left", scaledpx(selwidth + 2 * buttonmargin + border)).
        style("top", scaledpx(buttonmargin)).
        style("cursor", "pointer").
        style("background-color", buttoncolor).
        style("border-style", "solid").
        style("border-color", "#9f9f9f").
        style("border-width", scaledpx(border)).
        style("opacity", "0.99").
        style("z-index", "50")
      // Selection area showing the current choice.
      d3content.append("div").attr("id", id + "sel").
        style("width", scaledpx(selwidth)).
        style("height", scaledpx(buttonheight)).
        style("top", scaledpx(buttonmargin)).
        style("left", scaledpx(buttonmargin)).
        style("position", "relative").
        style("background-color", selcolor).
        style("z-index", "50").
        append("label").attr("id", id + "label").
        style("position", "absolute").
        style("top", scaledpx(FONT_MARGIN)).
        style("cursor", "pointer").
        style("font-size", scaledpx(FONT_SIZE)).
        style("z-index", "50").
        html(getselected)
      // Center the selection label once its width is measurable.
      d3.select("#" + id + "label").
        style("left", px(aligncenter(scaled(selwidth), gbcrwidthbyid(labelid))))
      // Clicking either the button or the selection area opens the list.
      dgebid(buttonid).onmousedown = buttonmousedownhandler
      dgebid(id + "sel").onmousedown = buttonmousedownhandler
    }
  }
}
serversideapps/silhmojs
client/src/main/scala/scalajsclient/jswidgets/jscombo.scala
Scala
apache-2.0
6,612
package nest.sparkle.loader.kafka

import scala.concurrent.duration._

import spray.http.StatusCodes._
import spray.http.MediaTypes.`application/json`
import spray.testkit.ScalatestRouteTest

import akka.actor.ActorRefFactory

import spray.json._

import nest.sparkle.measure.MeasurementToTsvFile
import nest.sparkle.util.kafka.{KafkaTestSuite, KafkaBroker, KafkaTopic, KafkaGroupOffsets}
import nest.sparkle.util.kafka.KafkaJsonProtocol._

/**
 * Integration tests for the Kafka loader admin HTTP routes
 * (/brokers, /topics, /groups, /offsets), exercised via spray's
 * ScalatestRouteTest against `allRoutes`.
 *
 * NOTE(review): expected values (1 broker on localhost:9092,
 * NumPartitions partitions, offsets of 2, latest of 5) encode the
 * fixture state set up by KafkaTestSuite / KafkaTestConfig -- verify
 * against that setup, it is not visible here.
 */
class TestKafkaLoaderBaseAdminService
  extends KafkaTestSuite
  with ScalatestRouteTest
  with KafkaTestConfig
  with KafkaLoaderBaseAdminService
{
  override def actorRefFactory: ActorRefFactory = system
  implicit def executionContext = system.dispatcher
  // Measurements are written to a TSV file in /tmp; closed in afterAll.
  implicit override lazy val measurements = new MeasurementToTsvFile("/tmp/kafka-loader-tests.tsv")(executionContext)
  // SCALA why doesn't implicit above catch this?

  // Some of the requests currently take a very long time
  implicit val routeTestTimeout = RouteTestTimeout(1.minute)

  // Release the measurements file handle after the suite finishes.
  override def afterAll(): Unit = {
    super.afterAll()
    measurements.close()
  }

  /** validate the response is JSON and convert to T */
  protected def convertJsonResponse[T: JsonFormat]: T = {
    assert(handled, "request was not handled")
    assert(status == OK, "response not OK")
    mediaType shouldBe `application/json`

    val json = body.asString
    json.length > 0 shouldBe true

    val ast = json.parseJson
    ast.convertTo[T]
  }

  test("The list of brokers is correct") {
    Get("/brokers") ~> allRoutes ~> check {
      val brokers = convertJsonResponse[Seq[KafkaBroker]]
      // Single-broker test cluster on the default Kafka port.
      brokers.length shouldBe 1
      val broker = brokers(0)
      broker.id shouldBe 0
      broker.host shouldBe "localhost"
      broker.port shouldBe 9092
    }
  }

  test("The list of topics includes the test topic") {
    Get("/topics") ~> allRoutes ~> check {
      val topics = convertJsonResponse[Map[String,KafkaTopic]]
      topics should contain key TopicName
      val topic = topics(TopicName)
      topic.partitions.length shouldBe NumPartitions
      // Every partition is led by (and replicated only on) broker 0;
      // earliest/latest offsets reflect the messages seeded per
      // partition by the test fixture.
      topic.partitions.zipWithIndex foreach {
        case (partition, i) =>
          partition.id shouldBe i
          partition.leader shouldBe 0
          partition.brokerIds.length shouldBe 1
          partition.brokerIds(0) shouldBe 0
          partition.earliest should contain (0)
          partition.latest should contain (5)
      }
    }
  }

  test("The list of consumer groups includes the test group") {
    Get("/groups") ~> allRoutes ~> check {
      val groups = convertJsonResponse[Seq[String]]
      assert(groups.contains(ConsumerGroup),s"$ConsumerGroup not found in $groups")
    }
  }

  test("The list of consumer group topic offsets is correct") {
    Get("/offsets") ~> allRoutes ~> check {
      val groups = convertJsonResponse[Seq[KafkaGroupOffsets]]
      // NOTE(review): the condition requires MORE THAN ONE group but
      // the failure message says "no consumer groups found" -- either
      // the message or the threshold looks wrong; confirm intent.
      assert(groups.length > 1, "no consumer groups found")
      val optGroup = groups.find(_.group.contentEquals(ConsumerGroup))
      optGroup match {
        case Some(group) =>
          // The test group consumed only the test topic, and is
          // expected to have committed offset 2 on every partition.
          assert(group.topics.size == 1, "not one topic in the consumer group")
          assert(group.topics.contains(TopicName), "topic not in the consumer group")
          val topic = group.topics(TopicName)
          assert(topic.partitions.length == NumPartitions, s"${topic.topic} does not have $NumPartitions partitions")
          topic.partitions.zipWithIndex foreach {
            case (offset,i) =>
              assert(offset.partition == i, s"${topic.topic}:$i partition id doesn't equal index")
              assert(offset.offset == Some(2), s"${topic.topic}:$i partition offset doesn't equal 2")
          }
        case _ =>
          fail(s"consumer group $ConsumerGroup not found")
      }
    }
  }
}
mighdoll/sparkle
kafka/src/it/scala/nest/sparkle/loader/kafka/TestKafkaLoaderBaseAdminService.scala
Scala
apache-2.0
3,723
package org.http4s

import scala.collection.generic.CanBuildFrom
import scala.collection.immutable.{BitSet, IndexedSeq}
import scala.collection.mutable.ListBuffer
import scala.collection.{ IndexedSeqOptimized, mutable }

import cats.implicits._
import org.http4s.Query._
import org.http4s.parser.QueryParser
import org.http4s.util.{UrlCodingUtils, Writer, Renderable}
import org.http4s.internal.parboiled2.CharPredicate

/** Collection representation of a query string
  *
  * It is a indexed sequence of key and maybe a value pairs which maps
  * precisely to a query string, modulo the identity of separators.
  *
  * When rendered, the resulting `String` will have the pairs separated
  * by '&' while the key is separated from the value with '='
  */
final class Query private(pairs: Vector[KeyValue])
  extends IndexedSeq[KeyValue]
  with IndexedSeqOptimized[KeyValue, Query]
  with QueryOps
  with Renderable
{
  override def apply(idx: Int): KeyValue = pairs(idx)

  override def length: Int = pairs.length

  override def slice(from: Int, until: Int): Query = new Query(pairs.slice(from, until))

  // Fast path: when the builder factory is our own (i.e. the result
  // is another Query), wrap the updated vector directly instead of
  // going through the generic builder.  The cast is safe because
  // Query.cbf only ever builds from KeyValue elements.
  override def +:[B >: KeyValue, That](elem: B)(implicit bf: CanBuildFrom[Query, B, That]): That = {
    if (bf eq Query.cbf) new Query((elem +: pairs).asInstanceOf[Vector[KeyValue]]).asInstanceOf[That]
    else super.+:(elem)
  }

  // Same fast path as +: above, for append.
  override def :+[B >: KeyValue, That](elem: B)(implicit bf: CanBuildFrom[Query, B, That]): That = {
    if (bf eq Query.cbf) new Query((pairs :+ elem).asInstanceOf[Vector[KeyValue]]).asInstanceOf[That]
    else super.:+(elem)
  }

  override def toVector: Vector[(String, Option[String])] = pairs

  /** Render the Query as a `String`.
    *
    * Pairs are separated by '&' and keys are separated from values by '='
    */
  override def render(writer: Writer): writer.type = {
    var first = true
    // Keys and values are percent-encoded, except characters in
    // NoEncode ('?' and '/' stay literal per RFC 3986 Β§3.4).
    def encode(s: String) =
      UrlCodingUtils.urlEncode(s, spaceIsPlus = false, toSkip = NoEncode)
    pairs.foreach {
      case (n, None) =>
        if (!first) writer.append('&')
        else first = false
        writer.append(encode(n))

      case (n, Some(v)) =>
        if (!first) writer.append('&')
        else first = false
        writer.append(encode(n))
          .append("=")
          .append(encode(v))
    }
    writer
  }

  /** Map[String, String] representation of the [[Query]]
    *
    * If multiple values exist for a key, the first is returned. If
    * none exist, the empty `String` "" is returned.
    */
  def params: Map[String, String] = new ParamsView(multiParams)

  /** Map[String, Seq[String] ] representation of the [[Query]]
    *
    * Params are represented as a `Seq[String]` and may be empty.
    */
  lazy val multiParams: Map[String, Seq[String]] = {
    if (isEmpty) Map.empty[String, Seq[String]]
    else {
      // Accumulate values per key, preserving first-seen key order is
      // NOT guaranteed (plain mutable.Map); a key with no value still
      // gets an (empty) buffer entry.
      val m = mutable.Map.empty[String, ListBuffer[String]]
      foreach {
        case (k, None) => m.getOrElseUpdate(k, new ListBuffer)
        case (k, Some(v)) => m.getOrElseUpdate(k, new ListBuffer) += v
      }

      m.toMap
    }
  }

  override protected[this] def newBuilder: Builder = Query.newBuilder

  /////////////////////// QueryOps methods and types /////////////////////////
  override protected type Self = Query
  override protected val query: Query = this
  override protected def self: Self = this
  override protected def replaceQuery(query: Query): Self = query
  ////////////////////////////////////////////////////////////////////////////
}

object Query {
  type KeyValue = (String, Option[String])

  type Builder = mutable.Builder[KeyValue, Query]

  val empty: Query = new Query(Vector.empty)

  /*
   * "The characters slash ("/") and question mark ("?") may represent data
   *  within the query component... it is sometimes better for usability to
   *  avoid percent-encoding those characters."
   *   -- http://tools.ietf.org/html/rfc3986#section-3.4
   */
  private val NoEncode: CharPredicate = UrlCodingUtils.Unreserved ++ "?/"

  def apply(xs: (String, Option[String])*): Query =
    new Query(xs.toVector)

  def fromPairs(xs: (String, String)*): Query = {
    val b = newBuilder
    xs.foreach{ case (k, v) => b += ((k, Some(v))) }
    b.result()
  }

  /** Generate a [[Query]] from its `String` representation
    *
    * If parsing fails, the empty [[Query]] is returned
    */
  def fromString(query: String): Query = {
    // NOTE(review): an empty input yields a single empty key with no
    // value rather than Query.empty -- presumably so that a bare '?'
    // round-trips; confirm before changing.
    if (query.isEmpty) new Query(Vector("" -> None))
    else QueryParser.parseQueryString(query).getOrElse(Query.empty)
  }

  /** Build a [[Query]] from the `Map` structure */
  def fromMap(map: Map[String, Seq[String]]): Query = {
    val b = newBuilder
    map.foreach {
      case (k, Seq()) => b += ((k, None))
      case (k, vs)    => vs.foreach(v => b += ((k, Some(v))))
    }
    b.result()
  }

  def newBuilder: mutable.Builder[KeyValue, Query] =
    Vector.newBuilder[KeyValue].mapResult(v => new Query(v))

  implicit val cbf: CanBuildFrom[Query, KeyValue, Query] = new CanBuildFrom[Query, KeyValue, Query] {
    override def apply(from: Query): mutable.Builder[KeyValue, Query] = newBuilder
    override def apply(): mutable.Builder[KeyValue, Query] = newBuilder
  }

  ///////////////////////////////////////////////////////////////////////
  // Wrap the multiParams to get a Map[String, String] view
  private class ParamsView(wrapped: Map[String, Seq[String]]) extends Map[String, String] {
    // Widening to Map[String, B1] loses the multi-value view; the
    // cast mirrors the stdlib pattern for covariant map addition.
    override def +[B1 >: String](kv: (String, B1)): Map[String, B1] = {
      val m = wrapped + (kv)
      m.asInstanceOf[Map[String, B1]]
    }

    override def -(key: String): Map[String, String] =
      new ParamsView(wrapped - key)

    // Only the first value of each key is exposed; a key present with
    // zero values maps to "".
    override def iterator: Iterator[(String, String)] =
      wrapped.iterator.map { case (k, s) => (k, s.headOption.getOrElse("")) }

    override def get(key: String): Option[String] =
      wrapped.get(key).flatMap(_.headOption)
  }
}
ZizhengTai/http4s
core/src/main/scala/org/http4s/Query.scala
Scala
apache-2.0
5,843
/* * Copyright 2017 Mediative * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package rocks.muki.graphql.codegen.style.sangria class BlogSpecSangria extends SangriaCodegenBaseSpec("blog")
muuki88/sbt-graphql
src/test/scala/rocks/muki/graphql/codegen/style/sangria/BlogCodegenSpec.scala
Scala
apache-2.0
705