code
stringlengths
5
1M
repo_name
stringlengths
5
109
path
stringlengths
6
208
language
stringclasses
1 value
license
stringclasses
15 values
size
int64
5
1M
/**
 * Copyright 2011-2016 GatlingCorp (http://gatling.io)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.gatling.core.check.extractor.css

import scala.annotation.implicitNotFound

import jodd.lagarto.dom.Node

/** Default (low priority) [[NodeConverter]] instances. */
trait LowPriorityNodeConverterImplicits {

  /**
   * Converts a node to a String: the named attribute value when an attribute
   * is requested, otherwise the node's trimmed text content.
   */
  implicit val stringNodeConverter: NodeConverter[String] = new NodeConverter[String] {
    def convert(node: Node, nodeAttribute: Option[String]): Option[String] =
      nodeAttribute match {
        case Some(attr) => Option(node.getAttribute(attr)) // getAttribute may return null
        case _          => Some(node.getTextContent.trim)
      }
  }

  /** Identity conversion: yields the node itself. */
  implicit val nodeNodeConverter: NodeConverter[Node] = new NodeConverter[Node] {
    def convert(node: Node, nodeAttribute: Option[String]): Option[Node] = Some(node)
  }

  /** Extracts form inputs as a name -> values map, only for &lt;form&gt; nodes. */
  implicit val formNodeConverter: NodeConverter[Map[String, Seq[String]]] =
    new NodeConverter[Map[String, Seq[String]]] {
      def convert(node: Node, nodeAttribute: Option[String]): Option[Map[String, Seq[String]]] =
        node.getNodeName match {
          case "form" => Some(Jodd.extractFormInputs(node))
          case _      => None
        }
    }
}

object NodeConverter extends LowPriorityNodeConverterImplicits {
  /** Summons the implicit [[NodeConverter]] instance for X. */
  def apply[X: NodeConverter] = implicitly[NodeConverter[X]]
}

/**
 * Type class turning a Lagarto DOM [[Node]] (plus an optional attribute name)
 * into a value of type X.
 */
@implicitNotFound("No member of type class NodeConverter found for type ${X}")
trait NodeConverter[X] {
  def convert(node: Node, nodeAttribute: Option[String]): Option[X]
}
GabrielPlassard/gatling
gatling-core/src/main/scala/io/gatling/core/check/extractor/css/NodeConverter.scala
Scala
apache-2.0
1,829
package org.scalatra.servlet

import javax.servlet.ServletContext

/**
 * A piece of configuration that can be applied (mounted) onto a
 * [[javax.servlet.ServletContext]].
 */
trait MountConfig {
  /** Applies this mount configuration to the given servlet context. */
  def apply(ctxt: ServletContext)
}
etorreborre/scalatra
common/src/main/scala/org/scalatra/servlet/MountConfig.scala
Scala
bsd-2-clause
124
/*
 * Copyright 2017 by Simba Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package org.apache.spark.sql.simba.util

import org.apache.spark.sql.simba.ShapeType
import org.apache.spark.sql.simba.spatial.Shape
import org.apache.spark.sql.catalyst.expressions.Literal

/**
 * Created by dongx on 11/14/2016.
 *
 * Builds Catalyst [[Literal]]s, routing Simba [[Shape]] values through the
 * custom [[ShapeType]] instead of Catalyst's default type inference.
 */
object LiteralUtil {
  def apply(v: Any): Literal = v match {
    // Fix: the original bound the matched value to `s` but then passed the
    // untyped `v` to Literal.create; use the binder (same value, no
    // unused-variable warning).
    case shape: Shape => Literal.create(shape, ShapeType)
    case _ => Literal(v)
  }
}
InitialDLab/Simba
src/main/scala/org/apache/spark/sql/simba/util/LiteralUtil.scala
Scala
apache-2.0
983
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.table.planner.runtime.batch.sql

import org.apache.flink.table.api.TableEnvironment
import org.apache.flink.table.planner.runtime.FileSystemITCaseBase
import org.apache.flink.table.planner.runtime.utils.BatchTestBase
import org.apache.flink.types.Row

import org.junit.Before

import scala.collection.Seq

/**
 * Batch [[FileSystemITCaseBase]].
 */
abstract class BatchFileSystemITCaseBase extends BatchTestBase with FileSystemITCaseBase {

  @Before
  override def before(): Unit = {
    // Set up the batch test harness first, then the file-system fixture.
    super.before()
    super.open()
  }

  /** Table environment under test, provided by the batch test base. */
  override def tableEnv: TableEnvironment = tEnv

  /** Executes the query and compares the result against the expected rows. */
  override def check(sqlQuery: String, expectedResult: Seq[Row]): Unit =
    checkResult(sqlQuery, expectedResult)
}
tillrohrmann/flink
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/runtime/batch/sql/BatchFileSystemITCaseBase.scala
Scala
apache-2.0
1,543
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.s2graph.core.features

import org.apache.tinkerpop.gremlin.structure.Graph.Features

/**
 * TinkerPop data-type feature flags for S2Graph property values: scalar
 * primitives (except byte) and plain strings are supported; array, list,
 * map and serializable values are not.
 */
case class S2DataTypeFeatures() extends Features.DataTypeFeatures {
  // primitive types
  override def supportsBooleanValues(): Boolean = true
  override def supportsByteValues(): Boolean = false
  override def supportsDoubleValues(): Boolean = true
  override def supportsFloatValues(): Boolean = true
  override def supportsIntegerValues(): Boolean = true
  override def supportsLongValues(): Boolean = true

  // non-primitive types
  override def supportsMapValues(): Boolean = false
  override def supportsMixedListValues(): Boolean = false
  override def supportsBooleanArrayValues(): Boolean = false
  override def supportsByteArrayValues(): Boolean = false
  override def supportsDoubleArrayValues(): Boolean = false
  override def supportsFloatArrayValues(): Boolean = false
  override def supportsIntegerArrayValues(): Boolean = false
  override def supportsStringArrayValues(): Boolean = false
  override def supportsLongArrayValues(): Boolean = false
  override def supportsSerializableValues(): Boolean = false
  override def supportsStringValues(): Boolean = true
  override def supportsUniformListValues(): Boolean = false
}
SteamShon/incubator-s2graph
s2core/src/main/scala/org/apache/s2graph/core/features/S2DataTypeFeatures.scala
Scala
apache-2.0
2,074
package com.twitter.finagle

import com.github.benmanes.caffeine.cache.{CacheLoader, Caffeine, LoadingCache}
import com.twitter.cache.caffeine.CaffeineCache
import com.twitter.concurrent.AsyncSemaphore
import com.twitter.conversions.DurationOps._
import com.twitter.finagle.stats.{DefaultStatsReceiver, StatsReceiver}
import com.twitter.finagle.util.{DefaultTimer, InetSocketAddressUtil, Updater}
import com.twitter.logging.Logger
import com.twitter.util.{Await, Closable, Var, _}
import java.net.{InetAddress, InetSocketAddress, UnknownHostException}

/**
 * Performs DNS lookups on the given FuturePool, bounding the number of
 * in-flight resolutions with a semaphore and exporting lookup stats.
 */
private[finagle] class DnsResolver(statsReceiver: StatsReceiver, resolvePool: FuturePool)
    extends (String => Future[Seq[InetAddress]]) {

  private[this] val dnsLookupFailures = statsReceiver.counter("dns_lookup_failures")
  private[this] val dnsLookups = statsReceiver.counter("dns_lookups")
  private[this] val log = Logger()

  // Resolve hostnames asynchronously and concurrently (at most 100 at once).
  private[this] val dnsCond = new AsyncSemaphore(100)
  private[this] val waitersGauge = statsReceiver.addGauge("queue_size") { dnsCond.numWaiters }

  private[this] val Loopback = Future.value(Seq(InetAddress.getLoopbackAddress))

  override def apply(host: String): Future[Seq[InetAddress]] = {
    if (host.isEmpty || host == "localhost") {
      // Avoid using the thread pool to resolve localhost. Ideally we
      // would always do that if hostname is an IP address, but there is
      // no native API to determine if it is the case. localhost can
      // safely be treated specially here, see rfc6761 section 6.3.3.
      Loopback
    } else {
      dnsLookups.incr()
      dnsCond.acquire().flatMap { permit =>
        resolvePool(InetAddress.getAllByName(host).toSeq)
          .onFailure { e =>
            log.debug(s"Failed to resolve $host. Error $e")
            dnsLookupFailures.incr()
          }
          .ensure { permit.release() }
      }
    }
  }
}

/**
 * Resolver for inet scheme.
 */
object InetResolver {
  def apply(): Resolver = apply(DefaultStatsReceiver)

  def apply(resolvePool: FuturePool): Resolver =
    apply(DefaultStatsReceiver, resolvePool)

  def apply(unscopedStatsReceiver: StatsReceiver): Resolver =
    apply(unscopedStatsReceiver, FuturePool.unboundedPool)

  def apply(unscopedStatsReceiver: StatsReceiver, resolvePool: FuturePool): Resolver =
    apply(unscopedStatsReceiver, Some(5.seconds), resolvePool)

  def apply(
    unscopedStatsReceiver: StatsReceiver,
    pollIntervalOpt: Option[Duration],
    resolvePool: FuturePool
  ) = {
    val statsReceiver = unscopedStatsReceiver.scope("inet").scope("dns")
    new InetResolver(
      new DnsResolver(statsReceiver, resolvePool),
      statsReceiver,
      pollIntervalOpt,
      resolvePool
    )
  }
}

private[finagle] class InetResolver(
  resolveHost: String => Future[Seq[InetAddress]],
  statsReceiver: StatsReceiver,
  pollIntervalOpt: Option[Duration],
  resolvePool: FuturePool)
    extends Resolver {
  import InetSocketAddressUtil._

  type HostPortMetadata = (String, Int, Addr.Metadata)

  val scheme = "inet"
  private[this] val latencyStat = statsReceiver.stat("lookup_ms")
  private[this] val successes = statsReceiver.counter("successes")
  private[this] val failures = statsReceiver.counter("failures")
  private[this] val log = Logger()
  private[this] val timer = DefaultTimer

  /**
   * Resolve all hostnames and merge into a final Addr.
   * If all lookups are unknown hosts, returns Addr.Neg.
   * If all lookups fail with unexpected errors, returns Addr.Failed.
   * If any lookup succeeds the final result will be Addr.Bound
   * with the successful results.
   */
  def toAddr(hp: Seq[HostPortMetadata]): Future[Addr] = {
    val elapsed = Stopwatch.start()
    Future
      .collectToTry(hp.map {
        case (host, port, meta) =>
          resolveHost(host).map { inetAddrs =>
            inetAddrs.map { inetAddr =>
              Address.Inet(new InetSocketAddress(inetAddr, port), meta)
            }
          }
      })
      .flatMap { seq: Seq[Try[Seq[Address]]] =>
        // Filter out all successes. If there was at least 1 success, consider
        // the entire operation a success
        val results = seq.collect { case Return(subset) => subset }.flatten

        // Consider any result a success. Ignore partial failures.
        if (results.nonEmpty) {
          successes.incr()
          latencyStat.add(elapsed().inMilliseconds)
          Future.value(Addr.Bound(results.toSet))
        } else {
          // Either no hosts or resolution failed for every host
          failures.incr()
          latencyStat.add(elapsed().inMilliseconds)
          log.debug(s"Resolution failed for all hosts in $hp")

          seq.collectFirst { case Throw(e) => e } match {
            case Some(_: UnknownHostException) => Future.value(Addr.Neg)
            case Some(e) => Future.value(Addr.Failed(e))
            case None => Future.value(Addr.Bound(Set[Address]()))
          }
        }
      }
  }

  def bindHostPortsToAddr(hosts: Seq[HostPortMetadata]): Var[Addr] = {
    Var.async(Addr.Pending: Addr) { u =>
      toAddr(hosts) onSuccess { u() = _ }
      pollIntervalOpt match {
        case Some(pollInterval) =>
          val updater = new Updater[Unit] {
            val one = Seq(())
            // Just perform one update at a time.
            protected def preprocess(elems: Seq[Unit]) = one
            protected def handle(unit: Unit): Unit = {
              // This always runs in a thread pool; it's okay to block.
              u() = Await.result(toAddr(hosts))
            }
          }
          timer.schedule(pollInterval.fromNow, pollInterval) {
            resolvePool(updater(()))
          }
        case None =>
          Closable.nop
      }
    }
  }

  /**
   * Binds to the specified hostnames, and refreshes the DNS information periodically.
   */
  def bind(hosts: String): Var[Addr] = Try(parseHostPorts(hosts)) match {
    case Return(hp) =>
      bindHostPortsToAddr(hp.map {
        case (host, port) =>
          (host, port, Addr.Metadata.empty)
      })
    case Throw(exc) =>
      Var.value(Addr.Failed(exc))
  }
}

/**
 * InetResolver that caches all successful DNS lookups indefinitely
 * and does not poll for updates.
 *
 * Clients should only use this in scenarios where host -> IP map changes
 * do not occur.
 */
object FixedInetResolver {

  private[this] val log = Logger()

  val scheme = "fixedinet"

  def apply(): InetResolver =
    apply(DefaultStatsReceiver)

  def apply(unscopedStatsReceiver: StatsReceiver): InetResolver =
    apply(unscopedStatsReceiver, 16000)

  def apply(unscopedStatsReceiver: StatsReceiver, maxCacheSize: Long): InetResolver =
    apply(unscopedStatsReceiver, maxCacheSize, Stream.empty, DefaultTimer)

  /**
   * Uses a [[com.twitter.util.Future]] cache to memoize lookups.
   *
   * @param maxCacheSize Specifies the maximum number of `Futures` that can be cached.
   *                     No maximum size limit if Long.MaxValue.
   * @param backoffs Optionally retry DNS resolution failures using this sequence of
   *                 durations for backoff. Stream.empty means don't retry.
   */
  def apply(
    unscopedStatsReceiver: StatsReceiver,
    maxCacheSize: Long,
    backoffs: Stream[Duration],
    timer: Timer
  ): InetResolver = {
    val statsReceiver = unscopedStatsReceiver.scope("inet").scope("dns")
    new FixedInetResolver(
      cache(
        new DnsResolver(statsReceiver, FuturePool.unboundedPool),
        maxCacheSize,
        backoffs,
        timer
      ),
      statsReceiver
    )
  }

  // A size-bounded FutureCache backed by a LoaderCache
  private[finagle] def cache(
    resolveHost: String => Future[Seq[InetAddress]],
    maxCacheSize: Long,
    backoffs: Stream[Duration] = Stream.empty,
    timer: Timer = DefaultTimer
  ): LoadingCache[String, Future[Seq[InetAddress]]] = {

    val cacheLoader = new CacheLoader[String, Future[Seq[InetAddress]]]() {
      def load(host: String): Future[Seq[InetAddress]] = {
        // Optionally retry failed DNS resolutions with specified backoff.
        def retryingLoad(nextBackoffs: Stream[Duration]): Future[Seq[InetAddress]] = {
          resolveHost(host).rescue {
            case exc: UnknownHostException =>
              nextBackoffs match {
                case nextBackoff #:: restBackoffs =>
                  log.debug(
                    s"Caught UnknownHostException resolving host '$host'. Retrying in $nextBackoff..."
                  )
                  Future.sleep(nextBackoff)(timer).before(retryingLoad(restBackoffs))
                case Stream.Empty =>
                  Future.exception(exc)
              }
          }
        }
        retryingLoad(backoffs)
      }
    }

    var builder = Caffeine
      .newBuilder()
      .recordStats()

    if (maxCacheSize != Long.MaxValue) {
      builder = builder.maximumSize(maxCacheSize)
    }
    builder.build(cacheLoader)
  }
}

/**
 * Uses a [[com.twitter.util.Future]] cache to memoize lookups.
 *
 * @param cache The lookup cache
 */
private[finagle] class FixedInetResolver(
  cache: LoadingCache[String, Future[Seq[InetAddress]]],
  statsReceiver: StatsReceiver)
    extends InetResolver(
      CaffeineCache.fromLoadingCache(cache),
      statsReceiver,
      None,
      FuturePool.unboundedPool
    ) {

  override val scheme = FixedInetResolver.scheme

  private[this] val cacheStatsReceiver = statsReceiver.scope("cache")
  private[this] val cacheGauges = Seq(
    cacheStatsReceiver.addGauge("size") { cache.estimatedSize },
    cacheStatsReceiver.addGauge("evicts") { cache.stats().evictionCount },
    cacheStatsReceiver.addGauge("hit_rate") { cache.stats().hitRate.toFloat }
  )
}
luciferous/finagle
finagle-core/src/main/scala/com/twitter/finagle/InetResolver.scala
Scala
apache-2.0
9,764
package org.denigma.kappa.notebook.styles

import scalacss.Defaults._

/** Styles for the PDF.js-style text layer and its highlight overlays. */
trait TextLayerStyles extends StyleSheet.Standalone {

  import dsl._

  ".textLayer" - (
    position.absolute,
    left(0 px),
    top(0 px),
    right(0 px),
    bottom(0 px),
    overflow.hidden,
    opacity(0.2),
    lineHeight(1.0),
    transformOrigin := "0% 0%"
  )

  ".textLayer > div" - (
    color.transparent,
    position.absolute,
    whiteSpace.pre,
    cursor.text,
    transformOrigin := "0% 0%"
  )

  ".textLayer .highlight" - (
    margin(-1.0 px),
    padding(1 px),
    backgroundColor.deepskyblue,
    borderWidth(1 px),
    borderColor.navy,
    //backgroundColor.rgb(180, 0, 170),
    borderRadius(4 px)
  )

  ".textLayer .highlight.begin" - (
    borderRadius(4 px, 0 px, 0 px, 4 px)
  )

  ".textLayer .highlight.end" - (
    borderRadius(0 px, 4 px, 4 px, 0 px)
  )

  // Fix: this rule was declared twice in the original file; the redundant
  // duplicate has been removed (identical declarations, no cascade effect).
  ".textLayer .highlight.middle" - (
    borderRadius(0 px)
  )

  ".textLayer .highlight.selected" - (
    backgroundColor(rgb(0, 100, 0))
  )

  ".textLayer ::selection" - (
    backgroundColor(rgb(0, 0, 255))
  )

  ".textLayer ::-moz-selection " - (
    backgroundColor(rgb(0, 0, 255))
  )

  ".textLayer .endOfContent" - (
    display.block,
    position.absolute,
    left(0 px),
    top(100 %%),
    right(0 px),
    bottom(0 px),
    zIndex(-1),
    cursor.default,
    userSelect := "none"
  )

  ".textLayer .endOfContent.active" - (
    top(0 px)
  )
}
antonkulaga/kappa-notebook
app/jvm/src/main/scala/org/denigma/kappa/notebook/styles/TextLayerStyles.scala
Scala
mpl-2.0
1,545
package knub.master_thesis.preprocessing

import java.util
import java.util.HashSet;
import java.util.ArrayList;
import java.io._;

import cc.mallet.types.FeatureSequenceWithBigrams;
import cc.mallet.types.Instance;
import cc.mallet.types.Token;
import cc.mallet.types.TokenSequence;
import cc.mallet.pipe.Pipe

object UseFixedVocabulary {
  // Version tag written by the custom serialization format.
  private val CURRENT_SERIAL_VERSION = 1
}

/**
 * Mallet pipe that filters an instance's TokenSequence down to the tokens
 * whose text appears in a fixed vocabulary.
 */
@SerialVersionUID(1)
class UseFixedVocabulary(var vocabulary: Set[String]) extends Pipe with Serializable {

  import UseFixedVocabulary._

  /**
   * Replaces the carrier's data with a TokenSequence containing only
   * in-vocabulary tokens, then returns the same carrier.
   */
  override def pipe(carrier: Instance): Instance = {
    val ts = carrier.getData.asInstanceOf[TokenSequence]
    val ret = new TokenSequence()
    // Fix: the original also maintained a `prevToken` var that was assigned
    // but never read; the dead local has been removed.
    for (i <- 0 until ts.size) {
      val t = ts.get(i)
      if (vocabulary.contains(t.getText)) {
        ret.add(t)
      }
    }
    carrier.setData(ret)
    carrier
  }

  // Custom serialization: a version int followed by the vocabulary set.
  private def writeObject(out: ObjectOutputStream) {
    out.writeInt(CURRENT_SERIAL_VERSION)
    out.writeObject(vocabulary)
  }

  private def readObject(in: ObjectInputStream) {
    // NOTE(review): the version read here is consumed from the stream but
    // currently not checked against CURRENT_SERIAL_VERSION.
    val version = in.readInt()
    vocabulary = in.readObject().asInstanceOf[Set[String]]
  }
}
knub/master-thesis
code/scala/src/main/scala/knub/master_thesis/preprocessing/UseFixedVocabulary.scala
Scala
apache-2.0
1,271
package scala.models

import scala.language.implicitConversions

import simulacrum._

import com.bryzek.apidoc.spec.v0.models._

/** Type class for apidoc spec models that expose a list of attributes. */
@typeclass trait HasAttributes[T] {
  def getAttributes(t: T): Seq[Attribute]

  /** Finds the attribute with the given name, if present. */
  def findAttribute(t: T, name: String): Option[Attribute] =
    getAttributes(t).find(_.name == name)
}

/** Instances for every apidoc spec model carrying `attributes`. */
object HasAttributesI {

  implicit val modelHasAttributes = new HasAttributes[Model] {
    def getAttributes(t: Model) = t.attributes
  }

  implicit val fieldHasAttributes = new HasAttributes[Field] {
    def getAttributes(t: Field) = t.attributes
  }

  implicit val bodyHasAttributes = new HasAttributes[Body] {
    def getAttributes(t: Body) = t.attributes
  }

  implicit val enumHasAttributes = new HasAttributes[Enum] {
    def getAttributes(t: Enum) = t.attributes
  }

  implicit val enumValueHasAttributes = new HasAttributes[EnumValue] {
    def getAttributes(t: EnumValue) = t.attributes
  }

  implicit val headerHasAttributes = new HasAttributes[Header] {
    def getAttributes(t: Header) = t.attributes
  }

  implicit val operationHasAttributes = new HasAttributes[Operation] {
    def getAttributes(t: Operation) = t.attributes
  }

  implicit val resourceHasAttributes = new HasAttributes[Resource] {
    def getAttributes(t: Resource) = t.attributes
  }

  implicit val unionHasAttributes = new HasAttributes[Union] {
    def getAttributes(t: Union) = t.attributes
  }

  implicit val unionTypeHasAttributes = new HasAttributes[UnionType] {
    def getAttributes(t: UnionType) = t.attributes
  }
}
movio/movio-apidoc-generator
scala-generator/src/main/scala/models/HasAttributes.scala
Scala
mit
1,522
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gearpump.akkastream.example

import akka.NotUsed
import akka.stream.{ClosedShape, ThrottleMode}
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.cluster.main.ArgumentsParser
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.Await
import scala.concurrent.duration._

/**
 * Stream example showing Conflate, Throttle
 */
object Test10 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override def main(akkaConf: Config, args: Array[String]): Unit = {
    import akka.actor.ActorSystem
    import akka.stream.scaladsl._

    // Fix: the original passed `akkaConfig`, which is not defined in this
    // scope; the configuration parameter received by main is `akkaConf`.
    implicit val system = ActorSystem("Test10", akkaConf)
    implicit val materializer = GearpumpMaterializer()
    implicit val ec = system.dispatcher

    // Conflate[A] - (2 inputs, 1 output) concatenates two streams
    // (first consumes one, then the second one)
    def stream(x: String) = Stream.continually(x)

    val sourceA = Source(stream("A"))
    val sourceB = Source(stream("B"))

    // Emit at most one element per second.
    val throttler: Flow[String, String, NotUsed] =
      Flow[String].throttle(1, 1.second, 1, ThrottleMode.Shaping)
    // Collapse upstream elements while downstream is backpressured.
    val conflateFlow: Flow[String, String, NotUsed] =
      Flow[String].conflate((x: String, y: String) => x: String)(
        (acc: String, x: String) => s"$acc::$x")
    val printFlow: Flow[(String, String), String, NotUsed] =
      Flow[(String, String)].map { x =>
        println(s" lengths are : ${x._1.length} and ${x._2.length} ; ${x._1} zip ${x._2}")
        x.toString
      }

    val graph = RunnableGraph.fromGraph(GraphDSL.create() { implicit b =>
      import GraphDSL.Implicits._
      val zipping = b.add(Zip[String, String]())
      sourceA ~> throttler ~> zipping.in0
      sourceB ~> conflateFlow ~> zipping.in1
      zipping.out ~> printFlow ~> Sink.ignore
      ClosedShape
    })
    graph.run()

    Await.result(system.whenTerminated, 60.minutes)
  }
  // scalastyle:on println
}
manuzhang/incubator-gearpump
experiments/akkastream/src/main/scala/org/apache/gearpump/akkastream/example/Test10.scala
Scala
apache-2.0
2,760
import sml.instructions.AddInstruction
import sml.{Labels, Machine}

/**
 * Tests for the AddInstruction class
 * @author lmignot
 */
class AddInstructionSpec extends BaseSpec {

  private val Label = "L1"
  private val OP = "add"
  private val R1 = 1
  private val R2 = 2
  private val R3 = 3
  private val ZERO = 0
  private val AMT = 72
  private val RES = AMT * 2

  describe("An ADD instruction") {

    it("should initialise with the correct values") {
      Given("An ADD instruction")
      When("The instruction is created")
      val ai = AddInstruction(Label, R3, R1, R2)
      Then("The values should be correct")
      ai.label should be (Label)
      ai.opcode should be (OP)
      ai.result should be (R3)
      ai.op1 should be (R1)
      ai.op2 should be (R2)
      And("toString() should be correct")
      ai.toString() should be (s"${ai.label}: ${ai.opcode} ${ai.op1} + ${ai.op2} to ${ai.result} \n")
    }

    describe("Executing the instruction") {

      it("should sum the values of 2 registers and store the result in the correct register") {
        Given("A Machine")
        val m: Machine = new Machine(Labels(), Vector())
        And("An ADD instruction")
        val ai = AddInstruction(Label, R3, R1, R2)
        When("The instruction's op registers are initialised with the expected values")
        m.regs(R1) = AMT
        m.regs(R2) = AMT
        And("The instruction is executed")
        ai.execute(m)
        Then("The result Machine register should have the correct value")
        m.regs(R3) should be (RES)
      }

      it("should correctly sum the values given any valid Int value") {
        Given("A Machine")
        val m: Machine = new Machine(Labels(), Vector())
        And("An ADD instruction")
        val ai = AddInstruction(Label, R3, R1, R2)
        When("The instruction is executed")
        ai.execute(m)
        Then("The result Machine register should have the correct value")
        // Registers default to zero, so the sum is zero.
        m.regs(R3) should be (ZERO)
      }

      it("should correctly sum the values given the same register for arguments") {
        Given("A Machine")
        val m: Machine = new Machine(Labels(), Vector())
        And("An ADD instruction")
        val ai = AddInstruction(Label, R1, R1, R1)
        When("The instruction's op register is initialised with the expected value")
        m.regs(R1) = AMT
        And("The instruction is executed")
        ai.execute(m)
        Then("The result Machine register should have the correct value")
        m.regs(R1) should be (RES)
      }
    }
  }
}
BBK-PiJ-2015-67/sdp-portfolio
coursework/cw-one/src/test/scala/AddInstructionSpec.scala
Scala
unlicense
2,556
package com.yetu.oauth2provider.browser

/**
 * Created by elisahilprecht on 30/03/15.
 */
class DownloadBrowserSpec extends BaseBrowserSpec {

  "Download page" must {

    "have title called 'Download'" in {
      go to (s"http://localhost:$port" + setupDownloadUrl)
      pageTitle mustBe "Download"
    }

    "show new content when clicking on download" in {
      // TODO: Fix this test
      //  val downloadButton = find(id("download_win1"))
      //  downloadButton must be ('defined)
      //  click on downloadButton.value
      //  eventually { find(id("fullContainer")) must be ('defined) }
      //  Last line: The code passed to eventually never returned normally. Attempted 43 times over 15.266835617
      //  seconds. Last failure message: None was not defined.
    }
  }
}
yetu/oauth2-provider
test/com/yetu/oauth2provider/browser/DownloadBrowserSpec.scala
Scala
mit
803
/*
 * Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
 */
package play.api.libs

import play.api._
import play.api.libs.crypto._

// Keep Crypto around to manage global state for now...
private[play] object Crypto {

  // Per-application cache of the CookieSigner instance.
  private val cookieSignerCache: (Application) => CookieSigner =
    Application.instanceCache[CookieSigner]

  // Temporary placeholder until we can move out Session / Cookie singleton objects
  def cookieSigner: CookieSigner = {
    Play.privateMaybeApplication.fold {
      sys.error("The global cookie signer instance requires a running application!")
    }(cookieSignerCache)
  }
}
Shruti9520/playframework
framework/src/play/src/main/scala/play/api/libs/Crypto.scala
Scala
apache-2.0
620
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package spark.storage

import akka.actor.Actor

import spark.{Logging, SparkException, Utils}

/**
 * An actor to take commands from the master to execute options. For example,
 * this is used to remove blocks from the slave's BlockManager.
 */
class BlockManagerSlaveActor(blockManager: BlockManager) extends Actor {
  override def receive = {
    // Fire-and-forget removal of a single block.
    case RemoveBlock(blockId) =>
      blockManager.removeBlock(blockId)

    // Remove all of an RDD's blocks and report back how many were removed.
    case RemoveRdd(rddId) =>
      val numBlocksRemoved = blockManager.removeRdd(rddId)
      sender ! numBlocksRemoved
  }
}
wgpshashank/spark
core/src/main/scala/spark/storage/BlockManagerSlaveActor.scala
Scala
apache-2.0
1,349
package strd.pg

import java.sql.{Connection, Statement}

import com.mchange.v2.c3p0.AbstractConnectionCustomizer
import org.slf4j.LoggerFactory

import scala.collection.mutable

object SchemaConnectionCustomizer {
  // Maps a c3p0 data-source identity token to the schema its connections should use.
  val schemas = new mutable.HashMap[String, String]()
}

/**
 * c3p0 connection customizer that sets the PostgreSQL search_path on every
 * newly acquired connection, based on the registered schema for the pool.
 */
class SchemaConnectionCustomizer extends AbstractConnectionCustomizer {

  val log = LoggerFactory.getLogger(getClass)

  /** Looks up the schema registered for the parent data source. */
  def schema(parentDataSourceIdentityToken: String) = {
    SchemaConnectionCustomizer.schemas(parentDataSourceIdentityToken)
  }

  override def onAcquire(c: Connection, parentDataSourceIdentityToken: String) {
    val schemaName = schema(parentDataSourceIdentityToken)
    log.debug(s"Setting schema to $schemaName")
    var stmt: Statement = null
    try {
      stmt = c.createStatement()
      stmt.executeUpdate("SET search_path TO " + schemaName)
    } catch {
      // Log and rethrow so c3p0 rejects the connection.
      case e: Exception =>
        log.warn(s"Can not set schema to $schemaName", e)
        throw e
    } finally {
      if (stmt != null) stmt.close()
    }
  }
}
onerinvestments/strd
strd-commons/src/main/scala/strd/pg/SchemaConnectionCustomizer.scala
Scala
apache-2.0
1,027
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.gearpump.streaming.serializer

import com.twitter.chill.KryoInjection
import org.apache.gearpump.streaming.state.api.Serializer

import scala.util.Try

/** Serializer backed by Twitter Chill's Kryo injection. */
class ChillSerializer[T] extends Serializer[T] {

  /** Serializes the value to a Kryo byte array. */
  override def serialize(t: T): Array[Byte] = KryoInjection(t)

  /** Deserializes the bytes; the Try fails on malformed input. */
  override def deserialize(bytes: Array[Byte]): Try[T] =
    KryoInjection.invert(bytes).map(_.asInstanceOf[T])
}
manuzhang/incubator-gearpump
external/serializer/src/main/scala/org/apache/gearpump/streaming/serializer/ChillSerializer.scala
Scala
apache-2.0
1,210
/*
 * Copyright (c) 2012-2015 Snowplow Analytics Ltd. All rights reserved.
 *
 * This program is licensed to you under the Apache License Version 2.0,
 * and you may not use this file except in compliance with the Apache License Version 2.0.
 * You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the Apache License Version 2.0 is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
 */
package com.snowplowanalytics.snowplow.enrich
package hadoop
package bad

// Scala
import scala.collection.mutable.Buffer

// Specs2
import org.specs2.mutable.Specification

// Scalding
import com.twitter.scalding._

// Cascading
import cascading.tuple.TupleEntry

// This project
import JobSpecHelpers._

/**
 * Holds the input and expected data
 * for the test.
 */
object NullNumericFieldsSpec {

  val lines = Lines(
    "2014-10-11 14:01:05 - 37 172.31.38.31 GET 24.209.95.109 /i 200 http://www.myvideowebsite.com/embed/ab123456789?auto_start=e9&rf=cb Mozilla%2F5.0+%28Macintosh%3B+Intel+Mac+OS+X+10.6%3B+rv%3A32.0%29+Gecko%2F20100101+Firefox%2F32.0 e=se&se_ca=video-player%3Anewformat&se_ac=play-time&se_la=efba3ef384&se_va=&tid="
  )

  // Expected bad-row JSON, parameterized by the timestamp the job stamps in.
  val expected = (failure_tstamp: String) =>
    s"""{"line":"2014-10-11 14:01:05 - 37 172.31.38.31 GET 24.209.95.109 /i 200 http://www.myvideowebsite.com/embed/ab123456789?auto_start=e9&rf=cb Mozilla%2F5.0+%28Macintosh%3B+Intel+Mac+OS+X+10.6%3B+rv%3A32.0%29+Gecko%2F20100101+Firefox%2F32.0 e=se&se_ca=video-player%3Anewformat&se_ac=play-time&se_la=efba3ef384&se_va=&tid=","errors":[{"level":"error","message":"Field [se_va]: cannot convert [] to Double-like String"},{"level":"error","message":"Field [tid]: [] is not a valid integer"}],"failure_tstamp":"$failure_tstamp"}"""
}

/**
 * Integration test for the EtlJob:
 *
 * Check that all tuples in a custom structured event
 * (CloudFront format) are successfully extracted.
 */
class NullNumericFieldsSpec extends Specification {

  "A job which processes a CloudFront file containing 1 event with null integer and double fields" should {
    EtlJobSpec("clj-tomcat", "2", true, List("geo", "organization")).
      source(MultipleTextLineFiles("inputFolder"), NullNumericFieldsSpec.lines).
      sink[String](Tsv("outputFolder")) { output =>
        "not write any events" in {
          output must beEmpty
        }
      }.
      sink[TupleEntry](Tsv("exceptionsFolder")) { trap =>
        "not trap any exceptions" in {
          trap must beEmpty
        }
      }.
      sink[String](Tsv("badFolder")) { buf =>
        val jsonStr = buf.head
        val failure_tstamp = getFailureTstampFrom(jsonStr)
        "write a bad row JSON containing the input line and all errors" in {
          jsonStr must_== NullNumericFieldsSpec.expected(failure_tstamp)
        }
      }.
      run.
      finish
  }
}
jramos/snowplow
3-enrich/scala-hadoop-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.hadoop/bad/NullNumericFieldsSpec.scala
Scala
apache-2.0
3,130
package com.felixmilea.vorbit.main

// NOTE(review): ActorSystem, Initable and ConfigManager are imported but not
// referenced in this file — candidates for removal once confirmed unused.
import akka.actor.ActorSystem
import com.felixmilea.vorbit.utils.Initable
import com.felixmilea.vorbit.utils.ConfigManager
import com.felixmilea.vorbit.utils.Loggable

/** Application entry point. The `App` trait runs the object body as `main()`. */
object Vorbit extends App with Loggable {
  // Emitted once at startup through the Loggable mixin.
  Info("Vorbit running")

  // Prefix every log message produced via Loggable with an app tag.
  override def wrapLog(m: String) = "VB: " + m
}
felixmc/Felix-Milea-Ciobanu-Vorbit
code/com/felixmilea/vorbit/main/Vorbit.scala
Scala
mit
322
package xitrum.handler.inbound

import io.netty.channel.{ChannelHandler, SimpleChannelInboundHandler, ChannelHandlerContext}
import ChannelHandler.Sharable

import xitrum.Config
import xitrum.handler.HandlerEnv
import xitrum.handler.outbound.XSendFile

/**
 * Strips the configured base URL (see config/xitrum.properties) from incoming
 * request URIs before passing them down the pipeline. Requests outside the
 * base URL are answered with a 404 page.
 */
@Sharable
class BaseUrlRemover extends SimpleChannelInboundHandler[HandlerEnv] {
  override def channelRead0(ctx: ChannelHandlerContext, env: HandlerEnv): Unit =
    remove(env.request.uri) match {
      case Some(strippedUri) =>
        // URI is under the base URL: rewrite it and continue down the pipeline.
        env.request.setUri(strippedUri)
        ctx.fireChannelRead(env)

      case None =>
        // URI is outside the base URL: short-circuit with a 404 response.
        XSendFile.set404Page(env.response, fromController = false)
        ctx.channel.writeAndFlush(env)
    }

  /**
   * Removes the base URI from the original request URL.
   *
   * @return None if the original URL does not start with the base URI
   */
  private def remove(originalUri: String): Option[String] = {
    val base = Config.baseUrl
    if (originalUri == base) Some("/")
    else if (originalUri.startsWith(base + "/")) Some(originalUri.drop(base.length))
    else None
  }
}
xitrum-framework/xitrum
src/main/scala/xitrum/handler/inbound/BaseUrlRemover.scala
Scala
mit
1,191
package mesosphere.mesos

import mesosphere.marathon.Protos.{ Constraint, MarathonTask }
import com.google.common.collect.Lists
import mesosphere.marathon.Protos.Constraint.Operator
import scala.collection.JavaConverters._
import scala.util.Random
import mesosphere.mesos.protos.{ FrameworkID, SlaveID, OfferID, TextAttribute }
import org.apache.mesos.Protos.{ Offer, Attribute }
import mesosphere.marathon.MarathonSpec

/** Tests for `Constraints.meetsConstraint` across UNIQUE, CLUSTER, LIKE and GROUP_BY operators. */
class ConstraintsTest extends MarathonSpec {

  import mesosphere.mesos.protos.Implicits._

  /** Builds a MarathonTask on host "host" carrying the given text attributes. */
  def makeSampleTask(id: String, attrs: Map[String, String]) = {
    val builder = MarathonTask.newBuilder()
      .setHost("host")
      .addAllPorts(Lists.newArrayList(999))
      .setId(id)
    for ((name, value) <- attrs) {
      builder.addAttributes(TextAttribute(name, value))
    }
    builder.build()
  }

  /** Builds a Mesos Offer with random ids, the given hostname and attributes. */
  def makeOffer(hostname: String, attributes: Iterable[Attribute]) = {
    Offer.newBuilder
      .setId(OfferID(Random.nextString(9)))
      .setSlaveId(SlaveID(Random.nextString(9)))
      .setFrameworkId(FrameworkID(Random.nextString(9)))
      .setHostname(hostname)
      .addAllAttributes(attributes.asJava)
      .build
  }

  /** Builds a MarathonTask pinned to the given host, with no attributes. */
  def makeTaskWithHost(id: String, host: String) = {
    MarathonTask.newBuilder()
      .setHost(host)
      .addAllPorts(Lists.newArrayList(999))
      .setId(id)
      .build()
  }

  /** Builds a placement Constraint protobuf. */
  def makeConstraint(field: String, operator: Operator, value: String) = {
    Constraint.newBuilder
      .setField(field)
      .setOperator(operator)
      .setValue(value)
      .build
  }

  test("UniqueHostConstraint") {
    val task1_host1 = makeTaskWithHost("task1", "host1")
    val task2_host2 = makeTaskWithHost("task2", "host2")
    val task3_host3 = makeTaskWithHost("task3", "host3")
    val attributes: Set[Attribute] = Set()

    // NOTE(review): inferred as Set[Nothing] — relies on meetsConstraint
    // accepting an empty collection of any element type; confirm intended.
    val firstTask = Set()

    val hostnameUnique = makeConstraint("hostname", Operator.UNIQUE, "")

    val firstTaskOnHost = Constraints.meetsConstraint(
      firstTask,
      makeOffer("foohost", attributes),
      makeConstraint("hostname", Operator.CLUSTER, ""))

    assert(firstTaskOnHost, "Should meet first task constraint.")

    val wrongHostName = Constraints.meetsConstraint(
      firstTask,
      makeOffer("wrong.com", attributes),
      makeConstraint("hostname", Operator.CLUSTER, "right.com"))

    assert(!wrongHostName, "Should not accept the wrong hostname.")

    val differentHosts = Set(task1_host1, task2_host2, task3_host3)

    val differentHostsDifferentTasks = Constraints.meetsConstraint(
      differentHosts,
      makeOffer("host4", attributes),
      hostnameUnique)

    assert(differentHostsDifferentTasks, "Should place host in array")

    val reusingOneHost = Constraints.meetsConstraint(
      differentHosts,
      makeOffer("host2", attributes),
      hostnameUnique)

    assert(!reusingOneHost, "Should not place host")

    val firstOfferFirstTaskInstance = Constraints.meetsConstraint(
      firstTask,
      makeOffer("host2", attributes),
      hostnameUnique)

    // NOTE(review): the message "Should not place host" contradicts this
    // positive assertion — looks like a copy-paste from the case above.
    assert(firstOfferFirstTaskInstance, "Should not place host")
  }

  test("RackConstraints") {
    val task1_rack1 = makeSampleTask("task1", Map("rackid" -> "rack-1"))
    val task2_rack1 = makeSampleTask("task2", Map("rackid" -> "rack-1"))
    val task3_rack2 = makeSampleTask("task3", Map("rackid" -> "rack-2"))

    val freshRack = Set()
    val sameRack = Set(task1_rack1, task2_rack1)
    val uniqueRack = Set(task1_rack1, task3_rack2)

    val clusterByRackId = makeConstraint("rackid", Constraint.Operator.CLUSTER, "")
    val uniqueRackId = makeConstraint("rackid", Constraint.Operator.UNIQUE, "")

    val clusterFreshRackMet = Constraints.meetsConstraint(
      freshRack,
      makeOffer("foohost", Set(TextAttribute("foo", "bar"), TextAttribute("rackid", "rack-1"))),
      clusterByRackId)

    assert(clusterFreshRackMet, "Should be able to schedule in fresh rack.")

    val clusterRackMet = Constraints.meetsConstraint(
      sameRack,
      makeOffer("foohost", Set(TextAttribute("foo", "bar"), TextAttribute("rackid", "rack-1"))),
      clusterByRackId)

    assert(clusterRackMet, "Should meet clustered-in-rack constraints.")

    val clusterRackNotMet = Constraints.meetsConstraint(
      sameRack,
      makeOffer("foohost", Set(TextAttribute("foo", "bar"), TextAttribute("rackid", "rack-2"))),
      clusterByRackId)

    assert(!clusterRackNotMet, "Should not meet cluster constraint.")

    val uniqueFreshRackMet = Constraints.meetsConstraint(
      freshRack,
      makeOffer("foohost", Set(TextAttribute("foo", "bar"), TextAttribute("rackid", "rack-1"))),
      uniqueRackId)

    assert(uniqueFreshRackMet, "Should meet unique constraint for fresh rack.")

    val uniqueRackMet = Constraints.meetsConstraint(
      uniqueRack,
      makeOffer("foohost", Set(TextAttribute("foo", "bar"), TextAttribute("rackid", "rack-3"))),
      uniqueRackId)

    assert(uniqueRackMet, "Should meet unique constraint for rack")

    val uniqueRackNotMet = Constraints.meetsConstraint(
      sameRack,
      makeOffer("foohost", Set(TextAttribute("foo", "bar"), TextAttribute("rackid", "rack-1"))),
      uniqueRackId)

    assert(!uniqueRackNotMet, "Should not meet unique constraint for rack.")
  }

  test("AttributesLikeByConstraints") {
    val task1_rack1 = makeSampleTask("task1", Map("foo" -> "bar"))
    val task2_rack1 = makeSampleTask("task2", Map("jdk" -> "7"))
    val freshRack = Set(task1_rack1, task2_rack1)
    val jdk7Constraint = makeConstraint("jdk", Constraint.Operator.LIKE, "7")

    val clusterNotMet = Constraints.meetsConstraint(
      freshRack, // list of tasks register in the cluster
      makeOffer("foohost", Set(TextAttribute("jdk", "6"))), // slave attributes
      jdk7Constraint)
    assert(!clusterNotMet, "Should not meet cluster constraints.")

    val clusterMet = Constraints.meetsConstraint(
      freshRack, // list of tasks register in the cluster
      makeOffer("foohost", Set(TextAttribute("jdk", "7"))), // slave attributes
      jdk7Constraint)
    assert(clusterMet, "Should meet cluster constraints.")
  }

  test("RackGroupedByConstraints") {
    val task1_rack1 = makeSampleTask("task1", Map("rackid" -> "rack-1"))
    val task2_rack1 = makeSampleTask("task2", Map("rackid" -> "rack-1"))
    val task3_rack2 = makeSampleTask("task3", Map("rackid" -> "rack-2"))
    val task4_rack1 = makeSampleTask("task4", Map("rackid" -> "rack-1"))
    val task5_rack3 = makeSampleTask("task5", Map("rackid" -> "rack-3"))

    // Mutable on purpose: tasks are added between checks to simulate successive launches.
    var sameRack = Set[MarathonTask]()
    var uniqueRack = Set[MarathonTask]()

    val group2ByRack = makeConstraint("rackid", Constraint.Operator.GROUP_BY, "2")
    val rackIdUnique = makeConstraint("rackid", Constraint.Operator.UNIQUE, "")

    val clusterFreshRackMet = Constraints.meetsConstraint(
      sameRack,
      makeOffer("foohost", Set(TextAttribute("foo", "bar"), TextAttribute("rackid", "rack-1"))),
      group2ByRack)

    assert(clusterFreshRackMet, "Should be able to schedule in fresh rack.")

    sameRack ++= Set(task1_rack1)

    val clusterRackMet = Constraints.meetsConstraint(
      sameRack,
      makeOffer("foohost", Set(TextAttribute("foo", "bar"), TextAttribute("rackid", "rack-1"))),
      group2ByRack)

    assert(!clusterRackMet, "Should not meet clustered-in-rack constraints.")

    val clusterRackMet2 = Constraints.meetsConstraint(
      sameRack,
      makeOffer("foohost", Set(TextAttribute("foo", "bar"), TextAttribute("rackid", "rack-2"))),
      group2ByRack)

    assert(clusterRackMet2, "Should meet cluster constraint.")

    sameRack ++= Set(task3_rack2)

    val clusterRackMet3 = Constraints.meetsConstraint(
      sameRack,
      makeOffer("foohost", Set(TextAttribute("foo", "bar"), TextAttribute("rackid", "rack-1"))),
      group2ByRack)

    assert(clusterRackMet3, "Should meet clustered-in-rack constraints.")

    sameRack ++= Set(task2_rack1)

    val clusterRackNotMet = Constraints.meetsConstraint(
      sameRack,
      makeOffer("foohost", Set(TextAttribute("foo", "bar"), TextAttribute("rackid", "rack-1"))),
      group2ByRack)

    assert(!clusterRackNotMet, "Should not meet cluster constraint.")

    val uniqueFreshRackMet = Constraints.meetsConstraint(
      uniqueRack,
      makeOffer("foohost", Set(TextAttribute("foo", "bar"), TextAttribute("rackid", "rack-1"))),
      rackIdUnique)

    assert(uniqueFreshRackMet, "Should meet unique constraint for fresh rack.")

    uniqueRack ++= Set(task4_rack1)

    val uniqueRackMet = Constraints.meetsConstraint(
      uniqueRack,
      makeOffer("foohost", Set(TextAttribute("foo", "bar"), TextAttribute("rackid", "rack-3"))),
      rackIdUnique)

    assert(uniqueRackMet, "Should meet unique constraint for rack.")

    uniqueRack ++= Set(task5_rack3)

    val uniqueRackNotMet = Constraints.meetsConstraint(
      uniqueRack,
      makeOffer("foohost", Set(TextAttribute("foo", "bar"), TextAttribute("rackid", "rack-1"))),
      rackIdUnique)

    assert(!uniqueRackNotMet, "Should not meet unique constraint for rack.")
  }

  test("RackGroupedByConstraints2") {
    val task1_rack1 = makeSampleTask("task1", Map("rackid" -> "rack-1"))
    val task2_rack2 = makeSampleTask("task2", Map("rackid" -> "rack-2"))
    val task3_rack3 = makeSampleTask("task3", Map("rackid" -> "rack-3"))
    val task4_rack1 = makeSampleTask("task4", Map("rackid" -> "rack-1"))
    val task5_rack2 = makeSampleTask("task5", Map("rackid" -> "rack-2"))

    var groupRack = Set[MarathonTask]()

    val groupByRack = makeConstraint("rackid", Constraint.Operator.GROUP_BY, "3")

    val clusterFreshRackMet = Constraints.meetsConstraint(
      groupRack,
      makeOffer("foohost", Set(TextAttribute("foo", "bar"), TextAttribute("rackid", "rack-1"))),
      groupByRack)

    assert(clusterFreshRackMet, "Should be able to schedule in fresh rack.")

    groupRack ++= Set(task1_rack1)

    val clusterRackMet1 = Constraints.meetsConstraint(
      groupRack,
      makeOffer("foohost", Set(TextAttribute("foo", "bar"), TextAttribute("rackid", "rack-2"))),
      groupByRack)

    assert(clusterRackMet1, "Should meet clustered-in-rack constraints.")

    groupRack ++= Set(task2_rack2)

    val clusterRackMet2 = Constraints.meetsConstraint(
      groupRack,
      makeOffer("foohost", Set(TextAttribute("foo", "bar"), TextAttribute("rackid", "rack-3"))),
      groupByRack)

    assert(clusterRackMet2, "Should meet clustered-in-rack constraints.")

    groupRack ++= Set(task3_rack3)

    val clusterRackMet3 = Constraints.meetsConstraint(
      groupRack,
      makeOffer("foohost", Set(TextAttribute("foo", "bar"), TextAttribute("rackid", "rack-1"))),
      groupByRack)

    assert(clusterRackMet3, "Should meet clustered-in-rack constraints.")

    groupRack ++= Set(task4_rack1)

    val clusterRackMet4 = Constraints.meetsConstraint(
      groupRack,
      makeOffer("foohost", Set(TextAttribute("foo", "bar"), TextAttribute("rackid", "rack-2"))),
      groupByRack)

    assert(clusterRackMet4, "Should meet clustered-in-rack constraints.")
  }
}
tnachen/marathon
src/test/scala/mesosphere/mesos/ConstraintsTest.scala
Scala
apache-2.0
11,030
package mockws

import org.scalatest.prop.PropertyChecks
import org.scalatest.{FunSuite, Matchers}
import play.api.libs.concurrent.Execution.Implicits._
import play.api.libs.iteratee.Concurrent._
import play.api.libs.iteratee.{Enumerator, Iteratee}
import play.api.libs.json.Json
import play.api.libs.ws.{WSAuthScheme, WSSignatureCalculator, WSClient, WSResponseHeaders}
import play.api.mvc.BodyParsers.parse
import play.api.mvc.Results._
import play.api.mvc.{Action, ResponseHeader, Result}
import play.api.test.FakeRequest
import play.api.test.Helpers._
import org.mockito.Mockito._

import scala.concurrent.Promise

/**
 * Tests that [[MockWS]] simulates a WS client
 */
class MockWSTest extends FunSuite with Matchers with PropertyChecks {

  test("mock WS simulates all HTTP methods") {
    val ws = MockWS {
      case (GET, "/get") => Action { Ok("get ok") }
      case (POST, "/post") => Action { Ok("post ok") }
      case (PUT, "/put") => Action { Ok("put ok") }
      case (DELETE, "/delete") => Action { Ok("delete ok") }
      // PATCH has no constant in Helpers here, hence the string literal.
      case ("PATCH", "/patch") => Action { Ok("patch ok") }
    }

    await(ws.url("/get").get()).body shouldEqual "get ok"
    await(ws.url("/post").post("")).body shouldEqual "post ok"
    await(ws.url("/put").put("")).body shouldEqual "put ok"
    await(ws.url("/delete").delete()).body shouldEqual "delete ok"
    await(ws.url("/patch").patch("")).body shouldEqual "patch ok"
  }

  test("mock WS simulates the HTTP status code") {
    val ws = MockWS {
      case (GET, "/get200") => Action { Ok("") }
      case (GET, "/get201") => Action { Created("") }
      case (GET, "/get404") => Action { NotFound("") }
    }

    await(ws.url("/get200").get()).status shouldEqual OK
    await(ws.url("/get201").get()).status shouldEqual CREATED
    await(ws.url("/get404").get()).status shouldEqual NOT_FOUND
  }

  test("mock WS simulates a POST with a JSON payload") {
    val ws = MockWS {
      case (POST, "/") => Action { request =>
        Ok((request.body.asJson.get \\ "result").as[String])
      }
    }

    val json = Json.parse("""{"result": "OK"}""")

    val response = await(ws.url("/").post(json))
    response.status shouldEqual OK
    response.body shouldEqual "OK"
  }

  test("mock WS simulates a POST with a JSON payload with a custom content type") {
    val ws = MockWS {
      // tolerantJson accepts the non-standard content type below.
      case (POST, "/") => Action(parse.tolerantJson) { request =>
        Ok((request.body \\ "result").as[String])
      }
    }

    val json = Json.parse("""{"result": "OK"}""")

    val response = await(ws.url("/").withHeaders(CONTENT_TYPE -> "application/my-json").post(json))
    response.status shouldEqual OK
    response.body shouldEqual "OK"
  }

  test("mock WS sets the response content type") {
    val ws = MockWS {
      case (GET, "/text") => Action { Ok("text") }
      case (GET, "/json") => Action { Ok(Json.parse("""{ "type": "json" }""")) }
    }

    val text = await(ws.url("/text").get())
    val json = await(ws.url("/json").get())

    text.header(CONTENT_TYPE) shouldEqual Some("text/plain; charset=utf-8")
    json.header(CONTENT_TYPE) shouldEqual Some("application/json; charset=utf-8")
  }

  test("mock WS simulates a streaming") {
    // Controller under test: pipes the streamed WS response straight through.
    def testedController(ws: WSClient) = Action.async {
      ws.url("/").stream().map { case (rh, content) =>
        Result(
          header = ResponseHeader(rh.status, rh.headers.mapValues(_.head)),
          body = content
        )
      }
    }

    val ws = MockWS {
      case (GET, "/") => Action {
        Result(
          header = ResponseHeader(201, Map("x-header" -> "x-value")),
          body = Enumerator("first", "second", "third").map(_.getBytes)
        )
      }
    }

    val response = testedController(ws).apply(FakeRequest())
    status(response) shouldEqual CREATED
    contentAsString(response) shouldEqual "firstsecondthird"
    header("x-header", response) shouldEqual Some("x-value")
  }

  test("mock WS simulates a GET with a consumer") {
    def testedController(ws: WSClient) = Action.async {
      val resultP = Promise[Result]()
      // The consumer fulfils the promise with a Result wired to the joined iteratee.
      def consumer(rh: WSResponseHeaders): Iteratee[Array[Byte], Unit] = {
        val (wsConsumer, content) = joined[Array[Byte]]
        resultP.success(Result(
          header = ResponseHeader(rh.status, rh.headers.mapValues(_.head)),
          body = content
        ))
        wsConsumer
      }
      ws.url("/").get(consumer).map(_.run)
      resultP.future
    }

    val ws = MockWS {
      case (GET, "/") => Action {
        Result(
          header = ResponseHeader(201, Map("x-header" -> "x-value")),
          body = Enumerator("first", "second", "third").map(_.getBytes)
        )
      }
    }

    val response = testedController(ws).apply(FakeRequest())
    status(response) shouldEqual CREATED
    contentAsString(response) shouldEqual "firstsecondthird"
    header("x-header", response) shouldEqual Some("x-value")
  }

  test("mock WS can produce JSON") {
    val ws = MockWS {
      case (GET, "/json") => Action {
        Ok(Json.obj("field" -> "value"))
      }
    }

    val wsResponse = await( ws.url("/json").get() )
    wsResponse.body shouldEqual """{"field":"value"}"""
    (wsResponse.json \\ "field").asOpt[String] shouldEqual Some("value")
  }

  test("mock WS can produce XML") {
    val ws = MockWS {
      case (GET, "/xml") => Action {
        Ok(<foo><bar>value</bar></foo>)
      }
    }

    val wsResponse = await( ws.url("/xml").get() )
    wsResponse.body shouldEqual "<foo><bar>value</bar></foo>"
    (wsResponse.xml \\ "bar").text shouldEqual "value"
  }

  test("a call to an unknown route causes an exception") {
    val ws = MockWS {
      case (GET, "/url") => Action { Ok("") }
    }

    the [Exception] thrownBy {
      ws.url("/url2").get()
    } should have message "no route defined for GET /url2"

    the [Exception] thrownBy {
      ws.url("/url").delete()
    } should have message "no route defined for DELETE /url"
  }

  test("mock WS supports custom response content types") {
    val ws = MockWS {
      case (_, _) => Action {
        Ok("hello").as("hello/world")
      }
    }

    val wsResponse = await( ws.url("/").get() )
    wsResponse.status shouldEqual OK
    wsResponse.header(CONTENT_TYPE) shouldEqual Some("hello/world")
    wsResponse.body shouldEqual "hello"
  }

  test("mock WS supports custom request content types") {
    val ws = MockWS {
      case (_, _) => Action { request =>
        request.contentType match {
          case Some(ct) => Ok(ct)
          case None => BadRequest("no content type")
        }
      }
    }

    val wsResponse = await( ws.url("/").withHeaders(CONTENT_TYPE -> "hello/world").get)
    wsResponse.status shouldEqual OK
    wsResponse.body shouldEqual "hello/world"
  }

  test("mock WS supports query parameter") {
    // Property-based: any non-empty parameter name must round-trip its value.
    forAll { (q: String, v: String) =>
      whenever(q.nonEmpty) {
        val ws = MockWS {
          case (GET, "/uri") => Action { request =>
            request.getQueryString(q).fold[Result](NotFound) { id => Ok(id) }
          }
        }

        val wsResponse = await( ws.url("/uri").withQueryString(q -> v).get)
        wsResponse.status shouldEqual OK
        wsResponse.body shouldEqual v
      }
    }
  }

  test("mock WS supports varargs passed as immutable Seqs") {
    forAll { (q: String, v: String) =>
      whenever(q.nonEmpty) {
        val ws = MockWS {
          case (GET, "/uri") => Action { request =>
            request.getQueryString(q).fold[Result](NotFound) { id => Ok(id) }
          }
        }

        await( ws.url("/uri").withHeaders(Seq(q -> v): _*).get )
        await( ws.url("/uri").withQueryString(Seq(q -> v): _*).get )
      }
    }
  }

  test("mock WS supports method in execute") {
    val ws = MockWS {
      case (GET, "/get") => Action { Ok("get ok") }
      case (POST, "/post") => Action { Ok("post ok") }
      case (PUT, "/put") => Action { Ok("put ok") }
      case (DELETE, "/delete") => Action { Ok("delete ok") }
    }

    await(ws.url("/get").withMethod("GET").execute()).body shouldEqual "get ok"
    await(ws.url("/post").withMethod("POST").execute()).body shouldEqual "post ok"
    await(ws.url("/put").withMethod("PUT").execute()).body shouldEqual "put ok"
    await(ws.url("/delete").withMethod("DELETE").execute()).body shouldEqual "delete ok"
  }

  test("mock WS supports method in stream") {
    def testedController(ws: WSClient) = Action.async {
      ws.url("/").withMethod("POST").stream().map { case (rh, content) =>
        Result(
          header = ResponseHeader(rh.status, rh.headers.mapValues(_.head)),
          body = content
        )
      }
    }

    val ws = MockWS {
      case (POST, "/") => Action {
        Result(
          header = ResponseHeader(201, Map("x-header" -> "x-value")),
          body = Enumerator("first", "second", "third").map(_.getBytes)
        )
      }
    }

    val response = testedController(ws).apply(FakeRequest())
    status(response) shouldEqual CREATED
    contentAsString(response) shouldEqual "firstsecondthird"
    header("x-header", response) shouldEqual Some("x-value")
  }

  test("should not raise NullPointerExceptions on method chaining") {
    val ws = MockWS {
      case (GET, "/get") => Action { Ok("get ok") }
    }

    // Every builder method must tolerate being called on the mock client.
    await(ws
      .url("/get")
      .sign(mock(classOf[WSSignatureCalculator]))
      .withVirtualHost("bla")
      .withFollowRedirects(follow = true)
      .withAuth("user", "password", WSAuthScheme.BASIC)
      .withRequestTimeout(10L)
      .get()).body shouldEqual "get ok"
  }
}
matterche/play-mockws
src/test/scala/mockws/MockWSTest.scala
Scala
mit
9,668
package com.thangiee.metadroid

import com.typesafe.scalalogging.LazyLogging

import scala.annotation.StaticAnnotation
import scala.language.experimental.macros
import scala.reflect.macros.blackbox

/** Macro annotation: rewrites the annotated class so its constructor parameters are
  * (de)serialized through an Android Intent using boopickle. */
class Case extends StaticAnnotation {
  def macroTransform(annottees: Any*) = macro CaseImpl.impl
}

object CaseImpl extends LazyLogging {
  // Prefix used for Intent extra keys so they cannot collide with other apps' extras.
  val namespace = "com.thangiee.metadroid."

  /** Macro implementation: emits the rewritten class plus a companion `apply`
    * that builds the Intent carrying the pickled constructor arguments. */
  def impl(c: blackbox.Context)(annottees: c.Expr[Any]*): c.Expr[Any] = {
    import c.universe._

    // convert type A* -> Seq[A]
    val convertRepeatedIntoSeq: ValDef => ValDef = {
      case ValDef(mod, termName, AppliedTypeTree(Select(_, TypeName("<repeated>")), typeName), rhs) =>
        ValDef(mod, termName, AppliedTypeTree(Ident(TypeName("Seq")), typeName), rhs)
      case good => good // not a type A*, don't need to do anything
    }

    // Unpickle[A*] is a syntax error, convert to Unpickle[Seq[A]] if need be
    val deserializeParam: ValDef => ValDef = convertRepeatedIntoSeq andThen {
      case ValDef(_, name, typ, _) =>
        // Lazily unpickle the parameter from the Intent extra on first access.
        val rhs = q"Unpickle[$typ].fromBytes(java.nio.ByteBuffer.wrap(getIntent.getByteArrayExtra(${namespace + name.toString})))"
        typ match {
          // Option params tolerate a missing/corrupt extra by falling back to None.
          case AppliedTypeTree(Ident(TypeName("Option")), _) =>
            ValDef(Modifiers(Flag.LAZY), name, typ, q"scala.util.Try(..$rhs).recover{case _ => None}.toOption.flatten")
          case _ =>
            ValDef(Modifiers(Flag.LAZY), name, typ, rhs)
        }
    }

    // Emits the statement that pickles one parameter into the Intent.
    val serializeParam: ValDef => Tree = param =>
      q"intent.putExtra(${namespace + param.name.toString}, Pickle.intoBytes(${param.name}).array())"

    val trees = annottees.map(_.tree).toList

    val result: c.universe.Tree = trees.headOption match {
      case Some(q"$mods class $tpname[..$tparams] $ctorMods(...$paramss) extends $parent with ..$parents { $self => ..$stats }") =>
        val classParams: Seq[ValDef] = paramss.flatten

        val className: TermName = tpname match {
          case name: TypeName => name.toTermName
          case other => c.abort(c.enclosingPosition, s"Fail to extract class name: ${showRaw(other)}")
        }

        // Original constructor params become lazy vals deserialized from the Intent.
        val genClass =
          q"""
            $mods class $tpname[..$tparams] $ctorMods() extends $parent with ..$parents {
              $self =>
              import boopickle.Default._
              ..${classParams.map(deserializeParam) ++ stats}
            }
          """

        // Companion gains (or is created with) an apply() that builds the Intent.
        val genCompanionObj = trees.tail.headOption match {
          case Some(q"$mods object $tname extends { ..$earlydefns } with ..$parents { $self => ..$body }") =>
            q"""
              object $tname extends { ..$earlydefns } with ..$parents {
                $self =>
                ..$body
                def apply(...$paramss)(implicit ctx: android.content.Context): android.content.Intent = {
                  import boopickle.Default._
                  val intent = new android.content.Intent(ctx, classOf[$tpname])
                  ..${classParams.map(serializeParam)}
                  intent
                }
              }
            """
          case None => // no companion obj defined
            q"""
              object $className {
                def apply(...$paramss)(implicit ctx: android.content.Context): android.content.Intent = {
                  import boopickle.Default._
                  val intent = new android.content.Intent(ctx, classOf[$tpname])
                  ..${classParams.map(serializeParam)}
                  intent
                }
              }
            """
          case other => c.abort(c.enclosingPosition, s"Expected companion object but got: ${showRaw(other)}")
        }

        q"..${List(genClass, genCompanionObj)}"

      case other => c.abort(c.enclosingPosition, s"Expected class but got: ${showRaw(other)}")
    }

    logger.debug(result.toString())
    c.Expr[Any](result)
  }
}
Thangiee/Metadroid
core/src/main/scala/com/thangiee/metadroid/Case.scala
Scala
mit
3,805
package com.twitter.finagle.util

import java.util.concurrent.ThreadFactory

import org.mockito.Matchers.any
import org.mockito.Mockito.verify
import org.scalatest.FunSuite
import org.scalatestplus.mockito.MockitoSugar

class BlockingTimeTrackingThreadFactoryTest extends FunSuite with MockitoSugar {

  /** A Runnable that counts how many times `run()` is invoked. */
  private class CountingRunnable extends Runnable {
    var invocations = 0
    def run(): Unit = invocations += 1
  }

  test("delegates to newRunnable and underlying ThreadFactory") {
    val underlying = mock[ThreadFactory]
    val tracking = new BlockingTimeTrackingThreadFactory(underlying)

    val runnable = new CountingRunnable()
    tracking.newThread(runnable)

    // Creating a thread must not run the Runnable itself...
    assert(runnable.invocations == 0)
    // ...and must delegate thread creation to the wrapped factory.
    verify(underlying).newThread(any())
  }
}
luciferous/finagle
finagle-core/src/test/scala/com/twitter/finagle/util/BlockingTimeTrackingThreadFactoryTest.scala
Scala
apache-2.0
715
/*
 * Copyright 2017 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package uk.gov.hmrc.play.microservice.bootstrap

import com.kenshoo.play.metrics.PlayModule
import org.scalatest.mock.MockitoSugar
import org.scalatest.{Matchers, WordSpecLike}
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.mvc.EssentialFilter
import play.api.test.Helpers._
import uk.gov.hmrc.play.microservice.filters.{AuditFilter, LoggingFilter}

class MicroserviceFiltersSpec extends WordSpecLike with Matchers with MockitoSugar {

  val config: Map[String, _] = Map("play.modules.enabled" -> Seq("com.kenshoo.play.metrics.PlayModule"))

  /** Builds a MicroserviceFilters instance with mocked filters and the given auth filter. */
  private def filtersWith(maybeAuthFilter: Option[EssentialFilter]): MicroserviceFilters =
    new MicroserviceFilters {
      override def loggingFilter: LoggingFilter = mock[LoggingFilter]
      override def microserviceAuditFilter: AuditFilter = mock[AuditFilter]
      override def authFilter: Option[EssentialFilter] = maybeAuthFilter
    }

  "MicroserviceFilters" should {

    "include authFilter if defined" in running(new GuiceApplicationBuilder().bindings(new PlayModule).build()) {
      filtersWith(Some(mock[EssentialFilter])).microserviceFilters.size shouldBe 6
    }

    "not include authFilter if not defined" in running(new GuiceApplicationBuilder().bindings(new PlayModule).build()) {
      filtersWith(None).microserviceFilters.size shouldBe 5
    }
  }
}
iteratoruk/microservice-bootstrap
src/test/scala/uk/gov/hmrc/play/microservice/bootstrap/MicroserviceFiltersSpec.scala
Scala
apache-2.0
2,122
package edu.cmu.dynet

/** Behind the scenes, DyNet frequently operates on C++ `std::vector<>` types. The wrapper
 * classes implement [[scala.collection.mutable.IndexedSeq]] to make them easy to work with
 * in Scala code. Each has a `size: Long` constructor and a `values: Seq[_]` constructor.
 */

import scala.language.implicitConversions
import scala.collection.JavaConverters._

/** Wraps the SWIG-generated `internal.IntVector` as a mutable IndexedSeq[Int]. */
class IntVector private[dynet] (private[dynet] val vector: internal.IntVector)
    extends scala.collection.mutable.IndexedSeq[Int] {
  def this(size: Long) { this(new internal.IntVector(size)) }
  def this(values: Seq[Int] = Seq.empty) {
    this(new internal.IntVector(values.map(int2Integer).asJavaCollection))
  }

  def add(v: Int): Unit = vector.add(v)

  override def apply(idx: Int): Int = vector.get(idx)
  override def length: Int = vector.size.toInt
  override def update(idx: Int, elem: Int): Unit = vector.set(idx, elem)
}

/** SWIG converts C++ `unsigned` to Scala `Long` */
class UnsignedVector private[dynet] (private[dynet] val vector: internal.UnsignedVector)
    extends scala.collection.mutable.IndexedSeq[Long] {
  def this(size: Long) { this(new internal.UnsignedVector(size)) }
  // NOTE(review): `.toInt` narrows each Long — values above Int.MaxValue
  // would wrap; presumably callers only pass small non-negative ids.
  def this(values: Seq[Long] = Seq.empty) {
    this(new internal.UnsignedVector(values.map(_.toInt).map(int2Integer).asJavaCollection))
  }

  def add(v: Long): Unit = vector.add(v)

  override def apply(idx: Int): Long = vector.get(idx)
  override def length: Int = vector.size.toInt
  override def update(idx: Int, elem: Long): Unit = vector.set(idx, elem)
}

/** Wraps the SWIG-generated `internal.FloatVector` as a mutable IndexedSeq[Float]. */
class FloatVector private[dynet] (private[dynet] val vector: internal.FloatVector)
    extends scala.collection.mutable.IndexedSeq[Float] {
  def this(size: Long) { this(new internal.FloatVector(size)) }
  def this(values: Seq[Float] = Seq.empty) {
    this(new internal.FloatVector(values.map(float2Float).asJavaCollection))
  }

  def add(v: Float): Unit = vector.add(v)

  override def apply(idx: Int): Float = vector.get(idx)
  override def length: Int = vector.size.toInt
  override def update(idx: Int, elem: Float): Unit = vector.set(idx, elem)
}

/** Vector of expressions tied to a specific computation-graph version:
  * every access path checks freshness so stale expressions cannot be reused. */
class ExpressionVector private[dynet] (
    private[dynet] val version: Long,
    private[dynet] val vector: internal.ExpressionVector)
    extends scala.collection.mutable.IndexedSeq[Expression] {

  private[dynet] def this(vector: internal.ExpressionVector) = {
    this(ComputationGraph.version, vector)
  }

  def this(size: Long) { this(new internal.ExpressionVector(size)) }
  def this(values: Seq[Expression] = Seq.empty) {
    this(new internal.ExpressionVector(values.map(_.expr).asJavaCollection))
    ensureFresh()
  }

  // Fails fast when the computation graph has been renewed since construction.
  def ensureFresh(): Unit = {
    if (version != ComputationGraph.version) {
      throw new RuntimeException("stale")
    }
  }

  def add(v: Expression): Unit = {
    v.ensureFresh()
    vector.add(v.expr)
  }

  override def apply(idx: Int): Expression = new Expression(vector.get(idx))
  override def length: Int = vector.size.toInt
  override def update(idx: Int, elem: Expression): Unit = {
    elem.ensureFresh()
    vector.set(idx, elem.expr)
  }
}

/** Wraps the SWIG-generated vector-of-vectors of unsigned values. */
class UnsignedVectorVector private[dynet] (private[dynet] val vector: internal.UnsignedVectorVector)
    extends scala.collection.mutable.IndexedSeq[UnsignedVector] {
  def this(size: Long) { this(new internal.UnsignedVectorVector(size)) }
  def this(values: Seq[UnsignedVector] = Seq.empty) {
    this(new internal.UnsignedVectorVector(values.map(_.vector).asJavaCollection))
  }

  def add(v: UnsignedVector): Unit = vector.add(v.vector)

  override def apply(idx: Int): UnsignedVector = new UnsignedVector(vector.get(idx))
  override def length: Int = vector.size.toInt
  override def update(idx: Int, v: UnsignedVector): Unit = vector.set(idx, v.vector)
}
xunzhang/dynet
contrib/swig/src/main/scala/edu/cmu/dynet/Vector.scala
Scala
apache-2.0
3,744
package debop4s.web.scalatra.scalate

import javax.servlet.http.HttpServletRequest

import org.fusesource.scalate.TemplateEngine
import org.fusesource.scalate.layout.DefaultLayoutStrategy
import org.scalatra._
import org.scalatra.scalate._

import scala.collection.mutable

/**
 * Base trait for servlets that serve web pages rendered with the Scalate template engine.
 * {{{
 * class MyServlet extends ScalatraWebStack {
 *   before() {
 *     contentType = "text/html"
 *   }
 *   get("/") {
 *     // Compiles and serves webapp/WEB-INF/templates/views/index.ssp
 *     ssp("index")
 *   }
 * }
 * }}}
 * @author sunghyouk.bae@gmail.com
 */
trait ScalatraWebStack extends ScalatraServlet with ScalateSupport {

  // wire up the precompiled template
  override protected def defaultTemplatePath: List[String] = List("/templates/views")

  override protected def createTemplateEngine(config: ConfigT) = {
    val engine = super.createTemplateEngine(config)
    // Apply the default layout for every template type Scalate supports (ssp, scaml, ...).
    engine.layoutStrategy = new DefaultLayoutStrategy(engine,
      TemplateEngine.templateTypes.map("/templates/layouts/default." + _): _*)
    engine.packagePrefix = "templates"
    engine
  }

  // Currently adds nothing beyond the defaults; extension point for subclasses.
  override protected def templateAttributes(implicit request: HttpServletRequest): mutable.Map[String, Any] = {
    super.templateAttributes ++ mutable.Map.empty
  }

  notFound {
    // remove content type in case it was set through an action
    contentType = null
    // Try to render a ScalateTemplate if no route matched
    findTemplate(requestPath) map { path =>
      contentType = "text/html"
      layoutTemplate(path)
    } orElse serveStaticResource() getOrElse resourceNotFound()
  }
}
debop/debop4s
debop4s-web-scalatra/src/main/scala/debop4s/web/scalatra/scalate/ScalatraWebStack.scala
Scala
apache-2.0
1,716
package com.ing.baker.il

/**
 * Describes how an event is transformed on output.
 *
 * @param newEventName      The name the event is renamed to.
 * @param ingredientRenames Mapping from original ingredient names to their new names.
 */
case class EventOutputTransformer(newEventName: String, ingredientRenames: Map[String, String])
ing-bank/baker
core/intermediate-language/src/main/scala/com/ing/baker/il/EventOutputTransformer.scala
Scala
mit
122
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.carbondata.spark.testsuite.datacompaction

import scala.collection.JavaConverters._

import org.apache.spark.sql.common.util.QueryTest
import org.scalatest.BeforeAndAfterAll

import org.apache.carbondata.core.metadata.{AbsoluteTableIdentifier, CarbonTableIdentifier}
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.statusmanager.SegmentStatusManager
import org.apache.carbondata.core.util.CarbonProperties

/**
 * FT for compaction scenario where major compaction will only compact the segments which are
 * present at the time of triggering the compaction.
 */
class MajorCompactionStopsAfterCompaction extends QueryTest with BeforeAndAfterAll {

  /**
   * Loads two segments, triggers a major compaction (producing segment "0.1"), then loads
   * two more segments which must NOT be swept into the earlier compaction.
   */
  override def beforeAll {
    sql("drop table if exists  stopmajor")
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "mm/dd/yyyy")
    sql(
      "CREATE TABLE IF NOT EXISTS stopmajor (country String, ID decimal(7,4), date Timestamp, name " +
        "String, " +
        "phonetype String, serialname String, salary Int) STORED BY 'org.apache.carbondata" +
        ".format'"
    )

    val csvFilePath1 = s"$resourcesPath/compaction/compaction1.csv"
    val csvFilePath2 = s"$resourcesPath/compaction/compaction2.csv"
    val csvFilePath3 = s"$resourcesPath/compaction/compaction3.csv"

    sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE stopmajor OPTIONS" +
      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
    )
    sql("LOAD DATA LOCAL INPATH '" + csvFilePath2 + "' INTO TABLE stopmajor  OPTIONS" +
      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
    )
    // compaction will happen here.
    sql("alter table stopmajor compact 'major'"
    )
    Thread.sleep(2000)
    sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE stopmajor OPTIONS" +
      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
    )
    sql("LOAD DATA LOCAL INPATH '" + csvFilePath2 + "' INTO TABLE stopmajor  OPTIONS" +
      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
    )
    // Block until the compacted segment "0.1" becomes visible (the call polls/waits);
    // its boolean result is intentionally ignored here.
    checkCompactionCompletedOrNot("0.1")
  }

  /**
   * Check if the compaction is completed or not.
   *
   * Polls up to 10 times, sleeping 2s between attempts, until the required segment
   * shows up in the valid-segment list.
   *
   * @param requiredSeg the segment id (e.g. "0.1") that must appear after compaction
   * @return true if the segment was found within the retry budget
   */
  def checkCompactionCompletedOrNot(requiredSeg: String): Boolean = {
    var status = false
    var noOfRetries = 0
    while (!status && noOfRetries < 10) {
      val identifier = new AbsoluteTableIdentifier(
        CarbonProperties.getInstance.getProperty(CarbonCommonConstants.STORE_LOCATION),
        new CarbonTableIdentifier(
          CarbonCommonConstants.DATABASE_DEFAULT_NAME, "stopmajor", noOfRetries + "")
      )
      val segmentStatusManager: SegmentStatusManager = new SegmentStatusManager(identifier)
      val segments = segmentStatusManager.getValidAndInvalidSegments.getValidSegments.asScala.toList
      segments.foreach(seg =>
        System.out.println( "valid segment is =" + seg)
      )

      if (!segments.contains(requiredSeg)) {
        // wait for 2 seconds for compaction to complete.
        System.out.println("sleeping for 2 seconds.")
        Thread.sleep(2000)
        noOfRetries += 1
      } else {
        status = true
      }
    }
    status
  }

  /**
   * Test whether major compaction is not included in minor compaction.
   */
  test("delete merged folder and check segments") {
    // delete merged segments
    sql("clean files for table stopmajor")

    val identifier = new AbsoluteTableIdentifier(
      CarbonProperties.getInstance.getProperty(CarbonCommonConstants.STORE_LOCATION),
      new CarbonTableIdentifier(CarbonCommonConstants.DATABASE_DEFAULT_NAME, "stopmajor", "rrr")
    )
    val segmentStatusManager: SegmentStatusManager = new SegmentStatusManager(identifier)

    // merged segment should not be there
    val segments = segmentStatusManager.getValidAndInvalidSegments.getValidSegments.asScala.toList
    assert(segments.contains("0.1"))
    assert(!segments.contains("0.2"))
    assert(!segments.contains("0"))
    assert(!segments.contains("1"))
    assert(segments.contains("2"))
    assert(segments.contains("3"))
  }

  override def afterAll {
    sql("drop table if exists  stopmajor")
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
  }
}
JihongMA/incubator-carbondata
integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionStopsAfterCompaction.scala
Scala
apache-2.0
5,139
package com.dataintuitive.luciuscore
package model.v3

import io.SampleCompoundRelationsIO.loadSampleCompoundRelationsFromFileV2
import DbFunctions._
import com.dataintuitive.test.BaseSparkContextSpec
import org.scalatest.flatspec.AnyFlatSpec

/**
 * Tests for the rank-vector scoring functions in `DbFunctions`
 * (`queryDbRowPwid` and `queryDbRow`), driven by a sample relations file.
 */
class DbFunctionsTest extends AnyFlatSpec with BaseSparkContextSpec{

  info("Testing rank vector scoring")

  // load relations data in order to get an example DbRow object
  val sampleCompoundRelationsV2Source = "src/test/resources/v2/sampleCompoundRelations.txt"
  val aDbRow = loadSampleCompoundRelationsFromFileV2(sc, sampleCompoundRelationsV2Source).first

  // the example data does not have the r vectors we are testing, so construct new DbRow
  // (all three rank vectors are fixed-length 4 to match the random query vectors below)
  val newDbRow = DbRow(aDbRow.id,
    SampleAnnotations(aDbRow.sampleAnnotations.sample,
      Some(Array(2.0, 2.0, 2.0, 2.0)),
      Some(Array(2.0, 2.0, 2.0, 2.0)),
      Some(Array(2.0, 2.0, 2.0, 2.0))),
    aDbRow.compoundAnnotations)

  "queryDbRowPwid function" should "give a numerical value for a single query" in {
    val x: RankVector = Array.fill(4){scala.util.Random.nextInt(10).asInstanceOf[Double]}
    assert(queryDbRowPwid(newDbRow, x).values.toSeq.head.head.get.isInstanceOf[Double] === true)
  }

  it should "give a list of size two for two queries" in {
    val x: RankVector = Array.fill(4){scala.util.Random.nextInt(10).asInstanceOf[Double]}
    val y: RankVector = Array.fill(4){scala.util.Random.nextInt(10).asInstanceOf[Double]}
    assert(queryDbRowPwid(newDbRow, x, y).values.toSeq.head.size === 2)
  }

  "queryDbRow function" should "give a numerical value for a single query" in {
    val x: RankVector = Array.fill(4){scala.util.Random.nextInt(10).asInstanceOf[Double]}
    assert(queryDbRow(newDbRow, x).get._2.head.isInstanceOf[Double] === true)
  }

  it should "give a list of size two for two queries" in {
    val x: RankVector = Array.fill(4){scala.util.Random.nextInt(10).asInstanceOf[Double]}
    val y: RankVector = Array.fill(4){scala.util.Random.nextInt(10).asInstanceOf[Double]}
    assert(queryDbRow(newDbRow, x, y).get._2.size === 2)
  }

}
data-intuitive/LuciusCore
src/test/scala/com/dataintuitive/luciuscore/model/v3/DbFunctionsTest.scala
Scala
apache-2.0
2,084
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.catalyst.optimizer

import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._

/**
 * Replaces logical [[AsOfJoin]] operator using a combination of Join and Aggregate operator.
 *
 * Input Pseudo-Query:
 * {{{
 *    SELECT * FROM left ASOF JOIN right ON (condition, as_of on(left.t, right.t), tolerance)
 * }}}
 *
 * Rewritten Query:
 * {{{
 *   SELECT left.*, __right__.*
 *   FROM (
 *        SELECT
 *             left.*,
 *             (
 *                  SELECT MIN_BY(STRUCT(right.*), left.t - right.t) AS __nearest_right__
 *                  FROM right
 *                  WHERE condition AND left.t >= right.t AND right.t >= left.t - tolerance
 *             ) as __right__
 *        FROM left
 *        )
 *   WHERE __right__ IS NOT NULL
 * }}}
 */
object RewriteAsOfJoin extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformUpWithNewOutput {
    case j @ AsOfJoin(left, right, asOfCondition, condition, joinType, orderExpression, _) =>
      // Combine the user condition with the as-of condition and wrap references to the
      // left side in OuterReference, since they will live inside a correlated subquery.
      val conditionWithOuterReference =
        condition.map(And(_, asOfCondition)).getOrElse(asOfCondition).transformUp {
          case a: AttributeReference if left.outputSet.contains(a) => OuterReference(a)
        }
      val filtered = Filter(conditionWithOuterReference, right)

      // The ordering expression also references the left side, so wrap those too.
      val orderExpressionWithOuterReference = orderExpression.transformUp {
        case a: AttributeReference if left.outputSet.contains(a) => OuterReference(a)
      }
      // Pick the "nearest" matching right row as a single struct via MIN_BY.
      val rightStruct = CreateStruct(right.output)
      val nearestRight = MinBy(rightStruct, orderExpressionWithOuterReference)
        .toAggregateExpression()
      val aggExpr = Alias(nearestRight, "__nearest_right__")()
      val aggregate = Aggregate(Seq.empty, Seq(aggExpr), filtered)

      // Attach the scalar subquery result as an extra "__right__" column on the left plan.
      val projectWithScalarSubquery = Project(
        left.output :+ Alias(ScalarSubquery(aggregate, left.output), "__right__")(),
        left)

      // LEFT OUTER keeps unmatched left rows; any other join type filters them out.
      val filterRight = joinType match {
        case LeftOuter => projectWithScalarSubquery
        case _ =>
          Filter(IsNotNull(projectWithScalarSubquery.output.last), projectWithScalarSubquery)
      }

      // Unpack the struct back into individual right-side columns.
      val project = Project(
        left.output ++
          right.output.zipWithIndex.map { case (out, idx) =>
            Alias(GetStructField(filterRight.output.last, idx), out.name)()
          },
        filterRight)

      // Map the old AsOfJoin output attributes to the rewritten plan's attributes.
      val attrMapping = j.output.zip(project.output)
      project -> attrMapping
  }
}
ueshin/apache-spark
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/RewriteAsOfJoin.scala
Scala
apache-2.0
3,460
/**
 * Copyright 2014 Frugal Mechanic (http://frugalmechanic.com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package fm.sbt

import com.amazonaws.ClientConfiguration
import com.amazonaws.SDKGlobalConfiguration.{ACCESS_KEY_SYSTEM_PROPERTY, SECRET_KEY_SYSTEM_PROPERTY}
import com.amazonaws.SDKGlobalConfiguration.{ACCESS_KEY_ENV_VAR, SECRET_KEY_ENV_VAR}
import com.amazonaws.auth._
import com.amazonaws.regions.{Region, Regions, RegionUtils}
import com.amazonaws.services.s3.{AmazonS3Client, AmazonS3URI}
import com.amazonaws.services.s3.model.{AmazonS3Exception, GetObjectRequest, ListObjectsRequest, ObjectListing, ObjectMetadata, PutObjectResult, S3Object}
import org.apache.ivy.util.{CopyProgressEvent, CopyProgressListener, Message, FileUtil}
import org.apache.ivy.util.url.URLHandler
import java.io.{File, InputStream}
import java.net.{InetAddress, URI, URL}
import java.util.concurrent.ConcurrentHashMap
import scala.collection.JavaConverters._
import scala.util.matching.Regex
import scala.util.Try

object S3URLHandler {
  // This is for matching region names in URLs or host names
  // (longest names first so e.g. "us-west-2" wins over any shorter overlapping match)
  private val RegionMatcher: Regex = Regions.values().map{ _.getName }.sortBy{ -1 * _.length }.mkString("|").r

  // Concrete URLInfo so the private constructor of URLHandler.URLInfo can be invoked.
  private class S3URLInfo(available: Boolean, contentLength: Long, lastModified: Long) extends URLHandler.URLInfo(available, contentLength, lastModified)

  // Looks up bucket-specific credentials in Java system properties.
  private class BucketSpecificSystemPropertiesCredentialsProvider(bucket: String) extends BucketSpecificCredentialsProvider(bucket) {
    def AccessKeyName: String = ACCESS_KEY_SYSTEM_PROPERTY
    def SecretKeyName: String = SECRET_KEY_SYSTEM_PROPERTY

    protected def getProp(names: String*): String = names.map{ System.getProperty }.flatMap{ Option(_) }.head.trim
  }

  // Looks up bucket-specific credentials in environment variables (name sanitized to A-Z0-9_).
  private class BucketSpecificEnvironmentVariableCredentialsProvider(bucket: String) extends BucketSpecificCredentialsProvider(bucket) {
    def AccessKeyName: String = ACCESS_KEY_ENV_VAR
    def SecretKeyName: String = SECRET_KEY_ENV_VAR

    protected def getProp(names: String*): String = names.map{ cleanName }.map{ System.getenv }.flatMap{ Option(_) }.head.trim

    // Env var names can't contain '-' or '.', so normalize them.
    private def cleanName(s: String): String = s.toUpperCase.replace('-','_').replace('.','_').replaceAll("[^A-Z0-9_]", "")
  }

  // Shared lookup logic: tries "<key>.<bucket>" then "<bucket>.<key>" naming schemes.
  private abstract class BucketSpecificCredentialsProvider(bucket: String) extends AWSCredentialsProvider {
    def AccessKeyName: String
    def SecretKeyName: String

    def getCredentials(): AWSCredentials = {
      val accessKey: String = getProp(s"${AccessKeyName}.${bucket}", s"${bucket}.${AccessKeyName}")
      val secretKey: String = getProp(s"${SecretKeyName}.${bucket}", s"${bucket}.${SecretKeyName}")

      new BasicAWSCredentials(accessKey, secretKey)
    }

    def refresh(): Unit = {}

    // This should throw an exception if the value is missing
    protected def getProp(names: String*): String
  }
}

/**
 * This implements the Ivy URLHandler
 */
final class S3URLHandler extends URLHandler {
  import URLHandler.{UNAVAILABLE, URLInfo}
  import S3URLHandler._

  // All timeout-taking variants delegate to getURLInfo; the timeout itself is unused by S3.
  def isReachable(url: URL): Boolean = getURLInfo(url).isReachable
  def isReachable(url: URL, timeout: Int): Boolean = getURLInfo(url, timeout).isReachable
  def getContentLength(url: URL): Long = getURLInfo(url).getContentLength
  def getContentLength(url: URL, timeout: Int): Long = getURLInfo(url, timeout).getContentLength
  def getLastModified(url: URL): Long = getURLInfo(url).getLastModified
  def getLastModified(url: URL, timeout: Int): Long = getURLInfo(url, timeout).getLastModified
  def getURLInfo(url: URL): URLInfo = getURLInfo(url, 0)

  private def debug(msg: String): Unit = Message.debug("S3URLHandler."+msg)

  // Builds a provider reading ~/.sbt/<fileName> as a properties file.
  private def makePropertiesFileCredentialsProvider(fileName: String): PropertiesFileCredentialsProvider = {
    val dir: File = new File(System.getProperty("user.home"), ".sbt")
    val file: File = new File(dir, fileName)
    new PropertiesFileCredentialsProvider(file.toString)
  }

  // Credential lookup order: bucket-specific env vars / sys props / files, then the
  // generic env vars, sys props, ~/.sbt/.s3credentials and finally the EC2 instance profile.
  private def makeCredentialsProviderChain(bucket: String): AWSCredentialsProviderChain = {
    val providers = Vector(
      new BucketSpecificEnvironmentVariableCredentialsProvider(bucket),
      new BucketSpecificSystemPropertiesCredentialsProvider(bucket),
      makePropertiesFileCredentialsProvider(s".s3credentials_${bucket}"),
      makePropertiesFileCredentialsProvider(s".${bucket}_s3credentials"),
      new EnvironmentVariableCredentialsProvider(),
      new SystemPropertiesCredentialsProvider(),
      makePropertiesFileCredentialsProvider(".s3credentials"),
      new InstanceProfileCredentialsProvider()
    )

    new AWSCredentialsProviderChain(providers: _*)
  }

  // Per-bucket credential cache so the provider chain is only walked once per bucket.
  private val credentialsCache: ConcurrentHashMap[String,AWSCredentials] = new ConcurrentHashMap()

  /** Resolves (and caches) AWS credentials for the given bucket; throws if none can be found. */
  def getCredentials(bucket: String): AWSCredentials = {
    var credentials: AWSCredentials = credentialsCache.get(bucket)

    if (null == credentials) {
      credentials = try {
        makeCredentialsProviderChain(bucket).getCredentials()
      } catch {
        case ex: com.amazonaws.AmazonClientException =>
          Message.error("Unable to find AWS Credentials.")
          throw ex
      }

      Message.info("S3URLHandler - Using AWS Access Key Id: "+credentials.getAWSAccessKeyId+" for bucket: "+bucket)

      credentialsCache.put(bucket, credentials)
    }

    credentials
  }

  /** Client configuration honoring the standard https.proxyHost/https.proxyPort system properties. */
  def getProxyConfiguration: ClientConfiguration = {
    val configuration = new ClientConfiguration()
    for {
      proxyHost <- Option( System.getProperty("https.proxyHost") )
      proxyPort <- Option( System.getProperty("https.proxyPort").toInt )
    } {
      configuration.setProxyHost(proxyHost)
      configuration.setProxyPort(proxyPort)
    }
    configuration
  }

  /** Creates a region-aware S3 client for the URL along with the parsed bucket and key. */
  def getClientBucketAndKey(url: URL): (AmazonS3Client, String, String) = {
    val (bucket, key) = getBucketAndKey(url)
    val client: AmazonS3Client = new AmazonS3Client(getCredentials(bucket), getProxyConfiguration)

    val region: Option[Region] = getRegion(url, bucket, client)
    region.foreach{ client.setRegion }

    (client, bucket, key)
  }

  // Uses an object-metadata HEAD request; a 404 maps to Ivy's UNAVAILABLE sentinel.
  def getURLInfo(url: URL, timeout: Int): URLInfo = try {
    debug(s"getURLInfo($url, $timeout)")

    val (client, bucket, key) = getClientBucketAndKey(url)

    val meta: ObjectMetadata = client.getObjectMetadata(bucket, key)

    val available: Boolean = true
    val contentLength: Long = meta.getContentLength
    val lastModified: Long = meta.getLastModified.getTime

    new S3URLInfo(available, contentLength, lastModified)
  } catch {
    case ex: AmazonS3Exception if ex.getStatusCode == 404 => UNAVAILABLE
  }

  // NOTE(review): the caller is responsible for closing the returned stream.
  def openStream(url: URL): InputStream = {
    debug(s"openStream($url)")

    val (client, bucket, key) = getClientBucketAndKey(url)
    val obj: S3Object = client.getObject(bucket, key)
    obj.getObjectContent()
  }

  /**
   * A directory listing for keys/directories under this prefix
   */
  def list(url: URL): Seq[URL] = {
    debug(s"list($url)")

    val (client, bucket, key /* key is the prefix in this case */) = getClientBucketAndKey(url)

    // We want the prefix to have a trailing slash
    val prefix: String = key.stripSuffix("/") + "/"

    val request: ListObjectsRequest = new ListObjectsRequest().withBucketName(bucket).withPrefix(prefix).withDelimiter("/")

    val listing: ObjectListing = client.listObjects(request)

    require(!listing.isTruncated, "Truncated ObjectListing!  Making additional calls currently isn't implemented!")

    val keys: Seq[String] = listing.getCommonPrefixes.asScala ++ listing.getObjectSummaries.asScala.map{ _.getKey }

    val res: Seq[URL] = keys.map{ k: String =>
      new URL(url.toString.stripSuffix("/") + "/" + k.stripPrefix(prefix))
    }

    debug(s"list($url) => \\n  "+res.mkString("\\n  "))

    res
  }

  /** Downloads the S3 object at src into dest, preserving its last-modified timestamp. */
  def download(src: URL, dest: File, l: CopyProgressListener): Unit = {
    debug(s"download($src, $dest)")

    val (client, bucket, key) = getClientBucketAndKey(src)

    val event: CopyProgressEvent = new CopyProgressEvent()
    if (null != l) l.start(event)

    val meta: ObjectMetadata = client.getObject(new GetObjectRequest(bucket, key), dest)
    dest.setLastModified(meta.getLastModified.getTime)

    if (null != l) l.end(event) //l.progress(evt.update(EMPTY_BUFFER, 0, meta.getContentLength))
  }

  /** Uploads the local file src to the S3 location described by dest. */
  def upload(src: File, dest: URL, l: CopyProgressListener): Unit = {
    debug(s"upload($src, $dest)")

    val event: CopyProgressEvent = new CopyProgressEvent()
    if (null != l) l.start(event)

    val (client, bucket, key) = getClientBucketAndKey(dest)
    val res: PutObjectResult = client.putObject(bucket, key, src)

    if (null != l) l.end(event)
  }

  // I don't think we care what this is set to
  def setRequestMethod(requestMethod: Int): Unit = debug(s"setRequestMethod($requestMethod)")

  // Try to get the region of the S3 URL so we can set it on the S3Client
  def getRegion(url: URL, bucket: String, client: AmazonS3Client): Option[Region] = {
    val region: Option[String] = getRegionNameFromURL(url) orElse getRegionNameFromDNS(bucket) orElse getRegionNameFromService(bucket, client)

    region.map{ RegionUtils.getRegion }.flatMap{ Option(_) }
  }

  def getRegionNameFromURL(url: URL): Option[String] = {
    // We'll try the AmazonS3URI parsing first then fallback to our RegionMatcher
    getAmazonS3URI(url).map{ _.getRegion }.flatMap{ Option(_) } orElse RegionMatcher.findFirstIn(url.toString)
  }

  def getRegionNameFromDNS(bucket: String): Option[String] = {
    // This gives us something like s3-us-west-2-w.amazonaws.com which must have changed
    // at some point because the region from that hostname is no longer parsed by AmazonS3URI
    val canonicalHostName: String = InetAddress.getByName(bucket+".s3.amazonaws.com").getCanonicalHostName()

    // So we use our regex based RegionMatcher to try and extract the region since AmazonS3URI doesn't work
    RegionMatcher.findFirstIn(canonicalHostName)
  }

  // TODO: cache the result of this so we aren't always making the call
  def getRegionNameFromService(bucket: String, client: AmazonS3Client): Option[String] = {
    // This might fail if the current credentials don't have access to the getBucketLocation call
    Try { client.getBucketLocation(bucket) }.toOption
  }

  def getBucketAndKey(url: URL): (String, String) = {
    // The AmazonS3URI constructor should work for standard S3 urls.  But if a custom domain is being used
    // (e.g. snapshots.maven.frugalmechanic.com) then we treat the hostname as the bucket and the path as the key
    getAmazonS3URI(url).map{ amzn: AmazonS3URI =>
      (amzn.getBucket, amzn.getKey)
    }.getOrElse {
      // Probably a custom domain name - The host should be the bucket and the path the key
      (url.getHost, url.getPath.stripPrefix("/"))
    }
  }

  def getAmazonS3URI(uri: String): Option[AmazonS3URI] = getAmazonS3URI(URI.create(uri))
  def getAmazonS3URI(url: URL)   : Option[AmazonS3URI] = getAmazonS3URI(url.toURI)

  def getAmazonS3URI(uri: URI)   : Option[AmazonS3URI] = try {
    val httpsURI: URI =
      // If there is no scheme (e.g. new URI("s3-us-west-2.amazonaws.com/<bucket>"))
      // then we need to re-create the URI to add one and to also make sure the host is set
      if (uri.getScheme == null) new URI("https://"+uri)
      // AmazonS3URI can't parse the region from s3:// URLs so we rewrite the scheme to https://
      else new URI("https", uri.getUserInfo, uri.getHost, uri.getPort, uri.getPath, uri.getQuery, uri.getFragment)

    Some(new AmazonS3URI(httpsURI))
  } catch {
    case _: IllegalArgumentException => None
  }
}
rmarsch/fm-sbt-s3-resolver
src/main/scala/fm/sbt/S3URLHandler.scala
Scala
apache-2.0
12,271
package play.it.http

import play.api.mvc.{Results, EssentialAction}
import play.api.test._
import play.api.test.TestServer
import play.api.libs.concurrent.Promise
import play.api.libs.iteratee._
import java.util.concurrent.TimeUnit
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

/**
 * Verifies that pipelined HTTP/1.1 requests receive responses in request order,
 * even when the first response (headers or body) is deliberately delayed.
 */
object HttpPipeliningSpec extends PlaySpecification {

  "Play's http pipelining support" should {

    // Start a test server that routes every request to the given action.
    def withServer[T](action: EssentialAction)(block: Port => T) = {
      val port = testServerPort
      running(TestServer(port, FakeApplication(
        withRoutes = {
          case _ => action
        }
      ))) {
        block(port)
      }
    }

    "wait for the first response to return before returning the second" in withServer(EssentialAction { req =>
      req.path match {
        // /long delays its (whole) response by 100ms; /short answers immediately.
        case "/long" => Iteratee.flatten(Promise.timeout(Done(Results.Ok("long")), 100, TimeUnit.MILLISECONDS))
        case "/short" => Done(Results.Ok("short"))
        case _ => Done(Results.NotFound)
      }
    }) { port =>
      val responses = BasicHttpClient.pipelineRequests(port,
        BasicRequest("GET", "/long", "HTTP/1.1", Map(), ""),
        BasicRequest("GET", "/short", "HTTP/1.1", Map(), "")
      )
      responses(0).status must_== 200
      responses(0).body must beLeft("long")
      responses(1).status must_== 200
      responses(1).body must beLeft("short")
    }

    "wait for the first response body to return before returning the second" in withServer(EssentialAction { req =>
      req.path match {
        // /long streams three chunks, each 50ms apart, via a chunked response.
        case "/long" => Done(
          Results.Ok.chunked(Enumerator.unfoldM[Int, String](0) { chunk =>
            if (chunk < 3) {
              Promise.timeout(Some((chunk + 1, chunk.toString)), 50, TimeUnit.MILLISECONDS)
            } else {
              Future.successful(None)
            }
          })
        )
        case "/short" => Done(Results.Ok("short"))
        case _ => Done(Results.NotFound)
      }
    }) { port =>
      val responses = BasicHttpClient.pipelineRequests(port,
        BasicRequest("GET", "/long", "HTTP/1.1", Map(), ""),
        BasicRequest("GET", "/short", "HTTP/1.1", Map(), "")
      )
      responses(0).status must_== 200
      responses(0).body must beRight
      responses(0).body.right.get._1 must containAllOf(Seq("0", "1", "2")).inOrder
      responses(1).status must_== 200
      responses(1).body must beLeft("short")
    }

  }

}
michaelahlers/team-awesome-wedding
vendor/play-2.2.1/framework/src/play-integration-test/src/test/scala/play/it/http/HttpPipeliningSpec.scala
Scala
mit
2,429
package spinoco.fs2.http

import cats.effect.IO
import fs2._
import org.scalacheck.Properties
import org.scalacheck.Prop._
import spinoco.protocol.http._
import spinoco.protocol.http.codec.HttpRequestHeaderCodec
import spinoco.protocol.http.header._
import spinoco.protocol.mime.{ContentType, MIMECharset, MediaType}

/**
 * Round-trip properties for HTTP request encoding/decoding:
 * a request must serialize to the expected wire bytes and the same
 * wire bytes must parse back into the equivalent header + body.
 */
object HttpRequestSpec extends Properties("HttpRequest") {

  import spinoco.fs2.http.util.chunk2ByteVector

  property("encode") = secure {
    // A GET with a UTF-8 body; headers (Host, Content-Type, Content-Length) are derived.
    val request =
      HttpRequest.get[IO](
        Uri.http("www.spinoco.com", "/hello-world.html")
      ).withUtf8Body("Hello World")

    HttpRequest.toStream(request, HttpRequestHeaderCodec.defaultCodec)
    .chunks.compile.toVector.map { _.map(chunk2ByteVector).reduce { _ ++ _ }.decodeUtf8 }
    .unsafeRunSync() ?= Right(Seq(
      "GET /hello-world.html HTTP/1.1"
      , "Host: www.spinoco.com"
      , "Content-Type: text/plain; charset=utf-8"
      , "Content-Length: 11"
      , ""
      , "Hello World"
    ).mkString("\\r\\n"))
  }

  property("decode") = secure {
    // Feed raw request bytes through the stream decoder and collect header + decoded body.
    Stream.chunk(Chunk.bytes(
      Seq(
        "GET /hello-world.html HTTP/1.1"
        , "Host: www.spinoco.com"
        , "Content-Type: text/plain; charset=utf-8"
        , "Content-Length: 11"
        , ""
        , "Hello World"
      ).mkString("\\r\\n").getBytes
    ))
    .covary[IO]
    .through(HttpRequest.fromStream[IO](4096,HttpRequestHeaderCodec.defaultCodec))
    .flatMap { case (header, body) =>
      Stream.eval(body.chunks.compile.toVector.map(_.map(chunk2ByteVector).reduce(_ ++ _).decodeUtf8)).map { bodyString =>
        header -> bodyString
      }
    }.compile.toVector.unsafeRunSync() ?= Vector(
      HttpRequestHeader(
        method = HttpMethod.GET
        , path = Uri.Path / "hello-world.html"
        , headers = List(
          Host(HostPort("www.spinoco.com", None))
          , `Content-Type`(ContentType.TextContent(MediaType.`text/plain`, Some(MIMECharset.`UTF-8`)))
          , `Content-Length`(11)
        )
        , query = Uri.Query.empty
      ) -> Right("Hello World")
    )
  }

}
Spinoco/fs2-http
src/test/scala/spinoco/fs2/http/HttpRequestSpec.scala
Scala
mit
2,076
package net.mm.example.services

import net.mm.composer.relations.Relation._

/**
 * Example user-lookup service: resolves each requested username to a [[User]].
 * NOTE(review): `asFuture` presumably lifts the resulting map into a Future —
 * provided by `FakeService`/`Relation` helpers; confirm against their definitions.
 */
class UserService extends FakeService {

  // Maps every incoming username key to a User wrapping that same name.
  val getUsers: Source[String, User] = _.map(u => (u, User(u))).toMap.asFuture
}

/** Minimal user record keyed by username. */
case class User(username: String)
mosche/finatra-composition-proxy
example/src/main/scala/net/mm/example/services/UserService.scala
Scala
mit
233
/*
                    ,i::,
               :;;;;;;;
              ;:,,::;.
            1ft1;::;1tL
              t1;::;1,
               :;::;               _____       __  ___              __
          fCLff    ;:`             tfLLC      / ___/      /  |/  /____ _ _____ / /_
         CLft11 :,, i1tffLi        \__  \    ____  / /|_/ // __ `// ___// __ \
         1t1i    .;;   .1tf       ___/ //___// /  / // /_/ // /__ / / / /
        CLt1i    :,:    .1tfL.   /____/     /_/  /_/ \__,_/ \___//_/ /_/
        Lft1,:;:       , 1tfL:
        ;it1i ,,,:::;;;::1tti      s_mach.concurrent
          .t1i .,::;;; ;1tt        Copyright (c) 2017 S-Mach, Inc.
          Lft11ii;::;ii1tfL:       Author: lance.gatlin@gmail.com
           .L1 1tt1ttt,,Li
             ...1LLLL...
*/
package s_mach.concurrent.config

import scala.concurrent.ExecutionContext
import s_mach.concurrent.util.RetryDecider

/**
 * A trait for configuring optional failure retry
 */
trait OptRetryConfig {
  // None means retry is disabled.
  def optRetry: Option[RetryConfig]
}

/**
 * A trait that configures failure retry
 */
trait RetryConfig {
  // Execution context used to schedule retry attempts.
  implicit def executionContext: ExecutionContext
  // Strategy deciding whether/when a failed attempt should be retried.
  def retryer: RetryDecider
}

object RetryConfig {
  // Private-by-convention concrete carrier for a RetryConfig.
  case class RetryConfigImpl(
    retryer: RetryDecider
  )(implicit
    val executionContext: ExecutionContext
  ) extends RetryConfig

  /** Builds a RetryConfig from a decider and an implicit execution context. */
  def apply(
    retryer: RetryDecider
  )(implicit
    executionContext: ExecutionContext
  ) : RetryConfig = RetryConfigImpl(retryer)
}
S-Mach/s_mach.concurrent
src/main/scala/s_mach/concurrent/config/RetryConfig.scala
Scala
mit
1,435
package com.sksamuel.scapegoat.inspections.collections

import com.sksamuel.scapegoat._

/**
 * Warns when appending (`:+`) an `Any`-typed element to an immutable Seq-like
 * collection, which silently widens the collection's element type to `Any`.
 *
 * @author Stephen Samuel
 * This inspection was inspired by http://p5wscala.wordpress.com/scalaprocessing-gotchas/#t2
 */
class CollectionPromotionToAny
    extends Inspection(
      text = "Collection promotion to Any",
      defaultLevel = Levels.Warning,
      description = "Checks for collection operations that promote the collection to Any.",
      explanation =
        "The `:+` (append) operator on collections accepts any argument you give it, which means that you can end up with e.g. `Seq[Any]` if your types don't match."
    ) {

  def inspector(context: InspectionContext): Inspector =
    new Inspector(context) {
      override def postTyperTraverser =
        new context.Traverser {

          import context.global._

          // True for the immutable Seq-like collections we want to guard
          // (List/Set/Seq/Vector) plus scala.collection.Seq itself.
          private def isSeq(symbol: Symbol): Boolean = {
            val full = symbol.typeSignature.resultType.typeSymbol.fullName
            val immutableCollection = full.startsWith("scala.collection.immutable") &&
              (full.endsWith("List") || full.endsWith("Set") || full.endsWith("Seq") || full.endsWith(
                "Vector"
              ))
            immutableCollection || full == "scala.collection.Seq"
          }

          // String-based checks against the typer's printed types.
          private def isAny(tree: Tree): Boolean = tree.toString() == "Any"

          // True when the collection's first type argument is already Any
          // (then appending Any is not a *promotion* and should not warn).
          private def isAny(symbol: Symbol): Boolean =
            symbol.typeSignature.resultType.typeArgs.headOption match {
              case Some(t) => t.toString == "Any"
              case None    => false
            }

          private def isAnySeq(tree: Tree): Boolean =
            tree match {
              case select @ Select(_, _) if select.symbol != null =>
                isSeq(select.symbol) && isAny(select.symbol)
              case _ => false
            }

          override def inspect(tree: Tree): Unit = {
            tree match {
              // Matches `coll :+ elem` after the typer (`:+` is encoded as $colon$plus).
              case TypeApply(Select(l, TermName("$colon$plus")), a :: _) =>
                if (!isAnySeq(l) && isAny(a))
                  context.warn(tree.pos, self, tree.toString.take(100))
              case _ => continue(tree)
            }
          }
        }
    }
}
sksamuel/scalac-scapegoat-plugin
src/main/scala/com/sksamuel/scapegoat/inspections/collections/CollectionPromotionToAny.scala
Scala
apache-2.0
2,198
package org.jetbrains.sbt
package project

import java.io.File
import javax.swing.Icon

import com.intellij.ide.util.projectWizard.WizardContext
import com.intellij.openapi.externalSystem.model.DataNode
import com.intellij.openapi.externalSystem.model.project.ProjectData
import com.intellij.openapi.externalSystem.service.project.ProjectDataManager
import com.intellij.openapi.externalSystem.service.project.wizard.AbstractExternalProjectImportBuilder
import com.intellij.openapi.project.Project

/**
 * Plugs sbt project import into the IDE's external-system import wizard.
 *
 * Most callbacks are intentionally no-ops: sbt import needs no extra
 * preparation or settings beyond what the external system provides.
 *
 * Note: the original used Scala 2 procedure syntax (`def f(x: T) {}`),
 * which is deprecated and removed in Scala 3; replaced with explicit
 * `: Unit =` result types (source- and binary-compatible).
 *
 * @author Pavel Fatin
 */
class SbtProjectImportBuilder(projectDataManager: ProjectDataManager)
  extends AbstractExternalProjectImportBuilder[SbtImportControl](projectDataManager, new SbtImportControl(), SbtProjectSystem.Id) {

  /** Display name shown in the import wizard. */
  def getName: String = Sbt.Name

  /** Icon shown in the import wizard. */
  def getIcon: Icon = Sbt.Icon

  def doPrepare(context: WizardContext): Unit = {}

  def beforeCommit(dataNode: DataNode[ProjectData], project: Project): Unit = {}

  def onProjectInit(project: Project): Unit = {}

  /** sbt uses the selected directory/file itself as the external project config. */
  def getExternalProjectConfigToUse(file: File): File = file

  def applyExtraSettings(context: WizardContext): Unit = {}
}
jastice/intellij-scala
scala/scala-impl/src/org/jetbrains/sbt/project/SbtProjectImportBuilder.scala
Scala
apache-2.0
1,074
/* NSC -- new Scala compiler * Copyright 2005-2013 LAMP/EPFL * @author Martin Odersky */ package scala.tools.nsc package typechecker import scala.language.postfixOps import scala.collection.mutable import scala.collection.mutable.ListBuffer import scala.tools.nsc.settings.ScalaVersion import scala.tools.nsc.settings.NoScalaVersion import symtab.Flags._ import transform.Transform /** <p> * Post-attribution checking and transformation. * </p> * <p> * This phase performs the following checks. * </p> * <ul> * <li>All overrides conform to rules.</li> * <li>All type arguments conform to bounds.</li> * <li>All type variable uses conform to variance annotations.</li> * <li>No forward reference to a term symbol extends beyond a value definition.</li> * </ul> * <p> * It performs the following transformations. * </p> * <ul> * <li>Local modules are replaced by variables and classes</li> * <li>Calls to case factory methods are replaced by new's.</li> * <li>Eliminate branches in a conditional if the condition is a constant</li> * </ul> * * @author Martin Odersky * @version 1.0 * * @todo Check whether we always check type parameter bounds. */ abstract class RefChecks extends Transform { val global: Global // need to repeat here because otherwise last mixin defines global as // SymbolTable. 
If we had DOT this would not be an issue import global._ import definitions._ import typer.typed /** the following two members override abstract members in Transform */ val phaseName: String = "refchecks" def newTransformer(unit: CompilationUnit): RefCheckTransformer = new RefCheckTransformer(unit) val toJavaRepeatedParam = new SubstSymMap(RepeatedParamClass -> JavaRepeatedParamClass) val toScalaRepeatedParam = new SubstSymMap(JavaRepeatedParamClass -> RepeatedParamClass) def accessFlagsToString(sym: Symbol) = flagsToString( sym getFlag (PRIVATE | PROTECTED), if (sym.hasAccessBoundary) "" + sym.privateWithin.name else "" ) def overridesTypeInPrefix(tp1: Type, tp2: Type, prefix: Type, isModuleOverride: Boolean): Boolean = (tp1.dealiasWiden, tp2.dealiasWiden) match { case (MethodType(List(), rtp1), NullaryMethodType(rtp2)) => rtp1 <:< rtp2 case (NullaryMethodType(rtp1), MethodType(List(), rtp2)) => rtp1 <:< rtp2 // all this module business would be so much simpler if we moduled^w modelled a module as a class and an accessor, like we do for fields case (TypeRef(_, sym, _), _) if sym.isModuleClass => overridesTypeInPrefix(NullaryMethodType(tp1), tp2, prefix, isModuleOverride) case (_, TypeRef(_, sym, _)) if sym.isModuleClass => overridesTypeInPrefix(tp1, NullaryMethodType(tp2), prefix, isModuleOverride) case _ => def classBoundAsSeen(tp: Type) = tp.typeSymbol.classBound.asSeenFrom(prefix, tp.typeSymbol.owner) (tp1 <:< tp2) || isModuleOverride && ( // Object override check. This requires that both the overridden and the overriding member are object // definitions. The overriding module type is allowed to replace the original one with the same name // as long as it conform to the original non-singleton type. 
tp1.typeSymbol.isModuleClass && tp2.typeSymbol.isModuleClass && { val cb1 = classBoundAsSeen(tp1) val cb2 = classBoundAsSeen(tp2) (cb1 <:< cb2) && { log("Allowing %s to override %s because %s <:< %s".format(tp1, tp2, cb1, cb2)) true } } ) } private val separatelyCompiledScalaSuperclass = perRunCaches.newAnyRefMap[Symbol, Unit]() final def isSeparatelyCompiledScalaSuperclass(sym: Symbol) = if (globalPhase.refChecked){ separatelyCompiledScalaSuperclass.contains(sym) } else { // conservative approximation in case someone in pre-refchecks phase asks for `exitingFields(someClass.info)` // and we haven't run the refchecks tree transform which populates `separatelyCompiledScalaSuperclass` false } class RefCheckTransformer(unit: CompilationUnit) extends Transformer { var localTyper: analyzer.Typer = typer var currentApplication: Tree = EmptyTree var inPattern: Boolean = false @inline final def savingInPattern[A](body: => A): A = { val saved = inPattern try body finally inPattern = saved } var checkedCombinations = Set[List[Type]]() // only one overloaded alternative is allowed to define default arguments private def checkOverloadedRestrictions(clazz: Symbol, defaultClass: Symbol): Unit = { // Using the default getters (such as methodName$default$1) as a cheap way of // finding methods with default parameters. This way, we can limit the members to // those with the DEFAULTPARAM flag, and infer the methods. Looking for the methods // directly requires inspecting the parameter list of every one. That modification // shaved 95% off the time spent in this method. 
val defaultGetters = defaultClass.info.findMembers(excludedFlags = PARAM, requiredFlags = DEFAULTPARAM) val defaultMethodNames = defaultGetters map (sym => nme.defaultGetterToMethod(sym.name)) defaultMethodNames.toList.distinct foreach { name => val methods = clazz.info.findMember(name, 0L, requiredFlags = METHOD, stableOnly = false).alternatives def hasDefaultParam(tpe: Type): Boolean = tpe match { case MethodType(params, restpe) => (params exists (_.hasDefault)) || hasDefaultParam(restpe) case _ => false } val haveDefaults = methods filter ( if (settings.isScala211) (sym => mexists(sym.info.paramss)(_.hasDefault) && !nme.isProtectedAccessorName(sym.name)) else (sym => hasDefaultParam(sym.info) && !nme.isProtectedAccessorName(sym.name)) ) if (haveDefaults.lengthCompare(1) > 0) { val owners = haveDefaults map (_.owner) // constructors of different classes are allowed to have defaults if (haveDefaults.exists(x => !x.isConstructor) || owners.distinct.size < haveDefaults.size) { reporter.error(clazz.pos, "in "+ clazz + ", multiple overloaded alternatives of "+ haveDefaults.head + " define default arguments" + ( if (owners.forall(_ == clazz)) "." else ".\\nThe members with defaults are defined in "+owners.map(_.fullLocationString).mkString("", " and ", ".") ) ) } } } // Check for doomed attempt to overload applyDynamic if (clazz isSubClass DynamicClass) { for ((_, m1 :: m2 :: _) <- (clazz.info member nme.applyDynamic).alternatives groupBy (_.typeParams.length)) { reporter.error(m1.pos, "implementation restriction: applyDynamic cannot be overloaded except by methods with different numbers of type parameters, e.g. applyDynamic[T1](method: String)(arg: T1) and applyDynamic[T1, T2](method: String)(arg1: T1, arg2: T2)") } } // This has become noisy with implicit classes. 
if (settings.warnPolyImplicitOverload && settings.developer) { clazz.info.decls.foreach(sym => if (sym.isImplicit && sym.typeParams.nonEmpty) { // implicit classes leave both a module symbol and a method symbol as residue val alts = clazz.info.decl(sym.name).alternatives filterNot (_.isModule) if (alts.size > 1) alts foreach (x => reporter.warning(x.pos, "parameterized overloaded implicit methods are not visible as view bounds")) }) } } // Override checking ------------------------------------------------------------ /** Add bridges for vararg methods that extend Java vararg methods */ def addVarargBridges(clazz: Symbol): List[Tree] = { // This is quite expensive, so attempt to skip it completely. // Insist there at least be a java-defined ancestor which // defines a varargs method. TODO: Find a cheaper way to exclude. if (inheritsJavaVarArgsMethod(clazz)) { log("Found java varargs ancestor in " + clazz.fullLocationString + ".") val self = clazz.thisType val bridges = new ListBuffer[Tree] def varargBridge(member: Symbol, bridgetpe: Type): Tree = { log(s"Generating varargs bridge for ${member.fullLocationString} of type $bridgetpe") val newFlags = (member.flags | VBRIDGE | ARTIFACT) & ~PRIVATE val bridge = member.cloneSymbolImpl(clazz, newFlags) setPos clazz.pos bridge.setInfo(bridgetpe.cloneInfo(bridge)) clazz.info.decls enter bridge val params = bridge.paramss.head val elemtp = params.last.tpe.typeArgs.head val idents = params map Ident val lastarg = gen.wildcardStar(gen.mkWrapArray(idents.last, elemtp)) val body = Apply(Select(This(clazz), member), idents.init :+ lastarg) localTyper typed DefDef(bridge, body) } // For all concrete non-private members (but: see below) that have a (Scala) repeated // parameter: compute the corresponding method type `jtpe` with a Java repeated parameter // if a method with type `jtpe` exists and that method is not a varargs bridge // then create a varargs bridge of type `jtpe` that forwards to the // member method with the Scala 
vararg type. // // @PP: Can't call nonPrivateMembers because we will miss refinement members, // which have been marked private. See SI-4729. for (member <- nonTrivialMembers(clazz)) { log(s"Considering $member for java varargs bridge in $clazz") if (!member.isDeferred && member.isMethod && hasRepeatedParam(member.info)) { val inherited = clazz.info.nonPrivateMemberAdmitting(member.name, VBRIDGE) // Delaying calling memberType as long as possible if (inherited.exists) { val jtpe = toJavaRepeatedParam(self memberType member) // this is a bit tortuous: we look for non-private members or bridges // if we find a bridge everything is OK. If we find another member, // we need to create a bridge val inherited1 = inherited filter (sym => !(sym hasFlag VBRIDGE) && (self memberType sym matches jtpe)) if (inherited1.exists) bridges += varargBridge(member, jtpe) } } } if (bridges.size > 0) log(s"Adding ${bridges.size} bridges for methods extending java varargs.") bridges.toList } else Nil } /** 1. Check all members of class `clazz` for overriding conditions. * That is for overriding member M and overridden member O: * * 1.1. M must have the same or stronger access privileges as O. * 1.2. O must not be final. * 1.3. O is deferred, or M has `override` modifier. * 1.4. If O is stable, then so is M. * // @M: LIFTED 1.5. Neither M nor O are a parameterized type alias * 1.6. If O is a type alias, then M is an alias of O. * 1.7. If O is an abstract type then * 1.7.1 either M is an abstract type, and M's bounds are sharper than O's bounds. * or M is a type alias or class which conforms to O's bounds. * 1.7.2 higher-order type arguments must respect bounds on higher-order type parameters -- @M * (explicit bounds and those implied by variance annotations) -- @see checkKindBounds * 1.8. If O and M are values, then * 1.8.1 M's type is a subtype of O's type, or * 1.8.2 M is of type []S, O is of type ()T and S <: T, or * 1.8.3 M is of type ()S, O is of type []T and S <: T, or * 1.9. 
If M is a macro def, O cannot be deferred unless there's a concrete method overriding O. * 1.10. If M is not a macro def, O cannot be a macro def. * 2. Check that only abstract classes have deferred members * 3. Check that concrete classes do not have deferred definitions * that are not implemented in a subclass. * 4. Check that every member with an `override` modifier * overrides some other member. */ private def checkAllOverrides(clazz: Symbol, typesOnly: Boolean = false) { val self = clazz.thisType def classBoundAsSeen(tp: Type) = { tp.typeSymbol.classBound.asSeenFrom(self, tp.typeSymbol.owner) } case class MixinOverrideError(member: Symbol, msg: String) val mixinOverrideErrors = new ListBuffer[MixinOverrideError]() def printMixinOverrideErrors() { mixinOverrideErrors.toList match { case List() => case List(MixinOverrideError(_, msg)) => reporter.error(clazz.pos, msg) case MixinOverrideError(member, msg) :: others => val others1 = others.map(_.member.name.decode).filter(member.name.decode != _).distinct reporter.error( clazz.pos, msg+(if (others1.isEmpty) "" else ";\\n other members with override errors are: "+(others1 mkString ", "))) } } def infoString(sym: Symbol) = infoString0(sym, sym.owner != clazz) def infoStringWithLocation(sym: Symbol) = infoString0(sym, true) def infoString0(member: Symbol, showLocation: Boolean) = { val underlying = // not using analyzer.underlyingSymbol(member) because we should get rid of it if (!(member hasFlag ACCESSOR)) member else member.accessed match { case field if field.exists => field case _ if member.isSetter => member.getterIn(member.owner) case _ => member } def memberInfo = self.memberInfo(underlying) match { case getterTp if underlying.isGetter => getterTp.resultType case tp => tp } underlying.toString() + (if (showLocation) underlying.locationString + (if (underlying.isAliasType) s", which equals $memberInfo" else if (underlying.isAbstractType) s" with bounds$memberInfo" else if (underlying.isModule) "" else if 
(underlying.isTerm) s" of type $memberInfo" else "") else "") } /* Check that all conditions for overriding `other` by `member` * of class `clazz` are met. */ def checkOverride(pair: SymbolPair) { import pair._ val member = low val other = high def memberTp = lowType def otherTp = highType // debuglog(s"Checking validity of ${member.fullLocationString} overriding ${other.fullLocationString}") def noErrorType = !pair.isErroneous def isRootOrNone(sym: Symbol) = sym != null && sym.isRoot || sym == NoSymbol def isNeitherInClass = member.owner != pair.base && other.owner != pair.base def objectOverrideErrorMsg = ( "overriding " + high.fullLocationString + " with " + low.fullLocationString + ":\\n" + "an overriding object must conform to the overridden object's class bound" + analyzer.foundReqMsg(pair.lowClassBound, pair.highClassBound) ) def overrideErrorMsg(msg: String): String = { val isConcreteOverAbstract = (other.owner isSubClass member.owner) && other.isDeferred && !member.isDeferred val addendum = if (isConcreteOverAbstract) ";\\n (Note that %s is abstract,\\n and is therefore overridden by concrete %s)".format( infoStringWithLocation(other), infoStringWithLocation(member) ) else if (settings.debug) analyzer.foundReqMsg(member.tpe, other.tpe) else "" s"overriding ${infoStringWithLocation(other)};\\n ${infoString(member)} $msg$addendum" } def emitOverrideError(fullmsg: String) { if (member.owner == clazz) reporter.error(member.pos, fullmsg) else mixinOverrideErrors += new MixinOverrideError(member, fullmsg) } def overrideError(msg: String) { if (noErrorType) emitOverrideError(overrideErrorMsg(msg)) } def overrideTypeError() { if (noErrorType) { emitOverrideError( if (member.isModule && other.isModule) objectOverrideErrorMsg else overrideErrorMsg("has incompatible type") ) } } def overrideAccessError() { val otherAccess = accessFlagsToString(other) overrideError("has weaker access privileges; it should be "+ (if (otherAccess == "") "public" else "at least 
"+otherAccess)) } //Console.println(infoString(member) + " overrides " + infoString(other) + " in " + clazz);//DEBUG // return if we already checked this combination elsewhere if (member.owner != clazz) { def deferredCheck = member.isDeferred || !other.isDeferred def subOther(s: Symbol) = s isSubClass other.owner def subMember(s: Symbol) = s isSubClass member.owner if (subOther(member.owner) && deferredCheck) { //Console.println(infoString(member) + " shadows1 " + infoString(other) " in " + clazz);//DEBUG return } if (clazz.parentSymbols exists (p => subOther(p) && subMember(p) && deferredCheck)) { //Console.println(infoString(member) + " shadows2 " + infoString(other) + " in " + clazz);//DEBUG return } if (clazz.parentSymbols forall (p => subOther(p) == subMember(p))) { //Console.println(infoString(member) + " shadows " + infoString(other) + " in " + clazz);//DEBUG return } } /* Is the intersection between given two lists of overridden symbols empty? */ def intersectionIsEmpty(syms1: List[Symbol], syms2: List[Symbol]) = !(syms1 exists (syms2 contains _)) if (typesOnly) checkOverrideTypes() else { // o: public | protected | package-protected (aka java's default access) // ^-may be overridden by member with access privileges-v // m: public | public/protected | public/protected/package-protected-in-same-package-as-o if (member.isPrivate) // (1.1) overrideError("has weaker access privileges; it should not be private") // todo: align accessibility implication checking with isAccessible in Contexts val ob = other.accessBoundary(member.owner) val mb = member.accessBoundary(member.owner) def isOverrideAccessOK = member.isPublic || { // member is public, definitely same or relaxed access (!other.isProtected || member.isProtected) && // if o is protected, so is m ((!isRootOrNone(ob) && ob.hasTransOwner(mb)) || // m relaxes o's access boundary other.isJavaDefined) // overriding a protected java member, see #3946 } if (!isOverrideAccessOK) { overrideAccessError() } else if 
(other.isClass) { overrideError("cannot be used here - class definitions cannot be overridden") } else if (!other.isDeferred && member.isClass) { overrideError("cannot be used here - classes can only override abstract types") } else if (other.isEffectivelyFinal) { // (1.2) overrideError("cannot override final member") } else if (!other.isDeferred && !member.isAnyOverride && !member.isSynthetic) { // (*) // (*) Synthetic exclusion for (at least) default getters, fixes SI-5178. We cannot assign the OVERRIDE flag to // the default getter: one default getter might sometimes override, sometimes not. Example in comment on ticket. if (isNeitherInClass && !(other.owner isSubClass member.owner)) emitOverrideError( clazz + " inherits conflicting members:\\n " + infoStringWithLocation(other) + " and\\n " + infoStringWithLocation(member) + "\\n(Note: this can be resolved by declaring an override in " + clazz + ".)" ) else overrideError("needs `override' modifier") } else if (other.isAbstractOverride && other.isIncompleteIn(clazz) && !member.isAbstractOverride) { overrideError("needs `abstract override' modifiers") } else if (member.isAnyOverride && (other hasFlag ACCESSOR) && !(other hasFlag STABLE | DEFERRED)) { // The check above used to look at `field` == `other.accessed`, ensuring field.isVariable && !field.isLazy, // which I think is identical to the more direct `!(other hasFlag STABLE)` (given that `other` is a method). // Also, we're moving away from (looking at) underlying fields (vals in traits no longer have them, to begin with) // TODO: this is not covered by the spec. We need to resolve this either by changing the spec or removing the test here. 
if (!settings.overrideVars) overrideError("cannot override a mutable variable") } else if (member.isAnyOverride && !(member.owner.thisType.baseClasses exists (_ isSubClass other.owner)) && !member.isDeferred && !other.isDeferred && intersectionIsEmpty(member.extendedOverriddenSymbols, other.extendedOverriddenSymbols)) { overrideError("cannot override a concrete member without a third member that's overridden by both "+ "(this rule is designed to prevent ``accidental overrides'')") } else if (other.isStable && !member.isStable) { // (1.4) overrideError("needs to be a stable, immutable value") } else if (member.isValue && member.isLazy && other.isValue && other.hasFlag(STABLE) && !(other.isDeferred || other.isLazy)) { overrideError("cannot override a concrete non-lazy value") } else if (other.isValue && other.isLazy && member.isValue && !member.isLazy) { overrideError("must be declared lazy to override a concrete lazy value") } else if (other.isDeferred && member.isTermMacro && member.extendedOverriddenSymbols.forall(_.isDeferred)) { // (1.9) overrideError("cannot be used here - term macros cannot override abstract methods") } else if (other.isTermMacro && !member.isTermMacro) { // (1.10) overrideError("cannot be used here - only term macros can override term macros") } else { checkOverrideTypes() checkOverrideDeprecated() if (settings.warnNullaryOverride) { if (other.paramss.isEmpty && !member.paramss.isEmpty && !member.isJavaDefined) { reporter.warning(member.pos, "non-nullary method overrides nullary method") } } } } //if (!member.typeParams.isEmpty) (1.5) @MAT // overrideError("may not be parameterized"); //if (!other.typeParams.isEmpty) (1.5) @MAT // overrideError("may not override parameterized type"); // @M: substSym def checkOverrideAlias() { // Important: first check the pair has the same kind, since the substitution // carries high's type parameter's bounds over to low, so that // type equality doesn't consider potentially different bounds on low/high's 
type params. // In b781e25afe this went from using memberInfo to memberType (now lowType/highType), tested by neg/override.scala. // TODO: was that the right fix? it seems type alias's RHS should be checked by looking at the symbol's info if (pair.sameKind && lowType.substSym(low.typeParams, high.typeParams) =:= highType) () else overrideTypeError() // (1.6) } //if (!member.typeParams.isEmpty) // (1.7) @MAT // overrideError("may not be parameterized"); def checkOverrideAbstract() { if (!(highInfo.bounds containsType lowType)) { // (1.7.1) overrideTypeError(); // todo: do an explaintypes with bounds here explainTypes(_.bounds containsType _, highInfo, lowType) } // check overriding (abstract type --> abstract type or abstract type --> concrete type member (a type alias)) // making an abstract type member concrete is like passing a type argument typer.infer.checkKindBounds(high :: Nil, lowType :: Nil, rootType, low.owner) match { // (1.7.2) case Nil => case kindErrors => reporter.error(member.pos, "The kind of "+member.keyString+" "+member.varianceString + member.nameString+ " does not conform to the expected kind of " + other.defString + other.locationString + "." 
+ kindErrors.toList.mkString("\\n", ", ", ""))
        }

        // check a type alias's RHS corresponds to its declaration
        // this overlaps somewhat with validateVariance
        if (low.isAliasType) {
          typer.infer.checkKindBounds(low :: Nil, lowType.normalize :: Nil, rootType, low.owner) match {
            case Nil =>
            case kindErrors =>
              reporter.error(member.pos,
                "The kind of the right-hand side "+lowType.normalize+" of "+low.keyString+" "+
                low.varianceString + low.nameString+ " does not conform to its expected kind."+
                kindErrors.toList.mkString("\\n", ", ", ""))
          }
        }
        else if (low.isAbstractType && lowType.isVolatile && !highInfo.bounds.hi.isVolatile)
          overrideError("is a volatile type; cannot override a type with non-volatile upper bound")
      }

      /** Term-member override checks: the overriding type must conform to the
       *  overridden type as seen from the prefix, and a stable member may not
       *  override with a volatile type. A refined result type triggers a
       *  recursive (types-only) override check on the refinement's symbol.
       */
      def checkOverrideTerm() {
        other.cookJavaRawInfo() // #2454
        if (!overridesTypeInPrefix(lowType, highType, rootType, low.isModuleOrModuleClass && high.isModuleOrModuleClass)) { // 8
          overrideTypeError()
          explainTypes(lowType, highType)
        }
        if (low.isStable && !highType.isVolatile) {
          if (lowType.isVolatile)
            overrideError("has a volatile type; cannot override a member with non-volatile type")
          else lowType.normalize.resultType match {
            case rt: RefinedType if !(rt =:= highType) && !(checkedCombinations contains rt.parents) =>
              // might mask some inconsistencies -- check overrides
              checkedCombinations += rt.parents
              val tsym = rt.typeSymbol
              if (tsym.pos == NoPosition) tsym setPos member.pos
              checkAllOverrides(tsym, typesOnly = true)
            case _ =>
          }
        }
      }

      // Dispatch on the kind of the overridden ("high") symbol: alias type,
      // abstract type, or term member.
      def checkOverrideTypes() {
        if (high.isAliasType) checkOverrideAlias()
        else if (high.isAbstractType) checkOverrideAbstract()
        else if (high.isTerm) checkOverrideTerm()
      }

      /** Warn when overriding a member annotated @deprecatedOverriding, unless
       *  the overriding member (or one of its owners) is itself deprecated or
       *  a bridge — those overrides are considered intentional/synthetic.
       */
      def checkOverrideDeprecated() {
        if (other.hasDeprecatedOverridingAnnotation && !(member.hasDeprecatedOverridingAnnotation || member.ownerChain.exists(x => x.isDeprecated || x.hasBridgeAnnotation))) {
          val version = other.deprecatedOverridingVersion.getOrElse("")
          val since   = if (version.isEmpty) version else s" (since $version)"
          val message = other.deprecatedOverridingMessage map (msg => s": $msg") getOrElse ""
          val report  = s"overriding ${other.fullLocationString} is deprecated$since$message"
          currentRun.reporting.deprecationWarning(member.pos, other, report, version)
        }
      }
    } // NOTE(review): closes the enclosing override-checking helper whose header is above this chunk

    // Walk every overriding pair in the class and run the per-pair checks.
    // Classes themselves are excluded: only member overrides are checked here.
    val opc = new overridingPairs.Cursor(clazz)
    while (opc.hasNext) {
      if (!opc.high.isClass) checkOverride(opc.currentPair)
      opc.next()
    }
    printMixinOverrideErrors()

    // Verifying a concrete class has nothing unimplemented.
    if (clazz.isConcreteClass && !typesOnly) {
      // Accumulates the "class needs to be abstract"-style messages; the first
      // entry is a prelude, subsequent entries are individual reasons.
      val abstractErrors = new ListBuffer[String]
      def abstractErrorMessage =
        // a little formatting polish
        if (abstractErrors.size <= 2) abstractErrors mkString " "
        else abstractErrors.tail.mkString(abstractErrors.head + ":\\n", "\\n", "")

      // Record one abstractness error, emitting the prelude only once.
      def abstractClassError(mustBeMixin: Boolean, msg: String) {
        def prelude = (
          if (clazz.isAnonymousClass || clazz.isModuleClass) "object creation impossible"
          else if (mustBeMixin) clazz + " needs to be a mixin"
          else clazz + " needs to be abstract"
        ) + ", since"

        if (abstractErrors.isEmpty) abstractErrors ++= List(prelude, msg)
        else abstractErrors += msg
      }

      /** Find a concrete Java-defined member of `clazz` that overrides `sym`
       *  after erasure (Java overriding is by erased signature), or NoSymbol.
       */
      def javaErasedOverridingSym(sym: Symbol): Symbol =
        clazz.tpe.nonPrivateMemberAdmitting(sym.name, BRIDGE).filter(other =>
          !other.isDeferred && other.isJavaDefined && !sym.enclClass.isSubClass(other.enclClass) && {
            // #3622: erasure operates on uncurried types --
            // note on passing sym in both cases: only sym.isType is relevant for uncurry.transformInfo
            // !!! erasure.erasure(sym, uncurry.transformInfo(sym, tp)) gives erroneous or inaccessible type - check whether that's still the case!
            def uncurryAndErase(tp: Type) = erasure.erasure(sym)(uncurry.transformInfo(sym, tp))
            val tp1 = uncurryAndErase(clazz.thisType.memberType(sym))
            val tp2 = uncurryAndErase(clazz.thisType.memberType(other))
            exitingErasure(tp1 matches tp2)
          })

      // Deferred members that do not make a concrete class abstract:
      // non-F-bounded abstract types, and Java members that are overridden
      // after erasure (or when no erasure phase is available to check).
      def ignoreDeferred(member: Symbol) = (
        (member.isAbstractType && !member.isFBounded) || (
          // the test requires exitingErasure so shouldn't be
          // done if the compiler has no erasure phase available
          member.isJavaDefined && (currentRun.erasurePhase == NoPhase || javaErasedOverridingSym(member) != NoSymbol)
        )
      )

      // 2. Check that only abstract classes have deferred members
      def checkNoAbstractMembers(): Unit = {
        // Avoid spurious duplicates: first gather any missing members.
        def memberList = clazz.info.nonPrivateMembersAdmitting(VBRIDGE)
        val (missing, rest) = memberList partition (m => m.isDeferred && !ignoreDeferred(m))
        // Group missing members by the name of the underlying symbol,
        // to consolidate getters and setters.
        val grouped = missing groupBy (_.name.getterName)
        val missingMethods = grouped.toList flatMap {
          case (name, syms) =>
            // A missing var is reported once (via its setter) rather than
            // once per accessor.
            if (syms exists (_.isSetter)) syms filterNot (_.isGetter)
            else syms
        }

        // Render `= ???` stub signatures for all missing methods, grouped by
        // the class that declares them, suitable for pasting into the source.
        def stubImplementations: List[String] = {
          // Grouping missing methods by the declaring class
          val regrouped = missingMethods.groupBy(_.owner).toList
          def membersStrings(members: List[Symbol]) = {
            members foreach fullyInitializeSymbol
            members.sortBy(_.name) map (m => m.defStringSeenAs(clazz.tpe_* memberType m) + " = ???")
          }

          if (regrouped.tail.isEmpty)
            membersStrings(regrouped.head._2)
          else (regrouped.sortBy("" + _._1.name) flatMap {
            case (owner, members) =>
              ("// Members declared in " + owner.fullName) +: membersStrings(members) :+ ""
          }).init
        }

        // If there are numerous missing methods, we presume they are aware of it and
        // give them a nicely formatted set of method signatures for implementing.
        if (missingMethods.size > 1) {
          abstractClassError(false, "it has " + missingMethods.size + " unimplemented members.")
          val preface =
            """|/** As seen from %s, the missing signatures are as follows.
               | * For convenience, these are usable as stub implementations.
               | */
               |""".stripMargin.format(clazz)
          abstractErrors += stubImplementations.map(" " + _ + "\\n").mkString(preface, "", "")
          return
        }

        for (member <- missing) {
          def undefined(msg: String) = abstractClassError(false, infoString(member) + " is not defined" + msg)
          val underlying = analyzer.underlyingSymbol(member) // TODO: don't use this method

          // Give a specific error message for abstract vars based on why it fails:
          // It could be unimplemented, have only one accessor, or be uninitialized.
          val groupedAccessors = grouped.getOrElse(member.name.getterName, Nil)
          val isMultiple = groupedAccessors.size > 1

          if (groupedAccessors.exists(_.isSetter) || (member.isGetter && !isMultiple && member.setterIn(member.owner).exists)) {
            // If both getter and setter are missing, squelch the setter error.
            if (member.isSetter && isMultiple) ()
            else undefined(
              if (member.isSetter) "\\n(Note that an abstract var requires a setter in addition to the getter)"
              else if (member.isGetter && !isMultiple) "\\n(Note that an abstract var requires a getter in addition to the setter)"
              else "\\n(Note that variables need to be initialized to be defined)"
            )
          }
          else if (underlying.isMethod) {
            // If there is a concrete method whose name matches the unimplemented
            // abstract method, and a cursory examination of the difference reveals
            // something obvious to us, let's make it more obvious to them.
            val abstractParams = underlying.tpe.paramTypes
            val matchingName = clazz.tpe.nonPrivateMembersAdmitting(VBRIDGE)
            // Concrete members with the same name, parameter count, and type
            // parameter count — near-misses worth diagnosing in detail.
            val matchingArity = matchingName filter { m =>
              !m.isDeferred &&
              (m.name == underlying.name) &&
              (m.tpe.paramTypes.size == underlying.tpe.paramTypes.size) &&
              (m.tpe.typeParams.size == underlying.tpe.typeParams.size)
            }

            matchingArity match {
              // So far so good: only one candidate method
              case Scope(concrete) =>
                val mismatches = abstractParams zip concrete.tpe.paramTypes filterNot { case (x, y) => x =:= y }
                mismatches match {
                  // Only one mismatched parameter: say something useful.
                  case (pa, pc) :: Nil =>
                    val abstractSym = pa.typeSymbol
                    val concreteSym = pc.typeSymbol
                    def subclassMsg(c1: Symbol, c2: Symbol) = (
                      ": %s is a subclass of %s, but method parameter types must match exactly.".format(
                        c1.fullLocationString, c2.fullLocationString)
                    )
                    val addendum = (
                      if (abstractSym == concreteSym) {
                        // TODO: what is the optimal way to test for a raw type at this point?
                        // Compilation has already failed so we shouldn't have to worry overmuch
                        // about forcing types.
                        if (underlying.isJavaDefined && pa.typeArgs.isEmpty && abstractSym.typeParams.nonEmpty)
                          ". To implement a raw type, use %s[_]".format(pa)
                        else if (pa.prefix =:= pc.prefix)
                          ": their type parameters differ"
                        else
                          ": their prefixes (i.e. enclosing instances) differ"
                      }
                      else if (abstractSym isSubClass concreteSym)
                        subclassMsg(abstractSym, concreteSym)
                      else if (concreteSym isSubClass abstractSym)
                        subclassMsg(concreteSym, abstractSym)
                      else ""
                    )
                    undefined("\\n(Note that %s does not match %s%s)".format(pa, pc, addendum))
                  case xs =>
                    // More than one mismatched parameter: no detailed hint.
                    undefined("")
                }
              case _ =>
                undefined("")
            }
          }
          else undefined("")
        }

        // Check the remainder for invalid absoverride.
        for (member <- rest ; if (member.isAbstractOverride && member.isIncompleteIn(clazz))) {
          val other = member.superSymbolIn(clazz)
          val explanation =
            if (other != NoSymbol) " and overrides incomplete superclass member " + infoString(other)
            else ", but no concrete implementation could be found in a base class"

          abstractClassError(true, infoString(member) + " is marked `abstract' and `override'" + explanation)
        }
      }

      // 3. Check that concrete classes do not have deferred definitions
      // that are not implemented in a subclass.
      // Note that this is not the same as (2); In a situation like
      //
      // class C { def m: Int = 0}
      // class D extends C { def m: Int }
      //
      // (3) is violated but not (2).
      def checkNoAbstractDecls(bc: Symbol) {
        for (decl <- bc.info.decls) {
          if (decl.isDeferred && !ignoreDeferred(decl)) {
            val impl = decl.matchingSymbol(clazz.thisType, admit = VBRIDGE)
            // Deferred decl is an error unless some implementation exists in a
            // class that is NOT a superclass of the declaring class.
            if (impl == NoSymbol || (decl.owner isSubClass impl.owner)) {
              abstractClassError(false, "there is a deferred declaration of "+infoString(decl)+
                " which is not implemented in a subclass"+analyzer.abstractVarMessage(decl))
            }
          }
        }
        // Recurse up the superclass chain while it stays abstract.
        if (bc.superClass hasFlag ABSTRACT)
          checkNoAbstractDecls(bc.superClass)
      }

      checkNoAbstractMembers()
      // Only run the (more redundant) declaration walk if the member check
      // found nothing, to avoid duplicate diagnostics.
      if (abstractErrors.isEmpty)
        checkNoAbstractDecls(clazz)

      if (abstractErrors.nonEmpty)
        reporter.error(clazz.pos, abstractErrorMessage)
    }
    else if (clazz.isTrait && !(clazz isSubClass AnyValClass)) {
      // For non-AnyVal classes, prevent abstract methods in interfaces that override
      // final members in Object; see #4431
      for (decl <- clazz.info.decls) {
        // Have to use matchingSymbol, not a method involving overridden symbols,
        // because the scala type system understands that an abstract method here does not
        // override a concrete method in Object. The jvm, however, does not.
        val overridden = decl.matchingSymbol(ObjectClass, ObjectTpe)
        if (overridden.isFinal)
          reporter.error(decl.pos, "trait cannot redefine final method from class AnyRef")
      }
    }

    /* Returns whether there is a symbol declared in class `inclazz`
     * (which must be different from `clazz`) whose name and type
     * seen as a member of `class.thisType` matches `member`'s.
     */
    def hasMatchingSym(inclazz: Symbol, member: Symbol): Boolean = {
      val isVarargs = hasRepeatedParam(member.tpe)
      // Lazily computed: the Java-repeated-param view of a varargs signature,
      // so Scala varargs can match a Java-defined overloads' erasure.
      lazy val varargsType = toJavaRepeatedParam(member.tpe)

      // Type members always "match"; term members must match by signature
      // (possibly via the Java varargs view).
      def isSignatureMatch(sym: Symbol) = !sym.isTerm || {
        val symtpe = clazz.thisType memberType sym
        def matches(tp: Type) = tp matches symtpe

        matches(member.tpe) || (isVarargs && matches(varargsType))
      }
      /* The rules for accessing members which have an access boundary are more
       * restrictive in java than scala. Since java has no concept of package nesting,
       * a member with "default" (package-level) access can only be accessed by members
       * in the exact same package. Example:
       *
       *   package a.b;
       *   public class JavaClass { void foo() { } }
       *
       * The member foo() can be accessed only from members of package a.b, and not
       * nested packages like a.b.c. In the analogous scala class:
       *
       *   package a.b
       *   class ScalaClass { private[b] def foo() = () }
       *
       * The member IS accessible to classes in package a.b.c. The javaAccessCheck logic
       * is restricting the set of matching signatures according to the above semantics.
       */
      def javaAccessCheck(sym: Symbol) = (
           !inclazz.isJavaDefined                             // not a java defined member
        || !sym.hasAccessBoundary                             // no access boundary
        || sym.isProtected                                    // marked protected in java, thus accessible to subclasses
        || sym.privateWithin == member.enclosingPackageClass  // exact package match
      )
      def classDecls   = inclazz.info.nonPrivateDecl(member.name)
      def matchingSyms = classDecls filter (sym => isSignatureMatch(sym) && javaAccessCheck(sym))

      (inclazz != clazz) && (matchingSyms != NoSymbol)
    }

    // 4. Check that every defined member with an `override` modifier overrides some other member.
    for (member <- clazz.info.decls)
      if (member.isAnyOverride && !(clazz.thisType.baseClasses exists (hasMatchingSym(_, member)))) {
        // for (bc <- clazz.info.baseClasses.tail) Console.println("" + bc + " has " + bc.info.decl(member.name) + ":" + bc.info.decl(member.name).tpe);//DEBUG

        // Non-final, non-local members with the same name: shown to the user
        // as "did you mean one of these" context.
        val nonMatching: List[Symbol] = clazz.info.member(member.name).alternatives.filterNot(_.owner == clazz).filterNot(_.isFinal)
        def issueError(suffix: String) = reporter.error(member.pos, member.toString() + " overrides nothing" + suffix)
        nonMatching match {
          case Nil =>
            issueError("")
          case ms =>
            val superSigs = ms.map(m => m.defStringSeenAs(clazz.tpe memberType m)).mkString("\\n")
            issueError(s".\\nNote: the super classes of ${member.owner} contain the following, non final members named ${member.name}:\\n${superSigs}")
        }
        // Clear the flags so later phases don't trip on the bogus override.
        member resetFlag (OVERRIDE | ABSOVERRIDE) // Any Override
      }
  }

  // Basetype Checking --------------------------------------------------------

  /** <ol>
   *    <li> <!-- 1 -->
   *      Check that later type instances in the base-type sequence
   *      are subtypes of earlier type instances of the same mixin.
   *    </li>
   *  </ol>
   */
  private def validateBaseTypes(clazz: Symbol) {
    val seenParents = mutable.HashSet[Type]()
    // seenTypes(i): the maximal (under <:<) type instances seen so far for the
    // base class at base-type-sequence index i.
    val seenTypes = new Array[List[Type]](clazz.info.baseTypeSeq.length)
    for (i <- 0 until seenTypes.length)
      seenTypes(i) = Nil

    /* validate all base types of a class in reverse linear order. */
    def register(tp: Type): Unit = {
//      if (clazz.fullName.endsWith("Collection.Projection"))
//        println("validate base type "+tp)
      val baseClass = tp.typeSymbol
      if (baseClass.isClass) {
        // Record Scala superclasses compiled in a separate run (used elsewhere
        // in this phase; see separatelyCompiledScalaSuperclass's declaration).
        if (!baseClass.isTrait && !baseClass.isJavaDefined && !currentRun.compiles(baseClass) && !separatelyCompiledScalaSuperclass.contains(baseClass))
          separatelyCompiledScalaSuperclass.update(baseClass, ())
        val index = clazz.info.baseTypeIndex(baseClass)
        if (index >= 0) {
          // Keep only the maximal type instances: add tp unless subsumed,
          // and drop entries that tp subsumes.
          if (seenTypes(index) forall (tp1 => !(tp1 <:< tp)))
            seenTypes(index) =
              tp :: (seenTypes(index) filter (tp1 => !(tp <:< tp1)))
        }
      }
      // seenParents prevents re-walking shared parents in the inheritance DAG.
      val remaining = tp.parents filterNot seenParents
      seenParents ++= remaining
      remaining foreach register
    }
    register(clazz.tpe)
    for (i <- 0 until seenTypes.length) {
      val baseClass = clazz.info.baseTypeSeq(i).typeSymbol
      seenTypes(i) match {
        case Nil =>
          devWarning(s"base $baseClass not found in basetypes of $clazz. This might indicate incorrect caching of TypeRef#parents.")
        case _ :: Nil =>
          ;// OK
        case tp1 :: tp2 :: _ =>
          // Two incomparable instances of the same base class were inherited.
          reporter.error(clazz.pos, "illegal inheritance;\\n " + clazz +
            " inherits different type instances of " + baseClass +
            ":\\n" + tp1 + " and " + tp2)
          explainTypes(tp1, tp2)
          explainTypes(tp2, tp1)
      }
    }
  }

  // Variance Checking --------------------------------------------------------

  object varianceValidator extends VarianceValidator {
    // Human-readable rendering of the position where the variance error occurs.
    private def tpString(tp: Type) = tp match {
      case ClassInfoType(parents, _, clazz) => "supertype "+intersectionType(parents, clazz.owner)
      case _                                => "type "+tp
    }
    override def issueVarianceError(base: Symbol, sym: Symbol, required: Variance) {
      reporter.error(base.pos,
        s"${sym.variance} $sym occurs in $required position in ${tpString(base.info)} of $base")
    }
  }

  // Forward reference checking ---------------------------------------------------

  /** One nesting level for forward-reference checking: a scope of block-local
   *  definitions plus the highest statement index referenced so far
   *  (`maxindex`) and where/by whom it was referenced (`refpos`/`refsym`).
   */
  class LevelInfo(val outer: LevelInfo) {
    val scope: Scope = if (outer eq null) newScope else newNestedScope(outer.scope)
    var maxindex: Int = Int.MinValue
    var refpos: Position = _
    var refsym: Symbol = _
  }

  // Innermost level currently being checked; null outside any block.
  private var currentLevel: LevelInfo = null
  // Maps each block-local definition to its statement index within its block.
  private val symIndex = perRunCaches.newMap[Symbol, Int]()

  private def pushLevel() {
    currentLevel = new LevelInfo(currentLevel)
  }

  private def popLevel() {
    currentLevel = currentLevel.outer
  }

  // Enter all block-local member definitions of `stats` into the current
  // level's scope, recording each one's statement index.
  private def enterSyms(stats: List[Tree]) {
    var index = -1
    for (stat <- stats) {
      index = index + 1
      stat match {
        case _ : MemberDef if stat.symbol.isLocalToBlock =>
          currentLevel.scope.enter(stat.symbol)
          symIndex(stat.symbol) = index
        case _ =>
      }
    }
  }

  // Record a reference to block-local `sym` at `pos`: bump the defining
  // level's maxindex so later passes can flag forward references.
  private def enterReference(pos: Position, sym: Symbol) {
    if (sym.isLocalToBlock) {
      val e = currentLevel.scope.lookupEntry(sym.name)
      if ((e ne null) && sym == e.sym) {
        // Walk out to the level whose scope actually owns the definition.
        var l = currentLevel
        while (l.scope != e.owner) l = l.outer
        val symindex = symIndex(sym)
        if (l.maxindex < symindex) {
          l.refpos = pos
          l.refsym = sym
          l.maxindex = symindex
        }
      }
    }
  }

  // Comparison checking -------------------------------------------------------

  // Normalizes a type and all the types it contains (used only so that the
  // two sides of a comparison render consistently in warnings).
  object normalizeAll extends TypeMap {
    def apply(tp: Type) = mapOver(tp).normalize
  }

  // Warn on `Option.apply(x: X)` where x is adapted by an implicit view —
  // the view likely wasn't intended to fire inside Option(...). SI-6567.
  def checkImplicitViewOptionApply(pos: Position, fn: Tree, args: List[Tree]): Unit = if (settings.warnOptionImplicit) (fn, args) match {
    case (tap@TypeApply(fun, targs), List(view: ApplyImplicitView)) if fun.symbol == currentRun.runDefinitions.Option_apply =>
      reporter.warning(pos, s"Suspicious application of an implicit view (${view.fun}) in the argument to Option.apply.") // SI-6567
    case _ =>
  }

  // True for the universal comparison methods eq/ne/==/!= from Object/Any.
  private def isObjectOrAnyComparisonMethod(sym: Symbol) = sym match {
    case Object_eq | Object_ne | Object_== | Object_!= | Any_== | Any_!= => true
    case _ => false
  }

  /** Check the sensibility of using the given `equals` to compare `qual` and `other`.
   *  Emits "will always yield true/false" or "are unrelated" warnings for
   *  comparisons whose outcome is statically known or highly suspect.
   */
  private def checkSensibleEquals(pos: Position, qual: Tree, name: Name, sym: Symbol, other: Tree) = {
    def isReferenceOp = sym == Object_eq || sym == Object_ne
    // A freshly created object (lambda or `new`): reference comparison with it
    // can never be true.
    def isNew(tree: Tree) = tree match {
      case Function(_, _) | Apply(Select(New(_), nme.CONSTRUCTOR), _) => true
      case _ => false
    }
    // Widen and follow abstract types' upper bounds to a concrete class symbol.
    def underlyingClass(tp: Type): Symbol = {
      val sym = tp.widen.typeSymbol
      if (sym.isAbstractType) underlyingClass(sym.info.bounds.hi)
      else sym
    }
    val actual   = underlyingClass(other.tpe)
    val receiver = underlyingClass(qual.tpe)
    def onTrees[T](f: List[Tree] => T) = f(List(qual, other))
    def onSyms[T](f: List[Symbol] => T) = f(List(receiver, actual))

    // @MAT normalize for consistency in error message, otherwise only part is normalized due to use of `typeSymbol`
    def typesString = normalizeAll(qual.tpe.widen)+" and "+normalizeAll(other.tpe.widen)

    /* Symbols which limit the warnings we can issue since they may be value types */
    val isMaybeValue = Set[Symbol](AnyClass, AnyRefClass, AnyValClass, ObjectClass, ComparableClass, JavaSerializableClass)

    // Whether def equals(other: Any) has known behavior: it is the default
    // inherited from java.lang.Object, or it is a synthetically generated
    // case equals. TODO - more cases are warnable if the target is a synthetic
    // equals.
    def isUsingWarnableEquals = {
      val m = receiver.info.member(nme.equals_)
      ((m == Object_equals) || (m == Any_equals) || isMethodCaseEquals(m))
    }
    def isMethodCaseEquals(m: Symbol) = m.isSynthetic && m.owner.isCase
    def isCaseEquals = isMethodCaseEquals(receiver.info.member(nme.equals_))
    // Whether this == or != is one of those defined in Any/AnyRef or an overload from elsewhere.
    def isUsingDefaultScalaOp = sym == Object_== || sym == Object_!= || sym == Any_== || sym == Any_!=
    def haveSubclassRelationship = (actual isSubClass receiver) || (receiver isSubClass actual)

    // Whether the operands+operator represent a warnable combo (assuming anyrefs)
    // Looking for comparisons performed with ==/!= in combination with either an
    // equals method inherited from Object or a case class synthetic equals (for
    // which we know the logic.)
    def isWarnable           = isReferenceOp || (isUsingDefaultScalaOp && isUsingWarnableEquals)
    def isEitherNullable     = (NullTpe <:< receiver.info) || (NullTpe <:< actual.info)
    def isEitherValueClass   = actual.isDerivedValueClass || receiver.isDerivedValueClass
    def isBoolean(s: Symbol) = unboxedValueClass(s) == BooleanClass
    def isUnit(s: Symbol)    = unboxedValueClass(s) == UnitClass
    def isNumeric(s: Symbol) = isNumericValueClass(unboxedValueClass(s)) || isAnyNumber(s)
    def isScalaNumber(s: Symbol) = s isSubClass ScalaNumberClass
    def isJavaNumber(s: Symbol)  = s isSubClass JavaNumberClass
    // includes java.lang.Number if appropriate [SI-5779]
    def isAnyNumber(s: Symbol)     = isScalaNumber(s) || isJavaNumber(s)
    def isMaybeAnyValue(s: Symbol) = isPrimitiveValueClass(unboxedValueClass(s)) || isMaybeValue(s)
    // used to short-circuit unrelatedTypes check if both sides are special
    def isSpecial(s: Symbol) = isMaybeAnyValue(s) || isAnyNumber(s)
    val nullCount            = onSyms(_ filter (_ == NullClass) size)
    def isNonsenseValueClassCompare = (
         !haveSubclassRelationship
      && isUsingDefaultScalaOp
      && isEitherValueClass
      && !isCaseEquals
    )

    // Have we already determined that the comparison is non-sensible? I mean, non-sensical?
    var isNonSensible = false

    def nonSensibleWarning(what: String, alwaysEqual: Boolean) = {
      // The reported truth value flips for != / ne relative to == / eq.
      val msg = alwaysEqual == (name == nme.EQ || name == nme.eq)
      reporter.warning(pos, s"comparing $what using `${name.decode}' will always yield $msg")
      isNonSensible = true
    }
    def nonSensible(pre: String, alwaysEqual: Boolean) =
      nonSensibleWarning(s"${pre}values of types $typesString", alwaysEqual)
    def nonSensiblyEq() = nonSensible("", alwaysEqual = true)
    def nonSensiblyNeq() = nonSensible("", alwaysEqual = false)
    def nonSensiblyNew() = nonSensibleWarning("a fresh object", alwaysEqual = false)

    def unrelatedMsg = name match {
      case nme.EQ | nme.eq => "never compare equal"
      case _               => "always compare unequal"
    }
    // Weaker "unrelated types" warning; suppressed if a stronger non-sensible
    // warning was already issued for this comparison.
    def unrelatedTypes() = if (!isNonSensible) {
      val weaselWord = if (isEitherValueClass) "" else " most likely"
      reporter.warning(pos, s"$typesString are unrelated: they will$weaselWord $unrelatedMsg")
    }

    if (nullCount == 2) // null == null
      nonSensiblyEq()
    else if (nullCount == 1) {
      if (onSyms(_ exists isPrimitiveValueClass)) // null == 5
        nonSensiblyNeq()
      else if (onTrees( _ exists isNew)) // null == new AnyRef
        nonSensiblyNew()
    }
    else if (isBoolean(receiver)) {
      if (!isBoolean(actual) && !isMaybeValue(actual)) // true == 5
        nonSensiblyNeq()
    }
    else if (isUnit(receiver)) {
      if (isUnit(actual)) // () == ()
        nonSensiblyEq()
      else if (!isUnit(actual) && !isMaybeValue(actual)) // () == "abc"
        nonSensiblyNeq()
    }
    else if (isNumeric(receiver)) {
      if (!isNumeric(actual))
        if (isUnit(actual) || isBoolean(actual) || !isMaybeValue(actual)) // 5 == "abc"
          nonSensiblyNeq()
    }
    else if (isWarnable && !isCaseEquals) {
      if (isNew(qual)) // new X == y
        nonSensiblyNew()
      else if (isNew(other) && (receiver.isEffectivelyFinal || isReferenceOp)) // object X ; X == new Y
        nonSensiblyNew()
      else if (receiver.isEffectivelyFinal && !(receiver isSubClass actual) && !actual.isRefinementClass) { // object X, Y; X == Y
        if (isEitherNullable)
          nonSensible("non-null ", false)
        else
          nonSensiblyNeq()
      }
    }

    // warn if one but not the other is a derived value class
    // this is especially important to enable transitioning from
    // regular to value classes without silent failures.
    if (isNonsenseValueClassCompare)
      unrelatedTypes()
    // possibleNumericCount is insufficient or this will warn on e.g. Boolean == j.l.Boolean
    else if (isWarnable && nullCount == 0 && !(isSpecial(receiver) && isSpecial(actual))) {
      // better to have lubbed and lost
      // Warn only when the least upper bound is no more precise than Object
      // while neither side alone is Object — i.e. no common supertype below it.
      def warnIfLubless(): Unit = {
        val common = global.lub(List(actual.tpe, receiver.tpe))
        if (ObjectTpe <:< common && !(ObjectTpe <:< actual.tpe) && !(ObjectTpe <:< receiver.tpe))
          unrelatedTypes()
      }
      // warn if actual has a case parent that is not same as receiver's;
      // if actual is not a case, then warn if no common supertype, as below
      if (isCaseEquals) {
        def thisCase = receiver.info.member(nme.equals_).owner
        actual.info.baseClasses.find(_.isCase) match {
          case Some(p) if p != thisCase => nonSensible("case class ", false)
          case None =>
            // stronger message on (Some(1) == None)
            //if (receiver.isCase && receiver.isEffectivelyFinal && !(receiver isSubClass actual)) nonSensiblyNeq()
            //else
            // if a class, it must be super to thisCase (and receiver) since not <: thisCase
            if (!actual.isTrait && !(receiver isSubClass actual)) nonSensiblyNeq()
            else if (!haveSubclassRelationship) warnIfLubless()
          case _ =>
        }
      }
      // warn only if they have no common supertype below Object
      else if (!haveSubclassRelationship) {
        warnIfLubless()
      }
    }
  }

  /** Sensibility check examines flavors of equals. */
  def checkSensible(pos: Position, fn: Tree, args: List[Tree]) = fn match {
    // Only unary eq/ne/==/!= from Any/AnyRef, and never inside synthetic code
    // (e.g. generated case-class equals), are candidates for warnings.
    case Select(qual, name @ (nme.EQ | nme.NE | nme.eq | nme.ne)) if args.length == 1 && isObjectOrAnyComparisonMethod(fn.symbol) && !currentOwner.isSynthetic =>
      checkSensibleEquals(pos, qual, name, fn.symbol, args.head)
    case _ =>
  }

  // SI-6276 warn for trivial recursion, such as `def foo = foo` or `val bar: X = bar`, which come up more frequently than you might think.
  // TODO: Move to abide rule.
Also, this does not check that the def is final or not overridden, for example def checkInfiniteLoop(sym: Symbol, rhs: Tree): Unit = if (!sym.isValueParameter && sym.paramss.isEmpty) { rhs match { case t@(Ident(_) | Select(This(_), _)) if t hasSymbolWhich (_.accessedOrSelf == sym) => reporter.warning(rhs.pos, s"${sym.fullLocationString} does nothing other than call itself recursively") case _ => } } // Transformation ------------------------------------------------------------ /* Convert a reference to a case factory of type `tpe` to a new of the class it produces. */ def toConstructor(pos: Position, tpe: Type): Tree = { val rtpe = tpe.finalResultType assert(rtpe.typeSymbol hasFlag CASE, tpe) val tree = localTyper.typedOperator { atPos(pos) { Select(New(TypeTree(rtpe)), rtpe.typeSymbol.primaryConstructor) } } checkUndesiredProperties(rtpe.typeSymbol, tree.pos) tree } override def transformStats(stats: List[Tree], exprOwner: Symbol): List[Tree] = { pushLevel() try { enterSyms(stats) var index = -1 stats flatMap { stat => index += 1; transformStat(stat, index) } } finally popLevel() } def transformStat(tree: Tree, index: Int): List[Tree] = tree match { case t if treeInfo.isSelfConstrCall(t) => assert(index == 0, index) try transform(tree) :: Nil finally if (currentLevel.maxindex > 0) { // An implementation restriction to avoid VerifyErrors and lazyvals mishaps; see SI-4717 debuglog("refsym = " + currentLevel.refsym) reporter.error(currentLevel.refpos, "forward reference not allowed from self constructor invocation") } case ValDef(_, _, _, _) => val tree1 = transform(tree) // important to do before forward reference check if (tree1.symbol.isLazy) tree1 :: Nil else { val sym = tree.symbol if (sym.isLocalToBlock && index <= currentLevel.maxindex) { debuglog("refsym = " + currentLevel.refsym) reporter.error(currentLevel.refpos, "forward reference extends over definition of " + sym) } tree1 :: Nil } case Import(_, _) => Nil case DefDef(mods, _, _, _, _, _) if (mods 
hasFlag MACRO) || (tree.symbol hasFlag MACRO) => Nil case _ => transform(tree) :: Nil } /* Check whether argument types conform to bounds of type parameters */ private def checkBounds(tree0: Tree, pre: Type, owner: Symbol, tparams: List[Symbol], argtps: List[Type]): Unit = try typer.infer.checkBounds(tree0, pre, owner, tparams, argtps, "") catch { case ex: TypeError => reporter.error(tree0.pos, ex.getMessage()) if (settings.explaintypes) { val bounds = tparams map (tp => tp.info.instantiateTypeParams(tparams, argtps).bounds) (argtps, bounds).zipped map ((targ, bound) => explainTypes(bound.lo, targ)) (argtps, bounds).zipped map ((targ, bound) => explainTypes(targ, bound.hi)) () } } private def isIrrefutable(pat: Tree, seltpe: Type): Boolean = pat match { case Apply(_, args) => val clazz = pat.tpe.typeSymbol clazz == seltpe.typeSymbol && clazz.isCaseClass && (args corresponds clazz.primaryConstructor.tpe.asSeenFrom(seltpe, clazz).paramTypes)(isIrrefutable) case Typed(pat, tpt) => seltpe <:< tpt.tpe case Ident(tpnme.WILDCARD) => true case Bind(_, pat) => isIrrefutable(pat, seltpe) case _ => false } // Note: if a symbol has both @deprecated and @migration annotations and both // warnings are enabled, only the first one checked here will be emitted. // I assume that's a consequence of some code trying to avoid noise by suppressing // warnings after the first, but I think it'd be better if we didn't have to // arbitrarily choose one as more important than the other. private def checkUndesiredProperties(sym: Symbol, pos: Position) { // If symbol is deprecated, and the point of reference is not enclosed // in either a deprecated member or a scala bridge method, issue a warning. // TODO: x.hasBridgeAnnotation doesn't seem to be needed here... 
if (sym.isDeprecated && !currentOwner.ownerChain.exists(x => x.isDeprecated || x.hasBridgeAnnotation)) currentRun.reporting.deprecationWarning(pos, sym) // Similar to deprecation: check if the symbol is marked with @migration // indicating it has changed semantics between versions. if (sym.hasMigrationAnnotation && settings.Xmigration.value != NoScalaVersion) { val changed = try settings.Xmigration.value < ScalaVersion(sym.migrationVersion.get) catch { case e : NumberFormatException => reporter.warning(pos, s"${sym.fullLocationString} has an unparsable version number: ${e.getMessage()}") // if we can't parse the format on the migration annotation just conservatively assume it changed true } if (changed) reporter.warning(pos, s"${sym.fullLocationString} has changed semantics in version ${sym.migrationVersion.get}:\\n${sym.migrationMessage.get}") } // See an explanation of compileTimeOnly in its scaladoc at scala.annotation.compileTimeOnly. if (sym.isCompileTimeOnly && !currentOwner.ownerChain.exists(x => x.isCompileTimeOnly)) { def defaultMsg = sm"""Reference to ${sym.fullLocationString} should not have survived past type checking, |it should have been processed and eliminated during expansion of an enclosing macro.""" // The getOrElse part should never happen, it's just here as a backstop. 
reporter.error(pos, sym.compileTimeOnlyMessage getOrElse defaultMsg) } } private def checkDelayedInitSelect(qual: Tree, sym: Symbol, pos: Position) = { def isLikelyUninitialized = ( (sym.owner isSubClass DelayedInitClass) && !qual.tpe.isInstanceOf[ThisType] && sym.accessedOrSelf.isVal ) if (settings.warnDelayedInit && isLikelyUninitialized) reporter.warning(pos, s"Selecting ${sym} from ${sym.owner}, which extends scala.DelayedInit, is likely to yield an uninitialized value") } private def lessAccessible(otherSym: Symbol, memberSym: Symbol): Boolean = ( (otherSym != NoSymbol) && !otherSym.isProtected && !otherSym.isTypeParameterOrSkolem && !otherSym.isExistentiallyBound && (otherSym isLessAccessibleThan memberSym) && (otherSym isLessAccessibleThan memberSym.enclClass) ) private def lessAccessibleSymsInType(other: Type, memberSym: Symbol): List[Symbol] = { val extras = other match { case TypeRef(pre, _, args) => // checking the prefix here gives us spurious errors on e.g. a private[process] // object which contains a type alias, which normalizes to a visible type. args filterNot (_ eq NoPrefix) flatMap (tp => lessAccessibleSymsInType(tp, memberSym)) case _ => Nil } if (lessAccessible(other.typeSymbol, memberSym)) other.typeSymbol :: extras else extras } private def warnLessAccessible(otherSym: Symbol, memberSym: Symbol) { val comparison = accessFlagsToString(memberSym) match { case "" => "" case acc => " is " + acc + " but" } val cannot = if (memberSym.isDeferred) "may be unable to provide a concrete implementation of" else "may be unable to override" reporter.warning(memberSym.pos, "%s%s references %s %s.".format( memberSym.fullLocationString, comparison, accessFlagsToString(otherSym), otherSym ) + "\\nClasses which cannot access %s %s %s.".format( otherSym.decodedName, cannot, memberSym.decodedName) ) } /** Warn about situations where a method signature will include a type which * has more restrictive access than the method itself. 
*/ private def checkAccessibilityOfReferencedTypes(tree: Tree) { val member = tree.symbol def checkAccessibilityOfType(tpe: Type) { val inaccessible = lessAccessibleSymsInType(tpe, member) // if the unnormalized type is accessible, that's good enough if (inaccessible.isEmpty) () // or if the normalized type is, that's good too else if ((tpe ne tpe.normalize) && lessAccessibleSymsInType(tpe.dealiasWiden, member).isEmpty) () // otherwise warn about the inaccessible syms in the unnormalized type else inaccessible foreach (sym => warnLessAccessible(sym, member)) } // types of the value parameters mapParamss(member)(p => checkAccessibilityOfType(p.tpe)) // upper bounds of type parameters member.typeParams.map(_.info.bounds.hi.widen) foreach checkAccessibilityOfType } private def checkByNameRightAssociativeDef(tree: DefDef) { tree match { case DefDef(_, name, _, params :: _, _, _) => if (settings.warnByNameRightAssociative && !treeInfo.isLeftAssoc(name.decodedName) && params.exists(p => isByName(p.symbol))) reporter.warning(tree.pos, "by-name parameters will be evaluated eagerly when called as a right-associative infix operator. For more details, see SI-1980.") case _ => } } /** Check that a deprecated val or def does not override a * concrete, non-deprecated method. If it does, then * deprecation is meaningless. 
*/ private def checkDeprecatedOvers(tree: Tree) { val symbol = tree.symbol if (symbol.isDeprecated) { val concrOvers = symbol.allOverriddenSymbols.filter(sym => !sym.isDeprecated && !sym.isDeferred && !sym.hasDeprecatedOverridingAnnotation && !sym.enclClass.hasDeprecatedInheritanceAnnotation) if(!concrOvers.isEmpty) currentRun.reporting.deprecationWarning( tree.pos, symbol, s"${symbol.toString} overrides concrete, non-deprecated symbol(s): ${concrOvers.map(_.name.decode).mkString(", ")}", "") } } private def isRepeatedParamArg(tree: Tree) = currentApplication match { case Apply(fn, args) => ( args.nonEmpty && (args.last eq tree) && (fn.tpe.params.length == args.length) && isRepeatedParamType(fn.tpe.params.last.tpe) ) case _ => false } private def checkTypeRef(tp: Type, tree: Tree, skipBounds: Boolean) = tp match { case TypeRef(pre, sym, args) => tree match { case tt: TypeTree if tt.original == null => // SI-7783 don't warn about inferred types // FIXME: reconcile this check with one in resetAttrs case _ => checkUndesiredProperties(sym, tree.pos) } if(sym.isJavaDefined) sym.typeParams foreach (_.cookJavaRawInfo()) if (!tp.isHigherKinded && !skipBounds) checkBounds(tree, pre, sym.owner, sym.typeParams, args) case _ => } private def checkTypeRefBounds(tp: Type, tree: Tree) = { var skipBounds = false tp match { case AnnotatedType(ann :: Nil, underlying) if ann.symbol == UncheckedBoundsClass => skipBounds = true underlying case TypeRef(pre, sym, args) => if (!tp.isHigherKinded && !skipBounds) checkBounds(tree, pre, sym.owner, sym.typeParams, args) tp case _ => tp } } private def checkAnnotations(tpes: List[Type], tree: Tree) = tpes foreach { tp => checkTypeRef(tp, tree, skipBounds = false) checkTypeRefBounds(tp, tree) } private def doTypeTraversal(tree: Tree)(f: Type => Unit) = if (!inPattern) tree.tpe foreach f private def applyRefchecksToAnnotations(tree: Tree): Unit = { def applyChecks(annots: List[AnnotationInfo]) = { checkAnnotations(annots map (_.atp), tree) 
transformTrees(annots flatMap (_.args)) } def checkIsElisible(sym: Symbol) = if (sym ne null) sym.elisionLevel.foreach { level => if (!sym.isMethod || sym.isAccessor || sym.isLazy || sym.isDeferred) reporter.error(sym.pos, s"${sym.name}: Only methods can be marked @elidable.") } if (settings.isScala213) checkIsElisible(tree.symbol) tree match { case m: MemberDef => val sym = m.symbol applyChecks(sym.annotations) def messageWarning(name: String)(warn: String) = reporter.warning(tree.pos, f"Invalid $name message for ${sym}%s${sym.locationString}%s:%n$warn") // validate implicitNotFoundMessage and implicitAmbiguousMessage analyzer.ImplicitNotFoundMsg.check(sym) foreach messageWarning("implicitNotFound") analyzer.ImplicitAmbiguousMsg.check(sym) foreach messageWarning("implicitAmbiguous") case tpt@TypeTree() => if (tpt.original != null) { tpt.original foreach { case dc@TypeTreeWithDeferredRefCheck() => applyRefchecksToAnnotations(dc.check()) // #2416 case _ => } } doTypeTraversal(tree) { case tp @ AnnotatedType(annots, _) => applyChecks(annots) case tp => } case _ => } } private def isSimpleCaseApply(tree: Tree): Boolean = { val sym = tree.symbol def isClassTypeAccessible(tree: Tree): Boolean = tree match { case TypeApply(fun, targs) => isClassTypeAccessible(fun) case Select(module, apply) => ( // SI-4859 `CaseClass1().InnerCaseClass2()` must not be rewritten to `new InnerCaseClass2()`; // {expr; Outer}.Inner() must not be rewritten to `new Outer.Inner()`. treeInfo.isQualifierSafeToElide(module) && // SI-5626 Classes in refinement types cannot be constructed with `new`. In this case, // the companion class is actually not a ClassSymbol, but a reference to an abstract type. module.symbol.companionClass.isClass ) } sym.name == nme.apply && !(sym hasFlag STABLE) && // ??? 
sym.isCase && isClassTypeAccessible(tree) && !tree.tpe.finalResultType.typeSymbol.primaryConstructor.isLessAccessibleThan(tree.symbol) } private def transformCaseApply(tree: Tree) = { def loop(t: Tree): Unit = t match { case Ident(_) => checkUndesiredProperties(t.symbol, t.pos) case Select(qual, _) => checkUndesiredProperties(t.symbol, t.pos) loop(qual) case _ => } tree foreach { case i@Ident(_) => enterReference(i.pos, i.symbol) // SI-5390 need to `enterReference` for `a` in `a.B()` case _ => } loop(tree) toConstructor(tree.pos, tree.tpe) } private def transformApply(tree: Apply): Tree = tree match { case Apply( Select(qual, nme.withFilter), List(Function( List(ValDef(_, pname, tpt, _)), Match(_, CaseDef(pat1, _, _) :: _)))) if ((pname startsWith nme.CHECK_IF_REFUTABLE_STRING) && isIrrefutable(pat1, tpt.tpe) && (qual.tpe <:< tree.tpe)) => transform(qual) case Apply(fn, args) => // sensicality should be subsumed by the unreachability/exhaustivity/irrefutability // analyses in the pattern matcher if (!inPattern) { checkImplicitViewOptionApply(tree.pos, fn, args) checkSensible(tree.pos, fn, args) } currentApplication = tree tree } private def transformSelect(tree: Select): Tree = { val Select(qual, _) = tree val sym = tree.symbol checkUndesiredProperties(sym, tree.pos) checkDelayedInitSelect(qual, sym, tree.pos) if (!sym.exists) devWarning("Select node has NoSymbol! " + tree + " / " + tree.tpe) else if (sym.isLocalToThis) varianceValidator.checkForEscape(sym, currentClass) def checkSuper(mix: Name) = // term should have been eliminated by super accessors assert(!(qual.symbol.isTrait && sym.isTerm && mix == tpnme.EMPTY), (qual.symbol, sym, mix)) // Rewrite eligible calls to monomorphic case companion apply methods to the equivalent constructor call. // // Note: for generic case classes the rewrite needs to be handled at the enclosing `TypeApply` to transform // `TypeApply(Select(C, apply), targs)` to `Select(New(C[targs]), <init>)`. 
In case such a `TypeApply` // was deemed ineligible for transformation (e.g. the case constructor was private), the refchecks transform // will recurse to this point with `Select(C, apply)`, which will have a type `[T](...)C[T]`. // // We don't need to perform the check on the Select node, and `!isHigherKinded will guard against this // redundant (and previously buggy, SI-9546) consideration. if (!tree.tpe.isHigherKinded && isSimpleCaseApply(tree)) { transformCaseApply(tree) } else { qual match { case Super(_, mix) => checkSuper(mix) case _ => } tree } } private def transformIf(tree: If): Tree = { val If(cond, thenpart, elsepart) = tree def unitIfEmpty(t: Tree): Tree = if (t == EmptyTree) Literal(Constant(())).setPos(tree.pos).setType(UnitTpe) else t cond.tpe match { case ConstantType(value) => val res = if (value.booleanValue) thenpart else elsepart unitIfEmpty(res) case _ => tree } } // Warning about nullary methods returning Unit. private def checkNullaryMethodReturnType(sym: Symbol) = sym.tpe match { case NullaryMethodType(restpe) if restpe.typeSymbol == UnitClass => // this may be the implementation of e.g. a generic method being parameterized // on Unit, in which case we had better let it slide. 
val isOk = ( sym.isGetter || (sym.name containsName nme.DEFAULT_GETTER_STRING) || sym.allOverriddenSymbols.exists(over => !(over.tpe.resultType =:= sym.tpe.resultType)) ) if (!isOk) reporter.warning(sym.pos, s"side-effecting nullary methods are discouraged: suggest defining as `def ${sym.name.decode}()` instead") case _ => () } // Verify classes extending AnyVal meet the requirements private def checkAnyValSubclass(clazz: Symbol) = { if (clazz.isDerivedValueClass) { if (clazz.isTrait) reporter.error(clazz.pos, "Only classes (not traits) are allowed to extend AnyVal") else if (clazz.hasAbstractFlag) reporter.error(clazz.pos, "`abstract' modifier cannot be used with value classes") } } private def checkUnexpandedMacro(t: Tree) = if (!t.isDef && t.hasSymbolField && t.symbol.isTermMacro) reporter.error(t.pos, "macro has not been expanded") override def transform(tree: Tree): Tree = { val savedLocalTyper = localTyper val savedCurrentApplication = currentApplication try { val sym = tree.symbol // Apply RefChecks to annotations. Makes sure the annotations conform to // type bounds (bug #935), issues deprecation warnings for symbols used // inside annotations. 
applyRefchecksToAnnotations(tree) var result: Tree = tree match { // NOTE: a val in a trait is now a DefDef, with the RHS being moved to an Assign in Constructors case tree: ValOrDefDef => checkDeprecatedOvers(tree) if (!tree.isErroneous) checkInfiniteLoop(tree.symbol, tree.rhs) if (settings.warnNullaryUnit) checkNullaryMethodReturnType(sym) if (settings.warnInaccessible) { if (!sym.isConstructor && !sym.isEffectivelyFinalOrNotOverridden && !sym.isSynthetic) checkAccessibilityOfReferencedTypes(tree) } tree match { case dd: DefDef => checkByNameRightAssociativeDef(dd) if (sym hasAnnotation NativeAttr) { if (sym.owner.isTrait) { reporter.error(tree.pos, "A trait cannot define a native method.") tree } else if (dd.rhs == EmptyTree) { // pretend it had a stub implementation sym resetFlag DEFERRED deriveDefDef(dd)(_ => typed(gen.mkSysErrorCall("native method stub"))) } else tree } else tree case _ => tree } case Template(parents, self, body) => localTyper = localTyper.atOwner(tree, currentOwner) validateBaseTypes(currentOwner) checkOverloadedRestrictions(currentOwner, currentOwner) // SI-7870 default getters for constructors live in the companion module checkOverloadedRestrictions(currentOwner, currentOwner.companionModule) val bridges = addVarargBridges(currentOwner) // TODO: do this during uncurry? checkAllOverrides(currentOwner) checkAnyValSubclass(currentOwner) if (currentOwner.isDerivedValueClass) currentOwner.primaryConstructor makeNotPrivate NoSymbol // SI-6601, must be done *after* pickler! 
if (bridges.nonEmpty) deriveTemplate(tree)(_ ::: bridges) else tree case dc@TypeTreeWithDeferredRefCheck() => abort("adapt should have turned dc: TypeTreeWithDeferredRefCheck into tpt: TypeTree, with tpt.original == dc") case tpt@TypeTree() => if(tpt.original != null) { tpt.original foreach { case dc@TypeTreeWithDeferredRefCheck() => transform(dc.check()) // #2416 -- only call transform to do refchecks, but discard results // tpt has the right type if the deferred checks are ok case _ => } } val existentialParams = new ListBuffer[Symbol] var skipBounds = false // check all bounds, except those that are existential type parameters // or those within typed annotated with @uncheckedBounds doTypeTraversal(tree) { case tp @ ExistentialType(tparams, tpe) => existentialParams ++= tparams case ann: AnnotatedType if ann.hasAnnotation(UncheckedBoundsClass) => // SI-7694 Allow code synthetizers to disable checking of bounds for TypeTrees based on inferred LUBs // which might not conform to the constraints. 
skipBounds = true case tp: TypeRef => val tpWithWildcards = deriveTypeWithWildcards(existentialParams.toList)(tp) checkTypeRef(tpWithWildcards, tree, skipBounds) case _ => } if (skipBounds) { tree.setType(tree.tpe.map { _.filterAnnotations(_.symbol != UncheckedBoundsClass) }) } tree case TypeApply(fn, args) => checkBounds(tree, NoPrefix, NoSymbol, fn.tpe.typeParams, args map (_.tpe)) if (isSimpleCaseApply(tree)) transformCaseApply(tree) else tree case x @ Apply(_, _) => transformApply(x) case x @ If(_, _, _) => transformIf(x) case New(tpt) => enterReference(tree.pos, tpt.tpe.typeSymbol) tree case treeInfo.WildcardStarArg(_) if !isRepeatedParamArg(tree) => reporter.error(tree.pos, "no `: _*' annotation allowed here\\n"+ "(such annotations are only allowed in arguments to *-parameters)") tree case Ident(name) => checkUndesiredProperties(sym, tree.pos) if (name != nme.WILDCARD && name != tpnme.WILDCARD_STAR) { assert(sym != NoSymbol, "transformCaseApply: name = " + name.debugString + " tree = " + tree + " / " + tree.getClass) //debug enterReference(tree.pos, sym) } tree case x @ Select(_, _) => transformSelect(x) case UnApply(fun, args) => transform(fun) // just make sure we enterReference for unapply symbols, note that super.transform(tree) would not transform(fun) // transformTrees(args) // TODO: is this necessary? could there be forward references in the args?? // probably not, until we allow parameterised extractors tree case _ => tree } // skip refchecks in patterns.... 
result = result match { case CaseDef(pat, guard, body) => val pat1 = savingInPattern { inPattern = true transform(pat) } treeCopy.CaseDef(tree, pat1, transform(guard), transform(body)) case LabelDef(_, _, _) if treeInfo.hasSynthCaseSymbol(result) => savingInPattern { inPattern = true deriveLabelDef(result)(transform) } case Apply(fun, args) if fun.symbol.isLabel && treeInfo.isSynthCaseSymbol(fun.symbol) => savingInPattern { // SI-7756 If we were in a translated pattern, we can now switch out of pattern mode, as the label apply signals // that we are in the user-supplied code in the case body. // // Relies on the translation of: // (null: Any) match { case x: List[_] => x; x.reverse; case _ => }' // to: // <synthetic> val x2: List[_] = (x1.asInstanceOf[List[_]]: List[_]); // matchEnd4({ x2; x2.reverse}) // case body is an argument to a label apply. inPattern = false super.transform(result) } case ValDef(_, _, _, _) if treeInfo.hasSynthCaseSymbol(result) => deriveValDef(result)(transform) // SI-7716 Don't refcheck the tpt of the synthetic val that holds the selector. case _ => super.transform(result) } result match { case ClassDef(_, _, _, _) | TypeDef(_, _, _, _) | ModuleDef(_, _, _) => if (result.symbol.isLocalToBlock || result.symbol.isTopLevel) varianceValidator.traverse(result) case tt @ TypeTree() if tt.original != null => varianceValidator.traverse(tt.original) // See SI-7872 case _ => } checkUnexpandedMacro(result) result } catch { case ex: TypeError => if (settings.debug) ex.printStackTrace() reporter.error(tree.pos, ex.getMessage()) tree } finally { localTyper = savedLocalTyper currentApplication = savedCurrentApplication } } } }
felixmulder/scala
src/compiler/scala/tools/nsc/typechecker/RefChecks.scala
Scala
bsd-3-clause
83,931
package com.ubirch.avatar.model.actors

import com.ubirch.avatar.model.rest.device.DeviceDataRaw

/** Message/value wrapper around a single [[DeviceDataRaw]] payload.
  *
  * NOTE(review): the name suggests the wrapped data has been anchored
  * (e.g. persisted or blockchain-anchored) — confirm against the actors
  * that produce/consume this type; only the wrapping itself is visible here.
  *
  * @param raw the raw device data being wrapped
  */
case class AnchoredRawData(raw: DeviceDataRaw)
ubirch/ubirch-avatar-service
model-rest/src/main/scala/com/ubirch/avatar/model/actors/AnchoredRawData.scala
Scala
apache-2.0
144
package com.mesosphere.universe.test

import com.mesosphere.universe
import com.netaporter.uri.Uri
import io.circe.Json
import io.circe.JsonObject
import io.circe.syntax._
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import org.scalatest.prop.TableFor1
import org.scalatest.prop.TableFor2

/**
 * Shared test fixtures: "maximal" (every optional field set) and "minimal"
 * (only required fields set) package definitions for each packaging version
 * (v2 / v3 / v4), plus ScalaTest property tables over them.
 */
object TestingPackages {

  // --- Building blocks shared by the "maximal" fixtures below ---

  val PackagingVersion = universe.v3.model.V3PackagingVersion
  val Name = "MAXIMAL"
  val MinimalName = "minimal"
  val Version = universe.v3.model.Version("9.87.654.3210")
  val Maintainer = "max@mesosphere.io"
  // Extreme release versions exercise the boundaries of the ReleaseVersion type.
  val MaxReleaseVersion = universe.v3.model.ReleaseVersion(Long.MaxValue)
  val MinReleaseVersion = universe.v3.model.ReleaseVersion(0L)
  val Description = "A complete package definition"
  val MarathonTemplate = Some(universe.v3.model.Marathon(
    v2AppMustacheTemplate = ByteBuffer.wrap("marathon template".getBytes(StandardCharsets.UTF_8))
  ))
  val Tags = List("all", "the", "things").map(s => universe.v3.model.Tag(s))
  val Scm = Some("git")
  val Website = Some("mesosphere.com")
  val Framework = Some(true)
  val PreInstallNotes = Some("pre-install message")
  val PostInstallNotes = Some("post-install message")
  val PostUninstallNotes = Some("post-uninstall message")
  val Licenses = Some(List(
    universe.v3.model.License(name = "ABC", url = Uri.parse("http://foobar/a/b/c")),
    universe.v3.model.License(name = "XYZ", url = Uri.parse("http://foobar/x/y/z"))
  ))
  val MinDcosReleaseVersion = Some(universe.v3.model.DcosReleaseVersionParser.parseUnsafe("1.9.99"))

  // A v3 resource with every optional section (assets, container images, CLI
  // binaries for all three platforms) populated.
  val Resource = Some(universe.v3.model.V3Resource(
    assets = Some(universe.v3.model.Assets(
      uris = Some(Map(
        "foo.tar.gz" -> "http://mesosphere.com/foo.tar.gz",
        "bar.jar" -> "https://mesosphere.com/bar.jar"
      )),
      container = Some(universe.v3.model.Container(Map(
        "image1" -> "docker/image:1",
        "image2" -> "docker/image:2"
      )))
    )),
    images = Some(universe.v3.model.Images(
      iconSmall = Some("small.png"),
      iconMedium = Some("medium.png"),
      iconLarge = Some("large.png"),
      screenshots = Some(List("ooh.png", "aah.png"))
    )),
    cli = Some(universe.v3.model.Cli(
      binaries = Some(universe.v3.model.Platforms(
        windows = Some(universe.v3.model.Architectures(
          `x86-64` = universe.v3.model.Binary(
            kind = "windows",
            url = "mesosphere.com/windows.exe",
            contentHash = List(
              universe.v3.model.HashInfo("letters", "abcba"),
              universe.v3.model.HashInfo("numbers", "12321")
            )
          )
        )),
        linux = Some(universe.v3.model.Architectures(
          `x86-64` = universe.v3.model.Binary(
            kind = "linux",
            url = "mesosphere.com/linux",
            contentHash = List(
              universe.v3.model.HashInfo("letters", "ijkji"),
              universe.v3.model.HashInfo("numbers", "13579")
            )
          )
        )),
        darwin = Some(universe.v3.model.Architectures(
          `x86-64` = universe.v3.model.Binary(
            kind = "darwin",
            url = "mesosphere.com/darwin",
            contentHash = List(
              universe.v3.model.HashInfo("letters", "xyzyx"),
              universe.v3.model.HashInfo("numbers", "02468")
            )
          )
        ))
      ))
    ))
  ))

  val Config = Some(JsonObject.fromMap(Map("foo" -> 42.asJson, "bar" -> "baz".asJson)))

  // --- v3-model package definitions ---

  val MaximalV3ModelV3PackageDefinition: universe.v3.model.V3Package = universe.v3.model.V3Package(
    PackagingVersion,
    Name,
    Version,
    MaxReleaseVersion,
    Maintainer,
    Description,
    Tags,
    None,
    Scm,
    Website,
    Framework,
    PreInstallNotes,
    PostInstallNotes,
    PostUninstallNotes,
    Licenses,
    MinDcosReleaseVersion,
    MarathonTemplate,
    Resource,
    Config,
    command = Some(universe.v3.model.Command(
      pip = List("flask", "jinja", "jsonschema")
    ))
  )

  val MinimalV3ModelV3PackageDefinition: universe.v3.model.V3Package = universe.v3.model.V3Package(
    packagingVersion = universe.v3.model.V3PackagingVersion,
    name = "minimal",
    version = universe.v3.model.Version("1.2.3"),
    releaseVersion = MinReleaseVersion,
    maintainer = "minimal@mesosphere.io",
    description = "A minimal package definition"
  )

  // V2 variant derived from the maximal v3 package so the two stay in sync,
  // with the resource narrowed to the v2 schema (no CLI section).
  val MaximalV3ModelV2PackageDefinition: universe.v3.model.V2Package = universe.v3.model.V2Package(
    packagingVersion = universe.v3.model.V2PackagingVersion,
    name = MaximalV3ModelV3PackageDefinition.name,
    version = MaximalV3ModelV3PackageDefinition.version,
    releaseVersion = MaximalV3ModelV3PackageDefinition.releaseVersion,
    maintainer = MaximalV3ModelV3PackageDefinition.maintainer,
    description = MaximalV3ModelV3PackageDefinition.description,
    marathon = MaximalV3ModelV3PackageDefinition.marathon.get,
    tags = MaximalV3ModelV3PackageDefinition.tags,
    selected = MaximalV3ModelV3PackageDefinition.selected,
    scm = MaximalV3ModelV3PackageDefinition.scm,
    website = MaximalV3ModelV3PackageDefinition.website,
    framework = MaximalV3ModelV3PackageDefinition.framework,
    preInstallNotes = MaximalV3ModelV3PackageDefinition.preInstallNotes,
    postInstallNotes = MaximalV3ModelV3PackageDefinition.postInstallNotes,
    postUninstallNotes = MaximalV3ModelV3PackageDefinition.postUninstallNotes,
    licenses = MaximalV3ModelV3PackageDefinition.licenses,
    resource = Some(universe.v3.model.V2Resource(
      assets = Some(universe.v3.model.Assets(
        uris = Some(Map(
          "foo.tar.gz" -> "http://mesosphere.com/foo.tar.gz",
          "bar.jar" -> "https://mesosphere.com/bar.jar"
        )),
        container = Some(universe.v3.model.Container(Map(
          "image1" -> "docker/image:1",
          "image2" -> "docker/image:2"
        )))
      )),
      images = Some(universe.v3.model.Images(
        iconSmall = Some("small.png"),
        iconMedium = Some("medium.png"),
        iconLarge = Some("large.png"),
        screenshots = Some(List("ooh.png", "aah.png"))
      ))
    )),
    config = MaximalV3ModelV3PackageDefinition.config,
    command = MaximalV3ModelV3PackageDefinition.command
  )

  // NOTE: deliberately borrows the *maximal* package's marathon template
  // because V2Package requires a marathon field.
  val MinimalV3ModelV2PackageDefinition: universe.v3.model.V2Package = universe.v3.model.V2Package(
    packagingVersion = universe.v3.model.V2PackagingVersion,
    name = MinimalV3ModelV3PackageDefinition.name,
    version = MinimalV3ModelV3PackageDefinition.version,
    releaseVersion = MinimalV3ModelV3PackageDefinition.releaseVersion,
    maintainer = MinimalV3ModelV3PackageDefinition.maintainer,
    description = MinimalV3ModelV3PackageDefinition.description,
    marathon = MaximalV3ModelV3PackageDefinition.marathon.get
  )

  // --- v2-model (legacy wire format) package details ---

  val MaximalV2ModelPackageDetails = universe.v2.model.PackageDetails(
    packagingVersion = universe.v2.model.PackagingVersion("2.0"),
    name = MaximalV3ModelV2PackageDefinition.name,
    version = universe.v2.model.PackageDetailsVersion(MaximalV3ModelV2PackageDefinition.version.toString),
    maintainer = MaximalV3ModelV2PackageDefinition.maintainer,
    description = MaximalV3ModelV2PackageDefinition.description,
    tags = List("all", "the", "things"),
    selected = MaximalV3ModelV2PackageDefinition.selected,
    scm = MaximalV3ModelV2PackageDefinition.scm,
    website = MaximalV3ModelV2PackageDefinition.website,
    framework = MaximalV3ModelV2PackageDefinition.framework,
    preInstallNotes = MaximalV3ModelV2PackageDefinition.preInstallNotes,
    postInstallNotes = MaximalV3ModelV2PackageDefinition.postInstallNotes,
    postUninstallNotes = MaximalV3ModelV2PackageDefinition.postUninstallNotes,
    licenses = Some(List(
      universe.v2.model.License(name = "ABC", url = "http://foobar/a/b/c"),
      universe.v2.model.License(name = "XYZ", url = "http://foobar/x/y/z")
    ))
  )

  val MinimalV2ModelPackageDetails = universe.v2.model.PackageDetails(
    packagingVersion = universe.v2.model.PackagingVersion("2.0"),
    name = MinimalV3ModelV3PackageDefinition.name,
    version = universe.v2.model.PackageDetailsVersion(MinimalV3ModelV3PackageDefinition.version.toString),
    maintainer = MinimalV3ModelV3PackageDefinition.maintainer,
    description = MinimalV3ModelV3PackageDefinition.description
  )

  // Same fixtures upcast to the common PackageDefinition supertype.
  val MaximalV3ModelPackageDefinitionV2: universe.v4.model.PackageDefinition = MaximalV3ModelV2PackageDefinition
  val MinimalV3ModelPackageDefinitionV2: universe.v4.model.PackageDefinition = MinimalV3ModelV2PackageDefinition
  val MaximalV3ModelPackageDefinitionV3: universe.v4.model.PackageDefinition = MaximalV3ModelV3PackageDefinition
  val MinimalV3ModelPackageDefinitionV3: universe.v4.model.PackageDefinition = MinimalV3ModelV3PackageDefinition

  val MaximalV2Resource = universe.v2.model.Resource(
    assets = Some(universe.v2.model.Assets(
      uris = Some(Map(
        "foo.tar.gz" -> "http://mesosphere.com/foo.tar.gz",
        "bar.jar" -> "https://mesosphere.com/bar.jar"
      )),
      container = Some(universe.v2.model.Container(Map(
        "image1" -> "docker/image:1",
        "image2" -> "docker/image:2"
      )))
    )),
    images = Some(universe.v2.model.Images(
      iconSmall = Some("small.png"),
      iconMedium = Some("medium.png"),
      iconLarge = Some("large.png"),
      screenshots = Some(List("ooh.png", "aah.png"))
    ))
  )

  // --- Metadata fixtures (v3 / v4) ---

  val MaximalV3ModelMetadata = universe.v3.model.V3Metadata(
    PackagingVersion,
    Name,
    Version,
    Maintainer,
    Description,
    Tags,
    Scm,
    Website,
    Framework,
    PreInstallNotes,
    PostInstallNotes,
    PostUninstallNotes,
    Licenses,
    MinDcosReleaseVersion,
    MarathonTemplate,
    Resource,
    Config
  )

  val MinimalV3ModelMetadata = universe.v3.model.V3Metadata(
    PackagingVersion,
    MinimalName,
    Version,
    Maintainer,
    Description
  )

  // Realistic Marathon app definition used by the HelloWorld fixture below.
  val HelloWorldMarathonTemplate: ByteBuffer = {
    val templateText =
      """
        |{
        |  "id": "helloworld",
        |  "cpus": 1.0,
        |  "mem": 512,
        |  "instances": 1,
        |  "cmd": "python3 -m http.server {{port}}",
        |  "container": {
        |    "type": "DOCKER",
        |    "docker": {
        |      "image": "python:3",
        |      "network": "HOST"
        |    }
        |  }
        |}
      """.stripMargin
    ByteBuffer.wrap(templateText.getBytes(StandardCharsets.UTF_8))
  }

  val HelloWorldV3Package: universe.v3.model.V3Package = universe.v3.model.V3Package(
    name = "helloworld",
    version = universe.v3.model.Version("0.1.0"),
    releaseVersion = MinReleaseVersion,
    website = Some("https://github.com/mesosphere/dcos-helloworld"),
    maintainer = "support@mesosphere.io",
    description = "Example DCOS application package",
    preInstallNotes = Some("A sample pre-installation message"),
    postInstallNotes = Some("A sample post-installation message"),
    tags = List("mesosphere", "example", "subcommand")
      .map(s => universe.v3.model.Tag(s)),
    marathon = Some(universe.v3.model.Marathon(HelloWorldMarathonTemplate)),
    config = Some(JsonObject.fromMap(Map(
      "$schema" -> "http://json-schema.org/schema#".asJson,
      "type" -> "object".asJson,
      "properties" -> Json.obj(
        "port" -> Json.obj(
          "type" -> "integer".asJson,
          "default" -> 8080.asJson
        )
      )
    )))
  )

  // v4 metadata adds the upgradesFrom / downgradesTo version specs.
  val MaximalV4ModelMetadata = universe.v4.model.V4Metadata(
    universe.v4.model.V4PackagingVersion,
    Name + "v4",
    Version,
    Maintainer,
    Description,
    Tags,
    Scm,
    Website,
    Framework,
    PreInstallNotes,
    PostInstallNotes,
    PostUninstallNotes,
    Licenses,
    MinDcosReleaseVersion,
    MarathonTemplate,
    Resource,
    Config,
    upgradesFrom = Some(List(universe.v3.model.ExactVersion(universe.v3.model.Version("8.0")))),
    downgradesTo = Some(List(universe.v3.model.ExactVersion(universe.v3.model.Version("8.0"))))
  )

  val MinimalV4ModelMetadata = universe.v4.model.V4Metadata(
    universe.v4.model.V4PackagingVersion,
    MinimalName + "v4",
    Version,
    Maintainer,
    Description
  )

  // Property table mapping each supported PackagingVersion to its wire string.
  val validPackagingVersions: TableFor2[universe.v4.model.PackagingVersion, String] = {
    new TableFor2(
      "PackagingVersion" -> "String",
      universe.v3.model.V2PackagingVersion -> "2.0",
      universe.v3.model.V3PackagingVersion -> "3.0",
      universe.v4.model.V4PackagingVersion -> "4.0"
    )
  }

  // e.g. "[2.0, 3.0, 4.0]" -- used in the expected error message below.
  val versionStringList = validPackagingVersions.map(_._2).mkString("[", ", ", "]")

  // Expected decoder error message for an unrecognized packaging version.
  def renderInvalidVersionMessage(invalidVersion: String): String = {
    s"Expected one of $versionStringList for packaging version, but found [$invalidVersion]"
  }

  // --- v4-model package definitions ---

  val MinimalV4ModelV4PackageDefinition: universe.v4.model.V4Package = universe.v4.model.V4Package(
    packagingVersion = universe.v4.model.V4PackagingVersion,
    name = "minimalv4",
    version = universe.v3.model.Version("1.2.3"),
    releaseVersion = universe.v3.model.ReleaseVersion(0),
    maintainer = "minimal@mesosphere.io",
    description = "A minimal package definition"
  )

  val MaximalV4ModelV4PackageDefinition: universe.v4.model.V4Package = universe.v4.model.V4Package(
    universe.v4.model.V4PackagingVersion,
    Name + "v4",
    Version,
    releaseVersion = universe.v3.model.ReleaseVersion(Long.MaxValue),
    Maintainer,
    Description,
    Tags,
    None,
    Scm,
    Website,
    Framework,
    PreInstallNotes,
    PostInstallNotes,
    PostUninstallNotes,
    Licenses,
    MinDcosReleaseVersion,
    MarathonTemplate,
    Resource,
    Config,
    upgradesFrom = Some(List(universe.v3.model.ExactVersion(universe.v3.model.Version("8.0")))),
    downgradesTo = Some(List(universe.v3.model.ExactVersion(universe.v3.model.Version("8.0"))))
  )

  val MaximalV4ModelPackageDefinitionV4: universe.v4.model.PackageDefinition = MaximalV4ModelV4PackageDefinition
  val MinimalV4ModelPackageDefinitionV4: universe.v4.model.PackageDefinition = MinimalV4ModelV4PackageDefinition

  // Tables enumerating all fixtures, for forAll-style property tests.
  val supportedPackageDefinitions: TableFor1[universe.v4.model.SupportedPackageDefinition] =
    new TableFor1(
      "supportedPackageDefinition",
      MinimalV3ModelV3PackageDefinition,
      MaximalV3ModelV3PackageDefinition,
      MinimalV4ModelV4PackageDefinition,
      MaximalV4ModelV4PackageDefinition
    )

  val packageDefinitions: TableFor1[universe.v4.model.PackageDefinition] =
    new TableFor1(
      "packageDefinition",
      MinimalV3ModelV2PackageDefinition,
      MaximalV3ModelV2PackageDefinition,
      MinimalV3ModelV3PackageDefinition,
      MaximalV3ModelV3PackageDefinition,
      MinimalV4ModelV4PackageDefinition,
      MaximalV4ModelV4PackageDefinition
    )
}
takirala/cosmos
cosmos-test-common/src/main/scala/com/mesosphere/universe/test/TestingPackages.scala
Scala
apache-2.0
14,665
package resource

import java.net.URL
import java.nio.charset.StandardCharsets
import java.nio.file._
import java.util.Collections

/** Utilities for reading files and classpath resources as strings. */
object Resource {

  /**
   * Reads the file at `url` and returns its contents decoded as UTF-8.
   *
   * When the URL points into a jar (URI scheme "jar"), a zip file system is
   * mounted first so `Paths.get` can resolve the entry; it is closed again
   * in the `finally` block whether or not the read succeeds.
   *
   * @param url location of the file to read
   * @return the file contents, decoded as UTF-8
   */
  def readFile(url: URL): String = {
    val uri = url.toURI
    // Paths.get on a jar: URI only works while a matching FileSystem is open.
    val fileSystem =
      if (uri.getScheme != "jar") None
      else Some(FileSystems.newFileSystem(uri, Collections.emptyMap[String, Object]))
    // Decode explicitly as UTF-8 rather than the platform default charset, so
    // the result does not depend on the JVM's file.encoding setting.
    try new String(Files.readAllBytes(Paths.get(uri)), StandardCharsets.UTF_8)
    finally fileSystem.foreach(_.close())
  }

  /**
   * Loads the named classpath resource as a UTF-8 string.
   *
   * @param name resource path, resolved via this class's class loader
   * @return the resource contents
   * @throws IllegalArgumentException if the resource does not exist
   *         (previously this surfaced as an opaque NullPointerException)
   */
  def getStringResource(name: String): String = {
    val url = Resource.getClass.getClassLoader.getResource(name)
    require(url != null, s"classpath resource not found: $name")
    readFile(url)
  }
}
xuwei-k/xsbt
sbt-app/src/sbt-test/classloader-cache/resources/libraries/foo/src/main/scala/resource/Resource.scala
Scala
apache-2.0
550
/**
 * Demonstrates Scala 3 (Dotty) trait parameters:
 * http://dotty.epfl.ch/docs/reference/trait-parameters.html
 */
object TraitParams {

  /** A trait taking a constructor parameter -- Scala 3 only. */
  trait Base(val msg: String)

  class A extends Base("Hello")
  class B extends Base("Dotty!")

  /**
   * Prints all messages on one line. The union parameter type `A | B`
   * guarantees this file cannot accidentally compile under Scala 2,
   * which has no union types.
   */
  private def printMessages(msgs: (A | B)*) = {
    val joined = msgs.iterator.map(_.msg).mkString(" ")
    println(joined)
  }

  def test: Unit = {
    printMessages(new A, new B)

    // Sanity check the classpath: this won't run if the dotty jar is not present.
    val sanity: Int => Int = n => n
    sanity(1)
  }
}
smarter/dotty-example-project
src/main/scala/TraitParams.scala
Scala
bsd-3-clause
589
/*
 * Copyright (C) 2016 Nikos Katzouris
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package metric

import breeze.optimize.linear.KuhnMunkres

/**
 * Matcher is any object that solves an assignment problem. The problem consists of finding
 * a maximum cost matching (or a minimum cost perfect matching) in a bipartite graph. The input
 * graph is usually represented as a cost matrix. Zero values define the absence of edges.
 *
 * === General Formulation ===
 *
 * Each problem instance has a number of agents and a number of tasks. Any agent can be assigned to
 * any task, incurring a cost that may vary depending on the agent-task assignment. It is required
 * that all tasks are assigned to exactly one agent in such a way that the total cost is minimized.
 * In case the numbers of agents and tasks are equal and the total cost of the assignment for all tasks
 * is equal to the sum of the costs for each agent then the problem is called the linear assignment problem.
 *
 * A Matcher maps a cost matrix to a pair of (assignment indices, total cost).
 *
 * @see https://en.wikipedia.org/wiki/Assignment_problem
 */
trait Matcher extends (Seq[Seq[Double]] => (Array[Int], Double))

/**
 * The Hungarian matcher is a combinatorial optimization algorithm that solves the assignment problem in
 * polynomial time O(n&#94;3).
 *
 * @see https://en.wikipedia.org/wiki/Hungarian_algorithm
 */
object HungarianMatcher extends Matcher {

  /**
   * It solves the assignment problem for the given cost matrix. The cost
   * matrix represents the costs for each edge in the graph.
   *
   * NOTE(review): assumes a non-empty, rectangular matrix -- `costMatrix.head`
   * throws on empty input; rows of differing lengths would silently skew
   * `unmatched`/`maxDimension`. Confirm callers guarantee this.
   *
   * @param costMatrix the bipartite graph cost matrix
   * @return the matching produced by Kuhn-Munkres, paired with the assignment
   *         cost. The cost is penalized by the number of unmatched rows/columns
   *         (the dimension difference) and normalized by the larger dimension.
   */
  override def apply(costMatrix: Seq[Seq[Double]]): (Array[Int], Double) = {
    // Penalty for a non-square matrix: each surplus row/column is unmatchable.
    val unmatched = math.abs(costMatrix.length - costMatrix.head.length)
    val maxDimension = math.max(costMatrix.length, costMatrix.head.length)
    KuhnMunkres.extractMatching(costMatrix) match {
      // Note: '/' binds tighter than '->', so this is
      // matches.toArray -> ((cost + unmatched) / maxDimension).
      case (matches, cost) => matches.toArray -> (cost + unmatched) / maxDimension
    }
  }
}

/**
 * The Hausdorff matcher is based on the Hausdorff distance. The Hausdorff distance is the longest distance
 * you can be forced to travel by an adversary that chooses a point in one set, from where you then must travel
 * to the other set. In other words, it is the greatest of all the distances from a point in one set to the
 * closest point in another set.
 *
 * @note The Hausdorff matcher can be used for solving the assignment problem, but the solution is not
 *       guaranteed to be the optimal one. Moreover, the matching is not guaranteed to be one to one.
 * @see https://en.wikipedia.org/wiki/Hausdorff_distance
 *      Distance Between Herbrand Interpretations: A Measure for Approximations
 *      to a Target Concept (1997)
 */
object HausdorffMatcher extends Matcher {

  /**
   * It solves the assignment problem for a given cost matrix. The cost
   * matrix represents the costs for each edge in the graph.
   *
   * The distance is the larger of the two directed Hausdorff distances:
   * max over rows of the row minimum, and max over columns of the column
   * minimum. No explicit matching is produced, hence the empty array.
   *
   * @param costMatrix the bipartite graph cost matrix
   * @return an empty matching paired with the Hausdorff distance
   */
  override def apply(costMatrix: Seq[Seq[Double]]): (Array[Int], Double) =
    Array.empty[Int] -> math.max(costMatrix.map(_.min).max, costMatrix.transpose.map(_.min).max)
}
nkatzz/OLED
src/main/scala/metric/Matcher.scala
Scala
gpl-3.0
3,847
/*
 * Copyright 2014–2020 SlamData Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package quasar

import iotaz.TListK.:::
import iotaz.{ CopK, TNilK }

package object ejson {
  /** For _strict_ JSON, you want something like `Obj[Mu[Json]]`. */
  // Json is the coproduct (CopK) of the Obj and Common pattern functors.
  type Json[A] = CopK[Obj ::: Common ::: TNilK, A]

  // Injection instances for lifting each component functor into Json.
  val ObjJson = CopK.Inject[Obj, Json]
  val CommonJson = CopK.Inject[Common, Json]

  // EJson extends strict JSON by replacing Obj with the richer Extension functor.
  type EJsonL = Extension ::: Common ::: TNilK
  type EJson[A] = CopK[EJsonL, A]

  // Injection instances for lifting each component functor into EJson.
  val ExtEJson = CopK.Inject[Extension, EJson]
  val CommonEJson = CopK.Inject[Common, EJson]
}
slamdata/quasar
ejson/src/main/scala/quasar/ejson/package.scala
Scala
apache-2.0
1,102
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark import java.util.{Map => JMap} import java.util.concurrent.ConcurrentHashMap import scala.collection.JavaConverters._ import scala.collection.mutable.LinkedHashSet import org.apache.avro.{Schema, SchemaNormalization} import org.apache.spark.internal.Logging import org.apache.spark.internal.config._ import org.apache.spark.internal.config.History._ import org.apache.spark.internal.config.Kryo._ import org.apache.spark.internal.config.Network._ import org.apache.spark.serializer.KryoSerializer import org.apache.spark.util.Utils /** * Configuration for a Spark application. Used to set various Spark parameters as key-value pairs. * * Most of the time, you would create a SparkConf object with `new SparkConf()`, which will load * values from any `spark.*` Java system properties set in your application as well. In this case, * parameters you set directly on the `SparkConf` object take priority over system properties. * * For unit tests, you can also call `new SparkConf(false)` to skip loading external settings and * get the same configuration no matter what the system properties are. * * All setter methods in this class support chaining. 
For example, you can write * `new SparkConf().setMaster("local").setAppName("My app")`. * * @param loadDefaults whether to also load values from Java system properties * * @note Once a SparkConf object is passed to Spark, it is cloned and can no longer be modified * by the user. Spark does not support modifying the configuration at runtime. */ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging with Serializable { import SparkConf._ /** Create a SparkConf that loads defaults from system properties and the classpath */ def this() = this(true) private val settings = new ConcurrentHashMap[String, String]() @transient private lazy val reader: ConfigReader = { val _reader = new ConfigReader(new SparkConfigProvider(settings)) _reader.bindEnv((key: String) => Option(getenv(key))) _reader } if (loadDefaults) { loadFromSystemProperties(false) } private[spark] def loadFromSystemProperties(silent: Boolean): SparkConf = { // Load any spark.* system properties for ((key, value) <- Utils.getSystemProperties if key.startsWith("spark.")) { set(key, value, silent) } this } /** Set a configuration variable. */ def set(key: String, value: String): SparkConf = { set(key, value, false) } private[spark] def set(key: String, value: String, silent: Boolean): SparkConf = { if (key == null) { throw new NullPointerException("null key") } if (value == null) { throw new NullPointerException("null value for " + key) } if (!silent) { logDeprecationWarning(key) } settings.put(key, value) this } private[spark] def set[T](entry: ConfigEntry[T], value: T): SparkConf = { set(entry.key, entry.stringConverter(value)) this } private[spark] def set[T](entry: OptionalConfigEntry[T], value: T): SparkConf = { set(entry.key, entry.rawStringConverter(value)) this } /** * The master URL to connect to, such as "local" to run locally with one thread, "local[4]" to * run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone cluster. 
*/ def setMaster(master: String): SparkConf = { set("spark.master", master) } /** Set a name for your application. Shown in the Spark web UI. */ def setAppName(name: String): SparkConf = { set("spark.app.name", name) } /** Set JAR files to distribute to the cluster. */ def setJars(jars: Seq[String]): SparkConf = { for (jar <- jars if (jar == null)) logWarning("null jar passed to SparkContext constructor") set(JARS, jars.filter(_ != null)) } /** Set JAR files to distribute to the cluster. (Java-friendly version.) */ def setJars(jars: Array[String]): SparkConf = { setJars(jars.toSeq) } /** * Set an environment variable to be used when launching executors for this application. * These variables are stored as properties of the form spark.executorEnv.VAR_NAME * (for example spark.executorEnv.PATH) but this method makes them easier to set. */ def setExecutorEnv(variable: String, value: String): SparkConf = { set("spark.executorEnv." + variable, value) } /** * Set multiple environment variables to be used when launching executors. * These variables are stored as properties of the form spark.executorEnv.VAR_NAME * (for example spark.executorEnv.PATH) but this method makes them easier to set. */ def setExecutorEnv(variables: Seq[(String, String)]): SparkConf = { for ((k, v) <- variables) { setExecutorEnv(k, v) } this } /** * Set multiple environment variables to be used when launching executors. * (Java-friendly version.) */ def setExecutorEnv(variables: Array[(String, String)]): SparkConf = { setExecutorEnv(variables.toSeq) } /** * Set the location where Spark is installed on worker nodes. 
*/ def setSparkHome(home: String): SparkConf = { set("spark.home", home) } /** Set multiple parameters together */ def setAll(settings: Iterable[(String, String)]): SparkConf = { settings.foreach { case (k, v) => set(k, v) } this } /** Set a parameter if it isn't already configured */ def setIfMissing(key: String, value: String): SparkConf = { if (settings.putIfAbsent(key, value) == null) { logDeprecationWarning(key) } this } private[spark] def setIfMissing[T](entry: ConfigEntry[T], value: T): SparkConf = { if (settings.putIfAbsent(entry.key, entry.stringConverter(value)) == null) { logDeprecationWarning(entry.key) } this } private[spark] def setIfMissing[T](entry: OptionalConfigEntry[T], value: T): SparkConf = { if (settings.putIfAbsent(entry.key, entry.rawStringConverter(value)) == null) { logDeprecationWarning(entry.key) } this } /** * Use Kryo serialization and register the given set of classes with Kryo. * If called multiple times, this will append the classes from all calls together. */ def registerKryoClasses(classes: Array[Class[_]]): SparkConf = { val allClassNames = new LinkedHashSet[String]() allClassNames ++= get(KRYO_CLASSES_TO_REGISTER).map(_.trim) .filter(!_.isEmpty) allClassNames ++= classes.map(_.getName) set(KRYO_CLASSES_TO_REGISTER, allClassNames.toSeq) set(SERIALIZER, classOf[KryoSerializer].getName) this } private final val avroNamespace = "avro.schema." 
/** * Use Kryo serialization and register the given set of Avro schemas so that the generic * record serializer can decrease network IO */ def registerAvroSchemas(schemas: Schema*): SparkConf = { for (schema <- schemas) { set(avroNamespace + SchemaNormalization.parsingFingerprint64(schema), schema.toString) } this } /** Gets all the avro schemas in the configuration used in the generic Avro record serializer */ def getAvroSchema: Map[Long, String] = { getAll.filter { case (k, v) => k.startsWith(avroNamespace) } .map { case (k, v) => (k.substring(avroNamespace.length).toLong, v) } .toMap } /** Remove a parameter from the configuration */ def remove(key: String): SparkConf = { settings.remove(key) this } private[spark] def remove(entry: ConfigEntry[_]): SparkConf = { remove(entry.key) } /** Get a parameter; throws a NoSuchElementException if it's not set */ def get(key: String): String = { getOption(key).getOrElse(throw new NoSuchElementException(key)) } /** Get a parameter, falling back to a default if not set */ def get(key: String, defaultValue: String): String = { getOption(key).getOrElse(defaultValue) } /** * Retrieves the value of a pre-defined configuration entry. * * - This is an internal Spark API. * - The return type if defined by the configuration entry. * - This will throw an exception is the config is not optional and the value is not set. */ private[spark] def get[T](entry: ConfigEntry[T]): T = { entry.readFrom(reader) } /** * Get a time parameter as seconds; throws a NoSuchElementException if it's not set. If no * suffix is provided then seconds are assumed. * @throws java.util.NoSuchElementException If the time parameter is not set * @throws NumberFormatException If the value cannot be interpreted as seconds */ def getTimeAsSeconds(key: String): Long = catchIllegalValue(key) { Utils.timeStringAsSeconds(get(key)) } /** * Get a time parameter as seconds, falling back to a default if not set. If no * suffix is provided then seconds are assumed. 
* @throws NumberFormatException If the value cannot be interpreted as seconds */ def getTimeAsSeconds(key: String, defaultValue: String): Long = catchIllegalValue(key) { Utils.timeStringAsSeconds(get(key, defaultValue)) } /** * Get a time parameter as milliseconds; throws a NoSuchElementException if it's not set. If no * suffix is provided then milliseconds are assumed. * @throws java.util.NoSuchElementException If the time parameter is not set * @throws NumberFormatException If the value cannot be interpreted as milliseconds */ def getTimeAsMs(key: String): Long = catchIllegalValue(key) { Utils.timeStringAsMs(get(key)) } /** * Get a time parameter as milliseconds, falling back to a default if not set. If no * suffix is provided then milliseconds are assumed. * @throws NumberFormatException If the value cannot be interpreted as milliseconds */ def getTimeAsMs(key: String, defaultValue: String): Long = catchIllegalValue(key) { Utils.timeStringAsMs(get(key, defaultValue)) } /** * Get a size parameter as bytes; throws a NoSuchElementException if it's not set. If no * suffix is provided then bytes are assumed. * @throws java.util.NoSuchElementException If the size parameter is not set * @throws NumberFormatException If the value cannot be interpreted as bytes */ def getSizeAsBytes(key: String): Long = catchIllegalValue(key) { Utils.byteStringAsBytes(get(key)) } /** * Get a size parameter as bytes, falling back to a default if not set. If no * suffix is provided then bytes are assumed. * @throws NumberFormatException If the value cannot be interpreted as bytes */ def getSizeAsBytes(key: String, defaultValue: String): Long = catchIllegalValue(key) { Utils.byteStringAsBytes(get(key, defaultValue)) } /** * Get a size parameter as bytes, falling back to a default if not set. 
* @throws NumberFormatException If the value cannot be interpreted as bytes
*/
def getSizeAsBytes(key: String, defaultValue: Long): Long = catchIllegalValue(key) {
  // A bare Long default is interpreted as a byte count, hence the appended "B" suffix.
  Utils.byteStringAsBytes(get(key, defaultValue + "B"))
}

/**
 * Get a size parameter as Kibibytes; throws a NoSuchElementException if it's not set. If no
 * suffix is provided then Kibibytes are assumed.
 * @throws java.util.NoSuchElementException If the size parameter is not set
 * @throws NumberFormatException If the value cannot be interpreted as Kibibytes
 */
def getSizeAsKb(key: String): Long = catchIllegalValue(key) {
  Utils.byteStringAsKb(get(key))
}

/**
 * Get a size parameter as Kibibytes, falling back to a default if not set. If no
 * suffix is provided then Kibibytes are assumed.
 * @throws NumberFormatException If the value cannot be interpreted as Kibibytes
 */
def getSizeAsKb(key: String, defaultValue: String): Long = catchIllegalValue(key) {
  Utils.byteStringAsKb(get(key, defaultValue))
}

/**
 * Get a size parameter as Mebibytes; throws a NoSuchElementException if it's not set. If no
 * suffix is provided then Mebibytes are assumed.
 * @throws java.util.NoSuchElementException If the size parameter is not set
 * @throws NumberFormatException If the value cannot be interpreted as Mebibytes
 */
def getSizeAsMb(key: String): Long = catchIllegalValue(key) {
  Utils.byteStringAsMb(get(key))
}

/**
 * Get a size parameter as Mebibytes, falling back to a default if not set. If no
 * suffix is provided then Mebibytes are assumed.
 * @throws NumberFormatException If the value cannot be interpreted as Mebibytes
 */
def getSizeAsMb(key: String, defaultValue: String): Long = catchIllegalValue(key) {
  Utils.byteStringAsMb(get(key, defaultValue))
}

/**
 * Get a size parameter as Gibibytes; throws a NoSuchElementException if it's not set. If no
 * suffix is provided then Gibibytes are assumed.
 * @throws java.util.NoSuchElementException If the size parameter is not set
 * @throws NumberFormatException If the value cannot be interpreted as Gibibytes
 */
def getSizeAsGb(key: String): Long = catchIllegalValue(key) {
  Utils.byteStringAsGb(get(key))
}

/**
 * Get a size parameter as Gibibytes, falling back to a default if not set. If no
 * suffix is provided then Gibibytes are assumed.
 * @throws NumberFormatException If the value cannot be interpreted as Gibibytes
 */
def getSizeAsGb(key: String, defaultValue: String): Long = catchIllegalValue(key) {
  Utils.byteStringAsGb(get(key, defaultValue))
}

/** Get a parameter as an Option. Falls back to any deprecated alternate key if unset. */
def getOption(key: String): Option[String] = {
  Option(settings.get(key)).orElse(getDeprecatedConfig(key, settings))
}

/** Get an optional value, applying variable substitution. */
private[spark] def getWithSubstitution(key: String): Option[String] = {
  getOption(key).map(reader.substitute)
}

/** Get all parameters as a list of pairs */
def getAll: Array[(String, String)] = {
  settings.entrySet().asScala.map(x => (x.getKey, x.getValue)).toArray
}

/**
 * Get all parameters that start with `prefix`; the prefix is stripped from the returned keys.
 */
def getAllWithPrefix(prefix: String): Array[(String, String)] = {
  getAll.filter { case (k, v) => k.startsWith(prefix) }
    .map { case (k, v) => (k.substring(prefix.length), v) }
}

/**
 * Get a parameter as an integer, falling back to a default if not set
 * @throws NumberFormatException If the value cannot be interpreted as an integer
 */
def getInt(key: String, defaultValue: Int): Int = catchIllegalValue(key) {
  getOption(key).map(_.toInt).getOrElse(defaultValue)
}

/**
 * Get a parameter as a long, falling back to a default if not set
 * @throws NumberFormatException If the value cannot be interpreted as a long
 */
def getLong(key: String, defaultValue: Long): Long = catchIllegalValue(key) {
  getOption(key).map(_.toLong).getOrElse(defaultValue)
}

/**
 * Get a parameter as a double, falling back to a default if not set
 * @throws NumberFormatException If the value cannot be interpreted as a double
 */
def getDouble(key: String, defaultValue: Double): Double = catchIllegalValue(key) {
  getOption(key).map(_.toDouble).getOrElse(defaultValue)
}

/**
 * Get a parameter as a boolean, falling back to a default if not set
 * @throws IllegalArgumentException If the value cannot be interpreted as a boolean
 */
def getBoolean(key: String, defaultValue: Boolean): Boolean = catchIllegalValue(key) {
  getOption(key).map(_.toBoolean).getOrElse(defaultValue)
}

/** Get all executor environment variables set on this SparkConf */
def getExecutorEnv: Seq[(String, String)] = {
  getAllWithPrefix("spark.executorEnv.")
}

/**
 * Returns the Spark application id, valid in the Driver after TaskScheduler registration and
 * from the start in the Executor.
 */
def getAppId: String = get("spark.app.id")

/** Does the configuration contain a given parameter? Also matches deprecated alternates. */
def contains(key: String): Boolean = {
  settings.containsKey(key) ||
    configsWithAlternatives.get(key).toSeq.flatten.exists { alt => contains(alt.key) }
}

private[spark] def contains(entry: ConfigEntry[_]): Boolean = contains(entry.key)

/** Copy this object */
override def clone: SparkConf = {
  val cloned = new SparkConf(false)
  settings.entrySet().asScala.foreach { e =>
    // silent = true: do not re-emit deprecation warnings for keys copied from this conf
    cloned.set(e.getKey(), e.getValue(), true)
  }
  cloned
}

/**
 * By using this instead of System.getenv(), environment variables can be mocked
 * in unit tests.
 */
private[spark] def getenv(name: String): String = System.getenv(name)

/**
 * Wrapper method for get() methods which require some specific value format. This catches
 * any [[NumberFormatException]] or [[IllegalArgumentException]] and re-raises it with the
 * incorrectly configured key in the exception message.
 */
private def catchIllegalValue[T](key: String)(getValue: => T): T = {
  try {
    getValue
  } catch {
    case e: NumberFormatException =>
      // NumberFormatException doesn't have a constructor that takes a cause for some reason.
      throw new NumberFormatException(s"Illegal value for config key $key: ${e.getMessage}")
        .initCause(e)
    case e: IllegalArgumentException =>
      throw new IllegalArgumentException(s"Illegal value for config key $key: ${e.getMessage}", e)
  }
}

/**
 * Checks for illegal or deprecated config settings. Throws an exception for the former. Not
 * idempotent - may mutate this conf object to convert deprecated settings to supported ones.
 */
private[spark] def validateSettings(): Unit = {
  if (contains("spark.local.dir")) {
    val msg = "Note that spark.local.dir will be overridden by the value set by " +
      "the cluster manager (via SPARK_LOCAL_DIRS in mesos/standalone/kubernetes and LOCAL_DIRS" +
      " in YARN)."
    logWarning(msg)
  }

  val executorOptsKey = EXECUTOR_JAVA_OPTIONS.key

  // Used by Yarn in 1.1 and before
  sys.props.get("spark.driver.libraryPath").foreach { value =>
    val warning =
      s"""
        |spark.driver.libraryPath was detected (set to '$value').
        |This is deprecated in Spark 1.2+.
        |
        |Please instead use: ${DRIVER_LIBRARY_PATH.key}
      """.stripMargin
    logWarning(warning)
  }

  // Validate spark.executor.extraJavaOptions
  getOption(executorOptsKey).foreach { javaOpts =>
    if (javaOpts.contains("-Dspark")) {
      val msg = s"$executorOptsKey is not allowed to set Spark options (was '$javaOpts'). " +
        "Set them directly on a SparkConf or in a properties file when using ./bin/spark-submit."
      throw new Exception(msg)
    }
    if (javaOpts.contains("-Xmx")) {
      val msg = s"$executorOptsKey is not allowed to specify max heap memory settings " +
        s"(was '$javaOpts'). Use spark.executor.memory instead."
      throw new Exception(msg)
    }
  }

  // Validate memory fractions
  for (key <- Seq(MEMORY_FRACTION.key, MEMORY_STORAGE_FRACTION.key)) {
    val value = getDouble(key, 0.5)
    if (value > 1 || value < 0) {
      throw new IllegalArgumentException(s"$key should be between 0 and 1 (was '$value').")
    }
  }

  if (contains(SUBMIT_DEPLOY_MODE)) {
    get(SUBMIT_DEPLOY_MODE) match {
      case "cluster" | "client" =>
      case e => throw new SparkException(s"${SUBMIT_DEPLOY_MODE.key} can only be " +
        "\"cluster\" or \"client\".")
    }
  }

  if (contains(CORES_MAX) && contains(EXECUTOR_CORES)) {
    val totalCores = getInt(CORES_MAX.key, 1)
    val executorCores = get(EXECUTOR_CORES)
    val leftCores = totalCores % executorCores
    if (leftCores != 0) {
      logWarning(s"Total executor cores: ${totalCores} is not " +
        s"divisible by cores per executor: ${executorCores}, " +
        s"the left cores: ${leftCores} will not be allocated")
    }
  }

  val encryptionEnabled = get(NETWORK_CRYPTO_ENABLED) || get(SASL_ENCRYPTION_ENABLED)
  require(!encryptionEnabled || get(NETWORK_AUTH_ENABLED),
    s"${NETWORK_AUTH_ENABLED.key} must be enabled when enabling encryption.")

  val executorTimeoutThresholdMs = get(NETWORK_TIMEOUT) * 1000
  val executorHeartbeatIntervalMs = get(EXECUTOR_HEARTBEAT_INTERVAL)
  val networkTimeout = NETWORK_TIMEOUT.key
  // If spark.executor.heartbeatInterval bigger than spark.network.timeout,
  // it will almost always cause ExecutorLostFailure. See SPARK-22754.
  require(executorTimeoutThresholdMs > executorHeartbeatIntervalMs, "The value of " +
    s"${networkTimeout}=${executorTimeoutThresholdMs}ms must be greater than the value of " +
    s"${EXECUTOR_HEARTBEAT_INTERVAL.key}=${executorHeartbeatIntervalMs}ms.")
}

/**
 * Return a string listing all keys and values, one per line. This is useful to print the
 * configuration out for debugging.
*/
def toDebugString: String = {
  // Sensitive values are redacted before printing.
  Utils.redact(this, getAll).sorted.map { case (k, v) => k + "=" + v }.mkString("\n")
}

} // end class SparkConf (class header is above this chunk)

private[spark] object SparkConf extends Logging {

  /**
   * Maps deprecated config keys to information about the deprecation.
   *
   * The extra information is logged as a warning when the config is present in the user's
   * configuration.
   */
  private val deprecatedConfigs: Map[String, DeprecatedConfig] = {
    val configs = Seq(
      DeprecatedConfig("spark.cache.class", "0.8",
        "The spark.cache.class property is no longer being used! Specify storage levels using " +
        "the RDD.persist() method instead."),
      DeprecatedConfig("spark.yarn.user.classpath.first", "1.3",
        "Please use spark.{driver,executor}.userClassPathFirst instead."),
      DeprecatedConfig("spark.kryoserializer.buffer.mb", "1.4",
        "Please use spark.kryoserializer.buffer instead. The default value for " +
        "spark.kryoserializer.buffer.mb was previously specified as '0.064'. Fractional values " +
        "are no longer accepted. To specify the equivalent now, one may use '64k'."),
      DeprecatedConfig("spark.rpc", "2.0", "Not used anymore."),
      DeprecatedConfig("spark.scheduler.executorTaskBlacklistTime", "2.1.0",
        "Please use the new excludedOnFailure options, spark.excludeOnFailure.*"),
      DeprecatedConfig("spark.yarn.am.port", "2.0.0", "Not used anymore"),
      DeprecatedConfig("spark.executor.port", "2.0.0", "Not used anymore"),
      DeprecatedConfig("spark.shuffle.service.index.cache.entries", "2.3.0",
        "Not used anymore. Please use spark.shuffle.service.index.cache.size"),
      DeprecatedConfig("spark.yarn.credentials.file.retention.count", "2.4.0",
        "Not used anymore."),
      DeprecatedConfig("spark.yarn.credentials.file.retention.days", "2.4.0",
        "Not used anymore."),
      DeprecatedConfig("spark.yarn.services", "3.0.0", "Feature no longer available."),
      DeprecatedConfig("spark.executor.plugins", "3.0.0",
        "Feature replaced with new plugin API. See Monitoring documentation."),
      DeprecatedConfig("spark.blacklist.enabled", "3.1.0",
        "Please use spark.excludeOnFailure.enabled"),
      DeprecatedConfig("spark.blacklist.task.maxTaskAttemptsPerExecutor", "3.1.0",
        "Please use spark.excludeOnFailure.task.maxTaskAttemptsPerExecutor"),
      DeprecatedConfig("spark.blacklist.task.maxTaskAttemptsPerNode", "3.1.0",
        "Please use spark.excludeOnFailure.task.maxTaskAttemptsPerNode"),
      DeprecatedConfig("spark.blacklist.application.maxFailedTasksPerExecutor", "3.1.0",
        "Please use spark.excludeOnFailure.application.maxFailedTasksPerExecutor"),
      DeprecatedConfig("spark.blacklist.stage.maxFailedTasksPerExecutor", "3.1.0",
        "Please use spark.excludeOnFailure.stage.maxFailedTasksPerExecutor"),
      DeprecatedConfig("spark.blacklist.application.maxFailedExecutorsPerNode", "3.1.0",
        "Please use spark.excludeOnFailure.application.maxFailedExecutorsPerNode"),
      DeprecatedConfig("spark.blacklist.stage.maxFailedExecutorsPerNode", "3.1.0",
        "Please use spark.excludeOnFailure.stage.maxFailedExecutorsPerNode"),
      DeprecatedConfig("spark.blacklist.timeout", "3.1.0",
        "Please use spark.excludeOnFailure.timeout"),
      DeprecatedConfig("spark.blacklist.application.fetchFailure.enabled", "3.1.0",
        "Please use spark.excludeOnFailure.application.fetchFailure.enabled"),
      DeprecatedConfig("spark.scheduler.blacklist.unschedulableTaskSetTimeout", "3.1.0",
        "Please use spark.scheduler.excludeOnFailure.unschedulableTaskSetTimeout"),
      DeprecatedConfig("spark.blacklist.killBlacklistedExecutors", "3.1.0",
        "Please use spark.excludeOnFailure.killExcludedExecutors"),
      DeprecatedConfig("spark.yarn.blacklist.executor.launch.blacklisting.enabled", "3.1.0",
        "Please use spark.yarn.executor.launch.excludeOnFailure.enabled")
    )

    // Index the table by deprecated key for O(1) lookup in logDeprecationWarning.
    Map(configs.map { cfg => (cfg.key -> cfg) } : _*)
  }

  /**
   * Maps a current config key to alternate keys that were used in previous version of Spark.
   *
   * The alternates are used in the order defined in this map. If deprecated configs are
   * present in the user's configuration, a warning is logged.
   *
   * TODO: consolidate it with `ConfigBuilder.withAlternative`.
   */
  private val configsWithAlternatives = Map[String, Seq[AlternateConfig]](
    EXECUTOR_USER_CLASS_PATH_FIRST.key -> Seq(
      AlternateConfig("spark.files.userClassPathFirst", "1.3")),
    UPDATE_INTERVAL_S.key -> Seq(
      AlternateConfig("spark.history.fs.update.interval.seconds", "1.4"),
      AlternateConfig("spark.history.fs.updateInterval", "1.3"),
      AlternateConfig("spark.history.updateInterval", "1.3")),
    CLEANER_INTERVAL_S.key -> Seq(
      AlternateConfig("spark.history.fs.cleaner.interval.seconds", "1.4")),
    MAX_LOG_AGE_S.key -> Seq(
      AlternateConfig("spark.history.fs.cleaner.maxAge.seconds", "1.4")),
    "spark.yarn.am.waitTime" -> Seq(
      AlternateConfig("spark.yarn.applicationMaster.waitTries", "1.3",
        // Translate old value to a duration, with 10s wait time per try.
        translation = s => s"${s.toLong * 10}s")),
    REDUCER_MAX_SIZE_IN_FLIGHT.key -> Seq(
      AlternateConfig("spark.reducer.maxMbInFlight", "1.4")),
    KRYO_SERIALIZER_BUFFER_SIZE.key -> Seq(
      AlternateConfig("spark.kryoserializer.buffer.mb", "1.4",
        translation = s => s"${(s.toDouble * 1000).toInt}k")),
    KRYO_SERIALIZER_MAX_BUFFER_SIZE.key -> Seq(
      AlternateConfig("spark.kryoserializer.buffer.max.mb", "1.4")),
    SHUFFLE_FILE_BUFFER_SIZE.key -> Seq(
      AlternateConfig("spark.shuffle.file.buffer.kb", "1.4")),
    EXECUTOR_LOGS_ROLLING_MAX_SIZE.key -> Seq(
      AlternateConfig("spark.executor.logs.rolling.size.maxBytes", "1.4")),
    IO_COMPRESSION_SNAPPY_BLOCKSIZE.key -> Seq(
      AlternateConfig("spark.io.compression.snappy.block.size", "1.4")),
    IO_COMPRESSION_LZ4_BLOCKSIZE.key -> Seq(
      AlternateConfig("spark.io.compression.lz4.block.size", "1.4")),
    RPC_NUM_RETRIES.key -> Seq(
      AlternateConfig("spark.akka.num.retries", "1.4")),
    RPC_RETRY_WAIT.key -> Seq(
      AlternateConfig("spark.akka.retry.wait", "1.4")),
    RPC_ASK_TIMEOUT.key -> Seq(
      AlternateConfig("spark.akka.askTimeout", "1.4")),
    RPC_LOOKUP_TIMEOUT.key -> Seq(
      AlternateConfig("spark.akka.lookupTimeout", "1.4")),
    "spark.streaming.fileStream.minRememberDuration" -> Seq(
      AlternateConfig("spark.streaming.minRememberDuration", "1.5")),
    "spark.yarn.max.executor.failures" -> Seq(
      AlternateConfig("spark.yarn.max.worker.failures", "1.5")),
    MEMORY_OFFHEAP_ENABLED.key -> Seq(
      AlternateConfig("spark.unsafe.offHeap", "1.6")),
    RPC_MESSAGE_MAX_SIZE.key -> Seq(
      AlternateConfig("spark.akka.frameSize", "1.6")),
    "spark.yarn.jars" -> Seq(
      AlternateConfig("spark.yarn.jar", "2.0")),
    MAX_REMOTE_BLOCK_SIZE_FETCH_TO_MEM.key -> Seq(
      AlternateConfig("spark.reducer.maxReqSizeShuffleToMem", "2.3"),
      AlternateConfig("spark.maxRemoteBlockSizeFetchToMem", "3.0")),
    LISTENER_BUS_EVENT_QUEUE_CAPACITY.key -> Seq(
      AlternateConfig("spark.scheduler.listenerbus.eventqueue.size", "2.3")),
    DRIVER_MEMORY_OVERHEAD.key -> Seq(
      AlternateConfig("spark.yarn.driver.memoryOverhead", "2.3")),
    EXECUTOR_MEMORY_OVERHEAD.key -> Seq(
      AlternateConfig("spark.yarn.executor.memoryOverhead", "2.3")),
    KEYTAB.key -> Seq(
      AlternateConfig("spark.yarn.keytab", "3.0")),
    PRINCIPAL.key -> Seq(
      AlternateConfig("spark.yarn.principal", "3.0")),
    KERBEROS_RELOGIN_PERIOD.key -> Seq(
      AlternateConfig("spark.yarn.kerberos.relogin.period", "3.0")),
    KERBEROS_FILESYSTEMS_TO_ACCESS.key -> Seq(
      AlternateConfig("spark.yarn.access.namenodes", "2.2"),
      AlternateConfig("spark.yarn.access.hadoopFileSystems", "3.0")),
    "spark.kafka.consumer.cache.capacity" -> Seq(
      AlternateConfig("spark.sql.kafkaConsumerCache.capacity", "3.0"))
  )

  /**
   * A view of `configsWithAlternatives` that makes it more efficient to look up deprecated
   * config keys.
   *
   * Maps the deprecated config name to a 2-tuple (new config name, alternate config info).
   */
  private val allAlternatives: Map[String, (String, AlternateConfig)] = {
    configsWithAlternatives.keys.flatMap { key =>
      configsWithAlternatives(key).map { cfg => (cfg.key -> (key -> cfg)) }
    }.toMap
  }

  /**
   * Return whether the given config should be passed to an executor on start-up.
*
* Certain authentication configs are required from the executor when it connects to
* the scheduler, while the rest of the spark configs can be inherited from the driver later.
*/
def isExecutorStartupConf(name: String): Boolean = {
  (name.startsWith("spark.auth") && name != SecurityManager.SPARK_AUTH_SECRET_CONF) ||
  name.startsWith("spark.rpc") ||
  name.startsWith("spark.network") ||
  isSparkPortConf(name)
}

/**
 * Return true if the given config matches either `spark.*.port` or `spark.port.*`.
 */
def isSparkPortConf(name: String): Boolean = {
  (name.startsWith("spark.") && name.endsWith(".port")) || name.startsWith("spark.port.")
}

/**
 * Looks for available deprecated keys for the given config option, and return the first
 * value available.
 *
 * @param key the current (non-deprecated) config key
 * @param conf the raw key/value settings to search
 * @return the first alternate key's value found in `conf`, translated if a translation
 *         function is defined for that alternate; `None` if no alternate is present
 */
def getDeprecatedConfig(key: String, conf: JMap[String, String]): Option[String] = {
  configsWithAlternatives.get(key).flatMap { alts =>
    alts.collectFirst {
      case alt if conf.containsKey(alt.key) =>
        val value = conf.get(alt.key)
        if (alt.translation != null) alt.translation(value) else value
    }
  }
}

/**
 * Logs a warning message if the given config key is deprecated.
 *
 * Exactly one warning is emitted, checked in order: fully-deprecated keys first
 * (`deprecatedConfigs`), then keys superseded by a new name (`allAlternatives`), then the
 * blanket Akka notice for `spark.akka.*` / `spark.ssl.akka.*` keys.
 */
def logDeprecationWarning(key: String): Unit = {
  // NOTE: this used to early-exit with `return` inside `foreach` lambdas, which compiles to a
  // nonlocal return via a NonLocalReturnControl exception (and is deprecated in Scala 3).
  // The match cascade below preserves the same "first match wins" behavior without it.
  deprecatedConfigs.get(key) match {
    case Some(cfg) =>
      logWarning(
        s"The configuration key '$key' has been deprecated as of Spark ${cfg.version} and " +
          s"may be removed in the future. ${cfg.deprecationMessage}")
    case None =>
      allAlternatives.get(key) match {
        case Some((newKey, cfg)) =>
          logWarning(
            s"The configuration key '$key' has been deprecated as of Spark ${cfg.version} and " +
              s"may be removed in the future. Please use the new key '$newKey' instead.")
        case None =>
          if (key.startsWith("spark.akka") || key.startsWith("spark.ssl.akka")) {
            logWarning(
              s"The configuration key $key is not supported anymore " +
                s"because Spark doesn't use Akka since 2.0")
          }
      }
  }
}

/**
 * Holds information about keys that have been deprecated and do not have a replacement.
 *
 * @param key The deprecated key.
 * @param version Version of Spark where key was deprecated.
* @param deprecationMessage Message to include in the deprecation warning.
*/
private case class DeprecatedConfig(
    key: String,
    version: String,
    deprecationMessage: String)

/**
 * Information about an alternate configuration key that has been deprecated.
 *
 * @param key The deprecated config key.
 * @param version The Spark version in which the key was deprecated.
 * @param translation A translation function for converting old config values into new ones.
 *                    May be `null` (callers check for null before applying it).
 */
private case class AlternateConfig(
    key: String,
    version: String,
    translation: String => String = null)

} // end object SparkConf
maropu/spark
core/src/main/scala/org/apache/spark/SparkConf.scala
Scala
apache-2.0
32,958
/*
 * Copyright 2014-2022 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.atlas.core

/**
 * Shared type aliases for the model package.
 */
package object model {

  /** A function from one Double to another. */
  type UnaryOp = Double => Double

  /** A function combining two Doubles into one. */
  type BinaryOp = (Double, Double) => Double

  /** An iterator of (id, series) pairs. Presumably the String is a series key — TODO confirm. */
  type TimeSeriesInput = Iterator[(String, TimeSeries)]
}
Netflix/atlas
atlas-core/src/main/scala/com/netflix/atlas/core/model/package.scala
Scala
apache-2.0
794
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package kafka.server

import java.util.Optional

import kafka.api._
import kafka.cluster.BrokerEndPoint
import kafka.log.LogAppendInfo
import kafka.server.AbstractFetcherThread.ReplicaFetch
import kafka.server.AbstractFetcherThread.ResultWithPartitions
import org.apache.kafka.clients.FetchSessionHandler
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.KafkaStorageException
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.record.{MemoryRecords, Records}
import org.apache.kafka.common.requests.EpochEndOffset._
import org.apache.kafka.common.requests._
import org.apache.kafka.common.utils.{LogContext, Time}

import scala.collection.JavaConverters._
import scala.collection.{mutable, Map}

/**
 * Fetcher thread that replicates partition data from a leader broker into the local replica
 * log. Extends [[AbstractFetcherThread]], overriding the hooks for building fetch requests,
 * processing fetched data, truncation, and offset/epoch lookups against the leader.
 */
class ReplicaFetcherThread(name: String,
                           fetcherId: Int,
                           sourceBroker: BrokerEndPoint,
                           brokerConfig: KafkaConfig,
                           failedPartitions: FailedPartitions,
                           replicaMgr: ReplicaManager,
                           metrics: Metrics,
                           time: Time,
                           quota: ReplicaQuota,
                           leaderEndpointBlockingSend: Option[BlockingSend] = None)
  extends AbstractFetcherThread(name = name,
                                clientId = name,
                                sourceBroker = sourceBroker,
                                failedPartitions,
                                fetchBackOffMs = brokerConfig.replicaFetchBackoffMs,
                                isInterruptible = false,
                                replicaMgr.brokerTopicStats) {

  private val replicaId = brokerConfig.brokerId
  private val logContext = new LogContext(s"[ReplicaFetcher replicaId=$replicaId, leaderId=${sourceBroker.id}, " +
    s"fetcherId=$fetcherId] ")
  this.logIdent = logContext.logPrefix

  // The injectable leaderEndpointBlockingSend is used by tests; production falls back to a
  // ReplicaFetcherBlockingSend against the source broker.
  private val leaderEndpoint = leaderEndpointBlockingSend.getOrElse(
    new ReplicaFetcherBlockingSend(sourceBroker, brokerConfig, metrics, time, fetcherId,
      s"broker-$replicaId-fetcher-$fetcherId", logContext))

  // Highest fetch request version usable given the configured inter-broker protocol version.
  // Visible for testing
  private[server] val fetchRequestVersion: Short =
    if (brokerConfig.interBrokerProtocolVersion >= KAFKA_2_3_IV1) 11
    else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_2_1_IV2) 10
    else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_2_0_IV1) 8
    else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_1_1_IV0) 7
    else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_0_11_0_IV1) 5
    else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_0_11_0_IV0) 4
    else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_0_10_1_IV1) 3
    else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_0_10_0_IV0) 2
    else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_0_9_0) 1
    else 0

  // Visible for testing
  private[server] val offsetForLeaderEpochRequestVersion: Short =
    if (brokerConfig.interBrokerProtocolVersion >= KAFKA_2_3_IV1) 3
    else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_2_1_IV1) 2
    else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_2_0_IV0) 1
    else 0

  // Visible for testing
  private[server] val listOffsetRequestVersion: Short =
    if (brokerConfig.interBrokerProtocolVersion >= KAFKA_2_2_IV1) 5
    else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_2_1_IV1) 4
    else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_2_0_IV1) 3
    else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_0_11_0_IV0) 2
    else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_0_10_1_IV2) 1
    else 0

  private val maxWait = brokerConfig.replicaFetchWaitMaxMs
  private val minBytes = brokerConfig.replicaFetchMinBytes
  private val maxBytes = brokerConfig.replicaFetchResponseMaxBytes
  private val fetchSize = brokerConfig.replicaFetchMaxBytes
  private val brokerSupportsLeaderEpochRequest = brokerConfig.interBrokerProtocolVersion >= KAFKA_0_11_0_IV2
  val fetchSessionHandler = new FetchSessionHandler(logContext, sourceBroker.id)

  override protected def latestEpoch(topicPartition: TopicPartition): Option[Int] = {
    replicaMgr.localLogOrException(topicPartition).latestEpoch
  }

  override protected def logStartOffset(topicPartition: TopicPartition): Long = {
    replicaMgr.localLogOrException(topicPartition).logStartOffset
  }

  override protected def logEndOffset(topicPartition: TopicPartition): Long = {
    replicaMgr.localLogOrException(topicPartition).logEndOffset
  }

  override protected def endOffsetForEpoch(topicPartition: TopicPartition, epoch: Int): Option[OffsetAndEpoch] = {
    replicaMgr.localLogOrException(topicPartition).endOffsetForEpoch(epoch)
  }

  override def initiateShutdown(): Boolean = {
    val justShutdown = super.initiateShutdown()
    if (justShutdown) {
      // This is thread-safe, so we don't expect any exceptions, but catch and log any errors
      // to avoid failing the caller, especially during shutdown. We will attempt to close
      // leaderEndpoint after the thread terminates.
      try {
        leaderEndpoint.initiateClose()
      } catch {
        case t: Throwable =>
          error(s"Failed to initiate shutdown of leader endpoint $leaderEndpoint after initiating replica fetcher thread shutdown", t)
      }
    }
    justShutdown
  }

  override def awaitShutdown(): Unit = {
    super.awaitShutdown()
    // We don't expect any exceptions here, but catch and log any errors to avoid failing the caller,
    // especially during shutdown. It is safe to catch the exception here without causing correctness
    // issue because we are going to shutdown the thread and will not re-use the leaderEndpoint anyway.
    try {
      leaderEndpoint.close()
    } catch {
      case t: Throwable =>
        error(s"Failed to close leader endpoint $leaderEndpoint after shutting down replica fetcher thread", t)
    }
  }

  // process fetched data: append the leader's records to the local log and advance
  // high watermark / log start offset based on the leader's values.
  override def processPartitionData(topicPartition: TopicPartition,
                                   fetchOffset: Long,
                                   partitionData: FetchData): Option[LogAppendInfo] = {
    val partition = replicaMgr.nonOfflinePartition(topicPartition).get
    val log = partition.localLogOrException
    val records = toMemoryRecords(partitionData.records)

    maybeWarnIfOversizedRecords(records, topicPartition)

    // The fetch offset must line up with the current log end offset, or the append would
    // corrupt the log.
    if (fetchOffset != log.logEndOffset)
      throw new IllegalStateException("Offset mismatch for partition %s: fetched offset = %d, log end offset = %d.".format(
        topicPartition, fetchOffset, log.logEndOffset))

    if (isTraceEnabled)
      trace("Follower has replica log end offset %d for partition %s. Received %d messages and leader hw %d"
        .format(log.logEndOffset, topicPartition, records.sizeInBytes, partitionData.highWatermark))

    // Append the leader's messages to the log
    val logAppendInfo = partition.appendRecordsToFollowerOrFutureReplica(records, isFuture = false)

    if (isTraceEnabled)
      trace("Follower has replica log end offset %d after appending %d bytes of messages for partition %s"
        .format(log.logEndOffset, records.sizeInBytes, topicPartition))
    val leaderLogStartOffset = partitionData.logStartOffset

    // For the follower replica, we do not need to keep its segment base offset and physical position.
    // These values will be computed upon becoming leader or handling a preferred read replica fetch.
    val followerHighWatermark = log.updateHighWatermark(partitionData.highWatermark)
    log.maybeIncrementLogStartOffset(leaderLogStartOffset)
    if (isTraceEnabled)
      trace(s"Follower set replica high watermark for partition $topicPartition to $followerHighWatermark")

    // Traffic from both in-sync and out of sync replicas are accounted for in replication quota to ensure total replication
    // traffic doesn't exceed quota.
    if (quota.isThrottled(topicPartition))
      quota.record(records.sizeInBytes)

    if (partition.isReassigning && partition.isAddingLocalReplica)
      brokerTopicStats.updateReassignmentBytesIn(records.sizeInBytes)

    brokerTopicStats.updateReplicationBytesIn(records.sizeInBytes)

    logAppendInfo
  }

  def maybeWarnIfOversizedRecords(records: MemoryRecords, topicPartition: TopicPartition): Unit = {
    // oversized messages don't cause replication to fail from fetch request version 3 (KIP-74)
    if (fetchRequestVersion <= 2 && records.sizeInBytes > 0 && records.validBytes <= 0)
      error(s"Replication is failing due to a message that is greater than replica.fetch.max.bytes for partition $topicPartition. " +
        "This generally occurs when the max.message.bytes has been overridden to exceed this value and a suitably large " +
        "message has also been sent. To fix this problem increase replica.fetch.max.bytes in your broker config to be " +
        "equal or larger than your settings for max.message.bytes, both at a broker and topic level.")
  }

  // Sends the fetch request to the leader; any error is first reported to the fetch session
  // handler (so session state is reset) and then rethrown.
  override protected def fetchFromLeader(fetchRequest: FetchRequest.Builder): Map[TopicPartition, FetchData] = {
    try {
      val clientResponse = leaderEndpoint.sendRequest(fetchRequest)
      val fetchResponse = clientResponse.responseBody.asInstanceOf[FetchResponse[Records]]
      if (!fetchSessionHandler.handleResponse(fetchResponse)) {
        // Session-level error: return no data for this round.
        Map.empty
      } else {
        fetchResponse.responseData.asScala
      }
    } catch {
      case t: Throwable =>
        fetchSessionHandler.handleError(t)
        throw t
    }
  }

  override protected def fetchEarliestOffsetFromLeader(topicPartition: TopicPartition, currentLeaderEpoch: Int): Long = {
    fetchOffsetFromLeader(topicPartition, currentLeaderEpoch, ListOffsetRequest.EARLIEST_TIMESTAMP)
  }

  override protected def fetchLatestOffsetFromLeader(topicPartition: TopicPartition, currentLeaderEpoch: Int): Long = {
    fetchOffsetFromLeader(topicPartition, currentLeaderEpoch, ListOffsetRequest.LATEST_TIMESTAMP)
  }

  // Issues a ListOffset request against the leader for a single partition and returns the
  // resolved offset, or throws the error reported by the leader.
  private def fetchOffsetFromLeader(topicPartition: TopicPartition, currentLeaderEpoch: Int, earliestOrLatest: Long): Long = {
    val requestPartitionData = new ListOffsetRequest.PartitionData(earliestOrLatest,
      Optional.of[Integer](currentLeaderEpoch))
    val requestPartitions = Map(topicPartition -> requestPartitionData)
    val requestBuilder = ListOffsetRequest.Builder.forReplica(listOffsetRequestVersion, replicaId)
      .setTargetTimes(requestPartitions.asJava)

    val clientResponse = leaderEndpoint.sendRequest(requestBuilder)
    val response = clientResponse.responseBody.asInstanceOf[ListOffsetResponse]
    val responsePartitionData = response.responseData.get(topicPartition)

    responsePartitionData.error match {
      case Errors.NONE =>
        // Pre-0.10.1 protocol versions return a list of offsets instead of a single offset.
        if (brokerConfig.interBrokerProtocolVersion >= KAFKA_0_10_1_IV2)
          responsePartitionData.offset
        else
          responsePartitionData.offsets.get(0)
      case error => throw error.exception
    }
  }

  override def buildFetch(partitionMap: Map[TopicPartition, PartitionFetchState]): ResultWithPartitions[Option[ReplicaFetch]] = {
    val partitionsWithError = mutable.Set[TopicPartition]()

    val builder = fetchSessionHandler.newBuilder(partitionMap.size, false)
    partitionMap.foreach { case (topicPartition, fetchState) =>
      // We will not include a replica in the fetch request if it should be throttled.
      if (fetchState.isReadyForFetch && !shouldFollowerThrottle(quota, fetchState, topicPartition)) {
        try {
          val logStartOffset = this.logStartOffset(topicPartition)
          builder.add(topicPartition, new FetchRequest.PartitionData(
            fetchState.fetchOffset, logStartOffset, fetchSize, Optional.of(fetchState.currentLeaderEpoch)))
        } catch {
          case _: KafkaStorageException =>
            // The replica has already been marked offline due to log directory failure and the original failure should have already been logged.
            // This partition should be removed from ReplicaFetcherThread soon by ReplicaManager.handleLogDirFailure()
            partitionsWithError += topicPartition
        }
      }
    }

    val fetchData = builder.build()
    val fetchRequestOpt = if (fetchData.sessionPartitions.isEmpty && fetchData.toForget.isEmpty) {
      // Nothing to send and nothing to forget: skip the fetch round entirely.
      None
    } else {
      val requestBuilder = FetchRequest.Builder
        .forReplica(fetchRequestVersion, replicaId, maxWait, minBytes, fetchData.toSend)
        .setMaxBytes(maxBytes)
        .toForget(fetchData.toForget)
        .metadata(fetchData.metadata)
      Some(ReplicaFetch(fetchData.sessionPartitions(), requestBuilder))
    }

    ResultWithPartitions(fetchRequestOpt, partitionsWithError)
  }

  /**
   * Truncate the log for each partition's epoch based on leader's returned epoch and offset.
   * The logic for finding the truncation offset is implemented in AbstractFetcherThread.getOffsetTruncationState
   */
  override def truncate(tp: TopicPartition, offsetTruncationState: OffsetTruncationState): Unit = {
    val partition = replicaMgr.nonOfflinePartition(tp).get
    val log = partition.localLogOrException

    partition.truncateTo(offsetTruncationState.offset, isFuture = false)

    if (offsetTruncationState.offset < log.highWatermark)
      warn(s"Truncating $tp to offset ${offsetTruncationState.offset} below high watermark " +
        s"${log.highWatermark}")

    // mark the future replica for truncation only when we do last truncation
    if (offsetTruncationState.truncationCompleted)
      replicaMgr.replicaAlterLogDirsManager.markPartitionsForTruncation(brokerConfig.brokerId, tp,
        offsetTruncationState.offset)
  }

  override protected def truncateFullyAndStartAt(topicPartition: TopicPartition, offset: Long): Unit = {
    val partition = replicaMgr.nonOfflinePartition(topicPartition).get
    partition.truncateFullyAndStartAt(offset, isFuture = false)
  }

  override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = {
    if (partitions.isEmpty) {
      debug("Skipping leaderEpoch request since all partitions do not have an epoch")
      return Map.empty
    }

    val epochRequest = OffsetsForLeaderEpochRequest.Builder.forFollower(offsetForLeaderEpochRequestVersion, partitions.asJava, brokerConfig.brokerId)
    debug(s"Sending offset for leader epoch request $epochRequest")

    try {
      val response = leaderEndpoint.sendRequest(epochRequest)
      val responseBody = response.responseBody.asInstanceOf[OffsetsForLeaderEpochResponse]
      debug(s"Received leaderEpoch response $response")
      responseBody.responses.asScala
    } catch {
      case t: Throwable =>
        warn(s"Error when sending leader epoch request for $partitions", t)

        // if we get any unexpected exception, mark all partitions with an error
        val error = Errors.forException(t)
        partitions.map { case (tp, _) =>
          tp -> new EpochEndOffset(error, UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET)
        }
    }
  }

  override def isOffsetForLeaderEpochSupported: Boolean = brokerSupportsLeaderEpochRequest

  /**
   * To avoid ISR thrashing, we only throttle a replica on the follower if it's in the throttled replica list,
   * the quota is exceeded and the replica is not in sync.
   */
  private def shouldFollowerThrottle(quota: ReplicaQuota, fetchState: PartitionFetchState, topicPartition: TopicPartition): Boolean = {
    !fetchState.isReplicaInSync && quota.isThrottled(topicPartition) && quota.isQuotaExceeded
  }
}
noslowerdna/kafka
core/src/main/scala/kafka/server/ReplicaFetcherThread.scala
Scala
apache-2.0
16,446
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package org.scalastyle.scalariform

import _root_.scalariform.parser._
import org.scalastyle.{CombinedChecker, ScalastyleError, CombinedAst, Lines, PositionError}
import org.scalastyle.scalariform.VisitorHelper.{TreeVisit, visit}

import scala.annotation.tailrec

/**
 * According to the Effective Scala "http://twitter.github.io/effectivescala/index.html#Formatting-Whitespace"
 * they suggest: use one blank line between method, class, and object definitions.
 *
 * This checker flags class/object/method definitions that are not preceded by a
 * blank line (comment lines directly above a definition are skipped when
 * looking for the blank line).
 *
 * Configuration: to allow a method to follow its enclosing class/object header
 * with no blank line, set the parameter to "true" in the configuration XML;
 * the default is "false":
 *
 * <parameters>
 *  <parameter name="NoBlankAfterClassAllowed">true</parameter>
 * </parameters>
 */
class BlankLineBetweenDefinitionChecker extends CombinedChecker {
  val errorKey = "blankline.between.definition"
  // Configuration parameter name and its default value.
  val paramNoBlankAfterClassAllowed = "NoBlankAfterClassAllowed"
  val paramNoBlankAfterClassAllowedDefValue = false

  // One node per template definition, carrying its nested template definitions.
  case class TmplClazz(t: TmplDef, subs: List[TmplClazz]) extends TreeVisit[TmplClazz]

  // Visitor callback: wraps a TmplDef and recursively collects nested TmplDefs
  // found inside its template body.
  private def map(t: TmplDef): List[TmplClazz] = List(TmplClazz(t, visit(map)(t.templateBodyOption)))

  /** Entry point called by the scalastyle engine: collects every template
    * definition in the compilation unit and verifies each one. */
  final def verify(ast: CombinedAst): List[ScalastyleError] = {
    val noBlankAfterClassAllowed = getBoolean(paramNoBlankAfterClassAllowed, paramNoBlankAfterClassAllowedDefValue)
    val cu = ast.compilationUnit
    val clazz = visit[TmplDef, TmplClazz](map)(cu.immediateChildren(0));
    clazz.map(verifyClazz(_, ast.lines, noBlankAfterClassAllowed)).flatten
  }

  // Verifies one template definition and recurses into its nested definitions.
  private def verifyClazz(t: TmplClazz, lines: Lines, noBlankAfterClassAllowed: Boolean): List[ScalastyleError] =
    t.t.templateBodyOption match {
      case Some(b) => verifyBody(b, lines, noBlankAfterClassAllowed) ::: t.subs.map(verifyClazz(_, lines, noBlankAfterClassAllowed)).flatten
      case None => Nil
    }

  // Checks every def/class/object statement inside one template body.
  private def verifyBody(b: TemplateBody, lines: Lines, noBlankAfterClassAllowed: Boolean): List[ScalastyleError] = {
    val ss = b.statSeq
    // Line of the body's opening token (the class/object header line).
    val head = lines.toLineColumn(b.firstToken.offset).get.line
    val otherStats = for ( (t, Some(s)) <- ss.otherStats ) yield s
    val stats = ss.firstStatOpt match {
      case Some(s) => s :: otherStats
      case None => otherStats
    }
    // Pair each statement with its (startLine, endLine) code range.
    val statsWithCodeRange = for ( s <- stats; r <- toCodeRanges(s, lines) ) yield (s, r)
    // Only function and template definitions are subject to the blank-line rule.
    val defOrDclsWithCodeRange = statsWithCodeRange.filter {
      case (x: FullDefOrDcl, _) if x.defOrDcl.isInstanceOf[FunDefOrDcl] || x.defOrDcl.isInstanceOf[TmplDef] => true
      case _ => false
    }
    val results = for (
      d <- defOrDclsWithCodeRange;
      a <- check(d, statsWithCodeRange, lines, head, noBlankAfterClassAllowed)
    ) yield a
    results
  }

  // Checks a single definition: reports a PositionError at its first token when
  // no blank line separates it from the preceding statement (or, unless
  // noBlankAfterClassAllowed, from the enclosing class header line).
  private def check(t: (Stat, (Int, Int)), cs: List[(Stat, (Int, Int))], lines: Lines, head: Int, noBlankAfterClassAllowed: Boolean): Option[ScalastyleError] = {
    val (funDef, (lineNumber, _)) = t
    // End lines of every *other* statement in the same body.
    val exprList = for (c <- cs if c._2._2 != lineNumber) yield c._2._2
    val exprListWithClassDef = if (noBlankAfterClassAllowed) {
      exprList
    } else {
      // Treat the class header line like a statement end, so a definition
      // directly under the header also requires a blank line.
      head :: exprList
    }
    funDef.firstTokenOption.flatMap(t => {
      if (checkIsBlankLine(lineNumber, lines, exprListWithClassDef)) {
        None
      } else {
        Some(PositionError(t.offset))
      }
    })
  }

  final val BlockCommentStart = """/\\*""".r
  final val BlockCommentFinish = """\\*/""".r
  // Converts 1-based source line numbers to 0-based array indices.
  final val LineNumber2ArrayAdjuster = 1

  // Walks upward from the definition's start line, skipping comment lines.
  // Returns true once a blank line is found, false if another statement's end
  // line is reached first. NOTE(review): relies on Lines being 1-indexed via
  // LineNumber2ArrayAdjuster — confirm against org.scalastyle.Lines.
  @tailrec
  private def checkIsBlankLine(lineNumber: Int, source: Lines, endOfExprList: List[Int]): Boolean = {
    if (!endOfExprList.contains(lineNumber)) {
      val skip = skipComment(lineNumber, source)
      if (skip == lineNumber) true else checkIsBlankLine(skip, source, endOfExprList)
    } else {
      false
    }
  }

  // Returns the next line number to inspect above any comment block ending at
  // lineNumber; returns lineNumber unchanged when that line is empty (blank).
  private def skipComment(lineNumber: Int, source: Lines): Int = {
    val text = source.lines(lineNumber - LineNumber2ArrayAdjuster).text
    if (text.length != 0) {
      execSkipComment(lineNumber, source, 0)
    } else {
      lineNumber
    }
  }

  // Scans upward balancing block-comment delimiters ("*/" opens a pending
  // level when reading bottom-up, "/*" closes it); stops one line above the
  // line where the delimiters balance out.
  @tailrec
  private def execSkipComment(lineNumber: Int, source: Lines, blockCommentLevel: Int): Int = {
    val text = source.lines(lineNumber - LineNumber2ArrayAdjuster).text
    val diff = blockCommentLevel + BlockCommentFinish.findAllIn(text).length - BlockCommentStart.findAllIn(text).length
    if (diff == 0) {
      lineNumber - 1
    } else {
      execSkipComment(lineNumber - 1, source, diff)
    }
  }

  // Maps a statement to its (firstLine, lastLine) range, when both tokens and
  // their line positions are resolvable.
  private def toCodeRanges(stats: Stat, lines: Lines): Option[(Int, Int)] = {
    val offsets = (stats.firstTokenOption, stats.lastTokenOption) match {
      case (Some(s), Some(e)) => Some((s.offset, e.offset))
      case _ => None
    }
    for ((s, e) <- offsets;
      sLine <- lines.toLineColumn(s);
      eLine <- lines.toLineColumn(e)
    ) yield (sLine.line, eLine.line)
  }
}
dwango/scalastyle
src/main/scala/org/scalastyle/scalariform/BlankLineBetweenDefinitionChecker.scala
Scala
apache-2.0
5,805
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.datasources.oap.utils import java.lang.{Double => JDouble, Float => JFloat} import scala.util.Failure import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.{FileSystem, Path} import org.apache.parquet.bytes.BytesUtils import org.apache.parquet.io.api.Binary import org.apache.spark.internal.Logging import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.{CatalystTypeConverters, InternalRow} import org.apache.spark.sql.catalyst.catalog.CatalogTypes._ import org.apache.spark.sql.catalyst.expressions.{AttributeReference, EqualTo, Literal} import org.apache.spark.sql.execution.datasources.{FileIndex, PartitionDirectory, PartitioningUtils} import org.apache.spark.sql.execution.datasources.oap.{DataSourceMeta, Key, OapFileFormat} import org.apache.spark.sql.oap.adapter.FileIndexAdapter import org.apache.spark.sql.types._ import org.apache.spark.unsafe.types.UTF8String /** * Utils for Oap */ object OapUtils extends Logging { def getMeta(hadoopConf: Configuration, parent: Path): Option[DataSourceMeta] = { val file = new Path(parent, OapFileFormat.OAP_META_FILE) if (file.getFileSystem(hadoopConf).exists(file)) { 
Some(DataSourceMeta.initialize(file, hadoopConf)) } else { None } } def getPartitions( fileIndex: FileIndex, partitionSpec: Option[TablePartitionSpec] = None): Seq[PartitionDirectory] = { val filters = if (partitionSpec.nonEmpty) { val partitionColumnsInfo: Map[String, DataType] = fileIndex.partitionSchema.map { field => (field.name, field.dataType) }.toMap // partition column spec check if (!partitionSpec.get.keys.forall( partitionColumnsInfo.keys.toSeq.contains(_))) { throw new AnalysisException( s"Partition spec is invalid. The spec (${partitionSpec.get.keys.mkString(", ")})" + s" must match the partition spec (${partitionColumnsInfo.keys.mkString(", ")})") } partitionSpec.get.map { case (key, value) => val v = partitionColumnsInfo.get(key).get match { case StringType => value case IntegerType => value.toInt case LongType => value.toLong case BooleanType => value.toBoolean case DateType => java.sql.Date.valueOf(value) case DoubleType => value.toDouble case FloatType => value.toFloat case ByteType => value.toByte case ShortType => value.toShort // OapFileFormat only support the above partition key type. case _: DataType => throw new AnalysisException( s"Only handle partition key type in common use, check the partition key type:" + s" ${partitionColumnsInfo.get(key).get.toString}") } EqualTo(AttributeReference(key, partitionColumnsInfo.get(key).get)(), Literal(v)) }.toSeq } else { Nil } FileIndexAdapter.listFiles(fileIndex, filters, Nil) } /** * Get partition directory path(s) which has oap meta or data files, * return directories' paths if data is partitioned, or a single path if data is unpartitioned. 
* @param rootPaths the root paths of [[FileIndex]] of the relation * @param fs File system * @param partitionSchema partitioned column(s) schema of the relation * @param partitionSpec Schema of the partitioning columns, * or the empty schema if the table is not partitioned * @return all valid path(s) of directories containing meta or data files pertain to the table */ def getPartitionPaths( rootPaths: Seq[Path], fs: FileSystem, partitionSchema: StructType, partitionSpec: Option[TablePartitionSpec] = None): Seq[Path] = { val directoryPaths = if (partitionSpec.nonEmpty) { val partitionAttributes = partitionSchema.map(field => (field.name, field.dataType)).toMap if (!partitionSpec.get.keys.forall(partitionAttributes.contains)) { throw new AnalysisException( s"""Partition spec is invalid. The spec (${partitionSpec.get.keys.mkString(", ")}) |must match the partition spec (${partitionAttributes.mkString(", ")})""") } partitionSpec.get.foreach { case (attrName, value) => val typeMatch = partitionAttributes(attrName) match { case StringType => scala.util.Try(value.toString) case IntegerType => scala.util.Try(value.toInt) case LongType => scala.util.Try(value.toLong) case BooleanType => scala.util.Try(value.toBoolean) case _: DataType => throw new AnalysisException( s"Only handle partition key type in common use, check the partition key type:" + s" ${partitionAttributes(attrName).toString}") } typeMatch match { case Failure(_) => throw new AnalysisException( s"Type mismatch, value $value cannot convert to partition key type: " + partitionAttributes(attrName).toString) case _ => } } val pathFragment = PartitioningUtils.getPathFragment(partitionSpec.get, partitionSchema) rootPaths.map(rootPath => new Path(rootPath, pathFragment)).filter(fs.exists) } else { rootPaths } getPartitionPaths(directoryPaths, fs) } /** * Scan and Get the table's all directory path(s) which has oap meta file or data files * @param directoryPaths the input path(s) to search * @param fs File system * 
@return the table's all directory path(s) which has oap meta file or data files */ private def getPartitionPaths(directoryPaths: Seq[Path], fs: FileSystem): Seq[Path] = { directoryPaths.filter(isTargetPath(_, fs)) ++ fs.listStatus(directoryPaths.toArray[Path]).filter(_.isDirectory).flatMap { status => getPartitionPaths(Seq(status.getPath), fs) } } /** * identify whether a path contains meta file or data files(s) * @param path the path to be checked * @param fs file system * @return true if the path directory contains meta or data file(s), otherwise false */ private def isTargetPath(path: Path, fs: FileSystem): Boolean = { fs.exists(new Path(path, OapFileFormat.OAP_META_FILE)) || fs.listStatus(path).filter(_.isFile).exists(status => isDataPath(status.getPath)) } private def isDataPath(path: Path): Boolean = { val name = path.getName !((name.startsWith("_") && !name.contains("=")) || name.startsWith(".")) } def keyFromBytes(bytes: Array[Byte], dataType: DataType): Option[Key] = { val value: Option[Any] = dataType match { case BooleanType => Some(BytesUtils.bytesToBool(bytes)) case IntegerType => Some(BytesUtils.bytesToInt(bytes)) case LongType => Some(BytesUtils.bytesToLong(bytes)) case DoubleType => Some(JDouble.longBitsToDouble(BytesUtils.bytesToLong(bytes))) case FloatType => Some(JFloat.intBitsToFloat(BytesUtils.bytesToInt(bytes))) case StringType => Some(UTF8String.fromBytes(bytes)) case BinaryType => Some(Binary.fromReusedByteArray(bytes)) case _ => None } value.map(v => InternalRow(CatalystTypeConverters.convertToCatalyst(v))) } def keyFromAny(value: Any): Key = InternalRow(CatalystTypeConverters.convertToCatalyst(value)) /** Deprecated * Refresh any cached file listings of @param fileIndex, * and return partitions if data is partitioned, or a single partition if data is unpartitioned. 
* indicate all valid files grouped into partition(s) on the disk * @param fileIndex [[FileIndex]] of a relation * @param partitionSpec the specification of the partitions * @return all valid files grouped into partition(s) on the disk */ @deprecated("use \\"getPartitionPaths\\" to get valid partition path(s)", "0.3") def getPartitionsRefreshed( fileIndex: FileIndex, partitionSpec: Option[TablePartitionSpec] = None): Seq[PartitionDirectory] = { fileIndex.refresh() getPartitions(fileIndex, partitionSpec) } }
Intel-bigdata/OAP
oap-cache/oap/src/main/scala/org/apache/spark/sql/execution/datasources/oap/utils/OapUtils.scala
Scala
apache-2.0
8,955
package kr.ac.kaist.ir.deep.train

import kr.ac.kaist.ir.deep.fn._
import kr.ac.kaist.ir.deep.layer.NormalizeOperation
import kr.ac.kaist.ir.deep.network.Network
import kr.ac.kaist.ir.deep.rec.BinaryTree

/**
 * __Input Operation__ : VectorTree as Input & Recursive Auto-Encoder Training (no output type)
 *
 * @note We recommend that you should not apply this method to non-AutoEncoder tasks
 * @note This implementation designed as a replica of the standard RAE (RAE + normalization) in
 *       [[http://ai.stanford.edu/~ang/papers/emnlp11-RecursiveAutoencodersSentimentDistributions.pdf this paper]]
 *
 * @param corrupt Corruption that supervises how to corrupt the input matrix. `(Default : [[kr.ac.kaist.ir.deep.train.NoCorruption]])`
 * @param error An objective function `(Default: [[kr.ac.kaist.ir.deep.fn.SquaredErr]])`
 *
 * @example
 * {{{var make = new StandardRAEType(error = CrossEntropyErr)
 *  var corruptedIn = make corrupted in
 *  var out = make onewayTrip (net, corruptedIn)}}}
 */
class StandardRAEType(override val corrupt: Corruption = NoCorruption,
                      override val error: Objective = SquaredErr)
  extends TreeType {
  /** Normalization layer appended after the network's output (RAE + normalization). */
  val normalizeLayer = new NormalizeOperation()

  /**
   * Apply & Back-prop given single input.
   *
   * Walks the binary tree bottom-up (`forward`): at each node the network's
   * output is normalized, the error gradient is computed against the node's
   * own input `x` (auto-encoder reconstruction), and the gradient is pushed
   * back through the normalization layer and then the network.
   *
   * @param net A network that gets input
   * @param delta Sequence of delta updates (consumed via one shared iterator
   *              by both the normalization layer and the network)
   */
  def roundTrip(net: Network, delta: Seq[ScalarMatrix]) = (in: BinaryTree, real: Null) ⇒ {
    in forward { x ⇒
      val out = net passedBy x
      val zOut = normalizeLayer passedBy out
      val dit = delta.toIterator
      // un-normalize the error
      val normalErr = error.derivative(x, zOut)
      val err = normalizeLayer updateBy(dit, normalErr)
      net updateBy(dit, err)
      // propagate hidden-layer value
      net(x)
    }
  }

  /**
   * Apply given input and compute the error.
   *
   * Accumulates the reconstruction error of every tree node: each node's
   * normalized network output is compared against the node's input `x`.
   *
   * @param net A network that gets input
   * @param pair (Input, Real output) for error computation.
   * @return error of this network (sum over all tree nodes)
   */
  def lossOf(net: Network)(pair: (BinaryTree, Null)): Scalar = {
    var total = 0.0f
    val in = pair._1
    in forward { x ⇒
      val out = net of x
      val normalized = normalizeLayer(out)
      total += error(x, normalized)
      //propagate hidden-layer value
      net(x)
    }
    total
  }

  /**
   * Make validation output.
   *
   * Renders, for every tree node, the input, the normalized reconstruction,
   * and the hidden representation that is propagated upward.
   *
   * @return input as string
   */
  def stringOf(net: Network, pair: (BinaryTree, Null)): String = {
    val string = StringBuilder.newBuilder
    pair._1 forward { x ⇒
      val out = net of x
      val normalized = normalizeLayer(out)
      val hid = net(x)
      string append s"IN: ${x.mkString} RAE → OUT: ${normalized.mkString}, HDN: ${hid.mkString}; "
      // propagate hidden-layer value
      hid
    }
    string.mkString
  }
}
nearbydelta/ScalaNetwork
src/main/scala/kr/ac/kaist/ir/deep/train/StandardRAEType.scala
Scala
gpl-2.0
2,902
/* * Copyright 2016 The BigDL Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.analytics.bigdl.nn import org.scalatest.FlatSpec import com.intel.analytics.bigdl.tensor.Tensor import scala.math._ @com.intel.analytics.bigdl.tags.Parallel class MSECriterionSpec extends FlatSpec { "A MSE Criterion " should "generate correct output and grad" in { val mse = new MSECriterion[Double] val input = Tensor[Double](2, 2, 2) input(Array(1, 1, 1)) = 0.17503996845335 input(Array(1, 1, 2)) = 0.83220188552514 input(Array(1, 2, 1)) = 0.48450597329065 input(Array(1, 2, 2)) = 0.64701424003579 input(Array(2, 1, 1)) = 0.62694586534053 input(Array(2, 1, 2)) = 0.34398410236463 input(Array(2, 2, 1)) = 0.55356747563928 input(Array(2, 2, 2)) = 0.20383032318205 val target = Tensor[Double](2, 2, 2) target(Array(1, 1, 1)) = 0.69956525065936 target(Array(1, 1, 2)) = 0.86074831243604 target(Array(1, 2, 1)) = 0.54923197557218 target(Array(1, 2, 2)) = 0.57388074393384 target(Array(2, 1, 1)) = 0.63334444304928 target(Array(2, 1, 2)) = 0.99680578662083 target(Array(2, 2, 1)) = 0.49997645849362 target(Array(2, 2, 2)) = 0.23869121982716 val expectedOutput = 0.08947300078144 val expectedGrad = Tensor[Double](2, 2, 2) expectedGrad(Array(1, 1, 1)) = -0.1311313205515 expectedGrad(Array(1, 1, 2)) = -0.0071366067277268 expectedGrad(Array(1, 2, 1)) = -0.016181500570383 expectedGrad(Array(1, 2, 2)) = 0.018283374025486 expectedGrad(Array(2, 1, 1)) = -0.0015996444271877 expectedGrad(Array(2, 1, 2)) = 
-0.16320542106405 expectedGrad(Array(2, 2, 1)) = 0.013397754286416 expectedGrad(Array(2, 2, 2)) = -0.0087152241612785 val output = mse.forward(input, target) val gradInput = mse.backward(input, target) assert(abs(expectedOutput - output) < 1e-6) expectedGrad.map(gradInput, (v1, v2) => { assert(abs(v1 - v2) < 1e-6); v1 }) } "A MSE Criterion with sizeAverage:false " should "generate correct output and grad" in { val mse = new MSECriterion[Double] mse.sizeAverage = false val input = Tensor[Double](2, 2, 2) input(Array(1, 1, 1)) = 0.64631252549589 input(Array(1, 1, 2)) = 0.1541522629559 input(Array(1, 2, 1)) = 0.6778122568503 input(Array(1, 2, 2)) = 0.55571207939647 input(Array(2, 1, 1)) = 0.53701480175368 input(Array(2, 1, 2)) = 0.83826910308562 input(Array(2, 2, 1)) = 0.27449130127206 input(Array(2, 2, 2)) = 0.63781907199882 val target = Tensor[Double](2, 2, 2) target(Array(1, 1, 1)) = 0.8999215872027 target(Array(1, 1, 2)) = 0.7839112279471 target(Array(1, 2, 1)) = 0.11587709793821 target(Array(1, 2, 2)) = 0.39529220713302 target(Array(2, 1, 1)) = 0.8202251160983 target(Array(2, 1, 2)) = 0.41274098632857 target(Array(2, 2, 1)) = 0.37541538593359 target(Array(2, 2, 2)) = 0.34106521727517 val expectedOutput = 1.1619503498653 val expectedGrad = Tensor[Double](2, 2, 2) expectedGrad(Array(1, 1, 1)) = -0.50721812341362 expectedGrad(Array(1, 1, 2)) = -1.2595179299824 expectedGrad(Array(1, 2, 1)) = 1.1238703178242 expectedGrad(Array(1, 2, 2)) = 0.32083974452689 expectedGrad(Array(2, 1, 1)) = -0.56642062868923 expectedGrad(Array(2, 1, 2)) = 0.8510562335141 expectedGrad(Array(2, 2, 1)) = -0.20184816932306 expectedGrad(Array(2, 2, 2)) = 0.59350770944729 val output = mse.forward(input, target) val gradInput = mse.backward(input, target) assert(abs(expectedOutput - output) < 1e-6) expectedGrad.map(gradInput, (v1, v2) => { assert(abs(v1 - v2) < 1e-6); v1 }) } }
psyyz10/BigDL
spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/MSECriterionSpec.scala
Scala
apache-2.0
4,210
package lib.formats

import lib.helper.Markup
import lib.joda._
import models.enums._
import play.api.templates.Html
import play.api.templates.HtmlFormat
import play.api.i18n.Messages

/** Renders raw model values as HTML fragments for the view layer. */
object Renderer {

  // Placeholder shown when an optional value is absent.
  private val noValue = Html("""<em class="muted">not assigned</em>""")

  /**
   * Returns a ''HTML'' representation for the given `input`, replacing
   * `newline`s with `<br/>` elements.
   *
   * NOTE(review): the input is not HTML-escaped here — confirm callers pass
   * pre-sanitized text (see the `Datatype.Text` branch of `toHtml`).
   *
   * @param input the `input` to be rendered
   * @return the ''HTML'' representation.
   */
  def nl2br(input: String): Html = Html(input.replace("\\n", "<br/>"))

  /**
   * Returns a ''HTML'' representation for the given `value`.
   *
   * @param datatype the datatype of the given `value`
   * @param value the `value` to be rendered
   * @param default a default ''HTML'' representation to be used if the `value`
   *                is empty (defaults to the muted "not assigned" placeholder)
   * @param markup whether `Text` values go through the `Markup` renderer
   *               (`true`) or are reduced to plain text with `<br/>` breaks
   * @return the ''HTML'' representation.
   */
  def toHtml(datatype: Datatype.Datatype, value: Option[Any], default: Html = noValue, markup: Boolean = true): Html = value.map { v =>
    datatype match {
      // Booleans render as a checked/unchecked icon with a True/False tooltip.
      case Datatype.Boolean =>
        Html("""<i class="ico-%s" style="margin-right: 0;" title="%s"></i>""".format(
          if(v.asInstanceOf[Boolean]) "checked" else "unchecked",
          if(v.asInstanceOf[Boolean]) "True" else "False"
        ))
      case Datatype.Text =>
        if(markup) Markup.toHtml(v.toString) else nl2br(Markup.toString(v.toString))
      // Date/Time are formatted with locale patterns from the message bundle.
      case Datatype.Date =>
        Markup.toHtml(v.asInstanceOf[Date].toString(Messages("pattern.date")))
      case Datatype.Time =>
        Markup.toHtml(v.asInstanceOf[Time].toString(Messages("pattern.time")))
      // Every other datatype falls back to an HTML-escaped toString.
      case _ => HtmlFormat.escape(v.toString)
    }
  }.getOrElse(default)
}
fynnfeldpausch/frame
app/lib/formats/Renderer.scala
Scala
mit
1,743
package basics.functions

/** Function object turning an `Int` into its decimal string form. */
class ConvertFunction extends (Int => String) {

  /** @param m the value to convert
    * @return the base-10 string representation of `m` */
  def apply(m: Int): String = m.toString
}
szaqal/KitchenSink
Scala/01/src/main/scala/basics/functions/ConvertFunction.scala
Scala
gpl-3.0
131
package org.apache.spark.ml.dsl

import org.apache.spark.ml.dsl.utils.messaging.{MessageAPI_<<, MessageRelay}
import org.apache.spark.sql.catalyst.trees.TreeNode
import org.apache.spark.sql.utils.DataTypeRelay

/**
 * Tree node wrapping a pipeline step ([[StepLike]]), reusing Spark Catalyst's
 * [[TreeNode]] machinery for traversal and pretty-printing.
 */
trait StepTreeNode[BaseType <: StepTreeNode[BaseType]] extends TreeNode[StepTreeNode[BaseType]] {

  // The pipeline step this node represents.
  val self: StepLike

  // Debug string: default simpleString plus every merged path, one per line.
  override def verboseString: String =
    this.simpleString + "\\n========= PATHS =========\\n" + mergedPath.mkString("\\n")

  // All leaf-to-root name paths of this subtree: each path lists names from a
  // descendant up to this node, so this node's name is always the LAST element.
  lazy val paths: Seq[Seq[String]] = {
    val rootPath = Seq(self.name)
    if (children.nonEmpty) {
      children.flatMap { child =>
        child.paths.map(_ ++ rootPath)
      }
    } else Seq(rootPath)
  }

  // Collapses all paths into one representative path: the longest common
  // prefix (shared ancestry) concatenated with the longest common suffix
  // (shared tail). If prefix + suffix would overlap within the longest path,
  // only the common suffix is kept.
  lazy val mergedPath: Seq[String] = {
    val numPaths = paths.map(_.size)
    // paths always yields at least one path (Seq(rootPath) for a leaf).
    assert(numPaths.nonEmpty, "impossible")

    val result = {
      val maxBranchLength = numPaths.max
      // Largest v such that all paths share the same first v elements.
      val commonAncestorLength = maxBranchLength
        .to(0, -1)
        .find { v =>
          paths.map(_.slice(0, v)).distinct.size == 1
        }
        .getOrElse(0)
      val commonAncestor = paths.head.slice(0, commonAncestorLength)

      // Largest v such that all paths share the same last v elements.
      val commonParentLength = maxBranchLength
        .to(0, -1)
        .find { v =>
          paths.map(_.reverse.slice(0, v)).distinct.size == 1
        }
        .getOrElse(0)
      val commonParent = paths.head.reverse.slice(0, commonParentLength).reverse

      if (commonAncestor.size + commonParent.size > maxBranchLength) commonParent
      else commonAncestor ++ commonParent
    }
    result
  }
}

/** Serialization relay: converts a [[StepTreeNode]] to its wire message `M`. */
object StepTreeNode extends MessageRelay[StepTreeNode[_]] {

  override def toMessage_>>(v: StepTreeNode[_]): M = {
    // Sources additionally carry their data types; other steps only their id.
    val base = v.self match {
      case source: Source =>
        M(
          source.id,
          dataTypes = source.dataTypes
            .map(DataTypeRelay.toMessage_>>)
        )
      case _ =>
        M(v.self.id)
    }
    base.copy(
      stage = v.children.map(this.toMessage_>>)
    )
  }

  // Wire message: step id, optional data types (sources only), child stages.
  case class M(
      id: String,
      dataTypes: Set[DataTypeRelay.M] = Set.empty,
      stage: Seq[M] = Nil
  ) extends MessageAPI_<< {

    // Deserialization is intentionally unimplemented — calling this throws
    // scala.NotImplementedError.
    override def toProto_<< : StepTreeNode[_] = ???
  }
}
tribbloid/spookystuff
mldsl/src/main/scala/org/apache/spark/ml/dsl/StepTreeNode.scala
Scala
apache-2.0
2,117
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.deploy.master

import com.codahale.metrics.{Gauge, MetricRegistry}

import org.apache.spark.metrics.source.Source

/**
 * Metrics source exposing cluster-level gauges of the standalone Master:
 * total workers, alive workers, total applications, and waiting applications.
 * Registered under the source name "master".
 */
private[spark] class MasterSource(val master: Master) extends Source {
  override val metricRegistry = new MetricRegistry()
  override val sourceName = "master"

  // Gauge for worker numbers in cluster
  metricRegistry.register(MetricRegistry.name("workers"), new Gauge[Int] {
    override def getValue: Int = master.workers.size
  })

  // Gauge for alive worker numbers in cluster (state == ALIVE only)
  metricRegistry.register(MetricRegistry.name("aliveWorkers"), new Gauge[Int]{
    override def getValue: Int = master.workers.filter(_.state == WorkerState.ALIVE).size
  })

  // Gauge for application numbers in cluster
  metricRegistry.register(MetricRegistry.name("apps"), new Gauge[Int] {
    override def getValue: Int = master.apps.size
  })

  // Gauge for waiting application numbers in cluster
  metricRegistry.register(MetricRegistry.name("waitingApps"), new Gauge[Int] {
    override def getValue: Int = master.waitingApps.size
  })
}
tophua/spark1.52
core/src/main/scala/org/apache/spark/deploy/master/MasterSource.scala
Scala
apache-2.0
2,015
package com.avsystem.commons package redis.commands import com.avsystem.commons.redis.ClusterUtils.keyWithSameSlotAs import com.avsystem.commons.redis._ /** * Author: ghik * Created: 14/04/16. */ trait KeyedKeysApiSuite extends CommandsSuite { import RedisApi.Batches.StringTyped._ apiTest("COPY") { setup(set("{key}1", "value")) copy("{key}1", "{key}2").assertEquals(true) copy("{key}1", "{key}2").assertEquals(false) copy("{key}1", "{key}2", replace = true).assertEquals(true) copy("{key}?", "{key}2").assertEquals(false) } apiTest("DEL") { setup(mset("{key}1" -> "value", "{key}2" -> "value")) del(Nil).assertEquals(0) del("???").assertEquals(false) del("{key}1").assertEquals(true) del("{key}2", "{key}?").assertEquals(1) } apiTest("DUMP") { setup(set("key", "value")) dump("???").assert(_.isEmpty) dump("key").assert(_.nonEmpty) } apiTest("EXISTS") { setup(mset("{key}1" -> "value", "{key}2" -> "value")) exists(Nil).assertEquals(0) exists("???").assertEquals(false) exists("{key}1").assertEquals(true) exists("{key}2", "{key}?").assertEquals(1) } apiTest("EXPIRE") { setup(set("key", "value")) expire("key", Int.MaxValue).assert(identity) } apiTest("EXPIREAT") { setup(set("key", "value")) expireat("key", Int.MaxValue).assert(identity) } apiTest("OBJECT REFCOUNT") { setup(set("key", "value")) objectRefcount("???").assert(_.isEmpty) objectRefcount("key").assert(_.nonEmpty) } apiTest("OBJECT ENCODING") { setup(set("key", "value")) objectEncoding("???").assert(_.isEmpty) objectEncoding("key").assert(_.nonEmpty) } apiTest("OBJECT IDLETIME") { setup(set("key", "value")) objectIdletime("???").assert(_.isEmpty) objectIdletime("key").assert(_.nonEmpty) } apiTest("MEMORY USAGE") { setup(set("key", "value")) memoryUsage("???").assertEquals(Opt.Empty) memoryUsage("key").assert(_.exists(_ > 5)) memoryUsage("key", 0L).assert(_.exists(_ > 5)) memoryUsage("key", 1L).assert(_.exists(_ > 5)) } apiTest("PERSIST") { setup(set("key", "value")) persist("key").assertEquals(false) } 
apiTest("PEXPIRE") { setup(set("key", "value")) pexpire("key", Int.MaxValue).assert(identity) } apiTest("PEXPIREAT") { setup(set("key", "value")) pexpireat("key", Int.MaxValue).assert(identity) } apiTest("PTTL") { setup( set("key", "value"), setex("exkey", Int.MaxValue, "value") ) pttl("???").assertEquals(Opt.Empty) pttl("key").assertEquals(Opt(Opt.Empty)) pttl("exkey").assert(_.exists(_.nonEmpty)) } apiTest("RENAME") { setup(set("key", "value")) rename("key", keyWithSameSlotAs("key")).exec.futureValue } apiTest("RENAMENX") { setup(set("key", "value")) renamenx("key", keyWithSameSlotAs("key")).assert(identity) } apiTest("RESTORE") { setup(set("key", "value")) val dumped = dump("key").exec.futureValue.get restore("torestore", 1, dumped).exec.futureValue } apiTest("SORT") { sort("somelist", SelfPattern, SortLimit(0, 1), SortOrder.Desc, alpha = true).assert(_.isEmpty) } apiTest("SORT with STORE") { sortStore("somelist", keyWithSameSlotAs("somelist")).assertEquals(0) } apiTest("TOUCH") { setup(mset("{key}1" -> "value", "{key}2" -> "value")) touch(Nil).assertEquals(0) touch("???").assertEquals(false) touch("{key}1").assertEquals(true) touch("{key}2", "{key}?").assertEquals(1) } apiTest("TTL") { setup( set("key", "value"), setex("exkey", Int.MaxValue, "value") ) ttl("???").assertEquals(Opt.Empty) ttl("key").assertEquals(Opt(Opt.Empty)) ttl("exkey").assert(_.exists(_.nonEmpty)) } apiTest("TYPE") { setup(set("key", "value")) `type`("key").assertEquals(RedisType.String) } apiTest("UNLINK") { setup(mset("{key}1" -> "value", "{key}2" -> "value")) unlink(Nil).assertEquals(0) unlink("???").assertEquals(false) unlink("{key}1").assertEquals(true) unlink("{key}2", "{key}?").assertEquals(1) } } trait NodeKeysApiSuite extends KeyedKeysApiSuite { import RedisApi.Batches.StringTyped._ private val scanKeys = (0 until 256).map(i => s"toscan$i") apiTest("KEYS") { setup(mset(scanKeys.map(k => (k, "value")))) keys("toscan*").assert(_.toSet == scanKeys.toSet) } apiTest("SCAN") { 
setup(mset(scanKeys.map(k => (k, "value")))) def scanCollect(cursor: Cursor, acc: Seq[String]): Future[Seq[String]] = scan(cursor, "toscan*", 4L).exec.flatMapNow { case (Cursor.NoCursor, data) => Future.successful(acc ++ data) case (nextCursor, data) => scanCollect(nextCursor, acc ++ data) } assert(scanCollect(Cursor.NoCursor, Vector.empty).futureValue.toSet == scanKeys.toSet) } apiTest("MOVE") { setup(set("key", "value")) move("key", 1).assert(identity) } apiTest("SORT with GET") { sortGet("somelist", Seq(FieldPattern("hash", "*")), SelfPattern, SortLimit(0, 1), SortOrder.Desc, alpha = true).assert(_.isEmpty) } apiTest("SORT with BY") { sort("somelist", by = SelfPattern).assert(_.isEmpty) sort("somelist", by = KeyPattern("sth_*")).assert(_.isEmpty) sort("somelist", by = FieldPattern("hash_*", "sth_*")).assert(_.isEmpty) } }
AVSystem/scala-commons
commons-redis/src/test/scala/com/avsystem/commons/redis/commands/KeysApiSuite.scala
Scala
mit
5,366
/*
 *  ____    ____    _____    ____    ___     ____
 * |  _ \\  |  _ \\  | ____|  / ___|  / _/   / ___|        Precog (R)
 * |  |_) | |  |_) | |  _|  | |     | |  /| |  | _       Advanced Analytics Engine for NoSQL Data
 * |  __/  |  _ <  | |___   | |___  |/ _|  | |  |_| |    Copyright (C) 2010 - 2013 SlamData, Inc.
 * |_|     |_| \\_\\ |_____|  \\____|   /__/   \\____|        All Rights Reserved.
 *
 * This program is free software: you can redistribute it and/or modify it under the terms of the
 * GNU Affero General Public License as published by the Free Software Foundation, either version
 * 3 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License along with this
 * program. If not, see <http://www.gnu.org/licenses/>.
 *
 */
package com.precog.performance

import org.specs2.mutable.Specification

import java.io.File
import java.nio.ByteBuffer

/**
 * Aggregates the individual performance specs into one runnable suite.
 *
 * Fix: the original declaration read `class PerformanceSuite with ...`, which
 * is not valid Scala — a template must introduce its first parent with
 * `extends`. The suite now extends the (already imported) specs2
 * `Specification` and mixes in the two performance specs, keeping the public
 * class name unchanged.
 */
class PerformanceSuite extends Specification
  with RoutingPerformanceSpec
  with YggdrasilPerformanceSpec
precog/platform
performance/src/test/scala/com/precog/performance/PerformanceSuite.scala
Scala
agpl-3.0
1,283
package org.jetbrains.plugins.scala.codeInspection.delayedInit

import com.intellij.codeInspection.ProblemsHolder
import com.intellij.psi.{PsiClass, PsiElement}
import org.jetbrains.plugins.scala.codeInspection.{AbstractInspection, InspectionsUtil}
import org.jetbrains.plugins.scala.extensions.{Both, ContainingClass, LazyVal, PsiElementExt}
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScReferenceExpression
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScPatternDefinition, ScVariableDefinition}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScClass, ScObject, ScTemplateDefinition}
import org.jetbrains.plugins.scala.lang.resolve.ScalaResolveResult

/**
 * Inspection that flags references to non-lazy `val`/`var` fields declared in a
 * class extending `scala.DelayedInit`: such fields are initialized lazily by the
 * DelayedInit machinery, so reading them from outside the declaring class (or a
 * subclass of it) may observe `null`.
 *
 * @author Nikolay.Tropin
 */
class FieldFromDelayedInitInspection extends AbstractInspection("FieldFromDelayedInit", "Field from DelayedInit"){

  override def actionFor(holder: ProblemsHolder): PartialFunction[PsiElement, Any] = {
    case ref: ScReferenceExpression => ref.bind() match {
      case Some(FieldInDelayedInit(delayedInitClass)) =>
        // Collect every template definition enclosing the reference; the warning
        // is suppressed when the reference sits inside the DelayedInit class
        // itself or inside one of its subclasses (there the field is safe).
        val classContainers = ref.parentsInFile.collect {
          case td: ScTemplateDefinition => td
        }
        if (!classContainers.exists(c => c == delayedInitClass || c.isInheritor(delayedInitClass, deep = true)))
          holder.registerProblem(ref.nameId, "Field defined in DelayedInit is likely to be null")
      case _ =>
    }
  }

  /**
   * Extractor: matches a resolve result that points at a (non-lazy) field of a
   * class/object conforming to `scala.DelayedInit`, yielding that class.
   */
  object FieldInDelayedInit {
    def unapply(srr: ScalaResolveResult): Option[PsiClass] = {
      ScalaPsiUtil.nameContext(srr.getElement) match {
        // Lazy vals are initialized on first access, so they are never null here.
        case LazyVal(_) => None
        case Both((_: ScPatternDefinition | _: ScVariableDefinition),
                  ContainingClass(clazz @ (_: ScClass | _: ScObject))) =>
          // Only report when the receiver's type actually conforms to DelayedInit.
          if (srr.fromType.exists(InspectionsUtil.conformsToTypeFromClass(_, "scala.DelayedInit", clazz.getProject)))
            Some(clazz)
          else None
        case _ => None
      }
    }
  }
}
ilinum/intellij-scala
src/org/jetbrains/plugins/scala/codeInspection/delayedInit/FieldFromDelayedInitInspection.scala
Scala
apache-2.0
1,977
package keystoneml.loaders

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD

import keystoneml.pipelines.Logging
import keystoneml.utils.MultiLabeledImage

/** Location of the VOC image tar: directory name, name prefix, and optional partition count. */
case class VOCDataPath(imagesDirName: String, namePrefix: String, numParts: Option[Int])
/** Location of the CSV file mapping image file names to class labels. */
case class VOCLabelPath(labelsFileName: String)

/**
 * A data loader for the VOC 2007 Dataset. Expects input in a tar file.
 */
object VOCLoader extends Logging with Serializable {
  val NUM_CLASSES = 20 // This is a constant defined by the VOC 2007 dataset.

  /**
   * Loads a data path given a spark context and labels and returns an RDD[MultiLabeledImage].
   *
   * A property of the VOC dataset is that images can have multiple labels which we
   * have to deal with later in the pipeline.
   *
   * @param sc A Spark Context
   * @param dataPath Path to image tar.
   * @param labelsPath Path to label csv.
   * @return RDD of multi-labeled images.
   */
  def apply(sc: SparkContext, dataPath: VOCDataPath, labelsPath: VOCLabelPath): RDD[MultiLabeledImage] = {
    val filePathsRDD = ImageLoaderUtils.getFilePathsRDD(sc, dataPath.imagesDirName, dataPath.numParts)
    val labelsMap = loadLabelsMap(labelsPath.labelsFileName)
    ImageLoaderUtils.loadFiles(filePathsRDD, labelsMap, MultiLabeledImage.apply, Some(dataPath.namePrefix))
  }

  /**
   * Parses the label CSV into a map from image file name to all of its 0-based
   * class indices.
   *
   * Fixes over the original: the redundant `.map(x => x.toString)` on an
   * iterator of Strings is removed, and the file handle is now closed in a
   * `finally` so it is not leaked when a malformed line makes parsing throw.
   */
  private def loadLabelsMap(labelsFileName: String): Map[String, Array[Int]] = {
    val labelsMapFile = scala.io.Source.fromFile(labelsFileName)
    try {
      labelsMapFile
        .getLines()
        .drop(1) // skip the CSV header row
        .map { line =>
          val parts = line.split(",")
          // Column 4 holds the (quoted) image file name; column 1 holds the
          // 1-based class id, converted here to a 0-based index.
          (parts(4).replace("\"", ""), parts(1).toInt - 1)
        }
        .toArray
        .groupBy(_._1)
        .mapValues(_.map(_._2))
        .map(identity) // force a strict Map (mapValues returns a lazy view)
    } finally {
      labelsMapFile.close()
    }
  }
}
amplab/keystone
src/main/scala/keystoneml/loaders/VOCLoader.scala
Scala
apache-2.0
1,661
package memnets.fx.app import javafx.application.Application import memnets.core._ object SingletonAppFX { private var _startUpBuilder: ModelBuilder = _ private var _config: ModelConfig = _ /** helper for Java */ def demoJ(builder: BldType, args: Array[String] = Array()): Unit = { demo(builder, args = args) } def demo(builder: BldType, config: ModelConfig = new ModelConfig, args: Array[String] = Array()): Unit = { _startUpBuilder = builder _config = config main(args) } def main(args: Array[String]): Unit = { Application.launch(classOf[SingletonAppFX], args: _*) } } final private class SingletonAppFX extends AppBaseFX { def startUpBuilder: BldType = SingletonAppFX._startUpBuilder override def config: ModelConfig = SingletonAppFX._config } class DemoFX(bld: ModelBuilder, config: ModelConfig = new ModelConfig) { def main(args: Array[String]): Unit = { SingletonAppFX.demo(bld, config, args) } }
MemoryNetworks/memnets
fx/src/main/scala/memnets/fx/app/DemoFX.scala
Scala
apache-2.0
957
package com.metl.snippet

import com.metl.model.Globals
import net.liftweb._
import http._
import SHtml._
import common._
import util._
import Helpers._

/**
 * Lift snippet that points the page's theme stylesheet link at the CSS file
 * for the currently configured theme.
 */
object ThemeChooser {
  def render = {
    // Resolve the stylesheet path for the active theme, then bind it to the
    // href attribute of the element with id "themeCss".
    val stylesheetHref = "/static/assets/styles/%s/main.css".format(Globals.themeName)
    "#themeCss [href]" #> stylesheetHref
  }
}
StackableRegiments/analyticalmetlx
src/main/scala/com/metl/snippet/ThemeChooser.scala
Scala
apache-2.0
277
package org.scalex
package binary

import scala.collection.generic.CanBuildFrom

import sbinary._, DefaultProtocol._, Operations._
import semverfi.{ Valid, Version }

import model._

/**
 * sbinary serialization protocol for the scalex model types.
 *
 * NOTE: for every hand-written reader/writer pair below, the field order in
 * `reader` MUST match the write order in `writer` byte-for-byte — changing
 * either side breaks compatibility with existing serialized databases.
 */
private[binary] object BinaryFormat extends DefaultProtocol with RichProtocol {

  import Sugar._

  // Entity is stored as its qualified name only.
  implicit val entityF = wrap[Entity, String](_.qualifiedName, Entity.apply)

  // Role round-trips through its string name.
  implicit val roleF = new BinaryFormat[Role] {
    def reader(implicit in: Input) = Role fromName <<[String]
    def writer(a: Role)(implicit out: Output) { >>(Role toName a) }
  }

  // Product formats derived from the case-class companions; the asProductN
  // arity must match the case class field count.
  implicit val blockF: Format[Block] = asProduct2(Block)(Block.unapply(_).get)
  implicit val commentF: Format[Comment] = asProduct14(Comment)(Comment.unapply(_).get)
  implicit val memberF: Format[Member] = asProduct7(Member)(Member.unapply(_).get)
  implicit val templateF = asProduct4(Template)(Template.unapply(_).get)
  implicit val valueParamF = asProduct4(ValueParam)(ValueParam.unapply(_).get)

  // TypeParam is recursive (typeParams: list of TypeParam), so it needs a
  // hand-written format using readMany/writeMany for the recursive field.
  implicit val typeParamF = new BinaryFormat[TypeParam] {
    def reader(implicit in: Input): TypeParam = TypeParam(
      name = <<[String],
      typeParams = readMany,
      variance = <<[Variance],
      lo = <<[Option[TypeEntity]],
      hi = <<[Option[TypeEntity]])
    def writer(e: TypeParam)(implicit out: Output) {
      >>(e.name)
      writeMany(e.typeParams)
      >>(e.variance)
      >>(e.lo)
      >>(e.hi)
    }
  }

  implicit val defF = asProduct3(Def)(Def.unapply(_).get)
  implicit val constructorF = asProduct2(Constructor)(Constructor.unapply(_).get)
  implicit val abstractTypeF = asProduct4(AbstractType)(AbstractType.unapply(_).get)

  // DocTemplate also contains a recursive `templates` field, hence the
  // hand-written format (readMany/writeMany) instead of asProductN.
  implicit val docTemplateFormat = new BinaryFormat[DocTemplate] {
    def reader(implicit in: Input) = DocTemplate(
      member = <<[Member],
      template = <<[Template],
      typeParams = <<[List[TypeParam]],
      valueParams = <<[List[List[ValueParam]]],
      parentTypes = <<[List[QualifiedName]],
      members = <<[List[Member]],
      templates = readMany,
      methods = <<[List[Def]],
      values = <<[List[Member]],
      abstractTypes = <<[List[AbstractType]],
      aliasTypes = <<[List[TypeEntity]],
      primaryConstructor = <<[Option[Constructor]],
      constructors = <<[List[Constructor]])
    def writer(a: DocTemplate)(implicit out: Output) {
      >>(a.member)
      >>(a.template)
      >>(a.typeParams)
      >>(a.valueParams)
      >>(a.parentTypes)
      >>(a.members)
      writeMany(a.templates)
      >>(a.methods)
      >>(a.values)
      >>(a.abstractTypes)
      >>(a.aliasTypes)
      >>(a.primaryConstructor)
      >>(a.constructors)
    }
  }

  // Semver versions round-trip through their string rendering; reading an
  // invalid version string fails fast.
  implicit val versionF = new BinaryFormat[Valid] {
    def reader(implicit in: Input) = Version(<<[String]).opt err "Invalid binary version"
    def writer(v: Valid)(implicit out: Output) { >>(v.shows) }
  }

  implicit val projectF = asProduct3(Project)(Project.unapply(_).get)
  implicit val seedF = asProduct2(Seed)(Seed.unapply(_).get)
  // A Database is stored simply as its list of seeds.
  implicit val databaseF = wrap[Database, List[Seed]](_.seeds, Database.apply)
}
ornicar/scalex
src/main/scala/binary/BinaryFormat.scala
Scala
mit
3,027
package org.jetbrains.plugins.scala.lang.psi.types

import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScAssignStmt, ScExpression}
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScParameterClause
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.{Parameter, TypeParameter}

/**
 * Closed hierarchy of problems that can make a method/function application
 * inapplicable; used by the applicability checker to report errors.
 *
 * Pavel.Fatin, 02.06.2010
 */
//TODO must be abstract with no description when completed
sealed class ApplicabilityProblem(val description: String = "unknown")

object ApplicabilityProblem {
  // Extractor exposing the human-readable description of any problem.
  def unapply(a: ApplicabilityProblem): Option[String] = Some(a.description)
}

// definition syntax problems
case class MalformedDefinition() extends ApplicabilityProblem

// application syntax problems
case class PositionalAfterNamedArgument(argument: ScExpression) extends ApplicabilityProblem //TODO , parameter
case class ParameterSpecifiedMultipleTimes(assignment: ScAssignStmt) extends ApplicabilityProblem
case class UnresolvedParameter(assignment: ScAssignStmt) extends ApplicabilityProblem //TODO , parameter
case class ExpansionForNonRepeatedParameter(argument: ScExpression) extends ApplicabilityProblem

// Generic element-level mismatch between an actual and a found type.
case class ElementApplicabilityProblem(element: PsiElement, actual: ScType, found: ScType)
  extends ApplicabilityProblem("42") //todo

// applicability problem (value arguments)
case class DoesNotTakeParameters() extends ApplicabilityProblem
case class ExcessArgument(argument: ScExpression) extends ApplicabilityProblem
case class MissedParametersClause(clause: ScParameterClause) extends ApplicabilityProblem
case class MissedValueParameter(parameter: Parameter) extends ApplicabilityProblem
//TODO expectedType -> parameter
case class TypeMismatch(expression: ScExpression, expectedType: ScType) extends ApplicabilityProblem
case class DefaultTypeParameterMismatch(expectedType: ScType, actualType: ScType) extends ApplicabilityProblem
case object WrongTypeParameterInferred extends ApplicabilityProblem

// type-argument problems
case object DoesNotTakeTypeParameters extends ApplicabilityProblem
case class ExcessTypeArgument(argument: ScTypeElement) extends ApplicabilityProblem
case class MissedTypeParameter(param: TypeParameter) extends ApplicabilityProblem

case object ExpectedTypeMismatch extends ApplicabilityProblem
LPTK/intellij-scala
src/org/jetbrains/plugins/scala/lang/psi/types/ApplicabilityProblem.scala
Scala
apache-2.0
2,312
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.streaming.api.scala

import java.util

import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.{AssignerWithPunctuatedWatermarks, ProcessFunction}
import org.apache.flink.streaming.api.scala.function.{ProcessAllWindowFunction, ProcessWindowFunction}
import org.apache.flink.streaming.api.watermark.Watermark
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.windows.TimeWindow
import org.apache.flink.streaming.util.StreamingMultipleProgramsTestBase
import org.apache.flink.test.streaming.runtime.util.TestListResultSink
import org.apache.flink.util.Collector

import org.junit.Assert._
import org.junit.Test

/**
 * Integration test for streaming programs using side outputs.
 *
 * Each test wires a small pipeline, executes it, and checks the elements
 * collected by the main sink and the side-output sink.
 */
class SideOutputITCase extends StreamingMultipleProgramsTestBase {

  /**
   * Test ProcessFunction side output.
   */
  @Test
  def testProcessFunctionSideOutput() {
    val sideOutputResultSink = new TestListResultSink[String]
    val resultSink = new TestListResultSink[Int]

    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(3)

    val dataStream = env.fromElements(1, 2, 5, 3, 4)

    val outputTag = OutputTag[String]("side")

    // Main output forwards every element; side output emits a tagged string.
    val passThroughtStream = dataStream
      .process(new ProcessFunction[Int, Int] {
        override def processElement(
            value: Int,
            ctx: ProcessFunction[Int, Int]#Context,
            out: Collector[Int]): Unit = {
          out.collect(value)
          ctx.output(outputTag, "sideout-" + String.valueOf(value))
        }
      })

    passThroughtStream.getSideOutput(outputTag).addSink(sideOutputResultSink)
    passThroughtStream.addSink(resultSink)
    env.execute()

    assertEquals(
      util.Arrays.asList("sideout-1", "sideout-2", "sideout-3", "sideout-4", "sideout-5"),
      sideOutputResultSink.getSortedResult)

    assertEquals(util.Arrays.asList(1, 2, 3, 4, 5), resultSink.getSortedResult)
  }

  /**
   * Test keyed ProcessFunction side output.
   */
  @Test
  def testKeyedProcessFunctionSideOutput() {
    val sideOutputResultSink = new TestListResultSink[String]
    val resultSink = new TestListResultSink[Int]

    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(3)

    val dataStream = env.fromElements(1, 2, 5, 3, 4)

    val outputTag = OutputTag[String]("side")

    // Same as above but on a keyed stream (identity key).
    val passThroughtStream = dataStream
      .keyBy(x => x)
      .process(new ProcessFunction[Int, Int] {
        override def processElement(
            value: Int,
            ctx: ProcessFunction[Int, Int]#Context,
            out: Collector[Int]): Unit = {
          out.collect(value)
          ctx.output(outputTag, "sideout-" + String.valueOf(value))
        }
      })

    passThroughtStream.getSideOutput(outputTag).addSink(sideOutputResultSink)
    passThroughtStream.addSink(resultSink)
    env.execute()

    assertEquals(
      util.Arrays.asList("sideout-1", "sideout-2", "sideout-3", "sideout-4", "sideout-5"),
      sideOutputResultSink.getSortedResult)

    assertEquals(util.Arrays.asList(1, 2, 3, 4, 5), resultSink.getSortedResult)
  }

  /**
   * Test ProcessFunction side outputs with wrong [[OutputTag]].
   */
  @Test
  def testProcessFunctionSideOutputWithWrongTag() {
    val sideOutputResultSink = new TestListResultSink[String]
    val resultSink = new TestListResultSink[Int]

    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(3)

    val dataStream = env.fromElements(1, 2, 5, 3, 4)

    val outputTag = OutputTag[String]("side")
    val otherOutputTag = OutputTag[String]("other-side")

    // Emit only to the *other* tag; querying with `outputTag` must yield nothing.
    val passThroughtStream = dataStream
      .process(new ProcessFunction[Int, Int] {
        override def processElement(
            value: Int,
            ctx: ProcessFunction[Int, Int]#Context,
            out: Collector[Int]): Unit = {
          ctx.output(otherOutputTag, "sideout-" + String.valueOf(value))
        }
      })

    passThroughtStream.getSideOutput(outputTag).addSink(sideOutputResultSink)
    env.execute()

    assertTrue(sideOutputResultSink.getSortedResult.isEmpty)
  }

  /**
   * Test window late arriving events stream
   */
  @Test
  def testAllWindowLateArrivingEvents() {
    val resultSink = new TestListResultSink[String]
    val lateResultSink = new TestListResultSink[(String, Int)]

    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    // Timestamps 3 and 4 arrive after the watermark for 5, so they are late
    // for their 1ms tumbling windows and must be routed to the side output.
    val dataStream = env.fromElements(("1", 1), ("2", 2), ("5", 5), ("3", 3), ("4", 4))

    val lateDataTag = OutputTag[(String, Int)]("late")

    val windowOperator = dataStream
      .assignTimestampsAndWatermarks(new TestAssigner)
      .windowAll(TumblingEventTimeWindows.of(Time.milliseconds(1)))
      .sideOutputLateData(lateDataTag)
      .process(new ProcessAllWindowFunction[(String, Int), String, TimeWindow] {
        override def process(
            context: Context,
            elements: Iterable[(String, Int)],
            out: Collector[String]): Unit = {
          for (in <- elements) {
            out.collect(in._1)
          }
        }
      })

    windowOperator
      .getSideOutput(lateDataTag)
      .addSink(lateResultSink)

    windowOperator.addSink(resultSink)
    env.execute()

    assertEquals(util.Arrays.asList("1", "2", "5"), resultSink.getResult)
    assertEquals(util.Arrays.asList(("3", 3), ("4", 4)), lateResultSink.getResult)
  }

  /**
   * Test window late arriving events stream
   */
  @Test
  def testKeyedWindowLateArrivingEvents() {
    val resultSink = new TestListResultSink[String]
    val lateResultSink = new TestListResultSink[(String, Int)]

    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    val dataStream = env.fromElements(("1", 1), ("2", 2), ("5", 5), ("3", 3), ("4", 4))

    val lateDataTag = OutputTag[(String, Int)]("late")

    // Keyed variant of the late-data routing test above.
    val windowOperator = dataStream
      .assignTimestampsAndWatermarks(new TestAssigner)
      .keyBy(i => i._1)
      .window(TumblingEventTimeWindows.of(Time.milliseconds(1)))
      .sideOutputLateData(lateDataTag)
      .process(new ProcessWindowFunction[(String, Int), String, String, TimeWindow] {
        override def process(
            key: String,
            context: Context,
            elements: Iterable[(String, Int)],
            out: Collector[String]): Unit = {
          for (in <- elements) {
            out.collect(in._1)
          }
        }
      })

    windowOperator
      .getSideOutput(lateDataTag)
      .addSink(lateResultSink)

    windowOperator.addSink(resultSink)
    env.execute()

    assertEquals(util.Arrays.asList("1", "2", "5"), resultSink.getResult)
    assertEquals(util.Arrays.asList(("3", 3), ("4", 4)), lateResultSink.getResult)
  }

  /**
   * Test ProcessWindowFunction side output.
   */
  @Test
  def testProcessWindowFunctionSideOutput() {
    val resultSink = new TestListResultSink[String]
    val sideOutputResultSink = new TestListResultSink[String]

    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    val dataStream = env.fromElements(("1", 1), ("2", 2), ("5", 5), ("3", 3), ("4", 4))

    val sideOutputTag = OutputTag[String]("side")

    // Only non-late elements (1, 2, 5) reach the window function, so only
    // those produce side output here.
    val windowOperator = dataStream
      .assignTimestampsAndWatermarks(new TestAssigner)
      .keyBy(i => i._1)
      .window(TumblingEventTimeWindows.of(Time.milliseconds(1)))
      .process(new ProcessWindowFunction[(String, Int), String, String, TimeWindow] {
        override def process(
            key: String,
            context: Context,
            elements: Iterable[(String, Int)],
            out: Collector[String]): Unit = {
          for (in <- elements) {
            out.collect(in._1)
            context.output(sideOutputTag, "sideout-" + in._1)
          }
        }
      })

    windowOperator
      .getSideOutput(sideOutputTag)
      .addSink(sideOutputResultSink)

    windowOperator.addSink(resultSink)
    env.execute()

    assertEquals(util.Arrays.asList("1", "2", "5"), resultSink.getResult)
    assertEquals(util.Arrays.asList("sideout-1", "sideout-2", "sideout-5"),
      sideOutputResultSink.getResult)
  }

  /**
   * Test ProcessAllWindowFunction side output.
   */
  @Test
  def testProcessAllWindowFunctionSideOutput() {
    val resultSink = new TestListResultSink[String]
    val sideOutputResultSink = new TestListResultSink[String]

    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    val dataStream = env.fromElements(("1", 1), ("2", 2), ("5", 5), ("3", 3), ("4", 4))

    val sideOutputTag = OutputTag[String]("side")

    val windowOperator = dataStream
      .assignTimestampsAndWatermarks(new TestAssigner)
      .windowAll(TumblingEventTimeWindows.of(Time.milliseconds(1)))
      .process(new ProcessAllWindowFunction[(String, Int), String, TimeWindow] {
        override def process(
            context: Context,
            elements: Iterable[(String, Int)],
            out: Collector[String]): Unit = {
          for (in <- elements) {
            out.collect(in._1)
            context.output(sideOutputTag, "sideout-" + in._1)
          }
        }
      })

    windowOperator
      .getSideOutput(sideOutputTag)
      .addSink(sideOutputResultSink)

    windowOperator.addSink(resultSink)
    env.execute()

    assertEquals(util.Arrays.asList("1", "2", "5"), resultSink.getResult)
    assertEquals(util.Arrays.asList("sideout-1", "sideout-2", "sideout-5"),
      sideOutputResultSink.getResult)
  }
}

/**
 * Assigner used by the tests: each element's second field is its event-time
 * timestamp, and a punctuated watermark equal to that timestamp is emitted
 * after every element (making later smaller timestamps "late").
 */
class TestAssigner extends AssignerWithPunctuatedWatermarks[(String, Int)] {
  override def checkAndGetNextWatermark(
      lastElement: (String, Int),
      extractedTimestamp: Long): Watermark = new Watermark(extractedTimestamp)

  override def extractTimestamp(
      element: (String, Int),
      previousElementTimestamp: Long): Long = element._2.toLong
}
zimmermatt/flink
flink-streaming-scala/src/test/scala/org/apache/flink/streaming/api/scala/SideOutputITCase.scala
Scala
apache-2.0
11,143
package test

import org.scalatest.PropSpec

/** Minimal PropSpec example exercising the test framework. */
class ExamplePropSpec extends PropSpec {
  property("an empty Set should have size 0") {
    val emptySet = Set.empty[Int]
    assert(emptySet.size == 0)
  }
}
cheeseng/scalatest
scalatest-test.js/src/test/scala/test/ExamplePropSpec.scala
Scala
apache-2.0
178
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.spark.deploy.k8s.submit.steps.initcontainer

import io.fabric8.kubernetes.api.model.{ContainerBuilder, PodBuilder}

import org.apache.spark.SparkFunSuite
import org.apache.spark.deploy.k8s.MountSecretsBootstrap
import org.apache.spark.deploy.k8s.submit.SecretVolumeUtils

/**
 * Verifies that [[InitContainerMountSecretsStep]] adds a secret volume to the
 * driver pod and mounts it into the init-container for every configured secret.
 */
class InitContainerMountSecretsStepSuite extends SparkFunSuite {

  private val SECRET_FOO = "foo"
  private val SECRET_BAR = "bar"
  private val SECRET_MOUNT_PATH = "/etc/secrets/init-container"

  test("mounts all given secrets") {
    // Start from an empty spec: bare containers and a pod with empty
    // metadata/spec sections.
    val baseInitContainerSpec = InitContainerSpec(
      Map.empty,
      Map.empty,
      new ContainerBuilder().build(),
      new ContainerBuilder().build(),
      new PodBuilder().withNewMetadata().endMetadata().withNewSpec().endSpec().build(),
      Seq.empty)
    val secretNamesToMountPaths = Map(
      SECRET_FOO -> SECRET_MOUNT_PATH,
      SECRET_BAR -> SECRET_MOUNT_PATH)

    val mountSecretsBootstrap = new MountSecretsBootstrap(secretNamesToMountPaths)
    val initContainerMountSecretsStep = new InitContainerMountSecretsStep(mountSecretsBootstrap)
    val configuredInitContainerSpec = initContainerMountSecretsStep.configureInitContainer(
      baseInitContainerSpec)
    val podWithSecretsMounted = configuredInitContainerSpec.driverPod
    val initContainerWithSecretsMounted = configuredInitContainerSpec.initContainer

    // The bootstrap names each volume "<secret>-volume"; it must exist on the
    // pod and be mounted at the configured path inside the init-container.
    Seq(s"$SECRET_FOO-volume", s"$SECRET_BAR-volume").foreach(volumeName =>
      assert(SecretVolumeUtils.podHasVolume(podWithSecretsMounted, volumeName)))
    Seq(s"$SECRET_FOO-volume", s"$SECRET_BAR-volume").foreach(volumeName =>
      assert(SecretVolumeUtils.containerHasVolume(
        initContainerWithSecretsMounted, volumeName, SECRET_MOUNT_PATH)))
  }
}
saltstar/spark
resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/steps/initcontainer/InitContainerMountSecretsStepSuite.scala
Scala
apache-2.0
2,531
/*
 * Copyright 2016-2020 47 Degrees Open Source <https://www.47deg.com>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package github4s.domain

/**
 * A GitHub user (or organization) account as decoded from the GitHub REST API.
 *
 * Field names intentionally use snake_case to match the API's JSON payload.
 * Optional fields default to `None` because the API omits them in some
 * responses (e.g. contributor listings vs. full user lookups).
 */
final case class User(
    id: Long,
    login: String,
    avatar_url: String,
    html_url: String,
    name: Option[String] = None,
    email: Option[String] = None,
    company: Option[String] = None,
    blog: Option[String] = None,
    location: Option[String] = None,
    bio: Option[String] = None,
    followers_url: Option[String] = None,
    following_url: Option[String] = None,
    `type`: String = "User", // I think this can be either "User" or "Organization"
    hireable: Option[Boolean] = None,
    public_repos: Option[Int] = None,
    contributions: Option[Int] = None // only present in contributor listings
)
47deg/github4s
github4s/src/main/scala/github4s/domain/User.scala
Scala
apache-2.0
1,252
package streams

import org.scalatest.FunSuite

import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner

import Bloxorz._

/**
 * Test suite for the Bloxorz solver: terrain parsing, neighbor generation,
 * and optimality of the found solution on level 1.
 */
@RunWith(classOf[JUnitRunner])
class BloxorzSuite extends FunSuite {

  trait SolutionChecker extends GameDef with Solver with StringParserTerrain {
    /**
     * This method applies a list of moves `ls` to the block at position
     * `startPos`. This can be used to verify if a certain list of moves
     * is a valid solution, i.e. leads to the goal.
     */
    def solve(ls: List[Move]): Block =
      ls.foldLeft(startBlock) { case (block, move) =>
        move match {
          case Left => block.left
          case Right => block.right
          case Up => block.up
          case Down => block.down
        }
      }
  }

  trait Level1 extends SolutionChecker {
    /* terrain for level 1*/
    // 'S' marks the start, 'T' the target, 'o' walkable cells, '-' holes.
    val level =
      """ooo-------
        |oSoooo----
        |ooooooooo-
        |-ooooooooo
        |-----ooToo
        |------ooo-""".stripMargin

    // A known shortest move sequence for this level (used to check optimality).
    val optsolution = List(Right, Right, Down, Right, Right, Right, Down)
  }

  test("terrain function level 1") {
    new Level1 {
      assert(terrain(Pos(0,0)), "0,0")
      assert(!terrain(Pos(4,11)), "4,11")
    }
  }

  test("findChar level 1") {
    new Level1 {
      assert(startPos == Pos(1,1))
    }
  }

  test("neighborsWithHistory level 1") {
    new Level1 {
      // From the start, only Right and Down are legal; each neighbor carries
      // its move history with the newest move prepended.
      val expected = Set(
        (Block(Pos(1,2),Pos(1,3)), List(Right,Left,Up)),
        (Block(Pos(2,1),Pos(3,1)), List(Down,Left,Up))
      )
      val actual = neighborsWithHistory(Block(Pos(1,1),Pos(1,1)), List(Left,Up)).toSet
      assert(actual == expected)
    }
  }

  test("newNeighboursOnly level 1") {
    new Level1 {
      // Neighbors already in the explored set must be filtered out.
      val expected = Set(
        (Block(Pos(2,1),Pos(3,1)), List(Down,Left,Up))
      )
      val actual = newNeighborsOnly(
        Set(
          (Block(Pos(1,2),Pos(1,3)), List(Right,Left,Up)),
          (Block(Pos(2,1),Pos(3,1)), List(Down,Left,Up))
        ).toStream,
        Set(Block(Pos(1,2),Pos(1,3)), Block(Pos(1,1),Pos(1,1)))
      ).toSet
      assert(actual == expected)
    }
  }

  test("optimal solution for level 1") {
    new Level1 {
      // Replaying the computed solution must land the block standing on the goal.
      assert(solve(solution) == Block(goal, goal))
    }
  }

  test("optimal solution length for level 1") {
    new Level1 {
      assert(solution.length == optsolution.length)
    }
  }
}
jan-j/functional-programming-principles-in-scala
week-7/src/test/scala/streams/BloxorzSuite.scala
Scala
mit
2,318
package io.mem0r1es.trank.pipeline

import java.net.URI

import scala.Array.canBuildFrom

import org.apache.lucene.index.Term
import org.apache.lucene.search.TermQuery

import com.typesafe.config.Config

import io.mem0r1es.trank.util.IndexUtils
import io.mem0r1es.trank.util.TRankIndexType

object TypeRetrieval {

  /**
   * Given a set of DBpedia resource URIs, retrieve all RDF types of each entity.
   *
   * @param entities the entity URIs to look up
   * @param config   configuration used to locate the type index
   * @return a map from each entity URI to its (possibly empty) set of type URIs
   */
  def retrieveTypes(entities: Set[URI], config: Config): Map[URI, Set[URI]] =
    // Build the result map directly instead of accumulating into a `var`
    // (the original used `var` + `foreach`; the result is identical).
    entities.map(entity => entity -> getTypes(entity, config)).toMap

  /**
   * Looks up the RDF types of a single entity in the Lucene type index.
   * Each index document stores one entity ("uri" field) together with all of
   * its types in the multi-valued "type" field, so only the first hit is read.
   */
  private def getTypes(entity: URI, config: Config): Set[URI] = {
    val searcher = IndexUtils.getIndexSearcher(TRankIndexType.TYPE_INDEX, config)
    val query = new TermQuery(new Term("uri", entity.toString))
    val docs = searcher.search(query, 1)
    if (docs.scoreDocs.length > 0) {
      val d = searcher.doc(docs.scoreDocs(0).doc)
      d.getValues("type").map(new URI(_)).toSet
    } else {
      Set[URI]() // unknown entity: no types
    }
  }
}
ahmadassaf/TRank
src/main/scala/io/mem0r1es/trank/pipeline/TypeRetrieval.scala
Scala
apache-2.0
1,092
import sbt._
import Keys._
import AndroidKeys._

/** Settings shared by the library, the test-support artifact, and the samples. */
object General {
  val settings = Defaults.defaultSettings ++ Seq (
    organization := "org.positronicnet",
    version := "0.4-SNAPSHOT",
    scalaVersion := "2.9.0-1",
    platformName in Android := "android-14"
  )

  lazy val fullAndroidSettings =
    General.settings ++
    AndroidProject.androidSettings ++
    TypedResources.settings ++
    AndroidMarketPublish.settings ++ Seq (
      keyalias in Android := "change-me",
      libraryDependencies += "org.scalatest" %% "scalatest" % "1.6.1" % "test",
      // Keep Java serialization members that ProGuard would otherwise strip.
      proguardOption in Android := """
        -keepclassmembers class * implements java.io.Serializable {
            private static final java.io.ObjectStreamField[] serialPersistentFields;
            private void writeObject(java.io.ObjectOutputStream);
            private void readObject(java.io.ObjectInputStream);
            java.lang.Object writeReplace();
            java.lang.Object readResolve();
        }
      """
    )
}

object AndroidBuild extends Build {
  lazy val libproj = Project (
    "PositronicNetLib",
    file("."),

    // Test setup --- include Robolectric, tell it where to find
    // resources and manifest, and don't try to run tests in parallel
    // (lest the DB tests stomp all over each other).

    settings = General.fullAndroidSettings ++ Seq(
      parallelExecution in Test := false,
      libraryDependencies += "com.pivotallabs"%"robolectric"%"1.1" % "test",
      testOptions in Test ++= Seq(
        Tests.Argument("-DandroidResPath=src/main/res"),
        Tests.Argument("-DandroidManifestPath=src/main/AndroidManifest.xml"))
    )
  ) dependsOn ( roboScalaTest % "test" )

  // Separate packaging for the glue code to get Robolectric support
  // in a ScalaTest suite.  This shows up as a trait named RobolectricTests
  // in package org.positronicnet.test, which extends org.scalatest.Suite.
  // Can be published as "roboscalatest".

  lazy val roboScalaTest = Project (
    "RoboScalaTest",
    file("testsupport"),
    settings = General.settings ++
      AndroidProject.androidSettings ++ Seq (
        keyalias in Android := "change-me",
        libraryDependencies ++= Seq(
          "org.scalatest" %% "scalatest" % "1.6.1",
          "com.pivotallabs"%"robolectric"%"1.1"
        )))

  // Bundled sample projects
  // Each sample depends on the library at compile time and on the
  // Robolectric/ScalaTest glue at test time; test options point Robolectric
  // at the sample's own resources and manifest.

  def sampleProject( name: String, dir: String ) = {
    val projSrc = "sample/" + dir + "/src/main"
    Project( name, file("sample")/dir,
             settings = General.fullAndroidSettings ++ (
               testOptions in Test ++= Seq(
                 Tests.Argument("-DandroidResPath=" + projSrc + "/res"),
                 Tests.Argument("-DandroidManifestPath=" + projSrc + "/AndroidManifest.xml"))
             ))
      .dependsOn (libproj % "compile")
      .dependsOn (roboScalaTest % "test")
  }

  lazy val todo     = sampleProject( "SampleTodo",    "todo_app" )
  lazy val todocp   = sampleProject( "SampleTodoCp",  "todo_app_cp" )
  lazy val call_log = sampleProject( "SampleCallLog", "call_log_app" )
  lazy val contacts = sampleProject( "SampleContacts", "contacts_app" )
}
rst/positronic_net
project/build.scala
Scala
bsd-3-clause
3,132
/**
 * Licensed to the Minutemen Group under one or more contributor license
 * agreements. See the COPYRIGHT file distributed with this work for
 * additional information regarding copyright ownership.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you
 * may not use this file except in compliance with the License. You may
 * obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package silhouette.util

import java.security.SecureRandom

import org.apache.commons.codec.binary.Hex

import scala.concurrent.{ ExecutionContext, Future }

/**
 * Generates cryptographically strong IDs backed by [[java.security.SecureRandom]].
 *
 * @param idSizeInBytes Number of random bytes per generated ID (the hex-encoded
 *                      result is twice that many characters).
 * @param ec            Execution context on which the (potentially blocking)
 *                      byte generation runs.
 */
class SecureRandomIDGenerator(idSizeInBytes: Int = 128)(implicit ec: ExecutionContext) extends IDGenerator {

  /**
   * Generates a new hex-encoded random ID.
   *
   * @return The generated ID.
   */
  override def generate: Future[String] = {
    val buffer = new Array[Byte](idSizeInBytes)
    // Filling the buffer may block while the OS gathers entropy, so both the
    // fill and the encoding run asynchronously on the provided context.
    Future {
      SecureRandomIDGenerator.random.nextBytes(buffer)
      Hex.encodeHexString(buffer)
    }
  }
}

/**
 * The companion object.
 */
object SecureRandomIDGenerator {

  /**
   * A cryptographically strong random number generator (RNG), shared lazily by
   * all generator instances because the initial seeding of a fresh SecureRandom
   * is expensive.
   *
   * On Linux systems SecureRandom uses /dev/random and it can block waiting for
   * sufficient entropy to build up.
   */
  lazy val random = new SecureRandom()
}
datalek/silhouette
silhouette/src/main/scala/silhouette/util/SecureRandomIDGenerator.scala
Scala
apache-2.0
2,076
package loader.core

import loader.core.events.Event
import loader.InternalLoaderException
import scala.reflect.ClassTag
import loader.exceptions._
import reflect.runtime.universe.TypeTag

/**
 * Core definitions of the processing side of the loader: how parser events are turned into a
 * stacked structure of `Element`s, how callbacks are woven in, and how includes swap parsers.
 */
object definition {
  import scala.language.implicitConversions

  //the compiler seems to require a little help, so this class seems necessary
  //we also need it to build fake Tops for includes
  protected abstract class Top[-Kind,+D<:Def](val processor:D, kindClass:Class[_<:Kind]) {
    //builds the initial (root) element for a given parser
    def init:processor.Parser=>processor.Element
    //converts the parser's value kind to the processor's kind; cannot be argument because of processor dependance
    def mapper:(processor.Element,Kind)=>processor.Kind
    //applies the mapper; a null mapper stands for the identity (kinds are then assumed directly compatible)
    def map(elt:processor.Element,s:Kind):processor.Kind = if (mapper==null) s.asInstanceOf[processor.Kind] else mapper(elt,s)
    //runtime compatibility check between this top and a parser; only performed when both kind classes are declared
    def check(p:ParserBuilder) = if (kindClass!=null && p.kindClass!=null && !kindClass.isAssignableFrom(p.kindClass)) throw new IncludeException(3,p.kindClass,kindClass)
  }

  /** This serves as a base for Status.
   *  In turn, Status is a class that serves as intermediary to build a child item from a parent item.
   */
  trait Status {
    def name:String
  }

  /** Defines the pattern through which we turn the sequence of parsed items into a structure with depth.
   *  The process involves 'stacking' data structures (Element), each referring to its parent.
   *  This trait only establishes the broad lines; further refinements will turn this into usable code.
   *  @see Core    for a very basic implementation where elements only have names
   *  @see ExtCore for a richer implementation where elements can contain additional data
   *  @see CtxCore for a very rich implementation based on prior knowledge of the expected structure (using context)
   */
  trait Def { selfDef=>
    type Kind>:Null                                       //the value type received from the parser
    type Ret                                              //the result type of processing one element
    type Element >: Null <: Elt                           //the concrete element type
    type Parser <: ParserBuilder#Impl                     //the parser implementation this processor accepts
    type Status >: Null <: definition.Status              //the intermediary used to spawn children
    type UserCtx = UserContext[Element]                   //user-provided context (event handler, include resolution...)
    type Cbk  = callbacks.Callback[Element,Status,Ret,Kind]
    type Cbks = callbacks.Callbacks[Element,Status,Ret,Kind]
    type Bld  = EltBuilder

    //these do not have to be filled up; they will allow a runtime check on include if they are.
    def procClass = getClass
    def parsClass:Class[_<:ParserBuilder#Impl]
    def kindClass:Class[Kind]

    /** The standard, elementary methods dealing with parser events; a subclass will usually refine this.
     *  The order in which these methods are executed is:
     *  - parent executes onName; child doesn't yet exist
     *  - child is created by the appropriate motor: onBeg is invoked
     *  - child may either be a terminal; onVal is invoked when the value is received
     *  - or it may be a container, in which case it will receive parent events (onName, onChild),
     *    until it is finished and onEnd is invoked.
     *  - parent executes onChild.
     *  They are protected because users have little reason to invoke these directly!
     *  However, these methods are important because they provide the entries for callbacks and understanding
     *  them is necessary to coding complex callbacks.
     */
    trait BaseElt {
      //aliases re-exporting the enclosing Def's types on the element itself
      type Def     = selfDef.type
      type Ret     = selfDef.Ret
      type Status  = selfDef.Status
      type Element = selfDef.Element
      type Bld     = selfDef.Bld
      type Kind    = selfDef.Kind
      type Parser  = selfDef.Parser
      def parser : Parser
      def definition:Def.this.type = Def.this
      protected def onName(name: String): Status
      protected def onBeg(): Unit
      protected def onVal(v: Kind): Ret
      protected def isInclude(v: Kind): Boolean
      protected def onInclude(v: Kind): Ret
      protected def onEnd(): Ret
      protected def onChild(child: Element, r: Ret): Unit
    }

    /** The central abstraction of the framework.
     *  Defines how the parser events are processed and holds the data necessary to process these events.
     */
    abstract class Elt extends Traversable[Element] with BaseElt { self:Element=>
      def myself:Element = this
      import scala.reflect._
      /** Context for use */
      def userCtx:UserCtx
      /** Fields */
      def parent : Element                             //parent item
      def name   : String                              //element name
      def parser : Parser                              //parser creating that element
      protected def parser_=(parser:Parser):Unit       //parser has write access for handling includes
      /** Builder for children elements. builder should stay a def and point to the companion object to prevent bloating. */
      def childBuilder:Bld
      /** building a child spawning children of a possibly different nature */
      def build(p:Parser, s:Status, b:Bld):Element = childBuilder(p, self, s, b)
      /** building a child spawning children of the same nature */
      def build(p:Parser, s:Status):Element = build(p, s, childBuilder)

      //handle an event:
      // - ignore if no handler defined
      // - do nothing if handler not configured for that event
      def apply(evt:Event) = userCtx.eventHandler match {
        case null =>
        case h    => h.applyOrElse((this,evt),(x:(Element,Event))=>())
      }

      /** The push/pull interface on the processor side */
      def pull()        = parent.onChild(this, onEnd())
      def push(n:String) = { val c=build(parser,onName(n),childBuilder); c.onBeg(); c }
      def pull(v:Kind)  = { parent.onChild(this, if (isInclude(v)) onInclude(v) else onVal(v)) }

      /** standard invoker, used on the top level element */
      def invoke(f: =>Unit): Ret = {
        if (!isInclude) onBeg() //onBeg has already been called on included elements
        f
        onEnd() //note that for included elements, onBeg was called using the top parser when onEnd is called using the bottom one.
      }

      /** Used to change the parser during an include. */
      //creates a Top from this element for an include
      //callbacks are automatically carried over
      //!! Mutation: we change the current parser for this element
      //the parser will do some runtime checks to ensure that the processor (Element) and parser (from P) can work together.
      //the casts that follow will then work.
      //We must check:
      // - we have a transformer function from the new parser kind to the current kind
      // - we use a processor supported by the new parser and that the new parser is accepted by this processor.
      protected def asTop(x:ParserBuilder#Executor)(mapper:(Element,x.builder.Kind)=>Kind) = new Top[x.builder.Kind](mapper,x.builder.kindClass,childBuilder,null,null) {
        override def init:Parser=>processor.Element = (p) => { self.parser = p; self }
      }.asInstanceOf[x.builder.BaseProcessor#Top[x.builder.Kind]]

      /** Handles an include; this should be called in onVal(String) whenever necessary. */
      protected def onInclude(s:Kind): Ret = {
        val old = parser //remember the current parser so it can be restored once the include completes
        try {
          //We are going to spawn a parser executor out of nowhere.
          //There is no reason that it can accept the current Processor and we have to check this and then cast as appropriate (see asTop).
          val i = userCtx.solveInclude(this)(s) //builds the relevant executor
          i(asTop(i)(userCtx.getMapper(this)(i))).asInstanceOf[Ret] //run it with this element as top element; the return cast is always true because the element we are working on is still in the same class as this
        } finally {
          parser = old //always restore the previous parser, even when the include fails
        }
      }

      /** Checks when a given value received on this element is to be solved as an include. */
      protected def isInclude(s:Kind):Boolean = userCtx.isInclude(this)(s)
      //abstract override protected def onVal(s:String): Ret = if (isInclude(s:String)) doInclude(s) else super.onVal(s)

      /** Some convenient methods.
       *  Methods prefixed by g are general and use up the full parent chain.
       *  By contrast, the non prefixed methods only use the chain with items of the same
       *  kind; this is the most common occurrence.
       */
      def isRoot: Boolean = parent==null          //head of stack
      def isInclude: Boolean = parent match {     //head of sub-stack (i.e. include): the parent was built by a different parser
        case null => false
        case p    => p.parser != parser
      }
      //iterator on the elements forming the full chain from this element to the top
      def toHead:Iterator[Element] = new Iterator[Element] {
        private var cur = self
        def hasNext: Boolean = cur!=null
        //NOTE(review): `cur=parent` always re-reads the enclosing element's parent, so the iterator
        //never advances past the immediate parent (and loops unless parent is null) — looks like it
        //should be `cur=cur.parent`; TODO confirm against callers before fixing.
        def next: Element = { val c=cur; cur=parent; c }
      }
      //iteration on the elements forming the full chain to this element starting from the top
      def foreach[U](f:Element=>U):Unit = { if (parent!=null) parent.foreach(f); f(self) }
      def iter[U](f:Element=>U):Traversable[Element] = new Traversable[Element] {
        def foreach[U](f:(Element)=>U) = self.foreach(f)
      }
      /** Prints the stack */
      def print(out:java.io.Writer):Unit = foreach(e=>out.write(s".${e.name}"))
      override def toString = { val s=new java.io.StringWriter; print(s); s.toString }
    }

    /** A launchable top element bound to this processor: pairs a root builder with an optional kind mapper and callbacks. */
    class Top[-K](val mapper:(Element,K)=>Kind,kindClass:Class[_<:K],builder:EltBuilder,s:Status,cbks: Cbks*) extends definition.Top[K,Def.this.type](Def.this,kindClass) {
      def init:Parser=>processor.Element = if (cbks.isEmpty) builder(_,s) else builder(_,s,cbks:_*)
    }

    /** Defines how Element are built.
     *  - apply(Element,Status,Bld)           is the nominal builder
     *  - apply(Element,Status,Bld,Cbks*)     is the nominal builder in the presence of callbacks trees
     *  - apply(Element,Status,Bld,Cbk,Cbks*) should likely only be used internally
     *  Other methods are:
     *  - apply(Status,Bld) a builder for a root element (no parent)
     *  - apply(Status)     a builder for a root element (no parent) which uses the current builder for its children
     */
    abstract class EltBuilder {
      //root (Top) factories; a null mapper means the parser kind is used unchanged
      def apply(s: Status):Top[Kind]               = new Top(null.asInstanceOf[(Element,Kind)=>Kind],kindClass,this,s)
      def apply(s: Status, cbks: Cbks*):Top[Kind]  = new Top(null.asInstanceOf[(Element,Kind)=>Kind],kindClass,this,s,cbks:_*)
      def apply[K](mapper:(Element,K)=>Kind, kindClass:Class[_<:K], s: Status):Top[K]              = new Top(mapper,kindClass,this,s)
      def apply[K](mapper:(Element,K)=>Kind, kindClass:Class[_<:K], s: Status, cbks: Cbks*):Top[K] = new Top(mapper,kindClass,this,s,cbks:_*)
      //element factories; the null parent marks a root element
      def apply(p: Parser, s: Status): Element                                    = apply(p,null,s,this)
      def apply(p: Parser, s: Status, childBuilder: Bld): Element                 = apply(p,null,s,childBuilder)
      def apply(p: Parser, s: Status, cbks: Cbks*): Element                       = apply(p,s,this,cbks:_*)
      def apply(p: Parser, s: Status, childBuilder: Bld, cbks: Cbks*): Element    = WithCallbacks(p,null,s,cbks,childBuilder,childBuilder)
      //abstract factories, supplied by the concrete processor
      def apply(p: Parser, parent: Element, s: Status, childBuilder: Bld): Element
      def apply(p: Parser, parent: Element, s: Status, childBuilder: Bld, cbks: Cbks*): Element with WithCallbacks
      def apply(p: Parser, parent: Element, s: Status, childBuilder: Bld, cb: Cbk, cbks: Cbks*): Element with WithCallback
    }

    /** Modifies the current element behavior by using a callback */
    trait WithCallback extends WithCallbacks { this: Element =>
      /** When handling Callbacks, we will want to reach the parent behaviour. */
      val cb: callbacks.Callback[Element,Status,Ret,Kind] //the callback for the current element
      val cbx = cb(this) //instantiated callback; null means "pass through to the base behaviour"
      abstract override protected def onName(name: String): Status         = if (cbx==null) super.onName(name)    else cbx.onName(name, super.onName)
      abstract override protected def onBeg(): Unit                        = if (cbx==null) super.onBeg()         else cbx.onBeg(super.onBeg)
      abstract override protected def onVal(v: Kind): Ret                  = if (cbx==null) super.onVal(v)        else cbx.onVal(v,super.onVal)
      abstract override protected def isInclude(v: Kind): Boolean          = if (cbx==null) super.isInclude(v)    else cbx.isInclude(v,super.isInclude)
      abstract override protected def onInclude(v: Kind): Ret              = if (cbx==null) super.onInclude(v)    else cbx.onInclude(v,super.onInclude)
      abstract override protected def onEnd(): Ret                         = if (cbx==null) super.onEnd()         else cbx.onEnd(super.onEnd)
      abstract override protected def onChild(child: Element, r: Ret): Unit = if (cbx==null) super.onChild(child,r) else cbx.onChild(child, r, super.onChild)
    }

    /** Modifies the current element to manage a callback tree.
     *  Children are built according to the following rules:
     *  - if no callback subtree applies for the child, the child returns to the base implementation, removing any overhead for callback
     *  - if a callback tree is present for the child, but no callback applies, the child uses the WithCallbacks trait;
     *    this causes additional data to be carried, and some overhead when building children
     *  - if a callback tree is present for the child, and a callback applies, the child uses the WithCallback trait;
     *    this causes the same overhead as the previous case; in addition, the callback is carried and base methods pass through it (onBeg etc...)
     */
    trait WithCallbacks extends Elt { this: Element =>
      protected[this] def cbks: Seq[Cbks] //current callbacks trees (for children)
      override def build(p: Parser, s: Status, b:Bld): Element = WithCallbacks(p,this,s,cbks,childBuilder,b)
    }
    object WithCallbacks {
      /** Analyzes a callbacks sequence to know:
       *  1) whether it applies to the current item
       *  2) what sub sequence may apply to children
       */
      def apply(p: Parser, parent:Element, s:Status, cbks:Seq[Cbks], builder:EltBuilder, childBuilder:Bld): Element = {
        if (cbks.length == 1) {
          //first, the case where the sequence is one element only.
          //it's very common, and should be optimized! it's furthermore much easier to read!
          cbks.head.get(s.name) match {
            case None    => builder(p,parent,s,childBuilder) //no subtree ? get rid of the extra callback data and associated code
            case Some(c) => c.cur match {
              case None     => builder(p,parent,s,childBuilder,cbks:_*)
              case Some(cb) => builder(p,parent,s,childBuilder,cb,cbks:_*)
            }
          }
        } else {
          //that one is a little tricky; first build the next sequence of callback trees, extracted from cbks
          val r = for (x <- cbks; y <- x.get(s.name)) yield y
          //if empty, return to the no callback version
          if (r.isEmpty) builder(p,parent,s,childBuilder)
          else {
            //otherwise, create the sequence of actual callbacks for this element
            val c = (for (x <- r; y <- x.cur) yield y)
            //if empty, proceed with the builder with non local callback
            if (c.isEmpty) builder(p,parent,s,childBuilder,r:_*)
            //otherwise, proceed by combining the local callbacks together to compute the final callback to apply
            else builder(p,parent,s,childBuilder,c.reduce(_(_)),r:_*)
          }
        }
      }
    }

    /** Conversion to traversable. */
    implicit def toTraversable[U](e:Element):Traversable[Element] = new Traversable[Element] {
      def foreach[U](f:(Element)=>U) = e.foreach(f)
    }
  }

  trait Impl extends Def { self=>
    /** An actual implementation class should extend this trait.
     *  All implementations based on the same core (i.e. extending this trait from the same instance)
     *  are interoperable.
     *  See examples for how to use this.
     */
    trait Impl {
      def builder:Bld //an associated builder
    }
    //a factory for reading textual parameters
    def apply(pr: utils.ParamReader, userCtx:UserCtx):Impl

    /** Forwards the base methods to the upper layer.
     *  This causes a redirection to apply them, but usually has the immense advantage of fully defining the element by
     *  defining all behaviours. Using Motor makes it easier to define processors, all using a common element base.
     */
    trait Motor extends Impl { motor=>
      type Result
      type ElementBase<:Element
      // context fields for a motor
      def userCtx:UserCtx
      // Forwarded methods
      protected def onInit():Unit
      protected def onExit():Result
      protected def onName(self: Element, name: String): Status
      protected def onBeg(self: Element): Unit
      protected def onVal(self: Element, v: Kind): Ret
      protected def onEnd(self: Element): Ret
      protected def onChild(self: Element, child: Element, r: Ret): Unit

      // Element implementation : redirect calls to the motor
      trait Processor extends Elt { self:ElementBase=>
        def userCtx = motor.userCtx
        protected def onName(name: String): Status             = motor.onName(this,name)
        protected def onBeg(): Unit                            = motor.onBeg(this)
        protected def onVal(v: Kind): Ret                      = motor.onVal(this,v)
        protected def onEnd(): Ret                             = motor.onEnd(this)
        protected def onChild(child: Element, r: Ret): Unit    = motor.onChild(this,child,r)
        /*
         * We have to manage the copy required when we have an include: the current element must be attached to
         * the upper parser, but also to the lower one! Thus, we must copy the current element and change the parser.
         * Several methods are possible to do this:
         * - creating the appropriate method for each implementing class; extremely tedious and not scalable
         * - clone + java reflexion : fails (these objects are too complex to locate the parser field!)
         * - clone + scala reflexion : works (snippet below), but awful perfs at 'cm.reflect(r)'
         * - clone + inner mutable (parser0) that shadows parser
         * However problems arise with other fields that depend on the current element but still refer to the old
         * one, for example cb (callback); the proper solution for this would be to rebuild a new object from
         * scratch with the appropriate parser; but this has its own drawbacks in case some of the methods invoked
         * have side effects (such as getData.)
         * Finally, it seems safer to make Elt mutable on parser as this is the solution that seems to be the least
         * restrictive and the most efficient. One must not forget to restore the old parser!
         *
         * Snippet for scala reflexion:
         *   import scala.reflect.runtime.{ currentMirror => cm }
         *   import scala.reflect.runtime.universe._
         *   val im = cm.reflect(r)
         *   val termSymb = typeOf[ElementBase].declaration(newTermName("parser")).asTerm
         *   val fm = im.reflectField(termSymb)
         *   fm.set(parser)
         */
        protected var parser0:Parser //mutable backing field; shadowed behind the parser accessor below
        def parser = parser0
        protected def parser_=(parser:Parser):Unit = parser0=parser
      }
    }
    implicit def toBuilder(impl:Impl):Bld = impl.builder
  }
}
Y-P-/data-processing-binding
XX3/obsolete/core/definition.scala
Scala
gpl-3.0
19,883
package collins.models

import play.api.libs.json.Json
import play.api.libs.json.JsObject
import play.api.libs.json.JsNumber
import play.api.libs.json.JsString

import org.squeryl.PrimitiveTypeMode._
import org.squeryl.Schema
import org.squeryl.Table

import scala.util.control.NonFatal

import collins.solr.SolrKey
import collins.solr.SolrSingleValue
import collins.solr.SolrIntValue
import collins.solr.SolrDoubleValue
import collins.solr.SolrBooleanValue
import collins.solr.SolrStringValue

import collins.models.cache.Cache
import collins.models.shared.ValidatedEntity
import collins.models.shared.AnormAdapter

/**
 * A metadata attribute that can be attached to assets (e.g. CPU_COUNT, MAC_ADDRESS).
 *
 * @param name        upper-case, alphanumeric (plus hyphen/underscore) attribute name
 * @param priority    display priority; values <= -1 are hidden from getViewable
 * @param label       human-readable label
 * @param description free-text description (must be non-empty)
 * @param id          database primary key (0 until persisted)
 * @param value_type  one of AssetMeta.ValueType ids; defaults to String
 */
case class AssetMeta(
  name: String,
  priority: Int,
  label: String,
  description: String,
  id: Long = 0,
  value_type: Int = AssetMeta.ValueType.String.id
) extends ValidatedEntity[Long] {

  // Enforces the invariants documented on the constructor; called by the persistence layer.
  override def validate() {
    require(name != null && name.toUpperCase == name && name.size > 0, "Name must be all upper case, length > 0")
    require(AssetMeta.isValidName(name), "Name must be all upper case, alpha numeric (and hyphens): %s".format(name))
    require(description != null && description.length > 0, "Need a description")
    require(AssetMeta.ValueType.valIds(value_type), "Invalid value_type, must be one of [%s]".format(AssetMeta.ValueType.valStrings.mkString(",")))
  }

  // JSON view of this attribute (value_type is intentionally omitted, matching the original output).
  override def asJson: String = {
    Json.stringify(JsObject(Seq(
      "ID" -> JsNumber(id),
      "NAME" -> JsString(name),
      "PRIORITY" -> JsNumber(priority),
      "LABEL" -> JsString(label),
      "DESCRIPTION" -> JsString(description)
    )))
  }

  /** The typed value kind of this attribute. */
  def getValueType(): AssetMeta.ValueType = AssetMeta.ValueType(value_type)
  def valueType = getValueType

  /** Solr indexing key for this attribute. */
  def getSolrKey(): SolrKey = SolrKey(name, valueType, true, true, false)

  /** True when `value` can be parsed as this attribute's value type. */
  def validateValue(value: String): Boolean = typeStringValue(value).isDefined

  /**
   * Parses a raw string into a typed Solr value according to this attribute's value type.
   * Returns None when the string cannot be parsed as the declared type.
   */
  def typeStringValue(value: String): Option[SolrSingleValue] = getValueType() match {
    // NonFatal instead of Throwable: parse failures are expected, but fatal VM errors must propagate.
    case AssetMeta.ValueType.Integer =>
      try Some(SolrIntValue(Integer.parseInt(value))) catch { case NonFatal(_) => None }
    case AssetMeta.ValueType.Boolean =>
      try Some(SolrBooleanValue((new Truthy(value)).isTruthy)) catch { case NonFatal(_) => None }
    case AssetMeta.ValueType.Double =>
      try Some(SolrDoubleValue(java.lang.Double.parseDouble(value))) catch { case NonFatal(_) => None }
    case _ =>
      Some(SolrStringValue(value))
  }
}

object AssetMeta extends Schema with AnormAdapter[AssetMeta] with AssetMetaKeys {

  // FIX: the previous pattern """[A-Za-z0-9\\-_]+""" (raw string) contained `\\-_`, which Java's
  // regex engine parses inside a character class as the RANGE '\'..'_' — so literal hyphens were
  // rejected while '\', ']' and '^' were accepted, contradicting the validation message
  // "alpha numeric (and hyphens)". `\-` escapes the hyphen so it matches literally.
  private[this] val NameR = """[A-Za-z0-9\-_]+""".r.pattern.matcher(_)

  override val tableDef = table[AssetMeta]("asset_meta")
  on(tableDef)(a => declare(
    a.id is(autoIncremented,primaryKey),
    a.name is(unique),
    a.priority is(indexed)
  ))

  override def delete(a: AssetMeta): Int = inTransaction {
    afterDeleteCallback(a) {
      tableDef.deleteWhere(p => p.id === a.id)
    }
  }

  /** True when `name` is non-empty and made only of letters, digits, hyphens and underscores. */
  def isValidName(name: String): Boolean = {
    name != null && name.nonEmpty && NameR(name).matches
  }

  def findAll(): List[AssetMeta] = Cache.get(findByAllKey, inTransaction {
    from(tableDef)(s => select(s)).toList
  })

  def findById(id: Long) = Cache.get(findByIdKey(id), inTransaction {
    tableDef.lookup(id)
  })

  /**
   * Returns the attribute with this name, creating it (upper-cased, hidden priority -1) when absent.
   * NOTE(review): find-then-create is not atomic; concurrent callers may race — the unique index on
   * name makes one of them fail. TODO confirm this is acceptable to callers.
   */
  def findOrCreateFromName(name: String, valueType: ValueType = ValueType.String): AssetMeta = findByName(name).getOrElse {
    create(AssetMeta(
      name = name.toUpperCase,
      priority = -1,
      label = name.toLowerCase.capitalize,
      description = name,
      value_type = valueType.id
    ))
    findByName(name).get
  }

  override def get(a: AssetMeta) = findById(a.id).get

  /** Case-insensitive lookup by name (cached). */
  def findByName(name: String): Option[AssetMeta] = Cache.get(findByNameKey(name), inTransaction {
    tableDef.where(a =>
      a.name.toUpperCase === name.toUpperCase
    ).headOption
  })

  /** All attributes with priority > -1, ordered by priority (cached). */
  def getViewable(): List[AssetMeta] = Cache.get(findByViewableKey, inTransaction {
    from(tableDef)(a =>
      where(a.priority gt -1)
      select(a)
      orderBy(a.priority asc)
    ).toList
  })

  type ValueType = ValueType.Value
  object ValueType extends Enumeration {
    val String = Value(1,"STRING")
    val Integer = Value(2,"INTEGER")
    val Double = Value(3,"DOUBLE")
    val Boolean = Value(4,"BOOLEAN")

    def valStrings = values.map{_.toString}
    def valIds = values.map{_.id}

    // Solr dynamic-field suffix for each value type.
    val postFix = Map[ValueType,String](
      String -> "_meta_s",
      Integer -> "_meta_i",
      Double -> "_meta_d",
      Boolean -> "_meta_b"
    )
  }

  // DO NOT ADD ANYTHING TO THIS
  // DEPRECATED
  type Enum = Enum.Value
  object Enum extends Enumeration(1) {
    val ServiceTag = Value(1, "SERVICE_TAG")
    val ChassisTag = Value(2, "CHASSIS_TAG")
    val RackPosition = Value(3, "RACK_POSITION")
    val PowerPort = Value(4, "POWER_PORT")
    //val SwitchPort = Value(5, "SWITCH_PORT") Deprecated by id LldpPortIdValue
    val CpuCount = Value(6, "CPU_COUNT")
    val CpuCores = Value(7, "CPU_CORES")
    val CpuThreads = Value(8, "CPU_THREADS")
    val CpuSpeedGhz = Value(9, "CPU_SPEED_GHZ")
    val CpuDescription = Value(10, "CPU_DESCRIPTION")
    val MemorySizeBytes = Value(11, "MEMORY_SIZE_BYTES")
    val MemoryDescription = Value(12, "MEMORY_DESCRIPTION")
    val MemorySizeTotal = Value(13, "MEMORY_SIZE_TOTAL")
    val MemoryBanksTotal = Value(14, "MEMORY_BANKS_TOTAL")
    val NicSpeed = Value(15, "NIC_SPEED") // in bits
    val MacAddress = Value(16, "MAC_ADDRESS")
    val NicDescription = Value(17, "NIC_DESCRIPTION")
    val DiskSizeBytes = Value(18, "DISK_SIZE_BYTES")
    val DiskType = Value(19, "DISK_TYPE")
    val DiskDescription = Value(20, "DISK_DESCRIPTION")
    val DiskStorageTotal = Value(21, "DISK_STORAGE_TOTAL")
    val LldpInterfaceName = Value(22, "LLDP_INTERFACE_NAME")
    val LldpChassisName = Value(23, "LLDP_CHASSIS_NAME")
    val LldpChassisIdType = Value(24, "LLDP_CHASSIS_ID_TYPE")
    val LldpChassisIdValue = Value(25, "LLDP_CHASSIS_ID_VALUE")
    val LldpChassisDescription = Value(26, "LLDP_CHASSIS_DESCRIPTION")
    val LldpPortIdType = Value(27, "LLDP_PORT_ID_TYPE")
    val LldpPortIdValue = Value(28, "LLDP_PORT_ID_VALUE")
    val LldpPortDescription = Value(29, "LLDP_PORT_DESCRIPTION")
    val LldpVlanId = Value(30, "LLDP_VLAN_ID")
    val LldpVlanName = Value(31, "LLDP_VLAN_NAME")

    // DO NOT USE - Deprecated
    val NicName = Value(32, "INTERFACE_NAME")
    // DO NOT USE - Deprecated
    val NicAddress = Value(33, "INTERFACE_ADDRESS")
  }

  // Post enum fields, enum is not safe to extend with new values
  object DynamicEnum {
    val BaseDescription = AssetMeta.findOrCreateFromName("BASE_DESCRIPTION")
    val BaseProduct = AssetMeta.findOrCreateFromName("BASE_PRODUCT")
    val BaseVendor = AssetMeta.findOrCreateFromName("BASE_VENDOR")
    val BaseSerial = AssetMeta.findOrCreateFromName("BASE_SERIAL")
    def getValues(): Seq[AssetMeta] = {
      Seq(BaseDescription,BaseProduct,BaseVendor,BaseSerial)
    }
  }
}
box/collins
app/collins/models/AssetMeta.scala
Scala
apache-2.0
6,919
/*
 * Copyright (C) 2012-2014 Typesafe Inc. <http://www.typesafe.com>
 */

package com.qifun.statelessFuture
package test
package run
package become

import scala.language.postfixOps
import scala.concurrent._
import com.qifun.statelessFuture.Future
import scala.concurrent.duration._
import scala.concurrent.duration.Duration.Inf
import scala.collection._
import scala.runtime.NonLocalReturnControl
import scala.util.{ Try, Success, Failure }
import AutoStart._
import com.qifun.statelessFuture.test.Async.{ async, await, future }
import org.junit.Test
import com.qifun.statelessFuture.akka.FutureFactory
import _root_.akka.actor.{ ActorRef, ActorSystem, Props, Actor, Inbox }
import scala.concurrent.duration._

/**
 * Test actor that switches its behavior on command, exercising FutureFactory's
 * ability to express a multi-message protocol as a single stateless Future.
 */
class BecomeActor extends Actor with FutureFactory {

  // Each command installs a new behavior; `false` means the new behavior replaces
  // (rather than stacks on) the current one for the two future-driven cases.
  def receive = {
    case "become1" => context.become(FutureFactory.receiveUntilReturn(become1).get, false)
    case "become2" => context.become(become2)
    case "isSameSender" => context.become(FutureFactory.receiveUntilReturn(isSameSender).get, false)
  }

  // Waits for two messages and replies whether both came from the same sender.
  // NOTE(review): relies on `sender` being meaningful after each `await`; FutureFactory
  // presumably re-binds sender per received message — confirm against its implementation.
  private def isSameSender = Future[Unit] {
    nextMessage.await
    val sender1 = sender
    nextMessage.await
    val sender2 = sender
    sender ! (sender1 == sender2)
  }

  // Waits for two messages, then replies with a formatted summary of both.
  private def become1 = Future[Unit] {
    val message1 = nextMessage.await
    val message2 = nextMessage.await
    sender ! raw"message1=$message1 message2=$message2"
  }

  // Endless behavior: repeatedly reads two numeric string messages and replies with their sum.
  private def become2 = Future[Nothing] {
    while (true) {
      val message1 = nextMessage.await.asInstanceOf[String].toInt
      val message2 = nextMessage.await.asInstanceOf[String].toInt
      sender ! message1 + message2
    }
    // Only present to satisfy the Future[Nothing] result type; the loop above never exits.
    throw new IllegalStateException("Unreachable code!")
  }
}

/** End-to-end check of BecomeActor's behavior switching via Inbox round-trips. */
class BecomeSpec {
  @Test
  def doubleReceive() {
    val system = ActorSystem("helloakka")
    val doubleReceiveActor = system.actorOf(Props[BecomeActor], "greeter")
    val inbox = Inbox.create(system)

    // become1: two messages are combined into one formatted reply
    inbox.send(doubleReceiveActor, "become1")
    inbox.send(doubleReceiveActor, "hello")
    inbox.send(doubleReceiveActor, "world")
    inbox.receive(5.seconds) mustBe "message1=hello message2=world"

    // same inbox twice => same sender
    inbox.send(doubleReceiveActor, "isSameSender")
    inbox.send(doubleReceiveActor, "")
    inbox.send(doubleReceiveActor, "")
    inbox.receive(5.seconds) mustBe true

    // two different inboxes => different senders
    inbox.send(doubleReceiveActor, "isSameSender")
    inbox.send(doubleReceiveActor, "")
    val inbox2 = Inbox.create(system)
    inbox2.send(doubleReceiveActor, "")
    inbox2.receive(5.seconds) mustBe false

    // become2: numeric strings are summed
    inbox.send(doubleReceiveActor, "become2")
    inbox.send(doubleReceiveActor, "17")
    inbox.send(doubleReceiveActor, "33")
    inbox.receive(5.seconds) mustBe 50
  }
}
Atry/stateless-future-test
test/src/test/scala/com/qifun/statelessFuture/test/run/become/BecomeSpec.scala
Scala
bsd-3-clause
2,642
/*
 * Scala (https://www.scala-lang.org)
 *
 * Copyright EPFL and Lightbend, Inc.
 *
 * Licensed under Apache License 2.0
 * (http://www.apache.org/licenses/LICENSE-2.0).
 *
 * See the NOTICE file distributed with this work for
 * additional information regarding copyright ownership.
 */

package scala.tools
package reflect

import scala.reflect.{ClassTag, classTag}
import scala.reflect.api.{Mirror, TypeCreator, Universe => ApiUniverse}

// [Eugene++] Before 2.10 is released, I suggest we don't rely on automated type tag generation
// sure, it's convenient, but then refactoring reflection / reification becomes a pain
// `ClassTag` tags are fine, because they don't need a reifier to be generated

/**
 * Hand-rolled `TypeTag`s for commonly-used types, built against a caller-supplied
 * universe/mirror pair instead of relying on compile-time tag materialization.
 */
trait StdTags {
  // The universe and mirror against which all tags below are resolved.
  val u: ApiUniverse with Singleton
  val m: Mirror[u.type]

  // List[String] needs an applied type, so it cannot go through tagOfStaticClass.
  lazy val tagOfListOfString: u.TypeTag[List[String]] =
    u.TypeTag[List[String]](
      m,
      new TypeCreator {
        def apply[U <: ApiUniverse with Singleton](m: Mirror[U]): U # Type = {
          val u = m.universe
          u.appliedType(u.definitions.ListClass.toType, List(u.definitions.StringClass.toType))
        }
      })

  // Builds a tag for a statically-known, non-parameterized class by resolving its
  // runtime-class name through the target mirror.
  protected def tagOfStaticClass[T: ClassTag]: u.TypeTag[T] =
    u.TypeTag[T](
      m,
      new TypeCreator {
        def apply[U <: ApiUniverse with Singleton](m: Mirror[U]): U # Type =
          m.staticClass(classTag[T].runtimeClass.getName).toTypeConstructor.asInstanceOf[U # Type]
      })

  lazy val tagOfInt = u.TypeTag.Int
  lazy val tagOfString = tagOfStaticClass[String]
  lazy val tagOfFile = tagOfStaticClass[scala.tools.nsc.io.File]
  lazy val tagOfDirectory = tagOfStaticClass[scala.tools.nsc.io.Directory]
  lazy val tagOfThrowable = tagOfStaticClass[java.lang.Throwable]
  lazy val tagOfClassLoader = tagOfStaticClass[java.lang.ClassLoader]
  lazy val tagOfBigInt = tagOfStaticClass[BigInt]
  lazy val tagOfBigDecimal = tagOfStaticClass[BigDecimal]
  lazy val tagOfCalendar = tagOfStaticClass[java.util.Calendar]
  lazy val tagOfDate = tagOfStaticClass[java.util.Date]
}

/** StdTags resolved against the runtime universe. */
object StdRuntimeTags extends StdTags {
  val u: scala.reflect.runtime.universe.type = scala.reflect.runtime.universe
  val m = u.runtimeMirror(getClass.getClassLoader)
  // we need getClass.getClassLoader to support the stuff from scala-compiler.jar
}

/** StdTags resolved against a macro context's universe and mirror. */
abstract class StdContextTags extends StdTags {
  val tc: scala.reflect.macros.contexts.Context
  val u: tc.universe.type = tc.universe
  val m = tc.mirror
}
scala/scala
src/compiler/scala/tools/reflect/StdTags.scala
Scala
apache-2.0
2,427
package com.gx.simplefactory

import org.scalatest.{FlatSpec, Matchers}

/**
 * Copyright 2017 josephguan
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

/**
 * Verifies that the simple factory resolves each operator symbol to the
 * corresponding operation and that the operation computes correctly.
 */
class SimpleFactorySpect extends FlatSpec with Matchers {

  it should "create AddOperation" in {
    Operation("+").getResult(1, 2) should be(3)
  }

  it should "create SubOperation" in {
    Operation("-").getResult(1, 2) should be(-1)
  }

  it should "create MulOperation" in {
    Operation("*").getResult(1, 2) should be(2)
  }

  it should "create DivOperation" in {
    Operation("/").getResult(1, 2) should be(0.5)
  }
}
josephguan/scala-design-patterns
creational/simple-factory/src/test/scala/com/gx/simplefactory/SimpleFactorySpect.scala
Scala
apache-2.0
1,177
package controllers

import cms.dto.Entry
import cms.ContentManager
import play.api.data.Forms.list
import play.api.data.Forms.mapping
import play.api.data.Forms.nonEmptyText
import play.api.data.Forms.text
import play.api.data.Form

/**
 * Admin-only CRUD controller for CMS entries. Every action requires an
 * authenticated admin (SecuredAction + ensureAdmin) and falls back to the
 * index page when the requested entry does not exist.
 */
object Content extends LangAwareController {

  // Binds an Entry: a non-empty key, an EntryType enum value, and a list of
  // per-language content values.
  val entryForm = Form(
    mapping(
      "key" -> nonEmptyText,
      "entryType" -> helpers.Forms.enum(cms.dto.EntryType),
      "content" -> list(mapping(
        "lang" -> text,
        "value" -> text)(cms.dto.Content.apply)(cms.dto.Content.unapply)))(Entry.apply)(Entry.unapply))

  /** Lists all entries, sorted by key. */
  def index = SecuredAction { implicit request =>
    ensureAdmin {
      val user = userFromRequest(request)
      val entityList = ContentManager.all.sortWith(_.key < _.key)
      Ok(views.html.contents.index(entityList, user))
    }
  }

  /** Shows the creation form pre-filled with an empty HTML entry. */
  def create = SecuredAction { implicit request =>
    ensureAdmin {
      val user = userFromRequest(request)
      val entry = Entry("", cms.dto.EntryType.HTML, List.empty)
      Ok(views.html.contents.create(entryForm.fill(entry), user))
    }
  }

  /** Persists a new entry; re-renders the creation form on validation errors. */
  def save = SecuredAction { implicit request =>
    ensureAdmin {
      val user = userFromRequest(request)
      entryForm.bindFromRequest.fold(
        errors => BadRequest(views.html.contents.create(errors, user)),
        entry => {
          ContentManager.create(entry)
          Redirect(routes.Content.index)
        })
    }
  }

  /** Shows the edit form for `key`; redirects to the index when the entry is missing. */
  def edit(key: String) = SecuredAction { implicit request =>
    ensureAdmin {
      val user = userFromRequest(request)
      val entry = ContentManager.find(key)
      entry.map { entry =>
        Ok(views.html.contents.edit(key, entryForm.fill(entry), user))
      }.getOrElse {
        Redirect(routes.Content.index)
      }
    }
  }

  /**
   * Updates the entry identified by `key` from the submitted form.
   * The existing entry is only looked up to confirm it exists; the persisted
   * data comes entirely from the bound form. Missing entry => redirect to index.
   */
  def update(key: String) = SecuredAction { implicit request =>
    ensureAdmin {
      val user = userFromRequest(request)
      val entry = ContentManager.find(key)
      entry.map { entry =>
        entryForm.bindFromRequest.fold(
          errors => BadRequest(views.html.contents.edit(key, errors, user)),
          entry => {
            ContentManager.update(entry)
            Redirect(routes.Content.index)
          })
      }.getOrElse {
        Redirect(routes.Content.index)
      }
    }
  }

  /** Deletes the entry identified by `key` if it exists; always redirects to the index. */
  def delete(key: String) = SecuredAction { implicit request =>
    ensureAdmin {
      val user = userFromRequest(request)
      val entry = ContentManager.find(key)
      entry.map { entry =>
        ContentManager.remove(entry)
        Redirect(routes.Content.index)
      }.getOrElse {
        Redirect(routes.Content.index)
      }
    }
  }
}
lukaszbudnik/hackaton-portal
app/controllers/Content.scala
Scala
apache-2.0
2,605
package net.sourceforge.cilib.simulator import java.io.File object RunBasicShell extends App { val simulators = SimulatorShell.prepare(new File("xml/firefly.xml")) SimulatorShell.execute(simulators, new ProgressText(simulators.size)) }
felixamerbauer/firefly-simulator
src/test/scala/net/sourceforge/cilib/simulator/RunBasicShell.scala
Scala
gpl-3.0
242
package org.crashstars.akka.primes import akka.actor.Actor import org.crashstars.common.Logging /** * Created by navidad on 19/11/15. */ class PrimeListener extends Actor with Logging { override def receive = { case AggregateResult(finalResult) => { logInfo(s"Distinct primes numbers found : ${finalResult.size}") logInfo(s"Sample take : ${finalResult.take(10)}") context.system.shutdown() } } }
anavidad3/PoC-spark-scala-maven
src/main/scala/org/crashstars/akka/primes/PrimeListener.scala
Scala
apache-2.0
431
/*********************************************************************** * Copyright (c) 2013-2019 Commonwealth Computer Research, Inc. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Apache License, Version 2.0 * which accompanies this distribution and is available at * http://www.opensource.org/licenses/apache2.0.php. ***********************************************************************/ package org.locationtech.geomesa.features.kryo package impl import java.io.InputStream import com.esotericsoftware.kryo.io.Input import org.locationtech.geomesa.features.ScalaSimpleFeature.{LazyAttributeReader, LazyImmutableSimpleFeature, LazyMutableSimpleFeature, LazyUserDataReader} import org.locationtech.geomesa.features.kryo.impl.KryoFeatureDeserialization.KryoAttributeReader import org.locationtech.geomesa.features.kryo.impl.LazyDeserialization._ import org.locationtech.geomesa.features.kryo.serialization.KryoUserDataSerialization import org.locationtech.geomesa.utils.collection.IntBitSet import org.opengis.feature.simple.SimpleFeature object LazyDeserialization { /** * Creates mutable features, lazily evaluated */ trait MutableLazyDeserialization extends LazyDeserialization { override protected def createFeature( id: String, reader: LazyAttributeReader, userData: LazyUserDataReader): SimpleFeature = { new LazyMutableSimpleFeature(out, id, reader, userData) } } /** * Creates immutable features, lazily evaluated */ trait ImmutableLazyDeserialization extends LazyDeserialization { override protected def createFeature( id: String, reader: LazyAttributeReader, userData: LazyUserDataReader): SimpleFeature = { new LazyImmutableSimpleFeature(out, id, reader, userData) } } /** * Attribute reader for v3 serialization * * @param readers readers * @param nulls null set * @param count number of attributes * @param bytes raw serialized bytes * @param offset offset into the byte array * @param length number of valid bytes in 
the byte array */ class LazyShortReaderV3( readers: Array[KryoAttributeReader], nulls: IntBitSet, count: Int, bytes: Array[Byte], offset: Int, length: Int ) extends LazyAttributeReader { override def read(i: Int): AnyRef = { if (i >= count || nulls.contains(i)) { null } else { // read the offset and go to the position for reading // we create a new kryo input each time, so that position and offset are not affected by other reads // this should be thread-safe, as long as the same attribute is not being read in multiple threads // (since kryo can mutate the bytes during read) val input = new Input(bytes, offset + (2 * i), length - (2 * i)) input.setPosition(offset + input.readShortUnsigned()) readers(i).apply(input) } } } /** * Attribute reader for v3 serialization * * @param readers readers * @param nulls null set * @param count number of attributes * @param bytes raw serialized bytes * @param offset offset into the byte array * @param length number of valid bytes in the byte array */ class LazyIntReaderV3( readers: Array[KryoAttributeReader], nulls: IntBitSet, count: Int, bytes: Array[Byte], offset: Int, length: Int ) extends LazyAttributeReader { override def read(i: Int): AnyRef = { if (i >= count || nulls.contains(i)) { null } else { // read the offset and go to the position for reading // we create a new kryo input each time, so that position and offset are not affected by other reads // this should be thread-safe, as long as the same attribute is not being read in multiple threads // (since kryo can mutate the bytes during read) val input = new Input(bytes, offset + (4 * i), length - (4 * i)) input.setPosition(offset + input.readInt()) readers(i).apply(input) } } } /** * User data reader for v3 serialization * * @param count number of attributes * @param bytes raw serialized bytes * @param offset offset into the byte array * @param length number of valid bytes in the byte array */ class LazyShortUserDataReaderV3(count: Int, bytes: Array[Byte], offset: Int, 
length: Int) extends LazyUserDataReader { override def read(): java.util.Map[AnyRef, AnyRef] = { // read the offset and go to the position for reading // we create a new kryo input each time, so that position and offset are not affected by other reads // this should be thread-safe, as long as the user data is not being read in multiple threads // (since kryo can mutate the bytes during read) val input = new Input(bytes, offset + (2 * count), length - (2 * count)) // read the offset and go to the position for reading input.setPosition(offset + input.readShortUnsigned()) KryoUserDataSerialization.deserialize(input) } } /** * User data reader for v3 serialization * * @param count number of attributes * @param bytes raw serialized bytes * @param offset offset into the byte array * @param length number of valid bytes in the byte array */ class LazyIntUserDataReaderV3(count: Int, bytes: Array[Byte], offset: Int, length: Int) extends LazyUserDataReader { override def read(): java.util.Map[AnyRef, AnyRef] = { // read the offset and go to the position for reading // we create a new kryo input each time, so that position and offset are not affected by other reads // this should be thread-safe, as long as the user data is not being read in multiple threads // (since kryo can mutate the bytes during read) val input = new Input(bytes, offset + (4 * count), length - (4 * count)) // read the offset and go to the position for reading input.setPosition(offset + input.readInt()) KryoUserDataSerialization.deserialize(input) } } /** * Attribute reader for v2 serialization * * @param readers readers * @param offsets offsets for each attribute * @param bytes raw serialized bytes * @param length number of valid bytes in the byte array */ class LazyReaderV2(readers: Array[Input => AnyRef], offsets: Array[Int], bytes: Array[Byte], length: Int) extends LazyAttributeReader { override def read(i: Int): AnyRef = { val offset = offsets(i) if (offset == -1) { null } else { // we create a new 
kryo input each time, so that position and offset are not affected by other reads // this should be thread-safe, as long as the same attribute is not being read in multiple threads // (since kryo can mutate the bytes during read) readers(i)(new Input(bytes, offset, length - offset)) } } } /** * User data reader for v2 serialization * * @param bytes raw serialized bytes * @param userDataOffset offset to the serialized user data * @param length number of valid bytes in the byte array */ class LazyUserDataReaderV2(bytes: Array[Byte], userDataOffset: Int, length: Int) extends LazyUserDataReader { override def read(): java.util.Map[AnyRef, AnyRef] = { // we create a new kryo input each time, so that position and offset are not affected by other reads // this should be thread-safe, as long as the user data is not being read in multiple threads // (since kryo can mutate the bytes during read) KryoUserDataSerialization.deserialize(new Input(bytes, userDataOffset, length - userDataOffset)) } } /** * Reader for serialization without user data */ case object WithoutUserDataReader extends LazyUserDataReader { override def read(): java.util.Map[AnyRef, AnyRef] = new java.util.HashMap[AnyRef, AnyRef](1) } } /** * Wraps the input but defers deserialization until an attribute is required */ trait LazyDeserialization extends KryoFeatureDeserialization { override def deserialize(bytes: Array[Byte]): SimpleFeature = deserialize("", bytes, 0, bytes.length) override def deserialize(id: String, bytes: Array[Byte]): SimpleFeature = deserialize(id, bytes, 0, bytes.length) override def deserialize(bytes: Array[Byte], offset: Int, length: Int): SimpleFeature = deserialize("", bytes, offset, length) override def deserialize(id: String, bytes: Array[Byte], offset: Int, length: Int): SimpleFeature = { bytes(offset) match { case KryoFeatureSerializer.Version3 => readFeatureV3(id, bytes, offset, length) case KryoFeatureSerializer.Version2 => readFeatureV2(id, bytes, offset, length) case b => 
throw new IllegalArgumentException(s"Can't process features serialized with version: $b") } } // TODO read into a byte array so we can lazily evaluate it // user data is tricky here as we don't know the length... override def deserialize(in: InputStream): SimpleFeature = throw new NotImplementedError override def deserialize(id: String, in: InputStream): SimpleFeature = throw new NotImplementedError protected def createFeature(id: String, reader: LazyAttributeReader, userData: LazyUserDataReader): SimpleFeature private def readFeatureV3(id: String, bytes: Array[Byte], offset: Int, length: Int): SimpleFeature = { // skip the version byte, which we've already read val input = new Input(bytes, offset + 1, length - 1) val metadata = Metadata(input) // read count, size, nulls, etc // we should now be positioned to read the feature id val finalId = if (withoutId) { id } else { input.readString() } val remaining = input.limit - metadata.offset var reader: LazyAttributeReader = null var userData: LazyUserDataReader = null if (metadata.size == 2) { reader = new LazyShortReaderV3(readers, metadata.nulls, metadata.count, bytes, metadata.offset, remaining) userData = if (withoutUserData) { WithoutUserDataReader } else { new LazyShortUserDataReaderV3(metadata.count, bytes, metadata.offset, remaining) } } else { reader = new LazyIntReaderV3(readers, metadata.nulls, metadata.count, bytes, metadata.offset, remaining) userData = if (withoutUserData) { WithoutUserDataReader } else { new LazyIntUserDataReaderV3(metadata.count, bytes, metadata.offset, remaining) } } createFeature(finalId, reader, userData) } private def readFeatureV2(id: String, bytes: Array[Byte], offset: Int, length: Int): SimpleFeature = { val input = new Input(bytes, offset + 1, length - 1) // skip the version byte // read the start of the offsets, then the feature id val offsets = Array.ofDim[Int](readersV2.length) val offsetStarts = offset + input.readInt() val finalId = if (withoutId) { id } else { 
input.readString() } // now read our offsets input.setPosition(offsetStarts) // set to offsets start var i = 0 while (i < offsets.length && input.position < input.limit) { offsets(i) = offset + input.readInt(true) i += 1 } if (i < offsets.length) { // attributes have been added to the sft since this feature was serialized do { offsets(i) = -1; i += 1 } while (i < offsets.length) } val userDataOffset = input.position() val reader = new LazyReaderV2(readersV2, offsets, bytes, length) val userData = if (withoutUserData) { WithoutUserDataReader } else { new LazyUserDataReaderV2(bytes, userDataOffset, length) } createFeature(finalId, reader, userData) } }
elahrvivaz/geomesa
geomesa-features/geomesa-feature-kryo/src/main/scala/org/locationtech/geomesa/features/kryo/impl/LazyDeserialization.scala
Scala
apache-2.0
11,724
/* * Copyright 2014-2021 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.atlas.eval.graph import com.netflix.atlas.chart.model.Layout import com.netflix.atlas.chart.model.VisionType case class ImageFlags( title: Option[String], width: Int, height: Int, zoom: Double, axes: Map[Int, Axis], axisPerLine: Boolean, showLegend: Boolean, showLegendStats: Boolean, showOnlyGraph: Boolean, vision: VisionType, palette: String, theme: String, layout: Layout )
brharrington/atlas
atlas-eval/src/main/scala/com/netflix/atlas/eval/graph/ImageFlags.scala
Scala
apache-2.0
1,028
package st import org.scalatest.WordSpec class WordSpecTest extends WordSpec { "A Set" when { "empty" should { "have size 0" in { assert(Set.empty.isEmpty) } "produce NoSuchElementException when head is invoked" in { intercept[NoSuchElementException] { Set.empty.head } } } } }
objektwerks/scala.test
src/test/scala/st/WordSpecTest.scala
Scala
mit
350
/* * Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com> */ package com.lightbend.lagom.internal.persistence.jdbc import java.util.UUID import akka.Done import akka.actor.ActorSystem import akka.persistence.query.NoOffset import akka.persistence.query.Offset import akka.persistence.query.TimeBasedUUID import akka.persistence.query.{ Sequence => AkkaSequence } import akka.util.Timeout import com.lightbend.lagom.internal.persistence.cluster.ClusterStartupTask import com.lightbend.lagom.spi.persistence.OffsetDao import com.lightbend.lagom.spi.persistence.OffsetStore import com.typesafe.config.Config import scala.concurrent.duration.FiniteDuration import scala.concurrent.ExecutionContext import scala.concurrent.Future import scala.util.Try /** * INTERNAL API */ private[lagom] trait SlickOffsetStoreConfiguration { def tableName: String def schemaName: Option[String] def idColumnName: String def tagColumnName: String def sequenceOffsetColumnName: String def timeUuidOffsetColumnName: String def minBackoff: FiniteDuration def maxBackoff: FiniteDuration def randomBackoffFactor: Double def globalPrepareTimeout: FiniteDuration def role: Option[String] } /** * INTERNAL API */ private[lagom] abstract class AbstractSlickOffsetStoreConfiguration(config: Config) extends SlickOffsetStoreConfiguration { private val cfg = config.getConfig("lagom.persistence.read-side.jdbc.tables.offset") val tableName: String = cfg.getString("tableName") val schemaName: Option[String] = Option(cfg.getString("schemaName")).filter(_.trim != "") private val columnsCfg = cfg.getConfig("columnNames") val idColumnName: String = columnsCfg.getString("readSideId") val tagColumnName: String = columnsCfg.getString("tag") val sequenceOffsetColumnName: String = columnsCfg.getString("sequenceOffset") val timeUuidOffsetColumnName: String = columnsCfg.getString("timeUuidOffset") override def toString: String = s"OffsetTableConfiguration($tableName,$schemaName)" } /** * INTERNAL API */ private[lagom] 
class SlickOffsetStore( system: ActorSystem, val slick: SlickProvider, config: SlickOffsetStoreConfiguration ) extends OffsetStore { case class OffsetRow(id: String, tag: String, sequenceOffset: Option[Long], timeUuidOffset: Option[String]) import slick.profile.api._ import system.dispatcher override def prepare(eventProcessorId: String, tag: String): Future[SlickOffsetDao] = { runPreparations(eventProcessorId, tag).map(offset => new SlickOffsetDao(this, eventProcessorId, tag, offset)) } private class OffsetStore(_tag: Tag) extends Table[OffsetRow](_tag, _schemaName = config.schemaName, _tableName = config.tableName) { def * = (id, tag, sequenceOffset, timeUuidOffset) <> (OffsetRow.tupled, OffsetRow.unapply) // Technically these two columns shouldn't have the primary key options, but they need it to work around // https://github.com/slick/slick/issues/966 val id = column[String](config.idColumnName, O.Length(255, varying = true), O.PrimaryKey) val tag = column[String](config.tagColumnName, O.Length(255, varying = true), O.PrimaryKey) val sequenceOffset = column[Option[Long]](config.sequenceOffsetColumnName) val timeUuidOffset = column[Option[String]](config.timeUuidOffsetColumnName, O.Length(36, varying = false)) val pk = primaryKey(s"${config.tableName}_pk", (id, tag)) } private val offsets = TableQuery[OffsetStore] private val startupTask = if (slick.autoCreateTables) { Some( ClusterStartupTask( system, "slickOffsetStorePrepare", () => createTables, config.globalPrepareTimeout, config.role, config.minBackoff, config.maxBackoff, config.randomBackoffFactor ) ) } else None def runPreparations(eventProcessorId: String, tag: String): Future[Offset] = { implicit val timeout = Timeout(config.globalPrepareTimeout) for { _ <- startupTask.fold(Future.successful[Done](Done))(_.askExecute()) offset <- slick.db.run(getOffsetQuery(eventProcessorId, tag)) } yield offset } def updateOffsetQuery(id: String, tag: String, offset: Offset) = { 
offsets.insertOrUpdate(queryToOffsetRow(id, tag, offset)) } private def queryToOffsetRow(id: String, tag: String, offset: Offset): OffsetRow = { offset match { case AkkaSequence(value) => OffsetRow(id, tag, Some(value), None) case TimeBasedUUID(value) => OffsetRow(id, tag, None, Some(value.toString)) case NoOffset => OffsetRow(id, tag, None, None) } } private def getOffsetQuery(id: String, tag: String): DBIOAction[Offset, NoStream, Effect.Read] = { (for { offset <- offsets if offset.id === id && offset.tag === tag } yield { offset }).result.headOption.map(offsetRowToOffset) } private def offsetRowToOffset(row: Option[OffsetRow]): Offset = { row .flatMap( row => row.sequenceOffset .map(AkkaSequence) .orElse( row.timeUuidOffset .flatMap(uuid => Try(UUID.fromString(uuid)).toOption) .filter(_.version == 1) .map(TimeBasedUUID) ) ) .getOrElse(NoOffset) } private def createTables() = { // The schema will be wrong due to our work around for https://github.com/slick/slick/issues/966 above, so need to // remove the primary key declarations from those columns val statements = offsets.schema.createStatements.map(_.replace(" PRIMARY KEY,", ",")).toSeq slick.db.run( slick .createTable(statements, slick.tableExists(config.schemaName, config.tableName)) .map(_ => Done.getInstance()) ) } } private[lagom] class SlickOffsetDao( slickOffsetStore: SlickOffsetStore, readSideId: String, tag: String, override val loadedOffset: Offset )(implicit ec: ExecutionContext) extends OffsetDao { override def saveOffset(offset: Offset): Future[Done] = { slickOffsetStore.slick.db.run( slickOffsetStore .updateOffsetQuery(readSideId, tag, offset) .map(_ => Done.getInstance()) ) } def updateOffsetQuery(offset: Offset) = { slickOffsetStore.updateOffsetQuery(readSideId, tag, offset) } }
rcavalcanti/lagom
persistence-jdbc/core/src/main/scala/com/lightbend/lagom/internal/persistence/jdbc/SlickOffsetStore.scala
Scala
apache-2.0
6,454
package com.twitter.finagle.netty3 import com.twitter.finagle._ import com.twitter.finagle.channel.{ ChannelRequestStatsHandler, ChannelStatsHandler, WriteCompletionTimeoutHandler } import com.twitter.finagle.server.Listener import com.twitter.finagle.ssl.{Engine, SslShutdownHandler} import com.twitter.finagle.stats.{ServerStatsReceiver, NullStatsReceiver, StatsReceiver} import com.twitter.finagle.transport.{ChannelTransport, Transport} import com.twitter.finagle.util.{DefaultLogger, DefaultMonitor, DefaultTimer} import com.twitter.util.{CloseAwaitably, Duration, Future, Monitor, Promise, Timer, Time} import java.net.SocketAddress import java.util.IdentityHashMap import java.util.concurrent.atomic.AtomicLong import java.util.logging.{Logger, Level} import org.jboss.netty.bootstrap.ServerBootstrap import org.jboss.netty.channel._ import org.jboss.netty.channel.group.{ ChannelGroup, ChannelGroupFuture, ChannelGroupFutureListener, DefaultChannelGroup, DefaultChannelGroupFuture } import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory import org.jboss.netty.handler.ssl._ import org.jboss.netty.handler.timeout.{ReadTimeoutException, ReadTimeoutHandler} import scala.collection.JavaConverters._ /** * Netty3 TLS configuration. * * @param newEngine Creates a new SSL engine */ case class Netty3ListenerTLSConfig(newEngine: () => Engine) object Netty3Listener { val channelFactory: ServerChannelFactory = new NioServerSocketChannelFactory(Executor, Executor) { override def releaseExternalResources() = () // no-op } /** * Class Closer implements channel tracking and semi-graceful closing * of this group of channels. */ private class Closer(timer: Timer) { val activeChannels = new DefaultChannelGroup private implicit val implicitTimer = timer /** * Close the channels managed by this Closer. Closer * * 1. Closes the `serverCh`, preventing new connections * from being created; * 2. 
Asks the service dispatchers associated with each * managed channel to drain itself * 3. Waiting for at most `grace`-duration, forcibly closes * remaining channels. * * At the conclusion of this, the bootstrap is released. */ def close(bootstrap: ServerBootstrap, serverCh: Channel, deadline: Time): Future[Unit] = { // According to NETTY-256, the following sequence of operations // has no race conditions. // // - close the server socket (awaitUninterruptibly) // - close all open channels (awaitUninterruptibly) // - releaseExternalResources // // We modify this a little bit, to allow for graceful draining, // closing open channels only after the grace period. // // The next step here is to do a half-closed socket: we want to // suspend reading, but not writing to a socket. This may be // important for protocols that do any pipelining, and may // queue in their codecs. // On cursory inspection of the relevant Netty code, this // should never block (it is little more than a close() syscall // on the FD). serverCh.close().awaitUninterruptibly() // At this point, no new channels may be created; drain existing // ones. val snap = activeChannels.asScala val closing = new DefaultChannelGroupFuture( activeChannels, snap map(_.getCloseFuture) asJava) val p = new Promise[Unit] closing.addListener(new ChannelGroupFutureListener { def operationComplete(f: ChannelGroupFuture) { p.setValue(()) } }) p.within(deadline - Time.now) transform { _ => activeChannels.close() // Force close any remaining connections. Don't wait for success. bootstrap.releaseExternalResources() Future.Done } } } def addTlsToPipeline(pipeline: ChannelPipeline, newEngine: () => Engine) { val engine = newEngine() engine.self.setUseClientMode(false) engine.self.setEnableSessionCreation(true) val handler = new SslHandler(engine.self) // Certain engine implementations need to handle renegotiation internally, // as Netty's TLS protocol parser implementation confuses renegotiation and // notification events. 
Renegotiation will be enabled for those Engines with // a true handlesRenegotiation value. handler.setEnableRenegotiation(engine.handlesRenegotiation) pipeline.addFirst("ssl", handler) // Netty's SslHandler does not provide SSLEngine implementations any hints that they // are no longer needed (namely, upon disconnection.) Since some engine implementations // make use of objects that are not managed by the JVM's memory manager, we need to // know when memory can be released. The SslShutdownHandler will invoke the shutdown // method on implementations that define shutdown(): Unit. pipeline.addFirst( "sslShutdown", new SslShutdownHandler(engine) ) } } /** * A listener using Netty3 which is given a ChannelPipelineFactory * that yields ``Out``-typed upstream messages and accepts * ``In``-typed downstream messages. * * @tparam Out the type of output messages * * @tparam In the type of input messages * * @param pipelineFactory The pipeline factory for encoding input * messages and decoding output messages. * * @param channelSnooper Use the given `ChannelSnooper` to log low * level channel activity. * * @param channelFactory A netty3 `ChannelFactory` used to bootstrap * the server's listening channel. * * @param bootstrapOptions Additional options for Netty's * `ServerBootstrap` * * @param channelMaxIdleTime The maximum idle time of any channel. * * @param channelMaxLifeTime The maximum life time of any channel. * * @param channelReadTimeout Channels are given this much time to * read a request. * * @param channelWriteCompletionTimeout Channels are given this much * time to complete a write. * * @param tlsConfig When present, SSL is used to provide session security. 
*/ case class Netty3Listener[In, Out]( name: String, pipelineFactory: ChannelPipelineFactory, channelSnooper: Option[ChannelSnooper] = None, channelFactory: ServerChannelFactory = Netty3Listener.channelFactory, bootstrapOptions: Map[String, Object] = Map( "soLinger" -> (0: java.lang.Integer), "reuseAddress" -> java.lang.Boolean.TRUE, "child.tcpNoDelay" -> java.lang.Boolean.TRUE ), channelMaxIdleTime: Duration = Duration.Top, channelMaxLifeTime: Duration = Duration.Top, channelReadTimeout: Duration = Duration.Top, channelWriteCompletionTimeout: Duration = Duration.Top, tlsConfig: Option[Netty3ListenerTLSConfig] = None, timer: Timer = DefaultTimer.twitter, nettyTimer: org.jboss.netty.util.Timer = DefaultTimer, statsReceiver: StatsReceiver = ServerStatsReceiver, monitor: Monitor = DefaultMonitor, logger: java.util.logging.Logger = DefaultLogger ) extends Listener[In, Out] { import Netty3Listener._ private[this] val statsHandlers = new IdentityHashMap[StatsReceiver, ChannelHandler] // TODO: These gauges will stay around forever. It's // fine, but it would be nice to clean them up. def channelStatsHandler(statsReceiver: StatsReceiver) = synchronized { if (!(statsHandlers containsKey statsReceiver)) { val nconn = new AtomicLong(0) statsReceiver.provideGauge("connections") { nconn.get() } statsHandlers.put(statsReceiver, new ChannelStatsHandler(statsReceiver, nconn)) } statsHandlers.get(statsReceiver) } def newServerPipelineFactory(statsReceiver: StatsReceiver, newBridge: () => ChannelHandler) = new ChannelPipelineFactory { def getPipeline() = { val pipeline = pipelineFactory.getPipeline() for (channelSnooper <- channelSnooper) pipeline.addFirst("channelLogger", channelSnooper) if (statsReceiver ne NullStatsReceiver) pipeline.addFirst("channelStatsHandler", channelStatsHandler(statsReceiver)) // Apply read timeouts *after* request decoding, preventing // death from clients trying to DoS by slowly trickling in // bytes to our (accumulating) codec. 
if (channelReadTimeout < Duration.Top) { val (timeoutValue, timeoutUnit) = channelReadTimeout.inTimeUnit pipeline.addLast( "readTimeout", new ReadTimeoutHandler(nettyTimer, timeoutValue, timeoutUnit)) } if (channelWriteCompletionTimeout < Duration.Top) { pipeline.addLast( "writeCompletionTimeout", new WriteCompletionTimeoutHandler(timer, channelWriteCompletionTimeout)) } for (Netty3ListenerTLSConfig(newEngine) <- tlsConfig) addTlsToPipeline(pipeline, newEngine) if (statsReceiver ne NullStatsReceiver) { pipeline.addLast( "channelRequestStatsHandler", new ChannelRequestStatsHandler(statsReceiver)) } pipeline.addLast("finagleBridge", newBridge()) pipeline } } def listen(addr: SocketAddress)(serveTransport: Transport[In, Out] => Unit): ListeningServer = new ListeningServer with CloseAwaitably { val scopedStatsReceiver = statsReceiver match { case ServerStatsReceiver => statsReceiver.scope(ServerRegistry.nameOf(addr) getOrElse name) case sr => sr } val closer = new Closer(timer) val newBridge = () => new ServerBridge( serveTransport, monitor, logger, scopedStatsReceiver, closer.activeChannels) val bootstrap = new ServerBootstrap(channelFactory) bootstrap.setOptions(bootstrapOptions.asJava) bootstrap.setPipelineFactory( newServerPipelineFactory(scopedStatsReceiver, newBridge)) val ch = bootstrap.bind(addr) def closeServer(deadline: Time) = closeAwaitably { closer.close(bootstrap, ch, deadline) } def boundAddress = ch.getLocalAddress() } } /** * Bridges a channel (pipeline) onto a transport. This must be * installed as the last handler. 
*/ private[netty3] class ServerBridge[In, Out]( serveTransport: Transport[In, Out] => Unit, monitor: Monitor, log: Logger, statsReceiver: StatsReceiver, channels: ChannelGroup ) extends SimpleChannelHandler { private[this] def severity(exc: Throwable) = exc match { case _: java.nio.channels.ClosedChannelException | _: javax.net.ssl.SSLException | _: ReadTimeoutException | _: WriteTimedOutException | _: javax.net.ssl.SSLException => Level.FINEST case e: java.io.IOException if ( e.getMessage == "Connection reset by peer" || e.getMessage == "Broken pipe" || e.getMessage == "Connection timed out" || e.getMessage == "No route to host" ) => Level.FINEST case _ => Level.WARNING } override def channelOpen(ctx: ChannelHandlerContext, e: ChannelStateEvent) { val channel = e.getChannel channels.add(channel) val transport = new ChannelTransport[In, Out](channel) serveTransport(transport) super.channelOpen(ctx, e) } override def exceptionCaught(ctx: ChannelHandlerContext, e: ExceptionEvent) { val cause = e.getCause monitor.handle(cause) cause match { case e: ReadTimeoutException => statsReceiver.counter("read_timeout").incr() case e: WriteTimedOutException => statsReceiver.counter("write_timeout").incr() case _ => () } val msg = "Unhandled exception in connection with " + e.getChannel.getRemoteAddress.toString + " , shutting down connection" log.log(severity(cause), msg, cause) if (e.getChannel.isOpen) Channels.close(e.getChannel) } }
joshbedo/finagle
finagle-core/src/main/scala/com/twitter/finagle/netty3/server.scala
Scala
apache-2.0
11,790
package com.gizwits.rabbitmq import java.util.concurrent.atomic.AtomicInteger import com.rabbitmq.client._ import org.apache.commons.lang.StringUtils import org.apache.spark.Logging import org.apache.spark.storage.StorageLevel import org.apache.spark.streaming.StreamingContext import org.apache.spark.streaming.dstream.ReceiverInputDStream import org.apache.spark.streaming.receiver.Receiver import scala.util._ class RabbitMQInputDStream( @transient ssc_ : StreamingContext, rabbitMQQueueName: Option[String], rabbitMQHost: String, rabbitMQPort: Int, virtualhost: String, username: String, password: String, exchangeName: Option[String], routingKeys: Seq[String], DirectExchangeType: Option[String], ack: Boolean, autoDelete: Boolean, prefetchCount: Int, streamingtime: Int, storageLevel: StorageLevel ) extends ReceiverInputDStream[String](ssc_) with Logging { override def getReceiver(): Receiver[String] = { val DefaultRabbitMQPort = 5672 new RabbitMQReceiver( rabbitMQQueueName, Some(rabbitMQHost).getOrElse("localhost"), Some(rabbitMQPort).getOrElse(DefaultRabbitMQPort), virtualhost, username, password, exchangeName, routingKeys, DirectExchangeType.getOrElse("direct"), ack, autoDelete, prefetchCount, streamingtime, storageLevel) } } class RabbitMQReceiver(rabbitMQQueueName: Option[String], rabbitMQHost: String, rabbitMQPort: Int, virtualhost: String, username: String, password: String, exchangeName: Option[String], routingKeys: Seq[String], DirectExchangeType: String, ack: Boolean, autoDelete: Boolean, prefetchCount: Int, streamingtime: Int, storageLevel: StorageLevel) extends Receiver[String](storageLevel) with Logging { private val count: AtomicInteger = new AtomicInteger(0) def onStart() { implicit val akkaSystem = akka.actor.ActorSystem() getConnectionAndChannel match { case Success((connection: Connection, channel: Channel)) => receive(connection, channel, ack) case Failure(f) => log.error("Could not connect"); restart("Could not connect", f) } } def onStop() { // 
There is nothing much to do as the thread calling receive() // is designed to stop by itself isStopped() returns false } /** Create a socket connection and receive data until receiver is stopped */ private def receive(connection: Connection, channel: Channel, ack: Boolean) { val queueName = !routingKeys.isEmpty match { case true => { if (prefetchCount > 0) { channel.basicQos(prefetchCount) } // exchangeName 存在 会报错 ,比如使用 amq.topic // channel.exchangeDeclare(exchangeName.get, DirectExchangeType) channel.exchangeDeclarePassive(exchangeName.get) channel.queueDeclare(rabbitMQQueueName.get, false, false, autoDelete, null) for (routingKey: String <- routingKeys) { channel.queueBind(rabbitMQQueueName.get, exchangeName.get, routingKey) } rabbitMQQueueName.get } case false => { // channel.queueDeclare(rabbitMQQueueName.get, false, false, false, new util.HashMap(0)) rabbitMQQueueName.get } } log.info("RabbitMQ Input waiting for messages") val consumer: QueueingConsumer = new QueueingConsumer(channel) channel.basicConsume(queueName, ack, consumer) while (!isStopped) { if (count.get() < prefetchCount) { val delivery: QueueingConsumer.Delivery = consumer.nextDelivery val body = new String(delivery.getBody) if (StringUtils.isNotEmpty(body)) { store(body) } if (!ack) { channel.basicAck(delivery.getEnvelope().getDeliveryTag(), ack) } count.incrementAndGet() } else { Thread.sleep((streamingtime) * 1000) count.set(0) } } log.info("rabbitmq streaming it has been stopped ...............") channel.close connection.close restart("Trying to connect again............") } private def getConnectionAndChannel: Try[(Connection, Channel)] = { for { connection: Connection <- Try(getConnectionFactory.newConnection()) channel: Channel <- Try(connection.createChannel) } yield { (connection, channel) } } private def getConnectionFactory: ConnectionFactory = { val factory: ConnectionFactory = new ConnectionFactory if (StringUtils.isNotEmpty(rabbitMQHost)) { factory.setHost(rabbitMQHost) } if 
(rabbitMQPort != 0) { factory.setPort(rabbitMQPort) } factory.setConnectionTimeout(1000) if (StringUtils.isNotEmpty(virtualhost)) { factory.setVirtualHost(virtualhost) } if (StringUtils.isNotEmpty(username)) { factory.setUsername(username) } if (StringUtils.isNotEmpty(password)) { factory.setPassword(password) } factory } }
Bestfeel/spark-rabbitmq-stream
src/main/scala/com/gizwits/rabbitmq/RabbitMQInputDStream.scala
Scala
apache-2.0
5,675
package poly.util.cliconfig import scala.collection._ import scala.collection.JavaConversions._ /** * Provides utilities to access command-line interface configurations. * @author Tongfei Chen (ctongfei@gmail.com). */ object CliConfig extends DefaultMap[String, String] { override def apply(key: String) = System.getProperties.getProperty(key) def get(key: String) = Option(System.getProperty(key)) def iterator = asScalaSet(System.getProperties.entrySet()).iterator.map(e => e.getKey.toString → e.getValue.toString) }
ctongfei/poly-util
src/main/scala/poly/util/cliconfig/CliConfig.scala
Scala
mit
536
/* * Copyright (C) 2013 Alcatel-Lucent. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Licensed to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package molecule.examples.net import molecule._ import molecule.net._ import molecule.io._ import molecule.utils.{ encode, decode } import molecule.utils.Unsigned.unsigned import molecule.parsing.Parser import java.nio.{ ByteBuffer, CharBuffer } // Needed to convert from java array to scala array such that flatten works import scala.collection.JavaConversions._ /** * Tested only with windows Telnet terminal. 
*/ abstract class TelnetCharAdapter[R: Message](ptype: ProcessType1x1[Char, String, R]) extends ProcessType1x1[ByteBuffer, ByteBuffer, R] { import TelnetCharAdapter._ def main(in: Input[ByteBuffer], out: Output[ByteBuffer]) = //out.write(doEchoCmd) >> //out.write(dontLinemodeCmd) >> handover { ptype( in.parse(telnetMsg).collect { case Character(c) => c }, out.map(encode("US-ASCII")).map { s: String => CharBuffer.wrap(s) } ) } } object TelnetCharAdapter { val IAC = 255.toByte import parsers.bytebuffer._ import parsers.ascii abstract class TelnetMsg case class Command(operation: Byte, option: Byte) extends TelnetMsg { override def toString() = "Command(" + unsigned(operation) + "," + unsigned(option) + ")" } case class Character(c: Char) extends TelnetMsg lazy val telnetMsg: Parser[ByteBuffer, TelnetMsg] = command | char val command = (IAC ~ byteArray(2)) ^^ { case _ ~ arr => Command(arr(0), arr(1)) } val char = ascii.anyChar ^^ Character } abstract class TelnetLineAdapter[R: Message](ptype: ProcessType1x1[String, String, R]) extends ProcessType1x1[ByteBuffer, ByteBuffer, R] { import TelnetLineAdapter._ import seg._ import parsers.charbuffer import java.nio.CharBuffer def main(in: Input[ByteBuffer], out: Output[ByteBuffer]) = //out.write(doEchoCmd) >> //out.write(dontLinemodeCmd) >> handover { ptype( in.parse(telnetMsg).collect { case Data(bb) => bb }.map(decode("US-ASCII")).parse(charbuffer.line(2048)), out.map(encode("US-ASCII")).map { s: String => CharBuffer.wrap(s.replaceAll("\\n", "\\r\\n") + "\\r\\n") } ) } } object TelnetLineAdapter { val IAC = 255.toByte import parsers.bytebuffer._ abstract class TelnetMsg case class Data(cb: ByteBuffer) extends TelnetMsg case class Command(b1: Byte, b2: Byte) extends TelnetMsg { override def toString() = "Command(" + unsigned(b1) + "," + unsigned(b2) + ")" } lazy val telnetMsg: Parser[ByteBuffer, TelnetMsg] = data | command val data = splitAt(IAC) ^^ { Data(_) } val command = (IAC ~ byteArray(2)) ^^ { case _ ~ arr => 
Command(arr(0), arr(1)) } }
molecule-labs/molecule
molecule-net-examples/src/main/scala/molecule/examples/net/TelnetAdapter.scala
Scala
apache-2.0
3,374
package lr2 import org.apache.spark.rdd.RDD import org.apache.spark.SparkContext._ import org.apache.spark.SparkContext import classification._ import classification.OptimizerType._ import classification.RegularizerType._ import utilities.SparseMatrix import utilities.DoubleVector class DistributedGradient( override val weights: Array[Double]) extends Model(weights) with Serializable{ override def train( trainingData: RDD[(Int, (Array[Byte], SparseMatrix))], maxNumIter: Int, optType: OptimizerType, regPara: Double, regType: RegularizerType): DistributedGradient = { assume(optType == CG || optType == LBFGS, "current version only supports CG and LBFGS") val numFeatures = weights.length val gradientPrev = new Array[Double](numFeatures) val direction = new Array[Double](numFeatures) val deltaPara = if (optType == OptimizerType.LBFGS) new Array[Double](numFeatures) else null var iter = 0 while (iter < maxNumIter) { val weightsBC = trainingData.context.broadcast(weights) val gradient = trainingData.map{ case (bid, (labels, features)) => { val map = features.rowMap val weightsLocal = Functions.toLocal(weightsBC.value, map) Functions.getGrad(labels, features, weightsLocal, weightsLocal) val gradientGlobal = Functions.toGlobal(weightsLocal, map, numFeatures) DoubleVector(gradientGlobal) } }.reduce(_+=_).elements var p = 1 //no shrinkage for the intercept while (p < numFeatures) { gradient(p) -= regPara*weights(p) p += 1 } if (iter > 1) { if (optType == CG) { Optimizers.getCGDirection(gradient, gradientPrev, direction) } else if (optType == LBFGS) { Optimizers.getLBFGSDirection(deltaPara, gradient, gradientPrev, direction) } } else Array.copy(gradient, 0, direction, 0, numFeatures) val directionBC = trainingData.context.broadcast(direction) val h = trainingData.map{ case(bid, (labels, features)) => { val map = features.rowMap val weightsLocal = Functions.toLocal(weightsBC.value, map) val direction = Functions.toLocal(directionBC.value, map) Functions.getHessian(features, 
weightsLocal, direction) } }.sum.toFloat p = 1 //no shrinkage for the intercept var gu = gradient(0)*direction(0) var uhu = 0.0 while (p < numFeatures) { uhu += direction(p)*direction(p) gu += gradient(p)*direction(p) p += 1 } uhu *= regPara uhu += h p = 0 while (p < numFeatures) { gradientPrev(p) = gradient(p) //equation (17) in Tom Minka 2003 val delta = gu/uhu*direction(p) if (optType == LBFGS) deltaPara(p) = delta weights(p) += delta p += 1 } iter += 1 } this } }
XianXing/bdl
src/main/scala/bdl/lr2/DistributedGradient.scala
Scala
apache-2.0
2,978
/* ************************************************************************************* * Copyright 2011 Normation SAS ************************************************************************************* * * This file is part of Rudder. * * Rudder is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * In accordance with the terms of section 7 (7. Additional Terms.) of * the GNU General Public License version 3, the copyright holders add * the following Additional permissions: * Notwithstanding to the terms of section 5 (5. Conveying Modified Source * Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU General * Public License version 3, when you create a Related Module, this * Related Module is not considered as a part of the work and may be * distributed under the license agreement of your choice. * A "Related Module" means a set of sources files including their * documentation that, without modification of the Source Code, enables * supplementary functions or services in addition to those offered by * the Software. * * Rudder is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Rudder. If not, see <http://www.gnu.org/licenses/>. 
* ************************************************************************************* */ package com.normation.rudder.web.services import bootstrap.liftweb._ import com.normation.rudder.domain.policies.DirectiveId import com.normation.rudder.web.model._ import scala.xml._ import net.liftweb.common._ import net.liftweb.http._ import net.liftweb.util.Helpers import scala.collection.mutable.{ Map => MutMap } import com.normation.cfclerk.domain._ import com.normation.exceptions.TechnicalException import org.slf4j.LoggerFactory import com.normation.utils.HashcodeCaching import com.normation.utils.Control.sequence /** * Create web representation of Directive in the goal * to configure them. * * This service does not ask anything on the backend, and * so all information have to be given. * */ class Section2FieldService(val fieldFactory: DirectiveFieldFactory, val translators: Translators) extends Loggable { /** * Fully initialize a DirectiveEditor from a list of variables */ def initDirectiveEditor( policy : Technique , directiveId : DirectiveId , vars : Seq[Variable] ): Box[DirectiveEditor] = { val valuesByName = vars.map(v => (v.spec.name, v.values)).toMap val variableSpecs = vars.map(v => (v.spec.name -> v.spec)).toMap val sections = policy.rootSection.copyWithoutSystemVars val providesExpectedReports = policy.providesExpectedReports //a policy is a new one if we don't have any saved values //Don't forget that we may have empty saved value. val isNewPolicy = valuesByName.size < 1 || valuesByName.forall { case (n,vals) => vals.size < 1 } logger.debug("Is it a new directive ? 
" + isNewPolicy) val bounds = vars.map { v => val used = v.spec.constraint.usedFields if(used.isEmpty) { Option.empty[(String, Seq[String])] } else { Some((v.spec.name, used.toSeq)) } }.flatten.toMap //create the fields "used" mapping val sectionField = createSectionField(sections, valuesByName, isNewPolicy, bounds) Full(DirectiveEditor(policy.id, directiveId, policy.name, policy.description, sectionField, variableSpecs, providesExpectedReports)) } //bound used section fields to private[this] def boundUsedFields(section: SectionField, bounds: Map[String, Seq[String]]): SectionField = { val allFields = section.getAllDirectVariables allFields.values.foreach { f => bounds.get(f.id) match { case None => Full("ok") case Some(used) => (for { fields <- sequence(used) { id => Box(allFields.get(id)) ?~! s"Variable '${id}' used by variable '${f.id}' was not found - are you sure all dependant fields are on the same section of the technique?" } } yield { fields }) match { case e: EmptyBox => logger.error((e ?~! 
"Error when binding dependant fields").messageChain) case Full(fields) => f.usedFields = fields } } } section } // -------------------------------------------- // description of the state machine // -------------------------------------------- /* * * * ----<--root----->-------- * ___ / | ___ \ * | variable sectionType1 | multiSection * | | `--->--' | | ____ * `-----<-----<------'-----<---' | / | * \ sectionType2 | * `----<------' `-->--' * * sectionType1: a section that may have a multi-section for children * sectionType2: a section that may only have simple sub section */ // -------------------------------------------- // implementation : TODO: implement above state // machine for real, not with a copy&paste for // createSingleSectionFieldForMultisec // -------------------------------------------- def createSectionField(section: SectionSpec, valuesByName:Map[String,Seq[String]], isNewPolicy:Boolean, usedFields: Map[String, Seq[String]]): SectionField = { val seqOfSectionMap = { if (isNewPolicy) Seq(createDefaultMap(section)) else { val all = createMapForEachSubSection(section, valuesByName) if(all.size < 1) Seq(createDefaultMap(section)) else all } } val readOnlySection = section.children.collect{ case x:PredefinedValuesVariableSpec => x}.size > 0 if (section.isMultivalued) { val sectionFields = for (sectionMap <- seqOfSectionMap) yield boundUsedFields(createSingleSectionFieldForMultisec(section,sectionMap, isNewPolicy, usedFields), usedFields) MultivaluedSectionField(sectionFields, () => { //here, valuesByName is empty, we are creating a new map. 
boundUsedFields(createSingleSectionField(section,Map(),createDefaultMap(section), true, usedFields), usedFields) } , priorityToVisibility(section.displayPriority) , readOnlySection ) } else { boundUsedFields(createSingleSectionField(section, valuesByName, seqOfSectionMap.head, isNewPolicy, usedFields), usedFields) } } private[this] def createVarField(varSpec: VariableSpec, valueOpt: Option[String]): (DirectiveField, (String, () => String)) = { val fieldKey = varSpec.name val field = fieldFactory.forType(varSpec, fieldKey) val varMappings = translators.get(field.manifest) match { case None => throw new TechnicalException("No translator from type: " + field.manifest.toString) case Some(t) => t.to.get("self") match { case None => throw new TechnicalException("Missing 'self' translator property (from type %s to a serialized string for Variable)".format(field.manifest)) case Some(c) => //close the returned function with f and store it into varMappings logger.trace("Add translator for variable '%s', get its value from field '%s.self'".format(fieldKey, fieldKey)) valueOpt match { case None => varSpec.constraint.default foreach ( setValueForField(_, field, t.from) ) case Some(value) => setValueForField(value, field, t.from) } (fieldKey -> { () => c(field.get) }) } } field.displayName = varSpec.description field.tooltip = varSpec.longDescription field.optional = varSpec.constraint.mayBeEmpty (field, varMappings) } private[this] def createSingleSectionField(sectionSpec:SectionSpec, valuesByName:Map[String,Seq[String]], sectionMap: Map[String, Option[String]], isNewPolicy:Boolean, usedFields: Map[String, Seq[String]]): SectionField = { // only variables of the current section var varMappings = Map[String, () => String]() val children = for (child <- sectionSpec.children) yield { child match { case varSpec: SectionVariableSpec => val (field, mapping) = createVarField(varSpec, sectionMap(varSpec.name)) varMappings += mapping field case sectSpec: SectionSpec => 
boundUsedFields(createSectionField(sectSpec, valuesByName, isNewPolicy, usedFields), usedFields) } } //actually create the SectionField for createSingleSectionField boundUsedFields(SectionFieldImp(sectionSpec.name, children, priorityToVisibility(sectionSpec.displayPriority), varMappings), usedFields) } private[this] def createSingleSectionFieldForMultisec(sectionSpec:SectionSpec, sectionMap: Map[String, Option[String]], isNewPolicy:Boolean, usedFields: Map[String, Seq[String]]): SectionFieldImp = { // only variables of the current section var varMappings = Map[String, () => String]() val children = for (child <- sectionSpec.children) yield { child match { case varSpec: SectionVariableSpec => val (field, mapping) = createVarField(varSpec, sectionMap.getOrElse(varSpec.name,None)) varMappings += mapping field case sectSpec: SectionSpec => val subSectionMap = if(isNewPolicy) createDefaultMap(sectSpec) else sectionMap boundUsedFields(createSingleSectionFieldForMultisec(sectSpec, subSectionMap, isNewPolicy, usedFields), usedFields) } } //actually create the SectionField for createSingleSectionField SectionFieldImp(sectionSpec.name, children, priorityToVisibility(sectionSpec.displayPriority), varMappings) } // transforms // Map(A -> Seq("A1", "A2"), B -> Seq("B1", "b2")) // to // Seq( Map((A -> "A1"), (B -> "B1")), // Map((A -> "A2"), (B -> "B2")) ) //If there is no value, a None is returned private def createMapForEachSubSection(section: SectionSpec, valuesByName:Map[String,Seq[String]]): Seq[Map[String, Option[String]]] = { // values represent all the values we have for the same name of variable case class NameValuesVar(name: String, values: Seq[String]) extends HashcodeCaching // seq of variable values with same name correctly ordered val seqOfNameValues : Seq[NameValuesVar] = { for { varSpec <- section.getAllVariables } yield { NameValuesVar(varSpec.name, valuesByName.getOrElse(varSpec.name, Seq[String]())) } } if (seqOfNameValues.isEmpty) { Seq(Map[String, 
Option[String]]()) } else { for { // If head has an empty sequence as value, it does not iterate for other variables // To fix, we use the max size of of all variables (so those value can be used, missing will be set to None. i <- 0 until seqOfNameValues.map(_.values.size).max } yield { for { nameValues <- seqOfNameValues } yield { val valueOpt = try Some(nameValues.values(i)) catch { case e: Exception => None } (nameValues.name, valueOpt) } }.toMap } } private def createDefaultMap(section: SectionSpec): Map[String, Option[String]] = section.getVariables.map(varSpec => (varSpec.name, varSpec.constraint.default)).toMap private def setValueForField( value: String, currentField: DirectiveField, unserializer: Unserializer[_]): Unit = { //if the var is not a GUI only var, just find the field unserializer and use it unserializer.get("self") match { case Some(unser) => unser(value) match { case Full(fv) => currentField.set(fv.asInstanceOf[currentField.ValueType]) //should be ok since we found the unserializer thanks to the field manifest case _ => //let field un-initialized, but log why logger.debug("Can not init field %s, translator gave no result for 'self' with value '%s'". format(currentField.name, value)) } case None => // can not init, no unserializer for it logger.debug("Can not init field %s, no translator found for property 'self'".format(currentField.name)) } } /** * From a priority, returns the visibility of a section * For the moment, a naive approach is : * - Low priority => hidden * - High priority => displayed */ private[this] def priorityToVisibility(priority : DisplayPriority) : Boolean = { priority match { case LowDisplayPriority => false case HighDisplayPriority => true case _ => true } } }
armeniaca/rudder
rudder-web/src/main/scala/com/normation/rudder/web/services/Section2FieldService.scala
Scala
gpl-3.0
12,955
// @SOURCE:/home/baptiste/check_my_ride/conf/routes // @HASH:66277d7cd60a62c63c0557fa462785079a0dca5e // @DATE:Mon Aug 18 16:34:53 CEST 2014 import Routes.{prefix => _prefix, defaultPrefix => _defaultPrefix} import play.core._ import play.core.Router._ import play.core.j._ import play.api.mvc._ import Router.queryString // @LINE:15 // @LINE:12 // @LINE:11 // @LINE:8 package controllers { // @LINE:11 class ReverseAuthentification { // @LINE:11 def verifyUser(): Call = { Call("POST", _prefix + { _defaultPrefix } + "verifyUser") } } // @LINE:12 class ReverseUsers { // @LINE:12 def createUser(): Call = { Call("POST", _prefix + { _defaultPrefix } + "user") } } // @LINE:15 class ReverseAssets { // @LINE:15 def at(file:String): Call = { Call("GET", _prefix + { _defaultPrefix } + "assets/" + implicitly[PathBindable[String]].unbind("file", file)) } } // @LINE:8 class ReverseApplication { // @LINE:8 def index(): Call = { Call("GET", _prefix) } } } // @LINE:15 // @LINE:12 // @LINE:11 // @LINE:8 package controllers.javascript { // @LINE:11 class ReverseAuthentification { // @LINE:11 def verifyUser : JavascriptReverseRoute = JavascriptReverseRoute( "controllers.Authentification.verifyUser", """ function() { return _wA({method:"POST", url:"""" + _prefix + { _defaultPrefix } + """" + "verifyUser"}) } """ ) } // @LINE:12 class ReverseUsers { // @LINE:12 def createUser : JavascriptReverseRoute = JavascriptReverseRoute( "controllers.Users.createUser", """ function() { return _wA({method:"POST", url:"""" + _prefix + { _defaultPrefix } + """" + "user"}) } """ ) } // @LINE:15 class ReverseAssets { // @LINE:15 def at : JavascriptReverseRoute = JavascriptReverseRoute( "controllers.Assets.at", """ function(file) { return _wA({method:"GET", url:"""" + _prefix + { _defaultPrefix } + """" + "assets/" + (""" + implicitly[PathBindable[String]].javascriptUnbind + """)("file", file)}) } """ ) } // @LINE:8 class ReverseApplication { // @LINE:8 def index : JavascriptReverseRoute = 
JavascriptReverseRoute( "controllers.Application.index", """ function() { return _wA({method:"GET", url:"""" + _prefix + """"}) } """ ) } } // @LINE:15 // @LINE:12 // @LINE:11 // @LINE:8 package controllers.ref { // @LINE:11 class ReverseAuthentification { // @LINE:11 def verifyUser(): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef( controllers.Authentification.verifyUser(), HandlerDef(this, "controllers.Authentification", "verifyUser", Seq(), "POST", """ User management API""", _prefix + """verifyUser""") ) } // @LINE:12 class ReverseUsers { // @LINE:12 def createUser(): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef( controllers.Users.createUser(), HandlerDef(this, "controllers.Users", "createUser", Seq(), "POST", """""", _prefix + """user""") ) } // @LINE:15 class ReverseAssets { // @LINE:15 def at(path:String, file:String): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef( controllers.Assets.at(path, file), HandlerDef(this, "controllers.Assets", "at", Seq(classOf[String], classOf[String]), "GET", """ Map static resources from the /public folder to the /assets URL path""", _prefix + """assets/$file<.+>""") ) } // @LINE:8 class ReverseApplication { // @LINE:8 def index(): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef( controllers.Application.index(), HandlerDef(this, "controllers.Application", "index", Seq(), "GET", """ Single endpoint for serving AngularJS""", _prefix + """""") ) } }
Viewtiful/Check-my-ride
target/scala-2.10/src_managed/main/routes_reverseRouting.scala
Scala
apache-2.0
4,424
package models.team import scalaz._ import Scalaz._ import scalaz.effect.IO import scalaz.Validation import scalaz.Validation.FlatMap._ import scalaz.NonEmptyList._ import scalaz.syntax.SemigroupOps import cache._ import db._ import models.Constants._ import io.megam.auth.funnel.FunnelErrors._ import com.datastax.driver.core.{ ResultSet, Row } import com.websudos.phantom.dsl._ import scala.concurrent.{ Future => ScalaFuture } import com.websudos.phantom.connectors.{ ContactPoint, KeySpaceDef } import scala.concurrent.Await import scala.concurrent.duration._ import utils.DateHelper import io.megam.util.Time import org.joda.time.{DateTime, DateTimeZone} import org.joda.time.format.{DateTimeFormat,ISODateTimeFormat} import io.megam.common.uid.UID import net.liftweb.json._ import net.liftweb.json.scalaz.JsonScalaz._ import java.nio.charset.Charset import controllers.stack.ImplicitJsonFormats case class OrganizationsInput(name: String) { val json = "{\\"name\\":\\"" + name + "\\"}" } case class OrganizationsInviteInput(id: String) { val json = "{\\"id\\":\\"" + id + "\\"}" } case class OrganizationsResult( id: String, accounts_id: String, name: String, json_claz: String, created_at: DateTime) {} object OrganizationsResult { val empty = new OrganizationsResult("", "", "", "Megam::Organizations", DateHelper.now()) } sealed class OrganizationsT extends CassandraTable[OrganizationsT, OrganizationsResult] { object id extends StringColumn(this) with PrimaryKey[String] object accounts_id extends StringColumn(this) with PartitionKey[String] object name extends StringColumn(this) object json_claz extends StringColumn(this) object created_at extends DateTimeColumn(this) override def fromRow(r: Row): OrganizationsResult = { OrganizationsResult( id(r), accounts_id(r), name(r), json_claz(r), created_at(r)) } } /* * This class talks to scylla and performs the actions */ abstract class ConcreteOrg extends OrganizationsT with ScyllaConnector { override lazy val tableName = 
"organizations" def insertNewRecord(org: OrganizationsResult): ResultSet = { val res = insert.value(_.id, org.id) .value(_.accounts_id, org.accounts_id) .value(_.name, org.name) .value(_.json_claz, org.json_claz) .value(_.created_at, org.created_at) .future() Await.result(res, 5.seconds) } def deleteRecords(email: String): ValidationNel[Throwable, ResultSet] = { val res = delete.where(_.accounts_id eqs email).future() Await.result(res, 5.seconds).successNel } def dbSelectAll: ValidationNel[Throwable, Seq[OrganizationsResult]] = { val res = select.fetch Await.result(res, 5.seconds).successNel } } object Organizations extends ConcreteOrg with ImplicitJsonFormats { private def orgNel(input: String): ValidationNel[Throwable, OrganizationsInput] = { (Validation.fromTryCatchThrowable[OrganizationsInput, Throwable] { parse(input).extract[OrganizationsInput] } leftMap { t: Throwable => new MalformedBodyError(input, t.getMessage) }).toValidationNel } private def inviteNel(input: String): ValidationNel[Throwable, OrganizationsInviteInput] = { (Validation.fromTryCatchThrowable[OrganizationsInviteInput, Throwable] { parse(input).extract[OrganizationsInviteInput] } leftMap { t: Throwable => new MalformedBodyError(input, t.getMessage) }).toValidationNel } private def organizationsSet(id: String, email: String, c: OrganizationsInput): ValidationNel[Throwable, OrganizationsResult] = { (Validation.fromTryCatchThrowable[OrganizationsResult, Throwable] { OrganizationsResult(id, email, c.name, "Megam::Organizations", DateHelper.now()) } leftMap { t: Throwable => new MalformedBodyError(c.json, t.getMessage) }).toValidationNel } def create(email: String, input: String): ValidationNel[Throwable, Option[OrganizationsResult]] = { for { c <- orgNel(input) uir <- (UID("org").get leftMap { u: NonEmptyList[Throwable] => u }) org <- organizationsSet(uir.get._1 + uir.get._2, email, c) } yield { insertNewRecord(org) org.some } } def findByEmail(accounts_id: String): ValidationNel[Throwable, 
Seq[OrganizationsResult]] = { val resp = select.allowFiltering().where(_.accounts_id eqs accounts_id).fetch() (Await.result(resp, 5.seconds)).successNel } def delete(email: String): ValidationNel[Throwable, Option[OrganizationsResult]] = { deleteRecords(email) match { case Success(value) => Validation.success[Throwable, Option[OrganizationsResult]](none).toValidationNel case Failure(err) => Validation.success[Throwable, Option[OrganizationsResult]](none).toValidationNel } } //Admin authority can list users hack for 1.5. def list: ValidationNel[Throwable, Seq[OrganizationsResult]] = { (dbSelectAll leftMap { t: NonEmptyList[Throwable] => new ResourceItemNotFound("Admin", "Organizations = nothing found.") }).toValidationNel.flatMap { nm: Seq[OrganizationsResult] => if (!nm.isEmpty) Validation.success[Throwable, Seq[OrganizationsResult]](nm).toValidationNel else Validation.failure[Throwable, Seq[OrganizationsResult]](new ResourceItemNotFound("Admin", "Users = nothing found.")).toValidationNel } } private def findById(id: String): ValidationNel[Throwable, Option[OrganizationsResult]] = { val resp = select.allowFiltering().where(_.id eqs id).one() (Await.result(resp, 5.second)).successNel } def inviteOrganization(email: String, input: String): ValidationNel[Throwable, ResultSet] = { for { c <- inviteNel(input) upd <- findById(c.id) } yield { val org = new OrganizationsResult(upd.head.id, email, upd.head.name, upd.head.json_claz, DateHelper.now()) insertNewRecord(org) } } }
megamsys/verticegateway
app/models/team/Organizations.scala
Scala
mit
5,873
package week7.bloxorz

/**
 * This component implements a parser to define terrains from a
 * graphical ASCII representation.
 *
 * When mixing in that component, a level can be defined by
 * defining the field `level` in the following form:
 *
 *   val level =
 *     """------
 *       |--ST--
 *       |--oo--
 *       |--oo--
 *       |------""".stripMargin
 *
 * - The `-` character denotes parts which are outside the terrain
 * - `o` denotes fields which are part of the terrain
 * - `S` denotes the start position of the block (which is also considered
 *   inside the terrain)
 * - `T` denotes the final position of the block (which is also considered
 *   inside the terrain)
 *
 * In this example, the first and last lines could be omitted, and
 * also the columns that consist of `-` characters only.
 */
trait StringParserTerrain extends GameDef {

  /**
   * A ASCII representation of the terrain. This field should remain
   * abstract here.
   */
  val level: String

  /**
   * This method returns terrain function that represents the terrain
   * in `levelVector`. The vector contains parsed version of the `level`
   * string. For example, the following level
   *
   *   val level =
   *     """ST
   *       |oo
   *       |oo""".stripMargin
   *
   * is represented as
   *
   *   Vector(Vector('S', 'T'), Vector('o', 'o'), Vector('o', 'o'))
   *
   * The resulting function returns `true` iff the position `pos` lies
   * within the grid bounds and is not a '-' character.
   */
  def terrainFunction(levelVector: Vector[Vector[Char]]): Pos => Boolean = { p: Pos =>
    // Short-circuit bounds checks before indexing to avoid IndexOutOfBounds.
    p.x >= 0 && p.x < levelVector.size &&
      p.y >= 0 && p.y < levelVector(p.x).size &&
      levelVector(p.x)(p.y) != '-'
  }

  /**
   * This function returns the position of character `c` in the
   * terrain described by `levelVector`. You can assume that the `c`
   * appears exactly once in the terrain.
   *
   * Implemented with `indexWhere` / `indexOf` (as suggested above) so the
   * scan stops at the first match instead of materializing every matching
   * position and discarding all but the head.
   */
  def findChar(c: Char, levelVector: Vector[Vector[Char]]): Pos = {
    val row = levelVector.indexWhere(_.contains(c))
    // `c` is assumed to occur exactly once, so `row` is a valid index here.
    Pos(row, levelVector(row).indexOf(c))
  }

  // Parsed form of `level`: one inner Vector of characters per line.
  private lazy val vector: Vector[Vector[Char]] =
    Vector(level.split("\\n").map(str => Vector(str: _*)): _*)

  lazy val terrain: Terrain = terrainFunction(vector)
  lazy val startPos: Pos = findChar('S', vector)
  lazy val goal: Pos = findChar('T', vector)
}
zapstar/funprog
fp-scala/src/week7/bloxorz/StringParserTerrain.scala
Scala
mit
2,627
package edu.msstate.dasi.csb.workload.spark

import edu.msstate.dasi.csb.workload.Workload
import org.apache.spark.graphx.Graph

import scala.reflect.ClassTag

/**
 * Workload wrapping the GraphX Connected Components algorithm.
 */
class ConnectedComponents(engine: SparkEngine) extends Workload {

  val name = "Connected Components"

  /**
   * Runs Connected Components on the given graph, forcing evaluation of the
   * resulting component vertices through the engine's no-op action.
   */
  def run[VD: ClassTag, ED: ClassTag](graph: Graph[VD, ED]): Unit = {
    val components = graph.connectedComponents()
    components.vertices.foreach(engine.doNothing)
  }
}
msstate-dasi/csb
csb/src/main/scala/edu/msstate/dasi/csb/workload/spark/ConnectedComponents.scala
Scala
gpl-3.0
508
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package wvlet.airframe.metrics

import java.time.ZonedDateTime

import wvlet.airframe.metrics.TimeWindow.withUTC

import scala.annotation.tailrec

/**
 * A relative time span of `x` units, shifted by `offset` units from the
 * unit-truncated anchor time. For example, TimeVector(-1, 0, Day) means
 * "yesterday" and TimeVector(-1, 1, Day) means "today" (a backward range
 * anchored one unit ahead so the current partial unit is included).
 *
 * @param x      signed length of the span, in `unit`s
 * @param offset shift (in `unit`s) applied to the truncated anchor time
 * @param unit   the time unit (second/minute/hour/day/week/month/quarter/year)
 */
case class TimeVector(x: Long, offset: Long, unit: TimeWindowUnit) {
  override def toString: String = toDurationString

  /** Renders e.g. "7d", or "7d/1d" when a non-zero offset is present. */
  def toDurationString: String = {
    if (offset == 0) {
      s"${x}${unit.symbol}"
    } else {
      s"${x}${unit.symbol}/${offset}${unit.symbol}"
    }
  }

  /**
   * Materializes this relative vector into a concrete [start, end) window
   * anchored at `context`.
   *
   * The anchor is truncated to the unit grid and then shifted by `offset`;
   * when `context` sits exactly on the grid the shifted grid point is used
   * as one boundary, otherwise the (partial-unit) context itself is kept.
   * The result is normalized so that start <= end even for negative `x`.
   */
  def timeWindowFrom(context: ZonedDateTime): TimeWindow = {
    val grid        = unit.truncate(context)
    val startOffset = unit.increment(grid, offset)
    val end         = unit.increment(startOffset, x)

    val onGrid = grid.compareTo(context) == 0
    val start  = if (onGrid) startOffset else context
    if (start.compareTo(end) <= 0) {
      TimeWindow(start, end)
    } else {
      TimeWindow(end, start)
    }
  }
}

object TimeVector {
  // <sign or keyword prefix><count><unit symbol>, e.g. "-7d", "next3M"
  private val durationPattern = "^(?<prefix>[+-]|last|next)?(?<num>[0-9]+)(?<unit>s|m|d|h|w|M|q|y)".r

  /**
   * Parses a duration string such as "-7d", "+2h", "lastWeek", "thisMonth".
   *
   * @throws IllegalArgumentException for unrecognized input or prefix
   */
  def apply(s: String): TimeVector = {
    s match {
      // current
      // thisXXXX is a special time range and needs to be a backward range to include the current time
      // even after truncating with `now` offset.
      //            now
      //   |----------x----------|
      //   <---------------------| x = -1, 1 unit distance from the offset
      // grid (offset=0)       offset = 1
      case "thisHour"  => TimeVector(-1, 1, TimeWindowUnit.Hour)
      case "today"     => TimeVector(-1, 1, TimeWindowUnit.Day)
      case "thisWeek"  => TimeVector(-1, 1, TimeWindowUnit.Week)
      case "thisMonth" => TimeVector(-1, 1, TimeWindowUnit.Month)
      case "thisYear"  => TimeVector(-1, 1, TimeWindowUnit.Year)
      // past
      case "lastHour"  => TimeVector(-1, 0, TimeWindowUnit.Hour)
      case "yesterday" => TimeVector(-1, 0, TimeWindowUnit.Day)
      case "lastWeek"  => TimeVector(-1, 0, TimeWindowUnit.Week)
      case "lastMonth" => TimeVector(-1, 0, TimeWindowUnit.Month)
      case "lastYear"  => TimeVector(-1, 0, TimeWindowUnit.Year)
      // future
      case "nextHour"  => TimeVector(1, 1, TimeWindowUnit.Hour)
      case "tomorrow"  => TimeVector(1, 1, TimeWindowUnit.Day)
      case "nextWeek"  => TimeVector(1, 1, TimeWindowUnit.Week)
      case "nextMonth" => TimeVector(1, 1, TimeWindowUnit.Month)
      case "nextYear"  => TimeVector(1, 1, TimeWindowUnit.Year)
      // Fixed: the previous `case other =>` bound a name that was never used.
      case _ =>
        durationPattern.findFirstMatchIn(s) match {
          case None =>
            throw new IllegalArgumentException(s"Invalid duration: ${s}")
          case Some(m) =>
            val length = m.group("num").toInt
            val unit   = TimeWindowUnit.of(m.group("unit"))
            m.group("prefix") match {
              case "-" | "last"        => TimeVector(-length, 0, unit)
              case null | "+" | "next" => TimeVector(length, 0, unit)
              case other               => throw new IllegalArgumentException(s"Unknown duration prefix: ${other}")
            }
        }
    }
  }

  /**
   * Compute the most succinct TimeVector to represent a time range
   * [start unixtime, end unixtime).
   *
   * Tries the largest unit first (Year down to Second) and accepts a unit
   * when the unit-truncated window covers the same duration (within 0.1%).
   */
  def succinctTimeVector(startUnixTime: Long, endUnixTime: Long): TimeVector = {
    val r          = withUTC.fromRange(startUnixTime, endUnixTime)
    val secondDiff = (endUnixTime - startUnixTime).toDouble

    @tailrec
    def loop(unitsToUse: List[TimeWindowUnit]): TimeVector = {
      if (unitsToUse.isEmpty) {
        // No coarser unit fits; fall back to an exact second count.
        TimeVector(endUnixTime - startUnixTime, 0, TimeWindowUnit.Second)
      } else {
        val unit               = unitsToUse.head
        val numUnits           = r.howMany(unit)
        val startTruncated     = unit.truncate(r.start)
        val endTruncated       = unit.truncate(r.end)
        val truncated          = TimeWindow(startTruncated, endTruncated)
        val truncatedSecondDiff = truncated.secondDiff
        // Accept this unit if truncation changes the covered duration by <= 0.1%.
        if (numUnits > 0 && ((secondDiff - truncatedSecondDiff) / (numUnits * unit.secondsInUnit)).abs <= 0.001) {
          TimeVector(numUnits, 0, unit)
        } else {
          loop(unitsToUse.tail)
        }
      }
    }
    // Find the largest unit first from Year to Second
    loop(TimeWindowUnit.units.reverse)
  }
}
wvlet/airframe
airframe-metrics/.jvm/src/main/scala/wvlet/airframe/metrics/TimeVector.scala
Scala
apache-2.0
4,757
/*
 * Licensed to Intel Corporation under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * Intel Corporation licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.intel.analytics.bigdl.torch

import com.intel.analytics.bigdl.nn.Max
import com.intel.analytics.bigdl.tensor.Tensor
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers}

import scala.util.Random

/**
 * Compares the Scala Max layer against the reference Torch nn.Max module.
 * The three cases previously duplicated ~25 lines of identical boilerplate;
 * the shared flow is factored into `compareWithTorch`.
 */
@com.intel.analytics.bigdl.tags.Serial
class MaxSpec extends FlatSpec with BeforeAndAfter with Matchers {
  before {
    // Skip (not fail) when the Torch reference implementation is unavailable.
    if (!TH.hasTorch()) {
      cancel("Torch is not installed")
    }
  }

  /**
   * Runs `layer` forward/backward on `input`/`gradOutput`, runs the
   * equivalent Torch module declared by `moduleCode`, and asserts both
   * implementations produce identical output and gradInput.
   * Also prints rough wall-clock timings for both implementations.
   */
  private def compareWithTorch(
    layer: Max[Double],
    input: Tensor[Double],
    gradOutput: Tensor[Double],
    moduleCode: String): Unit = {
    val start = System.nanoTime()
    val output = layer.forward(input)
    val gradInput = layer.backward(input, gradOutput)
    val end = System.nanoTime()
    val scalaTime = end - start

    val code = moduleCode +
      "output = module:forward(input)\\n" +
      "gradInput = module:backward(input,gradOutput)"

    val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "gradOutput" -> gradOutput),
      Array("output", "gradInput"))
    val luaOutput = torchResult("output").asInstanceOf[Tensor[Double]]
    val luaGradInput = torchResult("gradInput").asInstanceOf[Tensor[Double]]

    output should be (luaOutput)
    gradInput should be (luaGradInput)

    println("Test case : Max, Torch : " + luaTime + " s, Scala : " + scalaTime / 1e9 + " s")
  }

  "A Max(2)" should "generate correct output and grad" in {
    compareWithTorch(
      new Max[Double](2),
      Tensor[Double](2, 3, 4).apply1(_ => Random.nextDouble()),
      Tensor[Double](2, 4).apply1(_ => Random.nextDouble()),
      "module = nn.Max(2)\\n")
  }

  "A Max()" should "generate correct output and grad" in {
    compareWithTorch(
      new Max[Double](),
      Tensor[Double](1, 2, 3, 4).apply1(_ => Random.nextDouble()),
      Tensor[Double](2, 3, 4).apply1(_ => Random.nextDouble()),
      "module = nn.Max()\\n")
  }

  "A Max(2, 3)" should "generate correct output and grad" in {
    compareWithTorch(
      new Max[Double](2, 3),
      Tensor[Double](3, 5, 3, 4).apply1(_ => Random.nextDouble()),
      Tensor[Double](3, 5, 4).apply1(_ => Random.nextDouble()),
      "module = nn.Max(2, 3)\\n")
  }
}
zhichao-li/BigDL
dl/src/test/scala/com/intel/analytics/bigdl/torch/MaxSpec.scala
Scala
apache-2.0
4,296
/*
 * Copyright © 2015 - 2019 Lightbend, Inc. <http://www.lightbend.com>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.lightbend.paradox.sbt

import sbt._
import sbt.Keys._

import com.typesafe.sbt.web.Import.{ WebKeys, Assets }
import com.typesafe.sbt.web.SbtWeb

import org.webjars.WebJarAssetLocator.WEBJARS_PATH_PREFIX

/**
 * Common settings for themes.
 *
 * Auto-plugin (triggered via `requires = SbtWeb`) that lets a Paradox theme
 * bundle only the webjar assets its string templates actually reference,
 * instead of exporting entire webjar dependencies.
 */
object ParadoxThemePlugin extends AutoPlugin {

  object ParadoxThemeKeys {
    // Toggle for the minimal-webjar bundling behavior (defaults to true below).
    val includeMinimalWebjars = settingKey[Boolean]("Enable bundling of referenced webjar assets.")
    // The set of "lib/..." asset paths discovered in the theme's .st templates.
    val referencedWebjarAssets = taskKey[Set[String]]("Paths for webjar assets referenced in the theme.")
  }
  val autoImport = ParadoxThemeKeys
  import autoImport._

  override def requires = SbtWeb

  // Theme artifacts are pure resources: no Scala library and no cross-versioned paths.
  override def projectSettings = minimalWebjarSettings ++ Seq(
    autoScalaLibrary := false,
    crossPaths := false
  )

  /**
   * Directly include all webjar dependency assets referenced in the theme.
   * Requires webjar dependencies to be marked `provided` or similar.
   */
  def minimalWebjarSettings = inConfig(Assets)(Seq(
    includeMinimalWebjars := true,
    referencedWebjarAssets := {
      // extract all webjar asset references in string template files
      // (matches `$page.base$lib/...$` references inside StringTemplate sources)
      val libReference = """\\$page\\.base\\$(lib/.*)\\"""".r
      val templates = sources.value.filter(_.getName.endsWith(".st"))
      (templates flatMap { template =>
        libReference.findAllIn(IO.read(template)).matchData.flatMap(_.subgroups).toSeq
      }).toSet
    },
    // taskDyn: the branch on includeMinimalWebjars must be decided at task
    // evaluation time, so the mapping task is constructed dynamically.
    WebKeys.exportedMappings ++= (Def.taskDyn {
      if (includeMinimalWebjars.value) {
        // Re-export each referenced asset under this theme's own webjar prefix.
        val prefix = SbtWeb.path(s"${WEBJARS_PATH_PREFIX}/${moduleName.value}/${version.value}/")
        val include = referencedWebjarAssets.value
        Def.task {
          (mappings in WebKeys.webModules).value flatMap {
            case (file, path) if include(path) => Some(file -> (prefix + path))
            case _ => None
          }
        }
      } else Def.task { Seq.empty[(java.io.File, String)] }
    }).value
  ))
}
lightbend/paradox
theme-plugin/src/main/scala/com/lightbend/paradox/sbt/ParadoxThemePlugin.scala
Scala
apache-2.0
2,568
// Negative compilation test (dotty tests/neg): each line marked `// error`
// must fail to compile. Eta-expanding a by-name parameter (`tl _`) is
// rejected, as are the follow-on uses of the resulting value.
object Test {
  def bar(tl: => String) = {
    val x = tl _ // error
    val y = x _ // error
    val s: String = x() // error
  }
}
som-snytt/dotty
tests/neg/i941.scala
Scala
apache-2.0
135
package net.rrm.ehour.ui.report.detailed

import net.rrm.ehour.AbstractSpringWebAppSpec
import net.rrm.ehour.ui.report.cache.ReportCacheService
import org.mockito.Mockito._

/**
 * Verifies that the detailed-report REST endpoint serializes cached report
 * data to a non-empty JSON payload.
 */
class DetailedReportRESTResourceSpec extends AbstractSpringWebAppSpec {
  "Detailed Report REST resource" should {
    val cacheService = mockService[ReportCacheService]

    before {
      // Fresh mock per test: cache lookup for id "123" yields fixture data.
      reset(cacheService)
      when(cacheService.retrieveReportData("123")).thenReturn(Some(DetailedReportDataObjectMother.getFlatReportData))
    }

    "properly serialize date value values" in {
      tester.getRequest.setMethod("GET")
      tester.executeUrl("./rest/report/detailed/hour/123")

      // An empty JSON object would indicate the date values failed to serialize.
      val responseBody = tester.getLastResponseAsString
      responseBody should not include "{}"
    }
  }
}
momogentoo/ehour
eHour-wicketweb/src/test/scala/net/rrm/ehour/ui/report/detailed/DetailedReportRESTResourceSpec.scala
Scala
gpl-2.0
766
package mesosphere.marathon.api.v2

import java.util
import javax.inject.Inject
import javax.servlet.http.HttpServletRequest
import javax.ws.rs._
import javax.ws.rs.core.{ Context, MediaType, Response }

import com.codahale.metrics.annotation.Timed
import mesosphere.marathon.api.v2.json.Formats._
import mesosphere.marathon.api.{ EndpointsHelper, MarathonMediaType, TaskKiller, _ }
import mesosphere.marathon.core.appinfo.EnrichedTask
import mesosphere.marathon.core.group.GroupManager
import mesosphere.marathon.core.health.HealthCheckManager
import mesosphere.marathon.core.task.Task
import mesosphere.marathon.core.task.state.MarathonTaskStatus
import mesosphere.marathon.core.task.tracker.TaskTracker
import mesosphere.marathon.plugin.auth.{ Authenticator, Authorizer, UpdateRunSpec, ViewRunSpec }
import mesosphere.marathon.state.PathId
import mesosphere.marathon.{ BadRequestException, MarathonConf }
import org.slf4j.LoggerFactory
import play.api.libs.json.Json

import scala.collection.IterableView
import scala.collection.JavaConverters._
import scala.concurrent.{ ExecutionContext, Future }

/**
 * JAX-RS resource for the `/v2/tasks` endpoints: listing all known tasks
 * (JSON or plain-text endpoint dump) and bulk task deletion with optional
 * scale-down / wipe semantics. All endpoints require authentication and
 * filter/authorize per run spec.
 */
@Path("v2/tasks")
class TasksResource @Inject() (
    taskTracker: TaskTracker,
    taskKiller: TaskKiller,
    val config: MarathonConf,
    groupManager: GroupManager,
    healthCheckManager: HealthCheckManager,
    val authenticator: Authenticator,
    val authorizer: Authorizer) extends AuthResource {

  val log = LoggerFactory.getLogger(getClass.getName)
  implicit val ec = ExecutionContext.Implicits.global

  /**
   * GET /v2/tasks — lists tasks as JSON, enriched with health results and
   * service ports, optionally filtered by task status.
   *
   * Accepts the filter both as `?status=` (single) and `?status[]=` (repeated);
   * the single form is merged into the list. Unknown status strings are
   * silently dropped by `toTaskState`. Tasks of apps the caller may not view
   * are filtered out rather than rejected.
   */
  @GET
  @Produces(Array(MarathonMediaType.PREFERRED_APPLICATION_JSON))
  @Timed
  def indexJson(
    @QueryParam("status") status: String,
    @QueryParam("status[]") statuses: util.List[String],
    @Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
    // Fold the scalar `status` param into the repeated `status[]` list.
    Option(status).map(statuses.add)
    val statusSet = statuses.asScala.flatMap(toTaskState).toSet

    val taskList = taskTracker.tasksByAppSync

    // Lazy view of (appId, task) pairs across all apps.
    val tasks = taskList.appTasksMap.values.view.flatMap { appTasks =>
      appTasks.tasks.view.map(t => appTasks.appId -> t)
    }

    val appIds = taskList.allAppIdsWithTasks

    // NOTE(review): `result(...)` blocks on each group-manager future in turn;
    // one lookup per app id.
    val appIdsToApps = appIds.map(appId => appId -> result(groupManager.app(appId))).toMap

    val appToPorts = appIdsToApps.map {
      case (appId, app) => appId -> app.map(_.servicePorts).getOrElse(Nil)
    }

    val health = appIds.flatMap { appId =>
      result(healthCheckManager.statuses(appId))
    }.toMap

    // Keep only tasks whose app still exists, is viewable by the caller,
    // and (when a filter was given) matches one of the requested statuses.
    val enrichedTasks: IterableView[EnrichedTask, Iterable[_]] = for {
      (appId, task) <- tasks
      app <- appIdsToApps(appId)
      if isAuthorized(ViewRunSpec, app)
      if statusSet.isEmpty || statusSet(task.status.taskStatus)
    } yield {
      EnrichedTask(
        appId,
        task,
        health.getOrElse(task.taskId, Nil),
        appToPorts.getOrElse(appId, Nil)
      )
    }

    ok(jsonObjString(
      "tasks" -> enrichedTasks
    ))
  }

  /**
   * GET /v2/tasks (text/plain) — tab-separated endpoint listing of all apps
   * the caller is authorized to view (e.g. for haproxy-style consumers).
   */
  @GET
  @Produces(Array(MediaType.TEXT_PLAIN))
  @Timed
  def indexTxt(@Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
    ok(EndpointsHelper.appsToEndpointString(
      taskTracker,
      result(groupManager.rootGroup()).transitiveApps.toSeq.filter(app => isAuthorized(ViewRunSpec, app)),
      "\t"
    ))
  }

  /**
   * POST /v2/tasks/delete — kills the tasks whose ids are listed in the JSON
   * body (`{"ids": [...]}`).
   *
   * Query flags: `scale` also scales the app down by the killed count (returns
   * a deployment result); `wipe` expunges the tasks; `force` forces a
   * conflicting deployment. `scale` and `wipe` together are rejected with 400,
   * as are malformed task ids. Task ids that are no longer known to the
   * tracker are silently ignored.
   */
  @POST
  @Produces(Array(MarathonMediaType.PREFERRED_APPLICATION_JSON))
  @Consumes(Array(MediaType.APPLICATION_JSON))
  @Timed
  @Path("delete")
  def killTasks(
    @QueryParam("scale")@DefaultValue("false") scale: Boolean,
    @QueryParam("force")@DefaultValue("false") force: Boolean,
    @QueryParam("wipe")@DefaultValue("false") wipe: Boolean,
    body: Array[Byte],
    @Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
    if (scale && wipe) throw new BadRequestException("You cannot use scale and wipe at the same time.")

    val taskIds = (Json.parse(body) \ "ids").as[Set[String]]
    // Derive each task's app id from its id; Task.Id.runSpecId pattern-matches
    // internally, hence the MatchError translation into a 400 response.
    val tasksToAppId = taskIds.map { id =>
      try { id -> Task.Id.runSpecId(id) }
      catch { case e: MatchError => throw new BadRequestException(s"Invalid task id '$id'.") }
    }.toMap

    // Kill and simultaneously scale the affected apps down (deployment result).
    def scaleAppWithKill(toKill: Map[PathId, Iterable[Task]]): Response = {
      deploymentResult(result(taskKiller.killAndScale(toKill, force)))
    }

    // Plain kill (optionally wiping) without changing instance counts.
    def killTasks(toKill: Map[PathId, Iterable[Task]]): Response = {
      val affectedApps = tasksToAppId.values.flatMap(appId => result(groupManager.app(appId))).toSeq
      // FIXME (gkleiman): taskKiller.kill a few lines below also checks authorization, but we need to check ALL before
      // starting to kill tasks
      affectedApps.foreach(checkAuthorization(UpdateRunSpec, _))
      val killed = result(Future.sequence(toKill.map {
        case (appId, tasks) => taskKiller.kill(appId, _ => tasks, wipe)
      })).flatten
      ok(jsonObjString("tasks" -> killed.map(task => EnrichedTask(task.taskId.runSpecId, task, Seq.empty))))
    }

    // Resolve ids to live tasks (unknown ids drop out here) and group by app.
    val tasksByAppId = tasksToAppId
      .flatMap { case (taskId, appId) => taskTracker.tasksByAppSync.task(Task.Id(taskId)) }
      .groupBy { task => task.taskId.runSpecId }
      .map{ case (appId, tasks) => appId -> tasks }

    if (scale) scaleAppWithKill(tasksByAppId)
    else killTasks(tasksByAppId)
  }

  // Maps a query-string status name to the internal status; unknown names
  // yield None and are thereby excluded from the filter set.
  private def toTaskState(state: String): Option[MarathonTaskStatus] = state.toLowerCase match {
    case "running" => Some(MarathonTaskStatus.Running)
    case "staging" => Some(MarathonTaskStatus.Staging)
    case _ => None
  }
}
timcharper/marathon
src/main/scala/mesosphere/marathon/api/v2/TasksResource.scala
Scala
apache-2.0
5,451
package org.gark87.yajom.macros

import org.gark87.yajom.base.BaseMapper
import language.experimental.macros
import scala.collection.mutable
import scala.reflect.macros.Universe

/**
 * Macro transformation that rewrites a getter chain so that any `null`
 * intermediate value is replaced on the fly: when a getter returns null,
 * a default object is created (via the object factory), stored through the
 * matching setter, and used in place of the null.
 */
class CreateOnNull {

  /**
   * Looks up the setter corresponding to the getter `name` on `qualifier`.
   *
   * - If the naming settings say `name` is not a getter, `notGetter` is
   *   invoked with the resolved method symbol and its result returned.
   * - If a matching public single-argument setter (same parameter type as the
   *   getter's return type) exists, `ok(setterName, returnType)` is returned.
   * - Otherwise a macro error is reported.
   */
  def findSetter[T: y.c.WeakTypeTag](y: YajomContext)
                (qualifier: y.c.Tree, name: y.c.Name, args: List[y.c.Tree],
                 notGetter: (y.c.universe.MethodSymbol) => y.c.Tree,
                 ok: (String, y.c.Type) => y.c.Tree): y.c.Tree = {
    import y.c.universe._

    val getterName = name.decoded
    val getter: MethodSymbol = qualifier.tpe.member(name).asMethod
    // NOTE(review): `correctParams` (zero-arg check) is computed but never
    // consulted below — presumably intended to gate the getter test; verify.
    val correctParams = getter.paramss match {
      case List(List()) => true
      case _ => false
    }
    val returnType = getter.returnType
    y.settings.expectSetter(y.c)(getterName, returnType) match {
      case None => {
        notGetter(getter)
      }
      case Some(setterName) => {
        // Search the qualifier's members for a public setter taking exactly
        // one parameter of the getter's return type.
        val setter = qualifier.tpe.members.find((x: Symbol) => {
          if (!x.isMethod)
            false
          else {
            val method: MethodSymbol = x.asMethod
            val correctSetterParams = method.paramss match {
              case List(List(termSymbol)) => termSymbol.asTerm.typeSignature == returnType
              case _ => false
            }
            method.isMethod && correctSetterParams && method.isPublic && setterName == method.name.decoded
          }
        })
        if (setter.isEmpty) {
          y.reporter.error("Cannot find setter for " + name + " @ " + qualifier)
        } else {
          ok(setterName, returnType)
        }
      }
    }
  }

  /**
   * Entry point of the transformation: walks `expr`'s tree and wraps every
   * getter call in a null guard. Each guard is hoisted into a `prefix` val
   * definition (`CON_resultValue$N`) that is prepended to the resulting block.
   */
  def process[T: y.c.WeakTypeTag](y: YajomContext)(expr: y.c.Expr[T], objectFactoryType: y.c.Type): y.c.Tree = {
    import y.c.universe._

    val reporter = y.reporter
    // Local `val` right-hand sides seen so far, keyed by name (used by the
    // argument converter).
    val vars = new mutable.HashMap[String, Tree]()
    // Hoisted null-guard val definitions, accumulated in traversal order.
    var prefix: List[y.c.universe.Tree] = List()
    val argsConverter = y.argsConverter

    // Unwraps nested Apply/TypeApply nodes down to the underlying Select,
    // collecting each argument list. Returns (argLists, guardedTree,
    // qualifier, methodName).
    def collectAllArgs(tree: Tree, list: List[List[Tree]]): (List[List[Tree]], Tree, Tree, Name) = {
      tree match {
        case Apply(call, args) => collectAllArgs(call, args :: list)
        case TypeApply(Select(qualifier, name), typeArgs) => (list, addNullGuards(tree), qualifier, name)
        case Select(qualifier, name) => (list, addNullGuards(tree), qualifier, name)
        case _ => reporter.error("Cannot find Select, have instead: " + tree)
      }
    }

    // Recursively rewrites `tree`, replacing each getter invocation with a
    // reference to a hoisted guard val that creates-and-sets on null.
    def addNullGuards(tree: Tree): Tree = {
      tree match {
        case Apply(select, args) => {
          val allArgs = collectAllArgs(tree, List())
          val name = allArgs._4
          val qualifier = allArgs._3
          val getter: MethodSymbol = qualifier.tpe.member(name).asMethod
          findSetter[T](y)(qualifier, name, args, (getter) => {
            // Not a getter: just convert the arguments and re-apply.
            val convert = argsConverter.convert(y)(getter, allArgs._1, vars, objectFactoryType)
            val result = convert.foldLeft[Tree](allArgs._2)((tree: Tree, args: List[Tree]) => {
              Apply(tree, args)
            })
            y.c.resetAllAttrs(result)
          }, (setterName, returnType) => {
            // Getter with a setter: hoist
            //   val CON_resultValue$N = { val old = <getter>; if (old == null)
            //     { val v = <factory default>; <setter>(v); v } else old }
            // and replace the call site with Ident(CON_resultValue$N).
            val resultValue = newTermName(y.c.fresh("CON_resultValue$"))
            val e = y.c.Expr[Any](tree)
            val newValue = newTermName(y.c.fresh("CON_newValue$"))
            val setterValue = y.c.Expr[T](Block(
              ValDef(Modifiers(), newValue, TypeTree(), y.creator.createDefaultObject(y)(returnType, objectFactoryType)),
              Apply(Select(qualifier, newTermName(setterName)), List(Ident(newValue))),
              Ident(newValue)
            ))
            prefix = prefix :+ ValDef(Modifiers(), resultValue, TypeTree(),
              reify {
                val CON_oldValue = e.splice
                if (CON_oldValue == null) {
                  setterValue.splice
                } else
                  CON_oldValue
              }.tree
            )
            Ident(resultValue)
          })
        }
        case Block(stats, epr) => Block(stats.map((s: Tree) => {
          addNullGuards(s)
        }), addNullGuards(epr))
        case ValDef(mods, name, tpt, rhs) => {
          // Record local vals so argument conversion can resolve them later.
          vars.put(name.decoded, rhs)
          ValDef(mods, name, tpt, y.c.resetAllAttrs(addNullGuards(rhs)))
        }
        case Select(qualifier, name) => Select(addNullGuards(qualifier), name)
        // Leaves: nothing to guard. Function bodies are deliberately not
        // descended into.
        case Ident(name) => tree
        case This(a) => tree
        case Function(valdefs, body) => tree
        case Literal(literal) => tree
        case TypeApply(Select(qualifier, name), typeArgs) => TypeApply(Select(addNullGuards(qualifier), name), typeArgs)
        case expr1 => reporter.error("Too complex expression `" + expr1 + "` for YAJOM CreateOnNull:\\n1. Quick Fix: extract val without CreateOnNull\\n2. mail gark87 <my_another@mail.ru>")
      }
    }

    // Prepend the hoisted guard vals to the rewritten expression, preserving
    // its outer shape (block / function / single expression).
    val guards: Tree = addNullGuards(expr.tree)
    guards match {
      case Block(a, b) =>
        val d: List[Tree] = prefix ::: a
        Block(d, y.c.resetAllAttrs(b))
      case Function(valdefs, body) => Function(valdefs, Block(prefix, body))
      case any => Block(prefix, y.c.resetAllAttrs(any))
    }
  }
}

object CreateOnNull {
  /**
   * Macro implementation bridge: extracts the factory type from the mapper's
   * first type argument and delegates to [[CreateOnNull.process]]. The casts
   * move trees/types between the caller's context `c` and the YajomContext.
   */
  def macroImpl[F: c.WeakTypeTag, M <: BaseMapper[_]](c: reflect.macros.Context)(func: c.Expr[F])(m: c.Expr[M]): c.Expr[F] = {
    import c.universe._
    val y = new YajomContext(c)
    val objectFactoryType: c.Type = m.actualType.asInstanceOf[TypeRef].args.head
    c.Expr[F](y.createOnNull.process[F](y)(func.asInstanceOf[y.c.Expr[F]], objectFactoryType.asInstanceOf[y.c.Type]).asInstanceOf[c.Tree])
  }
}
gark87/yajom
yajom-macros/src/main/scala/org/gark87/yajom/macros/CreateOnNull.scala
Scala
mit
5,567