| code (string, 5-1M chars) | repo_name (string, 5-109 chars) | path (string, 6-208 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 5-1M) |
|---|---|---|---|---|---|
import compiletime.summonAll

@main def Test =
  given Int = 10
  given String = "foo"
  given Double = 1.2
  println(summonAll[Int *: String *: Double *: EmptyTuple])
| dotty-staging/dotty | tests/run/summonAll.scala | Scala | apache-2.0 | 168 |
package forimpatient.chapter03
/**
* Created by Iryna Kharaborkina on 7/28/16.
*
* Solution to the Chapter 03 Exercise 07 'Scala for the Impatient' by Horstmann C.S.
*
* Write a code snippet that produces all values from an array with duplicates removed. (Hint: Look at Scaladoc.)
*/
object Exercise07 extends App {
  println("Chapter 03 Exercise 07")

  val a = Array(2, 5, 4, 1, 2, 5, 7, 6, 2, 1, 4, 3, 7)
  println(a.mkString(", "))
  println(a.distinct.mkString(", "))
}
| Kiryna/Scala-for-the-Impatient | src/forimpatient/chapter03/Exercise07.scala | Scala | apache-2.0 | 488 |
package jigg.pipeline
/*
Copyright 2013-2017 Hiroshi Noji
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import java.io.File
import java.util.Properties
import scala.xml._
import jigg.util.PropertiesUtil
import jigg.util.XMLUtil.RichNode
class CandCPOSAnnotator(override val name: String, override val props: Properties)
extends AnnotatingSentencesInParallel { self =>
@Prop(gloss = "Path to candc pos tagger binary (/path/to/bin/pos)", required = true) var path = ""
@Prop(gloss = "Path to candc models directory (containing parser, pos, etc)", required = true) var models = ""
readProps()
localAnnotators // instantiate lazy val here
def mkLocalAnnotator = new LocalCandCPOSAnnotator
class LocalCandCPOSAnnotator
extends SentencesAnnotator with LocalAnnotator with IOCreator {
val modelPath = new File(models, "pos").getPath
def command = s"${self.path} --model ${modelPath}"
override def launchTesters = Seq(
LaunchTester("AAAAA", _ startsWith "AAAAA", _ startsWith "AAAAA")
)
def softwareUrl = "http://www.cl.cam.ac.uk/~sc609/candc-1.00.html"
val tagger = mkIO()
override def close = tagger.close()
override def newSentenceAnnotation(sentence: Node): Node = {
val tokens = (sentence \ "tokens").head
val tokenSeq = tokens.child
val input = tokenSeq map (_ \@ "form") mkString " "
val output = runTagger(input)
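// each tagged token comes back as "word|POS"; keep only the tag after the
// last '|' (the word itself may contain '|')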
val tags = output.split(" ").map { t => t.drop(t.lastIndexOf('|')+1) }
val taggedTokenSeq =
tokenSeq zip tags map { case (token, tag) => token.addAttribute("pos", tag) }
val newTokens = {
val nameAdded = tokens addAnnotatorName name
nameAdded replaceChild taggedTokenSeq
}
sentence addOrOverwriteChild newTokens
}
def runTagger(text: String): String = {
tagger.safeWriteWithFlush(text)
val out = tagger.readUntil(_ => true) // read only single line
out(0)
}
}
override def requires = Set(Requirement.Tokenize, Requirement.Ssplit)
override def requirementsSatisfied = Set(Requirement.POS)
}
class CandCAnnotator(override val name: String, override val props: Properties)
extends AnnotatingSentencesInParallel { self =>
@Prop(gloss = "Path to candc parser binary (/path/to/bin/parser)", required = true) var path = ""
@Prop(gloss = "Path to the models directory (containing parser, pos, etc)", required = true) var models = ""
readProps()
localAnnotators // instantiate lazy val here
def mkLocalAnnotator = new LocalCandCAnnotator
class LocalCandCAnnotator
extends SentencesAnnotator with LocalAnnotator with IOCreator {
def command = {
val modelPath = new File(models, "parser").getPath
val superPath = new File(models, "super").getPath
s"${self.path} --model ${modelPath} --super ${superPath}"
}
override def launchTesters = Seq(
LaunchTester("a|DT", _ == "</ccg>", _ endsWith "</ccg>"))
override def defaultArgs = Seq("--printer", "xml")
def softwareUrl = "http://www.cl.cam.ac.uk/~sc609/candc-1.00.html"
val candc = mkIO()
override def close() = candc.close()
override def newSentenceAnnotation(sentence: Node): Node = {
val output = runCandC(mkInput(sentence))
val ccg = XML.loadString(output.mkString("\n"))
CandCAnnotator.annotateCCGSpans(sentence, ccg, name)
}
private def mkInput(sentence: Node): String = {
val tokenSeq = sentence \\ "token"
tokenSeq.map { t => (t \@ "form") + "|" + (t \@ "pos") } mkString " "
}
private def runCandC(text: String): Seq[String] = {
candc.safeWriteWithFlush(text)
candc.readUntil(_ == "</ccg>").map(_.trim()).filter(_.startsWith("<"))
}
}
override def requires = Set(Requirement.Ssplit, Requirement.Tokenize, Requirement.POS)
override def requirementsSatisfied = Set(Requirement.CCGDerivation)
}
object CandCAnnotator {
/** A common procedure to transform from candc-style <ccg> output to Jigg's CCG
* annotation.
*/
def annotateCCGSpans(sentence: Node, ccg: Node, name: String) = {
val decoratedCCG: Node = {
val a = assignId(ccg)
val b = assignChildren(a, sentence \\ "token")
assignSpan(b)
}
val spans = decoratedCCG.descendant.collect {
case e: Elem if (e.label == "lf" || e.label == "rule") =>
val a = e.attributes
val rule = if (e.label == "lf") None else Some(a("type"))
<span
id={ a("id") }
begin={ a("begin") }
end={ a("end") }
symbol={ a("cat") }
rule={ rule }
children={ a("children") }/>
}
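// the root of the derivation is the first non-text child of the <ccg> element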
val root = decoratedCCG.nonAtomChild()(0) \@ "id"
sentence addChild (
<ccg annotators={ name } root={ root } id={ Annotation.CCG.nextId }>{ spans }</ccg>)
}
def assignId(ccg: Node): Node = {
ccg.replaceIf({ e => e.label == "lf" || e.label == "rule" }, continueSearch=true) {
_.addAttribute("id", Annotation.CCGSpan.nextId)
}
}
// Assume `assignId` is already performed
def assignChildren(ccg: Node, tokens: Seq[Node]): Node = {
// this is top-down to assign small ids to shallower nodes
ccg.replaceIf(_=>true, continueSearch=true) {
case e: Elem if e.label == "lf" || e.label == "rule" =>
val children: String = e.label match {
case "lf" =>
val idx = (e \@ "start").toInt
tokens(idx) \@ "id"
case "rule" =>
e.nonAtomChild.map(_ \@ "id") mkString " "
}
e.addAttribute("children", children)
case e => e
}
}
def assignSpan(ccg: Node): Node = {
// fill begin/end values bottom-up
ccg.replaceIfBottomup { e => e.label == "lf" || e.label == "rule" } { e =>
val (begin, end) = e.label match {
case "lf" =>
val b = (e \@ "start")
(b, (b.toInt + 1).toString)
case "rule" =>
val children = e.nonAtomChild
children.size match {
case 2 =>
(children(0) \@ "begin", children(1) \@ "end")
case 1 => // unary
(children(0) \@ "begin", children(0) \@ "end")
}
}
e.addAttributes(Map("begin" -> begin, "end" -> end))
}
}
}
| mynlp/jigg | src/main/scala/jigg/pipeline/CandCAnnotator.scala | Scala | apache-2.0 | 6,704 |
package io.taig.android.widget.operation
import android.support.v4.view.ViewPager
final class viewPager(viewPager: ViewPager) {
  def next(smoothly: Boolean = true): Unit =
    viewPager.setCurrentItem(viewPager.getCurrentItem + 1, smoothly)

  def previous(smoothly: Boolean = true): Unit =
    viewPager.setCurrentItem(viewPager.getCurrentItem - 1, smoothly)
}
| Taig/Toolbelt | widget/src/main/scala/io/taig/android/widget/operation/viewPager.scala | Scala | mit | 339 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.data
import java.util.Map.Entry
import com.typesafe.scalalogging.LazyLogging
import org.apache.accumulo.core.client.{Connector, IteratorSetting, ScannerBase}
import org.apache.accumulo.core.data.{Key, Value}
import org.apache.accumulo.core.security.Authorizations
import org.apache.hadoop.io.Text
import org.locationtech.geomesa.accumulo.util.BatchMultiScanner
import org.locationtech.geomesa.index.PartitionParallelScan
import org.locationtech.geomesa.index.api.QueryPlan.{FeatureReducer, ResultsToFeatures}
import org.locationtech.geomesa.index.api.{FilterStrategy, QueryPlan}
import org.locationtech.geomesa.index.utils.Explainer
import org.locationtech.geomesa.index.utils.Reprojection.QueryReferenceSystems
import org.locationtech.geomesa.index.utils.ThreadManagement.{LowLevelScanner, ManagedScan, Timeout}
import org.locationtech.geomesa.utils.collection.{CloseableIterator, SelfClosingIterator}
import org.opengis.filter.Filter
/**
* Accumulo-specific query plan
*/
sealed trait AccumuloQueryPlan extends QueryPlan[AccumuloDataStore] {
override type Results = Entry[Key, Value]
def tables: Seq[String]
def columnFamily: Option[Text]
def ranges: Seq[org.apache.accumulo.core.data.Range]
def iterators: Seq[IteratorSetting]
def numThreads: Int
def join: Option[(AccumuloQueryPlan.JoinFunction, AccumuloQueryPlan)] = None
override def explain(explainer: Explainer, prefix: String = ""): Unit =
AccumuloQueryPlan.explain(this, explainer, prefix)
protected def configure(scanner: ScannerBase): Unit = {
iterators.foreach(scanner.addScanIterator)
columnFamily.foreach(scanner.fetchColumnFamily)
}
}
object AccumuloQueryPlan extends LazyLogging {
import scala.collection.JavaConverters._
// scan result => range
type JoinFunction = Entry[Key, Value] => org.apache.accumulo.core.data.Range
def explain(plan: AccumuloQueryPlan, explainer: Explainer, prefix: String): Unit = {
explainer.pushLevel(s"${prefix}Plan: ${plan.getClass.getSimpleName}")
explainer(s"Tables: ${plan.tables.mkString(", ")}")
explainer(s"Column Families: ${plan.columnFamily.getOrElse("all")}")
explainer(s"Ranges (${plan.ranges.size}): ${plan.ranges.take(5).map(rangeToString).mkString(", ")}")
explainer(s"Iterators (${plan.iterators.size}):", plan.iterators.map(i => () => i.toString))
plan.join.foreach { j => explain(j._2, explainer, "Join ") }
explainer.popLevel()
}
// converts a range to a printable string - only includes the row
private def rangeToString(r: org.apache.accumulo.core.data.Range): String = {
val a = if (r.isStartKeyInclusive) "[" else "("
val z = if (r.isEndKeyInclusive) "]" else ")"
val start = if (r.isInfiniteStartKey) "-inf" else keyToString(r.getStartKey)
val stop = if (r.isInfiniteStopKey) "+inf" else keyToString(r.getEndKey)
s"$a$start::$stop$z"
}
// converts a key to a printable string - only includes the row
private def keyToString(k: Key): String =
Key.toPrintableString(k.getRow.getBytes, 0, k.getRow.getLength, k.getRow.getLength)
// plan that will not actually scan anything
case class EmptyPlan(filter: FilterStrategy, reducer: Option[FeatureReducer] = None) extends AccumuloQueryPlan {
override val tables: Seq[String] = Seq.empty
override val iterators: Seq[IteratorSetting] = Seq.empty
override val ranges: Seq[org.apache.accumulo.core.data.Range] = Seq.empty
override val columnFamily: Option[Text] = None
override val numThreads: Int = 0
override val resultsToFeatures: ResultsToFeatures[Entry[Key, Value]] = ResultsToFeatures.empty
override val sort: Option[Seq[(String, Boolean)]] = None
override val maxFeatures: Option[Int] = None
override val projection: Option[QueryReferenceSystems] = None
override def scan(ds: AccumuloDataStore): CloseableIterator[Entry[Key, Value]] = CloseableIterator.empty
}
// batch scan plan
case class BatchScanPlan(
filter: FilterStrategy,
tables: Seq[String],
ranges: Seq[org.apache.accumulo.core.data.Range],
iterators: Seq[IteratorSetting],
columnFamily: Option[Text],
resultsToFeatures: ResultsToFeatures[Entry[Key, Value]],
reducer: Option[FeatureReducer],
sort: Option[Seq[(String, Boolean)]],
maxFeatures: Option[Int],
projection: Option[QueryReferenceSystems],
numThreads: Int
) extends AccumuloQueryPlan {
override def scan(ds: AccumuloDataStore): CloseableIterator[Entry[Key, Value]] = {
// convert the relative timeout to an absolute timeout up front
val timeout = ds.config.queries.timeout.map(Timeout.apply)
// note: calculate authorizations up front so that multi-threading doesn't mess up auth providers
scan(ds.connector, ds.auths, timeout)
}
/**
* Scan with pre-computed auths
*
* @param connector connector
* @param auths auths
* @param timeout absolute stop time, as sys time
* @return
*/
def scan(
connector: Connector,
auths: Authorizations,
timeout: Option[Timeout]): CloseableIterator[Entry[Key, Value]] = {
if (PartitionParallelScan.toBoolean.contains(true)) {
// kick off all the scans at once
tables.map(scanner(connector, _, auths, timeout)).foldLeft(CloseableIterator.empty[Entry[Key, Value]])(_ ++ _)
} else {
// kick off the scans sequentially as they finish
SelfClosingIterator(tables.iterator).flatMap(scanner(connector, _, auths, timeout))
}
}
/**
*
* @param connector connector
* @param table table
* @param auths auths
* @param timeout absolute stop time, as sys time
* @return
*/
private def scanner(
connector: Connector,
table: String,
auths: Authorizations,
timeout: Option[Timeout]): CloseableIterator[Entry[Key, Value]] = {
val scanner = connector.createBatchScanner(table, auths, numThreads)
scanner.setRanges(ranges.asJava)
configure(scanner)
timeout match {
case None => new ScanIterator(scanner)
case Some(t) => new ManagedScanIterator(t, new AccumuloScanner(scanner), this)
}
}
}
// join on multiple tables - requires multiple scans
case class JoinPlan(
filter: FilterStrategy,
tables: Seq[String],
ranges: Seq[org.apache.accumulo.core.data.Range],
iterators: Seq[IteratorSetting],
columnFamily: Option[Text],
numThreads: Int,
joinFunction: JoinFunction,
joinQuery: BatchScanPlan
) extends AccumuloQueryPlan {
override val join: Some[(JoinFunction, BatchScanPlan)] = Some((joinFunction, joinQuery))
override def resultsToFeatures: ResultsToFeatures[Entry[Key, Value]] = joinQuery.resultsToFeatures
override def reducer: Option[FeatureReducer] = joinQuery.reducer
override def sort: Option[Seq[(String, Boolean)]] = joinQuery.sort
override def maxFeatures: Option[Int] = joinQuery.maxFeatures
override def projection: Option[QueryReferenceSystems] = joinQuery.projection
override def scan(ds: AccumuloDataStore): CloseableIterator[Entry[Key, Value]] = {
// convert the relative timeout to an absolute timeout up front
val timeout = ds.config.queries.timeout.map(Timeout.apply)
// calculate authorizations up front so that multi-threading doesn't mess up auth providers
val auths = ds.auths
val joinTables = joinQuery.tables.iterator
if (PartitionParallelScan.toBoolean.contains(true)) {
// kick off all the scans at once
tables.map(scanner(ds.connector, _, joinTables.next, auths, timeout))
.foldLeft(CloseableIterator.empty[Entry[Key, Value]])(_ ++ _)
} else {
// kick off the scans sequentially as they finish
SelfClosingIterator(tables.iterator).flatMap(scanner(ds.connector, _, joinTables.next, auths, timeout))
}
}
private def scanner(
connector: Connector,
table: String,
joinTable: String,
auths: Authorizations,
timeout: Option[Timeout]): CloseableIterator[Entry[Key, Value]] = {
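// a single range can use a lighter-weight sequential Scanner; multiple
// ranges need a BatchScanner with the configured thread count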
val primary = if (ranges.lengthCompare(1) == 0) {
val scanner = connector.createScanner(table, auths)
scanner.setRange(ranges.head)
scanner
} else {
val scanner = connector.createBatchScanner(table, auths, numThreads)
scanner.setRanges(ranges.asJava)
scanner
}
configure(primary)
val join = joinQuery.copy(tables = Seq(joinTable))
new BatchMultiScanner(connector, primary, join, joinFunction, auths, timeout)
}
}
private class ScanIterator(scanner: ScannerBase) extends CloseableIterator[Entry[Key, Value]] {
private val iter = scanner.iterator.asScala
override def hasNext: Boolean = iter.hasNext
override def next(): Entry[Key, Value] = iter.next()
override def close(): Unit = scanner.close()
}
private class ManagedScanIterator(
override val timeout: Timeout,
override protected val underlying: AccumuloScanner,
plan: AccumuloQueryPlan
) extends ManagedScan[Entry[Key, Value]] {
override protected def typeName: String = plan.filter.index.sft.getTypeName
override protected def filter: Option[Filter] = plan.filter.filter
}
private class AccumuloScanner(scanner: ScannerBase) extends LowLevelScanner[Entry[Key, Value]] {
override def iterator: Iterator[Entry[Key, Value]] = scanner.iterator.asScala
override def close(): Unit = scanner.close()
}
}
| aheyne/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/data/AccumuloQueryPlan.scala | Scala | apache-2.0 | 10,060 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.ast.conditions
import org.neo4j.cypher.internal.frontend.v2_3.ast._
import org.neo4j.cypher.internal.frontend.v2_3.test_helpers.CypherFunSuite
class ContainsNoNodesOfTypeTest extends CypherFunSuite with AstConstructionTestSupport {
val condition: (Any => Seq[String]) = containsNoNodesOfType[UnaliasedReturnItem]()
test("Happy when not finding UnaliasedReturnItem") {
val ast: ASTNode = Match(optional = false, Pattern(Seq(EveryPath(NodePattern(None, Seq(), None, naked = true)_)))_, Seq(), None)_
condition(ast) should equal(Seq())
}
test("Fails when finding UnaliasedReturnItem") {
val ast: ASTNode = Return(false, ReturnItems(includeExisting = false, Seq(UnaliasedReturnItem(Identifier("foo")_, "foo")_))_, None, None, None)_
condition(ast) should equal(Seq("Expected none but found UnaliasedReturnItem at position line 1, column 0 (offset: 0)"))
}
}
| HuangLS/neo4j | community/cypher/cypher-compiler-2.3/src/test/scala/org/neo4j/cypher/internal/compiler/v2_3/ast/conditions/ContainsNoNodesOfTypeTest.scala | Scala | apache-2.0 | 1,731 |
/**
* COPYRIGHT (C) 2015 Alpine Data Labs Inc. All Rights Reserved.
*/
package com.alpine.plugin.core.dialog
import com.alpine.plugin.core.annotation.AlpineSdkApi
/**
* :: AlpineSdkApi ::
*/
@AlpineSdkApi
trait TabularDatasetColumnDropdownBox extends DropdownBox
| holdenk/PluginSDK | plugin-core/src/main/scala/com/alpine/plugin/core/dialog/TabularDatasetColumnDropdownBox.scala | Scala | apache-2.0 | 270 |
package metaconfig
import metaconfig.generic.Surface
case class Inner(a: String = "a", b: Boolean = true)
object Inner {
  implicit val surface: Surface[Inner] = generic.deriveSurface[Inner]
  implicit val codec: ConfCodec[Inner] = generic.deriveCodec(Inner())
}
| olafurpg/metaconfig | metaconfig-tests/jvm/src/test/scala/metaconfig/Inner.scala | Scala | apache-2.0 | 265 |
/*
* Copyright 2011 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.ostrich
package stats
import java.util.concurrent.ConcurrentHashMap
import scala.collection.{JavaConversions, Map, mutable, immutable}
import scala.util.matching.Regex
import com.twitter.conversions.time._
import com.twitter.json.{Json, JsonSerializable}
import com.twitter.util.Duration
import admin.{ServiceTracker, PeriodicBackgroundProcess}
object StatsListener {
val listeners = new ConcurrentHashMap[(String, StatsCollection), StatsListener]
def clearAll() {
listeners.clear()
}
def getOrRegister(id: String, collection: StatsCollection, listener: => StatsListener) = {
val key = (id, collection)
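// putIfAbsent admits at most one listener per key, so racing callers all
// end up sharing the same registered instance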
Option {
listeners.get(key)
}.getOrElse {
listeners.putIfAbsent(key, listener)
listeners.get(key)
}
}
/**
* Get a StatsListener that's attached to a specified stats collection tracked by name,
* creating it if it doesn't already exist.
*/
def apply(name: String, collection: StatsCollection): StatsListener = {
getOrRegister("ns:%s".format(name), collection, new StatsListener(collection, false))
}
/**
* Get a StatsListener that's attached to a specified stats collection and tracks periodic stats
* over the given duration, creating it if it doesn't already exist.
*/
def apply(period: Duration, collection: StatsCollection): StatsListener = {
getOrRegister("period:%d".format(period.inMillis), collection,
new LatchedStatsListener(collection, period, false))
}
/**
* Get a StatsListener that's attached to a specified stats collection and tracks periodic stats
* over the given duration, creating it if it doesn't already exist.
*/
def apply(period: Duration, collection: StatsCollection, filters: List[Regex]): StatsListener = {
getOrRegister("period:%d".format(period.inMillis), collection,
new LatchedStatsListener(collection, period, false, filters))
}
/**
* Get a StatsListener that's attached to the global stats collection and tracks periodic stats
* over the given duration, creating it if it doesn't already exist.
*/
def apply(period: Duration): StatsListener = apply(period, Stats)
}
/**
* Attaches to a StatsCollection and reports on all the counters, metrics, gauges, and labels.
* Each report resets state, so counters are reported as deltas, and metrics distributions are
* only tracked since the last report.
*/
class StatsListener(collection: StatsCollection, startClean: Boolean, filters: List[Regex]) {
def this(collection: StatsCollection, startClean: Boolean) = this(collection, startClean, Nil)
def this(collection: StatsCollection) = this(collection, true, Nil)
private val lastCounterMap = new mutable.HashMap[String, Long]()
private val lastMetricMap = new mutable.HashMap[String, Histogram]()
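// join the filter patterns into a single alternation regex: (p1)|(p2)|...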
private val filterRegex = filters.mkString("(", ")|(", ")").r
collection.addListener(this)
if (startClean) {
collection.getCounters().foreach { case (key, value) =>
lastCounterMap(key) = value
}
collection.getMetrics().foreach { case (key, value) =>
val histo = new Histogram
histo.merge(value.histogram)
lastMetricMap(key) = histo
}
}
def getCounters(): Map[String, Long] = synchronized {
val deltas = new mutable.HashMap[String, Long]
for ((key, newValue) <- collection.getCounters()) {
deltas(key) = Stats.delta(lastCounterMap.getOrElse(key, 0), newValue)
lastCounterMap(key) = newValue
}
deltas
}
def getGauges(): Map[String, Double] = collection.getGauges()
def getLabels(): Map[String, String] = collection.getLabels()
def getMetrics(): Map[String, Histogram] = synchronized {
val deltas = new mutable.HashMap[String, Histogram]
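// report each histogram as a delta against the previous snapshot, then
// latch the current values as the new baseline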
for ((key, newValue) <- collection.getMetrics()) {
val oldValue = lastMetricMap.getOrElseUpdate(key, new Histogram())
deltas(key) = newValue.histogram - oldValue
oldValue.clear()
oldValue.merge(newValue.histogram)
}
deltas
}
def get(): StatsSummary =
StatsSummary(getCounters(), getMetrics() mapValues { Distribution(_) }, getGauges(), getLabels())
def get(filtered: Boolean): StatsSummary = if (filtered) getFiltered() else get()
def getFiltered(): StatsSummary = {
get().filterOut(filterRegex)
}
}
/**
* A StatsListener that cycles over a given period, and once each period, grabs a snapshot of the
* given StatsCollection and computes deltas since the previous timeslice. For example, for a
* one-minute period, it grabs a snapshot of stats at the top of each minute, and for the rest of
* the minute, reports these "latched" stats.
*/
class LatchedStatsListener(
collection: StatsCollection,
period: Duration,
startClean: Boolean,
filters: List[Regex]
) extends StatsListener(collection, startClean, filters) {
def this(collection: StatsCollection, period: Duration, startClean: Boolean) = this(collection, period, startClean, Nil)
def this(collection: StatsCollection, period: Duration) = this(collection, period, true, Nil)
@volatile private var counters: Map[String, Long] = Map()
@volatile private var gauges: Map[String, Double] = Map()
@volatile private var labels: Map[String, String] = Map()
private var metrics: mutable.Map[String, Histogram] = new mutable.HashMap[String, Histogram]
nextLatch()
override def getCounters() = counters
override def getGauges() = gauges
override def getLabels() = labels
override def getMetrics() = synchronized { metrics }
def nextLatch() {
counters = super.getCounters()
labels = super.getLabels()
syncMetrics()
// do gauges last since they might be constructed using the others.
gauges = super.getGauges()
}
private[this] def syncMetrics() {
synchronized {
val newMetrics = super.getMetrics()
newMetrics foreach { case (key, value) =>
val currValue = metrics.getOrElseUpdate(key, new Histogram())
currValue.clear()
currValue.merge(value)
}
}
}
// FIXME this would be more efficient as a Timer for all LatchedStatsListeners?
// lazy to allow a subclass to override it
lazy val service = new PeriodicBackgroundProcess("LatchedStatsListener", period) {
def periodic() {
nextLatch()
}
}
ServiceTracker.register(service)
service.start()
}
| hydro2k/ostrich | src/main/scala/com/twitter/ostrich/stats/StatsListener.scala | Scala | apache-2.0 | 6,910 |
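// A union with Nothing should be equivalent to its other member, whether
// that member is a class type, a singleton object type, or a literal type.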
object Foo
val x: Foo.type = Foo
val y: Foo.type | Nothing = x
val z: Foo.type = y
val a: 1 | Nothing = 1
val b: 1 = a
val intSuper = summon[(Int | Nothing) <:< Int]
val intSub = summon[Int <:< (Int | Nothing)]
val intEq = summon[Int =:= (Int | Nothing)]
val fooSuper = summon[(Foo.type | Nothing) <:< Foo.type]
val fooSub = summon[Foo.type <:< (Foo.type | Nothing)]
val fooEq = summon[Foo.type =:= (Foo.type | Nothing)]
val oneSuper = summon[(1 | Nothing) <:< 1]
val oneSub = summon[1 <:< (1 | Nothing)]
val oneEq = summon[1 =:= (1 | Nothing)]
| dotty-staging/dotty | tests/pos/i11003.scala | Scala | apache-2.0 | 550 |
package nodes.misc
import nodes.util.{SparseFeatureVectorizer, AllSparseFeatures, CommonSparseFeatures}
import org.apache.spark.SparkContext
import org.scalatest.FunSuite
import pipelines.Logging
import workflow.PipelineContext
class SparseFeatureVectorizerSuite extends FunSuite with PipelineContext with Logging {
test("sparse feature vectorization") {
sc = new SparkContext("local", "test")
val featureVectorizer = new SparseFeatureVectorizer(Map("First" -> 0, "Second" -> 1, "Third" -> 2))
val test = Seq(("Third", 4.0), ("Fourth", 6.0), ("First", 1.0))
val vector = featureVectorizer.apply(sc.parallelize(Seq(test))).first()
assert(vector.size == 3)
assert(vector(0) == 1)
assert(vector(1) == 0)
assert(vector(2) == 4)
}
test("all sparse feature selection") {
sc = new SparkContext("local", "test")
val train = sc.parallelize(List(Seq(("First", 0.0), ("Second", 6.0)), Seq(("Third", 3.0), ("Second", 4.0))))
val featureVectorizer = AllSparseFeatures().fit(train.map(x => x))
// The selected features should now be "First", "Second", and "Third"
val test = Seq(("Third", 4.0), ("Fourth", 6.0), ("First", 1.0))
val out = featureVectorizer.apply(sc.parallelize(Seq(test))).first().toArray
assert(out === Array(1.0, 0.0, 4.0))
}
test("common sparse feature selection") {
sc = new SparkContext("local", "test")
val train = sc.parallelize(List(
Seq(("First", 0.0), ("Second", 6.0)),
Seq(("Third", 3.0), ("Second", 4.8)),
Seq(("Third", 7.0), ("Fourth", 5.0)),
Seq(("Fifth", 5.0), ("Second", 7.3))
))
val featureVectorizer = CommonSparseFeatures(2).fit(train.map(x => x))
// The selected features should now be "Second", and "Third"
val test = Seq(("Third", 4.0), ("Seventh", 8.0), ("Second", 1.3), ("Fourth", 6.0), ("First", 1.0))
val out = featureVectorizer.apply(sc.parallelize(Seq(test))).first().toArray
assert(out === Array(1.3, 4.0))
}
}
| tomerk/keystone | src/test/scala/nodes/misc/SparseFeatureVectorizerSuite.scala | Scala | apache-2.0 | 1,982 |
package org.broadinstitute.clio.client.webclient
import java.time.OffsetDateTime
import java.util.Date
import com.google.auth.oauth2.{AccessToken, GoogleCredentials}
import org.scalatest.{FlatSpec, Matchers}
import scala.concurrent.duration._
import scala.util.Random
class GoogleCredentialsGeneratorSpec extends FlatSpec with Matchers {
import GoogleCredentialsGeneratorSpec.MockOAuth2Credentials
behavior of "GoogleTokenGenerator"
it should "refresh uninitialized OAuth2Credentials" in {
val mockCreds = new MockOAuth2Credentials()
val generator = new GoogleCredentialsGenerator(mockCreds)
mockCreds.getAccessToken should be(null)
val token = generator.generateCredentials()
mockCreds.getAccessToken shouldNot be(null)
mockCreds.getAccessToken.getTokenValue should be(token.token)
}
it should "refresh expired OAuth2Credentials" in {
// Zero duration so each refresh produces an instantly-expired token.
val mockCreds = new MockOAuth2Credentials(Duration.Zero)
val generator = new GoogleCredentialsGenerator(mockCreds)
val token1 = generator.generateCredentials()
val token2 = generator.generateCredentials()
token1.token shouldNot be(token2.token)
mockCreds.getAccessToken.getTokenValue should be(token2.token)
}
it should "not refresh unexpired OAuth2Credentials" in {
val mockCreds = new MockOAuth2Credentials(100.seconds)
val generator = new GoogleCredentialsGenerator(mockCreds)
val token1 = generator.generateCredentials()
val token2 = generator.generateCredentials()
token1 should be(token2)
}
}
object GoogleCredentialsGeneratorSpec {
class MockOAuth2Credentials(tokenDuration: FiniteDuration = 1.second)
extends GoogleCredentials {
override def refreshAccessToken(): AccessToken = {
val newToken = Random.nextString(20)
val newExpiration =
Date.from(OffsetDateTime.now().plusNanos(tokenDuration.toNanos).toInstant)
new AccessToken(newToken, newExpiration)
}
}
}
| broadinstitute/clio | clio-client/src/test/scala/org/broadinstitute/clio/client/webclient/GoogleCredentialsGeneratorSpec.scala | Scala | bsd-3-clause | 2,023 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.thriftserver
import java.util.{ArrayList => JArrayList, Arrays, List => JList}
import scala.collection.JavaConverters._
import org.apache.commons.lang3.exception.ExceptionUtils
import org.apache.hadoop.hive.metastore.api.{FieldSchema, Schema}
import org.apache.hadoop.hive.ql.Driver
import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse
import org.apache.spark.SparkThrowable
import org.apache.spark.internal.Logging
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.execution.{QueryExecution, SQLExecution}
import org.apache.spark.sql.execution.HiveResult.hiveResultString
import org.apache.spark.sql.internal.{SQLConf, VariableSubstitution}
private[hive] class SparkSQLDriver(val context: SQLContext = SparkSQLEnv.sqlContext)
extends Driver
with Logging {
private[hive] var tableSchema: Schema = _
private[hive] var hiveResponse: Seq[String] = _
override def init(): Unit = {
}
private def getResultSetSchema(query: QueryExecution): Schema = {
val analyzed = query.analyzed
logDebug(s"Result Schema: ${analyzed.output}")
if (analyzed.output.isEmpty) {
new Schema(Arrays.asList(new FieldSchema("Response code", "string", "")), null)
} else {
val fieldSchemas = analyzed.output.map { attr =>
new FieldSchema(attr.name, attr.dataType.catalogString, "")
}
new Schema(fieldSchemas.asJava, null)
}
}
override def run(command: String): CommandProcessorResponse = {
try {
val substitutorCommand = SQLConf.withExistingConf(context.conf) {
new VariableSubstitution().substitute(command)
}
context.sparkContext.setJobDescription(substitutorCommand)
val execution = context.sessionState.executePlan(context.sql(command).logicalPlan)
hiveResponse = SQLExecution.withNewExecutionId(execution, Some("cli")) {
hiveResultString(execution.executedPlan)
}
tableSchema = getResultSetSchema(execution)
new CommandProcessorResponse(0)
} catch {
case st: SparkThrowable =>
logDebug(s"Failed in [$command]", st)
new CommandProcessorResponse(1, ExceptionUtils.getStackTrace(st), st.getSqlState, st)
case cause: Throwable =>
logError(s"Failed in [$command]", cause)
new CommandProcessorResponse(1, ExceptionUtils.getStackTrace(cause), null, cause)
}
}
override def close(): Int = {
hiveResponse = null
tableSchema = null
0
}
override def getResults(res: JList[_]): Boolean = {
if (hiveResponse == null) {
false
} else {
res.asInstanceOf[JArrayList[String]].addAll(hiveResponse.asJava)
hiveResponse = null
true
}
}
override def getSchema: Schema = tableSchema
override def destroy(): Unit = {
super.destroy()
hiveResponse = null
tableSchema = null
}
}
| ueshin/apache-spark | sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLDriver.scala | Scala | apache-2.0 | 3,688 |
package scadla.examples
import scadla._
import scadla.InlineOps._
import scadla.utils.extrusion._
import scadla.EverythingIsIn.millimeters
object ExtrusionDemo {

  def main(args: Array[String]): Unit = {
    val r = scadla.backends.Renderer.default
    val objects = List(
      _2020(100),
      C(20, 20, 10, 4)(100),
      H(20, 20, 4)(100),
      L(20, 20, 4)(100),
      T(20, 20, 4)(100),
      U(20, 20, 4)(100),
      Z(20, 20, 4)(100)
    )
    val moved = objects.zipWithIndex.map { case (o, i) => o.moveY(25 * i) }
    r.view(Union(moved: _*))
  }
}
| dzufferey/scadla | src/main/scala/scadla/examples/ExtrusionDemo.scala | Scala | apache-2.0 | 550 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iht.controllers.application.assets.stocksAndShares
import iht.config.AppConfig
import iht.controllers.application.ApplicationControllerTest
import iht.forms.ApplicationForms._
import iht.models.application.ApplicationDetails
import iht.testhelpers.CommonBuilder
import iht.testhelpers.TestHelper._
import iht.utils.CommonHelper
import iht.views.html.application.asset.stocksAndShares.stocks_and_shares_listed
import play.api.mvc.MessagesControllerComponents
import play.api.test.Helpers.{contentAsString, _}
import uk.gov.hmrc.play.bootstrap.frontend.controller.FrontendController
class StocksAndSharesListedControllerTest extends ApplicationControllerTest {
val submitUrl = CommonHelper.addFragmentIdentifierToUrl(routes.StocksAndSharesOverviewController.onPageLoad().url, AssetsStocksListedID)
def setUpTests(applicationDetails: ApplicationDetails) = {
createMocksForApplication(mockCachingConnector,
mockIhtConnector,
appDetails = Some(applicationDetails),
getAppDetails = true,
saveAppDetails = true,
storeAppDetailsInCache = true)
}
def stocksAndSharesListedController = new TestController {
val authConnector = mockAuthConnector
override val cachingConnector = mockCachingConnector
override val ihtConnector = mockIhtConnector
}
def stocksAndSharesListedControllerNotAuthorised = new TestController {
val authConnector = mockAuthConnector
override val cachingConnector = mockCachingConnector
override val ihtConnector = mockIhtConnector
}
protected abstract class TestController extends FrontendController(mockControllerComponents) with StocksAndSharesListedController {
override val cc: MessagesControllerComponents = mockControllerComponents
override implicit val appConfig: AppConfig = mockAppConfig
override val stocksAndSharesListedView: stocks_and_shares_listed = app.injector.instanceOf[stocks_and_shares_listed]
}
"StocksAndSharesListedController" must {
"redirect to login page on PageLoad if the user is not logged in" in {
val result = stocksAndSharesListedControllerNotAuthorised.onPageLoad(createFakeRequest(isAuthorised = false))
status(result) must be(SEE_OTHER)
redirectLocation(result) must be (Some(loginUrl))
}
"redirect to login page on Submit if the user is not logged in" in {
val result = stocksAndSharesListedControllerNotAuthorised.onSubmit(createFakeRequest(isAuthorised = false))
status(result) must be(SEE_OTHER)
redirectLocation(result) must be (Some(loginUrl))
}
"respond with OK on page load" in {
val applicationDetails = CommonBuilder.buildApplicationDetails
setUpTests(applicationDetails)
val result = stocksAndSharesListedController.onPageLoad(createFakeRequest())
status(result) must be(OK)
}
"save application and go to stocksAndShares overview page on submit" in {
val applicationDetails = CommonBuilder.buildApplicationDetails
val formFill = stockAndShareListedForm.fill(CommonBuilder.buildStockAndShare.copy(isListed = Some(true),
valueListed = Some(200)))
implicit val request = createFakeRequest().withFormUrlEncodedBody(formFill.data.toSeq: _*)
setUpTests(applicationDetails)
val result = stocksAndSharesListedController.onSubmit()(request)
status(result) must be (SEE_OTHER)
redirectLocation(result) must be (Some(submitUrl))
}
"wipe out the sharesListed value if user selects No, save application and " +
"go to stocksAndShares overview page on submit" in {
val sharesListed = CommonBuilder.buildStockAndShare.copy(isListed = Some(false), valueListed = Some(200))
val applicationDetails = CommonBuilder.buildApplicationDetails.copy(
allAssets = Some(CommonBuilder.buildAllAssets.copy(
stockAndShare = Some(sharesListed))))
val formFill = stockAndShareListedForm.fill(sharesListed)
implicit val request = createFakeRequest().withFormUrlEncodedBody(formFill.data.toSeq: _*)
setUpTests(applicationDetails)
val result = stocksAndSharesListedController.onSubmit()(request)
status(result) must be (SEE_OTHER)
redirectLocation(result) must be (Some(submitUrl))
val capturedValue = verifyAndReturnSavedApplicationDetails(mockIhtConnector)
val expectedAppDetails = applicationDetails.copy(allAssets = applicationDetails.allAssets.map(_.copy(
stockAndShare = Some(CommonBuilder.buildStockAndShare.copy(valueListed = None, isListed = Some(false))))))
capturedValue mustBe expectedAppDetails
}
"display validation message when form is submitted with no values entered" in {
val applicationDetails = CommonBuilder.buildApplicationDetails
implicit val request = createFakeRequest()
setUpTests(applicationDetails)
val result = stocksAndSharesListedController.onSubmit()(request)
status(result) must be (BAD_REQUEST)
contentAsString(result) must include (messagesApi("error.problem"))
}
"redirect to overview when form is submitted with answer yes and a value entered" in {
val applicationDetails = CommonBuilder.buildApplicationDetails
implicit val request = createFakeRequest().withFormUrlEncodedBody(("isListed", "true"), ("valueListed", "233"))
setUpTests(applicationDetails)
val result = stocksAndSharesListedController.onSubmit()(request)
status(result) must be (SEE_OTHER)
redirectLocation(result) must be (Some(submitUrl))
}
"respond with bad request when incorrect value are entered on the page" in {
implicit val fakePostRequest = createFakeRequest().withFormUrlEncodedBody(("value", "utytyyterrrrrrrrrrrrrr"))
createMockToGetRegDetailsFromCacheNoOption(mockCachingConnector)
val result = stocksAndSharesListedController.onSubmit (fakePostRequest)
status(result) mustBe (BAD_REQUEST)
}
behave like controllerOnPageLoadWithNoExistingRegistrationDetails(mockCachingConnector,
stocksAndSharesListedController.onPageLoad(createFakeRequest()))
}
}
| hmrc/iht-frontend | test/iht/controllers/application/assets/stocksAndShares/StocksAndSharesListedControllerTest.scala | Scala | apache-2.0 | 6,755 |
package me.lignum.lambdacraft.computer.instructions
import me.lignum.lambdacraft.computer.{CPU, Instruction, InstructionType}
class Jmp extends Instruction {
  def canJump(p: CPU): Boolean = true

  override def getType: Byte = InstructionType.Jmp

  override def execute(p: CPU): Unit = {
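    // pop the relative offset from the operand stack and, if the jump
    // condition holds, move the program counter by that amount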
    val stack = p.processor.stack
    val offset = stack.pop()

    if (canJump(p)) {
      val newPC = p.processor.getProgramCounter + offset
      p.processor.setProgramCounter(newPC)
    }
  }
}
| Lignumm/LambdaCraft | src/main/scala/me/lignum/lambdacraft/computer/instructions/Jmp.scala | Scala | mit | 490 |
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.tensorflow.api.learn.layers.rnn
import org.platanios.tensorflow.api.ops
import org.platanios.tensorflow.api.ops.Output
/**
* @author Emmanouil Antonios Platanios
*/
package object cell {
private[cell] val KERNEL_NAME: String = "Weights"
private[cell] val BIAS_NAME : String = "Bias"
type Tuple[O, S] = ops.rnn.cell.Tuple[O, S]
type BasicTuple = Tuple[Output, Output]
val Tuple: ops.rnn.cell.Tuple.type = ops.rnn.cell.Tuple
type LSTMState = ops.rnn.cell.LSTMState
val LSTMState: ops.rnn.cell.LSTMState.type = ops.rnn.cell.LSTMState
type LSTMTuple = ops.rnn.cell.LSTMTuple
def LSTMTuple(output: Output, state: LSTMState): LSTMTuple = ops.rnn.cell.LSTMTuple(output, state)
private[rnn] trait API {
type RNNCell[O, OS, S, SS] = cell.RNNCell[O, OS, S, SS]
type BasicRNNCell = cell.BasicRNNCell
type GRUCell = cell.GRUCell
type BasicLSTMCell = cell.BasicLSTMCell
type LSTMCell = cell.LSTMCell
type DeviceWrapper[O, OS, S, SS] = cell.DeviceWrapper[O, OS, S, SS]
type DropoutWrapper[O, OS, S, SS] = cell.DropoutWrapper[O, OS, S, SS]
type ResidualWrapper[O, OS, S, SS] = cell.ResidualWrapper[O, OS, S, SS]
type MultiCell[O, OS, S, SS] = cell.MultiCell[O, OS, S, SS]
val BasicRNNCell : cell.BasicRNNCell.type = cell.BasicRNNCell
val GRUCell : cell.GRUCell.type = cell.GRUCell
val BasicLSTMCell : cell.BasicLSTMCell.type = cell.BasicLSTMCell
val LSTMCell : cell.LSTMCell.type = cell.LSTMCell
val DeviceWrapper : cell.DeviceWrapper.type = cell.DeviceWrapper
val DropoutWrapper : cell.DropoutWrapper.type = cell.DropoutWrapper
val ResidualWrapper: cell.ResidualWrapper.type = cell.ResidualWrapper
val MultiCell : cell.MultiCell.type = cell.MultiCell
type RNNTuple[O, S] = cell.Tuple[O, S]
type BasicTuple = cell.Tuple[Output, Output]
type LSTMState = cell.LSTMState
val LSTMState: cell.LSTMState.type = cell.LSTMState
type LSTMTuple = cell.Tuple[Output, LSTMState]
val RNNTuple: cell.Tuple.type = cell.Tuple
def LSTMTuple(output: Output, state: LSTMState): LSTMTuple = cell.Tuple(output, state)
}
}
| eaplatanios/tensorflow | tensorflow/scala/api/src/main/scala/org/platanios/tensorflow/api/learn/layers/rnn/cell/package.scala | Scala | apache-2.0 | 2,834 |
package service
import models.support.UserId
import models.user.User
sealed abstract class Access extends Ordered[Access] {
val v: Short
def read: Boolean
def write: Boolean
def compare(that: Access): Int = this.v.compare(that.v)
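// caps this level at Edit: Own is lowered to Edit, while View and Non pass through unchanged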
def ceilEdit = Seq(this, Edit).min
def read[T](block: () => T): Option[T] = if (read) { Some(block()) } else { None }
def write[T](block: () => T): Option[T] = if (write) { Some(block()) } else { None }
}
case object Own extends Access {
val v = 40.toShort
override def read = true
override def write = true
}
case object Edit extends Access {
val v = 30.toShort
override def read = true
override def write = true
}
case object View extends Access {
val v = 20.toShort
override def read = true
override def write = false
}
case object Non extends Access { // This is called Non so as to avoid naming conflicts with the Option None
val v = 10.toShort
override def read = false
override def write = false
}
object Access {
val non: Access = Non // Just for type change
val view: Access = View // Just for type change
val edit: Access = Edit // Just for type change
val own: Access = Own // Just for type change
def toNum(access: Access): Short = access.v
def fromNum(access: Short) = access match {
case Own.v => Own
case Edit.v => Edit
case View.v => View
case Non.v => Non
case _ => throw new IllegalArgumentException("number " + access + " does not match an access level")
}
def apply(in: Option[Access]): Access = in match {
case Some(access) => access
case None => Non
}
def apply(user: User, owner: UserId): Access = if (user.id == owner) { Own } else { Non }
}
| kristiankime/web-education-games | app/service/Access.scala | Scala | mit | 1,654 |
/**
* Copyright (C) 2009-2011 the original author or authors.
* See the notice.md file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.scalate
import java.io.File
import org.fusesource.scalate.util.{ IOUtil, Log }
import org.scalatest.ConfigMap
import scala.collection.immutable.Map
abstract class TemplateTestSupport extends FunSuiteSupport with Log {
var showOutput = false
var engine: TemplateEngine = _
override protected def beforeAll(configMap: ConfigMap) = {
super.beforeAll(configMap)
engine = createTemplateEngine
val workingDir = new File(baseDir, "target/test-data/" + getClass.getSimpleName)
if (workingDir.exists) {
// lets delete it before we run the tests
IOUtil.recursiveDelete(workingDir)
}
engine.workingDirectory = workingDir
configureTemplateEngine
}
protected def createTemplateEngine = new TemplateEngine
protected def configureTemplateEngine(): Unit = {
}
def assertTrimSspOutput(expectedOutput: String, templateText: String, attributes: Map[String, Any] = Map()): String =
assertSspOutput(expectedOutput, templateText, attributes, true)
def assertTrimOutput(expectedOutput: String, template: Template, attributes: Map[String, Any] = Map()): String =
assertOutput(expectedOutput, template, attributes, true)
def assertSspOutput(expectedOutput: String, templateText: String, attributes: Map[String, Any] = Map(), trim: Boolean = false): String = {
val template = engine.compileSsp(templateText)
assertOutput(expectedOutput, template, attributes, trim)
}
def assertMoustacheOutput(expectedOutput: String, templateText: String, attributes: Map[String, Any] = Map(), trim: Boolean = false): String = {
val template = engine.compileMoustache(templateText)
assertOutput(expectedOutput, template, attributes, trim)
}
protected def logOutput(output: String): Unit = {
if (showOutput) {
log.info("output: '" + output + "'")
} else {
debug("output: '" + output + "'")
}
}
def assertOutput(expectedOutput: String, template: Template, attributes: Map[String, Any] = Map(), trim: Boolean = false): String = {
var output = engine.layout("dummy.ssp", template, attributes)
logOutput(output)
if (trim) {
output = output.trim
}
assertResult(expectedOutput) { output }
output
}
def assertOutputContains(source: TemplateSource, expected: String*): String =
assertOutputContains(source, Map[String, Any](), expected: _*)
def assertOutputContains(source: TemplateSource, attributes: Map[String, Any], expected: String*): String = {
val template = engine.compile(source)
assertOutputContains(template, attributes, expected: _*)
}
def assertOutputContains(template: Template, expected: String*): String =
assertOutputContains(template, Map[String, Any](), expected: _*)
def assertOutputContains(template: Template, attributes: Map[String, Any], expected: String*): String = {
var output = engine.layout("dummy.ssp", template, attributes)
logOutput(output)
assertTextContains(output, "template " + template, expected: _*)
output
}
def assertUriOutputContains(uri: String, expected: String*): String =
assertUriOutputContains(uri, Map[String, Any](), expected: _*)
def assertUriOutputContains(uri: String, attributes: Map[String, Any], expected: String*): String =
assertOutputContains(fromUri(uri), attributes, expected: _*)
protected def fromUri(uri: String) = TemplateSource.fromUri(uri, engine.resourceLoader)
def assertTextContains(source: String, description: => String, textLines: String*): Unit = {
assume(source != null, "text was null for " + description)
var index = 0
for (text <- textLines if index >= 0) {
index = source.indexOf(text, index)
if (index >= 0) {
index += text.length
} else {
assume(false, "Text does not contain '" + text + "' for " + description + " when text was:\n" + source)
}
}
}
def syntaxException(block: => Unit) = {
val e = intercept[InvalidSyntaxException] {
block
}
debug("caught: " + e, e)
e
}
def testSspSyntaxEception(name: String, template: String): Unit = {
test(name) {
syntaxException {
assertSspOutput("xxx", template)
}
}
}
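// rewrites characters that would be illegal in generated template names into readable escapes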
protected def safeName(text: String): String =
text.foldLeft(new StringBuffer)((acc, ch) => safeName(ch, acc)).toString
private def safeName(ch: Char, buffer: StringBuffer): StringBuffer = {
if (ch == '&') {
buffer.append("amp_")
} else if (ch == '>') {
buffer.append("gt_")
} else if (ch == '<') {
buffer.append("lt_")
} else if (ch == '=') {
buffer.append("eq_")
} else if (ch == '!') {
buffer.append("pling_")
} else if (ch == '/') {
buffer.append("/")
} else if (Character.isDigit(ch) || Character.isJavaIdentifierPart(ch) || ch == '_' || ch == '.') {
buffer.append(ch)
} else {
buffer.append('_')
}
buffer
}
def compileScaml(name: String, templateText: String) = engine.compile(TemplateSource.fromText(safeName(name) + ".scaml", templateText))
def compileJade(name: String, templateText: String) = engine.compile(TemplateSource.fromText(safeName(name) + ".jade", templateText))
def compileSsp(name: String, templateText: String) = engine.compile(TemplateSource.fromText(safeName(name) + ".ssp", templateText))
def compileMoustache(name: String, templateText: String) = engine.compile(TemplateSource.fromText(safeName(name) + ".moustache", templateText))
}
| scalate/scalate | scalate-core/src/test/scala/org/fusesource/scalate/TemplateTestSupport.scala | Scala | apache-2.0 | 6,228 |
package scalax.collection.io
import net.liftweb.json.JValue
import scalax.collection.GraphEdge._
import scalax.collection.GraphPredef._
import scalax.collection.edge.LBase._
import scalax.collection.edge.WBase._
import scalax.collection.edge.WLBase._
import scalax.collection.edge._
import scalax.collection.generic.GraphCoreCompanion
import scalax.collection.io.json.imp.JsonList
import scalax.collection.io.json.imp.Parser.parse
import scalax.collection.io.json.imp.Stream.createOuterElems
import scalax.collection.{Graph, GraphLike}
import scala.reflect.ClassTag
/** Facilitates populating graphs with nodes/edges from JSON text
* and exporting `Graph`instances to JSON text.
*
* See also the
* [[http://www.scala-graph.org/guides/json Graph for Scala JSON User Guide]].
*
* @define DESCR top level JSON import/export descriptor to be filled with all `NodeDescriptor`s and `EdgeDescriptors`.
* @author Peter Empen
*/
package object json {
type Descriptor[N] = descriptor.Descriptor[N]
type NodeDescriptor[N] = descriptor.NodeDescriptor[N]
type EdgeDescriptorBase[N, E[+X] <: EdgeLikeIn[X], +C <: EdgeCompanionBase[E]] =
descriptor.EdgeDescriptorBase[N, E, C]
type EdgeDescriptor[N, E[+X] <: UnDiEdge[X], +C <: EdgeCompanion[E]] = descriptor.EdgeDescriptor[N, E, C]
type WEdgeDescriptor[N, E[+X] <: UnDiEdge[X] with WEdge[X], +C <: WEdgeCompanion[E]] =
descriptor.WEdgeDescriptor[N, E, C]
type LEdgeDescriptor[N, E[+X] <: UnDiEdge[X] with LEdge[X], +C <: LEdgeCompanion[E], L <: AnyRef] =
descriptor.LEdgeDescriptor[N, E, C, L]
type WLEdgeDescriptor[N, E[+X] <: UnDiEdge[X] with WLEdge[X], +C <: WLEdgeCompanion[E], L <: AnyRef] =
descriptor.WLEdgeDescriptor[N, E, C, L]
type HyperEdgeDescriptor[N, E[+X] <: HyperEdge[X], +C <: HyperEdgeCompanion[E]] =
descriptor.HyperEdgeDescriptor[N, E, C]
type WHyperEdgeDescriptor[N, E[+X] <: WHyperEdge[X] with WEdge[X], +C <: WHyperEdgeCompanion[E]] =
descriptor.WHyperEdgeDescriptor[N, E, C]
type LHyperEdgeDescriptor[N, E[+X] <: LHyperEdge[X] with LEdge[X], +C <: LHyperEdgeCompanion[E], L <: AnyRef] =
descriptor.LHyperEdgeDescriptor[N, E, C, L]
type WLHyperEdgeDescriptor[N, E[+X] <: WLHyperEdge[X] with WLEdge[X], +C <: WLHyperEdgeCompanion[E], L <: AnyRef] =
descriptor.WLHyperEdgeDescriptor[N, E, C, L]
implicit final class JsonGraphCoreCompanion[+G[N, E[+X] <: EdgeLikeIn[X]] <: Graph[N, E] with GraphLike[N, E, G]](
val companion: GraphCoreCompanion[G])
extends AnyVal {
/** Creates a new Graph instance and populates it with all nodes/edges found in
* the node/edge sections of a JSON text.
*
* @param jsonAST the JSON tree to be parsed for node/edge sections
* @param descriptor $DESCR
* @return new `Graph` instance populated from `jsonAST`
*/
def fromJson[N, E[+X] <: EdgeLikeIn[X]](jsonAST: JValue, descriptor: Descriptor[N])(
config: companion.Config): G[N, E] =
fromJson[N, E](parse(jsonAST, descriptor), descriptor)
/** Creates a new Graph instance and populates it with all nodes/edges found in
* the node/edge sections of a JSON text.
*
* @param jsonText the JSON text to be parsed for node/edge sections
* @param descriptor $DESCR
* @return new `Graph` instance populated from `jsonText`
*/
def fromJson[N, E[+X] <: EdgeLikeIn[X]](jsonText: String, descriptor: Descriptor[N])(
implicit config: companion.Config = companion.defaultConfig): G[N, E] =
fromJson[N, E](parse(jsonText, descriptor), descriptor)
/** Creates a new Graph instance and populates it with all nodes/edges found in
* `jsonLists`.
*
* @param jsonLists node/edge lists, usually obtained by parsing a JSON text
* @param descriptor $DESCR
* @return new `Graph` instance populated from `jsonLists`
*/
def fromJson[N, E[+X] <: EdgeLikeIn[X]](jsonLists: Iterable[JsonList], descriptor: Descriptor[N])(
implicit config: companion.Config): G[N, E] = {
val target = createOuterElems[N, E](jsonLists, descriptor)
companion.from[N, E](nodes = target._1, edges = target._2)(config)
}
}
implicit final class JsonGraph[N, E[+X] <: EdgeLikeIn[X]](val graph: Graph[N, E]) extends AnyVal {
/** Creates a JSON text including all nodes/edges in this graph.
*
* @param descriptor $DESCR
* @return the JSON text
*/
def toJson(descriptor: Descriptor[N]): String = {
val export = new exp.Export[N, E](graph, descriptor)
import export._
jsonText(jsonAST(List(jsonASTNodes, jsonASTEdges)))
}
}
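/* Usage sketch (illustrative only — assumes a `descriptor` prepared for node type `N`
 * and an implicit graph `config` in scope; neither is defined in this excerpt):
 *
 *   val g: Graph[N, UnDiEdge] = Graph.fromJson[N, UnDiEdge](jsonText, descriptor)
 *   val roundTripped: String = g.toJson(descriptor)
 */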
/** Replaces all occurrences of `paramPlaceholder` in `source` with the elements
 * of `params`, one by one. Each substituted parameter is truncated so that it
 * does not exceed `maxLength` characters.
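 *
 * Example (illustrative): replacePlaceholders("{} of {}", List("1", "10")) yields "1 of 10".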
*/
def replacePlaceholders(source: String,
params: Iterable[String],
maxLength: Int = 50,
paramPlaceholder: String = "{}"): String = {
var target = source
val it = params.iterator
var i = 0
while ({ i = target.indexOfSlice(paramPlaceholder); i >= 0 } &&
it.hasNext) {
val param = it.next
target = target patch (i, if (param.length < maxLength) param
                          else param.substring(0, maxLength - 3) + "...",
                       paramPlaceholder.length)
}
target
}
}
| scala-graph/scala-graph | json/src/main/scala/scalax/collection/io/json/package.scala | Scala | apache-2.0 | 8,263 |
package com.github.dcapwell.docker.builder
import com.github.dcapwell.docker.builder.lang._
import scala.annotation.tailrec
import scalaz.syntax.show._
import lang.Instruction.docker._
object Dockerfile {
def generate(traits: List[Trait]): List[String] = {
@tailrec
def loop(traits: List[Trait], accum: List[String]): List[String] = traits match {
case x :: xs =>
val fromStr = x.from.map(_.shows).toList
val restStr = x.instructions.map(_.shows)
loop(xs, accum ++ fromStr ++ restStr)
case Nil => accum
}
if(traits.isEmpty) Nil
else loop(traits, Nil)
}
}
| dcapwell/docker-builder | dockerbuilder/src/main/scala/com/github/dcapwell/docker/builder/Dockerfile.scala | Scala | mit | 620 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.regression
import scala.util.Random
import org.apache.spark.SparkFunSuite
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.util.{LocalClusterSparkContext, LinearDataGenerator,
MLlibTestSparkContext}
import org.apache.spark.util.Utils
private object LinearRegressionSuite {
/** 3 features */
val model = new LinearRegressionModel(weights = Vectors.dense(0.1, 0.2, 0.3), intercept = 0.5)
}
class LinearRegressionSuite extends SparkFunSuite with MLlibTestSparkContext {
def validatePrediction(predictions: Seq[Double], input: Seq[LabeledPoint]) {
val numOffPredictions = predictions.zip(input).count { case (prediction, expected) =>
// A prediction is off if the prediction is more than 0.5 away from expected value.
math.abs(prediction - expected.label) > 0.5
}
// At least 80% of the predictions should be on.
assert(numOffPredictions < input.length / 5)
}
// Test if we can correctly learn Y = 3 + 10*X1 + 10*X2
test("linear regression") {
val testRDD = sc.parallelize(LinearDataGenerator.generateLinearInput(
3.0, Array(10.0, 10.0), 100, 42), 2).cache()
val linReg = new LinearRegressionWithSGD().setIntercept(true)
linReg.optimizer.setNumIterations(1000).setStepSize(1.0)
val model = linReg.run(testRDD)
assert(model.intercept >= 2.5 && model.intercept <= 3.5)
val weights = model.weights
assert(weights.size === 2)
assert(weights(0) >= 9.0 && weights(0) <= 11.0)
assert(weights(1) >= 9.0 && weights(1) <= 11.0)
val validationData = LinearDataGenerator.generateLinearInput(
3.0, Array(10.0, 10.0), 100, 17)
val validationRDD = sc.parallelize(validationData, 2).cache()
// Test prediction on RDD.
validatePrediction(model.predict(validationRDD.map(_.features)).collect(), validationData)
// Test prediction on Array.
validatePrediction(validationData.map(row => model.predict(row.features)), validationData)
}
// Test if we can correctly learn Y = 10*X1 + 10*X2
test("linear regression without intercept") {
val testRDD = sc.parallelize(LinearDataGenerator.generateLinearInput(
0.0, Array(10.0, 10.0), 100, 42), 2).cache()
val linReg = new LinearRegressionWithSGD().setIntercept(false)
linReg.optimizer.setNumIterations(1000).setStepSize(1.0)
val model = linReg.run(testRDD)
assert(model.intercept === 0.0)
val weights = model.weights
assert(weights.size === 2)
assert(weights(0) >= 9.0 && weights(0) <= 11.0)
assert(weights(1) >= 9.0 && weights(1) <= 11.0)
val validationData = LinearDataGenerator.generateLinearInput(
0.0, Array(10.0, 10.0), 100, 17)
val validationRDD = sc.parallelize(validationData, 2).cache()
// Test prediction on RDD.
validatePrediction(model.predict(validationRDD.map(_.features)).collect(), validationData)
// Test prediction on Array.
validatePrediction(validationData.map(row => model.predict(row.features)), validationData)
}
// Test if we can correctly learn Y = 10*X1 + 10*X10000
test("sparse linear regression without intercept") {
val denseRDD = sc.parallelize(
LinearDataGenerator.generateLinearInput(0.0, Array(10.0, 10.0), 100, 42), 2)
val sparseRDD = denseRDD.map { case LabeledPoint(label, v) =>
val sv = Vectors.sparse(10000, Seq((0, v(0)), (9999, v(1))))
LabeledPoint(label, sv)
}.cache()
val linReg = new LinearRegressionWithSGD().setIntercept(false)
linReg.optimizer.setNumIterations(1000).setStepSize(1.0)
val model = linReg.run(sparseRDD)
assert(model.intercept === 0.0)
val weights = model.weights
assert(weights.size === 10000)
assert(weights(0) >= 9.0 && weights(0) <= 11.0)
assert(weights(9999) >= 9.0 && weights(9999) <= 11.0)
val validationData = LinearDataGenerator.generateLinearInput(0.0, Array(10.0, 10.0), 100, 17)
val sparseValidationData = validationData.map { case LabeledPoint(label, v) =>
val sv = Vectors.sparse(10000, Seq((0, v(0)), (9999, v(1))))
LabeledPoint(label, sv)
}
val sparseValidationRDD = sc.parallelize(sparseValidationData, 2)
// Test prediction on RDD.
validatePrediction(
model.predict(sparseValidationRDD.map(_.features)).collect(), sparseValidationData)
// Test prediction on Array.
validatePrediction(
sparseValidationData.map(row => model.predict(row.features)), sparseValidationData)
}
test("model save/load") {
val model = LinearRegressionSuite.model
val tempDir = Utils.createTempDir()
val path = tempDir.toURI.toString
// Save model, load it back, and compare.
try {
model.save(sc, path)
val sameModel = LinearRegressionModel.load(sc, path)
assert(model.weights == sameModel.weights)
assert(model.intercept == sameModel.intercept)
} finally {
Utils.deleteRecursively(tempDir)
}
}
}
class LinearRegressionClusterSuite extends SparkFunSuite with LocalClusterSparkContext {
test("task size should be small in both training and prediction") {
val m = 4
val n = 200000
val points = sc.parallelize(0 until m, 2).mapPartitionsWithIndex { (idx, iter) =>
val random = new Random(idx)
iter.map(i => LabeledPoint(1.0, Vectors.dense(Array.fill(n)(random.nextDouble()))))
}.cache()
// If we serialize data directly in the task closure, the size of the serialized task would be
// greater than 1MB and hence Spark would throw an error.
val model = LinearRegressionWithSGD.train(points, 2)
val predictions = model.predict(points.map(_.features))
}
}
| practice-vishnoi/dev-spark-1 | mllib/src/test/scala/org/apache/spark/mllib/regression/LinearRegressionSuite.scala | Scala | apache-2.0 | 6,441 |
package dispatch.spec
import org.scalacheck._
object BasicSpecification
extends Properties("Json4s Native Json")
with DispatchCleanup {
import Prop.forAll
import org.json4s._
import org.json4s.native.JsonMethods._
import JsonDSL._
object Json {
import unfiltered.response._
def jsonToString(json: JValue) = compact(render(json))
def apply(json: JValue) =
new ComposeResponse(JsonContent ~> ResponseString(jsonToString(json)))
def apply(json: JValue, cb: String) =
new ComposeResponse(JsContent ~> ResponseString("%s(%s)" format(cb, jsonToString(json))))
}
private val port = unfiltered.util.Port.any
val server = {
import unfiltered.netty
import unfiltered.response._
import unfiltered.request._
object In extends Params.Extract("in", Params.first)
netty.Server.local(port).handler(netty.cycle.Planify {
case Params(In(in)) => Json(("out" -> in))
}).start()
}
import dispatch._
def localhost = host("127.0.0.1", port)
property("parse json") = forAll(Gen.alphaStr) { (sample: String) =>
val res = Http.default(
localhost <:< Map("Accept" -> "application/json") <<? Map("in" -> sample) > as.json4s.Json
)
sample == (for { JObject(fields) <- res(); JField("out", JString(o)) <- fields } yield o).head
}
}
| dispatch/reboot | json4snative/src/test/scala/json.scala | Scala | lgpl-3.0 | 1,315 |
package com.twitter.finagle.postgres.values
import com.twitter.finagle.postgres.Spec
import io.netty.buffer.{ByteBuf, Unpooled}
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks
import com.twitter.finagle.postgres.Generators._
class UtilsSpec extends Spec with ScalaCheckDrivenPropertyChecks {
"Buffers.readCString" should {
def newBuffer(): (ByteBuf, String, String) = {
val str = "Some string"
      val cStr = str + '\u0000'
val buffer = Unpooled.copiedBuffer(cStr, Charsets.Utf8)
(buffer, str, cStr)
}
"fully read a string" in {
val (buffer, str, cStr) = newBuffer()
val actualStr = Buffers.readCString(buffer)
actualStr must equal(str)
}
"set reader index to the right value after reading" in {
val (buffer, str, cStr) = newBuffer()
Buffers.readCString(buffer)
buffer.readerIndex() must equal(cStr.length)
}
"respect the initial reader index" in {
val (buffer, str, cStr) = newBuffer()
buffer.readChar()
Buffers.readCString(buffer)
buffer.readerIndex() must equal(cStr.length)
}
"throw an appropriate exception if string passed is not C style" in {
val bufferWithWrongString = Unpooled.copiedBuffer("not a C style string", Charsets.Utf8)
an[IndexOutOfBoundsException] must be thrownBy {
Buffers.readCString(bufferWithWrongString)
}
}
}
"Md5Encryptor.encrypt" should {
def ba(str: String) = str.getBytes()
"encrypt everything correctly" in {
val samples =
List(
(ba("john"), ba("john25"), Array[Byte](1, 2, 3, 4), "md5305d62541687fa0c5871edfdb1140133"),
(ba("john"), ba("john25"), Array[Byte](4, 3, 1, 2), "md5156cf720128cad07c39e018eca91ff8d"),
(ba("john"), ba("john22"), Array[Byte](1, 2, 3, 4), "md57042902d6531e1840b3019a880f66edc"),
(ba("lomack"), ba("lowmuck245$3"), Array[Byte](15, 19, 33, 1), "md56aa29016af76de6b793f3e7e009a26c2")
)
samples.foreach {
case (user, password, salt, result) =>
new String(Md5Encryptor.encrypt(user, password, salt)) must equal(result)
}
}
"throw an exception if any of the parameters is missing or empty" in {
val user = ba("john")
val password = ba("john25")
val salt = Array[Byte](1, 2, 3, 4)
val empty = Array[Byte]()
an[IllegalArgumentException] must be thrownBy {
Md5Encryptor.encrypt(empty, password, salt)
}
an[IllegalArgumentException] must be thrownBy {
Md5Encryptor.encrypt(null, password, salt)
}
an[IllegalArgumentException] must be thrownBy {
Md5Encryptor.encrypt(user, empty, salt)
}
an[IllegalArgumentException] must be thrownBy {
Md5Encryptor.encrypt(user, null, salt)
}
an[IllegalArgumentException] must be thrownBy {
Md5Encryptor.encrypt(user, password, empty)
}
an[IllegalArgumentException] must be thrownBy {
Md5Encryptor.encrypt(user, password, null)
}
}
}
"Numeric utils" should {
"write a numeric value" in forAll {
bd: BigDecimal =>
val read = Numerics.readNumeric(Numerics.writeNumeric(bd))
read must equal (bd)
}
}
}
| finagle/finagle-postgres | src/test/scala/com/twitter/finagle/postgres/values/UtilsSpec.scala | Scala | apache-2.0 | 3,268 |
package org.raml.domain
class DocItem(title: String, content: String)
| larroy/Scala_raml_parser | src/main/scala/org/raml/domain/DocItem.scala | Scala | apache-2.0 | 71 |
package breeze.linalg
import org.scalacheck._
import org.scalatest._
import org.scalatest.junit._
import org.scalatest.prop._
import org.junit.runner.RunWith
import breeze.math.{DoubleValuedTensorSpaceTestBase, TensorSpace, TensorSpaceTestBase}
/**
*
* @author dlwh
*/
class VectorTest {
}
/**
*
* @author dlwh
*/
@RunWith(classOf[JUnitRunner])
class VectorOps_DoubleTest extends DoubleValuedTensorSpaceTestBase[Vector[Double], Int] {
val space: TensorSpace[Vector[Double], Int, Double] = implicitly
val N = 30
implicit def genTriple: Arbitrary[(Vector[Double], Vector[Double], Vector[Double])] = {
Arbitrary {
for{x <- Arbitrary.arbitrary[Double].map { _ % 1E100}
bx <- Arbitrary.arbitrary[Boolean]
xl <- Arbitrary.arbitrary[List[Int]]
y <- Arbitrary.arbitrary[Double].map { _ % 1E100 }
by <- Arbitrary.arbitrary[Boolean]
yl <- Arbitrary.arbitrary[List[Int]]
z <- Arbitrary.arbitrary[Double].map { _ % 1E100 }
bz <- Arbitrary.arbitrary[Boolean]
zl <- Arbitrary.arbitrary[List[Int]]
} yield {
(if(bx) DenseVector.fill(N)(math.random * x) else SparseVector(N)( xl.map(i => (i % N).abs -> math.random * x ):_*),
if(by) DenseVector.fill(N)(math.random * y) else SparseVector(N)( yl.map(i => (i % N).abs -> math.random * y ):_*),
if(bz) DenseVector.fill(N)(math.random * z) else SparseVector(N)( zl.map(i => (i % N).abs -> math.random * z ):_*))
}
}
}
def genScalar: Arbitrary[Double] = Arbitrary(Arbitrary.arbitrary[Double].map{ _ % 1E10 })
}
@RunWith(classOf[JUnitRunner])
class VectorOps_FloatTest extends TensorSpaceTestBase[Vector[Float], Int, Float] {
val space: TensorSpace[Vector[Float], Int, Float] = implicitly
override val TOL: Double = 1E-2
val N = 30
implicit def genTriple: Arbitrary[(Vector[Float], Vector[Float], Vector[Float])] = {
Arbitrary {
for{x <- Arbitrary.arbitrary[Float].map { _ % 1000f}
bx <- Arbitrary.arbitrary[Boolean]
xl <- Arbitrary.arbitrary[List[Int]]
y <- Arbitrary.arbitrary[Float].map { _ % 1000f }
by <- Arbitrary.arbitrary[Boolean]
yl <- Arbitrary.arbitrary[List[Int]]
z <- Arbitrary.arbitrary[Float].map { _ % 1000f }
bz <- Arbitrary.arbitrary[Boolean]
zl <- Arbitrary.arbitrary[List[Int]]
} yield {
(if(bx) DenseVector.fill(N)(math.random * x toFloat) else SparseVector(N)( xl.map(i => (i % N).abs -> (math.random * x toFloat)):_*),
if(by) DenseVector.fill(N)(math.random * y toFloat) else SparseVector(N)( yl.map(i => (i % N).abs -> (math.random * y toFloat)):_*),
if(bz) DenseVector.fill(N)(math.random * z toFloat) else SparseVector(N)( zl.map(i => (i % N).abs ->(math.random * z toFloat)):_*))
}
}
}
def genScalar: Arbitrary[Float] = Arbitrary(Arbitrary.arbitrary[Float].map{ _ % 1000f })
}
@RunWith(classOf[JUnitRunner])
class VectorOps_IntTest extends TensorSpaceTestBase[Vector[Int], Int, Int] {
val space: TensorSpace[Vector[Int], Int, Int] = implicitly
val N = 30
implicit def genTriple: Arbitrary[(Vector[Int], Vector[Int], Vector[Int])] = {
Arbitrary {
for{x <- Arbitrary.arbitrary[Int].map { _ % 1000}
bx <- Arbitrary.arbitrary[Boolean]
xl <- Arbitrary.arbitrary[List[Int]]
y <- Arbitrary.arbitrary[Int].map { _ % 1000 }
by <- Arbitrary.arbitrary[Boolean]
yl <- Arbitrary.arbitrary[List[Int]]
z <- Arbitrary.arbitrary[Int].map { _ % 1000 }
bz <- Arbitrary.arbitrary[Boolean]
zl <- Arbitrary.arbitrary[List[Int]]
} yield {
(if(bx) DenseVector.fill(N)(math.random * x toInt) else SparseVector(N)( xl.map(i => (i % N).abs -> (math.random * x toInt)):_*),
if(by) DenseVector.fill(N)(math.random * y toInt) else SparseVector(N)( yl.map(i => (i % N).abs -> (math.random * y toInt)):_*),
if(bz) DenseVector.fill(N)(math.random * z toInt) else SparseVector(N)( zl.map(i => (i % N).abs ->(math.random * z toInt)):_*))
}
}
}
def genScalar: Arbitrary[Int] = Arbitrary(Arbitrary.arbitrary[Int].map{ _ % 1000 })
}
| wavelets/breeze | src/test/scala/breeze/linalg/VectorTest.scala | Scala | apache-2.0 | 4,202 |
package org.bitcoins.core.script.control
import org.bitcoins.testkitcore.util.BitcoinSUnitTest
/** Created by chris on 1/8/16.
*/
class ControlOperationsFactoryTest extends BitcoinSUnitTest {
"ControlOperationsFactory" must "match a string with a control operation" in {
ControlOperations.fromStringOpt("OP_ELSE") must be(Some(OP_ELSE))
ControlOperations.fromStringOpt("OP_ENDIF") must be(Some(OP_ENDIF))
ControlOperations.fromStringOpt("OP_IF") must be(Some(OP_IF))
ControlOperations.fromStringOpt("OP_NOTIF") must be(Some(OP_NOTIF))
ControlOperations.fromStringOpt("OP_RETURN") must be(Some(OP_RETURN))
ControlOperations.fromStringOpt("OP_VERIFY") must be(Some(OP_VERIFY))
ControlOperations.fromStringOpt("RANDOM") must be(None)
}
}
| bitcoin-s/bitcoin-s | core-test/src/test/scala/org/bitcoins/core/script/control/ControlOperationsFactoryTest.scala | Scala | mit | 773 |
package controllers
import play.api._
import play.api.mvc._
object Application extends Controller {
def index = Action {
Ok(views.html.index("Your new application is ready."))
}
def healthCheck = Action(Ok("Running"))
}
| taraxe/gcloud-play-sample | app/controllers/Application.scala | Scala | apache-2.0 | 233 |
//
// Copyright 2014-2020 Paytronix Systems, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package com.paytronix.utils.interchange.format.json
import scala.reflect.macros.whitebox.Context
import scalaz.NonEmptyList
import scalaz.syntax.foldable.ToFoldableOps
import com.paytronix.utils.interchange.base
import base.derive.DeriveCoderMacros
import NonEmptyList.{nel, nels}
private[json] class deriveImpl(val c: Context) extends DeriveCoderMacros {
import c.universe.{Annotation, Block, BlockTag, Ident, Quasiquote, TermName, Tree, TreeTag, Type, typeOf, weakTypeTag}
type Coder[A] = JsonCoder[A]
//def implicitCoderName = TermName("jsonCoder")
def coderType(tpe: Tree) = {
tq"com.paytronix.utils.interchange.format.json.JsonCoder[$tpe]"
}
def materializeCoder(tpe: Type, annotations: List[Annotation]): Tree = {
q"""
com.paytronix.utils.interchange.format.json.JsonCoder.make[$tpe] (
${materializeEncoder(tpe, annotations)},
${materializeDecoder(tpe, annotations)}
)
"""
}
type Encoder[A] = JsonEncoder[A]
def encoderType(tpe: Tree): Tree = {
tq"com.paytronix.utils.interchange.format.json.JsonEncoder[$tpe]"
}
def materializeEncoder(tpe: Type, annotations: List[Annotation]): Tree = {
wrapCoderForAnnotations (
coder = q"com.paytronix.utils.interchange.format.json.JsonEncoder[$tpe]",
annotations = annotations,
nullable = inside => q"com.paytronix.utils.interchange.format.json.container.nullableJsonEncoder($inside)",
default = value => inside => inside
)
}
type Decoder[A] = JsonDecoder[A]
def decoderType(tpe: Tree): Tree = {
tq"com.paytronix.utils.interchange.format.json.JsonDecoder[$tpe]"
}
def materializeDecoder(tpe: Type, annotations: List[Annotation]): Tree = {
wrapCoderForAnnotations (
coder = q"com.paytronix.utils.interchange.format.json.JsonDecoder[$tpe]",
annotations = annotations,
nullable = inside => q"com.paytronix.utils.interchange.format.json.container.nullableJsonDecoder($inside)",
default = value => inside => q"$inside.default($value)"
)
}
def makeStructureCoder(target: Tree): Tree = {
q"com.paytronix.utils.interchange.format.json.derive.structure.coder[$target]"
}
def structureEncoderMethods (
tpe: Type,
encoderFor: Property => Tree,
model: Structure
): List[Tree] = {
val flattenTpe = typeOf[com.paytronix.utils.interchange.format.json.flatten]
val jsonGenerator = TermName(c.freshName())
val instance = TermName(c.freshName())
val encode = sequenceResultBindings(model.properties) (
action = prop =>
if (prop.annotations.exists(_.tree.tpe =:= flattenTpe))
q"""
$jsonGenerator.filterNextObject(com.paytronix.utils.interchange.format.json.InterchangeJsonGenerator.ObjectFilter.flatten)
val propEncoder = ${encoderFor(prop)}
if (propEncoder.mightBeNull) {
$jsonGenerator.omitNextMissing()
}
com.paytronix.utils.interchange.base.atProperty(${prop.externalName}) {
propEncoder.run(${prop.read(Ident(instance))}, $jsonGenerator)
}
"""
else
q"""
$jsonGenerator.writeFieldName(${prop.externalName}) >>
com.paytronix.utils.interchange.base.atProperty(${prop.externalName}) {
${encoderFor(prop)}.run(${prop.read(Ident(instance))}, $jsonGenerator)
}
""",
accept = (_, _) => q"",
body = _ => q"com.paytronix.utils.scala.result.Okay.unit"
)
List (
q"val mightBeNull = false",
q"val codesAsObject = true",
q"""
def run($instance: $tpe, $jsonGenerator: com.paytronix.utils.interchange.format.json.InterchangeJsonGenerator) = {
$jsonGenerator.writeStartObject() >> $encode >> $jsonGenerator.writeEndObject()
}
"""
)
}
def structureDecoderMethods (
tpe: Type,
decoderFor: Property => Tree,
model: Structure
): List[Tree] = {
val flattenTpe = typeOf[com.paytronix.utils.interchange.format.json.flatten]
val allProps = (model.constructorProperties ++ model.mutableProperties).sorted
val (flattenProps, normalProps) = allProps.partition(_.annotations.exists(_.tree.tpe =:= flattenTpe))
val jsonParser = TermName(c.freshName())
val receiver = TermName(c.freshName())
val propReceiverNames = allProps.map { prop => TermName(c.freshName()) }
val propReceiverByProp = Map.empty ++ (allProps zip propReceiverNames)
val propFlagNames = normalProps.map { prop => TermName(c.freshName()) }
val propFlagByProp = Map.empty ++ (normalProps zip propFlagNames)
val declarePropReceivers = (propReceiverNames zip allProps).map { case (receiverName, prop) =>
q"val $receiverName = new com.paytronix.utils.interchange.base.Receiver[${prop.tpe}]()"
}
val declarePropFlags = propFlagNames.map { flagName =>
q"var $flagName = false"
}
val fieldDecoders = normalProps.map { prop =>
cq"""
${prop.externalName} =>
com.paytronix.utils.interchange.base.atProperty(${prop.externalName}) {
${propFlagByProp(prop)} = true
${decoderFor(prop)}.run($jsonParser, ${propReceiverByProp(prop)})
}
"""
}
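        // Properties marked @flatten are decoded from the same JSON object via a
        // non-consuming excursion below, so the parser position is preserved for the
        // regular field loop that follows.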
val handleFlattenFields = sequenceResultBindings(flattenProps) (
action = prop => q"""
$jsonParser.excursion {
com.paytronix.utils.interchange.base.atProperty(${prop.externalName}) {
${decoderFor(prop)}.run($jsonParser, ${propReceiverByProp(prop)})
}
}
""",
accept = (_, _) => q"",
body = _ => q"com.paytronix.utils.scala.result.Okay.unit"
)
val handleMissing = sequenceResultBindings(normalProps) (
action = prop => q"""
if (!${propFlagByProp(prop)}) {
$jsonParser.currentValueIsMissing()
com.paytronix.utils.interchange.base.atProperty(${prop.externalName}) {
${decoderFor(prop)}.run($jsonParser, ${propReceiverByProp(prop)})
}
} else
com.paytronix.utils.scala.result.Okay.unit
""",
accept = (_, _) => q"",
body = _ => q"com.paytronix.utils.scala.result.Okay.unit"
)
List (
q"val mightBeNull = false",
q"val codesAsObject = true",
q"""
def run($jsonParser: com.paytronix.utils.interchange.format.json.InterchangeJsonParser,
$receiver: com.paytronix.utils.interchange.base.Receiver[$tpe]) = {
..$declarePropReceivers
..$declarePropFlags
$jsonParser.require(com.fasterxml.jackson.core.JsonToken.START_OBJECT) >>
$handleFlattenFields >>
$jsonParser.foreachFields {
case ..$fieldDecoders
case _ =>
$jsonParser.skipToEndOfValue();
com.paytronix.utils.scala.result.Okay.unit
} >>
{ $handleMissing } >>
$receiver(${model.constructAndAssign(prop => q"${propReceiverByProp(prop)}.value")})
}
"""
)
}
def makeWrapperCoder(target: Tree): Tree = {
q"com.paytronix.utils.interchange.format.json.derive.wrapper.coder[$target]"
}
def wrapperEncoderMethods (
targetType: Type,
property: Property,
unwrap: Tree => Tree
): List[Tree] = {
val instance = TermName(c.freshName())
val jsonGenerator = TermName(c.freshName())
List (
q"val mightBeNull = ${property.encoderName}.mightBeNull",
q"val codesAsObject = ${property.encoderName}.codesAsObject",
q"""
def run($instance: $targetType, $jsonGenerator: com.paytronix.utils.interchange.format.json.InterchangeJsonGenerator) =
com.paytronix.utils.interchange.base.atProperty(${property.externalName}) {
${property.encoderName}.run(${unwrap(Ident(instance))}, $jsonGenerator)
}
"""
)
}
def wrapperDecoderMethods (
targetType: Type,
property: Property,
wrap: Tree => Tree
): List[Tree] = {
val jsonParser = TermName(c.freshName())
val out = TermName(c.freshName())
val receiver = TermName(c.freshName())
List (
q"val mightBeNull = ${property.decoderName}.mightBeNull",
q"val codesAsObject = ${property.decoderName}.codesAsObject",
q"""
def run($jsonParser: com.paytronix.utils.interchange.format.json.InterchangeJsonParser, $out: com.paytronix.utils.interchange.base.Receiver[$targetType]) = {
val $receiver = new com.paytronix.utils.interchange.base.Receiver[${property.tpe}]
com.paytronix.utils.interchange.base.atProperty(${property.externalName}) {
${property.decoderName}.run($jsonParser, $receiver) >> $out(${wrap(q"$receiver.value")})
}
}
"""
)
}
/* 2014-08-27 RMM: having multiple annotation macros which addToCompanion causes the compiler to not emit the object class (Blah$) even though
it doesn't error at runtime.
def deriveImplicitAdHocUnionCoderAnnotation(annottees: c.Expr[Any]*): c.Expr[Any] =
addToCompanion(annottees) { (targetName, annotations) =>
def isUnionAnno(t: Tree): Boolean =
t match {
case Ident(n: Name) => n.decodedName.toString == "union" // the tree isn't yet typechecked and resolved so uhh, this is spotty
case _ => false
}
val alts =
annotations.collectFirst { case q"new $anno(..$alts)" if isUnionAnno(anno) => alts }
.getOrElse(sys.error("couldn't find union annotation among " + annotations))
val q"new $_($noApplicableAlternates).macroTransform(..$_)" = c.macroApplication
List(q"""
implicit val ${implicitCoderName}: ${coderType(Ident(targetName))} =
com.paytronix.utils.interchange.format.json.derive.adHocUnion.coder[$targetName]($noApplicableAlternates, ..$alts)
""")
}
*/
private def parseAdHocUnionAlternates(alternateTrees: Seq[Tree]): NonEmptyList[Type] = {
val targets = alternateTrees.toList.map {
// FIXME? probably this should really typecheck the tree and get the type out from there rather than pattern matching the apply
case q"$_[$tpeTree]" => tpeTree.tpe
case tree =>
sys.error("unrecognized union alternative syntax: " + tree + ". expected alternate[Type]")
}
targets match {
case Nil => sys.error("union cannot be made with no alternates!")
case hd :: tl => nel(hd, tl)
}
}
def adHocUnionCoderDef[A: c.WeakTypeTag](noApplicableAlternates: c.Tree, alternates: c.Tree*): c.Expr[JsonCoder[A]] = try {
val targetType = weakTypeTag[A].tpe
val name = TermName(targetType.typeSymbol.name.decodedName.toString + "Coder")
val targetSubtypes = parseAdHocUnionAlternates(alternates)
val subtypeEncoderNames = targetSubtypes.map { _ => TermName(c.freshName()) }
val subtypeDecoderNames = targetSubtypes.map { _ => TermName(c.freshName()) }
val declareCoders = (targetSubtypes zip (subtypeEncoderNames zip subtypeDecoderNames)).flatMap { case (tpe, (encoderName, decoderName)) =>
nels (
q"lazy val $decoderName = ${materializeDecoder(tpe, Nil)}",
q"lazy val $encoderName = ${materializeEncoder(tpe, Nil)}"
)
}.list
val subtypeEncoderNamesByType = Map.empty ++ (targetSubtypes zip subtypeEncoderNames).stream
val subtypeDecoderNamesByType = Map.empty ++ (targetSubtypes zip subtypeDecoderNames).stream
c.Expr[JsonCoder[A]](q"""
{
object $name extends ${coderType(tq"$targetType")} {
..$declareCoders
object decode extends ${decoderType(tq"$targetType")} {
..${adHocUnionDecoderMethods(noApplicableAlternates, targetType, targetSubtypes, subtypeDecoderNamesByType.mapValues(n => Ident(n)))}
}
object encode extends ${encoderType(tq"$targetType")} {
..${adHocUnionEncoderMethods(noApplicableAlternates, targetType, targetSubtypes, subtypeEncoderNamesByType.mapValues(n => Ident(n)))}
}
}
$name: ${coderType(tq"$targetType")}
}
""")
} catch { case e: Exception =>
System.err.println("uhoh, macro explosion!")
e.printStackTrace(System.err)
null
}
def adHocUnionDecoderDef[A: c.WeakTypeTag](noApplicableAlternates: c.Tree, alternates: c.Tree*): c.Expr[JsonDecoder[A]] = try {
import c.universe.{Block, BlockTag, Ident, Quasiquote, TermName, weakTypeTag}
val targetType = weakTypeTag[A].tpe
val name = TermName(targetType.typeSymbol.name.decodedName.toString + "Decoder")
val targetSubtypes = parseAdHocUnionAlternates(alternates)
val subtypeDecoderNames = targetSubtypes.map { _ => TermName(c.freshName()) }
val declareDecoders = (targetSubtypes zip subtypeDecoderNames).map { case (tpe, decoderName) =>
q"lazy val $decoderName = ${materializeDecoder(tpe, Nil)}"
}.list
val subtypeDecoderNamesByType = Map.empty ++ (targetSubtypes zip subtypeDecoderNames).stream
c.Expr[JsonDecoder[A]](q"""
{
object $name extends ${decoderType(tq"$targetType")} {
..$declareDecoders
..${adHocUnionDecoderMethods(noApplicableAlternates, targetType, targetSubtypes, subtypeDecoderNamesByType.mapValues(n => Ident(n)))}
}
$name: ${decoderType(tq"$targetType")}
}
""")
} catch { case e: Exception =>
System.err.println("uhoh, macro explosion!")
e.printStackTrace(System.err)
null
}
def adHocUnionEncoderDef[A: c.WeakTypeTag](noApplicableAlternates: c.Tree, alternates: c.Tree*): c.Expr[JsonEncoder[A]] = try {
import c.universe.{Block, BlockTag, Ident, Quasiquote, TermName, weakTypeTag}
val targetType = weakTypeTag[A].tpe
val name = TermName(targetType.typeSymbol.name.decodedName.toString + "Encoder")
val targetSubtypes = parseAdHocUnionAlternates(alternates)
val subtypeEncoderNames = targetSubtypes.map { _ => TermName(c.freshName()) }
val declareEncoders = (targetSubtypes zip subtypeEncoderNames).map { case (tpe, encoderName) =>
q"lazy val $encoderName = ${materializeEncoder(tpe, Nil)}"
}.list
val subtypeEncoderNamesByType = Map.empty ++ (targetSubtypes zip subtypeEncoderNames).stream
c.Expr[JsonEncoder[A]](q"""
{
object $name extends ${encoderType(tq"$targetType")} {
..$declareEncoders
..${adHocUnionEncoderMethods(noApplicableAlternates, targetType, targetSubtypes, subtypeEncoderNamesByType.mapValues(n => Ident(n)))}
}
$name: ${encoderType(tq"$targetType")}
}
""")
} catch { case e: Exception =>
System.err.println("uhoh, macro explosion!")
e.printStackTrace(System.err)
null
}
def adHocUnionEncoderMethods (
noApplicableAlternates: c.Tree,
targetType: Type,
targetSubtypes: NonEmptyList[Type],
encoderFor: Type => Tree
): List[Tree] = {
import c.universe.{Ident, TermName, Quasiquote}
val allEncoders = TermName(c.freshName())
val jsonGenerator = TermName(c.freshName())
val instance = TermName(c.freshName())
val encodeAlts = targetSubtypes.list.zipWithIndex.map { case (subtype, index) =>
val encoder = encoderFor(subtype)
val value = TermName(c.freshName())
cq"""
$value: $subtype =>
com.paytronix.utils.interchange.base.atIndex($index) {
com.paytronix.utils.scala.result.tryCatchResultG(com.paytronix.utils.interchange.base.terminal) {
$encoder.run($value, $jsonGenerator)
}
}
"""
}
val value = TermName(c.freshName())
List (
q"private val $allEncoders = List(..${targetSubtypes.map(t => encoderFor(t)).list})",
q"val mightBeNull = $allEncoders.exists(_.mightBeNull)",
q"val codesAsObject = $allEncoders.forall(_.codesAsObject)",
q"""
def run($instance: $targetType, $jsonGenerator: com.paytronix.utils.interchange.format.json.InterchangeJsonGenerator) =
$instance match {
case ..$encodeAlts
case $value =>
com.paytronix.utils.scala.result.FailedG (
"cannot encode value " + $value + " as it was not configured as a valid union alternative",
com.paytronix.utils.interchange.base.CoderFailure.terminal
)
}
"""
)
}
def adHocUnionDecoderMethods (
noApplicableAlternates: c.Tree,
targetType: Type,
targetSubtypes: NonEmptyList[Type],
decoderFor: Type => Tree
): List[Tree] = {
import c.universe.{Ident, Quasiquote, TermName, Tree}
val allDecoders = TermName(c.freshName())
val jsonParser = TermName(c.freshName())
val receiver = TermName(c.freshName())
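        // Decoding strategy: try each alternative's decoder inside a non-consuming
        // excursion, falling back to the next alternative on failure; the first
        // success wins, otherwise the noApplicableAlternates error is produced.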
val decode = targetSubtypes.foldRight[Tree] (
q"com.paytronix.utils.scala.result.FailedG($noApplicableAlternates, $jsonParser.terminal)"
) { (tpe, rhs) =>
val subReceiver = TermName(c.freshName())
q"""
{
val $subReceiver = new com.paytronix.utils.interchange.base.Receiver[$tpe]
$jsonParser.excursion(${decoderFor(tpe)}.run($jsonParser, $subReceiver)) >>
$jsonParser.skipToEndOfValue() >>
$receiver($subReceiver.value)
} orElse $rhs
"""
}
List (
q"private val $allDecoders = List(..${targetSubtypes.map(decoderFor).list})",
q"val mightBeNull = $allDecoders.exists(_.mightBeNull)",
q"val codesAsObject = $allDecoders.forall(_.codesAsObject)",
q"""
def run($jsonParser: com.paytronix.utils.interchange.format.json.InterchangeJsonParser,
$receiver: com.paytronix.utils.interchange.base.Receiver[$targetType]) =
if (!$jsonParser.hasValue) $jsonParser.unexpectedMissingValue
else $decode
"""
)
}
/* 2014-08-27 RMM: having multiple annotation macros which addToCompanion causes the compiler to not emit the object class (Blah$) even though
it doesn't error at runtime.
def deriveImplicitTaggedUnionCoderAnnotation(annottees: c.Expr[Any]*): c.Expr[Any] =
addToCompanion(annottees) { (targetName, annotations) =>
import c.universe.{Ident, IdentTag, Name, NameTag, Quasiquote, TermName, TermNameTag, Tree, TreeTag}
def isUnionAnno(t: Tree): Boolean =
t match {
case Ident(n: Name) => n.decodedName.toString == "union" // the tree isn't yet typechecked and resolved so uhh, this is spotty
case _ => false
}
val q"new $_($determinant).macroTransform(..$_)" = c.macroApplication
val alts =
annotations.collectFirst { case q"new $anno(..$alts)" if isUnionAnno(anno) => alts }
.getOrElse(sys.error("couldn't find union annotation among " + annotations))
List(q"""
implicit val ${implicitCoderName}: ${coderType(Ident(targetName))} =
com.paytronix.utils.interchange.format.json.derive.taggedUnion.coder[$targetName]($determinant, ..$alts)
""")
}
*/
private def parseTaggedUnionAlternates(alternateTrees: Seq[Tree]): NonEmptyList[(Type, String)] = {
import c.universe.{Quasiquote, TreeTag}
val targets = alternateTrees.toList.map {
case q"$_[$tpeTree]($tag)" => (tpeTree.tpe, c.eval(c.Expr[String](c.untypecheck(tag.duplicate))))
case tree =>
sys.error("unrecognized union alternative syntax: " + tree + ". expected either alternate[Type](\\"Tag\\")")
}
targets match {
case Nil => sys.error("union cannot be made with no alternates!")
case hd :: tl => nel(hd, tl)
}
}
def taggedUnionCoderDef[A: c.WeakTypeTag](determinant: c.Tree, alternates: c.Tree*): c.Expr[JsonCoder[A]] = try {
import c.universe.{Block, BlockTag, Ident, Quasiquote, TermName}
val targetType = weakTypeTag[A].tpe
val name = TermName(targetType.typeSymbol.name.decodedName.toString + "Coder")
val targetSubtypes = parseTaggedUnionAlternates(alternates)
val subtypeEncoderNames = targetSubtypes.map { _ => TermName(c.freshName()) }
val subtypeDecoderNames = targetSubtypes.map { _ => TermName(c.freshName()) }
val declareCoders = (targetSubtypes zip (subtypeEncoderNames zip subtypeDecoderNames)).flatMap { case ((tpe, _), (encoderName, decoderName)) =>
nels (
q"lazy val $decoderName = ${materializeDecoder(tpe, Nil)}",
q"lazy val $encoderName = ${materializeEncoder(tpe, Nil)}"
)
}.list
val subtypeEncoderNamesByType = Map.empty ++ (targetSubtypes zip subtypeEncoderNames).stream
val subtypeDecoderNamesByType = Map.empty ++ (targetSubtypes zip subtypeDecoderNames).stream
val decoderFor = subtypeDecoderNamesByType.map { case ((tpe, _), name) => (tpe, Ident(name)) }
val encoderFor = subtypeEncoderNamesByType.map { case ((tpe, _), name) => (tpe, Ident(name)) }
c.Expr[Coder[A]](q"""
{
object $name extends ${coderType(tq"$targetType")} {
..$declareCoders
object decode extends ${decoderType(tq"$targetType")} {
..${taggedUnionDecoderMethods(determinant, targetType, targetSubtypes, decoderFor)}
}
object encode extends ${encoderType(tq"$targetType")} {
..${taggedUnionEncoderMethods(determinant, targetType, targetSubtypes, encoderFor)}
}
}
$name: ${coderType(tq"$targetType")}
}
""")
} catch { case e: Exception =>
System.err.println("uhoh, macro explosion!")
e.printStackTrace(System.err)
null
}
def taggedUnionDecoderDef[A: c.WeakTypeTag](determinant: c.Tree, alternates: c.Tree*): c.Expr[JsonDecoder[A]] = try {
import c.universe.{Block, BlockTag, Ident, Quasiquote, TermName, weakTypeTag}
val targetType = weakTypeTag[A].tpe
val name = TermName(targetType.typeSymbol.name.decodedName.toString + "Decoder")
val targetSubtypes = parseTaggedUnionAlternates(alternates)
val subtypeDecoderNames = targetSubtypes.map { _ => TermName(c.freshName()) }
val declareDecoders = (targetSubtypes zip subtypeDecoderNames).map { case ((tpe, _), decoderName) =>
q"lazy val $decoderName = ${materializeDecoder(tpe, Nil)}"
}.list
val subtypeDecoderNamesByType = Map.empty ++ (targetSubtypes zip subtypeDecoderNames).stream
val decoderFor = subtypeDecoderNamesByType.map { case ((tpe, _), name) => (tpe, Ident(name)) }
c.Expr[Decoder[A]](q"""
{
object $name extends ${decoderType(tq"$targetType")} {
..$declareDecoders
..${taggedUnionDecoderMethods(determinant, targetType, targetSubtypes, decoderFor)}
}
$name: ${decoderType(tq"$targetType")}
}
""")
} catch { case e: Exception =>
System.err.println("uhoh, macro explosion!")
e.printStackTrace(System.err)
null
}
def taggedUnionEncoderDef[A: c.WeakTypeTag](determinant: c.Tree, alternates: c.Tree*): c.Expr[JsonEncoder[A]] = try {
import c.universe.{Block, BlockTag, Ident, Quasiquote, TermName, weakTypeTag}
val targetType = weakTypeTag[A].tpe
val name = TermName(targetType.typeSymbol.name.decodedName.toString + "Encoder")
val targetSubtypes = parseTaggedUnionAlternates(alternates)
val subtypeEncoderNames = targetSubtypes.map { _ => TermName(c.freshName()) }
val declareEncoders = (targetSubtypes zip subtypeEncoderNames).map { case ((tpe, _), encoderName) =>
q"lazy val $encoderName = ${materializeEncoder(tpe, Nil)}"
}.list
val subtypeEncoderNamesByType = Map.empty ++ (targetSubtypes zip subtypeEncoderNames).stream
val encoderFor = subtypeEncoderNamesByType.map { case ((tpe, _), name) => (tpe, Ident(name)) }
c.Expr[Encoder[A]](q"""
{
object $name extends ${encoderType(tq"$targetType")} {
..$declareEncoders
..${taggedUnionEncoderMethods(determinant, targetType, targetSubtypes, encoderFor)}
}
$name: ${encoderType(tq"$targetType")}
}
""")
} catch { case e: Exception =>
System.err.println("uhoh, macro explosion!")
e.printStackTrace(System.err)
null
}
def taggedUnionEncoderMethods (
determinant: c.Tree,
targetType: Type,
targetSubtypes: NonEmptyList[(Type, String)],
encoderFor: Type => Tree
): List[Tree] = {
import c.universe.{Ident, TermName, Quasiquote}
val jsonGenerator = TermName(c.freshName())
val instance = TermName(c.freshName())
val encodeAlts = targetSubtypes.list.map { case (tpe, tag) =>
val encoder = encoderFor(tpe)
val value = TermName(c.freshName())
cq"""
$value: $tpe =>
com.paytronix.utils.scala.result.tryCatchResultG(com.paytronix.utils.interchange.base.terminal) {
$jsonGenerator.filterNextObject( new com.paytronix.utils.interchange.format.json.InterchangeJsonGenerator.ObjectFilter {
override def beginning() = $jsonGenerator.writeFieldName($determinant) >> $jsonGenerator.writeString($tag)
})
$encoder.run($value, $jsonGenerator)
}
"""
}
val value = TermName(c.freshName())
List (
q"val mightBeNull = false",
q"val codesAsObject = true",
q"""
def run($instance: $targetType, $jsonGenerator: com.paytronix.utils.interchange.format.json.InterchangeJsonGenerator) =
$instance match {
case ..$encodeAlts
case $value =>
com.paytronix.utils.scala.result.FailedG (
"cannot encode value " + $value + " as it was not configured as a valid union alternative",
com.paytronix.utils.interchange.base.CoderFailure.terminal
)
}
"""
)
}
def taggedUnionDecoderMethods (
determinant: c.Tree,
targetType: Type,
targetSubtypes: NonEmptyList[(Type, String)],
decoderFor: Type => Tree
): List[Tree] = {
import c.universe.{Ident, Quasiquote, TermName}
val jsonParser = TermName(c.freshName())
val receiver = TermName(c.freshName())
val discrimValue = TermName(c.freshName())
val validTags = targetSubtypes.map(_._2)
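        // Decoding peeks at the determinant field without consuming the object, then
        // dispatches to the alternative whose tag matches.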
val decodeAlts = targetSubtypes.map { case (tpe, tag) =>
val subReceiver = TermName(c.freshName())
cq"""
$tag =>
val $subReceiver = new com.paytronix.utils.interchange.base.Receiver[$tpe]
${decoderFor(tpe)}.run($jsonParser, $subReceiver) >> $receiver($subReceiver.value)
"""
}.list
List (
q"val mightBeNull = false",
q"val codesAsObject = true",
q"""
def run($jsonParser: com.paytronix.utils.interchange.format.json.InterchangeJsonParser,
$receiver: com.paytronix.utils.interchange.base.Receiver[$targetType]) =
$jsonParser.require(com.fasterxml.jackson.core.JsonToken.START_OBJECT) >>
$jsonParser.peekFields(scala.Array($determinant)) >>= {
case scala.Array(Some($discrimValue)) =>
$discrimValue match {
case ..$decodeAlts
case _ =>
com.paytronix.utils.interchange.base.atProperty($determinant) {
com.paytronix.utils.scala.result.FailedG (
"unexpected value \\"" + $discrimValue + "\\" (expected one of " + ${validTags.list.mkString(", ")} + ")",
$jsonParser.terminal
)
}
}
case _ =>
com.paytronix.utils.interchange.base.atProperty($determinant) {
com.paytronix.utils.scala.result.FailedG("required but missing", $jsonParser.terminal)
}
}
"""
)
}
// hack to avoid macro not liking synonyms
def structureCoderDef[A: c.WeakTypeTag]: c.Expr[JsonCoder[A]] = structureCoderDefImpl[A]
def structureDecoderDef[A: c.WeakTypeTag]: c.Expr[JsonDecoder[A]] = structureDecoderDefImpl[A]
def structureEncoderDef[A: c.WeakTypeTag]: c.Expr[JsonEncoder[A]] = structureEncoderDefImpl[A]
def wrapperCoderDef[A: c.WeakTypeTag]: c.Expr[JsonCoder[A]] = wrapperCoderDefImpl[A]
def wrapperDecoderDef[A: c.WeakTypeTag]: c.Expr[JsonDecoder[A]] = wrapperDecoderDefImpl[A]
def wrapperEncoderDef[A: c.WeakTypeTag]: c.Expr[JsonEncoder[A]] = wrapperEncoderDefImpl[A]
}
| paytronix/utils-open | interchange/json/src/main/scala/com/paytronix/utils/interchange/format/json/deriveImpl.scala | Scala | apache-2.0 | 32,471 |
package scalamachine.scalaz
import scalaz.{Monad, Traverse, Applicative}
import scalamachine.core.{HaltRes, ValueRes, Res, EmptyRes, ErrorRes}
package object res {
implicit val resScalazInstances = new Traverse[Res] with Monad[Res] {
def point[A](a: => A): Res[A] = ValueRes(a)
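    // Only ValueRes carries a value to traverse; Halt/Error/Empty short-circuit
    // and are re-wrapped into G via point.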
def traverseImpl[G[_],A,B](fa: Res[A])(f: A => G[B])(implicit G: Applicative[G]): G[Res[B]] =
map(fa)(a => G.map(f(a))(ValueRes(_): Res[B])) match {
case ValueRes(r) => r
case HaltRes(c,b) => G.point(HaltRes(c,b))
case ErrorRes(e) => G.point(ErrorRes(e))
case _ => G.point(EmptyRes)
}
def bind[A, B](fa: Res[A])(f: A => Res[B]): Res[B] = fa flatMap f
}
}
| stackmob/scalamachine | scalaz7/src/main/scala/scalamachine/scalaz/res/package.scala | Scala | apache-2.0 | 696 |
package org.powlab.jeye.decode.graph
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.Buffer
import scala.collection.mutable.Map
import org.powlab.jeye.core.Opcodes
import org.powlab.jeye.decode.expression._
import org.powlab.jeye.decode.expression.Expressions._
import org.powlab.jeye.decode.RuntimeOpcode
import org.powlab.jeye.decode.graph.OpcodeDetails._
import org.powlab.jeye.decode.graph.OpcodeNodes._
import org.powlab.jeye.decode.graph.OpcodeTreeListeners._
import org.powlab.jeye.decode.sids.SidSelector
import org.powlab.jeye.decode.sids.Sid
import org.powlab.jeye.core.Exception
import org.powlab.jeye.core.Exception._
object OpcodeTree {
  // the head
private val HEAD_RUNTIME_OPCODE = new RuntimeOpcode(-1, Opcodes.OPCODE_NOP, Array())
private val HEAD_EXPRESSION = Sex("head");
private val HEAD_NODE = new SimpleOpcodeNode(HEAD_RUNTIME_OPCODE)
private def indexOf(nodes: Buffer[OpcodeNode], node: OpcodeNode): Int = nodes.indexWhere(_.id == node.id)
private def removeFrom(nodes: Buffer[OpcodeNode], node: OpcodeNode): Int = {
val index = indexOf(nodes, node)
if (index != -1) {
nodes.remove(index)
}
index
}
private def replaceIn(nodes: Buffer[OpcodeNode], replacedNode: OpcodeNode, newNode: OpcodeNode): Int = {
val index = indexOf(nodes, replacedNode)
if (index != -1) {
nodes(index) = newNode
}
index
}
private def insertBefore(nodes: Buffer[OpcodeNode], node: OpcodeNode, newNode: OpcodeNode): Int = {
val index = indexOf(nodes, node)
if (index != -1) {
nodes.insert(index, newNode)
}
index
}
}
/** A relation: the links of a node */
class Relation {
val previews: Buffer[OpcodeNode] = new ArrayBuffer[OpcodeNode]()
val nexts: Buffer[OpcodeNode] = new ArrayBuffer[OpcodeNode]()
}
/**
 * The instruction tree (graph).
 * plainTree is the physical structure.
*/
class OpcodeTree(val plainTree: OpcodeTree) {
  /** Always keep a reference to the head as a marker for starting tree traversals, to avoid manipulating it directly */
val head = OpcodeTree.HEAD_NODE
private val id2Node: Map[String, OpcodeNode] = Map[String, OpcodeNode]()
private val id2Relation: Map[String, Relation] = Map[String, Relation]()
private val id2Detail: Map[String, OpcodeDetail] = Map[String, OpcodeDetail]()
  /** Mapping from a node id to the group it belongs to. */
private val id2Owner: Map[String, OpcodeNode] = Map[String, OpcodeNode]()
private val listeners = new ArrayBuffer[IdChanger]
var selector: SidSelector = null
  // Initialize the head
registryNode(head)
details(head).expression = OpcodeTree.HEAD_EXPRESSION
val resources = new OpcodeTreeResource
def top: OpcodeNode = next(head)
  /** Register a node in the tree */
def registryNode(node: OpcodeNode, details: OpcodeDetail = new OpcodeDetail) {
id2Node.put(node.id, node)
id2Relation.put(node.id, new Relation)
if (node.isInstanceOf[GroupOpcodeNode]) {
scanOpcodeNodes(node, someNode => id2Owner.put(someNode.id, node))
} else {
id2Owner.put(node.id, node)
}
changeDetails(node, details)
}
  /** Prepare the counter before a new traversal of the tree */
def prepared(): OpcodeTreeMarker = new OpcodeTreeMarker(this)
  /** Get the current node, the first one in the horizontal list */
def current(nodeId: String): OpcodeNode = id2Node.getOrElse(nodeId, null)
  /** Get the current node, the first one in the horizontal list */
def current(node: OpcodeNode): OpcodeNode = current(node.id)
  /** Get the current node by opcode number, the first one in the horizontal list */
def current(number: Int): OpcodeNode = current(number.toString);
  /** Get the current node by opcode number, the last one in the horizontal list */
def currentLast(number: Int): OpcodeNode = current(makeId(number, lastPosition(number)));
  /** Check whether a node with the given opcode number exists */
  def has(number: Int): Boolean = current(number.toString) != null
  /** Check whether a node with the given id exists */
  def has(id: String): Boolean = current(id) != null
  /** Check whether the given node exists */
  def has(node: OpcodeNode): Boolean = has(node.id)
  /** Get the previous node */
def preview(node: OpcodeNode): OpcodeNode = relation(node.id).previews.headOption.getOrElse(null)
  /** Get the previous node, offset from the given one by count */
def preview(node: OpcodeNode, count: Int): OpcodeNode = {
val previewNode = preview(node)
if (count > 1 && previewNode != null) {
preview(previewNode, count - 1)
} else {
previewNode
}
}
  /** Check whether there is more than one incoming link */
def hasPreviews(node: OpcodeNode): Boolean = relation(node.id).previews.size > 1
  /** Get the previous nodes */
def previews(node: OpcodeNode): Buffer[OpcodeNode] = relation(node.id).previews
  /** Get the number of previous nodes */
def previewCount(node: OpcodeNode): Int = relation(node.id).previews.size
  /** Check whether there is a next node */
def hasNext(node: OpcodeNode): Boolean = relation(node.id).nexts.nonEmpty
  /** Get the next node */
def next(node: OpcodeNode): OpcodeNode = relation(node.id).nexts.headOption.getOrElse(null)
  /** Get the next nodes */
def nexts(node: OpcodeNode): Buffer[OpcodeNode] = relation(node.id).nexts
  /** Get the number of next nodes */
def nextCount(node: OpcodeNode): Int = relation(node.id).nexts.size
  /** Link two nodes together */
def link(current: OpcodeNode, preview: OpcodeNode) {
if (preview != null && current != null) {
addNext(preview, current)
addPreview(current, preview)
}
}
  /** Remove a node from the target's nexts */
def removeFromNexts(target: OpcodeNode, removedNode: OpcodeNode): Int = {
OpcodeTree.removeFrom(nexts(target), removedNode)
}
  /** Remove a node from the target's previews */
def removeFromPreviews(target: OpcodeNode, removedNode: OpcodeNode): Int = {
OpcodeTree.removeFrom(previews(target), removedNode)
}
  /** Insert before (in the target's previews) */
def insertBeforeInPreviews(target: OpcodeNode, beforeNode: OpcodeNode, newNodeNode: OpcodeNode): Int = {
OpcodeTree.insertBefore(previews(target), beforeNode, newNodeNode)
}
  /** Insert before (in the target's nexts) */
def insertBeforeInNexts(target: OpcodeNode, beforeNode: OpcodeNode, newNodeNode: OpcodeNode): Int = {
OpcodeTree.insertBefore(nexts(target), beforeNode, newNodeNode)
}
  /** Erase all information about a node */
def erase(erasedNode: OpcodeNode) {
val erasedId = erasedNode.id
id2Node.remove(erasedId)
id2Relation.remove(erasedId)
id2Detail.remove(erasedId)
id2Owner.remove(erasedId)
resources -= erasedNode
}
  /** Isolate a node */
def isolate(isolatedNode: OpcodeNode) {
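    // Rewire every preview -> isolatedNode -> next path into a direct preview -> next
    // link, then detach the node from both sides and drop its sid from the selector.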
val previewNodes = previews(isolatedNode)
val nextNodes = nexts(isolatedNode)
previewNodes.foreach(preview => {
nextNodes.foreach(next => {
if (preview != next) {
fireIdChangeEvent(isolatedNode.id, next.id)
insertBeforeInNexts(preview, isolatedNode, next)
}
})
})
nextNodes.foreach(next => {
previewNodes.foreach(preview => {
if (preview != next) {
fireIdChangeEvent(isolatedNode.id, preview.id)
insertBeforeInPreviews(next, isolatedNode, preview)
}
})
})
previewNodes.foreach(removeFromNexts(_, isolatedNode))
nextNodes.foreach(removeFromPreviews(_, isolatedNode))
previewNodes.clear
nextNodes.clear
removeFromSelector(isolatedNode)
}
  /** Remove a node: isolate it and erase its information */
def remove(removedNode: OpcodeNode) {
isolate(removedNode)
erase(removedNode)
}
def redefine(oldNode: OpcodeNode, newNode: OpcodeNode) {
if (!has(oldNode.id)) {
registryNode(oldNode)
}
if (!has(newNode.id)) {
registryNode(newNode, details(oldNode))
}
replace(oldNode, newNode)
}
def replace(oldNode: OpcodeNode, newNode: OpcodeNode) {
previews(oldNode).foreach(node => {
OpcodeTree.replaceIn(nexts(node), oldNode, newNode)
addPreview(newNode, node)
})
nexts(oldNode).foreach(node => {
OpcodeTree.replaceIn(previews(node), oldNode, newNode)
addNext(newNode, node)
})
fireIdChangeEvent(oldNode.id, newNode.id)
}
def changeNext(node: OpcodeNode, replacedNode: OpcodeNode, newNode: OpcodeNode) {
OpcodeTree.replaceIn(nexts(node), replacedNode, newNode)
}
def add(newNode: OpcodeNode) {
val id = makeId(newNode.runtimeOpcode.number, nextPosition(newNode.runtimeOpcode.number) - 1)
registryNode(newNode)
val current = this.current(id)
val previewNodes = previews(current)
previewNodes.foreach(preview => {
changeNext(preview, current, newNode)
addPreview(newNode, preview)
})
previewNodes.clear
link(current, newNode)
fireIdChangeEvent(id, newNode.id)
}
def owner(node: OpcodeNode): OpcodeNode = owner(node.id)
def owner(nodeId: String): OpcodeNode = id2Owner.getOrElse(nodeId, null)
/**
   * Goal: all nodes of a group must behave as a single node. To achieve this, every
   * incoming link (a link that points at any node of the group) must be retargeted at the
   * group node, and likewise every outgoing link. Each node of the group still keeps its
   * own details. For example (illustrative): grouping {B, C} in A -> B -> C -> D rewires
   * the links to A -> group -> D.
*/
def bind(group: GroupOpcodeNode, details: OpcodeDetail) {
registryNode(group, details)
val opcodes = group.opcodes
val incomes = new ArrayBuffer[OpcodeNode]
val outcomes = new ArrayBuffer[OpcodeNode]
opcodes.foreach(opcode => {
fireIdChangeEvent(opcode.id, group.id)
      // Find the nodes that point at least at one of the group's opcode nodes, excluding the group members themselves
previews(opcode).filter(node => (!opcodes.contains(node) && !incomes.contains(node))).foreach(preview => {
incomes += preview
})
      // Find the nodes that the group's opcode nodes point to, excluding the group members themselves
nexts(opcode).filter(node => (!opcodes.contains(node) && !outcomes.contains(node))).foreach(next => {
outcomes += next
})
})
    // Rewire the link from the inner node to the group node (incomeNode <-> group)
incomes.foreach(incomeNode => {
nexts(incomeNode).filter(node => (node != group && opcodes.contains(node))).foreach(nodeFromOpcodes => {
val nextNodes = nexts(incomeNode)
if (OpcodeTree.indexOf(nextNodes, group) == -1) {
OpcodeTree.replaceIn(nextNodes, nodeFromOpcodes, group)
} else {
OpcodeTree.removeFrom(nextNodes, nodeFromOpcodes)
}
addPreview(group, incomeNode)
})
})
    // Rewire the link from the inner node to the group node (group <-> outcomeNode)
outcomes.foreach(outcomeNode => {
previews(outcomeNode).filter(node => (node != group && opcodes.contains(node))).foreach(nodeFromOpcodes => {
val previewNodes = previews(outcomeNode)
if (OpcodeTree.indexOf(previewNodes, group) == -1) {
OpcodeTree.replaceIn(previewNodes, nodeFromOpcodes, group)
} else {
OpcodeTree.removeFrom(previewNodes, nodeFromOpcodes)
}
        addNext(group, outcomeNode) // set the reverse link group -> outcomeNode
})
})
}
def details(nodeId: String): OpcodeDetail = id2Detail(nodeId)
def details(node: OpcodeNode): OpcodeDetail = details(node.id)
def incDetails(node: OpcodeNode): IncDetail = {
val detail = details(node.id)
if (!detail.isInstanceOf[IncDetail]) {
val reason = "Запрашиваемая детализация по узлу '" + node + "' не является inc-детализацией."
val effect = "Обработка дерева инструкций будет прервана."
val action = "Необходимо устранить дефект в логике получения inc-детализации."
throw Exception(TREE_AREA, reason, effect, action)
}
detail.asInstanceOf[IncDetail]
}
def ifDetails(node: OpcodeNode): IfOpcodeDetail = {
val detail = details(node.id)
if (!detail.isInstanceOf[IfOpcodeDetail]) {
val reason = "Запрашиваемая детализация по узлу '" + node + "' не является if-детализацией."
val effect = "Обработка дерева инструкций будет прервана."
val action = "Необходимо устранить дефект в логике получения if-детализации."
throw Exception(TREE_AREA, reason, effect, action)
}
detail.asInstanceOf[IfOpcodeDetail]
}
def cycleDetails(node: OpcodeNode): CycleOpcodeDetail = {
val detail = details(node.id)
if (!isCycleDetails(detail)) {
val reason = "Запрашиваемая детализация по узлу '" + node + "' не является cycle-детализацией."
val effect = "Обработка дерева инструкций будет прервана."
val action = "Необходимо устранить дефект в логике получения cycle-детализации."
throw Exception(TREE_AREA, reason, effect, action)
}
detail.asInstanceOf[CycleOpcodeDetail]
}
def changeDetails(node: OpcodeNode, newDetails: OpcodeDetail) {
changeDetails(node.id, newDetails)
}
def changeDetails(nodeId: String, newDetails: OpcodeDetail) {
id2Detail.put(nodeId, newDetails)
if (newDetails.isInstanceOf[IdChanger]) {
listeners += newDetails.asInstanceOf[IdChanger]
}
}
def nextPosition(node: OpcodeNode): Int = nextPosition(node.runtimeOpcode.number)
def nextPosition(number: Int): Int = {
var position: Int = 0
while (has(makeId(number, position))) {
position += 1
}
position
}
def lastPosition(number: Int): Int = nextPosition(number) - 1
/**
   * Get the sid
*/
def sid(node: OpcodeNode): String = details(node).sid
/**
   * Get the sid as an object
*/
def sido(node: OpcodeNode): Sid = new Sid(sid(node))
def expression(node: OpcodeNode): IExpression = details(node).expression
private def removeFromSelector(node: OpcodeNode) {
if (selector != null) {
OpcodeNodes.scanOpcodeNodes(node, someNode => {
selector.clear(sid(someNode))
})
}
}
private def fireIdChangeEvent(oldId: String, newId: String) {
listeners.foreach(_.change(oldId, newId))
}
  /** Get the relation container holding the node's link data */
private def relation(nodeId: String): Relation = {
var relation = id2Relation.getOrElse(nodeId, null)
if (relation == null) {
relation = new Relation
id2Relation.put(nodeId, relation)
}
relation
}
private def containsNext(preview: OpcodeNode, current: OpcodeNode): Boolean = {
OpcodeTree.indexOf(nexts(preview), current) != -1
}
private def containsPreview(current: OpcodeNode, preview: OpcodeNode): Boolean = {
OpcodeTree.indexOf(previews(current), preview) != -1
}
private def addNext(preview: OpcodeNode, current: OpcodeNode) {
if (!containsNext(preview, current)) {
nexts(preview) += current
}
}
private def addPreview(current: OpcodeNode, preview: OpcodeNode) {
if (!containsPreview(current, preview)) {
previews(current) += preview
}
}
private def makeNothingOpcode(number: Int): OpcodeNode = {
val nothingOpcode = new RuntimeOpcode(number, Opcodes.OPCODE_NOP, Array())
val nothingNode = new SimpleOpcodeNode(nothingOpcode, false, nextPosition(number))
nothingNode
}
} | powlab/jeye | src/main/scala/org/powlab/jeye/decode/graph/OpcodeTree.scala | Scala | apache-2.0 | 16,919 |
package com.amichalo.smooolelo.providers
import com.typesafe.config.Config
import com.typesafe.scalalogging.LazyLogging
import scala.collection.JavaConversions._
class ConfigValuesProvider(config: Config) extends ConfigurationProvider with LazyLogging {
private lazy val configuration: Map[String, String] = initialize()
private def initialize(): Map[String, String] = {
val map = config.entrySet().map(entry => entry.getKey -> entry.getValue.render()).toMap
logger.trace(s"Application config values: $map")
map
}
override def apply(): Map[String, String] = configuration
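  // Usage sketch (illustrative; assumes com.typesafe.config.ConfigFactory is imported
  // and a config is loadable from the classpath):
  //   val provider = new ConfigValuesProvider(ConfigFactory.load())
  //   val values: Map[String, String] = provider() // every key mapped to its rendered value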
} | amichalo/smooolelo | src/main/scala/com/amichalo/smooolelo/providers/ConfigValuesProvider.scala | Scala | apache-2.0 | 598 |
class Outer {
class Inner {
def foo: Unit = assert(Outer.this ne null)
}
}
| scala/scala | test/files/run/t10423/A_2.scala | Scala | apache-2.0 | 84 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils
import java.util.concurrent._
import atomic._
import org.apache.kafka.common.utils.KafkaThread
/**
* A scheduler for running jobs
*
* This interface controls a job scheduler that allows scheduling either repeating background jobs
* that execute periodically or delayed one-time actions that are scheduled in the future.
*/
trait Scheduler {
/**
* Initialize this scheduler so it is ready to accept scheduling of tasks
*/
def startup()
/**
* Shutdown this scheduler. When this method is complete no more executions of background tasks will occur.
* This includes tasks scheduled with a delayed execution.
*/
def shutdown()
/**
* Check if the scheduler has been started
*/
def isStarted: Boolean
/**
* Schedule a task
* @param name The name of this task
* @param delay The amount of time to wait before the first execution
* @param period The period with which to execute the task. If < 0 the task will execute only once.
* @param unit The unit for the preceding times.
   * @return A Future object to manage the scheduled task.
*/
def schedule(name: String, fun: ()=>Unit, delay: Long = 0, period: Long = -1, unit: TimeUnit = TimeUnit.MILLISECONDS) : ScheduledFuture[_]
}
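/*
 * Minimal usage sketch (illustrative; the task name and body are made up):
 *
 *   val scheduler = new KafkaScheduler(threads = 1)
 *   scheduler.startup()
 *   scheduler.schedule("log-flush", () => println("flushing"),
 *     delay = 100, period = 500, unit = TimeUnit.MILLISECONDS)
 *   // ...
 *   scheduler.shutdown()
 */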
/**
* A scheduler based on java.util.concurrent.ScheduledThreadPoolExecutor
*
* It has a pool of kafka-scheduler- threads that do the actual work.
*
* @param threads The number of threads in the thread pool
* @param threadNamePrefix The name to use for scheduler threads. This prefix will have a number appended to it.
* @param daemon If true the scheduler threads will be "daemon" threads and will not block jvm shutdown.
*/
@threadsafe
class KafkaScheduler(val threads: Int,
val threadNamePrefix: String = "kafka-scheduler-",
daemon: Boolean = true) extends Scheduler with Logging {
private var executor: ScheduledThreadPoolExecutor = null
private val schedulerThreadId = new AtomicInteger(0)
override def startup() {
debug("Initializing task scheduler.")
this synchronized {
if(isStarted)
throw new IllegalStateException("This scheduler has already been started!")
executor = new ScheduledThreadPoolExecutor(threads)
executor.setContinueExistingPeriodicTasksAfterShutdownPolicy(false)
executor.setExecuteExistingDelayedTasksAfterShutdownPolicy(false)
executor.setRemoveOnCancelPolicy(true)
executor.setThreadFactory(new ThreadFactory() {
def newThread(runnable: Runnable): Thread =
new KafkaThread(threadNamePrefix + schedulerThreadId.getAndIncrement(), runnable, daemon)
})
}
}
override def shutdown() {
debug("Shutting down task scheduler.")
// We use the local variable to avoid NullPointerException if another thread shuts down scheduler at same time.
val cachedExecutor = this.executor
if (cachedExecutor != null) {
this synchronized {
cachedExecutor.shutdown()
this.executor = null
}
cachedExecutor.awaitTermination(1, TimeUnit.DAYS)
}
}
def scheduleOnce(name: String, fun: () => Unit): Unit = {
schedule(name, fun, delay = 0L, period = -1L, unit = TimeUnit.MILLISECONDS)
}
def schedule(name: String, fun: () => Unit, delay: Long, period: Long, unit: TimeUnit): ScheduledFuture[_] = {
debug("Scheduling task %s with initial delay %d ms and period %d ms."
.format(name, TimeUnit.MILLISECONDS.convert(delay, unit), TimeUnit.MILLISECONDS.convert(period, unit)))
this synchronized {
ensureRunning()
val runnable = CoreUtils.runnable {
try {
trace("Beginning execution of scheduled task '%s'.".format(name))
fun()
} catch {
case t: Throwable => error(s"Uncaught exception in scheduled task '$name'", t)
} finally {
trace("Completed execution of scheduled task '%s'.".format(name))
}
}
if(period >= 0)
executor.scheduleAtFixedRate(runnable, delay, period, unit)
else
executor.schedule(runnable, delay, unit)
}
}
/**
* Package private for testing.
*/
private[utils] def taskRunning(task: ScheduledFuture[_]): Boolean = {
executor.getQueue().contains(task)
}
def resizeThreadPool(newSize: Int): Unit = {
executor.setCorePoolSize(newSize)
}
def isStarted: Boolean = {
this synchronized {
executor != null
}
}
private def ensureRunning(): Unit = {
if (!isStarted)
throw new IllegalStateException("Kafka scheduler is not running.")
}
}
| KevinLiLu/kafka | core/src/main/scala/kafka/utils/KafkaScheduler.scala | Scala | apache-2.0 | 5,506 |
import play.api._
/**
* Copyright (c) Nikita Kovaliov, maizy.ru, 2014
* See LICENSE.txt for details.
*/
object Global extends GlobalSettings {
override def configuration = Configuration.from(
Map("ws" -> Map("useragent" -> s"keemun/${keemun.Version.SYMBOLIC}"))
)
}
| maizy/keemun | app/Global.scala | Scala | mit | 278 |
package chapter.eighteen
import ExerciseFour._
import org.scalatest._
import org.scalatest.junit.JUnitRunner
import org.junit.runner.RunWith
@RunWith(classOf[JUnitRunner])
class ExerciseFourSpec extends FlatSpec with Matchers {
"function" should "" in {
}
}
| deekim/impatient-scala | src/test/scala/chapter/eighteen/ExerciseFourSpec.scala | Scala | apache-2.0 | 266 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.crossdata.driver.shell
// TODO make this class generic; then should be moved to a util package
class ShellArgsReader(private val arglist: List[String]) {
type OptionMap = Map[String, Any]
val booleanOptions = Seq("tcp", "async")
val options = nextOption(Map(), arglist)
object BooleanOptionDisabledByDefault {
type StringKey = String
type BooleanValue = Boolean
def unapply(list: List[String]): Option[(StringKey, BooleanValue, List[String])] = {
list match {
case strOptName :: "true" :: tail =>
Some((strOptName, true, tail))
case strOptName :: "false" :: tail =>
Some((strOptName, false, tail))
case strOptName :: tail =>
Some((strOptName, true, tail))
}
} collect {
case (strOptName, anyBool, anyList) if strOptName.startsWith("--") =>
(strOptName.substring("--".length), anyBool, anyList)
}
}
private def nextOption(map: OptionMap, list: List[String]): OptionMap = {
list match {
case Nil => map
case "--user" :: username :: tail =>
nextOption(map ++ Map("user" -> username), tail)
case "--timeout" :: timeout :: tail =>
nextOption(map ++ Map("timeout" -> timeout.toInt), tail)
case "--query" :: query :: tail =>
nextOption(map ++ Map("query" -> query), tail)
case BooleanOptionDisabledByDefault(optName, boolValue, tail) if booleanOptions contains optName =>
nextOption(map ++ Map(optName -> boolValue), tail)
}
}
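  // Illustrative example: the argument list
  //   List("--user", "alice", "--timeout", "30", "--tcp")
  // resolves to Map("user" -> "alice", "timeout" -> 30, "tcp" -> true).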
} | jjlopezm/crossdata | driver/src/main/scala/com/stratio/crossdata/driver/shell/ShellArgsReader.scala | Scala | apache-2.0 | 2,154 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.utils
import org.apache.flink.table.planner.plan.metadata.FlinkRelMetadataQuery
import org.apache.flink.table.planner.plan.nodes.calcite.LegacySink
import org.apache.flink.table.sinks.UpsertStreamTableSink
import scala.collection.JavaConversions._
object UpdatingPlanChecker {
def getUniqueKeyForUpsertSink(
sinkNode: LegacySink,
sink: UpsertStreamTableSink[_]): Option[Array[String]] = {
// extract unique key fields
    // For now we pick the shortest unique key for the sink
// TODO UpsertStreamTableSink setKeyFields interface should be Array[Array[String]]
val sinkFieldNames = sink.getTableSchema.getFieldNames
/** Extracts the unique keys of the table produced by the plan. */
val fmq = FlinkRelMetadataQuery.reuseOrCreate(sinkNode.getCluster.getMetadataQuery)
val uniqueKeys = fmq.getUniqueKeys(sinkNode.getInput)
if (uniqueKeys != null && uniqueKeys.size() > 0) {
uniqueKeys
.filter(_.nonEmpty)
.map(_.toArray.map(sinkFieldNames))
.toSeq
.sortBy(_.length)
.headOption
} else {
None
}
}
}
| apache/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/utils/UpdatingPlanChecker.scala | Scala | apache-2.0 | 1,945 |
/*
* Copyright (c) 2013. Regents of the University of California
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.berkeley.cs.amplab.adam.converters
import net.sf.samtools.{SAMReadGroupRecord, SAMRecord}
import edu.berkeley.cs.amplab.adam.avro.ADAMRecord
import scala.collection.JavaConverters._
import edu.berkeley.cs.amplab.adam.models.{Attribute, RecordGroupDictionary, SequenceDictionary}
import edu.berkeley.cs.amplab.adam.util.AttributeUtils
class SAMRecordConverter extends Serializable {
def convert(samRecord: SAMRecord, dict: SequenceDictionary, readGroups: RecordGroupDictionary): ADAMRecord = {
val builder: ADAMRecord.Builder = ADAMRecord.newBuilder
.setReadName(samRecord.getReadName)
.setSequence(samRecord.getReadString)
.setCigar(samRecord.getCigarString)
.setQual(samRecord.getBaseQualityString)
// Only set the reference information if the read is aligned, matching the mate reference
// This prevents looking up a -1 in the sequence dictionary
val readReference: Int = samRecord.getReferenceIndex
if (readReference != SAMRecord.NO_ALIGNMENT_REFERENCE_INDEX) {
builder
.setReferenceId(readReference)
.setReferenceName(samRecord.getReferenceName)
.setReferenceLength(dict(samRecord.getReferenceIndex).length)
.setReferenceUrl(dict(samRecord.getReferenceIndex).url)
val start: Int = samRecord.getAlignmentStart
if (start != 0) {
builder.setStart((start - 1).asInstanceOf[Long])
}
val mapq: Int = samRecord.getMappingQuality
if (mapq != SAMRecord.UNKNOWN_MAPPING_QUALITY) {
builder.setMapq(mapq)
}
}
// Position of the mate/next segment
val mateReference: Int = samRecord.getMateReferenceIndex
if (mateReference != SAMRecord.NO_ALIGNMENT_REFERENCE_INDEX) {
builder
.setMateReferenceId(mateReference)
.setMateReference(samRecord.getMateReferenceName)
.setMateReferenceLength(dict(samRecord.getMateReferenceName).length)
.setMateReferenceUrl(dict(samRecord.getMateReferenceName).url)
val mateStart = samRecord.getMateAlignmentStart
if (mateStart > 0) {
// We subtract one here to be 0-based offset
builder.setMateAlignmentStart(mateStart - 1)
}
}
    // The Avro schema defines all flags as defaulting to 'false'. We only need to set the flags that are true.
if (samRecord.getFlags != 0) {
if (samRecord.getReadPairedFlag) {
builder.setReadPaired(true)
if (samRecord.getMateNegativeStrandFlag) {
builder.setMateNegativeStrand(true)
}
if (!samRecord.getMateUnmappedFlag) {
builder.setMateMapped(true)
}
if (samRecord.getProperPairFlag) {
builder.setProperPair(true)
}
if (samRecord.getFirstOfPairFlag) {
builder.setFirstOfPair(true)
}
if (samRecord.getSecondOfPairFlag) {
builder.setSecondOfPair(true)
}
}
if (samRecord.getDuplicateReadFlag) {
builder.setDuplicateRead(true)
}
if (samRecord.getReadNegativeStrandFlag) {
builder.setReadNegativeStrand(true)
}
if (!samRecord.getNotPrimaryAlignmentFlag) {
builder.setPrimaryAlignment(true)
}
if (samRecord.getReadFailsVendorQualityCheckFlag) {
builder.setFailedVendorQualityChecks(true)
}
if (!samRecord.getReadUnmappedFlag) {
builder.setReadMapped(true)
}
}
if (samRecord.getAttributes != null) {
var tags = List[Attribute]()
samRecord.getAttributes.asScala.foreach {
attr =>
if (attr.tag == "MD") {
builder.setMismatchingPositions(attr.value.toString)
} else {
tags ::= AttributeUtils.convertSAMTagAndValue(attr)
}
}
builder.setAttributes(tags.mkString("\\t"))
}
val recordGroup: SAMReadGroupRecord = samRecord.getReadGroup
if (recordGroup != null) {
Option(recordGroup.getRunDate) match {
case Some(date) => builder.setRecordGroupRunDateEpoch(date.getTime)
case None =>
}
builder.setRecordGroupId(readGroups(recordGroup.getReadGroupId))
.setRecordGroupName(recordGroup.getReadGroupId)
.setRecordGroupSequencingCenter(recordGroup.getSequencingCenter)
.setRecordGroupDescription(recordGroup.getDescription)
.setRecordGroupFlowOrder(recordGroup.getFlowOrder)
.setRecordGroupKeySequence(recordGroup.getKeySequence)
.setRecordGroupLibrary(recordGroup.getLibrary)
.setRecordGroupPredictedMedianInsertSize(recordGroup.getPredictedMedianInsertSize)
.setRecordGroupPlatform(recordGroup.getPlatform)
.setRecordGroupPlatformUnit(recordGroup.getPlatformUnit)
.setRecordGroupSample(recordGroup.getSample)
}
builder.build
}
}
| fnothaft/adam | adam-core/src/main/scala/edu/berkeley/cs/amplab/adam/converters/SAMRecordConverter.scala | Scala | apache-2.0 | 5,452 |
package org.geow.util.test
import org.geow.generator.OsmObjectGenerator
import org.geow.model.OsmId
import org.geow.model.geometry.Point
import org.geow.util.Denormalizer
import org.specs2.ScalaCheck
import org.specs2.mutable.Specification
class GeowUtilsSpec extends Specification with ScalaCheck {
sequential
val generator = OsmObjectGenerator()
import generator._
"The GeowUtils" should {
"should denormalize an \\"OsmWay\\"" in {
val expectedWay = generateWay
val expectedNds = expectedWay.nds
val expectedId = expectedWay.id
val expectedUser = expectedWay.user
val expectedVersion = expectedWay.version
val expectedTags = expectedWay.tags
val expectedMappingsList = for (nd <- expectedNds) yield nd -> generatePoint
val expectedMappings: Map[OsmId, Point] = expectedMappingsList.toMap
val denormalizedWay = Denormalizer.denormalizeWay(expectedWay, expectedMappings)
val actualId = denormalizedWay.id
val actualUser = denormalizedWay.user
val actualVersion = denormalizedWay.version
val actualTags = denormalizedWay.tags
val actualGeometry = denormalizedWay.geometry
val actualPoints = actualGeometry.points
for (nd <- expectedNds){
val expectedPoint = expectedMappings(nd)
actualPoints must contain(expectedPoint)
}
actualTags must containTheSameElementsAs(expectedTags)
actualId must be_==(expectedId)
actualUser must be_==(expectedUser)
actualVersion must be_==(expectedVersion)
}
}
} | geow-org/api | src/test/scala/org/geow/util/test/GeowUtilsSpec.scala | Scala | apache-2.0 | 1,564 |
object main {
def main(args: Array[String]){
/*
     * A shadow variable is a variable inside a function or a method that has the same name
     * as a variable in an enclosing scope; inside the inner scope it hides (shadows) the outer one.
     * Examples below:
*/
val a = 1
/*
     * 1) Uncommenting the line below causes a compile error: two vals with the same name
     * cannot be defined in the same scope, so the second definition is rejected
     * as a duplicate (vals cannot be re-assigned or re-defined).
*/
// val a = 2
println(a)
/*
     * 2) The example below demonstrates a shadow variable: the b inside objectA hides the outer b.
*/
val b = 1
object objectA{
val b = 2
println(b)
}
    objectA // referencing the object forces its body to run, printing the inner b
println(b)
}
} | arcyfelix/Courses | 18-10-18-Programming-in-Scala-by-Martin-Odersky-Lex-Spoon-and-Bill-Venners/23-ShadowVariable/src/main.scala | Scala | apache-2.0 | 708 |
package com.cloudray.scalapress.plugin.listings.controller
import org.springframework.stereotype.Controller
import org.springframework.web.bind.annotation.{ModelAttribute, RequestMethod, PathVariable, ResponseBody, RequestMapping}
import javax.servlet.http.HttpServletRequest
import org.springframework.beans.factory.annotation.Autowired
import scala.collection.JavaConverters._
import com.cloudray.scalapress.item.Item
import com.cloudray.scalapress.item.attr.AttributeValue
import com.cloudray.scalapress.util.mvc.ScalapressPage
import com.cloudray.scalapress.theme.ThemeService
import com.cloudray.scalapress.security.SpringSecurityResolver
import com.cloudray.scalapress.plugin.listings.controller.renderer.ListingFieldsRenderer
import com.cloudray.scalapress.framework.{ScalapressRequest, ScalapressContext}
/** @author Stephen Samuel */
@Controller
@RequestMapping(Array("listing/{id}"))
class ListingEditController {
@Autowired var context: ScalapressContext = _
@Autowired var themeService: ThemeService = _
@ResponseBody
@RequestMapping(method = Array(RequestMethod.GET), produces = Array("text/html"))
def edit(@ModelAttribute("item") obj: Item, req: HttpServletRequest): ScalapressPage = {
val account = SpringSecurityResolver.getAccount(req)
require(account.get.id == obj.account.id)
val sreq = ScalapressRequest(req, context).withTitle("Edit Listing")
val theme = themeService.default
val page = ScalapressPage(theme, sreq)
page.body(ListingFieldsRenderer.render(obj))
page
}
@RequestMapping(method = Array(RequestMethod.POST))
def save(@ModelAttribute("item") item: Item, req: HttpServletRequest): String = {
item.name = req.getParameter("title")
item.content = req.getParameter("content")
item.attributeValues.clear()
for ( a <- item.objectType.attributes.asScala ) {
val values = req.getParameterValues("attributeValue_" + a.id)
if (values != null) {
values.map(_.trim).filter(_.length > 0).foreach(value => {
val av = new AttributeValue
av.attribute = a
av.value = value
av.item = item
item.attributeValues.add(av)
})
}
}
context.itemDao.save(item)
"redirect:/listing/"
}
@ModelAttribute("item") def listing(@PathVariable("id") id: Long) = context.itemDao.find(id)
}
| vidyacraghav/scalapress | src/main/scala/com/cloudray/scalapress/plugin/listings/controller/ListingEditController.scala | Scala | apache-2.0 | 2,356 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sysml.api.dl
import java.util.HashSet
import caffe.Caffe.LayerParameter;
import caffe.Caffe.NetParameter;
import caffe.Caffe.SolverParameter;
import org.apache.sysml.runtime.DMLRuntimeException;
import scala.collection.JavaConversions._
import caffe.Caffe
trait BaseDMLGenerator {
def commaSep(arr: List[String]): String =
if (arr.length == 1) arr(0)
else {
var ret = arr(0)
for (i <- 1 until arr.length) {
ret = ret + "," + arr(i)
}
ret
}
def commaSep(arr: String*): String =
if (arr.length == 1) arr(0)
else {
var ret = arr(0)
for (i <- 1 until arr.length) {
ret = ret + "," + arr(i)
}
ret
}
def int_add(v1: String, v2: String): String =
try { (v1.toDouble + v2.toDouble).toInt.toString } catch { case _: Throwable => "(" + v1 + "+" + v2 + ")" }
def int_mult(v1: String, v2: String, v3: String): String =
try { (v1.toDouble * v2.toDouble * v3.toDouble).toInt.toString } catch { case _: Throwable => "(" + v1 + "*" + v2 + "*" + v3 + ")" }
def int_mult(v1: String, v2: String): String =
try { (v1.toDouble * v2.toDouble).toInt.toString } catch { case _: Throwable => "(" + v1 + "*" + v2 + ")" }
def isNumber(x: String): Boolean = x forall Character.isDigit
def transpose(x: String): String = "t(" + x + ")"
def write(varName: String, fileName: String, format: String): String = "write(" + varName + ", \"" + fileName + "\", format=\"" + format + "\")\n"
def readWeight(varName: String, fileName: String, sep: String = "/"): String = varName + " = read(weights + \"" + sep + fileName + "\")\n"
def readScalarWeight(varName: String, fileName: String, sep: String = "/"): String = varName + " = as.scalar(read(weights + \"" + sep + fileName + "\"))\n"
def asDMLString(str: String): String = "\"" + str + "\""
def assign(dmlScript: StringBuilder, lhsVar: String, rhsVar: String): Unit =
dmlScript.append(lhsVar).append(" = ").append(rhsVar).append("\n")
def assignPlusEq(dmlScript: StringBuilder, lhsVar: String, rhsVar: String): Unit =
dmlScript.append(lhsVar).append(" += ").append(rhsVar).append("\n")
def sum(dmlScript: StringBuilder, variables: List[String]): StringBuilder = {
if (variables.length > 1) dmlScript.append("(")
dmlScript.append(variables(0))
for (i <- 1 until variables.length) {
dmlScript.append(" + ").append(variables(i))
}
if (variables.length > 1) dmlScript.append(")")
return dmlScript
}
def addAndAssign(dmlScript: StringBuilder, lhsVar: String, rhsVars: List[String]): Unit = {
dmlScript.append(lhsVar).append(" = ")
sum(dmlScript, rhsVars)
dmlScript.append("\n")
}
def rightIndexing(dmlScript: StringBuilder, lhsVar:String, rhsVar: String, rl: String, ru: String, cl: String=null, cu: String=null): StringBuilder = {
dmlScript.append(lhsVar).append(" = ").append(rhsVar).append("[")
if (rl != null && ru != null) dmlScript.append(rl).append(":").append(ru)
dmlScript.append(",")
if (cl != null && cu != null) dmlScript.append(cl).append(":").append(cu)
dmlScript.append("]\n")
}
// Performs assignVar = ceil(lhsVar/rhsVar)
def ceilDivide(dmlScript: StringBuilder, assignVar: String, lhsVar: String, rhsVar: String): Unit =
dmlScript.append(assignVar).append(" = ").append("ceil(").append(lhsVar).append(" / ").append(rhsVar).append(")\n")
def print(arg: String): String = "print(" + arg + ")\n"
def dmlConcat(arg: String*): String = {
val ret = new StringBuilder
ret.append(arg(0))
for (i <- 1 until arg.length) {
ret.append(" + ").append(arg(i))
}
ret.toString
}
def matrix(init: String, rows: String, cols: String): String = "matrix(" + init + ", rows=" + rows + ", cols=" + cols + ")"
def sum(m: String): String = "sum(" + m + ")"
def nrow(m: String): String = "nrow(" + m + ")"
def ceil(m: String): String = "ceil(" + m + ")"
def floor(m: String): String = "floor(" + m + ")"
def stop(dmlScript: StringBuilder, m: String): StringBuilder = dmlScript.append("stop(" + m + ")\n")
def asInteger(m: String): String = "as.integer(" + m + ")"
def ncol(m: String): String = "ncol(" + m + ")"
def customAssert(cond: Boolean, msg: String) = if (!cond) throw new DMLRuntimeException(msg)
def multiply(v1: String, v2: String): String = v1 + "*" + v2
def colSums(m: String): String = "colSums(" + m + ")"
def ifdef(cmdLineVar: String, defaultVal: String): String = "ifdef(" + cmdLineVar + ", " + defaultVal + ")"
def ifdef(cmdLineVar: String): String = ifdef(cmdLineVar, "\" \"")
def read(filePathVar: String, format: String): String = "read(" + filePathVar + ", format=\"" + format + "\")"
}
trait TabbedDMLGenerator extends BaseDMLGenerator {
def tabDMLScript(dmlScript: StringBuilder, numTabs: Int): StringBuilder = tabDMLScript(dmlScript, numTabs, false)
def tabDMLScript(dmlScript: StringBuilder, numTabs: Int, prependNewLine: Boolean): StringBuilder = {
if (prependNewLine) dmlScript.append("\n")
for (i <- 0 until numTabs) dmlScript.append("\t")
dmlScript
}
}
trait SourceDMLGenerator extends TabbedDMLGenerator {
val alreadyImported: HashSet[String] = new HashSet[String]
def source(dmlScript: StringBuilder, numTabs: Int, sourceFileName: String, dir: String): Unit =
if (sourceFileName != null && !alreadyImported.contains(sourceFileName)) {
tabDMLScript(dmlScript, numTabs).append("source(\"" + dir + sourceFileName + ".dml\") as " + sourceFileName + "\n")
alreadyImported.add(sourceFileName)
}
def source(dmlScript: StringBuilder, numTabs: Int, net: CaffeNetwork, solver: CaffeSolver, otherFiles: Array[String]): Unit = {
// Add layers with multiple source files
if (net.getLayers.filter(layer => net.getCaffeLayer(layer).isInstanceOf[SoftmaxWithLoss]).length > 0) {
source(dmlScript, numTabs, "softmax", Caffe2DML.layerDir)
source(dmlScript, numTabs, "cross_entropy_loss", Caffe2DML.layerDir)
}
net.getLayers.map(layer => source(dmlScript, numTabs, net.getCaffeLayer(layer).sourceFileName, Caffe2DML.layerDir))
if (solver != null)
source(dmlScript, numTabs, solver.sourceFileName, Caffe2DML.optimDir)
if (otherFiles != null)
otherFiles.map(sourceFileName => source(dmlScript, numTabs, sourceFileName, Caffe2DML.layerDir))
}
}
trait NextBatchGenerator extends TabbedDMLGenerator {
def min(lhs: String, rhs: String): String = "min(" + lhs + ", " + rhs + ")"
// Creates a DML script for:
// index_prefix_beg = ((i-1) * batchSize) %% N + 1;
// index_prefix_end = min(index_prefix_beg + batchSize - 1, N);
// Xb = X[ index_prefix_beg: index_prefix_end, ]; yb = y[ index_prefix_beg: index_prefix_end, ];
def assignBatch(dmlScript: StringBuilder, Xb: String, X: String, yb: String, y: String, indexPrefix: String, N: String, i: String, batchSize:String): StringBuilder = {
dmlScript.append(indexPrefix).append("beg = ((" + i + "-1) * " + batchSize + ") %% " + N + " + 1; ")
dmlScript.append(indexPrefix).append("end = min(" + indexPrefix + "beg + " + batchSize + " - 1, " + N + "); ")
dmlScript.append(Xb).append(" = ").append(X).append("[").append(indexPrefix).append("beg:").append(indexPrefix).append("end,]; ")
if (yb != null && y != null)
dmlScript.append(yb).append(" = ").append(y).append("[").append(indexPrefix).append("beg:").append(indexPrefix).append("end,]; ")
dmlScript.append("\n")
}
def getTestBatch(tabDMLScript: StringBuilder): Unit =
assignBatch(tabDMLScript, "Xb", Caffe2DML.X, null, null, "", Caffe2DML.numImages, "iter", Caffe2DML.batchSize)
def getTrainingBatch(tabDMLScript: StringBuilder): Unit =
assignBatch(tabDMLScript, "Xb", Caffe2DML.X, "yb", Caffe2DML.y, "", Caffe2DML.numImages, "iter", Caffe2DML.batchSize)
def getValidationBatch(tabDMLScript: StringBuilder): Unit =
assignBatch(tabDMLScript, "Xb", Caffe2DML.XVal, "yb", Caffe2DML.yVal, "", Caffe2DML.numValidationImages, "iVal", Caffe2DML.batchSize)
}
trait DMLGenerator extends SourceDMLGenerator with NextBatchGenerator {
// Also makes "code reading" possible for Caffe2DML :)
var dmlScript = new StringBuilder
var numTabs = 0
def reset(): Unit = {
dmlScript.clear()
alreadyImported.clear()
numTabs = 0
}
// -------------------------------------------------------------------------------------------------
  // Helper functions that call the superclass methods and simplify the code of this trait
def tabDMLScript(): StringBuilder = tabDMLScript(dmlScript, numTabs, false)
def tabDMLScript(prependNewLine: Boolean): StringBuilder = tabDMLScript(dmlScript, numTabs, prependNewLine)
def source(net: CaffeNetwork, solver: CaffeSolver, otherFiles: Array[String]): Unit =
source(dmlScript, numTabs, net, solver, otherFiles)
// -------------------------------------------------------------------------------------------------
def ifBlock(cond: String)(op: => Unit) {
tabDMLScript.append("if(" + cond + ") {\n")
numTabs += 1
op
numTabs -= 1
tabDMLScript.append("}\n")
}
def whileBlock(cond: String)(op: => Unit) {
tabDMLScript.append("while(" + cond + ") {\n")
numTabs += 1
op
numTabs -= 1
tabDMLScript.append("}\n")
}
def forBlock(iterVarName: String, startVal: String, endVal: String, step:String)(op: => Unit) {
tabDMLScript.append("for(" + iterVarName + " in seq(" + startVal + "," + endVal + "," + step + ")) {\n")
numTabs += 1
op
numTabs -= 1
tabDMLScript.append("}\n")
}
def forBlock(iterVarName: String, startVal: String, endVal: String)(op: => Unit) {
tabDMLScript.append("for(" + iterVarName + " in " + startVal + ":" + endVal + ") {\n")
numTabs += 1
op
numTabs -= 1
tabDMLScript.append("}\n")
}
def parForBlock(iterVarName: String, startVal: String, endVal: String, step:String, parforParameters:String)(op: => Unit) {
if(step.equals("1"))
tabDMLScript.append("parfor(" + iterVarName + " in " + startVal + ":" + endVal + parforParameters + ") {\n")
else
tabDMLScript.append("parfor(" + iterVarName + " in seq(" + startVal + "," + endVal + "," + step + ")" + parforParameters + ") {\n")
numTabs += 1
op
numTabs -= 1
tabDMLScript.append("}\n")
}
def printClassificationReport(): Unit =
ifBlock("debug") {
assign(tabDMLScript, "num_rows_error_measures", min("10", ncol("yb")))
assign(tabDMLScript, "error_measures", matrix("0", "num_rows_error_measures", "5"))
forBlock("class_i", "1", "num_rows_error_measures") {
assign(tabDMLScript, "tp", "sum( (true_yb == predicted_yb) * (true_yb == class_i) )")
assign(tabDMLScript, "tp_plus_fp", "sum( (predicted_yb == class_i) )")
assign(tabDMLScript, "tp_plus_fn", "sum( (true_yb == class_i) )")
assign(tabDMLScript, "precision", "tp / tp_plus_fp")
assign(tabDMLScript, "recall", "tp / tp_plus_fn")
assign(tabDMLScript, "f1Score", "2*precision*recall / (precision+recall)")
assign(tabDMLScript, "error_measures[class_i,1]", "class_i")
assign(tabDMLScript, "error_measures[class_i,2]", "precision")
assign(tabDMLScript, "error_measures[class_i,3]", "recall")
assign(tabDMLScript, "error_measures[class_i,4]", "f1Score")
assign(tabDMLScript, "error_measures[class_i,5]", "tp_plus_fn")
}
val dmlTab = "\\t"
val header = "class " + dmlTab + "precision" + dmlTab + "recall " + dmlTab + "f1-score" + dmlTab + "num_true_labels\\n"
val errorMeasures = "toString(error_measures, decimal=7, sep=" + asDMLString(dmlTab) + ")"
tabDMLScript.append(print(dmlConcat(asDMLString(header), errorMeasures)))
}
// Appends DML corresponding to source and externalFunction statements.
def appendHeaders(net: CaffeNetwork, solver: CaffeSolver, isTraining: Boolean): Unit = {
// Append source statements for layers as well as solver
source(net, solver, if (isTraining) Array[String]("l1_reg") else null)
source(net, solver, if (isTraining) Array[String]("l2_reg") else null)
source(dmlScript, numTabs, "util", Caffe2DML.nnDir)
if (isTraining) {
// Append external built-in function headers:
// 1. update_nesterov external built-in function header
if (Caffe2DML.USE_NESTEROV_UDF) {
tabDMLScript.append(
"update_nesterov = externalFunction(matrix[double] X, matrix[double] dX, double lr, double mu, matrix[double] v, double lambda) return (matrix[double] X, matrix[double] v) implemented in (classname=\"org.apache.sysml.udf.lib.SGDNesterovUpdate\",exectype=\"mem\"); \n"
)
}
}
}
def readMatrix(varName: String, cmdLineVar: String): Unit = {
val pathVar = varName + "_path"
assign(tabDMLScript, pathVar, ifdef(cmdLineVar))
    // Uncomment the following lines if we want the user to pass the format
// val formatVar = varName + "_fmt"
// assign(tabDMLScript, formatVar, ifdef(cmdLineVar + "_fmt", "\"csv\""))
// assign(tabDMLScript, varName, "read(" + pathVar + ", format=" + formatVar + ")")
assign(tabDMLScript, varName, "read(" + pathVar + ")")
}
def readInputData(net: CaffeNetwork, isTraining: Boolean, performOneHotEncoding:Boolean): Unit = {
// Read and convert to one-hot encoding
readMatrix("X_full", "$X")
if (isTraining) {
readMatrix("y_full", "$y")
tabDMLScript.append(Caffe2DML.numImages).append(" = nrow(y_full)\n")
if(performOneHotEncoding) {
tabDMLScript.append("# Convert to one-hot encoding (Assumption: 1-based labels) \n")
tabDMLScript.append("y_full = table(seq(1," + Caffe2DML.numImages + ",1), y_full, " + Caffe2DML.numImages + ", " + Utils.numClasses(net) + ")\n")
}
} else {
tabDMLScript.append(Caffe2DML.numImages + " = nrow(X_full)\n")
}
}
def initWeights(net: CaffeNetwork, solver: CaffeSolver, readWeights: Boolean): Unit =
initWeights(net, solver, readWeights, new HashSet[String]())
def initWeights(net: CaffeNetwork, solver: CaffeSolver, readWeights: Boolean, layersToIgnore: HashSet[String]): Unit = {
tabDMLScript.append("weights = ifdef($weights, \" \")\n")
// Initialize the layers and solvers
tabDMLScript.append("# Initialize the layers and solvers\n")
net.getLayers.map(layer => net.getCaffeLayer(layer).init(tabDMLScript))
if (readWeights) {
// Loading existing weights. Note: keeping the initialization code in case the layer wants to initialize non-weights and non-bias
tabDMLScript.append("# Load the weights. Note: keeping the initialization code in case the layer wants to initialize non-weights and non-bias\n")
val allLayers = net.getLayers.filter(l => !layersToIgnore.contains(l)).map(net.getCaffeLayer(_))
allLayers.filter(_.weight != null).map(l => tabDMLScript.append(readWeight(l.weight, l.param.getName + "_weight.mtx")))
allLayers.filter(_.extraWeight != null).map(l => tabDMLScript.append(readWeight(l.extraWeight, l.param.getName + "_extra_weight.mtx")))
allLayers.filter(_.bias != null).map(l => tabDMLScript.append(readWeight(l.bias, l.param.getName + "_bias.mtx")))
}
net.getLayers.map(layer => solver.init(tabDMLScript, net.getCaffeLayer(layer)))
}
def getLossLayers(net: CaffeNetwork): List[IsLossLayer] = {
val lossLayers = net.getLayers.filter(layer => net.getCaffeLayer(layer).isInstanceOf[IsLossLayer]).map(layer => net.getCaffeLayer(layer).asInstanceOf[IsLossLayer])
if (lossLayers.length != 1)
throw new DMLRuntimeException(
"Expected exactly one loss layer, but found " + lossLayers.length + ":" + net.getLayers.filter(layer => net.getCaffeLayer(layer).isInstanceOf[IsLossLayer])
)
lossLayers
}
def updateMeanVarianceForBatchNorm(net: CaffeNetwork, value: Boolean): Unit =
net.getLayers.filter(net.getCaffeLayer(_).isInstanceOf[BatchNorm]).map(net.getCaffeLayer(_).asInstanceOf[BatchNorm].update_mean_var = value)
} | deroneriksson/systemml | src/main/scala/org/apache/sysml/api/dl/DMLGenerator.scala | Scala | apache-2.0 | 17,292 |
package akka
import akka.actor.{Actor, ActorSystem, Props}
import scala.concurrent.duration._
/**
* Created by Om Prakash C on 20-06-2017.
*/
object SchedulerExample extends App {
case object Count
class SchedulerActor extends Actor {
var n = 0
def receive = {
case Count =>
n += 1
println(n)
}
}
val system = ActorSystem("SimpleSystem")
val actor = system.actorOf(Props[SchedulerActor], "SimpleActor1")
implicit val ec = system.dispatcher
actor ! Count
system.scheduler.scheduleOnce(1.seconds)(actor ! Count)
val can = system.scheduler.schedule(0.seconds, 100.millis)(actor ! Count)
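  // `schedule` returns a Cancellable; after ~2 seconds we cancel the recurring tick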
Thread.sleep(2000)
can.cancel()
system.terminate()
}
| comprakash/learning-scala | concurrency/src/main/scala/akka/SchedulerExample.scala | Scala | gpl-3.0 | 707 |
package org.nisshiee.crowd4s
case class CrowdConnection (
urlPrefix: String
,appname: String
,password: String
)
| nisshiee/crowd4s | src/main/scala/types/CrowdConnection.scala | Scala | mit | 121 |
/*
* Copyright 2019 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.play.frontend.filters
import akka.stream.Materializer
import org.scalatest.concurrent.Eventually
import org.scalatest.mock.MockitoSugar
import org.scalatest.{Matchers, OptionValues, WordSpecLike}
import org.slf4j.Logger
import play.api.LoggerLike
import play.api.mvc._
import play.api.test.{DefaultAwaitTimeout, FutureAwaits}
import scala.concurrent.Future
class LoggingFilterSpec
extends WordSpecLike
with MockitoSugar
with Matchers
with OptionValues
with FutureAwaits
with DefaultAwaitTimeout
with Eventually {
class LoggingFilterTest(loggerIn: LoggerLike, controllerNeedsLogging: Boolean)(implicit val mat: Materializer)
extends LoggingFilter {
override def logger = loggerIn
override def controllerNeedsLogging(controllerName: String): Boolean = controllerNeedsLogging
}
"the LoggingFilter should" should {
def buildFakeLogger() = new LoggerLike {
var lastInfoMessage: Option[String] = None
override val logger: Logger = new FakeLogger {
override def isInfoEnabled = true
override def info(s: String): Unit =
lastInfoMessage = Some(s)
}
}
def requestWith(loggingFilter: LoggingFilter, someTags: Map[String, String] = Map()) =
loggingFilter.apply(rh => Future.successful(Results.NoContent))(new DummyRequestHeader() {
override def tags = someTags
})
"log when a requests' path matches a controller which is configured to log" in {
val fakeLogger = buildFakeLogger()
implicit val mat: Materializer = mock[Materializer]
val loggingFilter = new LoggingFilterTest(fakeLogger, true)
await(requestWith(loggingFilter))
eventually {
fakeLogger.lastInfoMessage.value.length should be > 0
}
}
"not log when a requests' path does not match a controller which is not configured to log" in {
val fakeLogger = buildFakeLogger()
implicit val mat: Materializer = mock[Materializer]
val loggingFilter = new LoggingFilterTest(fakeLogger, false)
await(requestWith(loggingFilter, Map(play.routing.Router.Tags.ROUTE_CONTROLLER -> "exists")))
fakeLogger.lastInfoMessage shouldBe None
}
}
}
| hmrc/frontend-bootstrap | src/test/scala/uk/gov/hmrc/play/frontend/filters/LoggingFilterSpec.scala | Scala | apache-2.0 | 2,903 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package base
package patterns
import com.intellij.psi.PsiElement
import com.intellij.psi.tree.TokenSet
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScBlock, ScExpression, ScGuard}
/**
* @author Alexander Podkhalyuzin
* Date: 28.02.2008
*/
trait ScCaseClause extends ScalaPsiElement {
def pattern: Option[ScPattern] = findChild[ScPattern]
def expr: Option[ScExpression] = findChild[ScExpression]
def guard: Option[ScGuard] = findChild[ScGuard]
def funType: Option[PsiElement] = {
val result = getNode.getChildren(TokenSet.create(ScalaTokenTypes.tFUNTYPE,
ScalaTokenTypes.tFUNTYPE_ASCII))
if (result.length != 1) None
else Some(result(0).getPsi)
}
override protected def acceptScala(visitor: ScalaElementVisitor): Unit = {
visitor.visitCaseClause(this)
}
}
object ScCaseClause {
def unapply(e: ScCaseClause): Option[(Option[ScPattern], Option[ScGuard], Option[ScExpression])] =
Option(e).map(e => (e.pattern, e.guard, e.expr))
implicit class ScCaseClauseExt(private val cc: ScCaseClause) extends AnyVal {
def resultExpr: Option[ScExpression] =
cc.expr match {
case Some(block: ScBlock) => block.resultExpression
case _ => None
}
}
} | JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/api/base/patterns/ScCaseClause.scala | Scala | apache-2.0 | 1,360 |
package s_mach.validate.impl
import s_mach.validate._
import scala.reflect.ClassTag
case class ValueClassValidator[V <: IsValueClass[A],A](
va: Validator[A]
)(implicit
ca: ClassTag[A],
cv: ClassTag[V]
) extends ValidatorImpl[V] {
def apply(a: V) = va(a.underlying)
def rules = va.rules
def descendantSchema = va.descendantSchema
// Note: using underlying type in Schema for better output in explain
override val schema = Schema(Nil,ca.toString(),(1,1))
}
| S-Mach/s_mach.validate | validate-core/src/main/scala/s_mach/validate/impl/ValueClassValidator.scala | Scala | mit | 474 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc
package doc
package model
import base._
/** This trait extracts all required information for documentation from compilation units */
trait MemberLookup extends base.MemberLookupBase {
thisFactory: ModelFactory =>
import global._
import definitions.{ NothingClass, AnyClass, AnyValClass, AnyRefClass }
override def internalLink(sym: Symbol, site: Symbol): Option[LinkTo] =
findTemplateMaybe(sym) match {
case Some(tpl) => Some(LinkToTpl(tpl))
case None =>
findTemplateMaybe(site) flatMap { inTpl =>
inTpl.members find (_.asInstanceOf[EntityImpl].sym == sym) map (LinkToMember(_, inTpl))
}
}
override def chooseLink(links: List[LinkTo]): LinkTo = {
val mbrs = links.collect {
case lm@LinkToMember(mbr: MemberEntity, _) => (mbr, lm)
}
if (mbrs.isEmpty)
links.head
else
mbrs.min(Ordering[MemberEntity].on[(MemberEntity, LinkTo)](_._1))._2
}
override def toString(link: LinkTo) = link match {
case LinkToTpl(tpl: EntityImpl) => tpl.sym.toString
case LinkToMember(mbr: EntityImpl, inTpl: EntityImpl) =>
mbr.sym.signatureString + " in " + inTpl.sym.toString
case _ => link.toString
}
override def findExternalLink(sym: Symbol, name: String): Option[LinkTo] = {
val sym1 =
if (sym == AnyClass || sym == AnyRefClass || sym == AnyValClass || sym == NothingClass)
definitions.ScalaPackageClass.info.member(newTermName("package"))
else if (sym.hasPackageFlag)
/* Get package object which has associatedFile ne null */
sym.info.member(newTermName("package"))
else sym
def classpathEntryFor(s: Symbol): Option[String] = {
Option(s.associatedFile).flatMap(_.underlyingSource).map { src =>
val path = src.canonicalPath
if(path.endsWith(".class")) { // Individual class file -> Classpath entry is root dir
val nesting = s.ownerChain.count(_.hasPackageFlag)
if(nesting > 0) {
val p = 0.until(nesting).foldLeft(src) {
case (null, _) => null
case (f, _) => f.container
}
if(p eq null) path else p.canonicalPath
} else path
} else path // JAR file (and fallback option)
}
}
classpathEntryFor(sym1) flatMap { path =>
settings.extUrlMapping get path map { url => {
LinkToExternalTpl(name, url, makeTemplate(sym))
}
}
}
}
override def warnNoLink = !settings.docNoLinkWarnings.value
}
| martijnhoekstra/scala | src/scaladoc/scala/tools/nsc/doc/model/MemberLookup.scala | Scala | apache-2.0 | 2,823 |
package com.jantvrdik.scala.app
import scalafx.event.ActionEvent
import scalafx.scene.canvas.Canvas
import scalafx.scene.control.TextField
import scalafx.scene.input.MouseButton
import scalafx.scene.paint.Color
import scalafxml.core.macros.sfxml
@sfxml
class MainWindowController(
private val baseCanvas: Canvas,
private val topCanvas: Canvas,
private val playerCanvas: Canvas,
private val dimensionsInput: TextField,
private val winLengthInput: TextField,
private val playersCountInput: TextField
) {
// init
val parent = baseCanvas.parent().asInstanceOf[javafx.scene.layout.Region]
baseCanvas.widthProperty().bind(parent.widthProperty())
baseCanvas.heightProperty().bind(parent.heightProperty())
topCanvas.widthProperty().bind(parent.widthProperty())
topCanvas.heightProperty().bind(parent.heightProperty())
val players = Vector(Player(Color.Red), Player(Color.Blue), Player(Color.Green), Player(Color.Black), Player(Color.Magenta))
def handleStartButtonClick(event: ActionEvent) = {
val settings = loadSettings
val model = new GameModel(settings)
val canvas = new GameCanvas(settings, baseCanvas, topCanvas)
model.onTurn = (player, pos) => {
canvas.drawMark(pos, player.color)
showCurrentPlayer(model)
}
model.onVictory = (player, row) => {
canvas.drawHighlights(row)
}
canvas.onMousePressed = (event, pos) => {
if (event.button == MouseButton.SECONDARY) {
canvas.drawNeighbours(model.neighbors(pos))
}
}
canvas.onMouseReleased = (event, pos) => {
canvas.drawNeighbours(List.empty)
if (event.button == MouseButton.PRIMARY) {
model.select(pos)
}
}
canvas.onMouseDragged = (event, pos) => {
if (event.secondaryButtonDown) {
canvas.drawNeighbours(model.neighbors(pos))
}
}
canvas.redraw()
showCurrentPlayer(model)
topCanvas.requestFocus()
}
private def loadDimensions = {
try {
var dim = dimensionsInput.text().split("\\\\*").map(_.trim.toInt).toVector
if (dim.exists(_ <= 0)) throw new NumberFormatException
if (dim.length < 2) dim = dim :+ 1
dim
} catch {
case _: NumberFormatException => Vector(7, 7, 7)
}
}
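  // e.g. "7 * 7" -> Vector(7, 7); "7" -> Vector(7, 1); malformed or non-positive
  // input falls back to the default Vector(7, 7, 7)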
private def loadWinLength(dim: Dimensions) = {
try {
Math.max(1, Math.min(dim.max, winLengthInput.text().toInt))
} catch {
case _: NumberFormatException => Math.min(dim.max, 5)
}
}
private def loadPlayersCount(players: Vector[Player]) = {
try {
Math.max(1, Math.min(players.length, playersCountInput.text().toInt))
} catch {
case _: NumberFormatException => Math.min(players.length, 3)
}
}
private def loadSettings = {
val dim = loadDimensions
val winLength = loadWinLength(dim)
val playersCount = loadPlayersCount(players)
dimensionsInput.text = dim.mkString(" * ")
winLengthInput.text = winLength.toString
playersCountInput.text = playersCount.toString
GameSettings(dim, winLength, players.take(playersCount))
}
private def showCurrentPlayer(model: GameModel) {
val context = playerCanvas.graphicsContext2D
context.setFill(model.currentPlayer.color)
context.clearRect(0, 0, playerCanvas.width(), playerCanvas.height())
context.fillOval(0, 0, playerCanvas.width(), playerCanvas.height())
}
}
| JanTvrdik/pisqworks | src/main/scala/com/jantvrdik/scala/app/MainWindowController.scala | Scala | mit | 3,540 |
import sbt._
object Build extends sbt.Build {
lazy val root = Project(
id = "asterisque",
base = file(".")
) aggregate(ri)
lazy val ri = Project(
id = "asterisque-ri",
base = file("ri")
)
}
| torao/asterisque | core-scala/project/Build.scala | Scala | apache-2.0 | 205 |
package play.api.libs {
/**
* The Iteratee monad provides strict, safe, and functional I/O.
*/
package object iteratee {
type K[E, A] = Input[E] => Iteratee[E, A]
}
}
package play.api.libs.iteratee {
private[iteratee] object internal {
import play.api.libs.iteratee.Iteratee
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.control.NonFatal
/**
* Executes code immediately on the current thread, returning a successful or failed Future depending on
* the result.
*
* TODO: Rename to `tryFuture`.
*/
def eagerFuture[A](body: => A): Future[A] = try Future.successful(body) catch { case NonFatal(e) => Future.failed(e) }
/**
* Executes code in the given ExecutionContext, flattening the resulting Future.
*/
def executeFuture[A](body: => Future[A])(implicit ec: ExecutionContext): Future[A] = {
Future {
body
}(ec /* Future.apply will prepare */ ).flatMap(identityFunc.asInstanceOf[Future[A] => Future[A]])(Execution.overflowingExecutionContext)
}
/**
* Executes code in the given ExecutionContext, flattening the resulting Iteratee.
*/
def executeIteratee[A, E](body: => Iteratee[A, E])(implicit ec: ExecutionContext): Iteratee[A, E] = Iteratee.flatten(Future(body)(ec))
/**
* Prepare an ExecutionContext and pass it to the given function, returning the result of
* the function.
*
* Makes it easy to write single line functions with a prepared ExecutionContext, eg:
* {{{
* def myFunc(implicit ec: ExecutionContext) = prepared(ec)(pec => ...)
* }}}
*/
def prepared[A](ec: ExecutionContext)(f: ExecutionContext => A): A = {
val pec = ec.prepare()
f(pec)
}
val identityFunc: (Any => Any) = (x: Any) => x
}
}
| michaelahlers/team-awesome-wedding | vendor/play-2.2.1/framework/src/iteratees/src/main/scala/play/api/libs/iteratee/package.scala | Scala | mit | 1,831 |
/*
* Copyright 2017 Sumo Logic
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ws.epigraph.java.service.assemblers
import ws.epigraph.compiler.{CField, CType, CTypeKind}
import ws.epigraph.java.JavaGenNames.{jn, ln, lqn2}
import ws.epigraph.java.NewlineStringInterpolator.NewlineHelper
import ws.epigraph.java.service.projections.req.output.ReqOutputRecordModelProjectionGen
import ws.epigraph.java.service.projections.req.{ReqFieldProjectionGen, ReqProjectionGen}
import ws.epigraph.java.{GenContext, JavaGen, JavaGenUtils}
import scala.collection.immutable.ListMap
/**
* @author <a href="mailto:konstantin.sobolev@gmail.com">Konstantin Sobolev</a>
*/
class RecordAsmGen(
override val g: ReqOutputRecordModelProjectionGen,
val ctx: GenContext) extends JavaGen with ModelAsmGen {
override protected type G = ReqOutputRecordModelProjectionGen
lazy val fieldAssemblersGen: FieldAssemblersGen = new FieldAssemblersGen(this, ctx)
override def children = Iterable(fieldAssemblersGen)
import Imports._
case class FieldParts(field: CField, fieldGen: ReqProjectionGen) extends Comparable[FieldParts] {
def fieldName: String = jn(field.name)
def fieldType: CType = field.typeRef.resolved
def isEntity: Boolean = fieldType.kind == CTypeKind.ENTITY
val fieldProjection: importManager.ImportedName = importManager.use(fieldGen.fullClassName)
val assemblerResultType: importManager.ImportedName = importManager.use(
lqn2(
fieldType,
g.namespace.toString
)
)
def asmResultValueType = s"$assemblerResultType${ if (isEntity) "" else ".Value" }"
def fieldAsmType: String = s"$asm<? super D, ? super $fieldProjection, ? extends $asmResultValueType>"
def fbf: String = field.name + "FieldAsm"
def getter: String = fieldName + "()"
def setter: String = "set" + JavaGenUtils.up(field.name) + (if (isEntity) "" else "_")
def dispatchFieldInit: String = s"if (p.$getter != null) b.$setter($fbf.assemble(dto, p.$getter, ctx));"
def javadoc: String = s"$fbf {@code $fieldName} field assembler"
override def compareTo(o: FieldParts): Int = field.name.compareTo(o.field.name)
}
private def fieldGenerators(g: G): Map[String, (CField, ReqFieldProjectionGen)] =
g.parentClassGenOpt.map(pg => fieldGenerators(pg.asInstanceOf[G])).getOrElse(ListMap()) ++
g.fieldGenerators.map { case (f, p) => f.name -> (f, p) }
private val fps: Seq[FieldParts] = fieldGenerators(g).map { case (_, (f, fg)) =>
FieldParts(f, fg.dataProjectionGen)
}.toSeq.sorted
def fieldPart(fieldName: String): Option[FieldParts] = fps.find(_.field.name == fieldName)
private val obj = importManager.use("java.lang.Object")
protected override lazy val defaultBuild: String = {
/*@formatter:off*/sn"""\\
$asmCtx.Key key = new $asmCtx.Key(dto, p);
$obj visited = ctx.visited.get(key);
if (visited != null)
return ($t.Value) visited;
else {
$t.Builder b = $t.create();
ctx.visited.put(key, b.asValue());
${fps.map { fp => s" if (p.${fp.getter} != null) b.${fp.setter}(${fp.fbf}.assemble(dto, p.${fp.getter}, ctx));" }.mkString("\\n")}
${if (hasMeta) s" b.setMeta(metaAsm.assemble(dto, p.meta(), ctx));\\n" else ""}\\
return b.asValue();
}
"""/*@formatter:on*/
}
override protected def generate: String = {
val fieldAssembersImp = importManager.use(fieldAssemblersGen.fullClassName)
closeImports()
def shortFieldAsm(fp: FieldParts) = s"fieldAssemblers::${fieldAssemblersGen.methodName(fp.field.name)}"
// need this in case there's only one field asm, otherwise Java can't figure out which constructor to call
def longFieldAsm(fp: FieldParts) = /*@formatter:off*/sn"""\\
new $asm<D, ${fp.fieldProjection}, ${fp.asmResultValueType}>() {
public ${fp.asmResultValueType} assemble(D dto, ${fp.fieldProjection} p, $asmCtx ctx) {
return fieldAssemblers.${fieldAssemblersGen.methodName(fp.field.name)}(dto, p, ctx);
}
}"""/*@formatter:on*/
def fieldAsm(fp: FieldParts, numFields: Int) = if (numFields == 1) longFieldAsm(fp) else shortFieldAsm(fp)
/*@formatter:off*/sn"""\\
${JavaGenUtils.topLevelComment}
package ${g.namespace};
${JavaGenUtils.generateImports(importManager.imports)}
/**
* Value assembler for {@code ${ln(cType)}} type, driven by request output projection
*/
${JavaGenUtils.generatedAnnotation(this)}
public class $shortClassName<D> implements $asm<D, $notNull$projectionName, $notNull$t.Value> {
${if (hasTails) s" private final $notNull$func<? super D, ? extends Type> typeExtractor;\\n" else "" }\\
//field assemblers
${fps.map { fp => s" private final $notNull${fp.fieldAsmType} ${fp.fbf};"}.mkString("\\n") }\\
${if (hasTails) tps.map { tp => s" private final $notNull${tp.assemblerType} ${tp.assembler};"}.mkString("\\n //tail assemblers\\n","\\n","") else "" }\\
${if (hasMeta) s" //meta assembler\\n private final $notNull$metaAsmType metaAsm;" else ""}
/**
* Asm constructor from individual field assemblers
*
${if (hasTails) s" * @param typeExtractor data type extractor, used to determine DTO type\\n" else ""}\\
${fps.map { fp => s" * @param ${fp.javadoc}"}.mkString("\\n") }\\
${if (hasTails) tps.map { tp => s" * @param ${tp.javadoc}"}.mkString("\\n","\\n","") else "" }\\
${if (hasMeta) s"\\n * @param metaAsm metadata assembler" else ""}
*/
public $shortClassName(
${if (hasTails) s" $notNull$func<? super D, ? extends Type> typeExtractor,\\n" else "" }\\
${fps.map { fp => s" $notNull${fp.fieldAsmType} ${fp.fbf}"}.mkString(",\\n") }\\
${if (hasTails) tps.map { tp => s" $notNull${tp.assemblerType} ${tp.assembler}"}.mkString(",\\n", ",\\n", "") else ""}\\
${if (hasMeta) s",\\n $notNull$metaAsmType metaAsm" else ""}
) {
${if (hasTails) s" this.typeExtractor = typeExtractor;\\n" else "" }\\
${fps.map { fp => s" this.${fp.fbf} = ${fp.fbf};"}.mkString("\\n") }\\
${if (hasTails) tps.map { tp => s" this.${tp.assembler} = ${tp.assembler};"}.mkString("\\n","\\n","") else ""}\\
${if (hasMeta) s"\\n this.metaAsm = metaAsm;" else ""}
}
/**
* Asm constructor from the field assemblers supplier object
*
${if (hasTails) s" * @param typeExtractor data type extractor, used to determine DTO type\\n" else ""}\\
* @param fieldAssemblers field assemblers supplier object
${if (hasTails) tps.map { tp => s" * @param ${tp.javadoc}"}.mkString("","\\n","\\n") else "" }\\
${if (hasMeta) s"\\n * @param metaAsm metadata assembler\\n" else ""}\\
*/
public $shortClassName(
${if (hasTails) s" $notNull$func<? super D, ? extends Type> typeExtractor,\\n" else "" }\\
      $notNull$fieldAssemblersImp<D> fieldAssemblers\\
${if (hasTails) tps.map { tp => s"$notNull${tp.assemblerType} ${tp.assembler}"}.mkString(",\\n ", ",\\n ","") else ""}\\
${if (hasMeta) s",\\n $notNull$metaAsmType metaAsm" else ""}
) {
this(\\
${if (hasTails) s"\\n typeExtractor," else "" }\\
${fps.map {fp => s"\\n ${fieldAsm(fp, fps.size)}"}.mkString("", ",", if(hasTails||hasMeta) "," else "")}\\
${if (hasTails) tps.map { tp => s" ${tp.assembler}"}.mkString("\\n", ",\\n", if(hasMeta) "," else "") else ""}\\
${if (hasMeta) "\\n metaAsm" else ""}
);
}
/**
* Assembles {@code $t} value from DTO
*
* @param dto data transfer object
* @param p request projection
* @param ctx assembly context
*
* @return {@code $t} value object
*/
@Override
public $notNull$t.Value assemble(D dto, $notNull$projectionName p, $notNull$asmCtx ctx) {
if (dto == null)
return $t.type.createValue($errValue.NULL);
else ${if (hasTails) tailsBuild else nonTailsBuild}
}
}"""/*@formatter:on*/
}
}
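// Illustrative sketch of the two constructor forms emitted above (hypothetical generated Java,
// not actual codegen output). With several fields a method reference per field suffices:
//   new FooAsm<>(fieldAssemblers::name, fieldAssemblers::age);
// with a single field a bare method reference could target either one-argument constructor,
// so the generator emits the explicit anonymous Asm class instead:
//   new FooAsm<>(new Asm<D, NameProjection, Name.Value>() { ... });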
| SumoLogic/epigraph | java/codegen/src/main/scala/ws/epigraph/java/service/assemblers/RecordAsmGen.scala | Scala | apache-2.0 | 8,179 |
package de.tu_berlin.formic.datastructure.json
/**
* @author Ronny Bräunlich
*/
case class JsonPath(path: String*) {
  /** Returns the path without its first segment. */
  def dropFirstElement = JsonPath(path.drop(1): _*)
}
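// A minimal usage sketch (hypothetical path segments): dropping the head segment is the typical
// step when descending into a JSON tree one level at a time.
object JsonPathExample extends App {
  val full = JsonPath("person", "address", "street")
  val rest = full.dropFirstElement // JsonPath("address", "street")
  println(rest.path.mkString("/")) // prints: address/street
}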
| rbraeunlich/formic | json/shared/src/main/scala/de/tu_berlin/formic/datastructure/json/JsonPath.scala | Scala | apache-2.0 | 177 |
/* Copyright 2009-2016 EPFL, Lausanne */
package leon
package solvers
package unrolling
import leon.utils._
import purescala.Common._
import purescala.Definitions._
import purescala.Extractors._
import purescala.Constructors._
import purescala.Expressions._
import purescala.ExprOps._
import purescala.Types._
import purescala.TypeOps._
import purescala.Quantification.{QuantificationTypeMatcher => QTM, QuantificationMatcher => QM, Domains}
import evaluators._
import Instantiation._
import Template._
import scala.collection.mutable.{Map => MutableMap, Set => MutableSet, Stack => MutableStack, Queue}
case class Matcher[T](caller: T, tpe: TypeTree, args: Seq[Arg[T]], encoded: T) {
override def toString = caller + args.map {
case Right(m) => m.toString
case Left(v) => v.toString
}.mkString("(",",",")")
def substitute(substituter: T => T, matcherSubst: Map[T, Matcher[T]]): Matcher[T] = copy(
caller = substituter(caller),
args = args.map {
case Left(v) => matcherSubst.get(v) match {
case Some(m) => Right(m)
case None => Left(substituter(v))
}
case Right(m) => Right(m.substitute(substituter, matcherSubst))
},
encoded = substituter(encoded)
)
}
class QuantificationTemplate[T](
val quantificationManager: QuantificationManager[T],
val pathVar: (Identifier, T),
val qs: (Identifier, T),
val q2s: (Identifier, T),
val insts: (Identifier, T),
val guardVar: T,
val quantifiers: Seq[(Identifier, T)],
val condVars: Map[Identifier, T],
val exprVars: Map[Identifier, T],
val condTree: Map[Identifier, Set[Identifier]],
val clauses: Seq[T],
val blockers: Map[T, Set[TemplateCallInfo[T]]],
val applications: Map[T, Set[App[T]]],
val matchers: Map[T, Set[Matcher[T]]],
val lambdas: Seq[LambdaTemplate[T]],
val structure: Forall,
val dependencies: Map[Identifier, T],
val forall: Forall,
stringRepr: () => String) {
lazy val start = pathVar._2
lazy val key: (Forall, Seq[T]) = (structure, {
var cls: Seq[T] = Seq.empty
purescala.ExprOps.preTraversal {
case Variable(id) => cls ++= dependencies.get(id)
case _ =>
} (structure)
cls
})
def substitute(substituter: T => T, matcherSubst: Map[T, Matcher[T]]): QuantificationTemplate[T] = {
new QuantificationTemplate[T](
quantificationManager,
pathVar._1 -> substituter(start),
qs,
q2s,
insts,
guardVar,
quantifiers,
condVars,
exprVars,
condTree,
clauses.map(substituter),
blockers.map { case (b, fis) =>
substituter(b) -> fis.map(fi => fi.copy(
args = fi.args.map(_.substitute(substituter, matcherSubst))
))
},
applications.map { case (b, apps) =>
substituter(b) -> apps.map(app => app.copy(
caller = substituter(app.caller),
args = app.args.map(_.substitute(substituter, matcherSubst))
))
},
matchers.map { case (b, ms) =>
substituter(b) -> ms.map(_.substitute(substituter, matcherSubst))
},
lambdas.map(_.substitute(substituter, matcherSubst)),
structure,
dependencies.map { case (id, value) => id -> substituter(value) },
forall,
stringRepr
)
}
private lazy val str : String = stringRepr()
override def toString : String = str
}
object QuantificationTemplate {
def apply[T](
encoder: TemplateEncoder[T],
quantificationManager: QuantificationManager[T],
pathVar: (Identifier, T),
qs: (Identifier, T),
q2: Identifier,
inst: Identifier,
guard: Identifier,
quantifiers: Seq[(Identifier, T)],
condVars: Map[Identifier, T],
exprVars: Map[Identifier, T],
condTree: Map[Identifier, Set[Identifier]],
guardedExprs: Map[Identifier, Seq[Expr]],
lambdas: Seq[LambdaTemplate[T]],
baseSubstMap: Map[Identifier, T],
dependencies: Map[Identifier, T],
proposition: Forall
): QuantificationTemplate[T] = {
val q2s: (Identifier, T) = q2 -> encoder.encodeId(q2)
val insts: (Identifier, T) = inst -> encoder.encodeId(inst)
val guards: (Identifier, T) = guard -> encoder.encodeId(guard)
val (clauses, blockers, applications, functions, matchers, templateString) =
Template.encode(encoder, pathVar, quantifiers, condVars, exprVars, guardedExprs, lambdas, Seq.empty,
substMap = baseSubstMap + q2s + insts + guards + qs)
val (structuralQuant, deps) = normalizeStructure(proposition)
val keyDeps = deps.map { case (id, dep) => id -> encoder.encodeExpr(dependencies)(dep) }
new QuantificationTemplate[T](quantificationManager,
pathVar, qs, q2s, insts, guards._2, quantifiers, condVars, exprVars, condTree,
clauses, blockers, applications, matchers, lambdas, structuralQuant, keyDeps, proposition,
() => "Template for " + proposition + " is :\\n" + templateString())
}
}
class QuantificationManager[T](encoder: TemplateEncoder[T]) extends LambdaManager[T](encoder) {
private[solvers] val quantifications = new IncrementalSeq[MatcherQuantification]
private val instCtx = new InstantiationContext
private val ignoredMatchers = new IncrementalSeq[(Int, Set[T], Matcher[T])]
private val ignoredSubsts = new IncrementalMap[MatcherQuantification, MutableSet[(Int, Set[T], Map[T,Arg[T]])]]
private val handledSubsts = new IncrementalMap[MatcherQuantification, MutableSet[(Set[T], Map[T,Arg[T]])]]
private val lambdaAxioms = new IncrementalSet[(LambdaStructure[T], Seq[(Identifier, T)])]
private val templates = new IncrementalMap[(Expr, Seq[T]), T]
override protected def incrementals: List[IncrementalState] =
List(quantifications, instCtx, ignoredMatchers, ignoredSubsts,
handledSubsts, lambdaAxioms, templates) ++ super.incrementals
private var currentGen = 0
private sealed abstract class MatcherKey(val tpe: TypeTree)
private case class CallerKey(caller: T, tt: TypeTree) extends MatcherKey(tt)
private case class LambdaKey(lambda: Lambda, tt: TypeTree) extends MatcherKey(tt)
private case class TypeKey(tt: TypeTree) extends MatcherKey(tt)
private def matcherKey(caller: T, tpe: TypeTree): MatcherKey = tpe match {
case ft: FunctionType if knownFree(ft)(caller) => CallerKey(caller, tpe)
case _: FunctionType if byID.isDefinedAt(caller) => LambdaKey(byID(caller).structure.lambda, tpe)
case _ => TypeKey(tpe)
}
@inline
private def correspond(qm: Matcher[T], m: Matcher[T]): Boolean =
correspond(qm, m.caller, m.tpe)
private def correspond(qm: Matcher[T], caller: T, tpe: TypeTree): Boolean = {
val qkey = matcherKey(qm.caller, qm.tpe)
val key = matcherKey(caller, tpe)
qkey == key || (qkey.tpe == key.tpe && (qkey.isInstanceOf[TypeKey] || key.isInstanceOf[TypeKey]))
}
class VariableNormalizer {
private val varMap: MutableMap[TypeTree, Seq[T]] = MutableMap.empty
private val varSet: MutableSet[T] = MutableSet.empty
def normalize(ids: Seq[Identifier]): Seq[T] = {
val mapping = ids.groupBy(id => bestRealType(id.getType)).flatMap { case (tpe, idst) =>
val prev = varMap.get(tpe) match {
case Some(seq) => seq
case None => Seq.empty
}
if (prev.size >= idst.size) {
idst zip prev.take(idst.size)
} else {
val (handled, newIds) = idst.splitAt(prev.size)
val uIds = newIds.map(id => id -> encoder.encodeId(id))
varMap(tpe) = prev ++ uIds.map(_._2)
varSet ++= uIds.map(_._2)
(handled zip prev) ++ uIds
}
}.toMap
ids.map(mapping)
}
def normalSubst(qs: Seq[(Identifier, T)]): Map[T, T] = {
(qs.map(_._2) zip normalize(qs.map(_._1))).toMap
}
def contains(idT: T): Boolean = varSet(idT)
def get(tpe: TypeTree): Option[Seq[T]] = varMap.get(tpe)
}
private val abstractNormalizer = new VariableNormalizer
private val concreteNormalizer = new VariableNormalizer
def isQuantifier(idT: T): Boolean = abstractNormalizer.contains(idT)
override def assumptions: Seq[T] = super.assumptions ++
quantifications.collect { case q: Quantification => q.currentQ2Var }.toSeq
def typeInstantiations: Map[TypeTree, Matchers] = instCtx.map.instantiations.collect {
case (TypeKey(tpe), matchers) => tpe -> matchers
}
def lambdaInstantiations: Map[Lambda, Matchers] = instCtx.map.instantiations.collect {
case (LambdaKey(lambda, tpe), matchers) => lambda -> (matchers ++ instCtx.map.get(TypeKey(tpe)).toMatchers)
}
def partialInstantiations: Map[T, Matchers] = instCtx.map.instantiations.collect {
case (CallerKey(caller, tpe), matchers) => caller -> (matchers ++ instCtx.map.get(TypeKey(tpe)).toMatchers)
}
private def maxDepth(m: Matcher[T]): Int = 1 + (0 +: m.args.map {
case Right(ma) => maxDepth(ma)
case _ => 0
}).max
private def totalDepth(m: Matcher[T]): Int = 1 + m.args.map {
case Right(ma) => totalDepth(ma)
case _ => 0
}.sum
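  // Worked example (hypothetical encodings): for a nested matcher m encoding f(g(x), h(y)), i.e.
  // Matcher(f, _, Seq(Right(g(x)), Right(h(y))), _), maxDepth(m) == 2 (the longest nesting chain,
  // f -> g), while totalDepth(m) == 3 (one per matcher node: f, g and h).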
private def encodeEnablers(es: Set[T]): T =
if (es.isEmpty) trueT else encoder.mkAnd(es.toSeq.sortBy(_.toString) : _*)
private type Matchers = Set[(T, Matcher[T])]
private class Context private(ctx: Map[Matcher[T], Set[Set[T]]]) extends Iterable[(Set[T], Matcher[T])] {
def this() = this(Map.empty)
def apply(p: (Set[T], Matcher[T])): Boolean = ctx.get(p._2) match {
case None => false
case Some(blockerSets) => blockerSets(p._1) || blockerSets.exists(set => set.subsetOf(p._1))
}
def +(p: (Set[T], Matcher[T])): Context = if (apply(p)) this else {
val prev = ctx.getOrElse(p._2, Set.empty)
val newSet = prev.filterNot(set => p._1.subsetOf(set)).toSet + p._1
new Context(ctx + (p._2 -> newSet))
}
def ++(that: Context): Context = that.foldLeft(this)((ctx, p) => ctx + p)
def iterator = ctx.toSeq.flatMap { case (m, bss) => bss.map(bs => bs -> m) }.iterator
def toMatchers: Matchers = this.map(p => encodeEnablers(p._1) -> p._2).toSet
}
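  // Sketch of the subsumption rule above (hypothetical blockers b1, b2): once (Set(b1), m) is
  // recorded, the weaker pair (Set(b1, b2), m) counts as already covered since Set(b1) is a subset
  // of its blockers; conversely adding (Set(b1), m) evicts any stored superset such as Set(b1, b2).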
private class ContextMap(
private var tpeMap: MutableMap[TypeTree, Context] = MutableMap.empty,
private var funMap: MutableMap[MatcherKey, Context] = MutableMap.empty
) extends IncrementalState {
private val stack = new MutableStack[(MutableMap[TypeTree, Context], MutableMap[MatcherKey, Context])]
def clear(): Unit = {
stack.clear()
tpeMap.clear()
funMap.clear()
}
def reset(): Unit = clear()
def push(): Unit = {
stack.push((tpeMap, funMap))
tpeMap = tpeMap.clone
funMap = funMap.clone
}
def pop(): Unit = {
val (ptpeMap, pfunMap) = stack.pop()
tpeMap = ptpeMap
funMap = pfunMap
}
def +=(p: (Set[T], Matcher[T])): Unit = matcherKey(p._2.caller, p._2.tpe) match {
case TypeKey(tpe) => tpeMap(tpe) = tpeMap.getOrElse(tpe, new Context) + p
case key => funMap(key) = funMap.getOrElse(key, new Context) + p
}
def merge(that: ContextMap): this.type = {
for ((tpe, values) <- that.tpeMap) tpeMap(tpe) = tpeMap.getOrElse(tpe, new Context) ++ values
for ((caller, values) <- that.funMap) funMap(caller) = funMap.getOrElse(caller, new Context) ++ values
this
}
def get(caller: T, tpe: TypeTree): Context =
funMap.getOrElse(matcherKey(caller, tpe), new Context) ++ tpeMap.getOrElse(tpe, new Context)
def get(key: MatcherKey): Context = key match {
case TypeKey(tpe) => tpeMap.getOrElse(tpe, new Context)
case key => funMap.getOrElse(key, new Context) ++ tpeMap.getOrElse(key.tpe, new Context)
}
def instantiations: Map[MatcherKey, Matchers] =
(funMap.toMap ++ tpeMap.map { case (tpe,ms) => TypeKey(tpe) -> ms }).mapValues(_.toMatchers)
}
private class InstantiationContext private (
private var _instantiated : Context, val map : ContextMap
) extends IncrementalState {
private val stack = new MutableStack[Context]
def this() = this(new Context, new ContextMap)
def clear(): Unit = {
stack.clear()
map.clear()
_instantiated = new Context
}
def reset(): Unit = clear()
def push(): Unit = {
stack.push(_instantiated)
map.push()
}
def pop(): Unit = {
_instantiated = stack.pop()
map.pop()
}
def instantiated: Context = _instantiated
def apply(p: (Set[T], Matcher[T])): Boolean = _instantiated(p)
def corresponding(m: Matcher[T]): Context = map.get(m.caller, m.tpe)
def instantiate(blockers: Set[T], matcher: Matcher[T])(qs: MatcherQuantification*): Instantiation[T] = {
if (this(blockers -> matcher)) {
Instantiation.empty[T]
} else {
map += (blockers -> matcher)
_instantiated += (blockers -> matcher)
var instantiation = Instantiation.empty[T]
for (q <- qs) instantiation ++= q.instantiate(blockers, matcher)
instantiation
}
}
def merge(that: InstantiationContext): this.type = {
_instantiated ++= that._instantiated
map.merge(that.map)
this
}
}
private[solvers] trait MatcherQuantification {
val pathVar: (Identifier, T)
val quantifiers: Seq[(Identifier, T)]
val matchers: Set[Matcher[T]]
val allMatchers: Map[T, Set[Matcher[T]]]
val condVars: Map[Identifier, T]
val exprVars: Map[Identifier, T]
val condTree: Map[Identifier, Set[Identifier]]
val clauses: Seq[T]
val blockers: Map[T, Set[TemplateCallInfo[T]]]
val applications: Map[T, Set[App[T]]]
val lambdas: Seq[LambdaTemplate[T]]
val holds: T
val body: Expr
lazy val quantified: Set[T] = quantifiers.map(_._2).toSet
lazy val start = pathVar._2
private lazy val depth = matchers.map(maxDepth).max
private lazy val transMatchers: Set[Matcher[T]] = (for {
(b, ms) <- allMatchers.toSeq
m <- ms if !matchers(m) && maxDepth(m) <= depth
} yield m).toSet
/* Build a mapping from applications in the quantified statement to all potential concrete
* applications previously encountered. Also make sure the current `app` is in the mapping
* as other instantiations have been performed previously when the associated applications
* were first encountered.
*/
private def mappings(bs: Set[T], matcher: Matcher[T]): Set[Set[(Set[T], Matcher[T], Matcher[T])]] = {
/* 1. select an application in the quantified proposition for which the current app can
* be bound when generating the new constraints
*/
matchers.filter(qm => correspond(qm, matcher))
/* 2. build the instantiation mapping associated to the chosen current application binding */
.flatMap { bindingMatcher =>
/* 2.1. select all potential matches for each quantified application */
val matcherToInstances = matchers
.map(qm => if (qm == bindingMatcher) {
bindingMatcher -> Set(bs -> matcher)
} else {
qm -> instCtx.corresponding(qm)
}).toMap
/* 2.2. based on the possible bindings for each quantified application, build a set of
* instantiation mappings that can be used to instantiate all necessary constraints
*/
val allMappings = matcherToInstances.foldLeft[Set[Set[(Set[T], Matcher[T], Matcher[T])]]](Set(Set.empty)) {
case (mappings, (qm, instances)) => Set(instances.toSeq.flatMap {
case (bs, m) => mappings.map(mapping => mapping + ((bs, qm, m)))
} : _*)
}
allMappings
}
}
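  // Sketch (hypothetical ground matchers, blockers omitted): with quantified matchers
  // {f(x), g(x)} and an incoming ground matcher f(a) bound to f(x), step 2.1 pairs g(x) with
  // every instance seen so far, so a context containing g(a) and g(b) yields the two mappings
  // {f(x) -> f(a), g(x) -> g(a)} and {f(x) -> f(a), g(x) -> g(b)}.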
private def extractSubst(mapping: Set[(Set[T], Matcher[T], Matcher[T])]): (Set[T], Map[T,Arg[T]], Boolean) = {
var constraints: Set[T] = Set.empty
var eqConstraints: Set[(T, T)] = Set.empty
var subst: Map[T, Arg[T]] = Map.empty
var matcherEqs: Set[(T, T)] = Set.empty
def strictnessCnstr(qarg: Arg[T], arg: Arg[T]): Unit = (qarg, arg) match {
case (Right(qam), Right(am)) => (qam.args zip am.args).foreach(p => strictnessCnstr(p._1, p._2))
case _ => matcherEqs += qarg.encoded -> arg.encoded
}
for {
(bs, qm @ Matcher(qcaller, _, qargs, _), m @ Matcher(caller, _, args, _)) <- mapping
_ = constraints ++= bs
(qarg, arg) <- (qargs zip args)
_ = strictnessCnstr(qarg, arg)
} qarg match {
case Left(quant) if !quantified(quant) || subst.isDefinedAt(quant) =>
eqConstraints += (quant -> arg.encoded)
case Left(quant) if quantified(quant) =>
subst += quant -> arg
case Right(qam) =>
eqConstraints += (qam.encoded -> arg.encoded)
}
val substituter = encoder.substitute(subst.mapValues(_.encoded))
val substConstraints = constraints.filter(_ != trueT).map(substituter)
val substEqs = eqConstraints.map(p => substituter(p._1) -> p._2)
.filter(p => p._1 != p._2).map(p => encoder.mkEquals(p._1, p._2))
val enablers = substConstraints ++ substEqs
val isStrict = matcherEqs.forall(p => substituter(p._1) == p._2)
(enablers, subst, isStrict)
}
def instantiate(bs: Set[T], matcher: Matcher[T]): Instantiation[T] = {
var instantiation = Instantiation.empty[T]
for (mapping <- mappings(bs, matcher)) {
val (enablers, subst, isStrict) = extractSubst(mapping)
if (!skip(subst)) {
if (!isStrict) {
ignoredSubsts(this) += ((currentGen + 3, enablers, subst))
} else {
instantiation ++= instantiateSubst(enablers, subst, strict = true)
}
}
}
instantiation
}
def instantiateSubst(enablers: Set[T], subst: Map[T, Arg[T]], strict: Boolean = false): Instantiation[T] = {
if (handledSubsts(this)(enablers -> subst)) {
Instantiation.empty[T]
} else {
handledSubsts(this) += enablers -> subst
var instantiation = Instantiation.empty[T]
val (enabler, optEnabler) = freshBlocker(enablers)
if (optEnabler.isDefined) {
instantiation = instantiation withClause encoder.mkEquals(enabler, optEnabler.get)
}
val baseSubst = subst ++ instanceSubst(enabler).mapValues(Left(_))
val (substMap, inst) = Template.substitution[T](encoder, QuantificationManager.this,
condVars, exprVars, condTree, Seq.empty, lambdas, Set.empty, baseSubst, pathVar._1, enabler)
instantiation ++= inst
val msubst = substMap.collect { case (c, Right(m)) => c -> m }
val substituter = encoder.substitute(substMap.mapValues(_.encoded))
registerBlockers(substituter)
instantiation ++= Template.instantiate(encoder, QuantificationManager.this,
clauses, blockers, applications, Map.empty, substMap)
for ((b,ms) <- allMatchers; m <- ms) {
val sb = enablers ++ (if (b == start) Set.empty else Set(substituter(b)))
val sm = m.substitute(substituter, msubst)
if (strict && (matchers(m) || transMatchers(m))) {
instantiation ++= instCtx.instantiate(sb, sm)(quantifications.toSeq : _*)
} else if (!matchers(m)) {
ignoredMatchers += ((currentGen + 2 + totalDepth(m), sb, sm))
}
}
instantiation
}
}
protected def instanceSubst(enabler: T): Map[T, T]
protected def skip(subst: Map[T, Arg[T]]): Boolean = false
protected def registerBlockers(substituter: T => T): Unit = ()
}
private class Quantification (
val pathVar: (Identifier, T),
val qs: (Identifier, T),
val q2s: (Identifier, T),
val insts: (Identifier, T),
val guardVar: T,
val quantifiers: Seq[(Identifier, T)],
val matchers: Set[Matcher[T]],
val allMatchers: Map[T, Set[Matcher[T]]],
val condVars: Map[Identifier, T],
val exprVars: Map[Identifier, T],
val condTree: Map[Identifier, Set[Identifier]],
val clauses: Seq[T],
val blockers: Map[T, Set[TemplateCallInfo[T]]],
val applications: Map[T, Set[App[T]]],
val lambdas: Seq[LambdaTemplate[T]],
val template: QuantificationTemplate[T]) extends MatcherQuantification {
private var _currentQ2Var: T = qs._2
def currentQ2Var = _currentQ2Var
val holds = qs._2
val body = template.forall.body
private var _currentInsts: Map[T, Set[T]] = Map.empty
def currentInsts = _currentInsts
protected def instanceSubst(enabler: T): Map[T, T] = {
val nextQ2Var = encoder.encodeId(q2s._1)
val subst = Map(qs._2 -> currentQ2Var, guardVar -> enabler,
q2s._2 -> nextQ2Var, insts._2 -> encoder.encodeId(insts._1))
_currentQ2Var = nextQ2Var
subst
}
override def registerBlockers(substituter: T => T): Unit = {
val freshInst = substituter(insts._2)
val bs = (blockers.keys ++ applications.keys).map(substituter).toSet
_currentInsts += freshInst -> bs
}
}
private lazy val blockerId = FreshIdentifier("blocker", BooleanType, true)
private lazy val enablersToBlocker: MutableMap[Set[T], T] = MutableMap.empty
private lazy val blockerToEnablers: MutableMap[T, Set[T]] = MutableMap.empty
private def freshBlocker(enablers: Set[T]): (T, Option[T]) = enablers.toSeq match {
case Seq(b) if isBlocker(b) => (b, None)
case _ =>
val last = enablersToBlocker.get(enablers).orElse {
val initialEnablers = enablers.flatMap(e => blockerToEnablers.getOrElse(e, Set(e)))
enablersToBlocker.get(initialEnablers)
}
last match {
case Some(b) => (b, None)
case None =>
val nb = encoder.encodeId(blockerId)
enablersToBlocker += enablers -> nb
blockerToEnablers += nb -> enablers
for (b <- enablers if isBlocker(b)) implies(b, nb)
blocker(nb)
(nb, Some(encodeEnablers(enablers)))
}
}
private class LambdaAxiom (
val pathVar: (Identifier, T),
val blocker: T,
val guardVar: T,
val quantifiers: Seq[(Identifier, T)],
val matchers: Set[Matcher[T]],
val allMatchers: Map[T, Set[Matcher[T]]],
val condVars: Map[Identifier, T],
val exprVars: Map[Identifier, T],
val condTree: Map[Identifier, Set[Identifier]],
val clauses: Seq[T],
val blockers: Map[T, Set[TemplateCallInfo[T]]],
val applications: Map[T, Set[App[T]]],
val lambdas: Seq[LambdaTemplate[T]],
val template: LambdaTemplate[T]) extends MatcherQuantification {
val holds = start
val body = template.lambda.body
protected def instanceSubst(enabler: T): Map[T, T] = {
Map(guardVar -> start, blocker -> enabler)
}
override protected def skip(subst: Map[T, Arg[T]]): Boolean = {
val substituter = encoder.substitute(subst.mapValues(_.encoded))
val msubst = subst.collect { case (c, Right(m)) => c -> m }
allMatchers.forall { case (b, ms) =>
ms.forall(m => matchers(m) || instCtx(Set(substituter(b)) -> m.substitute(substituter, msubst)))
}
}
}
private def extractQuorums(
quantified: Set[T],
matchers: Set[Matcher[T]],
lambdas: Seq[LambdaTemplate[T]]
): Seq[Set[Matcher[T]]] = {
val extMatchers: Set[Matcher[T]] = {
def rec(templates: Seq[LambdaTemplate[T]]): Set[Matcher[T]] =
templates.foldLeft(Set.empty[Matcher[T]]) {
case (matchers, template) => matchers ++ template.matchers.flatMap(_._2) ++ rec(template.lambdas)
}
matchers ++ rec(lambdas)
}
val quantifiedMatchers = for {
m @ Matcher(_, _, args, _) <- extMatchers
if args exists (_.left.exists(quantified))
} yield m
purescala.Quantification.extractQuorums(quantifiedMatchers, quantified,
(m: Matcher[T]) => m.args.collect { case Right(m) if quantifiedMatchers(m) => m }.toSet,
(m: Matcher[T]) => m.args.collect { case Left(a) if quantified(a) => a }.toSet)
}
def instantiateAxiom(template: LambdaTemplate[T], substMap: Map[T, Arg[T]]): Instantiation[T] = {
def quantifiedMatcher(m: Matcher[T]): Boolean = m.args.exists(a => a match {
case Left(v) => isQuantifier(v)
case Right(m) => quantifiedMatcher(m)
})
val quantified = template.arguments flatMap {
case (id, idT) => substMap(idT) match {
case Left(v) if isQuantifier(v) => Some(id)
case Right(m) if quantifiedMatcher(m) => Some(id)
case _ => None
}
}
val quantifiers = quantified zip abstractNormalizer.normalize(quantified)
val key = template.structure -> quantifiers
if (quantifiers.isEmpty || lambdaAxioms(key)) {
Instantiation.empty[T]
} else {
lambdaAxioms += key
val blockerT = encoder.encodeId(blockerId)
val guard = FreshIdentifier("guard", BooleanType, true)
val guardT = encoder.encodeId(guard)
val substituter = encoder.substitute(substMap.mapValues(_.encoded) + (template.start -> blockerT))
val msubst = substMap.collect { case (c, Right(m)) => c -> m }
val allMatchers = template.matchers map { case (b, ms) =>
substituter(b) -> ms.map(_.substitute(substituter, msubst))
}
val qMatchers = allMatchers.flatMap(_._2).toSet
val encArgs = template.args map (arg => Left(arg).substitute(substituter, msubst))
val app = Application(Variable(template.ids._1), template.arguments.map(_._1.toVariable))
val appT = encoder.encodeExpr((template.arguments.map(_._1) zip encArgs.map(_.encoded)).toMap + template.ids)(app)
val selfMatcher = Matcher(template.ids._2, template.tpe, encArgs, appT)
val instMatchers = allMatchers + (template.start -> (allMatchers.getOrElse(template.start, Set.empty) + selfMatcher))
val enablingClause = encoder.mkImplies(guardT, blockerT)
val condVars = template.condVars map { case (id, idT) => id -> substituter(idT) }
val exprVars = template.exprVars map { case (id, idT) => id -> substituter(idT) }
val clauses = (template.clauses map substituter) :+ enablingClause
val blockers = template.blockers map { case (b, fis) =>
substituter(b) -> fis.map(fi => fi.copy(args = fi.args.map(_.substitute(substituter, msubst))))
}
val applications = template.applications map { case (b, apps) =>
substituter(b) -> apps.map(app => app.copy(
caller = substituter(app.caller),
args = app.args.map(_.substitute(substituter, msubst))
))
}
val lambdas = template.lambdas map (_.substitute(substituter, msubst))
val quantified = quantifiers.map(_._2).toSet
val matchQuorums = extractQuorums(quantified, qMatchers, lambdas)
var instantiation = Instantiation.empty[T]
for (matchers <- matchQuorums) {
val axiom = new LambdaAxiom(template.pathVar._1 -> substituter(template.start),
blockerT, guardT, quantifiers, matchers, instMatchers, condVars, exprVars, template.condTree,
clauses, blockers, applications, lambdas, template)
quantifications += axiom
handledSubsts += axiom -> MutableSet.empty
ignoredSubsts += axiom -> MutableSet.empty
val newCtx = new InstantiationContext()
for ((b,m) <- instCtx.instantiated) {
instantiation ++= newCtx.instantiate(b, m)(axiom)
}
instCtx.merge(newCtx)
}
instantiation ++= instantiateConstants(quantifiers, qMatchers)
instantiation
}
}
def instantiateQuantification(template: QuantificationTemplate[T]): (T, Instantiation[T]) = {
templates.get(template.key) match {
case Some(idT) =>
(idT, Instantiation.empty)
case None =>
val qT = encoder.encodeId(template.qs._1)
val quantified = template.quantifiers.map(_._2).toSet
val matcherSet = template.matchers.flatMap(_._2).toSet
val matchQuorums = extractQuorums(quantified, matcherSet, template.lambdas)
var instantiation = Instantiation.empty[T]
val qs = for (matchers <- matchQuorums) yield {
val newQ = encoder.encodeId(template.qs._1)
val substituter = encoder.substitute(Map(template.qs._2 -> newQ))
val quantification = new Quantification(
template.pathVar,
template.qs._1 -> newQ,
template.q2s, template.insts, template.guardVar,
template.quantifiers, matchers, template.matchers,
template.condVars, template.exprVars, template.condTree,
template.clauses map substituter, // one clause depends on 'q' (and therefore 'newQ')
template.blockers, template.applications, template.lambdas, template)
quantifications += quantification
handledSubsts += quantification -> MutableSet.empty
ignoredSubsts += quantification -> MutableSet.empty
val newCtx = new InstantiationContext()
for ((b,m) <- instCtx.instantiated) {
instantiation ++= newCtx.instantiate(b, m)(quantification)
}
instCtx.merge(newCtx)
quantification.qs._2
}
instantiation = instantiation withClause {
val newQs =
if (qs.isEmpty) trueT
else if (qs.size == 1) qs.head
else encoder.mkAnd(qs : _*)
encoder.mkImplies(template.start, encoder.mkEquals(qT, newQs))
}
instantiation ++= instantiateConstants(template.quantifiers, matcherSet)
templates += template.key -> qT
(qT, instantiation)
}
}
def instantiateMatcher(blocker: T, matcher: Matcher[T]): Instantiation[T] = {
instCtx.instantiate(Set(blocker), matcher)(quantifications.toSeq : _*)
}
def hasIgnored: Boolean = ignoredSubsts.nonEmpty || ignoredMatchers.nonEmpty
def instantiateIgnored(force: Boolean = false): Instantiation[T] = {
currentGen = if (!force) currentGen + 1 else {
val gens = ignoredSubsts.toSeq.flatMap(_._2).map(_._1) ++ ignoredMatchers.toSeq.map(_._1)
if (gens.isEmpty) currentGen else gens.min
}
var instantiation = Instantiation.empty[T]
val matchersToRelease = ignoredMatchers.toList.flatMap { case e @ (gen, b, m) =>
if (gen == currentGen) {
ignoredMatchers -= e
Some(b -> m)
} else {
None
}
}
for ((bs,m) <- matchersToRelease) {
instantiation ++= instCtx.instantiate(bs, m)(quantifications.toSeq : _*)
}
val substsToRelease = quantifications.toList.flatMap { q =>
val qsubsts = ignoredSubsts(q)
qsubsts.toList.flatMap { case e @ (gen, enablers, subst) =>
if (gen == currentGen) {
qsubsts -= e
Some((q, enablers, subst))
} else {
None
}
}
}
for ((q, enablers, subst) <- substsToRelease) {
instantiation ++= q.instantiateSubst(enablers, subst, strict = false)
}
instantiation
}
private def instantiateConstants(quantifiers: Seq[(Identifier, T)], matchers: Set[Matcher[T]]): Instantiation[T] = {
var instantiation: Instantiation[T] = Instantiation.empty
for (normalizer <- List(abstractNormalizer, concreteNormalizer)) {
val quantifierSubst = normalizer.normalSubst(quantifiers)
val substituter = encoder.substitute(quantifierSubst)
for {
m <- matchers
sm = m.substitute(substituter, Map.empty)
if !instCtx.corresponding(sm).exists(_._2.args == sm.args)
} instantiation ++= instCtx.instantiate(Set.empty, sm)(quantifications.toSeq : _*)
def unifyMatchers(matchers: Seq[Matcher[T]]): Instantiation[T] = matchers match {
case sm +: others =>
var instantiation = Instantiation.empty[T]
for (pm <- others if correspond(pm, sm)) {
val encodedArgs = (sm.args zip pm.args).map(p => p._1.encoded -> p._2.encoded)
val mismatches = encodedArgs.zipWithIndex.collect {
case ((sa, pa), idx) if isQuantifier(sa) && isQuantifier(pa) && sa != pa => (idx, (pa, sa))
}.toMap
def extractChains(indexes: Seq[Int], partials: Seq[Seq[Int]]): Seq[Seq[Int]] = indexes match {
case idx +: xs =>
val (p1, p2) = mismatches(idx)
val newPartials = Seq(idx) +: partials.map { seq =>
if (mismatches(seq.head)._1 == p2) idx +: seq
else if (mismatches(seq.last)._2 == p1) seq :+ idx
else seq
}
val (closed, remaining) = newPartials.partition { seq =>
mismatches(seq.head)._1 == mismatches(seq.last)._2
}
closed ++ extractChains(xs, partials ++ remaining)
case _ => Seq.empty
}
val chains = extractChains(mismatches.keys.toSeq, Seq.empty)
val positions = chains.foldLeft(Map.empty[Int, Int]) { (mapping, seq) =>
val res = seq.min
mapping ++ seq.map(i => i -> res)
}
def extractArgs(args: Seq[Arg[T]]): Seq[Arg[T]] =
(0 until args.size).map(i => args(positions.getOrElse(i, i)))
instantiation ++= instCtx.instantiate(Set.empty, sm.copy(args = extractArgs(sm.args)))(quantifications.toSeq : _*)
instantiation ++= instCtx.instantiate(Set.empty, pm.copy(args = extractArgs(pm.args)))(quantifications.toSeq : _*)
}
instantiation ++ unifyMatchers(others)
case _ => Instantiation.empty[T]
}
if (normalizer == abstractNormalizer) {
val substMatchers = matchers.map(_.substitute(substituter, Map.empty))
instantiation ++= unifyMatchers(substMatchers.toSeq)
}
}
instantiation
}
def checkClauses: Seq[T] = {
val clauses = new scala.collection.mutable.ListBuffer[T]
val keyClause = MutableMap.empty[MatcherKey, (Seq[T], T)]
for ((_, bs, m) <- ignoredMatchers) {
val key = matcherKey(m.caller, m.tpe)
val QTM(argTypes, _) = key.tpe
val (values, clause) = keyClause.getOrElse(key, {
val insts = instCtx.map.get(key).toMatchers
val guard = FreshIdentifier("guard", BooleanType)
val elems = argTypes.map(tpe => FreshIdentifier("elem", tpe))
val values = argTypes.map(tpe => FreshIdentifier("value", tpe))
val expr = andJoin(Variable(guard) +: (elems zip values).map(p => Equals(Variable(p._1), Variable(p._2))))
val guardP = guard -> encoder.encodeId(guard)
val elemsP = elems.map(e => e -> encoder.encodeId(e))
val valuesP = values.map(v => v -> encoder.encodeId(v))
val exprT = encoder.encodeExpr(elemsP.toMap ++ valuesP + guardP)(expr)
val disjuncts = insts.toSeq.map { case (b, im) =>
val bp = if (m.caller != im.caller) encoder.mkAnd(encoder.mkEquals(m.caller, im.caller), b) else b
val subst = (elemsP.map(_._2) zip im.args.map(_.encoded)).toMap + (guardP._2 -> bp)
encoder.substitute(subst)(exprT)
}
val res = (valuesP.map(_._2), encoder.mkOr(disjuncts : _*))
keyClause += key -> res
res
})
val b = encodeEnablers(bs)
val substMap = (values zip m.args.map(_.encoded)).toMap
clauses += encoder.substitute(substMap)(encoder.mkImplies(b, clause))
}
for (q <- quantifications) {
val guard = FreshIdentifier("guard", BooleanType)
val elems = q.quantifiers.map(_._1)
val values = elems.map(id => id.freshen)
val expr = andJoin(Variable(guard) +: (elems zip values).map(p => Equals(Variable(p._1), Variable(p._2))))
val guardP = guard -> encoder.encodeId(guard)
val elemsP = elems.map(e => e -> encoder.encodeId(e))
val valuesP = values.map(v => v -> encoder.encodeId(v))
val exprT = encoder.encodeExpr(elemsP.toMap ++ valuesP + guardP)(expr)
val disjunction = handledSubsts(q) match {
case set if set.isEmpty => encoder.encodeExpr(Map.empty)(BooleanLiteral(false))
case set => encoder.mkOr(set.toSeq.map { case (enablers, subst) =>
val b = if (enablers.isEmpty) trueT else encoder.mkAnd(enablers.toSeq : _*)
val substMap = (elemsP.map(_._2) zip q.quantifiers.map(p => subst(p._2).encoded)).toMap + (guardP._2 -> b)
encoder.substitute(substMap)(exprT)
} : _*)
}
for ((_, enablers, subst) <- ignoredSubsts(q)) {
val b = if (enablers.isEmpty) trueT else encoder.mkAnd(enablers.toSeq : _*)
val substMap = (valuesP.map(_._2) zip q.quantifiers.map(p => subst(p._2).encoded)).toMap
clauses += encoder.substitute(substMap)(encoder.mkImplies(b, disjunction))
}
}
def isQuantified(e: Arg[T]): Boolean = e match {
case Left(t) => isQuantifier(t)
case Right(m) => m.args.exists(isQuantified)
}
for ((key, ctx) <- instCtx.map.instantiations) {
val QTM(argTypes, _) = key.tpe
for {
(tpe, idx) <- argTypes.zipWithIndex
quants <- abstractNormalizer.get(tpe) if quants.nonEmpty
(b, m) <- ctx
arg = m.args(idx) if !isQuantified(arg)
} clauses += encoder.mkAnd(quants.map(q => encoder.mkNot(encoder.mkEquals(q, arg.encoded))) : _*)
val byPosition: Iterable[Seq[T]] = ctx.flatMap { case (b, m) =>
if (b != trueT) Seq.empty else m.args.zipWithIndex
}.groupBy(_._2).map(p => p._2.toSeq.flatMap {
case (a, _) => if (isQuantified(a)) Some(a.encoded) else None
}).filter(_.nonEmpty)
for ((a +: as) <- byPosition; a2 <- as) {
clauses += encoder.mkEquals(a, a2)
}
}
clauses.toSeq
}
trait ModelView {
protected val vars: Map[Identifier, T]
protected val evaluator: evaluators.DeterministicEvaluator
protected def get(id: Identifier): Option[Expr]
protected def eval(elem: T, tpe: TypeTree): Option[Expr]
implicit lazy val context = evaluator.context
lazy val reporter = context.reporter
private def extract(b: T, m: Matcher[T]): Option[Seq[Expr]] = {
val QTM(fromTypes, _) = m.tpe
val optEnabler = eval(b, BooleanType)
optEnabler.filter(_ == BooleanLiteral(true)).flatMap { _ =>
val optArgs = (m.args zip fromTypes).map { case (arg, tpe) => eval(arg.encoded, tpe) }
if (optArgs.forall(_.isDefined)) Some(optArgs.map(_.get))
else None
}
}
private def functionsOf(expr: Expr, path: Expr): (Seq[(Expr, Expr)], Seq[Expr] => Expr) = {
def reconstruct(subs: Seq[(Seq[(Expr, Expr)], Seq[Expr] => Expr)],
recons: Seq[Expr] => Expr): (Seq[(Expr, Expr)], Seq[Expr] => Expr) =
(subs.flatMap(_._1), (exprs: Seq[Expr]) => {
var curr = exprs
recons(subs.map { case (es, recons) =>
val (used, remaining) = curr.splitAt(es.size)
curr = remaining
recons(used)
})
})
def rec(expr: Expr, path: Expr): (Seq[(Expr, Expr)], Seq[Expr] => Expr) = expr match {
case (_: Lambda) | (_: FiniteLambda) =>
(Seq(expr -> path), (es: Seq[Expr]) => es.head)
case Tuple(es) => reconstruct(es.zipWithIndex.map {
case (e, i) => rec(e, TupleSelect(path, i + 1))
}, Tuple)
case CaseClass(cct, es) => reconstruct((cct.classDef.fieldsIds zip es).map {
case (id, e) => rec(e, CaseClassSelector(cct, path, id))
}, CaseClass(cct, _))
case _ => (Seq.empty, (es: Seq[Expr]) => expr)
}
rec(expr, path)
}
def getPartialModel: PartialModel = {
val typeDomains: Map[TypeTree, Set[Seq[Expr]]] = typeInstantiations.map {
case (tpe, domain) => tpe -> domain.flatMap { case (b, m) => extract(b, m) }.toSet
}
val lambdaDomains: Map[Lambda, Set[Seq[Expr]]] = lambdaInstantiations.map {
case (l, domain) => l -> domain.flatMap { case (b, m) => extract(b, m) }.toSet
}
val domains = new Domains(lambdaDomains, typeDomains)
val partialDomains: Map[T, Set[Seq[Expr]]] = partialInstantiations.map {
case (t, domain) => t -> domain.flatMap { case (b, m) => extract(b, m) }.toSet
}
def extractElse(body: Expr): Expr = body match {
case IfExpr(cond, thenn, elze) => extractElse(elze)
case _ => body
}
val mapping = vars.map { case (id, idT) =>
val value = get(id).getOrElse(simplestValue(id.getType))
val (functions, recons) = functionsOf(value, Variable(id))
id -> recons(functions.map { case (f, path) =>
val encoded = encoder.encodeExpr(Map(id -> idT))(path)
val tpe = bestRealType(f.getType).asInstanceOf[FunctionType]
partialDomains.get(encoded).orElse(typeDomains.get(tpe)).map { domain =>
FiniteLambda(domain.toSeq.map { es =>
val optEv = evaluator.eval(application(f, es)).result
es -> optEv.getOrElse(scala.sys.error("Unexpectedly failed to evaluate " + application(f, es)))
}, f match {
case FiniteLambda(_, dflt, _) => dflt
case Lambda(_, body) => extractElse(body)
case _ => scala.sys.error("What kind of function is this : " + f.asString + " !?")
}, tpe)
}.getOrElse(f)
})
}
new PartialModel(mapping, domains)
}
def getTotalModel: Model = {
def checkForalls(quantified: Set[Identifier], body: Expr): Option[String] = {
val matchers = purescala.ExprOps.collect[(Expr, Seq[Expr])] {
case QM(e, args) => Set(e -> args)
case _ => Set.empty
} (body)
if (matchers.isEmpty)
return Some("No matchers found.")
val matcherToQuants = matchers.foldLeft(Map.empty[Expr, Set[Identifier]]) {
case (acc, (m, args)) => acc + (m -> (acc.getOrElse(m, Set.empty) ++ args.flatMap {
case Variable(id) if quantified(id) => Set(id)
case _ => Set.empty[Identifier]
}))
}
val bijectiveMappings = matcherToQuants.filter(_._2.nonEmpty).groupBy(_._2)
if (bijectiveMappings.size > 1)
return Some("Non-bijective mapping for symbol " + bijectiveMappings.head._2.head._1.asString)
def quantifiedArg(e: Expr): Boolean = e match {
case Variable(id) => quantified(id)
case QM(_, args) => args.forall(quantifiedArg)
case _ => false
}
purescala.ExprOps.postTraversal(m => m match {
case QM(_, args) =>
val qArgs = args.filter(quantifiedArg)
if (qArgs.nonEmpty && qArgs.size < args.size)
return Some("Mixed ground and quantified arguments in " + m.asString)
case Operator(es, _) if es.collect { case Variable(id) if quantified(id) => id }.nonEmpty =>
return Some("Invalid operation on quantifiers " + m.asString)
case (_: Equals) | (_: And) | (_: Or) | (_: Implies) | (_: Not) => // OK
case Operator(es, _) if (es.flatMap(variablesOf).toSet & quantified).nonEmpty =>
        return Some("Unhandled implications from operation " + m.asString)
case _ =>
}) (body)
body match {
case Variable(id) if quantified(id) =>
Some("Unexpected free quantifier " + id.asString)
case _ => None
}
}
val issues: Iterable[(Seq[Identifier], Expr, String)] = for {
q <- quantifications.view
if eval(q.holds, BooleanType) == Some(BooleanLiteral(true))
msg <- checkForalls(q.quantifiers.map(_._1).toSet, q.body)
} yield (q.quantifiers.map(_._1), q.body, msg)
if (issues.nonEmpty) {
val (quantifiers, body, msg) = issues.head
reporter.warning("Model soundness not guaranteed for \\u2200" +
quantifiers.map(_.asString).mkString(",") + ". " + body.asString+" :\\n => " + msg)
}
val types = typeInstantiations
val partials = partialInstantiations
def extractCond(params: Seq[Identifier], args: Seq[(T, Expr)], structure: Map[T, Identifier]): Seq[Expr] = (params, args) match {
case (id +: rparams, (v, arg) +: rargs) =>
if (isQuantifier(v)) {
structure.get(v) match {
case Some(pid) => Equals(Variable(id), Variable(pid)) +: extractCond(rparams, rargs, structure)
case None => extractCond(rparams, rargs, structure + (v -> id))
}
} else {
Equals(Variable(id), arg) +: extractCond(rparams, rargs, structure)
}
case _ => Seq.empty
}
new Model(vars.map { case (id, idT) =>
val value = get(id).getOrElse(simplestValue(id.getType))
val (functions, recons) = functionsOf(value, Variable(id))
id -> recons(functions.map { case (f, path) =>
val encoded = encoder.encodeExpr(Map(id -> idT))(path)
val tpe = bestRealType(f.getType).asInstanceOf[FunctionType]
val params = tpe.from.map(tpe => FreshIdentifier("x", tpe, true))
partials.get(encoded).orElse(types.get(tpe)).map { domain =>
val conditionals = domain.flatMap { case (b, m) =>
extract(b, m).map { args =>
val result = evaluator.eval(application(f, args)).result.getOrElse {
scala.sys.error("Unexpectedly failed to evaluate " + application(f, args))
}
val cond = if (m.args.exists(arg => isQuantifier(arg.encoded))) {
extractCond(params, m.args.map(_.encoded) zip args, Map.empty)
} else {
(params zip args).map(p => Equals(Variable(p._1), p._2))
}
cond -> result
}
}.toMap
if (conditionals.isEmpty) f match {
case FiniteLambda(mapping, dflt, tpe) =>
Lambda(params.map(ValDef(_)), mapping.foldRight(dflt) { case ((es, v), elze) =>
IfExpr(andJoin((params zip es).map(p => Equals(p._1.toVariable, p._2))), v, elze)
})
case _ => f
} else {
          val (_, dflt) +: rest = conditionals.toSeq.sortBy { case (conds, _) =>
(conds.flatMap(variablesOf).toSet.size, conds.size)
}
val body = rest.foldLeft(dflt) { case (elze, (conds, res)) =>
if (conds.isEmpty) elze else (elze match {
case pres if res == pres => res
case _ => IfExpr(andJoin(conds), res, elze)
})
}
Lambda(params.map(ValDef(_)), body)
}
}.getOrElse(f)
})
})
}
}
def getModel(vs: Map[Identifier, T], ev: DeterministicEvaluator, _get: Identifier => Option[Expr], _eval: (T, TypeTree) => Option[Expr]) = new ModelView {
val vars: Map[Identifier, T] = vs
val evaluator: DeterministicEvaluator = ev
def get(id: Identifier): Option[Expr] = _get(id)
def eval(elem: T, tpe: TypeTree): Option[Expr] = _eval(elem, tpe)
}
def getBlockersToPromote(eval: (T, TypeTree) => Option[Expr]): Seq[T] = quantifications.toSeq.flatMap {
case q: Quantification if eval(q.qs._2, BooleanType) == Some(BooleanLiteral(false)) =>
val falseInsts = q.currentInsts.filter { case (inst, bs) => eval(inst, BooleanType) == Some(BooleanLiteral(false)) }
falseInsts.flatMap(_._2)
case _ => Seq.empty
}
}
| regb/leon | src/main/scala/leon/solvers/unrolling/QuantificationManager.scala | Scala | gpl-3.0 | 47,462 |
package org.broadinstitute.dsde.vault.datamanagement.model
import com.wordnik.swagger.annotations.{ApiModel, ApiModelProperty}
import scala.annotation.meta.field
@ApiModel(value = "An entity search result")
case class EntitySearchResult
(
@(ApiModelProperty@field)(value = "The unique id for this entity.", required = true)
guid: String,
@(ApiModelProperty@field)(value = "The type of entity.", required = true)
`type`: String
)
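// Serialized through the REST layer this is documented to look like (hypothetical values):
// { "guid": "4f3a2b1c-0d5e-...", "type": "unmappedBAM" }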
| broadinstitute/vault-datamanagement | src/main/scala/org/broadinstitute/dsde/vault/datamanagement/model/EntitySearchResult.scala | Scala | bsd-3-clause | 443 |
/** Run with:
 *
 *  ./test
 *  mvn scala:compile && scala -cp target/classes TestIntRangeLoops
 *  mvn scala:compile && scala -cp target\\classes TestIntRangeLoops
 *  javap -c -classpath target\\classes TestIntRangeLoops$
*/
object TestIntRangeLoops {
import TestUtils._
def main(args: Array[String]): Unit = {
val name = if (args.isEmpty) "Normal" else args(0)
val m = 10
val n = 10
val o = 30
val mn = m * n
val mno = m * n * o
def test1_mno = {
var t = 0.0
for (i <- 0 until mno)
t += i / 10
t
}
def test2_mn_o = {
var t = 0.0
for (i <- 0 until mn)
for (j <- 0 until o)
t += (i + j) / 10
t
}
def test3_mno = {
var t = 0.0
for (i <- 0 until n)
for (j <- 0 until m)
for (k <- 0 until o)
t += (i + j + k) / 10
t
}
def test4_mnom = {
var t = 0.0
for (i <- 0 until n)
for (j <- 0 until m)
for (k <- 0 until o)
for (l <- 0 until m)
t += (i + j + k + l) / 10
t
}
val (cold1, warm1) = tst(mno) { test1_mno }
val (cold2, warm2) = tst(mno) { test2_mn_o }
val (cold3, warm3) = tst(mno) { test3_mno }
val (cold4, warm4) = tst(mno * m) { test4_mnom }
println(Array(name, "Cold", 1, cold1).mkString("\\t"));
println(Array(name, "Cold", 2, cold2).mkString("\\t"));
println(Array(name, "Cold", 3, cold3).mkString("\\t"));
println(Array(name, "Cold", 4, cold4).mkString("\\t"));
println(Array(name, "Warm", 1, warm1).mkString("\\t"));
println(Array(name, "Warm", 2, warm2).mkString("\\t"));
println(Array(name, "Warm", 3, warm3).mkString("\\t"));
println(Array(name, "Warm", 4, warm4).mkString("\\t"));
}
}
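// TestUtils.tst is not shown in this excerpt; the sketch below is a minimal stand-in with the
// shape used above (run the block, return (cold, warm) timings scaled by `n`) — the real helper
// may warm up the JIT differently:
object TimingSketchForTst {
  def tst[A](n: Int)(block: => A): (Double, Double) = {
    def once(): Double = {
      val t0 = System.nanoTime()
      block
      (System.nanoTime() - t0).toDouble / n // nanoseconds per inner iteration
    }
    val cold = once()                        // first run pays JIT and class-loading costs
    val warm = (1 to 5).map(_ => once()).min // best of a few warmed-up runs
    (cold, warm)
  }
}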
| nativelibs4java/ScalaCL | Old/Test/src/main/scala/TestIntRangeLoops.scala | Scala | bsd-3-clause | 2,033 |
package at.forsyte.apalache.tla.pp
import at.forsyte.apalache.tla.lir._
import at.forsyte.apalache.tla.lir.oper._
import at.forsyte.apalache.tla.lir.transformations.standard.FlatLanguagePred
import at.forsyte.apalache.tla.lir.transformations.{LanguageWatchdog, TlaExTransformation, TransformationTracker}
import at.forsyte.apalache.tla.lir.values.{TlaBool, TlaInt}
/**
* A simplifier of constant TLA+ expressions, e.g., rewriting 1 + 2 to 3.
*
* @author Igor Konnov
*/
class ConstSimplifier(tracker: TransformationTracker) extends TlaExTransformation {
override def apply(expr: TlaEx): TlaEx = {
LanguageWatchdog(FlatLanguagePred()).check(expr)
simplify(expr)
}
def simplify(rootExpr: TlaEx): TlaEx = {
rewriteDeep(rootExpr)
}
private def rewriteDeep: TlaExTransformation = tracker.track {
case ex @ ValEx(_) => ex
case ex @ NameEx(_) => ex
case OperEx(oper, args @ _*) =>
simplifyShallow(OperEx(oper, args map rewriteDeep :_*))
case LetInEx(body, defs @ _*) =>
val newDefs = defs.map {
d => TlaOperDecl(d.name, d.formalParams, simplify(d.body))
}
LetInEx(simplify(body), newDefs :_*)
case ex => ex
}
private def simplifyShallow(ex: TlaEx): TlaEx = ex match {
case ValEx(_) => ex
case NameEx(_) => ex
// integer operations
case OperEx(TlaArithOper.plus, ValEx(TlaInt(left)), ValEx(TlaInt(right))) =>
ValEx(TlaInt(left + right))
case OperEx(TlaArithOper.minus, ValEx(TlaInt(left)), ValEx(TlaInt(right))) =>
ValEx(TlaInt(left - right))
case OperEx(TlaArithOper.minus, NameEx(left), NameEx(right)) =>
if (left == right) ValEx(TlaInt(0)) else ex // this actually happens
case OperEx(TlaArithOper.mult, ValEx(TlaInt(left)), ValEx(TlaInt(right))) =>
ValEx(TlaInt(left * right))
case OperEx(TlaArithOper.div, ValEx(TlaInt(left)), ValEx(TlaInt(right))) =>
ValEx(TlaInt(left / right))
case OperEx(TlaArithOper.mod, ValEx(TlaInt(left)), ValEx(TlaInt(right))) =>
ValEx(TlaInt(left % right))
    case OperEx(TlaArithOper.exp, ValEx(TlaInt(base)), ValEx(TlaInt(power))) =>
      // BigInt exponentiation avoids the precision loss of Math.pow on large values
      // (assumes the exponent fits into an Int)
      ValEx(TlaInt(base.pow(power.toInt)))
case OperEx(TlaArithOper.uminus, ValEx(TlaInt(value))) =>
ValEx(TlaInt(-value))
case OperEx(TlaArithOper.lt, ValEx(TlaInt(left)), ValEx(TlaInt(right))) =>
ValEx(TlaBool(left < right))
case OperEx(TlaArithOper.le, ValEx(TlaInt(left)), ValEx(TlaInt(right))) =>
ValEx(TlaBool(left <= right))
case OperEx(TlaArithOper.gt, ValEx(TlaInt(left)), ValEx(TlaInt(right))) =>
ValEx(TlaBool(left > right))
case OperEx(TlaArithOper.ge, ValEx(TlaInt(left)), ValEx(TlaInt(right))) =>
ValEx(TlaBool(left >= right))
case OperEx(TlaOper.eq, ValEx(TlaInt(left)), ValEx(TlaInt(right))) =>
ValEx(TlaBool(left == right))
case OperEx(TlaOper.eq, NameEx(left), NameEx(right)) =>
if (left == right) ValEx(TlaBool(true)) else ex
case OperEx(TlaOper.ne, ValEx(TlaInt(left)), ValEx(TlaInt(right))) =>
ValEx(TlaBool(left != right))
case OperEx(TlaOper.ne, NameEx(left), NameEx(right)) =>
if (left == right) ValEx(TlaBool(false)) else ex
// boolean operations
case OperEx(TlaBoolOper.and, args @ _*) if args.contains(ValEx(TlaBool(false))) =>
ValEx(TlaBool(false))
case OperEx(TlaBoolOper.and, args @ _*) if args.forall (_.isInstanceOf[ValEx]) =>
val result = !args.contains(ValEx(TlaBool(false)))
ValEx(TlaBool(result))
case OperEx(TlaBoolOper.and, args @ _*) =>
val simpEx = OperEx(TlaBoolOper.and, args.filterNot { _ == ValEx(TlaBool(true)) } :_*)
simpEx match {
        case OperEx(TlaBoolOper.and) => ValEx(TlaBool(true)) // an empty conjunction is true
case e => e
}
case OperEx(TlaBoolOper.or, args @ _*) if args.contains(ValEx(TlaBool(true))) =>
ValEx(TlaBool(true))
case OperEx(TlaBoolOper.or, args @ _*) if args.forall (_.isInstanceOf[ValEx]) =>
val result = args.contains(ValEx(TlaBool(true)))
ValEx(TlaBool(result))
case OperEx(TlaBoolOper.or, args @ _*) =>
val simpEx = OperEx(TlaBoolOper.or, args.filterNot { _ == ValEx(TlaBool(false)) } :_*)
simpEx match {
case OperEx(TlaBoolOper.or) => ValEx(TlaBool(false)) // an empty disjunction is false
case e => e
}
case OperEx(TlaBoolOper.not, ValEx(TlaBool(b))) =>
ValEx(TlaBool(!b))
case OperEx(TlaBoolOper.not, OperEx(TlaBoolOper.not, underDoubleNegation)) =>
underDoubleNegation
case OperEx(TlaBoolOper.not, OperEx(TlaOper.ne, lhs, rhs)) =>
OperEx(TlaOper.eq, lhs, rhs)
// Keep unmodified, as KerA+ does not allow for /=
// case OperEx(TlaBoolOper.not, OperEx(TlaOper.eq, lhs, rhs)) =>
// OperEx(TlaOper.ne, lhs, rhs)
// Keep unmodified, as KerA+ does not allow for \\notin
// case OperEx(TlaBoolOper.not, OperEx(TlaSetOper.in, lhs, rhs)) =>
// OperEx(TlaSetOper.notin, lhs, rhs)
case OperEx(TlaBoolOper.not, OperEx(TlaSetOper.notin, lhs, rhs)) =>
OperEx(TlaSetOper.in, lhs, rhs)
case OperEx(TlaBoolOper.implies, ValEx(TlaBool(left)), ValEx(TlaBool(right))) =>
ValEx(TlaBool(!left || right))
case OperEx(TlaBoolOper.implies, ValEx(TlaBool(false)), _) =>
ValEx(TlaBool(true))
    case OperEx(TlaBoolOper.implies, ValEx(TlaBool(true)), right) =>
      right
case OperEx(TlaBoolOper.implies, lhs, ValEx(TlaBool(true))) =>
ValEx(TlaBool(true))
case OperEx(TlaBoolOper.implies, lhs, ValEx(TlaBool(false))) =>
simplifyShallow(OperEx(TlaBoolOper.not, lhs))
case OperEx(TlaBoolOper.equiv, ValEx(TlaBool(left)), ValEx(TlaBool(right))) =>
ValEx(TlaBool(left == right))
case OperEx(TlaBoolOper.equiv, ValEx(TlaBool(left)), right) =>
if (left) {
right
} else {
simplifyShallow(OperEx(TlaBoolOper.not, right))
}
case OperEx(TlaBoolOper.equiv, left, ValEx(TlaBool(right))) =>
if (right) {
left
} else {
simplifyShallow(OperEx(TlaBoolOper.not, left))
}
// many ite expressions can be simplified like this
case OperEx(TlaControlOper.ifThenElse, ValEx(TlaBool(true)), thenEx, _) =>
thenEx
case OperEx(TlaControlOper.ifThenElse, ValEx(TlaBool(false)), _, elseEx) =>
elseEx
case OperEx(TlaControlOper.ifThenElse, pred, ValEx(TlaBool(false)), elseEx) =>
simplifyShallow(OperEx(TlaBoolOper.and, OperEx(TlaBoolOper.not, pred), elseEx))
case OperEx(TlaControlOper.ifThenElse, pred, ValEx(TlaBool(true)), ValEx(TlaBool(false))) =>
simplifyShallow(pred)
case OperEx(TlaControlOper.ifThenElse, pred, ValEx(TlaBool(false)), ValEx(TlaBool(true))) =>
simplifyShallow(OperEx(TlaBoolOper.not, pred))
case ite @ OperEx(TlaControlOper.ifThenElse, _, thenEx, elseEx) =>
if (thenEx != elseEx) {
ite
} else {
thenEx
}
// default
case _ =>
ex
}
}
object ConstSimplifier {
def apply(tracker: TransformationTracker): ConstSimplifier = new ConstSimplifier(tracker)
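  // A minimal usage sketch (tracker construction is project-specific and assumed to exist):
  //   val simplifier = ConstSimplifier(tracker)
  //   simplifier(OperEx(TlaArithOper.plus, ValEx(TlaInt(1)), ValEx(TlaInt(2))))    // ValEx(TlaInt(3))
  //   simplifier(OperEx(TlaBoolOper.and, ValEx(TlaBool(false)), NameEx("x")))      // ValEx(TlaBool(false))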
}
| konnov/apalache | tla-pp/src/main/scala/at/forsyte/apalache/tla/pp/ConstSimplifier.scala | Scala | apache-2.0 | 7,160 |
package com.zinnia.hackthon
/**
* Author: madhu
 * Internal representation of a household power-consumption record
*/
class Record() extends Serializable {
var id: Long = _
  var date: String = _
  var time: String = _
var day: Int = _
var hourofDay: Long = _
var month: Int = _
var year: Long = _
var activePower: Double = _
var reactivePower: Double = _
var voltage: Double = _
var globalIntensity: Double = _
var subMetering1: Double = _
var subMetering2: Double = _
var subMetering3: Double = _
var totalCost: Double = _
  def totalPowerUsed: Double = activePower * 1000 / 60 // average kW over the one-minute sample, converted to watt-hours
  def powerMetered: Double = subMetering1 + subMetering2 + subMetering3 // energy seen by the three sub-meters
override def toString: String = {
id + ";" + day.toString + ";" + hourofDay + ";" + month +
";" + year + ";" + activePower + ";" + reactivePower + ";" +
voltage + ";" + globalIntensity + ";" + subMetering1 + ";" + subMetering2 + ";" + subMetering3 + ";" + totalPowerUsed + ";" + powerMetered
}
  override def equals(obj: scala.Any): Boolean = obj match {
    case other: Record => this.id == other.id
    case _ => false
  }

  // keep the equals/hashCode contract: records that compare equal share a hash
  override def hashCode(): Int = id.hashCode
}
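// Worked example (assuming the UCI household-power dataset conventions: activePower is the
// average active power in kilowatts over a one-minute sample): activePower = 4.2 gives
// totalPowerUsed = 4.2 * 1000 / 60 = 70 watt-hours for that minute; powerMetered sums the three
// sub-meter readings, so totalPowerUsed - powerMetered is the energy not covered by any sub-meter.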
| zinniasystems/spark-energy-prediction | src/main/java/com/zinnia/hackthon/Record.scala | Scala | apache-2.0 | 1,061 |
import models.Note
import org.junit.runner._
import org.specs2.mutable._
import org.specs2.runner._
import _root_.anorm.SqlParser._
import _root_.anorm._
import _root_.anorm.~
import play.api.db.DB
import play.api.test.FakeApplication
import play.api.test.Helpers._
import play.api.Play.current
@RunWith(classOf[JUnitRunner])
class DraftSpec extends Specification {
  "Application" should {
    "find the note with id 5 in the database" in {
running(FakeApplication()) {
        val id = 5L
val note = DB.withConnection { implicit connection =>
SQL("select * from note where id = {id}").on('id -> id).as(Note.simple.singleOpt)
}
        note must beSome
}
}
}
}
| Nectarius/taffeit | test/DraftSpec.scala | Scala | mit | 836 |
/*******************************************************************************
* This file is part of tiscaf.
*
* tiscaf is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
 * tiscaf is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with tiscaf. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package tiscaf
package rest
import java.text.{ SimpleDateFormat, ParseException }
/** A bunch of helpers used to simplify writing Restful APIs.
* It mainly consists of nice extractors helping to match agains a resource path,
* query string, ...
*
* @author Lucas Satabin
*/
trait HRest {
object dot {
def unapply(input: String) = {
val index = input.lastIndexOf('.')
if (index > 0) {
// there is at least tow elements
Some((input.substring(0, index), input.substring(index + 1)))
} else {
None
}
}
}
object long {
def unapply(input: String) = try {
Some(input.toLong)
} catch {
case _: Exception => None
}
}
object int {
def unapply(input: String) = try {
Some(input.toInt)
} catch {
case _: Exception => None
}
}
object date {
val formatter = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ")
def unapply(input: String) = try {
Option(formatter.parse(input))
} catch {
case _: ParseException =>
None
}
}
/** Specific extractor to extract the path and query parts from a request */
object ? {
def unapply(req: HReqData): Option[(String, String)] =
      if (req.query != "")
        Some((req.uriPath, req.query))
else
None
}
  /** Enriches `StringContext` with string interpolators used to pattern match against a request */
implicit class RestContext(val sc: StringContext) {
/** Allows people to pattern match against some URL and bind values when needed */
object p {
val regex =
sc.parts.map(scala.util.matching.Regex.quoteReplacement).mkString("/?", "([^/]+)", "").r
def unapplySeq(s: String): Option[Seq[String]] =
regex.unapplySeq(s)
def unapplySeq(req: HReqData): Option[Seq[String]] =
regex.unapplySeq(req.uriPath)
}
/** Allows people to pattern match against some query string */
object q {
val regex = sc.parts.map(scala.util.matching.Regex.quoteReplacement).mkString("([^&]+)").r
def unapplySeq(s: String): Option[Seq[String]] =
regex.unapplySeq(s)
}
}
implicit class RichOption[T](opt: Option[T]) {
def is(v: T): Boolean = opt match {
case Some(v1) => v1 == v
case None => false
}
}
}
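// A minimal usage sketch (hypothetical resource layout, assuming a request handler mixing in HRest):
//   req match {
//     case p"notes/${long(id)}" ? q"page=${int(n)}" => ... // "/notes/42?page=2" binds id = 42L, n = 2
//     case p"files/${name dot ext}" => ...                 // "/files/report.pdf" binds name = "report", ext = "pdf"
//     case _ => ...
//   }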
object HReqVerb extends Enumeration {
val Get, Post, Put, Patch, Delete, Options, Head = Value
def fromReqType(req: HReqType.Value): HReqVerb.Value = req match {
case HReqType.Get =>
Get
case HReqType.PostData | HReqType.PostOctets | HReqType.PostMulti =>
Post
case HReqType.Put =>
Put
case HReqType.Patch =>
Patch
case HReqType.Delete =>
Delete
case HReqType.Options =>
Options
case HReqType.Head =>
Head
}
}
| gnieh/tiscaf | rest/src/main/scala/tiscaf/rest/HRest.scala | Scala | lgpl-3.0 | 3,663 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.ml.math
import breeze.linalg.{SparseVector => BreezeSparseVector, DenseVector => BreezeDenseVector, Vector => BreezeVector}
import scala.util.Sorting
/** Sparse vector implementation storing the data in two arrays. One array contains the sorted
  * indices of the non-zero vector entries and the other contains the corresponding vector entries.
  */
case class SparseVector(size: Int, indices: Array[Int], data: Array[Double])
extends Vector with Serializable {
/** Updates the element at the given index with the provided value
*
* @param index Index whose value is updated.
* @param value The value used to update the index.
*/
override def update(index: Int, value: Double): Unit = {
val resolvedIndex = locate(index)
if (resolvedIndex < 0) {
throw new IllegalArgumentException("Cannot update zero value of sparse vector at " +
s"index $index")
} else {
data(resolvedIndex) = value
}
}
/** Copies the vector instance
*
* @return Copy of the [[SparseVector]] instance
*/
override def copy: SparseVector = {
new SparseVector(size, indices.clone, data.clone)
}
/** Returns the dot product of the recipient and the argument
*
* @param other a Vector
* @return a scalar double of dot product
*/
override def dot(other: Vector): Double = {
    require(size == other.size, "The sizes of the vectors must be equal.")
other match {
case DenseVector(otherData) =>
indices.zipWithIndex.map { case (sparseIdx, idx) => data(idx) * otherData(sparseIdx) }.sum
case SparseVector(_, otherIndices, otherData) =>
var left = 0
var right = 0
var result = 0.0
while (left < indices.length && right < otherIndices.length) {
if (indices(left) < otherIndices(right)) {
left += 1
} else if (otherIndices(right) < indices(left)) {
right += 1
} else {
result += data(left) * otherData(right)
left += 1
right += 1
}
}
result
}
}
/** Returns the outer product (a.k.a. Kronecker product) of `this` with `other`. The result is
* given in [[SparseMatrix]] representation.
*
* @param other a [[Vector]]
* @return the [[SparseMatrix]] which equals the outer product of `this` with `other.`
*/
override def outer(other: Vector): SparseMatrix = {
val numRows = size
val numCols = other.size
val entries = other match {
case sv: SparseVector =>
for {
(i, k) <- indices.zipWithIndex
(j, l) <- sv.indices.zipWithIndex
value = data(k) * sv.data(l)
if value != 0
} yield (i, j, value)
case _ =>
for {
(i, k) <- indices.zipWithIndex
j <- 0 until numCols
value = data(k) * other(j)
if value != 0
} yield (i, j, value)
}
SparseMatrix.fromCOO(numRows, numCols, entries)
}
/** Magnitude of a vector
*
* @return The length of the vector
*/
override def magnitude: Double = math.sqrt(data.map(x => x * x).sum)
/** Element wise access function
*
   * @param index index of the accessed element
   * @return the element at the given index
*/
override def apply(index: Int): Double = {
val resolvedIndex = locate(index)
if(resolvedIndex < 0) {
0
} else {
data(resolvedIndex)
}
}
/** Converts the [[SparseVector]] to a [[DenseVector]]
*
* @return The DenseVector out of the SparseVector
*/
def toDenseVector: DenseVector = {
val denseVector = DenseVector.zeros(size)
for(index <- 0 until size) {
denseVector(index) = this(index)
}
denseVector
}
override def equals(obj: Any): Boolean = {
obj match {
case sv: SparseVector if size == sv.size =>
indices.sameElements(sv.indices) && data.sameElements(sv.data)
case _ => false
}
}
override def hashCode: Int = {
val hashCodes = List(size.hashCode, java.util.Arrays.hashCode(indices),
java.util.Arrays.hashCode(data))
hashCodes.foldLeft(3){ (left, right) => left * 41 + right}
}
override def toString: String = {
val entries = indices.zip(data).mkString(", ")
"SparseVector(" + entries + ")"
}
private def locate(index: Int): Int = {
require(0 <= index && index < size, index + " not in [0, " + size + ")")
java.util.Arrays.binarySearch(indices, 0, indices.length, index)
}
}
object SparseVector {
/** Constructs a sparse vector from a coordinate list (COO) representation where each entry
* is stored as a tuple of (index, value).
*
* @param size The number of elements in the vector
* @param entries The values in the vector
* @return a new [[SparseVector]]
*/
def fromCOO(size: Int, entries: (Int, Double)*): SparseVector = {
fromCOO(size, entries)
}
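  // Example (illustrative sketch):
  //   val v = SparseVector.fromCOO(5, (0, 1.0), (3, 2.0))
  //   v(3)     // 2.0
  //   v.dot(v) // 5.0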
/** Constructs a sparse vector from a coordinate list (COO) representation where each entry
* is stored as a tuple of (index, value).
*
* @param size The number of elements in the vector
* @param entries An iterator supplying the values in the vector
* @return a new [[SparseVector]]
*/
def fromCOO(size: Int, entries: Iterable[(Int, Double)]): SparseVector = {
    val entryArray = entries.toArray
    // Guard against an empty entry list, which would otherwise crash when reading entryArray(0)
    if (entryArray.isEmpty) {
      return new SparseVector(size, Array.empty[Int], Array.empty[Double])
    }
    entryArray.foreach { case (index, _) =>
      require(0 <= index && index < size, index + " not in [0, " + size + ")")
    }
val COOOrdering = new Ordering[(Int, Double)] {
override def compare(x: (Int, Double), y: (Int, Double)): Int = {
x._1 - y._1
}
}
Sorting.quickSort(entryArray)(COOOrdering)
// calculate size of the array
val arraySize = entryArray.foldLeft((-1, 0)){ case ((lastIndex, numRows), (index, _)) =>
if(lastIndex == index) {
(lastIndex, numRows)
} else {
(index, numRows + 1)
}
}._2
val indices = new Array[Int](arraySize)
val data = new Array[Double](arraySize)
val (index, value) = entryArray(0)
indices(0) = index
data(0) = value
var i = 1
var lastIndex = indices(0)
var lastDataIndex = 0
while(i < entryArray.length) {
val (curIndex, curValue) = entryArray(i)
if(curIndex == lastIndex) {
data(lastDataIndex) += curValue
} else {
lastDataIndex += 1
data(lastDataIndex) = curValue
indices(lastDataIndex) = curIndex
lastIndex = curIndex
}
i += 1
}
new SparseVector(size, indices, data)
}
/** Convenience method to be able to instantiate a SparseVector with a single element. The Scala
* type inference mechanism cannot infer that the second tuple value has to be of type Double
* if only a single tuple is provided.
*
* @param size The number of elements in the vector
* @param entry The value in the vector
* @return a new [[SparseVector]]
*/
def fromCOO(size: Int, entry: (Int, Int)): SparseVector = {
fromCOO(size, (entry._1, entry._2.toDouble))
}
/** BreezeVectorConverter implementation for [[org.apache.flink.ml.math.SparseVector]]
*
* This allows to convert Breeze vectors into [[SparseVector]]
*/
implicit val sparseVectorConverter = new BreezeVectorConverter[SparseVector] {
override def convert(vector: BreezeVector[Double]): SparseVector = {
vector match {
case dense: BreezeDenseVector[Double] =>
SparseVector.fromCOO(
dense.length,
dense.iterator.toIterable)
case sparse: BreezeSparseVector[Double] =>
new SparseVector(
sparse.length,
sparse.index.take(sparse.used),
sparse.data.take(sparse.used))
}
}
}
}
| oscarceballos/flink-1.3.2 | flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/math/SparseVector.scala | Scala | apache-2.0 | 8,588 |
import sbt._
import Keys._
import com.typesafe.sbt.packager.docker.{Cmd, DockerKeys}
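/** AutoPlugin applying shared Docker packaging defaults (base image and common image commands) to every project in the build. */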
object CommonDockerSettingsPlugin extends AutoPlugin with DockerKeys {
override def trigger = allRequirements
override def requires = com.typesafe.sbt.packager.docker.DockerPlugin
override lazy val projectSettings = Seq(
dockerBaseImage := "dockerfile/java:oracle-java8",
dockerCommands ++= Seq(
Cmd("MAINTAINER", "Pawel Kaczor <newion@o2.pl>"),
Cmd("ENV", "ES_HOST=127.0.0.1"),
Cmd("ENV", "ES_PASSWORD=changeit")
)
)
} | odd/ddd-leaven-akka-v2 | project/CommonDockerSettingsPlugin.scala | Scala | mit | 555 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.pipes
import org.neo4j.cypher.internal.compiler.v2_3.ExecutionContext
import org.neo4j.cypher.internal.compiler.v2_3.executionplan.{ReadsAllNodes, Effects, ReadsRelationships}
import org.neo4j.cypher.internal.compiler.v2_3.planDescription.InternalPlanDescription.Arguments.ExpandExpression
import org.neo4j.cypher.internal.frontend.v2_3.{SemanticDirection, InternalException}
import org.neo4j.cypher.internal.frontend.v2_3.symbols._
import org.neo4j.graphdb.{Node, Relationship}
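/**
 * Expands every incoming row along the relationships of the given direction and types,
 * starting from the node bound to `fromName` and emitting one row per matching relationship,
 * with the relationship and its end node bound to `relName` and `toName`.
 */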
case class ExpandAllPipe(source: Pipe,
fromName: String,
relName: String,
toName: String,
dir: SemanticDirection,
types: LazyTypes)(val estimatedCardinality: Option[Double] = None)
(implicit pipeMonitor: PipeMonitor)
extends PipeWithSource(source, pipeMonitor) with RonjaPipe {
protected def internalCreateResults(input: Iterator[ExecutionContext], state: QueryState): Iterator[ExecutionContext] = {
input.flatMap {
row =>
getFromNode(row) match {
case n: Node =>
val relationships: Iterator[Relationship] = state.query.getRelationshipsForIds(n, dir, types.types(state.query))
          relationships.map { r =>
            row.newWith2(relName, r, toName, r.getOtherNode(n))
          }
case null => None
case value => throw new InternalException(s"Expected to find a node at $fromName but found $value instead")
}
}
}
def typeNames = types.names
def getFromNode(row: ExecutionContext): Any =
row.getOrElse(fromName, throw new InternalException(s"Expected to find a node at $fromName but found nothing"))
def planDescriptionWithoutCardinality =
source.planDescription.andThen(this.id, "Expand(All)", identifiers, ExpandExpression(fromName, relName, typeNames, toName, dir))
val symbols = source.symbols.add(toName, CTNode).add(relName, CTRelationship)
override def localEffects = Effects(ReadsAllNodes, ReadsRelationships)
def dup(sources: List[Pipe]): Pipe = {
val (source :: Nil) = sources
copy(source = source)(estimatedCardinality)
}
def withEstimatedCardinality(estimated: Double) = copy()(Some(estimated))
}
| HuangLS/neo4j | community/cypher/cypher-compiler-2.3/src/main/scala/org/neo4j/cypher/internal/compiler/v2_3/pipes/ExpandAllPipe.scala | Scala | apache-2.0 | 3,136 |
// Project Euler #39: if p is the perimeter of a right triangle with integral
// side lengths {a, b, c}, find the p <= 1000 with the most solutions.
val legs = for (a <- 1 to 1000; b <- 1 to 1000 if a <= b) yield (a, b)
def hypotenuse(a: Int, b: Int) = {
  math.sqrt(a * a + b * b)
}
val results = legs
  .map(x => (x._1, x._2, hypotenuse(x._1, x._2)))
.filter(_._3.isWhole)
.map(x => x._1 + x._2 + x._3.toInt)
.filter(_ <= 1000)
.groupBy(identity)
.maxBy(_._2.length)
._1
println(results) | brandonhorst/project-euler-scala | 039.scala | Scala | mit | 353 |
package io.skysail.server.app.bookmarks.resources
import java.util.UUID
import akka.actor.{ActorSelection, ActorSystem}
import akka.http.scaladsl.model.HttpMethods
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import io.skysail.domain._
import io.skysail.domain.messages.ProcessCommand
import io.skysail.domain.resources.{EntityResource, PostResource, PutResource}
import io.skysail.server.app.bookmarks.BookmarksApplication
import io.skysail.server.app.bookmarks.domain.{Bookmark, BookmarkList, HttpResource}
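/** Read-only resource returning the complete list of bookmarks as a [[BookmarkList]]. */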
class BookmarksResource extends EntityResource[BookmarksApplication, BookmarkList] {
override def getEntity(re: RequestEvent) = Some(BookmarkList(getApplication().repo.find()))
}
class PostBookmarkResource extends PostResource[BookmarksApplication, Bookmark] {
def get(requestEvent: RequestEvent): ResponseEvent[Bookmark] = {
ResponseEvent(requestEvent, Bookmark(None, "", ""))
}
def post(requestEvent: RequestEvent)(implicit actorSystem: ActorSystem): ResponseEventBase = {
    val bookmark = requestEvent.cmd.entity.asInstanceOf[Bookmark]
//val bmWithMetadata = BookmarksService.addMetadata(bookmark)
val b = getApplication().repo.save(bookmark.copy(root = HttpResource(bookmark.url)))
// getApplication().eventService.send("bookmark created")
val redirectTo = Some("/bookmarks/v1/bms")
if (requestEvent.cmd.ctx != null) {
val newRequest = requestEvent.cmd.ctx.request.copy(method = HttpMethods.GET)
RedirectResponseEvent(requestEvent, "", redirectTo)
} else {
AsyncResponseEvent(requestEvent)
}
}
override def createRoute(applicationActor: ActorSelection, processCommand: ProcessCommand)(implicit system: ActorSystem): Route = {
formFieldMap { map =>
val entity = Bookmark(Some(UUID.randomUUID().toString), map.getOrElse("title", "Unknown"), map.getOrElse("url", "Unknown"))
super.createRoute(applicationActor, processCommand.copy(entity = entity))
}
}
}
class PutBookmarkResource extends PutResource[BookmarksApplication, Bookmark] {
override def get(requestEvent: RequestEvent): ResponseEvent[Bookmark] = {
val optionalBookmark = getApplication().repo.find(requestEvent.cmd.urlParameter.head)
ResponseEvent(requestEvent, optionalBookmark.get)
}
override def put(requestEvent: RequestEvent)(implicit system: ActorSystem): ResponseEventBase = {
val optionalBookmark = getApplication().repo.find(requestEvent.cmd.urlParameter.head)
val updatedBookmark = requestEvent.cmd.entity.asInstanceOf[Bookmark]
val bookmarkToSave = updatedBookmark.copy(id = optionalBookmark.get.id)
getApplication().repo.save(bookmarkToSave)
null
}
override def createRoute(applicationActor: ActorSelection, processCommand: ProcessCommand)(implicit system: ActorSystem): Route = {
formFieldMap { map =>
val entity = Bookmark(Some(UUID.randomUUID().toString), map.getOrElse("title", "Unknown"), map.getOrElse("url", "Unknown"))
super.createRoute(applicationActor, processCommand.copy(entity = entity))
}
}
override def delete(requestEvent: RequestEvent): ResponseEventBase = {
getApplication().repo.delete(requestEvent.cmd.urlParameter.head)
RedirectResponseEvent(requestEvent, "", Some("/bookmarks/v1/bms"))
}
}
class BookmarkResource extends EntityResource[BookmarksApplication, Bookmark] {
//override def get(requestEvent: RequestEvent) = {
override def getEntity(re: RequestEvent): Option[Bookmark] = {
val app: BookmarksApplication = getApplication()
val optionalBookmark = app.repo.find(re.cmd.urlParameter.head)
val bm = optionalBookmark.getOrElse(Bookmark(None, "undef", "undef"))
// if (bm.url.contains("$")) {
// val pattern = new Regex("\\$\\{(.*?)}")
// val matchList = (pattern findAllIn bm.url).toList
// val hits = matchList.map(hit => hit.substring(2, hit.length - 1)).map(hit => hit -> app.getList(hit))
// val variant1 = new Bookmark(None, "var", "1")
//
// val lists = hits
// .map(hit => hit._2.map(sub => bm.url.replace("${" + hit._1 + "}", sub)))
// .flatten.toList
//
// val variants = lists.map(l => Bookmark(None, "-", l)).toList
//
//
// val bmWithVariants = new Bookmark(bm.id, "*" + bm.title + "*", bm.url/*, variants*/)
//
// //ResponseEvent(requestEvent, bmWithVariants)
// Some(bmWithVariants)
// } else {
//ResponseEvent(requestEvent, bm)
Some(bm)
//}
}
} | evandor/skysail-apps | skysail.server.app.bookmarks/src/io/skysail/server/app/bookmarks/resources/bookmarks.scala | Scala | apache-2.0 | 4,550 |
package malgo
import org.apache.spark.mllib.feature.StandardScaler
import org.apache.spark.mllib.util.MLUtils._
import org.apache.spark.{SparkException, Logging}
import org.apache.spark.annotation.{DeveloperApi, Experimental}
import org.apache.spark.mllib.classification.ClassificationModel
import org.apache.spark.mllib.linalg.{Vectors, Vector}
import org.apache.spark.mllib.optimization.{Optimizer, SquaredL2Updater, LogisticGradient, LBFGS}
import org.apache.spark.mllib.regression.{LabeledPoint, GeneralizedLinearModel}
import org.apache.spark.mllib.util.DataValidators
import org.apache.spark.mllib.classification.{LogisticRegressionModel=>LRModel}
import common.mutil
import org.apache.spark.rdd.RDD
/**
* Classification model trained using Logistic Regression.
*
* @param weights Weights computed for every feature.
* @param intercept Intercept computed for this model.
*/
class LogisticRegressionModel (
override val weights: Vector,
override val intercept: Double)
extends LRModel(weights, intercept) with Serializable {
private var threshold: Option[Double] = Some(0.5)
/**
* :: Experimental ::
* Sets the threshold that separates positive predictions from negative predictions. An example
* with prediction score greater than or equal to this threshold is identified as an positive,
* and negative otherwise. The default value is 0.5.
*/
@Experimental
override def setThreshold(threshold: Double): this.type = {
this.threshold = Some(threshold)
this
}
/**
* :: Experimental ::
* Clears the threshold so that `predict` will output raw prediction scores.
*/
@Experimental
override def clearThreshold(): this.type = {
threshold = None
this
}
override protected def predictPoint(dataMatrix: Vector, weightMatrix: Vector,
intercept: Double) = {
val margin = mutil.vectorToBreeze(weightMatrix).dot(mutil.vectorToBreeze(dataMatrix)) + intercept
val score = 1.0 / (1.0 + math.exp(-margin))
threshold match {
case Some(t) => if (score < t) 0.0 else 1.0
case None => score
}
}
}
/**
* :: DeveloperApi ::
* GeneralizedLinearAlgorithm implements methods to train a Generalized Linear Model (GLM).
* This class should be extended with an Optimizer to create a new GLM.
*/
@DeveloperApi
abstract class GeneralizedLinearAlgorithm[M <: GeneralizedLinearModel]
extends Logging with Serializable {
protected val validators: Seq[RDD[LabeledPoint] => Boolean] = List()
/** The optimizer to solve the problem. */
def optimizer: Optimizer
/** Whether to add intercept (default: false). */
protected var addIntercept: Boolean = false
protected var validateData: Boolean = true
/**
   * Whether to perform feature scaling before model training to reduce the condition number,
   * which can significantly help the optimizer converge faster. The scaling correction will be
   * translated back to the resulting model weights, so it's transparent to users.
   * Note: This technique is used in both the libsvm and glmnet packages. Default false.
*/
private var useFeatureScaling = false
/**
* Set if the algorithm should use feature scaling to improve the convergence during optimization.
*/
protected def setFeatureScaling(useFeatureScaling: Boolean): this.type = {
this.useFeatureScaling = useFeatureScaling
this
}
/**
* Create a model given the weights and intercept
*/
protected def createModel(weights: Vector, intercept: Double): M
/**
* Set if the algorithm should add an intercept. Default false.
* We set the default to false because adding the intercept will cause memory allocation.
*/
def setIntercept(addIntercept: Boolean): this.type = {
this.addIntercept = addIntercept
this
}
/**
* Set if the algorithm should validate data before training. Default true.
*/
def setValidateData(validateData: Boolean): this.type = {
this.validateData = validateData
this
}
/**
* Run the algorithm with the configured parameters on an input
* RDD of LabeledPoint entries.
*/
def run(input: RDD[LabeledPoint]): M = {
val numFeatures: Int = input.first().features.size
val initialWeights = Vectors.dense(new Array[Double](numFeatures))
run(input, initialWeights)
}
/**
* Run the algorithm with the configured parameters on an input RDD
* of LabeledPoint entries starting from the initial weights provided.
*/
def run(input: RDD[LabeledPoint], initialWeights: Vector): M = {
// Check the data properties before running the optimizer
if (validateData && !validators.forall(func => func(input))) {
throw new SparkException("Input validation failed.")
}
/**
* Scaling columns to unit variance as a heuristic to reduce the condition number:
*
* During the optimization process, the convergence (rate) depends on the condition number of
* the training dataset. Scaling the variables often reduces this condition number
* heuristically, thus improving the convergence rate. Without reducing the condition number,
* some training datasets mixing the columns with different scales may not be able to converge.
*
* GLMNET and LIBSVM packages perform the scaling to reduce the condition number, and return
* the weights in the original scale.
* See page 9 in http://cran.r-project.org/web/packages/glmnet/glmnet.pdf
*
* Here, if useFeatureScaling is enabled, we will standardize the training features by dividing
* the variance of each column (without subtracting the mean), and train the model in the
* scaled space. Then we transform the coefficients from the scaled space to the original scale
* as GLMNET and LIBSVM do.
*
* Currently, it's only enabled in LogisticRegressionWithLBFGS
*/
val scaler = if (useFeatureScaling) {
(new StandardScaler).fit(input.map(x => x.features))
} else {
null
}
// Prepend an extra variable consisting of all 1.0's for the intercept.
val data = if (addIntercept) {
if(useFeatureScaling) {
input.map(labeledPoint =>
(labeledPoint.label, appendBias(scaler.transform(labeledPoint.features))))
} else {
input.map(labeledPoint => (labeledPoint.label, appendBias(labeledPoint.features)))
}
} else {
if (useFeatureScaling) {
input.map(labeledPoint => (labeledPoint.label, scaler.transform(labeledPoint.features)))
} else {
input.map(labeledPoint => (labeledPoint.label, labeledPoint.features))
}
}
val initialWeightsWithIntercept = if (addIntercept) {
appendBias(initialWeights)
} else {
initialWeights
}
val weightsWithIntercept = optimizer.optimize(data, initialWeightsWithIntercept)
val intercept = if (addIntercept) weightsWithIntercept(weightsWithIntercept.size - 1) else 0.0
var weights =
if (addIntercept) {
Vectors.dense(weightsWithIntercept.toArray.slice(0, weightsWithIntercept.size - 1))
} else {
weightsWithIntercept
}
/**
* The weights and intercept are trained in the scaled space; we're converting them back to
* the original scale.
*
* Math shows that if we only perform standardization without subtracting means, the intercept
* will not be changed. w_i = w_i' / v_i where w_i' is the coefficient in the scaled space, w_i
* is the coefficient in the original space, and v_i is the variance of the column i.
*/
if (useFeatureScaling) {
weights = scaler.transform(weights)
}
createModel(weights, intercept)
}
}
/**
* Created by 58 on 2015/11/9.
*/
class LR_LBFGS extends GeneralizedLinearAlgorithm[LRModel] with Serializable {
this.setFeatureScaling(true)
override val optimizer = new LBFGS(new LogisticGradient, new SquaredL2Updater)
override protected val validators = List(DataValidators.binaryLabelValidator)
override protected def createModel(weights: Vector, intercept: Double) = {
new LRModel(weights, intercept)
}
}
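// Example usage (illustrative sketch; `training` is an RDD[LabeledPoint]):
//   val model = new LR_LBFGS().setIntercept(true).run(training)
//   val prediction = model.predict(testFeatures)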
| hu17889/mllib_subpackage | src/main/scala/malgo/LR.scala | Scala | apache-2.0 | 8,193 |
package memnets.model
trait GridBase[+T <: Yb] extends GridLike[T] with GridData {
def owner: DynamicSystem
def act(te: Tick, r: Int, c: Int): Double
def yScale: Float = apply(0).ui.scale.getOrElse(YRange.scaleF)
override def subGrid(rDivs: Int, cDivs: Int): GridBase[T] = {
val rDim = if (rDivs < rows) rDivs else rows
val cDim = if (cDivs < cols) cDivs else cols
val rStride = Math.max(1.0, (rows - 1).toDouble / (rDim - 1).toDouble)
val cStride = Math.max(1.0, (cols - 1).toDouble / (cDim - 1).toDouble)
subGrid(0, 0, rDim, cDim, rStride, cStride, centered = false)
}
override def subGrid(
r: Int,
c: Int,
rDim: Int,
cDim: Int,
rStride: Double = 1.0,
cStride: Double = 1.0,
centered: Boolean = true): GridBase[T] = {
val parent = this
object sub extends ElementBase with GridBase[T] {
name = "sub" + parent.name
loc = parent.ui.loc
val rOff: Int = if (centered) (-rStride * rDim).toInt / 2 else 0
val cOff: Int = if (centered) (-cStride * cDim).toInt / 2 else 0
val rows: Int = rDim
val cols: Int = cDim
def apply(i: Int, j: Int): T = {
      val r2 = rOff + r + (i * rStride).toInt
      val c2 = cOff + c + (j * cStride).toInt
parent.apply(r2, c2)
}
override def preRender(te: Tick): Boolean = parent.preRender(te)
    def act(te: Tick, i: Int, j: Int): Double =
      parent.act(te, rOff + r + (i * rStride).toInt, cOff + c + (j * cStride).toInt)
def owner: DynamicSystem = parent.owner
def hints: GridHints = parent.hints
def hints_=(hints: GridHints): Unit = parent.hints = hints
}
sub
}
}
| MemoryNetworks/memnets | api/src/main/scala/memnets/model/GridBase.scala | Scala | apache-2.0 | 1,728 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.yarn
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import org.apache.spark.SparkConf
import org.apache.spark.deploy.yarn.security.YARNHadoopDelegationTokenManager
/**
* Fake HadoopDelegationTokenManager to mock a credential provider.
*
 * @note [[YARNHadoopDelegationTokenManager]] is meant to be private; therefore, it is likely
 *       to break between minor versions. This class targets testing against Spark 2.3,
 *       but if it becomes difficult to maintain, maybe we should just remove this class and
 *       `TestHiveServer2CredentialProvider`.
*/
class FakeYARNHadoopDelegationTokenManager(
sparkConf: SparkConf,
hadoopConf: Configuration,
fileSystems: Configuration => Set[FileSystem])
extends YARNHadoopDelegationTokenManager(sparkConf, hadoopConf, fileSystems)
| hortonworks-spark/spark-llap | src/test/scala/org/apache/spark/deploy/yarn/FakeYARNHadoopDelegationTokenManager.scala | Scala | apache-2.0 | 1,655 |
import scala.reflect.runtime.universe._
import scala.annotation._
class sann(x: Int, y: List[Int]) extends StaticAnnotation
class jann(x: Int, y: Array[Int]) extends ClassfileAnnotation
@sann(1, List(1, 2))
class S
@jann(y = Array(1, 2), x = 2)
class J
object Test extends App {
println(symbolOf[S].annotations.head.tree)
println(symbolOf[J].annotations.head.tree)
}
| som-snytt/dotty | tests/disabled/reflect/run/reflection-scala-annotations.scala | Scala | apache-2.0 | 375 |
package org.mozartoz.bootcompiler
package oz
/** Compile-time constant */
sealed trait OzValue {
def syntax(): String
override def toString() = syntax()
}
/** Compile-time constant that can be used as a feature */
sealed trait OzFeature extends OzValue {
/** Compare two features for their ordering in a record */
def feature_<(that: OzFeature) = {
(this, that) match {
case (OzInt(l), OzInt(r)) => l < r
case (OzAtom(l), OzAtom(r)) => l.compareTo(r) < 0
case (l:BuiltinName, r:BuiltinName) => l.tag.compareTo(r.tag) < 0
case _ => typeRank(this) < typeRank(that)
}
}
/** Rank of a feature type */
private def typeRank(feature: OzFeature): Int = {
feature match {
case _:OzInt => 1
case _:OzAtom => 2
case _:BuiltinName => 3
}
}
}
/** Oz number */
sealed trait OzNumber extends OzValue
/** Oz integer */
case class OzInt(value: Long) extends OzNumber with OzFeature {
def syntax() = value.toString()
}
/** Oz float */
case class OzFloat(value: Double) extends OzNumber {
def syntax() = value.toString()
}
/** Oz literal */
sealed trait OzLiteral extends OzValue with OzFeature
/** Oz atom */
case class OzAtom(value: String) extends OzLiteral {
  def syntax() = "'" + ast.escapePseudoChars(value, '\'') + "'"
}
/** Abstract base class for builtin names */
sealed abstract class BuiltinName(val tag: String) extends OzLiteral {
def syntax() = tag
}
/** The `true` value */
case class True() extends BuiltinName("true")
/** The `false` value */
case class False() extends BuiltinName("false")
/** The `unit` value */
case class UnitVal() extends BuiltinName("unit")
/** Arity of a record */
case class OzArity(label: OzLiteral,
features: List[OzFeature]) extends OzValue {
def syntax() =
"<Arity/" + (if (features.isEmpty) label else toTuple).syntax() + ">"
/** Width of this arity, aka number of features */
val width = features.size
/** Returns true if this is the arity of a tuple */
val isTupleArity = {
features.zipWithIndex forall {
case (OzInt(feature), index) if feature == index+1 => true
case _ => false
}
}
/** Returns true if this is the arity of a cons */
val isConsArity =
isTupleArity && (width == 2) && (label == OzAtom("|"))
/** Returns an Oz tuple that represents this arity */
lazy val toTuple = OzTuple(label, features)
}
/** Field of an Oz record */
case class OzRecordField(feature: OzFeature, value: OzValue) {
def syntax() = feature.syntax() + ":" + value.syntax()
}
/** Oz record */
case class OzRecord(label: OzLiteral,
fields: List[OzRecordField]) extends OzValue {
require(!fields.isEmpty)
def syntax() = {
val untilFirstField = label.syntax() + "(" + fields.head.syntax()
fields.tail.foldLeft(untilFirstField) {
(prev, field) => prev + " " + field.syntax()
} + ")"
}
/** Arity of this record */
lazy val arity = OzArity(label, fields map (_.feature))
/** Values in this record */
lazy val values = fields map (_.value)
/** Returns true if this is a tuple */
def isTuple = arity.isTupleArity
/** Returns true if this is a cons */
def isCons = arity.isConsArity
/** Map from features to values */
private lazy val map = Map((fields map (x => x.feature -> x.value)):_*)
/** Returns the value stored at the given `feature` in this record.
*
* @return [[scala.None]] if the feature does not belong to this record
*/
def select(feature: OzFeature): Option[OzValue] =
map.get(feature)
}
/** Factory and pattern matching for Oz tuples */
object OzTuple extends ((OzLiteral, List[OzValue]) => OzRecord) {
def apply(label: OzLiteral, fields: List[OzValue]) = {
val recordFields =
for ((value, index) <- fields.zipWithIndex)
yield OzRecordField(OzInt(index+1), value)
OzRecord(label, recordFields)
}
def unapply(record: OzRecord) = {
if (record.isTuple) Some((record.label, record.fields map (_.value)))
else None
}
}
/** Factory and pattern matching for Oz conses */
object OzCons extends ((OzValue, OzValue) => OzRecord) {
def apply(head: OzValue, tail: OzValue) =
OzTuple(OzAtom("|"), List(head, tail))
def unapply(record: OzRecord) = {
if (record.isCons) Some((record.fields(0).value, record.fields(1).value))
else None
}
}
/** Factory and pattern matching for #-tuples */
object OzSharp extends (List[OzValue] => OzRecord) {
def apply(fields: List[OzValue]) =
OzTuple(OzAtom("#"), fields)
def unapply(record: OzRecord) = record match {
case OzTuple(OzAtom("#"), fields) => Some(fields)
case _ => None
}
}
/** Factory for Oz lists */
object OzList extends (List[OzValue] => OzValue) {
def apply(elems: List[OzValue]): OzValue =
if (elems.isEmpty) OzAtom("nil")
else OzCons(elems.head, OzList(elems.tail))
}
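// Example (illustrative): OzList(List(OzInt(1), OzInt(2))) yields
// OzCons(OzInt(1), OzCons(OzInt(2), OzAtom("nil"))), i.e. the Oz list '|'(1 '|'(2 nil)).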
/** Oz value representing a builtin */
case class OzBuiltin(builtin: symtab.Builtin) extends OzValue {
def syntax() = builtin.toString()
}
/** Oz code area */
case class OzCodeArea(codeArea: bytecode.CodeArea) extends OzValue {
def syntax() = codeArea.toString()
}
/** Special value representing a wildcard in a pattern */
case class OzPatMatWildcard() extends OzValue {
def syntax() = "_"
}
/** Special value representing a capture in a pattern */
case class OzPatMatCapture(variable: symtab.Symbol) extends OzValue {
def syntax() = variable.toString()
}
/** Special value representing a pattern conjunction */
case class OzPatMatConjunction(parts: List[OzValue]) extends OzValue {
def syntax() = {
if (parts.isEmpty) "_"
else {
parts.tail.foldLeft(parts.head.syntax()) {
(prev, part) => prev + " = " + part.syntax()
}
}
}
}
/** Special value representing an open record pattern */
case class OzPatMatOpenRecord(label: OzLiteral,
fields: List[OzRecordField]) extends OzValue {
def syntax() = {
if (fields.isEmpty) {
label.syntax() + "(...)"
} else {
val untilFirstField = label.syntax() + "(" + fields.head.syntax()
fields.tail.foldLeft(untilFirstField) {
(prev, field) => prev + " " + field.syntax()
} + " ...)"
}
}
/** Arity of this record */
lazy val arity = OzArity(label, fields map (_.feature))
/** Sub-patterns in this pattern */
lazy val values = fields map (_.value)
}
/** Oz abstraction */
case class OzAbstraction(codeArea: OzCodeArea,
globals: List[OzValue]) extends OzValue {
def syntax() = {
val abstraction = codeArea.codeArea.abstraction
"<P/" + abstraction.arity + " " + abstraction.fullName + ">"
}
}
| yjaradin/mozart2 | bootcompiler/src/main/scala/org/mozartoz/bootcompiler/oz/OzValue.scala | Scala | bsd-2-clause | 6,597 |
package com.arcusys.valamis.persistence.impl.settings
import com.arcusys.valamis.persistence.common.SlickProfile
import com.arcusys.valamis.settings.model.StatementToActivity
import com.arcusys.valamis.settings.storage.StatementToActivityStorage
import scala.slick.driver.JdbcProfile
import scala.slick.jdbc.JdbcBackend
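/** Slick-backed implementation of [[StatementToActivityStorage]]. */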
class StatementToActivityStorageImpl(val db: JdbcBackend#DatabaseDef,
val driver: JdbcProfile)
extends StatementToActivityStorage
with SlickProfile
with StatementToActivityTableComponent {
import driver.simple._
override def getAll: Seq[StatementToActivity] = {
db.withSession { implicit s =>
statementToActivity.list
}
}
def getById(id: Long): Option[StatementToActivity] = {
db.withSession { implicit s =>
statementToActivity.filter(_.id === id).firstOption
}
}
def getByCourseId(courseId: Long): Seq[StatementToActivity] = {
db.withSession { implicit s =>
statementToActivity.filter(_.courseId === courseId).list
}
}
def create(entity: StatementToActivity): StatementToActivity = {
db.withSession { implicit s =>
val newId = (statementToActivity returning statementToActivity.map(_.id)) += entity
entity.copy(id = Option(newId))
}
}
def modify(entity: StatementToActivity) = db.withSession { implicit s =>
statementToActivity.filter(_.id === entity.id).map(_.update).update(entity)
}
def delete(id: Long) = db.withSession { implicit s =>
statementToActivity.filter(_.id === id).delete
}
}
| arcusys/Valamis | valamis-slick-persistence/src/main/scala/com/arcusys/valamis/persistence/impl/settings/StatementToActivityStorageImpl.scala | Scala | gpl-3.0 | 1,563 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.repository
import java.io.File
import com.normation.rudder.domain.nodes.NodeGroup
import com.normation.rudder.domain.nodes.NodeGroupCategory
import com.normation.rudder.domain.policies.Directive
import com.normation.rudder.domain.policies.ActiveTechnique
import com.normation.rudder.domain.policies.ActiveTechniqueCategory
import net.liftweb.common.Box
import com.normation.rudder.domain.policies.Rule
import com.normation.rudder.domain.parameters.GlobalParameter
import com.normation.rudder.rule.category.RuleCategory
/**
* A category of the technique library.
*
*/
case class ActiveTechniqueCategoryContent(
category : ActiveTechniqueCategory
, categories: Set[ActiveTechniqueCategoryContent]
, templates : Set[ActiveTechniqueContent]
)
case class ActiveTechniqueContent(
activeTechnique : ActiveTechnique
, directives : Set[Directive]
)
/**
* Identifier for user library archive
*/
case class ActiveTechniqueLibraryArchiveId(value:String)
/**
* That trait allows to manage the import of active techniques library
* (categories, templates, directives) from the File System into
* the LDAP.
*/
trait ParseActiveTechniqueLibrary {
  /**
   * That method parses the active technique library (categories, techniques,
   * directives) from the file system for an archive with the given ID.
   */
def getArchive(archiveId:GitCommitId) : Box[ActiveTechniqueCategoryContent]
}
/**
* That trait allows to manage the import of rules
* from the File System into the LDAP.
* That part read the last CR archive.
*/
trait ParseRules {
/**
   * That method parses rules from the
* file system for an archive with the given ID.
*/
def getArchive(archiveId:GitCommitId) : Box[Seq[Rule]]
}
/**
* That trait allows to manage the import of Rule categories
* from the File System into the LDAP.
* That part read the last Rule category archive.
*/
trait ParseRuleCategories {
/**
   * That method parses the Rule category from the
* file system for an archive with the given ID.
*/
def getArchive(archiveId:GitCommitId) : Box[RuleCategory]
}
/**
* That trait allows to manage the import of Global Parameters
* from the File System into the LDAP.
* That part read the last Parameters archive.
*/
trait ParseGlobalParameters {
/**
   * That method parses global parameters from the
* file system for an archive with the given ID.
*/
def getArchive(archiveId:GitCommitId) : Box[Seq[GlobalParameter]]
}
/**
* A category of the group library.
*/
case class NodeGroupCategoryContent(
category : NodeGroupCategory
, categories: Set[NodeGroupCategoryContent]
, groups : Set[NodeGroup]
)
/**
* Identifier for user library archive
*/
case class NodeGroupLibraryArchiveId(value:String)
trait ParseGroupLibrary {
/**
   * That method parses a group library from the
* file system for an archive with the given ID.
*/
def getArchive(archiveId:GitCommitId) : Box[NodeGroupCategoryContent]
}
trait ImportTechniqueLibrary {
/**
   * That method swaps an existing active technique library in LDAP
   * with a new one.
*
* In case of error, we try to restore the old technique library.
*/
def swapActiveTechniqueLibrary(rootCategory: ActiveTechniqueCategoryContent, includeSystem: Boolean = false) : Box[Unit]
}
trait ImportGroupLibrary {
/**
   * That method swaps an existing group library in LDAP
   * with a new one.
   *
   * In case of error, we try to restore the old group library.
*/
def swapGroupLibrary(rootCategory: NodeGroupCategoryContent, includeSystem: Boolean = false) : Box[Unit]
}
| Kegeruneku/rudder | rudder-core/src/main/scala/com/normation/rudder/repository/ImportLibrary.scala | Scala | agpl-3.0 | 5,249 |
package com.hacktheburgh.commlog.actors
import akka.actor.{ActorRef, Actor}
import akka.event.Logging
import com.hacktheburgh.commlog.actors.messages.Commit
/**
* WebSocket handler actor for pushing JSON to clients.
*
* @author Arkan <arkan@drakon.io>
*/
class SocketActor(out: ActorRef) extends Actor {
val log = Logging(context.system, this)
override def receive = {
case x:Commit =>
out ! x.json
case x:String =>
// TODO: Remove me. Catch-and-ignore to avoid dead-letter warnings.
log.warning("Sending test payload.")
context.self ! new Commit("Test commit message", "Emberwalker/derplogs", "Emberwalker")
}
} | compsoc-edinburgh/Commlogs | app/com/hacktheburgh/commlog/actors/SocketActor.scala | Scala | mit | 659 |
package io.scalajs.nodejs.timers
import io.scalajs.nodejs.clearTimeout
import scala.scalajs.js
/**
* Timeout Handle
* @author lawrence.daniels@gmail.com
*/
@js.native
trait Timeout extends js.Object {
/**
* Indicates whether the timeout has been called
* @return true, if the timeout has already been called
*/
def _called: Boolean = js.native
}
/**
* Timeout Companion
* @author lawrence.daniels@gmail.com
*/
object Timeout {
/**
* Timeout Enrichment
* @param handle the given [[Timeout timeout]] handle
*/
implicit class TimeoutEnrichment(val handle: Timeout) extends AnyVal {
@inline
def clear(): Unit = clearTimeout(handle)
}
} | scalajs-io/nodejs | app/common/src/main/scala/io/scalajs/nodejs/timers/Timeout.scala | Scala | apache-2.0 | 695 |
/*
* Copyright (c) 2014 koiroha.org.
* All sources and related resources are available under Apache License 2.0.
* http://www.apache.org/licenses/LICENSE-2.0.html
*/
package org.asterisque
import org.specs2.Specification
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// PrioritySpec
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
/**
* @author Takami Torao
*/
class PrioritySpec extends AsterisqueSpec { def is = s2"""
Priority should:
  be a static utility class. ${beStaticUtility(classOf[Priority])}
have maximum, minimum, normal constant value. $e0
transfer lower and upper priority. $e1
""""
def e0 = {
(Priority.Max === Byte.MaxValue) and (Priority.Min === Byte.MinValue) and (Priority.Normal === 0)
}
def e1 = {
(Byte.MinValue to Byte.MaxValue).map{ i =>
(Priority.upper(i.toByte) === (if(i == Priority.Max) Priority.Max else i + 1)) and
(Priority.lower(i.toByte) === (if(i == Priority.Min) Priority.Min else i - 1))
}.reduce { _ and _ }
}
}
| torao/asterisque | core-scala/src/test/scala/org/asterisque/PrioritySpec.scala | Scala | apache-2.0 | 1,068 |
package bytecode.model
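// Models the class-file annotation attribute and its element_value union (JVMS §4.7.16).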
abstract class ElementValue
case class ConstValueIndex(constValueIndex: Integer) extends ElementValue
case class EnumConstValue(typeNameIndex: Integer, constNameIndex: Integer) extends ElementValue
case class ClassInfoIndex(classInfoIndex: Integer) extends ElementValue
case class AnnotationValue(annotationValue: Annotation) extends ElementValue
case class ArrayValue(values: Array[ElementValue]) extends ElementValue
case class Annotation(typeIndex: Integer, elementValuePairs: Array[(Integer, ElementValue)])
package operations.network.analysis
import models.Tag
import operations.persistance.Neo4j
import scala.collection.mutable.ListBuffer
object Metrics {
private def numberOfTagsRelatedTo(tagName1: String, tagName2: String): Long = {
val query1 =
"""match (t1:Tag)-[:`models.RelatedTagsEdge`]-(n:Tag)-[:`models.RelatedTagsEdge`]-(t2:Tag)
|where t1.name="""".stripMargin + tagName1 + """" and t2.name="""" + tagName2 + """"
|return count(distinct n)""".stripMargin
val query2 =
"""match (t1:Tag)-[:`models.SynonymTagsEdge`]-(:Tag)-[:`models.RelatedTagsEdge`]-(n:Tag)-[:`models.RelatedTagsEdge`]-(t2:Tag)
|where t1.name="""".stripMargin + tagName1 + """" and t2.name="""" + tagName2 + """"
|return count(distinct n)""".stripMargin
val query3 =
"""match (t1:Tag)-[:`models.RelatedTagsEdge`]-(n:Tag)-[:`models.RelatedTagsEdge`]-(:Tag)-[:`models.SynonymTagsEdge`]-(t2:Tag)
|where t1.name="""".stripMargin + tagName1 + """" and t2.name="""" + tagName2 + """"
|return count(distinct n)""".stripMargin
val query4 =
"""match (t1:Tag)-[:`models.SynonymTagsEdge`]-(:Tag)-[:`models.RelatedTagsEdge`]-(n:Tag)-[:`models.RelatedTagsEdge`]-(:Tag)-[:`models.SynonymTagsEdge`]-(t2:Tag)
|where t1.name="""".stripMargin + tagName1 + """" and t2.name="""" + tagName2 + """"
|return count(distinct n)""".stripMargin
Neo4j.executeCountQuery(query1) + Neo4j.executeCountQuery(query2) + Neo4j.executeCountQuery(query3) + Neo4j.executeCountQuery(query4)
}
private def numberOfTagsRelatedTo(tagName: String): Long = {
val query =
"""match (t:Tag)-[:`models.RelatedTagsEdge`]-(n:Tag)
|where t.name="""".stripMargin + tagName + """"
|return n
|UNION
|match (t:Tag)-[:`models.SynonymTagsEdge`]-()-[:`models.RelatedTagsEdge`]-(n:Tag)
|where t.name="""".stripMargin + tagName + """"
|return n""".stripMargin
Neo4j.executeCountOfUnion(query)
}
  /**
   * Method for calculating the Jaccard similarity between two tags:
   * |related(tag1) intersect related(tag2)| / |related(tag1) union related(tag2)| * 100.
   * @param tagName1 name of the first tag
   * @param tagName2 name of the second tag
   * @return number from 0 to 100, where 100 means very similar and 0 means not similar at all.
   */
def tagSimilarity(tagName1: String, tagName2: String): Double = {
val similarCount = numberOfTagsRelatedTo(tagName1, tagName2)
val sum = numberOfTagsRelatedTo(tagName1) + numberOfTagsRelatedTo(tagName2)
similarCount.toDouble / (sum - similarCount) * 100
}
  /**
   * Method for calculating the normalized pointwise mutual information (NPMI) between two
   * tags, based on the top tags asked about by each tag's top askers.
   * @param tagName1 name of the first tag
   * @param tagName2 name of the second tag
   * @return a value in [-1, 1], where 1 means the tags always co-occur and -1 means never.
   */
def pointMutualInformation(tagName1: String, tagName2: String): Double = {
val topAskersForTag1 = Neo4j.extractTopAskers(tagName1)
val topAskersForTag2 = Neo4j.extractTopAskers(tagName2)
val topTagsOfTopAskersForTag1 = ListBuffer.empty[Tag]
for (asker <- topAskersForTag1) {
topTagsOfTopAskersForTag1 ++= Neo4j.extractTopTags(asker.user_id.toString)
}
val topTagsOfTopAskersForTag2 = ListBuffer.empty[Tag]
for (asker <- topAskersForTag2) {
topTagsOfTopAskersForTag2 ++= Neo4j.extractTopTags(asker.user_id.toString)
}
    val tag1Related = topTagsOfTopAskersForTag1.distinct.length
    println("\tTag1Related: " + tag1Related)
    val tag2Related = topTagsOfTopAskersForTag2.distinct.length
    println("\tTag2Related: " + tag2Related)
    val intersection = topTagsOfTopAskersForTag1.distinct.intersect(topTagsOfTopAskersForTag2.distinct).length
    println("\tIntersection: " + intersection)
    val all = (topTagsOfTopAskersForTag1 ++ topTagsOfTopAskersForTag2).distinct.length
    println("\tAll: " + all)
    val probabilityIntersection: Double = intersection.toDouble / all
    println("\tPIntersection:" + probabilityIntersection)
    val probabilityTag1: Double = tag1Related.toDouble / all
    println("\tPTag1: " + probabilityTag1)
    val probabilityTag2: Double = tag2Related.toDouble / all
    println("\tPTag2: " + probabilityTag2)
val pmi = math.log(probabilityIntersection / (probabilityTag1 * probabilityTag2)) / math.log(2)
if (pmi == 0) 0 else - pmi / (math.log(probabilityIntersection) / math.log(2))
}
}
| QuietOne/StackExchangeAnalysis | src/main/scala/operations/network/analysis/Metrics.scala | Scala | artistic-2.0 | 4,195 |
/*
* Copyright (c) 2016 BBC Design and Engineering
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package bbc.schedulerplus
import akka.actor.{Actor, ActorLogging}
import bbc.schedulerplus.client.Callbacks
import bbc.schedulerplus.system.Monitor
/**
* Receives callbacks on application startup to begin the scheduler for jobs we wish to run
*/
class SchedulerPlusActor extends Actor with ActorLogging {
/**
* Start scheduler
*/
private def start(callbacks: Callbacks): Unit = Monitor.startScheduling(callbacks)
/**
* Stop scheduler
*/
private def stop(): Unit = Monitor.stopScheduling
override def receive: PartialFunction[Any, Unit] = {
case callbacks: Callbacks => {
start(callbacks)
sender ! "STARTING"
}
case "stop" => {
stop()
sender ! "STOPPING"
}
case message: Any => {
log.info(s"${message.getClass.getSimpleName} isn't handled by Scheduler Plus")
sender ! "FAILED"
}
}
}
| bbc/scheduler-plus | src/main/scala/bbc/schedulerplus/SchedulerPlusActor.scala | Scala | mit | 2,007 |
package scodec.protocols
package mpeg
package transport
package psi
import shapeless.Typeable
case class TableBuildingError(tableId: Int, message: String) extends MpegError
class TableBuilder private (cases: Map[Int, List[TableSupport[_]]]) {
def supporting[T <: Table](implicit ts: TableSupport[T]): TableBuilder = {
val newCases = ts :: cases.getOrElse(ts.tableId, Nil)
new TableBuilder(cases + (ts.tableId -> newCases))
}
def build(gs: GroupedSections[Section]): Either[TableBuildingError, Table] = {
cases.get(gs.tableId) match {
case None | Some(Nil) => Left(TableBuildingError(gs.tableId, "Unknown table id"))
case Some(list) =>
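        // try each registered TableSupport in turn, keeping the first successful parse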
list.dropRight(1).foldRight[Either[String, _]](list.last.toTable(gs)) { (next, res) => res.fold(_ => next.toTable(gs), Right(_)) } match {
case Right(table) => Right(table.asInstanceOf[Table])
case Left(err) => Left(TableBuildingError(gs.tableId, err))
}
}
}
}
object TableBuilder {
def empty: TableBuilder = new TableBuilder(Map.empty)
def supporting[T <: Table : TableSupport] = empty.supporting[T]
def psi: TableBuilder =
supporting[ProgramAssociationTable].
supporting[ProgramMapTable].
supporting[ConditionalAccessTable]
}
trait TableSupport[T <: Table] {
def tableId: Int
def toTable(gs: GroupedSections[Section]): Either[String, T]
def toSections(t: T): GroupedSections[Section]
}
object TableSupport {
def singleton[A <: Section with Table : reflect.ClassTag](tableId: Int)(implicit t: Typeable[A]): TableSupport[A] = {
val tid = tableId
new TableSupport[A] {
def tableId = tid
def toTable(gs: GroupedSections[Section]) =
gs.narrow[A].toRight(s"Not a ${t.describe}").right.flatMap { sections =>
if (sections.tail.isEmpty) Right(sections.head)
else Left(s"${t.describe} supports only 1 section but got ${sections.list.size}")
}
def toSections(table: A) = GroupedSections(table)
}
}
}
| scodec/scodec-protocols | src/main/scala/scodec/protocols/mpeg/transport/psi/TableBuilder.scala | Scala | bsd-3-clause | 2,004 |
package jsky.app.ot.plugin
import edu.gemini.pot.sp.{ISPObservation, SPObservationID, ISPProgram}
import edu.gemini.sp.vcs2.VcsFailure
import edu.gemini.spModel.core.SPProgramID
import scalaz._
trait OtViewerService {
def registerView(view: AnyRef)
def unregisterView(view: AnyRef)
def load(pid: SPProgramID): \\/[VcsFailure, ISPProgram]
def load(oid: SPObservationID): \\/[VcsFailure, Option[ISPObservation]]
def loadAndView(pid: SPProgramID): \\/[VcsFailure, ISPProgram]
def loadAndView(oid: SPObservationID): \\/[VcsFailure, Option[ISPObservation]]
}
| spakzad/ocs | bundle/jsky.app.ot.plugin/src/main/scala/jsky/app/ot/plugin/OtViewerService.scala | Scala | bsd-3-clause | 565 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.hammerlab.guacamole.filters
import org.apache.commons.math3.util.ArithmeticUtils
object FishersExactTest {
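  /**
   * Hypergeometric point probability used by Fisher's exact test:
   *   C(totalA, conditionA) * C(totalB, conditionB) / C(totalA + totalB, conditionA + conditionB),
   * computed in log space to avoid overflow. Note: this is the probability of the
   * observed table only, not a summed tail p-value.
   */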
def apply(totalA: Int, totalB: Int, conditionA: Int, conditionB: Int): Double = {
math.exp(ArithmeticUtils.binomialCoefficientLog(totalA, conditionA) +
ArithmeticUtils.binomialCoefficientLog(totalB, conditionB) -
ArithmeticUtils.binomialCoefficientLog(totalA + totalB, conditionA + conditionB))
}
} | bikash/guacamole | src/main/scala/org/hammerlab/guacamole/filters/FishersExactTest.scala | Scala | apache-2.0 | 1,236 |
// Reverse a list.
// Example:
// scala> reverse(List(1, 1, 2, 3, 5, 8))
// res0: List[Int] = List(8, 5, 3, 2, 1, 1)
object P05 {
def reverse[A](items: List[A]): List[A] = {
    @annotation.tailrec
    def f(from: List[A], to: List[A]): List[A] = from match {
case Nil => to
case _ => f(from.tail, from.head :: to)
}
f(items, Nil)
}
}
| pilu/scala99 | src/main/scala/P05.scala | Scala | mit | 338 |
/**
* Copyright (C) 2011 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.processor.handlers.xhtml
import org.orbeon.oxf.xforms.control.controls.XXFormsDynamicControl
import org.orbeon.oxf.xforms.processor.ScriptBuilder
import org.orbeon.oxf.xforms.processor.handlers.XFormsBaseHandler
import org.orbeon.oxf.xml._
import org.xml.sax.Attributes
class XXFormsDynamicHandler(
uri : String,
localname : String,
qName : String,
attributes : Attributes,
matched : AnyRef,
handlerContext : AnyRef
) extends XFormsBaseHandler(uri, localname, qName, attributes, matched, handlerContext, false, false) {
private var elementName: String = _
private var elementQName: String = _
override def start(): Unit = {
val controller = xformsHandlerContext.getController
val contentHandler = controller.getOutput
val prefixedId = xformsHandlerContext.getPrefixedId(attributes)
val effectiveId = xformsHandlerContext.getEffectiveId(attributes)
val xhtmlPrefix = xformsHandlerContext.findXHTMLPrefix
this.elementName = "div"
this.elementQName = XMLUtils.buildQName(xhtmlPrefix, elementName)
val classes = "xxforms-dynamic-control"
contentHandler.startElement(XMLConstants.XHTML_NAMESPACE_URI, elementName, elementQName, getIdClassXHTMLAttributes(attributes, classes, effectiveId))
xformsHandlerContext.pushComponentContext(prefixedId)
if (! xformsHandlerContext.isTemplate) {
containingDocument.getControlByEffectiveId(effectiveId) match {
case control: XXFormsDynamicControl ⇒
// Output new scripts upon update if any
// NOTE: Not implemented as of 2016-01-18.
if (! containingDocument.isInitializing && control.newScripts.nonEmpty && containingDocument.isServeInlineResources) {
implicit val helper = new XMLReceiverHelper(contentHandler)
helper.startElement(xhtmlPrefix, XMLConstants.XHTML_NAMESPACE_URI, "script", Array("type", "text/javascript"))
// NOTE: As of 2018-05-03, this is still not functional, so there is no impact
// for https://github.com/orbeon/orbeon-forms/issues/3565
ScriptBuilder.writeScripts(control.newScripts, s ⇒ helper.text(ScriptBuilder.escapeJavaScriptInsideScript(s)))
helper.endElement()
control.clearNewScripts()
}
// Output new markup
control.nested foreach { nested ⇒
xformsHandlerContext.pushPartAnalysis(nested.partAnalysis)
processShadowTree(controller, nested.template)
// Add part globals for top-level part only (see comments in PartAnalysisImpl)
if (nested.partAnalysis.isTopLevel)
nested.partAnalysis.getGlobals foreach { global ⇒
XXFormsComponentHandler.processShadowTree(xformsHandlerContext.getController, global.templateTree)
}
xformsHandlerContext.popPartAnalysis()
}
case _ ⇒
}
}
}
def processShadowTree(controller: ElementHandlerController, shadowTree: SAXStore): Unit = {
controller.startBody()
// Replay content of body
shadowTree.replay(new ForwardingXMLReceiver(controller) {
setForward(false)
var level = 0
// Filter out start/end doc
override def startDocument() = ()
override def endDocument() = ()
override def startElement(uri: String, localname: String, qName: String, attributes: Attributes): Unit = {
super.startElement(uri, localname, qName, attributes)
// Entering body
if (level == 1 && localname == "body")
setForward(true)
level += 1
}
override def endElement(uri: String, localname: String, qName: String): Unit = {
level -= 1
// Exiting body
if (level == 1 && localname == "body")
setForward(false)
super.endElement(uri, localname, qName)
}
// Let prefix mappings go through no matter what so that mappings on html/body work
override def startPrefixMapping(prefix: String, uri: String) =
getXMLReceiver.startPrefixMapping(prefix, uri)
override def endPrefixMapping(prefix: String) =
getXMLReceiver.endPrefixMapping(prefix)
})
controller.endBody()
}
override def end(): Unit = {
xformsHandlerContext.popComponentContext()
val controller = xformsHandlerContext.getController
val contentHandler = controller.getOutput
contentHandler.endElement(XMLConstants.XHTML_NAMESPACE_URI, elementName, elementQName)
}
}
| brunobuzzi/orbeon-forms | xforms/jvm/src/main/scala/org/orbeon/oxf/xforms/processor/handlers/xhtml/XXFormsDynamicHandler.scala | Scala | lgpl-2.1 | 5,197 |
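For context (editorial sketch): the level-tracking filter in processShadowTree above is a general SAX technique. A minimal standalone version using the JDK's org.xml.sax.helpers.XMLFilterImpl, rather than Orbeon's ForwardingXMLReceiver, might look like this, forwarding only events strictly inside the body element:

import org.xml.sax.Attributes
import org.xml.sax.helpers.XMLFilterImpl

// Forwards SAX events only while strictly inside html/body, mirroring the
// level-based setForward(true/false) logic above. The <body> tags themselves
// are not forwarded, only their content.
class BodyContentFilter extends XMLFilterImpl {
  private var level = 0
  private var forwarding = false

  override def startElement(uri: String, localname: String, qName: String, atts: Attributes): Unit = {
    if (forwarding) super.startElement(uri, localname, qName, atts)
    if (level == 1 && localname == "body") forwarding = true // entering body
    level += 1
  }

  override def endElement(uri: String, localname: String, qName: String): Unit = {
    level -= 1
    if (level == 1 && localname == "body") forwarding = false // exiting body
    if (forwarding) super.endElement(uri, localname, qName)
  }
}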
/* sbt -- Simple Build Tool
* Copyright 2010 Mark Harrah
*/
package sbt
package std
import sbt.internal.util.Types._
import sbt.internal.util.{ ~>, AList, DelegatingPMap, RMap }
import Task._
import TaskExtra.{ all, existToAny }
import Execute._
object Transform {
def fromDummy[T](original: Task[T])(action: => T): Task[T] = Task(original.info, Pure(action _, false))
def fromDummyStrict[T](original: Task[T], value: T): Task[T] = fromDummy(original)(value)
implicit def to_~>|[K[_], V[_]](map: RMap[K, V]): K ~>| V = new (K ~>| V) { def apply[T](k: K[T]): Option[V[T]] = map.get(k) }
final case class DummyTaskMap(mappings: List[TaskAndValue[_]]) {
def ::[T](tav: (Task[T], T)): DummyTaskMap = DummyTaskMap(new TaskAndValue(tav._1, tav._2) :: mappings)
}
final class TaskAndValue[T](val task: Task[T], val value: T)
def dummyMap(dummyMap: DummyTaskMap): Task ~>| Task =
{
val pmap = new DelegatingPMap[Task, Task](new collection.mutable.ListMap)
def add[T](dummy: TaskAndValue[T]): Unit = { pmap(dummy.task) = fromDummyStrict(dummy.task, dummy.value) }
dummyMap.mappings.foreach(x => add(x))
pmap
}
  /** Applies `map`, returning the result if defined or returning the input unchanged otherwise. */
implicit def getOrId(map: Task ~>| Task): Task ~> Task =
new (Task ~> Task) {
def apply[T](in: Task[T]): Task[T] = map(in).getOrElse(in)
}
def apply(dummies: DummyTaskMap) =
{
import System._
taskToNode(getOrId(dummyMap(dummies)))
}
def taskToNode(pre: Task ~> Task): NodeView[Task] = new NodeView[Task] {
def apply[T](t: Task[T]): Node[Task, T] = pre(t).work match {
case Pure(eval, _) => uniform(Nil)(_ => Right(eval()))
case m: Mapped[t, k] => toNode[t, k](m.in)(right ∙ m.f)(m.alist)
case m: FlatMapped[t, k] => toNode[t, k](m.in)(left ∙ m.f)(m.alist)
case DependsOn(in, deps) => uniform(existToAny(deps))(const(Left(in)) ∙ all)
case Join(in, f) => uniform(in)(f)
}
def inline[T](t: Task[T]) = t.work match {
case Pure(eval, true) => Some(eval)
case _ => None
}
}
def uniform[T, D](tasks: Seq[Task[D]])(f: Seq[Result[D]] => Either[Task[T], T]): Node[Task, T] =
toNode[T, ({ type l[L[x]] = List[L[D]] })#l](tasks.toList)(f)(AList.seq[D])
def toNode[T, k[L[x]]](inputs: k[Task])(f: k[Result] => Either[Task[T], T])(implicit a: AList[k]): Node[Task, T] = new Node[Task, T] {
type K[L[x]] = k[L]
val in = inputs
val alist = a
def work(results: K[Result]) = f(results)
}
}
| mdedetrich/sbt | tasks/standard/src/main/scala/sbt/std/System.scala | Scala | bsd-3-clause | 2,598 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import scala.util.control.NonFatal
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.types.{StructField, StructType}
/**
* An analyzer rule that replaces [[UnresolvedInlineTable]] with [[LocalRelation]].
*/
object ResolveInlineTables extends Rule[LogicalPlan] with CastSupport {
override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case table: UnresolvedInlineTable if table.expressionsResolved =>
validateInputDimension(table)
validateInputEvaluable(table)
convert(table)
}
/**
* Validates the input data dimension:
* 1. All rows have the same cardinality.
* 2. The number of column aliases defined is consistent with the number of columns in data.
*
* This is package visible for unit testing.
*/
private[analysis] def validateInputDimension(table: UnresolvedInlineTable): Unit = {
if (table.rows.nonEmpty) {
val numCols = table.names.size
table.rows.zipWithIndex.foreach { case (row, ri) =>
if (row.size != numCols) {
table.failAnalysis(s"expected $numCols columns but found ${row.size} columns in row $ri")
}
}
}
}
/**
* Validates that all inline table data are valid expressions that can be evaluated
* (in this they must be foldable).
*
* This is package visible for unit testing.
*/
private[analysis] def validateInputEvaluable(table: UnresolvedInlineTable): Unit = {
table.rows.foreach { row =>
row.foreach { e =>
// Note that nondeterministic expressions are not supported since they are not foldable.
if (!e.resolved || !e.foldable) {
e.failAnalysis(s"cannot evaluate expression ${e.sql} in inline table definition")
}
}
}
}
/**
* Convert a valid (with right shape and foldable inputs) [[UnresolvedInlineTable]]
* into a [[LocalRelation]].
*
* This function attempts to coerce inputs into consistent types.
*
* This is package visible for unit testing.
*/
private[analysis] def convert(table: UnresolvedInlineTable): LocalRelation = {
// For each column, traverse all the values and find a common data type and nullability.
val fields = table.rows.transpose.zip(table.names).map { case (column, name) =>
val inputTypes = column.map(_.dataType)
val tpe = TypeCoercion.findWiderTypeWithoutStringPromotion(inputTypes).getOrElse {
table.failAnalysis(s"incompatible types found in column $name for inline table")
}
StructField(name, tpe, nullable = column.exists(_.nullable))
}
val attributes = StructType(fields).toAttributes
assert(fields.size == table.names.size)
val newRows: Seq[InternalRow] = table.rows.map { row =>
InternalRow.fromSeq(row.zipWithIndex.map { case (e, ci) =>
val targetType = fields(ci).dataType
try {
val castedExpr = if (e.dataType.sameType(targetType)) {
e
} else {
cast(e, targetType)
}
castedExpr.eval()
} catch {
case NonFatal(ex) =>
table.failAnalysis(s"failed to evaluate expression ${e.sql}: ${ex.getMessage}", ex)
}
})
}
LocalRelation(attributes, newRows)
}
}
| witgo/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveInlineTables.scala | Scala | apache-2.0 | 4,234 |
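For context (editorial sketch, assuming a local SparkSession): the VALUES clause below parses to an UnresolvedInlineTable, which this rule validates and converts into a LocalRelation with per-column type coercion:

import org.apache.spark.sql.SparkSession

object InlineTableExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("inline-table").getOrCreate()
    // Two rows, two columns; each column's type is widened across its values (here INT, STRING).
    spark.sql("SELECT * FROM VALUES (1, 'a'), (2, 'b') AS data(id, name)").show()
    spark.stop()
  }
}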
package cs1.lx02b
import org.scalatest._
import LX02B._
class Test extends FlatSpec with Matchers {
}
| titech-is-cs115/lecture | test/lx02b-puzzle.scala | Scala | cc0-1.0 | 104 |
package sri.core
import scala.scalajs.js
import scala.scalajs.js.ConstructorTag
trait ElementFactoryLegacy {
/**
* add types to js constructor
*/
@deprecated("Use getTypedConstructor[Component]", "0.6.0")
def getTypedConstructor[P, S](ctor: js.Dynamic, clz: Class[_ <: ReactComponent[P, S]]) = {
ctor.asInstanceOf[ReactTypedConstructor[P, S]]
}
/**
* helper method to create ReactElements for components with props
*/
@deprecated("Use makeElement[Component](props)", "0.6.0")
def createElement[P, S](ctor: ReactTypedConstructor[P, S],
props: P,
key: js.UndefOr[String] = js.undefined,
ref: js.Function1[_ <: ReactComponent[P, S], _] = null
) = createElementWithChildren(ctor, props, key, ref)()
/**
* helper method to create ReactElements for components with no props
*/
@deprecated("Use makeElementNoProps[Component]", "0.6.0")
def createElementNoProps[P, S](ctor: ReactTypedConstructor[P, S],
key: js.UndefOr[String] = js.undefined,
ref: js.Function1[_ <: ReactComponent[P, S], _] = null
) = createElementNoPropsWithChildren(ctor, key, ref)()
/**
* helper method to create ReactElements for components with props and children
*/
@deprecated("Use makeElementWithChildren[Component](props)", "0.6.0")
def createElementWithChildren[P, S](ctor: ReactTypedConstructor[P, S],
props: P,
key: js.UndefOr[String] = js.undefined,
ref: js.Function1[_ <: ReactComponent[P, S], _] = null
)(children: ReactNode*): ReactElementU[P, S] =
React.createElement(ctor, JSProps(key, if (ref != null) ref else js.undefined, props), children: _*).asInstanceOf[ReactElementU[P, S]]
/**
* helper method to create ReactElements for components with no props and children
*/
@deprecated("Use makeElementNoPropsWithChildren[Component]()", "0.6.0")
def createElementNoPropsWithChildren[P, S](ctor: ReactTypedConstructor[P, S],
key: js.UndefOr[String] = js.undefined,
ref: js.Function1[_ <: ReactComponent[P, S], _] = null
)(children: ReactNode*): ReactElementU[P, S] =
React.createElement(ctor, JSProps(key, if (ref != null) ref else js.undefined, ()), children: _*).asInstanceOf[ReactElementU[P, S]]
}
trait ElementFactory {
/**
* add types to js constructor
*/
def getTypedConstructor[C <: ReactComponentBase : ConstructorTag] =
js.constructorTag[C].constructor.asInstanceOf[ReactTypedConstructor[C#Props, C#State]]
/**
* helper method to create ReactElements for components with props
*/
def makeElement[C <: ReactComponentBase : ConstructorTag](
props: C#Props,
key: js.UndefOr[String] = js.undefined,
ref: js.Function1[C, Unit] = null
) = makeElementWithChildren[C](props, key, ref)()
/**
* helper method to create ReactElements for components with no props
*/
def makeElement[C <: ReactComponentBase { type Props = Unit } : ConstructorTag] =
makeElementNoPropsWithChildren[C]()()
/**
* helper method to create ReactElements for components with no props
*/
def makeElementNoProps[C <: ReactComponentBase { type Props = Unit } : ConstructorTag](
key: js.UndefOr[String] = js.undefined,
ref: js.Function1[C, Unit] = null
) = makeElementNoPropsWithChildren(key, ref)()
/**
* helper method to create ReactElements for components with props and children
*/
def makeElementWithChildren[C <: ReactComponentBase : ConstructorTag](
props: C#Props,
key: js.UndefOr[String] = js.undefined,
ref: js.Function1[C, Unit] = null
)(children: ReactNode*): ReactElementU[C#Props, C#State] =
React.createElement(js.constructorTag[C].constructor, JSProps(key, if (ref != null) ref else js.undefined, props), children: _*).asInstanceOf[ReactElementU[C#Props, C#State]]
/**
* helper method to create ReactElements for components with no props and children
*/
def makeElementNoPropsWithChildren[C <: ReactComponentBase : ConstructorTag](
key: js.UndefOr[String] = js.undefined,
ref: js.Function1[C, Unit] = null
)(children: ReactNode*): ReactElementU[C#Props, C#State] =
React.createElement(js.constructorTag[C].constructor, JSProps(key, if (ref != null) ref else js.undefined, ()), children: _*)
.asInstanceOf[ReactElementU[C#Props, C#State]]
def createStatelessFunctionElement[P](func: P => ReactElement, props: P, key: js.UndefOr[String] = js.undefined) = {
React.createElement((jsp: JSProps[P]) => func(jsp.sprops), JSProps(key = key, sprops = props))
}
def createStatelessFunctionElementNoProps(func: () => ReactElement, key: js.UndefOr[String] = js.undefined) = {
React.createElement((jsp: JSProps[_]) => func(), JSProps(key = key, sprops = null))
}
def createStatelessFunctionElementWithChildren[P](func: (P, ReactElement) => ReactElement, props: P, key: js.UndefOr[String] = js.undefined)(children: ReactNode*) = {
React.createElement((jsp: JSProps[P]) => func(jsp.sprops, jsp.children), JSProps(key = key, sprops = props), children: _*)
}
def createStatelessFunctionElementNoPropsWithChildren(func: ReactElement => ReactElement, key: js.UndefOr[String] = js.undefined)(children: ReactNode*) = {
React.createElement((jsp: JSProps[_]) => func(jsp.children), JSProps(key = key, sprops = null), children: _*)
}
/**
* use this method when you want js.Object as props
*/
def createStatelessFunctionElementJS[P <: ReactJSProps](func: P => ReactElement, props: P) = {
React.createElement(func, props)
}
/**
* use this method when you want js.Object as props
*/
def createStatelessFunctionElementJSWithChildren[P <: ReactJSProps](func: P => ReactElement, props: P)(children: ReactNode*) = {
React.createElement(func, props, children: _*)
}
}
object ElementFactory extends ElementFactory with ElementFactoryLegacy
| chandu0101/sri | core/src/main/scala/sri/core/ElementFactory.scala | Scala | apache-2.0 | 7,259 |
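A hypothetical usage sketch (editorial addition; GreetingProps and the greeting function are illustrative, not part of sri): createStatelessFunctionElement wraps a plain Scala function so that React receives a JS function taking JSProps, with the Scala props unwrapped before the call:

import sri.core._
import sri.core.ElementFactory._

case class GreetingProps(name: String)

object GreetingExample {
  // The component body is elided (???) since rendering would use sri's DOM DSL.
  def greeting(props: GreetingProps): ReactElement = ???

  val element = createStatelessFunctionElement(greeting _, GreetingProps("world"))
}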
package com.toscaruntime.it.openstack.standalone
import com.toscaruntime.it.AbstractSpec
import com.toscaruntime.it.TestConstant._
import com.toscaruntime.it.steps.AgentsSteps._
import com.toscaruntime.it.steps.CsarsSteps._
import com.toscaruntime.it.steps.DeploymentsSteps._
import com.toscaruntime.it.util.URLChecker._
import org.scalatest.MustMatchers
import scala.concurrent.duration.DurationInt
class WordpressSpec extends AbstractSpec with MustMatchers {
info("Test deployment of a topology wordpress with openstack in mode masterless")
feature("Deployment of wordpress") {
scenario("Standard deployment") {
Given("I download and install all necessary csars for wordpress deployment")
installNormativeTypesAndProviders()
downloadZipFileAndExtract("https://github.com/alien4cloud/samples/archive/master.zip", tempPath)
assertNoCompilationErrorsDetected(installCsar(tempPath.resolve("samples-master").resolve("apache")))
assertNoCompilationErrorsDetected(installCsar(tempPath.resolve("samples-master").resolve("mysql")))
assertNoCompilationErrorsDetected(installCsar(tempPath.resolve("samples-master").resolve("php")))
assertNoCompilationErrorsDetected(installCsar(tempPath.resolve("samples-master").resolve("wordpress")))
assertNoCompilationErrorsDetected(installCsar(tempPath.resolve("samples-master").resolve("topology-wordpress")))
And("A deployment image has been created for the wordpress openstack topology")
createDeploymentImage("wordpress", openstackProvider) must be(true)
When("I deploy it")
launchDeployment("wordpress")
Then("I should have an output for the wordpress's public url")
val url = assertDeploymentHasOutput("wordpress", "wordpress_url")
And("A request on the application's url should return a response 200 OK")
checkURL(url, 200, Set.empty, 5 minutes)
And("I should be able to undeploy it without error")
launchUndeployment("wordpress")
}
}
}
| vuminhkh/tosca-runtime | test/src/it/scala/com/toscaruntime/it/openstack/standalone/WordpressSpec.scala | Scala | mit | 2,005 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import uk.gov.hmrc.ct.accounts.frs102.retriever.FullAccountsBoxRetriever
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.box.retriever.BoxRetriever._
case class ACQ5021(value: Option[Boolean]) extends CtBoxIdentifier(name = "Goodwill")
with CtOptionalBoolean
with Input
with ValidatableBox[FullAccountsBoxRetriever]
with Validators {
override def validate(boxRetriever: FullAccountsBoxRetriever): Set[CtValidation] = {
import boxRetriever._
collectErrors(
cannotExistErrorIf(hasValue && ac42.noValue && ac43.noValue),
failIf(anyHaveValue(ac42, ac43)) {
atLeastOneBoxHasValue("balance.sheet.intangible.assets", this, acq5022)
}
)
}
}
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/ACQ5021.scala | Scala | apache-2.0 | 1,340 |
package at.fh.swengb.resifo_android
/**
* Created by Florian on 05.01.2017.
*/
case class RegForm(person: Person, zmr:Zmr, reisepass: Reisepass, anmUnterkunft: AnmUnterkunft,
hauptwohnsitzBleibt: HauptwohnsitzBleibt, abmUnterkunft: AbmUnterkunft, unterkunftgeber: Unterkunftgeber) extends Serializable {
  override def toString(): String = {
    "%s %s, %s, %s".format(person.firstName, person.secondName, person.gebDatum, person.gebOrt)
  }
}
| FlorianReinprecht/resifo-android | app/src/main/scala/at/fh/swengb/resifo_android/RegForm.scala | Scala | gpl-3.0 | 468 |
package core.processing
import akka.actor.{ PoisonPill, Actor }
import core.HubId
import eu.delving.schema.SchemaVersion
import models.OrganizationConfiguration
import java.util.concurrent.atomic.AtomicBoolean
import play.api.Logger
import com.yammer.metrics.scala.Instrumented
import org.w3c.dom.Node
/**
*
* @author Manuel Bernhardt <bernhardt.manuel@gmail.com>
*/
class RecordIndexer(
processingContext: ProcessingContext,
indexOne: (HubId, SchemaVersion, MultiMap, Node) => Option[Throwable],
processingInterrupted: AtomicBoolean,
configuration: OrganizationConfiguration) extends Actor with Instrumented {
val counter = metrics.counter(processingContext.collection.getOwner + ".recordIndexer")
val log = Logger("CultureHub")
override def postStop() {
counter.clear()
}
def receive = {
case IndexRecord(hubId, schema, fields, document) =>
if (processingInterrupted.get()) {
self ! PoisonPill
} else {
// TODO eventually handle indexing failure
indexOne(hubId, schema, fields, document)
counter += 1
if (log.isDebugEnabled) {
if (counter.count % 5000 == 0) {
log.debug(
s"""Processing metrics from RecordIndexer:
|- indexed records: ${counter.count}
""".stripMargin)
}
}
}
}
}
case class IndexRecord(hubId: HubId, schema: SchemaVersion, fields: Map[String, List[String]], document: Node)
| delving/culture-hub | web-core/app/core/processing/RecordIndexer.scala | Scala | apache-2.0 | 1,486 |
package au.com.dius.pact.provider.specs2
import java.io.{StringReader, File, InputStream, Reader}
import java.util.concurrent.Executors
import au.com.dius.pact.model.{FullResponseMatch, RequestResponsePact, PactReader, ResponseMatching}
import au.com.dius.pact.provider.sbtsupport.HttpClient
import org.specs2.Specification
import org.specs2.execute.Result
import org.specs2.specification.core.Fragments
import scala.collection.JavaConversions
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext}
trait PactInput
case class StringInput(string: String) extends PactInput
case class ReaderInput(reader: Reader) extends PactInput
case class StreamInput(stream: InputStream) extends PactInput
case class FileInput(file: File) extends PactInput
trait ProviderSpec extends Specification {
def timeout = Duration.apply(10000, "s")
def convertInput(input: PactInput) = {
input match {
case StringInput(string) => new StringReader(string)
case ReaderInput(reader) => reader
case StreamInput(stream) => stream
case FileInput(file) => file
}
}
override def is = {
val pact = PactReader.loadPact(convertInput(honoursPact)).asInstanceOf[RequestResponsePact]
val fs = JavaConversions.asScalaBuffer(pact.getInteractions).map { interaction =>
val description = s"${interaction.getProviderState} ${interaction.getDescription}"
val test: String => Result = { url =>
implicit val executionContext = ExecutionContext.fromExecutor(Executors.newCachedThreadPool())
val request = interaction.getRequest.copy
request.setPath(s"$url${interaction.getRequest.getPath}")
val actualResponseFuture = HttpClient.run(request)
val actualResponse = Await.result(actualResponseFuture, timeout)
ResponseMatching.matchRules(interaction.getResponse, actualResponse) must beEqualTo(FullResponseMatch)
}
fragmentFactory.example(description, {inState(interaction.getProviderState, test)})
}
Fragments(fs :_*)
}
def honoursPact: PactInput
def inState(state: String, test: String => Result): Result
implicit def steamToPactInput(source: InputStream) : PactInput = StreamInput(source)
implicit def stringToPactInput(source: String) : PactInput = StringInput(source)
implicit def readerToPactInput(source: Reader) : PactInput = ReaderInput(source)
implicit def fileToPactInput(source: File) : PactInput = FileInput(source)
}
| Fitzoh/pact-jvm | pact-jvm-provider-specs2/src/main/scala/au/com/dius/pact/provider/specs2/ProviderSpec.scala | Scala | apache-2.0 | 2,473 |
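A hypothetical concrete spec (editorial sketch; the pact file path and base URL are illustrative): honoursPact supplies the pact via one of the implicit PactInput conversions, and inState prepares the provider before each interaction is replayed:

import java.io.File
import org.specs2.execute.Result
import au.com.dius.pact.provider.specs2.{PactInput, ProviderSpec}

class MyProviderSpec extends ProviderSpec {
  def honoursPact: PactInput = new File("pacts/consumer-provider.json")

  def inState(state: String, test: String => Result): Result = {
    // Bring the provider into `state` here (e.g. seed test data), then run
    // the replayed interaction against the provider's base URL.
    test("http://localhost:8080")
  }
}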
package sss.ancillary
object Results {
trait ResultOrMsg[R] {
def orErrMsg[E](msg: => E): Result[R, E]
}
  // Note the inverted Either convention used throughout: Left carries the
  // success value, Right carries the accumulated errors.
  type Result[+R, E] = Either[R, Errors[E]]
  type Errors[E] = List[E]
  type ErrMsg = String
  type Error[E] = Result[_, E]
  type OkResult = Either[_, Errors[ErrMsg]]
  def ok[R](r: R = ()) = Left(r)
  def error(msg: String*): Error[String] = Right(msg.toList)
implicit class ResultOps[R, E](val r: Result[R, E]) extends AnyVal {
def isOk: Boolean = r.isLeft
def isError: Boolean = r.isRight
def errors: Errors[E] = r.getOrElse(throw new RuntimeException("Use isError guard"))
    def result: R = r.left.getOrElse(throw new RuntimeException("Use isOk guard"))
def andThen[R](other: => Result[R, E]): Result[R, E] = {
if (r.isOk && other.isOk) other
else
Right(
r.getOrElse(List[E]()) ++
other.getOrElse(List[E]())
)
}
def ifOk(other: R => Result[_, E]): Result[_, E] = {
if (r.isOk) other(r.result)
else r
}
def ifNotOk(other: => Result[_, E]): Result[_, E] = {
if (!r.isOk)
if (other.isOk) other
else
Right(
r.getOrElse(List()) ++
other.getOrElse(List())
)
else r
}
}
implicit class FromOpt[R](val r: Option[R]) extends ResultOrMsg[R] {
def orErrMsg[E](msg: => E): Result[R, E] = {
if (r.isDefined) Left(r.get)
else Right(List(msg))
}
}
implicit class FromBool(val r: Boolean) extends ResultOrMsg[Boolean] {
def orErrMsg[E](msg: => E): Result[Boolean, E] = {
if (r) Left(true)
else Right(List(msg))
}
def orErrMsg(msg: String): OkResult = {
if (r) Left(true)
else Right(List(msg))
}
}
}
| mcsherrylabs/sss.ancillary | src/main/scala/sss/ancillary/Results.scala | Scala | gpl-3.0 | 1,759 |
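A minimal usage sketch (editorial addition) for the helpers above; note again that Left is the success side and Right accumulates errors:

import sss.ancillary.Results._

object ResultsExample extends App {
  val passed: Result[Int, String] = ok(42)
  val failed: Result[Int, String] = Right(List("boom"))
  val combined = passed andThen failed // errors propagate through the chain
  println(combined.isError)            // true
  println(combined.errors)             // List(boom)
}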
trait BankAccount {
def closeAccount(): Unit
def getBalance: Option[Int]
def incrementBalance(increment: Int): Option[Int]
}
protected case class Account(var balance: Option[Int] = Some(0)) extends BankAccount {
private def runThreadSafe[A](block: => A): A = this.synchronized(block)
override def closeAccount(): Unit = runThreadSafe(balance = None)
override def getBalance: Option[Int] = runThreadSafe(balance)
override def incrementBalance(increment: Int): Option[Int] = runThreadSafe {
balance flatMap { amount =>
balance = Some(amount + increment)
balance
}
}
}
object Bank {
def openAccount(): BankAccount = Account()
}
| exercism/xscala | exercises/practice/bank-account/.meta/Example.scala | Scala | mit | 672 |
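A short usage sketch (editorial addition) of the synchronized account above:

object BankAccountExample extends App {
  val account = Bank.openAccount()
  println(account.incrementBalance(10)) // Some(10)
  println(account.incrementBalance(-3)) // Some(7)
  account.closeAccount()
  println(account.getBalance)           // None once the account is closed
}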
package code
import java.io.File
import scala.xml.XML
import org.specs.Specification
import org.specs.runner.JUnit4
import net.liftweb.common.Full
import net.liftweb.util.PCDataXmlParser
class XmlSourceSpecsTest extends JUnit4(XmlSourceSpecs)
object XmlSourceSpecs extends Specification {
"XML Sources" should {
"be well-formed" in {
/**
* Tests to make sure the project's XML files are well-formed.
*
* Finds every *.html and *.xml file in src/main/webapp (and its
* subdirectories) and tests to make sure they are well-formed.
*/
var failed: List[File] = Nil
def handledXml(file: String) =
file.endsWith(".xml")
def handledXHtml(file: String) =
file.endsWith(".html") || file.endsWith(".htm") || file.endsWith(".xhtml")
def wellFormed(file: File) {
if (file.isDirectory)
for (f <- file.listFiles) wellFormed(f)
if (file.isFile && handledXml(file.getName)) {
try {
XML.loadFile(file)
} catch {
case e: org.xml.sax.SAXParseException => failed = file :: failed
}
}
if (file.isFile && handledXHtml(file.getName)) {
PCDataXmlParser(new java.io.FileInputStream(file.getAbsolutePath)) match {
case Full(_) => // file is ok
case _ => failed = file :: failed
}
}
}
wellFormed(new File("src/main/webapp"))
val numFails = failed.size
if (numFails > 0) {
val fileStr = if (numFails == 1) "file" else "files"
val msg = "Malformed XML in " + numFails + " " + fileStr + ": " + failed.mkString(", ")
fail(msg)
}
numFails must_== 0
}
}
}
| TopicQuests/IBISLift | src/test/scala/code/XmlSourceSpecs.scala | Scala | apache-2.0 | 1,644 |
package org.zouzias.spark.lucenerdd.aws.utils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.joda.time.DateTime
import org.joda.time.format.DateTimeFormat
// CAUTION: Do not remove this (sbt-build-info)
import org.zouzias.spark.lucenerdd.aws.BuildInfo
object Utils {
val FuzzyEditDistance = 1
val topK = 10
def loadWikipediaTitles(implicit sparkSession: SparkSession): RDD[String] = {
import sparkSession.sqlContext.implicits._
sparkSession.read.parquet("s3://spark-lucenerdd/wikipedia/enwiki-latest-all-titles.parquet")
      .map(row => row.getString(0)).map(_.replaceAll("_", " ")).map(_.replaceAll("[^a-zA-Z0-9\\s]", "")) // keep only alphanumerics and whitespace
.rdd
}
def sampleTopKWikipediaTitles(k: Int)(implicit sparkSession: SparkSession): List[String] = {
loadWikipediaTitles.sample(false, 0.01).take(k).toList
}
def dayString(): String = {
val date = new DateTime()
val formatter = DateTimeFormat.forPattern("yyyy-MM-dd")
formatter.print(date)
}
val Version = BuildInfo.version
}
| zouzias/spark-lucenerdd-aws | src/main/scala/org/zouzias/spark/lucenerdd/aws/utils/Utils.scala | Scala | apache-2.0 | 1,043 |
package net.fwbrasil.activate
object EnumerationValue extends Enumeration {
case class EnumerationValue(name: String) extends Val(name)
val value1a = EnumerationValue("v1")
val value2 = EnumerationValue("v2")
val value3 = EnumerationValue("v3")
}
| xdevelsistemas/activate | activate-test/src/test/scala/net/fwbrasil/activate/EnumerationValue.scala | Scala | lgpl-2.1 | 263 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.parquet
import org.apache.parquet.io.api.{Binary, Converter, GroupConverter, PrimitiveConverter, RecordMaterializer}
import org.apache.parquet.schema.{GroupType, MessageType}
import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName
import org.apache.parquet.schema.LogicalTypeAnnotation.stringType
import wvlet.airframe.codec.MessageCodec
import wvlet.airframe.codec.PrimitiveCodec.ValueCodec
import wvlet.airframe.surface.Surface
import wvlet.log.LogSupport
import scala.jdk.CollectionConverters._
object ParquetRecordReader extends LogSupport {
private class IntConverter(fieldName: String, holder: RecordBuilder) extends PrimitiveConverter {
override def addInt(value: Int): Unit = {
holder.add(fieldName, value)
}
}
private class LongConverter(fieldName: String, holder: RecordBuilder) extends PrimitiveConverter {
override def addLong(value: Long): Unit = {
holder.add(fieldName, value)
}
}
private class BooleanConverter(fieldName: String, holder: RecordBuilder) extends PrimitiveConverter {
override def addBoolean(value: Boolean): Unit = {
holder.add(fieldName, value)
}
}
private class StringConverter(fieldName: String, holder: RecordBuilder) extends PrimitiveConverter with LogSupport {
override def addBinary(value: Binary): Unit = {
holder.add(fieldName, value.toStringUsingUTF8)
}
}
private class FloatConverter(fieldName: String, holder: RecordBuilder) extends PrimitiveConverter {
override def addFloat(value: Float): Unit = {
holder.add(fieldName, value)
}
}
private class DoubleConverter(fieldName: String, holder: RecordBuilder) extends PrimitiveConverter {
override def addDouble(value: Double): Unit = {
holder.add(fieldName, value)
}
}
private class MsgPackConverter(fieldName: String, holder: RecordBuilder) extends PrimitiveConverter {
override def addBinary(value: Binary): Unit = {
holder.add(fieldName, ValueCodec.fromMsgPack(value.getBytes))
}
}
case class ParentContext(paramName: String, recordBuilder: RecordBuilder)
}
import ParquetRecordReader._
class ParquetRecordReader[A](
surface: Surface,
projectedSchema: GroupType,
parentContext: Option[ParentContext] = None
) extends GroupConverter
with LogSupport {
private val codec = MessageCodec.ofSurface(surface)
private val recordBuilder = RecordBuilder.newBuilder
private val converters: Seq[Converter] = projectedSchema.getFields.asScala.map { f =>
val cv: Converter = f match {
case p if p.isPrimitive =>
p.asPrimitiveType().getPrimitiveTypeName match {
case PrimitiveTypeName.INT32 => new IntConverter(f.getName, recordBuilder)
case PrimitiveTypeName.INT64 => new LongConverter(f.getName, recordBuilder)
case PrimitiveTypeName.BOOLEAN => new BooleanConverter(f.getName, recordBuilder)
case PrimitiveTypeName.FLOAT => new FloatConverter(f.getName, recordBuilder)
case PrimitiveTypeName.DOUBLE => new DoubleConverter(f.getName, recordBuilder)
case PrimitiveTypeName.BINARY if p.getLogicalTypeAnnotation == stringType =>
new StringConverter(f.getName, recordBuilder)
case PrimitiveTypeName.BINARY =>
new MsgPackConverter(f.getName, recordBuilder)
case _ => ???
}
case _ if surface.isMap =>
// Mapping Parquet columns to non-object types (e.g., Map[String, Any])
if (f.isPrimitive) {
new MsgPackConverter(f.getName, recordBuilder)
} else {
// Mapping Parquet group types to non-object types
new ParquetRecordReader(
Surface.of[Map[String, Any]],
f.asGroupType(),
parentContext = Some(ParentContext(f.getName, recordBuilder))
)
}
case _ =>
// GroupConverter for nested objects
surface.params.find(_.name == f.getName) match {
case Some(param) =>
if (param.surface.isOption || param.surface.isSeq || param.surface.isArray) {
// For Option[X], Seq[X] types, extract X
val elementSurface = param.surface.typeArgs(0)
new ParquetRecordReader(
param.surface,
ParquetSchema.toParquetSchema(elementSurface),
parentContext = Some(ParentContext(f.getName, recordBuilder))
)
} else {
new ParquetRecordReader(
param.surface,
ParquetSchema.toParquetSchema(param.surface),
parentContext = Some(ParentContext(f.getName, recordBuilder))
)
}
case None =>
???
}
}
cv
}.toIndexedSeq
def currentRecord: A = {
val m = recordBuilder.toMap
codec.fromMap(m).asInstanceOf[A]
}
override def getConverter(fieldIndex: Int): Converter = converters(fieldIndex)
override def start(): Unit = {
recordBuilder.clear()
}
override def end(): Unit = {
parentContext.foreach { ctx =>
val m = recordBuilder.toMap
ctx.recordBuilder.add(ctx.paramName, m)
}
}
}
/**
* An adapter class for org.apache.parquet.RecordMaterializer
*/
class ParquetRecordMaterializer[A](surface: Surface, projectedSchema: MessageType) extends RecordMaterializer[A] {
private val recordConverter = new ParquetRecordReader[A](surface, projectedSchema)
override def getCurrentRecord: A = recordConverter.currentRecord
override def getRootConverter: GroupConverter = recordConverter
}
| wvlet/airframe | airframe-parquet/src/main/scala/wvlet/airframe/parquet/ParquetRecordReader.scala | Scala | apache-2.0 | 6,157 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy
import scala.collection.mutable
import scala.concurrent.duration._
import org.mockito.Matchers.any
import org.mockito.Mockito.{mock, verify, when}
import org.scalatest.{BeforeAndAfterAll, PrivateMethodTester}
import org.scalatest.concurrent.Eventually._
import org.apache.spark._
import org.apache.spark.deploy.DeployMessages.{MasterStateResponse, RequestMasterState}
import org.apache.spark.deploy.master.ApplicationInfo
import org.apache.spark.deploy.master.Master
import org.apache.spark.deploy.worker.Worker
import org.apache.spark.internal.config
import org.apache.spark.rpc.{RpcAddress, RpcEndpointRef, RpcEnv}
import org.apache.spark.scheduler.TaskSchedulerImpl
import org.apache.spark.scheduler.cluster._
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages.{RegisterExecutor, RegisterExecutorFailed}
/**
* End-to-end tests for dynamic allocation in standalone mode.
*/
class StandaloneDynamicAllocationSuite
extends SparkFunSuite
with LocalSparkContext
with BeforeAndAfterAll
with PrivateMethodTester {
private val numWorkers = 2
private val conf = new SparkConf()
private val securityManager = new SecurityManager(conf)
private var masterRpcEnv: RpcEnv = null
private var workerRpcEnvs: Seq[RpcEnv] = null
private var master: Master = null
private var workers: Seq[Worker] = null
/**
* Start the local cluster.
* Note: local-cluster mode is insufficient because we want a reference to the Master.
*/
override def beforeAll(): Unit = {
super.beforeAll()
masterRpcEnv = RpcEnv.create(Master.SYSTEM_NAME, "localhost", 0, conf, securityManager)
workerRpcEnvs = (0 until numWorkers).map { i =>
RpcEnv.create(Worker.SYSTEM_NAME + i, "localhost", 0, conf, securityManager)
}
master = makeMaster()
workers = makeWorkers(10, 2048)
// Wait until all workers register with master successfully
eventually(timeout(60.seconds), interval(10.millis)) {
assert(getMasterState.workers.size === numWorkers)
}
}
override def afterAll(): Unit = {
try {
masterRpcEnv.shutdown()
workerRpcEnvs.foreach(_.shutdown())
master.stop()
workers.foreach(_.stop())
masterRpcEnv = null
workerRpcEnvs = null
master = null
workers = null
} finally {
super.afterAll()
}
}
test("dynamic allocation default behavior") {
sc = new SparkContext(appConf)
val appId = sc.applicationId
eventually(timeout(10.seconds), interval(10.millis)) {
val apps = getApplications()
assert(apps.size === 1)
assert(apps.head.id === appId)
assert(apps.head.executors.size === 2)
assert(apps.head.getExecutorLimit === Int.MaxValue)
}
// kill all executors
assert(killAllExecutors(sc))
var apps = getApplications()
assert(apps.head.executors.size === 0)
assert(apps.head.getExecutorLimit === 0)
// request 1
assert(sc.requestExecutors(1))
apps = getApplications()
assert(apps.head.executors.size === 1)
assert(apps.head.getExecutorLimit === 1)
// request 1 more
assert(sc.requestExecutors(1))
apps = getApplications()
assert(apps.head.executors.size === 2)
assert(apps.head.getExecutorLimit === 2)
// request 1 more; this one won't go through
assert(sc.requestExecutors(1))
apps = getApplications()
assert(apps.head.executors.size === 2)
assert(apps.head.getExecutorLimit === 3)
// kill all existing executors; we should end up with 3 - 2 = 1 executor
assert(killAllExecutors(sc))
apps = getApplications()
assert(apps.head.executors.size === 1)
assert(apps.head.getExecutorLimit === 1)
// kill all executors again; this time we'll have 1 - 1 = 0 executors left
assert(killAllExecutors(sc))
apps = getApplications()
assert(apps.head.executors.size === 0)
assert(apps.head.getExecutorLimit === 0)
// request many more; this increases the limit well beyond the cluster capacity
assert(sc.requestExecutors(1000))
apps = getApplications()
assert(apps.head.executors.size === 2)
assert(apps.head.getExecutorLimit === 1000)
}
test("dynamic allocation with max cores <= cores per worker") {
sc = new SparkContext(appConf.set("spark.cores.max", "8"))
val appId = sc.applicationId
eventually(timeout(10.seconds), interval(10.millis)) {
val apps = getApplications()
assert(apps.size === 1)
assert(apps.head.id === appId)
assert(apps.head.executors.size === 2)
assert(apps.head.executors.values.map(_.cores).toArray === Array(4, 4))
assert(apps.head.getExecutorLimit === Int.MaxValue)
}
// kill all executors
assert(killAllExecutors(sc))
var apps = getApplications()
assert(apps.head.executors.size === 0)
assert(apps.head.getExecutorLimit === 0)
// request 1
assert(sc.requestExecutors(1))
apps = getApplications()
assert(apps.head.executors.size === 1)
assert(apps.head.executors.values.head.cores === 8)
assert(apps.head.getExecutorLimit === 1)
// request 1 more; this one won't go through because we're already at max cores.
// This highlights a limitation of using dynamic allocation with max cores WITHOUT
// setting cores per executor: once an application scales down and then scales back
// up, its executors may not be spread out anymore!
assert(sc.requestExecutors(1))
apps = getApplications()
assert(apps.head.executors.size === 1)
assert(apps.head.getExecutorLimit === 2)
// request 1 more; this one also won't go through for the same reason
assert(sc.requestExecutors(1))
apps = getApplications()
assert(apps.head.executors.size === 1)
assert(apps.head.getExecutorLimit === 3)
// kill all existing executors; we should end up with 3 - 1 = 2 executor
// Note: we scheduled these executors together, so their cores should be evenly distributed
assert(killAllExecutors(sc))
apps = getApplications()
assert(apps.head.executors.size === 2)
assert(apps.head.executors.values.map(_.cores).toArray === Array(4, 4))
assert(apps.head.getExecutorLimit === 2)
// kill all executors again; this time we'll have 1 - 1 = 0 executors left
assert(killAllExecutors(sc))
apps = getApplications()
assert(apps.head.executors.size === 0)
assert(apps.head.getExecutorLimit === 0)
// request many more; this increases the limit well beyond the cluster capacity
assert(sc.requestExecutors(1000))
apps = getApplications()
assert(apps.head.executors.size === 2)
assert(apps.head.executors.values.map(_.cores).toArray === Array(4, 4))
assert(apps.head.getExecutorLimit === 1000)
}
test("dynamic allocation with max cores > cores per worker") {
sc = new SparkContext(appConf.set("spark.cores.max", "16"))
val appId = sc.applicationId
eventually(timeout(10.seconds), interval(10.millis)) {
val apps = getApplications()
assert(apps.size === 1)
assert(apps.head.id === appId)
assert(apps.head.executors.size === 2)
assert(apps.head.executors.values.map(_.cores).toArray === Array(8, 8))
assert(apps.head.getExecutorLimit === Int.MaxValue)
}
// kill all executors
assert(killAllExecutors(sc))
var apps = getApplications()
assert(apps.head.executors.size === 0)
assert(apps.head.getExecutorLimit === 0)
// request 1
assert(sc.requestExecutors(1))
apps = getApplications()
assert(apps.head.executors.size === 1)
assert(apps.head.executors.values.head.cores === 10)
assert(apps.head.getExecutorLimit === 1)
// request 1 more
// Note: the cores are not evenly distributed because we scheduled these executors 1 by 1
assert(sc.requestExecutors(1))
apps = getApplications()
assert(apps.head.executors.size === 2)
assert(apps.head.executors.values.map(_.cores).toSet === Set(10, 6))
assert(apps.head.getExecutorLimit === 2)
// request 1 more; this one won't go through
assert(sc.requestExecutors(1))
apps = getApplications()
assert(apps.head.executors.size === 2)
assert(apps.head.getExecutorLimit === 3)
// kill all existing executors; we should end up with 3 - 2 = 1 executor
assert(killAllExecutors(sc))
apps = getApplications()
assert(apps.head.executors.size === 1)
assert(apps.head.executors.values.head.cores === 10)
assert(apps.head.getExecutorLimit === 1)
// kill all executors again; this time we'll have 1 - 1 = 0 executors left
assert(killAllExecutors(sc))
apps = getApplications()
assert(apps.head.executors.size === 0)
assert(apps.head.getExecutorLimit === 0)
// request many more; this increases the limit well beyond the cluster capacity
assert(sc.requestExecutors(1000))
apps = getApplications()
assert(apps.head.executors.size === 2)
assert(apps.head.executors.values.map(_.cores).toArray === Array(8, 8))
assert(apps.head.getExecutorLimit === 1000)
}
test("dynamic allocation with cores per executor") {
sc = new SparkContext(appConf.set("spark.executor.cores", "2"))
val appId = sc.applicationId
eventually(timeout(10.seconds), interval(10.millis)) {
val apps = getApplications()
assert(apps.size === 1)
assert(apps.head.id === appId)
assert(apps.head.executors.size === 10) // 20 cores total
assert(apps.head.getExecutorLimit === Int.MaxValue)
}
// kill all executors
assert(killAllExecutors(sc))
var apps = getApplications()
assert(apps.head.executors.size === 0)
assert(apps.head.getExecutorLimit === 0)
// request 1
assert(sc.requestExecutors(1))
apps = getApplications()
assert(apps.head.executors.size === 1)
assert(apps.head.getExecutorLimit === 1)
// request 3 more
assert(sc.requestExecutors(3))
apps = getApplications()
assert(apps.head.executors.size === 4)
assert(apps.head.getExecutorLimit === 4)
// request 10 more; only 6 will go through
assert(sc.requestExecutors(10))
apps = getApplications()
assert(apps.head.executors.size === 10)
assert(apps.head.getExecutorLimit === 14)
// kill 2 executors; we should get 2 back immediately
assert(killNExecutors(sc, 2))
apps = getApplications()
assert(apps.head.executors.size === 10)
assert(apps.head.getExecutorLimit === 12)
// kill 4 executors; we should end up with 12 - 4 = 8 executors
assert(killNExecutors(sc, 4))
apps = getApplications()
assert(apps.head.executors.size === 8)
assert(apps.head.getExecutorLimit === 8)
// kill all executors; this time we'll have 8 - 8 = 0 executors left
assert(killAllExecutors(sc))
apps = getApplications()
assert(apps.head.executors.size === 0)
assert(apps.head.getExecutorLimit === 0)
// request many more; this increases the limit well beyond the cluster capacity
assert(sc.requestExecutors(1000))
apps = getApplications()
assert(apps.head.executors.size === 10)
assert(apps.head.getExecutorLimit === 1000)
}
test("dynamic allocation with cores per executor AND max cores") {
sc = new SparkContext(appConf
.set("spark.executor.cores", "2")
.set("spark.cores.max", "8"))
val appId = sc.applicationId
eventually(timeout(10.seconds), interval(10.millis)) {
val apps = getApplications()
assert(apps.size === 1)
assert(apps.head.id === appId)
assert(apps.head.executors.size === 4) // 8 cores total
assert(apps.head.getExecutorLimit === Int.MaxValue)
}
// kill all executors
assert(killAllExecutors(sc))
var apps = getApplications()
assert(apps.head.executors.size === 0)
assert(apps.head.getExecutorLimit === 0)
// request 1
assert(sc.requestExecutors(1))
apps = getApplications()
assert(apps.head.executors.size === 1)
assert(apps.head.getExecutorLimit === 1)
// request 3 more
assert(sc.requestExecutors(3))
apps = getApplications()
assert(apps.head.executors.size === 4)
assert(apps.head.getExecutorLimit === 4)
// request 10 more; none will go through
assert(sc.requestExecutors(10))
apps = getApplications()
assert(apps.head.executors.size === 4)
assert(apps.head.getExecutorLimit === 14)
// kill all executors; 4 executors will be launched immediately
assert(killAllExecutors(sc))
apps = getApplications()
assert(apps.head.executors.size === 4)
assert(apps.head.getExecutorLimit === 10)
// ... and again
assert(killAllExecutors(sc))
apps = getApplications()
assert(apps.head.executors.size === 4)
assert(apps.head.getExecutorLimit === 6)
// ... and again; now we end up with 6 - 4 = 2 executors left
assert(killAllExecutors(sc))
apps = getApplications()
assert(apps.head.executors.size === 2)
assert(apps.head.getExecutorLimit === 2)
// ... and again; this time we have 2 - 2 = 0 executors left
assert(killAllExecutors(sc))
apps = getApplications()
assert(apps.head.executors.size === 0)
assert(apps.head.getExecutorLimit === 0)
// request many more; this increases the limit well beyond the cluster capacity
assert(sc.requestExecutors(1000))
apps = getApplications()
assert(apps.head.executors.size === 4)
assert(apps.head.getExecutorLimit === 1000)
}
test("kill the same executor twice (SPARK-9795)") {
sc = new SparkContext(appConf)
val appId = sc.applicationId
sc.requestExecutors(2)
eventually(timeout(10.seconds), interval(10.millis)) {
val apps = getApplications()
assert(apps.size === 1)
assert(apps.head.id === appId)
assert(apps.head.executors.size === 2)
assert(apps.head.getExecutorLimit === 2)
}
// sync executors between the Master and the driver, needed because
// the driver refuses to kill executors it does not know about
syncExecutors(sc)
// kill the same executor twice
val executors = getExecutorIds(sc)
assert(executors.size === 2)
assert(sc.killExecutor(executors.head))
assert(!sc.killExecutor(executors.head))
val apps = getApplications()
assert(apps.head.executors.size === 1)
// The limit should not be lowered twice
assert(apps.head.getExecutorLimit === 1)
}
test("the pending replacement executors should not be lost (SPARK-10515)") {
sc = new SparkContext(appConf)
val appId = sc.applicationId
sc.requestExecutors(2)
eventually(timeout(10.seconds), interval(10.millis)) {
val apps = getApplications()
assert(apps.size === 1)
assert(apps.head.id === appId)
assert(apps.head.executors.size === 2)
assert(apps.head.getExecutorLimit === 2)
}
// sync executors between the Master and the driver, needed because
// the driver refuses to kill executors it does not know about
syncExecutors(sc)
val executors = getExecutorIds(sc)
val executorIdsBefore = executors.toSet
assert(executors.size === 2)
// kill and replace an executor
assert(sc.killAndReplaceExecutor(executors.head))
eventually(timeout(10.seconds), interval(10.millis)) {
val apps = getApplications()
assert(apps.head.executors.size === 2)
val executorIdsAfter = getExecutorIds(sc).toSet
// make sure the executor was killed and replaced
assert(executorIdsBefore != executorIdsAfter)
}
// kill old executor (which is killedAndReplaced) should fail
assert(!sc.killExecutor(executors.head))
// refresh executors list
val newExecutors = getExecutorIds(sc)
syncExecutors(sc)
// kill newly created executor and do not replace it
assert(sc.killExecutor(newExecutors(1)))
val apps = getApplications()
assert(apps.head.executors.size === 1)
assert(apps.head.getExecutorLimit === 1)
}
test("disable force kill for busy executors (SPARK-9552)") {
sc = new SparkContext(appConf)
val appId = sc.applicationId
eventually(timeout(10.seconds), interval(10.millis)) {
val apps = getApplications()
assert(apps.size === 1)
assert(apps.head.id === appId)
assert(apps.head.executors.size === 2)
assert(apps.head.getExecutorLimit === Int.MaxValue)
}
var apps = getApplications()
// sync executors between the Master and the driver, needed because
// the driver refuses to kill executors it does not know about
syncExecutors(sc)
val executors = getExecutorIds(sc)
assert(executors.size === 2)
// simulate running a task on the executor
val getMap =
PrivateMethod[mutable.HashMap[String, mutable.HashSet[Long]]]('executorIdToRunningTaskIds)
val taskScheduler = sc.taskScheduler.asInstanceOf[TaskSchedulerImpl]
val executorIdToRunningTaskIds = taskScheduler invokePrivate getMap()
executorIdToRunningTaskIds(executors.head) = mutable.HashSet(1L)
// kill the busy executor without force; this should fail
assert(killExecutor(sc, executors.head, force = false).isEmpty)
apps = getApplications()
assert(apps.head.executors.size === 2)
// force kill busy executor
assert(killExecutor(sc, executors.head, force = true).nonEmpty)
apps = getApplications()
// kill executor successfully
assert(apps.head.executors.size === 1)
}
test("initial executor limit") {
val initialExecutorLimit = 1
val myConf = appConf
.set("spark.dynamicAllocation.enabled", "true")
.set("spark.shuffle.service.enabled", "true")
.set("spark.dynamicAllocation.initialExecutors", initialExecutorLimit.toString)
sc = new SparkContext(myConf)
val appId = sc.applicationId
eventually(timeout(10.seconds), interval(10.millis)) {
val apps = getApplications()
assert(apps.size === 1)
assert(apps.head.id === appId)
assert(apps.head.executors.size === initialExecutorLimit)
assert(apps.head.getExecutorLimit === initialExecutorLimit)
}
}
test("kill all executors on localhost") {
sc = new SparkContext(appConf)
val appId = sc.applicationId
eventually(timeout(10.seconds), interval(10.millis)) {
val apps = getApplications()
assert(apps.size === 1)
assert(apps.head.id === appId)
assert(apps.head.executors.size === 2)
assert(apps.head.getExecutorLimit === Int.MaxValue)
}
val beforeList = getApplications().head.executors.keys.toSet
assert(killExecutorsOnHost(sc, "localhost").equals(true))
syncExecutors(sc)
val afterList = getApplications().head.executors.keys.toSet
eventually(timeout(10.seconds), interval(100.millis)) {
assert(beforeList.intersect(afterList).size == 0)
}
}
test("executor registration on a blacklisted host must fail") {
sc = new SparkContext(appConf.set(config.BLACKLIST_ENABLED.key, "true"))
val endpointRef = mock(classOf[RpcEndpointRef])
val mockAddress = mock(classOf[RpcAddress])
when(endpointRef.address).thenReturn(mockAddress)
val message = RegisterExecutor("one", endpointRef, "blacklisted-host", 10, Map.empty)
// Get "localhost" on a blacklist.
val taskScheduler = mock(classOf[TaskSchedulerImpl])
when(taskScheduler.nodeBlacklist()).thenReturn(Set("blacklisted-host"))
when(taskScheduler.sc).thenReturn(sc)
sc.taskScheduler = taskScheduler
// Create a fresh scheduler backend to blacklist "localhost".
sc.schedulerBackend.stop()
val backend =
new StandaloneSchedulerBackend(taskScheduler, sc, Array(masterRpcEnv.address.toSparkURL))
backend.start()
backend.driverEndpoint.ask[Boolean](message)
eventually(timeout(10.seconds), interval(100.millis)) {
verify(endpointRef).send(RegisterExecutorFailed(any()))
}
}
// ===============================
// | Utility methods for testing |
// ===============================
/** Return a SparkConf for applications that want to talk to our Master. */
private def appConf: SparkConf = {
new SparkConf()
.setMaster(masterRpcEnv.address.toSparkURL)
.setAppName("test")
.set("spark.executor.memory", "256m")
}
/** Make a master to which our application will send executor requests. */
private def makeMaster(): Master = {
val master = new Master(masterRpcEnv, masterRpcEnv.address, 0, securityManager, conf)
masterRpcEnv.setupEndpoint(Master.ENDPOINT_NAME, master)
master
}
/** Make a few workers that talk to our master. */
private def makeWorkers(cores: Int, memory: Int): Seq[Worker] = {
(0 until numWorkers).map { i =>
val rpcEnv = workerRpcEnvs(i)
val worker = new Worker(rpcEnv, 0, cores, memory, Array(masterRpcEnv.address),
Worker.ENDPOINT_NAME, null, conf, securityManager)
rpcEnv.setupEndpoint(Worker.ENDPOINT_NAME, worker)
worker
}
}
/** Get the Master state */
private def getMasterState: MasterStateResponse = {
master.self.askSync[MasterStateResponse](RequestMasterState)
}
/** Get the applications that are active from Master */
private def getApplications(): Seq[ApplicationInfo] = {
getMasterState.activeApps
}
/** Kill all executors belonging to this application. */
private def killAllExecutors(sc: SparkContext): Boolean = {
killNExecutors(sc, Int.MaxValue)
}
/** Kill N executors belonging to this application. */
private def killNExecutors(sc: SparkContext, n: Int): Boolean = {
syncExecutors(sc)
sc.killExecutors(getExecutorIds(sc).take(n))
}
/** Kill the given executor, specifying whether to force kill it. */
private def killExecutor(sc: SparkContext, executorId: String, force: Boolean): Seq[String] = {
syncExecutors(sc)
sc.schedulerBackend match {
case b: CoarseGrainedSchedulerBackend =>
b.killExecutors(Seq(executorId), adjustTargetNumExecutors = true, countFailures = false,
force)
case _ => fail("expected coarse grained scheduler")
}
}
/** Kill the executors on a given host. */
private def killExecutorsOnHost(sc: SparkContext, host: String): Boolean = {
syncExecutors(sc)
sc.schedulerBackend match {
case b: CoarseGrainedSchedulerBackend =>
b.killExecutorsOnHost(host)
case _ => fail("expected coarse grained scheduler")
}
}
/**
* Return a list of executor IDs belonging to this application.
*
* Note that we must use the executor IDs according to the Master, which has the most
* updated view. We cannot rely on the executor IDs according to the driver because we
* don't wait for executors to register. Otherwise the tests will take much longer to run.
*/
private def getExecutorIds(sc: SparkContext): Seq[String] = {
val app = getApplications().find(_.id == sc.applicationId)
assert(app.isDefined)
// Although executors is transient, master is in the same process so the message won't be
// serialized and it's safe here.
app.get.executors.keys.map(_.toString).toSeq
}
/**
* Sync executor IDs between the driver and the Master.
*
* This allows us to avoid waiting for new executors to register with the driver before
* we submit a request to kill them. This must be called before each kill request.
*/
private def syncExecutors(sc: SparkContext): Unit = {
val driverExecutors = sc.env.blockManager.master.getStorageStatus
.map(_.blockManagerId.executorId)
.filter { _ != SparkContext.DRIVER_IDENTIFIER}
val masterExecutors = getExecutorIds(sc)
val missingExecutors = masterExecutors.toSet.diff(driverExecutors.toSet).toSeq.sorted
missingExecutors.foreach { id =>
// Fake an executor registration so the driver knows about us
val endpointRef = mock(classOf[RpcEndpointRef])
val mockAddress = mock(classOf[RpcAddress])
when(endpointRef.address).thenReturn(mockAddress)
val message = RegisterExecutor(id, endpointRef, "localhost", 10, Map.empty)
val backend = sc.schedulerBackend.asInstanceOf[CoarseGrainedSchedulerBackend]
backend.driverEndpoint.askSync[Boolean](message)
}
}
}
| bravo-zhang/spark | core/src/test/scala/org/apache/spark/deploy/StandaloneDynamicAllocationSuite.scala | Scala | apache-2.0 | 24,881 |