code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
package counter.manager
import akka.actor.{Actor, ActorRef, Props}
import akka.persistence.{RecoveryCompleted, PersistentActor}
import counter.manager.Counter._
import counter.operation.StartOperation.Started
import counter.warning.WarningCollector.CounterExceeded
// Event-sourced counter. Commands (Init/Count/State) arrive via receiveCommand;
// every state change is persisted as CounterInit/CounterValue and replayed on restart.
class Counter(name: String, warningCollector: ActorRef) extends PersistentActor {
// One journal stream per counter name.
override def persistenceId = s"counter-$name"
// Rebuilds the actor's behaviour from replayed events.
// NOTE(review): on limit-exceed the command side persists CounterInit(limit) and
// becomes `init`, but this handler maps CounterInit to active(limit) — after a
// restart an exceeded counter resumes as `active` instead of `init`. Confirm intended.
def receiveRecover = {
case CounterInit(limit) =>
context become {
active(limit)
}
case CounterValue(limit, actualValue) =>
context become {
active(limit, actualValue)
}
}
// Fresh counters start uninitialised, waiting for Init.
def receiveCommand = init
// Uninitialised state: only Init is handled; other commands are silently dropped.
def init: Receive = {
case Init(limit) =>
initialize(limit)
}
// Counting state: tracks `actualValue` against `limit`.
def active(limit: Long, actualValue: Long = 0L): Receive = {
case Init(newLimit) =>
// Re-initialisation resets the count to 0 under the new limit.
initialize(newLimit)
case State =>
sender() ! CounterDetails(limit, actualValue)
case Count(value) =>
val newValue = actualValue + value
if (newValue >= limit) {
// Limit reached: warn and drop back to the uninitialised state.
persist(CounterInit(limit)) { event =>
warningCollector ! CounterExceeded(name, limit, newValue)
context become {
init
}
}
} else {
persist(CounterValue(limit, newValue)) { event =>
context become {
active(limit, newValue)
}
}
}
}
// Persists the (re)initialisation, acks the requester, and enters `active`.
// sender() inside the persist callback is safe: PersistentActor preserves it.
def initialize(limit: Long): Unit = {
persist(CounterInit(limit)) { event =>
sender() ! Started
context become {
active(event.limit)
}
}
}
}
object Counter {
// Command: (re)start counting towards `limit`; resets the current value.
case class Init(limit: Long)
// Command: add `value` to the current count.
case class Count(value: Long)
// Query: request the current limit/used pair; answered with CounterDetails.
case object State
// Reply to State.
case class CounterDetails(limit: Long, used: Long)
def props(name: String, warningCollector: ActorRef) = Props(new Counter(name, warningCollector))
}
// Persisted event: counter (re)initialised with `limit` (value implicitly 0).
case class CounterInit(limit: Long)
// Persisted event: counter reached `actualValue` while counting towards `limit`.
case class CounterValue(limit: Long, actualValue: Long)
| grzesiekw/counter | counter-core/src/main/scala/counter/manager/Counter.scala | Scala | apache-2.0 | 1,897 |
package me.eax.examples.tracing
import akka.actor._
import akka.pattern.ask
import kamon.trace.Tracer
import scala.concurrent.Future
/**
 * Message protocol and ask-helper for [[SecondActor]].
 *
 * Fix: the companion was declared `case object`, which needlessly made the
 * companion itself a Product/Serializable message-like value; a plain
 * `object` is the idiomatic companion form.
 */
object SecondActor {
  /** Request asking the second actor to log `msg` and acknowledge. */
  case class SayHelloSecond(msg: String)

  /** Ask-style facade over a [[SecondActor]] reference (timeout via AskHelper). */
  case class AskExt(ref: ActorRef) extends AskHelper {
    /** Sends [[SayHelloSecond]] and completes when the actor replies. */
    def sayHello(msg: String): Future[Unit] = {
      (ref ? SayHelloSecond(msg)).mapTo[Unit]
    }
  }
}
/** Logs each greeting together with the active Kamon trace token, then acks. */
class SecondActor extends Actor with ActorLogging {
  import SecondActor._

  override def receive: Receive = {
    case SayHelloSecond(msg) =>
      // Include the current trace token so log lines can be correlated.
      log.debug(s"Second actor: $msg, token = ${Tracer.currentContext.token}")
      sender() ! {}
  }
}
| afiskon/akka-tracing-example | tracing/src/main/scala/me/eax/examples/tracing/SecondActor.scala | Scala | mit | 641 |
package com.twitter.server
import com.twitter.app.App
import com.twitter.finagle.tracing.Trace
import com.twitter.util.Time
import java.util.logging.{Formatter, Level, LogRecord, Logger}
import java.io.{PrintWriter, StringWriter}
import scala.reflect.NameTransformer
/** Mixin that installs the glog-style [[LogFormatter]] on every root-logger handler. */
trait LogFormat { app: App =>
  // Runs after flag parsing but before main: reformat all existing handlers.
  premain {
    Logger.getLogger("").getHandlers.foreach(_.setFormatter(new LogFormatter))
  }
}
/**
* Implements "glog" style log formatting.
*/
/**
 * Implements "glog" style log formatting:
 * `<severity><MMdd HH:mm:ss.SSS> THREAD<id> [TraceId:<id>] <class>.<method>: <msg>`
 * followed by the stack trace when a throwable is attached.
 */
private class LogFormatter extends Formatter {
  // java.util.logging levels collapsed to single glog severity characters.
  private val levels = Map[Level, Char](
    Level.FINEST -> 'D',
    Level.FINER -> 'D',
    Level.FINE -> 'D',
    Level.CONFIG -> 'I',
    Level.INFO -> 'I',
    Level.WARNING -> 'W',
    Level.SEVERE -> 'E'
  )

  // Make some effort to demangle scala names: decode operator encodings and
  // truncate compiler-generated "$$" suffixes, marking the cut with '~'.
  private def prettyClass(name: String) = {
    var s = NameTransformer.decode(name)
    val dolladolla = s.indexOf("$$")
    if (dolladolla > 0) {
      s = s.substring(0, dolladolla)
      s += "~"
    }
    s
  }

  override def format(r: LogRecord) = {
    val msg = formatMessage(r)
    // Pre-size for the message plus the fixed prefix and a typical trace suffix.
    val str = new StringBuilder(msg.size + 30 + 150)
      .append(levels.getOrElse(r.getLevel, 'U')) // 'U' for unmapped levels
      .append(Time.fromMilliseconds(r.getMillis).format(" MMdd HH:mm:ss.SSS"))
      .append(" THREAD")
      .append(r.getThreadID)
    // Attach the finagle trace id when a trace is active.
    for (id <- Trace.idOption) {
      str.append(" TraceId:")
      str.append(id.traceId)
    }
    if (r.getSourceClassName != null) {
      str.append(' ').append(prettyClass(r.getSourceClassName))
      if (r.getSourceMethodName != null)
        str.append('.').append(r.getSourceMethodName)
    }
    str.append(": ")
    str.append(msg)
    if (r.getThrown != null) {
      val w = new StringWriter
      r.getThrown.printStackTrace(new PrintWriter(w))
      // Fix: append a real newline before the stack trace — the original
      // emitted the two literal characters '\' and 'n'.
      str.append("\n").append(w.toString)
    }
    // Fix: terminate the record with a real newline for the same reason.
    str.append("\n")
    str.toString
  }
}
| nshkrob/twitter-server | src/main/scala/com/twitter/server/LogFormat.scala | Scala | apache-2.0 | 1,848 |
package counter.operation
import akka.actor.{Actor, ActorRef, Props}
import counter.manager.Counter.CounterDetails
import counter.manager.CounterManager.{Details, NotFound}
import counter.operation.DetailsOperation.GetCounter
import counter.operation.OperationReceiver.CounterNotFound
/**
 * One-shot operation actor: forwards a counter lookup to the manager, relays
 * the manager's answer to the original requester, then stops itself.
 */
class DetailsOperation(counterManager: ActorRef) extends Actor {

  def receive = {
    case GetCounter(name) =>
      // Capture the requester before switching behaviour.
      val origin = sender()
      counterManager ! Details(name)
      // After relaying the reply, any message falls through to stopItself.
      context.become(replyTo(origin).andThen(stopItself))
  }

  /** Relays the manager's reply (details or not-found) back to `origin`. */
  def replyTo(origin: ActorRef): Receive = {
    case found: CounterDetails => origin ! found
    case NotFound => origin ! CounterNotFound()
  }

  /** Terminates this short-lived actor once its single reply has been sent. */
  def stopItself: Receive = {
    case _ => context.stop(self)
  }
}
object DetailsOperation {
// Command: look up the counter registered under `name`.
case class GetCounter(name: String)
def props(counterManager: ActorRef) = Props(new DetailsOperation(counterManager))
}
| grzesiekw/counter | counter-core/src/main/scala/counter/operation/DetailsOperation.scala | Scala | apache-2.0 | 909 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import java.nio.ByteBuffer
import java.util.{HashMap => JHashMap}
import scala.collection.{mutable, Map}
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag
import com.clearspring.analytics.stream.cardinality.HyperLogLogPlus
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.io.SequenceFile.CompressionType
import org.apache.hadoop.io.compress.CompressionCodec
import org.apache.hadoop.mapred.{FileOutputCommitter, FileOutputFormat, JobConf, OutputFormat}
import org.apache.hadoop.mapreduce.{Job => NewAPIHadoopJob, OutputFormat => NewOutputFormat}
import org.apache.spark._
import org.apache.spark.Partitioner.defaultPartitioner
import org.apache.spark.annotation.Experimental
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.executor.OutputMetrics
import org.apache.spark.internal.io.{FileCommitProtocol, HadoopMapReduceCommitProtocol, SparkHadoopMapReduceWriter, SparkHadoopWriterUtils}
import org.apache.spark.internal.Logging
import org.apache.spark.partial.{BoundedDouble, PartialResult}
import org.apache.spark.serializer.Serializer
import org.apache.spark.util.Utils
import org.apache.spark.util.collection.CompactBuffer
import org.apache.spark.util.random.StratifiedSamplingUtils
/**
* Extra functions available on RDDs of (key, value) pairs through an implicit conversion.
*/
class PairRDDFunctions[K, V](self: RDD[(K, V)])
(implicit kt: ClassTag[K], vt: ClassTag[V], ord: Ordering[K] = null)
extends Logging with Serializable {
/**
* :: Experimental ::
* Generic function to combine the elements for each key using a custom set of aggregation
* functions. Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined type" C
*
* Users provide three functions:
*
* - `createCombiner`, which turns a V into a C (e.g., creates a one-element list)
* - `mergeValue`, to merge a V into a C (e.g., adds it to the end of a list)
* - `mergeCombiners`, to combine two C's into a single one.
*
* In addition, users can control the partitioning of the output RDD, and whether to perform
* map-side aggregation (if a mapper can produce multiple items with the same key).
*
* @note V and C can be different -- for example, one might group an RDD of type
* (Int, Int) into an RDD of type (Int, Seq[Int]).
*/
@Experimental
def combineByKeyWithClassTag[C](
createCombiner: V => C,
mergeValue: (C, V) => C,
mergeCombiners: (C, C) => C,
partitioner: Partitioner,
mapSideCombine: Boolean = true,
serializer: Serializer = null)(implicit ct: ClassTag[C]): RDD[(K, C)] = self.withScope {
require(mergeCombiners != null, "mergeCombiners must be defined") // required as of Spark 0.9.0
// Array keys have no value-based hashCode, so they can be neither combined
// map-side nor hash-partitioned.
if (keyClass.isArray) {
if (mapSideCombine) {
throw new SparkException("Cannot use map-side combining with array keys.")
}
if (partitioner.isInstanceOf[HashPartitioner]) {
throw new SparkException("HashPartitioner cannot partition array keys.")
}
}
// Clean each closure so it can be serialized and shipped to executors.
val aggregator = new Aggregator[K, V, C](
self.context.clean(createCombiner),
self.context.clean(mergeValue),
self.context.clean(mergeCombiners))
// Already partitioned as requested: combine within partitions and skip the shuffle.
if (self.partitioner == Some(partitioner)) {
self.mapPartitions(iter => {
val context = TaskContext.get()
new InterruptibleIterator(context, aggregator.combineValuesByKey(iter, context))
}, preservesPartitioning = true)
} else {
new ShuffledRDD[K, V, C](self, partitioner)
.setSerializer(serializer)
.setAggregator(aggregator)
.setMapSideCombine(mapSideCombine)
}
}
/**
* Generic function to combine the elements for each key using a custom set of aggregation
* functions. This method is here for backward compatibility. It does not provide combiner
* classtag information to the shuffle.
*
* @see [[combineByKeyWithClassTag]]
*/
def combineByKey[C](
createCombiner: V => C,
mergeValue: (C, V) => C,
mergeCombiners: (C, C) => C,
partitioner: Partitioner,
mapSideCombine: Boolean = true,
serializer: Serializer = null): RDD[(K, C)] = self.withScope {
// Explicit null ClassTag preserves the pre-classtag shuffle behaviour.
combineByKeyWithClassTag(createCombiner, mergeValue, mergeCombiners,
partitioner, mapSideCombine, serializer)(null)
}
/**
* Simplified version of combineByKeyWithClassTag that hash-partitions the output RDD.
* This method is here for backward compatibility. It does not provide combiner
* classtag information to the shuffle.
*
* @see [[combineByKeyWithClassTag]]
*/
def combineByKey[C](
createCombiner: V => C,
mergeValue: (C, V) => C,
mergeCombiners: (C, C) => C,
numPartitions: Int): RDD[(K, C)] = self.withScope {
// Explicit null ClassTag preserves the pre-classtag shuffle behaviour.
combineByKeyWithClassTag(createCombiner, mergeValue, mergeCombiners, numPartitions)(null)
}
/**
* :: Experimental ::
* Simplified version of combineByKeyWithClassTag that hash-partitions the output RDD.
*/
@Experimental
def combineByKeyWithClassTag[C](
createCombiner: V => C,
mergeValue: (C, V) => C,
mergeCombiners: (C, C) => C,
numPartitions: Int)(implicit ct: ClassTag[C]): RDD[(K, C)] = self.withScope {
// Hash-partition into the requested number of partitions.
combineByKeyWithClassTag(createCombiner, mergeValue, mergeCombiners,
new HashPartitioner(numPartitions))
}
/**
* Aggregate the values of each key, using given combine functions and a neutral "zero value".
* This function can return a different result type, U, than the type of the values in this RDD,
* V. Thus, we need one operation for merging a V into a U and one operation for merging two U's,
* as in scala.TraversableOnce. The former operation is used for merging values within a
* partition, and the latter is used for merging values between partitions. To avoid memory
* allocation, both of these functions are allowed to modify and return their first argument
* instead of creating a new U.
*/
def aggregateByKey[U: ClassTag](zeroValue: U, partitioner: Partitioner)(seqOp: (U, V) => U,
combOp: (U, U) => U): RDD[(K, U)] = self.withScope {
// Serialize the zero value to a byte array so that we can get a new clone of it on each key
val zeroBuffer = SparkEnv.get.serializer.newInstance().serialize(zeroValue)
val zeroArray = new Array[Byte](zeroBuffer.limit)
zeroBuffer.get(zeroArray)
// lazy so each task builds exactly one deserializing serializer instance.
lazy val cachedSerializer = SparkEnv.get.serializer.newInstance()
val createZero = () => cachedSerializer.deserialize[U](ByteBuffer.wrap(zeroArray))
// We will clean the combiner closure later in `combineByKey`
val cleanedSeqOp = self.context.clean(seqOp)
combineByKeyWithClassTag[U]((v: V) => cleanedSeqOp(createZero(), v),
cleanedSeqOp, combOp, partitioner)
}
/**
* Aggregate the values of each key, using given combine functions and a neutral "zero value".
* This function can return a different result type, U, than the type of the values in this RDD,
* V. Thus, we need one operation for merging a V into a U and one operation for merging two U's,
* as in scala.TraversableOnce. The former operation is used for merging values within a
* partition, and the latter is used for merging values between partitions. To avoid memory
* allocation, both of these functions are allowed to modify and return their first argument
* instead of creating a new U.
*/
def aggregateByKey[U: ClassTag](zeroValue: U, numPartitions: Int)(seqOp: (U, V) => U,
combOp: (U, U) => U): RDD[(K, U)] = self.withScope {
// Delegate to the Partitioner overload with a HashPartitioner of the given size.
aggregateByKey(zeroValue, new HashPartitioner(numPartitions))(seqOp, combOp)
}
/**
* Aggregate the values of each key, using given combine functions and a neutral "zero value".
* This function can return a different result type, U, than the type of the values in this RDD,
* V. Thus, we need one operation for merging a V into a U and one operation for merging two U's,
* as in scala.TraversableOnce. The former operation is used for merging values within a
* partition, and the latter is used for merging values between partitions. To avoid memory
* allocation, both of these functions are allowed to modify and return their first argument
* instead of creating a new U.
*/
def aggregateByKey[U: ClassTag](zeroValue: U)(seqOp: (U, V) => U,
combOp: (U, U) => U): RDD[(K, U)] = self.withScope {
// Uses the existing partitioner / default parallelism level.
aggregateByKey(zeroValue, defaultPartitioner(self))(seqOp, combOp)
}
/**
* Merge the values for each key using an associative function and a neutral "zero value" which
* may be added to the result an arbitrary number of times, and must not change the result
* (e.g., Nil for list concatenation, 0 for addition, or 1 for multiplication.).
*/
def foldByKey(
zeroValue: V,
partitioner: Partitioner)(func: (V, V) => V): RDD[(K, V)] = self.withScope {
// Serialize the zero value to a byte array so that we can get a new clone of it on each key
val zeroBuffer = SparkEnv.get.serializer.newInstance().serialize(zeroValue)
val zeroArray = new Array[Byte](zeroBuffer.limit)
zeroBuffer.get(zeroArray)
// When deserializing, use a lazy val to create just one instance of the serializer per task
lazy val cachedSerializer = SparkEnv.get.serializer.newInstance()
val createZero = () => cachedSerializer.deserialize[V](ByteBuffer.wrap(zeroArray))
val cleanedFunc = self.context.clean(func)
// Folding is combining where the same function both merges a value and merges partials.
combineByKeyWithClassTag[V]((v: V) => cleanedFunc(createZero(), v),
cleanedFunc, cleanedFunc, partitioner)
}
/**
* Merge the values for each key using an associative function and a neutral "zero value" which
* may be added to the result an arbitrary number of times, and must not change the result
* (e.g., Nil for list concatenation, 0 for addition, or 1 for multiplication.).
*/
// Hash-partitions the result into `numPartitions` partitions.
def foldByKey(zeroValue: V, numPartitions: Int)(func: (V, V) => V): RDD[(K, V)] = self.withScope {
foldByKey(zeroValue, new HashPartitioner(numPartitions))(func)
}
/**
* Merge the values for each key using an associative function and a neutral "zero value" which
* may be added to the result an arbitrary number of times, and must not change the result
* (e.g., Nil for list concatenation, 0 for addition, or 1 for multiplication.).
*/
// Uses the existing partitioner / default parallelism level.
def foldByKey(zeroValue: V)(func: (V, V) => V): RDD[(K, V)] = self.withScope {
foldByKey(zeroValue, defaultPartitioner(self))(func)
}
/**
* Return a subset of this RDD sampled by key (via stratified sampling).
*
* Create a sample of this RDD using variable sampling rates for different keys as specified by
* `fractions`, a key to sampling rate map, via simple random sampling with one pass over the
* RDD, to produce a sample of size that's approximately equal to the sum of
* math.ceil(numItems * samplingRate) over all key values.
*
* @param withReplacement whether to sample with or without replacement
* @param fractions map of specific keys to sampling rates
* @param seed seed for the random number generator
* @return RDD containing the sampled subset
*/
def sampleByKey(withReplacement: Boolean,
fractions: Map[K, Double],
seed: Long = Utils.random.nextLong): RDD[(K, V)] = self.withScope {
require(fractions.values.forall(v => v >= 0.0), "Negative sampling rates.")
// Poisson sampling models with-replacement draws; Bernoulli models without.
// `exact = false`: single pass, sample size only approximately guaranteed.
val samplingFunc = if (withReplacement) {
StratifiedSamplingUtils.getPoissonSamplingFunction(self, fractions, false, seed)
} else {
StratifiedSamplingUtils.getBernoulliSamplingFunction(self, fractions, false, seed)
}
self.mapPartitionsWithIndex(samplingFunc, preservesPartitioning = true)
}
/**
* Return a subset of this RDD sampled by key (via stratified sampling) containing exactly
* math.ceil(numItems * samplingRate) for each stratum (group of pairs with the same key).
*
* This method differs from [[sampleByKey]] in that we make additional passes over the RDD to
* create a sample size that's exactly equal to the sum of math.ceil(numItems * samplingRate)
* over all key values with a 99.99% confidence. When sampling without replacement, we need one
* additional pass over the RDD to guarantee sample size; when sampling with replacement, we need
* two additional passes.
*
* @param withReplacement whether to sample with or without replacement
* @param fractions map of specific keys to sampling rates
* @param seed seed for the random number generator
* @return RDD containing the sampled subset
*/
def sampleByKeyExact(
withReplacement: Boolean,
fractions: Map[K, Double],
seed: Long = Utils.random.nextLong): RDD[(K, V)] = self.withScope {
require(fractions.values.forall(v => v >= 0.0), "Negative sampling rates.")
// `exact = true` requests the extra pass(es) that guarantee the sample size.
val samplingFunc = if (withReplacement) {
StratifiedSamplingUtils.getPoissonSamplingFunction(self, fractions, true, seed)
} else {
StratifiedSamplingUtils.getBernoulliSamplingFunction(self, fractions, true, seed)
}
self.mapPartitionsWithIndex(samplingFunc, preservesPartitioning = true)
}
/**
* Merge the values for each key using an associative and commutative reduce function. This will
* also perform the merging locally on each mapper before sending results to a reducer, similarly
* to a "combiner" in MapReduce.
*/
def reduceByKey(partitioner: Partitioner, func: (V, V) => V): RDD[(K, V)] = self.withScope {
// Identity createCombiner: `func` serves as both merge roles.
combineByKeyWithClassTag[V]((v: V) => v, func, func, partitioner)
}
/**
* Merge the values for each key using an associative and commutative reduce function. This will
* also perform the merging locally on each mapper before sending results to a reducer, similarly
* to a "combiner" in MapReduce. Output will be hash-partitioned with numPartitions partitions.
*/
// Hash-partitions the result into `numPartitions` partitions.
def reduceByKey(func: (V, V) => V, numPartitions: Int): RDD[(K, V)] = self.withScope {
reduceByKey(new HashPartitioner(numPartitions), func)
}
/**
* Merge the values for each key using an associative and commutative reduce function. This will
* also perform the merging locally on each mapper before sending results to a reducer, similarly
* to a "combiner" in MapReduce. Output will be hash-partitioned with the existing partitioner/
* parallelism level.
*/
// Uses the existing partitioner / default parallelism level.
def reduceByKey(func: (V, V) => V): RDD[(K, V)] = self.withScope {
reduceByKey(defaultPartitioner(self), func)
}
/**
* Merge the values for each key using an associative and commutative reduce function, but return
* the results immediately to the master as a Map. This will also perform the merging locally on
* each mapper before sending results to a reducer, similarly to a "combiner" in MapReduce.
*/
def reduceByKeyLocally(func: (V, V) => V): Map[K, V] = self.withScope {
val cleanedF = self.sparkContext.clean(func)
// Array keys have no value-based hashCode, so a hash map cannot group them.
if (keyClass.isArray) {
throw new SparkException("reduceByKeyLocally() does not support array keys")
}
// Reduce each partition into one mutable Java hash map (null marks absent keys).
// The explicit ascription keeps this a serializable Function1 value.
val reducePartition = (iter: Iterator[(K, V)]) => {
val map = new JHashMap[K, V]
iter.foreach { pair =>
val old = map.get(pair._1)
map.put(pair._1, if (old == null) pair._2 else cleanedF(old, pair._2))
}
Iterator(map)
} : Iterator[JHashMap[K, V]]
// Merge per-partition maps pairwise on the driver; mutates and returns m1.
val mergeMaps = (m1: JHashMap[K, V], m2: JHashMap[K, V]) => {
m2.asScala.foreach { pair =>
val old = m1.get(pair._1)
m1.put(pair._1, if (old == null) pair._2 else cleanedF(old, pair._2))
}
m1
} : JHashMap[K, V]
self.mapPartitions(reducePartition).reduce(mergeMaps).asScala
}
/**
* Count the number of elements for each key, collecting the results to a local Map.
*
* @note This method should only be used if the resulting map is expected to be small, as
* the whole thing is loaded into the driver's memory.
* To handle very large results, consider using rdd.mapValues(_ => 1L).reduceByKey(_ + _), which
* returns an RDD[T, Long] instead of a map.
*/
def countByKey(): Map[K, Long] = self.withScope {
// Per-key sum of ones, collected to the driver.
self.mapValues(_ => 1L).reduceByKey(_ + _).collect().toMap
}
/**
* Approximate version of countByKey that can return a partial result if it does
* not finish within a timeout.
*
* The confidence is the probability that the error bounds of the result will
* contain the true value. That is, if countApprox were called repeatedly
* with confidence 0.9, we would expect 90% of the results to contain the
* true count. The confidence must be in the range [0,1] or an exception will
* be thrown.
*
* @param timeout maximum time to wait for the job, in milliseconds
* @param confidence the desired statistical confidence in the result
* @return a potentially incomplete result, with error bounds
*/
def countByKeyApprox(timeout: Long, confidence: Double = 0.95)
: PartialResult[Map[K, BoundedDouble]] = self.withScope {
// Only the keys matter here; defer to the generic approximate value counter.
self.map(_._1).countByValueApprox(timeout, confidence)
}
/**
* Return approximate number of distinct values for each key in this RDD.
*
* The algorithm used is based on streamlib's implementation of "HyperLogLog in Practice:
* Algorithmic Engineering of a State of The Art Cardinality Estimation Algorithm", available
* <a href="http://dx.doi.org/10.1145/2452376.2452456">here</a>.
*
* The relative accuracy is approximately `1.054 / sqrt(2^p)`. Setting a nonzero (`sp` is
* greater than `p`) would trigger sparse representation of registers, which may reduce the
* memory consumption and increase accuracy when the cardinality is small.
*
* @param p The precision value for the normal set.
* `p` must be a value between 4 and `sp` if `sp` is not zero (32 max).
* @param sp The precision value for the sparse set, between 0 and 32.
* If `sp` equals 0, the sparse representation is skipped.
* @param partitioner Partitioner to use for the resulting RDD.
*/
def countApproxDistinctByKey(
p: Int,
sp: Int,
partitioner: Partitioner): RDD[(K, Long)] = self.withScope {
require(p >= 4, s"p ($p) must be >= 4")
require(sp <= 32, s"sp ($sp) must be <= 32")
require(sp == 0 || p <= sp, s"p ($p) cannot be greater than sp ($sp)")
// createCombiner: a fresh HyperLogLog++ sketch seeded with the key's first value.
val createHLL = (v: V) => {
val hll = new HyperLogLogPlus(p, sp)
hll.offer(v)
hll
}
// mergeValue: fold another value into an existing sketch.
val mergeValueHLL = (hll: HyperLogLogPlus, v: V) => {
hll.offer(v)
hll
}
// mergeCombiners: union two sketches (mutates and returns the first).
val mergeHLL = (h1: HyperLogLogPlus, h2: HyperLogLogPlus) => {
h1.addAll(h2)
h1
}
combineByKeyWithClassTag(createHLL, mergeValueHLL, mergeHLL, partitioner)
.mapValues(_.cardinality())
}
/**
* Return approximate number of distinct values for each key in this RDD.
*
* The algorithm used is based on streamlib's implementation of "HyperLogLog in Practice:
* Algorithmic Engineering of a State of The Art Cardinality Estimation Algorithm", available
* <a href="http://dx.doi.org/10.1145/2452376.2452456">here</a>.
*
* @param relativeSD Relative accuracy. Smaller values create counters that require more space.
* It must be greater than 0.000017.
* @param partitioner partitioner of the resulting RDD
*/
def countApproxDistinctByKey(
relativeSD: Double,
partitioner: Partitioner): RDD[(K, Long)] = self.withScope {
require(relativeSD > 0.000017, s"accuracy ($relativeSD) must be greater than 0.000017")
// Invert relativeSD ~= 1.054 / sqrt(2^p) to pick the HLL precision, clamped to >= 4.
val p = math.ceil(2.0 * math.log(1.054 / relativeSD) / math.log(2)).toInt
assert(p <= 32)
countApproxDistinctByKey(if (p < 4) 4 else p, 0, partitioner)
}
/**
* Return approximate number of distinct values for each key in this RDD.
*
* The algorithm used is based on streamlib's implementation of "HyperLogLog in Practice:
* Algorithmic Engineering of a State of The Art Cardinality Estimation Algorithm", available
* <a href="http://dx.doi.org/10.1145/2452376.2452456">here</a>.
*
* @param relativeSD Relative accuracy. Smaller values create counters that require more space.
* It must be greater than 0.000017.
* @param numPartitions number of partitions of the resulting RDD
*/
def countApproxDistinctByKey(
relativeSD: Double,
numPartitions: Int): RDD[(K, Long)] = self.withScope {
// Hash-partition into the requested number of partitions.
countApproxDistinctByKey(relativeSD, new HashPartitioner(numPartitions))
}
/**
* Return approximate number of distinct values for each key in this RDD.
*
* The algorithm used is based on streamlib's implementation of "HyperLogLog in Practice:
* Algorithmic Engineering of a State of The Art Cardinality Estimation Algorithm", available
* <a href="http://dx.doi.org/10.1145/2452376.2452456">here</a>.
*
* @param relativeSD Relative accuracy. Smaller values create counters that require more space.
* It must be greater than 0.000017.
*/
// Uses the existing partitioner / default parallelism level.
def countApproxDistinctByKey(relativeSD: Double = 0.05): RDD[(K, Long)] = self.withScope {
countApproxDistinctByKey(relativeSD, defaultPartitioner(self))
}
/**
* Group the values for each key in the RDD into a single sequence. Allows controlling the
* partitioning of the resulting key-value pair RDD by passing a Partitioner.
* The ordering of elements within each group is not guaranteed, and may even differ
* each time the resulting RDD is evaluated.
*
* @note This operation may be very expensive. If you are grouping in order to perform an
* aggregation (such as a sum or average) over each key, using `PairRDDFunctions.aggregateByKey`
* or `PairRDDFunctions.reduceByKey` will provide much better performance.
*
* @note As currently implemented, groupByKey must be able to hold all the key-value pairs for any
* key in memory. If a key has too many values, it can result in an [[OutOfMemoryError]].
*/
def groupByKey(partitioner: Partitioner): RDD[(K, Iterable[V])] = self.withScope {
// groupByKey shouldn't use map side combine because map side combine does not
// reduce the amount of data shuffled and requires all map side data be inserted
// into a hash table, leading to more objects in the old gen.
val createCombiner = (v: V) => CompactBuffer(v)
val mergeValue = (buf: CompactBuffer[V], v: V) => buf += v
val mergeCombiners = (c1: CompactBuffer[V], c2: CompactBuffer[V]) => c1 ++= c2
val bufs = combineByKeyWithClassTag[CompactBuffer[V]](
createCombiner, mergeValue, mergeCombiners, partitioner, mapSideCombine = false)
// CompactBuffer is an Iterable[V]; the cast only forgets the concrete buffer type.
bufs.asInstanceOf[RDD[(K, Iterable[V])]]
}
/**
* Group the values for each key in the RDD into a single sequence. Hash-partitions the
* resulting RDD with into `numPartitions` partitions. The ordering of elements within
* each group is not guaranteed, and may even differ each time the resulting RDD is evaluated.
*
* @note This operation may be very expensive. If you are grouping in order to perform an
* aggregation (such as a sum or average) over each key, using `PairRDDFunctions.aggregateByKey`
* or `PairRDDFunctions.reduceByKey` will provide much better performance.
*
* @note As currently implemented, groupByKey must be able to hold all the key-value pairs for any
* key in memory. If a key has too many values, it can result in an [[OutOfMemoryError]].
*/
// Hash-partitions the result into `numPartitions` partitions.
def groupByKey(numPartitions: Int): RDD[(K, Iterable[V])] = self.withScope {
groupByKey(new HashPartitioner(numPartitions))
}
/**
* Return a copy of the RDD partitioned using the specified partitioner.
*/
def partitionBy(partitioner: Partitioner): RDD[(K, V)] = self.withScope {
// Array keys have no value-based hashCode, so hash partitioning would misplace them.
if (keyClass.isArray && partitioner.isInstanceOf[HashPartitioner]) {
throw new SparkException("HashPartitioner cannot partition array keys.")
}
// Already partitioned as requested: reuse this RDD and avoid a shuffle.
if (self.partitioner == Some(partitioner)) {
self
} else {
new ShuffledRDD[K, V, V](self, partitioner)
}
}
/**
* Return an RDD containing all pairs of elements with matching keys in `this` and `other`. Each
* pair of elements will be returned as a (k, (v1, v2)) tuple, where (k, v1) is in `this` and
* (k, v2) is in `other`. Uses the given Partitioner to partition the output RDD.
*/
def join[W](other: RDD[(K, W)], partitioner: Partitioner): RDD[(K, (V, W))] = self.withScope {
// Per-key cartesian product of the two cogrouped value lists; keys present
// on only one side produce an empty iterator and drop out.
this.cogroup(other, partitioner).flatMapValues( pair =>
for (v <- pair._1.iterator; w <- pair._2.iterator) yield (v, w)
)
}
/**
* Perform a left outer join of `this` and `other`. For each element (k, v) in `this`, the
* resulting RDD will either contain all pairs (k, (v, Some(w))) for w in `other`, or the
* pair (k, (v, None)) if no elements in `other` have key k. Uses the given Partitioner to
* partition the output RDD.
*/
def leftOuterJoin[W](
other: RDD[(K, W)],
partitioner: Partitioner): RDD[(K, (V, Option[W]))] = self.withScope {
this.cogroup(other, partitioner).flatMapValues { pair =>
// Keys with no right-side match keep every left value paired with None.
if (pair._2.isEmpty) {
pair._1.iterator.map(v => (v, None))
} else {
for (v <- pair._1.iterator; w <- pair._2.iterator) yield (v, Some(w))
}
}
}
/**
* Perform a right outer join of `this` and `other`. For each element (k, w) in `other`, the
* resulting RDD will either contain all pairs (k, (Some(v), w)) for v in `this`, or the
* pair (k, (None, w)) if no elements in `this` have key k. Uses the given Partitioner to
* partition the output RDD.
*/
def rightOuterJoin[W](other: RDD[(K, W)], partitioner: Partitioner)
: RDD[(K, (Option[V], W))] = self.withScope {
this.cogroup(other, partitioner).flatMapValues { pair =>
// Keys with no left-side match keep every right value paired with None.
if (pair._1.isEmpty) {
pair._2.iterator.map(w => (None, w))
} else {
for (v <- pair._1.iterator; w <- pair._2.iterator) yield (Some(v), w)
}
}
}
/**
* Perform a full outer join of `this` and `other`. For each element (k, v) in `this`, the
* resulting RDD will either contain all pairs (k, (Some(v), Some(w))) for w in `other`, or
* the pair (k, (Some(v), None)) if no elements in `other` have key k. Similarly, for each
* element (k, w) in `other`, the resulting RDD will either contain all pairs
* (k, (Some(v), Some(w))) for v in `this`, or the pair (k, (None, Some(w))) if no elements
* in `this` have key k. Uses the given Partitioner to partition the output RDD.
*/
def fullOuterJoin[W](other: RDD[(K, W)], partitioner: Partitioner)
: RDD[(K, (Option[V], Option[W]))] = self.withScope {
this.cogroup(other, partitioner).flatMapValues {
// Only-left, only-right, and both-sides cases respectively.
case (vs, Seq()) => vs.iterator.map(v => (Some(v), None))
case (Seq(), ws) => ws.iterator.map(w => (None, Some(w)))
case (vs, ws) => for (v <- vs.iterator; w <- ws.iterator) yield (Some(v), Some(w))
}
}
/**
* Simplified version of combineByKeyWithClassTag that hash-partitions the resulting RDD using the
* existing partitioner/parallelism level. This method is here for backward compatibility. It
* does not provide combiner classtag information to the shuffle.
*
* @see [[combineByKeyWithClassTag]]
*/
def combineByKey[C](
createCombiner: V => C,
mergeValue: (C, V) => C,
mergeCombiners: (C, C) => C): RDD[(K, C)] = self.withScope {
// Explicit null ClassTag preserves the pre-classtag shuffle behaviour.
combineByKeyWithClassTag(createCombiner, mergeValue, mergeCombiners)(null)
}
/**
* :: Experimental ::
* Simplified version of combineByKeyWithClassTag that hash-partitions the resulting RDD using the
* existing partitioner/parallelism level.
*/
@Experimental
def combineByKeyWithClassTag[C](
createCombiner: V => C,
mergeValue: (C, V) => C,
mergeCombiners: (C, C) => C)(implicit ct: ClassTag[C]): RDD[(K, C)] = self.withScope {
// Uses the existing partitioner / default parallelism level.
combineByKeyWithClassTag(createCombiner, mergeValue, mergeCombiners, defaultPartitioner(self))
}
/**
* Group the values for each key in the RDD into a single sequence. Hash-partitions the
* resulting RDD with the existing partitioner/parallelism level. The ordering of elements
* within each group is not guaranteed, and may even differ each time the resulting RDD is
* evaluated.
*
* @note This operation may be very expensive. If you are grouping in order to perform an
* aggregation (such as a sum or average) over each key, using `PairRDDFunctions.aggregateByKey`
* or `PairRDDFunctions.reduceByKey` will provide much better performance.
*/
def groupByKey(): RDD[(K, Iterable[V])] = self.withScope {
groupByKey(defaultPartitioner(self))
}
/**
* Return an RDD containing all pairs of elements with matching keys in `this` and `other`. Each
* pair of elements will be returned as a (k, (v1, v2)) tuple, where (k, v1) is in `this` and
* (k, v2) is in `other`. Performs a hash join across the cluster.
*/
def join[W](other: RDD[(K, W)]): RDD[(K, (V, W))] = self.withScope {
join(other, defaultPartitioner(self, other))
}
/**
* Return an RDD containing all pairs of elements with matching keys in `this` and `other`. Each
* pair of elements will be returned as a (k, (v1, v2)) tuple, where (k, v1) is in `this` and
* (k, v2) is in `other`. Performs a hash join across the cluster.
*/
def join[W](other: RDD[(K, W)], numPartitions: Int): RDD[(K, (V, W))] = self.withScope {
join(other, new HashPartitioner(numPartitions))
}
/**
* Perform a left outer join of `this` and `other`. For each element (k, v) in `this`, the
* resulting RDD will either contain all pairs (k, (v, Some(w))) for w in `other`, or the
* pair (k, (v, None)) if no elements in `other` have key k. Hash-partitions the output
* using the existing partitioner/parallelism level.
*/
def leftOuterJoin[W](other: RDD[(K, W)]): RDD[(K, (V, Option[W]))] = self.withScope {
leftOuterJoin(other, defaultPartitioner(self, other))
}
/**
* Perform a left outer join of `this` and `other`. For each element (k, v) in `this`, the
* resulting RDD will either contain all pairs (k, (v, Some(w))) for w in `other`, or the
* pair (k, (v, None)) if no elements in `other` have key k. Hash-partitions the output
* into `numPartitions` partitions.
*/
def leftOuterJoin[W](
other: RDD[(K, W)],
numPartitions: Int): RDD[(K, (V, Option[W]))] = self.withScope {
leftOuterJoin(other, new HashPartitioner(numPartitions))
}
/**
* Perform a right outer join of `this` and `other`. For each element (k, w) in `other`, the
* resulting RDD will either contain all pairs (k, (Some(v), w)) for v in `this`, or the
* pair (k, (None, w)) if no elements in `this` have key k. Hash-partitions the resulting
* RDD using the existing partitioner/parallelism level.
*/
def rightOuterJoin[W](other: RDD[(K, W)]): RDD[(K, (Option[V], W))] = self.withScope {
rightOuterJoin(other, defaultPartitioner(self, other))
}
/**
* Perform a right outer join of `this` and `other`. For each element (k, w) in `other`, the
* resulting RDD will either contain all pairs (k, (Some(v), w)) for v in `this`, or the
* pair (k, (None, w)) if no elements in `this` have key k. Hash-partitions the resulting
* RDD into the given number of partitions.
*/
def rightOuterJoin[W](
other: RDD[(K, W)],
numPartitions: Int): RDD[(K, (Option[V], W))] = self.withScope {
rightOuterJoin(other, new HashPartitioner(numPartitions))
}
/**
* Perform a full outer join of `this` and `other`. For each element (k, v) in `this`, the
* resulting RDD will either contain all pairs (k, (Some(v), Some(w))) for w in `other`, or
* the pair (k, (Some(v), None)) if no elements in `other` have key k. Similarly, for each
* element (k, w) in `other`, the resulting RDD will either contain all pairs
* (k, (Some(v), Some(w))) for v in `this`, or the pair (k, (None, Some(w))) if no elements
* in `this` have key k. Hash-partitions the resulting RDD using the existing partitioner/
* parallelism level.
*/
def fullOuterJoin[W](other: RDD[(K, W)]): RDD[(K, (Option[V], Option[W]))] = self.withScope {
fullOuterJoin(other, defaultPartitioner(self, other))
}
/**
* Perform a full outer join of `this` and `other`. For each element (k, v) in `this`, the
* resulting RDD will either contain all pairs (k, (Some(v), Some(w))) for w in `other`, or
* the pair (k, (Some(v), None)) if no elements in `other` have key k. Similarly, for each
* element (k, w) in `other`, the resulting RDD will either contain all pairs
* (k, (Some(v), Some(w))) for v in `this`, or the pair (k, (None, Some(w))) if no elements
* in `this` have key k. Hash-partitions the resulting RDD into the given number of partitions.
*/
def fullOuterJoin[W](
other: RDD[(K, W)],
numPartitions: Int): RDD[(K, (Option[V], Option[W]))] = self.withScope {
fullOuterJoin(other, new HashPartitioner(numPartitions))
}
/**
* Return the key-value pairs in this RDD to the master as a Map.
*
* Warning: this doesn't return a multimap (so if you have multiple values to the same key, only
* one value per key is preserved in the map returned)
*
* @note this method should only be used if the resulting data is expected to be small, as
* all the data is loaded into the driver's memory.
*/
def collectAsMap(): Map[K, V] = self.withScope {
val data = self.collect()
val map = new mutable.HashMap[K, V]
map.sizeHint(data.length)
data.foreach { pair => map.put(pair._1, pair._2) }
map
}
/**
* Pass each value in the key-value pair RDD through a map function without changing the keys;
* this also retains the original RDD's partitioning.
*/
def mapValues[U](f: V => U): RDD[(K, U)] = self.withScope {
val cleanF = self.context.clean(f)
new MapPartitionsRDD[(K, U), (K, V)](self,
(context, pid, iter) => iter.map { case (k, v) => (k, cleanF(v)) },
preservesPartitioning = true)
}
/**
* Pass each value in the key-value pair RDD through a flatMap function without changing the
* keys; this also retains the original RDD's partitioning.
*/
def flatMapValues[U](f: V => TraversableOnce[U]): RDD[(K, U)] = self.withScope {
val cleanF = self.context.clean(f)
new MapPartitionsRDD[(K, U), (K, V)](self,
(context, pid, iter) => iter.flatMap { case (k, v) =>
cleanF(v).map(x => (k, x))
},
preservesPartitioning = true)
}
/**
* For each key k in `this` or `other1` or `other2` or `other3`,
* return a resulting RDD that contains a tuple with the list of values
* for that key in `this`, `other1`, `other2` and `other3`.
*/
def cogroup[W1, W2, W3](other1: RDD[(K, W1)],
other2: RDD[(K, W2)],
other3: RDD[(K, W3)],
partitioner: Partitioner)
: RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2], Iterable[W3]))] = self.withScope {
if (partitioner.isInstanceOf[HashPartitioner] && keyClass.isArray) {
throw new SparkException("HashPartitioner cannot partition array keys.")
}
val cg = new CoGroupedRDD[K](Seq(self, other1, other2, other3), partitioner)
cg.mapValues { case Array(vs, w1s, w2s, w3s) =>
(vs.asInstanceOf[Iterable[V]],
w1s.asInstanceOf[Iterable[W1]],
w2s.asInstanceOf[Iterable[W2]],
w3s.asInstanceOf[Iterable[W3]])
}
}
/**
* For each key k in `this` or `other`, return a resulting RDD that contains a tuple with the
* list of values for that key in `this` as well as `other`.
*/
def cogroup[W](other: RDD[(K, W)], partitioner: Partitioner)
: RDD[(K, (Iterable[V], Iterable[W]))] = self.withScope {
if (partitioner.isInstanceOf[HashPartitioner] && keyClass.isArray) {
throw new SparkException("HashPartitioner cannot partition array keys.")
}
val cg = new CoGroupedRDD[K](Seq(self, other), partitioner)
cg.mapValues { case Array(vs, w1s) =>
(vs.asInstanceOf[Iterable[V]], w1s.asInstanceOf[Iterable[W]])
}
}
/**
* For each key k in `this` or `other1` or `other2`, return a resulting RDD that contains a
* tuple with the list of values for that key in `this`, `other1` and `other2`.
*/
def cogroup[W1, W2](other1: RDD[(K, W1)], other2: RDD[(K, W2)], partitioner: Partitioner)
: RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2]))] = self.withScope {
if (partitioner.isInstanceOf[HashPartitioner] && keyClass.isArray) {
throw new SparkException("HashPartitioner cannot partition array keys.")
}
val cg = new CoGroupedRDD[K](Seq(self, other1, other2), partitioner)
cg.mapValues { case Array(vs, w1s, w2s) =>
(vs.asInstanceOf[Iterable[V]],
w1s.asInstanceOf[Iterable[W1]],
w2s.asInstanceOf[Iterable[W2]])
}
}
/**
* For each key k in `this` or `other1` or `other2` or `other3`,
* return a resulting RDD that contains a tuple with the list of values
* for that key in `this`, `other1`, `other2` and `other3`.
*/
def cogroup[W1, W2, W3](other1: RDD[(K, W1)], other2: RDD[(K, W2)], other3: RDD[(K, W3)])
: RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2], Iterable[W3]))] = self.withScope {
cogroup(other1, other2, other3, defaultPartitioner(self, other1, other2, other3))
}
/**
* For each key k in `this` or `other`, return a resulting RDD that contains a tuple with the
* list of values for that key in `this` as well as `other`.
*/
def cogroup[W](other: RDD[(K, W)]): RDD[(K, (Iterable[V], Iterable[W]))] = self.withScope {
cogroup(other, defaultPartitioner(self, other))
}
/**
* For each key k in `this` or `other1` or `other2`, return a resulting RDD that contains a
* tuple with the list of values for that key in `this`, `other1` and `other2`.
*/
def cogroup[W1, W2](other1: RDD[(K, W1)], other2: RDD[(K, W2)])
: RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2]))] = self.withScope {
cogroup(other1, other2, defaultPartitioner(self, other1, other2))
}
/**
* For each key k in `this` or `other`, return a resulting RDD that contains a tuple with the
* list of values for that key in `this` as well as `other`.
*/
def cogroup[W](
other: RDD[(K, W)],
numPartitions: Int): RDD[(K, (Iterable[V], Iterable[W]))] = self.withScope {
cogroup(other, new HashPartitioner(numPartitions))
}
/**
* For each key k in `this` or `other1` or `other2`, return a resulting RDD that contains a
* tuple with the list of values for that key in `this`, `other1` and `other2`.
*/
def cogroup[W1, W2](other1: RDD[(K, W1)], other2: RDD[(K, W2)], numPartitions: Int)
: RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2]))] = self.withScope {
cogroup(other1, other2, new HashPartitioner(numPartitions))
}
/**
* For each key k in `this` or `other1` or `other2` or `other3`,
* return a resulting RDD that contains a tuple with the list of values
* for that key in `this`, `other1`, `other2` and `other3`.
*/
def cogroup[W1, W2, W3](other1: RDD[(K, W1)],
other2: RDD[(K, W2)],
other3: RDD[(K, W3)],
numPartitions: Int)
: RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2], Iterable[W3]))] = self.withScope {
cogroup(other1, other2, other3, new HashPartitioner(numPartitions))
}
/** Alias for cogroup. */
def groupWith[W](other: RDD[(K, W)]): RDD[(K, (Iterable[V], Iterable[W]))] = self.withScope {
cogroup(other, defaultPartitioner(self, other))
}
/** Alias for cogroup. */
def groupWith[W1, W2](other1: RDD[(K, W1)], other2: RDD[(K, W2)])
: RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2]))] = self.withScope {
cogroup(other1, other2, defaultPartitioner(self, other1, other2))
}
/** Alias for cogroup. */
def groupWith[W1, W2, W3](other1: RDD[(K, W1)], other2: RDD[(K, W2)], other3: RDD[(K, W3)])
: RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2], Iterable[W3]))] = self.withScope {
cogroup(other1, other2, other3, defaultPartitioner(self, other1, other2, other3))
}
/**
* Return an RDD with the pairs from `this` whose keys are not in `other`.
*
* Uses `this` partitioner/partition size, because even if `other` is huge, the resulting
* RDD will be less than or equal to us.
*/
def subtractByKey[W: ClassTag](other: RDD[(K, W)]): RDD[(K, V)] = self.withScope {
subtractByKey(other, self.partitioner.getOrElse(new HashPartitioner(self.partitions.length)))
}
/**
* Return an RDD with the pairs from `this` whose keys are not in `other`.
*/
def subtractByKey[W: ClassTag](
other: RDD[(K, W)],
numPartitions: Int): RDD[(K, V)] = self.withScope {
subtractByKey(other, new HashPartitioner(numPartitions))
}
/**
* Return an RDD with the pairs from `this` whose keys are not in `other`.
*/
  def subtractByKey[W: ClassTag](other: RDD[(K, W)], p: Partitioner): RDD[(K, V)] = self.withScope {
    // SubtractedRDD streams `other` against `this`, so the result is at most as large as `this`.
    new SubtractedRDD[K, V, W](self, other, p)
  }
/**
* Return the list of values in the RDD for key `key`. This operation is done efficiently if the
* RDD has a known partitioner by only searching the partition that the key maps to.
*/
def lookup(key: K): Seq[V] = self.withScope {
self.partitioner match {
case Some(p) =>
val index = p.getPartition(key)
val process = (it: Iterator[(K, V)]) => {
val buf = new ArrayBuffer[V]
for (pair <- it if pair._1 == key) {
buf += pair._2
}
buf
} : Seq[V]
val res = self.context.runJob(self, process, Array(index))
res(0)
case None =>
self.filter(_._1 == key).map(_._2).collect()
}
}
/**
* Output the RDD to any Hadoop-supported file system, using a Hadoop `OutputFormat` class
* supporting the key and value types K and V in this RDD.
*/
def saveAsHadoopFile[F <: OutputFormat[K, V]](
path: String)(implicit fm: ClassTag[F]): Unit = self.withScope {
saveAsHadoopFile(path, keyClass, valueClass, fm.runtimeClass.asInstanceOf[Class[F]])
}
/**
* Output the RDD to any Hadoop-supported file system, using a Hadoop `OutputFormat` class
* supporting the key and value types K and V in this RDD. Compress the result with the
* supplied codec.
*/
def saveAsHadoopFile[F <: OutputFormat[K, V]](
path: String,
codec: Class[_ <: CompressionCodec])(implicit fm: ClassTag[F]): Unit = self.withScope {
val runtimeClass = fm.runtimeClass
saveAsHadoopFile(path, keyClass, valueClass, runtimeClass.asInstanceOf[Class[F]], codec)
}
/**
* Output the RDD to any Hadoop-supported file system, using a new Hadoop API `OutputFormat`
* (mapreduce.OutputFormat) object supporting the key and value types K and V in this RDD.
*/
def saveAsNewAPIHadoopFile[F <: NewOutputFormat[K, V]](
path: String)(implicit fm: ClassTag[F]): Unit = self.withScope {
saveAsNewAPIHadoopFile(path, keyClass, valueClass, fm.runtimeClass.asInstanceOf[Class[F]])
}
/**
* Output the RDD to any Hadoop-supported file system, using a new Hadoop API `OutputFormat`
* (mapreduce.OutputFormat) object supporting the key and value types K and V in this RDD.
*/
  def saveAsNewAPIHadoopFile(
      path: String,
      keyClass: Class[_],
      valueClass: Class[_],
      outputFormatClass: Class[_ <: NewOutputFormat[_, _]],
      conf: Configuration = self.context.hadoopConfiguration): Unit = self.withScope {
    // Rename this as hadoopConf internally to avoid shadowing (see SPARK-2038).
    val hadoopConf = conf
    val job = NewAPIHadoopJob.getInstance(hadoopConf)
    job.setOutputKeyClass(keyClass)
    job.setOutputValueClass(valueClass)
    job.setOutputFormatClass(outputFormatClass)
    val jobConfiguration = job.getConfiguration
    // NOTE(review): "mapred.output.dir" is the legacy config key; newer Hadoop uses
    // "mapreduce.output.fileoutputformat.outputdir" -- confirm against the target Hadoop version.
    jobConfiguration.set("mapred.output.dir", path)
    saveAsNewAPIHadoopDataset(jobConfiguration)
  }
/**
* Output the RDD to any Hadoop-supported file system, using a Hadoop `OutputFormat` class
* supporting the key and value types K and V in this RDD. Compress with the supplied codec.
*/
def saveAsHadoopFile(
path: String,
keyClass: Class[_],
valueClass: Class[_],
outputFormatClass: Class[_ <: OutputFormat[_, _]],
codec: Class[_ <: CompressionCodec]): Unit = self.withScope {
saveAsHadoopFile(path, keyClass, valueClass, outputFormatClass,
new JobConf(self.context.hadoopConfiguration), Some(codec))
}
/**
* Output the RDD to any Hadoop-supported file system, using a Hadoop `OutputFormat` class
* supporting the key and value types K and V in this RDD.
*
* @note We should make sure our tasks are idempotent when speculation is enabled, i.e. do
* not use output committer that writes data directly.
* There is an example in https://issues.apache.org/jira/browse/SPARK-10063 to show the bad
* result of using direct output committer with speculation enabled.
*/
def saveAsHadoopFile(
path: String,
keyClass: Class[_],
valueClass: Class[_],
outputFormatClass: Class[_ <: OutputFormat[_, _]],
conf: JobConf = new JobConf(self.context.hadoopConfiguration),
codec: Option[Class[_ <: CompressionCodec]] = None): Unit = self.withScope {
// Rename this as hadoopConf internally to avoid shadowing (see SPARK-2038).
val hadoopConf = conf
hadoopConf.setOutputKeyClass(keyClass)
hadoopConf.setOutputValueClass(valueClass)
conf.setOutputFormat(outputFormatClass)
for (c <- codec) {
hadoopConf.setCompressMapOutput(true)
hadoopConf.set("mapred.output.compress", "true")
hadoopConf.setMapOutputCompressorClass(c)
hadoopConf.set("mapred.output.compression.codec", c.getCanonicalName)
hadoopConf.set("mapred.output.compression.type", CompressionType.BLOCK.toString)
}
// Use configured output committer if already set
if (conf.getOutputCommitter == null) {
hadoopConf.setOutputCommitter(classOf[FileOutputCommitter])
}
// When speculation is on and output committer class name contains "Direct", we should warn
// users that they may loss data if they are using a direct output committer.
val speculationEnabled = self.conf.getBoolean("spark.speculation", false)
val outputCommitterClass = hadoopConf.get("mapred.output.committer.class", "")
if (speculationEnabled && outputCommitterClass.contains("Direct")) {
val warningMessage =
s"$outputCommitterClass may be an output committer that writes data directly to " +
"the final location. Because speculation is enabled, this output committer may " +
"cause data loss (see the case in SPARK-10063). If possible, please use an output " +
"committer that does not have this behavior (e.g. FileOutputCommitter)."
logWarning(warningMessage)
}
FileOutputFormat.setOutputPath(hadoopConf,
SparkHadoopWriterUtils.createPathFromString(path, hadoopConf))
saveAsHadoopDataset(hadoopConf)
}
/**
* Output the RDD to any Hadoop-supported storage system with new Hadoop API, using a Hadoop
* Configuration object for that storage system. The Conf should set an OutputFormat and any
* output paths required (e.g. a table name to write to) in the same way as it would be
* configured for a Hadoop MapReduce job.
*
* @note We should make sure our tasks are idempotent when speculation is enabled, i.e. do
* not use output committer that writes data directly.
* There is an example in https://issues.apache.org/jira/browse/SPARK-10063 to show the bad
* result of using direct output committer with speculation enabled.
*/
def saveAsNewAPIHadoopDataset(conf: Configuration): Unit = self.withScope {
SparkHadoopMapReduceWriter.write(
rdd = self,
hadoopConf = conf)
}
/**
* Output the RDD to any Hadoop-supported storage system, using a Hadoop JobConf object for
* that storage system. The JobConf should set an OutputFormat and any output paths required
* (e.g. a table name to write to) in the same way as it would be configured for a Hadoop
* MapReduce job.
*/
  def saveAsHadoopDataset(conf: JobConf): Unit = self.withScope {
    // Rename this as hadoopConf internally to avoid shadowing (see SPARK-2038).
    val hadoopConf = conf
    val outputFormatInstance = hadoopConf.getOutputFormat
    val keyClass = hadoopConf.getOutputKeyClass
    val valueClass = hadoopConf.getOutputValueClass
    // Fail fast on the driver if the JobConf is missing required output settings.
    if (outputFormatInstance == null) {
      throw new SparkException("Output format class not set")
    }
    if (keyClass == null) {
      throw new SparkException("Output key class not set")
    }
    if (valueClass == null) {
      throw new SparkException("Output value class not set")
    }
    SparkHadoopUtil.get.addCredentials(hadoopConf)
    logDebug("Saving as hadoop file of type (" + keyClass.getSimpleName + ", " +
      valueClass.getSimpleName + ")")
    // Optionally validate the output spec (e.g. output dir does not already exist)
    // before launching any tasks.
    if (SparkHadoopWriterUtils.isOutputSpecValidationEnabled(self.conf)) {
      // FileOutputFormat ignores the filesystem parameter
      val ignoredFs = FileSystem.get(hadoopConf)
      hadoopConf.getOutputFormat.checkOutputSpecs(ignoredFs, hadoopConf)
    }
    val writer = new SparkHadoopWriter(hadoopConf)
    // Driver-side job setup (runs before any task writes).
    writer.preSetup()
    // This closure runs once per partition on the executors.
    val writeToFile = (context: TaskContext, iter: Iterator[(K, V)]) => {
      // Hadoop wants a 32-bit task attempt ID, so if ours is bigger than Int.MaxValue, roll it
      // around by taking a mod. We expect that no task will be attempted 2 billion times.
      val taskAttemptId = (context.taskAttemptId % Int.MaxValue).toInt
      val outputMetricsAndBytesWrittenCallback: Option[(OutputMetrics, () => Long)] =
        SparkHadoopWriterUtils.initHadoopOutputMetrics(context)
      writer.setup(context.stageId, context.partitionId, taskAttemptId)
      writer.open()
      var recordsWritten = 0L
      // Ensure the writer is closed even if writing fails partway through.
      Utils.tryWithSafeFinallyAndFailureCallbacks {
        while (iter.hasNext) {
          val record = iter.next()
          writer.write(record._1.asInstanceOf[AnyRef], record._2.asInstanceOf[AnyRef])
          // Update bytes written metric every few records
          SparkHadoopWriterUtils.maybeUpdateOutputMetrics(
            outputMetricsAndBytesWrittenCallback, recordsWritten)
          recordsWritten += 1
        }
      }(finallyBlock = writer.close())
      // Per-task commit; the job-level commit happens on the driver below.
      writer.commit()
      outputMetricsAndBytesWrittenCallback.foreach { case (om, callback) =>
        om.setBytesWritten(callback())
        om.setRecordsWritten(recordsWritten)
      }
    }
    self.context.runJob(self, writeToFile)
    writer.commitJob()
  }
/**
* Return an RDD with the keys of each tuple.
*/
def keys: RDD[K] = self.map(_._1)
/**
* Return an RDD with the values of each tuple.
*/
def values: RDD[V] = self.map(_._2)
  // kt/vt/ord are presumably the implicit ClassTags and Ordering captured by the
  // enclosing class's constructor (header not visible here) -- confirm upstream.
  private[spark] def keyClass: Class[_] = kt.runtimeClass
  private[spark] def valueClass: Class[_] = vt.runtimeClass
  private[spark] def keyOrdering: Option[Ordering[K]] = Option(ord)
}
| sh-cho/cshSpark | rdd/PairRDDFunctions.scala | Scala | apache-2.0 | 51,854 |
/*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.github.microburn.domain
// Status of a board task: either the raw status string reported by the external
// system, or one of the synthetic states declared below.
sealed trait TaskStatus
// A status carried verbatim from the external system.
case class SpecifiedStatus(status: String) extends TaskStatus
// Synthetic state marking a task as completed.
case object TaskCompletedStatus extends TaskStatus
// Used only to simulate the board's start state.
case object TaskOpenedStatus extends TaskStatus | arkadius/micro-burn | src/main/scala/org/github/microburn/domain/TaskStatus.scala | Scala | apache-2.0 | 898 |
/*
* Copyright 2013 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package geomesa.core.iterators
import java.util.UUID
import org.apache.accumulo.core.client.{IteratorSetting, ScannerBase}
import org.apache.accumulo.core.data.{Value, Key}
import org.apache.accumulo.core.iterators.{SortedKeyValueIterator, IteratorEnvironment, WrappingIterator}
object RowOnlyIterator {
  /** Attaches a [[RowOnlyIterator]] to the scanner under a randomized iterator name. */
  def setupRowOnlyIterator(scanner: ScannerBase, priority: Int) {
    // Random suffix avoids name collisions when the iterator is attached more than once.
    val suffix = UUID.randomUUID.toString.take(5)
    val iteratorName = s"RowOnlyIterator-$suffix"
    scanner.addScanIterator(new IteratorSetting(priority, iteratorName, classOf[RowOnlyIterator]))
  }
}
/**
 * Iterator that strips everything but the row from each key (column family,
 * qualifier, visibility and timestamp are dropped).
 */
class RowOnlyIterator
  extends WrappingIterator {
  override def getTopKey: Key = new Key(super.getTopKey.getRow)

  // Fix: previously returned null, which violates Accumulo's SortedKeyValueIterator
  // contract -- the framework may deep-copy iterators (e.g. for multi-level reads),
  // and a null copy would NPE. Return a real copy backed by a deep copy of the source.
  override def deepCopy(env: IteratorEnvironment): SortedKeyValueIterator[Key, Value] = {
    val copy = new RowOnlyIterator
    copy.setSource(getSource.deepCopy(env))
    copy
  }
}
| anthonyccri/geomesa | geomesa-core/src/main/scala/geomesa/core/iterators/RowOnlyIterator.scala | Scala | apache-2.0 | 1,419 |
package com.github.gabadi.scalajooq
import com.google.common.base.CaseFormat.{LOWER_CAMEL, LOWER_UNDERSCORE, UPPER_UNDERSCORE}
import org.jooq.{DSLContext, Field, Record, Table}
import scala.language.experimental.macros
import scala.reflect.macros.blackbox.Context
import scala.util.{Failure, Success}
trait JooqMacroMapper[C <: Context] {
  // Helper bundle (reflection utilities + abort/error reporting) bound to this
  // macro context and column-name namespace.
  val rc = {
    val n = namespace
    new {
      val context: co.type = co
      val namespace = n
    } with RichContext[co.type] with AbortContext[co.type]
  }
  // The macro context this mapper operates in; supplied by the concrete instance.
  protected val co: C
  import co.universe._
  // Optional column-name prefix used when mapping embedded entities.
  protected val namespace: Option[String]
def tryGenerateByTree(tableType: Tree, recordType: Tree, entityType: Tree): scala.util.Try[Tree] = {
tryGenerateByType(
tableType = rc.treeToType(tableType),
recordType = rc.treeToType(recordType),
entityType = rc.treeToType(entityType)
)
}
  // Builds a mapper sharing the given macro context but scoped to a nested
  // column-name namespace; used to generate mappers for embedded entities.
  def childOf(c: C, namespace: Option[String]) = {
    val n = namespace
    new {
      val co: c.type = c
      val namespace = n
    } with JooqMacroMapper[c.type]
  }
def tryGenerateByType(tableType: Type, recordType: Type, entityType: Type): scala.util.Try[Tree] = {
scala.util.Try {
val fields = rc.caseClassFields(entityType)
val table = rc.tableInstanceMethod(tableType)
val tableMembers = rc.tableMembersMap(tableType)
val recordMembers = rc.recordMembersMap(recordType)
val (toEntityParams, toRecordParams, metaFields, mappedFields, joins, embeddedEntities) =
fields.map { field =>
val fieldName = field.name.decodedName.toString
val fieldTermName = field.asTerm.name
val columnName = LOWER_CAMEL.to(UPPER_UNDERSCORE, fieldName)
val columnNameNoNamespace = namespace.map(n => columnName.replaceFirst(LOWER_CAMEL.to(UPPER_UNDERSCORE, n) + "_", "")).getOrElse(columnName)
val fieldNameLowerUnderscore = LOWER_CAMEL.to(LOWER_UNDERSCORE, fieldName)
val fieldIsOption = field.typeSignature <:< typeOf[Option[_]]
val effectiveFieldType = if (fieldIsOption) field.typeSignature.typeArgs.head else field.typeSignature
tableMembers.get(fieldName) match {
// direct matching between record and entity
case Some(recordMember) =>
recordMembers.get(s"set${namespace.getOrElse("").capitalize}${fieldName.capitalize}").get
val recordSetter = recordMembers.get(s"set${namespace.getOrElse("").capitalize}${fieldName.capitalize}").get
val recordFieldType = recordSetter.asMethod.paramLists.head.head.typeSignature
val e2rTypeConversion = rc.implicitConversion(from = effectiveFieldType, to = recordFieldType)
val r2eTypeConversion = rc.implicitConversion(from = recordFieldType, to = effectiveFieldType)
val getValue = q"r.getValue(table.$recordMember)"
if ((r2eTypeConversion equalsStructure EmptyTree) || (e2rTypeConversion equalsStructure EmptyTree)) {
rc.abortCanNotFindImplicitConversion(s"$entityType.$fieldName($effectiveFieldType)", s"$recordType.$columnName($recordFieldType)")(None)
} else {
if (fieldIsOption) {
(q"$fieldTermName = Option($getValue).map($r2eTypeConversion)",
q"r.$recordSetter(e.$fieldTermName.map($e2rTypeConversion).orNull[$recordFieldType])",
q"lazy val ${TermName(columnName)} = table.$recordMember",
q"f = f ++ Array(table.$recordMember)",
Nil,
None)
} else {
val nullInRecordMessage = s"${recordMember.name.decodedName.toString} in record ${recordType.typeSymbol.name.decodedName.toString} is null in the database. This is inconsistent"
val nullInEntityMessage = s"$fieldName in entity ${entityType.typeSymbol.name.decodedName.toString} must not be null"
val entityFieldConverted = q"$e2rTypeConversion(e.$fieldTermName)"
(q"$fieldTermName = $r2eTypeConversion(${rc.checkNotNullTree(getValue, nullInRecordMessage)})",
q"r.$recordSetter(${rc.checkNotNullTree(entityFieldConverted, nullInEntityMessage)})",
q"lazy val ${TermName(columnName)} = table.$recordMember",
q"f = f ++ Array(table.$recordMember)",
Nil,
None)
}
}
case None =>
// there is no matching between record and entity
rc.implicitJooqMeta(effectiveFieldType) match {
// try to create an embedded mapper
case None =>
val newNamespace = namespace.map(n => s"$n${fieldName.capitalize}").getOrElse(fieldName)
val newNamespaceUpper = LOWER_CAMEL.to(UPPER_UNDERSCORE, newNamespace)
val mayExistNamespace = rc.existsMemberStartWith(tableType)(newNamespaceUpper)
if (!mayExistNamespace) {
rc.abortFieldNotFoundInRecord(s"$entityType.$fieldName", s"$recordType.$newNamespaceUpper")(None)
} else {
childOf(co, Some(newNamespace)).tryGenerateByType(tableType = tableType, recordType = recordType, entityType = effectiveFieldType) match {
case Success(child) =>
val objectName = s"O${newNamespace.capitalize.drop(namespace.getOrElse("").length)}"
val objectTermName = TermName(objectName)
val childObject = q"object $objectTermName extends ${rc.jooqMeta(tableType, recordType, effectiveFieldType)} {..$child}"
val toEntity = if (fieldIsOption) q"$objectTermName.${TermName("toOptEntity")}" else q"$objectTermName.${TermName("toEntity")}"
val toRecord = q"$objectTermName.${TermName("toRecord")}"
val fieldToRecord = if (fieldIsOption) {
q"e.${TermName(fieldName)}.foreach(o => $toRecord(o, r))"
} else {
q"$toRecord(e.${TermName(fieldName)}, r)"
}
(q"$field = $toEntity(r)",
fieldToRecord,
q"lazy val ${TermName(columnNameNoNamespace)} = $objectTermName",
q"f = f ++ $objectTermName.${TermName("fields")}",
Nil,
Some(childObject))
case Failure(e: AbortException) =>
rc.abortFieldCanNotBeMapped(s"$entityType.$fieldName")(Some(e.getMessage))
case Failure(e) =>
throw e
}
}
case Some(implicitMapper) =>
val mapperRecordType = {
val toRecordMethod = implicitMapper.tpe.decl(TermName("toRecord"))
if (toRecordMethod.isMethod) {
val recordType = toRecordMethod.asMethod.paramLists.head(1).typeSignature
if (recordType.typeSymbol.isAbstract) {
implicitMapper.tpe.typeArgs(1)
} else {
recordType
}
} else {
rc.abortMissingMappingFor(s"$entityType.$fieldName", effectiveFieldType)(None)
}
}
val toEntity = if (fieldIsOption) q"$implicitMapper.${TermName("toOptEntity")}" else q"$implicitMapper.${TermName("toEntity")}"
if (mapperRecordType.equals(recordType)) {
// resolve like embedded
val toRecord = q"$implicitMapper.${TermName("toRecord")}"
val fieldToRecord = if (fieldIsOption) {
q"e.${TermName(fieldName)}.foreach(o => $toRecord(o, r))"
} else {
q"$toRecord(e.${TermName(fieldName)}, r)"
}
(q"$field = $toEntity(r)",
fieldToRecord,
q"lazy val ${TermName(columnNameNoNamespace)} = $implicitMapper",
q"f = f ++ $implicitMapper.${TermName("fields")}",
Nil,
None)
} else {
// try to resolve like a joined entity
val idSuffix = {
val joinedTableType = {
val tableType = implicitMapper.tpe.decl(TermName("table")).asMethod.returnType
if (tableType.typeSymbol.isAbstract) {
implicitMapper.tpe.typeArgs.head
} else {
tableType
}
}
val maybeMappedMethods = tableMembers.keySet.filter(f => f.startsWith(fieldName))
if (maybeMappedMethods.isEmpty) {
rc.abortCanNotFindMapIdFieldBetween(joinedTableType, s"$entityType.$fieldName")(None)
} else if (maybeMappedMethods.size == 1) {
val suffix = maybeMappedMethods.head.replaceFirst(fieldName, "")
s"${suffix.substring(0, 0).toLowerCase}${suffix.substring(1, suffix.length)}"
} else {
if (maybeMappedMethods.exists(_.equals(fieldName + "Id"))) {
"id"
} else if (maybeMappedMethods.exists(_.equals(fieldName + "Oid"))) {
"oid"
} else if (maybeMappedMethods.exists(_.equals(fieldName + "Code"))) {
"code"
} else {
// improve message
rc.abortCanNotFindMapIdFieldBetween(joinedTableType, s"$entityType.$fieldName")(None)
}
}
}
val tableFieldName = LOWER_CAMEL.to(UPPER_UNDERSCORE, s"$fieldName${idSuffix.capitalize}")
val joinTableFieldName = LOWER_CAMEL.to(UPPER_UNDERSCORE, s"$idSuffix")
val recordSetter = recordMembers.get(s"set${fieldName.capitalize}${idSuffix.capitalize}").get
val fieldTypeIdMember = effectiveFieldType.member(TermName(idSuffix))
val fieldTypeIdType = fieldTypeIdMember.asMethod.returnType
val recordSetterType = recordSetter.typeSignature.paramLists.head.head.typeSignature
val e2rTypeConversion = rc.implicitConversion(from = fieldTypeIdType, to = recordSetterType)
val nullInEntityMessage = s"$fieldName in entity ${entityType.typeSymbol.name.decodedName.toString} must not be null"
val entityFieldConverted = q"$e2rTypeConversion(e.$fieldTermName.$fieldTypeIdMember)"
val namespace = q"""namespace.map(n => n + "_" + $fieldNameLowerUnderscore).getOrElse($fieldNameLowerUnderscore)"""
//val joinTable = q"$implicitMapper.table.as($namespace)"
val joinTable = q"$implicitMapper.table"
//val ownTable = q"namespace.map(n => table.as(n)).getOrElse(table)"
val ownTable = q"table"
val joinCondition = q"$ownTable.${TermName(tableFieldName)}.equal($joinTable.${TermName(joinTableFieldName)})"
val join = if (fieldIsOption) {
q"(current leftOuterJoin $joinTable).on($joinCondition)"
} else {
q"(if(leftJoin) (current leftOuterJoin $joinTable).on($joinCondition) else (current join $joinTable).on($joinCondition))"
}
val leftJoin = if (fieldIsOption) q"true" else q"leftJoin"
(q"$field = $toEntity(r)",
if (fieldIsOption) {
q"e.${TermName(fieldName)}.foreach(o => r.$recordSetter($e2rTypeConversion(o.$fieldTypeIdMember)))"
} else {
q"r.$recordSetter(${rc.checkNotNullTree(entityFieldConverted, nullInEntityMessage)})"
},
q"",
q"f = f ++ $implicitMapper.${TermName("fields")} ++ Array(table.${TermName(tableFieldName)})",
q"current = $join" ::
q"current = ($implicitMapper).joinedTable(current, Some($namespace), $leftJoin)" :: Nil,
None
)
}
}
}
}.unzip6
val companion = entityType.typeSymbol.companion
val body = q"""
..${embeddedEntities.flatten}
override val table = $table
override def joinedTable(t: ${weakTypeOf[Table[Record]]}, namespace: Option[String] = None, leftJoin: Boolean = false) = {
var current = t
..${joins.flatten}
current
}
..$metaFields
override lazy val fields = {
var f = Array.empty[${weakTypeOf[Field[_]]}]
..$mappedFields
f
}
override def toEntity(r: ${weakTypeOf[Record]}) = $companion(..$toEntityParams)
override def toRecord(e: $entityType, current: $recordType = null.asInstanceOf[$recordType])(implicit dsl: ${weakTypeOf[DSLContext]}): $recordType = {
val r = if(current != null) current else dsl.newRecord(table)
..$toRecordParams
r
}
"""
body
}
}
}
object JooqMacroMapper {
  // Shorthand for the blackbox macro context type used by the mapper's macro implementation.
  type Context = scala.reflect.macros.blackbox.Context
}
| gabadi/JOOQ-scala-mappings | src/main/scala/com/github/gabadi/scalajooq/JooqMacroMapper.scala | Scala | apache-2.0 | 13,809 |
/*
* Copyright (C) 2020 MapRoulette contributors (see CONTRIBUTORS.md).
* Licensed under the Apache License, Version 2.0 (see LICENSE).
*/
package org.maproulette.models.dal.mixin
import anorm.NamedParameter
import org.maproulette.models.utils.DALHelper
import org.maproulette.session.SearchParameters
import org.maproulette.framework.psql.SQLUtils
import play.api.libs.json.JsDefined
import scala.collection.mutable.ListBuffer
/**
 * Builds SQL WHERE/JOIN fragments and bind parameters from a [[SearchParameters]]
 * instance, delegating the actual SQL generation to the filter* methods of
 * org.maproulette.framework.mixins.SearchParametersMixin.
 *
 * NOTE: This class has quite a few side effects that need to be taken into account.
 * Specifically the "whereClause" and "joinClause" are updated through the string
 * builder functions and not returned. So basically functioning like InOut
 * Parameters. Not the best approach.
 *
 * @author mcuthbert
 */
@deprecated("Use org.maproulette.framework.mixins.SearchParametersMixin instead", "")
trait SearchParametersMixin
    extends DALHelper
    with org.maproulette.framework.mixins.SearchParametersMixin {

  /**
   * Appends every supported search filter to `whereClause` (and tag-matching
   * joins to `joinClause`), both mutated in place, and returns the named bind
   * parameters required by the appended SQL.
   *
   * @param params        search parameters driving the filters
   * @param whereClause   mutable WHERE clause, appended to in place
   * @param joinClause    mutable JOIN clause, appended to in place
   * @param projectSearch when true, also apply project search/enabled filters
   * @return named parameters to bind when executing the generated SQL
   */
  def updateWhereClause(
      params: SearchParameters,
      whereClause: StringBuilder,
      joinClause: StringBuilder
  )(implicit projectSearch: Boolean = true): ListBuffer[NamedParameter] = {
    val parameters = new ListBuffer[NamedParameter]()

    this.paramsLocation(params, whereClause)
    this.paramsBounding(params, whereClause)
    this.paramsTaskStatus(params, whereClause)
    this.paramsTaskId(params, whereClause)
    this.paramsProjectSearch(params, whereClause)
    this.paramsTaskReviewStatus(params, whereClause)
    this.paramsMetaReviewStatus(params, whereClause)
    this.paramsOwner(params, whereClause)
    this.paramsReviewer(params, whereClause)
    this.paramsMetaReviewer(params, whereClause)
    this.paramsMapper(params, whereClause)
    this.paramsTaskPriorities(params, whereClause)
    this.paramsTaskTags(params, whereClause)
    this.paramsPriority(params, whereClause)
    this.paramsChallengeDifficulty(params, whereClause)
    this.paramsChallengeStatus(params, whereClause)
    this.paramsChallengeRequiresLocal(params, whereClause)
    this.paramsBoundingGeometries(params, whereClause)

    // For efficiency can only query on task properties with a parent challenge id
    this.paramsTaskProps(params, whereClause)

    parameters ++= this.addSearchToQuery(params, whereClause)(projectSearch)
    parameters ++= this.addChallengeTagMatchingToQuery(params, whereClause, joinClause)
    parameters
  }

  /**
   * Appends project (optional) and challenge name/enabled filters, returning
   * the bind parameters produced by the name filters.
   */
  def addSearchToQuery(
      params: SearchParameters,
      whereClause: StringBuilder
  )(implicit projectSearch: Boolean = true): ListBuffer[NamedParameter] = {
    val parameters = new ListBuffer[NamedParameter]()

    if (projectSearch) {
      parameters ++= this.paramsProjects(params, whereClause)
      this.paramsProjectEnabled(params, whereClause)
    }
    parameters ++= this.paramsChallenges(params, whereClause)
    this.paramsChallengeEnabled(params, whereClause)

    parameters
  }

  // The methods below all follow the same pattern: delegate SQL generation to
  // the corresponding filter* method and append its rendering to `whereClause`.

  def paramsProjectSearch(params: SearchParameters, whereClause: StringBuilder): Unit = {
    this.appendInWhereClause(whereClause, this.filterProjectSearch(params).sql())
  }

  /** Project id filter; also returns the bind parameters the filter requires. */
  def paramsProjects(params: SearchParameters, whereClause: StringBuilder): List[NamedParameter] = {
    val filter = this.filterProjects(params)
    this.appendInWhereClause(whereClause, filter.sql())
    filter.parameters()
  }

  def paramsProjectEnabled(params: SearchParameters, whereClause: StringBuilder): Unit = {
    this.appendInWhereClause(whereClause, this.filterProjectEnabled(params).sql())
  }

  def paramsLocation(params: SearchParameters, whereClause: StringBuilder): Unit = {
    this.appendInWhereClause(whereClause, this.filterLocation(params).sql())
  }

  def paramsBounding(params: SearchParameters, whereClause: StringBuilder): Unit = {
    this.appendInWhereClause(whereClause, this.filterBounding(params).sql())
  }

  /**
   * Task status filter.
   *
   * @param defaultStatuses statuses assumed when the parameters specify none
   */
  def paramsTaskStatus(
      params: SearchParameters,
      whereClause: StringBuilder,
      defaultStatuses: List[Int] = List(0, 3, 6)
  ): Unit = {
    this.appendInWhereClause(whereClause, this.filterTaskStatus(params, defaultStatuses).sql())
  }

  def paramsTaskId(params: SearchParameters, whereClause: StringBuilder): Unit = {
    this.appendInWhereClause(whereClause, this.filterTaskId(params).sql())
  }

  def paramsTaskPriorities(params: SearchParameters, whereClause: StringBuilder): Unit = {
    this.appendInWhereClause(whereClause, this.filterTaskPriorities(params).sql())
  }

  def paramsPriority(params: SearchParameters, whereClause: StringBuilder): Unit = {
    this.appendInWhereClause(whereClause, this.filterPriority(params).sql())
  }

  def paramsTaskTags(params: SearchParameters, whereClause: StringBuilder): Unit = {
    this.appendInWhereClause(whereClause, this.filterTaskTags(params).sql())
  }

  def paramsTaskReviewStatus(
      params: SearchParameters,
      whereClause: StringBuilder
  ): Unit = {
    this.appendInWhereClause(whereClause, this.filterTaskReviewStatus(params).sql())
  }

  def paramsMetaReviewStatus(
      params: SearchParameters,
      whereClause: StringBuilder
  ): Unit = {
    this.appendInWhereClause(whereClause, this.filterMetaReviewStatus(params).sql())
  }

  def paramsChallengeEnabled(params: SearchParameters, whereClause: StringBuilder): Unit = {
    this.appendInWhereClause(whereClause, this.filterChallengeEnabled(params).sql())
  }

  def paramsChallengeDifficulty(params: SearchParameters, whereClause: StringBuilder): Unit = {
    this.appendInWhereClause(whereClause, this.filterChallengeDifficulty(params).sql())
  }

  def paramsChallengeStatus(params: SearchParameters, whereClause: StringBuilder): Unit = {
    this.appendInWhereClause(whereClause, this.filterChallengeStatus(params).sql())
  }

  /** Challenge id filter; also returns the bind parameters the filter requires. */
  def paramsChallenges(
      params: SearchParameters,
      whereClause: StringBuilder
  ): List[NamedParameter] = {
    val filter = this.filterChallenges(params)
    this.appendInWhereClause(whereClause, filter.sql())
    filter.parameters()
  }

  def paramsChallengeRequiresLocal(
      params: SearchParameters,
      whereClause: StringBuilder
  ): Unit = {
    this.appendInWhereClause(whereClause, this.filterChallengeRequiresLocal(params).sql())
  }

  def paramsBoundingGeometries(params: SearchParameters, whereClause: StringBuilder): Unit = {
    this.appendInWhereClause(whereClause, this.filterBoundingGeometries(params).sql())
  }

  def paramsTaskProps(params: SearchParameters, whereClause: StringBuilder): Unit = {
    this.appendInWhereClause(whereClause, this.filterTaskProps(params).sql())
  }

  def paramsOwner(
      params: SearchParameters,
      whereClause: StringBuilder
  ): Unit = {
    this.appendInWhereClause(whereClause, this.filterOwner(params).sql())
  }

  def paramsReviewer(
      params: SearchParameters,
      whereClause: StringBuilder
  ): Unit = {
    this.appendInWhereClause(whereClause, this.filterReviewer(params).sql())
  }

  def paramsMetaReviewer(
      params: SearchParameters,
      whereClause: StringBuilder
  ): Unit = {
    this.appendInWhereClause(whereClause, this.filterMetaReviewer(params).sql())
  }

  def paramsMapper(
      params: SearchParameters,
      whereClause: StringBuilder
  ): Unit = {
    this.appendInWhereClause(whereClause, this.filterMapper(params).sql())
  }

  def paramsMappers(
      params: SearchParameters,
      whereClause: StringBuilder
  ): Unit = {
    this.appendInWhereClause(whereClause, this.filterReviewMappers(params).sql())
  }

  def paramsReviewers(
      params: SearchParameters,
      whereClause: StringBuilder
  ): Unit = {
    this.appendInWhereClause(whereClause, this.filterReviewers(params).sql())
  }

  def paramsReviewDate(
      params: SearchParameters,
      whereClause: StringBuilder
  ): Unit = {
    this.appendInWhereClause(whereClause, this.filterReviewDate(params).sql())
  }
}
| mgcuthbert/maproulette2 | app/org/maproulette/models/dal/mixin/SearchParametersMixin.scala | Scala | apache-2.0 | 7,741 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.kernel.protocol.v5.client.socket
import java.util.UUID
import akka.actor.{ActorRef, ActorSystem, Props}
import akka.testkit.{TestProbe, ImplicitSender, TestKit}
import org.apache.toree.communication.ZMQMessage
import org.apache.toree.communication.security.SecurityActorType
import org.apache.toree.kernel.protocol.v5._
import org.apache.toree.kernel.protocol.v5.client.ActorLoader
import org.apache.toree.kernel.protocol.v5.content.ExecuteRequest
import org.scalatest.mock.MockitoSugar
import org.scalatest.{Matchers, FunSpecLike}
import org.mockito.Mockito._
import org.mockito.Matchers._
import play.api.libs.json.Json
class ShellClientSpec extends TestKit(ActorSystem("ShellActorSpec"))
  with ImplicitSender with FunSpecLike with Matchers with MockitoSugar {

  // Signature checking is enabled, so outgoing messages must round-trip through
  // the (mocked) signature manager before reaching the socket.
  private val SignatureEnabled = true

  describe("ShellClientActor") {
    val socketFactory = mock[SocketFactory]
    val mockActorLoader = mock[ActorLoader]
    val probe : TestProbe = TestProbe()
    // Stub the socket factory BEFORE the ShellClient actor is created so the
    // actor wires its shell socket to our probe.
    when(socketFactory.ShellClient(
      any(classOf[ActorSystem]), any(classOf[ActorRef])
    )).thenReturn(probe.ref)

    val signatureManagerProbe = TestProbe()
    // The actor loader resolves the signature manager to a probe we control.
    doReturn(system.actorSelection(signatureManagerProbe.ref.path.toString))
      .when(mockActorLoader).load(SecurityActorType.SignatureManager)

    val shellClient = system.actorOf(Props(
      classOf[ShellClient], socketFactory, mockActorLoader, SignatureEnabled
    ))

    describe("send execute request") {
      it("should send execute request") {
        val request = ExecuteRequest(
          "foo", false, true, UserExpressions(), true
        )
        val header = Header(
          UUID.randomUUID().toString, "spark",
          UUID.randomUUID().toString, MessageType.Incoming.ExecuteRequest.toString,
          "5.0"
        )
        val kernelMessage = KernelMessage(
          Seq[String](), "",
          header, HeaderBuilder.empty,
          Metadata(), Json.toJson(request).toString
        )
        shellClient ! kernelMessage

        // Echo back the kernel message sent to have a signature injected
        signatureManagerProbe.expectMsgClass(classOf[KernelMessage])
        signatureManagerProbe.reply(kernelMessage)
        // The signed message must finally be serialized onto the ZMQ socket.
        probe.expectMsgClass(classOf[ZMQMessage])
      }
    }
  }
}
package lift.profiler
import scala.annotation.StaticAnnotation
import scala.language.experimental.macros
import scala.reflect.macros.blackbox.Context
/**
 * Annotation macro: wraps the annotated method's body with wall-clock timing
 * that prints a `PROFILING_DATUM` line on every call.
 *
 * @param extraContext optional label recorded alongside the measurement;
 *                     defaults to "nocontext" when omitted (see Profile.impl)
 */
class Profile(extraContext: String = "") extends StaticAnnotation {
  def macroTransform(annottees: Any*) = macro Profile.impl
}
object Profile {
  /**
   * Macro implementation: rewrites a single annotated `def` so that its body is
   * timed with `System.nanoTime()` and a `PROFILING_DATUM` line is printed.
   * Aborts compilation if applied to anything other than a method.
   */
  def impl(c: Context)(annottees: c.Expr[Any]*): c.Expr[Any] = {
    import c.universe._
    // Extract the context label from the annotation's argument, if present.
    val ctx: String = c.prefix.tree match {
      case q"new Profile($extraContext)" =>
        c.eval[String](c.Expr[String](extraContext))
      // match for when we have no explicit context (i.e. this allows us to
      // write `@Profile def ...` rather than `@Profile() def ...` which is
      // (in some ways) slightly cleaner
      case _⇒ "nocontext"
    }
    val result = {
      annottees.map(_.tree).toList match {
        case q"$mods def $methodName[..$tpes](...$args): $returnType = { ..$body }" :: Nil => {
          // Splice the original body between the timing calls; the result value
          // is captured so the rewritten method still returns it unchanged.
          q"""$mods def $methodName[..$tpes](...$args): $returnType = {
          val start = System.nanoTime()
          val profSpliceResultValueNoConflict = {..$body}
          val end = System.nanoTime()
          println("PROFILING_DATUM: (\\""+${methodName.toString}+"\\", \\"" + ${ctx.toString} + "\\", " + (end-start).toDouble/1000000 + ", \\"Scala\\")")
          profSpliceResultValueNoConflict
          }"""
        }
        case _ => c.abort(c.enclosingPosition, "Annotation @Profile can be used only with methods")
      }
    }
    c.Expr[Any](result)
  }

  /**
   * Runtime (non-macro) variant: times `f`, prints a `PROFILING_DATUM` line in
   * milliseconds, and returns f's result.
   */
  def profile[T](name: String, context: String, f: () => T) : T = {
    val start = System.nanoTime()
    val r: T = f()
    val end = System.nanoTime()
    println("PROFILING_DATUM: (\\"" + name + "\\", \\"" + context + "\\"," + (end-start).toDouble/1000000 + ", \\"Scala\\")")
    r
  }
}
| lift-project/lift | lib/Profiler/src/main/scala/Profiler.scala | Scala | mit | 1,779 |
package io.vamp.container_driver.marathon
import io.vamp.container_driver.Docker
import io.vamp.model.artifact.{ HealthCheck, Port }
/**
 * Payload describing a Marathon application definition, serialized and sent to
 * the Marathon REST API by the container driver.
 */
case class MarathonApp(
  id: String,
  container: Option[Container],
  ipAddress: Option[MarathonAppIpAddress],
  instances: Int,
  cpus: Double,
  mem: Int,
  env: Map[String, String],
  cmd: Option[String],
  healthChecks: List[MarathonHealthCheck] = Nil,
  args: List[String] = Nil,
  labels: Map[String, String] = Map(),
  constraints: List[List[String]] = Nil,
  upgradeStrategy: Option[UpgradeStrategy] = Some(UpgradeStrategy()),
  fetch: Option[List[UriObject]]
)
/** Artifact URI fetched into the task sandbox; unpacked when `extract` is true. */
case class UriObject(uri: String, extract: Boolean = true)

/** Container specification; this driver always uses the Docker containerizer. */
case class Container(docker: Docker, `type`: String = "DOCKER")

// About rolling Restarts: https://mesosphere.github.io/marathon/docs/deployments.html#rolling-restarts
// TODO: make them configurable
case class UpgradeStrategy(maximumOverCapacity: Double = 1.0, minimumHealthCapacity: Double = 1.0)

/** IP-per-task configuration: the named network the app attaches to. */
case class MarathonAppIpAddress(networkName: String)

/**
 * Marathon health check definition. Either a fixed `port` or a `portIndex`
 * into the app's port list is used to address the checked endpoint.
 */
case class MarathonHealthCheck(
  path: String,
  port: Option[Int],
  portIndex: Option[Int],
  protocol: String,
  gracePeriodSeconds: Int,
  intervalSeconds: Int,
  timeoutSeconds: Int,
  maxConsecutiveFailures: Int
)
object MarathonHealthCheck {

  /**
   * Transforms a HealthCheck into its Marathon specific representation,
   * resolving the health check's port reference to an index within `ports`.
   */
  def apply(ports: List[Port], healthCheck: HealthCheck): MarathonHealthCheck = {
    // Locate the first port whose name or alias matches the referenced port.
    // Prior validation guarantees a match exists, hence the unconditional `.get`.
    val portIndex = ports.indices.find { i ⇒
      val port = ports(i)
      port.name.contains(healthCheck.port) || port.alias.contains(healthCheck.port)
    }.get

    MarathonHealthCheck(
      healthCheck.path,
      None,
      Some(portIndex),
      healthCheck.protocol,
      healthCheck.initialDelay.value,
      healthCheck.interval.value,
      healthCheck.timeout.value,
      healthCheck.failures
    )
  }

  /**
   * Checks whether the given health checks are equivalent (ports needed for
   * the conversion to MarathonHealthCheck).
   */
  def equalHealthChecks(
      ports: List[Port],
      healthChecks: List[HealthCheck],
      marathonHealthChecks: List[MarathonHealthCheck]
  ): Boolean =
    // Equal sizes (covers the both-empty case) and every converted check must
    // appear among the Marathon-side checks.
    healthChecks.size == marathonHealthChecks.size &&
      healthChecks.map(MarathonHealthCheck(ports, _)).forall(marathonHealthChecks.contains)
}
| magneticio/vamp | dcos/src/main/scala/io/vamp/container_driver/marathon/MarathonApp.scala | Scala | apache-2.0 | 2,670 |
/**
* Copyright (C) 2014 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.http
import org.orbeon.oxf.util.DateUtils
import collection.breakOut
object Headers {

  // Orbeon-specific headers used to propagate identity/authentication info.
  val OrbeonToken    = "Orbeon-Token"
  val OrbeonUsername = "Orbeon-Username"
  val OrbeonGroup    = "Orbeon-Group"
  val OrbeonRoles    = "Orbeon-Roles"

  // Standard HTTP header names referenced throughout the codebase.
  val ContentType    = "Content-Type"
  val ContentLength  = "Content-Length"
  val LastModified   = "Last-Modified"
  val Authorization  = "Authorization"
  val Location       = "Location"
  val OrbeonClient   = "Orbeon-Client"

  // Precomputed lowercase variants for case-insensitive comparisons.
  val OrbeonTokenLower    = OrbeonToken.toLowerCase
  val OrbeonUsernameLower = OrbeonUsername.toLowerCase
  val OrbeonGroupLower    = OrbeonGroup.toLowerCase
  val OrbeonRolesLower    = OrbeonRoles.toLowerCase
  val ContentTypeLower    = ContentType.toLowerCase
  val ContentLengthLower  = ContentLength.toLowerCase
  val LastModifiedLower   = LastModified.toLowerCase
  val AuthorizationLower  = Authorization.toLowerCase
  val LocationLower       = Location.toLowerCase
  val OrbeonClientLower   = OrbeonClient.toLowerCase

  // These headers are connection headers and must never be forwarded (content-length is handled separately below)
  //
  // - Don't proxy Content-Length and Content-Type. Proxies must associate these with the content and propagate via
  //   other means.
  // - We are not able to properly proxy directly a content-encoded response, so we don't proxy the relevant headers.
  private val HeadersToRemove = Set("connection", "transfer-encoding", ContentLength, ContentType) map (_.toLowerCase)
  val RequestHeadersToRemove  = HeadersToRemove ++ List("host", "cookie", "cookie2", "accept-encoding")
  val ResponseHeadersToRemove = HeadersToRemove ++ List("set-cookie", "set-cookie2", "content-encoding")

  val EmptyHeaders = Map.empty[String, List[String]]

  // Evidence that a container type T of U can be viewed as a Seq[U].
  // See: https://groups.google.com/d/msg/scala-sips/wP6dL8nIAQs/TUfwXWWxkyMJ
  type ConvertibleToSeq[T[_], U] = T[U] ⇒ Seq[U]

  // Filter headers that that should never be propagated in our proxies
  // Also combine headers with the same name into a single header
  def proxyCapitalizeAndCombineHeaders[T[_] <: AnyRef](
    headers : Iterable[(String, T[String])],
    request : Boolean)(implicit
    _conv   : ConvertibleToSeq[T, String]
  ): Iterable[(String, String)] =
    proxyAndCapitalizeHeaders(headers, request) map { case (name, values) ⇒ name → (values mkString ",") }

  // Filter (per direction) and normalize the capitalization of header names.
  def proxyAndCapitalizeHeaders[T[_] <: AnyRef, U](
    headers : Iterable[(String, T[U])],
    request : Boolean)(implicit
    _conv   : ConvertibleToSeq[T, U]
  ): Iterable[(String, T[U])] =
    proxyHeaders(headers, request) map { case (n, v) ⇒ capitalizeCommonOrSplitHeader(n) → v }

  // NOTE: Filtering is case-insensitive, but original case is unchanged
  def proxyAndCombineRequestHeaders[T[_] <: AnyRef](
    headers : Iterable[(String, T[String])])(implicit
    _conv   : ConvertibleToSeq[T, String]
  ): Iterable[(String, String)] =
    proxyHeaders(headers, request = true) map { case (name, values) ⇒ name → (values mkString ",") }

  // Drop headers that must not cross the proxy for the given direction.
  // NOTE: Filtering is case-insensitive, but original case is unchanged
  def proxyHeaders[T[_] <: AnyRef, U](
    headers : Iterable[(String, T[U])],
    request : Boolean)(implicit
    _conv   : ConvertibleToSeq[T, U]
  ): Iterable[(String, T[U])] =
    for {
      (name, values) ← headers
      if name ne null // HttpURLConnection.getHeaderFields returns null names. Great.
      if (request && ! RequestHeadersToRemove(name.toLowerCase)) || (! request && ! ResponseHeadersToRemove(name.toLowerCase))
      if (values ne null) && values.nonEmpty
    } yield
      name → values

  // Capitalize any header
  def capitalizeCommonOrSplitHeader(name: String) =
    capitalizeCommonHeader(name) getOrElse capitalizeSplitHeader(name)

  // Try to capitalize a common HTTP header
  def capitalizeCommonHeader(name: String) =
    lowercaseToCommonCapitalization.get(name.toLowerCase)

  // Capitalize a header of the form foo-bar-baz to Foo-Bar-Baz
  def capitalizeSplitHeader(name: String) =
    name split '-' map (_.toLowerCase.capitalize) mkString "-"

  // First value of the first header matching `name` (case-insensitive), if any.
  def firstHeaderIgnoreCase(headers: Iterable[(String, Seq[String])], name: String): Option[String] =
    headers collectFirst {
      case (key, value) if name.equalsIgnoreCase(key) && value.nonEmpty ⇒ value.head
    }

  // As above, parsed as a non-negative Long. NOTE(review): throws on a
  // non-numeric header value (`.toLong`) rather than returning None.
  def firstLongHeaderIgnoreCase(headers: Iterable[(String, Seq[String])], name: String): Option[Long] =
    firstHeaderIgnoreCase(headers, name) map (_.toLong) filter (_ >= 0L)

  // As above, parsed as an RFC 1123 date; only strictly positive epoch values pass.
  def firstDateHeaderIgnoreCase(headers: Iterable[(String, Seq[String])], name: String): Option[Long] =
    firstHeaderIgnoreCase(headers, name) flatMap DateUtils.tryParseRFC1123 filter (_ > 0L)

  // List of common HTTP headers
  // NOTE(review): "Pragma" and "Via" appear twice below; harmless for the
  // capitalization map (duplicate keys collapse) but redundant in the Seq.
  val CommonHeaders = Seq(
    "Accept",
    "Accept-Charset",
    "Accept-Datetime",
    "Accept-Encoding",
    "Accept-Language",
    "Accept-Ranges",
    "Age",
    "Allow",
    "Authorization",
    "Cache-Control",
    "Connection",
    "Content-Disposition",
    "Content-Encoding",
    "Content-Language",
    "Content-Length",
    "Content-Location",
    "Content-MD5",
    "Content-Range",
    "Content-Security-Policy",
    "Content-Type",
    "Cookie",
    "Cookie2",
    "DNT",
    "Date",
    "ETag",
    "Expect",
    "Expires",
    "Frame-Options",
    "From",
    "Front-End-Https",
    "Host",
    "If-Match",
    "If-Modified-Since",
    "If-None-Match",
    "If-Range",
    "If-Unmodified-Since",
    "Last-Modified",
    "Link",
    "Location",
    "Max-Forwards",
    "Origin",
    "P3P",
    "Pragma",
    "Pragma",
    "Proxy-Authenticate",
    "Proxy-Authorization",
    "Proxy-Connection",
    "Range",
    "Referer",
    "Refresh",
    "Retry-After",
    "Server",
    "Set-Cookie",
    "Set-Cookie2",
    "SOAPAction",
    "Status",
    "Strict-Transport-Security",
    "TE",
    "Trailer",
    "Transfer-Encoding",
    "Upgrade",
    "User-Agent",
    "Vary",
    "Via",
    "Via",
    "WWW-Authenticate",
    "Warning",
    "X-ATT-DeviceId",
    "X-Content-Security-Policy",
    "X-Content-Type-Options",
    "X-Forwarded-For",
    "X-Forwarded-Proto",
    "X-Frame-Options",
    "X-Powered-By",
    "X-Requested-With",
    "X-UA-Compatible",
    "X-Wap-Profile",
    "X-WebKit-CSP",
    "X-XSS-Protection"
  )

  // Lookup table from lowercase name to canonical capitalization.
  private val lowercaseToCommonCapitalization: Map[String, String] = CommonHeaders.map(name ⇒ name.toLowerCase → name)(breakOut)
}
| wesley1001/orbeon-forms | src/main/scala/org/orbeon/oxf/http/Headers.scala | Scala | lgpl-2.1 | 7,092 |
package ilc
package language
package bacchus
import org.scalatest.FunSuite
import ilc.feature._
/**
 * Round-trip tests for reification: evaluating the reified term of a value
 * should yield the original value again.
 */
class BacchusReificationSuite
  extends FunSuite
  with Syntax // for mapLiteral
  with Subjects // for type shorthands
  with Evaluation
  with naturals.Reification
  with maps.Reification
  with maybe.Reification
  with sums.Reification
  with base.Pretty
{
  test("can reify natural numbers") {
    val n: Value = 42
    assert(eval(reify(n, ℕ)) === n)
  }

  test("can reify maps") {
    // Nested map type: keys and values are themselves maps over naturals.
    val valueType = (ℕ ↦ ℕ) ↦ (ℕ ↦ ℕ)
    val t = mapLiteral(
      EmptyMap.tapply(ℕ, ℕ) -> mapLiteral(0 -> 100),
      mapLiteral(1 -> 2, 3 -> 4) -> mapLiteral(5 -> 6, 7 -> 8),
      mapLiteral(20 -> 30) -> EmptyMap.tapply(ℕ, ℕ))
    val value = eval(t)
    assert(eval(reify(value, valueType)) === value)
  }
}
| inc-lc/ilc-scala | src/test/scala/ilc/language/bacchus/ReificationSuite.scala | Scala | mit | 830 |
/**
* Helpers:
* ========
*
* io.Source.stdin.getLines()
* readInt()
* readLine()
* readLine().toList
* readLine().split(" ") .split("\\s+")
* println("%.2f\n".format(res))
* .toInt
* .toDouble
* .toFloat
* var Array(x1,x2) = readLine().split(" ").map(_.toInt)
* for (i <- 0 until readInt())
* mutable.ArrayBuffer.fill(n)(0)
* */
/**
* Problem: http://www.codechef.com/problems/
* GitHub: https://github.com/amezhenin/codechef_problems
*/
/**
 * CodeChef solution template. `alg` holds the algorithm (so it can be
 * doctested); `main` just prints its result.
 */
object Main {
  /**
   * Checkout https://github.com/amezhenin/codechef_scala_template to test your solutions with sbt-doctest
   * {{{
   * >>> Main.alg()
   *
   *
   * }}}
   * */
  // NOTE: the original template read `def alg(): = { }`, which is a syntax
  // error (missing result type) and prevented the template from compiling.
  // A concrete placeholder type and value are supplied; change both when
  // implementing a solution.
  def alg(): String = {
    // TODO: read input (readLine()/readInt()/io.Source.stdin.getLines())
    // and compute the answer here.
    ""
  }

  def main(args: Array[String]): Unit = {
    val res = alg()
    println(res)
  }
}
| amezhenin/codechef_scala_template | src/main/scala/Main.scala | Scala | gpl-2.0 | 808 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package reflect
package api
/** Tags which preserve the identity of abstract types in the face of erasure.
 *  Can be used for pattern matching, instance tests, serialization and the like.
 *  @group Tags
 */
trait ImplicitTags {
  self: Universe =>

  // Tags for Types.
  implicit val AnnotatedTypeTag: ClassTag[AnnotatedType]
  implicit val BoundedWildcardTypeTag: ClassTag[BoundedWildcardType]
  implicit val ClassInfoTypeTag: ClassTag[ClassInfoType]
  implicit val CompoundTypeTag: ClassTag[CompoundType]
  implicit val ConstantTypeTag: ClassTag[ConstantType]
  implicit val ExistentialTypeTag: ClassTag[ExistentialType]
  implicit val MethodTypeTag: ClassTag[MethodType]
  implicit val NullaryMethodTypeTag: ClassTag[NullaryMethodType]
  implicit val PolyTypeTag: ClassTag[PolyType]
  implicit val RefinedTypeTag: ClassTag[RefinedType]
  implicit val SingleTypeTag: ClassTag[SingleType]
  implicit val SingletonTypeTag: ClassTag[SingletonType]
  implicit val SuperTypeTag: ClassTag[SuperType]
  implicit val ThisTypeTag: ClassTag[ThisType]
  implicit val TypeBoundsTag: ClassTag[TypeBounds]
  implicit val TypeRefTag: ClassTag[TypeRef]
  // "TypeTagg" (double g) avoids clashing with the TypeTag type itself.
  implicit val TypeTagg: ClassTag[Type]

  // Tags for Names.
  implicit val NameTag: ClassTag[Name]
  implicit val TermNameTag: ClassTag[TermName]
  implicit val TypeNameTag: ClassTag[TypeName]

  // Tags for Scopes.
  implicit val ScopeTag: ClassTag[Scope]
  implicit val MemberScopeTag: ClassTag[MemberScope]

  // Tags for Annotations.
  implicit val AnnotationTag: ClassTag[Annotation]
  implicit val JavaArgumentTag: ClassTag[JavaArgument]

  // Tags for Symbols.
  implicit val TermSymbolTag: ClassTag[TermSymbol]
  implicit val MethodSymbolTag: ClassTag[MethodSymbol]
  implicit val SymbolTag: ClassTag[Symbol]
  implicit val TypeSymbolTag: ClassTag[TypeSymbol]
  implicit val ModuleSymbolTag: ClassTag[ModuleSymbol]
  implicit val ClassSymbolTag: ClassTag[ClassSymbol]

  // Tags for misc Tree relatives.
  implicit val PositionTag: ClassTag[Position]
  implicit val ConstantTag: ClassTag[Constant]
  implicit val FlagSetTag: ClassTag[FlagSet]
  implicit val ModifiersTag: ClassTag[Modifiers]

  // Tags for Trees, one per AST node class.
  implicit val AlternativeTag: ClassTag[Alternative]
  implicit val AnnotatedTag: ClassTag[Annotated]
  implicit val AppliedTypeTreeTag: ClassTag[AppliedTypeTree]
  implicit val ApplyTag: ClassTag[Apply]
  implicit val NamedArgTag: ClassTag[NamedArg]
  implicit val AssignTag: ClassTag[Assign]
  implicit val BindTag: ClassTag[Bind]
  implicit val BlockTag: ClassTag[Block]
  implicit val CaseDefTag: ClassTag[CaseDef]
  implicit val ClassDefTag: ClassTag[ClassDef]
  implicit val CompoundTypeTreeTag: ClassTag[CompoundTypeTree]
  implicit val DefDefTag: ClassTag[DefDef]
  implicit val DefTreeTag: ClassTag[DefTree]
  implicit val ExistentialTypeTreeTag: ClassTag[ExistentialTypeTree]
  implicit val FunctionTag: ClassTag[Function]
  implicit val GenericApplyTag: ClassTag[GenericApply]
  implicit val IdentTag: ClassTag[Ident]
  implicit val IfTag: ClassTag[If]
  implicit val ImplDefTag: ClassTag[ImplDef]
  implicit val ImportSelectorTag: ClassTag[ImportSelector]
  implicit val ImportTag: ClassTag[Import]
  implicit val LabelDefTag: ClassTag[LabelDef]
  implicit val LiteralTag: ClassTag[Literal]
  implicit val MatchTag: ClassTag[Match]
  implicit val MemberDefTag: ClassTag[MemberDef]
  implicit val ModuleDefTag: ClassTag[ModuleDef]
  implicit val NameTreeTag: ClassTag[NameTree]
  implicit val NewTag: ClassTag[New]
  implicit val PackageDefTag: ClassTag[PackageDef]
  implicit val RefTreeTag: ClassTag[RefTree]
  implicit val ReturnTag: ClassTag[Return]
  implicit val SelectFromTypeTreeTag: ClassTag[SelectFromTypeTree]
  implicit val SelectTag: ClassTag[Select]
  implicit val SingletonTypeTreeTag: ClassTag[SingletonTypeTree]
  implicit val StarTag: ClassTag[Star]
  implicit val SuperTag: ClassTag[Super]
  implicit val SymTreeTag: ClassTag[SymTree]
  implicit val TemplateTag: ClassTag[Template]
  implicit val TermTreeTag: ClassTag[TermTree]
  implicit val ThisTag: ClassTag[This]
  implicit val ThrowTag: ClassTag[Throw]
  implicit val TreeTag: ClassTag[Tree]
  implicit val TryTag: ClassTag[Try]
  implicit val TypTreeTag: ClassTag[TypTree]
  implicit val TypeApplyTag: ClassTag[TypeApply]
  implicit val TypeBoundsTreeTag: ClassTag[TypeBoundsTree]
  implicit val TypeDefTag: ClassTag[TypeDef]
  implicit val TypeTreeTag: ClassTag[TypeTree]
  implicit val TypedTag: ClassTag[Typed]
  implicit val UnApplyTag: ClassTag[UnApply]
  implicit val ValDefTag: ClassTag[ValDef]
  implicit val ValOrDefDefTag: ClassTag[ValOrDefDef]

  // Miscellaneous
  implicit val TreeCopierTag: ClassTag[TreeCopier]
  implicit val RuntimeClassTag: ClassTag[RuntimeClass]
  implicit val MirrorTag: ClassTag[Mirror]
}
| martijnhoekstra/scala | src/reflect/scala/reflect/api/ImplicitTags.scala | Scala | apache-2.0 | 5,089 |
package daos
import org.junit.runner._
import org.specs2.mutable._
import org.specs2.runner._
import play.api.test._
import scala.collection.LinearSeq
import scaldi._
import utils.test.WithFakeDb
@RunWith(classOf[JUnitRunner])
class HealthcheckDaoSpec extends Specification {

  // DAO under test; relies on the H2 database set up by WithFakeDb and the
  // SQL scripts below to control whether the "db" looks healthy or not.
  private val dao = new HealthcheckDaoH2Impl()

  "The Healthcheck dao" should {
    "return true iff the db is online" in new WithFakeDb(scripts = LinearSeq("test/resources/sql/healthcheckdaospec.1.sql")) {
      dao.ping() === true
    }
    "throw an exception iff the db in not online `app is down!`" in new WithFakeDb(scripts = LinearSeq("test/resources/sql/healthcheckdaospec.2.sql")) {
      { dao.ping() } must throwA[Exception]
    }
  }
}
| umatrangolo/shrt | test/daos/HealthcheckDaoSpec.scala | Scala | mit | 734 |
/*
* Copyright (c) 2014-2015 by its authors. Some rights reserved.
* See the project homepage at: http://www.monifu.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monifu.reactive.internals.operators
import monifu.concurrent.atomic.Atomic
import monifu.reactive.Observable
import monifu.reactive.exceptions.DummyException
import scala.concurrent.duration.Duration
object OnErrorRetryIfSuite extends BaseOperatorSuite {
  /**
   * Builds an observable that emits `sourceCount` items and then errors with
   * `ex` for the first `maxSubscriptions` subscriptions; subsequent
   * subscriptions complete normally. Used to exercise retry behavior.
   */
  def create(sourceCount: Int, maxSubscriptions: Int, ex: Throwable) = {
    var subscriptions = 0
    Observable.create[Long] { subscriber =>
      if (subscriptions < maxSubscriptions) {
        subscriptions += 1
        Observable.range(0, sourceCount)
          .endWithError(ex)
          .onSubscribe(subscriber)
      }
      else {
        Observable.range(0, sourceCount)
          .onSubscribe(subscriber)
      }
    }
  }

  // 3 failing subscriptions are retried, the 4th succeeds: the stream is
  // consumed 4 times, hence count and sum are multiplied by 4 below.
  def createObservable(sourceCount: Int) = Some {
    val retriesCount = Atomic(0)
    val o = create(sourceCount, 3, DummyException("expected")).onErrorRetryIf {
      case DummyException("expected") =>
        retriesCount.incrementAndGet() <= 3
    }

    val count = sourceCount * 4
    val sum = 1L * sourceCount * (sourceCount-1) / 2 * 4
    Sample(o, count, sum, Duration.Zero, Duration.Zero)
  }

  // The source fails 4 times but only 3 retries are allowed, so the final
  // error propagates downstream after 4 full passes over the data.
  def observableInError(sourceCount: Int, ex: Throwable) = Some {
    val retriesCount = Atomic(0)
    val o = create(sourceCount, 4, ex)
      .onErrorRetryIf(ex => retriesCount.incrementAndGet() <= 3)

    val count = sourceCount * 4
    val sum = 1L * sourceCount * (sourceCount-1) / 2 * 4
    Sample(o, count, sum, Duration.Zero, Duration.Zero)
  }

  // The retry predicate itself throws on the 4th invocation, which must be
  // surfaced as the stream's error.
  def brokenUserCodeObservable(sourceCount: Int, ex: Throwable) = Some {
    val retriesCount = Atomic(0)
    val o = create(sourceCount, 4, DummyException("unexpected"))
      .onErrorRetryIf { _ =>
        if (retriesCount.incrementAndGet() <= 3)
          true
        else
          throw ex
      }

    val count = sourceCount * 4
    val sum = 1L * sourceCount * (sourceCount-1) / 2 * 4
    Sample(o, count, sum, Duration.Zero, Duration.Zero)
  }
}
| virtualirfan/monifu | monifu/shared/src/test/scala/monifu/reactive/internals/operators/OnErrorRetryIfSuite.scala | Scala | apache-2.0 | 2,603 |
/*
* Copyright (C) 2015 Language Technology Group
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package model
import model.queryable.impl.{DocumentQueryableImpl, KeyTermQueryableImpl}
import org.scalatest.BeforeAndAfterAll
import testFactories.FlatSpecWithDatabaseTrait
// scalastyle:off
import scalikejdbc._
// scalastyle:on
/**
 * Integration tests for [[KeyTermQueryableImpl]] against the dedicated test
 * database. Fixture rows are inserted once in `beforeAll`.
 */
class KeyTermQueryableImplTest extends FlatSpecWithDatabaseTrait with BeforeAndAfterAll {
  def testDatabase: NamedDB = NamedDB('newsleakTestDB)
  // Mocking setup
  final class DocumentQueryableTestable extends DocumentQueryableImpl {
    override def connector: NamedDB = testDatabase
  }
  final class KeyTermQueryableTestable extends KeyTermQueryableImpl {
    override def connector: NamedDB = testDatabase
    override val document = new DocumentQueryableTestable
  }
  // Unit under test, wired to the test database.
  val uut = new KeyTermQueryableTestable
  override def beforeAll(): Unit = {
    testDatabase.localTx { implicit session =>
      // NOTE(review): both `terms` rows are inserted with id 1 — presumably the
      // second was meant to be id 2; confirm against the terms table schema.
      sql"INSERT INTO terms VALUES (1, ${"CDU"}, 7)".update.apply()
      sql"INSERT INTO terms VALUES (1, ${"SPD"}, 3)".update.apply()
      sql"INSERT INTO documentrelationship VALUES (1, 1, 10)".update.apply()
    }
  }
  "getDocumentKeyTerms" should "return all terms if no limit is set" in {
    val expected = List(
      KeyTerm("CDU", 7),
      KeyTerm("SPD", 3)
    )
    val actual = uut.getDocumentKeyTerms(1)
    assert(actual == expected)
  }
  // NOTE(review): the next two spec labels say "getDocumentTermVectors" but the
  // method exercised is getDocumentKeyTerms — the labels look stale.
  "getDocumentTermVectors" should "return only limit-terms" in {
    val actual = uut.getDocumentKeyTerms(1, Some(1))
    assert(actual.size == 1)
  }
  "getDocumentTermVectors" should "return ordered by importance when limit" in {
    val expected = List(
      KeyTerm("CDU", 7)
    )
    val actual = uut.getDocumentKeyTerms(1, Some(1))
    assert(actual == expected)
  }
  "getRelationshipKeyTerms" should "return important terms with document occurrence" in {
    val expected = List(
      KeyTerm("CDU", 1),
      KeyTerm("SPD", 1)
    )
    val actual = uut.getRelationshipKeyTerms(1)
    assert(actual == expected)
  }
}
| tudarmstadt-lt/newsleak | common/src/test/scala/model/KeyTermQueryableImplTest.scala | Scala | agpl-3.0 | 2,658 |
package com.twitter.finagle.postgresql
import java.nio.file.Files
import java.nio.file.attribute.PosixFilePermission
import com.spotify.docker.client.messages.HostConfig
import com.twitter.finagle.PostgreSql
import com.twitter.finagle.ssl.TrustCredentials
import com.twitter.finagle.ssl.client.SslClientConfiguration
import com.whisk.docker.testkit.ContainerSpec
import scala.jdk.CollectionConverters._
/**
 * Integration spec verifying that the client can speak TLS to a postgres
 * container configured with a test certificate/key pair.
 */
class TlsSpec extends PgSqlIntegrationSpec with ResourceFileSpec {
  specificTo(Postgres)
  /**
   * Here be dragons.
   *
   * A Docker mount of type "bind" will have the uid:gid of the host user inside the container.
   * For example, if the host user running `docker run` is `1001:116`, the mounted file in the container will be owned by `1001:116`.
   *
   * For the TLS private key file, postgres will only accept reading it if it is owned by root or the user running postgres.
   * Furthermore, it will check that the permissions are not "world readable".
   *
   * The 2 statements above makes it difficult to provide a private key to postgres: the host user does not exist in the container
   * yet, it must run own the secret key AND run postgres.
   *
   * The solution used is to run postgres as the host user, but this requires the following workarounds:
   *
   * * run the container as the host's `uid:gid`
   * * mount the host `/etc/passwd` as `/etc/passwd` in the container so the host user exists
   * * use a subdirectory of the default `PGDATA` value so `initdb` can successfully do its thing
   *
   * When necessary, the host user's `uid` and `gid` must be provided using the `CI_UID_GID` environment variable and should
   * be formatted as `"uid:gid"` (without the double quotes).
   *
   * NOTE: on OSX none of this is necessary, for some reason, the mounted files are owned by root.
   */
  def runAs = Option(System.getenv("CI_UID_GID"))
  /** Bind-mounts the host's /etc/passwd (read-only) so the `runAs` user exists in the container. */
  def passwdVolume = runAs.map { _ =>
    HostConfig.Bind.builder()
      .from("/etc/passwd")
      .to("/etc/passwd")
      .readOnly(true)
      .build()
  }
  /** Restricts the key file to owner read/write; postgres rejects world-readable keys. */
  def keyPerms(file: java.io.File) =
    Files.setPosixFilePermissions(
      file.toPath,
      Set(PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE).asJava
    ).toFile
  // Use a PGDATA subdirectory so initdb can run when the container user is the host user.
  override def env(backend: Backend) = Map(
    "PGDATA" -> "/var/lib/postgresql/data/pg_data"
  )
  /** Read-only bind mounts for the test server certificate and (permission-fixed) private key. */
  def tlsVolumes = List(
    HostConfig.Bind.builder()
      .from(toTmpFile("/server.crt").getAbsolutePath)
      .to("/certs/server.crt")
      .readOnly(true)
      .build(),
    HostConfig.Bind.builder()
      .from(keyPerms(toTmpFile("/server.key")).getAbsolutePath)
      .to("/certs/server.key")
      .readOnly(true)
      .build()
  )
  // Runs the container as the host user (when CI_UID_GID is set), mounts the
  // volumes above and enables ssl with the mounted cert/key.
  override def configure(backend: Backend, spec: ContainerSpec): ContainerSpec =
    spec
      .withOption(runAs)((s, user) => s.withConfiguration(_.user(user)))
      .withVolumeBindings((passwdVolume.toList ++ tlsVolumes): _*)
      .withCommand(
        "-c",
        "ssl=true",
        "-c",
        "ssl_cert_file=/certs/server.crt",
        "-c",
        "ssl_key_file=/certs/server.key",
      )
  /** Enables client-side TLS without certificate validation (test-only). */
  def withTls(client: PostgreSql.Client): PostgreSql.Client =
    client.withTransport.tls(SslClientConfiguration(trustCredentials = TrustCredentials.Insecure))
  "TLS" should {
    "support tls" in withClient(cfg = withTls) { client =>
      client.toService(Request.Sync)
        .map { response =>
          response must beEqualTo(Response.Ready)
        }
    }
  }
}
/**
 * Verifies the client's behaviour when it requests TLS but the server was not
 * configured for it: the request must fail with [[PgSqlTlsUnsupportedError]].
 */
class MissingTlsSpec extends PgSqlIntegrationSpec with ResourceFileSpec {
  /** Enables client-side TLS without certificate validation (test-only). */
  def withTls(client: PostgreSql.Client): PostgreSql.Client =
    client.withTransport.tls(SslClientConfiguration(trustCredentials = TrustCredentials.Insecure))
  "TLS" should {
    "handle unsupported tls" in withClient(cfg = withTls) { client =>
      client.toService(Request.Sync)
        .liftToTry
        .map { response =>
          response.asScala must beAFailedTry(beEqualTo(PgSqlTlsUnsupportedError))
        }
    }
  }
}
| twitter/finagle | finagle-postgresql/src/it/scala/com/twitter/finagle/postgresql/TlsSpec.scala | Scala | apache-2.0 | 3,968 |
package im.actor.server.session
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Random
import akka.testkit.TestProbe
import com.google.protobuf.ByteString
import scodec.bits._
import im.actor.api.rpc.auth._
import im.actor.api.rpc.codecs._
import im.actor.api.rpc.contacts.UpdateContactRegistered
import im.actor.api.rpc.misc.ResponseVoid
import im.actor.api.rpc.peers.UserOutPeer
import im.actor.api.rpc.sequence.RequestSubscribeToOnline
import im.actor.api.rpc.weak.UpdateUserOffline
import im.actor.api.rpc.{ AuthorizedClientData, Request, RpcOk }
import im.actor.server.mtproto.protocol._
import im.actor.server.mtproto.transport._
import im.actor.server.push.{ SeqUpdatesManager, WeakUpdatesManager }
import im.actor.server.session.SessionEnvelope.Payload
import im.actor.server.user.UserOffice
class SessionSpec extends BaseSessionSpec {
  behavior of "Session actor"

  it should "send Drop on message on wrong message box" in sessions().e1
  it should "send NewSession on first HandleMessageBox" in sessions().e2
  it should "reply to RpcRequestBox" in sessions().e3
  it should "handle user authorization" in sessions().e4
  it should "subscribe to sequence updates" in sessions().e5
  it should "subscribe to weak updates" in sessions().e6
  it should "subscribe to presences" in sessions().e7
  it should "react to SessionHello" in sessions().e8

  case class sessions() {
    implicit val probe = TestProbe()

    /**
     * Drives the (obsolete) phone sign-up flow for a fresh session and returns
     * the resulting `ResponseAuth`:
     *  1. request an sms auth code — as this is the session's first message a
     *     NewSession is expected along with the ack;
     *  2. sign up with the returned sms hash and assert the server answers
     *     with `ResponseAuth`.
     * This flow was previously copy-pasted across e4-e7.
     */
    private def signUp(authId: Long, sessionId: Long, phoneNumber: Long, deviceHash: Array[Byte]): ResponseAuth = {
      val codeMessageId = Random.nextLong()
      val encodedCodeRequest = RequestCodec.encode(Request(RequestSendAuthCodeObsolete(phoneNumber, 1, "apiKey"))).require
      sendMessageBox(authId, sessionId, sessionRegion.ref, codeMessageId, RpcRequestBox(encodedCodeRequest))
      expectNewSession(authId, sessionId, codeMessageId)
      expectMessageAck(authId, sessionId, codeMessageId)
      val smsHash = expectRpcResult().asInstanceOf[RpcOk].response.asInstanceOf[ResponseSendAuthCodeObsolete].smsHash
      val encodedSignUpRequest = RequestCodec.encode(Request(RequestSignUpObsolete(
        phoneNumber = phoneNumber,
        smsHash = smsHash,
        smsCode = "0000",
        name = "Wayne Brain",
        deviceHash = deviceHash,
        deviceTitle = "Specs virtual device",
        appId = 1,
        appKey = "appKey",
        isSilent = false
      ))).require
      val signUpMessageId = Random.nextLong()
      sendMessageBox(authId, sessionId, sessionRegion.ref, signUpMessageId, RpcRequestBox(encodedSignUpRequest))
      expectMessageAck(authId, sessionId, signUpMessageId)
      val authResult = expectRpcResult()
      authResult should matchPattern {
        case RpcOk(ResponseAuth(_, _)) ⇒
      }
      authResult.asInstanceOf[RpcOk].response.asInstanceOf[ResponseAuth]
    }

    /** An unparseable message box must drop the connection and stop the session actor. */
    def e1() = {
      val authId = createAuthId()
      val sessionId = Random.nextLong()
      val session = system.actorOf(Session.props(mediator))
      sendEnvelope(authId, sessionId, session, Payload.HandleMessageBox(HandleMessageBox(ByteString.copyFrom(BitVector.empty.toByteBuffer))))
      probe watch session
      probe.expectMsg(Drop(0, 0, "Cannot parse MessageBox"))
      probe.expectTerminated(session)
    }

    /** The first message box of a session must be answered with NewSession. */
    def e2() = {
      val authId = createAuthId()
      val sessionId = Random.nextLong()
      val messageId = Random.nextLong()
      val encodedRequest = RequestCodec.encode(Request(RequestSendAuthCodeObsolete(75553333333L, 1, "apiKey"))).require
      sendMessageBox(authId, sessionId, sessionRegion.ref, messageId, RpcRequestBox(encodedRequest))
      expectNewSession(authId, sessionId, messageId)
      // Drain the ack and the rpc reply; nothing else may arrive afterwards.
      probe.receiveOne(1.second)
      probe.receiveOne(1.second)
      probe.expectNoMsg()
    }

    /** An RpcRequestBox must be acked and answered with an rpc result. */
    def e3() = {
      val authId = createAuthId()
      val sessionId = Random.nextLong()
      val messageId = Random.nextLong()
      val encodedRequest = RequestCodec.encode(Request(RequestSendAuthCodeObsolete(75553333334L, 1, "apiKey"))).require
      sendMessageBox(authId, sessionId, sessionRegion.ref, messageId, RpcRequestBox(encodedRequest))
      expectNewSession(authId, sessionId, messageId)
      expectMessageAck(authId, sessionId, messageId)
      expectRpcResult() should matchPattern {
        case RpcOk(ResponseSendAuthCodeObsolete(_, false)) ⇒
      }
    }

    /** Full authorization round trip: sign up, then sign out. */
    def e4() = {
      val authId = createAuthId()
      val sessionId = Random.nextLong()
      val phoneNumber = 75550000000L
      signUp(authId, sessionId, phoneNumber, Array(4, 5, 6))
      val encodedSignOutRequest = RequestCodec.encode(Request(RequestSignOut)).require
      val signOutMessageId = Random.nextLong()
      sendMessageBox(authId, sessionId, sessionRegion.ref, signOutMessageId, RpcRequestBox(encodedSignOutRequest))
      expectMessageAck(authId, sessionId, signOutMessageId)
      expectRpcResult() should matchPattern {
        case RpcOk(ResponseVoid) ⇒
      }
    }

    /** After authorization the session must receive sequence updates. */
    def e5() = {
      val authId = createAuthId()
      val sessionId = Random.nextLong()
      val phoneNumber = 75550000000L
      val auth = signUp(authId, sessionId, phoneNumber, Array(5, 5, 6))
      implicit val clientData = AuthorizedClientData(authId, sessionId, auth.user.id)
      val update = UpdateContactRegistered(1, true, 1L, 2L)
      Await.result(UserOffice.broadcastClientUpdate(update, None, isFat = false), 1.second)
      expectSeqUpdate(authId, sessionId).update should ===(update.toByteArray)
    }

    /** After authorization the session must receive weak updates. */
    def e6() = {
      val authId = createAuthId()
      val sessionId = Random.nextLong()
      val phoneNumber = 75550000000L
      val auth = signUp(authId, sessionId, phoneNumber, Array(5, 5, 6))
      implicit val clientData = AuthorizedClientData(authId, sessionId, auth.user.id)
      val update = UpdateContactRegistered(1, true, 1L, 5L)
      Await.result(db.run(WeakUpdatesManager.broadcastUserWeakUpdate(clientData.userId, update)), 1.second)
      expectWeakUpdate(authId, sessionId).update should ===(update.toByteArray)
    }

    /** Subscribing to a user's presence must yield presence (weak) updates. */
    def e7() = {
      val authId = createAuthId()
      val sessionId = Random.nextLong()
      val phoneNumber = 75550000000L
      signUp(authId, sessionId, phoneNumber, Array(5, 5, 6))
      val userForSubscribe = 2
      // FIXME: real user and real accessHash
      val encodedSubscribeRequest = RequestCodec.encode(Request(RequestSubscribeToOnline(Vector(UserOutPeer(userForSubscribe, 0L))))).require
      val messageId = Random.nextLong()
      sendMessageBox(authId, sessionId, sessionRegion.ref, messageId, RpcRequestBox(encodedSubscribeRequest))
      expectMessageAck(authId, sessionId, messageId)
      val subscribeResult = expectRpcResult()
      subscribeResult should matchPattern {
        case RpcOk(ResponseVoid) ⇒
      }
      val ub = expectWeakUpdate(authId, sessionId)
      ub.updateHeader should ===(UpdateUserOffline.header)
    }

    /** SessionHello must create the session and allow pushes without any rpc call. */
    def e8() = {
      val authId = createAuthId()
      val sessionId = Random.nextLong()
      val messageId = Random.nextLong()
      sendMessageBox(authId, sessionId, sessionRegion.ref, messageId, SessionHello)
      expectNewSession(authId, sessionId, messageId)
      expectMessageAck(authId, sessionId, messageId)
      SeqUpdatesManager.persistAndPushUpdateF(authId, UpdateContactRegistered(1, false, 1L, 2L), None, isFat = false)
      expectSeqUpdate(authId, sessionId)
      probe.expectNoMsg()
    }
  }
}
| sc4599/actor-platform | actor-server/actor-tests/src/test/scala/im/actor/server/session/SessionSpec.scala | Scala | mit | 10,901 |
package com.wavesplatform.lang.v1.repl.node.http.response.model
import com.wavesplatform.lang.v1.traits.DataType
import io.circe.{Decoder, DecodingFailure, HCursor}
/** A single account data entry from the node REST API: the key, the decoded value and its resolved [[DataType]]. */
private[node] case class DataEntry(key: String, value: Any, `type`: DataType)
private[node] object DataEntry {
  /**
   * Decodes a JSON object of shape `{type, key, value}`, dispatching on the
   * declared `type` tag to pick both the value decoder and the [[DataType]].
   * An unknown tag yields a [[DecodingFailure]].
   */
  implicit val decoder: Decoder[DataEntry] = (c: HCursor) => {
    val valueField = c.downField("value")
    for {
      rawType <- c.downField("type").as[String]
      key     <- c.downField("key").as[String]
      pair    <- rawType match {
        case "binary"  => valueField.as[ByteString].map(b => (b.byteStr, DataType.ByteArray))
        case "boolean" => valueField.as[Boolean].map(b => (b, DataType.Boolean))
        case "integer" => valueField.as[Long].map(n => (n, DataType.Long))
        case "string"  => valueField.as[String].map(s => (s, DataType.String))
        case t         => Left(DecodingFailure(s"Illegal data entry type: $t", Nil))
      }
    } yield DataEntry(key, pair._1, pair._2)
  }
}
| wavesplatform/Waves | repl/shared/src/main/scala/com/wavesplatform/lang/v1/repl/node/http/response/model/DataEntry.scala | Scala | mit | 957 |
package com.github.jmora.scala.util.data.collection
import scala.concurrent.Future
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import com.github.jmora.scala.util.boilerplate._
import scala.collection.AbstractIterator
/**
 * Iterator decorator that eagerly computes the next element of `inner` on a
 * background `Future` while the consumer processes the current one. `next()`
 * blocks (up to `timeout`) until the prefetched element is available and then
 * immediately schedules the following fetch.
 */
class PrefetchIterator[A](val inner: Iterator[A])(implicit timeout: Duration = Duration.Inf) extends AbstractIterator[A] {

  /** Returns this iterator; hook kept for extension purposes. */
  def prefetch = this

  /** Starts fetching the next element, or yields None when `inner` is exhausted. */
  private def scheduleNext(): Option[Future[A]] =
    if (inner.hasNext) Some(Future { inner.next }) else None

  // The in-flight computation of the next element; None once exhausted.
  private var pending: Option[Future[A]] = scheduleNext()

  def hasNext(): Boolean = pending.nonEmpty

  def next(): A = {
    val inFlight = pending.getOrElse(
      throw new java.util.NoSuchElementException("next on empty iterator"))
    // Block until the prefetched element is ready, then kick off the next fetch.
    val result = Await.result(inFlight, timeout)
    pending = scheduleNext()
    result
  }
}
| jmora/scala.util | src/main/scala/com/github/jmora/scala/util/data/collection/PrefetchIterator.scala | Scala | mit | 872 |
package mesosphere.marathon.state
import mesosphere.marathon.metrics.Metrics
import mesosphere.marathon.upgrade.DeploymentPlan
import scala.collection.immutable.Seq
import scala.concurrent.Future
/**
 * Entity repository for [[DeploymentPlan]]s, keyed by plan id and versioned
 * by the plan's own version.
 */
class DeploymentRepository(
  val store: EntityStore[DeploymentPlan],
  val maxVersions: Option[Int] = None,
  val metrics: Metrics)
    extends EntityRepository[DeploymentPlan] with StateMetrics {
  import mesosphere.util.ThreadPoolContext.context
  /** Persists the plan under its id, versioned by the plan's version. */
  def store(plan: DeploymentPlan): Future[DeploymentPlan] =
    storeWithVersion(plan.id, plan.version, plan)
  /** Loads the current version of every stored deployment plan. */
  def all(): Future[Seq[DeploymentPlan]] =
    for {
      ids <- allIds()
      plans <- Future.sequence(ids.map(this.currentVersion))
    } yield plans.flatten.to[Seq]
}
| EasonYi/marathon | src/main/scala/mesosphere/marathon/state/DeploymentRepository.scala | Scala | apache-2.0 | 751 |
/*
* Copyright 2015 Foundational Development
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package pro.foundev.scala
object FindWithArgs extends CassandraCapable {
  /**
   * Reads every `value` column for a given `version` from Cassandra and prints
   * each one. The version is taken from the first command-line argument when
   * present, defaulting to "1".
   *
   * Bug fix: the original guarded the assignment with `if (version != null)`,
   * which was always true (`version` was initialised to "1"), instead of
   * checking the argument itself.
   */
  def main(args: Array[String]): Unit = {
    val context = connectToCassandra()
    val rdd = context.rdd
    val version = if (args.length > 0 && args(0) != null) args(0) else "1"
    println("reading version: " + version)
    // NOTE(review): the where-clause is built by string concatenation; if
    // `version` can come from untrusted input, prefer a bound parameter,
    // e.g. rdd.where("version = ?", version).
    rdd.where("version=" + version).
      map(x => x.get[String]("value")).collect()
      .foreach(x => println(x))
  }
}
| rssvihla/datastax_work | spark_commons/examples/spark_bulk_operations/src/main/scala/pro/foundev/scala/FindWithArgs.scala | Scala | apache-2.0 | 1,097 |
package lore.compiler.types
import lore.compiler.semantics.functions.FunctionSignature
import lore.compiler.semantics.structures.StructPropertyDefinition
case class StructType(
  schema: StructSchema,
  assignments: TypeVariable.Assignments,
) extends DeclaredType {
  /** The struct's properties, with their types instantiated under this struct's type arguments. */
  lazy val properties: Vector[StructPropertyDefinition.Instance] =
    schema.definition.properties.map(property => property.instantiate(assignments))
  /**
   * The struct constructor signature with all type parameters instantiated;
   * the signature itself therefore carries no type parameters.
   */
  lazy val constructorSignature: FunctionSignature =
    FunctionSignature(
      name,
      Vector.empty,
      properties.map(property => property.asParameter),
      this,
      schema.definition.position,
    )
  /** The type arguments assigned to the schema's open type parameters. */
  lazy val openTypeArguments: Vector[Type] =
    assignments.collect { case (parameter, argument) if parameter.isOpen => argument }.toVector
  /**
   * This struct viewed as a compile-time shape type. Whether the struct's
   * properties are open has no bearing on this representation.
   */
  override lazy val asShapeType: ShapeType =
    ShapeType(properties.map(property => ShapeType.Property(property)))
}
| marcopennekamp/lore | compiler/src/lore/compiler/types/StructType.scala | Scala | mit | 1,351 |
package sample.camel
import org.apache.camel.Exchange
import org.apache.camel.Processor
import org.apache.camel.builder.RouteBuilder
import akka.actor.Actor
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.Props
import akka.camel.CamelExtension
import akka.camel.CamelMessage
import akka.camel.Consumer
import akka.camel.Producer
/**
 * End-to-end Camel/Akka example: an HTTP consumer forwards request bodies to a
 * transformer actor, which decorates them and forwards to a producer that
 * feeds the `direct:welcome` Camel route defined in [[CustomRouteBuilder]].
 */
object CustomRouteExample {
  def main(args: Array[String]): Unit = {
    val system = ActorSystem("some-system")
    val producer = system.actorOf(Props[RouteProducer])
    val mediator = system.actorOf(Props(classOf[RouteTransformer], producer))
    // The consumer registers its jetty endpoint with Camel via the Consumer trait.
    val consumer = system.actorOf(Props(classOf[RouteConsumer], mediator))
    CamelExtension(system).context.addRoutes(new CustomRouteBuilder)
  }
  /** HTTP endpoint actor; forwards each request body (as String) to the transformer. */
  class RouteConsumer(transformer: ActorRef) extends Actor with Consumer {
    def endpointUri = "jetty:http://0.0.0.0:8877/camel/welcome"
    def receive = {
      // Forward a string representation of the message body to transformer
      case msg: CamelMessage => {
        transformer.forward(msg.withBodyAs[String])
      }
    }
  }
  /** Decorates message bodies before passing them on to the producer. */
  class RouteTransformer(producer: ActorRef) extends Actor {
    def receive = {
      // example: transform message body "foo" to "- foo -" and forward result
      // to producer
      case msg: CamelMessage => {
        producer.forward(msg.mapBody((body: String) => "- %s -" format body))
      }
    }
  }
  /** Publishes incoming messages to the Camel `direct:welcome` endpoint. */
  class RouteProducer extends Actor with Producer {
    def endpointUri = "direct:welcome"
  }
  /** Camel route that prefixes bodies arriving on `direct:welcome` with "Welcome ". */
  class CustomRouteBuilder extends RouteBuilder {
    def configure {
      from("direct:welcome").process(new Processor() {
        def process(exchange: Exchange) {
          // Create a 'welcome' message from the input message
          exchange.getOut.setBody("Welcome %s" format exchange.getIn.getBody)
        }
      })
    }
  }
}
| alanktwong/typesafe_activators | akka-sample-camel-scala/src/main/scala/sample/camel/CustomRouteExample.scala | Scala | mit | 1,756 |
package info.glennengstrand.news
import java.sql.PreparedStatement
import info.glennengstrand.io._
import org.specs2.mutable.Specification
import spray.testkit.Specs2RouteTest
/** Stubbed key/value writer: ignores the payload and always reports generated id 1. */
trait MockWriter extends PersistentDataStoreWriter {
  def write(o: PersistentDataStoreBindings, state: Map[String, Any], criteria: Map[String, Any]): Map[String, Any] = {
    Map("id" -> 1l)
  }
}
/** Stubbed relational writer: returns no SQL (null) so nothing is ever prepared or executed. */
trait MockRelationalWriter extends PersistentRelationalDataStoreWriter {
  def generatePreparedStatement(operation: String, entity: String, inputs: Iterable[String], outputs: Iterable[(String, String)]): String = {
    null
  }
}
/** Stubbed search index: every query matches ids 1..3 and indexing is a no-op. */
trait MockSearcher extends PersistentDataStoreSearcher {
  def search(terms: String): Iterable[java.lang.Long] = {
    List(1l, 2l, 3l)
  }
  def index(id: Long, content: String): Unit = {
  }
}
/** Performance logger that silently discards all measurements. */
class MockPerformanceLogger extends PerformanceLogger {
  def log(topic: String, entity: String, operation: String, duration: Long): Unit = {
  }
}
/**
 * Factory used by the specs: every domain object is backed by the mock
 * writers/caches above, so no real datastore is touched.
 */
class MockFactoryClass extends FactoryClass {
  val performanceLogger = new MockPerformanceLogger
  // Canned single-entry feed state shared by getInbound(id) and getOutbound(id);
  // the two methods previously duplicated this literal.
  private val feedState: Iterable[Map[String, Any]] = List(
    Map[String, Any](
      "participantID" -> "1",
      "occurred" -> "2014-01-12 22:56:36-0800",
      "fromParticipantID" -> "2",
      "subject" -> "test",
      "story" -> "this is a unit test"
    )
  )
  /** Returns a participant named "test" backed by mock persistence. */
  def getParticipant(id: Long): Participant = {
    new Participant(id, "test") with MockRelationalWriter with MockCacheAware
  }
  /** Builds a participant from a form-post encoded state string. */
  def getParticipant(state: String): Participant = {
    val s = IO.fromFormPost(state)
    new Participant(s("id").asInstanceOf[String].toLong, s("name").asInstanceOf[String]) with MockRelationalWriter with MockCacheAware
  }
  /** Returns a canned friends list; the requested id is ignored by this mock. */
  def getFriends(id: Long): Friends = {
    val state: Iterable[Map[String, Any]] = List(
      Map[String, Any](
        "FriendsID" -> "1",
        "ParticipantID" -> "2"
      )
    )
    new Friends(1L, state)
  }
  /** Builds a friend link from a form-post encoded state string. */
  def getFriend(state: String): Friend = {
    val s = IO.fromFormPost(state)
    new Friend(s("FriendsID").asInstanceOf[String].toLong, s("FromParticipantID").asInstanceOf[String].toInt, s("ToParticipantID").asInstanceOf[String].toInt) with MockRelationalWriter with MockCacheAware
  }
  /** Returns the canned inbound feed; the requested id is ignored by this mock. */
  def getInbound(id: Int): InboundFeed = {
    new InboundFeed(1, feedState)
  }
  /** Builds an inbound feed item from a form-post encoded state string. */
  def getInbound(state: String): Inbound = {
    val s = IO.fromFormPost(state)
    new Inbound(s("participantID").asInstanceOf[String].toInt, IO.df.parse(s("occurred").asInstanceOf[String]), s("fromParticipantID").asInstanceOf[String].toInt, s("subject").asInstanceOf[String], s("story").asInstanceOf[String]) with MockWriter with MockCacheAware
  }
  /** Returns the canned outbound feed; the requested id is ignored by this mock. */
  def getOutbound(id: Int): OutboundFeed = {
    new OutboundFeed(1, feedState)
  }
  /** Builds an outbound feed item from a form-post encoded state string. */
  def getOutbound(state: String): Outbound = {
    val s = IO.fromFormPost(state)
    new Outbound(s("participantID").asInstanceOf[String].toInt, IO.df.parse(s("occurred").asInstanceOf[String]), s("subject").asInstanceOf[String], s("story").asInstanceOf[String]) with MockWriter with MockCacheAware
  }
  /** Dispatches fetch-by-long-id requests by entity name. */
  def getObject(name: String, id: Long): Option[Object] = {
    name match {
      case "participant" => Some(getParticipant(id))
      case "friends" => Some(getFriends(id))
      case _ => None
    }
  }
  /** Dispatches fetch-by-int-id requests by entity name. */
  def getObject(name: String, id: Int): Option[Object] = {
    name match {
      case "inbound" => Some(getInbound(id))
      case "outbound" => Some(getOutbound(id))
      case _ => None
    }
  }
  /** Dispatches create-from-form-post requests by entity name. */
  def getObject(name: String, state: String): Option[Object] = {
    name match {
      case "participant" => Some(getParticipant(state))
      case "friend" => Some(getFriend(state))
      case "inbound" => Some(getInbound(state))
      case "outbound" => Some(getOutbound(state))
      case _ => None
    }
  }
  /** Dispatches singleton lookups by name. */
  def getObject(name: String): Option[Object] = {
    name match {
      case "logger" => Some(performanceLogger)
      case _ => None
    }
  }
}
/**
 * Unit tests for the news feed service's spray routes, wired to
 * [[MockFactoryClass]] so no datastore or search index is required.
 */
class FeedSpec extends Specification with Specs2RouteTest with Feed {
  def actorRefFactory = system
  // Route dependencies are swapped for mocks before any request is issued.
  Feed.factory = new MockFactoryClass
  IO.cacheStatements = false
  IO.unitTesting = true
  "Feed" should {
    "return the correct data when fetching a participant" in {
      Get("/participant/2") ~> myRoute ~> check {
        responseAs[String] must contain("test")
      }
    }
    "return the correct data when fetching friends" in {
      Get("/friends/1") ~> myRoute ~> check {
        responseAs[String] must contain("2")
      }
    }
    "return the correct data when fetching inbound" in {
      Get("/inbound/1") ~> myRoute ~> check {
        responseAs[String] must contain("test")
      }
    }
    "leave GET requests to other paths unhandled" in {
      Get("/kermit") ~> myRoute ~> check {
        handled must beFalse
      }
    }
    "process post requests to create a new participant properly" in {
      Post("/participant/new", "id=2&name=smith") ~> myRoute ~> check {
        responseAs[String] must contain("smith")
      }
    }
  }
}
| Krasnyanskiy/clojure-news-feed | server/feed2/src/test/scala/info/glennengstrand/news/FeedSpec.scala | Scala | epl-1.0 | 5,257 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogx.compiler.codegenerator.opencl.hyperkernels.fastfouriertransform
import cogx.cogmath.algebra.real.Logarithm._
/** The dimensions of a 1D, 2D, or 3D FFT.
*
* @param x Size of X dimension.
* @param y Size of Y dimension (1 for 1D transform).
* @param z Size of Z dimension (1 for 1D and 2D transforms).
*
* @author Greg Snider and Dick Carter
*/
private[cogx]
class ClFFTDim3(val x: Int, val y: Int = 1, val z: Int = 1) {
  // The FFT kernels only support power-of-2 sizes in every dimension.
  require(isPowerOf2(x))
  require(isPowerOf2(y))
  require(isPowerOf2(z))

  /** The dimension of the transform, opportunistically reduced when trailing sizes are 1. */
  def dimension =
    (y, z) match {
      case (1, 1) => 1
      case (_, 1) => 2
      case _      => 3
    }

  /** Renders the effective sizes as a suffix string, one `_<size>` segment per dimension. */
  override def toString: String =
    dimension match {
      case 1 => s"_size_${x}"
      case 2 => s"_size_${x}_${y}"
      case 3 => s"_size_${x}_${y}_${z}"
    }
}
| hpe-cct/cct-core | src/main/scala/cogx/compiler/codegenerator/opencl/hyperkernels/fastfouriertransform/ClFFTDim3.scala | Scala | apache-2.0 | 1,565 |
package shared.requests.policies
/** Request to fetch a single policy; all inputs arrive as path parameters. */
case class GetPolicyRequest (
  pathParams: GetPolicyRequestPathParams
)
/** Path parameters identifying the policy: the owning organization id and the policy name. */
case class GetPolicyRequestPathParams(
  organizationId: String,
  policyName: String
)
| beikern/foulkon-ui | shared/src/main/scala/shared/requests/policies/GetPolicyRequest.scala | Scala | apache-2.0 | 196 |
/*
* Copyright 2016 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.linkedin.drelephant.spark
import com.linkedin.drelephant.analysis.{HadoopAggregatedData, HadoopApplicationData, HadoopMetricsAggregator}
import com.linkedin.drelephant.configurations.aggregator.AggregatorConfigurationData
import com.linkedin.drelephant.math.Statistics
import com.linkedin.drelephant.spark.data.{SparkApplicationData, SparkLogDerivedData, SparkRestDerivedData}
import com.linkedin.drelephant.util.MemoryFormatUtils
import org.apache.commons.io.FileUtils
import org.apache.log4j.Logger
import scala.util.Try
/**
 * Aggregates resource usage/waste metrics for a Spark application.
 *
 * Resources are measured in MB-seconds: "allocated" integrates
 * (instances * executor memory) over the application duration, while
 * "actually used" integrates executor memory over total executor task time.
 */
class SparkMetricsAggregator(private val aggregatorConfigurationData: AggregatorConfigurationData)
  extends HadoopMetricsAggregator {
  import SparkMetricsAggregator._

  private val logger: Logger = Logger.getLogger(classOf[SparkMetricsAggregator])

  // Fraction of actually-used resources tolerated on top of usage before the
  // remaining allocation is counted as waste. Read from the aggregator
  // configuration; falls back to the default when absent or unparsable.
  private val allocatedMemoryWasteBufferPercentage: Double =
    Option(aggregatorConfigurationData.getParamMap.get(ALLOCATED_MEMORY_WASTE_BUFFER_PERCENTAGE_KEY))
      .flatMap { value => Try(value.toDouble).toOption }
      .getOrElse(DEFAULT_ALLOCATED_MEMORY_WASTE_BUFFER_PERCENTAGE)

  private val hadoopAggregatedData: HadoopAggregatedData = new HadoopAggregatedData()

  /** Returns the metrics accumulated by [[aggregate]]. */
  override def getResult(): HadoopAggregatedData = hadoopAggregatedData

  /**
   * Aggregates metrics for the given application data.
   *
   * @throws IllegalArgumentException if `data` is not [[SparkApplicationData]]
   */
  override def aggregate(data: HadoopApplicationData): Unit = data match {
    case (data: SparkApplicationData) => aggregate(data)
    case _ => throw new IllegalArgumentException("data should be SparkApplicationData")
  }

  // Runs only when both the executor instance count and executor memory are
  // present in the application configuration; otherwise aggregation is skipped.
  private def aggregate(data: SparkApplicationData): Unit = for {
    executorInstances <- executorInstancesOf(data)
    executorMemoryBytes <- executorMemoryBytesOf(data)
  } {
    val applicationDurationMillis = applicationDurationMillisOf(data)
    if (applicationDurationMillis < 0) {
      // Negative duration means inconsistent attempt timestamps; don't aggregate.
      logger.warn(s"applicationDurationMillis is negative. Skipping Metrics Aggregation:${applicationDurationMillis}")
    } else {
      val totalExecutorTaskTimeMillis = totalExecutorTaskTimeMillisOf(data)
      val resourcesAllocatedForUse =
        aggregateResourcesAllocatedForUse(executorInstances, executorMemoryBytes, applicationDurationMillis)
      val resourcesActuallyUsed = aggregateResourcesActuallyUsed(executorMemoryBytes, totalExecutorTaskTimeMillis)
      val resourcesActuallyUsedWithBuffer = resourcesActuallyUsed.doubleValue() * (1.0 + allocatedMemoryWasteBufferPercentage)
      // Waste is whatever the allocation exceeds buffered usage by; never negative.
      val resourcesWastedMBSeconds =
        if (resourcesActuallyUsedWithBuffer < resourcesAllocatedForUse.doubleValue()) {
          resourcesAllocatedForUse.doubleValue() - resourcesActuallyUsedWithBuffer
        } else {
          0.0
        }
      //allocated is the total used resource from the cluster.
      if (resourcesAllocatedForUse.isValidLong) {
        hadoopAggregatedData.setResourceUsed(resourcesAllocatedForUse.toLong)
      } else {
        // Overflow: log every input so the offending application can be debugged.
        // NOTE: resourceUsed is intentionally left unset in this case.
        logger.warn(s"resourcesAllocatedForUse/resourcesWasted exceeds Long.MaxValue")
        logger.warn(s"ResourceUsed: ${resourcesAllocatedForUse}")
        logger.warn(s"executorInstances: ${executorInstances}")
        logger.warn(s"executorMemoryBytes:${executorMemoryBytes}")
        logger.warn(s"applicationDurationMillis:${applicationDurationMillis}")
        logger.warn(s"totalExecutorTaskTimeMillis:${totalExecutorTaskTimeMillis}")
        logger.warn(s"resourcesActuallyUsedWithBuffer:${resourcesActuallyUsedWithBuffer}")
        logger.warn(s"resourcesWastedMBSeconds:${resourcesWastedMBSeconds}")
        logger.warn(s"allocatedMemoryWasteBufferPercentage:${allocatedMemoryWasteBufferPercentage}")
      }
      hadoopAggregatedData.setResourceWasted(resourcesWastedMBSeconds.toLong)
    }
  }

  // MB-seconds actually consumed: executor memory integrated over total task time.
  private def aggregateResourcesActuallyUsed(executorMemoryBytes: Long, totalExecutorTaskTimeMillis: BigInt): BigInt = {
    val bytesMillis = BigInt(executorMemoryBytes) * totalExecutorTaskTimeMillis
    (bytesMillis / (BigInt(FileUtils.ONE_MB) * BigInt(Statistics.SECOND_IN_MS)))
  }

  // MB-seconds allocated: instances * executor memory over the app duration.
  private def aggregateResourcesAllocatedForUse(
    executorInstances: Int,
    executorMemoryBytes: Long,
    applicationDurationMillis: Long
  ): BigInt = {
    val bytesMillis = BigInt(executorInstances) * BigInt(executorMemoryBytes) * BigInt(applicationDurationMillis)
    (bytesMillis / (BigInt(FileUtils.ONE_MB) * BigInt(Statistics.SECOND_IN_MS)))
  }

  private def executorInstancesOf(data: SparkApplicationData): Option[Int] = {
    val appConfigurationProperties = data.appConfigurationProperties
    appConfigurationProperties.get(SPARK_EXECUTOR_INSTANCES_KEY).map(_.toInt)
  }

  private def executorMemoryBytesOf(data: SparkApplicationData): Option[Long] = {
    val appConfigurationProperties = data.appConfigurationProperties
    appConfigurationProperties.get(SPARK_EXECUTOR_MEMORY_KEY).map(MemoryFormatUtils.stringToBytes)
  }

  // Duration of the last attempt; requires at least one attempt to exist.
  private def applicationDurationMillisOf(data: SparkApplicationData): Long = {
    require(data.applicationInfo.attempts.nonEmpty)
    val lastApplicationAttemptInfo = data.applicationInfo.attempts.last
    lastApplicationAttemptInfo.endTime.getTime - lastApplicationAttemptInfo.startTime.getTime
  }

  private def totalExecutorTaskTimeMillisOf(data: SparkApplicationData): BigInt = {
    data.executorSummaries.map { executorSummary => BigInt(executorSummary.totalDuration) }.sum
  }
}
object SparkMetricsAggregator {
  /** The percentage of allocated memory we expect to waste because of overhead. */
  val DEFAULT_ALLOCATED_MEMORY_WASTE_BUFFER_PERCENTAGE = 0.5D
  /** Aggregator-configuration key overriding the default waste buffer above. */
  val ALLOCATED_MEMORY_WASTE_BUFFER_PERCENTAGE_KEY = "allocated_memory_waste_buffer_percentage"
  /** Spark configuration keys read to size the application's allocation. */
  val SPARK_EXECUTOR_INSTANCES_KEY = "spark.executor.instances"
  val SPARK_EXECUTOR_MEMORY_KEY = "spark.executor.memory"
}
| nntnag17/dr-elephant-1 | app/com/linkedin/drelephant/spark/SparkMetricsAggregator.scala | Scala | apache-2.0 | 6,237 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.master
import java.util.Date
import java.util.concurrent.{ConcurrentLinkedQueue, CountDownLatch, TimeUnit}
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.{HashMap, HashSet}
import scala.concurrent.duration._
import scala.io.Source
import scala.reflect.ClassTag
import org.json4s._
import org.json4s.jackson.JsonMethods._
import org.mockito.Mockito.{mock, when}
import org.scalatest.{BeforeAndAfter, PrivateMethodTester}
import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.must.Matchers
import org.scalatest.matchers.should.Matchers._
import other.supplier.{CustomPersistenceEngine, CustomRecoveryModeFactory}
import org.apache.spark.{SecurityManager, SparkConf, SparkFunSuite}
import org.apache.spark.deploy._
import org.apache.spark.deploy.DeployMessages._
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.Deploy._
import org.apache.spark.internal.config.UI._
import org.apache.spark.internal.config.Worker._
import org.apache.spark.resource.{ResourceInformation, ResourceRequirement}
import org.apache.spark.resource.ResourceUtils.{FPGA, GPU}
import org.apache.spark.rpc.{RpcAddress, RpcEndpoint, RpcEndpointRef, RpcEnv}
import org.apache.spark.serializer
import org.apache.spark.util.Utils
object MockWorker {
  // Monotonic counter shared by all MockWorkers; each instance draws a unique
  // value used both as its worker id and as its RPC port.
  val counter = new AtomicInteger(10000)
}
// Minimal Worker stand-in used by MasterSuite: records the executors/drivers
// the Master asks it to launch or kill, without running anything for real.
class MockWorker(master: RpcEndpointRef, conf: SparkConf = new SparkConf) extends RpcEndpoint {
  // Unique per-instance sequence number; doubles as worker id and RPC port.
  val seq = MockWorker.counter.incrementAndGet()
  val id = seq.toString
  override val rpcEnv: RpcEnv = RpcEnv.create("worker", "localhost", seq,
    conf, new SecurityManager(conf))
  // appId -> appId of applications confirmed registered by the Master.
  val apps = new mutable.HashMap[String, String]()
  val driverIdToAppId = new mutable.HashMap[String, String]()
  // Sets up a fake driver endpoint that records the app id once the Master
  // confirms registration via RegisteredApplication.
  def newDriver(driverId: String): RpcEndpointRef = {
    val name = s"driver_${drivers.size}"
    rpcEnv.setupEndpoint(name, new RpcEndpoint {
      override val rpcEnv: RpcEnv = MockWorker.this.rpcEnv
      override def receive: PartialFunction[Any, Unit] = {
        case RegisteredApplication(appId, _) =>
          apps(appId) = appId
          driverIdToAppId(driverId) = appId
      }
    })
  }
  var decommissioned = false
  var appDesc = DeployTestUtils.createAppDesc()
  // Ids of drivers this worker currently "runs".
  val drivers = mutable.HashSet[String]()
  // driverId / "appId/execId" -> resource name -> assigned addresses.
  val driverResources = new mutable.HashMap[String, Map[String, Set[String]]]
  val execResources = new mutable.HashMap[String, Map[String, Set[String]]]
  override def receive: PartialFunction[Any, Unit] = {
    case RegisteredWorker(masterRef, _, _, _) =>
      masterRef.send(WorkerLatestState(id, Nil, drivers.toSeq))
    case LaunchExecutor(_, appId, execId, _, _, _, resources_) =>
      // Only record the assignment; nothing is actually launched.
      execResources(appId + "/" + execId) = resources_.map(r => (r._1, r._2.addresses.toSet))
    case LaunchDriver(driverId, desc, resources_) =>
      drivers += driverId
      driverResources(driverId) = resources_.map(r => (r._1, r._2.addresses.toSet))
      // Mimic a driver registering its application with the Master.
      master.send(RegisterApplication(appDesc, newDriver(driverId)))
    case KillDriver(driverId) =>
      master.send(DriverStateChanged(driverId, DriverState.KILLED, None))
      drivers -= driverId
      driverResources.remove(driverId)
      // Also unregister the application this driver registered, if any.
      driverIdToAppId.get(driverId) match {
        case Some(appId) =>
          apps.remove(appId)
          master.send(UnregisterApplication(appId))
        case None =>
      }
      driverIdToAppId.remove(driverId)
    case DecommissionWorker =>
      decommissioned = true
  }
}
// This class is designed to handle the lifecycle of only one application.
// A MockWorker that deliberately fails every executor launch, so the Master's
// retry / dead-loop avoidance (SPARK-27510) can be exercised.
class MockExecutorLaunchFailWorker(master: Master, conf: SparkConf = new SparkConf)
  extends MockWorker(master.self, conf) with Eventually {
  // Released once the app is visible in the Master's bookkeeping.
  val appRegistered = new CountDownLatch(1)
  // Released on the first LaunchExecutor received.
  val launchExecutorReceived = new CountDownLatch(1)
  val appIdsToLaunchExecutor = new mutable.HashSet[String]
  // Number of executors this worker has reported as FAILED so far.
  var failedCnt = 0
  override def receive: PartialFunction[Any, Unit] = {
    case LaunchDriver(driverId, _, _) =>
      master.self.send(RegisterApplication(appDesc, newDriver(driverId)))
      // Below code doesn't make driver stuck, as newDriver opens another rpc endpoint for
      // handling driver related messages. To simplify logic, we will block handling
      // LaunchExecutor message until we validate registering app succeeds.
      eventually(timeout(5.seconds)) {
        // an app would be registered with Master once Driver set up
        assert(apps.nonEmpty)
        assert(master.idToApp.keySet.intersect(apps.keySet) == apps.keySet)
      }
      appRegistered.countDown()
    case LaunchExecutor(_, appId, execId, _, _, _, _) =>
      // Wait until the app registration above has been validated.
      assert(appRegistered.await(10, TimeUnit.SECONDS))
      if (failedCnt == 0) {
        launchExecutorReceived.countDown()
      }
      assert(master.idToApp.contains(appId))
      appIdsToLaunchExecutor += appId
      failedCnt += 1
      // Always report FAILED so the Master keeps reacting to launch failures.
      master.self.send(ExecutorStateChanged(appId, execId, ExecutorState.FAILED, None, None))
    case otherMsg => super.receive(otherMsg)
  }
}
class MasterSuite extends SparkFunSuite
with Matchers with Eventually with PrivateMethodTester with BeforeAndAfter {
  // regex to extract worker links from the master webui HTML
  // groups represent URL and worker ID
  val WORKER_LINK_RE = """<a href="(.+?)">\\s*(worker-.+?)\\s*</a>""".r
  // Master under test; created by makeMaster and torn down after each test
  // so its RpcEnv never leaks between tests.
  private var _master: Master = _
  after {
    if (_master != null) {
      _master.rpcEnv.shutdown()
      _master.rpcEnv.awaitTermination()
      _master = null
    }
  }
  // Verifies that RECOVERY_MODE=CUSTOM instantiates the user-supplied factory
  // and that its persistence engine round-trips apps, drivers and workers.
  test("can use a custom recovery mode factory") {
    val conf = new SparkConf(loadDefaults = false)
    conf.set(RECOVERY_MODE, "CUSTOM")
    conf.set(RECOVERY_MODE_FACTORY, classOf[CustomRecoveryModeFactory].getCanonicalName)
    conf.set(MASTER_REST_SERVER_ENABLED, false)
    // Snapshot the instantiation count so we can assert it increased.
    val instantiationAttempts = CustomRecoveryModeFactory.instantiationAttempts
    val commandToPersist = new Command(
      mainClass = "",
      arguments = Nil,
      environment = Map.empty,
      classPathEntries = Nil,
      libraryPathEntries = Nil,
      javaOpts = Nil
    )
    val appToPersist = new ApplicationInfo(
      startTime = 0,
      id = "test_app",
      desc = new ApplicationDescription(
        name = "",
        maxCores = None,
        memoryPerExecutorMB = 0,
        command = commandToPersist,
        appUiUrl = "",
        eventLogDir = None,
        eventLogCodec = None,
        coresPerExecutor = None),
      submitDate = new Date(),
      driver = null,
      defaultCores = 0
    )
    val driverToPersist = new DriverInfo(
      startTime = 0,
      id = "test_driver",
      desc = new DriverDescription(
        jarUrl = "",
        mem = 0,
        cores = 0,
        supervise = false,
        command = commandToPersist
      ),
      submitDate = new Date()
    )
    val workerToPersist = new WorkerInfo(
      id = "test_worker",
      host = "127.0.0.1",
      port = 10000,
      cores = 0,
      memory = 0,
      endpoint = null,
      webUiAddress = "http://localhost:80",
      Map.empty
    )
    val (rpcEnv, _, _) =
      Master.startRpcEnvAndEndpoint("127.0.0.1", 0, 0, conf)
    try {
      rpcEnv.setupEndpointRef(rpcEnv.address, Master.ENDPOINT_NAME)
      CustomPersistenceEngine.lastInstance.isDefined shouldBe true
      val persistenceEngine = CustomPersistenceEngine.lastInstance.get
      persistenceEngine.addApplication(appToPersist)
      persistenceEngine.addDriver(driverToPersist)
      persistenceEngine.addWorker(workerToPersist)
      // Everything persisted above must be readable back from the same engine.
      val (apps, drivers, workers) = persistenceEngine.readPersistedData(rpcEnv)
      apps.map(_.id) should contain(appToPersist.id)
      drivers.map(_.id) should contain(driverToPersist.id)
      workers.map(_.id) should contain(workerToPersist.id)
    } finally {
      rpcEnv.shutdown()
      rpcEnv.awaitTermination()
    }
    CustomRecoveryModeFactory.instantiationAttempts should be > instantiationAttempts
  }
  // Rebuilds a Master from fake persisted data and walks it through the full
  // recovery protocol (MasterChangeAcknowledged + WorkerSchedulerStateResponse)
  // until it reaches ALIVE with consistent app/driver/worker bookkeeping.
  test("master correctly recover the application") {
    val conf = new SparkConf(loadDefaults = false)
    conf.set(RECOVERY_MODE, "CUSTOM")
    conf.set(RECOVERY_MODE_FACTORY, classOf[FakeRecoveryModeFactory].getCanonicalName)
    conf.set(MASTER_REST_SERVER_ENABLED, false)
    val fakeAppInfo = makeAppInfo(1024)
    val fakeWorkerInfo = makeWorkerInfo(8192, 16)
    val fakeDriverInfo = new DriverInfo(
      startTime = 0,
      id = "test_driver",
      desc = new DriverDescription(
        jarUrl = "",
        mem = 1024,
        cores = 1,
        supervise = false,
        command = new Command("", Nil, Map.empty, Nil, Nil, Nil)),
      submitDate = new Date())
    // Build the fake recovery data
    FakeRecoveryModeFactory.persistentData.put(s"app_${fakeAppInfo.id}", fakeAppInfo)
    FakeRecoveryModeFactory.persistentData.put(s"driver_${fakeDriverInfo.id}", fakeDriverInfo)
    FakeRecoveryModeFactory.persistentData.put(s"worker_${fakeWorkerInfo.id}", fakeWorkerInfo)
    var master: Master = null
    try {
      master = makeMaster(conf)
      master.rpcEnv.setupEndpoint(Master.ENDPOINT_NAME, master)
      // Wait until Master recover from checkpoint data.
      eventually(timeout(5.seconds), interval(100.milliseconds)) {
        master.workers.size should be(1)
      }
      master.idToApp.keySet should be(Set(fakeAppInfo.id))
      getDrivers(master) should be(Set(fakeDriverInfo))
      master.workers should be(Set(fakeWorkerInfo))
      // Notify Master about the executor and driver info to make it correctly recovered.
      val fakeExecutors = List(
        new ExecutorDescription(fakeAppInfo.id, 0, 8, ExecutorState.RUNNING),
        new ExecutorDescription(fakeAppInfo.id, 0, 7, ExecutorState.RUNNING))
      fakeAppInfo.state should be(ApplicationState.UNKNOWN)
      fakeWorkerInfo.coresFree should be(16)
      fakeWorkerInfo.coresUsed should be(0)
      master.self.send(MasterChangeAcknowledged(fakeAppInfo.id))
      eventually(timeout(1.second), interval(10.milliseconds)) {
        // Application state should be WAITING when "MasterChangeAcknowledged" event executed.
        fakeAppInfo.state should be(ApplicationState.WAITING)
      }
      val execResponse = fakeExecutors.map(exec =>
        WorkerExecutorStateResponse(exec, Map.empty[String, ResourceInformation]))
      val driverResponse = WorkerDriverStateResponse(
        fakeDriverInfo.id, Map.empty[String, ResourceInformation])
      master.self.send(WorkerSchedulerStateResponse(
        fakeWorkerInfo.id, execResponse, Seq(driverResponse)))
      eventually(timeout(5.seconds), interval(100.milliseconds)) {
        getState(master) should be(RecoveryState.ALIVE)
      }
      // If driver's resource is also counted, free cores should 0
      fakeWorkerInfo.coresFree should be(0)
      fakeWorkerInfo.coresUsed should be(16)
      // State of application should be RUNNING
      fakeAppInfo.state should be(ApplicationState.RUNNING)
    } finally {
      if (master != null) {
        master.rpcEnv.shutdown()
        master.rpcEnv.awaitTermination()
        master = null
        FakeRecoveryModeFactory.persistentData.clear()
      }
    }
  }
  // Starts a 2-worker local cluster and checks both the JSON endpoints and the
  // rendered HTML of master and worker UIs (no reverse proxy).
  test("master/worker web ui available") {
    implicit val formats = org.json4s.DefaultFormats
    val conf = new SparkConf()
    val localCluster = new LocalSparkCluster(2, 2, 512, conf)
    localCluster.start()
    val masterUrl = s"http://localhost:${localCluster.masterWebUIPort}"
    try {
      eventually(timeout(5.seconds), interval(100.milliseconds)) {
        val json = Utils
          .tryWithResource(Source.fromURL(s"$masterUrl/json"))(_.getLines().mkString("\\n"))
        val JArray(workers) = (parse(json) \\ "workers")
        workers.size should be (2)
        workers.foreach { workerSummaryJson =>
          // Each worker's own UI must be reachable at its advertised address.
          val JString(workerWebUi) = workerSummaryJson \\ "webuiaddress"
          val workerResponse = parse(Utils
            .tryWithResource(Source.fromURL(s"$workerWebUi/json"))(_.getLines().mkString("\\n")))
          (workerResponse \\ "cores").extract[Int] should be (2)
        }
        val html = Utils
          .tryWithResource(Source.fromURL(s"$masterUrl/"))(_.getLines().mkString("\\n"))
        html should include ("Spark Master at spark://")
        // The master page must link both workers; follow the links directly.
        val workerLinks = (WORKER_LINK_RE findAllMatchIn html).toList
        workerLinks.size should be (2)
        workerLinks foreach { case WORKER_LINK_RE(workerUrl, workerId) =>
          val workerHtml = Utils
            .tryWithResource(Source.fromURL(workerUrl))(_.getLines().mkString("\\n"))
          workerHtml should include ("Spark Worker at")
          workerHtml should include ("Running Executors (0)")
        }
      }
    } finally {
      localCluster.stop()
    }
  }
  // Same checks as above, but with UI_REVERSE_PROXY enabled: worker UIs are
  // reached through the master's /proxy/<workerId> endpoint.
  test("master/worker web ui available with reverseProxy") {
    implicit val formats = org.json4s.DefaultFormats
    val conf = new SparkConf()
    conf.set(UI_REVERSE_PROXY, true)
    val localCluster = new LocalSparkCluster(2, 2, 512, conf)
    localCluster.start()
    val masterUrl = s"http://localhost:${localCluster.masterWebUIPort}"
    try {
      eventually(timeout(5.seconds), interval(100.milliseconds)) {
        val json = Utils
          .tryWithResource(Source.fromURL(s"$masterUrl/json"))(_.getLines().mkString("\\n"))
        val JArray(workers) = (parse(json) \\ "workers")
        workers.size should be (2)
        workers.foreach { workerSummaryJson =>
          // the webuiaddress intentionally points to the local web ui.
          // explicitly construct reverse proxy url targeting the master
          val JString(workerId) = workerSummaryJson \\ "id"
          val url = s"$masterUrl/proxy/${workerId}/json"
          val workerResponse = parse(
            Utils.tryWithResource(Source.fromURL(url))(_.getLines().mkString("\\n")))
          (workerResponse \\ "cores").extract[Int] should be (2)
        }
        val html = Utils
          .tryWithResource(Source.fromURL(s"$masterUrl/"))(_.getLines().mkString("\\n"))
        html should include ("Spark Master at spark://")
        // With no external front-end proxy, the master serves its own statics.
        html should include ("""href="/static""")
        html should include ("""src="/static""")
        verifyWorkerUI(html, masterUrl)
      }
    } finally {
      localCluster.stop()
      System.getProperties().remove("spark.ui.proxyBase")
    }
  }
  // Reverse proxy with an external front-end URL: all links and static assets
  // must be rewritten to live under the configured reverseProxyUrl.
  test("master/worker web ui available behind front-end reverseProxy") {
    implicit val formats = org.json4s.DefaultFormats
    val reverseProxyUrl = "http://proxyhost:8080/path/to/spark"
    val conf = new SparkConf()
    conf.set(UI_REVERSE_PROXY, true)
    conf.set(UI_REVERSE_PROXY_URL, reverseProxyUrl)
    val localCluster = new LocalSparkCluster(2, 2, 512, conf)
    localCluster.start()
    val masterUrl = s"http://localhost:${localCluster.masterWebUIPort}"
    try {
      eventually(timeout(5.seconds), interval(100.milliseconds)) {
        val json = Utils
          .tryWithResource(Source.fromURL(s"$masterUrl/json"))(_.getLines().mkString("\\n"))
        val JArray(workers) = (parse(json) \\ "workers")
        workers.size should be (2)
        workers.foreach { workerSummaryJson =>
          // the webuiaddress intentionally points to the local web ui.
          // explicitly construct reverse proxy url targeting the master
          val JString(workerId) = workerSummaryJson \\ "id"
          val url = s"$masterUrl/proxy/${workerId}/json"
          val workerResponse = parse(Utils
            .tryWithResource(Source.fromURL(url))(_.getLines().mkString("\\n")))
          (workerResponse \\ "cores").extract[Int] should be (2)
          (workerResponse \\ "masterwebuiurl").extract[String] should be (reverseProxyUrl + "/")
        }
        // with LocalCluster, we have masters and workers in the same JVM, each overwriting
        // system property spark.ui.proxyBase.
        // so we need to manage this property explicitly for test
        // NOTE(review): `should startWith` and its argument sit on separate
        // lines; verify this parses as one assertion rather than two statements.
        System.getProperty("spark.ui.proxyBase") should startWith
          (s"$reverseProxyUrl/proxy/worker-")
        System.setProperty("spark.ui.proxyBase", reverseProxyUrl)
        val html = Utils
          .tryWithResource(Source.fromURL(s"$masterUrl/"))(_.getLines().mkString("\\n"))
        html should include ("Spark Master at spark://")
        verifyStaticResourcesServedByProxy(html, reverseProxyUrl)
        verifyWorkerUI(html, masterUrl, reverseProxyUrl)
      }
    } finally {
      localCluster.stop()
      System.getProperties().remove("spark.ui.proxyBase")
    }
  }
  // Follows each worker link found in the master page (through the proxy) and
  // checks the worker UI renders with proxied static resources.
  private def verifyWorkerUI(masterHtml: String, masterUrl: String,
      reverseProxyUrl: String = ""): Unit = {
    val workerLinks = (WORKER_LINK_RE findAllMatchIn masterHtml).toList
    workerLinks.size should be (2)
    workerLinks foreach {
      case WORKER_LINK_RE(workerUrl, workerId) =>
        workerUrl should be (s"$reverseProxyUrl/proxy/$workerId")
        // there is no real front-end proxy as defined in $reverseProxyUrl
        // construct url directly targeting the master
        val url = s"$masterUrl/proxy/$workerId/"
        System.setProperty("spark.ui.proxyBase", workerUrl)
        val workerHtml = Utils
          .tryWithResource(Source.fromURL(url))(_.getLines().mkString("\\n"))
        workerHtml should include ("Spark Worker at")
        workerHtml should include ("Running Executors (0)")
        verifyStaticResourcesServedByProxy(workerHtml, workerUrl)
      case _ => fail // make sure we don't accidentially skip the tests
    }
  }
private def verifyStaticResourcesServedByProxy(html: String, proxyUrl: String): Unit = {
html should not include ("""href="/static""")
html should include (s"""href="$proxyUrl/static""")
html should not include ("""src="/static""")
html should include (s"""src="$proxyUrl/static""")
}
  // Each scheduling scenario below is exercised twice: once with spread-out
  // scheduling (executors across workers) and once without (fill workers).
  test("basic scheduling - spread out") {
    basicScheduling(spreadOut = true)
  }
  test("basic scheduling - no spread out") {
    basicScheduling(spreadOut = false)
  }
  test("basic scheduling with more memory - spread out") {
    basicSchedulingWithMoreMemory(spreadOut = true)
  }
  test("basic scheduling with more memory - no spread out") {
    basicSchedulingWithMoreMemory(spreadOut = false)
  }
  test("scheduling with max cores - spread out") {
    schedulingWithMaxCores(spreadOut = true)
  }
  test("scheduling with max cores - no spread out") {
    schedulingWithMaxCores(spreadOut = false)
  }
  test("scheduling with cores per executor - spread out") {
    schedulingWithCoresPerExecutor(spreadOut = true)
  }
  test("scheduling with cores per executor - no spread out") {
    schedulingWithCoresPerExecutor(spreadOut = false)
  }
  test("scheduling with cores per executor AND max cores - spread out") {
    schedulingWithCoresPerExecutorAndMaxCores(spreadOut = true)
  }
  test("scheduling with cores per executor AND max cores - no spread out") {
    schedulingWithCoresPerExecutorAndMaxCores(spreadOut = false)
  }
  test("scheduling with executor limit - spread out") {
    schedulingWithExecutorLimit(spreadOut = true)
  }
  test("scheduling with executor limit - no spread out") {
    schedulingWithExecutorLimit(spreadOut = false)
  }
  test("scheduling with executor limit AND max cores - spread out") {
    schedulingWithExecutorLimitAndMaxCores(spreadOut = true)
  }
  test("scheduling with executor limit AND max cores - no spread out") {
    schedulingWithExecutorLimitAndMaxCores(spreadOut = false)
  }
  test("scheduling with executor limit AND cores per executor - spread out") {
    schedulingWithExecutorLimitAndCoresPerExecutor(spreadOut = true)
  }
  test("scheduling with executor limit AND cores per executor - no spread out") {
    schedulingWithExecutorLimitAndCoresPerExecutor(spreadOut = false)
  }
  test("scheduling with executor limit AND cores per executor AND max cores - spread out") {
    schedulingWithEverything(spreadOut = true)
  }
  test("scheduling with executor limit AND cores per executor AND max cores - no spread out") {
    schedulingWithEverything(spreadOut = false)
  }
private def basicScheduling(spreadOut: Boolean): Unit = {
val master = makeMaster()
val appInfo = makeAppInfo(1024)
val scheduledCores = scheduleExecutorsOnWorkers(master, appInfo, workerInfos, spreadOut)
assert(scheduledCores === Array(10, 10, 10))
}
private def basicSchedulingWithMoreMemory(spreadOut: Boolean): Unit = {
val master = makeMaster()
val appInfo = makeAppInfo(3072)
val scheduledCores = scheduleExecutorsOnWorkers(master, appInfo, workerInfos, spreadOut)
assert(scheduledCores === Array(10, 10, 10))
}
private def schedulingWithMaxCores(spreadOut: Boolean): Unit = {
val master = makeMaster()
val appInfo1 = makeAppInfo(1024, maxCores = Some(8))
val appInfo2 = makeAppInfo(1024, maxCores = Some(16))
val scheduledCores1 = scheduleExecutorsOnWorkers(master, appInfo1, workerInfos, spreadOut)
val scheduledCores2 = scheduleExecutorsOnWorkers(master, appInfo2, workerInfos, spreadOut)
if (spreadOut) {
assert(scheduledCores1 === Array(3, 3, 2))
assert(scheduledCores2 === Array(6, 5, 5))
} else {
assert(scheduledCores1 === Array(8, 0, 0))
assert(scheduledCores2 === Array(10, 6, 0))
}
}
private def schedulingWithCoresPerExecutor(spreadOut: Boolean): Unit = {
val master = makeMaster()
val appInfo1 = makeAppInfo(1024, coresPerExecutor = Some(2))
val appInfo2 = makeAppInfo(256, coresPerExecutor = Some(2))
val appInfo3 = makeAppInfo(256, coresPerExecutor = Some(3))
val scheduledCores1 = scheduleExecutorsOnWorkers(master, appInfo1, workerInfos, spreadOut)
val scheduledCores2 = scheduleExecutorsOnWorkers(master, appInfo2, workerInfos, spreadOut)
val scheduledCores3 = scheduleExecutorsOnWorkers(master, appInfo3, workerInfos, spreadOut)
assert(scheduledCores1 === Array(8, 8, 8)) // 4 * 2 because of memory limits
assert(scheduledCores2 === Array(10, 10, 10)) // 5 * 2
assert(scheduledCores3 === Array(9, 9, 9)) // 3 * 3
}
// Sorry for the long method name!
private def schedulingWithCoresPerExecutorAndMaxCores(spreadOut: Boolean): Unit = {
val master = makeMaster()
val appInfo1 = makeAppInfo(256, coresPerExecutor = Some(2), maxCores = Some(4))
val appInfo2 = makeAppInfo(256, coresPerExecutor = Some(2), maxCores = Some(20))
val appInfo3 = makeAppInfo(256, coresPerExecutor = Some(3), maxCores = Some(20))
val scheduledCores1 = scheduleExecutorsOnWorkers(master, appInfo1, workerInfos, spreadOut)
val scheduledCores2 = scheduleExecutorsOnWorkers(master, appInfo2, workerInfos, spreadOut)
val scheduledCores3 = scheduleExecutorsOnWorkers(master, appInfo3, workerInfos, spreadOut)
if (spreadOut) {
assert(scheduledCores1 === Array(2, 2, 0))
assert(scheduledCores2 === Array(8, 6, 6))
assert(scheduledCores3 === Array(6, 6, 6))
} else {
assert(scheduledCores1 === Array(4, 0, 0))
assert(scheduledCores2 === Array(10, 10, 0))
assert(scheduledCores3 === Array(9, 9, 0))
}
}
private def schedulingWithExecutorLimit(spreadOut: Boolean): Unit = {
val master = makeMaster()
val appInfo = makeAppInfo(256)
appInfo.executorLimit = 0
val scheduledCores1 = scheduleExecutorsOnWorkers(master, appInfo, workerInfos, spreadOut)
appInfo.executorLimit = 2
val scheduledCores2 = scheduleExecutorsOnWorkers(master, appInfo, workerInfos, spreadOut)
appInfo.executorLimit = 5
val scheduledCores3 = scheduleExecutorsOnWorkers(master, appInfo, workerInfos, spreadOut)
assert(scheduledCores1 === Array(0, 0, 0))
assert(scheduledCores2 === Array(10, 10, 0))
assert(scheduledCores3 === Array(10, 10, 10))
}
private def schedulingWithExecutorLimitAndMaxCores(spreadOut: Boolean): Unit = {
val master = makeMaster()
val appInfo = makeAppInfo(256, maxCores = Some(16))
appInfo.executorLimit = 0
val scheduledCores1 = scheduleExecutorsOnWorkers(master, appInfo, workerInfos, spreadOut)
appInfo.executorLimit = 2
val scheduledCores2 = scheduleExecutorsOnWorkers(master, appInfo, workerInfos, spreadOut)
appInfo.executorLimit = 5
val scheduledCores3 = scheduleExecutorsOnWorkers(master, appInfo, workerInfos, spreadOut)
assert(scheduledCores1 === Array(0, 0, 0))
if (spreadOut) {
assert(scheduledCores2 === Array(8, 8, 0))
assert(scheduledCores3 === Array(6, 5, 5))
} else {
assert(scheduledCores2 === Array(10, 6, 0))
assert(scheduledCores3 === Array(10, 6, 0))
}
}
private def schedulingWithExecutorLimitAndCoresPerExecutor(spreadOut: Boolean): Unit = {
val master = makeMaster()
val appInfo = makeAppInfo(256, coresPerExecutor = Some(4))
appInfo.executorLimit = 0
val scheduledCores1 = scheduleExecutorsOnWorkers(master, appInfo, workerInfos, spreadOut)
appInfo.executorLimit = 2
val scheduledCores2 = scheduleExecutorsOnWorkers(master, appInfo, workerInfos, spreadOut)
appInfo.executorLimit = 5
val scheduledCores3 = scheduleExecutorsOnWorkers(master, appInfo, workerInfos, spreadOut)
assert(scheduledCores1 === Array(0, 0, 0))
if (spreadOut) {
assert(scheduledCores2 === Array(4, 4, 0))
} else {
assert(scheduledCores2 === Array(8, 0, 0))
}
assert(scheduledCores3 === Array(8, 8, 4))
}
// Everything being: executor limit + cores per executor + max cores
private def schedulingWithEverything(spreadOut: Boolean): Unit = {
val master = makeMaster()
val appInfo = makeAppInfo(256, coresPerExecutor = Some(4), maxCores = Some(18))
appInfo.executorLimit = 0
val scheduledCores1 = scheduleExecutorsOnWorkers(master, appInfo, workerInfos, spreadOut)
appInfo.executorLimit = 2
val scheduledCores2 = scheduleExecutorsOnWorkers(master, appInfo, workerInfos, spreadOut)
appInfo.executorLimit = 5
val scheduledCores3 = scheduleExecutorsOnWorkers(master, appInfo, workerInfos, spreadOut)
assert(scheduledCores1 === Array(0, 0, 0))
if (spreadOut) {
assert(scheduledCores2 === Array(4, 4, 0))
assert(scheduledCores3 === Array(8, 4, 4))
} else {
assert(scheduledCores2 === Array(8, 0, 0))
assert(scheduledCores3 === Array(8, 8, 0))
}
}
// ==========================================
// | Utility methods and fields for testing |
// ==========================================
  // Handles to Master's private members, accessed via PrivateMethodTester.
  private val _scheduleExecutorsOnWorkers =
    PrivateMethod[Array[Int]](Symbol("scheduleExecutorsOnWorkers"))
  private val _drivers = PrivateMethod[HashSet[DriverInfo]](Symbol("drivers"))
  private val _state = PrivateMethod[RecoveryState.Value](Symbol("state"))
  // Three workers with 4096 MB / 10 cores each (same shared WorkerInfo instance).
  private val workerInfo = makeWorkerInfo(4096, 10)
  private val workerInfos = Array(workerInfo, workerInfo, workerInfo)
private def makeMaster(conf: SparkConf = new SparkConf): Master = {
assert(_master === null, "Some Master's RpcEnv is leaked in tests")
val securityMgr = new SecurityManager(conf)
val rpcEnv = RpcEnv.create(Master.SYSTEM_NAME, "localhost", 0, conf, securityMgr)
_master = new Master(rpcEnv, rpcEnv.address, 0, securityMgr, conf)
_master
}
  // Creates a Master, registers its RPC endpoint, and blocks (up to 10s)
  // until it reports RecoveryState.ALIVE.
  def makeAliveMaster(conf: SparkConf = new SparkConf): Master = {
    val master = makeMaster(conf)
    master.rpcEnv.setupEndpoint(Master.ENDPOINT_NAME, master)
    eventually(timeout(10.seconds)) {
      val masterState = master.self.askSync[MasterStateResponse](RequestMasterState)
      assert(masterState.status === RecoveryState.ALIVE, "Master is not alive")
    }
    master
  }
private def makeAppInfo(
memoryPerExecutorMb: Int,
coresPerExecutor: Option[Int] = None,
maxCores: Option[Int] = None): ApplicationInfo = {
val desc = new ApplicationDescription(
"test", maxCores, memoryPerExecutorMb, null, "", None, None, coresPerExecutor)
val appId = System.currentTimeMillis.toString
val endpointRef = mock(classOf[RpcEndpointRef])
val mockAddress = mock(classOf[RpcAddress])
when(endpointRef.address).thenReturn(mockAddress)
new ApplicationInfo(0, appId, desc, new Date, endpointRef, Int.MaxValue)
}
private def makeWorkerInfo(memoryMb: Int, cores: Int): WorkerInfo = {
val workerId = System.currentTimeMillis.toString
val endpointRef = mock(classOf[RpcEndpointRef])
val mockAddress = mock(classOf[RpcAddress])
when(endpointRef.address).thenReturn(mockAddress)
new WorkerInfo(workerId, "host", 100, cores, memoryMb,
endpointRef, "http://localhost:80", Map.empty)
}
  // Invokes Master's private scheduleExecutorsOnWorkers via PrivateMethodTester;
  // returns the number of cores assigned to each worker (same order as input).
  private def scheduleExecutorsOnWorkers(
      master: Master,
      appInfo: ApplicationInfo,
      workerInfos: Array[WorkerInfo],
      spreadOut: Boolean): Array[Int] = {
    master.invokePrivate(_scheduleExecutorsOnWorkers(appInfo, workerInfos, spreadOut))
  }
  // A worker reporting executors/drivers the Master does not know about must
  // be told to kill them.
  test("SPARK-13604: Master should ask Worker kill unknown executors and drivers") {
    val master = makeAliveMaster()
    val killedExecutors = new ConcurrentLinkedQueue[(String, Int)]()
    val killedDrivers = new ConcurrentLinkedQueue[String]()
    // Fake worker endpoint that just records kill requests from the Master.
    val fakeWorker = master.rpcEnv.setupEndpoint("worker", new RpcEndpoint {
      override val rpcEnv: RpcEnv = master.rpcEnv
      override def receive: PartialFunction[Any, Unit] = {
        case KillExecutor(_, appId, execId) => killedExecutors.add((appId, execId))
        case KillDriver(driverId) => killedDrivers.add(driverId)
      }
    })
    master.self.send(RegisterWorker(
      "1",
      "localhost",
      9999,
      fakeWorker,
      10,
      1024,
      "http://localhost:8080",
      RpcAddress("localhost", 9999)))
    // None of these executors/drivers were launched by this Master.
    val executors = (0 until 3).map { i =>
      new ExecutorDescription(appId = i.toString, execId = i, 2, ExecutorState.RUNNING)
    }
    master.self.send(WorkerLatestState("1", executors, driverIds = Seq("0", "1", "2")))
    eventually(timeout(10.seconds)) {
      assert(killedExecutors.asScala.toList.sorted === List("0" -> 0, "1" -> 1, "2" -> 2))
      assert(killedDrivers.asScala.toList.sorted === List("0", "1", "2"))
    }
  }
  // The Master must echo back the address the worker claimed in RegisterWorker
  // (localhost2:10000), not the transport-level address it observed.
  test("SPARK-20529: Master should reply the address received from worker") {
    val master = makeAliveMaster()
    @volatile var receivedMasterAddress: RpcAddress = null
    val fakeWorker = master.rpcEnv.setupEndpoint("worker", new RpcEndpoint {
      override val rpcEnv: RpcEnv = master.rpcEnv
      override def receive: PartialFunction[Any, Unit] = {
        case RegisteredWorker(_, _, masterAddress, _) =>
          receivedMasterAddress = masterAddress
      }
    })
    master.self.send(RegisterWorker(
      "1",
      "localhost",
      9999,
      fakeWorker,
      10,
      1024,
      "http://localhost:8080",
      RpcAddress("localhost2", 10000)))
    eventually(timeout(10.seconds)) {
      assert(receivedMasterAddress === RpcAddress("localhost2", 10000))
    }
  }
  test("SPARK-27510: Master should avoid dead loop while launching executor failed in Worker") {
    val master = makeAliveMaster()
    var worker: MockExecutorLaunchFailWorker = null
    try {
      val conf = new SparkConf()
      // SPARK-32250: When running test on GitHub Action machine, the available processors in JVM
      // is only 2, while on Jenkins it's 32. For this specific test, 2 available processors, which
      // also decides number of threads in Dispatcher, is not enough to consume the messages. In
      // the worst situation, MockExecutorLaunchFailWorker would occupy these 2 threads for
      // handling messages LaunchDriver, LaunchExecutor at the same time but leave no thread for
      // the driver to handle the message RegisteredApplication. At the end, it results in the dead
      // lock situation. Therefore, we need to set more threads to avoid the dead lock.
      conf.set(Network.RPC_NETTY_DISPATCHER_NUM_THREADS, 6)
      worker = new MockExecutorLaunchFailWorker(master, conf)
      worker.rpcEnv.setupEndpoint("worker", worker)
      val workerRegMsg = RegisterWorker(
        worker.id,
        "localhost",
        9999,
        worker.self,
        10,
        1234 * 3,
        "http://localhost:8080",
        master.rpcEnv.address)
      master.self.send(workerRegMsg)
      val driver = DeployTestUtils.createDriverDesc()
      // mimic DriverClient to send RequestSubmitDriver to master
      master.self.askSync[SubmitDriverResponse](RequestSubmitDriver(driver))
      // LaunchExecutor message should have been received in worker side
      assert(worker.launchExecutorReceived.await(10, TimeUnit.SECONDS))
      eventually(timeout(10.seconds)) {
        val appIds = worker.appIdsToLaunchExecutor
        // Master would continually launch executors until reach MAX_EXECUTOR_RETRIES
        assert(worker.failedCnt == master.conf.get(MAX_EXECUTOR_RETRIES))
        // Master would remove the app if no executor could be launched for it
        assert(master.idToApp.keySet.intersect(appIds).isEmpty)
      }
    } finally {
      // Shut down both rpcEnvs even if assertions above fail.
      if (worker != null) {
        worker.rpcEnv.shutdown()
      }
      if (master != null) {
        master.rpcEnv.shutdown()
      }
    }
  }
  /**
   * Shared scenario for the host-decommission tests below: registers numWorkers
   * mock workers (all on "localhost"), decommissions by hostname, and verifies
   * exactly numWorkersExpectedToDecom workers end up DECOMMISSIONED.
   */
  def testWorkerDecommissioning(
      numWorkers: Int,
      numWorkersExpectedToDecom: Int,
      hostnames: Seq[String]): Unit = {
    val conf = new SparkConf()
    val master = makeAliveMaster(conf)
    val workers = (1 to numWorkers).map { idx =>
      val worker = new MockWorker(master.self, conf)
      worker.rpcEnv.setupEndpoint(s"worker-$idx", worker)
      val workerReg = RegisterWorker(
        worker.id,
        "localhost",
        worker.self.address.port,
        worker.self,
        10,
        1024,
        "http://localhost:8080",
        RpcAddress("localhost", 10000))
      master.self.send(workerReg)
      worker
    }
    // Wait until the master has registered all workers as ALIVE before decommissioning.
    eventually(timeout(10.seconds)) {
      val masterState = master.self.askSync[MasterStateResponse](RequestMasterState)
      assert(masterState.workers.length === numWorkers)
      assert(masterState.workers.forall(_.state == WorkerState.ALIVE))
      assert(masterState.workers.map(_.id).toSet == workers.map(_.id).toSet)
    }
    val decomWorkersCount = master.self.askSync[Integer](DecommissionWorkersOnHosts(hostnames))
    assert(decomWorkersCount === numWorkersExpectedToDecom)
    // Decommissioning is actually async ... wait for the workers to actually be decommissioned by
    // polling the master's state.
    eventually(timeout(30.seconds)) {
      val masterState = master.self.askSync[MasterStateResponse](RequestMasterState)
      assert(masterState.workers.length === numWorkers)
      val workersActuallyDecomed = masterState.workers
        .filter(_.state == WorkerState.DECOMMISSIONED).map(_.id)
      val decommissionedWorkers = workers.filter(w => workersActuallyDecomed.contains(w.id))
      assert(workersActuallyDecomed.length === numWorkersExpectedToDecom)
      assert(decommissionedWorkers.forall(_.decommissioned))
    }
    // Decommissioning a worker again should return the same answer since we want this call to be
    // idempotent.
    val decomWorkersCountAgain = master.self.askSync[Integer](DecommissionWorkersOnHosts(hostnames))
    assert(decomWorkersCountAgain === numWorkersExpectedToDecom)
  }
  // The three cases below exercise host matching in DecommissionWorkersOnHosts:
  // workers register as "localhost" but are looked up with mixed-case names,
  // demonstrating case-insensitive matching; unknown hosts decommission nothing.
  test("All workers on a host should be decommissioned") {
    testWorkerDecommissioning(2, 2, Seq("LoCalHost", "localHOST"))
  }
  test("No workers should be decommissioned with invalid host") {
    testWorkerDecommissioning(2, 0, Seq("NoSuchHost1", "NoSuchHost2"))
  }
  test("Only worker on host should be decommissioned") {
    testWorkerDecommissioning(1, 1, Seq("lOcalHost", "NoSuchHost"))
  }
  test("SPARK-19900: there should be a corresponding driver for the app after relaunching driver") {
    // Short worker timeout so worker1 is declared DEAD quickly once it stops heartbeating.
    val conf = new SparkConf().set(WORKER_TIMEOUT, 1L)
    val master = makeAliveMaster(conf)
    var worker1: MockWorker = null
    var worker2: MockWorker = null
    try {
      // Phase 1: worker1 registers and receives the supervised driver + its app.
      worker1 = new MockWorker(master.self)
      worker1.rpcEnv.setupEndpoint("worker", worker1)
      val worker1Reg = RegisterWorker(
        worker1.id,
        "localhost",
        9998,
        worker1.self,
        10,
        1024,
        "http://localhost:8080",
        RpcAddress("localhost2", 10000))
      master.self.send(worker1Reg)
      val driver = DeployTestUtils.createDriverDesc().copy(supervise = true)
      master.self.askSync[SubmitDriverResponse](RequestSubmitDriver(driver))
      eventually(timeout(10.seconds)) {
        assert(worker1.apps.nonEmpty)
      }
      // Phase 2: worker1 times out (no heartbeats) and is marked DEAD.
      eventually(timeout(10.seconds)) {
        val masterState = master.self.askSync[MasterStateResponse](RequestMasterState)
        assert(masterState.workers(0).state == WorkerState.DEAD)
      }
      // Phase 3: worker2 registers; the supervised driver is relaunched there.
      worker2 = new MockWorker(master.self)
      worker2.rpcEnv.setupEndpoint("worker", worker2)
      master.self.send(RegisterWorker(
        worker2.id,
        "localhost",
        9999,
        worker2.self,
        10,
        1024,
        "http://localhost:8081",
        RpcAddress("localhost", 10001)))
      eventually(timeout(10.seconds)) {
        assert(worker2.apps.nonEmpty)
      }
      // Phase 4: the dead worker1 re-registers; its stale driver/app state must be
      // cleaned up, leaving exactly one active driver and one active app on worker2.
      master.self.send(worker1Reg)
      eventually(timeout(10.seconds)) {
        val masterState = master.self.askSync[MasterStateResponse](RequestMasterState)
        val worker = masterState.workers.filter(w => w.id == worker1.id)
        assert(worker.length == 1)
        // make sure the `DriverStateChanged` arrives at Master.
        assert(worker(0).drivers.isEmpty)
        assert(worker1.apps.isEmpty)
        assert(worker1.drivers.isEmpty)
        assert(worker2.apps.size == 1)
        assert(worker2.drivers.size == 1)
        assert(masterState.activeDrivers.length == 1)
        assert(masterState.activeApps.length == 1)
      }
    } finally {
      if (worker1 != null) {
        worker1.rpcEnv.shutdown()
      }
      if (worker2 != null) {
        worker2.rpcEnv.shutdown()
      }
    }
  }
test("assign/recycle resources to/from driver") {
val master = makeAliveMaster()
val masterRef = master.self
val resourceReqs = Seq(ResourceRequirement(GPU, 3), ResourceRequirement(FPGA, 3))
val driver = DeployTestUtils.createDriverDesc().copy(resourceReqs = resourceReqs)
val driverId = masterRef.askSync[SubmitDriverResponse](
RequestSubmitDriver(driver)).driverId.get
var status = masterRef.askSync[DriverStatusResponse](RequestDriverStatus(driverId))
assert(status.state === Some(DriverState.SUBMITTED))
val worker = new MockWorker(masterRef)
worker.rpcEnv.setupEndpoint(s"worker", worker)
val resources = Map(GPU -> new ResourceInformation(GPU, Array("0", "1", "2")),
FPGA -> new ResourceInformation(FPGA, Array("f1", "f2", "f3")))
val regMsg = RegisterWorker(worker.id, "localhost", 7077, worker.self, 10, 1024,
"http://localhost:8080", RpcAddress("localhost", 10000), resources)
masterRef.send(regMsg)
eventually(timeout(10.seconds)) {
status = masterRef.askSync[DriverStatusResponse](RequestDriverStatus(driverId))
assert(status.state === Some(DriverState.RUNNING))
assert(worker.drivers.head === driverId)
assert(worker.driverResources(driverId) === Map(GPU -> Set("0", "1", "2"),
FPGA -> Set("f1", "f2", "f3")))
val workerResources = master.workers.head.resources
assert(workerResources(GPU).availableAddrs.length === 0)
assert(workerResources(GPU).assignedAddrs.toSet === Set("0", "1", "2"))
assert(workerResources(FPGA).availableAddrs.length === 0)
assert(workerResources(FPGA).assignedAddrs.toSet === Set("f1", "f2", "f3"))
}
val driverFinished = DriverStateChanged(driverId, DriverState.FINISHED, None)
masterRef.send(driverFinished)
eventually(timeout(10.seconds)) {
val workerResources = master.workers.head.resources
assert(workerResources(GPU).availableAddrs.length === 3)
assert(workerResources(GPU).assignedAddrs.toSet === Set())
assert(workerResources(FPGA).availableAddrs.length === 3)
assert(workerResources(FPGA).assignedAddrs.toSet === Set())
}
}
test("assign/recycle resources to/from executor") {
def makeWorkerAndRegister(
master: RpcEndpointRef,
workerResourceReqs: Map[String, Int] = Map.empty)
: MockWorker = {
val worker = new MockWorker(master)
worker.rpcEnv.setupEndpoint(s"worker", worker)
val resources = workerResourceReqs.map { case (rName, amount) =>
val shortName = rName.charAt(0)
val addresses = (0 until amount).map(i => s"$shortName$i").toArray
rName -> new ResourceInformation(rName, addresses)
}
val reg = RegisterWorker(worker.id, "localhost", 8077, worker.self, 10, 2048,
"http://localhost:8080", RpcAddress("localhost", 10000), resources)
master.send(reg)
worker
}
val master = makeAliveMaster()
val masterRef = master.self
val resourceReqs = Seq(ResourceRequirement(GPU, 3), ResourceRequirement(FPGA, 3))
val worker = makeWorkerAndRegister(masterRef, Map(GPU -> 6, FPGA -> 6))
worker.appDesc = worker.appDesc.copy(resourceReqsPerExecutor = resourceReqs)
val driver = DeployTestUtils.createDriverDesc().copy(resourceReqs = resourceReqs)
val driverId = masterRef.askSync[SubmitDriverResponse](RequestSubmitDriver(driver)).driverId
val status = masterRef.askSync[DriverStatusResponse](RequestDriverStatus(driverId.get))
assert(status.state === Some(DriverState.RUNNING))
val workerResources = master.workers.head.resources
eventually(timeout(10.seconds)) {
assert(workerResources(GPU).availableAddrs.length === 0)
assert(workerResources(FPGA).availableAddrs.length === 0)
assert(worker.driverResources.size === 1)
assert(worker.execResources.size === 1)
val driverResources = worker.driverResources.head._2
val execResources = worker.execResources.head._2
val gpuAddrs = driverResources(GPU).union(execResources(GPU))
val fpgaAddrs = driverResources(FPGA).union(execResources(FPGA))
assert(gpuAddrs === Set("g0", "g1", "g2", "g3", "g4", "g5"))
assert(fpgaAddrs === Set("f0", "f1", "f2", "f3", "f4", "f5"))
}
val appId = worker.apps.head._1
masterRef.send(UnregisterApplication(appId))
masterRef.send(DriverStateChanged(driverId.get, DriverState.FINISHED, None))
eventually(timeout(10.seconds)) {
assert(workerResources(GPU).availableAddrs.length === 6)
assert(workerResources(FPGA).availableAddrs.length === 6)
}
}
  // Accessors for Master's private `drivers` / `state` fields via PrivateMethodTester.
  private def getDrivers(master: Master): HashSet[DriverInfo] = {
    master.invokePrivate(_drivers())
  }
  private def getState(master: Master): RecoveryState.Value = {
    master.invokePrivate(_state())
  }
}
/**
 * Recovery-mode factory whose persistence engine stores entries in the shared
 * in-memory map FakeRecoveryModeFactory.persistentData, so tests can pre-seed
 * and inspect persisted master state without ZooKeeper or the filesystem.
 */
private class FakeRecoveryModeFactory(conf: SparkConf, ser: serializer.Serializer)
    extends StandaloneRecoveryModeFactory(conf, ser) {
  import FakeRecoveryModeFactory.persistentData
  override def createPersistenceEngine(): PersistenceEngine = new PersistenceEngine {
    override def unpersist(name: String): Unit = {
      persistentData.remove(name)
    }
    override def persist(name: String, obj: Object): Unit = {
      persistentData(name) = obj
    }
    // Returns every stored object whose key starts with the given prefix;
    // the cast to T is unchecked (standard for PersistenceEngine implementations).
    override def read[T: ClassTag](prefix: String): Seq[T] = {
      persistentData.filter(_._1.startsWith(prefix)).map(_._2.asInstanceOf[T]).toSeq
    }
  }
  // Single-candidate election agent: the given master is elected immediately.
  override def createLeaderElectionAgent(master: LeaderElectable): LeaderElectionAgent = {
    new MonarchyLeaderAgent(master)
  }
}
private object FakeRecoveryModeFactory {
  // Shared across all factory instances, and therefore across tests in this file.
  // NOTE(review): plain mutable HashMap with no synchronization — confirm the
  // tests that use it do not run concurrently and clear it between runs.
  val persistentData = new HashMap[String, Object]()
}
| witgo/spark | core/src/test/scala/org/apache/spark/deploy/master/MasterSuite.scala | Scala | apache-2.0 | 44,184 |
/* *\\
** Squants **
** **
** Scala Quantities and Units of Measure Library and DSL **
** (c) 2013-2015, Gary Keorkunian **
** **
\\* */
package squants.market
import squants._
import scala.util.{Failure, Success, Try}
import scala.language.implicitConversions
import scala.math.BigDecimal.RoundingMode
import scala.math.BigDecimal.RoundingMode.RoundingMode
import java.util.Objects
/**
* Represents a quantity of Money.
*
* Money is similar to other quantities in that it represents an amount of something - purchasing power - and
* it is measured in units - currencies.
*
* The main difference is that the conversion rate between currencies can not be certain at compile.
* (In fact it may not always be so easy to know them at runtime as well.)
*
* To address this diversion from the way most other quantities work, Money overrides several of the standard methods
* and operators to ensure one of two rules is followed:
*
* 1) this and that are in the same currency, or
* 2) there is in an implicit MoneyContext in scope (which may or may not have the applicable exchange rate)
*
* Methods and operations applied to moneys of different currencies may throw a NoSuchExchangeRateException if the
* implicit MoneyContext does not contain the Rate(s) necessary to perform the conversion.
*
* The defaultMoneyContext includes USD as the default currency, a list of ~20 other currencies and NO exchange rates
*
* @author garyKeorkunian
* @since 0.1
*
* @param amount the amount of money
* @param currency the currency in which the money is denominated
*/
final class Money private (val amount: BigDecimal)(val currency: Currency)
extends Quantity[Money] {
def dimension = Money
def unit = currency
def value = amount.toDouble
/**
* Returns a string formatted with the original precision amount and the currency code
*
* eg USD(123.456) => "123.456 USD"
*
* @return String
*/
override def toString: String = amount.underlying.stripTrailingZeros.toString + " " + currency.code
/**
* Converts the amount to the given currency and returns a string formatted with the original precision and the currency code
*
* @param c Currency
* @param context MoneyContext required for conversion
* @return
*/
def toString(c: Currency)(implicit context: MoneyContext): String = in(c).toString
  /**
   * Returns a string formatted with the amount, rounded based on the Currency rules, and the currency symbol
   *
   * Rounding uses banker's rounding (HALF_EVEN) at the currency's formatDecimals.
   *
   * eg USD(12.4563) => "\\$12.46"
   *
   * @return String
   */
  def toFormattedString: String = currency.symbol + amount.setScale(currency.formatDecimals, BigDecimal.RoundingMode.HALF_EVEN).toString
  /** Converts to the given currency first (requires a MoneyContext), then formats. */
  def toFormattedString(c: Currency)(implicit context: MoneyContext): String = in(c).toFormattedString
/**
* Adds this Money to that Money converted to this.currency via context
*
* @param that Money
* @param context MoneyContext required for cross currency operations
* @return Money
* @throws NoSuchExchangeRateException when no exchange rate is available
*/
def moneyPlus(that: Money)(implicit context: MoneyContext = defaultMoneyContext) = context.add(this, that)
/** moneyPlus **/
def +(that: Money)(implicit context: MoneyContext = defaultMoneyContext) = context.add(this, that)
/**
* Overrides Quantity.plus to only work on like currencies.
* Cross currency additions should use moneyPlus
*
* @param that Money
* @return Money
* @throws scala.UnsupportedOperationException when attempted on cross currencies
*/
override def plus(that: Money): Money = that.currency match {
case this.currency ⇒ new Money(this.amount + that.amount)(currency)
case _ ⇒ throw new UnsupportedOperationException("plus not supported for cross-currency comparison - use moneyPlus")
}
/**
* Subtracts that Money from this Money converted to this.currency via context
*
* @param that Money
* @param context MoneyContext required for cross currency operations
* @return Money
* @throws NoSuchExchangeRateException when no exchange rate is available
*/
def moneyMinus(that: Money)(implicit context: MoneyContext = defaultMoneyContext) = context.subtract(this, that)
/** moneyMinus **/
def -(that: Money)(implicit context: MoneyContext = defaultMoneyContext) = context.subtract(this, that)
/**
* Override Quantity.minus to only work on like currencies
* Cross currency subtractions should use moneyMinus
*
* @param that Money
* @return Money
* @throws scala.UnsupportedOperationException when attempted on cross currencies
*/
override def minus(that: Money): Money = that.currency match {
case this.currency ⇒ new Money(this.amount - that.amount)(currency)
case _ ⇒ throw new UnsupportedOperationException("minus not supported for cross-currency comparison - use moneyMinus")
}
/**
* Multiplies this money by that BigDecimal and returns a new Money
*
* @param that BigDecimal
* @return Money
*/
def times(that: BigDecimal): Money = new Money(amount * that)(currency)
def *(that: BigDecimal): Money = times(that)
/**
* Overrides Quantity.times to ensure BigDecimal math is performed
*
* @param that Double
* @return Quantity
*/
override def times(that: Double): Money = new Money(amount * that)(currency)
override def *(that: Double): Money = times(that)
/**
* Multiplies this money by that [[squants.market.CurrencyExchangeRate]] and returns the equal value in the other currency.
*
* Delegates to CurrencyExchangeRate * Money
*
* @param that BigDecimal
* @return
*/
def *(that: CurrencyExchangeRate): Money = that * this
/**
* Divides this money by that BigDecimal and returns a new Money
*
* @param that BigDecimal
* @return Money
*/
def divide(that: BigDecimal): Money = new Money(amount / that)(currency)
def /(that: BigDecimal): Money = divide(that)
/**
* Overrides Quantity.divide to ensure BigDecimal math is performed
*
* @param that Double
* @return Quantity
*/
override def divide(that: Double): Money = new Money(amount / that)(currency)
override def /(that: Double): Money = divide(that)
/**
* Integer divides this money by that BigDecimal and returns the remainder
* @param that BigDecimal
* @return Money
*/
def %(that: BigDecimal): Money = new Money(amount % that)(currency)
/**
* Integer divides this money by that BigDecimal and returns the quotient and the remainder
* @param that BigDecimal
* @return (Money, Money)
*/
def /%(that: BigDecimal): (Money, Money) = amount /% that match {
case (q, r) ⇒ (new Money(q)(currency), new Money(r)(currency))
}
/**
* Divides this money by that money and returns the ratio between the converted amounts
*
* @param that Money
* @param context MoneyContext
* @return
*/
def moneyDivide(that: Money)(implicit context: MoneyContext): BigDecimal = context.divide(this, that)
def /(that: Money)(implicit context: MoneyContext = defaultMoneyContext): BigDecimal = moneyDivide(that)
/**
* Divide this money by another (non-money) Quantity and return a Price
* @param that Quantity
* @tparam A Quantity Type
* @return Price[A]
*/
def /[A <: Quantity[A]](that: A): Price[A] = Price(this, that)
/**
* Divide this money by a Price and return Quantity
* @param that Price
* @tparam A Quantity Type
* @return A
*/
def /[A <: Quantity[A]](that: Price[A]): A = that.quantity * (this / that.money).toDouble
/**
* Override for Quantity.divide to only work on Moneys of like Currency
* Cross currency subtractions should use moneyMinus
*
* @param that Money
* @return Double
*/
override def divide(that: Money): Double = that.currency match {
case this.currency ⇒ (this.amount / that.amount).toDouble
case _ ⇒ throw new UnsupportedOperationException("divide not supported for cross-currency comparison - use moneyDivide")
}
/**
* Override for Quantity.max to only work on Moneys of like Currency
* @param that Money
* @return Int
*/
override def max(that: Money): Money = (that, that.currency) match {
case (m: Money, this.currency) ⇒ new Money(amount.max(m.amount))(currency)
case _ ⇒ throw new UnsupportedOperationException("max not supported for cross-currency comparison - use moneyMax")
}
/**
* Override for Quantity.max to only work on Moneys of like Currency
* @param that Quantity
* @return Int
*/
override def min(that: Money): Money = (that, that.currency) match {
case (m: Money, this.currency) ⇒ new Money(amount.min(m.amount))(currency)
case _ ⇒ throw new UnsupportedOperationException("min not supported for cross-currency comparison - use moneyMin")
}
/**
* Override for Quantity.equal to only match Moneys of like Currency
* @param that Money must be of matching value and unit
* @return
*/
override def equals(that: Any): Boolean = that match {
case m: Money ⇒ amount == m.amount && currency == m.currency
case _ ⇒ false
}
/**
* Override for Quantity.hashCode because Money doesn't contain a primary unit
* @return
*/
override def hashCode: Int = Objects.hash(amount, currency)
/**
* Override for Quantity.compare to only work on Moneys of like Currency
* @param that Money
* @return Int
*/
override def compare(that: Money): Int = that.currency match {
case this.currency ⇒ if (this.amount > that.amount) 1 else if (this.amount < that.amount) -1 else 0
case _ ⇒ throw new UnsupportedOperationException("Comparison between Moneys of dislike Currency is not supported")
}
/**
* Supports max operation on Moneys of dislike Currency
* @param that Money
* @param moneyContext MoneyContext
* @return
*/
def moneyMax(that: Money)(implicit moneyContext: MoneyContext) = moneyContext.compare(this, that) match {
case -1 ⇒ that
case _ ⇒ this
}
/**
* Supports min operation on Moneys of dislike Currency
* @param that Money
* @param moneyContext MoneyContext
* @return
*/
def moneyMin(that: Money)(implicit moneyContext: MoneyContext) = moneyContext.compare(this, that) match {
case 1 ⇒ that
case _ ⇒ this
}
/**
* Supports equality comparisons on Moneys of dislike Currency
* @param that Money
* @param moneyContext MoneyContext
* @return
*/
def moneyEquals(that: Money)(implicit moneyContext: MoneyContext) = moneyCompare(that) == 0
/**
* Supports non-equality comparisons on Moneys of dislike Currency
* @param that Money
* @param moneyContext MoneyContext
* @return
*/
def moneyNotEquals(that: Money)(implicit moneyContext: MoneyContext) = moneyCompare(that) != 0
/**
* Supports compare operation on Moneys of dislike Currency
* @param that Money
* @param moneyContext MoneyContext
* @return
*/
def moneyCompare(that: Money)(implicit moneyContext: MoneyContext) = moneyContext.compare(this, that)
def ==#(that: Money)(implicit moneyContext: MoneyContext) = moneyCompare(that) == 0
def !=#(that: Money)(implicit moneyContext: MoneyContext) = moneyCompare(that) != 0
def >#(that: Money)(implicit moneyContext: MoneyContext) = moneyCompare(that) > 0
def >=#(that: Money)(implicit moneyContext: MoneyContext) = moneyCompare(that) >= 0
def <#(that: Money)(implicit moneyContext: MoneyContext) = moneyCompare(that) < 0
def <=#(that: Money)(implicit moneyContext: MoneyContext) = moneyCompare(that) <= 0
/**
* Combines with that Money to create an [[squants.market.CurrencyExchangeRate]]
*
* Exchange Rates on the same currency are not supported
*
* val rate: CurrencyExchangeRate = JPY(100) toThe USD(1)
*
* @param that Money
* @return
* @throws scala.IllegalArgumentException if the that.currency matches this.currency
*/
def toThe(that: Money) = that.currency match {
case this.currency ⇒ throw new IllegalArgumentException("Can not create Exchange Rate on matching currencies")
case _ ⇒ CurrencyExchangeRate(that, this)
}
/**
* toThe
*/
def -> = toThe _
/**
* Convert this Money to a Double representing the currency unit
*
* @param unit Currency
* @param context MoneyContext required for cross currency operations
* @return Double
* @throws NoSuchExchangeRateException when no exchange rate is available
*/
def to(unit: Currency)(implicit context: MoneyContext) = context.convert(this, unit).amount
/**
* Reboxes this Money value in a Money in the given Currency
*
* @param unit Currency
* @param context MoneyContext required for cross currency operations
* @return Money
* @throws NoSuchExchangeRateException when no exchange rate is available
*/
def in(unit: Currency)(implicit context: MoneyContext) = context.convert(this, unit)
/**
* Returns a Money rounded using scale and mode.
*
* @param scale Int - scale of the Money to be returned
* @param mode RoundingMode - defaults to HALF_EVEN
* @return Quantity
*/
override def rounded(scale: Int, mode: RoundingMode = RoundingMode.HALF_EVEN) = currency(amount.setScale(scale, mode))
/**
* Applies a function to the underlying amount of the Money, returning a Money in the same Currency
*
* @param f BigDecimal => BigDecimal function
* @return Money
*/
def mapAmount(f: BigDecimal => BigDecimal) = currency(f(amount))
}
/**
* Factory singleton for Money
*/
object Money extends Dimension[Money] {
  // Constructors that omit a Currency draw the default from the implicit MoneyContext.
  def apply(value: Double)(implicit fxContext: MoneyContext) = new Money(BigDecimal(value))(fxContext.defaultCurrency)
  def apply(value: BigDecimal)(implicit fxContext: MoneyContext) = new Money(value)(fxContext.defaultCurrency)
  def apply(value: BigDecimal, currency: Currency) = new Money(value)(currency)
  // String-code overloads return Try because the code may not resolve to a known Currency.
  def apply(value: BigDecimal, currency: String)(implicit fxContext: MoneyContext): Try[Money] = {
    Currency(currency).map(new Money(value)(_))
  }
  def apply[A](n: A, currency: Currency)(implicit num: Numeric[A]) = new Money(BigDecimal(num.toDouble(n)))(currency)
  def apply[A](n: A, currency: String)(implicit num: Numeric[A], fxContext: MoneyContext): Try[Money] = {
    Currency(currency).map(new Money(BigDecimal(num.toDouble(n)))(_))
  }
  // Parses strings like "12.34 USD"; only codes present in the context's currency
  // list are accepted (the regex alternation is built from them).
  def apply(s: String)(implicit fxContext: MoneyContext): Try[Money] = {
    val regex = ("([-+]?[0-9]*\\\\.?[0-9]+) *(" + fxContext.currencies.map(_.code).reduceLeft(_ + "|" + _) + ")").r
    s match {
      case regex(value, currency) ⇒ Currency(currency).map(Money(value.toDouble, _))
      case _ ⇒ Failure(QuantityParseException("Unable to parse Money", s))
    }
  }
  def name = "Money"
  // Money has no fixed unit system: units are currencies supplied by a MoneyContext,
  // so the Dimension unit accessors are intentionally unimplemented.
  def primaryUnit = ??? // Should not be used with Money - drawn from MoneyContext instead
  def siUnit = ??? // Should not be used with Money - drawn from MoneyContext instead
  def units = ??? // Should not be used with Money - drawn from MoneyContext instead
}
/**
* Represents a Currency, which is the Unit of Measure for Money
*
* @param code Currency code
* @param name Currency name
* @param symbol Currency symbol
* @param formatDecimals Number of decimals in standard formatting
*/
abstract class Currency(val code: String, val name: String, val symbol: String, val formatDecimals: Int) extends UnitOfMeasure[Money] {
  def apply(d: BigDecimal): Money = Money(d, this)
  def apply[A](n: A)(implicit num: Numeric[A]) = Money(BigDecimal(num.toDouble(n)), this)
  // Unit-to-unit converters are intentionally unimplemented: converting between
  // currencies requires an exchange rate, which goes through MoneyContext
  // (see Money.to / Money.in) rather than a fixed factor.
  protected def converterFrom: Double ⇒ Double = ???
  protected def converterTo: Double ⇒ Double = ???
  // Builds an exchange rate from this currency to that money, e.g. USD / JPY(100).
  def /(that: Money): CurrencyExchangeRate = that toThe Money(1, this)
  override def toString: String = code
  def canEqual(other: Any): Boolean = other.isInstanceOf[Currency]
  // Equality compares all four descriptive fields, not just the code.
  override def equals(other: Any): Boolean = other match {
    case that: Currency =>
      (that canEqual this) &&
        code == that.code &&
        name == that.name &&
        symbol == that.symbol &&
        formatDecimals == that.formatDecimals
    case _ => false
  }
  override def hashCode(): Int = {
    val state = Seq(code, name, symbol, formatDecimals)
    state.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
  }
}
object Currency {
  /**
   * Resolves a currency code against the context's currency map.
   * Returns Failure(NoSuchCurrencyException) when the code is unknown.
   */
  def apply(currency: String)(implicit fxContext: MoneyContext) = {
    fxContext.currencyMap.get(currency)
      .fold(Try[Currency](throw NoSuchCurrencyException(currency, fxContext)))(Success(_))
  }
}
object USD extends Currency("USD", "US Dollar", "$", 2)
object ARS extends Currency("ARS", "Argentinean Peso", "$", 2)
object AUD extends Currency("AUD", "Australian Dollar", "$", 2)
object BRL extends Currency("BRL", "Brazilian Real", "R$", 2)
object CAD extends Currency("CAD", "Canadian Dollar", "$", 2)
object CHF extends Currency("CHF", "Swiss Franc", "CHF", 2)
object CLP extends Currency("CLP", "Chilean Peso", "$", 2)
object CNY extends Currency("CNY", "Chinese Yuan Renminbi", "¥", 2)
object CZK extends Currency("CZK", "Czech Republic Koruny", "Kč", 2)
object DKK extends Currency("DKK", "Danish Kroner", "kr", 2)
object EUR extends Currency("EUR", "Euro", "€", 2)
object GBP extends Currency("GBP", "British Pound", "£", 2)
object HKD extends Currency("HKD", "Hong Kong Dollar", "$", 2)
object INR extends Currency("INR", "Indian Rupee", "₹", 2)
object JPY extends Currency("JPY", "Japanese Yen", "¥", 0)
object KRW extends Currency("KRW", "South Korean Won", "₩", 0)
object MXN extends Currency("MXN", "Mexican Peso", "$", 2)
object MYR extends Currency("MYR", "Malaysian Ringgit", "RM", 2)
object NOK extends Currency("NOK", "Norwegian Krone", "kr", 2)
object NZD extends Currency("NZD", "New Zealand Dollar", "$", 2)
object RUB extends Currency("RUB", "Russian Ruble", "\\u20BD", 2)
object SEK extends Currency("SEK", "Swedish Kroner", "kr", 2)
object XAG extends Currency("XAG", "Silver", "oz", 4)
object XAU extends Currency("XAU", "Gold", "oz", 4)
object BTC extends Currency("BTC", "Bitcoin", "\\u20BF", 15)
object ETH extends Currency("ETH", "Ether", "\\u039E", 15)
object LTC extends Currency("LTC", "Litecoin", "\\u0141", 15)
object ZAR extends Currency("ZAR", "South African Rand", "R", 2)
object NAD extends Currency("NAD", "Namibian Dollar", "N$", 2)
object TRY extends Currency("TRY", "Turkish lira", "₺", 2)
/**
* Support for Money DSL
*/
object MoneyConversions {
lazy val dollar = Money(1, USD)
lazy val euro = Money(1, EUR)
lazy val yen = Money(1, JPY)
implicit def fromLong(l: Long): MoneyConversions[BigDecimal] = new MoneyConversions(BigDecimal(l))
implicit def fromDouble(d: Double): MoneyConversions[BigDecimal] = new MoneyConversions(BigDecimal(d))
implicit class MoneyConversions[A](n: A)(implicit num: Numeric[A]) {
def money(implicit context: MoneyContext) = Money(n, context.defaultCurrency)
def XAU = Money(n, squants.market.XAU)
def XAG = Money(n, squants.market.XAG)
def USD = Money(n, squants.market.USD)
def dollars = USD
def cents = Money(num.toDouble(n) / 100d, squants.market.USD)
def EUR = Money(n, squants.market.EUR)
def euros = EUR
def JPY = Money(n, squants.market.JPY)
def yen = JPY
def GBP = Money(n, squants.market.GBP)
def poundSterling = GBP
def CHF = Money(n, squants.market.CHF)
def swissFrancs = CHF
def AUD = Money(n, squants.market.AUD)
def CAD = Money(n, squants.market.CAD)
def SEK = Money(n, squants.market.SEK)
def HKD = Money(n, squants.market.HKD)
def NOK = Money(n, squants.market.NOK)
def NZD = Money(n, squants.market.NZD)
def BTC = Money(n, squants.market.BTC)
def bitcoin = BTC
def ETH = Money(n, squants.market.ETH)
def ether = ETH
def LTC = Money(n, squants.market.LTC)
def litecoin = LTC
def ZAR = Money(n, squants.market.ZAR)
def NAD = Money(n, squants.market.NAD)
def TRY = Money(n, squants.market.TRY)
}
  /**
   * Numeric instance for Money, enabling e.g. `list.sum` with a MoneyContext.
   * NOTE(review): `compare` uses the raw amounts and ignores currency, unlike
   * Money.compare which throws on cross-currency comparison — confirm intended.
   */
  class MoneyNumeric()(implicit mc: MoneyContext) extends Numeric[Money] {
    def plus(x: Money, y: Money) = x + y
    def minus(x: Money, y: Money) = x - y
    // Money * Money has no meaningful unit, so multiplication is unsupported.
    def times(x: Money, y: Money) = throw new UnsupportedOperationException("Numeric.times not supported for Quantities")
    def negate(x: Money) = -x
    // Integers are lifted into the context's default currency.
    def fromInt(x: Int) = mc.defaultCurrency(x)
    def toInt(x: Money) = x.value.toInt
    def toLong(x: Money) = x.value.toLong
    def toFloat(x: Money) = x.value.toFloat
    def toDouble(x: Money) = x.value
    def compare(x: Money, y: Money) = if (x.value > y.value) 1 else if (x.value < y.value) -1 else 0
    def parseString(str: String): Option[Money] = Money(str).toOption
    /**
     * Includes the MoneyContext so instances with different contexts are distinguishable.
     * @return String representation of this instance
     */
    override def toString: String = s"MoneyNumeric($mc)"
  }
}
| typelevel/squants | shared/src/main/scala/squants/market/Money.scala | Scala | apache-2.0 | 21,364 |
/**
* ____ __ ____ ____ ____,,___ ____ __ __ ____
* ( _ \\ /__\\ (_ )(_ _)( ___)/ __) ( _ \\( )( )( _ \\ Read
* ) / /(__)\\ / /_ _)(_ )__) \\__ \\ )___/ )(__)( ) _ < README.txt
* (_)\\_)(__)(__)(____)(____)(____)(___/ (__) (______)(____/ LICENSE.txt
*/
package razie.wiki.model
import com.mongodb.DBObject
import com.mongodb.casbah.Imports._
import com.novus.salat._
import controllers.{VErrors, Validation}
import play.api.Play.current
import play.api.cache._
import razie.audit.Audit
import razie.db.RazSalatContext._
import razie.db.{RMany, RazMongo}
import razie.diesel.dom.WikiDomain
import razie.hosting.WikiReactors
import razie.tconf.Visibility.PUBLIC
import razie.tconf.parser.{BaseAstNode, LeafAstNode, SpecParserSettings, StrAstNode}
import razie.wiki.admin.GlobalData
import razie.wiki.model.features.{WForm, WikiForm}
import razie.wiki.parser.WAST
import razie.wiki.util.QueryParms
import razie.wiki.{Enc, Services, WikiConfig}
import razie.{Logging, cdebug, clog, ctrace}
import scala.collection.mutable.ListBuffer
/** wiki factory and utils */
object Wikis extends Logging with Validation {
  /** retrieve cached compiled page or get it fresh, fold and cache if not cached
    *
    * @param wid the page id
    * @param au the user, if any - pages may have dynamic content in the context of a user
    * @return the page, if found
    */
  def cachedPage(wid: WID, au: Option[WikiUser]) = {
    val w = {
      if (Services.config.cacheWikis) {
        // cache key: full wpath WITHOUT section
        val id = wid.wpathFullNoSection + ".page"
        WikiCache.getEntry(id).map { x =>
          x
        }.orElse {
          // complex logic to not log cache miss twice
          val n = if(wid.hasCachedPage) wid.page else wid.findPageNocache
          // side effect: preprocess caches the AST on the entry itself; result intentionally discarded
          n.map(_.preprocess(au))
          if (n.exists(w => w.cacheable && w.category != "-" && w.category != "")) {
            // NOTE(review): stored under wpathFull but looked up under wpathFullNoSection above -
            // for wids carrying a section these keys differ; confirm intended
            WikiCache.set(n.get.wid.wpathFull + ".page", n.get, 300) // 10 miuntes
          } else {
            cdebug << "WIKI_CACHE_CANTC FULL - " + id
          }
          n
        }
      } else // no cache
        wid.page
    }
    w
  }
/** create the data section */
def mkFormData(spec: WikiEntry, defaults: Map[String, String] = Map.empty) = {
// build the defaults - cross check with formSpec
var defaultStr = ""
defaults.filter(x=> spec.form.fields.contains(x._1)).map { t =>
val (k, v) = t
defaultStr = defaultStr + s""", "$k":"$v" """
}
val content = s"""
{{.section:formData}}
{"formState":"created" $defaultStr }
{{/section}}
"""
content
}
def isEvent(cat: String) = "Race" == cat || "Event" == cat || "Training" == cat
  //todo configure per realm
  /** these categories are persisted in their own tables */
  final val PERSISTED = Array("Item", "Event", "Training", "Note", "Entry", "Form", "JSON")
  // "DslReactor", "DslElement", "DslDomain", "JSON", "DslEntity")

  /** customize table names per category */
  final val TABLE_NAME = "WikiEntry"

  // map all Dsl type entities in the same table
  final val TABLE_NAMES = Map.empty[String,String]
  //("DslReactor" -> "weDsl", "DslElement" -> "weDsl", "DslDomain" -> "weDsl", "DslEntity" -> "weDslEntity")

  final val RK = WikiConfig.RK
  final val DFLT = RK // todo replace with RK

  /** the per-realm wiki service for the given realm */
  def apply(realm: String = RK) = WikiReactors(realm).wiki

  /** the "rk" root realm's wiki */
  def rk = WikiReactors(RK).wiki

  /** the default reactor's wiki */
  def dflt = WikiReactors(WikiReactors.WIKI).wiki

  /** deserialize a salat-grated mongo DBObject into T */
  def fromGrated[T <: AnyRef](o: DBObject)(implicit m: Manifest[T]) = grater[T](ctx, m).asObject(o)

  /** safe to call before reactors are initialized - queries mongo directly */
  def findSimple (wid:WID) = {
    RazMongo(Wikis.TABLE_NAME).findOne(Map("category" -> wid.cat, "name" -> wid.name)) map (grater[WikiEntry].asObject(_))
  }

  // TODO refactor convenience
  def find(wid: WID): Option[WikiEntry] =
    apply(wid.getRealm).find(wid)

  // TODO find by ID is bad, no - how to make it work across wikis ?
  /** @deprecated optimize with realm */
  def findById(id: String) = find(new ObjectId(id))

  /** @deprecated optimize with realm - scans all reactors until one matches */
  def find(id: ObjectId) =
    WikiReactors.reactors.foldLeft(None.asInstanceOf[Option[WikiEntry]])((a, b) => a orElse b._2.wiki.find(id))

  /** @deprecated optimize with realm - scans all reactors until one matches */
  def findById(cat: String, id: ObjectId): Option[WikiEntry] =
    WikiReactors.reactors.foldLeft(None.asInstanceOf[Option[WikiEntry]])((a, b) => a orElse b._2.wiki.findById(cat, id))

  /** links originating from this topic */
  def linksFrom(to: UWID) = RMany[WikiLink]("from.cat" -> to.cat, "from.id" -> to.id)

  /** links pointing at this topic */
  def linksTo(to: UWID) = RMany[WikiLink]("to.cat" -> to.cat, "to.id" -> to.id)

  /** children are linked to the parent with role "Child" */
  def childrenOf(parent: UWID) =
    RMany[WikiLink]("to.id" -> parent.id, "how" -> "Child").map(_.from)

  /** links originating from this topic with the given role */
  def linksFrom(from: UWID, role: String) =
    RMany[WikiLink]("from.id" -> from.id, "how" -> role)

  // not taking realm into account...
  def linksTo(cat: String, to: UWID, role: String) =
    RMany[WikiLink]("from.cat" -> cat, "to.cat" -> to.cat, "to.id" -> to.id, "how" -> role)

  // leave these vvvvvvvvvvvvvvvvvvvvvvvvvv
  def label(wid: WID): String = /*wid.page map (_.label) orElse*/
    apply(wid.getRealm).label(wid)

  def label(wid: UWID): String = /*wid.page map (_.label) orElse*/
    wid.wid.map(x => label(x)).getOrElse(wid.nameOrId)
  // leave these ^^^^^^^^^^^^^^^^^^^^^^^^^^

  //todo refactor in own utils vvv
  // markup / content-type ids
  final val MD = "md"
  final val TEXT = "text"
  final val JS = "js"
  final val SCALA = "scala"
  final val JSON = "json"
  final val XML = "xml"
  final val HTML = "html"
/** helper to deal with the different markups */
object markups {
final val list = Seq(
MD -> "Markdown",
TEXT -> "Text",
JSON -> "JSON",
XML -> "XML",
JS -> "JavaScript",
SCALA -> "Scala",
HTML -> "Raw html"
) // todo per reator type - hackers like stuff
def contains(s: String) = list.exists(_._1 == s)
def isDsl(s: String) =
s == JS || s == XML || s == JSON || s == SCALA
}
  /** the form spec to instantiate this topic with: topic's own attr, else the category's domain prop */
  def formFor(we: WikiEntry) = {
    we.attr("wiki.form") orElse WikiDomain(we.realm).prop(we.category, "inst.form")
  }

  /** the instance template for this topic: topic's own attr, else the category's domain prop */
  def templateFor(we: WikiEntry) = {
    we.attr("wiki.template") orElse WikiDomain(we.realm).prop(we.category, "inst.template")
  }

  // replace pat with "_", strip pat2, collapse runs of "_" and drop a trailing "_"
  // NOTE(review): pat2 defaults to "" and replaceAll("", "") is a no-op - presumably intentional
  private def iformatName(name: String, pat: String, pat2: String = "") =
    name.replaceAll(pat, "_").replaceAll(pat2, "").replaceAll("_+", "_").replaceFirst("_$", "")

  /** format a simple name - try NOT to use this */
  /** these are the safe url characters. I also included ',which are confusing many sites */
  val SAFECHARS =
    """[^0-9a-zA-Z\\$\\-_()',]""" // DO NOT TOUCH THIS PATTERN!

  def formatName(name: String): String = iformatName(name, SAFECHARS, "") // DO NOT TOUCH THIS PATTERN!

  /** format a complex name cat:name */
  def formatName(wid: WID): String =
    if ("WikiLink" == wid.cat)
      iformatName(wid.name, """[ /{}\\[\\]]""")
    else
      formatName(wid.name)
  /** format an even more complex name into an html anchor plus an optional ILink
    *
    * @param curRealm the realm we render in - wids without a realm are assumed here
    * @param rk force links back to RK main or leave them
    * @param max if >= 0, trim the label to this many chars (appending "...")
    * @return (html anchor, Some(ILink)) describing the link
    */
  def formatWikiLink(curRealm: String, wid: WID, nicename: String, label: String, role: Option[String], hover: Option[String] = None, rk: Boolean = false, max: Int = -1) = {
    val name = formatName(wid.name)
    // NOTE(review): `title` already contains a full title="..." attribute yet is interpolated
    // inside another title="$title" below - the output attribute looks doubled; confirm intended
    val title = hover.map("title=\\"" + _ + "\\"") getOrElse ("")

    // trim the label to `max` chars, appending "..."
    def trim(s: String) = {
      if (max < 0) s
      else {
        if (s.length > max) s.substring(0, max - 3) + "..."
        else s
      }
    }

    val tlabel = trim(label)

    val r = wid.realm.getOrElse(curRealm)
    // all pages wihtout realm are assumed in current realm
    val bigName = Wikis.apply(r).index.getForLower(name.toLowerCase())
    // NOTE(review): when cat=="User" and bigName is None, bigName.get below throws
    // NoSuchElementException - confirm User topics are always indexed
    if (bigName.isDefined || wid.cat.matches("User")) {
      var newwid = Wikis.apply(r).index.getWids(bigName.get).headOption.map(_.copy(section = wid.section)) getOrElse wid.copy(name = bigName.get)
      var u = newwid.formatted.urlRelative(curRealm)

      if (rk && (u startsWith "/")) u = "http://" + Services.config.home + u

      (s"""<a href="$u" title="$title">$tlabel</a>""", Some(ILink(newwid, label, role)))
    } else if (rk) {
      // force an absolute link back to the RK site
      val sup = "" //"""<sup><b style="color:red">^</b></sup></a>"""
      (
        s"""<a href="http://${Services.config.home}${wid.formatted.urlRelative}" title="$title">$tlabel$sup</a>""",
        Some(ILink(wid, label, role)))
    } else {
      // topic not found in index - hide it from google
      // val prefix = if (wid.realm.isDefined && wid.getRealm != curRealm) s"/we/${wid.getRealm}" else "/wikie"
      val prefix = "/wikie"
      // "++" marker: clicking would create the topic (category not persisted separately)
      val plusplus = if (Wikis.PERSISTED.contains(wid.cat)) "" else """<sup><b style="color:red">++</b></sup>"""
      (
        s"""<a href="$prefix/show/${wid.wpath}" title="%s">$tlabel$plusplus</a>""".format
        (hover.getOrElse("Missing page")),
        Some(ILink(wid, label, role)))
    }
  }
def shouldFlag(name: String, label: String, content: String): Option[String] = {
val a = Array(name, label, content)
if (a.exists(_.matches("(?i)^.*<(" + SpecParserSettings.hnok + ")([^>]*)>"))) Some("WIKI_FORBIDDEN_HTML")
else if (hasBadWords(content, adultWords)) Some("WIKI_HAS_ADULT")
else None
}
  /** expand [[include:...]] / [[includeWithSection:...]] and {{wiki.template}} directives
    *
    * @param wid the topic the content belongs to
    * @param c2 the content to expand
    * @param we optional entry - on the first pass its dependency list is collected
    * @param firstTime on the first pass also apply the category's inst.template
    * @return Some(expanded content) if anything was expanded, None otherwise
    */
  private def include(wid: WID, c2: String, we: Option[WikiEntry] = None, firstTime: Boolean = false)(implicit errCollector: VErrors): Option[String] = {
    // todo this is not cached as the underlying page may change - need to pick up changes
    var done = false
    val collecting = we.exists(_.depys.isEmpty) // should collect depys

    val res = try {
      val INCLUDE = """(?<!`)\\[\\[include(WithSection)?:([^\\]]*)\\]\\]""".r
      var res1 = INCLUDE.replaceAllIn(c2, { m =>
        // resolve the included wid (defaulting to this topic's realm) and fetch its content
        val content = for (
          iwid <- WID.fromPath(m.group(2)).map(w => if (w.realm.isDefined) w else w.r(wid.getRealm)) orErr ("bad format for page");
          c <- (if (m.group(1) == null) iwid.content else iwid.findSection.map(_.original)) orErr s"content for ${iwid.wpath} not found"
        ) yield {
          // remember included topics as dependencies of this entry
          if (collecting && we.isDefined)
            we.get.depys = iwid.uwid.toList ::: we.get.depys
          c
        }

        done = true

        // IF YOUR content changes - review this escape here
        //regexp uses $ as a substitution
        val xx = content
          .map(
            _.replaceAllLiterally("\\\\", "\\\\\\\\")
              .replaceAll("\\\\$", "\\\\\\\\\\\\$")
          )
          //            .map(_.replaceAllLiterally("$", "\\\\$"))
          //          .map(_.replaceAll("\\\\\\\\", "\\\\\\\\\\\\\\\\"))
          .getOrElse("`[ERR Can't include $1 " + errCollector.mkString + "]`")
        xx
      })

      if (!res1.contains("{{.wiki.noTemplate")) {
        var hadTemplate = false
        // explicit {{wiki.template:...}} directives
        val TEMPLATE = """(?<!`)\\{\\{\\.?wiki.template[: ]*([^\\}]*)\\}\\}""".r
        res1 = TEMPLATE.replaceAllIn(res1, { m =>
          done = true
          hadTemplate = true
          //todo this is parse-ahead, maybe i can make it lazy?
          val parms = WikiForm.parseFormData(c2)
          val content = template(m.group(1), Map() ++ parms)
          // IF YOUR content changes - review this escape here
          //regexp uses $ as a substitution
          content
            .replaceAllLiterally("\\\\", "\\\\\\\\")
            .replaceAll("\\\\$", "\\\\\\\\\\\\$")
        })

        // check cat for preloaded cats that will trigger stackoverflow
        // also, while domain is loading itself, i'm not processing instance templates
        if (firstTime && !hadTemplate && wid.cat != "Category" && wid.cat != "Reactor" && !WikiDomain(wid.getRealm).isLoading)
          WikiDomain(wid.getRealm).prop(wid.cat, "inst.template").map { t =>
            done = true
            val parms = WikiForm.parseFormData(c2)
            val content = template(t, Map() ++ parms)
            res1 = content + "\\n\\n" + res1
          }
      }
      res1
    } catch {
      case s: Throwable => log("Error: ", s); "`[ERR Can't process an include]`"
    }
    if (done) Some(res) else None
  }
  /** expand includes only (no AST parsing) - MD markup only; other markups pass through unchanged */
  def preprocessIncludes(wid: WID, markup: String, content: String, page: Option[WikiEntry] = None) = markup match {
    case MD =>
      implicit val errCollector = new VErrors()

      var c2 = content

      // TODO stupid - 3 levels of include...
      // each include(...) returns None when nothing was expanded, short-circuiting the chain
      include(wid, c2, page, true).map {
        c2 = _
      }.flatMap { x =>
        include(wid, c2, page, false).map {
          c2 = _
        }.flatMap { x =>
          include(wid, c2, page, false).map {
            c2 = _
          }
        }
      }

      c2

    case _ => content
  }
  // TODO better escaping of all url chars in wiki name
  /** pre-process this wiki: expand includes and relative links, then parse to an AST
    *
    * @return (AST, content after includes were expanded)
    */
  def preprocess(wid: WID, markup: String, content: String, page: Option[WikiEntry]) : (BaseAstNode, String) = {
    implicit val errCollector = new VErrors()

    // expand ./ (child) and ../ (sibling) relative links, then up to 3 levels of [[include:]]
    def includes (c:String) = {
      var c2 = c
      if (c2 contains "[[./")
        c2 = c.replaceAll("""\\[\\[\\./""", """[[%s/""".format(wid.realm.map(_ + ".").mkString + wid.cat + ":" + wid.name)) // child topics
      if (c2 contains "[[../")
        c2 = c2.replaceAll("""\\[\\[\\../""", """[[%s""".format(wid.parentWid.map(wp => wp.realm.map(_ + ".").mkString + wp.cat + ":" + wp.name + "/").getOrElse(""))) // siblings topics

      // TODO stupid - 3 levels of include...
      include(wid, c2, page, true).map { x =>
        page.map(_.cacheable = false) // simple dirty if includes, no depy to manage
        c2 = x
      }.flatMap { x =>
        include(wid, c2, page, false).map {
          c2 = _
        }.flatMap { x =>
          include(wid, c2, page, false).map {
            c2 = _
          }
        }
      }
      c2
    }

    try {
      markup match {
        case MD =>
          val t1 = System.currentTimeMillis

          var c2 = includes(content)

          // pre-mods
          page.orElse(wid.page).map { x =>
            // WikiMods will dirty the we.cacheable if needed
            c2 = razie.wiki.mods.WikiMods.modPreParsing(x, Some(c2)).getOrElse(c2)
          }

          val res = WikiReactors(wid.getRealm).wiki.mkParser apply c2
          val t2 = System.currentTimeMillis
          ctrace << s"wikis.preprocessed ${t2 - t1} millis for ${wid.name}"
          (res, c2)

        case TEXT => {
          // quote wiki link brackets so they render verbatim
          val c2 = content.replaceAll("""\\[\\[([^]]*)\\]\\]""", """[[\\(1\\)]]""")
          (StrAstNode(c2), c2)
        }

        case JSON | XML | JS | SCALA => {
          // DSL-ish content is kept verbatim
          (StrAstNode(content), content)
        }

        case HTML => {
          // trick: parse it like we normally would, for properties and includes, but then discard
          val x = preprocess(wid, MD, content, page)
          (LeafAstNode(x._2, x._1), x._2)
        }

        case _ => (StrAstNode("UNKNOWN_MARKUP " + markup + " - " + content), content)
      }
    } catch {
      case t: Throwable =>
        razie.Log.error("EXCEPTION_PARSING " + markup + " - " + wid.wpath, t)
        razie.audit.Audit.logdb("EXCEPTION_PARSING", markup + " - " + wid.wpath + " " + t.getLocalizedMessage())
        (StrAstNode("EXCEPTION_PARSING " + markup + " - " + t.getLocalizedMessage() + " - " + content), content)
    }
  }
/** html for later */
def propLater (id:String, url:String) =
s"""<script async>require(['jquery'],function($$){$$("#$id").load("$url");});</script>"""
  /** partial formatting function: preprocess, run scripts/xp/forms/mods, then render markup to html
    *
    * @param wid - the wid being formatted
    * @param markup - markup language being formatted
    * @param icontent - the content being formatted or "" if there is a WikiEntry being formatted
    * @param we - optional page for context for formatting
    * @param user - optional user, for user-specific content and script signature checks
    * @return html (or raw/pre-wrapped content, depending on markup)
    */
  private def format1(wid: WID, markup: String, icontent: String, we: Option[WikiEntry], user:Option[WikiUser]) = {
    val res = try {
      // 1. preprocess (AST + includes), preferring the entry's cached AST when possible
      var content =
        (if(icontent == null || icontent.isEmpty) {
          if (wid.section.isDefined)
            preprocess(wid, markup, noBadWords(wid.content.mkString), we)._1
          else
            // use preprocessed cache
            we.flatMap(_.ipreprocessed.map(_._1)).orElse(
              we.map(_.preprocess(user))
            ).getOrElse(
              preprocess(wid, markup, noBadWords(icontent), we)._1
            )
        }
        else
          preprocess(wid, markup, noBadWords(icontent), we)._1
          ).fold(WAST.context(we, user)).s

      // apply md templates first
      content = Wikis(wid.getRealm).applyTemplates(wid, content, "md")

      // TODO index nobadwords when saving/loading page, in the WikiIndex
      // TODO have a pre-processed and formatted page index I can use - for non-scripted pages, refreshed on save
      // run scripts
      val S_PAT = """`\\{\\{(call):([^#}]*)#([^}]*)\\}\\}`""".r

      try {
        // to evaluate scripts wihtout a page, we need this trick:
        val tempPage = we orElse None //Some(new WikiEntry("Temp", "fiddle", "fiddle", "md", content, new ObjectId(), Seq("temp"), ""))

        // warn against duplicated included scripts
        val duplicates = new ListBuffer[String]()

        content = S_PAT replaceSomeIn (content, { m =>
          // any page with scripts cannot be cached
          we.map(_.cacheable = false)
          try {
            // find the page with signed scripts and call them
            // inline scripts are exanded into the html page
            val scriptName = m group 3
            val scriptPath = m group 2
            val pageWithScripts = WID.fromPath(scriptPath).flatMap(x => Wikis(x.getRealm).find(x)).orElse(tempPage)
            val y=pageWithScripts.flatMap(_.scripts.find(_.name == scriptName)).filter(_.checkSignature(user)).map{s=>
              val warn = if(duplicates contains s.name) {
                s"`WARNING: script named '${s.name}' duplicated - check your includes`\\n\\n"
              } else ""
              duplicates.append(s.name)

              if("inline" == s.stype) {
                val wix = Wikis(wid.getRealm).mkWixJson(we, user, Map.empty, "")
                warn + s"""<!-- WikiScript: ${s} -->
                          |<script>
                          |withJquery(function(){
                          |${wix}\\n
                          |${s.content}
                          |;});
                          |</script>
                        """.stripMargin
              } else
                runScript(s.content, "js", we, user)
            }

            // dolar sign (jquery) in embedded JS needs to be escaped ... don't remember why
            y
              .map(_.replaceAll("\\\\$", "\\\\\\\\\\\\$"))
              // also, any escaped double quote needs re-escaped... likely same reason as dolar sign
              // wix.toJson can escape realm props including "" and they get lost somehow if I don't do this
              .map(_.replaceAll("\\\\\\"", "\\\\\\\\\\\\\\""))
          } catch {
            case t: Throwable => {
              log("exception in script", t)
              Some("`!?!`")
            }
          }
        })
      } catch {
        // sometimes the pattern itself blows
        case t: Throwable => log("exception in script", t);
      }

      // cannot have these expanded in the AST parser because then i recurse forever when resolving XPATHs...
      val XP_PAT = """`\\{\\{\\{(xp[l]*):([^}]*)\\}\\}\\}`""".r

      content = XP_PAT replaceSomeIn (content, { m =>
        we.map(_.cacheable = false)
        try {
          we.map(x => runXp(m group 1, x, m group 2))
        } catch { case _: Throwable => Some("!?!") }
      })

      // for forms
      we.map { x => content = new WForm(x).formatFields(content) }

      // pre-mods
      we.map {x =>
        // we don't mark cacheable false - the WikiMods does that
        content = razie.wiki.mods.WikiMods.modPreHtml(x, Some(content)).getOrElse(content)
      }

      //todo plugins register and define formatting for differnet content types
      markup match {
        case MD => {
          // timing helper - logs how long the parse/render phases take
          object DTimer {
            def apply[A](desc:String)(f: => A): A = {
              val t1 = System.currentTimeMillis
              val res:A = f
              val t2 = System.currentTimeMillis
              cdebug << s"$desc took ${t2 - t1} millis"
              res
            }
          }

          val res = DTimer ("wikis.mdhtml for "+wid.name) {
            val ast = DTimer ("wikis.mdast for "+wid.name) {
              val parser = org.commonmark.parser.Parser.builder().build();
              parser.parse(content);
            }
            val renderer = org.commonmark.renderer.html.HtmlRenderer.builder().build();
            renderer.render(ast); // "<p>This is <em>Sparta</em></p>\\n"
          }

          res
        }
        case TEXT => content
        case JSON | SCALA | JS => "<pre>" + content.replaceAll("\\n", "<br/>") + "</pre>"
        case XML | HTML => content
        case _ => "UNKNOWN_MARKUP " + markup + " - " + content
      }
    } catch {
      case e : Throwable => {
        Audit.logdbWithLink("ERR_FORMATTING", wid.ahref, "[[ERROR FORMATTING]]: " + wid.wpath + " err: " + e.toString)
        log("[[ERROR FORMATTING]]: ", e)
        // in dev mode fail loudly so the error is noticed
        if(Services.config.isLocalhost) throw e
        "[[ERROR FORMATTING]] - sorry, dumb program here! The content is not lost: try editing this topic... also, please report this topic with the error and we'll fix it for you!"
      }
    }
    res
  }
def prepUrl (url:String) = {
if(Services.config.isDevMode && Services.config.isLocalhost)
url
.replace("http://cdn.razie.com/", "/admin/img/Users/raz/w/razie.github.io/")
.replace("https://cdn.razie.com/", "/admin/img/")
// .replace("https://cdn.razie.com/", "http://localhost:9000/asset/../../")
// .replace("https://cdn.razie.com/", "file://Users/raz/w/razie.github.io/")
else url
}
  /** evaluate an xp expression against a topic; a root(...) prefix re-anchors the search
    *
    * NOTE(review): the `path match` below has no default case - a path matching neither
    * root(*)/... nor root(cat:name)/... throws MatchError; confirm all callers pass root(...)
    *
    * @return attribute values, or formatted links to the matched topics
    */
  def irunXp(what: String, w: WikiEntry, path: String) = {
    var root = new razie.Snakk.Wrapper(new WikiWrapper(w.wid), WikiXpSolver)
    var xpath = path // TODO why am I doing this?

    val ROOT_ALL = """root\\(\\*\\)/(.*)""".r
    val ROOT = """root\\(([^:]*):([^:)/]*)\\)/(.*)""".r //\\[[@]*(\\w+)[ \\t]*([=!~]+)[ \\t]*[']*([^']*)[']*\\]""".r
    path match {
      case ROOT_ALL(rest) => {
        // anchor at the realm's Admin/* pseudo-root
        root = new razie.Snakk.Wrapper(new WikiWrapper(WID("Admin", "*").r(w.realm)), WikiXpSolver)
        xpath = rest //path.replace("root(*)/", "")
      }
      case ROOT(cat, name, rest) => {
        // anchor at a specific topic
        root = new razie.Snakk.Wrapper(new WikiWrapper(WID(cat, name).r(w.realm)), WikiXpSolver)
        xpath = rest
      }
    }

    val res: List[_] =
      if (razie.GPath(xpath).isAttr) (root xpla xpath).filter(_.length > 0) // sometimes attributes come as zero value?
      else {
        // non-attribute results become links to the matched topics
        (root xpl xpath).collect {
          case ww: WikiWrapper => formatWikiLink(w.realm, ww.wid, ww.wid.name, ww.page.map(_.label).getOrElse(ww.wid.name), None)._1
        }
      }
    res
  }
/** a list to html */
def toUl (res:List[Any]) =
"<ul>" +
res.take(100).map { x: Any =>
"<li>" + x.toString + "</li>"
}.mkString +
(if(res.size>100)"<li>...</li>" else "") +
"</ul>"
  /** run an xp expression and format the result: "xp" -> first value, "xpl" -> html list
    * NOTE(review): non-exhaustive match - any other `what` value throws MatchError */
  def runXp(what: String, w: WikiEntry, path: String) = {
    val res = irunXp(what, w, path)
    what match {
      case "xp" => res.headOption.getOrElse("?").toString
      case "xpl" => toUl(res)
      //      case "xmap" => res.take(100).map { x: Any => "<li>" + x.toString + "</li>" }.mkString
    }
    //    else "TOO MANY to list"), None))
  }
  // scaled down formatting of jsut some content - fakes a throwaway wid in the given realm
  def sformat(content: String, markup:String="md", realm:String, user:Option[WikiUser]=None) =
    format (WID("1","2").r(realm), markup, content, None, user)
/** main formatting function
*
* @param wid - the wid being formatted
* @param markup - markup language being formatted
* @param icontent - the content being formatted or "" if there is a WikiEntry being formatted
* @param we - optional page for context for formatting
* @return
*/
def formatJson(wid: WID, markup: String, icontent: String, we: Option[WikiEntry] = None) = {
val content =
if(icontent == null || icontent.isEmpty) wid.content.mkString
else icontent
content
}
  /** main formatting function - formats an existing entry in the context of a user
    *
    * @param we - the page being formatted
    * @param user - optional user, for user-specific content
    * @return html
    */
  def format(we: WikiEntry, user:Option[WikiUser]) : String = {
    format (we.wid, we.markup, "", Some(we), user)
  }
  // invalidate cached pages/html whenever a topic is updated or renamed
  WikiObservers mini {
    case ev@WikiEvent(action, "WikiEntry", _, entity, _, _, _) => {

      action match {
        case WikiAudit.UPD_RENAME => {
          // NOTE(review): oldWid.get throws if the event has no/unparseable oldId -
          // confirm upstream always populates it for renames
          val oldWid = ev.oldId.flatMap(WID.fromPath)
          Wikis.clearCache(oldWid.get)
        }
        case a if WikiAudit.isUpd(a) => {
          // NOTE(review): same .get concern for unparseable event ids
          val wid = WID.fromPath(ev.id)
          Wikis.clearCache(wid.get)
        }

        case _ => {}
      }
    }
  }
/** clearing all possible versions of this WID from the cache */
def clearCache(wids : WID*) = {
wids.foreach(wid=>
Array(
wid.r("rk"), // yea, stupid but...
wid,
wid.copy(parent=None, section=None),
wid.copy(realm = None, section=None),
wid.copy(realm = None, parent=None, section=None),
wid.copy(realm = None, parent=None, section=None, cat="")
).foreach {wid=>
val key = wid.wpathFull
WikiCache.remove(key + ".db")
WikiCache.remove(key + ".formatted")
WikiCache.remove(key + ".page")
})
}
  /** main formatting function - formats and post-processes the html, using the cache when possible
    *
    * @param wid - the wid being formatted
    * @param markup - markup language being formatted
    * @param icontent - the content being formatted or "" if there is a WikiEntry being formatted
    * @param we - optional page for context for formatting
    * @return html (raw content for json/xml/text markups)
    */
  def format(wid: WID, markup: String, icontent: String, we: Option[WikiEntry], user:Option[WikiUser]) : String = {
    if (JSON == wid.cat || JSON == markup || XML == wid.cat || XML == markup || TEXT == markup)
      formatJson(wid, markup, icontent, we)
    else {
      // 1. format, from the html cache if allowed
      var res = {
        val cacheFormatted = Services.config.cacheFormat

        if(cacheFormatted &&
          we.exists(w=> w.cacheable && w.category != "-" && w.category != "") &&
          (icontent == null || icontent == "") &&
          wid.section.isEmpty) {
          WikiCache.getString(we.get.wid.wpathFull+".formatted").map{x=>
            x
          }.getOrElse {
            val n = format1(wid, markup, icontent, we, user)
            if(we.exists(_.cacheable)) // format can change cacheable
              WikiCache.set(we.get.wid.wpathFull+".formatted", n, 300) // 10 miuntes
            n
          }
        } else
          format1(wid, markup, icontent, we, user)
      }

      // 2. mark the external links
      val sup = "" //"""<sup> <b style="color:darkred">^</b></sup>""")
      val A_PAT = """(<a +href="http://)([^>]*)>([^<]*)(</a>)""".r
      res = A_PAT replaceSomeIn (res, { m =>
        if (Option(m group 2) exists (s=> !s.startsWith(Services.config.hostport) &&
          !Services.isSiteTrusted("", s))
        )
          Some("""$1$2 title="External site"><i>$3</i>"""+sup+"$4")
        else None
      })

      // 3. replace all divs - limitation of the markdown parser
      val DPAT1 = "\\\\{\\\\{div ([^}]*)\\\\}\\\\}".r
      res = DPAT1 replaceSomeIn (res, { m =>
        Some("<div "+Enc.unescapeHtml(m group 1)+">")
      })

      res = res.replaceAll("\\\\{\\\\{/div *\\\\}\\\\}", "</div>")

      //    // modify external sites mapped to external URLs
      //    // TODO optimize - either this logic or a parent-based approach
      //    for (site <- Wikis.urlmap)
      //      res = res.replaceAll ("""<a +href="%s""".format(site._1), """<a href="%s""".format(site._2))

      // get some samples of what people get stuck on...
      if(res contains "CANNOT PARSE")
        Audit.logdbWithLink(
          "CANNOT_PARSE",
          wid.urlRelative,
          s"""${wid.wpath} ver ${we.map(_.ver)}""")

      res
    }
  }
  /** expand {{div.later id url}} into a div that lazy-loads its src via jquery */
  def divLater(x:String) = {
    val y = x.replaceAll("\\\\{\\\\{div.later ([^ ]*) ([^}]*)\\\\}\\\\}",
      """
        | <div id=$1>div.later</div>
        | <script>
        | withJquery(function(){
        | \\$("#$1").attr("src","$2");
        | });
        | </script>
        | """.stripMargin)
    y
  }
  // todo protect this from tresspassers
  /** run an embedded script in the context of a page/user; query parms come from thread statics */
  def runScript(s: String, lang:String, page: Option[WikiEntry], au:Option[WikiUser]) = {
    // page preprocessed for, au or default to thread statics - the least reliable
    val up = page.flatMap(_.ipreprocessed.flatMap(_._2)) orElse au
    //todo use au not up
    val q = razie.NoStaticS.get[QueryParms]
    Services.runScript(s, lang, page, up, q.map(_.q.map(t => (t._1, t._2.mkString))).getOrElse(Map()))
  }
  /** format content from a template, given some parms
    *
    * - this is used only when creating new pages from spec
    *
    * DO NOT mess with this - one side effect is only replacing the ${} it understands...
    *
    * CANNOT should reconcile with templateFromContent
    */
  def template(wpath: String, parms:Map[String,String]) = {
    (for (
      wid <- WID.fromPath(wpath).map(x=>if(x.realm.isDefined) x else x.r("wiki")); // templates are in wiki or rk
      c <- wid.content
    ) yield {
      var extraParms = Map.empty[String,String]

      // {{tif:name[:default]}}body{{/tif}} - keep body only when the parm is set (or defaulted)
      val TIF = """(?s)\\{\\{\\.*(tif)([: ])?([^ :}]*)([ :]+)?([^}]+)?\\}\\}((?>.*?(?=\\{\\{/[^`])))\\{\\{/\\.*tif\\}\\}""".r
      var res = TIF.replaceAllIn(c, { m =>
        if(parms.get(m.group(3)).exists(_.length > 0)) "$6"
        else if(m.group(5) != null) { // default value
          extraParms = extraParms + (m.group(3) -> m.group(5))
          "$6"
        } else ""
      })

      // substitute {{$$name}} placeholders with their (possibly defaulted) values
      val s1 = (parms ++ extraParms).foldLeft(res){(a,b)=>
        a.replaceAll("\\\\{\\\\{\\\\$\\\\$"+b._1+"\\\\}\\\\}", b._2)
      }
      // unescape the escaped {{` and [[` markers
      s1.replaceAll("\\\\{\\\\{`", "{{").replaceAll("\\\\[\\\\[`", "[[")
    }) getOrElse (
      "No content template for: " + wpath + "\\n\\nAttributes:\\n\\n" + parms.map{t=>s"* ${t._1} = ${t._2}\\n"}.mkString
    )
  }
/** format content from a template, given some parms
*
* @param parms will resolve expressions from the template into Strings. you can use a Map.
* parms("*") should return some details for debugging
*/
def templateFromContent(content: String, parms:String=>String) = {
val PAT = """\\\\$\\\\{([^\\\\}]*)\\\\}""".r
val s1 = PAT.replaceAllIn(content, {m =>
parms(m.group(1))
})
}
  /** replace whole bad words with "BLIP" */
  def noBadWords(s: String) = badWords.foldLeft(s)((x, y) => x.replaceAll("""\\b%s\\b""".format(y), "BLIP"))

  /** true if any token of s (lowercased) is in the given word list
    * NOTE(review): splitting on """\\w""" (word chars) looks inverted - \\W was likely intended; confirm */
  def hasBadWords(s: String, what: Array[String] = badWords): Boolean = s.toLowerCase.split("""\\w""").exists(what.contains(_))

  def flag(we: WikiEntry) { flag(we.wid) }

  /** audit-flag a topic for review */
  def flag(wid: WID, reason: String = "") {
    Audit.logdb("WIKI_FLAGGED", reason, wid.toString)
  }

  // word lists (placeholders)
  final val badWords = "boohoo,hell".split(",")
  final val adultWords = "damn,heck".split(",")
//todo who uses this
def updateUserName(uold: String, unew: String) = {
// TODO 1 optimize with find()
// TODO 2 rename references
val we = RazMongo("WikiEntry")
for (u <- we.findAll() if "User" == u.get("category") && uold == u.get("name")) {
u.put("name", unew)
we.save(u)
}
val weo = RazMongo("WikiEntryOld")
for (u <- weo.findAll() if "User" == u.get("category") && uold == u.get("name")) {
u.put("name", unew)
weo.save(u)
}
}
  /** relative url for a uwid, or an error marker when it has no wid */
  def w(we: UWID):String = we.wid.map(wid=>w(wid)).getOrElse("ERR_NO_URL_FOR_"+we.toString)

  /** relative url for a wid; count=0 suppresses the visit counter */
  def w(we: WID, shouldCount: Boolean = true):String =
    we.urlRelative + (if (!shouldCount) "?count=0" else "")
/** make a relative href for the given tag. give more tags with 1/2/3 */
def hrefTag(wid:WID, t:String,label:String) = {
if(Array("Blog","Forum") contains wid.cat) {
s"""<b><a href="${w(wid)}/tag/$t">$label</a></b>"""
} else {
if(wid.parentWid.isDefined) {
s"""<b><a href="${w(wid.parentWid.get)}/tag/$t">$label</a></b>"""
} else {
s"""<b><a href="/tag/$t">$label</a></b>"""
}
}
}
  /////////////////// visibility for new wikis

  /** default view visibility for a new topic: parent's, else realm default, else category default, else PUBLIC */
  def mkVis(wid:WID, realm:String) = wid.findParent
    .flatMap(_.props.get("visibility"))
    .orElse(WikiReactors(realm).props.prop("default.visibility"))
    .getOrElse(
      WikiReactors(realm)
        .wiki
        .visibilityFor(wid.cat)
        .headOption
        .getOrElse(PUBLIC))

  /** extract wvis (edit permissions) prop from wiki */
  // NOTE(review): the map values are already Strings - the asInstanceOf cast is redundant
  protected def wvis(props: Option[Map[String, String]]): Option[String] =
    props.flatMap(p => p.get("wvis").orElse(p.get("visibility"))).map(_.asInstanceOf[String])

  /** default edit visibility for a new topic: parent's wvis, else realm default, else category default, else PUBLIC */
  def mkwVis(wid:WID, realm:String) = wvis(wid.findParent.map(_.props))
    .orElse(WikiReactors(realm).props.prop("default.wvis"))
    .getOrElse(
      WikiReactors(realm)
        .wiki
        .visibilityFor(wid.cat)
        .headOption
        .getOrElse(PUBLIC))
/** see if a exists otherwise return b */
def fallbackPage (a:String, b:String) : String = {
WID.fromPath(a).flatMap(find).map(x => a).getOrElse(b)
}
}
| razie/diesel-rx | diesel/src/main/scala/razie/wiki/model/Wikis.scala | Scala | apache-2.0 | 32,988 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.system.kafka
import java.util.Properties
import com.google.common.annotations.VisibleForTesting
import org.apache.kafka.clients.producer.KafkaProducer
import org.apache.samza.config.ApplicationConfig.ApplicationMode
import org.apache.samza.config.KafkaConfig.Config2Kafka
import org.apache.samza.config._
import org.apache.samza.metrics.MetricsRegistry
import org.apache.samza.system.{SystemAdmin, SystemConsumer, SystemFactory, SystemProducer}
import scala.collection.JavaConverters._
import org.apache.samza.util._
object KafkaSystemFactory extends Logging {
  /** Producer property overrides injected per system: changelog systems must not compress,
    * since Kafka does not support compression for log-compacted topics. */
  @VisibleForTesting
  def getInjectedProducerProperties(systemName: String, config: Config) = {
    val isChangelogSystem = new StorageConfig(config).isChangelogSystem(systemName)
    if (isChangelogSystem) {
      warn("System name '%s' is being used as a changelog. Disabling compression since Kafka does not support compression for log compacted topics." format systemName)
      Map[String, String]("compression.type" -> "none")
    } else {
      Map.empty[String, String]
    }
  }

  // client-id prefixes for the different kafka client roles
  val CLIENTID_PRODUCER_PREFIX = "kafka-producer"
  val CLIENTID_CONSUMER_PREFIX = "kafka-consumer"
  val CLIENTID_ADMIN_PREFIX = "kafka-admin-consumer"
}
class KafkaSystemFactory extends SystemFactory with Logging {
def getConsumer(systemName: String, config: Config, registry: MetricsRegistry): SystemConsumer = {
val metrics = new KafkaSystemConsumerMetrics(systemName, registry)
val clientId = KafkaConsumerConfig.createClientId(KafkaSystemFactory.CLIENTID_CONSUMER_PREFIX, config);
val kafkaConsumerConfig = KafkaConsumerConfig.getKafkaSystemConsumerConfig(config, systemName, clientId);
val kafkaConsumer = KafkaSystemConsumer.createKafkaConsumerImpl[Array[Byte], Array[Byte]](systemName, kafkaConsumerConfig)
info("Created kafka consumer for system %s, clientId %s: %s" format (systemName, clientId, kafkaConsumer))
val kafkaConsumerProxyFactory =
new KafkaConsumerProxy.BaseFactory[Array[Byte], Array[Byte]](kafkaConsumer, systemName, clientId, metrics)
val kafkaSystemConsumer = new KafkaSystemConsumer(kafkaConsumer, systemName, config, clientId,
kafkaConsumerProxyFactory, metrics, new SystemClock)
info("Created samza system consumer for system %s, config %s: %s" format(systemName, config, kafkaSystemConsumer))
kafkaSystemConsumer
}
def getProducer(systemName: String, config: Config, registry: MetricsRegistry): SystemProducer = {
val injectedProps = KafkaSystemFactory.getInjectedProducerProperties(systemName, config)
val clientId = KafkaConsumerConfig.createClientId(KafkaSystemFactory.CLIENTID_PRODUCER_PREFIX, config);
val producerConfig = config.getKafkaSystemProducerConfig(systemName, clientId, injectedProps)
val getProducer = () => {
new KafkaProducer[Array[Byte], Array[Byte]](producerConfig.getProducerProperties)
}
val metrics = new KafkaSystemProducerMetrics(systemName, registry)
// Unlike consumer, no need to use encoders here, since they come for free
// inside the producer configs. Kafka's producer will handle all of this
// for us.
info("Creating kafka producer for system %s, producerClientId %s" format(systemName, clientId))
val taskConfig = new TaskConfig(config)
new KafkaSystemProducer(
systemName,
new ExponentialSleepStrategy(initialDelayMs = producerConfig.reconnectIntervalMs),
getProducer,
metrics,
dropProducerExceptions = taskConfig.getDropProducerErrors)
}
/**
 * Builds a SystemAdmin for the given Kafka system. The admin shares the
 * same consumer-config derivation as the consumer path, but with an
 * admin-specific client id prefix.
 */
def getAdmin(systemName: String, config: Config): SystemAdmin = {
  // extract kafka client configs
  val adminClientId = KafkaConsumerConfig.createClientId(KafkaSystemFactory.CLIENTID_ADMIN_PREFIX, config)
  val adminConsumerConfig = KafkaConsumerConfig.getKafkaSystemConsumerConfig(config, systemName, adminClientId)
  val adminConsumer = KafkaSystemConsumer.createKafkaConsumerImpl(systemName, adminConsumerConfig)
  new KafkaSystemAdmin(systemName, config, adminConsumer)
}
/**
 * Topic properties for the coordinator stream: log compaction plus the
 * configured segment and max-message sizes.
 *
 * Note: the original implementation folded a Map into Properties with the
 * `/:` operator, which is deprecated since Scala 2.13 — replaced with a
 * plain Properties build (same resulting key/value pairs).
 */
def getCoordinatorTopicProperties(config: Config) = {
  val props = new Properties
  props.put("cleanup.policy", "compact")
  props.put("segment.bytes", config.getCoordinatorSegmentBytes)
  props.put("max.message.bytes", config.getCoordinatorMaxMessageByte)
  props
}
/**
 * Returns per-stream topic property overrides for intermediate streams.
 * Only in BATCH mode: every intermediate stream gets a default retention.ms
 * so batch-run topics age out; in any other mode no overrides apply.
 */
def getIntermediateStreamProperties(config: Config): Map[String, Properties] = {
  val appConfig = new ApplicationConfig(config)
  if (appConfig.getAppMode == ApplicationMode.BATCH) {
    val streamConfig = new StreamConfig(config)
    streamConfig.getStreamIds().asScala.filter(streamConfig.getIsIntermediateStream(_)).map(streamId => {
      // only the override here
      val properties = new Properties()
      properties.putIfAbsent("retention.ms", String.valueOf(KafkaConfig.DEFAULT_RETENTION_MS_FOR_BATCH))
      (streamId, properties)
    }).toMap
  } else {
    Map()
  }
}
}
| lhaiesp/samza | samza-kafka/src/main/scala/org/apache/samza/system/kafka/KafkaSystemFactory.scala | Scala | apache-2.0 | 5,710 |
package com.sksamuel.elastic4s.http.search
import com.sksamuel.elastic4s.HitReader
/**
 * Aggregate of the individual responses returned by a multi-search request.
 */
case class MultiSearchResponse(responses: Seq[SearchResponse]) {

  /** Number of individual search responses in this multi-search result. */
  def size: Int = responses.length

  /** Converts every hit across all responses via the implicit [[HitReader]]. */
  def to[T: HitReader]: IndexedSeq[T] =
    responses.toIndexedSeq.flatMap(resp => resp.hits.hits.map(hit => hit.to[T]))

  /** Like `to`, but each conversion outcome is captured as an Either instead of throwing. */
  def safeTo[T: HitReader]: IndexedSeq[Either[Throwable, T]] =
    responses.toIndexedSeq.flatMap(resp => resp.hits.hits.map(hit => hit.safeTo[T]))
}
| FabienPennequin/elastic4s | elastic4s-http/src/main/scala/com/sksamuel/elastic4s/http/search/MultiSearchResponse.scala | Scala | apache-2.0 | 406 |
/**
* The MIT License (MIT) Copyright (c) 2014 University of Applied Sciences, Berlin, Germany
* For more detailed information, please read the licence.txt in the root directory.
**/
package org.onepercent.Jobs
//Scala imports
import scala.util.{Failure, Success, Try}
//JAVA imports
import java.text.SimpleDateFormat
//Spark imports
import org.apache.spark.sql.hive._
import org.apache.spark.{SparkConf, SparkContext}
//Own imports
import org.onepercent.utils.Types.TypeCreator
import org.onepercent.utils._
import org.onepercent.{Env, JobExecutor, JobResult}
/**
 * This class is a job for calculating the TopHashtags.
 *
 * @author Florian Willich
 */
class TopHashtagJob extends JobExecutor with Logging {

  /**
   * Analyses one hour of tweets to extract the top X hashtags.
   *
   * @param params Have to be as follows:
   *               List element 0: Timestamp <yyyy-mm-dd hh:mm:ss>
   *               List element 1: Natural Number (Integer) defining the top X
   *
   * @return The result of the analysis (TopHashtags, @see TweetAnalyser),
   *         or an error if something went wrong:
   *
   *         If the timestamp is not a valid date:
   *         ErrorMessage("Parameter X is not a valid date!", 100)
   *
   *         If the created path is not valid:
   *         ErrorMessage("Parameter X i not a valid path!", 100)
   *
   *         If top X can not be cast to an Integer:
   *         ErrorMessage("Parameter X is not an Integer!", 100)
   *
   *         If there was something going wrong in the analysis:
   *         ErrorMessage("TopHashtag analyses failed!", 101)
   *
   * @author Florian Willich
   */
  override def executeJob(params: List[String]): JobResult = {
    Try(TypeCreator.gregorianCalendar(params(0), new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"))) match {
      case Success(gregCalendar) =>
        Try(TypeCreator.clusterPath(Config.get.tweetsPrefixPath, gregCalendar, "*.data")) match {
          case Success(path) =>
            // Fix: params(1).toInt used to be unwrapped and threw a raw
            // NumberFormatException instead of returning the ErrorMessage the
            // contract above documents for non-integer input.
            Try(params(1).toInt) match {
              case Success(topX) =>
                val hc = new HiveContext(Env.sc)
                val ta = new TweetAnalyser(Env.sc, hc)
                log("executeJob", "Starting Anaylsis with path: " + path.path + " and topX: " + topX)
                // Please notice the JSONFileReader which is used to create a schema
                // for the topHashtag analyser method.
                Try(ta.topHashtag(new TweetJSONFileReader(Env.sc, hc).readFile(path.path), topX)) match {
                  case Success(result) =>
                    log("executeJob", "End Anaylsis with path: " + path.path + " and topX: " + topX)
                    result
                  case Failure(_) =>
                    log("executeJob", "TopHashtag analyses failed! path[" + path.path + "] topX[" + topX + "]")
                    ErrorMessage("TopHashtag analyses failed!", 101);
                }
              case Failure(_) =>
                ErrorMessage("Parameter [" + params(1) + "] is not an Integer!", 100)
            }
          case Failure(wrongPath) =>
            ErrorMessage("Parameter [" + wrongPath + "] i not a valid path!", 100)
        }
      case Failure(wrongDate) =>
        ErrorMessage("Parameter [" + wrongDate + "] is not a valid date!", 100)
    }
  }
}
| isn0gud/onepercent | src/main/scala/org/onepercent/Jobs/TopHashtagJob.scala | Scala | mit | 3,388 |
package gitbucket.core.controller.api
import gitbucket.core.api._
import gitbucket.core.controller.ControllerBase
import gitbucket.core.model.{Account, Issue}
import gitbucket.core.service.{AccountService, IssueCreationService, IssuesService, MilestonesService}
import gitbucket.core.service.IssuesService.IssueSearchCondition
import gitbucket.core.service.PullRequestService.PullRequestLimit
import gitbucket.core.util.{ReadableUsersAuthenticator, ReferrerAuthenticator, RepositoryName}
import gitbucket.core.util.Implicits._
/**
 * GitHub-compatible issue API routes (subset of https://developer.github.com/v3/issues/).
 * Mixed into the main controller; relies on the listed services via self-type.
 */
trait ApiIssueControllerBase extends ControllerBase {
  self: AccountService
    with IssuesService
    with IssueCreationService
    with MilestonesService
    with ReadableUsersAuthenticator
    with ReferrerAuthenticator =>

  /*
   * i. List issues
   * https://developer.github.com/v3/issues/#list-issues
   * requested: 1743
   */

  /*
   * ii. List issues for a repository
   * https://developer.github.com/v3/issues/#list-issues-for-a-repository
   */
  // Paginated (PullRequestLimit per page); visible to repository referrers only.
  get("/api/v3/repos/:owner/:repository/issues")(referrersOnly { repository =>
    val page = IssueSearchCondition.page(request)
    // TODO: more api spec condition
    val condition = IssueSearchCondition(request)
    val baseOwner = getAccountByUserName(repository.owner).get
    val issues: List[(Issue, Account, Option[Account])] =
      searchIssueByApi(
        condition = condition,
        offset = (page - 1) * PullRequestLimit,
        limit = PullRequestLimit,
        repos = repository.owner -> repository.name
      )

    JsonFormat(issues.map {
      case (issue, issueUser, assignedUser) =>
        ApiIssue(
          issue = issue,
          repositoryName = RepositoryName(repository),
          user = ApiUser(issueUser),
          assignee = assignedUser.map(ApiUser(_)),
          labels = getIssueLabels(repository.owner, repository.name, issue.issueId)
            .map(ApiLabel(_, RepositoryName(repository))),
          issue.milestoneId.flatMap { getApiMilestone(repository, _) }
        )
    })
  })

  /*
   * iii. Get a single issue
   * https://developer.github.com/v3/issues/#get-a-single-issue
   */
  // Responds 404 when the id is not numeric, the issue is missing, or the opener account is gone.
  get("/api/v3/repos/:owner/:repository/issues/:id")(referrersOnly { repository =>
    (for {
      issueId <- params("id").toIntOpt
      issue <- getIssue(repository.owner, repository.name, issueId.toString)
      users = getAccountsByUserNames(Set(issue.openedUserName) ++ issue.assignedUserName, Set())
      openedUser <- users.get(issue.openedUserName)
    } yield {
      JsonFormat(
        ApiIssue(
          issue,
          RepositoryName(repository),
          ApiUser(openedUser),
          issue.assignedUserName.flatMap(users.get(_)).map(ApiUser(_)),
          getIssueLabels(repository.owner, repository.name, issue.issueId).map(ApiLabel(_, RepositoryName(repository))),
          issue.milestoneId.flatMap { getApiMilestone(repository, _) }
        )
      )
    }) getOrElse NotFound()
  })

  /*
   * iv. Create an issue
   * https://developer.github.com/v3/issues/#create-an-issue
   */
  // Requires a readable (signed-in) user AND issue-edit permission on the repository.
  post("/api/v3/repos/:owner/:repository/issues")(readableUsersOnly { repository =>
    if (isIssueEditable(repository)) { // TODO Should this check is provided by authenticator?
      (for {
        data <- extractFromJsonBody[CreateAnIssue]
        loginAccount <- context.loginAccount
      } yield {
        val milestone = data.milestone.flatMap(getMilestone(repository.owner, repository.name, _))
        // Only the first assignee is honored (single-assignee data model).
        val issue = createIssue(
          repository,
          data.title,
          data.body,
          data.assignees.headOption,
          milestone.map(_.milestoneId),
          None,
          data.labels,
          loginAccount
        )
        JsonFormat(
          ApiIssue(
            issue,
            RepositoryName(repository),
            ApiUser(loginAccount),
            issue.assignedUserName.flatMap(getAccountByUserName(_)).map(ApiUser(_)),
            getIssueLabels(repository.owner, repository.name, issue.issueId)
              .map(ApiLabel(_, RepositoryName(repository))),
            issue.milestoneId.flatMap { getApiMilestone(repository, _) }
          )
        )
      }) getOrElse NotFound()
    } else Unauthorized()
  })

  /*
   * v. Edit an issue
   * https://developer.github.com/v3/issues/#edit-an-issue
   */

  /*
   * vi. Lock an issue
   * https://developer.github.com/v3/issues/#lock-an-issue
   */

  /*
   * vii. Unlock an issue
   * https://developer.github.com/v3/issues/#unlock-an-issue
   */
}
| takezoe/gitbucket | src/main/scala/gitbucket/core/controller/api/ApiIssueControllerBase.scala | Scala | apache-2.0 | 4,478 |
package jsentric
import jsentric.queryTree.QueryTree
import org.scalatest.{FunSuite, Matchers}
import argonaut._
import Argonaut._
/**
 * Exercises jsentric query combinators two ways for each case: directly via
 * `$isMatch` on the query Json, and via the compiled `QueryTree` — both are
 * expected to agree.
 */
class QueryTests extends FunSuite with Matchers {
  import Jsentric._

  test("Existance/nonexistance of field") {
    object Query1 extends Contract {
      val field = \?[String]("field")
      val nested = new \\("nested") {
        val field2 = \?[String]("field2")
      }
    }
    val query = Query1.field.$exists(true)
    val tree = QueryTree(query.obj.get)
    query.$isMatch(Json("field" := "value")) should be (true)
    query.$isMatch(Json("field2" := "value")) should be (false)
    tree.isMatch(Json("field" := "value")) should be (true)
    tree.isMatch(Json("field2" := "value")) should be (false)

    // $exists(false) on one field combined with $exists(true) on a nested field.
    val query2 = Query1.field.$exists(false) && Query1.nested.field2.$exists(true)
    val tree2 = QueryTree(query2.obj.get)
    query2.$isMatch(Json("nested" -> Json("field2" := "value"))) should be (true)
    query2.$isMatch(Json("field" := "value", "nested" -> Json("field2" := "value"))) should be (false)
    tree2.isMatch(Json("nested" -> Json("field2" := "value"))) should be (true)
    tree2.isMatch(Json("field" := "value", "nested" -> Json("field2" := "value"))) should be (false)
  }
  test("Equality") {
    object Query2 extends Contract {
      val field = \?[String]("field")
      val nested = new \\("nested") {
        val field2 = \[Int]("field2")
      }
    }
    // $eq combined with || across top-level and nested fields.
    val query1 = Query2.field.$eq("TEST") || Query2.nested.field2.$eq(45)
    val tree1 = QueryTree(query1.obj.get)
    query1.$isMatch(Json("field" := "TEST")) should be (true)
    query1.$isMatch(jEmptyObject) should be (false)
    query1.$isMatch(Json("field" := "TEST2")) should be (false)
    query1.$isMatch(Json("nested" -> Json("field2" := 45))) should be (true)
    query1.$isMatch(Json("field" := "TEST", "nested" -> Json("field2" := 45))) should be (true)
    tree1.isMatch(Json("field" := "TEST")) should be (true)
    tree1.isMatch(jEmptyObject) should be (false)
    tree1.isMatch(Json("field" := "TEST2")) should be (false)
    tree1.isMatch(Json("nested" -> Json("field2" := 45))) should be (true)
    tree1.isMatch(Json("field" := "TEST", "nested" -> Json("field2" := 45))) should be (true)

    // $ne plus range ($gte/$lt) conditions on the nested field.
    val query2 = Query2.field.$ne("TEST") || Query2.nested(n => n.field2.$gte(45) && n.field2.$lt(52))
    val tree2 = QueryTree(query2.obj.get)
    query2.$isMatch(Json("field" := "TEST")) should be (false)
    query2.$isMatch(Json("field" := "TEST", "nested" -> Json("field2" := 44))) should be (false)
    query2.$isMatch(Json("field" := "TEST", "nested" -> Json("field2" := 52))) should be (false)
    query2.$isMatch(Json("field" := "TEST2", "nested" -> Json("field2" := 45))) should be (true)
    query2.$isMatch(Json("nested" -> Json("field2" := 44))) should be (true)
    tree2.isMatch(Json("field" := "TEST")) should be (false)
    tree2.isMatch(Json("field" := "TEST", "nested" -> Json("field2" := 44))) should be (false)
    tree2.isMatch(Json("field" := "TEST", "nested" -> Json("field2" := 52))) should be (false)
    tree2.isMatch(Json("field" := "TEST2", "nested" -> Json("field2" := 45))) should be (true)
    tree2.isMatch(Json("nested" -> Json("field2" := 44))) should be (true)

    // $in / $nin set membership.
    val query3 = Query2(q => q.field.$in("TEST", "TEST2") && q.nested.field2.$nin(4,5,6))
    val tree3 = QueryTree(query3.obj.get)
    query3.$isMatch(Json("field" := "TEST")) should be (true)
    query3.$isMatch(Json("field" := "TEST", "nested" -> Json("field2" := 3))) should be (true)
    query3.$isMatch(Json("field" := "TEST", "nested" -> Json("field2" := 4))) should be (false)
    query3.$isMatch(Json("field" := "TEST3")) should be (false)
    query3.$isMatch(Json("field" := "TEST3", "nested" -> Json("field2" := 3))) should be (false)
    query3.$isMatch(Json("nested" -> Json("field2" := 3))) should be (false)
    tree3.isMatch(Json("field" := "TEST")) should be (true)
    tree3.isMatch(Json("field" := "TEST", "nested" -> Json("field2" := 3))) should be (true)
    tree3.isMatch(Json("field" := "TEST", "nested" -> Json("field2" := 4))) should be (false)
    tree3.isMatch(Json("field" := "TEST3")) should be (false)
    tree3.isMatch(Json("field" := "TEST3", "nested" -> Json("field2" := 3))) should be (false)
    tree3.isMatch(Json("nested" -> Json("field2" := 3))) should be (false)

    //TODO not a generalised solution
    // $like: case-insensitive match, with % as wildcard.
    val query4 = Query2.field.$like("value")
    val tree4 = QueryTree(query4.obj.get)
    query4.$isMatch(Json("field" := "Value")) should be (true)
    query4.$isMatch(jEmptyObject) should be (false)
    query4.$isMatch(Json("field" := "Values")) should be (false)
    tree4.isMatch(Json("field" := "Value")) should be (true)
    tree4.isMatch(jEmptyObject) should be (false)
    tree4.isMatch(Json("field" := "Values")) should be (false)

    val query5 = Query2.field.$like("%lue")
    val tree5 = QueryTree(query5.obj.get)
    query5.$isMatch(Json("field" := "ValuE")) should be (true)
    query5.$isMatch(jEmptyObject) should be (false)
    query5.$isMatch(Json("field" := "Values")) should be (false)
    tree5.isMatch(Json("field" := "ValuE")) should be (true)
    tree5.isMatch(jEmptyObject) should be (false)
    tree5.isMatch(Json("field" := "Values")) should be (false)

    // $regex with the "i" (case-insensitive) option.
    val query6 = Query2.field.$regex("vaLUe", "i")
    val tree6 = QueryTree(query6.obj.get)
    query6.$isMatch(Json("field" := "Value")) should be (true)
    query6.$isMatch(jEmptyObject) should be (false)
    query6.$isMatch(Json("field" := "Values")) should be (false)
    tree6.isMatch(Json("field" := "Value")) should be (true)
    tree6.isMatch(jEmptyObject) should be (false)
    tree6.isMatch(Json("field" := "Values")) should be (false)
  }
  test("Long double equality") {
    // Numeric equality is value-based across Json long/double representations.
    Json("field" := 1L).$isMatch(Json("field" := 1.00D)) should be (true)
    QueryTree(Json("field" := 1L).obj.get).isMatch(Json("field" := 1.00D)) should be (true)
  }
  test("element match") {
    object Query3 extends Contract {
      val doubles = \:[Long]("doubles")
      val nested = new \\("nested") {
        val strings = \:?[String]("strings")
      }
    }
    // $elemMatch succeeds when at least one array element satisfies the predicate.
    val query1 = Query3.doubles.$elemMatch(_.$gt(4))
    query1.$isMatch(Json("doubles" := (3.asJson -->>: 5.asJson -->>: jEmptyArray))) should be (true)
    query1.$isMatch(Json("doubles" := (2.asJson -->>: 4.asJson -->>: jEmptyArray))) should be (false)
    query1.$isMatch(Json("doubles" := jEmptyArray)) should be (false)
  }
  test("boolean operators") {
    object Query4 extends Contract {
      val value = \[Double]("value")
    }
    val query1 = Query4.value.$gt(0) || Query4.value.$lt(-10)
    val tree1 = QueryTree(query1.obj.get)
    query1.$isMatch(Json("value" := 2)) should be (true)
    query1.$isMatch(Json("value" := -3)) should be (false)
    query1.$isMatch(Json("value" := -15)) should be (true)
    tree1.isMatch(Json("value" := 2)) should be (true)
    tree1.isMatch(Json("value" := -3)) should be (false)
    tree1.isMatch(Json("value" := -15)) should be (true)

    // Negation inverts all three outcomes above.
    val query2 = Jsentric.not(query1)
    val tree2 = QueryTree(query2.obj.get)
    query2.$isMatch(Json("value" := 2)) should be (false)
    query2.$isMatch(Json("value" := -3)) should be (true)
    query2.$isMatch(Json("value" := -15)) should be (false)
    tree2.isMatch(Json("value" := 2)) should be (false)
    tree2.isMatch(Json("value" := -3)) should be (true)
    tree2.isMatch(Json("value" := -15)) should be (false)

    // Conjunction builds a half-open interval [0, 50).
    val query3 = Query4.value.$gte(0) && Query4.value.$lt(50)
    val tree3 = QueryTree(query3.obj.get)
    query3.$isMatch(Json("value" := 12)) should be (true)
    query3.$isMatch(Json("value" := -3)) should be (false)
    query3.$isMatch(Json("value" := 50)) should be (false)
    tree3.isMatch(Json("value" := 12)) should be (true)
    tree3.isMatch(Json("value" := -3)) should be (false)
    tree3.isMatch(Json("value" := 50)) should be (false)
  }
}
| HigherState/jsentric | src/test/scala/jsentric/QueryTests.scala | Scala | apache-2.0 | 7,900 |
package objektwerks.types
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
// Small sealed hierarchy used as the fixture for the variance tests below.
sealed trait Canine
class Dog extends Canine
class Wolf extends Canine
/**
 * Demonstrates invariant, covariant, and contravariant type parameters using
 * the Canine hierarchy, plus the classic function-type variance
 * (contravariant input, covariant output).
 */
class TypeVarianceTest extends AnyFunSuite with Matchers {
  test("invariant") {
    // Invariant T: Vet[Dog] and Vet[Canine] are unrelated types.
    class Vet[T] {
      def heal[U](canine: T): T = canine
    }
    val canineVet = new Vet[Canine]
    canineVet.heal[Canine]( new Dog() ).isInstanceOf[Dog] shouldBe true
    canineVet.heal[Canine]( new Wolf() ).isInstanceOf[Wolf] shouldBe true

    val dogVet: Vet[Dog] = new Vet[Dog]
    dogVet.heal[Dog]( new Dog() ).isInstanceOf[Dog] shouldBe true

    val wolfVet: Vet[Wolf] = new Vet[Wolf]
    wolfVet.heal[Wolf]( new Wolf() ).isInstanceOf[Wolf] shouldBe true
  }

  test("covariant") {
    // Covariant +T: the method takes a supertype lower bound (S >: T) to stay sound.
    class Vet[+T] {
      def heal[S >: T](canine: S): S = canine
    }
    val canineVet = new Vet[Canine]
    canineVet.heal[Canine]( new Dog() ).isInstanceOf[Dog] shouldBe true
    canineVet.heal[Canine]( new Wolf() ).isInstanceOf[Wolf] shouldBe true

    val dogVet: Vet[Dog] = new Vet[Dog]
    dogVet.heal[Dog]( new Dog() ).isInstanceOf[Dog] shouldBe true

    val wolfVet: Vet[Wolf] = new Vet[Wolf]
    wolfVet.heal[Wolf]( new Wolf() ).isInstanceOf[Wolf] shouldBe true
  }

  test("contravariant") {
    // Contravariant -T: a Vet[Canine] can be used where a Vet[Dog] is expected.
    class Vet[-T] {
      def heal[S <: T](canine: S): S = canine
    }
    val canineVet = new Vet[Canine]
    canineVet.heal[Canine]( new Dog() ).isInstanceOf[Dog] shouldBe true
    canineVet.heal[Canine]( new Wolf() ).isInstanceOf[Wolf] shouldBe true

    val dogVet: Vet[Dog] = new Vet[Canine]
    dogVet.heal[Dog]( new Dog() ).isInstanceOf[Dog] shouldBe true

    val wolfVet: Vet[Wolf] = new Vet[Canine]
    wolfVet.heal[Wolf]( new Wolf() ).isInstanceOf[Wolf] shouldBe true
  }

  test("contravariant in, covariant out") {
    // Function-like types: contravariant in the argument, covariant in the result.
    trait Function[-V, +R] {
      def apply(value: V): R
    }
    val function = new Function[String, Option[Int]] {
      def apply(value: String): Option[Int] = value.toIntOption
    }
    val values = List("1", "2", "3", "four")
    // "four" fails to parse and is dropped by flatMap.
    values.flatMap(value => function(value)) shouldEqual List(1, 2, 3)
    values.flatMap(value => function(value)).sum shouldEqual 6
  }
} | objektwerks/scala | src/test/scala/objektwerks/types/TypeVarianceTest.scala | Scala | apache-2.0 | 2,178 |
package graphql.schema.marshalling
import common.InputUnmarshallerGenerator
import model._
import sangria.marshalling.FromInput
/**
 * Sangria FromInput unmarshallers mapping raw GraphQL input maps onto the
 * post/comment input case classes. Each one reads required fields by key and
 * casts to the expected runtime type — a missing or mistyped field therefore
 * fails at runtime with a cast/lookup error rather than at compile time.
 */
object Unmarshallers extends InputUnmarshallerGenerator {

  implicit val addPostInputUnmarshaller: FromInput[AddPostInput] = inputUnmarshaller {
    input =>
      AddPostInput(
        title = input("title").asInstanceOf[String],
        content = input("content").asInstanceOf[String]
      )
  }

  implicit val editPostInputUnmarshaller: FromInput[EditPostInput] = inputUnmarshaller {
    input =>
      EditPostInput(
        id = input("id").asInstanceOf[Int],
        title = input("title").asInstanceOf[String],
        content = input("content").asInstanceOf[String]
      )
  }

  implicit val addCommentInputUnmarshaller: FromInput[AddCommentInput] = inputUnmarshaller {
    input =>
      AddCommentInput(
        postId = input("postId").asInstanceOf[Int],
        content = input("content").asInstanceOf[String]
      )
  }

  implicit val editCommentInputUnmarshaller: FromInput[EditCommentInput] = inputUnmarshaller {
    input =>
      EditCommentInput(
        id = input("id").asInstanceOf[Int],
        postId = input("postId").asInstanceOf[Int],
        content = input("content").asInstanceOf[String]
      )
  }

  implicit val deleteCommentInputUnmarshaller: FromInput[DeleteCommentInput] = inputUnmarshaller {
    input =>
      DeleteCommentInput(
        id = input("id").asInstanceOf[Int],
        postId = input("postId").asInstanceOf[Int]
      )
  }
}
| sysgears/apollo-universal-starter-kit | modules/post/server-scala/src/main/scala/graphql/schema/marshalling/Unmarshallers.scala | Scala | mit | 1,514 |
package com.github.agourlay.cornichon.steps.wrapped
import cats.data.StateT
import cats.syntax.monoid._
import com.github.agourlay.cornichon.core._
import com.github.agourlay.cornichon.dsl.BlockScopedResource
/**
 * Wrapper step that runs the `nested` steps inside the lifecycle of a
 * BlockScopedResource: the resource is used around the nested run, and the
 * nested session/logs are merged back into the outer run state.
 */
case class WithBlockScopedResource(nested: List[Step], resource: BlockScopedResource) extends WrapperStep {

  val title = resource.openingTitle

  override val stateUpdate: StepState = StateT { runState =>
    resource.use(runState.nestedContext)(ScenarioRunner.runStepsShortCircuiting(nested, _)).map { resTuple =>
      val (results, (resourcedState, resourcedRes)) = resTuple
      val initialDepth = runState.depth
      val closingTitle = resource.closingTitle
      // Bracket the nested log stack with opening/closing title entries,
      // choosing failure or success instructions based on the nested outcome.
      val logStack = resourcedRes match {
        case Left(_) => FailureLogInstruction(closingTitle, initialDepth) +: resourcedState.logStack :+ failedTitleLog(initialDepth)
        case _       => SuccessLogInstruction(closingTitle, initialDepth) +: resourcedState.logStack :+ successTitleLog(initialDepth)
      }
      val completeSession = resourcedState.session.combine(results)
      // Manual nested merge: keep outer cleanup steps, fold in nested session and logs.
      (runState.withSession(completeSession).recordLogStack(logStack).registerCleanupSteps(runState.cleanupSteps), resourcedRes)
    }
  }
}
| agourlay/cornichon | cornichon-core/src/main/scala/com/github/agourlay/cornichon/steps/wrapped/WithBlockScopedResource.scala | Scala | apache-2.0 | 1,233 |
package im.actor.server.persist.contact
import im.actor.server.model.contact.UserContact
import slick.dbio.Effect.Write
import im.actor.server.db.ActorPostgresDriver.api._
import slick.profile.FixedSqlAction
/**
 * Shared Slick table shape for user-contact tables: composite primary key of
 * (owner, contact), optional local name, and a soft-delete flag with an index
 * covering the common "owner + not deleted" lookup.
 */
private[contact] abstract class UserContactBase[T](tag: Tag, tname: String) extends Table[T](tag, tname) {
  def ownerUserId = column[Int]("owner_user_id", O.PrimaryKey)
  def contactUserId = column[Int]("contact_user_id", O.PrimaryKey)
  def name = column[Option[String]]("name")
  def isDeleted = column[Boolean]("is_deleted", O.Default(false))

  def idx = index("idx_user_contacts_owner_user_id_is_deleted", (ownerUserId, isDeleted))
}
/** Concrete mapping of the shared shape onto the `user_contacts` table and the UserContact model. */
final class UserContactTable(tag: Tag) extends UserContactBase[UserContact](tag, "user_contacts") {
  def * = (ownerUserId, contactUserId, name, isDeleted) <> (UserContact.tupled, UserContact.unapply)
}
/**
 * Query repository for user contacts. Most reads go through `active`
 * (soft-deleted rows excluded); hot paths use Slick compiled queries.
 * Deletion is a soft delete (sets is_deleted), never a row removal.
 */
object UserContactRepo {
  val contacts = TableQuery[UserContactTable]

  // Contacts that have not been soft-deleted.
  val active = contacts.filter(_.isDeleted === false)

  private def byOwnerUserIdNotDeleted(ownerUserId: Rep[Int]) =
    active.filter(_.ownerUserId === ownerUserId)

  private val byOwnerUserIdNotDeletedC = Compiled(byOwnerUserIdNotDeleted _)

  private val countC = Compiled { (userId: Rep[Int]) ⇒
    byOwnerUserIdNotDeleted(userId).length
  }

  def byPKNotDeleted(ownerUserId: Rep[Int], contactUserId: Rep[Int]) =
    contacts.filter(c ⇒ c.ownerUserId === ownerUserId && c.contactUserId === contactUserId && c.isDeleted === false)

  val nameByPKNotDeletedC = Compiled(
    (ownerUserId: Rep[Int], contactUserId: Rep[Int]) ⇒
      byPKNotDeleted(ownerUserId, contactUserId) map (_.name)
  )

  def byContactUserId(contactUserId: Rep[Int]) = active.filter(_.contactUserId === contactUserId)

  val byContactUserIdC = Compiled(byContactUserId _)

  def byPKDeleted(ownerUserId: Int, contactUserId: Int) =
    contacts.filter(c ⇒ c.ownerUserId === ownerUserId && c.contactUserId === contactUserId && c.isDeleted === true)

  private def existsC = Compiled { (ownerUserId: Rep[Int], contactUserId: Rep[Int]) ⇒
    byPKNotDeleted(ownerUserId, contactUserId).exists
  }

  def fetchAll = active.result

  def exists(ownerUserId: Int, contactUserId: Int) = existsC((ownerUserId, contactUserId)).result

  def find(ownerUserId: Int, contactUserId: Int): DBIO[Option[UserContact]] =
    byPKNotDeleted(ownerUserId, contactUserId).result.headOption

  def count(ownerUserId: Int) = countC(ownerUserId).result

  def findIds(ownerUserId: Int, contactUserIds: Set[Int]) =
    byOwnerUserIdNotDeletedC.applied(ownerUserId).filter(_.contactUserId inSet contactUserIds).map(_.contactUserId).result

  def findOwners(contactUserId: Int) = byContactUserIdC(contactUserId).result

  def findNotDeletedIds(ownerUserId: Int) =
    byOwnerUserIdNotDeleted(ownerUserId).map(_.contactUserId).result

  def findName(ownerUserId: Int, contactUserId: Int) =
    nameByPKNotDeletedC((ownerUserId, contactUserId)).result

  // Includes soft-deleted contacts (no is_deleted filter).
  def findContactIdsAll(ownerUserId: Int) =
    contacts.filter(c ⇒ c.ownerUserId === ownerUserId).map(_.contactUserId).result

  def findContactIdsActive(ownerUserId: Int) =
    byOwnerUserIdNotDeleted(ownerUserId).map(_.contactUserId).distinct.result

  // Updates the local name regardless of the is_deleted flag.
  def updateName(ownerUserId: Int, contactUserId: Int, name: Option[String]): FixedSqlAction[Int, NoStream, Write] = {
    contacts.filter(c ⇒ c.ownerUserId === ownerUserId && c.contactUserId === contactUserId).map(_.name).update(name)
  }

  // Soft delete: flips is_deleted on the active row.
  def delete(ownerUserId: Int, contactUserId: Int) =
    byPKNotDeleted(ownerUserId, contactUserId).map(_.isDeleted).update(true)

  def insertOrUpdate(contact: UserContact) =
    contacts.insertOrUpdate(contact)
}
| EaglesoftZJ/actor-platform | actor-server/actor-persist/src/main/scala/im/actor/server/persist/contact/UserContactRepo.scala | Scala | agpl-3.0 | 3,649 |
package org.apache.mesos.chronos.scheduler.jobs
import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}
import java.util.concurrent.{TimeUnit, Executors, Future}
import java.util.logging.{Level, Logger}
import akka.actor.ActorSystem
import org.apache.mesos.chronos.scheduler.graph.JobGraph
import org.apache.mesos.chronos.scheduler.mesos.MesosDriverFactory
import org.apache.mesos.chronos.scheduler.state.PersistenceStore
import com.google.common.util.concurrent.AbstractIdleService
import com.google.inject.Inject
import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.recipes.leader.{LeaderLatch, LeaderLatchListener}
import org.apache.mesos.Protos.TaskStatus
import org.joda.time.format.DateTimeFormat
import org.joda.time.{DateTime, DateTimeZone, Duration, Period}
import scala.annotation.tailrec
import scala.collection.mutable.ListBuffer
/**
* Constructs concrete tasks given a list of schedules and a global scheduleHorizon.
* The schedule horizon represents the advance-time the schedule is constructed.
*
* A lot of the methods in this class are broken into small pieces to allow for better unit testing.
* @author Florian Leibert (flo@leibert.de)
*/
class JobScheduler @Inject()(val scheduleHorizon: Period,
val taskManager: TaskManager,
val jobGraph: JobGraph,
val persistenceStore: PersistenceStore,
val mesosDriver: MesosDriverFactory = null,
val curator: CuratorFramework = null,
val leaderLatch: LeaderLatch = null,
val leaderPath: String = null,
val jobsObserver: JobsObserver.Observer,
val failureRetryDelay: Long = 60000,
val disableAfterFailures: Long = 0,
val jobMetrics: JobMetrics)
//Allows us to let Chaos manage the lifecycle of this class.
extends AbstractIdleService {
// Single-threaded pool running the main scheduler loop; its Future is kept for cancellation.
val localExecutor = Executors.newFixedThreadPool(1)
val schedulerThreadFuture = new AtomicReference[Future[_]]
// Dedicated thread for leader-election work.
val leaderExecutor = Executors.newSingleThreadExecutor()
//This acts as the lock
val lock = new Object
val actorSystem = ActorSystem()
val akkaScheduler = actorSystem.scheduler
//TODO(FL): Take some methods out of this class.
val running = new AtomicBoolean(false)
val leader = new AtomicBoolean(false)
private[this] val log = Logger.getLogger(getClass.getName)
// Active schedule streams, one per enabled schedule-based job; mutated under `lock`.
var streams: List[ScheduleStream] = List()
def isLeader: Boolean = leader.get()
/**
 * Returns the id of the current leader as reported by the leader latch.
 * NOTE: any failure talking to ZooKeeper is treated as fatal — the process
 * exits; the trailing `null` only satisfies the return type.
 */
def getLeader: String = {
  try {
    leaderLatch.getLeader.getId
  } catch {
    case e: Exception =>
      log.log(Level.SEVERE, "Error trying to talk to zookeeper. Exiting.", e)
      System.exit(1)
      null
  }
}
/**
 * True when the job backing `taskId` is registered and marked async;
 * false for unknown jobs.
 * NOTE(review): the pattern val throws a MatchError if the id does not match
 * TaskUtils.taskIdPattern — presumably callers pass validated ids; confirm.
 */
def isTaskAsync(taskId: String): Boolean = {
  val TaskUtils.taskIdPattern(_, _, jobName, _) = taskId
  jobGraph.lookupVertex(jobName) match {
    case Some(baseJob: BaseJob) => baseJob.async
    case _ => false
  }
}
/**
 * Update job definition. For schedule-based jobs the existing schedule stream
 * is dropped and (unless disabled) replaced by one rebuilt from the new
 * definition; the job graph is then updated via replaceJob. Renaming is not
 * supported — old and new definitions must share a name.
 * @param oldJob job definition
 * @param newJob new job definition
 */
def updateJob(oldJob: StoredJob, newJob: StoredJob) {
  //TODO(FL): Ensure we're using job-ids rather than relying on jobs names for identification.
  assert(newJob.name == oldJob.name, "Renaming jobs is currently not supported!")
  newJob match {
    case scheduleBasedJob: InternalScheduleBasedJob =>
      lock.synchronized {
        if (!scheduleBasedJob.disabled) {
          // Rebuild this job's stream and re-run an iteration with it included.
          JobUtils.makeScheduleStream(scheduleBasedJob, DateTime.now(DateTimeZone.UTC)) foreach { newSchedule =>
            log.info("updating ScheduleBasedJob:" + scheduleBasedJob.toString)
            val tmpStreams = streams.filter(_.jobName != scheduleBasedJob.name)
            streams = iteration(DateTime.now(DateTimeZone.UTC), List(newSchedule) ++ tmpStreams)
          }
        } else {
          // Disabled job: just remove its stream from the active set.
          log.info("updating ScheduleBasedJob:" + scheduleBasedJob.toString)
          val tmpStreams = streams.filter(_.jobName != scheduleBasedJob.name)
          streams = iteration(DateTime.now(DateTimeZone.UTC), tmpStreams)
        }
      }
    case _ =>
  }
  replaceJob(oldJob, newJob)
}
/**
 * Clears all schedule streams and the job graph; when `purgeQueue` is set,
 * also flushes any locally queued tasks.
 */
def reset(purgeQueue: Boolean = false) {
  lock.synchronized {
    streams = List()
    jobGraph.reset()
    if (purgeQueue) {
      log.warning("Purging locally queued tasks!")
      taskManager.flush()
    }
  }
}
/** Convenience overload: registers a single job by delegating to the list variant. */
def registerJob(job: StoredJob, persist: Boolean, dateTime: DateTime) {
  registerJob(job :: Nil, persist, dateTime)
}
/**
 * This method should be used to register jobs. Splits the incoming jobs into
 * schedule-based and dependency-based groups: schedule-based jobs get a
 * schedule stream (unless disabled) and a graph vertex; dependency-based jobs
 * get a vertex plus edges from each parent. Optionally persists each job.
 * Must only be called on the leader.
 */
def registerJob(jobs: List[StoredJob], persist: Boolean = false, dateTime: DateTime = DateTime.now(DateTimeZone.UTC)) {
  lock.synchronized {
    require(isLeader, "Cannot register a job with this scheduler, not the leader!")
    val scheduleBasedJobs = ListBuffer[InternalScheduleBasedJob]()
    val dependencyBasedJobs = ListBuffer[DependencyBasedJob]()

    jobs.foreach {
      case x: DependencyBasedJob =>
        dependencyBasedJobs += x
      case x: InternalScheduleBasedJob =>
        scheduleBasedJobs += x
    }

    if (scheduleBasedJobs.nonEmpty) {
      // Disabled jobs are registered in the graph but get no schedule stream.
      val newStreams = scheduleBasedJobs.filter(!_.disabled).flatMap(JobUtils.makeScheduleStream(_, dateTime))
      scheduleBasedJobs.foreach({
        job =>
          jobGraph.addVertex(job)
          if (persist) {
            log.info("Persisting job:" + job.name)
            persistenceStore.persistJob(job)
          }
      })
      if (newStreams.nonEmpty) {
        addSchedule(dateTime, newStreams.toList)
      }
    }

    if (dependencyBasedJobs.nonEmpty) {
      dependencyBasedJobs.foreach({
        job =>
          val parents = jobGraph.parentJobs(job)
          log.info("Job parent: [ %s ], name: %s, command: %s".format(job.parents.mkString(","), job.name, job.command))
          jobGraph.addVertex(job)
          // Wire an edge from every parent to this job.
          parents.foreach(x => jobGraph.addDependency(x.name, job.name))
          if (persist) {
            log.info("Persisting job:" + job.name)
            persistenceStore.persistJob(job)
          }
      })
    }
  }
}
/**
 * Removes a job: detaches it from dependent children (children with more
 * than one parent are updated to drop this parent), removes its graph
 * vertex and schedule stream, cancels/removes its queued tasks, notifies
 * observers, and optionally deletes it from the persistence store.
 * Must only be called on the leader.
 */
def deregisterJob(job: StoredJob, persist: Boolean = false) {
  require(isLeader, "Cannot deregister a job with this scheduler, not the leader!")
  lock.synchronized {
    log.info("Removing vertex")
    // Children that depend on other parents too: rewrite their parent list
    // to exclude this job before the vertex disappears.
    jobGraph.getChildren(job.name)
      .map(x => jobGraph.lookupVertex(x).get)
      .filter {
        case j: DependencyBasedJob => true
        case _ => false
      }
      .map(x => x.asInstanceOf[DependencyBasedJob])
      .filter(x => x.parents.size > 1)
      .foreach({
        childJob =>
          log.info("Updating job %s".format(job.name))
          val copy = childJob.copy(parents = childJob.parents.filter(_ != job.name))
          updateJob(childJob, copy)
      })
    jobGraph.removeVertex(job)

    job match {
      case scheduledJob: InternalScheduleBasedJob =>
        removeSchedule(scheduledJob)
        log.info("Removed schedule based job")
        log.info("Size of streams:" + streams.size)
      case dependencyBasedJob: DependencyBasedJob =>
        //TODO(FL): Check if there are empty edges.
        log.info("Job removed from dependency graph.")
    }

    taskManager.cancelTasks(job)
    taskManager.removeTasks(job)
    jobsObserver.apply(JobRemoved(job))

    if (persist) {
      log.info("Removing job from underlying state abstraction:" + job.name)
      persistenceStore.removeJob(job)
    }
  }
}
/**
 * Reacts to a task entering the started state: validates the task id version,
 * resolves the owning job, notifies observers, and for dependency-based jobs
 * resets the job's dependency invocation counters. Unknown or stale tasks are
 * logged and ignored.
 */
def handleStartedTask(taskStatus: TaskStatus) {
  val taskId = taskStatus.getTaskId.getValue
  if (!TaskUtils.isValidVersion(taskId)) {
    log.warning("Found old or invalid task, ignoring!")
    return
  }
  val jobName = TaskUtils.getJobNameForTaskId(taskId)
  val jobOption = jobGraph.lookupVertex(jobName)
  if (jobOption.isEmpty) {
    log.warning("Job '%s' no longer registered.".format(jobName))
  } else {
    val job = jobOption.get
    val (_, _, attempt, _) = TaskUtils.parseTaskId(taskId)
    jobsObserver.apply(JobStarted(job, taskStatus, attempt))

    job match {
      case j: DependencyBasedJob =>
        jobGraph.resetDependencyInvocations(j.name)
      case _ =>
    }
  }
}
/**
 * Takes care of follow-up actions for a finished task, i.e. update the job schedule in the persistence store or
 * launch tasks for dependent jobs
 *
 * @param taskStatus the terminal status update
 * @param taskDate   overrides "now" when enqueuing dependent tasks; only used by unit tests
 */
def handleFinishedTask(taskStatus: TaskStatus, taskDate: Option[DateTime] = None) {
// `taskDate` is purely for unit testing
val taskId = taskStatus.getTaskId.getValue
if (!TaskUtils.isValidVersion(taskId)) {
log.warning("Found old or invalid task, ignoring!")
return
}
// The task reached a terminal state, so drop it from the persistence store.
persistenceStore.removeTask(taskId)
val jobName = TaskUtils.getJobNameForTaskId(taskId)
val jobOption = jobGraph.lookupVertex(jobName)
if (jobOption.isEmpty) {
log.warning("Job '%s' no longer registered.".format(jobName))
} else {
val (_, start, attempt, _) = TaskUtils.parseTaskId(taskId)
// Record wall-clock duration and a success in the job metrics.
jobMetrics.updateJobStat(jobName, timeMs = DateTime.now(DateTimeZone.UTC).getMillis - start)
jobMetrics.updateJobStatus(jobName, success = true)
val job = jobOption.get
jobsObserver.apply(JobFinished(job, taskStatus, attempt))
// Bump the success counter and reset the consecutive-error counter.
val newJob = job match {
case job: InternalScheduleBasedJob =>
job.copy(successCount = job.successCount + 1,
errorsSinceLastSuccess = 0,
lastSuccess = DateTime.now(DateTimeZone.UTC).toString)
case job: DependencyBasedJob =>
job.copy(successCount = job.successCount + 1,
errorsSinceLastSuccess = 0,
lastSuccess = DateTime.now(DateTimeZone.UTC).toString)
case _ =>
throw new IllegalArgumentException("Cannot handle unknown task type")
}
replaceJob(job, newJob)
processDependencies(jobName, taskDate)
log.fine("Cleaning up finished task '%s'".format(taskId))
/* TODO(FL): Fix.
Cleanup potentially exhausted job. Note, if X tasks were fired within a short period of time (~ execution time
of the job, the first returning Finished-task may trigger deletion of the job! This is a known limitation and
needs some work but should only affect long running frequent finite jobs or short finite jobs with a tiny pause
in between */
job match {
case scheduleBasedJob: InternalScheduleBasedJob =>
// Disable schedule-based jobs whose recurrence counter has reached zero.
val streamForJob = streams.find(_.jobName == job.name)
streamForJob.foreach { stream =>
stream.schedule.recurrences.foreach { recurRemaining =>
if (recurRemaining == 0) {
log.info("Disabling job that reached a zero-recurrence count!")
val disabledJob = scheduleBasedJob.copy(disabled = true)
jobsObserver.apply(JobDisabled(job, """Job '%s' has exhausted all of its recurrences and has been disabled.
|Please consider either removing your job, or updating its schedule and re-enabling it.
""".stripMargin.format(job.name)))
replaceJob(scheduleBasedJob, disabledJob)
}
}
}
case _ =>
}
}
}
/**
 * Atomically swaps `oldJob` for `newJob` in the job graph and persists the new version.
 */
def replaceJob(oldJob: StoredJob, newJob: StoredJob) {
lock.synchronized {
jobGraph.replaceVertex(oldJob, newJob)
persistenceStore.persistJob(newJob)
}
}
/**
 * Enqueues tasks for every dependent job of `jobName` that is ready to run
 * (all of its parents have completed) and is not disabled.
 *
 * @param jobName  name of the job whose dependents should be considered
 * @param taskDate timestamp to stamp the new tasks with; falls back to "now"
 *                 when absent (an explicit value is only supplied by tests)
 */
private def processDependencies(jobName: String, taskDate: Option[DateTime]) {
  val dependents = jobGraph.getExecutableChildren(jobName)
  if (dependents.isEmpty) {
    log.fine("%s does not have any ready dependents.".format(jobName))
  } else {
    log.fine("%s has dependents: %s .".format(jobName, dependents.mkString(",")))
    //TODO(FL): Ensure that the job for the given dependent exists. Lock.
    dependents.foreach { dependentName =>
      val dependentJob = jobGraph.getJobForName(dependentName).get
      if (!dependentJob.disabled) {
        // Default to the current UTC time when no explicit task date was supplied.
        val date = taskDate.getOrElse(DateTime.now(DateTimeZone.UTC))
        taskManager.enqueue(TaskUtils.getTaskId(dependentJob, date), dependentJob.highPriority)
        // Fixed typo in the log message ("depedent" -> "dependent").
        log.fine("Enqueued dependent job." + dependentName)
      }
    }
  }
}
/**
 * Handles a failed task: retries it after a delay while attempts remain and the job has
 * succeeded at least as recently as it last failed; otherwise records the error,
 * possibly disables the job, and notifies observers.
 */
def handleFailedTask(taskStatus: TaskStatus) {
val taskId = taskStatus.getTaskId.getValue
if (!TaskUtils.isValidVersion(taskId)) {
log.warning("Found old or invalid task, ignoring!")
} else {
val (jobName, _, attempt, _) = TaskUtils.parseTaskId(taskId)
log.warning("Task of job: %s failed.".format(jobName))
jobGraph.lookupVertex(jobName) match {
case Some(job) =>
jobsObserver.apply(JobFailed(Right(job), taskStatus, attempt))
val hasAttemptsLeft: Boolean = attempt < job.retries
// "Recent success" means the last success is at least as recent as the last error.
val hadRecentSuccess: Boolean = try {
job.lastError.length > 0 && job.lastSuccess.length > 0 &&
(DateTime.parse(job.lastSuccess).getMillis - DateTime.parse(job.lastError).getMillis) >= 0
} catch {
case ex: IllegalArgumentException =>
log.warning(s"Couldn't parse last run date from ${job.name}")
false
case _: Exception => false
}
if (hasAttemptsLeft && (job.lastError.length == 0 || hadRecentSuccess)) {
log.warning("Retrying job: %s, attempt: %d".format(jobName, attempt))
/* Schedule the retry up to 60 seconds in the future */
val delayDuration = new Duration(failureRetryDelay)
val newTaskId = TaskUtils.getTaskId(job, DateTime.now(DateTimeZone.UTC)
.plus(delayDuration), attempt + 1)
val delayedTask = new Runnable {
def run() {
log.info(s"Enqueuing failed task $newTaskId")
taskManager.persistTask(newTaskId, job)
taskManager.enqueue(newTaskId, job.highPriority)
}
}
implicit val executor = actorSystem.dispatcher
akkaScheduler.scheduleOnce(
delay = scala.concurrent.duration.Duration(delayDuration.getMillis, TimeUnit.MILLISECONDS),
runnable = delayedTask)
} else {
// Out of retries (or no success since the last error): count the failure.
val disableJob =
(disableAfterFailures > 0) && (job.errorsSinceLastSuccess + 1 >= disableAfterFailures)
val lastErrorTime = DateTime.now(DateTimeZone.UTC)
val newJob = {
job match {
case job: InternalScheduleBasedJob =>
job.copy(errorCount = job.errorCount + 1,
errorsSinceLastSuccess = job.errorsSinceLastSuccess + 1,
lastError = lastErrorTime.toString, disabled = disableJob)
case job: DependencyBasedJob =>
job.copy(errorCount = job.errorCount + 1,
errorsSinceLastSuccess = job.errorsSinceLastSuccess + 1,
lastError = lastErrorTime.toString, disabled = disableJob)
}
}
updateJob(job, newJob)
// Soft-error jobs still trigger their dependents even when they fail.
if (job.softError) processDependencies(jobName, Option(lastErrorTime))
// Handle failure by either disabling the job and notifying the owner,
// or just notifying the owner.
if (disableJob) {
log.warning("Job failed beyond retries! Job will now be disabled after "
+ newJob.errorsSinceLastSuccess + " failures (disableAfterFailures=" + disableAfterFailures + ").")
val msg = "\\nFailed at '%s', %d failures since last success\\nTask id: %s\\n"
.format(DateTime.now(DateTimeZone.UTC), newJob.errorsSinceLastSuccess, taskId)
jobsObserver.apply(JobDisabled(job, TaskUtils.appendSchedulerMessage(msg, taskStatus)))
} else {
log.warning("Job failed beyond retries!")
jobsObserver.apply(JobRetriesExhausted(job, taskStatus, attempt))
}
jobMetrics.updateJobStatus(jobName, success = false)
}
case None =>
log.warning("Could not find job for task: %s Job may have been deleted while task was in flight!"
.format(taskId))
}
}
}
/**
 * Task has been killed. Do appropriate cleanup.
 * Possible reasons for a task being killed:
 *  - kill was invoked via the task manager API
 *  - the job was deleted
 */
def handleKilledTask(taskStatus: TaskStatus) {
  val taskId = taskStatus.getTaskId.getValue
  if (TaskUtils.isValidVersion(taskId)) {
    // Killed tasks are surfaced to observers as failures.
    val (jobName, _, attempt, _) = TaskUtils.parseTaskId(taskId)
    val registeredJob = jobGraph.lookupVertex(jobName)
    jobsObserver.apply(JobFailed(registeredJob.toRight(jobName), taskStatus, attempt))
  } else {
    log.warning("Found old or invalid task, ignoring!")
  }
}
/**
 * Iterates through the stream for the given DateTime and a list of schedules, removing old
 * schedules and acting on the available schedules.
 *
 * @param dateTime  point in time to evaluate each schedule against
 * @param schedules schedule streams to be processed
 * @return the surviving, advanced schedule streams
 */
def iteration(dateTime: DateTime, schedules: List[ScheduleStream]): List[ScheduleStream] = {
  log.info("Checking schedules with time horizon:%s".format(scheduleHorizon.toString))
  // Advance every stream first, then discard the ones that are exhausted.
  val advanced = schedules.map(stream => scheduleStream(dateTime, stream))
  removeOldSchedules(advanced)
}
/**
 * Main scheduler loop: repeatedly evaluates all schedule streams under the lock, then
 * sleeps for one scheduling horizon. Runs until `running` is cleared (e.g. on
 * leadership loss or shutdown).
 *
 * @param dateSupplier supplies "now"; injectable for testing
 */
def run(dateSupplier: () => DateTime) {
log.info("Starting run loop for JobScheduler. CurrentTime: %s".format(DateTime.now(DateTimeZone.UTC)))
while (running.get) {
lock.synchronized {
log.info("Size of streams: %d".format(streams.size))
streams = iteration(dateSupplier(), streams)
}
Thread.sleep(scheduleHorizon.toStandardDuration.getMillis)
//TODO(FL): This can be inaccurate if the horizon >= 1D on daylight savings day and when leap seconds are introduced.
}
log.info("No longer running.")
}
/**
 * Given a stream and a DateTime(@see org.joda.DateTime), this method returns a 2-tuple with a ScheduleTask and
 * a clipped schedule stream in case that the ScheduleTask was not none. Returns no task and the input stream,
 * if nothing needs scheduling within the time horizon.
 * @param now time to start iteration with
 * @param stream schedule stream
 * @return a pair of (task to schedule, remaining stream); (None, None) terminates the stream
 */
@tailrec
final def next(now: DateTime, stream: ScheduleStream): (Option[ScheduledTask], Option[ScheduleStream]) = {
val (jobName, schedule) = stream.head
val nextDate = schedule.invocationTime
log.info("Calling next for stream: %s, jobname: %s".format(stream.schedule, jobName))
assert(jobName != null, "BaseJob cannot be null")
var jobOption: Option[StoredJob] = None
//TODO(FL): wrap with lock.
try {
jobOption = jobGraph.lookupVertex(jobName)
if (jobOption.isEmpty) {
log.warning("-----------------------------------")
log.warning("Warning, no job found in graph for:" + jobName)
log.warning("-----------------------------------")
//This might happen during loading stage in case of failover.
return (None, None)
}
} catch {
case ex: IllegalArgumentException =>
log.warning(s"Corrupt job in stream for $jobName")
}
log.finest("Recurrences: '%d', next date: '%s'".format(schedule.recurrences.getOrElse(-1L), stream.schedule))
//nextDate has to be > (now - epsilon) & < (now + timehorizon) , for it to be scheduled!
schedule.next match {
case None =>
log.info("Finished all recurrences of job '%s'".format(jobName))
//We're not removing the job here because it may still be required if a pending task fails.
(None, None)
case Some(nextScheduled) =>
val job = jobOption.get
// The scheduling window is [now - epsilon, now + scheduleHorizon].
val scheduleWindowBegin = now.minus(job.epsilon)
val scheduleWindowEnd = now.plus(scheduleHorizon)
if (nextDate.isAfter(scheduleWindowBegin)
&& nextDate.isBefore(scheduleWindowEnd)) {
log.info("Task ready for scheduling: %s in range [%s => %s]".format(nextDate, scheduleWindowBegin, scheduleWindowEnd))
//TODO(FL): Rethink passing the dispatch queue all the way down to the ScheduledTask.
val task = new ScheduledTask(TaskUtils.getTaskId(job, nextDate), nextDate, job, taskManager)
return (Some(task), stream.tail)
}
// Next instance is too far in the future
// Needs to be scheduled at a later time, after schedule horizon.
if (!nextDate.isBefore(now)) {
return (None, Some(stream))
}
// Next instance is too far in the past (beyond epsilon)
//TODO(FL): Think about the semantics here and see if it always makes sense to skip ahead of missed schedules.
log.fine("No need to work on schedule: '%s' yet".format(nextDate))
jobsObserver.apply(JobSkipped(job, nextDate))
val tail = stream.tail
if (tail.isEmpty) {
//TODO(FL): Verify that this can go.
persistenceStore.removeJob(job)
log.warning("\\n\\nWARNING\\n\\nReached the tail of the streams which should have been never reached \\n\\n")
(None, None)
} else {
log.info("tail: " + tail.get.schedule + " now: " + now)
// Skip the missed occurrence and recurse on the remainder of the stream.
next(now, tail.get)
}
}
}
/** Drops the schedule stream belonging to the given job, if one exists. */
def removeSchedule(deletedStream: BaseJob) {
  lock.synchronized {
    log.fine("Removing schedules: ")
    val remaining = streams.filterNot(_.jobName == deletedStream.name)
    streams = remaining
    log.fine("Size of streams: %d".format(remaining.size))
  }
}
//Begin Service interface
/**
 * Service interface: starts the leader-election latch. The listener flips the `leader`
 * flag and triggers onElected/onDefeated when leadership changes.
 */
override def startUp() {
assert(!running.get, "This scheduler is already running!")
log.info("Trying to become leader.")
leaderLatch.addListener(new LeaderLatchListener {
override def notLeader(): Unit = {
leader.set(false)
onDefeated()
}
override def isLeader(): Unit = {
leader.set(true)
onElected()
}
}, leaderExecutor)
leaderLatch.start()
}
/** Service interface: stops the run loop and relinquishes leadership. */
override def shutDown() {
running.set(false)
log.info("Shutting down job scheduler")
// NOTIFY_LEADER makes the latch invoke notLeader() on this node before closing.
leaderLatch.close(LeaderLatch.CloseMode.NOTIFY_LEADER)
leaderExecutor.shutdown()
}
//Begin Leader interface, which is required for CandidateImpl.
/**
 * Called when leadership is lost: stops the Mesos driver and the run loop, clears the
 * in-memory job graph and cancels the background scheduler thread.
 */
def onDefeated() {
mesosDriver.close()
log.info("Defeated. Not the current leader.")
running.set(false)
jobGraph.reset() // So we can rebuild it later.
schedulerThreadFuture.get.cancel(true)
}
/**
 * Called when this node wins leader election: reloads tasks and jobs from the
 * persistence store, starts the background scheduling thread and the Mesos driver.
 * Exits the process if state cannot be loaded.
 */
def onElected() {
log.info("Elected as leader.")
running.set(true)
lock.synchronized {
try {
//It's important to load the tasks first, otherwise a job that's due will trigger a task right away.
log.info("Loading tasks")
TaskUtils.loadTasks(taskManager, persistenceStore)
log.info("Loading jobs")
JobUtils.loadJobs(this, persistenceStore)
} catch {
case e: Exception =>
// Failing to restore state is unrecoverable; exit so another node can take over.
log.log(Level.SEVERE, "Loading tasks or jobs failed. Exiting.", e)
System.exit(1)
}
}
val jobScheduler = this
//Consider making this a background thread or control via an executor.
val f = localExecutor.submit(
new Thread() {
override def run() {
log.info("Running background thread")
val dateSupplier = () => {
DateTime.now(DateTimeZone.UTC)
}
jobScheduler.run(dateSupplier)
}
})
schedulerThreadFuture.set(f)
log.info("Starting chronos driver")
mesosDriver.start()
}
// Generates a new ScheduleStream based on a DateTime and a ScheduleStream. Side effects of this method
// are that a new Job may be persisted in the underlying persistence store and a task might get dispatched.
// Recurses until no further occurrence falls within the scheduling horizon.
@tailrec
private final def scheduleStream(now: DateTime, s: ScheduleStream): Option[ScheduleStream] = {
val (taskOption, stream) = next(now, s)
if (taskOption.isEmpty) {
stream
} else {
val encapsulatedJob = taskOption.get.job
log.info("Scheduling:" + taskOption.get.job.name)
// Persist and enqueue the task ahead of its due time; see the TODO below for caveats.
taskManager.scheduleDelayedTask(taskOption.get, taskManager.getMillisUntilExecution(taskOption.get.due), persist = true)
/*TODO(FL): This needs some refactoring. Ideally, the task should only be persisted once it has been submitted
to chronos, however if we were to do this with the current design, there could be missed tasks if
the scheduler went down before having fired off the jobs, since we're scheduling ahead of time.
Instead we persist the tasks right away, which also has the disadvantage of us maybe executing a job
twice IFF the scheduler goes down after the jobs have been submitted to chronos and stored in the queue
and us still being unavailable before the failover timeout. Thus we set the failover timeout to one
week. This means we should receive a chronos message of a successful task as long as we're not down for
more than a week for the above mentioned scenario.
E.g. Schedule 5seconds into the future
j1 -> R10/20:00:00/PT1S
19:00:56: queue(j1t1, j1t2, j1t3, j1t4, j1t5)
19:00:56: persist(R5/20:00:05/PT1S)
19:00:56: persist(j1t1, j1t2, j1t3, j1t4, j1t5)
19:00:57: DOWN
19:00:58: UP
...
*/
/* TODO(FL): The invocation count only represents the number of job invocations, not the number of successful
executions. When a scheduler starts up, it needs to verify that there are no pending tasks.
This isn't really transactional but should be sufficiently reliable for most usecases. To outline why it is not
really transactional. To fix this, we need to add a new state into ZK that stores the successful tasks.
*/
encapsulatedJob match {
case job: InternalScheduleBasedJob =>
// Persist the advanced schedule so a failover resumes from the right occurrence.
val updatedJob = job.copy(scheduleData = stream.get.schedule)
log.info("Saving updated job:" + updatedJob)
persistenceStore.persistJob(updatedJob)
jobGraph.replaceVertex(encapsulatedJob, updatedJob)
case _ =>
log.warning(s"Job ${encapsulatedJob.name} is not a scheduled job!")
}
if (stream.isEmpty) {
return stream
}
// Keep consuming occurrences that still fall within the horizon.
scheduleStream(now, stream.get)
}
}
//End Service interface
/** Keeps only the schedule streams that are still present and have more occurrences left. */
private def removeOldSchedules(scheduleStreams: List[Option[ScheduleStream]]): List[ScheduleStream] = {
  log.fine("Filtering out empty streams")
  scheduleStreams.collect {
    case Some(stream) if stream.tail.isDefined => stream
  }
}
/**
 * Adds a List of ScheduleStream and runs an iteration at the current time.
 * @param now time from which to evaluate schedule
 * @param newStreams new schedules to be evaluated
 */
private def addSchedule(now: DateTime, newStreams: List[ScheduleStream]) {
log.info("Adding schedule for time:" + now.toString(DateTimeFormat.fullTime()))
lock.synchronized {
log.fine("Starting iteration")
// New streams are prepended so they are evaluated first in this iteration.
streams = iteration(now, newStreams ++ streams)
log.fine("Size of streams: %d".format(streams.size))
}
}
//End Leader interface
}
| BoopBoopBeepBoop/chronos | src/main/scala/org/apache/mesos/chronos/scheduler/jobs/JobScheduler.scala | Scala | apache-2.0 | 27,711 |
package scoverage.report
import java.io.File
/**
 * Resolves absolute source-file paths relative to a set of configured source directories,
 * for use by coverage report writers.
 *
 * @param sourceDirectories candidate source roots; non-directories are ignored
 * @param outputDir         directory reports are written to (kept for subclasses)
 */
class BaseReportWriter(sourceDirectories: Seq[File], outputDir: File) {

  // Source paths in canonical form WITH trailing file separator
  private val formattedSourcePaths: Seq[String] =
    sourceDirectories filter (_.isDirectory) map (_.getCanonicalPath + File.separator)

  /** Converts absolute path to relative one if any of the source directories is it's parent.
    * If there is no parent directory, the path is returned unchanged (absolute).
    *
    * @param src absolute file path in canonical form
    */
  def relativeSource(src: String): String =
    relativeSource(src, formattedSourcePaths)

  private def relativeSource(src: String, sourcePaths: Seq[String]): String = {
    // We need the canonical path for the given src because our formattedSourcePaths are canonical
    val canonicalSrc = new File(src).getCanonicalPath
    val sourceRoot: Option[String] =
      sourcePaths.find(sourcePath => canonicalSrc.startsWith(sourcePath))
    sourceRoot match {
      // Strip only the leading root. The previous `replace(path, "")` removed *every*
      // occurrence of the root string, corrupting paths that repeat it
      // (e.g. root "/a/b/" and file "/a/b/a/b/X.scala" lost the inner "a/b/").
      case Some(path: String) => canonicalSrc.stripPrefix(path)
      case _ =>
        val fmtSourcePaths: String = sourcePaths.mkString("'", "', '", "'")
        throw new RuntimeException(
          s"No source root found for '$canonicalSrc' (source roots: $fmtSourcePaths)"
        );
    }
  }
}
| scoverage/scalac-scoverage-plugin | scalac-scoverage-plugin/src/main/scala/scoverage/report/BaseReportWriter.scala | Scala | apache-2.0 | 1,326 |
package tu.coreservice.action.way2think
import tu.model.knowledge.communication.{ContextHelper, ShortTermMemory}
import tu.model.knowledge.domain.ConceptNetwork
import tu.model.knowledge.{Constant, Resource, SolvedIssue}
import org.slf4j.LoggerFactory
import tu.model.knowledge.narrative.Narrative
/**
* @author adel chepkunov
* Date: 10.07.12
* Time: 7:00
*/
class SearchSolution extends Way2Think {
// NOTE(review): start/stop always return false — the meaning of the boolean is not
// visible here; confirm against the Way2Think contract.
def start() = false
def stop() = false
/**
 * Way2Think interface.
 * @param inputContext ShortTermMemory of all inbound parameters.
 * @return outputContext
 */
def apply(inputContext: ShortTermMemory) = SearchSolution(inputContext)
}
object SearchSolution {
val log = LoggerFactory.getLogger(this.getClass)
// NOTE(review): a single shared, mutable Solutions instance; concurrent apply() calls
// would race on `searcher.solutions` — confirm single-threaded use.
val searcher = new Solutions
/**
 * Searches the known solutions for a match against the last ConceptNetwork result of
 * the input context, and returns a fresh context carrying the outcome and a report.
 */
def apply(inputContext: ShortTermMemory): ShortTermMemory = {
searcher.solutions = inputContext.solutions
val res = inputContext.lastResult match {
case Some(cn: ConceptNetwork) => {
// Nothing to search for if the last result has no root concepts.
if (cn.rootNodes.size <= 0) {
return inputContext
}
searcher.search(cn, Nil)
}
case _ => None
}
log debug("search solution result={}", res)
val outputContext = ContextHelper(List[Resource](), this.getClass.getName + " result")
outputContext.lastResult = res
this.setReport(res, outputContext)
outputContext
}
/**
 * Records a found solution in the context's report.
 */
def setReport(solution: Option[SolvedIssue], context: ShortTermMemory): ShortTermMemory = {
solution match {
case Some(issue: SolvedIssue) => {
this.setResultsToReport(Constant.FOUND_SOLUTIONS, context, List[SolvedIssue](issue))
}
case None => {
// NOTE(review): returns a fresh empty context instead of the `context` argument;
// apply() ignores the return value, but verify this is intended.
ContextHelper(List[Resource](), this.getClass.getName)
}
}
}
/** Searches using whatever solutions were last assigned to the shared `searcher`. */
def search(target: ConceptNetwork): Option[SolvedIssue] = {
searcher.search(target, Nil)
}
/**
 * Sets concepts to result to report.
 * @param identifier the result identifier in ShortTermMemory.
 * @param context ShortTermMemory to set understood Concepts to report.
 * @param issues found solved issues to set in ShortTermMemory.
 * @return updated ShortTermMemory
 */
def setResultsToReport(identifier: String, context: ShortTermMemory, issues: List[SolvedIssue]): ShortTermMemory = {
val foundSolutions = Narrative[SolvedIssue](identifier, issues)
context.solutionsToReport = context.solutionsToReport + foundSolutions
context
}
}
} | keskival/2 | coreservice.action.way2think/src/main/scala/tu/coreservice/action/way2think/SearchSolution.scala | Scala | gpl-3.0 | 2,403 |
/*
* Copyright 2015 Foundational Development
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package pro.foundev.commons.benchmarking
/** Abstraction over a destination for benchmark output messages. */
trait OutputWriter {
// Presumably mirrors Predef.print (no trailing newline) — confirm with implementations.
def print(message: String): Unit
// Presumably mirrors Predef.println (appends a newline) — confirm with implementations.
def println(message: String):Unit
}
| rssvihla/datastax_work | spark_commons/commons/src/main/scala/pro/foundev/commons/benchmarking/OutputWriter.scala | Scala | apache-2.0 | 767 |
package slick.codegen
import slick.SlickException
import slick.ast.ColumnOption
import slick.{model => m}
import slick.model.ForeignKeyAction
import slick.relational.RelationalProfile
import slick.sql.SqlProfile
/** Base implementation for a Source code String generator */
abstract class AbstractSourceCodeGenerator(model: m.Model)
extends AbstractGenerator[String,String,String](model)
with StringGeneratorHelpers{
/** Generates code for the complete model (not wrapped in a package yet)
@group Basic customization overrides */
def code = {
"import slick.model.ForeignKeyAction\\n" +
( if(tables.exists(_.hlistEnabled)){
"import slick.collection.heterogeneous._\\n"+
"import slick.collection.heterogeneous.syntax._\\n"
} else ""
) +
( if(tables.exists(_.PlainSqlMapper.enabled)){
"// NOTE: GetResult mappers for plain SQL are only generated for tables where Slick knows how to map the types of all columns.\\n"+
"import slick.jdbc.{GetResult => GR}\\n"
} else ""
) +
(if(ddlEnabled){
"\\n/** DDL for all tables. Call .create to execute. */" +
(
if(tables.length > 5)
"\\nlazy val schema: profile.SchemaDescription = Array(" + tables.map(_.TableValue.name + ".schema").mkString(", ") + ").reduceLeft(_ ++ _)"
else if(tables.nonEmpty)
"\\nlazy val schema: profile.SchemaDescription = " + tables.map(_.TableValue.name + ".schema").mkString(" ++ ")
else
"\\nlazy val schema: profile.SchemaDescription = profile.DDL(Nil, Nil)"
) +
"\\n@deprecated(\\"Use .schema instead of .ddl\\", \\"3.0\\")"+
"\\ndef ddl = schema" +
"\\n\\n"
} else "") +
tables.map(_.code.mkString("\\n")).mkString("\\n\\n")
}
// Accessor name for the (i+1)-th tuple element, backtick-quoted if necessary.
protected def tuple(i: Int) = termName(s"_${i+1}")
abstract class TableDef(model: m.Table) extends super.TableDef(model){
// Renders a compound *type*: an HList type when enabled, otherwise delegates to compoundValue.
def compoundType(types: Seq[String]): String = {
if(hlistEnabled){
def mkHList(types: List[String]): String = types match {
case Nil => "HNil"
case e :: tail => s"HCons[$e," + mkHList(tail) + "]"
}
mkHList(types.toList)
}
else compoundValue(types)
}
// Renders a compound *value*: an HList literal, a single value, or a tuple (max 22 elements).
def compoundValue(values: Seq[String]): String = {
if(hlistEnabled) values.mkString(" :: ") + " :: HNil"
else if (values.size == 1) values.head
else if(values.size <= 22) s"""(${values.mkString(", ")})"""
else throw new Exception("Cannot generate tuple for > 22 columns, please set hlistEnable=true or override compound.")
}
// Constructor expression used in mapped projections; `.tupled` is only needed for arity > 1.
def factory = if(columns.size == 1) TableClass.elementType else s"${TableClass.elementType}.tupled"
def extractor = s"${TableClass.elementType}.unapply"
trait EntityTypeDef extends super.EntityTypeDef{
def code = {
val args = columns.map(c=>
c.default.map( v =>
s"${c.name}: ${c.exposedType} = $v"
).getOrElse(
s"${c.name}: ${c.exposedType}"
)
).mkString(", ")
if(classEnabled){
val prns = (parents.take(1).map(" extends "+_) ++ parents.drop(1).map(" with "+_)).mkString("")
(if(caseClassFinal) "final " else "") +
s"""case class $name($args)$prns"""
} else {
// Too many columns (or class generation disabled): emit a type alias plus a factory method.
s"""
type $name = $types
/** Constructor for $name providing default values if available in the database schema. */
def $name($args): $name = {
${compoundValue(columns.map(_.name))}
}
""".trim
}
}
}
trait PlainSqlMapperDef extends super.PlainSqlMapperDef{
def code = {
val positional = compoundValue(columnsPositional.map(c => (if(c.asOption || c.model.nullable)s"<<?[${c.rawType}]"else s"<<[${c.rawType}]")))
val dependencies = columns.map(_.exposedType).distinct.zipWithIndex.map{ case (t,i) => s"""e$i: GR[$t]"""}.mkString(", ")
val rearranged = compoundValue(desiredColumnOrder.map(i => if(hlistEnabled) s"r($i)" else tuple(i)))
def result(args: String) = if(mappingEnabled) s"$factory($args)" else args
val body =
if(autoIncLast && columns.size > 1){
s"""
val r = $positional
import r._
${result(rearranged)} // putting AutoInc last
""".trim
} else
result(positional)
s"""
implicit def ${name}(implicit $dependencies): GR[${TableClass.elementType}] = GR{
prs => import prs._
${indent(body)}
}
""".trim
}
}
trait TableClassDef extends super.TableClassDef{
// Default projection (`*`), optionally mapped through the entity factory/extractor.
def star = {
val struct = compoundValue(columns.map(c=>if(c.asOption)s"Rep.Some(${c.name})" else s"${c.name}"))
val rhs = if(mappingEnabled) s"$struct <> ($factory, $extractor)" else struct
s"def * = $rhs"
}
// Option projection (`?`) used for outer joins; insertion through it is unsupported.
def option = {
val struct = compoundValue(columns.map(c=>if(c.model.nullable)s"${c.name}" else s"Rep.Some(${c.name})"))
val rhs = if(mappingEnabled) s"""$struct.shaped.<>($optionFactory, (_:Any) => throw new Exception("Inserting into ? projection not supported."))""" else struct
s"def ? = $rhs"
}
def optionFactory = {
val accessors = columns.zipWithIndex.map{ case(c,i) =>
val accessor = if(columns.size > 1) tuple(i) else "r"
if(c.asOption || c.model.nullable) accessor else s"$accessor.get"
}
val fac = s"$factory(${compoundValue(accessors)})"
// Use the first non-nullable column to decide whether the whole row is present.
val discriminator = columns.zipWithIndex.collect{ case (c,i) if !c.model.nullable => if(columns.size > 1) tuple(i) else "r" }.headOption
val expr = discriminator.map(d => s"$d.map(_=> $fac)").getOrElse(s"None")
if(columns.size > 1)
s"{r=>import r._; $expr}"
else
s"r => $expr"
}
def code = {
val prns = parents.map(" with " + _).mkString("")
val args = model.name.schema.map(n => s"""Some("$n")""") ++ Seq("\\""+model.name.table+"\\"")
s"""
class $name(_tableTag: Tag) extends profile.api.Table[$elementType](_tableTag, ${args.mkString(", ")})$prns {
${indent(body.map(_.mkString("\\n")).mkString("\\n\\n"))}
}
""".trim()
}
}
trait TableValueDef extends super.TableValueDef{
def code = s"lazy val $name = new TableQuery(tag => new ${TableClass.name}(tag))"
}
class ColumnDef(model: m.Column) extends super.ColumnDef(model){
import ColumnOption._
import RelationalProfile.ColumnOption._
import SqlProfile.ColumnOption._
// Renders each supported ColumnOption as source code; unknown options are dropped.
def columnOptionCode = {
case ColumnOption.PrimaryKey => Some(s"O.PrimaryKey")
case Default(value) => Some(s"O.Default(${default.get})") // .get is safe here
case SqlType(dbType) => Some(s"""O.SqlType("$dbType")""")
case Length(length,varying) => Some(s"O.Length($length,varying=$varying)")
case AutoInc => Some(s"O.AutoInc")
case Unique => Some(s"O.Unique")
case NotNull|Nullable => throw new SlickException( s"Please don't use Nullable or NotNull column options. Use an Option type, respectively the nullable flag in Slick's model model Column." )
case o => None // throw new SlickException( s"Don't know how to generate code for unexpected ColumnOption $o." )
}
// Renders a column default value as a source-code literal.
def defaultCode = {
case Some(v) => s"Some(${defaultCode(v)})"
case s:String => "\\""+s.replaceAll("\\"", """\\\\"""")+"\\""
case None => s"None"
case v:Byte => s"$v"
case v:Int => s"$v"
case v:Long => s"${v}L"
case v:Float => s"${v}F"
case v:Double => s"$v"
case v:Boolean => s"$v"
case v:Short => s"$v"
case v:Char => s"'$v'"
case v:BigDecimal => s"""scala.math.BigDecimal(\\"$v\\")"""
case v => throw new SlickException( s"Dont' know how to generate code for default value $v of ${v.getClass}. Override def defaultCode to render the value." )
}
// Explicit type to allow overloading existing Slick method names.
// Explicit type argument for better error message when implicit type mapper not found.
def code = s"""val $name: Rep[$actualType] = column[$actualType]("${model.name}"${options.map(", "+_).mkString("")})"""
}
class PrimaryKeyDef(model: m.PrimaryKey) extends super.PrimaryKeyDef(model){
def code = s"""val $name = primaryKey("$dbName", ${compoundValue(columns.map(_.name))})"""
}
class ForeignKeyDef(model: m.ForeignKey) extends super.ForeignKeyDef(model){
// Maps a model ForeignKeyAction to its source-code representation.
def actionCode(action: ForeignKeyAction) = action match{
case ForeignKeyAction.Cascade => "ForeignKeyAction.Cascade"
case ForeignKeyAction.Restrict => "ForeignKeyAction.Restrict"
case ForeignKeyAction.NoAction => "ForeignKeyAction.NoAction"
case ForeignKeyAction.SetNull => "ForeignKeyAction.SetNull"
case ForeignKeyAction.SetDefault => "ForeignKeyAction.SetDefault"
}
def code = {
val pkTable = referencedTable.TableValue.name
// Wrap the non-nullable side in Rep.Some when nullability differs, so both sides line up.
val (pkColumns, fkColumns) = (referencedColumns, referencingColumns).zipped.map { (p, f) =>
val pk = s"r.${p.name}"
val fk = f.name
if(p.model.nullable && !f.model.nullable) (pk, s"Rep.Some($fk)")
else if(!p.model.nullable && f.model.nullable) (s"Rep.Some($pk)", fk)
else (pk, fk)
}.unzip
s"""lazy val $name = foreignKey("$dbName", ${compoundValue(fkColumns)}, $pkTable)(r => ${compoundValue(pkColumns)}, onUpdate=${onUpdate}, onDelete=${onDelete})"""
}
}
class IndexDef(model: m.Index) extends super.IndexDef(model){
def code = {
val unique = if(model.unique) s", unique=true" else ""
s"""val $name = index("$dbName", ${compoundValue(columns.map(_.name))}$unique)"""
}
}
}
}
/** String-rendering implementations of the generator helper operations. */
trait StringGeneratorHelpers extends slick.codegen.GeneratorHelpers[String,String,String] {
  /** Prefixes `code` with a Scaladoc block built from `doc`, unless `doc` is empty. */
  def docWithCode(doc: String, code: String): String = {
    val header =
      if (doc.isEmpty) ""
      else "/** " + doc.split("\\n").mkString("\\n * ") + " */\\n"
    header + code
  }

  /** Wraps a rendered type in `Option[...]`. */
  final def optionType(t: String) = s"Option[$t]"

  /** Types are already strings in this representation; passed through unchanged. */
  def parseType(tpe: String): String = tpe

  /** True when `s` needs backtick-quoting: a Scala keyword or not a plain identifier. */
  def shouldQuoteIdentifier(s: String) = {
    val plainIdentifier =
      s.nonEmpty &&
        Character.isJavaIdentifierStart(s.head) &&
        s.tail.forall(Character.isJavaIdentifierPart)
    scalaKeywords.contains(s) || !plainIdentifier
  }

  /** Renders a term name, backtick-quoted when required. */
  def termName(name: String) = if (shouldQuoteIdentifier(name)) s"`$name`" else name

  /** Renders a type name, backtick-quoted when required. */
  def typeName(name: String) = if (shouldQuoteIdentifier(name)) s"`$name`" else name
}
| Radsaggi/slick | slick-codegen/src/main/scala/slick/codegen/AbstractSourceCodeGenerator.scala | Scala | bsd-2-clause | 10,530 |
package fpscala.c03
import fpscala.datastructures.{List => FpList, Nil => FpNil}
import org.scalatest.{FlatSpec, Matchers}
/** Tests for exercise 3.14: implementing list `append` in terms of `foldRight`. */
class Exercise14Spec extends FlatSpec with Matchers {
"foldRight append" should "work" in {
Exercise14.append(FpList(1, 2, 3, 4), FpList(5, 6)) shouldBe FpList(1, 2, 3, 4, 5, 6)
// Appending the empty list must leave the left operand unchanged.
Exercise14.append(FpList(5), FpNil) shouldBe FpList(5)
}
}
| willtaylor/fpscala | src/test/scala/fpscala/c03/Exercise14Spec.scala | Scala | gpl-3.0 | 376 |
/*
* Copyright 2016 okumin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package influent.internal.nio
import java.io.IOException
import java.nio.channels._
import java.util
import java.util.function.IntUnaryOperator
import influent.exception.InfluentIOException
import influent.internal.nio.NioEventLoopTask.UpdateInterestSet
import org.mockito.ArgumentMatchers._
import org.mockito.Mockito._
import org.scalatest.WordSpec
import org.scalatest.mockito.MockitoSugar
/**
 * Unit tests for the three NioEventLoopTask variants (Register,
 * UpdateInterestSet, Select), using Mockito mocks for the NIO selector
 * machinery. Note: stubbing/verification order is significant throughout.
 */
class NioEventLoopTaskSpec extends WordSpec with MockitoSugar {
  // Attachment stub whose callbacks do nothing; used where callbacks are irrelevant.
  private[this] class NopAttachment extends NioAttachment {
    override def close(): Unit = ()
  }
  "Register" should {
    // Happy path: channel is made non-blocking, registered, and the
    // resulting java SelectionKey is bound to the wrapper key.
    "register a channel" in {
      val selector = Selector.open()
      val ops = SelectionKey.OP_WRITE
      val attachment = new NopAttachment
      val javaKey = mock[SelectionKey]
      val channel = mock[SelectableChannel]
      when(channel.configureBlocking(false)).thenReturn(channel)
      when(channel.register(selector, ops, attachment)).thenReturn(javaKey)
      val key = mock[NioSelectionKey]
      val task = NioEventLoopTask.Register.of(selector, channel, key, ops, attachment)
      task.run()
      verify(channel).configureBlocking(false)
      verify(channel).register(selector, ops, attachment)
      verify(key).bind(javaKey)
    }
    // Register swallows failures; the task must not propagate them.
    "ignore errors" when {
      "it fails configuring blocking mode" in {
        val errors = Seq(
          new ClosedChannelException,
          new IOException()
        )
        errors.foreach { error =>
          val selector = Selector.open()
          val ops = SelectionKey.OP_WRITE
          val attachment = new NopAttachment
          val channel = mock[SelectableChannel]
          when(channel.configureBlocking(false)).thenThrow(error)
          val key = mock[NioSelectionKey]
          val task = NioEventLoopTask.Register.of(selector, channel, key, ops, attachment)
          task.run()
          verify(channel).configureBlocking(false)
          // registration must be skipped entirely after the failure
          verify(channel, never()).register(any(), anyInt(), any())
          verify(key, never()).bind(any())
        }
      }
      "it fails registering the selector" in {
        val errors = Seq(
          new CancelledKeyException,
          new IllegalArgumentException
        )
        errors.foreach { error =>
          val selector = Selector.open()
          val ops = SelectionKey.OP_WRITE
          val attachment = new NopAttachment
          val channel = mock[SelectableChannel]
          when(channel.configureBlocking(false)).thenReturn(channel)
          when(channel.register(selector, ops, attachment)).thenThrow(error)
          val key = mock[NioSelectionKey]
          val task = NioEventLoopTask.Register.of(selector, channel, key, ops, attachment)
          task.run()
          verify(channel).configureBlocking(false)
          verify(channel).register(selector, ops, attachment)
          // the wrapper key must never be bound on failure
          verify(key, never()).bind(any())
        }
      }
    }
  }
  "UpdateInterestSet" should {
    // Updater that sets bit 2 in addition to the existing interest ops.
    def updater = new IntUnaryOperator {
      override def applyAsInt(operand: Int): Int = operand | 2
    }
    "update an interest set" in {
      val javaKey = mock[SelectionKey]
      when(javaKey.interestOps()).thenReturn(1)
      val key = NioSelectionKey.create()
      key.bind(javaKey)
      val task = UpdateInterestSet.of(key, updater)
      task.run()
      verify(javaKey).interestOps(1 | 2)
    }
    "do nothing" when {
      // No-op optimization: skip the syscall when the ops are unchanged.
      "updated ops equals to the current ops" in {
        val javaKey = mock[SelectionKey]
        when(javaKey.interestOps()).thenReturn(1 | 2)
        val key = NioSelectionKey.create()
        key.bind(javaKey)
        val task = UpdateInterestSet.of(key, updater)
        task.run()
        verify(javaKey, never()).interestOps(anyInt())
      }
    }
    "ignore the error" when {
      "it fails retrieving the interest set" in {
        val javaKey = mock[SelectionKey]
        when(javaKey.interestOps()).thenThrow(new CancelledKeyException)
        val key = NioSelectionKey.create()
        key.bind(javaKey)
        val task = UpdateInterestSet.of(key, updater)
        task.run()
        verify(javaKey, never()).interestOps(anyInt())
      }
      "it fails configuring the interest set" in {
        val errors = Seq(
          new IllegalArgumentException,
          new CancelledKeyException
        )
        errors.foreach { error =>
          val javaKey = mock[SelectionKey]
          when(javaKey.interestOps()).thenReturn(1)
          when(javaKey.interestOps(1)).thenThrow(error)
          val key = NioSelectionKey.create()
          key.bind(javaKey)
          val task = UpdateInterestSet.of(key, updater)
          task.run()
          verify(javaKey).interestOps(3)
        }
      }
    }
  }
  "Select" should {
    // Five keys, one per readiness flag plus one both writable and readable,
    // to check each callback is dispatched (and can fire twice per key).
    "select and execute IO operations" in {
      val attachment = mock[NioAttachment]
      val key1 = mock[SelectionKey]
      when(key1.attachment()).thenReturn(attachment, Nil: _*)
      when(key1.isWritable).thenReturn(true)
      when(key1.isReadable).thenReturn(false)
      when(key1.isAcceptable).thenReturn(false)
      when(key1.isConnectable).thenReturn(false)
      val key2 = mock[SelectionKey]
      when(key2.attachment()).thenReturn(attachment, Nil: _*)
      when(key2.isWritable).thenReturn(false)
      when(key2.isReadable).thenReturn(true)
      when(key2.isAcceptable).thenReturn(false)
      when(key2.isConnectable).thenReturn(false)
      val key3 = mock[SelectionKey]
      when(key3.attachment()).thenReturn(attachment, Nil: _*)
      when(key3.isWritable).thenReturn(false)
      when(key3.isReadable).thenReturn(false)
      when(key3.isAcceptable).thenReturn(true)
      when(key3.isConnectable).thenReturn(false)
      val key4 = mock[SelectionKey]
      when(key4.attachment()).thenReturn(attachment, Nil: _*)
      when(key4.isWritable).thenReturn(false)
      when(key4.isReadable).thenReturn(false)
      when(key4.isAcceptable).thenReturn(false)
      when(key4.isConnectable).thenReturn(true)
      val key5 = mock[SelectionKey]
      when(key5.attachment()).thenReturn(attachment, Nil: _*)
      when(key5.isWritable).thenReturn(true)
      when(key5.isReadable).thenReturn(true)
      when(key5.isAcceptable).thenReturn(false)
      when(key5.isConnectable).thenReturn(false)
      val keys = new util.LinkedHashSet[SelectionKey]()
      keys.add(key1)
      keys.add(key2)
      keys.add(key3)
      keys.add(key4)
      keys.add(key5)
      val selector = mock[Selector]
      when(selector.select()).thenReturn(5)
      when(selector.selectedKeys()).thenReturn(keys)
      val task = NioEventLoopTask.Select.of(selector)
      task.run()
      verify(attachment, times(2)).onWritable()
      verify(attachment, times(2)).onReadable()
      verify(attachment).onAcceptable()
      verify(attachment).onConnectable()
      verifyNoMoreInteractions(attachment)
      // the task must clear the selected-key set after processing
      assert(keys.size() === 0)
    }
    "do nothing" when {
      "select returns 0" in {
        val selector = mock[Selector]
        when(selector.select()).thenReturn(0)
        val task = NioEventLoopTask.Select.of(selector)
        task.run()
        verify(selector).select()
        verifyNoMoreInteractions(selector)
      }
      "select fails" in {
        val selector = mock[Selector]
        when(selector.select()).thenThrow(new IOException())
        val task = NioEventLoopTask.Select.of(selector)
        task.run()
        verify(selector).select()
        verifyNoMoreInteractions(selector)
      }
    }
    // A failing attachment must be closed, and processing must continue
    // with the remaining selected keys.
    "ignore attachment errors" in {
      val attachment = mock[NioAttachment]
      val key1 = mock[SelectionKey]
      when(key1.attachment()).thenReturn(attachment, Nil: _*)
      when(key1.isWritable).thenReturn(true)
      when(key1.isReadable).thenReturn(false)
      when(key1.isAcceptable).thenReturn(false)
      when(key1.isConnectable).thenReturn(false)
      when(attachment.onWritable()).thenThrow(new InfluentIOException())
      val key2 = mock[SelectionKey]
      when(key2.attachment()).thenReturn(attachment, Nil: _*)
      when(key2.isWritable).thenReturn(false)
      when(key2.isReadable).thenReturn(true)
      when(key2.isAcceptable).thenReturn(false)
      when(key2.isConnectable).thenReturn(false)
      val keys = new util.LinkedHashSet[SelectionKey]()
      keys.add(key1)
      keys.add(key2)
      val selector = mock[Selector]
      when(selector.select()).thenReturn(2)
      when(selector.selectedKeys()).thenReturn(keys)
      val task = NioEventLoopTask.Select.of(selector)
      task.run()
      verify(attachment).onWritable()
      verify(attachment).close()
      verify(attachment).onReadable()
      verifyNoMoreInteractions(attachment)
      assert(keys.size() === 0)
    }
  }
}
| okumin/influent | influent-transport/src/test/scala/influent/internal/nio/NioEventLoopTaskSpec.scala | Scala | apache-2.0 | 9,279 |
package wordcount
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{IntWritable, Text}
import org.apache.hadoop.mapred.{FileInputFormat, FileOutputFormat, JobConf, JobClient}
import org.apache.hadoop.conf.Configured
import org.apache.hadoop.util.{GenericOptionsParser, Tool, ToolRunner}
// Enable existential types, which we use below in several places:
import scala.language.existentials
/**
 * Hadoop word-count driver supporting several mapper implementations and an
 * optional combiner. Run via ToolRunner with the arguments described in HELP.
 */
object WordCount extends Configured with Tool {
  val HELP =
    """Usage: WordCount *which_mapper* [--use-combiner] input_directory output_directory
    where *which_mapper* is one of the following options:
    1 | no | no-buffer Simplest algorithm, but least efficient.
    2 | not | no-buffer-use-tokenizer Like 'no', but uses a less efficient StringTokenizer, which yields more accurate results.
    3 | buffer Buffer the counts and emit just one key-count pair for each work key. (Uses StringTokenizer.)
    4 | buffer-flush Like 'buffer', but flushes data more often to limit memory usage.
    and
    --use-combiner Use the reducer as a combiner."""

  /** Prints an optional error message followed by the usage text. */
  def help(message: String = "") = {
    message match {
      case "" =>
      case _ => println(message)
    }
    println(HELP)
    ToolRunner.printGenericCommandUsage(Console.out)
  }

  /**
   * Configures and runs the word-count job. Returns 0 on success; exits the
   * JVM when argument parsing requests it (help or invalid arguments).
   */
  override def run(args: Array[String]): Int = {
    val conf = new JobConf(this.getClass)
    conf.setJobName("Word Count")
    conf.setJarByClass(this.getClass)
    val optionsParser = new GenericOptionsParser(conf, args);
    val (mapper, useCombiner, inputPath, outputPath) =
      parseArgs(optionsParser.getRemainingArgs.toList) match {
        case Right((m, useC, in, out)) => (m, useC, in, out)
        case Left(0) => sys.exit(0)
        case Left(_) => sys.error("Invalid settings returned by parseArgs for input args: "+args)
      }
    FileInputFormat.addInputPath(conf, new Path(inputPath))
    FileOutputFormat.setOutputPath(conf, new Path(outputPath))
    conf.setMapperClass(mapper)
    conf.setReducerClass(classOf[WordCountReducer])
    if (useCombiner)
      conf.setCombinerClass(classOf[WordCountReducer])
    conf.setOutputKeyClass(classOf[Text])
    conf.setOutputValueClass(classOf[IntWritable])
    JobClient.runJob(conf)
    0
  }

  private type MapperClass = Class[_ <: org.apache.hadoop.mapred.Mapper[_, _, _, _]]

  // Accumulator for the recursive argument parse below.
  private case class Settings(
    mapperClass: Option[MapperClass],
    useCombiner: Boolean,
    inputPath: Option[String],
    outputPath: Option[String])

  /**
   * Parses the command-line arguments.
   * Returns Right((mapperClass, useCombiner, inputPath, outputPath)) on
   * success, or Left(exitCode): 0 when help was requested, 1 on bad input.
   *
   * BUG FIX: the original computed the help / too-few-arguments result in a
   * statement-position match and then discarded it, so `--help` printed the
   * usage but parsing continued anyway. The early cases now actually return.
   */
  private def parseArgs(args: List[String]): Either[Int,(MapperClass,Boolean,String,String)] = {
    // Recursively folds the argument list into Settings.
    def parse(a: List[String], settings: Settings): Either[Int,Settings] = a match {
      case Nil => Right(settings)
      case head :: tail => head match {
        case "WordCount" => // should be first arg; this class name!
          parse(tail, settings)
        case "1" | "no" | "no-buffer" =>
          parse(tail, settings.copy(mapperClass = Some(classOf[WordCountNoBuffering.Map])))
        case "2" | "not" | "no-buffer-use-tokenizer" =>
          parse(tail, settings.copy(mapperClass = Some(classOf[WordCountNoBufferingTokenization.Map])))
        case "3" | "buffer" =>
          parse(tail, settings.copy(mapperClass = Some(classOf[WordCountBuffering.Map])))
        case "4" | "buffer-flush" =>
          parse(tail, settings.copy(mapperClass = Some(classOf[WordCountBufferingFlushing.Map])))
        case "--use-combiner" =>
          parse(tail, settings.copy(useCombiner = true))
        case s =>
          // First free-standing argument is the input path, second the output path.
          if (settings.inputPath == None)
            parse(tail, settings.copy(inputPath = Some(s)))
          else if (settings.outputPath == None)
            parse(tail, settings.copy(outputPath = Some(s)))
          else {
            help(s"Unrecognized argument '$s' in input arguments: $args")
            Left(1)
          }
      }
    }
    args match {
      case ("-h" | "--help") :: _ =>
        help()
        Left(0)
      case _ if args.length < 3 =>
        help(s"Insufficient number of input arguments: $args")
        Left(1)
      case _ =>
        parse(args, Settings(None, false, None, None)) match {
          case Right(Settings(None, _, _, _)) => help("Must specify a mapper."); Left(1)
          case Right(Settings(_, _, None, _)) => help("Must specify an input path."); Left(1)
          case Right(Settings(_, _, _, None)) => help("Must specify an output path."); Left(1)
          case Right(Settings(Some(m), useC, Some(in), Some(out))) => Right((m, useC, in, out))
          case Left(x) => Left(x)
        }
    }
  }
}
| deanwampler/scala-hadoop | src/main/scala/wordcount/WordCount.scala | Scala | apache-2.0 | 4,297 |
/*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.github.microburn
/**
 * Development entry point: starts the server serving static resources
 * directly from the source webapp directory so edits are picked up live.
 * NOTE(review): uses the App trait, which has delayed-initialization
 * pitfalls for non-trivial entry points.
 */
object DevMain extends App {
  Main.runServer(Some("src/main/webapp"))
} | arkadius/micro-burn | src/main/scala/org/github/microburn/DevMain.scala | Scala | apache-2.0 | 719
import com.typesafe.sbt.web.SbtWeb.autoImport._
import play.Play.autoImport._
import play.sbt.PlayImport._
import play.sbt.routes.RoutesKeys._
import play.twirl.sbt.Import._
import PlayKeys._
import sbt._, Keys._
/**
 * sbt build definition (legacy Build-trait style) for the idbase Play app.
 * NOTE(review): `appVersion` is declared but never wired into the project
 * settings below -- confirm whether it should be passed as `version`.
 */
object ApplicationBuild extends Build {
  val appName = "idbase"
  val appVersion = "1.1"
  // Managed dependencies: Play cache/ws plus ReactiveMongo, Markdown
  // rendering (pegdown), authentication, and a locally published snapshot.
  val appDependencies = Seq(
    cache,
    ws,
    "org.reactivemongo" %% "reactivemongo" % "0.11.14",
    "org.reactivemongo" %% "play2-reactivemongo" % s"0.11.14-play24",
    "org.pegdown" % "pegdown" % "1.6.0",
    "jp.t2v" %% "play2-auth" % "0.14.1",
    "default" % "ssu_2.10" % "0.1-SNAPSHOT"
  )
  // Root project: Play plugin enabled, custom resolvers, scaladoc disabled,
  // strict compiler flags, and extra default imports for Twirl templates.
  val main = Project(appName, file(".")) enablePlugins _root_.play.sbt.PlayScala settings (
    scalaVersion := "2.11.8",
    resolvers ++= Seq(
      "Sonatype Snapshots" at "http://oss.sonatype.org/content/repositories/snapshots/",
      "iliaz.com" at "http://scala.iliaz.com/"
    ),
    libraryDependencies ++= appDependencies,
    sources in doc in Compile := List(),
    scalacOptions ++= Seq("-unchecked", "-deprecation", "-feature", "-language:_"),
    TwirlKeys.templateImports ++= Seq("idbase.models.{Doc => D,_}"),
    routesGenerator := InjectedRoutesGenerator
  )
}
| ornicar/idbase | project/Build.scala | Scala | mit | 1,202 |
package test
import io.keen.client.scala.AttemptCountingEventStore
import scala.collection.concurrent.TrieMap
import scala.collection.mutable.ListBuffer
import org.specs2.mutable.BeforeAfter
/**
 * Shared specs2 test base for AttemptCountingEventStore implementations.
 * Concrete subclasses supply the store via `buildStore()` (inherited from
 * EventStoreSpecBase).
 */
abstract class AttemptCountingEventStoreSpecBase extends EventStoreSpecBase {
  // Set in `before`; the store under test viewed through its attempt-counting interface.
  var attemptCountingStore: AttemptCountingEventStore = _

  /** Rebuilds a fresh store before each example. */
  trait AttemptCountingEventStoreSetupTeardown extends BeforeAfter {
    def before: Any = {
      store = buildStore() // initialize our store
      attemptCountingStore = store.asInstanceOf[AttemptCountingEventStore]
    }
    def after: Any = {}
  }

  // Examples share mutable state (store fields), so run them sequentially.
  sequential

  "AttemptCountingEventStore" should {
    "store and get event attempts" in new AttemptCountingEventStoreSetupTeardown {
      val attempts: String = "blargh"
      attemptCountingStore.setAttempts("project1", "collection1", attempts)
      attempts must beEqualTo(attemptCountingStore.getAttempts("project1", "collection1"))
    }
    "get handles with attempts" in new AttemptCountingEventStoreSetupTeardown {
      // add a couple events to the store
      attemptCountingStore.store("project1", "collection1", testEvents(0))
      attemptCountingStore.store("project1", "collection2", testEvents(1))
      // set some value for attempts.json. this is to ensure that setting attempts doesn't
      // interfere with getting handles
      attemptCountingStore.setAttempts("project1", "collection1", "{}")
      // get the handle map
      val handleMap: TrieMap[String, ListBuffer[Long]] = attemptCountingStore.getHandles("project1")
      (handleMap must not beNull)
      handleMap.size must beEqualTo(2)
      // get the lists of handles (vals: these locals are never reassigned)
      val handles1: ListBuffer[Long] = handleMap.getOrElse("collection1", null)
      (handles1 must not beNull)
      handles1.size must beEqualTo(1)
      val handles2: ListBuffer[Long] = handleMap.getOrElse("collection2", null)
      (handles2 must not beNull)
      handles2.size must beEqualTo(1)
      // validate the actual events
      store.get(handles1(0)) must beEqualTo(testEvents(0))
      store.get(handles2(0)) must beEqualTo(testEvents(1))
    }
  }
}
| ches/KeenClient-Scala | src/test/scala/AttemptCountingEventStoreSpecBase.scala | Scala | mit | 2,118 |
package cn.gridx.scala.lang.io.files
import java.io.{File, FileWriter, PrintWriter}
import scala.io.{BufferedSource, Source}
/**
* Created by tao on 6/27/15.
*/
/**
 * Examples of reading and writing text files with java.io and scala.io.Source.
 * All file handles are now released in finally blocks (the originals leaked
 * the PrintWriter/Source handles).
 */
object ReadWriteTextFiles {
  def main(args: Array[String]): Unit = {
    CreateFile("jg.txt")
  }

  /**
   * Writes the single line "hello" to the text file at `path`, creating or
   * truncating it.
   *
   * Scala doesn't offer any special file writing capability, so fall back
   * and use the Java PrintWriter or FileWriter approaches.
   *
   * @param path destination file path
   */
  def writeTextFile(path: String): Unit = {
    val writer = new PrintWriter(new File(path))
    try {
      writer.println("hello")
    } finally {
      // close (which also flushes) so the handle is released even on failure;
      // the original only flushed and leaked the writer
      writer.close()
    }
  }

  /**
   * Reads the file at `path` line by line, printing each line prefixed with
   * its zero-based index.
   *
   * @param path file to read
   */
  def readLinesFromFile(path: String): Unit = {
    val source = Source.fromFile(path)
    try {
      for ((line, i) <- source.getLines().zipWithIndex) {
        println(i + " : " + line)
      }
    } finally {
      source.close()
    }
  }

  /**
   * Reads the whole contents of a text file via Source.fromFile and prints it
   * as a single string.
   *
   * @param path file to read; defaults to the original hard-coded location
   *             (backward-compatible generalization of the 0-arg version)
   */
  def ReadWholeFileContents(path: String = "/Users/tao/.Rapp.history"): Unit = {
    val src: BufferedSource = Source.fromFile(path)
    try {
      // mkString joins the entire file contents into one string
      println(src.mkString)
    } finally {
      src.close()
    }
  }

  /**
   * Appends the text "\nhello" to the file at `path`, creating it if needed
   * (FileWriter opened in append mode).
   *
   * @param path file to append to
   */
  def CreateFile(path: String): Unit = {
    val writer = new PrintWriter(new FileWriter(new File(path), true))
    try {
      writer.append("\nhello")
    } finally {
      writer.close()
    }
  }
}
| TaoXiao/Scala | lang/src/main/scala/cn/gridx/scala/lang/io/files/ReadWriteTextFiles.scala | Scala | apache-2.0 | 1,889 |
package org.concurrency.ch2
import scala.annotation.tailrec
import scala.collection.mutable
/**
 * Solutions to the chapter 2 concurrency exercises. Relies on a project
 * `Thread.thread { ... }` helper to spawn and start threads.
 */
object Exs extends App {
  // Exercise 1: run two by-name computations on separate threads and return
  // both results after joining.
  def parallel[A,B] (a: => A, b: => B): (A,B) = {
    var resA:A = null.asInstanceOf[A]
    var resB:B = null.asInstanceOf[B]
    val tA = Thread.thread {
      resA = a
    }
    val tB = Thread.thread {
      resB = b
    }
    tA.join(); tB.join()
    (resA, resB)
  }
  // println(parallel(1+2,3+3))

  // Exercise 2: run `b` on a fresh thread every `duration` ms, `count` times.
  // (The unused local that held the last spawned thread has been removed.)
  def periodically(duration: Long, count: Int) (b: => Unit): Unit = {
    var i = 0
    while(i < count) {
      i+=1
      Thread.thread {b}
      java.lang.Thread.sleep(duration)
    }
  }
  //periodically(2000, 2) {println(3+4)}

  // Exercise 3: single-slot variable; get/put throw when the slot is in the
  // wrong state (non-blocking version).
  class SyncVar1[T] {
    private var state: Option[T] = None
    def get(): T = state.synchronized { // if synchronized were invoked on a var state initialized as '= _' it would throw a NullPointerExc
      state match {
        case None => throw new IllegalArgumentException()
        case Some(x) =>
          val tmp: T = x
          state = None
          tmp
      }
    }
    def put(el: T): Unit = state.synchronized {
      state match {
        case None => state = Some(el)
        case Some(x) => throw new IllegalArgumentException()
      }
    }
  }
  // val sv = new SyncVar1[Int]
  // val t1 = Thread.thread {
  //   sv.put(5)
  // }
  // val t2 = Thread.thread {
  //   println(sv.get())
  // }
  //
  // t1.join(); t2.join()

  // Exercise 4: adds isEmpty/nonEmpty so callers can poll before get/put.
  class SyncVar2[T] {
    private var state: Option[T] = None
    def get(): T = state.synchronized {
      state match {
        case None => throw new IllegalArgumentException()
        case Some(x) =>
          val tmp: T = x
          state = None // if state.notify() was called afterwards, an IllegalMonitorStateExc would be thrown as the current thread does not own the object monitor anymore; it was lost on assigning None to state
          tmp
      }
    }
    def put(el: T): Unit = state.synchronized {
      state match {
        case None => state = Some(el)
        case Some(x) => throw new IllegalArgumentException()
      }
    }
    def isEmpty: Boolean = state.synchronized {
      state match {
        case None => true
        case _ => false
      }
    }
    def nonEmpty: Boolean = !isEmpty
  }
  // val sv2 = new SyncVar2[Int]
  // val tprod = Thread.thread {
  //   sv2.synchronized {
  //     for (i <- 0 until 15) {
  //       while (sv2.nonEmpty) sv2.wait()
  //       sv2.put(i)
  //       sv2.notify()
  //     }
  //   }
  // }
  // val tcons = Thread.thread {
  //   @tailrec
  //   def go(): Unit = {
  //     var get: Int = 0
  //     sv2.synchronized {
  //       while (sv2.isEmpty) sv2.wait()
  //       get = sv2.get()
  //       sv2.notify()
  //     }
  //     println(get)
  //     if (get < 14) go()
  //   }
  //   go()
  // }

  // Exercise 5: blocking get/put using a dedicated lock object.
  // the exs before are not the best coding as they synchronize at the state var and at the SyncVar
  class SyncVar3[T] {
    private val lock = new AnyRef
    var state: Option[T] = None
    /** Blocks until a value is available, then removes and returns it. */
    final def getWait(): T = lock.synchronized {
      state match {
        case Some(x) =>
          val tmp: T = x
          state = None
          lock.notify()
          tmp
        case None => lock.wait(); getWait()
      }
    }
    /** Blocks until the slot is empty, then stores `x`. */
    final def putWait(x: T): Unit = lock.synchronized {
      state match {
        case None => state = Some(x); lock.notify()
        // BUG FIX: the original retried with `putWait(v)` -- the value already
        // stored -- silently dropping the caller's `x`. Retry with `x`.
        case Some(_) => lock.wait(); putWait(x)
      }
    }
  }
  // val sv2 = new SyncVar3[Int]
  // val tprod = Thread.thread {
  //   for (i <- 0 until 15) {
  //     sv2.putWait(i)
  //   }
  // }
  //
  // val tcons = Thread.thread {
  //   @tailrec
  //   def go(): Unit = {
  //     val get: Int = sv2.getWait()
  //     println(get)
  //     if (get < 14) go()
  //   }
  //   go()
  // }

  // Exercise 6: bounded blocking queue built on a mutable.Queue.
  // NOTE(review): `while(n < q.size)` admits up to n+1 elements -- confirm
  // whether the intended capacity is n or n+1.
  class SyncQueue[T](val n:Int) {
    val q: mutable.Queue[T] = new mutable.Queue()
    final def getWait(): T = q.synchronized {
      while(q.isEmpty) q.wait()
      val v = q.dequeue()
      q.notify()
      v
    }
    final def putWait(el: T) = q.synchronized {
      while(n < q.size) q.wait()
      q += el
      q.notify()
    }
  }
  // val sq = new SyncQueue[Int](3)
  // val t1 = Thread.thread {
  //   for(i <- 0 until 15) sq.putWait(i)
  // }
  // val t2 = Thread.thread {
  //   var v:Int = sq.getWait()
  //   while(v < 14) {
  //     println(v); v = sq.getWait()
  //   }
  // }

  // Exercise 7: bank transfers. Identity is the account name only.
  class Account(val name:String, var bal:Int) {
    override def equals(other: Any):Boolean = other match {
      case that:Account => this.name == that.name
      case _ => false
    }
    override def hashCode = name.hashCode
  }
  // Transfers `quantity` on a fresh thread, locking src then dst.
  // NOTE(review): nested locking in this fixed order is deadlock-free here
  // only because every transfer targets the same destination account.
  private def send(src: Account, dst: Account, quantity:Int): Unit = {
    Thread.thread {
      src.synchronized {
        dst.synchronized {
          src.bal -= quantity
          dst.bal += quantity
        }
      }
    }
  }
  // this code supposes target is not in accounts
  def sendAll(accounts:collection.immutable.Set[Account], target:Account): Unit = {
    if(!accounts.contains(target))
      for(acc <- accounts) send(acc, target, acc.bal)
  }
  // val dst = new Account("target", 0)
  // sendAll((for(i <- 10 until 20) yield new Account("src", i)).toSet, dst)
  // println(dst.bal)

  // Exercises 8-10: fixed pool of `p` workers draining a priority queue.
  // Lower priority numbers run first (the ordering negates the priority).
  // After shutdown only tasks with priority below `important` still run.
  class PriorityTaskPool(p:Int, important:Int) {
    val pq: mutable.PriorityQueue[(Int, () => Unit)] = new mutable.PriorityQueue()(Ordering.by(t2 => -t2._1))
    @volatile var terminated = false
    val workers = for(_ <- 0 until p) yield new java.lang.Thread {
      // Blocks for a task; returns None when the pool is shut down and the
      // head task (if any) is not important enough to still run.
      def poll() = pq.synchronized {
        while(pq.isEmpty && !terminated) pq.wait()
        println(this.getName)
        pq.headOption match {
          case Some(task) if !terminated || task._1 < important => Some(pq.dequeue())
          case _ => None
        }
      }
      @tailrec
      override def run() = poll() match {
        case Some(task) => task._2(); run()
        case None =>
      }
    }
    def go(): Unit = workers.foreach(_.start())
    def asynchronous(priority:Int)(task: => Unit): Unit = pq.synchronized {
      pq.enqueue(priority -> (() => task))
      pq.notify()
    }
    def shutdown(): Unit = {
      terminated = true
      // BUG FIX: notify() wakes only one of the p waiting workers; the rest
      // would block forever. notifyAll() lets every worker observe `terminated`.
      pq.synchronized { pq.notifyAll() }
    }
  }
  val ptp = new PriorityTaskPool(2,2)
  ptp.asynchronous(3)(println(3))
  ptp.asynchronous(2)(println(2))
  ptp.asynchronous(1)(println(1))
  ptp.asynchronous(1)(println(1))
  ptp.asynchronous(2)(println(2))
  ptp.asynchronous(3)(println(3))
  ptp.asynchronous(-1)(println(-1))
  ptp.go()
  ptp.shutdown()
}
| marcos-sb/concurrent-programming-scala | src/main/scala-2.11/org/concurrency/ch2/Exs.scala | Scala | apache-2.0 | 6,533 |
package com.lucidchart.piezo.admin.controllers
import com.lucidchart.piezo.admin.utils.JobUtils
import play.api._
import play.api.mvc._
import com.lucidchart.piezo.{JobHistoryModel, TriggerHistoryModel, WorkerSchedulerFactory}
import org.quartz.impl.matchers.GroupMatcher
import scala.collection.JavaConverters._
import scala.collection.mutable
import org.quartz._
import scala.Some
import scala.collection.JavaConverters._
import com.lucidchart.piezo.admin.views._
import play.api.libs.json._
import play.api.libs.functional.syntax._
/** Default Jobs controller instance wired with the production scheduler factory. */
object Jobs extends Jobs(new WorkerSchedulerFactory())
/**
 * Play controller exposing CRUD screens and JSON endpoints for Quartz jobs.
 * All scheduler access goes through the injected WorkerSchedulerFactory.
 */
class Jobs(schedulerFactory: WorkerSchedulerFactory) extends Controller {
  implicit val logger = Logger(this.getClass())
  val scheduler = logExceptions(schedulerFactory.getScheduler())
  val properties = schedulerFactory.props
  val jobHistoryModel = logExceptions(new JobHistoryModel(properties))
  val triggerHistoryModel = logExceptions(new TriggerHistoryModel(properties))
  val jobFormHelper = new JobFormHelper()

  /** Returns all job keys grouped by group name, both levels sorted by name. */
  def getJobsByGroup(): mutable.Buffer[(String, List[JobKey])] = {
    val jobsByGroup =
      for (groupName <- scheduler.getJobGroupNames().asScala) yield {
        val jobs: List[JobKey] = scheduler.getJobKeys(GroupMatcher.jobGroupEquals(groupName)).asScala.toList
        val sortedJobs: List[JobKey] = jobs.sortBy(jobKey => jobKey.getName())
        (groupName, sortedJobs)
      }
    jobsByGroup.sortBy(groupList => groupList._1)
  }

  /** Landing page: job list, recent history, and jobs without any trigger. */
  def getIndex = Action { implicit request =>
    val allJobs: List[JobKey] = getJobsByGroup().flatMap(_._2).toList
    val jobHistories = allJobs.flatMap({ job =>
      jobHistoryModel.getJob(job.getName, job.getGroup).headOption
    }).sortWith(_.start after _.start)
    val triggeredJobs: List[JobKey] = TriggerHelper.getTriggersByGroup(scheduler).flatMap { case (group, triggerKeys) =>
      triggerKeys.map(triggerKey => scheduler.getTrigger(triggerKey).getJobKey)
    }.toList
    val untriggeredJobs: List[JobKey] = allJobs.filterNot(x => triggeredJobs.contains(x))
    Ok(com.lucidchart.piezo.admin.views.html.jobs(getJobsByGroup(), None, Some(jobHistories), untriggeredJobs, scheduler.getMetaData)(request))
  }

  /** Detail page for one job: definition, run history, and trigger states. */
  def getJob(group: String, name: String) = Action { implicit request =>
    val jobKey = new JobKey(name, group)
    val jobExists = scheduler.checkExists(jobKey)
    if (!jobExists) {
      val errorMsg = Some("Job " + group + " " + name + " not found")
      NotFound(com.lucidchart.piezo.admin.views.html.job(getJobsByGroup(), None, None, None, errorMsg)(request))
    } else {
      try {
        val jobDetail: Option[JobDetail] = Some(scheduler.getJobDetail(jobKey))
        // History lookup failures must not break the page; show it without history.
        val history = {
          try {
            Some(jobHistoryModel.getJob(name, group))
          } catch {
            case e: Exception => {
              logger.error("Failed to get job history")
              None
            }
          }
        }
        val triggers = scheduler.getTriggersOfJob(jobKey).asScala.toList
        // Split the job's cron triggers into resumable (currently paused) and pausable.
        val (resumableTriggers, pausableTriggers) = triggers.filter(_.isInstanceOf[CronTrigger]).map(_.getKey()).partition{ triggerKey =>
          scheduler.getTriggerState(triggerKey) == Trigger.TriggerState.PAUSED
        }
        Ok(com.lucidchart.piezo.admin.views.html.job(getJobsByGroup(), jobDetail, history, Some(triggers), None, pausableTriggers, resumableTriggers)(request))
      } catch {
        case e: Exception => {
          val errorMsg = "Exception caught getting job " + group + " " + name + ". -- " + e.getLocalizedMessage()
          logger.error(errorMsg, e)
          InternalServerError(com.lucidchart.piezo.admin.views.html.job(getJobsByGroup(), None, None, None, Some(errorMsg))(request))
        }
      }
    }
  }

  /** Deletes a job by key; 404 when missing, 500 on scheduler failure. */
  def deleteJob(group: String, name: String) = Action { implicit request =>
    val jobKey = new JobKey(name, group)
    if (!scheduler.checkExists(jobKey)) {
      // BUG FIX: the original message was "Job %s $s not found" -- the "$s"
      // was a typo for "%s", so the job name was dropped from the message.
      val errorMsg = Some("Job %s %s not found".format(group, name))
      NotFound(com.lucidchart.piezo.admin.views.html.job(mutable.Buffer(), None, None, None, errorMsg)(request))
    } else {
      try {
        scheduler.deleteJob(jobKey)
        Ok(com.lucidchart.piezo.admin.views.html.job(getJobsByGroup(), None, None, None)(request))
      } catch {
        case e: Exception => {
          val errorMsg = "Exception caught deleting job %s %s. -- %s".format(group, name, e.getLocalizedMessage())
          logger.error(errorMsg, e)
          InternalServerError(com.lucidchart.piezo.admin.views.html.job(mutable.Buffer(), None, None, None, Some(errorMsg))(request))
        }
      }
    }
  }

  // Labels/targets shared by the new and edit forms.
  val submitNewMessage = "Create"
  val formNewAction = routes.Jobs.postJob()
  val submitEditMessage = "Save"
  def formEditAction(group: String, name: String): Call = routes.Jobs.putJob(group, name)

  /** New-job form, optionally pre-filled from an existing job used as a template. */
  def getNewJobForm(templateGroup: Option[String] = None, templateName: Option[String] = None) = Action { implicit request =>
    //if (request.queryString.contains())
    templateGroup match {
      case Some(group) => getEditJob(group, templateName.get, true)
      case None =>
        val newJobForm = jobFormHelper.buildJobForm
        Ok(com.lucidchart.piezo.admin.views.html.editJob(getJobsByGroup(), newJobForm, submitNewMessage, formNewAction, false)(request))
    }
  }

  /**
   * Renders the edit form for an existing job. With isTemplate=true the form
   * submits as a new job instead of updating the source job.
   */
  def getEditJob(group: String, name: String, isTemplate: Boolean)(implicit request: Request[AnyContent]) = {
    val jobKey = new JobKey(name, group)
    if (scheduler.checkExists(jobKey)) {
      val jobDetail = scheduler.getJobDetail(jobKey)
      val editJobForm = jobFormHelper.buildJobForm().fill(jobDetail)
      if (isTemplate) Ok(com.lucidchart.piezo.admin.views.html.editJob(getJobsByGroup(), editJobForm, submitNewMessage, formNewAction, false)(request))
      else Ok(com.lucidchart.piezo.admin.views.html.editJob(getJobsByGroup(), editJobForm, submitEditMessage, formEditAction(group, name), true)(request))
    } else {
      val errorMsg = Some("Job %s %s not found".format(group, name))
      NotFound(com.lucidchart.piezo.admin.views.html.trigger(mutable.Buffer(), None, None, errorMsg)(request))
    }
  }

  def getEditJobAction(group: String, name: String) = Action { implicit request => getEditJob(group, name, false) }

  /** Updates an existing job (addJob with replace=true). */
  def putJob(group: String, name: String) = Action { implicit request =>
    jobFormHelper.buildJobForm.bindFromRequest.fold(
      formWithErrors =>
        BadRequest(html.editJob(getJobsByGroup(), formWithErrors, submitNewMessage, formNewAction, false)),
      value => {
        val jobDetail = JobUtils.cleanup(value)
        scheduler.addJob(jobDetail, true)
        Redirect(routes.Jobs.getJob(value.getKey.getGroup(), value.getKey.getName()))
          .flashing("message" -> "Successfully edited job.", "class" -> "")
      }
    )
  }

  /** Creates a new job; re-renders the form when the group/name pair already exists. */
  def postJob() = Action { implicit request =>
    jobFormHelper.buildJobForm.bindFromRequest.fold(
      formWithErrors =>
        BadRequest(com.lucidchart.piezo.admin.views.html.editJob(getJobsByGroup(), formWithErrors, submitNewMessage, formNewAction, false)),
      value => {
        try {
          val jobDetail = JobUtils.cleanup(value)
          scheduler.addJob(jobDetail, false)
          Redirect(routes.Jobs.getJob(value.getKey.getGroup(), value.getKey.getName()))
            .flashing("message" -> "Successfully added job.", "class" -> "")
        } catch {
          case alreadyExists: ObjectAlreadyExistsException =>
            val form = jobFormHelper.buildJobForm.fill(value)
            Ok(com.lucidchart.piezo.admin.views.html.editJob(getJobsByGroup(),
              form,
              submitNewMessage,
              formNewAction,
              false,
              errorMessage = Some("Please provide unique group-name pair")
            )(request))
        }
      }
    )
  }

  /** JSON type-ahead: job groups whose name contains `sofar` (case-insensitive). */
  def jobGroupTypeAhead(sofar: String) = Action { implicit request =>
    val groups = scheduler.getJobGroupNames().asScala.toList
    Ok(Json.obj("groups" -> groups.filter{ group =>
      group.toLowerCase.contains(sofar.toLowerCase)
    }))
  }

  /** JSON type-ahead: job names in `group` containing `sofar` (case-insensitive). */
  def jobNameTypeAhead(group: String, sofar: String) = Action { implicit request =>
    val jobs = scheduler.getJobKeys(GroupMatcher.jobGroupEquals(group)).asScala.toSet
    Ok(Json.obj("jobs" -> jobs.filter(_.getName.toLowerCase.contains(sofar.toLowerCase)).map(_.getName)))
  }
}
| pauldraper/piezo | admin/app/com/lucidchart/piezo/admin/controllers/Jobs.scala | Scala | apache-2.0 | 8,314 |
package tungsten
import org.junit.Test
import org.junit.Assert._
/** Checks that both the 64-bit and 32-bit runtime modules validate cleanly. */
class RuntimeTest {
  /** Fails the current test unless the module validates with no errors. */
  def testModule(module: Module) {
    assertEquals(Nil, module.validate)
  }
  @Test def runtime64IsValid { testModule(Runtime.getRuntime(true)) }
  @Test def runtime32IsValid { testModule(Runtime.getRuntime(false)) }
}
| jayconrod/tungsten | core/src/test/scala/tungsten/RuntimeTest.scala | Scala | gpl-2.0 | 350 |
package sampler.data
import sampler.math.Random
import sampler.Implicits._
import org.scalatest.Matchers
import org.scalatest.FreeSpec
import org.scalatest.BeforeAndAfter
/** Unit tests for EmpiricalTable: sizes, probabilities, frequency counts, equality. */
class EmpiricalTableTest extends FreeSpec with BeforeAndAfter with Matchers {
  implicit val r = Random
  var tolerance: Double = 1e-6
  var d1: EmpiricalTable[Int] = _
  var d2: EmpiricalTable[Int] = _
  var d3: EmpiricalTable[Int] = _
  before {
    implicit val r = Random
    // d1: one each of 4,5,6
    // d2: 4 once, 5 twice, 6 three times
    // d3: 1 once, 2 twice, 3 three times, 4 once
    d1 = IndexedSeq(4, 5, 6).toEmpiricalTable
    d2 = IndexedSeq(4, 5, 5, 6, 6, 6).toEmpiricalTable
    d3 = IndexedSeq(1, 2, 2, 3, 3, 3, 4).toEmpiricalTable
  }
  "Number of observations" in {
    d1.size shouldBe 3
    d2.size shouldBe 6
    d3.size shouldBe 7
  }
  "Support size" in {
    d1.supportSize shouldBe 3
    d2.supportSize shouldBe 3
    d3.supportSize shouldBe 4
  }
  "Calculates relative probabilities of observations" in {
    // P(4) is 1/size-weighted count in each table
    d1.probabilityTable(4) should be((1.0 / 3.0) +- tolerance)
    d2.probabilityTable(4) should be((1.0 / 6.0) +- tolerance)
    d3.probabilityTable(4) should be((1.0 / 7.0) +- tolerance)
  }
  "Calculates map of counts for each observation" in {
    d1.freqTable shouldBe Map(4 -> 1, 5 -> 1, 6 -> 1)
    d2.freqTable shouldBe Map(4 -> 1, 5 -> 2, 6 -> 3)
    d3.freqTable shouldBe Map(1 -> 1, 2 -> 2, 3 -> 3, 4 -> 1)
  }
  "Is augmentable with traverable object" in {
    // appending another 6 to d1 doubles its weight
    val d4 = d1 ++ IndexedSeq(6)
    d4.supportSize shouldBe 3
    d4.probabilityTable(6) should be(0.5 +- tolerance)
    d4.freqTable shouldBe Map(4 -> 1, 5 -> 1, 6 -> 2)
  }
  "Overrides Hash Code and Equals" in {
    val d1Copy = IndexedSeq(4, 5, 6).toEmpiricalTable
    val empSeq = IndexedSeq(1, 2, 3).toEmpiricalSeq
    // canEqual distinguishes tables from sequences
    assert(d1.canEqual(d2))
    assert(!d1.canEqual(empSeq))
    // equal contents imply equality and equal hash codes
    assert(d1.equals(d1Copy))
    assert(d1.hashCode === d1Copy.hashCode)
    assert(!d1.equals(d2))
    assert(!(d1.hashCode equals d2.hashCode))
  }
}
| tsaratoon/Sampler | sampler-core/src/test/scala/sampler/data/EmpiricalTableTest.scala | Scala | apache-2.0 | 2,090 |
package io.github.mandar2812.dynaml.optimization
import breeze.linalg.{DenseVector, norm}
import io.github.mandar2812.dynaml.models.gp.AbstractGPRegressionModel
import org.apache.log4j.Logger
/**
* Created by mandar on 16/1/16.
*/
/**
 * Maximum-likelihood (ML-II) hyper-parameter tuner for Gaussian Process
 * regression models, implemented as fixed-step gradient descent on the
 * (negative log) marginal likelihood surface reported by `gradEnergy`.
 */
class GPMLOptimizer[I, T, M <: AbstractGPRegressionModel[T, I]](model: M)
  extends GlobalOptimizer[M] {

  override val system: M = model

  protected val logger = Logger.getLogger(this.getClass)

  /**
   * Runs gradient descent over the hyper-parameter map.
   *
   * @param initialConfig starting hyper-parameter assignment
   * @param options       "tolerance": stop once the gradient norm drops below
   *                      this, "step": descent step size, "maxIterations":
   *                      hard iteration cap
   * @return the model instance (unchanged) paired with the tuned configuration
   */
  override def optimize(initialConfig: Map[String, Double],
                        options: Map[String, String] = Map("tolerance" -> "0.0001",
                          "step" -> "0.005", "maxIterations" -> "50"))
  : (M, Map[String, Double]) = {

    logger.info("Starting Maximum Likelihood based optimization: ML-II")
    logger.info("-----------------------------------------------------")

    val tolerance = options("tolerance").toDouble
    val alpha = options("step").toDouble
    val maxit = options("maxIterations").toInt

    var count = 1
    var gradNorm = 1.0
    var workingSolution = initialConfig

    do {
      val gradient = system.gradEnergy(workingSolution)
      logger.info("Gradient at " + count + " iteration is: " + gradient)
      gradNorm = norm(DenseVector(gradient.values.toArray), 2)

      // Update each hyper-parameter by looking its gradient up by name.
      // (Previously the two maps were zipped pairwise, which silently relied
      // on both maps sharing the same key iteration order.)
      workingSolution = workingSolution.map { case (hyp, value) =>
        val g = gradient.getOrElse(hyp, 0.0)
        // Clamp non-finite gradients to a unit step in the appropriate
        // direction. Note: `x == Double.NaN` is always false under IEEE-754,
        // so NaN must be detected with isNaN (the old equality check never
        // fired and let NaN propagate into the solution).
        val gr: Double =
          if (g.isNaN || g == Double.PositiveInfinity) 1.0
          else if (g == Double.NegativeInfinity) -1.0
          else g
        (hyp, value - alpha * gr)
      }

      count += 1
    } while (count < maxit && gradNorm >= tolerance)

    logger.info("Stopped ML-II at " + count + " iterations")
    logger.info("Final state : " + workingSolution)
    (system, workingSolution)
  }
}
| sisirkoppaka/bayeslearn | src/main/scala/io/github/mandar2812/dynaml/optimization/GPMLOptimizer.scala | Scala | apache-2.0 | 2,063 |
package collins.models.asset
import play.api.libs.json.JsObject
import play.api.libs.json.JsString
import play.api.libs.json.JsValue
import play.api.libs.json.Json
import collins.models.Asset
import collins.models.AssetMetaValue
import collins.models.IpAddresses
import collins.models.IpmiInfo
import collins.models.LldpHelper
import collins.models.LshwHelper
import collins.models.MetaWrapper
import collins.models.PowerHelper
import collins.models.conversions.IpAddressFormat
import collins.models.conversions.IpmiFormat
import collins.util.LldpRepresentation
import collins.util.LshwRepresentation
import collins.util.config.Feature
import collins.util.power.PowerUnit.PowerUnitFormat
import collins.util.power.PowerUnits
import collins.util.power.PowerUnits
object AllAttributes {
  /**
   * Assembles the full attribute view of an asset. Configuration assets have
   * no hardware/LLDP/IPMI data, so they get empty representations; for real
   * assets the hardware, LLDP and power representations are reconstructed
   * from meta values, and hidden meta tags are filtered out of the remainder.
   */
  def get(asset: Asset): AllAttributes = {
    if (asset.isConfiguration) {
      AllAttributes(asset,
        LshwRepresentation.empty,
        LldpRepresentation.empty,
        None,
        IpAddresses.findAllByAsset(asset),
        PowerUnits(),
        AssetMetaValue.findByAsset(asset)
      )
    } else {
      // Each reconstruct step consumes the meta values it recognizes and
      // passes the leftovers on to the next step.
      val (lshwRep, mvs) = LshwHelper.reconstruct(asset)
      val (lldpRep, mvs2) = LldpHelper.reconstruct(asset, mvs)
      val ipmi = IpmiInfo.findByAsset(asset)
      val addresses = IpAddresses.findAllByAsset(asset)
      val (powerRep, mvs3) = PowerHelper.reconstruct(asset, mvs2)
      // Drop tags configured as hidden before exposing them to callers.
      val filtered: Seq[MetaWrapper] = mvs3.filter(f => !Feature.hideMeta.contains(f.getName))
      AllAttributes(asset, lshwRep, lldpRep, ipmi, addresses, powerRep, filtered)
    }
  }
}

/**
 * Aggregate of everything known about an asset: the asset record itself,
 * hardware (lshw), link-layer (LLDP), IPMI credentials, IP addresses, power
 * configuration, and the remaining raw meta values.
 */
case class AllAttributes(
  asset: Asset,
  lshw: LshwRepresentation,
  lldp: LldpRepresentation,
  ipmi: Option[IpmiInfo],
  addresses: Seq[IpAddresses],
  power: PowerUnits,
  mvs: Seq[MetaWrapper])
{
  import collins.models.conversions._
  import collins.util.power.PowerUnit.PowerUnitFormat

  /**
   * Returns a copy with credentials redacted or exposed. When `showCreds`
   * is false, IPMI credentials are masked and encrypted meta tags removed.
   */
  def exposeCredentials(showCreds: Boolean = false) = {
    this.copy(ipmi = this.ipmi.map { _.withExposedCredentials(showCreds) })
      .copy(mvs = this.metaValuesWithExposedCredentials(showCreds))
  }

  // Filters out meta values whose tag is configured as encrypted, unless the
  // caller explicitly asked for credentials to be shown.
  protected def metaValuesWithExposedCredentials(showCreds: Boolean): Seq[MetaWrapper] = {
    if (showCreds) {
      mvs
    } else {
      mvs.filter(mv => !Feature.encryptedTags.map(_.name).contains(mv.getName))
    }
  }

  /**
   * Serializes the aggregate to JSON. Meta values are grouped by their group
   * id under "ATTRIBS", with each group rendered as a name -> value object.
   */
  def toJsValue(): JsValue = {
    val outSeq = Seq(
      "ASSET" -> asset.toJsValue,
      "HARDWARE" -> lshw.toJsValue,
      "LLDP" -> lldp.toJsValue,
      "IPMI" -> Json.toJson(ipmi),
      "ADDRESSES" -> Json.toJson(addresses),
      "POWER" -> Json.toJson(power),
      "ATTRIBS" -> JsObject(mvs.groupBy { _.getGroupId }.map { case(groupId, mv) =>
        groupId.toString -> JsObject(mv.map { mvw => mvw.getName -> JsString(mvw.getValue) })
      }.toSeq)
    )
    JsObject(outSeq)
  }
}
| funzoneq/collins | app/collins/models/asset/AllAttributes.scala | Scala | apache-2.0 | 2,806 |
package com.avast.cactus.grpc.server
/** Marker trait (no members) tagging types handled by the Cactus gRPC server layer — TODO(review) confirm intended usage against the server mapping code. */
trait GrpcService
| avast/cactus | grpc-server/src/main/scala/com/avast/cactus/grpc/server/GrpcService.scala | Scala | apache-2.0 | 56 |
/*
* The MIT License (MIT)
*
* Copyright (C) 2012 47 Degrees, LLC http://47deg.com hello@47deg.com
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*/
// sbt build entry point: derives the whole project definition from the
// android plugin's AutoBuild defaults (no custom settings needed here).
object Build extends android.AutoBuild
package sri.relay.container
import sri.core.ReactElement
import sri.relay.{RelayComponentProps, RelayComponent}
import sri.relay.container.RelayContainer.{Fragment, Fragments, RootQueries}
import sri.relay.mutation.RelayMutationTransaction
import sri.relay.query.{RelayFragmentReference, RelayQueryFragment}
import sri.relay.route.BuildRQL.QueryBuilder
import sri.relay.route.RelayQueryConfig
import sri.relay.tools.RelayTypes.{ComponentReadyStateChangeCallback, Variables}
import scala.scalajs.js
import scala.scalajs.js.Dynamic.{literal => json}
import scala.scalajs.js.JSConverters.JSRichGenMap
import scala.scalajs.js.annotation.ScalaJSDefined
import scala.scalajs.js.{UndefOr => U}
object RootQueries {
  /** Builds a Relay root-query dictionary from (name, builder) pairs. */
  def apply(queries: (String, QueryBuilder)*): RootQueries = queries.toMap.toJSDictionary
}

object Fragments {
  /** Builds a Relay fragment dictionary from (name, fragment) pairs. */
  def apply(fragments: (String, Fragment)*): Fragments = fragments.toMap.toJSDictionary
}

object RelayContainer {
  // Type aliases mirroring the shapes the Relay JS runtime expects.
  type Fragments = js.Dictionary[Fragment]
  type Fragment = js.Function0[js.Any]
  type RootQueries = js.Dictionary[QueryBuilder]
}
/** Shape of the spec object passed to Relay.createContainer: fragments plus optional variable hooks. */
@ScalaJSDefined
trait AbstractRelayContainerSpec extends js.Object {
  val fragments: Fragments
  val initialVariables: js.UndefOr[js.Object]
  val prepareVariables: js.UndefOr[js.Function]
}

/** Convenience base class that defaults both optional hooks to `undefined`. */
@ScalaJSDefined
abstract class RelayContainerSpec extends AbstractRelayContainerSpec {
  override val initialVariables: js.UndefOr[js.Object] = js.undefined
  override val prepareVariables: js.UndefOr[js.Function] = js.undefined
}
/**
 * Scala.js facade over a Relay container instance (all members are
 * implemented on the JS side, hence `js.native` bodies).
 *
 * RelayContainer is a higher order component that provides the ability to:
 *
 * - Encode data dependencies using query fragments that are parameterized by
 * routes and variables.
 * - Manipulate variables via methods on `this.props.relay`.
 * - Automatically subscribe to data changes.
 * - Avoid unnecessary updates if data is unchanged.
 * - Propagate the `route` via context (available on `this.props.relay`).
 *
 * @tparam P props type of the wrapped component
 * @tparam S state type of the wrapped component
 */
@js.native
trait RelayContainer[P <: RelayComponentProps,S] extends js.Object {

  var route: RelayQueryConfig = js.native

  var variables: Variables = js.native

  /**
   * Requests an update to variables. This primes the cache for the new
   * variables and notifies the caller of changes via the callback. As data
   * becomes ready, the component will be updated.
   */
  def setVariables(partialVariables: js.UndefOr[Variables] = js.undefined, callback: js.UndefOr[ComponentReadyStateChangeCallback] = js.undefined): Unit = js.native

  /**
   * Requests an update to variables. Unlike `setVariables`, this forces data
   * to be fetched and written for the supplied variables. Any data that
   * previously satisfied the queries will be overwritten.
   */
  def forceFetch(partialVariables: js.UndefOr[Variables] = js.undefined, callback: js.UndefOr[ComponentReadyStateChangeCallback] = js.undefined): Unit = js.native

  /**
   * Determine if the supplied record reflects an optimistic update.
   */
  def hasOptimisticUpdate(record: js.Object): Boolean = js.native

  /**
   * Returns the pending mutation transactions affecting the given record.
   */
  def getPendingTransactions(record: js.Object): js.UndefOr[js.Array[RelayMutationTransaction]] = js.native

  /**
   * Returns any error related to fetching data for a deferred fragment.
   */
  def getFragmentError(fragmentReference: RelayFragmentReference, record: js.Object): js.UndefOr[js.Error] = js.native

  /**
   * Checks if data for a deferred fragment is ready. This method should
   * *always* be called before rendering a child component whose fragment was
   * deferred (unless that child can handle null or missing data).
   */
  def hasFragmentData(fragmentReference: RelayFragmentReference, record: js.Object): Boolean = js.native

  // React lifecycle hooks exposed by the container (implemented in JS).
  def componentWillReceiveProps(nextProps: js.Object, nextContext: js.Object = ???): Unit = js.native

  def shouldComponentUpdate(nextProps: js.Object, nextState: js.Any, nextContext: js.Any): Boolean = js.native

  def render(): ReactElement = js.native

  /** Resolves a named fragment, optionally against a specific route/variables. */
  def getFragment(fragmentName: String, route: RelayQueryConfig = ???, variables: Variables = ???): RelayQueryFragment = js.native

  /** Names of all fragments declared by this container. */
  def getFragmentNames(): js.Array[String] = js.native
}
| hamazy/sri | relay/src/main/scala/sri/relay/container/RelayContainer.scala | Scala | apache-2.0 | 4,191 |
package meritserver.http.directives
import akka.http.scaladsl.server.Directive0
import akka.http.scaladsl.server.Directives.authorize
import meritserver.services.{TeamService, UserService}
import meritserver.utils.Configuration
/**
 * Akka HTTP route directives that gate access on auth tokens: the master
 * (admin) token always wins; otherwise access depends on whether the token
 * identifies the target user, the target team, or a member of the team.
 */
trait AuthDirectives extends Configuration {

  /** Admin token, or the token belonging to user `userId`. */
  def authorizeUser(userId: String, authToken: Option[String]): Directive0 =
    authorizeAdmin(authToken) | authorize(
      authToken.exists(token =>
        UserService.getUserById(userId).exists(_.authToken == token)))

  /** Admin token, the team's own token, or the token of any user in the team. */
  def authorizeTeamUser(teamId: String,
                        authToken: Option[String]): Directive0 =
    authorizeAdmin(authToken) | authorizeTeam(teamId, authToken) | authorize(
      authToken.exists(token =>
        UserService.getUserByToken(token).exists(_.teamId == teamId)))

  /** Admin token, or the token belonging to team `teamId`. */
  def authorizeTeam(teamId: String, authToken: Option[String]): Directive0 =
    authorizeAdmin(authToken) | authorize(
      authToken.exists(token =>
        TeamService.getTeamById(teamId).exists(_.authToken == token)))

  /** The master (admin) token only. */
  def authorizeAdmin(authToken: Option[String]): Directive0 =
    authorize(authToken.contains(masterAuthToken))
}
| tafli/MeritServer | src/main/scala/meritserver/http/directives/AuthDirectives.scala | Scala | mit | 1,246 |
package bloggers.fp_state_based
import bloggers.fp_state_based.Blogger.uninitialized
import bloggers.fp_state_based.Events.Deactivated
import bloggers.fp_state_based.Events._
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, Matchers, FunSuite}
/**
 * Spec for the State-monad-based blogger aggregate: each `Commands.*` call
 * yields a State transition whose `run(uninitialized)` returns a pair of
 * (new blogger state, emitted event(s)).
 */
class BloggersTest extends FunSuite with Matchers with BeforeAndAfterAll with BeforeAndAfter {

  var johnId: String = "johnId"
  var janeId: String = "janeId"

  test("that aggregate is initialized with initial state") {
    // when
    val john = Commands.initialize(johnId, "John", "Smith").run(uninitialized)
    // then: _1 is the resulting state, _2 the emitted event
    john._1 should equal(Blogger(johnId, "John", "Smith", List(), List(), true))
    john._2 should equal(Initialized(johnId, "John", "Smith"))
  }

  test("that two bloggers can be befriended") {
    // given: compose init + befriend into one State transition
    val recipe = for {
      init <- Commands.initialize(johnId, "John", "Smith")
      befriended <- Commands.befriend(janeId)
    } yield (befriended)
    // when
    val johnSocializing = recipe.run(uninitialized)
    // then
    johnSocializing._1 should equal(Blogger(johnId, "John", "Smith", List(janeId), List(), true))
    johnSocializing._2 should equal(Befriended(janeId))
  }

  test("that blogger can unfriend blogger") {
    // given
    val recipe = for {
      init <- Commands.initialize(johnId, "John", "Smith")
      befriended <- Commands.befriend(janeId)
      unfriended <- Commands.unfriend(janeId)
    } yield (unfriended)
    // when
    val johnUnsocializing = recipe.run(uninitialized)
    // then: friend list is back to empty
    johnUnsocializing._1 should equal(Blogger(johnId, "John", "Smith", List(), List(), true))
    johnUnsocializing._2 should equal(Unfriended(janeId))
  }

  test("that two bloggers can become enemies") {
    // given
    val recipe = for {
      init <- Commands.initialize(johnId, "John", "Smith")
      atWar <- Commands.makeEnemy(janeId)
    } yield (atWar)
    // when
    val johnFighting = recipe.run(uninitialized)
    // then: jane appears in the enemies list
    johnFighting._1 should equal(Blogger(johnId, "John", "Smith", List(), List(janeId), true))
    johnFighting._2 should equal(MadeEnemy(janeId))
  }

  test("that blogger can deactivate account") {
    val recipe = for {
      init <- Commands.initialize(johnId, "John", "Smith")
      deactivated <- Commands.deactivate("I'm out")
    } yield (deactivated)
    // when
    val johnFighting = recipe.run(uninitialized)
    // then: active flag flips to false
    johnFighting._1 should equal(Blogger(johnId, "John", "Smith", List(), List(), false))
    johnFighting._2 should equal(Deactivated("I'm out"))
  }

  //
  test("mr & mrs smith scenario") {
    // End-to-end scenario: yields the full event list rather than one event.
    val recipe = for {
      john <- Commands.initialize(johnId, "John", "Smith")
      johnInLove <- Commands.befriend(janeId)
      johnNotSoMuchInLove <- Commands.unfriend(janeId)
      johnBetrayed <- Commands.makeEnemy(janeId)
      johnForgave <- Commands.makePeace(janeId)
      johnBackInLove <- Commands.befriend(janeId)
      johnOnRetirement <- Commands.deactivate("Found love of my life")
    } yield (john :: johnInLove :: johnNotSoMuchInLove :: johnBetrayed :: johnForgave :: johnBackInLove :: johnOnRetirement :: Nil)

    val john = recipe.run(uninitialized)
    //then
    john._1 should equal(Blogger(johnId, "John", "Smith", List(janeId), List(), false))
    john._2 should equal(List(Initialized(johnId, "John", "Smith"), Befriended(janeId), Unfriended(janeId),
      MadeEnemy(janeId), MadePeace(janeId), Befriended(janeId),
      Deactivated("Found love of my life")))
  }
}
package io.buoyant.namer.consul
import com.twitter.finagle._
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.util._
import io.buoyant.consul.v1
import io.buoyant.namer.Metadata
import scala.util.control.NoStackTrace
/** Identifies a consul service, optionally narrowed to a single tag. */
private[consul] case class SvcKey(name: String, tag: Option[String]) {
  // Rendered as "name:tag" when a tag is present, bare "name" otherwise.
  override def toString = tag.fold(name)(t => s"$name:$t")
}
private[consul] object SvcAddr {

  /** Counters tracking the lifecycle of a service observation. */
  case class Stats(stats: StatsReceiver) {
    val opens = stats.counter("opens")
    val closes = stats.counter("closes")
    val errors = stats.counter("errors")
    val updates = stats.counter("updates")
  }

  /**
   * Runs a long-polling loop on a service object to obtain the set of
   * Addresses. This evaluates lazily so that only activity observed
   * via the returned Var incurs polling — presumably the loop starts on
   * first observation and stops on release (Var.async semantics; confirm).
   */
  def apply(
    consulApi: v1.ConsulApi,
    datacenter: String,
    key: SvcKey,
    domain: Option[String],
    consistency: Option[v1.ConsistencyMode] = None,
    preferServiceAddress: Option[Boolean] = None,
    stats: Stats
  ): Var[Addr] = {
    val meta = mkMeta(key, datacenter, domain)

    // One blocking-index poll against consul; `retry = true` delegates
    // transient-failure retries to the API client.
    def getAddresses(index: Option[String]): Future[v1.Indexed[Set[Address]]] =
      consulApi.serviceNodes(
        key.name,
        datacenter = Some(datacenter),
        tag = key.tag,
        blockingIndex = index,
        consistency = consistency,
        retry = true
      ).map(indexedToAddresses(preferServiceAddress))

    // Start by fetching the service immediately, and then long-poll
    // for service updates.
    Var.async[Addr](Addr.Pending) { state =>
      stats.opens.incr()

      // Set by the Closable below; checked at the top of each poll cycle.
      @volatile var stopped: Boolean = false
      def loop(index0: Option[String]): Future[Unit] = {
        if (stopped) Future.Unit
        else getAddresses(index0).transform {
          case Throw(Failure(Some(err: ConnectionFailedException))) =>
            // Drop the index, in case it's been reset by a consul restart
            loop(None)
          case Throw(e) =>
            // If an exception escaped getAddresses's retries, we
            // treat it as effectively fatal to the service
            // observation. In the future, we may consider retrying
            // certain failures (with backoff).
            state() = Addr.Failed(e)
            stats.errors.incr()
            Future.exception(e)
          case Return(v1.Indexed(_, None)) =>
            // If consul doesn't return an index, we're in bad shape.
            state() = Addr.Failed(NoIndexException)
            stats.errors.incr()
            Future.exception(NoIndexException)
          case Return(v1.Indexed(addrs, index1)) =>
            stats.updates.incr()
            // Empty address set means the service has no healthy nodes.
            val addr = addrs match {
              case addrs if addrs.isEmpty => Addr.Neg
              case addrs => Addr.Bound(addrs, meta)
            }
            state() = addr
            loop(index1)
        }
      }
      val pending = loop(None)

      // Releasing the observation stops the loop and interrupts any
      // in-flight long poll.
      Closable.make { _ =>
        stopped = true
        stats.closes.incr()
        pending.raise(ServiceRelease)
        Future.Unit
      }
    }
  }

  // Builds the bound-address metadata; when a DNS domain is configured the
  // consul-style authority name (tag.name.service.dc.domain) is attached.
  private[this] def mkMeta(key: SvcKey, dc: String, domain: Option[String]) =
    domain match {
      case None => Addr.Metadata.empty
      case Some(domain) =>
        val authority = key.tag match {
          case Some(tag) => s"${tag}.${key.name}.service.${dc}.${domain}"
          case None => s"${key.name}.service.${dc}.${domain}"
        }
        Addr.Metadata(Metadata.authority -> authority)
    }

  // Converts a consul response into addresses, honouring the
  // preferServiceAddress flag (Some(false) forces node addresses).
  private[this] def indexedToAddresses(preferServiceAddress: Option[Boolean]): v1.Indexed[Seq[v1.ServiceNode]] => v1.Indexed[Set[Address]] = {
    case v1.Indexed(nodes, idx) =>
      val addrs = preferServiceAddress match {
        case Some(false) => nodes.flatMap(serviceNodeToNodeAddr).toSet
        case _ => nodes.flatMap(serviceNodeToAddr).toSet
      }
      v1.Indexed(addrs, idx)
  }

  /**
   * Prefer service IPs to node IPs. Invalid addresses are ignored.
   */
  private val serviceNodeToAddr: v1.ServiceNode => Traversable[Address] = { n =>
    (n.Address, n.ServiceAddress, n.ServicePort) match {
      case (_, Some(ip), Some(port)) if !ip.isEmpty => Try(Address(ip, port)).toOption
      case (Some(ip), _, Some(port)) if !ip.isEmpty => Try(Address(ip, port)).toOption
      case _ => None
    }
  }

  /**
   * Always use node IPs. Invalid addresses are ignored.
   */
  private val serviceNodeToNodeAddr: v1.ServiceNode => Traversable[Address] = { n =>
    (n.Address, n.ServicePort) match {
      case (Some(ip), Some(port)) if !ip.isEmpty => Try(Address(ip, port)).toOption
      case _ => None
    }
  }

  // Raised into the pending poll when the observation is released.
  private[this] val ServiceRelease =
    Failure("service observation released", Failure.Interrupted)

  private[this] val NoIndexException =
    Failure(new IllegalArgumentException("consul did not return an index") with NoStackTrace)
}
| denverwilliams/linkerd | namer/consul/src/main/scala/io/buoyant/namer/consul/SvcAddr.scala | Scala | apache-2.0 | 4,848 |
package com.arcusys.learn.models.request
import com.arcusys.learn.models.{ BaseSortableCollectionRequestModel, BaseCollectionRequest, BaseCollectionRequestModel }
import com.arcusys.learn.service.util.Parameter
import org.scalatra.{ ScalatraBase }
/**
* Created by Iliya Tryapitsin on 12.03.14.
*/
/** Companion instance so the parameter name can be referenced without mixing the trait in. */
object BaseCollectionFilteredRequest extends BaseCollectionFilteredRequest

trait BaseCollectionFilteredRequest extends BaseCollectionRequest {
  // Name of the free-text filter request parameter.
  final val Filter = "filter"
}

/** Sortable collection request model that also reads an optional free-text filter (empty string when absent). */
abstract class BaseSortableCollectionFilteredRequestModel[T](scalatra: ScalatraBase, toEnum: String => T) extends BaseSortableCollectionRequestModel(scalatra, toEnum) {
  def filter = Parameter(BaseCollectionFilteredRequest.Filter).withDefault("")
}

/** Plain collection request model that also reads an optional free-text filter (empty string when absent). */
abstract class BaseCollectionFilteredRequestModel(scalatra: ScalatraBase) extends BaseCollectionRequestModel(scalatra) {
  def filter = Parameter(BaseCollectionFilteredRequest.Filter).withDefault("")
}
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.config
import com.twitter.io.TempFile
import com.twitter.ostrich.admin.RuntimeEnvironment
import com.twitter.util.Eval
import com.twitter.zipkin.builder.Builder
import com.twitter.zipkin.query.ZipkinQuery
import org.scalatest.{FunSuite, Matchers}
/**
 * Smoke test: evaluates each shipped query-service config file with the
 * Scala Eval compiler and checks the resulting builder can be applied.
 */
class ConfigSpec extends FunSuite with Matchers {
  val eval = new Eval

  test("validate query configs") {
    // Config scripts are bundled as test resources.
    val queryConfigFiles = Seq(
      "/query-dev.scala",
      "/query-cassandra.scala"
    ) map { TempFile.fromResourcePath(_) }

    for (file <- queryConfigFiles) {
      // Compiling the file must yield a non-null builder that applies cleanly.
      val config = eval[Builder[RuntimeEnvironment => ZipkinQuery]](file)
      config should not be(Nil)
      config.apply()
    }
  }
}
| chang2394/zipkin | zipkin-query-service/src/test/scala/com/twitter/zipkin/config/ConfigSpec.scala | Scala | apache-2.0 | 1,294 |
package it.almawave.kb.http.models
import it.almawave.linkeddata.kb.catalog.models.URIWithLabel
import it.almawave.linkeddata.kb.catalog.models.ItemByLanguage
import it.almawave.linkeddata.kb.catalog.models.Version
import it.almawave.linkeddata.kb.catalog.models.LANG
// REVIEW
// REVIEW
/**
 * Catalog metadata describing a published ontology: identity/source fields,
 * localized titles/descriptions, versioning, attribution and classification
 * (tags, categories, keywords). Field semantics are inferred from names —
 * TODO(review) confirm against the catalog model they are populated from.
 */
case class OntologyMetaModel(
  id: String,
  source: String,
  url: String,
  prefix: String,
  namespace: String,
  concepts: Set[String],
  imports: Set[URIWithLabel],
  titles: Seq[ItemByLanguage],
  descriptions: Seq[ItemByLanguage],
  versions: Seq[Version],
  creators: Set[URIWithLabel],
  // CHECK with provenance
  publishedBy: String,
  owner: String,
  langs: Seq[LANG],
  lastEditDate: String,
  license: URIWithLabel,
  tags: Seq[URIWithLabel],
  categories: Seq[URIWithLabel],
  keywords: Seq[String],
  // CHECK with provenance
  provenance: Seq[Map[String, Any]])
package com.ponkotuy.intercept
import io.lemonlabs.uri.Uri
import io.netty.buffer.ByteBuf
/**
*
* @author ponkotuy
* Date: 14/02/18.
*/
/** Hook invoked for each intercepted request/response exchange. */
trait Interceptor {
  // Called once per captured exchange with the request URI and the raw
  // request/response payloads. NOTE(review): buffer ownership/release
  // semantics are not visible here — confirm whether implementations must
  // retain/release the ByteBufs.
  def input(uri: Uri, requestContent: ByteBuf, responseContent: ByteBuf): Unit
}
| ttdoda/MyFleetGirls | client/src/main/scala/com/ponkotuy/intercept/Interceptor.scala | Scala | mit | 243 |
/*
* Copyright 2011 Arktekk AS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package no.arktekk.atom.extension.opensearch
import java.util.Locale
import java.nio.charset.Charset
import com.codecommit.antixml._
import no.arktekk.atom.extension.opensearch.OpensearchConstants._
import no.arktekk.atom._
import scala.Some
/**
 * Immutable wrapper around an OpenSearch `Query` XML element; all properties
 * are stored as attributes on the wrapped element, and each `withX` method
 * returns a new instance with that attribute set.
 *
 * @author Erlend Hamnaberg<erlend@hamnaberg.net>
 */
case class Query private[opensearch](wrapped: Elem) extends ElementWrapper{
  type T = Query

  protected def self = this

  def copy(elem: Elem) = new Query(elem)

  // `role` is the only mandatory attribute (guaranteed by the constructors
  // in the companion object), hence the bare `.get`.
  def role = attr("role").map(Role(_)).get
  def title = attr("title")
  def searchTerms = attr("searchTerms")
  def count = attr("count").map(_.toInt)
  def totalResults = attr("totalResults").map(_.toInt)
  def startIndex = attr("startIndex").map(_.toInt)
  def startPage = attr("startPage").map(_.toInt)
  def inputEncoding = attr("inputEncoding").map(Charset.forName(_))
  def outputEncoding = attr("outputEncoding").map(Charset.forName(_))

  // Functional setters: each returns a copy with the attribute updated.
  def withRole(role: Role) = withAttribute("role", role.name)
  def withTitle(title: String) = withAttribute("title", title)
  def withSearchTerms(terms: String) = withAttribute("searchTerms", terms)
  def withCount(count: Int) = withAttribute("count", count.toString)
  def withTotalResults(results: Int) = withAttribute("totalResults", results.toString)
  def withStartIndex(index: Int) = withAttribute("startIndex", index.toString)
  def withStartPage(page: Int) = withAttribute("startPage", page.toString)
  def withInputEncoding(encoding: Charset) = withAttribute("inputEncoding", encoding.name())
  def withOutputEncoding(encoding: Charset) = withAttribute("outputEncoding", encoding.name())
}
object Query {
  /** antixml selector matching OpenSearch `Query` child elements. */
  val selector: Selector[Elem] = NSRepr(ns) -> "Query"

  /** Creates an empty Query element carrying the mandatory `role` attribute. */
  def apply(role: Role): Query = {
    Query(Elem(NamespaceBinding(prefix, ns), "Query", Attributes("role" -> role.name)))
  }

  /** Creates a Query with the default role, `request`. */
  def apply(): Query = {
    apply(Role.REQUEST)
  }
}
/**
 * The role of an OpenSearch Query element. Equality and hashing are based
 * solely on the (lower-case) role name, so well-known instances and freshly
 * minted ones with the same name compare equal.
 */
class Role(val name: String) {
  override def hashCode() = name.hashCode()

  override def equals(obj: Any) = obj match {
    case other: Role => other.name == name
    case _ => false
  }

  override def toString = name
}

object Role {
  val REQUEST = new Role("request")
  val EXAMPLE = new Role("example")
  val RELATED = new Role("related")
  val CORRECTION = new Role("correction")
  val SUBSET = new Role("subset")
  val SUPERSET = new Role("superset")

  // Lookup table of the well-known roles, keyed by name.
  private val wellKnown: Map[String, Role] =
    Seq(REQUEST, EXAMPLE, RELATED, CORRECTION, SUBSET, SUPERSET)
      .map(r => r.name -> r).toMap

  /** Resolves a well-known role (case-insensitively) or mints a new one. */
  def apply(name: String) = {
    val lower = name.toLowerCase(Locale.ENGLISH)
    wellKnown.getOrElse(lower, new Role(lower))
  }

  def unapply(role: Role) = Some(role.name)
}
| arktekk/scala-atom | src/main/scala/no/arktekk/atom/extension/opensearch/Query.scala | Scala | apache-2.0 | 3,314 |
package org.eichelberger.sfc
import com.typesafe.scalalogging.slf4j.LazyLogging
import org.eichelberger.sfc.utils.Lexicographics
import Lexicographics.Lexicographic
import org.eichelberger.sfc.SpaceFillingCurve._
object RowMajorCurve {
  /** Convenience constructor taking per-dimension precisions (bit counts). */
  def apply(x: OrdinalNumber*): RowMajorCurve = new RowMajorCurve(OrdinalVector(x: _*))
}

/**
 * Row-major (lexicographic) space-filling curve.
 *
 * Assumes that the dimensions are listed in order from most
 * significant (first) to least significant (last).
 *
 * If you think about this, it's really just bit-ordering:
 * The most significant bits are first, followed by the less
 * significant bits, and the least significant bits bring up
 * the end.
 */
case class RowMajorCurve(precisions: OrdinalVector) extends SpaceFillingCurve with Lexicographic with LazyLogging {

  val name = "R"

  // bitMasks(i) keeps the low precisions(i) bits of coordinate i.
  val bitMasks = precisions.x.map(p => (1L << p) - 1L)

  /** Packs a point into a single ordinal by concatenating each dimension's bits, most-significant dimension first. */
  def index(point: OrdinalVector): OrdinalNumber = {
    var i = 0
    var acc = 0L
    while (i < precisions.size) {
      acc = (acc << precisions(i)) | (point(i) & bitMasks(i))
      i = i + 1
    }
    acc
  }

  /** Inverse of `index`: peels coordinates off the low bits (last dimension first) and reverses at the end. */
  def inverseIndex(ordinal: OrdinalNumber): OrdinalVector = {
    var i = precisions.size - 1
    var point = OrdinalVector()
    var ord = ordinal
    while (i >= 0) {
      point = point ++ (ord & bitMasks(i))
      ord = ord >> precisions(i)
      i = i - 1
    }
    point.reverse
  }

  /**
   * Covers `query` with contiguous index ranges. Because the last dimension
   * occupies the low-order bits, each range in the last dimension maps to a
   * contiguous index span for every combination of the leading dimensions.
   */
  def getRangesCoveringQuery(query: Query): Iterator[OrdinalPair] = {
    // quick check for "everything"
    if (isEverything(query))
      return Seq(OrdinalPair(0, size - 1L)).iterator

    // naive: assume none of the dimensions is full-range
    // (if they are, the range-consolidation should fix it, albeit more slowly
    // than if we handled it up front)
    val allRangeSets: Seq[OrdinalRanges] = query.toSeq
    val lastRangeSet: OrdinalRanges = allRangeSets.last
    val prefinalRangeSets: Seq[OrdinalRanges] =
      if (allRangeSets.size > 1) allRangeSets.dropRight(1)
      else Seq()

    // only consider combinations preceding the last (least significant) dimension
    val itr = rangesCombinationsIterator(prefinalRangeSets)
    val ranges = itr.flatMap(vec => {
      lastRangeSet.toSeq.map(r => {
        val minIdx = index(vec ++ r.min)
        val maxIdx = index(vec ++ r.max)
        OrdinalPair(minIdx, maxIdx)
      })
    })

    // final clean-up
    consolidatedRangeIterator(ranges)
  }
}
| cne1x/sfseize | src/main/scala/org/eichelberger/sfc/RowMajorCurve.scala | Scala | apache-2.0 | 2,409 |
package client
import common._
import com.typesafe.config.ConfigFactory
import akka.actor.{Actor, ActorRef, ActorLogging, ActorSystem, Props, AddressFromURIString}
import akka.remote.routing.RemoteRouterConfig
import akka.routing.RoundRobinPool
import scala.util.control._
import scala.concurrent.duration._
import scala.concurrent.{Future, ExecutionContext}
import akka.util.Timeout
import akka.actor.ReceiveTimeout
import akka.pattern.ask
/**
 * Console-driven Twitter client actor. On construction it prompts for a
 * username, connects to the remote master actor, and then alternates between
 * a blocking menu loop (reads stdin) and handling server replies in
 * `receive`. NOTE(review): the constructor and `interfaceLoop` block the
 * actor thread on console input — acceptable for this demo app, but it
 * stalls message processing while waiting for the user.
 */
class ClientActor extends Actor {

  // Get Ref to Master Actor (remote selection by fixed host/port).
  var master = context.actorSelection("akka.tcp://TwitterServer@127.0.0.1:2552/user/master")

  //Display welcome message to user
  println("Welcome to the Twitter Client!")
  println("Please enter your username or create a new one to get started.")
  var username = readLine("username> ")
  println("Connecting to Twitter Server with username: " + username)
  master ! CONNECT(username)

  // Prints the user's timeline (via ask) and then reads one menu command,
  // sending the corresponding message to the master actor.
  def interfaceLoop(){
    // Set Future for Tweets
    implicit val ec = ExecutionContext.Implicits.global
    implicit lazy val timeout = Timeout(10 seconds)
    val future = master.ask(GETTWEETS(username))
    future.onSuccess{
      case TWEETS(tweets: List[String]) =>
        println("Loading tweets...")
        for(tweet <- tweets){
          println(tweet)
        }
        println("Done loading tweets.")
    }

    //Main interface loop for user commands
    // NOTE(review): options are matched with `contains`, so e.g. "10" selects
    // option 1 — exact equality would be stricter; left as-is to preserve
    // current behavior.
    var loop = new Breaks;
    loop.breakable{
      //print out user options
      println("***Commands and Options***")
      println("***Enter > 1 < for User List***")
      println("***Enter > 2 < for Posting Tweet***")
      println("***Enter > 3 < to follow user***")
      println("***Enter > 4 < for all posts by hashtag***")
      println("***Enter > 5 < for all posts by user***")
      println("***Enter > 0 < to disconnect and shutdown app***")
      println("******************************")
      //Read in user commands
      for(ln <- io.Source.stdin.getLines){
        if(ln.contains("1")){
          println("List of Users loading...")
          master ! GETUSERS
          loop.break
        }else if(ln.contains("2")){
          var hashtag = ""
          var tweet = readLine("Tweet> ")
          if(tweet.contains("#")){
            //hashtag tweet: extract the (last) #word from the text
            var parsedTweet = tweet.split(" ")
            for(word <- parsedTweet){
              if(word.contains("#")) hashtag = word;
            }
            println("Posting hashtag: #" + hashtag + ", with Tweet: " + tweet)
            master ! TWEET(hashtag, tweet, username)
          }else{
            //just post tweet
            println("Posting Tweet: " + tweet)
            master ! TWEET("", tweet, username)
          }
          loop.break
        }else if(ln.contains("3")){
          var userToFollow = readLine("UserToFollow> ")
          println("Requesting to follow: " + userToFollow)
          master ! FOLLOW(username, userToFollow)
          loop.break
        }else if(ln.contains("4")){
          var tagTweets = readLine("HashTag> ")
          println("Loading Tweets with hashtag #" + tagTweets)
          master ! REQUESTBYTAG(tagTweets)
          loop.break
        }else if(ln.contains("5")){
          var userTweets = readLine("Username> ")
          println("Loading Tweets by user: " + userTweets)
          master ! REQUESTBYUSER(userTweets)
          loop.break
        }else if(ln.contains("0")){
          println("Disconnecting from server.")
          master ! DISCONNECT(username)
          loop.break
        }else{
          println("Please enter a valid number/option (1 - 4).")
        }
      }
    }
  }

  //context.setReceiveTimeout(100 milliseconds)

  // Server replies: print the payload, then re-enter the menu loop
  // (except on DISCONNECTED, which shuts the client down).
  def receive = {
    case USERS(users: List[String]) =>
      for(user <- users){
        println("Active User: " + user)
      }
      interfaceLoop()
    case TWEETS(tweets: List[String]) =>
      for(tweet <- tweets){
        println("Tweet: " + tweet)
      }
      interfaceLoop()
    case CONNECTED(msg: String) =>
      println(msg)
      interfaceLoop()
    case FOLLOWING(msg: String) =>
      println(msg)
      interfaceLoop()
    case POSTED(msg: String) =>
      println(msg)
      interfaceLoop()
    case REALTIMETWEET(tweet: String) =>
      println(tweet)
      interfaceLoop()
    case ReceiveTimeout =>
      interfaceLoop()
    case DISCONNECTED =>
      println("Disconnected from server, shutting down client.")
      context.system.shutdown
    case _ =>
      println("Received unknown msg.")
      interfaceLoop()
  }
}
| highlanderkev/ChatShare | twitterapp/client/src/main/scala/clientactor.scala | Scala | mit | 4,581 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc.interactive.tests.core
import scala.tools.nsc.io.Path
/** Common settings for the test. */
/** Common settings for the test. */
private[tests] trait TestSettings {
  protected final val TIMEOUT = 30000 // timeout in milliseconds

  /** The root directory for this test suite, usually the test kind ("test/files/presentation"). */
  protected val outDir = Path(System.getProperty("partest.cwd", "."))

  /** The base directory for this test, usually a subdirectory of "test/files/presentation/" */
  protected val baseDir = Option(System.getProperty("partest.testname")).map(outDir / _).getOrElse(Path("."))

  /** Where source files are placed. */
  protected val sourceDir = "src"

  // Default reporter used by tests mixing in this trait; writes to the console.
  protected implicit val reporter: Reporter = ConsoleReporter
}
| lrytz/scala | src/interactive/scala/tools/nsc/interactive/tests/core/TestSettings.scala | Scala | apache-2.0 | 1,026 |
/*
* Copyright 2017 Sumo Logic
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ws.epigraph.java.service.projections.req
import java.nio.file.Path
import ws.epigraph.compiler._
import ws.epigraph.java.NewlineStringInterpolator.NewlineHelper
import ws.epigraph.java.service.projections.ProjectionGenUtil
import ws.epigraph.java.{JavaGen, JavaGenNames, JavaGenUtils}
import ws.epigraph.lang.Qn
import ws.epigraph.projections.gen.ProjectionReferenceName
import ws.epigraph.projections.op.OpParams
import ws.epigraph.types.DatumTypeApi
/**
* @author <a href="mailto:konstantin.sobolev@gmail.com">Konstantin Sobolev</a>
*/
/**
 * Base for request-projection Java class generators: derives the target
 * namespace, class names and `extends` clause of the generated class.
 *
 * @author <a href="mailto:konstantin.sobolev@gmail.com">Konstantin Sobolev</a>
 */
trait ReqProjectionGen extends JavaGen {
  protected type GenType <: ReqProjectionGen

  protected val baseNamespaceProvider: BaseNamespaceProvider

  /** Root namespace of the generated class. */
  protected def baseNamespace: Qn = baseNamespaceProvider.baseNamespace

  /** Suffix appended to the base namespace; empty by default. */
  protected def namespaceSuffix: Qn = Qn.EMPTY

  /** Full namespace of the generated class: base namespace plus suffix. */
  final def namespace: Qn = baseNamespace.append(namespaceSuffix)

  def shortClassName: String

  def fullClassName: String = namespace.append(shortClassName).toString

  /** Generator for the parent class, when the generated class extends one. */
  protected def parentClassGenOpt: Option[GenType] = None

  override def relativeFilePath: Path = JavaGenUtils.fqnToPath(namespace).resolve(shortClassName + ".java")

  protected val packageStatement: String = s"package $namespace;"

  override def description: String = s"${ super.description }\\n java class $namespace::$shortClassName"

  /** `extends Parent ` clause text, or the empty string when there is no parent. */
  def extendsClause: String = parentClassGenOpt.fold("")(p => s"extends ${ p.fullClassName } ")
}
object ReqProjectionGen {
  val classNamePrefix: String = "" // "Req" // we don't generate "Op", so this should be OK ?
  val classNameSuffix: String = "Projection"

  /** Namespace of a projection: package of its reference name when present, `default` otherwise. */
  def baseNamespace(referenceName: Option[ProjectionReferenceName], default: Qn): Qn =
    referenceName.map(n => JavaGenNames.pnq(ProjectionGenUtil.toQn(n))).getOrElse(default)

  /** Namespace suffix: empty for named (referenced) projections, `default` otherwise. */
  def namespaceSuffix(name: Option[ProjectionReferenceName], default: Qn): Qn =
    name.map(_ => Qn.EMPTY).getOrElse(default)

  /**
   * Generates Java parameter-accessor methods for the given operation parameters.
   *
   * Accessors for built-in primitive parameter types are unwrapped to return native
   * Java values (`String`, `Integer`, ...) instead of epigraph datum wrappers.
   *
   * @param op            operation parameter declarations
   * @param namespace     namespace of the generated class (used to shorten type references)
   * @param reqParamsExpr Java expression yielding the request parameters map
   *
   * @return code chunk containing one accessor per parameter plus the required imports
   */
  def generateParams(op: OpParams, namespace: String, reqParamsExpr: String): CodeChunk = {
    // Explicit `asScala` decorators instead of the deprecated wholesale implicit
    // conversions from `scala.collection.JavaConversions`.
    import scala.collection.JavaConverters._

    op.asMap().values().asScala.map { p =>
      val datumType: CDatumType = JavaGenUtils.toCType(p.projection().`type`().asInstanceOf[DatumTypeApi])
      // Scala doesn't get it
      val valueType = JavaGenNames.lqn2(datumType, namespace)

      // parameter is guaranteed present when flagged required or when it has a default value
      val notnull = p.projection().flag() || p.projection().defaultValue() != null
      val nullAnnotation = if (notnull) "@NotNull" else "@Nullable"
      val nullHandlingCode = if (notnull) "assert param != null;" else "if (param == null) return null;"

      // accessor template for built-in primitives: unwraps the datum to its native value
      def genPrimitiveParam(nativeType: String): String = /*@formatter:off*/sn"""\\
  /**
   * @return {@code ${p.name()}} parameter value
   */
  public $nullAnnotation $nativeType get${JavaGenUtils.up(p.name())}Parameter() {
    ReqParam param = $reqParamsExpr.get("${p.name()}");
    $nullHandlingCode
    $valueType nativeValue = ($valueType) param.value();
    return nativeValue == null ? null : nativeValue.getVal();
  }
"""/*@formatter:on*/

      // accessor template for all other types: returns the datum as-is
      def genNonPrimitiveParam: String = /*@formatter:off*/sn"""\\
  /**
   * @return {@code ${p.name()}} parameter value
   */
  public $nullAnnotation $valueType get${JavaGenUtils.up(p.name())}Parameter() {
    ReqParam param = $reqParamsExpr.get("${p.name()}");
    $nullHandlingCode
    return ($valueType) param.value();
  }
"""/*@formatter:on*/

      // unwrap primitive param accessors to return native values
      val paramCode = if (JavaGenUtils.builtInPrimitives.containsKey(datumType.name.name))
        datumType.kind match {
          case CTypeKind.STRING => genPrimitiveParam("String")
          case CTypeKind.INTEGER => genPrimitiveParam("Integer")
          case CTypeKind.LONG => genPrimitiveParam("Long")
          case CTypeKind.DOUBLE => genPrimitiveParam("Double")
          case CTypeKind.BOOLEAN => genPrimitiveParam("Boolean")
          case _ => genNonPrimitiveParam
        } else genNonPrimitiveParam

      CodeChunk(
        paramCode, Set(
          "org.jetbrains.annotations.Nullable",
          "org.jetbrains.annotations.NotNull",
          "ws.epigraph.projections.req.ReqParam"
        )
      )
    }.foldLeft(CodeChunk.empty)(_ + _)
  }
}
| SumoLogic/epigraph | java/codegen/src/main/scala/ws/epigraph/java/service/projections/req/ReqProjectionGen.scala | Scala | apache-2.0 | 5,011 |
package com.typesafe.akka.http.benchmark.handlers
import akka.http.scaladsl.marshalling.{Marshaller, ToEntityMarshaller}
import akka.http.scaladsl.model.HttpCharsets._
import akka.http.scaladsl.model.MediaTypes._
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import com.typesafe.akka.http.benchmark.Infrastructure
import com.typesafe.akka.http.benchmark.Templating
import com.typesafe.akka.http.benchmark.datastore.DataStore
import com.typesafe.akka.http.benchmark.entity.Fortune
trait FortunesHandler { _: Infrastructure with DataStore with Templating =>

  /** GET /fortunes: loads all fortunes from the data store and renders them as HTML. */
  def fortunesEndpoint: Route =
    get {
      path("fortunes") {
        onSuccess(getFortunes) { fortunes =>
          complete(fortunes)
        }
      }
    }

  /** Marshals a sequence of fortunes through the mustache template into a UTF-8 HTML entity. */
  private implicit lazy val fortunesMarshaller: ToEntityMarshaller[Seq[Fortune]] = {
    val template = templateEngine.load("/templates/fortunes.mustache")
    Marshaller.opaque { fortunes =>
      val html = templateEngine.layout("", template, Map("fortunes" -> fortunes))
      HttpEntity(contentType = `text/html`.withCharset(`UTF-8`), string = html)
    }
  }
}
| jeevatkm/FrameworkBenchmarks | frameworks/Scala/akka-http/src/main/scala/com/typesafe/akka/http/benchmark/handlers/FortunesHandler.scala | Scala | bsd-3-clause | 1,142 |
package juan.ddd.proto
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{Matchers, WordSpec}
import scala.concurrent.duration._
import spray.testkit.ScalatestRouteTest
/**
* Standard base class for tests. Includes the following features:
*
* - WordSpec style tests with Matcher DSL for assertions
*
* - Support for testing Futures including the useful whenReady construct
*
* - Support for testing spray Routes
*/
class StandardSpec extends WordSpec
  with Matchers
  with ScalaFutures
  with ScalatestRouteTest
{
  // Route tests time out after 1 second. Implicit members should always carry an
  // explicit result type so implicit resolution does not depend on type inference.
  protected implicit def routeTestTimeout: RouteTestTimeout =
    RouteTestTimeout(1.seconds)
}
| juan62/ddd-proto | src/test/scala/juan/ddd/proto/StandardSpec.scala | Scala | mit | 633 |
package scala
// #scalaexample
import javax.net.ssl._
import play.core.ApplicationProvider
import play.server.api._
class CustomSSLEngineProvider(appProvider: ApplicationProvider) extends SSLEngineProvider {
  /** Creates the SSL engine used by the server; change it to your custom implementation. */
  override def createSSLEngine(): SSLEngine = {
    val context = SSLContext.getDefault
    context.createSSLEngine()
  }
}
// #scalaexample
| jyotikamboj/container | pf-documentation/manual/detailedTopics/production/code/scala/CustomSSLEngineProvider.scala | Scala | mit | 372 |
package org.jetbrains.plugins.scala.lang.refactoring.introduceVariable
import java.{util => ju}
import com.intellij.internal.statistic.UsageTrigger
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.{Pass, TextRange}
import com.intellij.psi.PsiModifier.PRIVATE
import com.intellij.psi._
import com.intellij.psi.impl.source.codeStyle.CodeEditUtil
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.psi.util.PsiTreeUtil.findElementOfClassAtOffset
import com.intellij.refactoring.introduce.inplace.OccurrencesChooser
import org.jetbrains.plugins.scala.ScalaBundle
import org.jetbrains.plugins.scala.extensions.{PsiElementExt, childOf, inWriteAction, startCommand}
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScDeclaredElementsHolder, ScPatternDefinition, ScVariableDefinition}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScEarlyDefinitions
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.{ScClassParents, ScExtendsBlock, ScTemplateBody}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScMember
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory._
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.refactoring.namesSuggester.NameSuggester
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaRefactoringUtil._
import org.jetbrains.plugins.scala.lang.refactoring.util.{ScalaRefactoringUtil, ScalaVariableValidator, ValidationReporter}
import org.jetbrains.plugins.scala.project.ProjectContext
/**
* Created by Kate Ustyuzhanina
* on 9/18/15
*/
// "Introduce Variable" refactoring for expressions: extracts the selected
// expression into a new val/var and replaces its occurrences.
trait IntroduceExpressions {
  this: ScalaIntroduceVariableHandler =>
  val INTRODUCE_VARIABLE_REFACTORING_NAME: String = ScalaBundle.message("introduce.variable.title")
  import IntroduceExpressions._
  // Entry point: validates the selection [startOffset, endOffset) in `file`,
  // collects occurrences of the selected expression and dispatches to the
  // inplace (in-editor) or dialog-based flow. Shows an error hint and aborts
  // when the selection is not a refactorable expression.
  def invokeExpression(file: PsiFile, startOffset: Int, endOffset: Int)
                      (implicit project: Project, editor: Editor): Unit = {
    try {
      UsageTrigger.trigger(ScalaBundle.message("introduce.variable.id"))
      PsiDocumentManager.getInstance(project).commitAllDocuments()
      writableScalaFile(file, INTRODUCE_VARIABLE_REFACTORING_NAME)
      // nonlocal `return`s below abort the whole refactoring from inside the lambdas
      val (expr, types) = getExpressionWithTypes(file, startOffset, endOffset).getOrElse {
        showErrorHint(ScalaBundle.message("cannot.refactor.not.expression"), INTRODUCE_VARIABLE_REFACTORING_NAME)
        return
      }
      checkCanBeIntroduced(expr).foreach { message =>
        showErrorHint(message, INTRODUCE_VARIABLE_REFACTORING_NAME)
        return
      }
      val occurrences = fileEncloser(file, startOffset).toSeq.flatMap {
        getOccurrenceRanges(expr, _)
      }
      implicit val validator: ScalaVariableValidator = ScalaVariableValidator(file, expr, occurrences)
      val suggestedNames = SuggestedNames(expr, types)
      val occurrencesInFile = OccurrencesInFile(file, new TextRange(startOffset, endOffset), occurrences)
      if (isInplaceAvailable(editor)) runInplace(suggestedNames, occurrencesInFile)
      else runWithDialog(suggestedNames, occurrencesInFile)
    }
    catch {
      // IntroduceException signals a user-visible abort; already reported upstream
      case _: IntroduceException =>
    }
  }
  // Performs the refactoring as a single undoable command with the values
  // chosen in the dialog, then clears the editor selection.
  def runRefactoring(occurrences: OccurrencesInFile, expression: ScExpression, varName: String, varType: ScType,
                     replaceAllOccurrences: Boolean, isVariable: Boolean)
                    (implicit editor: Editor): Unit = {
    startCommand(editor.getProject, INTRODUCE_VARIABLE_REFACTORING_NAME) {
      runRefactoringInside(occurrences, expression, varName, varType, replaceAllOccurrences, isVariable, fromDialogMode = true) // this for better debug
    }
    editor.getSelectionModel.removeSelection()
  }
  // Inplace flow: lets the user pick "replace this / all occurrences" via the
  // occurrences chooser (skipped when there are no other occurrences), inserts
  // the declaration in a write action and starts in-editor rename.
  private def runInplace(suggestedNames: SuggestedNames, occurrences: OccurrencesInFile)
                        (implicit project: Project, editor: Editor): Unit = {
    import OccurrencesChooser.ReplaceChoice
    val callback: Pass[ReplaceChoice] = (replaceChoice: ReplaceChoice) => {
      val replaceAll = ReplaceChoice.NO != replaceChoice
      startCommand(project, INTRODUCE_VARIABLE_REFACTORING_NAME) {
        val SuggestedNames(expression, types, names) = suggestedNames
        val reference = inWriteAction {
          // first suggested name/type are used as the initial values for inplace editing
          runRefactoringInside(occurrences, expression, names.head, types.head, replaceAll, isVariable = false, fromDialogMode = false)
        }
        performInplaceRefactoring(reference.getElement, types.headOption, replaceAll, forceInferType(expression), names)
      }
    }
    val OccurrencesInFile(_, mainRange, occurrences_) = occurrences
    if (occurrences_.isEmpty) {
      callback.pass(ReplaceChoice.NO)
    } else {
      val chooser = new OccurrencesChooser[TextRange](editor) {
        override def getOccurrenceRange(occurrence: TextRange): TextRange = occurrence
      }
      chooser.showChooser(mainRange, ju.Arrays.asList(occurrences_ : _*), callback)
    }
  }
  // Dialog flow: collects name/type/replace-all/val-vs-var choices from the
  // user and delegates to runRefactoring when the dialog is accepted.
  private def runWithDialog(suggestedNames: SuggestedNames, occurrences: OccurrencesInFile)
                           (implicit project: Project, editor: Editor, validator: ScalaVariableValidator): Unit = {
    val occurrences_ = occurrences.occurrences
    val SuggestedNames(expression, types, names) = suggestedNames
    val dialog = new ScalaIntroduceVariableDialog(project, types, occurrences_.length, new ValidationReporter(project, this), names, expression)
    this.showDialogImpl(dialog, occurrences_).foreach { dialog =>
      runRefactoring(occurrences, suggestedNames.expression,
        varName = dialog.getEnteredName,
        varType = dialog.getSelectedType,
        replaceAllOccurrences = dialog.isReplaceAllOccurrences,
        isVariable = dialog.isDeclareVariable
      )
    }
  }
}
object IntroduceExpressions {
  // Bundles the expression with its candidate types; `names` is computed lazily
  // on each access from the NameSuggester.
  private class SuggestedNames(val expression: ScExpression, val types: Array[ScType]) {
    def names: Array[String] = NameSuggester.suggestNames(expression).toArray
  }
  private object SuggestedNames {
    def apply(expression: ScExpression, types: Array[ScType]): SuggestedNames =
      new SuggestedNames(expression, types)
    def unapply(names: SuggestedNames): Option[(ScExpression, Array[ScType], Array[String])] =
      Some(names.expression, names.types, names.names)
  }
  // `mainRange` is the user's selection; `occurrences` are all matching ranges in `file`.
  case class OccurrencesInFile(file: PsiFile, mainRange: TextRange, occurrences: Seq[TextRange])
  // Starts in-editor (inplace) renaming on the freshly inserted declaration:
  // moves the caret to the named element, commits pending document changes and
  // launches ScalaInplaceVariableIntroducer with the suggested names.
  private def performInplaceRefactoring(newDeclaration: PsiElement,
                                        maybeType: Option[ScType],
                                        replaceAll: Boolean,
                                        forceType: Boolean,
                                        suggestedNames: Array[String])
                                       (implicit project: Project, editor: Editor): Unit = {
    // the element carrying the new name: val/var pattern binding or enumerator binding
    val maybeNamedElement = newDeclaration match {
      case holder: ScDeclaredElementsHolder => holder.declaredElements.headOption
      case enum: ScEnumerator => enum.pattern.bindings.headOption
      case _ => None
    }
    // the initializer expression of the new declaration (may be null)
    val newExpr = newDeclaration match {
      case ScVariableDefinition.expr(x) => x
      case ScPatternDefinition.expr(x) => x
      case enum: ScEnumerator => enum.rvalue
      case _ => null
    }
    maybeNamedElement.filter(_.isValid).foreach { named =>
      editor.getCaretModel.moveToOffset(named.getTextOffset)
      editor.getSelectionModel.removeSelection()
      if (isInplaceAvailable(editor)) {
        // commit and unblock the document before starting the inplace template
        (editor.getDocument, PsiDocumentManager.getInstance(project)) match {
          case (document, manager) =>
            manager.commitDocument(document)
            manager.doPostponedOperationsAndUnblockDocument(document)
        }
        new ScalaInplaceVariableIntroducer(newExpr, maybeType, named, replaceAll, forceType)
          .performInplaceRefactoring(new ju.LinkedHashSet(ju.Arrays.asList(suggestedNames: _*)))
      }
    }
  }
  // Function expressions always get an explicit type annotation on the new val.
  private def forceInferType(expression: ScExpression) = expression.isInstanceOf[ScFunctionExpr]
  //returns smart pointer to ScDeclaredElementsHolder or ScEnumerator
  private def runRefactoringInside(occurrencesInFile: OccurrencesInFile,
                                   expression: ScExpression,
                                   varName: String,
                                   varType: ScType,
                                   replaceAllOccurrences: Boolean,
                                   isVariable: Boolean,
                                   fromDialogMode: Boolean)
                                  (implicit editor: Editor): SmartPsiElementPointer[PsiElement] = {
    val OccurrencesInFile(file, mainRange, occurrences_) = occurrencesInFile
    // when not replacing all, only the user's selection is touched
    val occurrences = if (replaceAllOccurrences) occurrences_ else Seq(mainRange)
    // index of the occurrence overlapping the original selection (caret lands there)
    val mainOccurence = occurrences.indexWhere(range => range.contains(mainRange) || mainRange.contains(range))
    val copy = expressionToIntroduce(expression)
    val forceType = forceInferType(copy)
    def needsTypeAnnotation(element: PsiElement) =
      ScalaInplaceVariableIntroducer.needsTypeAnnotation(element, copy, forceType, fromDialogMode)
    val maybeTypeText = Option(varType).map(_.canonicalText)
    // the trailing function decides, per insertion point, whether to emit the type text
    runRefactoringInside(file, unparExpr(copy), occurrences, mainOccurence, varName, isVariable, forceType) { element =>
      maybeTypeText
        .filter(_ => needsTypeAnnotation(element))
        .getOrElse("")
    }
  }
  // Core mutation: replaces the occurrences with the new name and inserts the
  // declaration (val/var, early definition, or `for` enumerator) at the right spot.
  private[this] def runRefactoringInside(file: PsiFile,
                                         expression: ScExpression,
                                         occurrences: Seq[TextRange],
                                         mainOccurence: Int,
                                         varName: String,
                                         isVariable: Boolean,
                                         forceType: Boolean)
                                        (typeTextIfNeeded: PsiElement => String)
                                        (implicit editor: Editor): SmartPsiElementPointer[PsiElement] = {
    implicit val projectContext: ProjectContext = file
    // matches elements inside a class `extends` clause (but not anonymous `new ... {}`)
    object inExtendsBlock {
      def unapply(e: PsiElement): Option[ScExtendsBlock] = {
        e match {
          case extBl: ScExtendsBlock =>
            Some(extBl)
          case elem if PsiTreeUtil.getParentOfType(elem, classOf[ScClassParents]) != null =>
            PsiTreeUtil.getParentOfType(elem, classOf[ScExtendsBlock]) match {
              case _ childOf (_: ScNewTemplateDefinition) => None
              case extBl => Some(extBl)
            }
          case _ => None
        }
      }
    }
    // true when the selection spans a whole single statement line at block level;
    // then the line itself can be replaced by the declaration ("fast" path)
    def isOneLiner = {
      val lineText = getLineText(editor)
      val model = editor.getSelectionModel
      val document = editor.getDocument
      val selectedText = model.getSelectedText
      val oneLineSelected = selectedText != null && lineText != null && selectedText.trim == lineText.trim
      val element = file.findElementAt(model.getSelectionStart)
      var parent = element
      def atSameLine(elem: PsiElement) = {
        val textRange = elem.getTextRange
        val lineNumbers = Seq(model.getSelectionStart, textRange.getStartOffset, textRange.getEndOffset)
          .map(document.getLineNumber)
        lineNumbers.distinct.size == 1
      }
      // climb to the outermost element still confined to the selection's line
      while (parent != null && !parent.isInstanceOf[PsiFile] && atSameLine(parent)) {
        parent = parent.getParent
      }
      val insideExpression = parent match {
        case null | _: ScBlock | _: ScTemplateBody | _: ScEarlyDefinitions | _: PsiFile => false
        case _ => true
      }
      oneLineSelected && !insideExpression
    }
    // snapshot for undo-on-escape during inplace editing
    val revertInfo = RevertInfo(file.getText, editor.getCaretModel.getOffset)
    editor.putUserData(ScalaIntroduceVariableHandler.REVERT_INFO, revertInfo)
    val fastDefinition = occurrences.length == 1 && isOneLiner
    //changes document directly
    val replacedOccurences = replaceOccurences(occurrences, varName, file)
    //only Psi-operations after this moment
    var firstRange = replacedOccurences.head
    val firstElement = findParentExpr(file, firstRange)
    // with a single occurrence, widen the anchor for blocks used as call/infix
    // arguments so a forced type annotation attaches at the right level
    val parentExprs =
      if (occurrences.length == 1)
        firstElement match {
          case _ childOf ((block: ScBlock) childOf ((_) childOf (call: ScMethodCall)))
            if forceType && block.statements.size == 1 => Seq(call)
          case _ childOf ((block: ScBlock) childOf (infix: ScInfixExpr))
            if forceType && block.statements.size == 1 => Seq(infix)
          case expr => Seq(expr)
        }
      else replacedOccurences.toSeq.map(findParentExpr(file, _))
    val commonParent: PsiElement = PsiTreeUtil.findCommonParent(parentExprs: _*)
    val nextParentInFile = nextParent(commonParent, file)
    editor.getCaretModel.moveToOffset(replacedOccurences(mainOccurence).getEndOffset)
    // Inserts `varName <- expr` / `varName = expr` as a new enumerator before the
    // enumerator containing the first occurrence, adding separators as needed.
    def createEnumeratorIn(forStmt: ScForStatement): ScEnumerator = {
      val parent: ScEnumerators = forStmt.enumerators.orNull
      val inParentheses = parent.prevSiblings.toList.exists(_.getNode.getElementType == ScalaTokenTypes.tLPARENTHESIS)
      val created = createEnumerator(varName, expression, typeTextIfNeeded(parent))
      val elem = parent.getChildren.filter(_.getTextRange.contains(firstRange)).head
      var result: ScEnumerator = null
      if (elem != null) {
        var needSemicolon = true
        var sibling = elem.getPrevSibling
        if (inParentheses) {
          // parenthesized enumerators are separated by semicolons
          while (sibling != null && sibling.getText.trim == "") sibling = sibling.getPrevSibling
          if (sibling != null && sibling.getText.endsWith(";")) needSemicolon = false
          val semicolon = parent.addBefore(createSemicolon, elem)
          result = parent.addBefore(created, semicolon).asInstanceOf[ScEnumerator]
          if (needSemicolon) {
            parent.addBefore(createSemicolon, result)
          }
        } else {
          // braced enumerators are separated by newlines
          // NOTE(review): `sibling` is not null-checked here, unlike the branch
          // above — looks like a possible NPE when `elem` is first; confirm.
          if (sibling.getText.indexOf('\n') != -1) needSemicolon = false
          result = parent.addBefore(created, elem).asInstanceOf[ScEnumerator]
          parent.addBefore(createNewLine()(elem.getManager), elem)
          if (needSemicolon) {
            parent.addBefore(createNewLine(), result)
          }
        }
      }
      result
    }
    // Inserts the val/var declaration: either replacing the whole selected line
    // (fast path), as an early definition, or before the first occurrence,
    // wrapping the parent in braces when required.
    def createVariableDefinition(): PsiElement = {
      if (fastDefinition) {
        val declaration = createDeclaration(varName, typeTextIfNeeded(firstElement), isVariable, expression)
        replaceRangeByDeclaration(declaration.getText, firstRange)(declaration.getProject, editor)
        val start = firstRange.getStartOffset
        Option(findElementOfClassAtOffset(file, start, classOf[ScMember], /*strictStart =*/ false))
          .getOrElse(findElementOfClassAtOffset(file, start, classOf[ScEnumerator], /*strictStart =*/ false))
      } else {
        var needFormatting = false
        val parent = commonParent match {
          case inExtendsBlock(extBl) =>
            needFormatting = true
            extBl.addEarlyDefinitions()
          case _ =>
            val needBraces = !commonParent.isInstanceOf[ScBlock] && ScalaRefactoringUtil.needBraces(commonParent, nextParentInFile)
            if (needBraces) {
              // account for the opening brace that shifts all offsets by one
              firstRange = firstRange.shiftRight(1)
              val replaced = commonParent.replace(createExpressionFromText("{" + commonParent.getText + "}"))
              replaced.getPrevSibling match {
                case ws: PsiWhiteSpace if ws.getText.contains("\n") => ws.delete()
                case _ =>
              }
              replaced
            } else container(commonParent).getOrElse(file)
        }
        val anchor = parent.getChildren.find(_.getTextRange.contains(firstRange)).getOrElse(parent.getLastChild)
        if (anchor != null) {
          val created = createDeclaration(varName, typeTextIfNeeded(anchor), isVariable, expression)
          val result = ScalaPsiUtil.addStatementBefore(created.asInstanceOf[ScBlockStatement], parent, Some(anchor))
          CodeEditUtil.markToReformat(parent.getNode, needFormatting)
          result
        } else throw new IntroduceException
      }
    }
    val createdDeclaration: PsiElement = isIntroduceEnumerator(commonParent, nextParentInFile, firstRange) match {
      case Some(forStmt) => createEnumeratorIn(forStmt)
      case _ => createVariableDefinition()
    }
    setPrivateModifier(createdDeclaration)
    ScalaPsiUtil.adjustTypes(createdDeclaration)
    SmartPointerManager.getInstance(file.getProject).createSmartPsiElementPointer(createdDeclaration)
  }
  // Replaces the given range in the document with the declaration text and
  // moves the caret to the end of the inserted text.
  private[this] def replaceRangeByDeclaration(text: String, range: TextRange)
                                             (implicit project: Project, editor: Editor): Unit = {
    val startOffset = range.getStartOffset
    val document = editor.getDocument
    document.replaceString(startOffset, range.getEndOffset, text)
    PsiDocumentManager.getInstance(project).commitDocument(document)
    editor.getCaretModel.moveToOffset(startOffset + text.length)
  }
  // Decides whether the extraction should become a `for` enumerator: the
  // occurrence must live in a for-statement's enumerators, after its first generator.
  private[this] def isIntroduceEnumerator(parent: PsiElement, element: PsiElement, range: TextRange): Option[ScForStatement] = {
    val maybeParent = element match {
      case statement: ScForStatement if statement.body.contains(parent) => None
      case statement: ScForStatement => Some(statement)
      case _: ScEnumerator | _: ScGenerator => Option(element.getParent.getParent)
      case guard: ScGuard if guard.getParent.isInstanceOf[ScEnumerators] => Option(element.getParent.getParent)
      case _ => Some(parent)
    }
    maybeParent.collect {
      case statement: ScForStatement => statement
    }.filter(_.enumerators.exists(isAfterFirstGenerator(_, range)))
  }
  // Non-local members introduced by the refactoring are made `private`.
  private[this] def setPrivateModifier(declaration: PsiElement): Unit = declaration match {
    case member: ScMember if !member.isLocal => member.setModifierProperty(PRIVATE, value = true)
    case _ =>
  }
  private[this] def isAfterFirstGenerator(enumerators: ScEnumerators, range: TextRange): Boolean =
    enumerators.generators.headOption
      .exists(_.getTextRange.getEndOffset < range.getStartOffset)
}
| triplequote/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/refactoring/introduceVariable/IntroduceExpressions.scala | Scala | apache-2.0 | 18,191 |
/*
* Copyright 2007-2010 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb {
package util {
import Helpers._
import _root_.scala.util.parsing.combinator.Parsers
/**
* The CombParserHelpers trait provides parser combinators helpers
*/
trait CombParserHelpers {
  self: Parsers =>
  /** the type of input elements defined in the Parsers trait is <code>Char</code> */
  type Elem = Char
  /** @return a CharArray input build from a String */
  implicit def strToInput(in: String): Input = new _root_.scala.util.parsing.input.CharArrayReader(in.toCharArray)
  /** @return true if the character is an end of file (the '\032' substitute character) */
  def isEof(c: Char): Boolean = c == '\\032'
  /** @return true if the character is not an end of file */
  def notEof(c: Char): Boolean = !isEof(c)
  /** @return true if the character is a digit */
  def isNum(c: Char): Boolean = Character.isDigit(c)
  /** @return true if the character is not a digit */
  def notNum(c: Char): Boolean = !isNum(c)
  /** @return true if the character is a space character */
  def wsc(c: Char): Boolean = c == ' ' || c == '\\n' || c == '\\r' || c == '\\t'
  /** @return a whitespace parser */
  def wsc: Parser[Elem] = elem("wsc", wsc)
  /** alias for the wsc parser */
  def white = wsc
  /** @return a unit parser for any repetition of whitespaces */
  def whiteSpace: Parser[Unit] = rep(white) ^^^ ()
  /** @return a parser accepting a 'line' space, either ' ' or '\\t' */
  def aSpace = accept("whitespace", { case c if (c == ' ') || c == '\\t' => true })
  /** @return a unit parser for any repetition of 'line' spaces */
  def lineSpace = rep(aSpace)
  /**
   * @param elements list of characters
   * @return a unit parser which will succeed if the input matches the list of characters regardless
   * of the case (uppercase or lowercase)
   */
  def acceptCI[ES <% List[Elem]](es: ES): Parser[List[Elem]] =
    es.foldRight[Parser[List[Elem]]](
      success(Nil)){(x, pxs) => acceptCIChar(x) ~ pxs ^^ mkList}
  /** Case normalization used by acceptCI: both sides are uppercased before comparison. */
  def xform(in: Char): Char = Character.toUpperCase(in)
  // accepts a single character, ignoring case, with a descriptive error message
  private def acceptCIChar(c: Elem) = acceptIf(a => xform(a) == xform(c))("`"+c+"' expected but " + _ + " found")
  /**
   * @return a trimmed string of the input (a List of Elem)
   */
  implicit def ns(in: List[Elem]): String = in.mkString("").trim
  /**
   * @return a list of elements (Elem) from a String
   */
  implicit def strToLst(in: String): List[Elem] = (new scala.collection.immutable.StringOps(in)).toList
  /**
   * @return a parser for a digit
   */
  def digit = elem("digit", isNum)
  /**
   * @return a parser for a slash
   */
  def slash = elem("slash", c => c == '/')
  /**
   * @return a parser for a colon
   */
  def colon = elem("colon", c => c == ':')
  /**
   * @return a parser discarding end of lines (accepts CRLF, LFCR, lone CR/LF, or EOF)
   */
  def EOL: Parser[Unit] = (accept("\\n\\r") | accept("\\r\\n") | '\\r' |
                           '\\n' | '\\032' ) ^^^ ()
  def notEOL: Parser[Elem] = (not(EOL) ~> anyChar)
  def notEOF: Parser[Elem] = (not(accept('\\032')) ~> anyChar)
  def anyChar: Parser[Elem] = elem("Any Char", c => c != '\\032')
  /**
   * @return a parser returning an Int if succeeding
   */
  def aNumber: Parser[Int] = rep1(elem("Number", isNum)) ^^ {case xs => xs.mkString("").toInt}
  /**
   * @return a parser which tries the permutations of a list of parsers
   */
  def permute[T](p: (Parser[T])*): Parser[List[T]] = permute((lst : List[Parser[T]]) => lst.permute, p :_*)
  /**
   * @return a parser which tries the permutations of a list and sublists of parsers
   */
  def permuteAll[T](p: (Parser[T])*): Parser[List[T]] = permute((lst : List[Parser[T]]) => lst.permuteAll, p :_*)
  /**
   * @param func list permutation function. Returns all permutations on the list or all permutations on the list plus all permutations on sublists for example
   * @return a parser which tries the permutations of a list of parsers, given a permutation function
   */
  def permute[T](func: List[Parser[T]] => List[List[Parser[T]]], p: (Parser[T])*): Parser[List[T]] =
    if (p.isEmpty)
      success(Nil);
    else {
      val right: Parser[List[T]] = success(Nil)
      p.toList match {
        case Nil => right
        case x :: Nil => x ~ right ^^ {case ~(x, xs) => x :: xs}
        // each permutation becomes a sequenced parser; alternatives are combined with `|`
        case xs => func(xs).map(_.foldRight(right)(
          _ ~ _ ^^ {case ~(x, xs) => x :: xs})).
                reduceLeft((a: Parser[List[T]], b: Parser[List[T]]) => a | b)
      }
    }
  /**
   * @return a parser which parses the input using p a number of times
   * (at least n occurrences of p, followed by any further repetitions)
   */
  def repNN[T](n: Int, p: => Parser[T]): Parser[List[T]] = if (n == 0) rep(p) else p ~ repNN(n - 1, p) ^^ {case ~(x, xs) => x :: xs}
}
/**
 * Overrides rep1/rep1sep with iterative (while-loop based) implementations
 * instead of the combinator-built ones from `Parsers`.
 */
trait SafeSeqParser extends Parsers {
  /** A parser generator for non-empty repetitions.
   *
   * <p> rep1(f, p) first uses `f' (which must succeed) and then repeatedly uses `p' to
   * parse the input until `p' fails
   * (the result is a `List' of the consecutive results of `f' and `p')</p>
   *
   * @param first a `Parser' that parses the first piece of input
   * @param p a `Parser' that is to be applied successively to the rest of the input (if any)
   * @return A parser that returns a list of results produced by first applying `f' and then
   *         repeatedly `p' to the input (it only succeeds if `f' matches).
   */
  override def rep1[T](first: => Parser[T], p: => Parser[T]): Parser[List[T]] = new Parser[List[T]] {
    def apply(in0: Input) = {
      // accumulate results iteratively rather than via recursive combinators
      val xs = new _root_.scala.collection.mutable.ListBuffer[T]
      var in = in0
      var res = first(in)
      while(res.successful) {
        xs += res.get
        in = res.next
        res = p(in)
      }
      // succeeds only when at least one element (from `first`) was parsed
      if (!xs.isEmpty) Success(xs.toList, res.next)
      else Failure("TODO", in0)
    }
  }
  /** A parser generator for non-empty repetitions.
   *
   * <p>rep1sep(first, p, q) starts by using `first', followed by repeatedly uses of `p' interleaved with `q'
   * to parse the input, until `p' fails. `first' must succeed (the result is a `List' of the
   * consecutive results of `first' and `p')</p>
   *
   * @param first a `Parser' that is to be applied to the first element of input
   * @param p a `Parser' that is to be applied successively to the input
   * @param q a `Parser' that parses the elements that separate the elements parsed by `p'
   *          (interleaved with `q')
   * @return A parser that returns a list of results produced by repeatedly applying `p' to the input
   *         (and that only succeeds if `p' matches at least once).
   *         The results of `p' are collected in a list. The results of `q' are discarded.
   */
  override def rep1sep[T](p: => Parser[T], q: => Parser[Any]): Parser[List[T]] =
    new Parser[List[T]] {
      def apply(in0: Input) = {
        val xs = new _root_.scala.collection.mutable.ListBuffer[T]
        var in = in0
        // gotQ tracks whether the separator matched after the previous element
        var gotQ = true
        var res = p(in)
        while (res.successful && gotQ) {
          xs += res.get
          in = res.next
          val r2 = q(in)
          gotQ = r2.successful
          if (gotQ) {
            in = r2.next
            res = p(in)
          }
        }
        if (!xs.isEmpty) Success(xs.toList, res.next)
        else Failure("TODO", in0)
      }
    }
}
}
}
| wsaccaco/lift | framework/lift-base/lift-util/src/main/scala/net/liftweb/util/CombParserHelpers.scala | Scala | apache-2.0 | 7,743 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.util
import monix.execution.atomic.Atomic
import scala.concurrent.{Future, Promise}
/**
* Represents a Promise that completes with `value` after
* receiving a `countdownUntil` number of `countdown()` calls.
*/
private[monix] final class PromiseCounter[A] private (value: A, initial: Int) {
  require(initial > 0, "length must be strictly positive")

  private[this] val promise = Promise[A]()
  private[this] val counter = Atomic(initial)

  /** The future that completes once the counter reaches zero. */
  def future: Future[A] = promise.future

  /** Raises the number of `countdown()` calls still required by one. */
  def acquire(): Unit = counter.increment()

  /** Decrements the counter; completes the future with `value` when it hits zero. */
  def countdown(): Unit =
    if (counter.decrementAndGet() == 0) promise.success(value)

  /** Completes the underlying promise immediately with the given value. */
  def success(value: A): Unit = promise.success(value)
}
private[monix] object PromiseCounter {
  // Builds a counter completing with `value` after `initial` countdown() calls.
  def apply[A](value: A, initial: Int): PromiseCounter[A] =
    new PromiseCounter[A](value, initial)
}
package com.ing.baker.runtime.scaladsl
import com.ing.baker.runtime.common.LanguageDataStructures.ScalaApi
import com.ing.baker.runtime.{common, javadsl}
import com.ing.baker.types.Value
/** Scala DSL representation of a single named ingredient value. */
case class IngredientInstance(name: String, value: Value) extends common.IngredientInstance with ScalaApi {

  /** Bridges this instance to its Java DSL counterpart. */
  def asJava: javadsl.IngredientInstance =
    javadsl.IngredientInstance(name = name, value = value)
}
| ing-bank/baker | core/baker-interface/src/main/scala/com/ing/baker/runtime/scaladsl/IngredientInstance.scala | Scala | mit | 384 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.tail
import cats.Eq
import cats.data.EitherT
import cats.laws.discipline.{
CoflatMapTests,
DeferTests,
FunctorFilterTests,
MonadErrorTests,
MonoidKTests,
SemigroupalTests
}
import cats.laws.discipline.arbitrary.catsLawsArbitraryForPartialFunction
import monix.eval.Task
import monix.execution.schedulers.TestScheduler
// Law-checking suite verifying cats type-class laws for Iterant[Task, _].
object TypeClassLawsForIterantTaskSuite extends BaseLawsSuite {
  // The effect type under test, fixed to Task as the underlying monad.
  type F[α] = Iterant[Task, α]
  implicit lazy val ec: TestScheduler = TestScheduler()
  // Explicit instance due to weird implicit resolution problem
  implicit lazy val iso: SemigroupalTests.Isomorphisms[F] =
    SemigroupalTests.Isomorphisms.invariant
  // Explicit instance, since Scala can't figure it out below :-(
  val eqEitherT: Eq[EitherT[F, Throwable, Int]] =
    implicitly[Eq[EitherT[F, Throwable, Int]]]
  // Defer laws run under the slow configuration (heavier generated cases).
  checkAllAsync("Defer[Iterant[Task]]", slowConfig) { _ =>
    DeferTests[F].defer[Int]
  }
  checkAllAsync("MonadError[Iterant[Task]]") { _ =>
    // eqE must be in implicit scope for the EitherT-based law checks below.
    implicit val eqE = eqEitherT
    MonadErrorTests[F, Throwable].monadError[Int, Int, Int]
  }
  checkAllAsync("MonoidK[Iterant[Task]]") { implicit ec =>
    MonoidKTests[F].monoidK[Int]
  }
  checkAllAsync("CoflatMap[Iterant[Task]]") { implicit ec =>
    CoflatMapTests[F].coflatMap[Int, Int, Int]
  }
  checkAllAsync("FunctorFilter[Iterant[Task]]") { implicit ec =>
    FunctorFilterTests[F].functorFilter[Int, Int, Int]
  }
}
| alexandru/monifu | monix-tail/shared/src/test/scala/monix/tail/TypeClassLawsForIterantTaskSuite.scala | Scala | apache-2.0 | 2,088 |
/**
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*/
package kafka.manager.utils
import java.util.Properties
import kafka.manager.model._
/** Contract implemented by each Kafka-version-specific logkafka
  * configuration descriptor (see the companion's version map).
  */
trait LogkafkaNewConfigs {
  /** Names of all configuration keys recognised by this version. */
  def configNames: Set[String]
  /** Known configuration entries (key -> value) for this version. */
  def configMaps: Map[String, String]
  /** Validates the given properties, throwing on invalid entries.
    * Explicit `: Unit` replaces the deprecated procedure syntax.
    */
  def validate(props: Properties): Unit
}
object LogkafkaNewConfigs {

  /** Supported broker versions mapped to their logkafka config descriptor. */
  val logkafkaConfigsByVersion: Map[KafkaVersion, LogkafkaNewConfigs] = Map(
    Kafka_0_8_1_1 -> logkafka81.LogConfig,
    Kafka_0_8_2_0 -> logkafka82.LogConfig,
    Kafka_0_8_2_1 -> logkafka82.LogConfig,
    Kafka_0_8_2_2 -> logkafka82.LogConfig
  )

  /** Looks up the descriptor for `version`, failing with the same message
    * shape used by every public accessor (`task` is the action that failed).
    */
  private def configsFor(version: KafkaVersion, task: String): LogkafkaNewConfigs =
    logkafkaConfigsByVersion.getOrElse(
      version,
      throw new IllegalArgumentException(s"Undefined logkafka configs for version : $version, cannot $task"))

  /** Config key names for the given version. */
  def configNames(version: KafkaVersion): Set[String] =
    configsFor(version, "get config names").configNames

  /** Config key/value map for the given version. */
  def configMaps(version: KafkaVersion): Map[String, String] =
    configsFor(version, "get config maps").configMaps

  /** Validates `props` against the given version's rules. */
  def validate(version: KafkaVersion, props: Properties): Unit =
    configsFor(version, "validate config").validate(props)
}
| xuwei-k/kafka-manager | app/kafka/manager/utils/LogkafkaNewConfigs.scala | Scala | apache-2.0 | 1,518 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frsse2008.boxes
import uk.gov.hmrc.ct.box._
/** CT600 accounts box: "Statutory notes: Borrowings" — optional yes/no input. */
case class ACQ8114(value: Option[Boolean])
  extends CtBoxIdentifier(name = "Statutory notes: Borrowings")
  with CtOptionalBoolean
  with Input
package chana.jpql
import chana.jpql.nodes.JPQLParser
import chana.jpql.rats.JPQLGrammar
import java.io.StringReader
import org.scalatest.BeforeAndAfterAll
import org.scalatest.Matchers
import org.scalatest.WordSpecLike
import xtc.tree.Node
/**
 * Exercises the rats!-generated [[JPQLGrammar]] over a catalogue of JPQL
 * statements; queries accepted by the grammar are additionally run through
 * the higher-level semantic [[JPQLParser]].
 */
class JPQLGrammarSpec extends WordSpecLike with Matchers with BeforeAndAfterAll {

  /**
   * Parses `query` with the generated grammar; on success also runs the
   * semantic parser and logs both trees. Fails the test with the grammar's
   * error message and position when the query is rejected.
   * (Explicit `: Unit =` replaces the deprecated procedure syntax.)
   */
  def parse(query: String): Unit = {
    val reader = new StringReader(query)
    val grammar = new JPQLGrammar(reader, "")
    val r = grammar.pJPQL(0)
    if (r.hasValue) {
      // semanticValue has signature <T> T semanticValue(); it must be called
      // with an explicit result type of at least Any, otherwise it fails with:
      // xtc.tree.GNode$Fixed1 cannot be cast to scala.runtime.Nothing$
      val rootNode = r.semanticValue[Node]
      info("\n\n## " + query + " ##\n\n" + rootNode)
      // now run the semantic JPQL parser on the same query
      val parser = new JPQLParser()
      val stmt = parser.parse(query)
      info("\nParsed:\n" + stmt)
    }
    assert(r.hasValue, "\n\n## " + query + " ##\n\n" + r.parseError.msg + " at " + r.parseError.index)
  }

  "JPQLGrammar" when {
    "parse jpql statement" should {

      "with Aggregation functions" in {
        val queries = List(
          "SELECT COUNT(e) FROM Employee e",
          " SELECT COUNT(e) FROM Employee e ", // leave spaces at head and tail
          "SELECT MAX(e.salary) FROM Employee e")
        queries foreach parse
      }

      "with Constructors" in {
        val queries = List(
          "SELECT NEW com.acme.reports.EmpReport(e.firstName, e.lastName, e.salary) FROM Employee e")
        queries foreach parse
      }

      "with FROM Clause" in {
        val queries = List(
          "SELECT e FROM Employee e",
          "SELECT e, a FROM Employee e, MailingAddress a WHERE e.address = a.address")
        queries foreach parse
      }

      "with JOIN" in {
        val queries = List(
          "SELECT e FROM Employee e JOIN e.address a WHERE a.city = :city",
          "SELECT e FROM Employee e JOIN e.projects p JOIN e.projects p2 WHERE p.name = :p1 AND p2.name = :p2")
        queries foreach parse
      }

      "with JOIN FETCH" in {
        val queries = List(
          "SELECT e FROM Employee e JOIN FETCH e.address",
          "SELECT e FROM Employee e JOIN FETCH e.address a ORDER BY a.city")
        queries foreach parse
      }

      "with LEFT JOIN" in {
        val queries = List(
          "SELECT e FROM Employee e LEFT JOIN e.address a ORDER BY a.city")
        queries foreach parse
      }

      "with ON" in {
        val queries = List(
          "SELECT e FROM Employee e LEFT JOIN e.address a ON a.city = :city",
          "SELECT e FROM Employee e LEFT JOIN MailingAddress a ON e.address = a.address")
        queries foreach parse
      }

      "with ORDER BY clause" in {
        val queries = List(
          "SELECT e FROM Employee e ORDER BY e.lastName ASC, e.firstName ASC",
          "SELECT e FROM Employee e ORDER BY UPPER(e.lastName)",
          "SELECT e FROM Employee e LEFT JOIN e.manager m ORDER BY m.lastName",
          "SELECT e FROM Employee e ORDER BY e.address")
        queries foreach parse
      }

      "with GROUP BY Clause" in {
        val queries = List(
          "SELECT AVG(e.salary), e.address.city FROM Employee e GROUP BY e.address.city",
          "SELECT AVG(e.salary), e.address.city FROM Employee e GROUP BY e.address.city ORDER BY e.salary",
          "SELECT e, COUNT(p) FROM Employee e LEFT JOIN e.projects p GROUP BY e")
        queries foreach parse
      }

      "with HAVING Clause" in {
        val queries = List(
          "SELECT AVG(e.salary), e.address.city FROM Employee e GROUP BY e.address.city HAVING e.salary > 100000")
        queries foreach parse
      }

      "with WHERE Clause" in {
        val queries = List(
          "SELECT e FROM Employee e WHERE e.firstName IN (:name1, :name2, :name3)",
          "SELECT e FROM Employee e WHERE e.firstName IN (:name1)",
          "SELECT e FROM Employee e WHERE e.firstName IN :names",
          "SELECT e FROM Employee e WHERE e.firstName IN (SELECT e2.firstName FROM Employee e2 WHERE e2.lastName = 'Smith')",
          "SELECT e FROM Employee e WHERE e.firstName = (SELECT e2.firstName FROM Employee e2 WHERE e2.id = :id)",
          "SELECT e FROM Employee e WHERE e.salary < (SELECT e2.salary FROM Employee e2 WHERE e2.id = :id)",
          "SELECT e FROM Employee e WHERE e.firstName = ANY (SELECT e2.firstName FROM Employee e2 WHERE e.id <> e.id)",
          "SELECT e FROM Employee e WHERE e.salary <= ALL (SELECT e2.salary FROM Employee e2)",
          "SELECT e FROM Employee e WHERE e.manager = e2.manager",
          "SELECT e FROM Employee e WHERE e.manager = :manager",
          "SELECT e FROM Employee e WHERE e.manager = ?11",
          "SELECT e FROM Employee e WHERE e.manager <> :manager",
          "SELECT e FROM Employee e WHERE e.manager IS NULL",
          "SELECT e FROM Employee e WHERE e.manager IS NOT NULL",
          "SELECT e FROM Employee e WHERE e.manager IN (SELECT e2 FROM Employee e2 WHERE SIZE(e2.managedEmployees) < 2)",
          "SELECT e FROM Employee e WHERE e.manager NOT IN (:manager1, :manager2)")
        queries foreach parse
      }

      "with Update Querie" in {
        val queries = List(
          "UPDATE Employee e SET e.salary = 60000 WHERE e.salary = 50000",
          "UPDATE Employee e SET e.salary = 60000, e.name = 'James' WHERE e.salary = 50000",
          "UPDATE Employee SET salary = 60000, name = 'James' WHERE salary = 50000")
        queries foreach parse
      }

      "with Delete Queries" in {
        val queries = List(
          "DELETE FROM Employee e WHERE e.department IS NULL")
        queries foreach parse
      }

      "with Insert" in {
        val queries = List(
          "INSERT INTO Employee (id, name, salary) VALUES('1234', 'Bob', 50000)",
          "INSERT INTO Employee VALUES('1234', 'Bob', 50000)",
          "INSERT INTO PersonInfo p (emails) VALUES (JSON(\"bond1@abc.com\")), (JSON(\"bond2@abc.com\")) WHERE p.id = '1'")
        queries foreach parse
      }

      "with Literals" in {
        val queries = List(
          "SELECT e FROM Employee e WHERE e.name = 'Bob'",
          "SELECT e FROM Employee e WHERE e.name = 'Baie-D''Urfé'",
          "SELECT e FROM Employee e WHERE e.id = 1234",
          "SELECT e FROM Employee e WHERE e.id = 1234L",
          "SELECT s FROM Stat s WHERE s.ratio > 3.14",
          "SELECT s FROM Stat s WHERE s.ratio > 3.14F",
          "SELECT s FROM Stat s WHERE s.ratio > 3.14e32D",
          "SELECT e FROM Employee e WHERE e.active = TRUE",
          "SELECT e FROM Employee e WHERE e.active = fAlse",
          "SELECT e FROM Employee e WHERE e.startDate = {d'2012-01-03'}",
          "SELECT e FROM Employee e WHERE e.startTime = {t'09:00:00'}",
          "SELECT e FROM Employee e WHERE e.version = {ts'2012-01-03 09:00:00.000000001'}",
          "SELECT e FROM Employee e WHERE e.gender = org.acme.Gender.MALE",
          "UPDATE Employee e SET e.manager = NULL WHERE e.manager = :manager")
        queries foreach parse
      }

      "with Functions" in {
        val queries = List(
          "SELECT (e.salary - 1000) FROM Employee e",
          "SELECT (e.salary + 1000) FROM Employee e",
          "SELECT (e.salary * 1000) FROM Employee e",
          "SELECT (e.salary / 1000) FROM Employee e",
          "SELECT ABS(e.salary - e.manager.salary) FROM Employee e",
          "SELECT CASE e.STATUS WHEN 0 THEN 'active' WHEN 1 THEN 'consultant' ELSE 'unknown' END FROM Employee e",
          "SELECT COALESCE(e.salary, 0) FROM Employee e",
          "SELECT CONCAT(e.firstName, ' ', e.lastName) FROM Employee e",
          "SELECT CURRENT_DATE FROM Employee e WHERE e.time = CURRENT_DATE",
          "SELECT CURRENT_DATE FROM Employee e",
          "SELECT CURRENT_TIME FROM Employee e",
          "SELECT CURRENT_TIMESTAMP FROM Employee e",
          "SELECT LENGTH(e.lastName) FROM Employee e",
          "SELECT LOCATE('-', e.lastName) FROM Employee e",
          "SELECT LOWER(e.lastName) FROM Employee e",
          "SELECT MOD(e.hoursWorked, 8) FROM Employee e",
          "SELECT NULLIF(e.salary, 0) FROM Employee e",
          "SELECT SQRT(e.RESULT) FROM Employee e",
          "SELECT SUBSTRING(e.lastName, 0, 2) FROM Employee e",
          "SELECT TRIM(TRAILING FROM e.lastName), TRIM(e.lastName), TRIM(LEADING '-' FROM e.lastName) FROM Employee e",
          "SELECT UPPER(e.lastName) FROM Employee e")
        queries foreach parse
      }

      "with Special Operators" in {
        val queries = List(
          "SELECT toDo FROM Employee e JOIN e.toDoList toDo WHERE INDEX(toDo) = 1",
          "SELECT p, KEY(p) FROM Employee e JOIN e.priorities p WHERE KEY(p) = 'high'",
          "SELECT e FROM Employee e WHERE SIZE(e.managedEmployees) < 2",
          "SELECT e FROM Employee e WHERE e.managedEmployees IS EMPTY",
          "SELECT e FROM Employee e WHERE 'write code' MEMBER OF e.responsibilities",
          "SELECT p FROM Project p WHERE TYPE(p) = LargeProject",
          "SELECT e FROM Employee e JOIN TREAT(e.projects AS LargeProject) p WHERE p.budget > 1000000",
          "SELECT p FROM Phone p WHERE FUNCTION('TO_NUMBER', p.areaCode) > 613")
        queries foreach parse
      }

      "with JSON value" in {
        val queries = List(
          "SELECT JSON({}) FROM Employee e",
          "SELECT JSON({\"name\": 1}) FROM Employee e",
          "SELECT JSON({\"name\": \"str\"}) FROM Employee e",
          "SELECT JSON({\"name\": \"str\", \"amount\": 100.0}) FROM Employee e",
          "SELECT JSON({\"name\": [\"str\", 2, 2.0, 2e0, -2.0e1]}) FROM Employee e",
          "SELECT JSON([\"str\", 2, 2.0, 2e0, -2.0e1, true, false, null]) FROM Employee e",
          "SELECT json([\"str\", 2, 2.0, 2e0, -2.0e3, {\"name\": 1}]) FROM Employee e")
        queries foreach parse
      }
    }
  }
}
| wandoulabs/chana | src/test/scala/chana/jpql/JPQLGrammarSpec.scala | Scala | apache-2.0 | 9,869 |
import markets.StackableActor
/** Financial conduct authority supervises/regulates LSE. */
class FinancialConductAuthority extends StackableActor {
  // Behaviour comes entirely from the StackableActor stack; no cases added.
  override def receive: Receive = super.receive
}
| ScalABM/models-library | farmer-patelli-zovko/src/main/scala-2.11/FinancialConductAuthority.scala | Scala | apache-2.0 | 211 |
/**
* Copyright (c) 2017-2018, Benjamin Fradet, and other contributors.
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package ste
import org.apache.spark.sql.{ Column, DataFrame, Dataset, Encoder }
import org.apache.spark.sql.functions._
import scala.collection.generic.IsTraversableOnce
import shapeless._
import shapeless.ops.hlist._
import shapeless.syntax.std.tuple._
import shapeless.labelled.FieldType
@annotation.implicitNotFound("""
Type ${A} does not have a DataTypeSelector defined in the library.
You need to define one yourself.
""")
// Type class: builds the Spark Column selecting a value of type A out of a
// flattened DataFrame, given the column-name prefix accumulated so far.
sealed trait DataTypeSelector[A] {
  import DataTypeSelector.Select
  // (prefix, optional @Flatten annotation of the field) => selecting Column
  val select: Select
}
object DataTypeSelector extends SelectorImplicits {
  type Select = (Prefix, Option[Flatten]) => Column
  // Summoner: DataTypeSelector[A] materialises the implicit instance.
  def apply[A](implicit s: DataTypeSelector[A]): DataTypeSelector[A] = s
  // Lifts a raw select function into an instance.
  def pure[A](s: Select): DataTypeSelector[A] =
    new DataTypeSelector[A] {
      val select: Select = s
    }
  // Selector for scalar-like types: each prefix segment is joined with '.'
  // and backtick-quoted (segment parts may themselves contain dots).
  def simpleColumn[A]: DataTypeSelector[A] =
    pure[A]((prefix, _) => col(prefix.map(s => s"`${s.mkString(".")}`").mkString(".")))
}
@annotation.implicitNotFound("""
Type ${A} does not have a StructTypeSelector defined in the library.
You need to define one yourself.
""")
// Refinement of DataTypeSelector whose resulting Column is a struct
// (produced for case-class/product types by SelectorImplicits.productSelector).
sealed trait StructTypeSelector[A] extends DataTypeSelector[A] {
  import DataTypeSelector.Select
  val select: Select
}
object StructTypeSelector extends SelectorImplicits {
  import DataTypeSelector.Select
  // Summoner for struct selectors.
  def apply[A](implicit s: StructTypeSelector[A]): StructTypeSelector[A] = s
  // Lifts a raw select function into a struct selector instance.
  def pure[A](s: Select): StructTypeSelector[A] =
    new StructTypeSelector[A] {
      val select: Select = s
    }
}
@annotation.implicitNotFound("""
Type ${A} does not have a MultiStructTypeSelector defined in the library.
You need to define one yourself.
""")
// Selector over an HList of record fields: yields one Column per field
// (used as the induction carrier when deriving StructTypeSelector).
sealed trait MultiStructTypeSelector[A] {
  import MultiStructTypeSelector.Select
  val select: Select
}
object MultiStructTypeSelector {
  import StructTypeSelector.Prefix
  // (prefix, Flatten of the enclosing field, per-field Flatten list) => columns
  type Select = (Prefix, Option[Flatten], List[Option[Flatten]]) => List[Column]
  // Lifts a raw select function into an instance.
  def pure[A](s: Select): MultiStructTypeSelector[A] =
    new MultiStructTypeSelector[A] {
      val select: Select = s
    }
}
trait SelectorImplicits {
  // A prefix is a path of column segments; each segment is a vector of name
  // parts joined with '.' inside one backtick-quoted reference (see
  // DataTypeSelector.simpleColumn). Outer vector tracks struct nesting.
  type Prefix = Vector[Vector[String]]
  // Extends the prefix with field/key/index `s`, honouring the @Flatten
  // annotation of the enclosing field: flattened fields merge into the last
  // segment instead of opening a new one.
  private def addPrefix(prefix: Prefix, s: String, flatten: Option[Flatten]): Prefix = flatten match {
    // NOTE(review): the third Flatten field appears to mean "replace the last
    // name part" rather than append — confirm against Flatten's declaration.
    case Some(Flatten(_, _, true)) => prefix.dropRight(1) :+ prefix.lastOption.map(_.dropRight(1) :+ s).getOrElse(Vector(s))
    case Some(_) => prefix.dropRight(1) :+ prefix.lastOption.map(_ :+ s).getOrElse(Vector(s))
    case _ => prefix :+ Vector(s)
  }
  // Base case of the HList induction: no fields, no columns.
  implicit val hnilSelector: MultiStructTypeSelector[HNil] =
    MultiStructTypeSelector.pure((_, _, _) => List.empty)
  // Inductive case: select the head field (aliased to its record-label name),
  // then recurse on the tail, consuming one Flatten annotation per field.
  implicit def hconsSelector[K <: Symbol, H, T <: HList](
    implicit
    witness: Witness.Aux[K],
    hSelector: Lazy[DataTypeSelector[H]],
    tSelector: MultiStructTypeSelector[T]
  ): MultiStructTypeSelector[FieldType[K, H] :: T] = MultiStructTypeSelector.pure { (prefix, parentFlatten, flatten) =>
    val fieldName = witness.value.name
    val hColumn = hSelector.value.select(addPrefix(prefix, fieldName, parentFlatten), flatten.headOption.flatten).as(fieldName)
    val tColumns = tSelector.select(prefix, parentFlatten, flatten.tail)
    hColumn +: tColumns
  }
  // Derives a struct selector for any case class: selects each field's column
  // (threading the per-field @Flatten annotations) and wraps them in a struct.
  implicit def productSelector[A, H <: HList, HF <: HList](
    implicit
    generic: LabelledGeneric.Aux[A, H],
    flattenAnnotations: Annotations.Aux[Flatten, A, HF],
    hSelector: Lazy[MultiStructTypeSelector[H]],
    flattenToList: ToList[HF, Option[Flatten]]
  ): StructTypeSelector[A] = StructTypeSelector.pure { (prefix, flatten) =>
    val flattens = flattenAnnotations().toList[Option[Flatten]]
    struct(hSelector.value.select(prefix, flatten, flattens): _*)
  }
  // Scalar leaf types: all map to a plain column reference.
  implicit val binarySelector: DataTypeSelector[Array[Byte]] = DataTypeSelector.simpleColumn
  implicit val booleanSelector: DataTypeSelector[Boolean] = DataTypeSelector.simpleColumn
  implicit val byteSelector: DataTypeSelector[Byte] = DataTypeSelector.simpleColumn
  implicit val dateSelector: DataTypeSelector[java.sql.Date] = DataTypeSelector.simpleColumn
  implicit val decimalSelector: DataTypeSelector[BigDecimal] = DataTypeSelector.simpleColumn
  implicit val doubleSelector: DataTypeSelector[Double] = DataTypeSelector.simpleColumn
  implicit val floatSelector: DataTypeSelector[Float] = DataTypeSelector.simpleColumn
  implicit val intSelector: DataTypeSelector[Int] = DataTypeSelector.simpleColumn
  implicit val longSelector: DataTypeSelector[Long] = DataTypeSelector.simpleColumn
  implicit val nullSelector: DataTypeSelector[Unit] = DataTypeSelector.simpleColumn
  implicit val shortSelector: DataTypeSelector[Short] = DataTypeSelector.simpleColumn
  implicit val stringSelector: DataTypeSelector[String] = DataTypeSelector.simpleColumn
  implicit val timestampSelector: DataTypeSelector[java.sql.Timestamp] = DataTypeSelector.simpleColumn
  implicit def optionSelector[T]: DataTypeSelector[Option[T]] = DataTypeSelector.simpleColumn
  // Collections: when flattened, rebuild an array from `times` indexed
  // columns; otherwise select the collection column as-is.
  implicit def traversableOnceSelector[A0, C[_]](
    implicit
    s: DataTypeSelector[A0],
    is: IsTraversableOnce[C[A0]] { type A = A0 }
  ): DataTypeSelector[C[A0]] = DataTypeSelector.pure { (prefix, flatten) =>
    flatten
      .map(f => (0 until f.times).map(i => s.select(addPrefix(prefix, i.toString, flatten), flatten)))
      .map(array(_: _*))
      .getOrElse(s.select(prefix, flatten))
  }
  // Maps: when flattened, rebuild a map from the annotated keys and their
  // per-key value columns; otherwise select the map column as-is.
  implicit def mapSelector[K, V](
    implicit s: DataTypeSelector[V]
  ): DataTypeSelector[Map[K, V]] = DataTypeSelector.pure { (prefix, flatten) =>
    flatten
      .map(_.keys.flatMap(k => List(lit(k), s.select(addPrefix(prefix, k, flatten), flatten))))
      .map(map(_: _*))
      .getOrElse(s.select(prefix, flatten))
  }
  // Syntax: rebuilds a nested Dataset/DataFrame from a flattened DataFrame
  // by selecting into a single "nested" struct and exploding it back out.
  implicit class FlattenedDataFrame(df: DataFrame) {
    def asNested[A : Encoder : StructTypeSelector]: Dataset[A] = selectNested.as[A]
    def selectNested[A](implicit s: StructTypeSelector[A]): DataFrame =
      df.select(s.select(Vector.empty, None).as("nested")).select("nested.*")
  }
}
| BenFradet/struct-type-encoder | core/src/main/scala/ste/selector.scala | Scala | apache-2.0 | 6,807 |
/***
* Copyright 2014 Rackspace US, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rackspace.com.papi.components.checker.handler
import java.io.PrintStream
import java.net.URI
import javax.servlet.FilterChain
import com.rackspace.com.papi.components.checker._
import com.rackspace.com.papi.components.checker.servlet._
import com.rackspace.com.papi.components.checker.step.results.Result
import org.w3c.dom.Document
/** Result handler that prints each validation outcome to a PrintStream
  * (stdout by default), colourised via ANSI escapes.
  */
class ConsoleResultHandler(val out: PrintStream = System.out) extends ResultHandler {

  /** Nothing to set up for console output. */
  def init(validator: Validator, checker: Option[Document]): Unit = {}

  def handle(req: CheckerServletRequest, resp: CheckerServletResponse, chain: FilterChain, result: Result): Unit = {
    Console.withOut(out) {
      // Renders "[VALID]" in green or "[NOPE]" in red.
      def valid(v: Boolean): String = {
        val label = if (v) Console.GREEN + "VALID" else Console.RED + "NOPE"
        "[" + label + Console.RESET + "]"
      }
      printf("%s %s %s %s\n", valid(result.valid), req.getMethod(), (new URI(req.getRequestURI())).getPath, result.toString)
    }
  }
}
| tylerroyal/api-checker | core/src/main/scala/com/rackspace/com/papi/components/checker/handler/ConsoleResultHandler.scala | Scala | apache-2.0 | 1,608 |
package at.logic.gapt.proofs.hoare
import at.logic.gapt.expr._
object usedVariables {
  /** Collects every variable occurring in the program, repetitions included. */
  def apply(p: Program): List[FOLVar] = p match {
    case Skip()           => Nil
    case Assign(x, t)     => x :: freeVariables(t).toList
    case Sequence(a, b)   => apply(a) ++ apply(b)
    case IfElse(c, a, b)  => freeVariables(c).toList ++ apply(a) ++ apply(b)
    case ForLoop(i, n, b) => i :: n :: apply(b)
  }
}
object mapVariableNames {
  /** Renames every variable of `p` by applying `f` to its name. */
  def apply(p: Program, f: String => String): Program = {
    val rename = (x: FOLVar) => FOLVar(f(x.name))
    substVariables(p, rename)
  }
}
object substVariables {
  // Map-based convenience overload: variables absent from the map are kept.
  def apply( p: Program, f: Map[FOLVar, FOLTerm] ): Program =
    apply( p, ( x: FOLVar ) => f.getOrElse( x, x ) )
  // Applies the substitution structurally through the whole program.
  // NOTE(review): the asInstanceOf casts assume f maps loop/assignment
  // variables to variables (not arbitrary terms) — callers must guarantee it.
  def apply( p: Program, f: FOLVar => FOLTerm ): Program = p match {
    case Assign( x, t ) => Assign( f( x ).asInstanceOf[FOLVar], apply( t, f ) )
    case IfElse( c, a, b ) => IfElse( apply( c, f ), apply( a, f ), apply( b, f ) )
    case ForLoop( i, n, b ) => ForLoop( f( i ).asInstanceOf[FOLVar], f( n ).asInstanceOf[FOLVar], apply( b, f ) )
    case Skip() => Skip()
    case Sequence( a, b ) => Sequence( apply( a, f ), apply( b, f ) )
  }
  // Term/formula overloads: substitute all free variables via f.
  def apply( t: FOLTerm, f: FOLVar => FOLTerm ): FOLTerm = makeSubstitution( t, f )( t )
  def apply( t: FOLFormula, f: FOLVar => FOLTerm ): FOLFormula = makeSubstitution( t, f )( t )
  // Builds the FOL substitution { x -> f(x) } over the free variables of t.
  private def makeSubstitution( t: FOLExpression, f: FOLVar => FOLTerm ) =
    FOLSubstitution( freeVariables( t ).toList map ( ( x: FOLVar ) => x -> f( x ) ) )
}
object LoopFree {
  /** Extractor matching programs that contain no ForLoop anywhere. */
  def unapply(p: Program): Option[Program] =
    if (isLoopFree(p)) Some(p) else None

  private def isLoopFree(p: Program): Boolean = p match {
    case Assign(_, _)    => true
    case Skip()          => true
    case IfElse(_, a, b) => isLoopFree(a) && isLoopFree(b)
    case Sequence(a, b)  => isLoopFree(a) && isLoopFree(b)
    case _               => false
  }
}
object weakestPrecondition {
  /** Computes wp(p, f), the weakest precondition of loop-free constructs. */
  def apply(p: Program, f: FOLFormula): FOLFormula = p match {
    case Skip()         => f
    case Assign(x, t)   => FOLSubstitution(x, t)(f)
    case Sequence(a, b) => apply(a, apply(b, f))
    case IfElse(c, a, b) =>
      val wpThen = apply(a, f)
      val wpElse = apply(b, f)
      And(Imp(c, wpThen), Imp(Neg(c), wpElse))
  }
}
| gebner/gapt | core/src/main/scala/at/logic/gapt/proofs/hoare/utils.scala | Scala | gpl-3.0 | 2,377 |
package org.bitcoins.core.serializers.script
import org.bitcoins.core.protocol.script.ScriptPubKey
import org.bitcoins.core.script.bitwise.OP_EQUALVERIFY
import org.bitcoins.core.script.constant._
import org.bitcoins.core.script.crypto.{ OP_CHECKSIG, OP_HASH160 }
import org.bitcoins.core.script.stack.OP_DUP
import org.bitcoins.core.util.{ BitcoinSUtil, TestUtil }
import org.scalatest.{ FlatSpec, MustMatchers }
/**
* Created by chris on 1/12/16.
*/
// Round-trip and decoding tests for RawScriptPubKeyParser against fixed
// serialized scriptPubKey hex vectors.
class RawScriptPubKeyParserTest extends FlatSpec with MustMatchers {
  // Partially-applied hex encoder for byte sequences.
  val encode = BitcoinSUtil.encodeHex(_: Seq[Byte])
  "RawScriptPubKeyParser" must "read then write the scriptPubKey and get the original scriptPubKey" in {
    val scriptPubKey: ScriptPubKey = RawScriptPubKeyParser.read(TestUtil.rawScriptPubKey)
    encode(RawScriptPubKeyParser.write(scriptPubKey)) must be(TestUtil.rawScriptPubKey)
  }
  it must "read a raw scriptPubKey and give us the expected asm" in {
    // P2PKH pattern: OP_DUP OP_HASH160 <20-byte hash> OP_EQUALVERIFY OP_CHECKSIG
    val scriptPubKey = RawScriptPubKeyParser.read(TestUtil.rawP2PKHScriptPubKey)
    val expectedAsm: Seq[ScriptToken] =
      List(OP_DUP, OP_HASH160, BytesToPushOntoStack(20), ScriptConstant("31a420903c05a0a7de2de40c9f02ebedbacdc172"),
        OP_EQUALVERIFY, OP_CHECKSIG)
    scriptPubKey.asm must be(expectedAsm)
  }
  it must "read a raw scriptPubKey from an output" in {
    //from b30d3148927f620f5b1228ba941c211fdabdae75d0ba0b688a58accbf018f3cc
    //output is index 1
    val rawScriptPubKey = "17a914af575bd77c5ce7eba3bd9ce6f89774713ae62c7987"
    val scriptPubKey = RawScriptPubKeyParser.read(rawScriptPubKey)
    encode(RawScriptPubKeyParser.write(scriptPubKey)) must be(rawScriptPubKey)
  }
  it must "read and write the scriptPubKey that pushes using a PUSHDATA1 that is negative when read as signed" in {
    // ASM string form (0x-prefixed push opcodes) is parsed, rebuilt, then
    // serialized and compared against the known-good hex.
    val rawScriptPubKey = "0x4c 0xae 0x606563686f2022553246736447566b58312b5a536e587574356542793066794778625456415675534a6c376a6a334878416945325364667657734f53474f36633338584d7439435c6e543249584967306a486956304f376e775236644546673d3d22203e20743b206f70656e73736c20656e63202d7061737320706173733a5b314a564d7751432d707269766b65792d6865785d202d64202d6165732d3235362d636263202d61202d696e207460 DROP DUP HASH160 0x14 0xbfd7436b6265aa9de506f8a994f881ff08cc2872 EQUALVERIFY CHECKSIG"
    val asm = ScriptParser.fromString(rawScriptPubKey)
    val scriptPubKey = ScriptPubKey.fromAsm(asm)
    val actualRawScriptPubKey = RawScriptPubKeyParser.write(scriptPubKey)
    //the actual hex representation is from a bitcoin core test case inside of tx_valid.json
    encode(actualRawScriptPubKey) must be("ca4cae606563686f2022553246736447566b58312b5a536e587574356542793066794778625456415675534a6c376a6a334878416945325364667657734f53474f36633338584d7439435c6e543249584967306a486956304f376e775236644546673d3d22203e20743b206f70656e73736c20656e63202d7061737320706173733a5b314a564d7751432d707269766b65792d6865785d202d64202d6165732d3235362d636263202d61202d696e2074607576a914bfd7436b6265aa9de506f8a994f881ff08cc287288ac")
  }
}
| Christewart/bitcoin-s-core | src/test/scala/org/bitcoins/core/serializers/script/RawScriptPubKeyParserTest.scala | Scala | mit | 2,964 |
package ch.wsl.box.rest.logic.cron
import ch.wsl.box.jdbc.Connection
import ch.wsl.box.model.boxentities.BoxCron
import ch.wsl.box.jdbc.PostgresProfile.api._
import ch.wsl.box.services.Services
import scribe.Logging
import scala.concurrent.ExecutionContext
class BoxCronLoader(cronScheduler: CronScheduler) extends Logging {

  /**
   * Reads every row of the box cron table and registers a recurring SQL job
   * for it on the scheduler.
   */
  def load()(implicit ec: ExecutionContext, services: Services) = {
    val allRows = services.connection.adminDB.run(BoxCron.BoxCronTable.result)
    allRows.map { rows =>
      rows.map { row =>
        logger.info(s"Add scheduler ${row.name} reccurring at ${row.cron}")
        // NOTE: row.sql is spliced unescaped (#$) into the statement — it is
        // admin-authored SQL taken from the cron table, not user input.
        cronScheduler.addSqlJob(SQLJob(row.name, row.cron, services.connection.adminDB, sql"#${row.sql}".as[Boolean].head))
      }
    }
  }
}
| Insubric/box | server/src/main/scala/ch/wsl/box/rest/logic/cron/BoxCronLoader.scala | Scala | apache-2.0 | 685 |
package org.sgine.ui
import org.powerscala.Resource
/**
* @author Matt Hicks <mhicks@sgine.org>
*/
object Scale9Example extends UI {
  // Nine-patch image: the region (50,50)-(450,450) stretches on resize while
  // the borders outside it keep their size.
  val scale9 = Scale9(Resource("scale9test.png"), 50.0, 50.0, 450.0, 450.0)
  scale9.size.width := 200.0
  contents += scale9
}
package scala.pickling.`null`.binary
import org.scalatest.FunSuite
import scala.pickling._, scala.pickling.Defaults._, binary._
import static._
// Fixtures: C carries null String and null D references to exercise
// null handling in the binary pickler.
case class D(d: String)
case class C(x: String, y: Int, d: D)
class NullBinaryTest extends FunSuite {
  test("main") {
    // Both reference fields are null; y is the zero Int.
    val c = C(null, 0, null)
    // Materialise picklers explicitly; pd must precede pc since C contains D.
    implicit val pd = implicitly[AbstractPicklerUnpickler[D]]
    implicit val pc = implicitly[AbstractPicklerUnpickler[C]]
    val pickle = c.pickle
    // Expected wire format: length-prefixed class name followed by the
    // fields; -2 is the encoding emitted for a null reference here.
    assert(pickle.value.mkString("[", ",", "]") === "[0,0,0,28,115,99,97,108,97,46,112,105,99,107,108,105,110,103,46,110,117,108,108,46,98,105,110,97,114,121,46,67,-2,0,0,0,0,-2]")
    assert(pickle.unpickle[C].toString === c.toString)
  }
}
| scala/pickling | core/src/test/scala/scala/pickling/binary/NullBinaryTest.scala | Scala | bsd-3-clause | 687 |
/*
* Beangle, Agile Development Scaffold and Toolkits.
*
* Copyright © 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.style.maven
import java.io.File
import org.apache.maven.plugin.{AbstractMojo, MojoExecutionException}
import org.apache.maven.plugins.annotations.{LifecyclePhase, Mojo, Parameter}
import org.apache.maven.project.MavenProject
import org.beangle.style.core.WhiteSpaceFormater
import org.beangle.style.util.Files./
import org.beangle.style.util.Strings
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
@Mojo(name = "ws-check", defaultPhase = LifecyclePhase.VERIFY, threadSafe = true)
class WSCheckMojo extends AbstractMojo {

  @Parameter(defaultValue = "${project}", readonly = true)
  private var project: MavenProject = _

  /**
   * Collects every source and resource root of the project, scans each for
   * whitespace violations and fails the build when any are found.
   */
  def execute(): Unit = {
    import scala.jdk.CollectionConverters._
    val roots = new ArrayBuffer[String]
    // Gather compile + test sources, then main + test resources, in order.
    (project.getCompileSourceRoots.asScala ++ project.getTestCompileSourceRoots.asScala)
      .foreach(check(_, roots))
    (project.getResources.asScala ++ project.getTestResources.asScala)
      .foreach(resource => check(resource.getDirectory, roots))
    val warns = new ArrayBuffer[String]
    roots.foreach { loc =>
      getLog.info(s"checking $loc ...")
      WhiteSpaceFormater.check(new File(loc), warns)
    }
    if (warns.nonEmpty) {
      // Report offending files relative to the project base directory.
      val files = warns.map(f => Strings.substringAfter(f, project.getBasedir.getAbsolutePath + /))
      getLog.warn("Whitespace violations:\n" + files.mkString("\n"))
      throw new MojoExecutionException("Find violations")
    }
  }

  /** Records `path` (canonicalised, de-duplicated) unless it does not exist
    * or lives under the build's target/ directory.
    */
  private def check(path: String, dirs: mutable.Buffer[String]): Unit = {
    val dir = new File(path)
    if (dir.exists() && !path.startsWith(project.getBasedir.getAbsolutePath + / + "target")) {
      val canonical = dir.getCanonicalPath
      if (!dirs.contains(canonical)) {
        dirs += canonical
      }
    }
  }
}
| beangle/beangle | style/src/main/scala/org/beangle/style/maven/WSCheckMojo.scala | Scala | lgpl-3.0 | 2,719 |
/*
* Copyright 2016 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.regex
import java.time.{Instant, LocalDate, LocalDateTime, LocalTime, OffsetDateTime, ZonedDateTime}
import kantan.codecs.export.Exported
import kantan.codecs.strings.StringDecoder
import kantan.codecs.strings.java8.{TimeDecoderCompanion, ToFormatLiteral}
/** Declares [[kantan.regex.GroupDecoder]] instances for java8 date and time types.
*
* Note that the type for default codecs might come as a surprise: the wrapping `Exported` is used to lower their
* priority. This is necessary because the standard use case will be to `import kantan.regex.java8._`, which
* brings both the instance creation and default instances in scope. Without this type trickery, custom instances
* and default ones would always clash.
*/
package object java8 extends TimeDecoderCompanion[Option[String], DecodeError, codecs.type] with ToFormatLiteral {
  /** Bridges plain string decoders to group decoders; used by the companion to derive the instances below. */
  override def decoderFrom[D](d: StringDecoder[D]) = codecs.fromString(d)
  // Default decoders for the java.time types. Each is wrapped in Exported to
  // lower its implicit priority so user-defined instances win (see the
  // package-level scaladoc above).
  implicit val defaultInstantGroupDecoder: Exported[GroupDecoder[Instant]] =
    Exported(defaultInstantDecoder)
  implicit val defaultZonedDateTimeGroupDecoder: Exported[GroupDecoder[ZonedDateTime]] =
    Exported(defaultZonedDateTimeDecoder)
  implicit val defaultOffsetDateTimeGroupDecoder: Exported[GroupDecoder[OffsetDateTime]] =
    Exported(defaultOffsetDateTimeDecoder)
  implicit val defaultLocalDateTimeGroupDecoder: Exported[GroupDecoder[LocalDateTime]] =
    Exported(defaultLocalDateTimeDecoder)
  implicit val defaultLocalDateGroupDecoder: Exported[GroupDecoder[LocalDate]] =
    Exported(defaultLocalDateDecoder)
  implicit val defaultLocalTimeGroupDecoder: Exported[GroupDecoder[LocalTime]] =
    Exported(defaultLocalTimeDecoder)
}
| nrinaudo/kantan.regex | java8/src/main/scala/kantan/regex/java8/package.scala | Scala | apache-2.0 | 2,288 |
// Copyright (C) Maxime MORGE 2017
package org.scaia.solver.asia
import org.scaia.asia._
/**
* Local search algorithm which returns a "good" matching
* @param pb to solve
* @param rule to apply (maximize the utilitarian/egalitarian welfare
*/
class HillClimbingSolver(pb : IAProblem, rule: SocialRule) extends ASIASolver(pb){

  /**
    * Returns a matching found by hill climbing: starting from a random
    * matching, repeatedly moves to the best-scoring neighbour until no
    * neighbour improves the welfare prescribed by the social rule, i.e.
    * until a local optimum is reached.
    * @return the matching at the local optimum
    */
  override def solve() : Matching = {
    var step= 0
    var found = false
    var current = pb.generateRandomMatching()
    do{
      step += 1
      if (debug) println(s"HillClimbingSolver: step $step")
      // Welfare of the current matching under the configured social rule.
      val currentWelfare= rule match {
        case Utilitarian => current.utilitarianWelfare()
        case Egalitarian => current.egalitarianWelfare()
      }
      val neighbor= highValueSuccessor(current)
      val neighborWelfare= rule match {
        case Utilitarian => neighbor.utilitarianWelfare()
        case Egalitarian => neighbor.egalitarianWelfare()
      }
      if (debug) println(s"currentWelfare: $currentWelfare neighborWelfare: $neighborWelfare")
      if (neighborWelfare <= currentWelfare){// The best neighbour does not improve: local optimum
        found = true
      }
      else current = neighbor
    } while(! found)
    current
  }

  /**
    * Returns the successor matching with the highest welfare under the
    * configured social rule. Neighbours of `current` are generated by
    * single-individual moves: moving an individual to a non-full activity,
    * swapping it with a member of a full activity, or deactivating it.
    *
    * NOTE(review): `val neighbor = current` aliases the current matching,
    * so the `+=` updates below appear to modify `current` in place rather
    * than an independent copy (unlike the `current.swap(i, j)` branch,
    * which returns a new matching) — confirm Matching's copy semantics.
    */
  def highValueSuccessor(current: Matching) = {
    var maxW = -1.0
    var bestMatching = new Matching(pb)
    pb.individuals.foreach{ i => //For each individual i
      val ai= current.a(i)
      pb.activities.filterNot(_.equals(ai)).foreach{ a => //For each other activity a
        if (!current.isFull(a)){//If a is not full
          val neighbor= current
          neighbor.a+=(i -> a)// i moves to a
          //Build the groups: i joins a's group, and leaves ai's group
          neighbor.g+=(i-> (current.p(a) + i))
          current.p(a).foreach( j => neighbor.g+=(j -> (current.p(a) + i)))
          current.p(ai).foreach( j => neighbor.g+=(j -> (current.p(ai) - i)))
          val w= rule match {
            case Utilitarian => neighbor.utilitarianWelfare()
            case Egalitarian => neighbor.egalitarianWelfare()
          }
          if (w> maxW){
            maxW=w
            bestMatching=neighbor
          }
        }else{//If a is full
          //Switch candidate
          current.p(a).foreach{ j=> // For each individual j assigned to a
            val neighbor= current.swap(i,j)// swap i and j
            val w= rule match {
              case Utilitarian => neighbor.utilitarianWelfare()
              case Egalitarian => neighbor.egalitarianWelfare()
            }
            if (w> maxW){
              maxW=w
              bestMatching=neighbor
            }
          }
        }
      }
      if (!ai.equals(Activity.VOID)){//if i is active, also consider deactivating it
        val neighbor= current
        //Move i to inactive
        neighbor.a+=(i -> Activity.VOID)
        //Build the groups: i becomes a singleton, and leaves ai's group
        neighbor.g+=(i -> Group(i))
        current.p(ai).filterNot(_.equals(i)).foreach( j => neighbor.g+=(j -> (current.p(ai) - i)))
        val w= rule match {
          case Utilitarian => neighbor.utilitarianWelfare()
          case Egalitarian => neighbor.egalitarianWelfare()
        }
        if (w> maxW){
          maxW=w
          bestMatching=neighbor
        }
      }
    }
    bestMatching
  }
}
| maximemorge/ScaIA | src/main/scala/org/scaia/solver/asia/HillClimbingSolver.scala | Scala | gpl-3.0 | 3,392 |
package com.chrisomeara.pillar
import java.util.Date
import java.io.InputStream
import scala.collection.mutable
import scala.io.Source
object Parser {
  /** Creates a fresh parser instance. */
  def apply(): Parser = new Parser

  // Matches migration metadata lines of the form "-- <key>:<value>",
  // capturing the key (authoredAt|description|up|down) and the raw value.
  private val MatchAttribute = """^-- (authoredAt|description|up|down):(.*)$""".r
}
/**
 * Mutable accumulator for a migration while its file is being parsed.
 * Fields are filled in incrementally by the parser and checked with
 * [[validate]] once the whole file has been read.
 */
class PartialMigration {
  var description: String = ""
  var authoredAt: String = ""
  var up = new mutable.MutableList[String]()
  var down: Option[mutable.MutableList[String]] = None

  /**
   * Validates the accumulated fields.
   * @return a map of field name to error message, or None when valid
   */
  def validate: Option[Map[String, String]] = {
    val problems = Seq(
      if (description.isEmpty) Some("description" -> "must be present") else None,
      if (authoredAt.isEmpty) Some("authoredAt" -> "must be present")
      else if (authoredAtAsLong < 1) Some("authoredAt" -> "must be a number greater than zero")
      else None,
      if (up.isEmpty) Some("up" -> "must be present") else None
    ).flatten
    if (problems.isEmpty) None else Some(problems.toMap)
  }

  /** authoredAt parsed as a Long, or -1 when it is not a valid number. */
  def authoredAtAsLong: Long =
    try authoredAt.toLong
    catch { case _: NumberFormatException => -1 }
}
class Parser {
  import Parser.MatchAttribute

  // States of the line-by-line parse: reading "-- key:" header attributes,
  // then the up-script lines, then (optionally) the down-script lines.
  trait ParserState
  case object ParsingAttributes extends ParserState
  case object ParsingUp extends ParserState
  case object ParsingDown extends ParserState

  /**
   * Parses a migration definition from the given stream.
   *
   * Attribute lines ("-- authoredAt:", "-- description:") populate metadata;
   * "-- up:" and "-- down:" switch the parser into CQL-collection mode.
   * Non-empty lines after "-- up:"/"-- down:" are accumulated as statements.
   *
   * @throws InvalidMigrationException when required attributes are missing
   *                                   or authoredAt is not a positive number
   */
  def parse(resource: InputStream): Migration = {
    val inProgress = new PartialMigration
    var state: ParserState = ParsingAttributes
    Source.fromInputStream(resource).getLines().foreach {
      case MatchAttribute("authoredAt", authoredAt) =>
        inProgress.authoredAt = authoredAt.trim
      case MatchAttribute("description", description) =>
        inProgress.description = description.trim
      case MatchAttribute("up", _) =>
        state = ParsingUp
      case MatchAttribute("down", _) =>
        // Allocating the list marks that a down section exists, even if empty.
        inProgress.down = Some(new mutable.MutableList[String]())
        state = ParsingDown
      case cql =>
        if (!cql.isEmpty) {
          state match {
            case ParsingUp => inProgress.up += cql
            case ParsingDown => inProgress.down.get += cql
            case other => // ignored: content before "-- up:" is discarded
          }
        }
    }
    inProgress.validate match {
      case Some(errors) => throw new InvalidMigrationException(errors)
      case None =>
        // NOTE(review): the separator literal appears as "\\n" (backslash-n),
        // possibly a transcription artifact of "\n" — confirm against the
        // original source before relying on the joined string's contents.
        inProgress.down match {
          case Some(downLines) =>
            if (downLines.isEmpty) {
              // "-- down:" present but empty: treat as no down migration.
              Migration(inProgress.description, new Date(inProgress.authoredAtAsLong), inProgress.up.mkString("\\n"), None)
            } else {
              Migration(inProgress.description, new Date(inProgress.authoredAtAsLong), inProgress.up.mkString("\\n"), Some(downLines.mkString("\\n")))
            }
          case None => Migration(inProgress.description, new Date(inProgress.authoredAtAsLong), inProgress.up.mkString("\\n"))
        }
    }
  }
}
| weirded/pillar | src/main/scala/com/chrisomeara/pillar/Parser.scala | Scala | mit | 2,803 |
package be.studiocredo.aws
/** Configuration key names for the application's AWS settings. */
object AwsConfigKeys {
  // Credentials
  val accessKey: String = "aws.access-key"
  val secretKey: String = "aws.secret-key"
  // Key pair used for signing (private key supplied as a classpath resource or a file path)
  val keyPairId: String = "aws.key-pair.id"
  val keyPairPrivateKeyResource: String = "aws.key-pair.private-key.resource"
  val keyPairPrivateKeyPath: String = "aws.key-pair.private-key.path"
  // S3 storage settings
  val s3BucketName: String = "aws.s3.bucket-name"
  val s3LogBucketName: String = "aws.s3.log-bucket-name"
  val s3Region: String = "aws.s3.region"
  // CloudFront distribution settings
  val cfDnsAlias: String = "aws.cf.dns-alias"
  val cfUseAlias: String = "aws.cf.use-alias"
  val cfRegion: String = "aws.cf.region"
  val cfUrlValidity: String = "aws.cf.url-validity"
}
} | studiocredo/ticket-reservation | app/be/studiocredo/aws/AwsConfigKeys.scala | Scala | apache-2.0 | 663 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets matching specific criteria, giving a basic overview of the dataset's content without deeper analysis.