code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
/**
* Copyright 2010-2012 Alex Jones
*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package html
import dates.May
import model.GameKey
import models.{Location, Competition}
import java.time.ZonedDateTime
import org.specs2.mutable.Specification
/**
* Test that game locator comparators are well behaved.
*
* @author alex
*
*/
// Specs2 specification for the Ordering behaviour of the two GameLocator
// implementations: GameKeyLocator (locates a game by its key) and
// DatePlayedLocator (locates a game by the date it was played).
class GameLocatorSpec extends Specification {
// Cross-type ordering: any GameKeyLocator sorts before any DatePlayedLocator,
// regardless of their contents.
"The two different types of game locators" should {
val gameKeyLocator: GameLocator =
new GameKeyLocator(new GameKey(Competition.FACP, Location.AWAY, "Opponents", 2012))
val datePlayedLocator: GameLocator = DatePlayedLocator(ZonedDateTime.now())
"order GameKeyLocators before DatePlayedLocators" in {
gameKeyLocator must be_<(datePlayedLocator)
}
"order DatePlayedLocators after GameKeyLocators" in {
datePlayedLocator must be_>(gameKeyLocator)
}
}
// Same-type ordering for DatePlayedLocator follows the played date.
// May(d, y) at (h, m) is a date-building DSL from the dates package -- TODO confirm.
"Comparing DatePlayedLocators" should {
val lowerDatePlayedLocator: GameLocator = DatePlayedLocator(May(1, 2000) at (9, 30))
val higherDatePlayedLocator: GameLocator = DatePlayedLocator(May(3, 2000) at (9, 30))
"be greater than when the date is greater than" in {
higherDatePlayedLocator must be_>(lowerDatePlayedLocator)
}
"be less than when the date is less than" in {
lowerDatePlayedLocator must be_<(higherDatePlayedLocator)
}
// Equality is exercised via <= and >= against the same instance.
"be equal when the dates are equal" in {
higherDatePlayedLocator must be_>=(higherDatePlayedLocator)
higherDatePlayedLocator must be_<=(higherDatePlayedLocator)
}
}
// Same-type ordering for GameKeyLocator follows the game key; the two keys
// below differ only in the season year (2011 vs 2012).
"Comparing GameKeyLocators" should {
val lowerGameKeyLocator: GameLocator = GameKeyLocator(new GameKey(Competition.FACP, Location.AWAY, "Opponents", 2011))
val higherGameKeyLocator: GameLocator = GameKeyLocator(new GameKey(Competition.FACP, Location.AWAY, "Opponents", 2012))
"be greater than when the game key is greater than" in {
higherGameKeyLocator must be_>(lowerGameKeyLocator)
}
"be less than when the game key is less than" in {
lowerGameKeyLocator must be_<(higherGameKeyLocator)
}
"be equal when the game keys are equal" in {
higherGameKeyLocator must be_<=(higherGameKeyLocator)
higherGameKeyLocator must be_>=(higherGameKeyLocator)
}
}
}
| unclealex72/west-ham-calendar | test/html/GameLocatorSpec.scala | Scala | apache-2.0 | 2,994 |
package thangiee.riotapi.summoner
case class Summoner(
id: Long,
name: String,
profileIconId: Int,
revisionDate: Long,
summonerLevel: Int
) | Thangiee/Riot-API-Scala | src/main/scala/thangiee/riotapi/summoner/Summoner.scala | Scala | mit | 152 |
package org.wikiwatershed.mmw.geoprocessing
import akka.http.scaladsl.unmarshalling.Unmarshaller._
import akka.http.scaladsl.server.{ HttpApp, Route }
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import spray.json._
import com.typesafe.config.ConfigFactory
import com.typesafe.scalalogging.LazyLogging
/**
 * Request payload for the single-operation "/run" endpoint.
 *
 * @param operationType name of the operation: RasterGroupedCount,
 *                      RasterGroupedAverage, RasterLinesJoin or RasterSummary
 * @param rasters       raster layer identifiers the operation reads
 * @param targetRaster  optional additional raster (usage depends on the operation -- TODO confirm)
 * @param pixelIsArea   optional flag controlling pixel interpretation
 * @param zoom          zoom level at which rasters are read
 * @param polygonCRS    CRS of the input polygon(s)
 * @param rasterCRS     CRS of the rasters
 * @param polygon       area(s) of interest as GeoJSON strings
 * @param vectorCRS     optional CRS of the input vector geometries
 * @param vector        optional vector geometries as GeoJSON strings
 */
case class InputData(
operationType: String,
rasters: List[String],
targetRaster: Option[String],
pixelIsArea: Option[Boolean],
zoom: Int,
polygonCRS: String,
rasterCRS: String,
polygon: List[String],
vectorCRS: Option[String],
vector: Option[List[String]]
)
/** Wrapper matching the POST body shape: `{ "input": { ... } }`. */
case class PostRequest(input: InputData)
/** Result map with integer values (e.g. counts per key). */
case class ResultInt(result: Map[String, Int])
/** Result map with floating-point values (e.g. averages per key). */
case class ResultDouble(result: Map[String, Double])
/** Sequence of result maps, one per input (used by RasterSummary -- TODO confirm). */
case class ResultSummary(result: Seq[Map[String, Double]])
// HUCs have an id and a shape. The shape is GeoJSON, but we've transmitted
// them as Strings in the past so we continue to do so here.
// HucID and GeoJSONString are presumably type aliases declared elsewhere in
// this package -- TODO confirm.
case class HUC (
id: HucID,
shape: GeoJSONString // GeoJSON Polygon or MultiPolygon
)
/**
 * A single named geoprocessing operation, applied to each shape of a
 * [[MultiInput]] request.
 *
 * @param name         operation kind (see inline comment below)
 * @param label        identifier under which this operation's results are keyed
 * @param rasters      rasters the operation reads
 * @param targetRaster optional additional raster
 * @param pixelIsArea  optional flag controlling pixel interpretation
 */
case class Operation (
name: String, // RasterGroupedCount, RasterGroupedAverage, RasterLinesJoin
label: OperationID,
rasters: List[RasterID],
targetRaster: Option[RasterID],
pixelIsArea: Option[Boolean]
)
/**
 * Request payload for the batched "/multi" endpoint: run every operation
 * against every HUC shape.
 *
 * @param shapes      the HUCs to process
 * @param streamLines optional stream geometries (used by RasterLinesJoin -- TODO confirm)
 * @param operations  the operations to run
 */
case class MultiInput (
shapes: List[HUC],
streamLines: Option[List[GeoJSONString]], // GeoJSON MultiLineString
operations: List[Operation]
)
// spray-json (de)serialization for the request/response case classes above.
// NOTE: the jsonFormatN arity must match the case class field count, so adding
// or removing a field on a case class requires updating its format here too.
object PostRequestProtocol extends DefaultJsonProtocol {
implicit val inputFormat = jsonFormat10(InputData)
implicit val postFormat = jsonFormat1(PostRequest)
implicit val resultFormat = jsonFormat1(ResultInt)
implicit val resultDoubleFormat = jsonFormat1(ResultDouble)
implicit val resultSummaryFormat = jsonFormat1(ResultSummary)
implicit val hucFormat = jsonFormat2(HUC)
implicit val operationFormat = jsonFormat5(Operation)
implicit val multiInputFormat = jsonFormat3(MultiInput)
}
/**
 * HTTP entry point: wires the geoprocessing operations to a GET health check
 * ("/ping") and two POST routes ("/run" for a single operation, "/multi" for
 * batched operations over several HUC shapes).
 */
object WebServer extends HttpApp with App with LazyLogging with Geoprocessing with ErrorHandler {
import PostRequestProtocol._
// Unknown operation types raise InvalidOperationException, which
// geoprocessingExceptionHandler converts into an HTTP error response.
@throws(classOf[InvalidOperationException])
def routes: Route =
handleExceptions(geoprocessingExceptionHandler) {
get {
path("ping") {
complete("pong")
}
} ~
post {
path("run") {
entity(as[PostRequest]) { data =>
// Dispatch on the declared operation type.
data.input.operationType match {
case "RasterGroupedCount" =>
complete(getRasterGroupedCount(data.input))
case "RasterGroupedAverage" =>
complete(getRasterGroupedAverage(data.input))
case "RasterLinesJoin" =>
complete(getRasterLinesJoin(data.input))
case "RasterSummary" =>
complete(getRasterSummary(data.input))
case _ => {
val message = s"Unknown operationType: ${data.input.operationType}"
throw new InvalidOperationException(message)
}
}
}
} ~
path("multi") {
entity(as[MultiInput]) { input =>
complete(getMultiOperations(input))
}
}
}
}
// Host and port come from the "geoprocessing" section of the loaded config.
val config = ConfigFactory.load()
val port = config.getInt("geoprocessing.port")
val host = config.getString("geoprocessing.hostname")
startServer(host, port)
}
| WikiWatershed/mmw-geoprocessing | api/src/main/scala/WebServer.scala | Scala | apache-2.0 | 3,312 |
package org.scalaide.core.internal.builder.zinc
import java.io.File
import java.util.zip.ZipFile
import org.eclipse.core.resources.IContainer
import org.eclipse.core.runtime.IPath
import org.eclipse.core.runtime.SubMonitor
import org.scalaide.core.IScalaInstallation
import org.scalaide.core.IScalaProject
import org.scalaide.core.internal.ScalaPlugin
import org.scalaide.core.internal.project.ScalaInstallation.scalaInstanceForInstallation
import org.scalaide.ui.internal.preferences
import org.scalaide.util.internal.SettingConverterUtil
import sbt.internal.inc.AnalyzingCompiler
import sbt.internal.inc.CompilerCache
import sbt.internal.inc.CompilerBridgeProvider
import sbt.internal.inc.Locate
import sbt.internal.inc.classpath.ClasspathUtilities
import xsbti.Logger
import xsbti.Maybe
import xsbti.compile.ClasspathOptions
import xsbti.compile.CompileAnalysis
import xsbti.compile.CompileProgress
import xsbti.compile.DefinesClass
import xsbti.compile.IncOptions
import xsbti.compile.IncOptionsUtil
import xsbti.compile.MultipleOutput
import xsbti.compile.TransactionalManagerType
/**
 * Inputs-like class, but not implementing xsbti.compile.Inputs.
 *
 * We return a real IncOptions instance, instead of relying on the Java interface,
 * based on String maps. This allows us to use the transactional classfile writer.
 *
 * NOTE(review): the original text contained mangled `β` characters where the
 * `=>` lambda arrows belong (in `classpath` and `compilers`); they are restored
 * here as `=>` so the code compiles.
 */
class SbtInputs(installation: IScalaInstallation,
    sourceFiles: Seq[File],
    project: IScalaProject,
    javaMonitor: SubMonitor,
    scalaProgress: CompileProgress,
    tempDir: File, // used to store classfiles between compilation runs to implement all-or-nothing semantics
    logger: Logger,
    addToClasspath: Seq[IPath] = Seq.empty,
    srcOutputs: Seq[(IContainer, IContainer)] = Seq.empty) {

  def cache = CompilerCache.fresh // May want to explore caching possibilities.

  // This project followed by every transitive dependency that is a Scala project.
  private val allProjects = project +: project.transitiveDependencies.flatMap(ScalaPlugin().asScalaProject)

  /**
   * Returns the incremental-compilation analysis for a classpath entry, if any.
   * Plain files (jars) carry no analysis; for directories, the first project
   * whose build manager owns the entry provides its latest analysis.
   */
  def analysisMap(f: File): Maybe[CompileAnalysis] =
    if (f.isFile)
      Maybe.nothing[CompileAnalysis]
    else {
      val analysis = allProjects.collectFirst {
        case project if project.buildManager.buildManagerOf(f).nonEmpty =>
          project.buildManager.buildManagerOf(f).get.latestAnalysis
      }
      analysis.map { analysis =>
        Maybe.just(analysis)
      }.getOrElse(Maybe.nothing[CompileAnalysis])
    }

  def progress = Maybe.just(scalaProgress)

  /** Incremental-compiler options, configured from the project's preference store. */
  def incOptions: IncOptions = {
    IncOptionsUtil.defaultIncOptions().
      withApiDebug(project.storage.getBoolean(SettingConverterUtil.convertNameToProperty(preferences.ScalaPluginSettings.apiDiff.name))).
      withRelationsDebug(project.storage.getBoolean(SettingConverterUtil.convertNameToProperty(preferences.ScalaPluginSettings.relationsDebug.name))).
      // the transactional manager gives all-or-nothing classfile semantics via tempDir
      withClassfileManagerType(Maybe.just(new TransactionalManagerType(tempDir, logger))).
      withApiDumpDirectory(Maybe.nothing()).
      withRecompileOnMacroDef(Maybe.just(project.storage.getBoolean(SettingConverterUtil.convertNameToProperty(preferences.ScalaPluginSettings.recompileOnMacroDef.name)))).
      // Turning off name hashing is not supported in class-based dependency tracking
      withNameHashing(true)
  }

  def outputFolders = srcOutputs.map {
    case (_, out) => out.getRawLocation
  }

  /** Full compile classpath: JDK, user classpath, extra entries and output folders. */
  def classpath = (project.scalaClasspath.jdkPaths ++ project.scalaClasspath.userCp ++ addToClasspath ++ outputFolders)
    .distinct
    .map { cp =>
      val location = Option(cp.toFile).flatMap(f => Option(f.getAbsoluteFile))
      location getOrElse (throw new IllegalStateException(s"The classpath location `$cp` is invalid."))
    }.toArray

  def sources = sourceFiles.toArray

  /** Source/output folder mapping handed to zinc; honours the srcOutputs overrides. */
  def output = new MultipleOutput {
    private def sourceOutputFolders =
      if (srcOutputs.nonEmpty) srcOutputs else project.sourceOutputFolders
    override def outputGroups = sourceOutputFolders.map {
      case (src, out) => new MultipleOutput.OutputGroup {
        override def sourceDirectory = {
          val loc = src.getLocation
          if (loc != null)
            loc.toFile()
          else
            throw new IllegalStateException(s"The source folder location `$src` is invalid.")
        }
        override def outputDirectory = {
          val loc = out.getLocation
          if (loc != null)
            loc.toFile()
          else
            throw new IllegalStateException(s"The output folder location `$out` is invalid.")
        }
      }
    }.toArray
  }

  // remove arguments not understood by build compiler
  def scalacOptions =
    if (project.isUsingCompatibilityMode())
      project.scalacArguments.filter(buildCompilerOption).toArray
    else
      project.scalacArguments.toArray

  /** Remove the source-level related arguments */
  private def buildCompilerOption(arg: String): Boolean =
    !arg.startsWith("-Xsource") && !(arg == "-Ymacro-expand:none")

  def javacOptions: Seq[String] = Nil // Not used.

  import org.scalaide.ui.internal.preferences.ScalaPluginSettings.compileOrder
  import org.scalaide.util.internal.SettingConverterUtil.convertNameToProperty
  import xsbti.compile.CompileOrder._

  /** Java/Scala compile ordering from the project preferences; defaults to Mixed. */
  def order = project.storage.getString(convertNameToProperty(compileOrder.name)) match {
    case "JavaThenScala" => JavaThenScala
    case "ScalaThenJava" => ScalaThenJava
    case _ => Mixed
  }

  /**
   * @return Right-biased instance of Either (error message in Left, value in Right)
   */
  def compilers: Either[String, Compilers] = {
    val scalaInstance = scalaInstanceForInstallation(installation)
    val store = ScalaPlugin().compilerBridgeStore
    store.compilerBridgeFor(installation)(javaMonitor.newChild(10)).right.map {
      compilerBridge =>
        // prevent zinc from adding things to the (boot)classpath
        val cpOptions = new ClasspathOptions(false, false, false, /* autoBoot = */ false, /* filterLibrary = */ false)
        Compilers(
          new AnalyzingCompiler(scalaInstance, CompilerBridgeProvider.constant(compilerBridge.toFile), cpOptions, _ => (), None),
          new JavaEclipseCompiler(project.underlying, javaMonitor))
    }
  }
}
/**
 * Factory for `DefinesClass` lookups over classpath entries: a directory of
 * classfiles, a jar/zip archive, or anything else (which defines no classes).
 */
private[zinc] object Locator {

  /** Lookup that never matches; used for entries that are neither directories nor archives. */
  val NoClass = new DefinesClass {
    override def apply(className: String) = false
  }

  /** Chooses the appropriate lookup strategy for the given classpath entry. */
  def apply(f: File): DefinesClass =
    if (f.isDirectory)
      new DirectoryLocator(f)
    else if (f.exists && ClasspathUtilities.isArchive(f))
      new JarLocator(f)
    else
      NoClass

  /** Resolves class names against .class files under a directory. */
  class DirectoryLocator(dir: File) extends DefinesClass {
    override def apply(className: String): Boolean = Locate.classFile(dir, className).isFile
  }

  /** Resolves class names against the entries of a jar/zip archive. */
  class JarLocator(jar: File) extends DefinesClass {

    /** All class names contained in the archive; read once on first use, then cached. */
    lazy val entries: Set[String] = {
      val zipFile = new ZipFile(jar, ZipFile.OPEN_READ)
      try {
        import scala.collection.JavaConverters._
        zipFile.entries.asScala.filterNot(_.isDirectory).map { entry =>
          toClassNameFromJarFileName(entry.getName)
        }.toSet
      } finally
        zipFile.close()
    }

    /**
     * Converts a jar entry name (e.g. "a/b/C.class") to a class name ("a.b.C").
     * stripSuffix is a no-op when the entry does not end in ".class", and the
     * char-based replace avoids the regex compilation replaceAll performed.
     */
    private def toClassNameFromJarFileName(jarFileName: String): String =
      jarFileName.stripSuffix(".class").replace('/', '.')

    override def apply(className: String): Boolean =
      entries.contains(className)
  }
}
| sschaef/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/core/internal/builder/zinc/SbtInputs.scala | Scala | bsd-3-clause | 7,350 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index.metadata
import java.nio.charset.StandardCharsets
import org.locationtech.geomesa.index.metadata.KeyValueStoreMetadata.decodeRow
import org.locationtech.geomesa.utils.collection.CloseableIterator
import scala.util.control.NonFatal
/**
 * Table-based metadata implementation for key-value stores. As with TableBasedMetadata, the metadata is
 * persisted in a database table. The underlying table will be lazily created when required. Metadata values
 * are cached with a configurable timeout to save repeated database reads.
 *
 * Rows are keyed as `<typeName><typeNameSeparator><key>`, with separator
 * occurrences inside the type name escaped (see the companion object).
 *
 * @tparam T type param
 */
trait KeyValueStoreMetadata[T] extends TableBasedMetadata[T] {

  // separator used between type names and keys
  val typeNameSeparator: Char = '~'

  /** Encodes a (typeName, key) pair into a single row key using the separator. */
  def encodeRow(typeName: String, key: String): Array[Byte] =
    KeyValueStoreMetadata.encodeRow(typeName, key, typeNameSeparator)

  override protected def write(typeName: String, rows: Seq[(String, Array[Byte])]): Unit =
    write(rows.map { case (k, v) => (encodeRow(typeName, k), v) })

  override protected def delete(typeName: String, keys: Seq[String]): Unit =
    delete(keys.map(k => encodeRow(typeName, k)))

  override protected def scanValue(typeName: String, key: String): Option[Array[Byte]] =
    scanValue(encodeRow(typeName, key))

  override protected def scanValues(typeName: String, prefix: String): CloseableIterator[(String, Array[Byte])] = {
    scanRows(Some(encodeRow(typeName, prefix))).flatMap { case (row, value) =>
      // rows that fail to decode are logged and skipped so one bad row cannot break the scan
      try { CloseableIterator.single((decodeRow(row, typeNameSeparator)._2, value)) } catch {
        case NonFatal(_) =>
          logger.warn(s"Ignoring unexpected row in catalog table: ${new String(row, StandardCharsets.UTF_8)}")
          CloseableIterator.empty
      }
    }
  }

  override protected def scanKeys(): CloseableIterator[(String, String)] = {
    scanRows(None).flatMap { case (row, _) =>
      // rows that fail to decode are logged and skipped so one bad row cannot break the scan
      try { CloseableIterator.single(decodeRow(row, typeNameSeparator)) } catch {
        case NonFatal(_) =>
          logger.warn(s"Ignoring unexpected row in catalog table: ${new String(row, StandardCharsets.UTF_8)}")
          CloseableIterator.empty
      }
    }
  }

  /**
   * Writes row/value pairs
   *
   * @param rows row/values
   */
  protected def write(rows: Seq[(Array[Byte], Array[Byte])]): Unit

  /**
   * Deletes multiple rows
   *
   * @param rows rows
   */
  // explicit Unit result type added: procedure syntax (no result type) is deprecated
  protected def delete(rows: Seq[Array[Byte]]): Unit

  /**
   * Reads a value from the underlying table
   *
   * @param row row
   * @return value, if it exists
   */
  protected def scanValue(row: Array[Byte]): Option[Array[Byte]]

  /**
   * Reads row keys from the underlying table
   *
   * @param prefix row key prefix
   * @return matching row keys and values
   */
  protected def scanRows(prefix: Option[Array[Byte]]): CloseableIterator[(Array[Byte], Array[Byte])]
}
/**
 * Helpers for packing a (typeName, key) pair into one UTF-8 row key and back.
 * The separator character is escaped inside the type name so that decoding can
 * safely split on the first separator occurrence.
 */
object KeyValueStoreMetadata {

  // Escape token for the separator, e.g. "%U+007E" for '~' (4-digit hex code point).
  private def escapeFor(separator: Char): String = "%" + "U+%04X".format(separator.toInt)

  /**
   * Encodes a type name and key into a row key of the form
   * `<escaped-type-name><separator><key>`, as UTF-8 bytes.
   */
  def encodeRow(typeName: String, key: String, separator: Char): Array[Byte] = {
    val safeTypeName = typeName.replace(separator.toString, escapeFor(separator))
    val row = new StringBuilder(safeTypeName).append(separator).append(key).toString
    row.getBytes(StandardCharsets.UTF_8)
  }

  /**
   * Decodes a row key produced by [[encodeRow]] back into its (typeName, key)
   * parts: splits on the first separator and un-escapes the type name.
   */
  def decodeRow(row: Array[Byte], separator: Char): (String, String) = {
    val all = new String(row, StandardCharsets.UTF_8)
    val split = all.indexOf(separator)
    val typeName = all.take(split).replace(escapeFor(separator), separator.toString)
    val key = all.drop(split + 1)
    (typeName, key)
  }
}
| aheyne/geomesa | geomesa-index-api/src/main/scala/org/locationtech/geomesa/index/metadata/KeyValueStoreMetadata.scala | Scala | apache-2.0 | 4,093 |
package com.nthportal.versions
package semver
/**
 * A [[http://semver.org/spec/2.0.0.html SemVer 2.0.0]] version possibly
 * containing build metadata.
 *
 * @param extendedVersion the [[v3.ExtendedVersion version with an extension]]
 * @param buildMetadata the build metadata, if any exists
 * @tparam E the type of the version extension
 * @tparam M the type of the build metadata
 */
final case class SemanticVersion[E, M](extendedVersion: v3.ExtendedVersion[E], buildMetadata: Option[M])
    extends Ordered[SemanticVersion[E, _]] {

  /**
   * The [[v3.Version version]] portion of this SemVer version;
   * equivalent to [[v3.ExtendedVersion.version]].
   */
  @inline
  def version: v3.Version = extendedVersion.version

  /**
   * The extension portion of this SemVer version;
   * equivalent to [[v3.ExtendedVersion.extension]].
   */
  @inline
  def extension: E = extendedVersion.extension

  // Build metadata does not participate in ordering (per the SemVer spec).
  override def compare(that: SemanticVersion[E, _]): Int =
    this.extendedVersion compare that.extendedVersion

  override def toString: String = {
    val metadataSuffix = buildMetadata.fold("")(data => s"+$data")
    extendedVersion.toString + metadataSuffix
  }
}
| NthPortal/versions | src/main/scala/com/nthportal/versions/semver/SemanticVersion.scala | Scala | apache-2.0 | 1,364 |
package com.twitter.finagle.mux
import com.twitter.finagle.FailureFlags
import com.twitter.logging.HasLogLevel
import scala.util.control.NoStackTrace
/**
 * Indicates that a client requested that a given request be discarded.
 *
 * This implies that the client issued a Tdiscarded message for a given tagged
 * request, as per [[com.twitter.finagle.mux]].
 */
class ClientDiscardedRequestException private[mux] (why: String, val flags: Long)
extends Exception(why)
with FailureFlags[ClientDiscardedRequestException]
with HasLogLevel
with NoStackTrace {
// Discards are a routine, client-initiated event, so log them at DEBUG.
def logLevel: com.twitter.logging.Level = com.twitter.logging.Level.DEBUG
// Public constructor: discarded requests are flagged as interrupted by default.
def this(why: String) = this(why, FailureFlags.Interrupted)
// Required by FailureFlags: a copy of this exception carrying the given flags.
def copyWithFlags(newFlags: Long): ClientDiscardedRequestException =
new ClientDiscardedRequestException(why, newFlags)
}
/**
 * Indicates that the server failed to interpret or act on the request. This
 * could mean that the client sent a [[com.twitter.finagle.mux]] message type
 * that the server is unable to process.
 */
// NoStackTrace: protocol-level failures carry no useful local stack.
case class ServerError(what: String) extends Exception(what) with NoStackTrace
/**
 * Indicates that the server encountered an error whilst processing the client's
 * request. In contrast to [[com.twitter.finagle.mux.ServerError]], a
 * ServerApplicationError relates to server application failure rather than
 * failure to interpret the request.
 */
// NoStackTrace: the failure originated remotely, so a local stack adds no value.
case class ServerApplicationError(what: String) extends Exception(what) with NoStackTrace
| luciferous/finagle | finagle-mux/src/main/scala/com/twitter/finagle/mux/Exceptions.scala | Scala | apache-2.0 | 1,486 |
package org.helianto.ingress.controller
import java.util.Locale
import org.helianto.ingress.service.ResponseService
import org.helianto.user.domain.User
import org.springframework.stereotype.Controller
import org.springframework.ui.Model
import org.springframework.web.bind.annotation.{GetMapping, RequestMapping, RequestMethod, RequestParam}
/**
 * Login controller: renders the login page and its error variants under "/login".
 *
 * @author mauriciofernandesdecastro
 */
@Controller
@RequestMapping(value = Array("/login"))
class LoginController(responseService:ResponseService) {
/**
 * Login page.
 *
 * @param model model automatically injected by the container to receive the view model
 * @param locale locale automatically injected by the container to identify the localization
 */
@GetMapping
def getSignInPage(model: Model, locale: Locale) = {
responseService.loginResponse(model, locale)
}
/**
 * Login errors, signalled via an "error" query parameter on "/login".
 *
 * @param model model
 * @param error error parameter
 * @param locale locale
 */
@GetMapping(params = Array("error"))
def showLoginErrorParam(model: Model, @RequestParam error: String, locale: Locale) = {
responseService.loginErrorResponse(model, locale, error)
}
/**
 * Login errors, signalled via the "/login/error" path and a "type" query parameter.
 *
 * @param model model
 * @param type error type ("type" is a Scala keyword, hence the backticks below)
 * @param locale locale
 */
@GetMapping(value = Array("/error"))
def showLoginErrorPath(model: Model, @RequestParam `type`: String, locale: Locale) = {
responseService.loginErrorResponse(model, locale, `type`)
}
} | iservport/helianto-spring | src/main/scala/org/helianto/ingress/controller/LoginController.scala | Scala | apache-2.0 | 1,515 |
package org.openmole.plugin.sampling.onefactor
import org.openmole.core.highlight.HighLight
import org.openmole.core.pluginregistry.PluginRegistry
import org.osgi.framework.{ BundleActivator, BundleContext }
/**
 * OSGi bundle activator: registers the OneFactorSampling class with the
 * OpenMOLE plugin registry on bundle start and unregisters it on stop.
 */
class Activator extends BundleActivator {
override def stop(context: BundleContext): Unit =
PluginRegistry.unregister(this)
override def start(context: BundleContext): Unit = {
import org.openmole.core.highlight.HighLight._
// Keywords advertised for syntax highlighting of this plugin's sampling.
val keyWords: Vector[HighLight] =
Vector(
SamplingHighLight(classOf[OneFactorSampling])
)
PluginRegistry.register(this, Vector(this.getClass.getPackage), highLight = keyWords)
}
} | openmole/openmole | openmole/plugins/org.openmole.plugin.sampling.onefactor/src/main/scala/org/openmole/plugin/sampling/onefactor/Activator.scala | Scala | agpl-3.0 | 658 |
package com.pygmalios.reactiveinflux
import com.pygmalios.reactiveinflux.error.ReactiveInfluxError
import play.api.libs.ws.WSRequestHolder
// Base class for all reactiveinflux failures.
class ReactiveInfluxException(message: String, cause: Throwable = null) extends RuntimeException(message, cause)
// Raised when an InfluxDB response contains one or more errors; the failing
// request's HTTP method and URL are included in the message for diagnostics.
class ReactiveInfluxResultError(val errors: Set[ReactiveInfluxError], val request: WSRequestHolder)
extends ReactiveInfluxException(s"${errors.mkString(",")} [${request.method} ${request.url}]")
| pygmalios/reactiveinflux | src/main/scala/com/pygmalios/reactiveinflux/ReactiveinfluxException.scala | Scala | apache-2.0 | 451 |
package me.jie.ksrdd
import java.util.Properties
import kafka.common.TopicAndPartition
import kafka.message.MessageAndMetadata
import kafka.serializer.{Decoder}
import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.rdd.RDD
import org.slf4j.LoggerFactory
/**
 * Created by jie on 4/29/16.
 *
 * Factory for [[ksRDD]]: builds an RDD over Kafka messages described by
 * per-topic-partition offset ranges.
 */
object ksRDD {
private val log = LoggerFactory.getLogger(getClass)
// fetchMessageMaxCount caps how many messages a single Spark partition fetches;
// larger offset ranges are sliced (see ksRDD.getPartitions).
def apply[K, V](
sc: SparkContext, kafkaProps: Properties,
fetchInfo: Map[TopicAndPartition, Seq[OffsetFetchInfo]],
fetchMessageMaxCount: Int = 1024 * 1024 * 1024,
keyDecoder: Decoder[K] ,//= new DefaultDecoder ,
valueDecoder: Decoder[V] //= new DefaultDecoder
) =
new ksRDD(sc, kafkaProps, fetchInfo, fetchMessageMaxCount, keyDecoder, valueDecoder)
}
/**
 * RDD over Kafka messages. Each Spark partition fetches one contiguous offset
 * range from a single Kafka topic-partition; ranges larger than
 * fetchMessageMaxCount are sliced into near-equal chunks.
 */
class ksRDD[K, V] private(
_sc: SparkContext, kafkaProps: Properties,
fetchInfo: Map[TopicAndPartition, Seq[OffsetFetchInfo]],
fetchMessageMaxCount: Int,
keyDecoder: Decoder[K], valueDecoder: Decoder[V]
) extends RDD[MessageAndMetadata[K,V]](_sc, Nil){
@DeveloperApi
override def compute(split: Partition, context: TaskContext): Iterator[MessageAndMetadata[K, V]] = {
// Make retried fetch attempts visible in the logs.
if(context.attemptNumber() > 1){
log.warn(s"Attempt ${context.attemptNumber} times for fetching ${split}")
}
val taskStartTime = System.currentTimeMillis()
// Warn when fetching a single partition took more than ~5 minutes.
context.addTaskCompletionListener(_ => {
val used = (System.currentTimeMillis() - taskStartTime) / 1000.0
if(used > 300.0) {
log.warn(s"Fetched ${split} in a quite Long time (${used}s)")
}
})
val topicAndPartition = split.asInstanceOf[kafkaRDDPartition].topicAndPartition
val offsetFetchInfo = split.asInstanceOf[kafkaRDDPartition].offsetFetchInfo
kafkaStream(kafkaProps, keyDecoder, valueDecoder).fetch(topicAndPartition, offsetFetchInfo).iterator
}
override protected def getPartitions: Array[Partition] = {
// Splits an inclusive offset range into near-equal chunks of at most
// maxSizeForSlice offsets; the remainder is spread over the first buckets.
def slice(offsetFetchInfo: OffsetFetchInfo, maxSizeForSlice: Int): Seq[OffsetFetchInfo] = {
val OffsetFetchInfo(from, to) = offsetFetchInfo
(1 + to - from).ensuring(_ < Int.MaxValue).toInt match {
case totalSize if totalSize > maxSizeForSlice => {
val buckets = (totalSize + maxSizeForSlice - 1) / maxSizeForSlice
val (size, rest) = (totalSize / buckets, totalSize % buckets)
val sliceSizes = (1 to buckets) map ( x => if(x <= rest) 1 else 0 ) map(_ + size)
// inits.map(_.sum) yields the running totals (largest first); reversed and
// shifted by (from - 1) they become chunk boundaries, and sliding(2) turns
// consecutive boundaries into inclusive [start, end] offset ranges.
val grads = sliceSizes.inits.map(_.sum).map(_ + from - 1).toSeq.reverse
grads.sliding(2).map(slice => OffsetFetchInfo(slice(0) + 1, slice(1))).toSeq
}
case _ => Seq(offsetFetchInfo)
}
}
// One Spark partition per (topic-partition, sliced offset range) pair.
fetchInfo.map {
case (topicAndPartition, partitionFetchInfos) =>
partitionFetchInfos.flatMap(slice(_, fetchMessageMaxCount)).map { (topicAndPartition, _)}
}.flatten.zipWithIndex.map {
case ((topicAndPartition, offsetFetchInfo), index) => new kafkaRDDPartition(id, index,
topicAndPartition, offsetFetchInfo)
}.toArray
}
}
/**
 * Spark partition descriptor for [[ksRDD]]: one Kafka topic-partition plus the
 * inclusive offset range to fetch.
 */
private[ksrdd] class kafkaRDDPartition(
rddId: Int,
override val index: Int,
val topicAndPartition: TopicAndPartition,
val offsetFetchInfo: OffsetFetchInfo
) extends Partition {
// Combines the RDD id and partition index into a hash.
override def hashCode: Int = 41 * (41 + rddId) + index
override def toString: String = "{rddId: %d, index: %d, topic: %s, partition: %d, offset: %s}"
.format(rddId, index, topicAndPartition.topic, topicAndPartition.partition, offsetFetchInfo)
} | JensenFeng/KSRdd | src/main/scala/me/jie/ksrdd/ksRDD.scala | Scala | apache-2.0 | 4,007 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.job.yarn
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.security.UserGroupInformation
import org.apache.hadoop.yarn.api.records.ApplicationId
import org.apache.hadoop.yarn.api.ApplicationConstants
import org.apache.samza.config.Config
import org.apache.samza.config.JobConfig
import org.apache.samza.util.Util
import org.apache.samza.job.ApplicationStatus
import org.apache.samza.job.ApplicationStatus.Running
import org.apache.samza.job.StreamJob
import org.apache.samza.job.ApplicationStatus.SuccessfulFinish
import org.apache.samza.job.ApplicationStatus.UnsuccessfulFinish
import org.apache.samza.config.YarnConfig.Config2Yarn
import org.apache.samza.config.JobConfig.Config2Job
import org.apache.samza.config.YarnConfig
import org.apache.samza.config.ShellCommandConfig
import org.apache.samza.SamzaException
import org.apache.samza.serializers.model.SamzaObjectMapper
import org.apache.samza.config.JobConfig.Config2Job
import scala.collection.JavaConversions._
import org.apache.samza.config.MapConfig
import org.apache.samza.config.ConfigException
import org.apache.samza.config.SystemConfig
object YarnJob {
// Default memory (in MB) requested for the application-master container.
val DEFAULT_AM_CONTAINER_MEM = 1024
}
/**
 * Starts the application manager: submits the Samza AM to YARN and exposes
 * polling helpers for tracking the resulting application's status.
 */
class YarnJob(config: Config, hadoopConfig: Configuration) extends StreamJob {
import YarnJob._
val client = new ClientHelper(hadoopConfig)
// Set once submit succeeds; None until then (status/kill are no-ops before that).
var appId: Option[ApplicationId] = None
// Submits the AM container: ships the job package, requests memory, and passes
// the coordinator-stream config plus JVM options via environment variables
// consumed by run-am.sh.
def submit: YarnJob = {
appId = client.submitApplication(
new Path(config.getPackagePath.getOrElse(throw new SamzaException("No YARN package path defined in config."))),
config.getAMContainerMaxMemoryMb.getOrElse(DEFAULT_AM_CONTAINER_MEM),
1,
List(
"export SAMZA_LOG_DIR=%s && ln -sfn %s logs && exec ./__package/bin/run-am.sh 1>logs/%s 2>logs/%s"
format (ApplicationConstants.LOG_DIR_EXPANSION_VAR, ApplicationConstants.LOG_DIR_EXPANSION_VAR, ApplicationConstants.STDOUT, ApplicationConstants.STDERR)),
Some({
val coordinatorSystemConfig = Util.buildCoordinatorStreamConfig(config)
val envMap = Map(
ShellCommandConfig.ENV_COORDINATOR_SYSTEM_CONFIG -> Util.envVarEscape(SamzaObjectMapper.getObjectMapper.writeValueAsString(coordinatorSystemConfig)),
ShellCommandConfig.ENV_JAVA_OPTS -> Util.envVarEscape(config.getAmOpts.getOrElse("")))
// Only set JAVA_HOME in the container when explicitly configured.
val envMapWithJavaHome = config.getAMJavaHome match {
case Some(javaHome) => envMap + (ShellCommandConfig.ENV_JAVA_HOME -> javaHome)
case None => envMap
}
envMapWithJavaHome
}),
Some("%s_%s" format (config.getName.get, config.getJobId.getOrElse(1))))
this
}
// Polls once per second until the application reaches a terminal state
// (successful or unsuccessful finish) or the timeout elapses; returns Running
// on timeout. The `null` below is just the discarded match result.
def waitForFinish(timeoutMs: Long): ApplicationStatus = {
val startTimeMs = System.currentTimeMillis()
while (System.currentTimeMillis() - startTimeMs < timeoutMs) {
Option(getStatus) match {
case Some(s) => if (SuccessfulFinish.equals(s) || UnsuccessfulFinish.equals(s)) return s
case None => null
}
Thread.sleep(1000)
}
Running
}
// Polls once per second until the application reaches the requested status or
// the timeout elapses; returns Running on timeout.
def waitForStatus(status: ApplicationStatus, timeoutMs: Long): ApplicationStatus = {
val startTimeMs = System.currentTimeMillis()
while (System.currentTimeMillis() - startTimeMs < timeoutMs) {
Option(getStatus) match {
case Some(s) => if (status.equals(s)) return status
case None => null
}
Thread.sleep(1000)
}
Running
}
// Current status from the YARN client, or null when the job was never
// submitted or the client reports no status.
def getStatus: ApplicationStatus = {
appId match {
case Some(appId) => client.status(appId).getOrElse(null)
case None => null
}
}
// Kills the YARN application if it was submitted; no-op otherwise.
def kill: YarnJob = {
appId match {
case Some(appId) => client.kill(appId)
case None => None
}
this
}
}
| zcan/samza | samza-yarn/src/main/scala/org/apache/samza/job/yarn/YarnJob.scala | Scala | apache-2.0 | 4,585 |
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, File, FileInputStream}
import java.util
import java.util.zip.GZIPOutputStream
import org.openqa.selenium.{By, Capabilities, JavascriptExecutor, WebDriver, WebElement}
import org.openqa.selenium.remote.{DesiredCapabilities, RemoteWebDriver}
import org.openqa.selenium.remote.server.{DriverFactory, DriverProvider}
import org.scalajs.jsenv.selenium.SeleniumJSEnv
import sbt.URL
import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.io.Source
import scala.util.Try
/** Builds a Scala.js Selenium environment that runs tests remotely on BrowserStack,
  * tunnelling back to a local HTTP server which serves the materialized test files.
  */
object BrowserStackRunner {

  /** Constructs and returns the configured SeleniumJSEnv.
    * Side effects: starts a BrowserStack Local tunnel (best effort) and a local
    * HTTP server on port 3000 serving the ".tmp" directory.
    */
  def load(): SeleniumJSEnv = {
    // Credentials come from the environment; the hub URL embeds them.
    val AUTOMATE_USERNAME = System.getenv("AUTOMATE_USERNAME")
    val AUTOMATE_ACCESS_KEY = System.getenv("AUTOMATE_ACCESS_KEY")
    val URL = "https://" + AUTOMATE_USERNAME + ":" + AUTOMATE_ACCESS_KEY + "@hub-cloud.browserstack.com/wd/hub"
    import com.browserstack.local.Local
    val tmpDir = new java.io.File(".tmp")
    // Best-effort start of the BrowserStack Local tunnel; any failure is
    // swallowed because the Try result is deliberately discarded.
    Try {
      val bsLocal = new Local()
      if (!tmpDir.exists) tmpDir.mkdir()
      val bsLocalArgs = new java.util.HashMap[String, String]()
      bsLocalArgs.put("key", AUTOMATE_ACCESS_KEY)
      println(tmpDir.getAbsolutePath)
      bsLocalArgs.put("folder", tmpDir.getAbsolutePath + "/")
      bsLocal.start(bsLocalArgs);
      println(bsLocal.isRunning());
    }
    // Serve the materialized files locally (port 3000, see SimpleHttpServer).
    new SimpleHttpServer(tmpDir).start()
    val caps = new DesiredCapabilities
    caps.setCapability("os", "Windows")
    caps.setCapability("os_version", "10")
    caps.setCapability("resolution", "1920x1080")
    caps.setCapability("browser", "Chrome")
    caps.setCapability("browser_version", "latest")
    caps.setCapability("browserstack.selenium_version", "3.141.59")
    caps.setCapability("browserstack.local", "true")
    caps.setCapability("browserstack.networkLogs", "true")
    caps.setCapability("browserstack.console", "info")
    caps.setCapability("name", "Box Framework Test") // test name
    caps.setCapability("build", Try(System.getenv("BUILD_CODE")).getOrElse("Build code")) // CI/CD job or build name
    val jsenv = new org.scalajs.jsenv.selenium.SeleniumJSEnv(caps, SeleniumJSEnv.Config()
      .withMaterializeInServer(".tmp", "http://localhost:3000/")
      .withDriverFactory(new DriverFactory {
        override def registerDriverProvider(driverProvider: DriverProvider): Unit = {}
        override def newInstance(capabilities: Capabilities) = {
          val d = new RemoteWebDriver(new URL(URL), capabilities)
          // Wrap the remote driver so that close() ends the whole remote session
          // (quit) instead of only closing the current window.
          new WebDriver with JavascriptExecutor{
            override def get(s: String): Unit = d.get(s)
            override def getCurrentUrl: String = d.getCurrentUrl
            override def getTitle: String = d.getTitle
            override def findElements(by: By): util.List[WebElement] = d.findElements(by)
            override def findElement(by: By): WebElement = d.findElement(by)
            override def getPageSource: String = d.getPageSource
            override def close(): Unit = {
              println("calling close")
              //d.close()
              d.quit()
            }
            override def quit(): Unit = d.quit()
            override def getWindowHandles: util.Set[String] = d.getWindowHandles
            override def getWindowHandle: String = d.getWindowHandle
            override def switchTo(): WebDriver.TargetLocator = d.switchTo()
            override def navigate(): WebDriver.Navigation = d.navigate()
            override def manage(): WebDriver.Options = d.manage()
            override def executeScript(s: String, objects: Object*): Object = d.executeScript(s, objects: _*)
            override def executeAsyncScript(s: String, objects: Object*): Object = d.executeAsyncScript(s,objects:_*)
          }
        }
      })
      .withKeepAlive(false)
    )
    println(jsenv.name)
    jsenv
  }
}
import java.io.{InputStream, OutputStream}
import java.net.InetSocketAddress
import com.sun.net.httpserver.{HttpExchange, HttpHandler, HttpServer}
/** Minimal static-file HTTP server rooted at `root`, listening on port 3000. */
class SimpleHttpServer(root:File) extends Thread {
  override def run() {
    val httpServer = HttpServer.create(new InetSocketAddress(3000), 0)
    httpServer.createContext("/", new RootHandler(root))
    // A null executor dispatches requests on the server's own default thread.
    httpServer.setExecutor(null)
    httpServer.start()
  }
}
/** Serves files from `root`, gzip-compressing every response body.
  * Responds 404 when the requested path does not resolve to an existing file.
  */
class RootHandler(root:File) extends HttpHandler {

  def handle(t: HttpExchange) {
    //displayPayload(t.getRequestBody)
    sendResponse(t)
  }

  /** Debug helper: dumps the request body to stdout (call is disabled in handle). */
  private def displayPayload(body: InputStream): Unit ={
    println()
    println("******************** REQUEST START ********************")
    println()
    copyStream(body, System.out)
    println()
    println("********************* REQUEST END *********************")
    println()
  }

  /** Copies `in` to `out` one byte at a time until EOF. */
  private def copyStream(in: InputStream, out: OutputStream) {
    Iterator
      .continually(in.read)
      .takeWhile(-1 !=)
      .foreach(out.write)
  }

  private def sendResponse(t: HttpExchange) {
    val uri = t.getRequestURI
    println("looking for: " + root.getAbsolutePath + uri.getPath)
    val path = uri.getPath
    // SECURITY NOTE: this concatenation does not reject "../" segments, so a
    // client could escape `root`. Acceptable only for a local test server.
    val file = new File(root.getAbsolutePath + path)
    if (!file.exists()) {
      // Object does not exist or is not a file: reject with 404 error.
      val response = "404 (Not Found)\\n"
      t.sendResponseHeaders(404, response.length())
      val os = t.getResponseBody()
      try os.write(response.getBytes()) finally os.close()
    } else {
      // Object exists and is a file: accept with response code 200.
      // endsWith is safe on short paths; the previous substring-based check
      // threw StringIndexOutOfBoundsException for paths shorter than 3 chars
      // (e.g. "/") and compared ".js" / "css" inconsistently.
      val mime: String =
        if (path.endsWith(".js")) "application/javascript"
        else if (path.endsWith(".css")) "text/css"
        else "text/html"
      // Gzip-compress the whole file into memory up front so the exact
      // Content-Length is known before sending headers.
      val bufferGzip = Array.ofDim[Byte](4096)
      val bout = new ByteArrayOutputStream()
      val gout = new GZIPOutputStream(bout)
      val fs = new FileInputStream(file)
      try {
        var countGzip = 0
        while ({countGzip = fs.read(bufferGzip); countGzip } >= 0) {
          gout.write(bufferGzip, 0, countGzip)
        }
        gout.flush()
      } finally {
        // gout must be closed before reading bout so the gzip trailer is written.
        gout.close()
        fs.close()
      }
      val gzipResource = bout.toByteArray
      val h = t.getResponseHeaders()
      h.set("Content-Type", mime)
      h.set("Content-Encoding", "gzip")
      t.sendResponseHeaders(200, gzipResource.length)
      println(gzipResource.length)
      val os = t.getResponseBody()
      try os.write(gzipResource) finally os.close()
    }
  }
}
| Insubric/box | project/BrowserStackRunner.scala | Scala | apache-2.0 | 6,658 |
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.version
/** A versioned entity; implementations expose a version name. */
trait Version {

  /** Human-readable name identifying this version. */
  def name: String
}
| keithhall/ct-calculations | src/main/scala/uk/gov/hmrc/ct/version/Version.scala | Scala | apache-2.0 | 675 |
/**
* Copyright (C) 2019 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon
import org.scalajs.dom.html
import org.scalajs.jquery.{JQuery, JQueryEventObject, JQueryStatic}
import scala.concurrent.{ExecutionContext, Future}
import scala.scalajs.js
import org.orbeon.oxf.util.CoreUtils._
package object jquery {

  /** Enrichments for jQuery collections. */
  implicit class JqueryOps(private val j: JQuery) extends AnyVal {

    // Evaluate `body` for its side effect and return unit typed as js.Any,
    // which is what jQuery handler signatures expect.
    @inline private def asJsAny(body: => Any): js.Any = { body; () }

    /** Registers a delegated event handler, adapting a Scala function to
      * jQuery's js.Function1 callback type.
      */
    def onWithSelector(events: String, selector: String, handler: JQueryEventObject => _): Unit =
      j.on(
        events = events,
        selector = selector,
        handler = ((e: JQueryEventObject) => asJsAny(handler(e))): js.Function1[JQueryEventObject, js.Any]
      )

    // First element of the collection, if any, in various shapes.
    def headElem : Option[html.Element] = j.length > 0 option j(0)
    def headJQuery : Option[JQuery] = j.length > 0 option j.first()
    def headElemJQuery: Option[(html.Element, JQuery)] = j.length > 0 option (j(0), j.first())
  }

  /** Enrichments for the global jQuery object. */
  implicit class JqueryStaticOps(private val j: JQueryStatic) extends AnyVal {

    // Expose jQuery's `$(function)` as a `Future`
    def readyF(implicit executor: ExecutionContext): Future[Unit] =
      j.when(j.asInstanceOf[js.Dynamic].ready).asInstanceOf[js.Thenable[js.Any]].toFuture map (_ => ())
  }
}
| orbeon/orbeon-forms | xforms-web/src/main/scala/org/orbeon/jquery/package.scala | Scala | lgpl-2.1 | 1,904 |
package com.cloudray.scalapress.account
import javax.persistence._
import org.hibernate.annotations.Index
import scala.beans.BeanProperty
/** @author Stephen Samuel */
/** JPA entity mapped to the accounts_types table: a named category of account.
  * Mutable vars with @BeanProperty are required for Hibernate instantiation.
  */
@Entity
@Table(name = "accounts_types")
class AccountType {

  // Auto-generated primary key, indexed for lookups.
  @Id
  @GeneratedValue(strategy = GenerationType.AUTO)
  @Index(name = "id_index")
  @BeanProperty
  var id: Long = _

  // Display name of the account type, indexed for searches.
  @Index(name = "name_index")
  @BeanProperty
  var name: String = _
}
| vidyacraghav/scalapress | src/main/scala/com/cloudray/scalapress/account/AccountType.scala | Scala | apache-2.0 | 422 |
/**
* An example MapReduce application using Scala and HBase.
*
*
* The MIT License (MIT)
*
* Copyright (c) 2014 Jeremy Fisher
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* @author Jeremy Fisher <jeremy@rentawebgeek.com>
*/
package com.rentawebgeek
package object hbmr {

  /** HBase column family names used by this application. */
  object Families {
    val content = HString("content")
  }

  /** HBase column qualifier names. */
  object Qualifiers {
    val text = HString("text")
    val count = HString("count")
  }

  /** HBase table names read and written by the MapReduce jobs. */
  object Tables {
    val webTable = HString("web-table")
    val wordCount = HString("word-count")
  }
}
| rawg/scala-hbase-wordcount | src/main/scala/package.scala | Scala | mit | 1,605 |
package ml.combust.mleap.executor.repository
import java.net.URI
import java.nio.file.Path
import akka.actor.ActorSystem
import com.typesafe.config.Config
import scala.concurrent.Future
import scala.concurrent.duration.TimeUnit
object Repository {
  /** Create a repository from a configuration.
    *
    * @param config typesafe config
    * @param system actor system
    * @return repository
    */
  def fromConfig(config: Config)
                (implicit system: ActorSystem): Repository = {
    // The "class" config entry names a Scala object implementing
    // RepositoryProvider; its singleton instance is retrieved through the
    // compiler-generated static MODULE$ field (the argument to get is ignored
    // for static fields).
    val c = Class.forName(config.getString("class"))
    c.getField("MODULE$").get(c).
      asInstanceOf[RepositoryProvider].
      create(config)
  }
}
/** Repository is source for MLeap bundles that will
* ultimately be used for transforming data.
*/
trait Repository {
  /** Download the bundle specified by the URI
    * to a local file.
    *
    * @param uri uri of the bundle to download
    * @return future of the local file path after completion
    */
  def downloadBundle(uri: URI): Future[Path]

  /** Whether this repository can handle the given URI.
    * Implementations typically inspect the scheme/shape of the URI.
    *
    * @param uri uri for the bundle
    * @return true if it can handle, false otherwise
    */
  def canHandle(uri: URI): Boolean

  /** Close any resources this repository has.
    * Initiates shutdown; see awaitTermination to wait for completion.
    */
  def shutdown(): Unit

  /** Await termination of this repository.
    *
    * @param timeout timeout
    * @param unit unit of timeout
    */
  def awaitTermination(timeout: Long, unit: TimeUnit): Unit
}
/** Can create repositories from Typesafe configs.
  */
trait RepositoryProvider {
  /** Creates a repository from a typesafe config.
    *
    * @param config repository configuration
    * @param system actor system
    * @return repository
    */
  def create(config: Config)(implicit system: ActorSystem): Repository
}
| combust/mleap | mleap-executor/src/main/scala/ml/combust/mleap/executor/repository/Repository.scala | Scala | apache-2.0 | 1,803 |
import testgen._
import TestSuiteBuilder._
import java.io.File
/** Generates the Scala test suite for the beer-song exercise from its
  * canonical JSON data, printing the resulting source to stdout.
  */
object BeerSongTestGenerator {

  def main(args: Array[String]): Unit = {
    val file = new File("src/main/resources/beer-song.json")

    // Renders the expected verses as an escaped, double-quoted Scala string
    // literal. NOTE: the List[String] element type is unchecked at runtime
    // (type erasure); malformed data raises IllegalArgumentException.
    def toString(expected: CanonicalDataParser.Expected): String = {
      expected match {
        case Right(xs: List[String]) => s"""\\"${xs.mkString("\\\\n")}\\\\n\\""""
        case _ => throw new IllegalArgumentException()
      }
    }

    // Builds a TestCaseData from each labeled test, forwarding the named
    // input arguments to the system-under-test call.
    def fromLabeledTestFromInput(argNames: String*): ToTestCaseData =
      withLabeledTest { sut =>
        labeledTest =>
          val args = sutArgsFromInput(labeledTest.result, argNames: _*)
          val property = labeledTest.property
          val sutCall =
            s"""$sut.$property($args)"""
          val expected = toString(labeledTest.expected)
          TestCaseData(labeledTest.description, sutCall, expected)
      }

    val code = TestSuiteBuilder.build(file,
      fromLabeledTestFromInput("startBottles", "takeDown"))
    println(s"-------------")
    println(code)
    println(s"-------------")
  }
}
| ricemery/xscala | testgen/src/main/scala/BeerSongTestGenerator.scala | Scala | mit | 1,066 |
package achilles
import breeze.linalg.SparseVector
/**
* Created by panda on 3/31/15.
*/
/** A term node in the topic graph, identified by its term string. */
class TermNode(val identity: String) extends Node {
  // NOTE(review): initialised with ??? — constructing a TermNode currently
  // throws scala.NotImplementedError; this class is a placeholder.
  var termTopic: SparseVector[Double] = ???

  override def nodeType(): String = "term"

  // Unimplemented placeholders; calling these throws NotImplementedError.
  override def compute(): Unit = ???

  override def timestamp(): Long = ???
}
| yinxusen/achilles | src/main/scala/achilles/TermNode.scala | Scala | apache-2.0 | 312 |
// Compiler pos test for SI-2712 (partial unification): One[Int], which extends
// Two[Int, Int], must be accepted where F[Int, Int] is expected. The exact
// shape of these declarations is the point of the test — do not refactor.
class Two[A, B]

class One[A] extends Two[A, A]

object Test {
  def foo[F[_, _]](x: F[Int, Int]) = x
  val t: One[Int] = ???
  foo(t)
}
| dotty-staging/dotty | tests/pos/t2712-8.scala | Scala | apache-2.0 | 137 |
package info.armado.ausleihe.admin.transport.responses
import javax.xml.bind.annotation.{XmlAccessType, XmlAccessorType, XmlRootElement}
object VerifyGamesResponseDTO {
  /** Builds a response DTO; it is valid iff every barcode error list is empty. */
  def apply(alreadyExistingBarcodes: Array[String], duplicateBarcodes: Array[String], emptyTitleBarcodes: Array[String]): VerifyGamesResponseDTO = {
    val valid = Seq(alreadyExistingBarcodes, duplicateBarcodes, emptyTitleBarcodes).forall(_.isEmpty)
    VerifyGamesResponseDTO(valid, alreadyExistingBarcodes, duplicateBarcodes, emptyTitleBarcodes)
  }
}
/** JAXB-serialisable result of a game verification: lists of problematic
  * barcodes, plus an overall validity flag. Fields are mutable vars because
  * JAXB populates them reflectively (XmlAccessType.FIELD).
  */
@XmlRootElement
@XmlAccessorType(XmlAccessType.FIELD)
case class VerifyGamesResponseDTO(var valid: Boolean,
                          var alreadyExistingBarcodes: Array[String],
                          var duplicateBarcodes: Array[String],
                          var emptyTitleBarcodes: Array[String]) {

  // No-arg constructor required by JAXB for unmarshalling.
  def this() = this(false, Array(), Array(), Array())
}
| Spielekreis-Darmstadt/lending | lending-admin-interfaces/src/main/scala/info/armado/ausleihe/admin/transport/responses/VerifyGamesResponseDTO.scala | Scala | apache-2.0 | 911 |
package dwaspada.thedaam.domain
/** Receives domain events it has subscribed to. */
trait DomainEventListener {

  /** Whether this listener wants to handle the given event. */
  def isSubscribedTo(domainEvent: DomainEvent): Boolean

  /** Reacts to an event — presumably called only when isSubscribedTo is true;
    * confirm against the dispatcher.
    */
  def handleEvent(domainEvent: DomainEvent): Unit
}
| dewey92/commuterline-ddd | src/main/scala/dwaspada/thedaam/domain/DomainEventListener.scala | Scala | mit | 169 |
import java.io.IOException
import com.typesafe.scalalogging.Logger
import io.circe.generic.auto._
import javafx.fxml.FXMLLoader
import loci.communicator.ws.akka.WS
import loci.registry.{Binding, Registry}
import loci.serializer.circe._
import loci.transmitter.RemoteRef
import loci.transmitter.transmittable.IdenticallyTransmittable
import rescala.default._
import scalafx.Includes._
import scalafx.application.JFXApp
import scalafx.application.JFXApp.PrimaryStage
import scalafx.beans.property._
import scalafx.scene.Scene
import scalafx.scene.control._
import scalafxml.core.{FXMLView, NoDependencyResolver}
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
import scala.math.BigDecimal.RoundingMode
import rescala.extra.replication.CirceCodecs._
/** An example of a BorderPane layout, with placement of children in the top,
* left, center, right, and bottom positions.
*
* @see scalafx.scene.layout.BorderPane
*/
object DividiApp extends JFXApp {

  // display splashscreen and ask for username
  val enterNameDialog = new TextInputDialog(defaultValue = "Alice") {
    initOwner(stage)
    title = "Dividi"
    headerText = "Welcome to Dividi!"
    contentText = "Please enter your name:"
  }

  // Cancelling the dialog yields an empty name and terminates the app.
  val username = enterNameDialog.showAndWait().getOrElse("")
  if (username == "") System.exit(0)

  // load new gui
  val resource = getClass.getResource("Gui.fxml")
  if (resource == null) {
    throw new IOException("Cannot load resource: Gui.fxml")
  }

  val onlineGui = BooleanProperty(true)
  val delayGui = IntegerProperty(0)

  // define event fired on submit
  type Title = String
  type Amount = BigDecimal
  type Payer = String
  type Timestamp = Long

  val logger: Logger = Logger("Dividi")

  /** A single expense: who paid, how much, and who shares the cost. */
  case class Transaction(title: Title, amount: Amount, payer: Payer, sharedBetween: Set[Payer], timestamp: Timestamp) {
    override def toString: String = {
      val sharers = sharedBetween.toList.sorted
      if (sharers.length > 1)
        s"$payer paid $amount for $title. Shared between ${sharers.dropRight(1).mkString(", ")} and ${sharers.last}."
      else s"$payer paid $amount for $title. Shared between ${sharers.mkString(",")}."
    }
  }

  implicit val _transmittableGrowOnlyLog: IdenticallyTransmittable[PGrowOnlyLog[Transaction]] =
    IdenticallyTransmittable()

  // instanciate shared log
  @scala.annotation.nowarn
  val logBinding = Binding[PGrowOnlyLog[Transaction]]("log")

  // "Alice" acts as the server hosting the shared log; everyone else connects
  // as a client. NOTE: Await.result blocks startup until the connection exists.
  val (registry, transactionLogDist): (Registry, PGrowOnlyLog[Transaction]) = {
    val registry = new Registry
    if (username == "Alice") { // server mode
      registry.listen(WS(1099))
      val newLog = PGrowOnlyLog[Transaction]()
      registry.bind(logBinding)(newLog)
      (registry, newLog)
    } else { // client mode
      val connection: Future[RemoteRef] = registry.connect(WS("ws://localhost:1099/"))
      val remote: RemoteRef = Await.result(connection, Duration.Inf)
      val subscribedLog: Future[PGrowOnlyLog[Transaction]] = registry.lookup(logBinding, remote)
      val log: PGrowOnlyLog[Transaction] = Await.result(subscribedLog, Duration.Inf)
      (registry, log)
    }
  }

  val transactionLog = transactionLogDist.crdtSignal

  // listen for new transactions and append them to the log
  val newTransaction: Evt[Transaction] = Evt[Transaction]()
  transactionLogDist.observe(newTransaction)

  // extract all people involved
  val peopleInvolved: Signal[Set[Payer]] = Signal {
    transactionLog().iterator.foldLeft(Set[Payer](username))((people, transaction) =>
      people + transaction.payer ++ transaction.sharedBetween
    )
  }

  // calculate a map keeping track of the debts of all users
  // (negative value = owes money, positive value = is owed money)
  val debts: Signal[Map[Payer, Amount]] = Signal {
    transactionLog().iterator.foldLeft(Map[Payer, Amount]().withDefaultValue(0: Amount))((debts, transaction) => {
      val payer = transaction.payer
      val amount = transaction.amount
      val share = {
        if (transaction.sharedBetween.nonEmpty)
          transaction.amount / transaction.sharedBetween.size
        else
          0: Amount
      }

      // map with updated debt for all people involved in transaction
      val updatedDebtorEntries = transaction.sharedBetween.foldLeft(debts)((map, debtor) => {
        map + (debtor -> (map(debtor) - share).setScale(2, RoundingMode.CEILING))
      })

      // add positive amount for payer
      updatedDebtorEntries + (payer -> (updatedDebtorEntries(payer) + amount))
    })
  }

  // propose transactions to settle debts
  val howToSettle: Signal[List[(Payer, Payer, Amount)]] = debts.map(resolveDebts(_))

  /** Greedily computes settlement transfers (debtor, lender, amount) until no
    * negative balance remains; recursion terminates because each step zeroes
    * at least one balance.
    * NOTE(review): `_.getValue` on Scala Map entries relies on an implicit
    * enrichment being in scope — verify which import supplies it.
    * NOTE(review): lenders.head throws if no one has a positive balance; this
    * assumes balances sum to zero.
    */
  def resolveDebts(
      debts: Map[Payer, Amount],
      neededTransactions: List[(Payer, Payer, Amount)] = List()
  ): List[(Payer, Payer, Amount)] = {
    if (!debts.exists(_.getValue < 0))
      neededTransactions
    else {
      println(debts)
      println(neededTransactions)
      val maxDebtor = debts.minBy(debt => debt.getValue)._1 // find person with maximum debt
      println(s"Max debtor is $maxDebtor")

      // find best person to give money to
      val lenders = debts.filter(_.getValue > 0).keys // find users without debt (lenders)
      val firstTry = (lenders.head, debts(lenders.head) + debts(maxDebtor)) // try first lender

      val bestChoice = lenders.foldLeft(firstTry: (Payer, Amount))(
        (currentBest, lender) => { // check if other lenders prove better (have payed amount closer to maxDebtor's debt)
          val thisTry = (lender, debts(lender) + debts(maxDebtor))
          if (thisTry._2.abs < currentBest._2.abs)
            thisTry
          else
            currentBest
        }
      )

      val lender = bestChoice._1
      val proposedTransaction = {
        if (bestChoice._2 > 0) // lend > debt
          (maxDebtor, lender, debts(maxDebtor).abs)
        else // debt > lend
          (maxDebtor, lender, debts(lender))
      }

      resolveDebts(
        debts + (maxDebtor -> (debts(maxDebtor) + proposedTransaction._3)) + (lender -> (debts(
          lender
        ) - proposedTransaction._3)),
        neededTransactions :+ proposedTransaction
      )
    }
  }

  // render FXML
  val root = FXMLView(resource, NoDependencyResolver)

  stage = new PrimaryStage() {
    title = s"Dividi: $username"
    scene = new Scene(root)
  }

  stage.onCloseRequest = (_: Any) => registry.terminate() // terminate registry on window close
}
| guidosalva/REScala | Historical/dividiParoli/src/main/scala/DividiApp.scala | Scala | apache-2.0 | 6,487 |
/*
* DARWIN Genetic Algorithms Framework Project.
* Copyright (c) 2003, 2005, 2007, 2009, 2011, 2016, 2017. Phasmid Software
*
* Originally, developed in Java by Rubecula Software, LLC and hosted by SourceForge.
* Converted to Scala by Phasmid Software and hosted by github at https://github.com/rchillyard/Darwin
*
* This file is part of Darwin.
*
* Darwin is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.phasmid.darwin.eco
import com.phasmid.darwin.base.{Auditable, Identifiable, Plain}
import com.phasmid.laScala.fp.NamedFunction
import com.phasmid.laScala.{OldRenderableCaseClass, Prefix}
/**
* Class to define the concept of Fitness. Fitness applies in each of the two aspects of evolution:
* Survival of the Fittest and Sexual Selection.
*
* Note that we do not make it a case class because we want to distinguish between creating new
* instances of Fitness with/without checking the requirement.
*
* In Survival, fitness is a measure of the viability of an organism's phenotype adapting to an environment.
* It's a Double value and should be in the range 0..1
*
* If a Fitness value is f, then the likelihood of an organism surviving one generation is f,
* assuming that there are no other applicable fitness values other than 1.
*
* Thus, for an environment with many applicable fitness values, the overall likelihood of surviving one generation
* is P(f1, f2, ..., fN) where P stands for the product.
*
* NOTE: this behavior is encoded in the & operator.
*
* In Sexual Selection, fitness is a measure of the probability of a mate being chosen from amongst other potential mates.
*
* Created by scalaprof on 5/5/16.
*/
class Fitness private(val x: Double) extends (() => Double) with Ordering[Fitness] with Plain {
  /**
    * Method to yield the underlying value of this Fitness as a Double
    *
    * @return the value of x
    */
  def apply(): Double = x

  /**
    * Method to define the behavior of combining two Fitness values.
    *
    * @param other the other fitness value
    * @return a new instance of Fitness with a value which is the product of both x values
    */
  def &(other: Fitness): Fitness = new Fitness(x * other.x)

  /**
    * Method to define the behavior of negating (complementing) this Fitness.
    * NOTE: this is not set up for unary application so you will have to use the form f.-
    *
    * @return the complementary Fitness
    */
  def - : Fitness = new Fitness(1 - x)

  /**
    * Method to define the behavior of reducing this Fitness by a factor.
    *
    * @param y the reduction factor
    * @return the reduced Fitness
    */
  def /(y: Double): Fitness = {
    require(y >= 1, s"the reduction factor in / method was not at least 1: $y")
    new Fitness(x / y)
  }

  // Approximate equality with absolute tolerance 1E-10.
  // NOTE(review): hashCode uses the exact Double, so two instances that are
  // equal under this tolerance may still hash differently.
  override def equals(obj: scala.Any): Boolean = obj match {
    case Fitness(y) => math.abs(x - y) < 1E-10
    case _ => false
  }

  override def hashCode(): Int = x.hashCode()

  /**
    * Used by render()
    *
    * @return a String based on x
    */
  override def toString(): String = s"Fitness($x)"

  // Ordering implementation: compares two Fitness instances by their x values.
  def compare(f1: Fitness, f2: Fitness): Int = f1.x compareTo f2.x
}
/**
* Viability represents overall fitness when there are many individual fitness values for an Adaptatype.
* As discussed above, the basic model is that combined fitness is the product of each individual fitness.
* Thus viability is could be defined simply as the product of each fitness.
* Yet, this may result in some very small fitness values when an Adaptatype has many adaptations.
* Thus it is generally better to use the geometric mean, by taking the nth-root of the product.
*
* Viability can also be used in the case where there is more than one sexually-selective trait. However,
* in most simple systems, we typically stick to one such trait and thus Viability in that context will be
* based on one Fitness only. You can think of it then as the Viability of a pair as mates capable of producing live offspring.
* The probability of their progeny surviving is covered by environmental (survival) Fitness.
*
* CONSIDER We could allow some variation on this theme by using implicits.
* However, in this particular situation, it's quite tricky because the Fitness and Viability both extend no-param
* functions which yield a result. The use of apply makes it very difficult to implement an implicit mechanism.
*
* CONSIDER use CaseIdentifiable
*
* @param fs a sequence of Fitness values
*/
case class Viability(fs: Seq[Fitness]) extends (() => Fitness) with Auditable {
  /**
    * Method to add a Fitness to this Viability
    *
    * @param f the Fitness value to add
    * @return a new Viability with f added
    */
  def +(f: Fitness) = Viability(fs :+ f)

  /**
    * Method to yield a Fitness value.
    * If the Viability is empty, then we assume a perfectly fit (viable) result.
    *
    * @return the Fitness value which corresponds to the geometric mean of all Fitness values.
    */
  def apply: Fitness = Viability.geometricMean(fs)

  override def toString(): String = s"Viability($fs)"

  // Delegates rendering to the generic case-class renderer.
  override def render(indent: Int = 0)(implicit tab: (Int) => Prefix): String = OldRenderableCaseClass(this).render(indent)(tab)
}
object Viability {

  /** Convenience factory taking fitness values as varargs. */
  def create(fs: Fitness*): Viability = Viability(fs)

  /** Geometric mean of the given fitness values; an empty sequence is treated as fully viable. */
  private def geometricMean(fs: Seq[Fitness]) =
    if (fs.isEmpty) Fitness.viable
    else {
      val combined = fs.foldLeft(Fitness.viable)(_ & _)
      Fitness(math.exp(math.log(combined()) / fs.length))
    }
}
/**
  * This case class defines a curried X => T => Fitness function together with its name.
  * The way to think about these shape functions is that we are comparing the trait value (t)
  * against the eco value (x).
  * For the delta function, for instance, if t>x, then viable, otherwise nonViable.
  *
  * @param s the name of the shape function
  * @param g the curried fitness function: given an eco value X, yields a T => Fitness
  * @tparam T the underlying type of the trait
  * @tparam X the underlying type of the ecofactor
  */
case class ShapeFunction[T, X](s: String, g: X => T => Fitness) extends NamedFunction[X, T => Fitness](s, g) with Identifiable
object Fitness {

  // Certain survival: probability 1.
  val viable: Fitness = new Fitness(1)

  // Certain death: the complement of viable (probability 0).
  val nonViable: Fitness = viable.-

  // Even odds.
  val tossup: Fitness = new Fitness(0.5)

  /** Validating factory: x must lie in the closed interval [0, 1]. */
  def apply(x: Double): Fitness = {
    require(x >= 0.0 && x <= 1.0, s"invalid Fitness: $x must be in range 0..1")
    new Fitness(x)
  }

  /** Extractor yielding the underlying Double value. */
  def unapply(f: Fitness): Option[Double] = Some(f.x)
}
object ShapeFunction {

  import Fitness.{nonViable, viable}
  // Brings the Numeric[Int] instance into scope for the Int-typed shapes below.
  import Numeric.IntIsIntegral

  /**
    * Generic method to construct a ShapeFunction based on two parametric types: T and X, and two functions.
    *
    * @param f a function which, given two Double values, yields a Fitness (examples are Dirac delta function or logistic function)
    * @param g a function which, given a Fitness, yields a Fitness (examples are identity and the negation method)
    * @param name the name of the shape
    * @tparam T the underlying trait type
    * @tparam X the underlying eco factor type
    * @return ShapeFunction object
    */
  def apply[T: Numeric, X: Numeric](f: (Double, Double) => Fitness, g: Fitness => Fitness, name: String): ShapeFunction[T, X] = ShapeFunction[T, X](name, { x: X => t: T => g(f(implicitly[Numeric[T]].toDouble(t), implicitly[Numeric[X]].toDouble(x))) })

  /**
    * Generic method to construct a ShapeFunction based on two parametric types: T and X, two functions and a constant.
    *
    * @param f a function which, given two Double values, yields a Fitness (examples are Dirac delta function or logistic function)
    * @param g a function which, given a Fitness, yields a Fitness (examples are identity and the negation method)
    * @param k the constant which, applied to a sigmoid function (such as logistic), varies its shape -- a value of zero would result in
    * the Dirac delta function; a value of +infinity would result in half for all inputs (i.e. no discrimination).
    * @param name the name of the shape
    * @tparam T the underlying trait type
    * @tparam X the underlying eco factor type
    * @return ShapeFunction object
    */
  def apply[T: Numeric, X: Numeric](f: Double => (Double, Double) => Fitness, g: Fitness => Fitness, k: Double, name: String): ShapeFunction[T, X] = ShapeFunction[T, X](name, { x: X => t: T => g(f(k)(implicitly[Numeric[T]].toDouble(t), implicitly[Numeric[X]].toDouble(x))) })

  /**
    * This is the standard value for the logistic function.
    */
  val k = 1

  /**
    * Following are the "usual" four shape functions: Dirac and Logistic (regular and inverted).
    * They are "usual" because the ShapeFunction is based on Double, Double.
    * If you need other shapes, simply build a ShapeFunction in a similar manner to here.
    */
  val shapeDirac: ShapeFunction[Double, Double] = ShapeFunction(dirac, identity, "shapeDirac")
  val shapeDiracInv: ShapeFunction[Double, Double] = ShapeFunction(dirac, _.-, "shapeDirac-i")
  val shapeLogistic: ShapeFunction[Double, Double] = ShapeFunction(logistic, identity, k, "shapeLogistic")
  val shapeLogisticInv: ShapeFunction[Double, Double] = ShapeFunction(logistic, _.-, k, "shapeLogistic-i")

  /**
    * Following are the Int/Double values of the four shape functions: Dirac and Logistic (regular and inverted).
    */
  val shapeDirac_I: ShapeFunction[Double, Int] = ShapeFunction(dirac, identity, "shapeDirac")
  val shapeDiracInv_I: ShapeFunction[Double, Int] = ShapeFunction(dirac, _.-, "shapeDirac-i")
  val shapeLogistic_I: ShapeFunction[Double, Int] = ShapeFunction(logistic, identity, k, "shapeLogistic")
  val shapeLogisticInv_I: ShapeFunction[Double, Int] = ShapeFunction(logistic, _.-, k, "shapeLogistic-i")

  /**
    * Method to compare x1 with x2 and determine viability.
    * The shape of this function is a Dirac "delta" function.
    *
    * @param x1 the first parameter
    * @param x2 the second parameter
    * @return viable if x1>=x2 otherwise, nonViable
    */
  def dirac(x1: Double, x2: Double): Fitness = if (x1 >= x2) viable else nonViable

  /**
    * Method to compare x1 with x2 using a shapeLogistic function.
    * The shape of this function is a sigmoid function.
    *
    * @param k the scale factor for the logistic function: if k = 1, we get the "standard" logistic function;
    * if k = +infinity, then all results would be essentially one half, if k = 0, the logistic function behaves
    * identically with the dirac function.
    * @param x1 the first parameter
    * @param x2 the second parameter
    * @return approximately 1 if x1 >> x2, approximately 0 if x1 << x2, and exactly 1/2 if x1--x2
    */
  def logistic(k: Double)(x1: Double, x2: Double): Fitness = Fitness(logisticFunction(x1 - x2, k))

  // The standard sigmoid: 1 / (1 + e^(-x/k)).
  private def logisticFunction(x: Double, k: Double) = 1 / (1 + math.exp(-x / k))
} | rchillyard/Darwin | src/main/scala/com/phasmid/darwin/eco/Fitness.scala | Scala | gpl-3.0 | 11,483 |
package com.perevillega.sesms
import org.scalatest.FunSpec
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.PrivateMethodTester
import play.api.libs.json.{JsArray, Json}
import com.perevillega.sesms.support.Country
/**
* Tests for Fixtures
* User: pvillega
*/
/** Unit tests for the Fixtures schedule generator and its JSON (de)serialisation. */
class FixturesTest extends FunSpec with ShouldMatchers with PrivateMethodTester {
  describe("A Fixtures object") {
    it("should generate empty schedules if we don't give teams") {
      val fixtures = Fixtures.createFixtures(Nil)
      fixtures.isEmpty should be(true)
    }
    it("should generate valid schedules if we give one team") {
      val teams = List("A")
      val fixtures = Fixtures.createFixtures(teams)
      fixtures.isEmpty should be(false)
      fixtures(0).week should be(1)
      // a single team is paired with the placeholder ("bye") opponent
      fixtures(0).games.flatten.sorted should be(List(List("A",Fixtures.placeholder)).flatten.sorted)
    }
    it("should valid generate valid schedules if the number of teams is even") {
      val teams = List("A", "B", "C", "D", "E", "F")
      val fixtures = Fixtures.createFixtures(teams)
      fixtures.isEmpty should be(false)
      fixtures.length should be(teams.length-1)
      fixtures(0).week should be(1)
      fixtures(fixtures.length-1).week should be(teams.length-1)
      // every week's pairings must cover exactly the full set of teams
      // (parses as `(lhs == rhs) should be(true)` — a `shouldEqual` would give better failure messages)
      fixtures.foreach{ f => f.games.flatten.sorted == teams.sorted should be(true) }
    }
    it("should valid generate valid schedules if the number of teams is odd") {
      val teams = List("A", "B", "C", "D", "E")
      val fixtures = Fixtures.createFixtures(teams)
      fixtures.isEmpty should be(false)
      fixtures.length should be(teams.length) //consider we have the placeholder added to these fixtures
      fixtures(0).week should be(1)
      fixtures(fixtures.length-1).week should be(teams.length) //consider we have the placeholder added to these fixtures
      fixtures.foreach{ f => f.games.flatten.sorted == (Fixtures.placeholder::teams).sorted should be(true) }
      // we have additional off days as teams are odd
      fixtures.foreach{ f => f.games.exists(l => l(0) == Fixtures.placeholder || l(1) == Fixtures.placeholder) should be(true) }
    }
    it("should load Fixtures from JSON") {
      val json = """{"week":1,"games":[["A","B"],["C","D"]]}"""
      val fixtures = Fixtures.fromJson(Json.parse(json))
      fixtures.week should be(1)
      fixtures.games should be(List(List("A","B"), List("C", "D")))
    }
    it("should convert Fixtures to JSON") {
      val fixtures = Fixtures(1, List(List("A","B"), List("C", "D")))
      val json = Fixtures.toJson(fixtures)
      // NOTE(review): `\\` is play-json's *recursive* lookup; a direct field access is
      // normally `\` — confirm this compiles/behaves as intended on the play version in use.
      (json \\ "week").as[Int] should be(1)
      val array = (json \\ "games").as[JsArray]
      array(0)(0).as[String] should be("A")
      array(0)(1).as[String] should be("B")
      array(1)(0).as[String] should be("C")
      array(1)(1).as[String] should be("D")
    }
  }
  describe("A Fixtures class") {
    // Empty
  }
}
| pvillega/S-ESMS | src/test/scala/com/perevillega/sesms/FixturesTest.scala | Scala | lgpl-3.0 | 2,919 |
package com.github.zenpie.macrowave.internal.scanner
import java.util
import java.util.ConcurrentModificationException
import com.github.zenpie.macrowave.internal.scanner
import scala.collection.mutable.Iterable
import scala.util.hashing.MurmurHash3
object DfaState {
  /** Builds a state containing the given values (duplicates collapse; order is irrelevant). */
  def apply(values: Int*): DfaState =
    values.foldLeft(new DfaState(values.length + 16))((state, value) => state += value)

  /** An empty state with the default initial capacity. */
  def empty: DfaState = new DfaState(16)
}
/**
 * A mutable, sorted set of Int values backed by a growable array, representing one
 * DFA state. Equality and hashCode are order-independent (set semantics).
 */
final class DfaState(capacity: Int) extends Iterable[Int] {
  private var _action: Action = NoAction
  // Sorted storage; only the first `size_` slots are meaningful.
  private var data = new Array[Int](capacity)
  private var size_ = 0
  // First action seen wins, unless a TokenAction with a smaller token value arrives
  // (NOTE(review): presumably earlier/higher-priority tokens have smaller values — confirm).
  def updateAction(action: Action): Unit = (this._action, action) match {
    case (NoAction, _) => this._action = action
    case (TokenAction(a), TokenAction(b)) if b.value < a.value => this._action = action
    case _ => ()
  }
  def action = _action
  override def size = size_
  // Inserts `value` keeping `data` sorted; no-op if already present.
  def +=(value: Int): DfaState.this.type = {
    var i = util.Arrays.binarySearch(data, 0, size_, value)
    if (i < 0) {
      // binarySearch encodes the insertion point as -(point) - 1; the two steps undo that.
      i += 1
      i = -i
      if (size_ >= data.length) {
        // Grow by 1.5x when full.
        val ncap = Math.ceil(data.length * 1.5f).toInt
        data = util.Arrays.copyOf(data, ncap)
      }
      if (i >= size_) {
        data(size_) = value
      } else {
        // Shift the tail right to open a slot at the insertion point.
        System.arraycopy(data, i, data, i + 1, size_ - i)
        data(i) = value
      }
      size_ += 1
    }
    this
  }
  def ++=(values: TraversableOnce[Int]): DfaState.this.type = {
    values.foreach(this.+=)
    this
  }
  // Removes `value` if present by shifting the tail left over it.
  def -=(value: Int): DfaState.this.type = {
    val i = util.Arrays.binarySearch(data, 0, size_, value)
    if (i >= 0) {
      System.arraycopy(data, i + 1, data, i, size_ - (i + 1))
      size_ -= 1
    }
    this
  }
  // Short-circuits on the first element failing the predicate.
  override def forall(f: Int => Boolean): Boolean = {
    var result = true
    val iter = iterator
    while (iter.hasNext && result) {
      result &&= f(iter.next())
    }
    result
  }
  // Order-independent hash, consistent with the set-equality in `equals` below.
  override def hashCode(): Int =
    MurmurHash3.unorderedHash(this)
  override def toString(): String =
    mkString("DfaState(", ",", ")")
  // Two states are equal iff they contain exactly the same values.
  override def equals(other: Any): Boolean = other match {
    case s: DfaState =>
      s.size == this.size &&
      s.forall(this.contains)
    case _ => false
  }
  def contains(value: Int): Boolean =
    util.Arrays.binarySearch(data, 0, size_, value) >= 0
  override def iterator: Iterator[Int] = new Iterator[Int] {
    private var i = 0
    override def hasNext: Boolean = i < DfaState.this.size
    override def next(): Int = {
      // NOTE(review): throwing ConcurrentModificationException on exhaustion is
      // unconventional — NoSuchElementException is the usual Iterator contract.
      if (i >= DfaState.this.size) {
        throw new ConcurrentModificationException()
      }
      val r = data(i)
      i += 1
      r
    }
  }
}
| zen-pie/macrowave | src/main/scala/com/github/zenpie/macrowave/internal/scanner/DfaState.scala | Scala | mit | 2,736 |
package org.scalamu.testing.junit
import org.junit.runner.RunWith
import org.scalatest.{FlatSpec, Matchers}
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class RunWithJar extends FlatSpec with Matchers {
  /* Smoke test: a trivially true expectation proving the JUnit runner executes ScalaTest specs. */
  "RunWithInherited" should "run scalatest test with Junit runner" in {
    10 shouldBe >(1)
  }
}
| sugakandrey/scalamu | testing/junit/src/test/scala/org/scalamu/testing/junit/RunWithJar.scala | Scala | gpl-3.0 | 329 |
/**
*
*/
package net.machinemuse.numina.network
import java.io._
import java.util
import cpw.mods.fml.common.network.NetworkRegistry
import cpw.mods.fml.common.network.internal.FMLProxyPacket
import io.netty.buffer.ByteBufInputStream
import io.netty.channel.ChannelHandler.Sharable
import io.netty.channel.ChannelHandlerContext
import io.netty.handler.codec.MessageToMessageCodec
import net.machinemuse.numina.general.MuseLogger
import net.machinemuse.numina.scala.MuseNumericRegistry
import net.minecraft.client.Minecraft
import net.minecraft.client.network.NetHandlerPlayClient
import net.minecraft.network.NetHandlerPlayServer
/**
* @author MachineMuse
*/
@Sharable
object MusePacketHandler extends MessageToMessageCodec[FMLProxyPacket, MusePacket]{
  /** Name of the FML network channel this codec is registered on. */
  val networkChannelName: String = "Numina"
  /** Registry mapping numeric packet-type ids to their (de)serialisers. */
  val packagers = new MuseNumericRegistry[MusePackager]
  val channels = NetworkRegistry.INSTANCE.newChannel(networkChannelName, this)
  // Decodes an incoming proxy packet: the first Int of the payload selects the packager,
  // the remainder is handed to it, and the resulting packet is dispatched on the
  // appropriate (client or server) side.
  override def decode(ctx: ChannelHandlerContext, msg: FMLProxyPacket, out: util.List[AnyRef]): Unit = {
    val data = new DataInputStream(new ByteBufInputStream(msg.payload))
    var packetType: Int = 0
    try {
      // NOTE(review): only the two known handler types are matched; any other handler
      // raises a MatchError which the catch below (IOException only) does NOT swallow.
      msg.handler match {
        case h: NetHandlerPlayServer =>
          val player = h.playerEntity
          packetType = data.readInt
          for {// For comprehension for an option
            packager <- MusePacketHandler.packagers.get(packetType)
            packet = packager.read(data, player)
          } {
            packet handleServer player
          }
        case h: NetHandlerPlayClient =>
          val player = Minecraft.getMinecraft.thePlayer
          packetType = data.readInt
          for {// For comprehension for an option
            packager <- MusePacketHandler.packagers.get(packetType)
            packet = packager.read(data, player)
          } {
            packet handleClient player
          }
      }
    } catch {
      case e: IOException => MuseLogger.logException("PROBLEM READING PACKET IN DECODE STEP D:", e)
    }
  }
  // Encodes by delegating to the packet's own FML proxy representation.
  override def encode(ctx: ChannelHandlerContext, msg: MusePacket, out: util.List[AnyRef]) = {
    out.add(msg.getFMLProxyPacket)
  }
}
| MachineMuse/Numina | src/main/scala/net/machinemuse/numina/network/MusePacketHandler.scala | Scala | bsd-2-clause | 2,163 |
/*
* Copyright 2008-present MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala
import scala.collection.JavaConverters._
import org.bson.BsonDocument
import org.bson.codecs.BsonValueCodecProvider
import org.bson.codecs.configuration.CodecRegistries.fromProviders
import com.mongodb.reactivestreams.client.{ ListCollectionsPublisher, MongoDatabase => JMongoDatabase }
import org.mongodb.scala.bson.conversions.Bson
import org.mongodb.scala.model._
import org.scalamock.scalatest.proxy.MockFactory
import org.scalatest.{ FlatSpec, Matchers }
/**
 * Verifies that the Scala MongoDatabase wrapper delegates every operation to the
 * underlying Java reactive-streams MongoDatabase with the expected arguments.
 */
class MongoDatabaseSpec extends BaseSpec with MockFactory {

  val wrapped = mock[JMongoDatabase]
  val clientSession = mock[ClientSession]
  val mongoDatabase = MongoDatabase(wrapped)
  val command = Document()
  val readPreference: ReadPreference = ReadPreference.secondary()

  "MongoDatabase" should "have the same methods as the wrapped MongoDatabase" in {
    val wrapped = classOf[JMongoDatabase].getMethods.map(_.getName)
    val local = classOf[MongoDatabase].getMethods.map(_.getName)

    wrapped.foreach((name: String) => {
      // Java `getFoo` accessors are exposed as `foo` on the Scala side.
      // NOTE(review): `|` here is the non-short-circuit boolean OR; harmless, but `||` is conventional.
      val cleanedName = name.stripPrefix("get")
      assert(local.contains(name) | local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name")
    })
  }

  it should "return the underlying getCollection[T]" in {
    wrapped.expects(Symbol("getCollection"))("collectionName", classOf[Document]).once()
    wrapped.expects(Symbol("getCollection"))("collectionName", classOf[BsonDocument]).once()

    mongoDatabase.getCollection("collectionName")
    mongoDatabase.getCollection[BsonDocument]("collectionName")
  }

  it should "return the underlying getName" in {
    wrapped.expects(Symbol("getName"))().once()

    mongoDatabase.name
  }

  it should "return the underlying getCodecRegistry" in {
    wrapped.expects(Symbol("getCodecRegistry"))().once()

    mongoDatabase.codecRegistry
  }

  it should "return the underlying getReadPreference" in {
    wrapped.expects(Symbol("getReadPreference"))().once()

    mongoDatabase.readPreference
  }

  it should "return the underlying getWriteConcern" in {
    wrapped.expects(Symbol("getWriteConcern"))().once()

    mongoDatabase.writeConcern
  }

  it should "return the underlying getReadConcern" in {
    wrapped.expects(Symbol("getReadConcern"))().once()

    mongoDatabase.readConcern
  }

  it should "return the underlying withCodecRegistry" in {
    val codecRegistry = fromProviders(new BsonValueCodecProvider())

    wrapped.expects(Symbol("withCodecRegistry"))(codecRegistry).once()

    mongoDatabase.withCodecRegistry(codecRegistry)
  }

  it should "return the underlying withReadPreference" in {
    wrapped.expects(Symbol("withReadPreference"))(readPreference).once()

    mongoDatabase.withReadPreference(readPreference)
  }

  it should "return the underlying withWriteConcern" in {
    val writeConcern = WriteConcern.MAJORITY
    wrapped.expects(Symbol("withWriteConcern"))(writeConcern).once()

    mongoDatabase.withWriteConcern(writeConcern)
  }

  it should "return the underlying withReadConcern" in {
    val readConcern = ReadConcern.MAJORITY
    wrapped.expects(Symbol("withReadConcern"))(readConcern).once()

    mongoDatabase.withReadConcern(readConcern)
  }

  // Each command variant is exercised with and without an explicit client session,
  // and for both the default Document and a custom BsonDocument result type.
  it should "call the underlying runCommand[T] when writing" in {
    wrapped.expects(Symbol("runCommand"))(command, classOf[Document]).once()
    wrapped.expects(Symbol("runCommand"))(command, classOf[BsonDocument]).once()
    wrapped.expects(Symbol("runCommand"))(clientSession, command, classOf[Document]).once()
    wrapped.expects(Symbol("runCommand"))(clientSession, command, classOf[BsonDocument]).once()

    mongoDatabase.runCommand(command)
    mongoDatabase.runCommand[BsonDocument](command)
    mongoDatabase.runCommand(clientSession, command)
    mongoDatabase.runCommand[BsonDocument](clientSession, command)
  }

  it should "call the underlying runCommand[T] when reading" in {
    wrapped.expects(Symbol("runCommand"))(command, readPreference, classOf[Document]).once()
    wrapped.expects(Symbol("runCommand"))(command, readPreference, classOf[BsonDocument]).once()
    wrapped.expects(Symbol("runCommand"))(clientSession, command, readPreference, classOf[Document]).once()
    wrapped.expects(Symbol("runCommand"))(clientSession, command, readPreference, classOf[BsonDocument]).once()

    mongoDatabase.runCommand(command, readPreference)
    mongoDatabase.runCommand[BsonDocument](command, readPreference)
    mongoDatabase.runCommand(clientSession, command, readPreference)
    mongoDatabase.runCommand[BsonDocument](clientSession, command, readPreference)
  }

  it should "call the underlying drop()" in {
    wrapped.expects(Symbol("drop"))().once()
    wrapped.expects(Symbol("drop"))(clientSession).once()

    mongoDatabase.drop()
    mongoDatabase.drop(clientSession)
  }

  it should "call the underlying listCollectionNames()" in {
    wrapped.expects(Symbol("listCollectionNames"))().once()
    wrapped.expects(Symbol("listCollectionNames"))(clientSession).once()

    mongoDatabase.listCollectionNames()
    mongoDatabase.listCollectionNames(clientSession)
  }

  it should "call the underlying listCollections()" in {
    wrapped.expects(Symbol("listCollections"))(*).returns(stub[ListCollectionsPublisher[Document]]).once()
    wrapped
      .expects(Symbol("listCollections"))(classOf[BsonDocument])
      .returns(stub[ListCollectionsPublisher[BsonDocument]])
      .once()
    wrapped
      .expects(Symbol("listCollections"))(clientSession, *)
      .returns(stub[ListCollectionsPublisher[Document]])
      .once()
    wrapped
      .expects(Symbol("listCollections"))(clientSession, classOf[BsonDocument])
      .returns(stub[ListCollectionsPublisher[BsonDocument]])
      .once()

    mongoDatabase.listCollections()
    mongoDatabase.listCollections[BsonDocument]()
    mongoDatabase.listCollections(clientSession)
    mongoDatabase.listCollections[BsonDocument](clientSession)
  }

  it should "call the underlying createCollection()" in {
    val options = CreateCollectionOptions()
      .capped(true)
      .validationOptions(
        ValidationOptions()
          .validator(Document("""{level: {$gte: 10}}"""))
          .validationLevel(ValidationLevel.MODERATE)
          .validationAction(ValidationAction.WARN)
      )
      .indexOptionDefaults(IndexOptionDefaults().storageEngine(Document("""{storageEngine: { mmapv1: {}}}""")))
      .storageEngineOptions(Document("""{ wiredTiger: {}}"""))

    wrapped.expects(Symbol("createCollection"))("collectionName").once()
    wrapped.expects(Symbol("createCollection"))("collectionName", options).once()
    wrapped.expects(Symbol("createCollection"))(clientSession, "collectionName").once()
    wrapped.expects(Symbol("createCollection"))(clientSession, "collectionName", options).once()

    mongoDatabase.createCollection("collectionName")
    mongoDatabase.createCollection("collectionName", options)
    mongoDatabase.createCollection(clientSession, "collectionName")
    mongoDatabase.createCollection(clientSession, "collectionName", options)
  }

  it should "call the underlying createView()" in {
    val options = CreateViewOptions().collation(Collation.builder().locale("en").build())
    val pipeline = List.empty[Bson]

    wrapped.expects(Symbol("createView"))("viewName", "collectionName", pipeline.asJava).once()
    wrapped.expects(Symbol("createView"))("viewName", "collectionName", pipeline.asJava, options).once()
    wrapped.expects(Symbol("createView"))(clientSession, "viewName", "collectionName", pipeline.asJava).once()
    wrapped.expects(Symbol("createView"))(clientSession, "viewName", "collectionName", pipeline.asJava, options).once()

    mongoDatabase.createView("viewName", "collectionName", pipeline)
    mongoDatabase.createView("viewName", "collectionName", pipeline, options)
    mongoDatabase.createView(clientSession, "viewName", "collectionName", pipeline)
    mongoDatabase.createView(clientSession, "viewName", "collectionName", pipeline, options)
  }

  it should "call the underlying watch" in {
    val pipeline = List(Document("$match" -> 1))

    wrapped.expects(Symbol("watch"))(classOf[Document]).once()
    wrapped.expects(Symbol("watch"))(pipeline.asJava, classOf[Document]).once()
    wrapped.expects(Symbol("watch"))(pipeline.asJava, classOf[BsonDocument]).once()
    wrapped.expects(Symbol("watch"))(clientSession, pipeline.asJava, classOf[Document]).once()
    wrapped.expects(Symbol("watch"))(clientSession, pipeline.asJava, classOf[BsonDocument]).once()

    mongoDatabase.watch() shouldBe a[ChangeStreamObservable[_]]
    mongoDatabase.watch(pipeline) shouldBe a[ChangeStreamObservable[_]]
    mongoDatabase.watch[BsonDocument](pipeline) shouldBe a[ChangeStreamObservable[_]]
    mongoDatabase.watch(clientSession, pipeline) shouldBe a[ChangeStreamObservable[_]]
    mongoDatabase.watch[BsonDocument](clientSession, pipeline) shouldBe a[ChangeStreamObservable[_]]
  }

  it should "call the underlying aggregate" in {
    val pipeline = List(Document("$match" -> 1))

    wrapped.expects(Symbol("aggregate"))(pipeline.asJava, classOf[Document]).once()
    wrapped.expects(Symbol("aggregate"))(pipeline.asJava, classOf[BsonDocument]).once()
    wrapped.expects(Symbol("aggregate"))(clientSession, pipeline.asJava, classOf[Document]).once()
    wrapped.expects(Symbol("aggregate"))(clientSession, pipeline.asJava, classOf[BsonDocument]).once()

    mongoDatabase.aggregate(pipeline) shouldBe a[AggregateObservable[_]]
    mongoDatabase.aggregate[BsonDocument](pipeline) shouldBe a[AggregateObservable[_]]
    mongoDatabase.aggregate(clientSession, pipeline) shouldBe a[AggregateObservable[_]]
    mongoDatabase.aggregate[BsonDocument](clientSession, pipeline) shouldBe a[AggregateObservable[_]]
  }
}
| rozza/mongo-java-driver | driver-scala/src/test/scala/org/mongodb/scala/MongoDatabaseSpec.scala | Scala | apache-2.0 | 10,349 |
/*
* Copyright 2014-16 Intelix Pty Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package au.com.intelix.rs.core.services.internal
import java.util
import akka.actor.ActorRef
import au.com.intelix.config.ConfigOps.wrap
import au.com.intelix.evt.TraceE
import au.com.intelix.rs.core.services.SequentialMessageIdGenerator
import au.com.intelix.rs.core.services.internal.InternalMessages.DownstreamDemandRequest
import scala.collection.mutable
object DemandProducerContract {
  /** Trace event raised when demand production towards a target starts. */
  case object StartedDemandProducer extends TraceE
  /** Trace event raised when demand production towards a target (or all targets) stops. */
  case object StoppedDemandProducer extends TraceE
}
/**
 * Maintains per-producer demand contracts for backpressure: whenever a contract's
 * outstanding demand drops to the low watermark, a DownstreamDemandRequest tops it
 * back up to the high watermark.
 */
trait DemandProducerContract extends SimpleInMemoryAcknowledgedDelivery {

  import DemandProducerContract._

  private val idGenerator = new SequentialMessageIdGenerator()

  // Refill band: top demand up to HighWatermark once it falls to LowWatermark.
  private val HighWatermark = nodeCfg.asLong("service-port.backpressure.high-watermark", 500)
  private val LowWatermark = nodeCfg.asLong("service-port.backpressure.low-watermark", 200)

  // NOTE(review): `targets` is never read or written anywhere in this trait — appears to be dead code.
  private val targets: Set[ActorRef] = Set.empty

  // Active contracts, kept in sync: map for lookup by producer, list for allocation-free iteration.
  private val pending: mutable.Map[ActorRef, LocalDemand] = mutable.HashMap()
  private val pendingList: util.ArrayList[LocalDemand] = new util.ArrayList[LocalDemand]()

  /** Stops producing demand towards `ref` and forgets its contract. */
  def cancelDemandProducerFor(ref: ActorRef) = {
    pending get ref foreach { c =>
      raise(StoppedDemandProducer, 'target -> ref)
      pending -= ref
      pendingList remove c
    }
  }

  /** Drops every contract at once. */
  def cancelAllDemandProducers(): Unit = {
    raise(StoppedDemandProducer, 'target -> "all")
    pending clear()
    pendingList clear()
  }

  /** Registers a contract for `ref` (idempotent) and immediately issues initial demand. */
  def startDemandProducerFor(ref: ActorRef, withAcknowledgedDelivery: Boolean) = {
    if (!pending.contains(ref)) {
      raise(StartedDemandProducer, 'target -> ref, 'seed -> idGenerator.seed)
      val contract = new LocalDemand(ref, withAcknowledgedDelivery)
      pending += ref -> contract
      pendingList add contract
      checkDemand()
    }
  }

  override def preStart(): Unit = {
    super.preStart()
    checkDemand()
  }

  // Index-based loop avoids iterator allocation on this hot path.
  def checkDemand() = {
    var idx = 0
    while (idx < pendingList.size()) {
      val nextContract = pendingList get idx
      nextContract check()
      idx += 1
    }
  }

  onTick {
    checkDemand()
  }

  // NOTE(review): the `c` count parameter is ignored — demand is decremented by exactly
  // one per call; confirm callers invoke this once per delivered message.
  def upstreamDemandFulfilled(id: ActorRef, c: Int) =
    pending get id foreach { d =>
      d.dec()
      d.check()
    }

  // One demand contract towards a single producer actor.
  private class LocalDemand(val ref: ActorRef, withAcknowledgedDelivery: Boolean) {
    private var demand = 0L

    def dec() = demand -= 1

    def hasDemand = demand > 0

    // Refills demand to HighWatermark once it reaches the low band, requesting the delta.
    def check() =
      if (demand <= LowWatermark) {
        val newDemand = HighWatermark - demand
        val msg = DownstreamDemandRequest(idGenerator.next(), newDemand)
        if (withAcknowledgedDelivery)
          acknowledgedDelivery(ref, msg, SpecificDestination(ref))
        else ref ! msg
        demand = HighWatermark
      }
  }

}
| intelix/reactiveservices | platform/core/src/main/scala/au/com/intelix/rs/core/services/internal/DemandProducerContract.scala | Scala | apache-2.0 | 3,320 |
package sectery.producers
import java.sql.Connection
import java.sql.Timestamp
import java.util.concurrent.TimeUnit
import org.ocpsoft.prettytime.PrettyTime
import sectery.Db
import sectery.Producer
import sectery.Rx
import sectery.Tx
import zio.Clock
import zio.ZIO
/**
 * IRC-style "@tell" producer: stores messages for an absent nick and delivers
 * (then deletes) them the next time that nick says anything in the channel.
 */
object Tell extends Producer:

  // Matches "@tell <nick> <message>", capturing the target nick and the message body.
  private val tell = """^@tell\s+([^\s]+)\s+(.+)\s*$""".r

  /** Usage information surfaced by the help command. */
  override def help(): Iterable[Info] =
    Some(
      Info(
        "@tell",
        "@tell <nick> <message>, e.g. @tell bob remember the milk"
      )
    )

  /** Creates the backing TELL table (channel, sender, recipient, message, time) if absent. */
  override def init(): ZIO[Db.Db, Throwable, Unit] =
    Db { conn =>
      // NOTE(review): _MESSAGE_ is VARCHAR(64); longer messages will fail or be
      // truncated depending on the database — confirm the intended limit.
      val s =
        """|CREATE TABLE IF NOT EXISTS TELL (
           | _CHANNEL_ VARCHAR(64) NOT NULL,
           | _FROM_ VARCHAR(64) NOT NULL,
           | _TO_ VARCHAR(64) NOT NULL,
           | _MESSAGE_ VARCHAR(64) NOT NULL,
           | _TIMESTAMP_ TIMESTAMP NOT NULL
           |)
           |""".stripMargin
      val stmt = conn.createStatement
      stmt.executeUpdate(s)
      stmt.close
    }

  /**
   * Either records a new message ("@tell <nick> <msg>") or, for any other traffic,
   * delivers and deletes all messages stored for the speaking nick in this channel.
   */
  override def apply(
      m: Rx
  ): ZIO[Db.Db with Clock, Throwable, Iterable[Tx]] =
    m match
      case Rx(c, from, tell(to, message)) =>
        for {
          now <- Clock.currentTime(TimeUnit.MILLISECONDS)
          reply <- Db { conn =>
            val s =
              "INSERT INTO TELL (_CHANNEL_, _FROM_, _TO_, _MESSAGE_, _TIMESTAMP_) VALUES (?, ?, ?, ?, ?)"
            val stmt = conn.prepareStatement(s)
            stmt.setString(1, c)
            stmt.setString(2, from)
            stmt.setString(3, to)
            stmt.setString(4, message)
            stmt.setTimestamp(5, new Timestamp(now))
            stmt.executeUpdate()
            stmt.close
            Some(Tx(c, "I will let them know."))
          }
        } yield reply
      case Rx(c, nick, _) =>
        // Fetches every stored message for `nick` in this channel, oldest first
        // (DESC query order + list prepend yields an ascending list).
        def findMessages(conn: Connection): List[Tx] =
          val s =
            """|SELECT _FROM_, _MESSAGE_, _TIMESTAMP_
               |FROM TELL
               |WHERE _CHANNEL_ = ? AND _TO_ = ?
               |ORDER BY _TIMESTAMP_ DESC
               |""".stripMargin
          val stmt = conn.prepareStatement(s)
          stmt.setString(1, c)
          stmt.setString(2, nick)
          val rs = stmt.executeQuery()
          var msgs: List[Tx] = Nil
          while (rs.next()) {
            val from = rs.getString("_FROM_")
            val message = rs.getString("_MESSAGE_")
            // Fix: use getTimestamp, not getDate — the column is a TIMESTAMP and
            // getDate truncates the time-of-day, making PrettyTime report stale
            // relative times (e.g. "1 day ago" instead of "2 hours ago").
            val timestamp = rs.getTimestamp("_TIMESTAMP_")
            val prettytime = (new PrettyTime).format(timestamp)
            msgs = Tx(
              c,
              s"${nick}: ${prettytime}, ${from} said: ${message}"
            ) :: msgs
          }
          stmt.close
          msgs
        // Deletes all messages addressed to `nick` in this channel.
        def dropMessages(conn: Connection): Unit =
          val s =
            "DELETE FROM TELL WHERE _CHANNEL_ = ? AND _TO_ = ?"
          val stmt = conn.prepareStatement(s)
          stmt.setString(1, c)
          stmt.setString(2, nick)
          stmt.executeUpdate()
          stmt.close
        Db { conn =>
          val messages = findMessages(conn)
          dropMessages(conn)
          messages
        }
| earldouglas/sectery | modules/producers/src/main/scala/sectery/producers/Tell.scala | Scala | mit | 3,093 |
package org.jetbrains.plugins.scala
package findUsages
package parameters
import com.intellij.psi._
import com.intellij.psi.search.searches.ReferencesSearch
import com.intellij.psi.search.{PsiSearchHelper, TextOccurenceProcessor, UsageSearchContext}
import com.intellij.util.{Processor, QueryExecutor}
import org.jetbrains.plugins.scala.extensions.inReadAction
import org.jetbrains.plugins.scala.finder.ScalaSourceFilterScope
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.base.ScReferenceElement
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScArgumentExprList, ScAssignStmt}
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScParameter
import scala.collection.mutable
/**
* User: Alexander Podkhalyuzin
* Date: 17.08.2009
*/
/**
 * Finds usages of a Scala method parameter where it appears as a *named argument*
 * at a call site, i.e. the left-hand side of `param = value` inside an argument list.
 *
 * User: Alexander Podkhalyuzin
 * Date: 17.08.2009
 */
class NamingParamsSearcher extends QueryExecutor[PsiReference, ReferencesSearch.SearchParameters] {
  def execute(queryParameters: ReferencesSearch.SearchParameters, consumer: Processor[PsiReference]): Boolean = {
    val project = queryParameters.getProject
    val scope = inReadAction(ScalaSourceFilterScope(queryParameters))
    // Extract the parameter and its name inside a read action; anything that is not a
    // valid ScParameter yields None and the search is a no-op below.
    val data = inReadAction {
      queryParameters.getElementToSearch match {
        case e if !e.isValid => None
        case parameter: ScParameter => Some((parameter, parameter.name))
        case _ => None
      }
    }
    data match {
      case Some((parameter, name)) =>
        // NOTE(review): this set is never added to, so the `contains` check below is
        // currently a no-op de-duplication guard.
        val collectedReferences = new mutable.HashSet[PsiReference]
        val processor = new TextOccurenceProcessor {
          def execute(element: PsiElement, offsetInElement: Int): Boolean = {
            val references = inReadAction(element.getReferences)
            // Only references covering this textual occurrence are considered.
            for (ref <- references if ref.getRangeInElement.contains(offsetInElement) && !collectedReferences.contains(ref)) {
              ref match {
                case refElement: ScReferenceElement =>
                  inReadAction {
                    refElement.getParent match {
                      // Only the LHS of `name = value` directly inside an argument list counts
                      // as a named-argument usage.
                      case assign: ScAssignStmt if assign.getLExpression == refElement &&
                        assign.getParent.isInstanceOf[ScArgumentExprList] =>
                        Option(refElement.resolve()) match {
                          case Some(`parameter`) => if (!consumer.process(ref)) return false
                          case Some(x: ScParameter) =>
                            // The reference may resolve to a synthetic parameter; map it back
                            // to its source parameter before comparing (presumably for
                            // compiler-generated methods — confirm).
                            ScalaPsiUtil.parameterForSyntheticParameter(x) match {
                              case Some(realParam) =>
                                if (realParam == parameter && !consumer.process(ref)) return false
                              case None =>
                            }
                          case _ =>
                        }
                      case _ =>
                    }
                  }
                case _ =>
              }
            }
            true
          }
        }
        val helper: PsiSearchHelper = PsiSearchHelper.SERVICE.getInstance(project)
        // Drive the PSI text search for the parameter name over code occurrences only.
        helper.processElementsWithWord(processor, scope, name, UsageSearchContext.IN_CODE, true)
      case _ => true
    }
  }
}
| loskutov/intellij-scala | src/org/jetbrains/plugins/scala/findUsages/parameters/NamingParamsSearcher.scala | Scala | apache-2.0 | 3,110 |
// Copyright (C) 2015 ENSIME Authors
// License: GPL 3.0
package org.ensime.util
import Predef.{ any2stringadd => _, _ => _ }
package object map {

  implicit class RichMap[K, V](val map: Map[K, V]) extends AnyVal {
    /**
     * Like `Map.mapValues`, but strict: eagerly applies `f` to every value and
     * returns a concrete `Map` rather than a lazy view.
     */
    def mapValuesEagerly[W](f: V => W): Map[K, W] =
      map.map(entry => entry._1 -> f(entry._2))
  }

  implicit class RichMultiMapSet[K, V](val map: Map[K, Set[V]]) extends AnyVal {
    /**
     * Treating `map` as a multimap, merge it with another similarly structured
     * map: the value sets of shared keys are unioned, removing duplicates.
     */
    def merge(other: Map[K, Set[V]]): Map[K, Set[V]] = {
      val allKeys = map.keySet ++ other.keySet
      allKeys.iterator.map { key =>
        key -> (map.getOrElse(key, Set.empty[V]) ++ other.getOrElse(key, Set.empty[V]))
      }.toMap
    }
  }
}
| eddsteel/ensime | util/src/main/scala/org/ensime/util/map.scala | Scala | gpl-3.0 | 1,208 |
import scala.reflect.runtime.universe._
import scala.reflect.runtime.{currentMirror => cm}
object Test extends App {
  // A local class whose reflective symbol, fully-qualified name and type are printed below.
  class A { def foo = ??? }

  private val classSym = cm.classSymbol(classOf[A])
  println(classSym)
  println(classSym.fullName)
  println(classSym.info)
}
| som-snytt/dotty | tests/disabled/reflect/run/t5256b.scala | Scala | apache-2.0 | 238 |
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
* */
package io.github.mandar2812.dynaml.utils
import breeze.linalg.{DenseMatrix, DenseVector, cholesky, inv}
import io.github.mandar2812.dynaml.pipes.{ReversibleScaler, Scaler}
/**
* Scales attributes of a vector pattern using the sample mean and variance of
* each dimension. This assumes that there is no covariance between the data
* dimensions.
*
* @param mean Sample mean of the data
* @param sigma Sample variance of each data dimension
* @author mandar2812 date: 17/6/16.
*
* */
case class GaussianScaler(mean: DenseVector[Double], sigma: DenseVector[Double])
  extends ReversibleScaler[DenseVector[Double]]{
  // NOTE(review): the scaladoc calls sigma a "variance", but it is used directly as the
  // per-dimension divisor below — which is standard-deviation semantics; confirm.

  // Inverse transform: element-wise multiply by sigma, then shift back by the mean.
  override val i: Scaler[DenseVector[Double]] =
    Scaler((pattern: DenseVector[Double]) => (pattern *:* sigma) + mean)

  // Forward transform: element-wise (x - mean) / sigma.
  override def run(data: DenseVector[Double]): DenseVector[Double] = (data-mean) /:/ sigma

  // Restricts the scaler to a contiguous sub-range of dimensions.
  def apply(r: Range): GaussianScaler = GaussianScaler(mean(r), sigma(r))

  // Scaler for a single dimension.
  def apply(index: Int): UnivariateGaussianScaler = UnivariateGaussianScaler(mean(index), sigma(index))

  // Concatenates two scalers, e.g. for stacked feature vectors.
  def ++(other: GaussianScaler) =
    GaussianScaler(
      DenseVector(this.mean.toArray++other.mean.toArray),
      DenseVector(this.sigma.toArray++other.sigma.toArray))
}
/**
* Scales the attributes of a data pattern using the sample mean and covariance matrix
* calculated on the data set. This allows standardization of multivariate data sets
* where the covariance of individual data dimensions is not negligible.
*
* @param mean Sample mean of data
* @param sigma Sample covariance matrix of data.
* */
case class MVGaussianScaler(mean: DenseVector[Double], sigma: DenseMatrix[Double])
  extends ReversibleScaler[DenseVector[Double]] {

  // Whitening factor: Cholesky decomposition of the inverse covariance.
  val sigmaInverse = cholesky(inv(sigma))

  // Hoisted: the original computed inv(sigmaInverse.t) inside the `i` closure on
  // EVERY inverse application; a matrix inversion per pattern. Computed once, lazily.
  private lazy val sigmaInverseTransposeInv = inv(sigmaInverse.t)

  // Inverse transform: un-whiten and shift back by the mean.
  override val i: Scaler[DenseVector[Double]] =
    Scaler((pattern: DenseVector[Double]) => (sigmaInverseTransposeInv * pattern) + mean)

  // Forward transform: centre, then whiten with the transposed Cholesky factor.
  override def run(data: DenseVector[Double]): DenseVector[Double] = sigmaInverse.t * (data - mean)

  // Restricts the scaler to a contiguous sub-range of dimensions.
  def apply(r: Range): MVGaussianScaler = MVGaussianScaler(mean(r), sigma(r,r))
}
/**
  * Standardises a single scalar attribute using its sample mean and standard deviation.
  */
case class UnivariateGaussianScaler(mean: Double, sigma: Double) extends ReversibleScaler[Double] {
  require(sigma > 0.0, "Std. Deviation for gaussian scaling must be strictly positive!")
  /**
    * The inverse operation of this scaling: maps a standardised value back to original units.
    *
    **/
  override val i = Scaler((pattern: Double) => (pattern*sigma) + mean)
  // Standardise: subtract the mean, divide by the standard deviation.
  override def run(data: Double) = (data-mean)/sigma
}
package models.customer
import play.api.libs.json.Json
import models._
import models.AssetSupport._
import org.joda.time.DateTime
/** Persisted contact-type asset as read from the database. */
case class ContactTypeIn(_id: IdType,
                         createdAt: DateTime,
                         lastModifiedAt: DateTime,
                         active: Boolean,
                         description: String,
                         name: String) extends AssetIn with AssetUpdateBuilder[ContactTypeUpdate] {
  // Builds the update payload: all mutable fields carried over, with the new modification time.
  override def fillup(lastModifiedAt: DateTime): ContactTypeUpdate = ContactTypeUpdate(lastModifiedAt, active, description, name)
}
/** Companion: JSON format and the collection name shared by all contact-type documents. */
object ContactTypeIn extends AssetInCompanion[ContactTypeIn] {
  val collectionName = "contacttypes"
  val format = Json.format[ContactTypeIn]
}
/** Update payload for a contact type: every mutable field plus the modification time. */
case class ContactTypeUpdate(lastModifiedAt: DateTime,
                             active: Boolean,
                             description: String,
                             name: String) extends AssetUpdate
/** Companion: JSON format for updates; writes into the same collection as ContactTypeIn. */
object ContactTypeUpdate extends AssetUpdateCompanion[ContactTypeUpdate] {
  val format = Json.format[ContactTypeUpdate]
  val collectionName = ContactTypeIn.collectionName
}
/** Creation payload: user-supplied fields; id and timestamps come from the AssetBase. */
case class ContactTypeCreate(active: Boolean,
                             description: String,
                             name: String) extends AssetCreate[ContactTypeIn] {
  // Combines the generated base (id + timestamps) with the user-supplied fields.
  override def fillup(b: AssetBase) = ContactTypeIn(b.id, b.createdAt, b.lastModifiedAt, active, description, name)
}
/** Companion: JSON reads only — creation payloads are never written back as-is. */
object ContactTypeCreate {
  implicit val reads = Json.reads[ContactTypeCreate]
}
| tsechov/shoehorn | app/models/customer/contacttype.scala | Scala | apache-2.0 | 1,510 |
package info.armado.ausleihe.admin.transport.requests
import javax.xml.bind.annotation.{XmlAccessType, XmlAccessorType, XmlRootElement}
/** JAXB-serialisable request to change the owner associated with an identity card. */
@XmlRootElement
@XmlAccessorType(XmlAccessType.FIELD)
case class ChangeOwnerRequestDTO(var identityCardBarcodeString: String,
                                 var owner: String) {
  // No-arg constructor required by JAXB for unmarshalling; fields are injected reflectively.
  def this() = this(null, null)
}
| Spielekreis-Darmstadt/lending | lending-admin-interfaces/src/main/scala/info/armado/ausleihe/admin/transport/requests/ChangeOwnerRequestDTO.scala | Scala | apache-2.0 | 352 |
package sky.first.actors
import akka.actor.{Props, ActorRef, Actor}
/**
* Created by szekai on 20/08/2014.
*/
//case class Word(word:String, count:Int)
// Message protocol shared by the map/reduce/aggregate actors.
/** Request to produce/emit the final aggregated result. */
case class Result()
/** A list of (word, count) pairs, presumably emitted by the map stage — confirm against MapActor. */
case class MapData(dataList: List[(String, Int)])
/** Word counts keyed by word, presumably emitted by the reduce stage — confirm against ReduceActor. */
case class ReduceData(reduceDataMap: Map[String,Int])
class MasterActor extends Actor{
val aggregateActor: ActorRef = context.actorOf(Props[AggregateActor], name = "aggregate")
val reduceActor: ActorRef = context.actorOf(Props(classOf[ReduceActor],aggregateActor), name = "reduce")
val mapActor: ActorRef = context.actorOf(Props(classOf[MapActor],reduceActor), name = "map")
def receive: Receive = {
case message: String => mapActor ! message
case message: Result => aggregateActor ! message
}
}
| szekai/akka-example | FirstAkkaApplication/src/main/scala/sky/first/actors/MasterActor.scala | Scala | apache-2.0 | 743 |
/*
* Copyright 2014 Cisco Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cisco.oss.foundation.orchestration.scope.utils
import org.jboss.netty.util.{Timeout, TimerTask, HashedWheelTimer}
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.concurrent.duration.Duration
import org.jboss.netty.handler.timeout.TimeoutException
import java.util.concurrent.TimeUnit
/**
* User: Yair Ogen
* Date: 10/15/13
* Time: 12:06 PM
* taken from: http://stackoverflow.com/a/16305056/200937
*/
object TimeoutScheduler {
  // One shared Netty wheel timer for the whole JVM; cheap for many pending
  // timeouts. NOTE(review): it is never stopped, so its worker thread lives
  // for the lifetime of the process.
  val timer = new HashedWheelTimer()

  /**
   * Schedules `promise` to be failed with a TimeoutException once `after`
   * elapses. Returns the Netty Timeout handle so the caller can cancel the
   * pending task if the real work finishes first.
   */
  def scheduleTimeout(promise: Promise[_], after: Duration) = {
    timer.newTimeout(new TimerTask {
      def run(timeout: Timeout) {
        promise.failure(new TimeoutException("Operation timed out after " + after.toMillis + " millis"))
      }
    }, after.toNanos, TimeUnit.NANOSECONDS)
  }

  /**
   * Wraps `fut` so the returned future fails with a TimeoutException if it
   * does not complete within `after`. The original future keeps running;
   * only the returned view is affected by the timeout.
   */
  def withTimeout[T](fut: Future[T])(implicit ec: ExecutionContext, after: Duration) = {
    val prom = Promise[T]()
    val timeout = TimeoutScheduler.scheduleTimeout(prom, after)
    // Whichever completes first -- real result or timeout failure -- wins.
    val combinedFut = Future.firstCompletedOf(List(fut, prom.future))
    // Cancel the timer task as soon as the real future completes (success
    // or failure), so the wheel does not fire a useless timeout later.
    fut onComplete {
      case result => timeout.cancel()
    }
    combinedFut
  }
}
| foundation-runtime/orchestration | src/main/java/com/cisco/oss/foundation/orchestration/scope/utils/TimeoutScheduler.scala | Scala | apache-2.0 | 1,748 |
import scala.reflect.runtime.universe._
// Exercises WeakTypeTag materialisation for an abstract type parameter:
// T carries no concrete tag inside fooNoTypeTag, so the implicit search must
// fall back to a weak tag for both T and a type constructed from T.
object Test extends dotty.runtime.LegacyApp {
  def fooNoTypeTag[T] = {
    println(implicitly[WeakTypeTag[T]])
    println(implicitly[WeakTypeTag[List[T]]])
  }
  // Instantiating at Int: the printed tags are still the weak ones resolved
  // inside the method body, not Int's full TypeTag.
  fooNoTypeTag[Int]
}
| yusuke2255/dotty | tests/pending/run/macro-reify-abstypetag-typeparams-notags/Test.scala | Scala | bsd-3-clause | 225 |
package edu.chop.cbmi.dataExpress.dataModels.sql
import java.sql.ResultSet
/**
* Specialized [[scala.collection.Iterator]] that handles the peculiarities of dealing with a JDBC ResultSet
*/
abstract class SqlIterator[+T] private[sql](private val sql_query_package: SqlQueryPackage) extends Iterator[T] {
  // True when more_rows already reflects a result_set.next() call whose row
  // has not yet been consumed by next() (the hasNext lookahead).
  protected var cursor_advanced = false
  // Result of the most recent result_set.next() call.
  protected var more_rows = false
  lazy protected val result_set: ResultSet = sql_query_package.resultSet
  lazy protected val meta = sql_query_package.meta

  // Builds the element for the row the cursor is currently positioned on.
  protected def generate_next() : T

  override def hasNext() = {
    // Advance the JDBC cursor at most once per element: hasNext() may be
    // called repeatedly before next() without skipping rows.
    if (!cursor_advanced) {
      cursor_advanced = true
      more_rows = result_set.next()
    }
    more_rows
  }

  override def next(): T = {
    // Consume the lookahead from hasNext(), or advance the cursor ourselves
    // if the caller never called hasNext().
    if (cursor_advanced) cursor_advanced = false
    else more_rows = result_set.next()
    // Some databases will complain if a result set is not properly closed;
    // generate the next set of values first, then close the result set once
    // we know no rows remain.
    val next = generate_next()
    if(!hasNext) {
      result_set.close()
    }
    next
  }

  // Reads column i of the current row, working around JDBC type quirks.
  def next_item_in_column(i: Int) = meta.getColumnType(i) match {
    // Postgres Boolean values such as 't' were having issues because they
    // are mapped to java.sql.Types.BIT.
    // TODO: this code needs to be moved out of here
    case java.sql.Types.BIT => result_set.getBoolean(i)
    case _ => result_set.getObject(i)
  }
} | chop-dbhi/dataexpress | src/main/scala/edu/chop/cbmi/dataExpress/dataModels/sql/SqlIterator.scala | Scala | bsd-2-clause | 1,384 |
package ProjectEuler
import scala.collection.immutable
/**
 * Project Euler problem 40 (Champernowne's constant).
 *
 * The constant is formed by concatenating the positive integers:
 * 0.123456789101112...  If d(n) is the nth digit of the fractional part,
 * champernownesConstant(m) returns d(1) * d(10) * ... * d(10^(m-1)).
 */
object Problem40 {

  /**
   * Returns d(n): the nth digit (1-based) of Champernowne's constant.
   *
   * Skips whole groups of equal-width numbers (nine 1-digit numbers
   * contribute 9 digits, ninety 2-digit numbers contribute 180, ...) until
   * the group containing position n is found, then indexes into the exact
   * number holding that digit. O(log n) time, O(1) space -- unlike the
   * previous version, which eagerly materialised ~1.1 million digits and
   * silently returned wrong results for m > 7.
   */
  private def digitAt(n: Long): Int = {
    require(n >= 1, "digit positions are 1-based")
    var remaining = n
    var width = 1L  // digit-length of the numbers in the current group
    var count = 9L  // how many numbers share that digit-length
    while (remaining > width * count) {
      remaining -= width * count
      width += 1
      count *= 10
    }
    // First number of the group is 10^(width-1); `remaining` is a 1-based
    // offset into the concatenation of this group's numbers.
    val number = math.pow(10, (width - 1).toDouble).toLong + (remaining - 1) / width
    val digitIndex = ((remaining - 1) % width).toInt
    number.toString.charAt(digitIndex) - '0'
  }

  /** Product of the digits d(10^0), d(10^1), ..., d(10^(m-1)). */
  def champernownesConstant(m: Int): Int =
    (0 until m).foldLeft(1)((acc, idx) => acc * digitAt(math.pow(10, idx.toDouble).longValue))
}
| dandxy89/LearningScala | src/main/scala/ProjectEuler/Problem40.scala | Scala | mit | 432 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.manager.utils.logkafka81
import java.util.Properties
import scala.util.matching.Regex
import kafka.manager.utils.LogkafkaNewConfigs
// Default values for every per-log configuration key; mirrored (as strings)
// by LogConfig.ConfigMaps below -- keep the two in sync.
object Defaults {
  val Valid = true              // collect this log
  val FollowLast = true         // resume the last collected file on restart
  val BatchSize = 200           // messages per produce batch
  val LineDelimiter = 10        // 10 means ascii '\n'
  val RemoveDelimiter = true    // strip the delimiter from collected lines
  val Topic = ""
  val Key = ""
  val Partition = -1            // -1 = random partition
  val CompressionCodec= "none"
  val RequiredAcks = 1
  val MessageTimeoutMs = 0      // 0 = wait forever for delivery
  val RegexFilterPattern = ""   // empty = drop nothing
  val LaggingMaxBytes = 0
  val RotateLaggingMaxSec = 0
}
/**
* Configuration settings for a log
* @param valid Enable now or not
* @param followLast If set to "false", when restarting logkafka process, the log_path formatted with current time will be collect;
If set to "true", when restarting logkafka process, the last collecting file will be collected continually
* @param batchSize The batch size of messages to be sent
* @param lineDelimiter Delimiter of log file lines
* @param removeDelimiter Remove delimiter or not when collecting log file lines
* @param topic The topic of messages to be sent
* @param key The key of messages to be sent
* @param partition The partition of messages to be sent.
-1 : random
n(>=0): partition n
* @param compressionCodec Optional compression method of messages: none, gzip, snappy
* @param requiredAcks Number of required acks
* @param messageTimeoutMs Local message timeout. This value is only enforced locally
and limits the time a produced message waits for successful delivery.
A time of 0 is infinite.
* @param regexFilterPattern The messages matching this pattern will be dropped.
*
*/
/**
 * Per-log configuration values (see the scaladoc above for field meanings);
 * defaults come from [[Defaults]].
 */
case class LogConfig(val valid: Boolean = Defaults.Valid,
                     val followLast: Boolean = Defaults.FollowLast,
                     val batchSize: Long = Defaults.BatchSize,
                     val lineDelimiter: Int = Defaults.LineDelimiter,
                     val removeDelimiter: Boolean = Defaults.RemoveDelimiter,
                     val topic: String = Defaults.Topic,
                     val key: String = Defaults.Key,
                     val partition: Int = Defaults.Partition,
                     val compressionCodec: String = Defaults.CompressionCodec,
                     val requiredAcks: Int = Defaults.RequiredAcks,
                     val messageTimeoutMs: Long = Defaults.MessageTimeoutMs,
                     val regexFilterPattern: String = Defaults.RegexFilterPattern,
                     val laggingMaxBytes: Long = Defaults.LaggingMaxBytes,
                     val rotateLaggingMaxSec: Long = Defaults.RotateLaggingMaxSec) {

  /**
   * Serialises this config into a java.util.Properties using the key names
   * declared on the companion object.
   *
   * Fix: laggingMaxBytes and rotateLaggingMaxSec were previously omitted
   * here even though fromProps reads them and ConfigMaps declares them, so a
   * toProps/fromProps round trip silently reset both thresholds to their
   * defaults.
   */
  def toProps: Properties = {
    val props = new Properties()
    import LogConfig._
    props.put(ValidProp, valid.toString)
    props.put(FollowLastProp, followLast.toString)
    props.put(BatchSizeProp, batchSize.toString)
    props.put(LineDelimiterProp, lineDelimiter.toString)
    props.put(RemoveDelimiterProp, removeDelimiter.toString)
    props.put(TopicProp, topic.toString)
    props.put(KeyProp, key.toString)
    props.put(PartitionProp, partition.toString)
    props.put(CompressionCodecProp, compressionCodec.toString)
    props.put(RequiredAcksProp, requiredAcks.toString)
    props.put(MessageTimeoutMsProp, messageTimeoutMs.toString)
    props.put(RegexFilterPatternProp, regexFilterPattern.toString)
    props.put(LaggingMaxBytesProp, laggingMaxBytes.toString)
    props.put(RotateLaggingMaxSecProp, rotateLaggingMaxSec.toString)
    props
  }

  /**
   * Get the absolute value of the given number. If the number is Int.MinValue return 0.
   * This is different from java.lang.Math.abs or scala.math.abs in that they return Int.MinValue (!).
   */
  def abs(n: Int) = if (n == Integer.MIN_VALUE) 0 else math.abs(n)
}
object LogConfig extends LogkafkaNewConfigs {
  import kafka.manager.utils.logkafka81.LogkafkaConfigErrors._
  import kafka.manager.utils._

  // Bounds enforced by the validators below.
  val minLineDelimiter = 0
  val maxLineDelimiter = 255
  val maxRegexFilterPatternLength = 255

  // Property-key names as they appear in the persisted logkafka config.
  val ValidProp = "valid"
  val FollowLastProp = "follow_last"
  val BatchSizeProp = "batchsize"
  val LineDelimiterProp = "line_delimiter"
  val RemoveDelimiterProp = "remove_delimiter"
  val TopicProp = "topic"
  val KeyProp = "key"
  val PartitionProp = "partition"
  val CompressionCodecProp = "compression_codec"
  val RequiredAcksProp = "required_acks"
  val MessageTimeoutMsProp = "message_timeout_ms"
  val RegexFilterPatternProp = "regex_filter_pattern"
  val LaggingMaxBytesProp = "lagging_max_bytes"
  val RotateLaggingMaxSecProp = "rotate_lagging_max_sec"

  // Key -> stringified default for every supported property; must mirror
  // object Defaults above.
  val ConfigMaps = Map(ValidProp -> Defaults.Valid.toString,
    FollowLastProp -> Defaults.FollowLast.toString,
    BatchSizeProp -> Defaults.BatchSize.toString,
    LineDelimiterProp -> Defaults.LineDelimiter.toString,
    RemoveDelimiterProp -> Defaults.RemoveDelimiter.toString,
    TopicProp -> Defaults.Topic.toString,
    KeyProp -> Defaults.Key.toString,
    PartitionProp -> Defaults.Partition.toString,
    CompressionCodecProp -> Defaults.CompressionCodec.toString,
    RequiredAcksProp -> Defaults.RequiredAcks.toString,
    MessageTimeoutMsProp -> Defaults.MessageTimeoutMs.toString,
    RegexFilterPatternProp -> Defaults.RegexFilterPattern.toString,
    LaggingMaxBytesProp -> Defaults.LaggingMaxBytes.toString,
    RotateLaggingMaxSecProp -> Defaults.RotateLaggingMaxSec.toString)
  def configMaps = ConfigMaps
  val ConfigNames = ConfigMaps.keySet
  def configNames = ConfigNames

  /**
   * Parse the given properties instance into a LogConfig object.
   * Missing keys fall back to the defaults from [[Defaults]].
   */
  def fromProps(props: Properties): LogConfig = {
    new LogConfig(valid = props.getProperty(ValidProp, Defaults.Valid.toString).toBoolean,
      followLast = props.getProperty(FollowLastProp, Defaults.FollowLast.toString).toBoolean,
      batchSize = props.getProperty(BatchSizeProp, Defaults.BatchSize.toString).toLong,
      lineDelimiter = props.getProperty(LineDelimiterProp, Defaults.LineDelimiter.toString).toInt,
      removeDelimiter = props.getProperty(RemoveDelimiterProp, Defaults.RemoveDelimiter.toString).toBoolean,
      topic = props.getProperty(TopicProp, Defaults.Topic.toString).toString,
      key = props.getProperty(KeyProp, Defaults.Key.toString).toString,
      partition = props.getProperty(PartitionProp, Defaults.Partition.toString).toInt,
      compressionCodec = props.getProperty(CompressionCodecProp, Defaults.CompressionCodec.toString).toString,
      requiredAcks= props.getProperty(RequiredAcksProp, Defaults.RequiredAcks.toString).toInt,
      messageTimeoutMs = props.getProperty(MessageTimeoutMsProp, Defaults.MessageTimeoutMs.toString).toLong,
      regexFilterPattern = props.getProperty(RegexFilterPatternProp, Defaults.RegexFilterPattern.toString).toString,
      laggingMaxBytes = props.getProperty(LaggingMaxBytesProp, Defaults.LaggingMaxBytes.toString).toLong,
      rotateLaggingMaxSec = props.getProperty(RotateLaggingMaxSecProp, Defaults.RotateLaggingMaxSec.toString).toLong)
  }

  /**
   * Create a log config instance using the given properties and defaults.
   * `overrides` wins over `defaults` via Properties' fallback chaining.
   */
  def fromProps(defaults: Properties, overrides: Properties): LogConfig = {
    val props = new Properties(defaults)
    props.putAll(overrides)
    fromProps(props)
  }

  /**
   * Check that property names are valid.
   */
  def validateNames(props: Properties) {
    import scala.collection.JavaConverters._
    for(name <- props.keys().asScala)
      require(LogConfig.ConfigNames.asJava.contains(name), "Unknown configuration \"%s\".".format(name))
  }

  /**
   * Check that the given properties contain only valid log config names, and that all values can be parsed.
   */
  def validate(props: Properties) {
    validateNames(props)
    validateLineDelimiter(props)
    validateTopic(props)
    validateRegexFilterPattern(props)
    // Round-trip through fromProps to verify every value parses (toBoolean,
    // toInt, toLong will throw on malformed input).
    LogConfig.fromProps(LogConfig().toProps, props) // check that we can parse the values
  }

  /**
   * Check that LineDelimiter is reasonable (an ascii code in [0, 255]).
   * A missing property is fine -- the default applies.
   */
  private def validateLineDelimiter(props: Properties) {
    val lineDelimiter = props.getProperty(LineDelimiterProp)
    if (lineDelimiter == null) return
    checkCondition(lineDelimiter.toInt >= minLineDelimiter && lineDelimiter.toInt <= maxLineDelimiter, LogkafkaConfigErrors.InvalidLineDelimiter)
  }

  /**
   * Check that Topic is reasonable (present; may still be empty).
   */
  private def validateTopic(props: Properties) {
    val topic = props.getProperty(TopicProp)
    require(topic != null , "Topic is null")
  }

  /**
   * Check that RegexFilterPattern is reasonable: within the length bound and
   * compilable as a (Scala/Java) regex.
   */
  private def validateRegexFilterPattern(props: Properties) {
    val regexFilterPattern = props.getProperty(RegexFilterPatternProp)
    if (regexFilterPattern == null) return
    checkCondition(regexFilterPattern.length <= maxRegexFilterPatternLength, LogkafkaConfigErrors.InvalidRegexFilterPatternLength)
    // NOTE(review): this compiles the pattern with Java's regex engine, not
    // pcre2 as the error message claims -- close but not identical dialects.
    val valid = try {
      s"""$regexFilterPattern""".r
      true
    } catch {
      case e: Exception => false
    }
    checkCondition(valid, LogkafkaConfigErrors.InvalidRegexFilterPattern)
  }
}
// Error singletons reported when logkafka property validation fails.
object LogkafkaConfigErrors {
  import kafka.manager.utils.UtilError

  // Constructors are private to this object so the vals below are the only
  // instances; callers compare/report these shared singletons.
  class InvalidLineDelimiter private[LogkafkaConfigErrors] extends UtilError(
    "line delimiter is illegal, should be an decimal number between 0 and 255")
  class InvalidRegexFilterPattern private[LogkafkaConfigErrors] extends UtilError(
    "regex filter pattern is illegal, does not conform to pcre2")
  class InvalidRegexFilterPatternLength private[LogkafkaConfigErrors] extends UtilError(
    "regex filter pattern is illegal, can't be longer than " + LogConfig.maxRegexFilterPatternLength + " characters")

  val InvalidLineDelimiter = new InvalidLineDelimiter
  val InvalidRegexFilterPattern = new InvalidRegexFilterPattern
  val InvalidRegexFilterPatternLength = new InvalidRegexFilterPatternLength
}
| zeph/kafka-manager | app/kafka/manager/utils/logkafka81/LogConfig.scala | Scala | apache-2.0 | 10,973 |
// Copyright 2011 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package havlak
import som._
/**
* LoopStructureGraph
*
* Maintain loop structure for a given CFG.
*
* Two values are maintained for this loop graph, depth, and nesting level.
* For example:
*
* loop nesting level depth
*----------------------------------------
* loop-0 2 0
* loop-1 1 1
* loop-3 1 1
* loop-2 0 2
*
* @author rhundt
*/
final class LoopStructureGraph {
  // Artificial root loop: parent of every top-level loop in the CFG.
  private val root = new SimpleLoop(null, true)
  private val loops = new Vector[SimpleLoop]()
  private var loopCounter = 0

  // Register the root as loop 0 at nesting level 0.
  root.setNestingLevel(0)
  root.setCounter(loopCounter)
  loopCounter += 1
  loops.append(root)

  /** Allocates the next loop, numbers it, and records it in the graph. */
  def createNewLoop(bb: BasicBlock, isReducible: Boolean): SimpleLoop = {
    val newLoop = new SimpleLoop(bb, isReducible)
    newLoop.setCounter(loopCounter)
    loopCounter += 1
    loops.append(newLoop)
    newLoop
  }

  /** Attaches orphan loops to the root, then assigns depth/nesting levels. */
  def calculateNestingLevel(): Unit = {
    // Any non-root loop without a parent is a 1st-level loop: hang it off
    // the artificial root node.
    loops.forEach { loop =>
      if (!loop.isRoot() && loop.getParent() == null) {
        loop.setParent(root)
      }
    }
    // Recursively traverse the tree and assign levels.
    calculateNestingLevelRec(root, 0)
  }

  /** Depth-first walk: depth grows downward, nesting level bubbles upward. */
  def calculateNestingLevelRec(loop: SimpleLoop, depth: Int): Unit = {
    loop.setDepthLevel(depth)
    loop.getChildren().forEach { child =>
      calculateNestingLevelRec(child, depth + 1)
      loop.setNestingLevel(
        Math.max(loop.getNestingLevel(), 1 + child.getNestingLevel()))
    }
  }

  /** Total number of loops, including the artificial root. */
  def getNumLoops(): Int = loops.size()
}
| cedricviaccoz/scala-native | benchmarks/src/main/scala/havlak/LoopStructureGraph.scala | Scala | bsd-3-clause | 2,212 |
/*
* Sentries
* Copyright (c) 2012-2015 Erik van Oosten All rights reserved.
*
* The primary distribution site is https://github.com/erikvanoosten/sentries
*
* This software is released under the terms of the BSD 2-Clause License.
* There is NO WARRANTY. See the file LICENSE for the full text.
*/
package nl.grons.sentries.support
import java.util.EventListener
import com.yammer.metrics.core.MetricName
/**
* Listeners for events from the registry. Listeners must be thread-safe.
*/
// Callback interface observed by the SentriesRegistry. Implementations must
// be thread-safe: callbacks may arrive from any thread mutating the registry.
trait SentriesRegistryListener extends EventListener {

  /**
   * Called when a sentry has been added to the [[nl.grons.sentries.support.SentriesRegistry]].
   *
   * @param name the name of the sentry
   * @param sentry the sentry
   */
  def onSentryAdded(name: MetricName, sentry: NamedSentry)

  /**
   * Called when a sentry has been removed from the [[nl.grons.sentries.support.SentriesRegistry]].
   *
   * @param name the name of the sentry
   */
  def onSentryRemoved(name: MetricName)
}
| erikvanoosten/sentries | src/main/scala/nl/grons/sentries/support/SentriesRegistryListener.scala | Scala | bsd-2-clause | 1,000 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.util.{Map => JMap}
import java.util.concurrent.ConcurrentHashMap
import scala.collection.JavaConverters._
import scala.collection.mutable.LinkedHashSet
import org.apache.avro.{Schema, SchemaNormalization}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.History._
import org.apache.spark.internal.config.Kryo._
import org.apache.spark.internal.config.Network._
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.util.Utils
/**
* Configuration for a Spark application. Used to set various Spark parameters as key-value pairs.
*
* Most of the time, you would create a SparkConf object with `new SparkConf()`, which will load
* values from any `spark.*` Java system properties set in your application as well. In this case,
* parameters you set directly on the `SparkConf` object take priority over system properties.
*
* For unit tests, you can also call `new SparkConf(false)` to skip loading external settings and
* get the same configuration no matter what the system properties are.
*
* All setter methods in this class support chaining. For example, you can write
* `new SparkConf().setMaster("local").setAppName("My app")`.
*
* @param loadDefaults whether to also load values from Java system properties
*
* @note Once a SparkConf object is passed to Spark, it is cloned and can no longer be modified
* by the user. Spark does not support modifying the configuration at runtime.
*/
class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging with Serializable {
import SparkConf._

/** Create a SparkConf that loads defaults from system properties and the classpath */
def this() = this(true)

// Backing store for all settings; a ConcurrentHashMap because a SparkConf
// may be read from multiple threads once handed to Spark.
private val settings = new ConcurrentHashMap[String, String]()

// Reader that resolves variable substitution, falling back to environment
// variables; @transient + lazy so it is rebuilt after deserialization.
@transient private lazy val reader: ConfigReader = {
  val _reader = new ConfigReader(new SparkConfigProvider(settings))
  _reader.bindEnv((key: String) => Option(getenv(key)))
  _reader
}

// Pre-populate from JVM system properties unless the caller opted out
// (loadDefaults = false, used by unit tests for reproducibility).
if (loadDefaults) {
  loadFromSystemProperties(false)
}
/**
 * Copies every JVM system property whose key starts with "spark." into this
 * configuration.
 *
 * @param silent when true, suppress deprecation warnings for the copied keys
 * @return this SparkConf, for chaining
 */
private[spark] def loadFromSystemProperties(silent: Boolean): SparkConf = {
  Utils.getSystemProperties
    .filter { case (key, _) => key.startsWith("spark.") }
    .foreach { case (key, value) => set(key, value, silent) }
  this
}
/** Set a configuration variable. */
def set(key: String, value: String): SparkConf = {
  set(key, value, false)
}

// Core setter: rejects null keys/values eagerly (a null in the map would
// surface much later as a confusing NPE) and optionally logs a warning for
// deprecated keys.
private[spark] def set(key: String, value: String, silent: Boolean): SparkConf = {
  if (key == null) {
    throw new NullPointerException("null key")
  }
  if (value == null) {
    throw new NullPointerException("null value for " + key)
  }
  if (!silent) {
    logDeprecationWarning(key)
  }
  settings.put(key, value)
  this
}

// Typed setter: the ConfigEntry converts the value to its string form.
private[spark] def set[T](entry: ConfigEntry[T], value: T): SparkConf = {
  set(entry.key, entry.stringConverter(value))
  this
}

// Typed setter for optional entries (value stored without Option wrapping).
private[spark] def set[T](entry: OptionalConfigEntry[T], value: T): SparkConf = {
  set(entry.key, entry.rawStringConverter(value))
  this
}
/**
 * The master URL to connect to, such as "local" to run locally with one thread, "local[4]" to
 * run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone cluster.
 */
def setMaster(master: String): SparkConf = {
  set("spark.master", master)
}

/** Set a name for your application. Shown in the Spark web UI. */
def setAppName(name: String): SparkConf = {
  set("spark.app.name", name)
}

/** Set JAR files to distribute to the cluster. */
def setJars(jars: Seq[String]): SparkConf = {
  // Warn once per null entry, then silently drop the nulls before storing.
  for (jar <- jars if (jar == null)) logWarning("null jar passed to SparkContext constructor")
  set(JARS, jars.filter(_ != null))
}

/** Set JAR files to distribute to the cluster. (Java-friendly version.) */
def setJars(jars: Array[String]): SparkConf = {
  setJars(jars.toSeq)
}
/**
 * Set an environment variable to be used when launching executors for this application.
 * These variables are stored as properties of the form spark.executorEnv.VAR_NAME
 * (for example spark.executorEnv.PATH) but this method makes them easier to set.
 */
def setExecutorEnv(variable: String, value: String): SparkConf = {
  set("spark.executorEnv." + variable, value)
}

/**
 * Set multiple environment variables to be used when launching executors.
 * These variables are stored as properties of the form spark.executorEnv.VAR_NAME
 * (for example spark.executorEnv.PATH) but this method makes them easier to set.
 */
def setExecutorEnv(variables: Seq[(String, String)]): SparkConf = {
  for ((k, v) <- variables) {
    setExecutorEnv(k, v)
  }
  this
}

/**
 * Set multiple environment variables to be used when launching executors.
 * (Java-friendly version.)
 */
def setExecutorEnv(variables: Array[(String, String)]): SparkConf = {
  setExecutorEnv(variables.toSeq)
}

/**
 * Set the location where Spark is installed on worker nodes.
 */
def setSparkHome(home: String): SparkConf = {
  set("spark.home", home)
}
/** Set multiple parameters together */
def setAll(settings: Iterable[(String, String)]): SparkConf = {
  settings.foreach { case (k, v) => set(k, v) }
  this
}

/**
 * Set multiple parameters together
 */
@deprecated("Use setAll(Iterable) instead", "3.0.0")
def setAll(settings: Traversable[(String, String)]): SparkConf = {
  settings.foreach { case (k, v) => set(k, v) }
  this
}

/** Set a parameter if it isn't already configured */
def setIfMissing(key: String, value: String): SparkConf = {
  // putIfAbsent returns null only when the key was actually inserted, so
  // the deprecation warning fires at most once, on first insertion.
  if (settings.putIfAbsent(key, value) == null) {
    logDeprecationWarning(key)
  }
  this
}

// Typed variant of setIfMissing; same insert-once warning semantics.
private[spark] def setIfMissing[T](entry: ConfigEntry[T], value: T): SparkConf = {
  if (settings.putIfAbsent(entry.key, entry.stringConverter(value)) == null) {
    logDeprecationWarning(entry.key)
  }
  this
}

// Optional-entry variant of setIfMissing.
private[spark] def setIfMissing[T](entry: OptionalConfigEntry[T], value: T): SparkConf = {
  if (settings.putIfAbsent(entry.key, entry.rawStringConverter(value)) == null) {
    logDeprecationWarning(entry.key)
  }
  this
}
/**
 * Use Kryo serialization and register the given set of classes with Kryo.
 * If called multiple times, this will append the classes from all calls together.
 */
def registerKryoClasses(classes: Array[Class[_]]): SparkConf = {
  // LinkedHashSet: de-duplicates while preserving registration order, which
  // matters because Kryo assigns class IDs in registration order.
  val allClassNames = new LinkedHashSet[String]()
  allClassNames ++= get(KRYO_CLASSES_TO_REGISTER).map(_.trim)
    .filter(!_.isEmpty)
  allClassNames ++= classes.map(_.getName)
  set(KRYO_CLASSES_TO_REGISTER, allClassNames.toSeq)
  set(SERIALIZER, classOf[KryoSerializer].getName)
  this
}

// Key prefix under which registered Avro schemas are stored.
private final val avroNamespace = "avro.schema."

/**
 * Use Kryo serialization and register the given set of Avro schemas so that the generic
 * record serializer can decrease network IO
 */
def registerAvroSchemas(schemas: Schema*): SparkConf = {
  // Keyed by the schema's 64-bit parsing fingerprint so identical schemas
  // collapse to a single entry.
  for (schema <- schemas) {
    set(avroNamespace + SchemaNormalization.parsingFingerprint64(schema), schema.toString)
  }
  this
}

/** Gets all the avro schemas in the configuration used in the generic Avro record serializer */
def getAvroSchema: Map[Long, String] = {
  getAll.filter { case (k, v) => k.startsWith(avroNamespace) }
    .map { case (k, v) => (k.substring(avroNamespace.length).toLong, v) }
    .toMap
}
/** Remove a parameter from the configuration */
def remove(key: String): SparkConf = {
  settings.remove(key)
  this
}

private[spark] def remove(entry: ConfigEntry[_]): SparkConf = {
  remove(entry.key)
}

/** Get a parameter; throws a NoSuchElementException if it's not set */
def get(key: String): String = {
  getOption(key).getOrElse(throw new NoSuchElementException(key))
}

/** Get a parameter, falling back to a default if not set */
def get(key: String, defaultValue: String): String = {
  getOption(key).getOrElse(defaultValue)
}

/**
 * Retrieves the value of a pre-defined configuration entry.
 *
 * - This is an internal Spark API.
 * - The return type if defined by the configuration entry.
 * - This will throw an exception is the config is not optional and the value is not set.
 */
private[spark] def get[T](entry: ConfigEntry[T]): T = {
  // The entry itself knows how to read, convert and default its value.
  entry.readFrom(reader)
}
// The getTimeAs* / getSizeAs* family delegates unit-suffix parsing
// ("25s", "5ms", "1g", ...) to Utils; catchIllegalValue rewraps parse
// failures with the offending key name for a clearer error message.

/**
 * Get a time parameter as seconds; throws a NoSuchElementException if it's not set. If no
 * suffix is provided then seconds are assumed.
 * @throws java.util.NoSuchElementException If the time parameter is not set
 * @throws NumberFormatException If the value cannot be interpreted as seconds
 */
def getTimeAsSeconds(key: String): Long = catchIllegalValue(key) {
  Utils.timeStringAsSeconds(get(key))
}

/**
 * Get a time parameter as seconds, falling back to a default if not set. If no
 * suffix is provided then seconds are assumed.
 * @throws NumberFormatException If the value cannot be interpreted as seconds
 */
def getTimeAsSeconds(key: String, defaultValue: String): Long = catchIllegalValue(key) {
  Utils.timeStringAsSeconds(get(key, defaultValue))
}

/**
 * Get a time parameter as milliseconds; throws a NoSuchElementException if it's not set. If no
 * suffix is provided then milliseconds are assumed.
 * @throws java.util.NoSuchElementException If the time parameter is not set
 * @throws NumberFormatException If the value cannot be interpreted as milliseconds
 */
def getTimeAsMs(key: String): Long = catchIllegalValue(key) {
  Utils.timeStringAsMs(get(key))
}

/**
 * Get a time parameter as milliseconds, falling back to a default if not set. If no
 * suffix is provided then milliseconds are assumed.
 * @throws NumberFormatException If the value cannot be interpreted as milliseconds
 */
def getTimeAsMs(key: String, defaultValue: String): Long = catchIllegalValue(key) {
  Utils.timeStringAsMs(get(key, defaultValue))
}

/**
 * Get a size parameter as bytes; throws a NoSuchElementException if it's not set. If no
 * suffix is provided then bytes are assumed.
 * @throws java.util.NoSuchElementException If the size parameter is not set
 * @throws NumberFormatException If the value cannot be interpreted as bytes
 */
def getSizeAsBytes(key: String): Long = catchIllegalValue(key) {
  Utils.byteStringAsBytes(get(key))
}

/**
 * Get a size parameter as bytes, falling back to a default if not set. If no
 * suffix is provided then bytes are assumed.
 * @throws NumberFormatException If the value cannot be interpreted as bytes
 */
def getSizeAsBytes(key: String, defaultValue: String): Long = catchIllegalValue(key) {
  Utils.byteStringAsBytes(get(key, defaultValue))
}

/**
 * Get a size parameter as bytes, falling back to a default if not set.
 * @throws NumberFormatException If the value cannot be interpreted as bytes
 */
def getSizeAsBytes(key: String, defaultValue: Long): Long = catchIllegalValue(key) {
  // The numeric default is rendered with an explicit "B" suffix so it goes
  // through the same parser as user-supplied values.
  Utils.byteStringAsBytes(get(key, defaultValue + "B"))
}

/**
 * Get a size parameter as Kibibytes; throws a NoSuchElementException if it's not set. If no
 * suffix is provided then Kibibytes are assumed.
 * @throws java.util.NoSuchElementException If the size parameter is not set
 * @throws NumberFormatException If the value cannot be interpreted as Kibibytes
 */
def getSizeAsKb(key: String): Long = catchIllegalValue(key) {
  Utils.byteStringAsKb(get(key))
}

/**
 * Get a size parameter as Kibibytes, falling back to a default if not set. If no
 * suffix is provided then Kibibytes are assumed.
 * @throws NumberFormatException If the value cannot be interpreted as Kibibytes
 */
def getSizeAsKb(key: String, defaultValue: String): Long = catchIllegalValue(key) {
  Utils.byteStringAsKb(get(key, defaultValue))
}

/**
 * Get a size parameter as Mebibytes; throws a NoSuchElementException if it's not set. If no
 * suffix is provided then Mebibytes are assumed.
 * @throws java.util.NoSuchElementException If the size parameter is not set
 * @throws NumberFormatException If the value cannot be interpreted as Mebibytes
 */
def getSizeAsMb(key: String): Long = catchIllegalValue(key) {
  Utils.byteStringAsMb(get(key))
}

/**
 * Get a size parameter as Mebibytes, falling back to a default if not set. If no
 * suffix is provided then Mebibytes are assumed.
 * @throws NumberFormatException If the value cannot be interpreted as Mebibytes
 */
def getSizeAsMb(key: String, defaultValue: String): Long = catchIllegalValue(key) {
  Utils.byteStringAsMb(get(key, defaultValue))
}

/**
 * Get a size parameter as Gibibytes; throws a NoSuchElementException if it's not set. If no
 * suffix is provided then Gibibytes are assumed.
 * @throws java.util.NoSuchElementException If the size parameter is not set
 * @throws NumberFormatException If the value cannot be interpreted as Gibibytes
 */
def getSizeAsGb(key: String): Long = catchIllegalValue(key) {
  Utils.byteStringAsGb(get(key))
}

/**
 * Get a size parameter as Gibibytes, falling back to a default if not set. If no
 * suffix is provided then Gibibytes are assumed.
 * @throws NumberFormatException If the value cannot be interpreted as Gibibytes
 */
def getSizeAsGb(key: String, defaultValue: String): Long = catchIllegalValue(key) {
  Utils.byteStringAsGb(get(key, defaultValue))
}
/** Get a parameter as an Option */
def getOption(key: String): Option[String] = {
Option(settings.get(key)).orElse(getDeprecatedConfig(key, settings))
}
/** Get an optional value, applying variable substitution. */
private[spark] def getWithSubstitution(key: String): Option[String] = {
getOption(key).map(reader.substitute)
}
/** Get all parameters as a list of pairs */
def getAll: Array[(String, String)] = {
settings.entrySet().asScala.map(x => (x.getKey, x.getValue)).toArray
}
/**
* Get all parameters that start with `prefix`
*/
def getAllWithPrefix(prefix: String): Array[(String, String)] = {
getAll.filter { case (k, v) => k.startsWith(prefix) }
.map { case (k, v) => (k.substring(prefix.length), v) }
}
/**
* Get all parameters that start with `prefix` and end with 'suffix'
*/
def getAllWithPrefixAndSuffix(prefix: String, suffix: String): Array[(String, String)] = {
getAll.filter { case (k, v) => k.startsWith(prefix) && k.endsWith(suffix) }
.map { case (k, v) => (k.substring(prefix.length, (k.length - suffix.length)), v) }
}
/**
 * Get a parameter as an integer, falling back to a default if not set.
 * @throws NumberFormatException If the value cannot be interpreted as an integer
 */
def getInt(key: String, defaultValue: Int): Int = catchIllegalValue(key) {
  getOption(key).fold(defaultValue)(_.toInt)
}
/**
 * Get a parameter as a long, falling back to a default if not set.
 * @throws NumberFormatException If the value cannot be interpreted as a long
 */
def getLong(key: String, defaultValue: Long): Long = catchIllegalValue(key) {
  getOption(key).fold(defaultValue)(_.toLong)
}
/**
 * Get a parameter as a double, falling back to a default if not set.
 * @throws NumberFormatException If the value cannot be interpreted as a double
 */
def getDouble(key: String, defaultValue: Double): Double = catchIllegalValue(key) {
  getOption(key).fold(defaultValue)(_.toDouble)
}
/**
 * Get a parameter as a boolean, falling back to a default if not set.
 * @throws IllegalArgumentException If the value cannot be interpreted as a boolean
 */
def getBoolean(key: String, defaultValue: Boolean): Boolean = catchIllegalValue(key) {
  getOption(key).fold(defaultValue)(_.toBoolean)
}
/** Get all executor environment variables set on this SparkConf. */
def getExecutorEnv: Seq[(String, String)] = {
  val envPrefix = "spark.executorEnv."
  getAllWithPrefix(envPrefix)
}
/**
 * Returns the Spark application id, valid in the Driver after TaskScheduler registration and
 * from the start in the Executor.
 *
 * NOTE(review): uses the no-default `get`, which is expected to throw if
 * "spark.app.id" has not been set yet — confirm against `get(key)`.
 */
def getAppId: String = get("spark.app.id")
/**
 * Does the configuration contain a given parameter, either directly or through one
 * of its deprecated alternate keys?
 */
def contains(key: String): Boolean = {
  if (settings.containsKey(key)) {
    true
  } else {
    configsWithAlternatives.getOrElse(key, Nil).exists(alt => contains(alt.key))
  }
}
// Typed-entry overload: membership test keyed by a ConfigEntry, delegating to the string-key form.
private[spark] def contains(entry: ConfigEntry[_]): Boolean = contains(entry.key)
/** Copy this object, transferring every registered setting into a fresh SparkConf. */
override def clone: SparkConf = {
  val copy = new SparkConf(false)
  for (entry <- settings.entrySet().asScala) {
    // Third argument mirrors the original call (presumably a "silent" flag — confirm
    // against set()'s signature).
    copy.set(entry.getKey(), entry.getValue(), true)
  }
  copy
}
/**
 * By using this instead of System.getenv(), environment variables can be mocked
 * in unit tests (a test subclass can override this method instead of mutating
 * process-wide state).
 */
private[spark] def getenv(name: String): String = System.getenv(name)
/**
 * Wrapper method for get() methods which require some specific value format. This catches
 * any [[NumberFormatException]] or [[IllegalArgumentException]] and re-raises it with the
 * incorrectly configured key in the exception message.
 */
private def catchIllegalValue[T](key: String)(getValue: => T): T = {
  try {
    getValue
  } catch {
    case nfe: NumberFormatException =>
      // NumberFormatException has no (message, cause) constructor, so the cause
      // must be attached separately via initCause before throwing.
      val wrapped = new NumberFormatException(s"Illegal value for config key $key: ${nfe.getMessage}")
      wrapped.initCause(nfe)
      throw wrapped
    case iae: IllegalArgumentException =>
      throw new IllegalArgumentException(s"Illegal value for config key $key: ${iae.getMessage}", iae)
  }
}
/**
 * Get task resource requirements as a map from resource name to requested amount,
 * built from the "spark.task.resource.*.amount" style keys.
 */
private[spark] def getTaskResourceRequirements(): Map[String, Int] = {
  getAllWithPrefix(SPARK_TASK_RESOURCE_PREFIX).collect {
    case (key, amount) if key.endsWith(SPARK_RESOURCE_AMOUNT_SUFFIX) =>
      (key.dropRight(SPARK_RESOURCE_AMOUNT_SUFFIX.length), amount.toInt)
  }.toMap
}
/**
 * Checks for illegal or deprecated config settings. Throws an exception for the former. Not
 * idempotent - may mutate this conf object to convert deprecated settings to supported ones.
 */
private[spark] def validateSettings() {
  // spark.local.dir is honored here but overridden by the cluster manager; warn only.
  if (contains("spark.local.dir")) {
    val msg = "Note that spark.local.dir will be overridden by the value set by " +
      "the cluster manager (via SPARK_LOCAL_DIRS in mesos/standalone/kubernetes and LOCAL_DIRS" +
      " in YARN)."
    logWarning(msg)
  }
  val executorOptsKey = EXECUTOR_JAVA_OPTIONS.key
  // Used by Yarn in 1.1 and before
  sys.props.get("spark.driver.libraryPath").foreach { value =>
    val warning =
      s"""
        |spark.driver.libraryPath was detected (set to '$value').
        |This is deprecated in Spark 1.2+.
        |
        |Please instead use: ${DRIVER_LIBRARY_PATH.key}
      """.stripMargin
    logWarning(warning)
  }
  // Validate spark.executor.extraJavaOptions: it must not smuggle in Spark options
  // or heap sizing, which belong on the conf / spark.executor.memory instead.
  getOption(executorOptsKey).foreach { javaOpts =>
    if (javaOpts.contains("-Dspark")) {
      val msg = s"$executorOptsKey is not allowed to set Spark options (was '$javaOpts'). " +
        "Set them directly on a SparkConf or in a properties file when using ./bin/spark-submit."
      throw new Exception(msg)
    }
    if (javaOpts.contains("-Xmx")) {
      val msg = s"$executorOptsKey is not allowed to specify max heap memory settings " +
        s"(was '$javaOpts'). Use spark.executor.memory instead."
      throw new Exception(msg)
    }
  }
  // Validate memory fractions: both must lie in [0, 1].
  for (key <- Seq(MEMORY_FRACTION.key, MEMORY_STORAGE_FRACTION.key)) {
    val value = getDouble(key, 0.5)
    if (value > 1 || value < 0) {
      throw new IllegalArgumentException(s"$key should be between 0 and 1 (was '$value').")
    }
  }
  // Rewrite the legacy "yarn-cluster"/"yarn-client" master strings into
  // master="yarn" plus an explicit deploy mode (this is the non-idempotent mutation).
  if (contains("spark.master") && get("spark.master").startsWith("yarn-")) {
    val warning = s"spark.master ${get("spark.master")} is deprecated in Spark 2.0+, please " +
      "instead use \\"yarn\\" with specified deploy mode."
    get("spark.master") match {
      case "yarn-cluster" =>
        logWarning(warning)
        set("spark.master", "yarn")
        set(SUBMIT_DEPLOY_MODE, "cluster")
      case "yarn-client" =>
        logWarning(warning)
        set("spark.master", "yarn")
        set(SUBMIT_DEPLOY_MODE, "client")
      case _ => // Any other unexpected master will be checked when creating scheduler backend.
    }
  }
  if (contains(SUBMIT_DEPLOY_MODE)) {
    get(SUBMIT_DEPLOY_MODE) match {
      case "cluster" | "client" =>
      case e => throw new SparkException(s"${SUBMIT_DEPLOY_MODE.key} can only be " +
        "\\"cluster\\" or \\"client\\".")
    }
  }
  // Warn (not fail) when cores.max does not divide evenly by executor cores,
  // since the remainder can never be allocated.
  if (contains(CORES_MAX) && contains(EXECUTOR_CORES)) {
    val totalCores = getInt(CORES_MAX.key, 1)
    val executorCores = get(EXECUTOR_CORES)
    val leftCores = totalCores % executorCores
    if (leftCores != 0) {
      logWarning(s"Total executor cores: ${totalCores} is not " +
        s"divisible by cores per executor: ${executorCores}, " +
        s"the left cores: ${leftCores} will not be allocated")
    }
  }
  // Encryption requires authentication to be on as well.
  val encryptionEnabled = get(NETWORK_CRYPTO_ENABLED) || get(SASL_ENCRYPTION_ENABLED)
  require(!encryptionEnabled || get(NETWORK_AUTH_ENABLED),
    s"${NETWORK_AUTH_ENABLED.key} must be enabled when enabling encryption.")
  val executorTimeoutThresholdMs = get(NETWORK_TIMEOUT) * 1000
  val executorHeartbeatIntervalMs = get(EXECUTOR_HEARTBEAT_INTERVAL)
  val networkTimeout = NETWORK_TIMEOUT.key
  // If spark.executor.heartbeatInterval bigger than spark.network.timeout,
  // it will almost always cause ExecutorLostFailure. See SPARK-22754.
  require(executorTimeoutThresholdMs > executorHeartbeatIntervalMs, "The value of " +
    s"${networkTimeout}=${executorTimeoutThresholdMs}ms must be no less than the value of " +
    s"${EXECUTOR_HEARTBEAT_INTERVAL.key}=${executorHeartbeatIntervalMs}ms.")
}
/**
 * Return a string listing all keys and values, one per line. This is useful to print the
 * configuration out for debugging. Sensitive values are redacted before rendering.
 */
def toDebugString: String = {
  val redacted = Utils.redact(this, getAll)
  redacted.sorted.map { case (key, value) => s"$key=$value" }.mkString("\\n")
}
}
private[spark] object SparkConf extends Logging {

  /**
   * Maps deprecated config keys to information about the deprecation.
   *
   * The extra information is logged as a warning when the config is present in the user's
   * configuration.
   */
  private val deprecatedConfigs: Map[String, DeprecatedConfig] = {
    val configs = Seq(
      DeprecatedConfig("spark.cache.class", "0.8",
        "The spark.cache.class property is no longer being used! Specify storage levels using " +
        "the RDD.persist() method instead."),
      DeprecatedConfig("spark.yarn.user.classpath.first", "1.3",
        "Please use spark.{driver,executor}.userClassPathFirst instead."),
      DeprecatedConfig("spark.kryoserializer.buffer.mb", "1.4",
        "Please use spark.kryoserializer.buffer instead. The default value for " +
        "spark.kryoserializer.buffer.mb was previously specified as '0.064'. Fractional values " +
        "are no longer accepted. To specify the equivalent now, one may use '64k'."),
      DeprecatedConfig("spark.rpc", "2.0", "Not used anymore."),
      DeprecatedConfig("spark.scheduler.executorTaskBlacklistTime", "2.1.0",
        "Please use the new blacklisting options, spark.blacklist.*"),
      DeprecatedConfig("spark.yarn.am.port", "2.0.0", "Not used anymore"),
      DeprecatedConfig("spark.executor.port", "2.0.0", "Not used anymore"),
      DeprecatedConfig("spark.shuffle.service.index.cache.entries", "2.3.0",
        "Not used anymore. Please use spark.shuffle.service.index.cache.size"),
      DeprecatedConfig("spark.yarn.credentials.file.retention.count", "2.4.0", "Not used anymore."),
      DeprecatedConfig("spark.yarn.credentials.file.retention.days", "2.4.0", "Not used anymore."),
      DeprecatedConfig("spark.yarn.services", "3.0.0", "Feature no longer available.")
    )
    Map(configs.map { cfg => (cfg.key -> cfg) } : _*)
  }

  /**
   * Maps a current config key to alternate keys that were used in previous version of Spark.
   *
   * The alternates are used in the order defined in this map. If deprecated configs are
   * present in the user's configuration, a warning is logged.
   *
   * TODO: consolidate it with `ConfigBuilder.withAlternative`.
   */
  private val configsWithAlternatives = Map[String, Seq[AlternateConfig]](
    EXECUTOR_USER_CLASS_PATH_FIRST.key -> Seq(
      AlternateConfig("spark.files.userClassPathFirst", "1.3")),
    UPDATE_INTERVAL_S.key -> Seq(
      AlternateConfig("spark.history.fs.update.interval.seconds", "1.4"),
      AlternateConfig("spark.history.fs.updateInterval", "1.3"),
      AlternateConfig("spark.history.updateInterval", "1.3")),
    CLEANER_INTERVAL_S.key -> Seq(
      AlternateConfig("spark.history.fs.cleaner.interval.seconds", "1.4")),
    MAX_LOG_AGE_S.key -> Seq(
      AlternateConfig("spark.history.fs.cleaner.maxAge.seconds", "1.4")),
    "spark.yarn.am.waitTime" -> Seq(
      AlternateConfig("spark.yarn.applicationMaster.waitTries", "1.3",
        // Translate old value to a duration, with 10s wait time per try.
        translation = s => s"${s.toLong * 10}s")),
    REDUCER_MAX_SIZE_IN_FLIGHT.key -> Seq(
      AlternateConfig("spark.reducer.maxMbInFlight", "1.4")),
    KRYO_SERIALIZER_BUFFER_SIZE.key -> Seq(
      AlternateConfig("spark.kryoserializer.buffer.mb", "1.4",
        translation = s => s"${(s.toDouble * 1000).toInt}k")),
    KRYO_SERIALIZER_MAX_BUFFER_SIZE.key -> Seq(
      AlternateConfig("spark.kryoserializer.buffer.max.mb", "1.4")),
    SHUFFLE_FILE_BUFFER_SIZE.key -> Seq(
      AlternateConfig("spark.shuffle.file.buffer.kb", "1.4")),
    EXECUTOR_LOGS_ROLLING_MAX_SIZE.key -> Seq(
      AlternateConfig("spark.executor.logs.rolling.size.maxBytes", "1.4")),
    IO_COMPRESSION_SNAPPY_BLOCKSIZE.key -> Seq(
      AlternateConfig("spark.io.compression.snappy.block.size", "1.4")),
    IO_COMPRESSION_LZ4_BLOCKSIZE.key -> Seq(
      AlternateConfig("spark.io.compression.lz4.block.size", "1.4")),
    RPC_NUM_RETRIES.key -> Seq(
      AlternateConfig("spark.akka.num.retries", "1.4")),
    RPC_RETRY_WAIT.key -> Seq(
      AlternateConfig("spark.akka.retry.wait", "1.4")),
    RPC_ASK_TIMEOUT.key -> Seq(
      AlternateConfig("spark.akka.askTimeout", "1.4")),
    RPC_LOOKUP_TIMEOUT.key -> Seq(
      AlternateConfig("spark.akka.lookupTimeout", "1.4")),
    "spark.streaming.fileStream.minRememberDuration" -> Seq(
      AlternateConfig("spark.streaming.minRememberDuration", "1.5")),
    "spark.yarn.max.executor.failures" -> Seq(
      AlternateConfig("spark.yarn.max.worker.failures", "1.5")),
    MEMORY_OFFHEAP_ENABLED.key -> Seq(
      AlternateConfig("spark.unsafe.offHeap", "1.6")),
    RPC_MESSAGE_MAX_SIZE.key -> Seq(
      AlternateConfig("spark.akka.frameSize", "1.6")),
    "spark.yarn.jars" -> Seq(
      AlternateConfig("spark.yarn.jar", "2.0")),
    MAX_REMOTE_BLOCK_SIZE_FETCH_TO_MEM.key -> Seq(
      AlternateConfig("spark.reducer.maxReqSizeShuffleToMem", "2.3")),
    LISTENER_BUS_EVENT_QUEUE_CAPACITY.key -> Seq(
      AlternateConfig("spark.scheduler.listenerbus.eventqueue.size", "2.3")),
    DRIVER_MEMORY_OVERHEAD.key -> Seq(
      AlternateConfig("spark.yarn.driver.memoryOverhead", "2.3")),
    EXECUTOR_MEMORY_OVERHEAD.key -> Seq(
      AlternateConfig("spark.yarn.executor.memoryOverhead", "2.3")),
    KEYTAB.key -> Seq(
      AlternateConfig("spark.yarn.keytab", "3.0")),
    PRINCIPAL.key -> Seq(
      AlternateConfig("spark.yarn.principal", "3.0")),
    KERBEROS_RELOGIN_PERIOD.key -> Seq(
      AlternateConfig("spark.yarn.kerberos.relogin.period", "3.0")),
    KERBEROS_FILESYSTEMS_TO_ACCESS.key -> Seq(
      AlternateConfig("spark.yarn.access.namenodes", "2.2"),
      AlternateConfig("spark.yarn.access.hadoopFileSystems", "3.0")),
    "spark.kafka.consumer.cache.capacity" -> Seq(
      AlternateConfig("spark.sql.kafkaConsumerCache.capacity", "3.0"))
  )

  /**
   * A view of `configsWithAlternatives` that makes it more efficient to look up deprecated
   * config keys.
   *
   * Maps the deprecated config name to a 2-tuple (new config name, alternate config info).
   */
  private val allAlternatives: Map[String, (String, AlternateConfig)] = {
    configsWithAlternatives.keys.flatMap { key =>
      configsWithAlternatives(key).map { cfg => (cfg.key -> (key -> cfg)) }
    }.toMap
  }

  /**
   * Return whether the given config should be passed to an executor on start-up.
   *
   * Certain authentication configs are required from the executor when it connects to
   * the scheduler, while the rest of the spark configs can be inherited from the driver later.
   */
  def isExecutorStartupConf(name: String): Boolean = {
    (name.startsWith("spark.auth") && name != SecurityManager.SPARK_AUTH_SECRET_CONF) ||
    name.startsWith("spark.rpc") ||
    name.startsWith("spark.network") ||
    isSparkPortConf(name)
  }

  /**
   * Return true if the given config matches either `spark.*.port` or `spark.port.*`.
   */
  def isSparkPortConf(name: String): Boolean = {
    (name.startsWith("spark.") && name.endsWith(".port")) || name.startsWith("spark.port.")
  }

  /**
   * Looks for available deprecated keys for the given config option, and return the first
   * value available.
   */
  def getDeprecatedConfig(key: String, conf: JMap[String, String]): Option[String] = {
    configsWithAlternatives.get(key).flatMap { alts =>
      alts.collectFirst { case alt if conf.containsKey(alt.key) =>
        val value = conf.get(alt.key)
        if (alt.translation != null) alt.translation(value) else value
      }
    }
  }

  /**
   * Logs a warning message if the given config key is deprecated. At most one warning is
   * emitted: fully-deprecated keys take precedence over alternate keys, which take
   * precedence over the generic Akka notice.
   */
  def logDeprecationWarning(key: String): Unit = {
    // Structured as a match/else chain instead of `return` inside foreach lambdas:
    // a `return` in a closure is a nonlocal return implemented by throwing
    // NonLocalReturnControl, an anti-pattern that is deprecated in newer Scala versions.
    deprecatedConfigs.get(key) match {
      case Some(cfg) =>
        logWarning(
          s"The configuration key '$key' has been deprecated as of Spark ${cfg.version} and " +
          s"may be removed in the future. ${cfg.deprecationMessage}")
      case None =>
        allAlternatives.get(key) match {
          case Some((newKey, cfg)) =>
            logWarning(
              s"The configuration key '$key' has been deprecated as of Spark ${cfg.version} and " +
              s"may be removed in the future. Please use the new key '$newKey' instead.")
          case None =>
            if (key.startsWith("spark.akka") || key.startsWith("spark.ssl.akka")) {
              logWarning(
                s"The configuration key $key is not supported anymore " +
                s"because Spark doesn't use Akka since 2.0")
            }
        }
    }
  }

  /**
   * A function to help parsing configs with multiple parts where the base and
   * suffix could be one of many options. For instance configs like:
   * spark.executor.resource.{resourceName}.{count/addresses}
   * This function takes an Array of configs you got from the
   * getAllWithPrefix function, selects only those that end with the suffix
   * passed in and returns just the base part of the config before the first
   * '.' and its value.
   */
  def getConfigsWithSuffix(
      configs: Array[(String, String)],
      suffix: String
      ): Array[(String, String)] = {
    configs.filter { case (rConf, _) => rConf.endsWith(suffix)}.
      map { case (k, v) => (k.split('.').head, v) }
  }

  /**
   * A function to help parsing configs with multiple parts where the base and
   * suffix could be one of many options. For instance configs like:
   * spark.executor.resource.{resourceName}.{count/addresses}
   * This function takes an Array of configs you got from the
   * getAllWithPrefix function and returns the base part of the config
   * before the first '.'.
   */
  def getBaseOfConfigs(configs: Array[(String, String)]): Set[String] = {
    configs.map { case (k, _) => k.split('.').head }.toSet
  }

  /**
   * Holds information about keys that have been deprecated and do not have a replacement.
   *
   * @param key The deprecated key.
   * @param version Version of Spark where key was deprecated.
   * @param deprecationMessage Message to include in the deprecation warning.
   */
  private case class DeprecatedConfig(
    key: String,
    version: String,
    deprecationMessage: String)

  /**
   * Information about an alternate configuration key that has been deprecated.
   *
   * @param key The deprecated config key.
   * @param version The Spark version in which the key was deprecated.
   * @param translation A translation function for converting old config values into new ones.
   */
  private case class AlternateConfig(
    key: String,
    version: String,
    translation: String => String = null)
}
| icexelloss/spark | core/src/main/scala/org/apache/spark/SparkConf.scala | Scala | apache-2.0 | 33,711 |
package $package$
import java.lang.management.ManagementFactory
import akka.http.scaladsl.server.{Directives, Route}
import scala.concurrent.duration._
/**
 * Exposes a GET route that reports how long the JVM has been running.
 */
trait StatusService extends BaseService {
  import Directives._
  import io.circe.generic.auto._

  /** Response payload: the JVM uptime rendered as a Duration string. */
  protected case class Status(uptime: String)

  override protected def routes: Route =
    get {
      // BUG FIX: akka-http evaluates a directive's inner-route expression once, when the
      // Route is built, so a bare log.info here would fire a single time at startup rather
      // than per request. extractRequestContext defers evaluation to request time.
      extractRequestContext { _ =>
        log.info("/status executed")
        complete(Status(Duration(ManagementFactory.getRuntimeMXBean.getUptime, MILLISECONDS).toString()))
      }
    }
}
| silvaren/akka-http.g8 | src/main/g8/src/main/scala/$package$/StatusService.scala | Scala | apache-2.0 | 499 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.nio.ByteBuffer
import scala.collection.mutable.HashMap
import scala.concurrent.duration._
import org.mockito.Matchers.{anyInt, anyObject, anyString, eq => meq}
import org.mockito.Mockito.{atLeast, atMost, never, spy, times, verify, when}
import org.scalatest.BeforeAndAfterEach
import org.scalatest.concurrent.Eventually
import org.scalatest.mockito.MockitoSugar
import org.apache.spark._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config
import org.apache.spark.util.ManualClock
/**
 * Minimal no-op SchedulerBackend so tests can drive TaskSchedulerImpl directly
 * via resourceOffers without a real cluster backend.
 */
class FakeSchedulerBackend extends SchedulerBackend {
  // Procedure syntax (`def f() {}`) is deprecated since Scala 2.13; declare Unit explicitly.
  def start(): Unit = {}
  def stop(): Unit = {}
  def reviveOffers(): Unit = {}
  def defaultParallelism(): Int = 1
  def maxNumConcurrentTasks(): Int = 0
}
class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with BeforeAndAfterEach
with Logging with MockitoSugar with Eventually {
// Captured by the DAGScheduler stub in setupHelper() whenever taskSetFailed is invoked.
var failedTaskSetException: Option[Throwable] = None
var failedTaskSetReason: String = null
var failedTaskSet = false
// Mock tracker installed by setupSchedulerWithMockTaskSetBlacklist().
var blacklist: BlacklistTracker = null
// Scheduler under test and its DAGScheduler callback target; both stopped in afterEach().
var taskScheduler: TaskSchedulerImpl = null
var dagScheduler: DAGScheduler = null
// Per-stage mocks recorded by the createTaskSetManager override in
// setupSchedulerWithMockTaskSetBlacklist(), keyed by stage id.
val stageToMockTaskSetBlacklist = new HashMap[Int, TaskSetBlacklist]()
val stageToMockTaskSetManager = new HashMap[Int, TaskSetManager]()
override def beforeEach(): Unit = {
  super.beforeEach()
  // Reset all per-test bookkeeping so no state leaks between test cases.
  stageToMockTaskSetManager.clear()
  stageToMockTaskSetBlacklist.clear()
  failedTaskSet = false
  failedTaskSetReason = null
  failedTaskSetException = None
}
override def afterEach(): Unit = {
  // Stop and release whichever schedulers the test actually created.
  Option(taskScheduler).foreach { scheduler =>
    scheduler.stop()
    taskScheduler = null
  }
  Option(dagScheduler).foreach { scheduler =>
    scheduler.stop()
    dagScheduler = null
  }
  super.afterEach()
}
/**
 * Builds a local-mode TaskSchedulerImpl with the given extra conf entries applied,
 * wired up via setupHelper().
 */
def setupScheduler(confs: (String, String)*): TaskSchedulerImpl = {
  val sparkConf = new SparkConf().setMaster("local").setAppName("TaskSchedulerImplSuite")
  for ((key, value) <- confs) {
    sparkConf.set(key, value)
  }
  sc = new SparkContext(sparkConf)
  taskScheduler = new TaskSchedulerImpl(sc)
  setupHelper()
}
/**
 * Like setupScheduler, but enables blacklisting and installs mocks: a mocked
 * BlacklistTracker (stored in `blacklist`) and, for every submitted task set, a
 * spied TaskSetManager carrying a mocked TaskSetBlacklist. The mocks are recorded
 * in stageToMockTaskSetManager / stageToMockTaskSetBlacklist for later stubbing.
 */
def setupSchedulerWithMockTaskSetBlacklist(confs: (String, String)*): TaskSchedulerImpl = {
  blacklist = mock[BlacklistTracker]
  val conf = new SparkConf().setMaster("local").setAppName("TaskSchedulerImplSuite")
  conf.set(config.BLACKLIST_ENABLED, true)
  confs.foreach { case (k, v) => conf.set(k, v) }
  sc = new SparkContext(conf)
  taskScheduler =
    new TaskSchedulerImpl(sc, sc.conf.getInt("spark.task.maxFailures", 4)) {
      override def createTaskSetManager(taskSet: TaskSet, maxFailures: Int): TaskSetManager = {
        val tsm = super.createTaskSetManager(taskSet, maxFailures)
        // we need to create a spied tsm just so we can set the TaskSetBlacklist
        val tsmSpy = spy(tsm)
        val taskSetBlacklist = mock[TaskSetBlacklist]
        when(tsmSpy.taskSetBlacklistHelperOpt).thenReturn(Some(taskSetBlacklist))
        stageToMockTaskSetManager(taskSet.stageId) = tsmSpy
        stageToMockTaskSetBlacklist(taskSet.stageId) = taskSetBlacklist
        tsmSpy
      }
      // Route all blacklist-tracker lookups to the suite-level mock.
      override private[scheduler] lazy val blacklistTrackerOpt = Some(blacklist)
    }
  setupHelper()
}
/**
 * Attaches a FakeSchedulerBackend to `taskScheduler` and a DAGScheduler stub whose
 * taskSetFailed callback records the failure into the suite's failedTaskSet* fields.
 */
def setupHelper(): TaskSchedulerImpl = {
  taskScheduler.initialize(new FakeSchedulerBackend)
  // Need to initialize a DAGScheduler for the taskScheduler to use for callbacks.
  dagScheduler = new DAGScheduler(sc, taskScheduler) {
    override def taskStarted(task: Task[_], taskInfo: TaskInfo): Unit = {}
    override def executorAdded(execId: String, host: String): Unit = {}
    override def taskSetFailed(
        taskSet: TaskSet,
        reason: String,
        exception: Option[Throwable]): Unit = {
      // Normally the DAGScheduler puts this in the event loop, which will eventually fail
      // dependent jobs
      failedTaskSet = true
      failedTaskSetReason = reason
      failedTaskSetException = exception
    }
  }
  taskScheduler
}
// Fairness: a repeated single-task job should not always land on the same executor.
test("Scheduler does not always schedule tasks on the same workers") {
  val taskScheduler = setupScheduler()
  val numFreeCores = 1
  val workerOffers = IndexedSeq(new WorkerOffer("executor0", "host0", numFreeCores),
    new WorkerOffer("executor1", "host1", numFreeCores))
  // Repeatedly try to schedule a 1-task job, and make sure that it doesn't always
  // get scheduled on the same executor. While there is a chance this test will fail
  // because the task randomly gets placed on the first executor all 1000 times, the
  // probability of that happening is 2^-1000 (so sufficiently small to be considered
  // negligible).
  val numTrials = 1000
  val selectedExecutorIds = 1.to(numTrials).map { _ =>
    val taskSet = FakeTask.createTaskSet(1)
    taskScheduler.submitTasks(taskSet)
    val taskDescriptions = taskScheduler.resourceOffers(workerOffers).flatten
    assert(1 === taskDescriptions.length)
    taskDescriptions(0).executorId
  }
  val count = selectedExecutorIds.count(_ == workerOffers(0).executorId)
  // Both executors must have been picked at least once across the trials.
  assert(count > 0)
  assert(count < numTrials)
  assert(!failedTaskSet)
}
// With spark.task.cpus=2, a task may only be scheduled on an offer with >= 2 free cores.
test("Scheduler correctly accounts for multiple CPUs per task") {
  val taskCpus = 2
  val taskScheduler = setupScheduler("spark.task.cpus" -> taskCpus.toString)
  // Give zero core offers. Should not generate any tasks
  val zeroCoreWorkerOffers = IndexedSeq(new WorkerOffer("executor0", "host0", 0),
    new WorkerOffer("executor1", "host1", 0))
  val taskSet = FakeTask.createTaskSet(1)
  taskScheduler.submitTasks(taskSet)
  var taskDescriptions = taskScheduler.resourceOffers(zeroCoreWorkerOffers).flatten
  assert(0 === taskDescriptions.length)
  // No tasks should run as we only have 1 core free.
  val numFreeCores = 1
  val singleCoreWorkerOffers = IndexedSeq(new WorkerOffer("executor0", "host0", numFreeCores),
    new WorkerOffer("executor1", "host1", numFreeCores))
  taskScheduler.submitTasks(taskSet)
  taskDescriptions = taskScheduler.resourceOffers(singleCoreWorkerOffers).flatten
  assert(0 === taskDescriptions.length)
  // Now change the offers to have 2 cores in one executor and verify if it
  // is chosen.
  val multiCoreWorkerOffers = IndexedSeq(new WorkerOffer("executor0", "host0", taskCpus),
    new WorkerOffer("executor1", "host1", numFreeCores))
  taskScheduler.submitTasks(taskSet)
  taskDescriptions = taskScheduler.resourceOffers(multiCoreWorkerOffers).flatten
  assert(1 === taskDescriptions.length)
  assert("executor0" === taskDescriptions(0).executorId)
  assert(!failedTaskSet)
}
// A task set that fails serialization must fail cleanly without breaking later submissions.
test("Scheduler does not crash when tasks are not serializable") {
  val taskCpus = 2
  val taskScheduler = setupScheduler("spark.task.cpus" -> taskCpus.toString)
  val numFreeCores = 1
  val taskSet = new TaskSet(
    Array(new NotSerializableFakeTask(1, 0), new NotSerializableFakeTask(0, 1)), 0, 0, 0, null)
  val multiCoreWorkerOffers = IndexedSeq(new WorkerOffer("executor0", "host0", taskCpus),
    new WorkerOffer("executor1", "host1", numFreeCores))
  taskScheduler.submitTasks(taskSet)
  var taskDescriptions = taskScheduler.resourceOffers(multiCoreWorkerOffers).flatten
  assert(0 === taskDescriptions.length)
  assert(failedTaskSet)
  assert(failedTaskSetReason.contains("Failed to serialize task"))
  // Now check that we can still submit tasks
  // Even if one of the task sets has not-serializable tasks, the other task set should
  // still be processed without error
  taskScheduler.submitTasks(FakeTask.createTaskSet(1))
  taskScheduler.submitTasks(taskSet)
  taskDescriptions = taskScheduler.resourceOffers(multiCoreWorkerOffers).flatten
  assert(taskDescriptions.map(_.executorId) === Seq("executor0"))
}
// Submitting a second attempt for the same stage is only allowed once all earlier
// attempts are zombies; otherwise submitTasks must throw.
test("refuse to schedule concurrent attempts for the same stage (SPARK-8103)") {
  val taskScheduler = setupScheduler()
  val attempt1 = FakeTask.createTaskSet(1, 0)
  val attempt2 = FakeTask.createTaskSet(1, 1)
  taskScheduler.submitTasks(attempt1)
  intercept[IllegalStateException] { taskScheduler.submitTasks(attempt2) }
  // OK to submit multiple if previous attempts are all zombie
  taskScheduler.taskSetManagerForAttempt(attempt1.stageId, attempt1.stageAttemptId)
    .get.isZombie = true
  taskScheduler.submitTasks(attempt2)
  val attempt3 = FakeTask.createTaskSet(1, 2)
  intercept[IllegalStateException] { taskScheduler.submitTasks(attempt3) }
  taskScheduler.taskSetManagerForAttempt(attempt2.stageId, attempt2.stageAttemptId)
    .get.isZombie = true
  taskScheduler.submitTasks(attempt3)
  assert(!failedTaskSet)
}
// Once a task set is marked zombie it must get no further offers, while a fresh
// attempt for the same stage still does.
test("don't schedule more tasks after a taskset is zombie") {
  val taskScheduler = setupScheduler()
  val numFreeCores = 1
  val workerOffers = IndexedSeq(new WorkerOffer("executor0", "host0", numFreeCores))
  val attempt1 = FakeTask.createTaskSet(10)
  // submit attempt 1, offer some resources, some tasks get scheduled
  taskScheduler.submitTasks(attempt1)
  val taskDescriptions = taskScheduler.resourceOffers(workerOffers).flatten
  assert(1 === taskDescriptions.length)
  // now mark attempt 1 as a zombie
  taskScheduler.taskSetManagerForAttempt(attempt1.stageId, attempt1.stageAttemptId)
    .get.isZombie = true
  // don't schedule anything on another resource offer
  val taskDescriptions2 = taskScheduler.resourceOffers(workerOffers).flatten
  assert(0 === taskDescriptions2.length)
  // if we schedule another attempt for the same stage, it should get scheduled
  val attempt2 = FakeTask.createTaskSet(10, 1)
  // submit attempt 2, offer some resources, some tasks get scheduled
  taskScheduler.submitTasks(attempt2)
  val taskDescriptions3 = taskScheduler.resourceOffers(workerOffers).flatten
  assert(1 === taskDescriptions3.length)
  val mgr = Option(taskScheduler.taskIdToTaskSetManager.get(taskDescriptions3(0).taskId)).get
  assert(mgr.taskSet.stageAttemptId === 1)
  assert(!failedTaskSet)
}
// Completing a zombie attempt must not block scheduling of the live attempt for the
// same stage.
test("if a zombie attempt finishes, continue scheduling tasks for non-zombie attempts") {
  val taskScheduler = setupScheduler()
  val numFreeCores = 10
  val workerOffers = IndexedSeq(new WorkerOffer("executor0", "host0", numFreeCores))
  val attempt1 = FakeTask.createTaskSet(10)
  // submit attempt 1, offer some resources, some tasks get scheduled
  taskScheduler.submitTasks(attempt1)
  val taskDescriptions = taskScheduler.resourceOffers(workerOffers).flatten
  assert(10 === taskDescriptions.length)
  // now mark attempt 1 as a zombie
  val mgr1 = taskScheduler.taskSetManagerForAttempt(attempt1.stageId, attempt1.stageAttemptId).get
  mgr1.isZombie = true
  // don't schedule anything on another resource offer
  val taskDescriptions2 = taskScheduler.resourceOffers(workerOffers).flatten
  assert(0 === taskDescriptions2.length)
  // submit attempt 2
  val attempt2 = FakeTask.createTaskSet(10, 1)
  taskScheduler.submitTasks(attempt2)
  // attempt 1 finished (this can happen even if it was marked zombie earlier -- all tasks were
  // already submitted, and then they finish)
  taskScheduler.taskSetFinished(mgr1)
  // now with another resource offer, we should still schedule all the tasks in attempt2
  val taskDescriptions3 = taskScheduler.resourceOffers(workerOffers).flatten
  assert(10 === taskDescriptions3.length)
  taskDescriptions3.foreach { task =>
    val mgr = Option(taskScheduler.taskIdToTaskSetManager.get(task.taskId)).get
    assert(mgr.taskSet.stageAttemptId === 1)
  }
  assert(!failedTaskSet)
}
// While an executor's loss reason is still pending, its tasks must not be re-offered;
// once the real reason arrives they become schedulable elsewhere.
test("tasks are not re-scheduled while executor loss reason is pending") {
  val taskScheduler = setupScheduler()
  val e0Offers = IndexedSeq(new WorkerOffer("executor0", "host0", 1))
  val e1Offers = IndexedSeq(new WorkerOffer("executor1", "host0", 1))
  val attempt1 = FakeTask.createTaskSet(1)
  // submit attempt 1, offer resources, task gets scheduled
  taskScheduler.submitTasks(attempt1)
  val taskDescriptions = taskScheduler.resourceOffers(e0Offers).flatten
  assert(1 === taskDescriptions.length)
  // mark executor0 as dead but pending fail reason
  taskScheduler.executorLost("executor0", LossReasonPending)
  // offer some more resources on a different executor, nothing should change
  val taskDescriptions2 = taskScheduler.resourceOffers(e1Offers).flatten
  assert(0 === taskDescriptions2.length)
  // provide the actual loss reason for executor0
  taskScheduler.executorLost("executor0", SlaveLost("oops"))
  // executor0's tasks should have failed now that the loss reason is known, so offering more
  // resources should make them be scheduled on the new executor.
  val taskDescriptions3 = taskScheduler.resourceOffers(e1Offers).flatten
  assert(1 === taskDescriptions3.length)
  assert("executor1" === taskDescriptions3(0).executorId)
  assert(!failedTaskSet)
}
// End-to-end blacklist behavior: node-, executor-, and task-level blacklist entries are
// honored during scheduling, and the tracker is only updated for successful task sets.
test("scheduled tasks obey task and stage blacklists") {
  taskScheduler = setupSchedulerWithMockTaskSetBlacklist()
  (0 to 2).foreach {stageId =>
    val taskSet = FakeTask.createTaskSet(numTasks = 2, stageId = stageId, stageAttemptId = 0)
    taskScheduler.submitTasks(taskSet)
  }
  // Setup our mock blacklist:
  // * stage 0 is blacklisted on node "host1"
  // * stage 1 is blacklisted on executor "executor3"
  // * stage 0, partition 0 is blacklisted on executor 0
  // (mocked methods default to returning false, ie. no blacklisting)
  when(stageToMockTaskSetBlacklist(0).isNodeBlacklistedForTaskSet("host1")).thenReturn(true)
  when(stageToMockTaskSetBlacklist(1).isExecutorBlacklistedForTaskSet("executor3"))
    .thenReturn(true)
  when(stageToMockTaskSetBlacklist(0).isExecutorBlacklistedForTask("executor0", 0))
    .thenReturn(true)
  val offers = IndexedSeq(
    new WorkerOffer("executor0", "host0", 1),
    new WorkerOffer("executor1", "host1", 1),
    new WorkerOffer("executor2", "host1", 1),
    new WorkerOffer("executor3", "host2", 10)
  )
  val firstTaskAttempts = taskScheduler.resourceOffers(offers).flatten
  // We should schedule all tasks.
  assert(firstTaskAttempts.size === 6)
  // Whenever we schedule a task, we must consult the node and executor blacklist.  (The test
  // doesn't check exactly what checks are made because the offers get shuffled.)
  (0 to 2).foreach { stageId =>
    verify(stageToMockTaskSetBlacklist(stageId), atLeast(1))
      .isNodeBlacklistedForTaskSet(anyString())
    verify(stageToMockTaskSetBlacklist(stageId), atLeast(1))
      .isExecutorBlacklistedForTaskSet(anyString())
  }
  def tasksForStage(stageId: Int): Seq[TaskDescription] = {
    firstTaskAttempts.filter{_.name.contains(s"stage $stageId")}
  }
  tasksForStage(0).foreach { task =>
    // executors 1 & 2 blacklisted for node
    // executor 0 blacklisted just for partition 0
    if (task.index == 0) {
      assert(task.executorId === "executor3")
    } else {
      assert(Set("executor0", "executor3").contains(task.executorId))
    }
  }
  tasksForStage(1).foreach { task =>
    // executor 3 blacklisted
    assert("executor3" != task.executorId)
  }
  // no restrictions on stage 2
  // Finally, just make sure that we can still complete tasks as usual with blacklisting
  // in effect.  Finish each of the tasksets -- taskset 0 & 1 complete successfully, taskset 2
  // fails.
  (0 to 2).foreach { stageId =>
    val tasks = tasksForStage(stageId)
    val tsm = taskScheduler.taskSetManagerForAttempt(stageId, 0).get
    val valueSer = SparkEnv.get.serializer.newInstance()
    if (stageId == 2) {
      // Just need to make one task fail 4 times.
      var task = tasks(0)
      val taskIndex = task.index
      (0 until 4).foreach { attempt =>
        assert(task.attemptNumber === attempt)
        tsm.handleFailedTask(task.taskId, TaskState.FAILED, TaskResultLost)
        val nextAttempts =
          taskScheduler.resourceOffers(IndexedSeq(WorkerOffer("executor4", "host4", 1))).flatten
        if (attempt < 3) {
          assert(nextAttempts.size === 1)
          task = nextAttempts(0)
          assert(task.index === taskIndex)
        } else {
          assert(nextAttempts.size === 0)
        }
      }
      // End the other task of the taskset, doesn't matter whether it succeeds or fails.
      val otherTask = tasks(1)
      val result = new DirectTaskResult[Int](valueSer.serialize(otherTask.taskId), Seq())
      tsm.handleSuccessfulTask(otherTask.taskId, result)
    } else {
      tasks.foreach { task =>
        val result = new DirectTaskResult[Int](valueSer.serialize(task.taskId), Seq())
        tsm.handleSuccessfulTask(task.taskId, result)
      }
    }
    assert(tsm.isZombie)
  }
  // the tasksSets complete, so the tracker should be notified of the successful ones
  verify(blacklist, times(1)).updateBlacklistForSuccessfulTaskSet(
    stageId = 0,
    stageAttemptId = 0,
    failuresByExec = stageToMockTaskSetBlacklist(0).execToFailures)
  verify(blacklist, times(1)).updateBlacklistForSuccessfulTaskSet(
    stageId = 1,
    stageAttemptId = 0,
    failuresByExec = stageToMockTaskSetBlacklist(1).execToFailures)
  // but we shouldn't update for the failed taskset
  verify(blacklist, never).updateBlacklistForSuccessfulTaskSet(
    stageId = meq(2),
    stageAttemptId = anyInt(),
    failuresByExec = anyObject())
}
test("scheduled tasks obey node and executor blacklists") {
taskScheduler = setupSchedulerWithMockTaskSetBlacklist()
(0 to 2).foreach { stageId =>
val taskSet = FakeTask.createTaskSet(numTasks = 2, stageId = stageId, stageAttemptId = 0)
taskScheduler.submitTasks(taskSet)
}
val offers = IndexedSeq(
new WorkerOffer("executor0", "host0", 1),
new WorkerOffer("executor1", "host1", 1),
new WorkerOffer("executor2", "host1", 1),
new WorkerOffer("executor3", "host2", 10),
new WorkerOffer("executor4", "host3", 1)
)
// setup our mock blacklist:
// host1, executor0 & executor3 are completely blacklisted
// This covers everything *except* one core on executor4 / host3, so that everything is still
// schedulable.
when(blacklist.isNodeBlacklisted("host1")).thenReturn(true)
when(blacklist.isExecutorBlacklisted("executor0")).thenReturn(true)
when(blacklist.isExecutorBlacklisted("executor3")).thenReturn(true)
val stageToTsm = (0 to 2).map { stageId =>
val tsm = taskScheduler.taskSetManagerForAttempt(stageId, 0).get
stageId -> tsm
}.toMap
val firstTaskAttempts = taskScheduler.resourceOffers(offers).flatten
firstTaskAttempts.foreach { task => logInfo(s"scheduled $task on ${task.executorId}") }
assert(firstTaskAttempts.size === 1)
assert(firstTaskAttempts.head.executorId === "executor4")
('0' until '2').foreach { hostNum =>
verify(blacklist, atLeast(1)).isNodeBlacklisted("host" + hostNum)
}
}
  test("abort stage when all executors are blacklisted and we cannot acquire new executor") {
    taskScheduler = setupSchedulerWithMockTaskSetBlacklist()
    val taskSet = FakeTask.createTaskSet(numTasks = 10, stageAttemptId = 0)
    taskScheduler.submitTasks(taskSet)
    val tsm = stageToMockTaskSetManager(0)
    // first just submit some offers so the scheduler knows about all the executors
    taskScheduler.resourceOffers(IndexedSeq(
      WorkerOffer("executor0", "host0", 2),
      WorkerOffer("executor1", "host0", 2),
      WorkerOffer("executor2", "host0", 2),
      WorkerOffer("executor3", "host1", 2)
    ))
    // now say our blacklist updates to blacklist a bunch of resources, but *not* everything
    when(blacklist.isNodeBlacklisted("host1")).thenReturn(true)
    when(blacklist.isExecutorBlacklisted("executor0")).thenReturn(true)
    // make an offer on the blacklisted resources. We won't schedule anything, but also won't
    // abort yet, since we know of other resources that work
    assert(taskScheduler.resourceOffers(IndexedSeq(
      WorkerOffer("executor0", "host0", 2),
      WorkerOffer("executor3", "host1", 2)
    )).flatten.size === 0)
    // the taskset must still be alive: executor1/executor2 are known and not blacklisted yet
    assert(!tsm.isZombie)
    // now update the blacklist so that everything really is blacklisted
    when(blacklist.isExecutorBlacklisted("executor1")).thenReturn(true)
    when(blacklist.isExecutorBlacklisted("executor2")).thenReturn(true)
    // the same offers, now with no healthy executor anywhere -> the stage must be aborted
    assert(taskScheduler.resourceOffers(IndexedSeq(
      WorkerOffer("executor0", "host0", 2),
      WorkerOffer("executor3", "host1", 2)
    )).flatten.size === 0)
    assert(tsm.isZombie)
    // abort() should have been invoked exactly once on the manager (Mockito default: times(1))
    verify(tsm).abort(anyString(), anyObject())
  }
  test("SPARK-22148 abort timer should kick in when task is completely blacklisted & no new " +
    "executor can be acquired") {
    // set the abort timer to fail immediately
    taskScheduler = setupSchedulerWithMockTaskSetBlacklist(
      config.UNSCHEDULABLE_TASKSET_TIMEOUT.key -> "0")

    // We have only 1 task remaining with 1 executor
    val taskSet = FakeTask.createTaskSet(numTasks = 1, stageAttemptId = 0)
    taskScheduler.submitTasks(taskSet)
    val tsm = stageToMockTaskSetManager(0)

    // submit an offer with one executor
    val firstTaskAttempts = taskScheduler.resourceOffers(IndexedSeq(
      WorkerOffer("executor0", "host0", 1)
    )).flatten

    // Fail the running task
    val failedTask = firstTaskAttempts.find(_.executorId == "executor0").get
    taskScheduler.statusUpdate(failedTask.taskId, TaskState.FAILED, ByteBuffer.allocate(0))
    // we explicitly call the handleFailedTask method here to avoid adding a sleep in the test suite
    // Reason being - handleFailedTask is run by an executor service and there is a momentary delay
    // before it is launched and this fails the assertion check.
    tsm.handleFailedTask(failedTask.taskId, TaskState.FAILED, UnknownReason)
    // After the failure, pretend the (only) executor is now blacklisted for this task, so the
    // task has nowhere left to run.
    when(tsm.taskSetBlacklistHelperOpt.get.isExecutorBlacklistedForTask(
      "executor0", failedTask.index)).thenReturn(true)

    // make an offer on the blacklisted executor.  We won't schedule anything, and set the abort
    // timer to kick in immediately
    assert(taskScheduler.resourceOffers(IndexedSeq(
      WorkerOffer("executor0", "host0", 1)
    )).flatten.size === 0)
    // Wait for the abort timer to kick in. Even though we configure the timeout to be 0, there is a
    // slight delay as the abort timer is launched in a separate thread.
    eventually(timeout(500.milliseconds)) {
      assert(tsm.isZombie)
    }
  }
  test("SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor") {
    // Non-zero timeout: the unschedulable taskset gets a grace period instead of aborting.
    taskScheduler = setupSchedulerWithMockTaskSetBlacklist(
      config.UNSCHEDULABLE_TASKSET_TIMEOUT.key -> "10")

    // We have only 1 task remaining with 1 executor
    val taskSet = FakeTask.createTaskSet(numTasks = 1, stageAttemptId = 0)
    taskScheduler.submitTasks(taskSet)
    val tsm = stageToMockTaskSetManager(0)

    // submit an offer with one executor
    val firstTaskAttempts = taskScheduler.resourceOffers(IndexedSeq(
      WorkerOffer("executor0", "host0", 1)
    )).flatten

    // Fail the running task
    val failedTask = firstTaskAttempts.head
    taskScheduler.statusUpdate(failedTask.taskId, TaskState.FAILED, ByteBuffer.allocate(0))
    // we explicitly call the handleFailedTask method here to avoid adding a sleep in the test suite
    // Reason being - handleFailedTask is run by an executor service and there is a momentary delay
    // before it is launched and this fails the assertion check.
    tsm.handleFailedTask(failedTask.taskId, TaskState.FAILED, UnknownReason)
    when(tsm.taskSetBlacklistHelperOpt.get.isExecutorBlacklistedForTask(
      "executor0", failedTask.index)).thenReturn(true)

    // make an offer on the blacklisted executor.  We won't schedule anything, and set the abort
    // timer to expire if no new executors could be acquired. We kill the existing idle blacklisted
    // executor and try to acquire a new one.
    assert(taskScheduler.resourceOffers(IndexedSeq(
      WorkerOffer("executor0", "host0", 1)
    )).flatten.size === 0)
    // the taskset is registered as unschedulable (abort timer armed) but not yet aborted
    assert(taskScheduler.unschedulableTaskSetToExpiryTime.contains(tsm))
    assert(!tsm.isZombie)

    // Offer a new executor which should be accepted
    assert(taskScheduler.resourceOffers(IndexedSeq(
      WorkerOffer("executor1", "host0", 1)
    )).flatten.size === 1)
    // scheduling the task clears the pending abort state
    assert(taskScheduler.unschedulableTaskSetToExpiryTime.isEmpty)
    assert(!tsm.isZombie)
  }
  // This is to test a scenario where we have two taskSets completely blacklisted and on acquiring
  // a new executor we don't want the abort timer for the second taskSet to expire and abort the job
  test("SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets") {
    taskScheduler = setupSchedulerWithMockTaskSetBlacklist()

    // We have 2 taskSets with 1 task remaining in each with 1 executor completely blacklisted
    val taskSet1 = FakeTask.createTaskSet(numTasks = 1, stageId = 0, stageAttemptId = 0)
    taskScheduler.submitTasks(taskSet1)
    val taskSet2 = FakeTask.createTaskSet(numTasks = 1, stageId = 1, stageAttemptId = 0)
    taskScheduler.submitTasks(taskSet2)
    val tsm = stageToMockTaskSetManager(0)

    // submit an offer with one executor
    val firstTaskAttempts = taskScheduler.resourceOffers(IndexedSeq(
      WorkerOffer("executor0", "host0", 1)
    )).flatten

    assert(taskScheduler.unschedulableTaskSetToExpiryTime.isEmpty)

    // Fail the running task
    val failedTask = firstTaskAttempts.head
    taskScheduler.statusUpdate(failedTask.taskId, TaskState.FAILED, ByteBuffer.allocate(0))
    tsm.handleFailedTask(failedTask.taskId, TaskState.FAILED, UnknownReason)
    when(tsm.taskSetBlacklistHelperOpt.get.isExecutorBlacklistedForTask(
      "executor0", failedTask.index)).thenReturn(true)

    // make an offer. We will schedule the task from the second taskSet. Since a task was scheduled
    // we do not kick off the abort timer for taskSet1
    val secondTaskAttempts = taskScheduler.resourceOffers(IndexedSeq(
      WorkerOffer("executor0", "host0", 1)
    )).flatten

    assert(taskScheduler.unschedulableTaskSetToExpiryTime.isEmpty)

    // Now fail taskSet2's task and blacklist executor0 for it too, so both tasksets are
    // completely unschedulable.
    val tsm2 = stageToMockTaskSetManager(1)
    val failedTask2 = secondTaskAttempts.head
    taskScheduler.statusUpdate(failedTask2.taskId, TaskState.FAILED, ByteBuffer.allocate(0))
    tsm2.handleFailedTask(failedTask2.taskId, TaskState.FAILED, UnknownReason)
    when(tsm2.taskSetBlacklistHelperOpt.get.isExecutorBlacklistedForTask(
      "executor0", failedTask2.index)).thenReturn(true)

    // make an offer on the blacklisted executor.  We won't schedule anything, and set the abort
    // timer for taskSet1 and taskSet2
    assert(taskScheduler.resourceOffers(IndexedSeq(
      WorkerOffer("executor0", "host0", 1)
    )).flatten.size === 0)

    assert(taskScheduler.unschedulableTaskSetToExpiryTime.contains(tsm))
    assert(taskScheduler.unschedulableTaskSetToExpiryTime.contains(tsm2))
    assert(taskScheduler.unschedulableTaskSetToExpiryTime.size == 2)

    // Offer a new executor which should be accepted
    assert(taskScheduler.resourceOffers(IndexedSeq(
      WorkerOffer("executor1", "host1", 1)
    )).flatten.size === 1)

    // Check if all the taskSets are cleared
    assert(taskScheduler.unschedulableTaskSetToExpiryTime.isEmpty)

    assert(!tsm.isZombie)
  }
  // this test is to check that we don't abort a taskSet which is not being scheduled on other
  // executors as it is waiting on locality timeout and not being aborted because it is still not
  // completely blacklisted.
  test("SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely blacklisted") {
    taskScheduler = setupSchedulerWithMockTaskSetBlacklist(
      config.UNSCHEDULABLE_TASKSET_TIMEOUT.key -> "0",
      // This is to avoid any potential flakiness in the test because of large pauses in jenkins
      config.LOCALITY_WAIT.key -> "30s"
    )

    // A single task that prefers to run on executor0/host0 (PROCESS_LOCAL there).
    val preferredLocation = Seq(ExecutorCacheTaskLocation("host0", "executor0"))
    val taskSet1 = FakeTask.createTaskSet(numTasks = 1, stageId = 0, stageAttemptId = 0,
      preferredLocation)
    taskScheduler.submitTasks(taskSet1)

    val tsm = stageToMockTaskSetManager(0)

    // submit an offer with one executor
    var taskAttempts = taskScheduler.resourceOffers(IndexedSeq(
      WorkerOffer("executor0", "host0", 1)
    )).flatten

    // Fail the running task
    val failedTask = taskAttempts.head
    taskScheduler.statusUpdate(failedTask.taskId, TaskState.FAILED, ByteBuffer.allocate(0))
    tsm.handleFailedTask(failedTask.taskId, TaskState.FAILED, UnknownReason)
    // blacklist only the preferred executor for this task -- executor1 would still be usable,
    // so the taskset is not *completely* blacklisted
    when(tsm.taskSetBlacklistHelperOpt.get.isExecutorBlacklistedForTask(
      "executor0", failedTask.index)).thenReturn(true)

    // make an offer but we won't schedule anything yet as scheduler locality is still PROCESS_LOCAL
    assert(taskScheduler.resourceOffers(IndexedSeq(
      WorkerOffer("executor1", "host0", 1)
    )).flatten.isEmpty)

    // no abort timer should be armed, and the taskset must stay alive
    assert(taskScheduler.unschedulableTaskSetToExpiryTime.isEmpty)

    assert(!tsm.isZombie)
  }
  /**
   * Helper for performance tests.  Takes the explicitly blacklisted nodes and executors; verifies
   * that the blacklists are used efficiently to ensure scheduling is not O(numPendingTasks).
   * Creates 1 offer on executor[1-3].  Executor1 & 2 are on host1, executor3 is on host2.  Passed
   * in nodes and executors should be on that list.
   *
   * Note: this is called at suite-construction time and registers 10 test cases per invocation
   * (one per shuffle iteration); it does not itself run any scheduling.
   */
  private def testBlacklistPerformance(
      testName: String,
      nodeBlacklist: Seq[String],
      execBlacklist: Seq[String]): Unit = {
    // Because scheduling involves shuffling the order of offers around, we run this test a few
    // times to cover more possibilities.  There are only 3 offers, which means 6 permutations,
    // so 10 iterations is pretty good.
    (0 until 10).foreach { testItr =>
      test(s"$testName: iteration $testItr") {
        // When an executor or node is blacklisted, we want to make sure that we don't try
        // scheduling each pending task, one by one, to discover they are all blacklisted.  This is
        // important for performance -- if we did check each task one-by-one, then responding to a
        // resource offer (which is usually O(1)-ish) would become O(numPendingTasks), which would
        // slow down scheduler throughput and slow down scheduling even on healthy executors.
        // Here, we check a proxy for the runtime -- we make sure the scheduling is short-circuited
        // at the node or executor blacklist, so we never check the per-task blacklist.  We also
        // make sure we don't check the node & executor blacklist for the entire taskset
        // O(numPendingTasks) times.

        taskScheduler = setupSchedulerWithMockTaskSetBlacklist()
        // we schedule 500 tasks so we can clearly distinguish anything that is O(numPendingTasks)
        val taskSet = FakeTask.createTaskSet(numTasks = 500, stageId = 0, stageAttemptId = 0)
        taskScheduler.submitTasks(taskSet)

        val offers = IndexedSeq(
          new WorkerOffer("executor1", "host1", 1),
          new WorkerOffer("executor2", "host1", 1),
          new WorkerOffer("executor3", "host2", 1)
        )
        // We should check the node & exec blacklists, but only O(numOffers), not O(numPendingTasks)
        // times.  In the worst case, after shuffling, we offer our blacklisted resource first, and
        // then offer other resources which do get used.  The taskset blacklist is consulted
        // repeatedly as we offer resources to the taskset -- each iteration either schedules
        // something, or it terminates that locality level, so the maximum number of checks is
        // numCores + numLocalityLevels
        val numCoresOnAllOffers = offers.map(_.cores).sum
        val numLocalityLevels = TaskLocality.values.size
        val maxBlacklistChecks = numCoresOnAllOffers + numLocalityLevels

        // Setup the blacklist
        nodeBlacklist.foreach { node =>
          when(stageToMockTaskSetBlacklist(0).isNodeBlacklistedForTaskSet(node)).thenReturn(true)
        }
        execBlacklist.foreach { exec =>
          when(stageToMockTaskSetBlacklist(0).isExecutorBlacklistedForTaskSet(exec))
            .thenReturn(true)
        }

        // Figure out which nodes have any effective blacklisting on them.  This means all nodes
        // that are explicitly blacklisted, plus those that have *any* executors blacklisted.
        val nodesForBlacklistedExecutors = offers.filter { offer =>
          execBlacklist.contains(offer.executorId)
        }.map(_.host).toSet.toSeq
        val nodesWithAnyBlacklisting = (nodeBlacklist ++ nodesForBlacklistedExecutors).toSet
        // Similarly, figure out which executors have any blacklisting.  This means all executors
        // that are explicitly blacklisted, plus all executors on nodes that are blacklisted.
        val execsForBlacklistedNodes = offers.filter { offer =>
          nodeBlacklist.contains(offer.host)
        }.map(_.executorId).toSeq
        val executorsWithAnyBlacklisting = (execBlacklist ++ execsForBlacklistedNodes).toSet

        // Schedule a taskset, and make sure our test setup is correct -- we are able to schedule
        // a task on all executors that aren't blacklisted (whether that executor is a explicitly
        // blacklisted, or implicitly blacklisted via the node blacklist).
        val firstTaskAttempts = taskScheduler.resourceOffers(offers).flatten
        assert(firstTaskAttempts.size === offers.size - executorsWithAnyBlacklisting.size)

        // Now check that we haven't made too many calls to any of the blacklist methods.
        // We should be checking our node blacklist, but it should be within the bound we defined
        // above.
        verify(stageToMockTaskSetBlacklist(0), atMost(maxBlacklistChecks))
          .isNodeBlacklistedForTaskSet(anyString())
        // We shouldn't ever consult the per-task blacklist for the nodes that have been blacklisted
        // for the entire taskset, since the taskset level blacklisting should prevent scheduling
        // from ever looking at specific tasks.
        nodesWithAnyBlacklisting.foreach { node =>
          verify(stageToMockTaskSetBlacklist(0), never)
            .isNodeBlacklistedForTask(meq(node), anyInt())
        }
        executorsWithAnyBlacklisting.foreach { exec =>
          // We should be checking our executor blacklist, but it should be within the bound defined
          // above.  Its possible that this will be significantly fewer calls, maybe even 0, if
          // there is also a node-blacklist which takes effect first.  But this assert is all we
          // need to avoid an O(numPendingTask) slowdown.
          verify(stageToMockTaskSetBlacklist(0), atMost(maxBlacklistChecks))
            .isExecutorBlacklistedForTaskSet(exec)
          // We shouldn't ever consult the per-task blacklist for executors that have been
          // blacklisted for the entire taskset, since the taskset level blacklisting should prevent
          // scheduling from ever looking at specific tasks.
          verify(stageToMockTaskSetBlacklist(0), never)
            .isExecutorBlacklistedForTask(meq(exec), anyInt())
        }
      }
    }
  }
testBlacklistPerformance(
testName = "Blacklisted node for entire task set prevents per-task blacklist checks",
nodeBlacklist = Seq("host1"),
execBlacklist = Seq())
testBlacklistPerformance(
testName = "Blacklisted executor for entire task set prevents per-task blacklist checks",
nodeBlacklist = Seq(),
execBlacklist = Seq("executor3")
)
  test("abort stage if executor loss results in unschedulability from previously failed tasks") {
    // Make sure we can detect when a taskset becomes unschedulable from a blacklisting.  This
    // test explores a particular corner case -- you may have one task fail, but still be
    // schedulable on another executor.  However, that executor may fail later on, leaving the
    // first task with no place to run.
    val taskScheduler = setupScheduler(
      config.BLACKLIST_ENABLED.key -> "true"
    )

    val taskSet = FakeTask.createTaskSet(2)
    taskScheduler.submitTasks(taskSet)
    val tsm = taskScheduler.taskSetManagerForAttempt(taskSet.stageId, taskSet.stageAttemptId).get

    val firstTaskAttempts = taskScheduler.resourceOffers(IndexedSeq(
      new WorkerOffer("executor0", "host0", 1),
      new WorkerOffer("executor1", "host1", 1)
    )).flatten
    // one task should land on each executor
    assert(Set("executor0", "executor1") === firstTaskAttempts.map(_.executorId).toSet)

    // Fail one of the tasks, but leave the other running.
    val failedTask = firstTaskAttempts.find(_.executorId == "executor0").get
    taskScheduler.handleFailedTask(tsm, failedTask.taskId, TaskState.FAILED, TaskResultLost)
    // At this point, our failed task could run on the other executor, so don't give up the task
    // set yet.
    assert(!failedTaskSet)

    // Now we fail our second executor.  The other task can still run on executor1, so make an offer
    // on that executor, and make sure that the other task (not the failed one) is assigned there.
    taskScheduler.executorLost("executor1", SlaveLost("oops"))
    val nextTaskAttempts =
      taskScheduler.resourceOffers(IndexedSeq(new WorkerOffer("executor0", "host0", 1))).flatten
    // Note: Its OK if some future change makes this already realize the taskset has become
    // unschedulable at this point (though in the current implementation, we're sure it will not).
    assert(nextTaskAttempts.size === 1)
    assert(nextTaskAttempts.head.executorId === "executor0")
    assert(nextTaskAttempts.head.attemptNumber === 1)
    assert(nextTaskAttempts.head.index != failedTask.index)

    // Now we should definitely realize that our task set is unschedulable, because the only
    // task left can't be scheduled on any executors due to the blacklist.
    taskScheduler.resourceOffers(IndexedSeq(new WorkerOffer("executor0", "host0", 1)))
    sc.listenerBus.waitUntilEmpty(100000)
    assert(tsm.isZombie)
    assert(failedTaskSet)
    val idx = failedTask.index
    // the abort reason must name the exact unschedulable task and its most recent failure
    assert(failedTaskSetReason === s"""
      |Aborting $taskSet because task $idx (partition $idx)
      |cannot run anywhere due to node and executor blacklist.
      |Most recent failure:
      |${tsm.taskSetBlacklistHelperOpt.get.getLatestFailureReason}
      |
      |Blacklisting behavior can be configured via spark.blacklist.*.
      |""".stripMargin)
  }
  test("don't abort if there is an executor available, though it hasn't had scheduled tasks yet") {
    // interaction of SPARK-15865 & SPARK-16106
    // if we have a small number of tasks, we might be able to schedule them all on the first
    // executor.  But if those tasks fail, we should still realize there is another executor
    // available and not bail on the job

    val taskScheduler = setupScheduler(
      config.BLACKLIST_ENABLED.key -> "true"
    )

    // both tasks prefer host0, so with delay scheduling they will initially pile onto executor0
    val taskSet = FakeTask.createTaskSet(2, (0 until 2).map { _ => Seq(TaskLocation("host0")) }: _*)
    taskScheduler.submitTasks(taskSet)
    val tsm = taskScheduler.taskSetManagerForAttempt(taskSet.stageId, taskSet.stageAttemptId).get

    val offers = IndexedSeq(
      // each offer has more than enough free cores for the entire task set, so when combined
      // with the locality preferences, we schedule all tasks on one executor
      new WorkerOffer("executor0", "host0", 4),
      new WorkerOffer("executor1", "host1", 4)
    )
    val firstTaskAttempts = taskScheduler.resourceOffers(offers).flatten
    assert(firstTaskAttempts.size == 2)
    firstTaskAttempts.foreach { taskAttempt => assert("executor0" === taskAttempt.executorId) }

    // fail all the tasks on the bad executor
    firstTaskAttempts.foreach { taskAttempt =>
      taskScheduler.handleFailedTask(tsm, taskAttempt.taskId, TaskState.FAILED, TaskResultLost)
    }

    // Here is the main check of this test -- we have the same offers again, and we schedule it
    // successfully.  Because the scheduler first tries to schedule with locality in mind, at first
    // it won't schedule anything on executor1.  But despite that, we don't abort the job.  Then the
    // scheduler tries for ANY locality, and successfully schedules tasks on executor1.
    val secondTaskAttempts = taskScheduler.resourceOffers(offers).flatten
    assert(secondTaskAttempts.size == 2)
    secondTaskAttempts.foreach { taskAttempt => assert("executor1" === taskAttempt.executorId) }
    assert(!failedTaskSet)
  }
  test("SPARK-16106 locality levels updated if executor added to existing host") {
    val taskScheduler = setupScheduler()

    // both tasks prefer executor2 on host0, which the scheduler does not know about yet
    taskScheduler.submitTasks(FakeTask.createTaskSet(2, 0,
      (0 until 2).map { _ => Seq(TaskLocation("host0", "executor2")) }: _*
    ))

    val taskDescs = taskScheduler.resourceOffers(IndexedSeq(
      new WorkerOffer("executor0", "host0", 1),
      new WorkerOffer("executor1", "host1", 1)
    )).flatten
    // only schedule one task because of locality
    assert(taskDescs.size === 1)

    val mgr = Option(taskScheduler.taskIdToTaskSetManager.get(taskDescs(0).taskId)).get
    // executor2 is unknown, so PROCESS_LOCAL is not yet a valid level for this taskset
    assert(mgr.myLocalityLevels.toSet === Set(TaskLocality.NODE_LOCAL, TaskLocality.ANY))
    // we should know about both executors, even though we only scheduled tasks on one of them
    assert(taskScheduler.getExecutorsAliveOnHost("host0") === Some(Set("executor0")))
    assert(taskScheduler.getExecutorsAliveOnHost("host1") === Some(Set("executor1")))

    // when executor2 is added, we should realize that we can run process-local tasks.
    // And we should know its alive on the host.
    val secondTaskDescs = taskScheduler.resourceOffers(
      IndexedSeq(new WorkerOffer("executor2", "host0", 1))).flatten
    assert(secondTaskDescs.size === 1)
    assert(mgr.myLocalityLevels.toSet ===
      Set(TaskLocality.PROCESS_LOCAL, TaskLocality.NODE_LOCAL, TaskLocality.ANY))
    assert(taskScheduler.getExecutorsAliveOnHost("host0") === Some(Set("executor0", "executor2")))
    assert(taskScheduler.getExecutorsAliveOnHost("host1") === Some(Set("executor1")))

    // And even if we don't have anything left to schedule, another resource offer on yet another
    // executor should also update the set of live executors
    val thirdTaskDescs = taskScheduler.resourceOffers(
      IndexedSeq(new WorkerOffer("executor3", "host1", 1))).flatten
    assert(thirdTaskDescs.size === 0)
    assert(taskScheduler.getExecutorsAliveOnHost("host1") === Some(Set("executor1", "executor3")))
  }
test("scheduler checks for executors that can be expired from blacklist") {
taskScheduler = setupScheduler()
taskScheduler.submitTasks(FakeTask.createTaskSet(1, 0))
taskScheduler.resourceOffers(IndexedSeq(
new WorkerOffer("executor0", "host0", 1)
)).flatten
verify(blacklist).applyBlacklistTimeout()
}
test("if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)") {
sc = new SparkContext("local", "TaskSchedulerImplSuite")
val taskScheduler = new TaskSchedulerImpl(sc)
taskScheduler.initialize(new FakeSchedulerBackend)
// Need to initialize a DAGScheduler for the taskScheduler to use for callbacks.
new DAGScheduler(sc, taskScheduler) {
override def taskStarted(task: Task[_], taskInfo: TaskInfo) {}
override def executorAdded(execId: String, host: String) {}
}
val e0Offers = IndexedSeq(WorkerOffer("executor0", "host0", 1))
val attempt1 = FakeTask.createTaskSet(1)
// submit attempt 1, offer resources, task gets scheduled
taskScheduler.submitTasks(attempt1)
val taskDescriptions = taskScheduler.resourceOffers(e0Offers).flatten
assert(1 === taskDescriptions.length)
// mark executor0 as dead
taskScheduler.executorLost("executor0", SlaveLost())
assert(!taskScheduler.isExecutorAlive("executor0"))
assert(!taskScheduler.hasExecutorsAliveOnHost("host0"))
assert(taskScheduler.getExecutorsAliveOnHost("host0").isEmpty)
// Check that state associated with the lost task attempt is cleaned up:
assert(taskScheduler.taskIdToExecutorId.isEmpty)
assert(taskScheduler.taskIdToTaskSetManager.isEmpty)
assert(taskScheduler.runningTasksByExecutors.get("executor0").isEmpty)
}
test("if a task finishes with TaskState.LOST its executor is marked as dead") {
sc = new SparkContext("local", "TaskSchedulerImplSuite")
val taskScheduler = new TaskSchedulerImpl(sc)
taskScheduler.initialize(new FakeSchedulerBackend)
// Need to initialize a DAGScheduler for the taskScheduler to use for callbacks.
new DAGScheduler(sc, taskScheduler) {
override def taskStarted(task: Task[_], taskInfo: TaskInfo) {}
override def executorAdded(execId: String, host: String) {}
}
val e0Offers = IndexedSeq(WorkerOffer("executor0", "host0", 1))
val attempt1 = FakeTask.createTaskSet(1)
// submit attempt 1, offer resources, task gets scheduled
taskScheduler.submitTasks(attempt1)
val taskDescriptions = taskScheduler.resourceOffers(e0Offers).flatten
assert(1 === taskDescriptions.length)
// Report the task as failed with TaskState.LOST
taskScheduler.statusUpdate(
tid = taskDescriptions.head.taskId,
state = TaskState.LOST,
serializedData = ByteBuffer.allocate(0)
)
// Check that state associated with the lost task attempt is cleaned up:
assert(taskScheduler.taskIdToExecutorId.isEmpty)
assert(taskScheduler.taskIdToTaskSetManager.isEmpty)
assert(taskScheduler.runningTasksByExecutors.get("executor0").isEmpty)
// Check that the executor has been marked as dead
assert(!taskScheduler.isExecutorAlive("executor0"))
assert(!taskScheduler.hasExecutorsAliveOnHost("host0"))
assert(taskScheduler.getExecutorsAliveOnHost("host0").isEmpty)
}
test("Locality should be used for bulk offers even with delay scheduling off") {
val conf = new SparkConf()
.set("spark.locality.wait", "0")
sc = new SparkContext("local", "TaskSchedulerImplSuite", conf)
// we create a manual clock just so we can be sure the clock doesn't advance at all in this test
val clock = new ManualClock()
// We customize the task scheduler just to let us control the way offers are shuffled, so we
// can be sure we try both permutations, and to control the clock on the tasksetmanager.
val taskScheduler = new TaskSchedulerImpl(sc) {
override def shuffleOffers(offers: IndexedSeq[WorkerOffer]): IndexedSeq[WorkerOffer] = {
// Don't shuffle the offers around for this test. Instead, we'll just pass in all
// the permutations we care about directly.
offers
}
override def createTaskSetManager(taskSet: TaskSet, maxTaskFailures: Int): TaskSetManager = {
new TaskSetManager(this, taskSet, maxTaskFailures, blacklistTrackerOpt, clock)
}
}
// Need to initialize a DAGScheduler for the taskScheduler to use for callbacks.
new DAGScheduler(sc, taskScheduler) {
override def taskStarted(task: Task[_], taskInfo: TaskInfo) {}
override def executorAdded(execId: String, host: String) {}
}
taskScheduler.initialize(new FakeSchedulerBackend)
// Make two different offers -- one in the preferred location, one that is not.
val offers = IndexedSeq(
WorkerOffer("exec1", "host1", 1),
WorkerOffer("exec2", "host2", 1)
)
Seq(false, true).foreach { swapOrder =>
// Submit a taskset with locality preferences.
val taskSet = FakeTask.createTaskSet(
1, stageId = 1, stageAttemptId = 0, Seq(TaskLocation("host1", "exec1")))
taskScheduler.submitTasks(taskSet)
val shuffledOffers = if (swapOrder) offers.reverse else offers
// Regardless of the order of the offers (after the task scheduler shuffles them), we should
// always take advantage of the local offer.
val taskDescs = taskScheduler.resourceOffers(shuffledOffers).flatten
withClue(s"swapOrder = $swapOrder") {
assert(taskDescs.size === 1)
assert(taskDescs.head.executorId === "exec1")
}
}
}
test("With delay scheduling off, tasks can be run at any locality level immediately") {
val conf = new SparkConf()
.set("spark.locality.wait", "0")
sc = new SparkContext("local", "TaskSchedulerImplSuite", conf)
// we create a manual clock just so we can be sure the clock doesn't advance at all in this test
val clock = new ManualClock()
val taskScheduler = new TaskSchedulerImpl(sc) {
override def createTaskSetManager(taskSet: TaskSet, maxTaskFailures: Int): TaskSetManager = {
new TaskSetManager(this, taskSet, maxTaskFailures, blacklistTrackerOpt, clock)
}
}
// Need to initialize a DAGScheduler for the taskScheduler to use for callbacks.
new DAGScheduler(sc, taskScheduler) {
override def taskStarted(task: Task[_], taskInfo: TaskInfo) {}
override def executorAdded(execId: String, host: String) {}
}
taskScheduler.initialize(new FakeSchedulerBackend)
// make an offer on the preferred host so the scheduler knows its alive. This is necessary
// so that the taskset knows that it *could* take advantage of locality.
taskScheduler.resourceOffers(IndexedSeq(WorkerOffer("exec1", "host1", 1)))
// Submit a taskset with locality preferences.
val taskSet = FakeTask.createTaskSet(
1, stageId = 1, stageAttemptId = 0, Seq(TaskLocation("host1", "exec1")))
taskScheduler.submitTasks(taskSet)
val tsm = taskScheduler.taskSetManagerForAttempt(1, 0).get
// make sure we've setup our test correctly, so that the taskset knows it *could* use local
// offers.
assert(tsm.myLocalityLevels.contains(TaskLocality.NODE_LOCAL))
// make an offer on a non-preferred location. Since the delay is 0, we should still schedule
// immediately.
val taskDescs =
taskScheduler.resourceOffers(IndexedSeq(WorkerOffer("exec2", "host2", 1))).flatten
assert(taskDescs.size === 1)
assert(taskDescs.head.executorId === "exec2")
}
test("TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported") {
intercept[IllegalArgumentException] {
val taskScheduler = setupScheduler(
TaskSchedulerImpl.SCHEDULER_MODE_PROPERTY -> SchedulingMode.NONE.toString)
taskScheduler.initialize(new FakeSchedulerBackend)
}
}
  test("Completions in zombie tasksets update status of non-zombie taskset") {
    val taskScheduler = setupSchedulerWithMockTaskSetBlacklist()
    val valueSer = SparkEnv.get.serializer.newInstance()
    // Helper: marks the task computing `partition` in the given taskset manager as
    // successfully finished (with a dummy Int result).
    def completeTaskSuccessfully(tsm: TaskSetManager, partition: Int): Unit = {
      val indexInTsm = tsm.partitionToIndex(partition)
      val matchingTaskInfo = tsm.taskAttempts.flatten.filter(_.index == indexInTsm).head
      val result = new DirectTaskResult[Int](valueSer.serialize(1), Seq())
      tsm.handleSuccessfulTask(matchingTaskInfo.taskId, result)
    }
    // Submit a task set, have it fail with a fetch failed, and then re-submit the task attempt,
    // two times, so we have three active task sets for one stage. (For this to really happen,
    // you'd need the previous stage to also get restarted, and then succeed, in between each
    // attempt, but that happens outside what we're mocking here.)
    val zombieAttempts = (0 until 2).map { stageAttempt =>
      val attempt = FakeTask.createTaskSet(10, stageAttemptId = stageAttempt)
      taskScheduler.submitTasks(attempt)
      val tsm = taskScheduler.taskSetManagerForAttempt(0, stageAttempt).get
      val offers = (0 until 10).map{ idx => WorkerOffer(s"exec-$idx", s"host-$idx", 1) }
      taskScheduler.resourceOffers(offers)
      assert(tsm.runningTasks === 10)
      // fail attempt
      tsm.handleFailedTask(tsm.taskAttempts.head.head.taskId, TaskState.FAILED,
        FetchFailed(null, 0, 0, 0, "fetch failed"))
      // the attempt is a zombie, but the tasks are still running (this could be true even if
      // we actively killed those tasks, as killing is best-effort)
      assert(tsm.isZombie)
      assert(tsm.runningTasks === 9)
      tsm
    }
    // we've now got 2 zombie attempts, each with 9 tasks still active.  Submit the 3rd attempt for
    // the stage, but this time with insufficient resources so not all tasks are active.
    val finalAttempt = FakeTask.createTaskSet(10, stageAttemptId = 2)
    taskScheduler.submitTasks(finalAttempt)
    val finalTsm = taskScheduler.taskSetManagerForAttempt(0, 2).get
    // Only 5 offers, so only 5 of the 10 tasks of the final attempt can start running.
    val offers = (0 until 5).map{ idx => WorkerOffer(s"exec-$idx", s"host-$idx", 1) }
    val finalAttemptLaunchedPartitions = taskScheduler.resourceOffers(offers).flatten.map { task =>
      finalAttempt.tasks(task.index).partitionId
    }.toSet
    assert(finalTsm.runningTasks === 5)
    assert(!finalTsm.isZombie)
    // We simulate late completions from our zombie tasksets, corresponding to all the pending
    // partitions in our final attempt.  This means we're only waiting on the tasks we've already
    // launched.
    val finalAttemptPendingPartitions = (0 until 10).toSet.diff(finalAttemptLaunchedPartitions)
    finalAttemptPendingPartitions.foreach { partition =>
      completeTaskSuccessfully(zombieAttempts(0), partition)
    }
    // If there is another resource offer, we shouldn't run anything.  Though our final attempt
    // used to have pending tasks, now those tasks have been completed by zombie attempts.  The
    // remaining tasks to compute are already active in the non-zombie attempt.
    assert(
      taskScheduler.resourceOffers(IndexedSeq(WorkerOffer("exec-1", "host-1", 1))).flatten.isEmpty)
    val remainingTasks = finalAttemptLaunchedPartitions.toIndexedSeq.sorted
    // finally, if we finish the remaining partitions from a mix of tasksets, all attempts should be
    // marked as zombie.
    // for each of the remaining tasks, find the tasksets with an active copy of the task, and
    // finish the task.
    remainingTasks.foreach { partition =>
      val tsm = if (partition == 0) {
        // we failed this task on both zombie attempts, this one is only present in the latest
        // taskset
        finalTsm
      } else {
        // should be active in every taskset.  We choose a zombie taskset just to make sure that
        // we transition the active taskset correctly even if the final completion comes
        // from a zombie.
        zombieAttempts(partition % 2)
      }
      completeTaskSuccessfully(tsm, partition)
    }
    assert(finalTsm.isZombie)
    // no taskset has completed all of its tasks, so no updates to the blacklist tracker yet
    verify(blacklist, never).updateBlacklistForSuccessfulTaskSet(anyInt(), anyInt(), anyObject())
    // finally, lets complete all the tasks.  We simulate failures in attempt 1, but everything
    // else succeeds, to make sure we get the right updates to the blacklist in all cases.
    (zombieAttempts ++ Seq(finalTsm)).foreach { tsm =>
      val stageAttempt = tsm.taskSet.stageAttemptId
      tsm.runningTasksSet.foreach { index =>
        if (stageAttempt == 1) {
          tsm.handleFailedTask(tsm.taskInfos(index).taskId, TaskState.FAILED, TaskResultLost)
        } else {
          val result = new DirectTaskResult[Int](valueSer.serialize(1), Seq())
          tsm.handleSuccessfulTask(tsm.taskInfos(index).taskId, result)
        }
      }
      // we update the blacklist for the stage attempts with all successful tasks.  Even though
      // some tasksets had failures, we still consider them all successful from a blacklisting
      // perspective, as the failures weren't from a problem w/ the tasks themselves.
      verify(blacklist).updateBlacklistForSuccessfulTaskSet(meq(0), meq(stageAttempt), anyObject())
    }
  }
test("don't schedule for a barrier taskSet if available slots are less than pending tasks") {
val taskCpus = 2
val taskScheduler = setupScheduler("spark.task.cpus" -> taskCpus.toString)
val numFreeCores = 3
val workerOffers = IndexedSeq(
new WorkerOffer("executor0", "host0", numFreeCores, Some("192.168.0.101:49625")),
new WorkerOffer("executor1", "host1", numFreeCores, Some("192.168.0.101:49627")))
val attempt1 = FakeTask.createBarrierTaskSet(3)
// submit attempt 1, offer some resources, since the available slots are less than pending
// tasks, don't schedule barrier tasks on the resource offer.
taskScheduler.submitTasks(attempt1)
val taskDescriptions = taskScheduler.resourceOffers(workerOffers).flatten
assert(0 === taskDescriptions.length)
}
test("schedule tasks for a barrier taskSet if all tasks can be launched together") {
val taskCpus = 2
val taskScheduler = setupScheduler("spark.task.cpus" -> taskCpus.toString)
val numFreeCores = 3
val workerOffers = IndexedSeq(
new WorkerOffer("executor0", "host0", numFreeCores, Some("192.168.0.101:49625")),
new WorkerOffer("executor1", "host1", numFreeCores, Some("192.168.0.101:49627")),
new WorkerOffer("executor2", "host2", numFreeCores, Some("192.168.0.101:49629")))
val attempt1 = FakeTask.createBarrierTaskSet(3)
// submit attempt 1, offer some resources, all tasks get launched together
taskScheduler.submitTasks(attempt1)
val taskDescriptions = taskScheduler.resourceOffers(workerOffers).flatten
assert(3 === taskDescriptions.length)
}
test("cancelTasks shall kill all the running tasks and fail the stage") {
val taskScheduler = setupScheduler()
taskScheduler.initialize(new FakeSchedulerBackend {
override def killTask(
taskId: Long,
executorId: String,
interruptThread: Boolean,
reason: String): Unit = {
// Since we only submit one stage attempt, the following call is sufficient to mark the
// task as killed.
taskScheduler.taskSetManagerForAttempt(0, 0).get.runningTasksSet.remove(taskId)
}
})
val attempt1 = FakeTask.createTaskSet(10, 0)
taskScheduler.submitTasks(attempt1)
val workerOffers = IndexedSeq(new WorkerOffer("executor0", "host0", 1),
new WorkerOffer("executor1", "host1", 1))
val taskDescriptions = taskScheduler.resourceOffers(workerOffers).flatten
assert(2 === taskDescriptions.length)
val tsm = taskScheduler.taskSetManagerForAttempt(0, 0).get
assert(2 === tsm.runningTasks)
taskScheduler.cancelTasks(0, false)
assert(0 === tsm.runningTasks)
assert(tsm.isZombie)
assert(taskScheduler.taskSetManagerForAttempt(0, 0).isEmpty)
}
test("killAllTaskAttempts shall kill all the running tasks and not fail the stage") {
val taskScheduler = setupScheduler()
taskScheduler.initialize(new FakeSchedulerBackend {
override def killTask(
taskId: Long,
executorId: String,
interruptThread: Boolean,
reason: String): Unit = {
// Since we only submit one stage attempt, the following call is sufficient to mark the
// task as killed.
taskScheduler.taskSetManagerForAttempt(0, 0).get.runningTasksSet.remove(taskId)
}
})
val attempt1 = FakeTask.createTaskSet(10, 0)
taskScheduler.submitTasks(attempt1)
val workerOffers = IndexedSeq(new WorkerOffer("executor0", "host0", 1),
new WorkerOffer("executor1", "host1", 1))
val taskDescriptions = taskScheduler.resourceOffers(workerOffers).flatten
assert(2 === taskDescriptions.length)
val tsm = taskScheduler.taskSetManagerForAttempt(0, 0).get
assert(2 === tsm.runningTasks)
taskScheduler.killAllTaskAttempts(0, false, "test")
assert(0 === tsm.runningTasks)
assert(!tsm.isZombie)
assert(taskScheduler.taskSetManagerForAttempt(0, 0).isDefined)
}
test("mark taskset for a barrier stage as zombie in case a task fails") {
val taskScheduler = setupScheduler()
val attempt = FakeTask.createBarrierTaskSet(3)
taskScheduler.submitTasks(attempt)
val tsm = taskScheduler.taskSetManagerForAttempt(0, 0).get
val offers = (0 until 3).map{ idx =>
WorkerOffer(s"exec-$idx", s"host-$idx", 1, Some(s"192.168.0.101:4962$idx"))
}
taskScheduler.resourceOffers(offers)
assert(tsm.runningTasks === 3)
// Fail a task from the stage attempt.
tsm.handleFailedTask(tsm.taskAttempts.head.head.taskId, TaskState.FAILED, TaskKilled("test"))
assert(tsm.isZombie)
}
}
| facaiy/spark | core/src/test/scala/org/apache/spark/scheduler/TaskSchedulerImplSuite.scala | Scala | apache-2.0 | 61,396 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.h2o.sparkling.benchmarks
import ai.h2o.sparkling.ml.models.H2OMOJOModel
import org.apache.spark.sql.DataFrame
/**
 * Benchmark that trains the bundled Sparkling Water algorithm directly on a Spark DataFrame
 * (the input needs no conversion, so `convertInput` is the identity).
 */
class TrainAlgorithmFromDataFrameBenchmark(context: BenchmarkContext, algorithmBundle: AlgorithmBundle)
  extends AlgorithmBenchmarkBase[DataFrame, DataFrame](context, algorithmBundle) {

  /** Loads the benchmark dataset as a DataFrame. */
  override protected def initialize(): DataFrame = loadDataToDataFrame()

  /** No conversion needed: the benchmark consumes the DataFrame as-is. */
  override protected def convertInput(input: DataFrame): DataFrame = input

  /** Configures the label column from the dataset details and fits the algorithm. */
  override protected def train(trainingDataFrame: DataFrame): H2OMOJOModel =
    algorithmBundle.swAlgorithm
      .setLabelCol(context.datasetDetails.labelCol)
      .fit(trainingDataFrame)

  /** Releases the cached input DataFrame once training is done. */
  override protected def cleanUp(dataFrame: DataFrame, model: H2OMOJOModel): Unit = removeFromCache(dataFrame)
}
| h2oai/sparkling-water | benchmarks/src/main/scala/ai/h2o/sparkling/benchmarks/TrainAlgorithmFromDataFrameBenchmark.scala | Scala | apache-2.0 | 1,612 |
/**
* Problem: http://www.codechef.com/problems/ERROR
* GitHub: https://github.com/amezhenin/codechef_problems
*/
object Main {

  /**
   * Decides whether a bit string contains an alternating triple,
   * i.e. whether "101" or "010" occurs anywhere in it.
   *
   * Checkout https://github.com/amezhenin/codechef_scala_template to test your solutions with sbt-doctest
   * {{{
   * >>> Main.alg("11111110")
   * false
   *
   * >>> Main.alg("10101010101010")
   * true
   *
   * }}}
   * */
  def alg(s: String): Boolean = Seq("101", "010").exists(t => s contains t)

  /** Reads the number of test cases, then classifies each subsequent line. */
  def main(args: Array[String]) = {
    val cases = readInt()
    (1 to cases).foreach { _ =>
      println(if (alg(readLine())) "Good" else "Bad")
    }
  }
}
| amezhenin/codechef_problems | easy/error.scala | Scala | mit | 656 |
/*
* Copyright (c) 2015 Contributor. All rights reserved.
*/
package org.scalaide.debug.internal.expression
package context.invoker
import scala.collection.JavaConverters._
import scala.util.Failure
import scala.util.Success
import scala.util.Try
import org.scalaide.debug.internal.expression.context.JdiContext
import org.scalaide.debug.internal.expression.proxies.JdiProxy
import org.scalaide.debug.internal.expression.proxies.primitives.PrimitiveJdiProxy
import org.scalaide.logging.HasLogger
import com.sun.jdi._
/** Common interface for method invokers. */
trait MethodInvoker extends HasLogger {

  /** Attempts the invocation; `None` means this invoker could not find/apply a matching method. */
  def apply(): Option[Value]

  /**
   * Checks if type underlying given proxy conforms to given type.
   *
   * @param proxy object to check
   * @param tpe type to check
   */
  protected final def conformsTo(proxy: JdiProxy, tpe: Type): Boolean = {
    def isBoxedPrimitive(name: String) = Names.Java.boxed.all.contains(name)

    // A null value conforms to any class (reference) type, never to a primitive one.
    if (proxy.__value == null) tpe.isInstanceOf[ClassType]
    else (tpe, proxy, proxy.__type) match {
      // check for boxed types
      case (classType: ClassType, primitiveProxy: PrimitiveJdiProxy[_, _, _], _) if isBoxedPrimitive(classType.name) =>
        classType.name == primitiveProxy.boxedName
      case (primitive: PrimitiveType, primitiveProxy: PrimitiveJdiProxy[_, _, _], _) =>
        primitive == primitiveProxy.__type
      // arrays conform when their component types conform (covariant, as in Java)
      case (parentArrayType: ArrayType, _, thisArrayType: ArrayType) =>
        isSuperClassOf(parentArrayType.componentType)(thisArrayType.componentType)
      case (parentType: Type, _, thisType: Type) =>
        isSuperClassOf(parentType)(thisType)
    }
  }

  // True when `parentType` is a supertype (class or interface) of `thisType`,
  // walking up the superclass chain; java.lang.Object is a supertype of everything.
  private def isSuperClassOf(parentType: Type)(thisType: Type): Boolean = {
    (parentType, thisType) match {
      case (parentType, thisType) if parentType == thisType || parentType.name == Names.Java.Object =>
        true
      case (parentInterfaceType: InterfaceType, thisClassType: ClassType) =>
        thisClassType.allInterfaces.contains(parentInterfaceType)
      case (parentRefType: ReferenceType, thisClassType: ClassType) =>
        Option(thisClassType.superclass()).map(isSuperClassOf(parentRefType)).getOrElse(false)
      case _ =>
        false
    }
  }

  /**
   * Runs `argumentTypes()` on given method, but catches all `ClassNotLoadedException`s and loads
   * required classes using provided context.
   * Context is called by-name cause it's only needed when something fails.
   */
  protected final def argumentTypesLoaded(method: Method, context: => JdiContext): Seq[Type] = {
    Try(method.argumentTypes()) match {
      case Success(types) => types.asScala
      case Failure(cnl: ClassNotLoadedException) =>
        // Load the missing class and retry; recursion terminates once all argument
        // classes are loaded (or a different error is thrown).
        context.loadClass(cnl.className)
        argumentTypesLoaded(method, context)
      case Failure(otherError) => throw otherError
    }
  }

  /** Boxes proxy value if type is a `ReferenceType`. */
  protected def autobox(tpe: Type, proxy: JdiProxy): Value = (proxy, tpe) match {
    case (primitive: PrimitiveJdiProxy[_, _, _], _: ReferenceType) => primitive.boxed
    case (other, _) => other.__value
  }
}
/**
 * Base operation on standard methods.
 */
trait BaseMethodInvoker extends MethodInvoker {

  // name of method
  protected def methodName: String

  // reference to search (could be object or ClassType in the case of Java static members)
  protected def referenceType: ReferenceType

  // basic arguments of call
  protected def args: Seq[JdiProxy]

  // augmented arguments of call, includes additional parameter for static methods from super traits (if needed)
  protected def methodArgs(method: Method): Seq[JdiProxy] = args

  // method match for this call: concrete, right arity, and every argument conforms to
  // the corresponding declared parameter type
  private def matchesSignature(method: Method): Boolean =
    !method.isAbstract &&
      method.arity == methodArgs(method).length &&
      checkTypes(argumentTypesLoaded(method, methodArgs(method).head.__context), method)

  // pairwise conformance check of arguments against declared parameter types
  private final def checkTypes(types: Seq[Type], arguments: Seq[JdiProxy]): Boolean =
    arguments.zip(types).forall((conformsTo _).tupled)

  protected final def checkTypes(types: Seq[Type], method: Method): Boolean =
    checkTypes(types, methodArgs(method))

  // like checkTypes but pairs types and arguments starting from the last one
  protected final def checkTypesRight(types: Seq[Type], method: Method): Boolean =
    checkTypes(types.reverse, methodArgs(method).reverse)

  private final def generateArguments(types: Seq[Type], arguments: Seq[JdiProxy]): Seq[Value] =
    types.zip(arguments).map { case (tpe, arg) => autobox(tpe, arg) }

  /**
   * Generates arguments for given call - transform boxed primitives to unboxed ones if needed
   */
  protected final def generateArguments(method: Method): Seq[Value] =
    generateArguments(method.argumentTypes.asScala, methodArgs(method))

  // same as generateArguments but aligned from the last argument backwards
  protected final def generateArgumentsRight(method: Method): Seq[Value] =
    generateArguments(method.argumentTypes.asScala.reverse, methodArgs(method).reverse).reverse

  // search for all visible methods
  protected def allMethods: Seq[Method] =
    // both visibleMethods and allMethods calls are required to maintain proper order of methods
    (referenceType.visibleMethods.asScala ++ referenceType.allMethods.asScala).filter(_.name == methodName)

  // found methods
  protected def matching: Seq[Method] = allMethods.filter(matchesSignature)

  // handles situation when you have multiple overloads: a single candidate is invoked directly,
  // several candidates log a warning and the first non-bridge, non-abstract one wins
  protected def handleMultipleOverloads(candidates: Seq[Method], invoker: Method => Value): Option[Value] = {
    candidates match {
      case Nil => None
      case method +: Nil =>
        Some(invoker(method))
      case multiple @ _ +: _ =>
        logger.warn(multipleOverloadsMessage(multiple))
        multiple.collectFirst {
          case method if !method.isBridge && !method.isAbstract =>
            invoker(method)
        }
    }
  }

  // message for multiple overloads of method def
  private final def multipleOverloadsMessage(methods: Seq[Method]): String = {
    val overloads = methods.map(prettyPrint).mkString("\t", "\n\t", "")
    s"Multiple overloaded methods found, using first one. This may not be correct. Possible overloads:\n$overloads"
  }
}
| scala-ide/scala-ide | org.scala-ide.sdt.debug.expression/src/org/scalaide/debug/internal/expression/context/invoker/MethodInvoker.scala | Scala | bsd-3-clause | 6,086 |
package com.ubirch.avatar.cmd
import java.net.URL
import java.util.Base64
import com.ubirch.avatar.model.rest.MessageVersion
import com.ubirch.avatar.model.rest.device.DeviceDataRaw
import com.ubirch.util.crypto.ecc.EccUtil
import com.ubirch.util.json.{Json4sUtil, MyJsonProtocol}
import org.apache.commons.codec.binary.Hex
import org.joda.time.{DateTime, DateTimeZone}
import org.json4s.JsonAST.JArray
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import uk.co.bigbeeconsultants.http.HttpClient
import uk.co.bigbeeconsultants.http.header.{Header, Headers, MediaType}
import uk.co.bigbeeconsultants.http.request.RequestBody
import scala.util.Random
// Manual smoke-test client: builds a signed device-data message with two random RGB readings
// and POSTs it (twice) to the avatar service's /device/update endpoint, printing each response.
object DeviceUpdateTester extends App with MyJsonProtocol {
  // Target endpoint (local by default; dev environment URL kept commented out for convenience).
  val avsApiUrl = new URL("http://localhost:8080/api/avatarService/v1/device/update")
  // val avsApiUrl = new URL("https://api.ubirch.dev.ubirch.com/api/avatarService/v1/device/update")
  private val eccUtil = new EccUtil()
  // A fresh key pair is generated but not used below; the fixed hex keys are used instead.
  val (pubKey, privKey) = eccUtil.generateEccKeyPair
  val pubKeyHex = "8b93b9fd501493e2ef39a0efc19ba3325750be77149c5568af6ebd3bb2717ba6"
  val privKeyHex = "26009602df60c827c56a60a488c29aa3ad525349d3c77e21e73c6e25bc79a07a8b93b9fd501493e2ef39a0efc19ba3325750be77149c5568af6ebd3bb2717ba6"
  // val pubKey64 = Base64.getEncoder.encodeToString(pubKey.getEncoded)
  // val privKey64 = Base64.getEncoder.encodeToString(privKey.getEncoded)
  // Hex keys re-encoded as base64, as expected by EccUtil.signPayload.
  val pubKey64 = Base64.getEncoder.encodeToString(Hex.decodeHex(pubKeyHex))
  val privKey64 = Base64.getEncoder.encodeToString(Hex.decodeHex(privKeyHex))
  // NOTE(review): presumably a base64-encoded hash of the device id -- confirm against the service.
  val hashedDeviceId = "7u5KLfooUMnQrK2UHHTfrhuZwdhqcBafPUYivMNkJXLeggaAcsaval+CvfelNbzoRnfPpGGUhS4krs2ddHO9rg=="
  // Two random RGB readings, one second apart, timestamped in UTC.
  val payload = JArray(List(
    ("r" -> Random.nextInt(255)) ~
      ("g" -> Random.nextInt(255)) ~
      ("b" -> Random.nextInt(255)) ~
      ("ts" -> DateTime.now(DateTimeZone.UTC).minusSeconds(1).toDateTimeISO.toString),
    ("r" -> Random.nextInt(255)) ~
      ("g" -> Random.nextInt(255)) ~
      ("b" -> Random.nextInt(255)) ~
      ("ts" -> DateTime.now(DateTimeZone.UTC).toDateTimeISO.toString)
  ))
  val payloadJson = render(payload)
  val payloadStr = Json4sUtil.jvalue2String(payload)
  println(payloadStr)
  // The payload string is signed with the private key; the signature travels in the `s` field.
  val signature = eccUtil.signPayload(privKey64, payloadStr)
  val ddr = DeviceDataRaw(
    v = MessageVersion.v003,
    a = hashedDeviceId,
    ts = DateTime.now(DateTimeZone.UTC).toDateTimeISO,
    p = payload,
    s = Some(signature)
  )
  val ddrStr = Json4sUtil.jvalue2String(Json4sUtil.any2jvalue(ddr).get)
  println(ddrStr)
  val hc = new HttpClient()
  val headers = Headers(List[Header](Header("Content-Type:application/json")))
  val body = RequestBody(contentType = MediaType.APPLICATION_JSON, string = ddrStr)
  // Post the same signed message twice, half a second apart.
  doPost()
  Thread.sleep(500)
  doPost()
  //  Thread.sleep(1000)
  //  doPost
  //
  //  Thread.sleep(2000)
  //  doPost
  //
  //  Thread.sleep(5000)
  //  doPost
  //
  //  Thread.sleep(10000)
  //  doPost
  println("Ende")
  // Sends the prepared body to the avatar service and echoes the response body.
  private def doPost(): Unit = {
    val resp = hc.post(url = avsApiUrl, body = Some(body), requestHeaders = headers)
    println(resp.body.asString)
  }
}
| ubirch/ubirch-avatar-service | cmdtools/src/main/scala/com/ubirch/avatar/cmd/DeviceUpdateTester.scala | Scala | apache-2.0 | 3,109 |
/*
* Copyright (C) 2009-2018 Lightbend Inc. <https://www.lightbend.com>
*/
package play.core.utils
import java.lang.{ StringBuilder => JStringBuilder }
import java.util.function.IntConsumer
import java.util.{ BitSet => JBitSet }
/**
 * Support for rendering HTTP header parameters according to RFC5987.
 */
private[play] object HttpHeaderParameterEncoding {

  // Builds a Java BitSet with one bit per character code; used for fast membership tests below.
  private def charSeqToBitSet(chars: Seq[Char]): JBitSet = {
    val ints: Seq[Int] = chars.map(_.toInt)
    val max = ints.fold(0)(Math.max(_, _))
    assert(max <= 256) // We should only be dealing with 7 or 8 bit chars
    val bitSet = new JBitSet(max)
    ints.foreach(bitSet.set(_))
    bitSet
  }

  private val AlphaNum: Seq[Char] = ('a' to 'z') ++ ('A' to 'Z') ++ ('0' to '9')

  // From https://tools.ietf.org/html/rfc5987#section-3.2.1:
  //
  //      attr-char     = ALPHA / DIGIT
  //                    / "!" / "#" / "$" / "&" / "+" / "-" / "."
  //                    / "^" / "_" / "`" / "|" / "~"
  //                    ; token except ( "*" / "'" / "%" )
  private val AttrCharPunctuation: Seq[Char] = Seq('!', '#', '$', '&', '+', '-', '.', '^', '_', '`', '|', '~')

  // From https://tools.ietf.org/html/rfc2616#section-2.2
  //
  //      separators     = "(" | ")" | "<" | ">" | "@"
  //                     | "," | ";" | ":" | "\\" | <">
  //                     | "/" | "[" | "]" | "?" | "="
  //                     | "{" | "}" | SP | HT
  //
  // Rich: We exclude <">, "\\" since they can be used for quoting/escaping and HT since it is
  // rarely used and seems like it should be escaped.
  private val Separators: Seq[Char] = Seq('(', ')', '<', '>', '@', ',', ';', ':', '/', '[', ']', '?', '=', '{', '}', ' ')

  /**
   * A subset of the 'qdtext' defined in https://tools.ietf.org/html/rfc2616#section-2.2. These are the
   * characters which can be inside a 'quoted-string' parameter value. These should form a
   * superset of the [[AttrChar]] set defined below. We exclude some characters which are technically
   * valid, but might be problematic, e.g. "\\" and "%" could be treated as escape characters by some
   * clients. We can be conservative because we can express these characters clearly as an extended
   * parameter.
   */
  private val PartialQuotedText: JBitSet = charSeqToBitSet(
    AlphaNum ++ AttrCharPunctuation ++
      // we include 'separators' plus some chars excluded from 'attr-char'
      Separators ++ Seq('*', '\\''))

  /**
   * The 'attr-char' values defined in https://tools.ietf.org/html/rfc5987#section-3.2.1. Should be a
   * subset of [[PartialQuotedText]] defined above.
   */
  private val AttrChar: JBitSet = charSeqToBitSet(AlphaNum ++ AttrCharPunctuation)

  // Substituted for any character that cannot appear in the plain (quoted-string) parameter value.
  private val PlaceholderChar: Char = '?'

  /**
   * Render a parameter name and value, handling character set issues as
   * recommended in RFC5987.
   *
   * Examples:
   * [[
   * render("filename", "foo.txt") ==> "filename=foo.txt"
   * render("filename", "naïve.txt") ==> "filename=na_ve.txt; filename*=utf8''na%C3%AFve.txt"
   * ]]
   */
  def encode(name: String, value: String): String = {
    val builder = new JStringBuilder
    encodeToBuilder(name, value, builder)
    builder.toString
  }

  /**
   * Render a parameter name and value, handling character set issues as
   * recommended in RFC5987. Appends the result to the given builder.
   *
   * Examples:
   * [[
   * render("filename", "foo.txt") ==> "filename=foo.txt"
   * render("filename", "naïve.txt") ==> "filename=na_ve.txt; filename*=utf8''na%C3%AFve.txt"
   * ]]
   */
  def encodeToBuilder(name: String, value: String, builder: JStringBuilder): Unit = {
    // This flag gets set if we encounter extended characters when rendering the
    // regular parameter value.
    var hasExtendedChars = false
    // Render ASCII parameter
    // E.g. naïve.txt --> "filename=na_ve.txt"
    builder.append(name)
    builder.append("=\\"")
    // Iterate over code points here, because we only want one
    // ASCII character or placeholder per logical character. If
    // we use the value's encoded bytes or chars then we might
    // end up with multiple placeholders per logical character.
    value.codePoints().forEach(new IntConsumer {
      override def accept(codePoint: Int): Unit = {
        // We could support a wider range of characters here by using
        // the 'token' or 'quoted printable' encoding, however it's
        // simpler to use the subset of characters that is also valid
        // for extended attributes.
        if (codePoint >= 0 && codePoint <= 255 && PartialQuotedText.get(codePoint)) {
          builder.append(codePoint.toChar)
        } else {
          // Set flag because we need to render an extended parameter.
          hasExtendedChars = true
          // Render a placeholder instead of the unsupported character.
          builder.append(PlaceholderChar)
        }
      }
    })
    builder.append('"')
    // Optionally render extended, UTF-8 encoded parameter
    // E.g. naïve.txt --> "; filename*=utf8''na%C3%AFve.txt"
    //
    // Renders both regular and extended parameters, as suggested by:
    // - https://tools.ietf.org/html/rfc5987#section-4.2
    // - https://tools.ietf.org/html/rfc6266#section-4.3 (for Content-Disposition filename parameter)
    if (hasExtendedChars) {
      // Converts a 4-bit nibble into its lowercase hex digit.
      def hexDigit(x: Int): Char = (if (x < 10) (x + '0') else (x - 10 + 'a')).toChar
      // From https://tools.ietf.org/html/rfc5987#section-3.2.1:
      //
      // Producers MUST use either the "UTF-8" ([RFC3629]) or the "ISO-8859-1"
      // ([ISO-8859-1]) character set. Extension character sets (mime-
      val CharacterSetName = "utf-8"
      builder.append("; ")
      builder.append(name)
      builder.append("*=")
      builder.append(CharacterSetName)
      builder.append("''")
      // From https://tools.ietf.org/html/rfc5987#section-3.2.1:
      //
      // Inside the value part, characters not contained in attr-char are
      // encoded into an octet sequence using the specified character set.
      // That octet sequence is then percent-encoded as specified in Section
      // 2.1 of [RFC3986].
      val bytes = value.getBytes(CharacterSetName)
      for (b <- bytes) {
        if (AttrChar.get(b & 0xFF)) {
          builder.append(b.toChar)
        } else {
          builder.append('%')
          builder.append(hexDigit((b >> 4) & 0xF))
          builder.append(hexDigit(b & 0xF))
        }
      }
    }
  }
}
| Shenker93/playframework | framework/src/play/src/main/scala/play/core/utils/HttpHeaderEncoding.scala | Scala | apache-2.0 | 6,334 |
package templemore.onx.version4
/**
 * Line-oriented queries over a noughts-and-crosses [[Grid]]: enumerates the eight
 * winning lines (3 rows, 3 columns, 2 diagonals) and finds empty positions on them.
 *
 * @author Chris Turner
 */
trait GridLines {

  this: Grid =>

  // One marker per winning line, in the same order as `lines` below.
  // A -1 row/column component means "the whole column/row"; (0,0) and (2,0)
  // identify the two diagonals (see `positions` for the decoding).
  private[this] val linePositions = List(Position(0, -1), Position(1, -1), Position(2, -1),
                                         Position(-1, 0), Position(-1, 1), Position(-1, 2),
                                         Position(0, 0), Position(2, 0))

  // All eight winning lines: rows, columns (transpose), then the two diagonals.
  def lines = structure ::: structure.transpose ::: diagonals

  def linesWithPositions = lines zip linePositions

  // Every empty cell position on lines that still contain at least one empty cell.
  def emptyPositions(lines: List[Tuple2[List[Option[Token]], Position]]) =
    lines.filter(_._1 contains None).flatMap(empties => empties._1 zip positions(empties._2)).filter(_._1 == None).map(_._2)

  // First empty position found on the given lines, if any.
  def emptyPosition(lines: List[Tuple2[List[Option[Token]], Position]]): Option[Position] = emptyPositions(lines) match {
    case Nil => None
    case x :: xs => Some(x)
  }

  // Lines holding exactly one `token` and two empty cells (i.e. potential future wins).
  def linesWithMatchingTokenAndTwoSpaces(token: Token) =
    linesWithPositions.filter { line => line._1.count(_ == None) == 2 && line._1.contains(Some(token)) }

  // All distinct cell positions that lie on at least one completely empty line.
  def allPositionsOnEmptyLines =
    linesWithPositions.filter { line => line._1.forall(_ == None) }.map(_._2).distinct.flatMap(positions(_))

  private[this] def diagonals = List(diagonalTopToBottom, diagonalBottomToTop)

  private[this] def diagonalTopToBottom = List(token(Position(0,0)), token(Position(1,1)), token(Position(2,2)))

  private[this] def diagonalBottomToTop = List(token(Position(2,0)), token(Position(1,1)), token(Position(0,2)))

  // Decodes a line marker back into the three cell positions making up that line.
  private[this] def positions(position: Position) = position match {
    case Position(row, -1) => List(Position(row, 0), Position(row, 1), Position(row, 2))
    case Position(-1, col) => List(Position(0, col), Position(1, col), Position(2, col))
    case Position(0, 0) => List(Position(0, 0), Position(1, 1), Position(2, 2))
    case Position(2, 0) => List(Position(2, 0), Position(1, 1), Position(0, 2))
    case _ => throw new IllegalStateException
  }
} | skipoleschris/OandX | src/main/scala/templemore/onx/version4/GridLines.scala | Scala | apache-2.0 | 1,920 |
package controllers
import play.api._
import play.api.mvc._
import java.sql.DriverManager
import java.sql.ResultSet
import scala.collection.mutable.ArrayBuffer
import play.api.libs.iteratee.Enumerator
import play.api.libs.json._
import java.util.Properties
import java.io.FileInputStream
object Application extends Controller {

  // Service configuration loaded once at startup from the deployment properties file.
  val defaultProps = readPropertiesFile("conf/etoxvault.properties")
  // Shared secret compared against the `authkey` query parameter of every API call.
  val auth_key = defaultProps.getProperty("auth_key")
  val server_db_url = defaultProps.getProperty("dbURL")
  val user = defaultProps.getProperty("dbUser")
  val password = defaultProps.getProperty("dbPassword")
  // NOTE(review): a single mutable connection shared by every request handler -- concurrent
  // requests will overwrite/close each other's connection. A per-request connection or a
  // connection pool would be safer.
  var sqlConnection: java.sql.Connection = null
def readPropertiesFile(file: String) = {
var defaultProps = new Properties()
var in = new FileInputStream(file)
defaultProps.load(in)
in.close()
defaultProps
}
  // Returns the JSON rows (software description + license) linked to the given model id.
  // NOTE(review): `idmodel` is concatenated directly into the SQL string. Callers currently
  // pass numeric ids (`model_id.toString`), but a prepared statement would be safer.
  def getJsonForSoftware(idmodel: String): String = {
    val sql = " select distinct software.softwaredescription,software.license " +
      " from model, modelsoftware, software" +
      " where model.idmodel=modelsoftware.idmodel and modelsoftware.idsoftware= software.idsoftware" +
      " and model.idmodel=" + idmodel
    getQueryJSONBase(sql, false)
  }
  // Serialises the current row of `resultSet` into a JSON object string,
  // using the column labels (wrapped in extra quote characters) as keys.
  // NOTE(review): the `rec` flag has no effect -- both branches of the `if` yield the
  // same map -- and `jsline` is never used; kept as-is pending a behaviour review.
  def getRowJson(resultSet: ResultSet, rec: Boolean) = {
    val metadata = resultSet.getMetaData()
    val columnCount = metadata.getColumnCount()
    var jsline = ""
    val row = for (i <- 1 to columnCount) yield ("\\""+metadata.getColumnLabel(i)+"\\"" -> resultSet.getString(i))
    val mp1 = row.toList.toMap
    // Debug output of the raw row map; consider replacing with Logger.debug.
    println(mp1)
    val mp = if (rec)
      mp1
    else
      mp1
    val js = Json.toJson(mp)
    js.toString
  }
def getQueryJSONBase(query: String, rec: Boolean) = {
var resultSet = doQuerySQL(sqlConnection, query)
var metadata = resultSet.getMetaData()
var i = 1
var js = ""
var lines = ArrayBuffer[String]()
while (resultSet.next) {
val columnCount = metadata.getColumnCount()
var jsline = getRowJson(resultSet, rec)
jsline = "\\n" + jsline
lines += jsline
}
var json: java.lang.StringBuffer = new StringBuffer();
for (i <- 0 to lines.size - 1) {
if (i == lines.size - 1)
json.append(lines(i))
else
json.append(lines(i) + ",")
}
json.toString()
}
  // Executes `query` on the given connection and returns its ResultSet.
  // NOTE(review): the Statement is never closed, so every call leaks a statement
  // until the connection itself is closed.
  def doQuerySQL(sqlConnection: java.sql.Connection, query: String) = {
    Logger.debug("doQuerySQL: \\n" + query)
    var statement = sqlConnection.createStatement()
    statement.execute(query)
    statement.getResultSet()
  }
def allmodels(authkey: String) =
Action {
if (authkey == this.auth_key) {
sqlConnection = DriverManager.getConnection(this.server_db_url, this.user, this.password)
val stmodel = getQueryJSONBase("SELECT idmodel, modeltitle, modelid, version , partner,modeltag,verification_status from model ", true)
if (stmodel == "") {
Logger.debug("Not found")
Result(
header = ResponseHeader(404, Map()),
body = Enumerator("Not found".getBytes()))
} else {
Logger.debug("Response: " + stmodel)
sqlConnection.close()
val sr = Result(
header = ResponseHeader(200, Map(CONTENT_TYPE -> "application/json")),
body = Enumerator(("[" ++ stmodel ++ "]").getBytes()))
sr
}
} else
Result(
header = ResponseHeader(403, Map()),
body = Enumerator("Not authorised".getBytes()))
}
  // Returns the full metadata record for one model, with its linked software appended
  // under a "software" key. 403 on bad auth key, 404 when the id is unknown.
  // NOTE(review): on the 404 branch the connection is never closed (leak); `model_id`
  // is an Int, so the string interpolation into the WHERE clause cannot inject SQL.
  def modelinfo(model_id: Int, authkey: String) = Action {
    if (authkey == this.auth_key) {
      sqlConnection = DriverManager.getConnection(this.server_db_url, this.user, this.password)
      val stmodel = getQueryJSONBase("SELECT idmodel, creationdate, contactname, contactemail, `references`, modeltitle, modelid, version, keywords, relatedmodels, endpointdefinition, dependentvariable, datasource, trainingsize, trainingtype, trainingcuration, endpointpositivesperc, endpointmin, endpointmax, endpointavg, endpointskewed, compoundidentifier, compounddetails, modeltype, modelalgorithm, testselection, descriptorsdefinition, descriptorsselection, ADassessment, ADmethod, goftp, goftn, goffp, goffn, gofr2, gofrmse, inferentialstats, externaldetails, testsize, externaltp, externaltn, externalfp, externalfn, externalq2, externalsdep, externalother, internalq2, internalsdep, mechanisticbasis, mechanisticother, referencesother, supportinginfo, partner, endpointspecies, computationurl, valuetype, valuerange,interpretation,modeltag,verification_status,units " +
      " FROM `etoxvault`.`model` " +
      " where idmodel='" + model_id + "'",
      true)
    if (stmodel == "") {
      Logger.debug("Not found")
      Result(
        header = ResponseHeader(404, Map()),
        body = Enumerator("Not found".getBytes()))
    } else {
      // Splice the software array into the model's JSON object (drop the trailing '}',
      // append the software key, then re-close the object).
      val stsoft = getJsonForSoftware(model_id.toString)
      val stsoft2 = " \\"software\\" : [ " + stsoft + "]"
      Logger.debug("Response: " + (stmodel.take(stmodel.length - 1) + "," + stsoft2 + "}"))
      sqlConnection.close()
      val sr = Result(
        header = ResponseHeader(200, Map(CONTENT_TYPE -> "application/json")),
        body = Enumerator((stmodel.take(stmodel.length - 1) + "," + stsoft2 + "}").getBytes()))
      sr
    }
    } else
      Result(
        header = ResponseHeader(403, Map()),
        body = Enumerator("Not authorised".getBytes()))
  }
/** Looks up the primary key of the newest version of the model with the given tag.
  *
  * Uses a prepared statement so a user-supplied tag cannot inject SQL (the previous
  * implementation concatenated `modeltag` directly into the query string).
  *
  * @param modeltag user-supplied model tag
  * @return the `idmodel` of the highest-version row matching the tag; like the
  *         original, throws if no row matches
  */
def getModelIdForModelTag(modeltag: String) = {
  val stmt = sqlConnection.prepareStatement(
    "select idmodel from model where modeltag=? order by version desc limit 1")
  try {
    stmt.setString(1, modeltag)
    val resultSet = stmt.executeQuery()
    // Advance to the first (only) row; getInt throws when no row matched,
    // mirroring the behaviour of the previous implementation.
    resultSet.next
    resultSet.getInt(1)
  } finally {
    // Closing the statement also closes its result set.
    stmt.close()
  }
}
/** Looks up the primary key of a specific version of the model with the given tag.
  *
  * Uses a prepared statement so the user-supplied tag and version cannot inject SQL
  * (the previous implementation concatenated both directly into the query string).
  *
  * @param modeltag user-supplied model tag
  * @param version  user-supplied version string
  * @return the `idmodel` of the matching row; like the original, throws if no row matches
  */
def getModelIdForModelTag_version(modeltag: String, version: String) = {
  val stmt = sqlConnection.prepareStatement(
    "select idmodel from model where modeltag=? and version=?")
  try {
    stmt.setString(1, modeltag)
    stmt.setString(2, version)
    val resultSet = stmt.executeQuery()
    // Advance to the first (only) row; getInt throws when no row matched,
    // mirroring the behaviour of the previous implementation.
    resultSet.next
    resultSet.getInt(1)
  } finally {
    // Closing the statement also closes its result set.
    stmt.close()
  }
}
/** Play action returning the JSON description of a model selected by tag/provider
  * (optionally pinned to a version), plus its software list.
  *
  * SECURITY(review): `modeltag`, `provider` and `version` are user-controlled strings
  * concatenated straight into the SQL below — this is SQL-injectable and should be
  * migrated to prepared statements.
  *
  * @param modeltag model tag to look up
  * @param provider partner that owns the model
  * @param authkey  shared-secret API key; any mismatch is answered with 403
  * @param version  optional exact version; when absent the newest version is used
  */
def modelinfoByTag(modeltag: String, provider: String, authkey: String, version: Option[String]) = Action {
  // NOTE(review): leftover debug output; should go through Logger.debug instead.
  println("Input:")
  println(provider)
  println(modeltag)
  println(version)
  if (authkey == this.auth_key) {
    // NOTE(review): connection only closed on the 200 path; 404 branch leaks it — confirm.
    sqlConnection = DriverManager.getConnection(this.server_db_url, this.user, this.password)
    val stmodel = version match {
      case None => getQueryJSONBase("SELECT idmodel, creationdate, contactname, contactemail, `references`, modeltitle, modelid, version, keywords, relatedmodels, endpointdefinition, dependentvariable, datasource, trainingsize, trainingtype, trainingcuration, endpointpositivesperc, endpointmin, endpointmax, endpointavg, endpointskewed, compoundidentifier, compounddetails, modeltype, modelalgorithm, testselection, descriptorsdefinition, descriptorsselection, ADassessment, ADmethod, goftp, goftn, goffp, goffn, gofr2, gofrmse, inferentialstats, externaldetails, testsize, externaltp, externaltn, externalfp, externalfn, externalq2, externalsdep, externalother, internalq2, internalsdep, mechanisticbasis, mechanisticother, referencesother, supportinginfo, partner, endpointspecies, computationurl, valuetype, valuerange,interpretation,modeltag,verification_status,units " +
        "FROM `etoxvault`.`model` where modeltag='" + modeltag + "' and partner='" + provider + "' order by version desc limit 1", true)
      case Some(version) => getQueryJSONBase("SELECT idmodel, creationdate, contactname, contactemail, `references`, modeltitle, modelid, version, keywords, relatedmodels, endpointdefinition, dependentvariable, datasource, trainingsize, trainingtype, trainingcuration, endpointpositivesperc, endpointmin, endpointmax, endpointavg, endpointskewed, compoundidentifier, compounddetails, modeltype, modelalgorithm, testselection, descriptorsdefinition, descriptorsselection, ADassessment, ADmethod, goftp, goftn, goffp, goffn, gofr2, gofrmse, inferentialstats, externaldetails, testsize, externaltp, externaltn, externalfp, externalfn, externalq2, externalsdep, externalother, internalq2, internalsdep, mechanisticbasis, mechanisticother, referencesother, supportinginfo, partner, endpointspecies, computationurl, valuetype, valuerange,interpretation,modeltag,verification_status,units " +
        "FROM `etoxvault`.`model` where modeltag='" + modeltag + "' and partner='" + provider + "' and version='" + version + "'", true)
    }
    if (stmodel == "") {
      Logger.debug("Not found")
      Result(
        header = ResponseHeader(404, Map()),
        body = Enumerator("Not found".getBytes()))
    } else {
      // Resolve the numeric id so the software rows can be fetched.
      val model_id = version match {
        case None => getModelIdForModelTag(modeltag)
        case Some(version) => getModelIdForModelTag_version(modeltag, version)
      }
      println("Model id:" + model_id)
      // Splice the software array into the model JSON: drop the trailing '}' of the
      // model object, append the software field, then re-close the object.
      val stsoft = getJsonForSoftware(model_id.toString)
      val stsoft2 = " \\"software\\" : [ " + stsoft + "]"
      Logger.debug("Response: " + (stmodel.take(stmodel.length - 1) + "," + stsoft2 + "}"))
      sqlConnection.close()
      val sr = Result(
        header = ResponseHeader(200, Map(CONTENT_TYPE -> "application/json")),
        body = Enumerator((stmodel.take(stmodel.length - 1) + "," + stsoft2 + "}").getBytes()))
      sr
    }
  } else
    Result(
      header = ResponseHeader(403, Map()),
      body = Enumerator("Not authorised".getBytes()))
}
}
| phi-grib/eTOX-vault-ws | app/controllers/Application.scala | Scala | gpl-3.0 | 9,488 |
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2016 Matthias Langer (t3l@threelights.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe.blaze.modules.jvm
import edu.latrobe.blaze._
import edu.latrobe.blaze.modules.ReLUBuilder
import edu.latrobe.{RealArrayTensor, _}
/**
* Optimized JVM implementation of ReLU.
*/
/**
 * Plain-JVM reference implementation of the rectified linear unit (ReLU).
 *
 * Both passes mutate their tensor argument in place.
 */
final class ReLU_JVM_Baseline(override val builder: ReLUBuilder,
                              override val inputHints: BuildHints,
                              override val seed: InstanceSeed,
                              override val weightBufferBuilder: ValueTensorBufferBuilder)
  extends ReLU_JVM {

  // ---------------------------------------------------------------------------
  // Forward propagation related.
  // ---------------------------------------------------------------------------

  /** Forward pass: clamps every negative activation to zero. */
  override protected def doPredict(output: RealArrayTensor): Unit =
    output.transform(value => if (value > Real.zero) value else Real.zero)

  // ---------------------------------------------------------------------------
  // Back propagation related.
  // ---------------------------------------------------------------------------

  /** Backward pass: the gradient flows through only where the forward input was positive. */
  override protected def doDeriveInputError(input: RealArrayTensor,
                                            error: RealArrayTensor): Unit =
    error.transform(input, (gradient, value) => if (value > Real.zero) gradient else Real.zero)

}
/** Variant descriptor through which the module registry instantiates the baseline JVM ReLU. */
object ReLU_JVM_Baseline_Description
  extends ModuleVariant_JVM_Description[ReLUBuilder] {

  /** Builds a fresh [[ReLU_JVM_Baseline]] instance from the supplied configuration. */
  override def build(builder: ReLUBuilder,
                     hints: BuildHints,
                     seed: InstanceSeed,
                     weightsBuilder: ValueTensorBufferBuilder)
  : ReLU_JVM_Baseline = new ReLU_JVM_Baseline(
    builder, hints, seed, weightsBuilder
  )

}
| bashimao/ltudl | blaze/src/main/scala/edu/latrobe/blaze/modules/jvm/ReLU_JVM_Baseline.scala | Scala | apache-2.0 | 2,376 |
/*
* Copyright 2016-2020 47 Degrees Open Source <https://www.47deg.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package github4s.integration
import cats.effect.IO
import github4s.GHError
import github4s.Github
import github4s.domain._
import github4s.utils.{BaseIntegrationSpec, Integration}
/** Integration tests for the GitHub Users API wrappers.
  *
  * Each test opens an HTTP client resource, issues one real API call and checks
  * both the decoded payload and the HTTP status code. All tests are tagged
  * `Integration` so they can be excluded from unit-test runs.
  */
trait UsersSpec extends BaseIntegrationSpec {

  // --- users.get -----------------------------------------------------------
  "Users >> Get" should "return the expected login for a valid username" taggedAs Integration in {
    val response = clientResource
      .use { client =>
        Github[IO](client, accessToken).users
          .get(validUsername, headerUserAgent)
      }
      .unsafeRunSync()

    testIsRight[User](response, r => r.login shouldBe validUsername)
    response.statusCode shouldBe okStatusCode
  }

  it should "return error on Left for invalid username" taggedAs Integration in {
    val response = clientResource
      .use { client =>
        Github[IO](client, accessToken).users
          .get(invalidUsername, headerUserAgent)
      }
      .unsafeRunSync()

    testIsLeft[GHError.NotFoundError, User](response)
    response.statusCode shouldBe notFoundStatusCode
  }

  // --- users.getAuth -------------------------------------------------------
  "Users >> GetAuth" should "return error on Left when no accessToken is provided" taggedAs Integration in {
    // Deliberately omits the access token to provoke a 401.
    val response =
      clientResource
        .use(client => Github[IO](client).users.getAuth(headerUserAgent))
        .unsafeRunSync()

    testIsLeft[GHError.UnauthorizedError, User](response)
    response.statusCode shouldBe unauthorizedStatusCode
  }

  // --- users.getUsers ------------------------------------------------------
  "Users >> GetUsers" should "return users for a valid since value" taggedAs Integration in {
    val response = clientResource
      .use { client =>
        Github[IO](client, accessToken).users
          .getUsers(validSinceInt, None, headerUserAgent)
      }
      .unsafeRunSync()

    testIsRight[List[User]](response, r => r.nonEmpty shouldBe true)
    response.statusCode shouldBe okStatusCode
  }

  it should "return an empty list when a invalid since value is provided" taggedAs Integration in {
    // GitHub answers 200 with an empty page for out-of-range `since` values.
    val response = clientResource
      .use { client =>
        Github[IO](client, accessToken).users
          .getUsers(invalidSinceInt, None, headerUserAgent)
      }
      .unsafeRunSync()

    testIsRight[List[User]](response, r => r.isEmpty shouldBe true)
    response.statusCode shouldBe okStatusCode
  }

  // --- users.getFollowing --------------------------------------------------
  "Users >> GetFollowing" should "return the expected following list for a valid username" taggedAs Integration in {
    val response = clientResource
      .use { client =>
        Github[IO](client, accessToken).users
          .getFollowing(validUsername, None, headerUserAgent)
      }
      .unsafeRunSync()

    testIsRight[List[User]](response, r => r.nonEmpty shouldBe true)
    response.statusCode shouldBe okStatusCode
  }

  it should "return error on Left for invalid username" taggedAs Integration in {
    val response = clientResource
      .use { client =>
        Github[IO](client, accessToken).users
          .getFollowing(invalidUsername, None, headerUserAgent)
      }
      .unsafeRunSync()

    testIsLeft[GHError.NotFoundError, List[User]](response)
    response.statusCode shouldBe notFoundStatusCode
  }
}
| 47deg/github4s | github4s/src/test/scala/github4s/integration/UsersSpec.scala | Scala | apache-2.0 | 3,666 |
package almhirt.components
import scala.language.postfixOps
import scala.concurrent.duration._
import scalaz.syntax.validation._
import scalaz.Validation.FlatMap._
import akka.actor._
import akka.pattern._
import almhirt.common._
import almhirt.almvalidation.kit._
import almhirt.almfuture.all._
import almhirt.akkax._
import almhirt.akkax.reporting._
import almhirt.akkax.reporting.Implicits._
import almhirt.context.AlmhirtContext
import almhirt.context.HasAlmhirtContext
import almhirt.streaming.ActorDevNullSubscriberWithAutoSubscribe
import akka.stream.actor._
import org.reactivestreams.Subscriber
import akka.stream.scaladsl._
/** Factory for the event-log writer actor: an ActorSubscriber that persists every
  * event it receives to the resolved event log.
  *
  * NOTE(review): the `β` characters below look like mis-encoded Scala arrows
  * (`←`/`⇒`) introduced by a transcoding step; left untouched here.
  */
object EventLogWriter {
  /** Creates Props from explicitly supplied settings (no config lookup). */
  def propsRaw(
    eventLogToResolve: ToResolve,
    resolveSettings: ResolveSettings,
    warningThreshold: FiniteDuration,
    eventlogCallTimeout: FiniteDuration,
    storeEventRetrySettings: RetryPolicyExt,
    autoConnect: Boolean = false)(implicit ctx: AlmhirtContext): Props = {
    Props(new EventLogWriterImpl(
      eventLogToResolve,
      resolveSettings,
      warningThreshold,
      autoConnect,
      eventlogCallTimeout,
      storeEventRetrySettings))
  }

  /** Creates Props from the almhirt configuration section
    * `almhirt.components.misc.event-sink-hub.event-publishers.event-log-writer`.
    * When the writer is disabled, a /dev/null subscriber is returned instead so
    * upstream publishers still have somewhere to send events.
    */
  def props(implicit ctx: AlmhirtContext): AlmValidation[Props] = {
    import almhirt.configuration._
    val eventlogPath = ctx.localActorPaths.eventLogs / almhirt.eventlog.EventLog.actorname
    for {
      section β ctx.config.v[com.typesafe.config.Config]("almhirt.components.misc.event-sink-hub.event-publishers.event-log-writer")
      enabled β section.v[Boolean]("enabled")
      autoConnect β section.v[Boolean]("auto-connect")
      res β if (enabled) {
        for {
          warningThreshold β section.v[FiniteDuration]("warning-threshold")
          resolveSettings β section.v[ResolveSettings]("resolve-settings")
          storeEventRetrySettings β section.v[RetryPolicyExt]("store-event-retry-settings")
          eventlogCallTimeout β section.v[FiniteDuration]("event-log-call-timeout")
        } yield propsRaw(ResolvePath(eventlogPath), resolveSettings, warningThreshold, eventlogCallTimeout, storeEventRetrySettings, autoConnect)
      } else {
        ActorDevNullSubscriberWithAutoSubscribe.props[Event](1, if (autoConnect) Some(ctx.eventStream) else None).success
      }
    } yield res
  }

  /** Wraps an already-created writer actor as a reactive-streams Subscriber. */
  def apply(eventLogWriter: ActorRef): Subscriber[Event] =
    ActorSubscriber[Event](eventLogWriter)

  val actorname = "event-log-writer"
  /** Path of the writer actor underneath the event-sink hub. */
  def path(root: RootActorPath) = EventSinkHub.path(root) / actorname
}
/** Actor that subscribes to an event stream and writes each event to the event log,
  * retrying failed writes and reporting statistics/failures to the hub.
  *
  * Lifecycle: `receiveResolve` (locate the event log actor) → `receiveRunning`
  * (consume events one at a time; demand is only signalled after each write settles).
  *
  * NOTE(review): the `β` characters below look like mis-encoded Scala arrows
  * (`⇒`) introduced by a transcoding step; left untouched here.
  */
private[almhirt] class EventLogWriterImpl(
  eventLogToResolve: ToResolve,
  resolveSettings: ResolveSettings,
  warningThreshold: FiniteDuration,
  autoConnect: Boolean,
  eventlogCallTimeout: FiniteDuration,
  storeEventRetrySettings: RetryPolicyExt)(implicit override val almhirtContext: AlmhirtContext) extends ActorSubscriber with AlmActor with AlmActorLogging with ActorLogging with ControllableActor with StatusReportingActor {
  import almhirt.eventlog.EventLog

  override val componentControl = LocalComponentControl(self, ComponentControlActions.none, Some(logWarning))

  implicit def implicitFlowMaterializer = akka.stream.ActorMaterializer()(this.context)

  implicit val executor = almhirtContext.futuresContext
  // Demand is requested manually (request(1)) so at most one event is in flight.
  override val requestStrategy = ZeroRequestStrategy

  // Internal control messages.
  private case object AutoConnect
  private case object Resolve

  // Counters surfaced via the status report.
  private var loggedEvents = 0L
  private var eventsNotLogged = 0L
  private var eventsReceived = 0L

  // Set once resolution succeeds; only read afterwards.
  private var eventLog: ActorRef = null

  /** Startup state: resolve the event log actor before accepting any events. */
  def receiveResolve: Receive = startup() {
    reportsStatusF(onReportRequested = createStatusReport) {
      case Resolve β
        context.resolveSingle(eventLogToResolve, resolveSettings, None, Some("event-log-resolver"))

      case ActorMessages.ResolvedSingle(resolvedEventlog, _) β
        logInfo("Found event log.")
        this.eventLog = resolvedEventlog
        if (autoConnect)
          self ! AutoConnect
        else
          request(1)
        context.become(receiveRunning)

      case ActorMessages.SingleNotResolved(problem, _) β
        logError(s"Could not resolve event log @${eventLogToResolve}:\\n$problem")
        sys.error(s"Could not resolve event log @${eventLogToResolve}.")
        reportCriticalFailure(problem)
    }
  }

  /** Steady state: consume events, write each to the log, then request the next. */
  def receiveRunning: Receive = running() {
    reportsStatusF(onReportRequested = createStatusReport) {
      case AutoConnect β
        logInfo("Subscribing to event stream.")
        Source.fromPublisher(almhirtContext.eventStream).to(Sink.fromSubscriber(EventLogWriter(self))).run()
        request(1)

      case ActorSubscriberMessage.OnNext(event: Event) β
        eventsReceived = eventsReceived + 1L
        if (!event.header.noLoggingSuggested) {
          val start = Deadline.now
          val f = {
            this.retryFuture(storeEventRetrySettings) {
              // An AlreadyExists failure is treated as success: a timed-out write
              // may still have been persisted by the storage afterwards.
              (eventLog ? EventLog.LogEvent(event, true))(eventlogCallTimeout).mapCastTo[EventLog.LogEventResponse].foldV(
                fail β {
                  fail match {
                    case AlreadyExistsProblem(p) β
                      logWarning(s"Event event ${event.eventId} already existed. This can happen when a write operation timed out but the event was stored afterwards by the storage.")
                      EventLog.EventLogged(event.eventId).success
                    case _ β {
                      fail.failure
                    }
                  }
                },
                succ β succ.success)
            }
          }.onComplete({
            // Outcome is fed back to self; the handlers below do the bookkeeping
            // and re-request demand.
            case scalaz.Failure(problem) β
              self ! EventLog.EventNotLogged(event.eventId, problem)
              reportMissedEvent(event, MajorSeverity, problem)
              reportMajorFailure(problem)
            case scalaz.Success(rsp) β self ! rsp
          })
          f.onSuccess(rsp β
            if (start.lapExceeds(warningThreshold))
              logWarning(s"Writing event '${event.eventId.value}' took longer than ${warningThreshold.defaultUnitString}: ${start.lap.defaultUnitString}"))
        } else {
          // Event explicitly opted out of logging; just move on.
          request(1)
        }

      case ActorSubscriberMessage.OnNext(unprocessable) β
        log.warning(s"Received unprocessable element $unprocessable.")
        request(1)

      case EventLog.EventLogged(id) β
        loggedEvents = loggedEvents + 1L
        request(1)

      case EventLog.EventNotLogged(id, problem) β
        eventsNotLogged = eventsNotLogged + 1L
        logError(s"Could not log event '${id.value}':\\n$problem")
        reportMajorFailure(problem)
        request(1)
    }
  }

  override def receive: Receive = receiveResolve

  /** Assembles the status report exposed through the reporting infrastructure. */
  def createStatusReport(options: StatusReportOptions): AlmFuture[StatusReport] = {
    val baseReport = StatusReport("EventLogWriter-Report").withComponentState(componentState) addMany
      ("events-received" -> eventsReceived,
        "events-logged" -> loggedEvents,
        "events-not-logged" -> eventsNotLogged,
        "event-log" -> eventLog.path.toStringWithoutAddress)
    AlmFuture.successful(baseReport)
  }

  override def preStart() {
    super.preStart()
    registerComponentControl()
    registerStatusReporter(description = Some("Simply writes to an event log."))
    context.parent ! ActorMessages.ConsiderMeForReporting
    self ! Resolve
  }

  override def postStop() {
    super.postStop()
    deregisterComponentControl()
    deregisterStatusReporter()
  }
}
package scredis.exceptions
/**
* Base class of all exceptions thrown by scredis
*/
/**
 * Base class of all exceptions thrown by scredis
 *
 * @param message optional detail message (`null` when absent, per `java.lang.Exception` convention)
 * @param cause optional underlying cause (`null` when absent)
 */
abstract class RedisException(
  message: String = null,
  cause: Throwable = null
) extends Exception(message, cause)
package com.amadornes.modcast.bot.discord.commands
import com.amadornes.modcast.bot.database.Permission
import com.amadornes.modcast.bot.discord.{AbstractCommand, TCommandOptions}
import com.amadornes.modcast.bot.helpers.{MCWhitelistHelper, PermissionsHelper}
import sx.blah.discord.handle.obj.IUser
/**
* Created by rewbycraft on 11/9/16.
*/
/**
 * Chat command that associates (or de-associates) a Minecraft account with a
 * Discord user for server whitelisting.
 *
 * A user may always manage their own association; touching somebody else's
 * requires admin permissions.
 */
class WhitelistCommand extends AbstractCommand[WhitelistCommand.Options] {

  override def handle: Receive = {
    case cmd@Execute(channel, invoker, _, args) =>
      val targetsSelf = args.user == null || args.user.getID == invoker.getID
      // Lazy so the permission lookup only runs when the command targets
      // another user (preserves the original short-circuit behaviour).
      lazy val invokerIsAdmin =
        PermissionsHelper.getUserPermissionLevel(invoker) == Permission.ADMIN
      if (targetsSelf || invokerIsAdmin) {
        val target = if (args.user == null) invoker else args.user
        try {
          if (args.deassociate) {
            MCWhitelistHelper.deassociateMCAccountWithUser(target)
            cmd.respond("Okay!")
          } else if (args.account != null) {
            MCWhitelistHelper.associateMCAccountWithUser(target, args.account)
            cmd.respond("Okay!")
          } else {
            cmd.respond("You need to specify an account.")
          }
        } catch {
          // Thrown by the helper when the MC account cannot be resolved.
          case _: IllegalArgumentException =>
            cmd.respond("I couldn't find any information on that account.")
        }
      } else {
        cmd.respond("You don't have the required permissions.")
      }
  }

  override def name: Array[String] = Array("whitelist")

  override def parser(command: String): OptionParser = new OptionParser(command) {
    note("Associate an MC account with a user.")
    opt[String]('a', "account").text("Account to associate.").action((value, opts) => opts.copy(account = value)).optional()
    opt[IUser]('u', "user").text("User to associate account with. Defaults to you. Using any other user here will require admin permissions.").action((value, opts) => opts.copy(user = value)).optional()
    opt[Unit]('d', "deassociate").text("De-associate the MC account with this user.").action((_, opts) => opts.copy(deassociate = true))
  }

  override def permissionLevel: Permission = Permission.GUEST
}
object WhitelistCommand {
  /** Parsed command-line options for the whitelist command.
    *
    * @param user        target user, or `null` to target the invoker
    * @param account     MC account name to associate, or `null` when absent
    * @param deassociate when true, remove the existing association instead
    */
  case class Options(user: IUser, account: String, deassociate: Boolean) extends TCommandOptions {
    // Zero-argument constructor required by the option-parsing machinery;
    // all fields start out "unset".
    def this() = this(null, null, false)
  }
}
| Modcast/ModcastBot | src/main/scala/com/amadornes/modcast/bot/discord/commands/WhitelistCommand.scala | Scala | mit | 2,159 |
package com.clarifi.reporting.ermine.core
import com.clarifi.reporting.ermine._
import java.util.Date
import scalaz._
import scalaz.Scalaz._
import Runtime.Thunk
class OpenException extends Exception
/** AST for the Core intermediate language, parameterised over the type of
  * unresolved "goal" values embedded in the tree (see [[CGoal]]).
  */
sealed abstract class Core[+A] extends Monadic[Core,A] with Traversable[A] {
  def flatMap[B](f: A => Core[B]): Core[B]
  def map[B](f: A => B): Core[B]
  def foreach[U](f: A => U): Unit
  def self = this
  implicit def lift[B](v: Core[B]) = v
  def when(b: Boolean): Core[Unit] = if (b) skip else CGoal(())
  // check for unresolved goals: `forall(_ => false)` holds exactly when the
  // traversal yields no goal values, i.e. the term is fully resolved and can
  // safely be viewed as Core[Nothing]
  def close: Option[Core[Nothing]] =
    if (forall(_ => false)) Some(this.asInstanceOf[Core[Nothing]])
    else None
}
/** Evaluator for closed Core terms. */
object Core {
  // Runtime environment mapping term variables to their (lazy) values.
  type Env = Map[TermVar, Runtime]
  implicit val coreMonad: Monad[Core] = new Monad[Core] {
    def point[A](a : => A) = CGoal(a)
    def bind[A,B](r : Core[A])(f : A => Core[B]) = r flatMap f
  }
  // def evalBinding(b: Binding, env: Env): Runtime = Runtime.accumArgs(b.arity) { evalAlts(b.loc, _, b.alts, env) }

  /** Tries each case alternative in order against the scrutinee values `rs`;
    * falls through on mismatch and bottoms out with an AltException when no
    * alternative matches.
    */
  def evalAlts(loc: Loc, rs: List[Runtime], e: List[CAlt[Runtime]], env: Env): Runtime = e match {
    case CAlt(l,ps,t) :: ep => Pattern.matches(l, ps, rs) match {
      case PatternMatched(delta) => eval(t, env ++ delta)
      case PatternMismatch => evalAlts(loc, rs, ep, env)
      case PatternExplosion(b) => b
    }
    case List() => Bottom(throw new AltException(loc, rs))
  }

  // NB: you can pass a Core[Nothing] to this function!
  def eval(c: Core[Runtime], env: Env): Runtime = c match {
    case CVar(v) => env.getOrElse(v, v.error("PANIC: Core.eval: unbound variable " + v))
    case CGoal(r) => r
    case CApp(f,x) => eval(f,env).apply(Thunk(eval(x,env)))
    case CLam(l, n, b) => Fun(x => n(x) match {
      case PatternMismatch => l.error("failed pattern match")
      case PatternMatched(delta) => eval(b, env ++ delta)
      case PatternExplosion(b) => b
    })
    case CCase(l, e, alts) => evalAlts(l, List(eval(e, env)), alts, env)
    case CEval => Fun(_.whnfMatch("Core.eval") { case Prim(c : Core[Nothing]) => Core.eval(c, env) })
    case e : Hardcore => e.eval
    // Recursive let: the environment is tied into a knot — each binding's thunk
    // closes over `envp`, which is only assigned after the map is built, so
    // bindings may refer to each other (and themselves) lazily.
    case CLet(bs, b) => var envp: Env = null
      envp = env ++ bs.mapValues(b => Thunk(eval(b, envp)))
      eval(b, envp)
  }
}
/** An embedded, not-yet-resolved goal value. */
case class CGoal[+A](a: A) extends Core[A] {
  def flatMap[B](f: A => Core[B]) = f(a)
  def map[B](f: A => B): Core[B] = CGoal(f(a))
  def foreach[U](f: A => U) { f(a) }
}

/** Function application: `e1 e2`. */
case class CApp[+A](e1: Core[A], e2: Core[A]) extends Core[A] {
  def flatMap[B](f: A => Core[B]) = CApp(e1.flatMap(f),e2.flatMap(f))
  def map[B](f: A => B): Core[B] = CApp(e1.map(f),e2.map(f))
  def foreach[U](f: A => U) { e1.foreach(f); e2.foreach(f) }
}

/** Single-argument lambda whose parameter is destructured by `pat`. */
case class CLam[+A](loc: Loc, pat: Pattern, body: Core[A]) extends Core[A] with Located {
  def flatMap[B](f: A => Core[B]) = CLam(loc, pat, body.flatMap(f))
  def map[B](f: A => B): Core[B] = CLam(loc, pat, body.map(f))
  def foreach[U](f: A => U) { body.foreach(f) }
}

/** One alternative of a case expression (patterns plus right-hand side).
  * Not itself a Core term, but traversed/mapped alongside one.
  */
case class CAlt[+A](loc: Loc, patterns: List[Pattern], body: Core[A]) extends Located with Traversable[A] {
  def flatMap[B](f: A => Core[B]) = CAlt(loc, patterns, body.flatMap(f))
  def map[B](f: A => B) = CAlt(loc, patterns, body.map[B](f))
  def foreach[U](f: A => U) { body.foreach(f) }
}

/** Case analysis on `expr` over the given alternatives. */
case class CCase[+A](loc: Loc, expr: Core[A], alts: List[CAlt[A]]) extends Core[A] with Located {
  def flatMap[B](f: A => Core[B]) = CCase(loc, expr.flatMap(f), alts.map(_.flatMap(f)))
  def map[B](f: A => B) = CCase(loc, expr.map(f), alts.map(_.map(f)))
  def foreach[U](f: A => U) { expr.foreach(f); for (a <- alts) a.foreach(f) }
}

/** (Mutually) recursive let bindings scoped over `body`. */
case class CLet[+A](bindings: Map[TermVar,Core[A]], body: Core[A]) extends Core[A] {
  def flatMap[B](f: A => Core[B]) = CLet(bindings.mapValues(_.flatMap(f)), body.flatMap(f))
  def map[B](f: A => B) = CLet(bindings.mapValues(_.map(f)), body.map(f))
  def foreach[U](f: A => U) { for (p <- bindings) p._2.foreach(f); body.foreach(f) }
}

/** Reference to a term variable; contains no goals, hence Core[Nothing]. */
case class CVar(v: TermVar) extends Core[Nothing] with Located {
  def loc = v.loc
  def flatMap[B](f: Nothing => Core[B]) = this
  def map[B](f: Nothing => B) = this
  def foreach[U](f: Nothing => U) {}
}

/** Primitive that evaluates a quoted Core term (see CQuote). */
case object CEval extends Core[Nothing] {
  def flatMap[B](f: Nothing => Core[B]) = this
  def map[B](f: Nothing => B) = this
  def foreach[U](f: Nothing => U) {}
}

/** Literals and other leaf terms with a fixed runtime value. */
sealed class Hardcore(val eval: Runtime) extends Core[Nothing] {
  def flatMap[B](f: Nothing => Core[B]) = this
  def map[B](f: Nothing => B) = this
  def foreach[U](f: Nothing => U) {}
}

case class CInt(value: Int) extends Hardcore(Prim(value))
case class CLong(value: Long) extends Hardcore(Prim(value))
case class CByte(value: Byte) extends Hardcore(Prim(value))
case class CShort(value: Short) extends Hardcore(Prim(value))
case class CString(value: String) extends Hardcore(Prim(value))
case class CChar(value: Char) extends Hardcore(Prim(value))
case class CFloat(value: Float) extends Hardcore(Prim(value))
case class CDouble(value: Double) extends Hardcore(Prim(value))
case class CDate(value: Date) extends Hardcore(Prim(value))
case class CProduct(n: Int) extends Hardcore(Runtime.accumProduct(n))
case class CQuote(c: Core[Nothing]) extends Hardcore(Prim(c))
case object CEmptyRecord extends Hardcore(Rec(Map()))
| ermine-language/ermine-legacy | src/main/scala/com/clarifi/reporting/ermine/core/Core.scala | Scala | bsd-2-clause | 5,324 |
package fs2
package async
package mutable
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import cats.effect.Effect
import cats.implicits._
/**
* An asynchronous semaphore, useful as a concurrency primitive.
*/
/**
 * An asynchronous semaphore, useful as a concurrency primitive.
 */
abstract class Semaphore[F[_]] {

  /** Returns the number of permits currently available. Always nonnegative. */
  def available: F[Long]

  /**
   * Decrements the number of available permits by `n`, blocking until `n`
   * are available. Error if `n < 0`. The blocking is semantic; we do not
   * literally block a thread waiting for permits to become available.
   *
   * Note that decrements are satisfied in strict FIFO order, so given
   * `s: Semaphore[F]` with 2 permits available, a `decrementBy(3)` will
   * always be satisfied before a later call to `decrementBy(1)`.
   */
  def decrementBy(n: Long): F[Unit]

  /**
   * Like [[decrementBy]] but limits the amount of time spent blocking for
   * permits. If all permits have not been acquired after the timeout has been
   * reached, the action completes with the number of permits remaining to be acquired.
   * If the requested number of permits is acquired, the action completes with 0.
   */
  def timedDecrementBy(n: Long, timeout: FiniteDuration, scheduler: Scheduler): F[Long]

  /** Acquires `n` permits now and returns `true`, or returns `false` immediately. Error if `n < 0`. */
  def tryDecrementBy(n: Long): F[Boolean]

  /** Alias for `[[tryDecrementBy]](1)`. */
  def tryDecrement: F[Boolean] = tryDecrementBy(1)

  /**
   * Increments the number of available permits by `n`. Error if `n < 0`.
   * This will have the effect of unblocking `n` acquisitions.
   */
  def incrementBy(n: Long): F[Unit]

  /**
   * Obtains a snapshot of the current count. May be out of date the instant
   * after it is retrieved. Use `[[tryDecrement]]` or `[[tryDecrementBy]]`
   * if you wish to attempt a decrement and return immediately if the
   * current count is not high enough to satisfy the request.
   *
   * Negative values indicate the total outstanding (blocked) acquisitions.
   */
  def count: F[Long]

  /**
   * Resets the count of this semaphore back to zero, and returns the previous count.
   * Throws an `IllegalArgumentException` if count is below zero (due to pending
   * decrements).
   */
  def clear: F[Long]

  /** Decrements the number of permits by 1. Alias for `[[decrementBy]](1)`. */
  final def decrement: F[Unit] = decrementBy(1)

  /**
   * Like [[decrement]] but limits the amount of time spent blocking for
   * a permit. If the permit has not been acquired after the timeout has been
   * reached, the action completes with false. If a permit is acquired, the
   * action completes with true.
   */
  def timedDecrement(timeout: FiniteDuration, scheduler: Scheduler): F[Boolean]

  /** Increments the number of permits by 1. Alias for `[[incrementBy]](1)`. */
  final def increment: F[Unit] = incrementBy(1)
}
object Semaphore {

  /** Creates a new `Semaphore`, initialized with `n` available permits. */
  def apply[F[_]](n: Long)(implicit F: Effect[F], ec: ExecutionContext): F[Semaphore[F]] = {
    def ensureNonneg(n: Long) =
      assert(n >= 0, s"n must be nonnegative, was: $n ")
    ensureNonneg(n)
    // semaphore is either empty, and there are number of outstanding acquires (Left)
    // or it is non-empty, and there are n permits available (Right)
    type S = Either[Vector[(Long, async.Promise[F, Unit])], Long]
    async.refOf[F, S](Right(n)).map { state =>
      new Semaphore[F] {
        // Completing a waiter's gate releases its semantically-blocked decrement.
        private def open(gate: async.Promise[F, Unit]): F[Unit] =
          gate.complete(())

        def count = state.get.map(count_)

        def decrementBy(n: Long) = {
          ensureNonneg(n)
          if (n == 0) F.unit
          else async.promise[F, Unit].flatMap(decrementByImpl(n, _))
        }

        // Either takes permits immediately (Right with enough available) or
        // enqueues `gate` for the shortfall and semantically blocks on it.
        private def decrementByImpl(n: Long, gate: Promise[F, Unit]): F[Unit] =
          state
            .modify {
              case Left(waiting) => Left(waiting :+ (n -> gate))
              case Right(m) =>
                if (n <= m) Right(m - n)
                else Left(Vector((n - m) -> gate))
            }
            .flatMap { c =>
              c.now match {
                case Left(waiting) =>
                  def err =
                    sys.error("FS2 bug: Semaphore has empty waiting queue rather than 0 count")
                  waiting.lastOption.getOrElse(err)._2.get
                case Right(_) => F.unit
              }
            }

        def timedDecrementBy(n: Long, timeout: FiniteDuration, scheduler: Scheduler): F[Long] = {
          ensureNonneg(n)
          if (n == 0) F.pure(0)
          else
            async.promise[F, Unit].flatMap { gate =>
              // On timeout, remove our gate from the waiters and report how many
              // permits were still outstanding for it.
              val timedOut: F[Long] = state
                .modify {
                  case Left(waiting) =>
                    val w2 = waiting.filter(_._2 ne gate)
                    if (w2.isEmpty) Right(0) else Left(w2)
                  case Right(n) => Right(n)
                }
                .map { c =>
                  c.previous match {
                    case Left(w) =>
                      w.find(_._2 eq gate) match {
                        case Some((m, g)) => m
                        case None         => 0
                      }
                    case Right(_) => 0
                  }
                }
              scheduler.effect.delayCancellable(timedOut, timeout).flatMap {
                case (timer, cancelTimer) =>
                  // Race the acquisition against the timer; cancel whichever loses.
                  async
                    .race(decrementByImpl(n, gate), timer)
                    .flatMap(_.fold(_ => cancelTimer.as(0), o => F.pure(o.getOrElse(0))))
              }
            }
        }

        def timedDecrement(timeout: FiniteDuration, scheduler: Scheduler): F[Boolean] =
          timedDecrementBy(1, timeout, scheduler).map(_ == 0)

        def clear: F[Long] =
          state
            .modify {
              case Left(e) =>
                throw new IllegalStateException("cannot clear a semaphore with negative count")
              case Right(n) => Right(0)
            }
            .flatMap { c =>
              c.previous match {
                case Right(n) => F.pure(n)
                case Left(_)  => sys.error("impossible, exception thrown above")
              }
            }

        // Left (waiters) counts as a negative total of all outstanding requests.
        private def count_(s: S): Long =
          s.fold(ws => -ws.map(_._1).sum, identity)

        def incrementBy(n: Long) = {
          ensureNonneg(n)
          if (n == 0) F.pure(())
          else
            state
              .modify {
                case Left(waiting) =>
                  // just figure out how many to strip from waiting queue,
                  // but don't run anything here inside the modify
                  var m = n
                  var waiting2 = waiting
                  while (waiting2.nonEmpty && m > 0) {
                    val (k, gate) = waiting2.head
                    if (k > m) {
                      waiting2 = (k - m, gate) +: waiting2.tail; m = 0;
                    } else { m -= k; waiting2 = waiting2.tail }
                  }
                  if (waiting2.nonEmpty) Left(waiting2)
                  else Right(m)
                case Right(m) => Right(m + n)
              }
              .flatMap { change =>
                // invariant: count_(change.now) == count_(change.previous) + n
                change.previous match {
                  case Left(waiting) =>
                    // now compare old and new sizes to figure out which actions to run
                    val newSize = change.now.fold(_.size, _ => 0)
                    val released = waiting.size - newSize
                    waiting.take(released).foldRight(F.pure(())) { (hd, tl) =>
                      open(hd._2) *> tl
                    }
                  case Right(_) => F.pure(())
                }
              }
        }

        def tryDecrementBy(n: Long) = {
          ensureNonneg(n)
          if (n == 0) F.pure(true)
          else
            state
              .modify {
                case Right(m) if m >= n => Right(m - n)
                case w                  => w
              }
              .map { c =>
                // succeeded exactly when the state actually changed
                c.now.fold(_ => false, n => c.previous.fold(_ => false, m => n != m))
              }
        }

        def available = state.get.map {
          case Left(_)  => 0
          case Right(n) => n
        }
      }
    }
  }

  /** Creates a `Semaphore` with 0 initial permits. */
  def empty[F[_]: Effect](implicit ec: ExecutionContext): F[Semaphore[F]] =
    apply(0)
}
| zaneli/fs2 | core/shared/src/main/scala/fs2/async/mutable/Semaphore.scala | Scala | mit | 8,544 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.3
* @date Tue Aug 9 16:39:41 EDT 2016
* @see LICENSE (MIT style license file).
*
* Graph Data Structure Using Mutable Sets
*/
package scalation.graphalytics.mutable
import scala.collection.mutable.Map
import scala.collection.mutable.{Set => SET}
import scalation.graphalytics.mutable.{ExampleGraphI => EX_GRAPH}
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `ExampleMGraphI` object contains example query and data multi-digraphs
* in which the vertex label type `TLabel` is `Int`.
*/
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `ExampleMGraphI` object contains example query and data multi-digraphs
 *  in which the vertex label type `TLabel` is `Int`.
 *  Each `MGraph` pairs a base digraph from `ExampleGraphI` with a map of
 *  edge labels keyed by (source, destination) vertex pairs.
 */
object ExampleMGraphI
{
    // -----------------------------------------------------------------------
    // Simple data and query multi-digraphs.
    // -----------------------------------------------------------------------

    // data multi-digraph g1 -------------------------------------------------

    val g1 = MGraph (EX_GRAPH.g1,
                     Map ((1, 0) -> -1,
                          (1, 2) -> -1,
                          (1, 3) -> -1,
                          (1, 4) -> -1,
                          (2, 0) -> -1,
                          (3, 4) -> -2),          // changed from -1 to -2 to filter out vertices
                     "g1")

    // query multi-digraph q1 ------------------------------------------------

    val q1 = MGraph (EX_GRAPH.q1,
                     Map ((0, 1) -> -1,
                          (0, 2) -> -1,
                          (2, 1) -> -1),
                     "q1")

    // same graphs rebuilt with parent (inverse adjacency) information
    val g1p = new MGraph (g1.ch, g1.label, g1.elabel, true, g1.name)    // with parents
    val q1p = new MGraph (q1.ch, q1.label, q1.elabel, true, q1.name)    // with parents

    // -----------------------------------------------------------------------
    // Data and query graphs from the following paper:
    // John A. Miller, Lakshmish Ramaswamy, Arash J.Z. Fard and Krys J. Kochut,
    // "Research Directions in Big Data Graph Analytics,"
    // Proceedings of the 4th IEEE International Congress on Big Data (ICBD'15),
    // New York, New York (June-July 2015) pp. 785-794.
    // -----------------------------------------------------------------------

    // data multi-digraph g2 -------------------------------------------------

    val g2 = MGraph (EX_GRAPH.g2,
                     Map ((0, 1) -> 1,
                          (1, 0) -> 1,
                          (1, 2) -> 1,
                          (1, 3) -> 1,
                          (1, 4) -> 1,          // 2
                          (1, 5) -> 1,
                          (5, 6) -> 1,
                          (5, 10) -> 1,
                          (6, 7) -> 1,
                          (6, 4) -> 1,          // 2
                          (6, 8) -> 1,
                          (6, 9) -> 1,
                          (7, 1) -> 1,
                          (10, 11) -> 1,
                          (11, 12) -> 1,
                          (12, 11) -> 1,
                          (12, 13) -> 1,
                          (14, 13) -> 1,
                          (14, 15) -> 1,
                          (15, 16) -> 1,
                          (16, 17) -> 1,
                          (16, 18) -> 1,
                          (17, 14) -> 1,
                          (17, 19) -> 1,
                          (18, 20) -> 1,
                          (19, 14) -> 1,
                          (20, 19) -> 1,
                          (20, 21) -> 1,
                          (22, 21) -> 1,
                          (22, 23) -> 1,
                          (23, 25) -> 1,
                          (25, 24) -> 1,
                          (25, 26) -> 1,
                          (26, 28) -> 1,
                          (28, 27) -> 1,
                          (28, 29) -> 1,
                          (29, 22) -> 1),
                     "g2")

    // query multi-digraph q2 ------------------------------------------------

    val q2 = MGraph (EX_GRAPH.q2,
                     Map ((0, 1) -> 1,
                          (1, 0) -> 1,
                          (1, 2) -> 1,
                          (1, 3) -> 1),
                     "q2")

    val g2p = new MGraph (g2.ch, g2.label, g2.elabel, true, g2.name)    // with parents
    val q2p = new MGraph (q2.ch, q2.label, q2.elabel, true, q2.name)    // with parents

} // ExampleMGraphI object
| NBKlepp/fda | scalation_1.3/scalation_modeling/src/main/scala/scalation/graphalytics/mutable/ExampleMGraphI.scala | Scala | mit | 4,503 |
package fr.neuville.lombok.processor
import scalaz._
import Scalaz._
import std.anyVal.booleanInstance.{conjunction, disjunction}
import java.util.{List => JList}
import java.lang.{Class => JClass}
import java.lang.annotation.{Annotation => JAnnotation}
import com.intellij.psi._
import de.plushnikov.intellij.lombok.problem.ProblemBuilder
import de.plushnikov.intellij.lombok.quickfix.PsiQuickFixFactory
import de.plushnikov.intellij.lombok.psi.{LombokLightMethodBuilder, LombokPsiElementFactory}
import de.plushnikov.intellij.lombok.util._
import LombokProcessorUtil._
import PsiElementUtil._
import PsiClassUtil._
import PsiMethodUtil._
import lombok.{NonNull, RequiredArgsConstructor, AllArgsConstructor}
import de.plushnikov.intellij.lombok.processor.field.AnnotationFieldProcessor
import lombok.experimental.Wither
import de.plushnikov.intellij.lombok.processor.clazz.AnnotationClassProcessor
import fr.neuville.lombok.{Problem, Error, Warning}
import Problem._
import de.plushnikov.intellij.lombok.LombokUtils
import scalaz.syntax.ApplicativeBuilder
/**
 * Processes the lombok `@Wither` annotation at class level: generates a wither
 * method for every eligible field of the annotated class.
 */
final class WitherProc(val annotationClass: JClass[_ <: JAnnotation],
                       val genClass: JClass[_]) extends AnnotationClassProcessor(annotationClass, genClass) {

  // delegate that knows how to validate/build a wither for a single field
  private val fieldProc = new WitherFieldProc(annotationClass, genClass)

  def this() = this(classOf[Wither], classOf[PsiMethod])

  /** Validates class-level usage, reporting every accumulated problem to the builder. */
  override def validate(psiAnnotation: PsiAnnotation, psiClass: PsiClass, builder: ProblemBuilder): Boolean =
    (validOnRightType(psiAnnotation, psiClass) |@| validHasVisibility(psiAnnotation))((_, _) => true)
      .fold(((_: NonEmptyList[Problem]) foreach addToPbBuilder(builder)) andThen (_ => false), _ => true)

  /** Adds every generated wither method to the target list. */
  override def processIntern(psiClass: PsiClass, psiAnnotation: PsiAnnotation, target: JList[_ >: PsiElement]): Unit =
    for {
      tg <- Option(target).toSeq
      method <- witherMethods(psiClass, psiAnnotation)
    } tg add method

  /** Builds a wither for every field that passes all three predicates. */
  def witherMethods(clazz: PsiClass, anot: PsiAnnotation): Seq[LombokLightMethodBuilder] = {
    // conjunction Boolean monoid: p |+| q is a predicate requiring both p and q
    implicit val M = conjunction
    // per-field errors are deliberately discarded in this class-level pass;
    // fixed `_ => Unit` (which returned the Unit companion object and relied on
    // value discarding) to `_ => ()`, the actual unit value
    val canCreateWither: PsiField => Boolean = fieldProc.canCreateWither(anot)(_)(_ => ())
    (for {
      acLevel <- Option(getMethodModifier(anot)).toSeq
      field <- clazz.getFields.toSeq
      if (canCreateWither |+| hasNoWither |+| hasNoFront$)(field)
    } yield fieldProc.witherMethod(field, acLevel)).flatten
  }

  /** `@Wither` is only legal on a concrete class (not an annotation, enum or interface). */
  private def validOnRightType(anot: PsiAnnotation, clazz: PsiClass): ValidationNel[Problem, Boolean] =
    (!clazz.isAnnotationType && !clazz.isEnum && !clazz.isInterface) ?
      true.successNel[Problem] |
      Error(s"${anot.getQualifiedName} is only supported on a class or field type").failNel

  /**
   * The annotation must carry a resolvable access level.
   * Fixed: the previous `getMethodModifier(anot).some.isDefined` wrapped the
   * (possibly null) result in `Some`, so the check was always true and this
   * validation never fired; `Option(...)` correctly maps null to `None`
   * (consistent with the `Option(getMethodModifier(...))` calls elsewhere).
   */
  private def validHasVisibility(anot: PsiAnnotation): ValidationNel[Problem, Boolean] =
    Option(LombokProcessorUtil.getMethodModifier(anot)).isDefined ?
      true.successNel[Problem] |
      Error(s"${anot.getQualifiedName} value attribute must be defined").failNel

  // NOTE(review): relies on acceptElement's semantics — presumably true when one
  // of the field's annotations would already generate a method; confirm.
  private def hasNoWither(field: PsiField) =
    !field.getModifierList.getAnnotations.toVector.map(acceptElement(_, classOf[PsiMethod])).suml(disjunction)

  // skip lombok-internal marker fields (names starting with the intern prefix)
  private def hasNoFront$(field: PsiField) = !(field.getName startsWith LombokUtils.LOMBOK_INTERN_FIELD_MARKER)
}
/**
 * Inspects and validates the lombok `@Wither` annotation on a single field and
 * creates the corresponding wither method for that field.
 * (Previous comment referred to `@Setter`; this processor handles `@Wither`.)
 */
final class WitherFieldProc(val annotationClass: JClass[_ <: JAnnotation],
                            val genClass: JClass[_]) extends AnnotationFieldProcessor(annotationClass, genClass) {

  def this() = this(classOf[Wither], classOf[PsiMethod])

  /** Validates the field-level usage, reporting accumulated problems to the builder. */
  override def validate(anot: PsiAnnotation, field: PsiField, pbb: ProblemBuilder): Boolean =
    canCreateWither(anot)(field)(_ foreach addToPbBuilder(pbb))

  /** Generates the wither method (when possible) and adds it to the target list. */
  override def processIntern(psiField: PsiField, psiAnnotation: PsiAnnotation, target: JList[_ >: PsiElement]): Unit =
    for {
      acLevel <- Option(getMethodModifier(psiAnnotation))
      method <- witherMethod(psiField, acLevel)
      tg <- Option(target)
    } tg add method

  /**
   * True when a wither can be generated for the field; all accumulated
   * validation problems are handed to `treatErrors` (e.g. for IDE reporting).
   */
  def canCreateWither(anot: PsiAnnotation)(field: PsiField)(treatErrors: NonEmptyList[_ <: Problem] => Unit): Boolean =
    (validNonStatic(field, anot) |@|
      validHasConstructor(field) |@|
      validIsWitherUnique(field, anot))((_, _, _) => true)
      .fold(treatErrors andThen (_ => false), _ => true)

  /** Builds the light (synthetic) wither method for the field, when all parts resolve. */
  def witherMethod(psiField: PsiField, accessLevel: String): Option[LombokLightMethodBuilder] =
    for {
      field <- Option(psiField)
      mgr <- Option(field.getManager)
      name <- Option(field.getName)
      ftype <- Option(field.getType)
      clazz <- Option(field.getContainingClass)
      returnType <- Option(getTypeWithGenerics(clazz))
    } yield
      LombokPsiElementFactory.getInstance()
        .createLightMethod(mgr, witherName(name))
        .withModifier(accessLevel)
        .withMethodReturnType(returnType)
        .withContainingClass(clazz)
        .withParameter(name, ftype)
        .withNavigationElement(field)

  /**
   * Derives the wither name from a field name: "foo" -> "withFoo", and the
   * boolean style "isFoo" -> "withFoo".
   * Fixed: the new length guard keeps a field named exactly "is" from throwing
   * StringIndexOutOfBoundsException on `fieldName(2)`.
   */
  def witherName(fieldName: String): String = {
    val suffix =
      (fieldName.length > 2 && fieldName.startsWith("is") && fieldName(2).isUpper) ?
        fieldName.substring(2) | fieldName
    s"with${suffix(0).toUpper}${suffix.substring(1)}"
  }

  /** Withers may not be generated for static fields. */
  private def validNonStatic(field: PsiField, annotation: PsiAnnotation): ValidationNel[Problem, Boolean] =
    if (field.hasModifierProperty(PsiModifier.STATIC))
      Error(s"${annotation.getQualifiedName} on static field is not allowed",
        PsiQuickFixFactory.createModifierListFix(field, PsiModifier.STATIC, false, false)).failNel
    else
      true.success

  /**
   * A wither needs a constructor taking a parameter of the field's type, either
   * declared explicitly or implied by @AllArgsConstructor (always) or by
   * @RequiredArgsConstructor (for final or @NonNull fields).
   */
  private def validHasConstructor(field: PsiField): ValidationNel[Problem, Boolean] = {
    val hasParam: PsiField => PsiMethod => Boolean = f => m =>
      m.getParameterList.getParameters exists (p => typesAreEquivalent(p.getType, f.getType))
    lazy val hasRightConstructor = Option(field.getContainingClass)
      .map(psiClass => collectClassConstructorIntern(psiClass))
      .exists(_.any(hasParam(field)))
    lazy val isAnnotatedWith: JClass[_ <: JAnnotation] => PsiClass => Boolean =
      clazz => psiClass => PsiAnnotationUtil.isAnnotatedWith(psiClass, clazz)
    lazy val hasAllArgsConstAnot =
      Option(field.getContainingClass).exists(isAnnotatedWith(classOf[AllArgsConstructor]))
    lazy val hasRequiredArgsConstAnot =
      Option(field.getContainingClass).exists(isAnnotatedWith(classOf[RequiredArgsConstructor]))
    lazy val isFinal = field.hasModifierProperty(PsiModifier.FINAL)
    lazy val hasNonNullAnot = PsiAnnotationUtil.isAnnotatedWith(field, classOf[NonNull])
    if (hasRightConstructor ||
      hasAllArgsConstAnot ||
      (hasRequiredArgsConstAnot && (isFinal || hasNonNullAnot)))
      true.success
    else
      Error(s"""Compilation will fail : no constructor
with a parameter of type ${field.getType.getCanonicalText} was found""").failNel
  }

  /** No wither is generated when a one-parameter method with the wither's name already exists. */
  private def validIsWitherUnique(field: PsiField, annotation: PsiAnnotation): ValidationNel[Problem, Boolean] = {
    val result =
      for {
        fieldName <- Option(field.getName)
        psiClass <- Option(field.getContainingClass)
        if hasSimilarMethod(collectClassMethodsIntern(psiClass), witherName(fieldName), 1)
      } yield
        Warning(s"""No ${annotation.getQualifiedName} generated : a method
named ${witherName(fieldName)} taking one parameter already exists""").failNel
    result getOrElse true.success
  }
}
| gneuvill/lombok-intellij-plugin | processor-core/src/main/scala/fr/neuville/lombok/processor/wither.scala | Scala | bsd-2-clause | 7,416 |
package com.joshcough.minecraft
import org.bukkit.{ChatColor, Effect, Location, Material, OfflinePlayer, Server, World}
import org.bukkit.block.Block
import org.bukkit.event.Cancellable
import org.bukkit.event.entity.EntityDamageByEntityEvent
import org.bukkit.event.weather.WeatherChangeEvent
import org.bukkit.inventory.ItemStack
import org.bukkit.plugin.{Plugin, PluginManager}
import ChatColor._
import Effect._
import Material._
import org.bukkit.entity.{LivingEntity, Entity, EntityType, Player}
import org.bukkit.event.player.PlayerInteractEvent
import util.Try
import Cube._
object BukkitEnrichment extends BukkitEnrichment{

  object MaterialAndData {
    /** Convenience constant representing empty space (AIR, no data value). */
    val AIR = new MaterialAndData(Material.AIR, None)
  }

  /**
   * A Material together with an optional raw block data value.
   *
   * @param m    the block/item material
   * @param data the raw block data value, if any
   */
  case class MaterialAndData(m: Material, data: Option[Byte]){
    /**
     * Sets the given block to this material (and data value, when present).
     * @return true if the block actually changed
     */
    def update(b: Block): Boolean = {
      val oldM    = b.getType
      val oldData = b.getData
      b setType m
      data foreach b.setData
      // 0.toByte keeps the comparison Byte-typed; the previous plain 0 literal
      // widened getOrElse's result to AnyVal and compared Byte to Int through
      // universal (cooperative) equality — same result, but fragile typing.
      oldM != m || oldData != data.getOrElse(0.toByte)
    }
    /** An ItemStack of one item of this material, carrying the data value when present. */
    def itemStack: ItemStack = data.fold(new ItemStack(m))(new ItemStack(m, 1, 0:Short, _))
  }
}
/**
* Adds piles of missing functions to Bukkit classes.
*
* This is all done using Scala 2.10 enrichment classes, which work like this:
*
* implicit class RichThing(t: Thing) {
* def someNewFunction = ...
* }
*
* Where Thing is the class getting functions added to it. Any functions inside
* of the RichClass are added to Thing (or available at compile time, anyway).
*
* No implicit conversions are used here. Everything is explicit, in order to keep sanity.
* It would be easy to convert back and forth from say, a Block and a Location,
* but instead I provide functions like b.loc, and l.block. While I think this provides
* a good amount of sanity for the price of a little extra verbosity. This is especially true
* because this trait is mixed into ScalaPlugin, meaning that every ScalaPlugin
* has access to everything here. If there were a number of implicit conversions, things
* could potentially get ugly fast.
*/
trait BukkitEnrichment extends ScalaEnrichment {
import BukkitEnrichment._
  /**
   * Add a whole pile of awesomeness to Block.
   */
  implicit class RichBlock(b:Block) {
    // a pile of useful functions.
    lazy val world        = b.getWorld
    lazy val loc          = b.getLocation
    lazy val (x, y, z)    = (b.getX, b.getY, b.getZ)
    lazy val (xd, yd, zd) = (b.getX.toDouble, b.getY.toDouble, b.getZ.toDouble)
    lazy val chunk        = world.getChunkAt(b)
    // horizontal neighbors: north = z - 1, south = z + 1, east = x + 1, west = x - 1
    lazy val blockNorth: Block = world(xd, yd, zd - 1)
    lazy val blockSouth: Block = world(xd, yd, zd + 1)
    lazy val blockEast : Block = world(xd + 1, yd, zd)
    lazy val blockWest : Block = world(xd - 1, yd, zd)
    lazy val blockNorthEast: Block = blockNorth.blockEast
    lazy val blockSouthEast: Block = blockSouth.blockEast
    lazy val blockNorthWest: Block = blockNorth.blockWest
    lazy val blockSouthWest: Block = blockSouth.blockWest

    /** The block in this world with any of this block's coordinates overridden. */
    def copy(x: Double = xd, y: Double = yd, z: Double = zd) = world(x, y, z)

    // the block directly above b
    lazy val blockAbove = world(xd, yd + 1, zd)
    // the block directly below b
    lazy val blockBelow = world(xd, yd - 1, zd)
    // the nth block above b
    def nthBlockAbove(n:Int) = world(xd, yd + n, zd)
    // the nth block below b
    def nthBlockBelow(n:Int) = world(xd, yd - n, zd)
    // an infinite Stream of all the Blocks above b
    def blocksAbove : Stream[Block] = blockAbove #:: blockAbove.blocksAbove
    // b, and all the blocks above b
    def andBlocksAbove: Stream[Block] = b #:: blocksAbove
    // an infinite Stream of all the Blocks below b
    def blocksBelow : Stream[Block] = blockBelow #:: blockBelow.blocksBelow
    // b, and all the blocks below b
    def andBlocksBelow: Stream[Block] = b #:: blocksBelow
    // the four blocks north, south, east and west of b
    def neighbors4: Stream[Block] =
      blockNorth #:: blockSouth #:: blockEast #:: blockWest #:: Stream.empty
    // b, and the four blocks north, south, east and west of b
    def andNeighbors4: Stream[Block] = b #:: neighbors4
    // the four blocks north, south, east and west of b
    // and the four blocks northeast, southeast, northwest, and southwest of b
    def neighbors8 : Stream[Block] = neighbors4 ++ (
      blockNorthEast #:: blockSouthEast #:: blockNorthWest #:: blockSouthWest #:: Stream.empty
    )
    // b and all of b's neighbors8
    def andNeighbors8: Stream[Block] = b #:: neighbors8

    /**
     * @return all of b's 26 neighbors in 3D space
     */
    def neighbors : Stream[Block] =
      neighbors8 ++ b.blockBelow.andNeighbors8 #::: b.blockAbove.andNeighbors8

    /**
     * @return b, and all of b's 26 neighbors in 3D space
     */
    def andNeighbors : Stream[Block] = b #:: neighbors

    // material tests
    def is(m:Material)    = b.getType == m
    def isA(m:Material)   = b.getType == m
    def isNot(m:Material) = b.getType != m

    /**
     * drop the item for the current material of this block, and then set this block to AIR
     * @return true if the block was changed (false when it was already AIR)
     */
    def erase: Boolean = if(! (b is AIR)) {
      b.world.dropItem (b.loc, b.itemStack)
      b.world.playEffect(b.loc, SMOKE, 1)
      changeTo(AIR)
    } else false

    /**
     * Change this block to the given material.
     * @return true if the block actually changed
     */
    def changeTo(m: Material): Boolean = {
      // blocks in unloaded chunks can't be modified, so best-effort load first
      try if(! chunk.isLoaded) chunk.load
      catch { case e: Exception => println("unable to load chunk.") }
      MaterialAndData(m, None) update b
    }

    /** An ItemStack of one item matching this block's material and data. */
    def itemStack       = new ItemStack(b.getType, 1, b.getData)
    def materialAndData = MaterialAndData(b.getType, Some(b.getData))
    def point: Point    = Point(b.x, b.y, b.z)

    /**
     * Returns a Cube of all of the blocks between two locations of the world.
     */
    def cubeTo(b2: Block): Cube[Block] = b.loc.cubeTo(b2.loc)
  }
  /**
   * Add some awesomeness to Material.
   */
  implicit class RichMaterial(m: Material){
    /** An ItemStack of a single item of this material. */
    def itemStack = new ItemStack(m, 1)
    /** This material with no data value attached. */
    def andData = MaterialAndData(m, None)
  }
/**
* Add some awesomeness to Cancellable.
*/
implicit class RichCancellable(c:Cancellable){
def cancel: Unit = c.setCancelled(true)
def cancelIf(b: => Boolean, runBeforeCancelling: => Unit = () => ()): Unit =
if(b) { runBeforeCancelling; c.setCancelled(true) }
}
  /**
   * Add a bunch of awesomeness to Entity.
   */
  implicit class RichEntity(e:Entity){
    lazy val loc       = e.getLocation
    lazy val (x, y, z) = (loc.x, loc.y, loc.z)
    def server = e.getServer
    def world  = e.getWorld
    def isAn(et:EntityType) = e.getType == et
    def isA (et:EntityType) = isAn(et)
    /**
     * Run f on e, if e is a Player
     */
    def whenPlayer(f: Player => Unit): Unit = if(e.isInstanceOf[Player]) f(e.asInstanceOf[Player])
    /** Strikes lightning at this entity's location. */
    def shock = world strikeLightning loc
  }

  /**
   * Add some awesomeness to LivingEntity.
   */
  implicit class RichLivingEntity(e: LivingEntity){
    /** Kills the entity by setting its health to zero. */
    def die: Unit = e setHealth 0
  }

  /**
   * Add some awesomeness to ItemStack.
   */
  implicit class RichItemStack(i:ItemStack){
    // true when this stack's material equals m
    def isA (m:Material) = i.getType == m
    def isAn(m:Material) = i.getType == m
  }
  /**
   * Add a whole pile of awesomeness to World.
   */
  implicit class RichWorld(w:World){
    def name     = w.getName
    def entities = w.getEntities
    // apply/blockAt: look up the block at the given coordinates
    def apply(x: Int, y: Int, z: Int) : Block = blockAt(x.toDouble, y.toDouble, z.toDouble)
    def apply(x: Double, y: Double, z: Double): Block = new Location(w, x, y, z).getBlock
    def apply(p: Point): Block = this(p.x, p.y, p.z)
    def blockAt(x: Int, y: Int, z: Int): Block = blockAt(x.toDouble, y.toDouble, z.toDouble)
    def blockAt(x: Double, y: Double, z: Double): Block = new Location(w, x, y, z).getBlock

    /**
     * Returns an infinite Stream[Block] that increases positively in X (or EAST)
     * starting at the given Location.
     */
    def fromX(loc:Location): Stream[Block] = {
      // 0, 1, 2, ... — the natural numbers as an infinite stream
      lazy val nats:Stream[Int] = 0 #:: 1 #:: nats.tail.map(_+1)
      for (x<-nats) yield w(loc.x + x, loc.y, loc.z)
    }
  }
  /**
   * Add a whole pile of awesomeness to Location.
   */
  implicit class RichLocation(loc: Location){
    // integer (truncated) coordinates
    lazy val (x,y,z) = (loc.getX.toInt, loc.getY.toInt, loc.getZ.toInt)
    lazy val xyz = (x, y, z)
    // double-precision coordinates
    lazy val (xd,yd,zd) = (loc.getX, loc.getY, loc.getZ)
    lazy val xyzd = (xd, yd, zd)
    def world = loc.getWorld
    def block = loc.getBlock
    // NOTE(review): World#spawnCreature is deprecated in later Bukkit APIs in
    // favor of spawnEntity — confirm against the Bukkit version in use.
    def spawn(entityType: EntityType): Unit = world.spawnCreature(loc, entityType)
    /** Spawns n entities of the given type at this location. */
    def spawnN(entityType: EntityType, n: Int): Unit = for (i <- 1 to n) spawn(entityType)
    def dropItem(stack: ItemStack): Unit = loc.world.dropItem(loc, stack)
    def dropItem(m: Material): Unit = dropItem(m.itemStack)
    def point: Point = Point(loc.x, loc.y, loc.z)
    /**
     * Returns a Cube of all of the blocks between two locations of the world.
     */
    def cubeTo(loc2: Location): Cube[Block] =
      Cube[Block](loc.point, loc2.point)((p: Point) => loc.world(p.x, p.y, p.z))
  }

  /** Helpers for a Cube of Blocks: contained players, materials, etc. */
  implicit class RichCubeOfBlocks(c: Cube[Block]) {
    // NOTE(review): JavaConversions is the deprecated implicit-conversion API;
    // JavaConverters (.asScala) is the modern replacement, but switching would
    // alter call sites — flagged only.
    import collection.JavaConversions.asScalaIterator
    def world = c(Point(0,0,0)).world
    def blocks = c.toStream
    def blocksAndMaterials = blocks.map(b => (b, b.materialAndData))
    /** The players currently standing inside this cube. */
    def players: Iterator[Player] = world.getPlayers.iterator.filter(contains)
    def contains(p: Player) : Boolean = c.contains(p.loc.point)
    def contains(l: Location): Boolean = c.contains(l.point)
  }
  /**
   * Add a whole pile of awesomeness to Server.
   */
  implicit class RichServer(s:Server){
    /** The online player with the given name, if any. */
    def findPlayer(name:String) = tryO(s.getPlayer(name))
    def findOnlinePlayer = findPlayer _
    /** The known (possibly offline) player with the given name, if any. */
    def findOfflinePlayer(name:String) = Option(s.getOfflinePlayer(name))
    // unknown names are silently dropped by the flatten
    def findOnlinePlayers(names: List[String]): List[Player] = names.map(findOnlinePlayer).flatten
    def findOfflinePlayers(names: List[String]): List[OfflinePlayer] =
      names.map(findOfflinePlayer).flatten
  }
/**
* Add a whole pile of awesomeness to Player.
*/
implicit class RichPlayer(player:Player){
def loc = player.getLocation
def name = player.getName
def world = player.getWorld
def server = player.getServer
def inventory = player.getInventory
def is(pname: String) = name == pname
def holding = player.getItemInHand
def isHolding (m: Material) = player.getItemInHand.getType == m
def isHoldingA (m: Material) = isHolding(m)
def isHoldingAn(m: Material) = isHolding(m)
def isHoldingAnyOf(ms: Material*) = ms.exists(isHolding)
def blockOn = player.loc.block.blockBelow
def blockAboveHead = blockOn.nthBlockAbove(3)
def blocksAboveHead = blockAboveHead.blocksAbove
/**
* If this player were in a box, this function would return all the blocks in that box
* @return
*/
def blocksAround: Stream[Block] =
blockOn.nthBlockAbove(1).neighbors8 ++ // 8 blocks at the bottom half of the player
blockOn.nthBlockAbove(2).neighbors8 ++ // 8 blocks at the top half of the player
blockOn.andNeighbors8 #::: // 9 blocks below the player
blockOn.nthBlockAbove(3).andNeighbors8 // 9 blocks above the player.
/**
* Sends player a message.
*/
def ! (s: String): Unit = if(s != null) player.sendMessage(s)
/**
* Sends player all of the given messages
*/
def !* (ss: String*): Unit = ss.foreach(s => player ! s)
/**
* Sends the player the given message, but turns it red.
*/
def sendError(message:String): Unit = player.sendMessage(RED(message))
/**
* Send the player an error message, and then throw an exception violently.
*/
def bomb(message:String): Nothing = {
player ! RED(message)
throw new RuntimeException(message)
}
/**
* Brings the player UP to the top of the world (but at the same NSEW coordinate).
*/
def surface: Unit = teleportTo(world getHighestBlockAt loc)
// just a ton of utility functions that i don't feel like documenting
def findPlayer(name:String)(f: Player => Unit): Unit =
server.findPlayer(name).fold(sendError("kill could not find player: " + name))(f)
def findPlayers(names:List[String])(f: Player => Unit): Unit = names.foreach(n => findPlayer(n)(f))
def ban(reason:String){ player.setBanned(true); player.kickPlayer("banned: $reason") }
def kill(playerName:String): Unit = findPlayer(playerName)(kill)
def kill(p:Player): Unit = doTo(p, p.setHealth(0), "killed")
def teleportTo(otherPlayer: Player) = player.teleport(otherPlayer)
def teleportTo(b: Block): Unit = player.teleport(b.loc)
def shockWith(message:String) {
player.shock
player ! message
}
def withMaterial[T](nameOrId:String)(f: Material => T) {
attemptO(findMaterial(nameOrId))("No such material: $nameOrId", f)
}
def attemptO[T, U](ot: Option[T])(s: => String, f: T => U){
ot.fold(player ! s)(t => f(t))
}
def attempt[T](f: => T): Unit = try f catch {
case e: Exception => player ! RED(s"$e ${e.getMessage}\\n${e.getStackTraceString}")
}
def doTo(otherPlayer: Player, f: => Unit, actionName: String){
f
otherPlayer ! GREEN(s"you have been $actionName by ${player.name}")
player ! GREEN(s"you have $actionName ${otherPlayer.name}")
}
}
  /**
   * Add some awesomeness to EntityDamageByEntityEvent.
   */
  implicit class RichEntityDamageByEntityEvent(e: EntityDamageByEntityEvent) {
    def damager = e.getDamager // the attacking entity
    def damagee = e.getEntity  // the entity that was damaged
  }
  /**
   * Add some awesomeness to PlayerInteractEvent.
   */
  implicit class RichPlayerInteractEvent(e: PlayerInteractEvent) {
    def block = e.getClickedBlock
    def loc = block.loc
  }
  /**
   * Add some awesomeness to WeatherChangeEvent.
   */
  implicit class RichWeatherChangeEvent(e:WeatherChangeEvent) {
    def rain = e.toWeatherState // true when the weather is changing to rain
    def sun = ! rain
  }
  /** Adds lookup and bulk enable/disable helpers to PluginManager. */
  implicit class RichPluginManager(pm: PluginManager) {
    def findPlugin(name: String): Option[Plugin] = tryO(pm.getPlugin(name))
    def enable(plugin: String) : Unit = findPlugin(plugin).foreach(pm.enablePlugin)
    def disable(plugin: String) : Unit = findPlugin(plugin).foreach(pm.disablePlugin)
    def enableAll(plugins: String*) : Unit = plugins.foreach(enable)
    def disableAll(plugins: String*): Unit = plugins.foreach(disable)
  }
// arguably, these functions should be someplace else...
def tryO[T](f: => T): Option[T] = Try(Option(f)).getOrElse(None)
def findEntity(name:String) = Option(EntityType.fromName(name.toUpperCase)).orElse(
Option(EntityType.valueOf(name.toUpperCase))
)
  /**
   * Finds a Material either by name or by numeric id. A non-numeric id makes
   * `toInt` throw inside tryO, which safely yields None.
   */
  def findMaterial(nameOrId: String) = Option(getMaterial(nameOrId.toUpperCase)).orElse(
    tryO(getMaterial(nameOrId.toInt))
  )

  /** Lets a ChatColor be applied to a string: `RED("oops")` colors the message. */
  implicit class RichColor(c: ChatColor) {
    def apply(s: String) = c + s
  }
  /** A wool/dye color, represented by its raw block data value. */
  sealed case class Color(data:Byte){
    /** Wool of this color. */
    def wool = MaterialAndData(WOOL, Some(data))
  }
  object Color {
    // the sixteen classic wool data values
    val WHITE = new Color(0)
    val ORANGE = new Color(1)
    val MAGENTA = new Color(2)
    val LIGHT_BLUE = new Color(3)
    val YELLOW = new Color(4)
    val LIGHT_GREEN = new Color(5)
    val PINK = new Color(6)
    val GREY = new Color(7)
    val LIGHT_GREY = new Color(8)
    val CYAN = new Color(9)
    val VIOLET = new Color(10)
    val BLUE = new Color(11)
    val BROWN = new Color(12)
    val GREEN = new Color(13)
    val RED = new Color(14)
    val BLACK = new Color(15)
  }
}
| JunctionAt/JunctionAPI | src/main/scala/com/joshcough/minecraft/BukkitEnrichment.scala | Scala | agpl-3.0 | 15,576 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.data.stats.usage
import java.io.Closeable
import org.apache.accumulo.core.client.Connector
import org.apache.accumulo.core.security.Authorizations
import org.joda.time.Interval
import scala.reflect.ClassTag
/**
 * Persists and retrieves usage (audit) statistics for queries against a simple
 * feature type. Implementations own the underlying storage and must be closed
 * when no longer needed.
 */
trait GeoMesaUsageStats extends Closeable {

  /**
   * Writes a usage stat
   *
   * @param stat stat to write
   * @param transform conversion to mutations
   * @tparam T stat type
   */
  def writeUsageStat[T <: UsageStat](stat: T)(implicit transform: UsageStatTransform[T]): Unit

  /**
   * Retrieves usage statistics
   *
   * @param typeName simple feature type name
   * @param dates dates to retrieve stats for
   * @param auths authorizations to query with
   * @param transform conversion to mutations
   * @tparam T stat type
   * @return iterator of usage stats
   */
  def getUsageStats[T <: UsageStat](typeName: String,
                                    dates: Interval,
                                    auths: Authorizations)
                                   (implicit transform: UsageStatTransform[T]): Iterator[T]
}
/** Mix-in for components that expose a [[GeoMesaUsageStats]] instance. */
trait HasGeoMesaUsageStats {
  def usageStats: GeoMesaUsageStats
}
/**
 * Accumulo-backed [[GeoMesaUsageStats]].
 *
 * @param connector         accumulo connector
 * @param usageStatsTable   table holding the usage stats
 * @param collectUsageStats when false, writes are silently dropped
 */
class GeoMesaUsageStatsImpl(connector: Connector, usageStatsTable: String, collectUsageStats: Boolean)
    extends GeoMesaUsageStats {

  // Option instead of the previous null field — same behavior, idiomatic Scala
  private val usageWriter: Option[UsageStatWriter] =
    if (collectUsageStats) Some(new UsageStatWriter(connector, usageStatsTable)) else None

  private val usageReader = new UsageStatReader(connector, usageStatsTable)

  override def writeUsageStat[T <: UsageStat](stat: T)(implicit transform: UsageStatTransform[T]): Unit =
    usageWriter.foreach(_.queueStat(stat)(transform))

  override def getUsageStats[T <: UsageStat](typeName: String,
                                             dates: Interval,
                                             auths: Authorizations)
                                            (implicit transform: UsageStatTransform[T]): Iterator[T] =
    usageReader.query(typeName, dates, auths)(transform)

  override def close(): Unit = usageWriter.foreach(_.close())
}
| mdzimmerman/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/data/stats/usage/GeoMesaUsageStats.scala | Scala | apache-2.0 | 2,576 |
package uk.gov.dvla.vehicles.acquire.gatling
import io.gatling.core.Predef._
import uk.gov.dvla.vehicles.acquire.gatling.Helper.httpConf
import uk.gov.dvla.vehicles.acquire.gatling.Scenarios._
/** Gatling simulation driving the main vehicle-acquire journeys, one user per scenario. */
class AcquireSimulation extends Simulation {

  // a single user injected immediately for each scenario
  private val oneUser = atOnceUsers(1)

  setUp(
    //verifyAssetsAreAccessible.inject(oneUser),
    newBusinessKeeperBuysAVehicleFromTheTrade.inject(oneUser),
    newBusinessKeeperBuysAVehicleFromTheTradeWithAllOptionalDataFilledIn.inject(oneUser),
    newPrivateKeeperBuysAVehicleFromTheTrade.inject(oneUser),
    newPrivateKeeperBuysAVehicleFromTheTradeWithAllOptionalDataFilledIn.inject(oneUser),
    vehicleLookupUnsuccessful.inject(oneUser)
  ).
    protocols(httpConf).
    // the whole run fails if any request failed
    assertions(global.failedRequests.count.is(0))
}
| dvla/vehicles-acquire-online | gatling-tests/src/test/scala/uk/gov/dvla/vehicles/acquire/gatling/AcquireSimulation.scala | Scala | mit | 771 |
package mesosphere.marathon.tasks
import org.apache.mesos.Protos._
import org.scalatest.{ Assertions, GivenWhenThen, FunSuite }
import scala.collection.JavaConverters._
/** Tests for ResourceUtil: consuming (subtracting) mesos resources from offered resources. */
class ResourceUtilTest extends FunSuite with GivenWhenThen with Assertions {
  test("no base resources") {
    val leftOvers = ResourceUtil.consumeResources(
      Seq(),
      Seq(ports("ports", 2 to 12))
    )
    assert(leftOvers == Seq())
  }
  test("resource mix") {
    val leftOvers = ResourceUtil.consumeResources(
      Seq(scalar("cpus", 3), ports("ports", 2 to 20), set("labels", Set("a", "b"))),
      Seq(scalar("cpus", 2), ports("ports", 2 to 12), set("labels", Set("a")))
    )
    assert(leftOvers == Seq(scalar("cpus", 1), ports("ports", 13 to 20), set("labels", Set("b"))))
  }
  test("resource repeated consumed resources with the same name/role") {
    val leftOvers = ResourceUtil.consumeResources(
      Seq(scalar("cpus", 3)),
      Seq(scalar("cpus", 2), scalar("cpus", 1))
    )
    assert(leftOvers == Seq())
  }
  test("resource consumption considers roles") {
    val leftOvers = ResourceUtil.consumeResources(
      Seq(scalar("cpus", 2), scalar("cpus", 2, role = "marathon")),
      Seq(scalar("cpus", 0.5), scalar("cpus", 1, role = "marathon"), scalar("cpus", 0.5, role = "marathon"))
    )
    assert(leftOvers == Seq(scalar("cpus", 1.5), scalar("cpus", 0.5, role = "marathon")))
  }
  // consumed range in the middle of the base range
  portsTest(consumedResource = Seq(10 to 10), baseResource = Seq(5 to 15), expectedResult = Some(Seq(5 to 9, 11 to 15)))
  portsTest(consumedResource = Seq(10 to 11), baseResource = Seq(5 to 15), expectedResult = Some(Seq(5 to 9, 12 to 15)))
  portsTest(consumedResource = Seq(10 to 11), baseResource = Seq(5 to 15, 30 to 31),
    expectedResult = Some(Seq(5 to 9, 12 to 15, 30 to 31)))
  portsTest(consumedResource = Seq(), baseResource = Seq(5 to 15), expectedResult = Some(Seq(5 to 15)))
  portsTest(
    consumedResource = Seq(31084 to 31084),
    baseResource = Seq(31000 to 31096, 31098 to 32000), expectedResult = Some(Seq(31000 to 31083, 31085 to 31096, 31098 to 32000)))
  // overlapping smaller
  portsTest(consumedResource = Seq(2 to 5), baseResource = Seq(5 to 15), expectedResult = Some(Seq(6 to 15)))
  portsTest(consumedResource = Seq(2 to 6), baseResource = Seq(5 to 15), expectedResult = Some(Seq(7 to 15)))
  // overlapping bigger
  portsTest(consumedResource = Seq(15 to 20), baseResource = Seq(5 to 15), expectedResult = Some(Seq(5 to 14)))
  portsTest(consumedResource = Seq(14 to 20), baseResource = Seq(5 to 15), expectedResult = Some(Seq(5 to 13)))
  // not contained in base resource
  portsTest(consumedResource = Seq(5 to 15), baseResource = Seq(), expectedResult = None)
  portsTest(consumedResource = Seq(2 to 4), baseResource = Seq(5 to 15), expectedResult = Some(Seq(5 to 15)))
  portsTest(consumedResource = Seq(16 to 20), baseResource = Seq(5 to 15), expectedResult = Some(Seq(5 to 15)))
  scalarTest(consumedResource = 3, baseResource = 10, expectedResult = Some(10.0 - 3.0))
  scalarTest(consumedResource = 3, baseResource = 2, expectedResult = None)
  setResourceTest(consumedResource = Set("a", "b"), baseResource = Set("a", "b", "c"), expectedResult = Some(Set("c")))
  setResourceTest(consumedResource = Set("a", "b", "c"), baseResource = Set("a", "b", "c"), expectedResult = None)
  // registers one test for a set-typed resource subtraction
  private[this] def setResourceTest(
    consumedResource: Set[String],
    baseResource: Set[String],
    expectedResult: Option[Set[String]]): Unit = {
    test(s"consuming sets resource $consumedResource from $baseResource results in $expectedResult") {
      val r1 = set("cpus", consumedResource)
      val r2 = set("cpus", baseResource)
      val r3 = expectedResult.map(set("cpus", _))
      val result = ResourceUtil.consumeResource(r2, r1)
      assert(result == r3)
    }
  }
  // builds a SET-typed mesos Resource
  private[this] def set(name: String, labels: Set[String]): Resource = {
    Resource
      .newBuilder()
      .setName(name)
      .setType(Value.Type.SET)
      .setSet(Value.Set.newBuilder().addAllItem(labels.asJava))
      .build()
  }
  // registers one test for a ranges (ports) resource subtraction
  private[this] def portsTest(
    consumedResource: Seq[Range.Inclusive],
    baseResource: Seq[Range.Inclusive],
    expectedResult: Option[Seq[Range.Inclusive]]): Unit = {
    test(s"consuming ports resource $consumedResource from $baseResource results in $expectedResult") {
      val r1 = ports("cpus", consumedResource: _*)
      val r2 = ports("cpus", baseResource: _*)
      val r3 = expectedResult.map(ports("cpus", _: _*))
      val result = ResourceUtil.consumeResource(r2, r1)
      assert(result == r3)
    }
  }
  // builds a RANGES-typed mesos Resource from inclusive scala ranges
  private[this] def ports(name: String, ranges: Range.Inclusive*): Resource = {
    def toRange(range: Range.Inclusive): Value.Range =
      Value.Range
        .newBuilder()
        .setBegin(range.start.toLong).setEnd(range.end.toLong).build()
    Resource
      .newBuilder()
      .setName(name)
      .setType(Value.Type.RANGES)
      .setRanges(Value.Ranges.newBuilder().addAllRange(ranges.map(toRange).asJava))
      .build()
  }
  // registers one test for a scalar resource subtraction
  private[this] def scalarTest(consumedResource: Double, baseResource: Double, expectedResult: Option[Double]): Unit = {
    test(s"consuming scalar resource $consumedResource from $baseResource results in $expectedResult") {
      val r1 = scalar("cpus", consumedResource)
      val r2 = scalar("cpus", baseResource)
      val r3 = expectedResult.map(scalar("cpus", _))
      val result = ResourceUtil.consumeResource(r2, r1)
      assert(result == r3)
    }
  }
  // builds a SCALAR-typed mesos Resource, default role "*"
  private[this] def scalar(name: String, d: Double, role: String = "*"): Resource = {
    Resource
      .newBuilder()
      .setName(name)
      .setType(Value.Type.SCALAR)
      .setScalar(Value.Scalar.newBuilder().setValue(d))
      .setRole(role)
      .build()
  }
}
| sepiroth887/marathon | src/test/scala/mesosphere/marathon/tasks/ResourceUtilTest.scala | Scala | apache-2.0 | 5,773 |
/*
* Artificial Intelligence for Humans
* Volume 2: Nature Inspired Algorithms
* Java Version
* http://www.aifh.org
* http://www.jeffheaton.com
*
* Code repository:
* https://github.com/jeffheaton/aifh
*
* Copyright 2014 by Jeff Heaton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* For more information on Heaton Research copyrights, licenses
* and trademarks visit:
* http://www.heatonresearch.com/copyright
*/
package com.heatonresearch.aifh.genetic.trees
import com.heatonresearch.aifh.evolutionary.genome.Genome
import com.heatonresearch.aifh.evolutionary.opp.EvolutionaryOperator
import com.heatonresearch.aifh.evolutionary.train.EvolutionaryAlgorithm
import com.heatonresearch.aifh.randomize.GenerateRandom
/**
* Cross over using a tree. Used for genetic programming.
*/
class CrossoverTree(owner: EvolutionaryAlgorithm) extends EvolutionaryOperator(owner,1,1) {
  /**
   * Produces one offspring by copying a parent tree and replacing a randomly
   * sampled subtree of the copy with a randomly sampled subtree of the source.
   *
   * @param rnd            random source used to sample the crossover points
   * @param parents        pool of parent genomes
   * @param parentIndex    index of the parent to read from
   * @param offspring      array that receives the produced genome
   * @param offspringIndex slot of `offspring` to write to
   */
  override def performOperation(rnd: GenerateRandom, parents: Array[Genome],
      parentIndex: Int, offspring: Array[Genome], offspringIndex: Int) {
    val parent1 = parents(parentIndex).asInstanceOf[TreeGenome]
    // NOTE(review): parent2 reads the same index as parent1, so the genome is
    // crossed with itself. With parentsNeeded = 1 (see the super constructor)
    // reading parentIndex + 1 would be out of bounds, so this is self-consistent,
    // but confirm it is intended -- classic tree crossover uses two distinct parents.
    val parent2 = parents(parentIndex).asInstanceOf[TreeGenome]
    val eval = parent1.evaluator
    // The offspring starts as a copy of parent1; we then splice in a subtree.
    val off1 = owner.population.genomeFactory.factor(parent1).asInstanceOf[TreeGenome]
    val replacePoint = eval.sampleRandomNode(rnd, off1.root)
    val copySource = eval.sampleRandomNode(rnd, parent2.root)
    // Deep-copy so the offspring never aliases nodes of the source tree.
    val actualCopy = copySource.child.copy
    if (replacePoint.parent == null) {
      // The sampled point is the root: the copied subtree becomes the whole tree.
      off1.root = actualCopy
    }
    else {
      val idx = replacePoint.parent.getChildren.indexOf(replacePoint.child)
      replacePoint.parent.getChildren.set(idx, actualCopy)
    }
    // Fix: honour the offspringIndex parameter instead of always writing slot 0.
    offspring(offspringIndex) = off1
  }
} | PeterLauris/aifh | vol2/vol2-scala-examples/src/main/scala/com/heatonresearch/aifh/genetic/trees/CrossoverTree.scala | Scala | apache-2.0 | 2,250 |
package org.http4s
import cats.Applicative
import cats.data.{Kleisli, OptionT}
import cats.implicits._
import cats.effect.Sync
object AuthedRoutes {

  /** Lifts a total function into an [[AuthedRoutes]].
    *
    * The application of `run` is deferred via `F.suspend`, which keeps the
    * combination of routes through `SemigroupK` efficient.
    *
    * @tparam T type of the auth info carried by the [[AuthedRequest]]
    * @tparam F effect type of the [[AuthedRoutes]]
    * @param run the function to lift
    * @return an [[AuthedRoutes]] wrapping `run`
    */
  def apply[T, F[_]](run: AuthedRequest[F, T] => OptionT[F, Response[F]])(
      implicit F: Sync[F]): AuthedRoutes[T, F] =
    Kleisli { req =>
      val deferred = F.suspend(run(req).value)
      OptionT(deferred)
    }

  /** Lifts a partial function into an [[AuthedRoutes]].
    *
    * Requests outside the domain of `pf` produce `OptionT.none`, so they fall
    * through to the next service; application is suspended in `F` as above.
    *
    * @tparam F base effect of the [[AuthedRoutes]]
    * @param pf the partial function to lift
    * @return an [[AuthedRoutes]] defined exactly where `pf` is
    */
  def of[T, F[_]](pf: PartialFunction[AuthedRequest[F, T], F[Response[F]]])(
      implicit F: Sync[F]): AuthedRoutes[T, F] =
    Kleisli { req =>
      val matched = pf.lift(req)
      OptionT(F.suspend(matched.sequence))
    }

  /** The empty service: every request falls through.
    *
    * @tparam T ignored
    */
  def empty[T, F[_]: Applicative]: AuthedRoutes[T, F] =
    Kleisli.liftF(OptionT.none)
}
| aeons/http4s | core/src/main/scala/org/http4s/AuthedRoutes.scala | Scala | apache-2.0 | 1,622 |
/**
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*/
package kafka.manager
import java.util.Properties
import java.util.concurrent.atomic.AtomicBoolean
import com.typesafe.config.{Config, ConfigFactory}
import kafka.manager.features.KMDeleteTopicFeature
import kafka.manager.model.{Kafka_0_8_1_1, ActorModel}
import kafka.manager.utils.CuratorAwareTest
import ActorModel.TopicList
import kafka.test.{SimpleProducer, HighLevelConsumer, SeededBroker}
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Try
/**
* @author hiral
*/
// Integration suite for KafkaManager against a locally seeded broker.
// NOTE(review): the tests are ORDER-DEPENDENT -- each mutates shared cluster
// state ("dev") that later tests rely on (add cluster -> create topics ->
// ... -> delete cluster). Do not reorder or run individual tests in isolation.
class TestKafkaManager extends CuratorAwareTest {
  // Topic pre-created by the seeded broker so consumer/producer tests have data.
  private[this] val seededTopic = "km-api-test"
  private[this] val broker = new SeededBroker(seededTopic,4)
  private[this] val kafkaServerZkPath = broker.getZookeeperConnectionString
  // Akka/KafkaManager configuration; 1-second update intervals keep the
  // suite's Thread.sleep waits short.
  private[this] val akkaConfig: Properties = new Properties()
  akkaConfig.setProperty("pinned-dispatcher.type","PinnedDispatcher")
  akkaConfig.setProperty("pinned-dispatcher.executor","thread-pool-executor")
  akkaConfig.setProperty(KafkaManager.ZkHosts,testServer.getConnectString)
  akkaConfig.setProperty(KafkaManager.BrokerViewUpdateSeconds,"1")
  akkaConfig.setProperty(KafkaManager.KafkaManagerUpdateSeconds,"1")
  akkaConfig.setProperty(KafkaManager.DeleteClusterUpdateSeconds,"1")
  private[this] val config : Config = ConfigFactory.parseProperties(akkaConfig)
  private[this] val kafkaManager : KafkaManager = new KafkaManager(config)
  // Timeout applied to every Await.result in the suite.
  private[this] val duration = FiniteDuration(10,SECONDS)
  private[this] val createTopicNameA = "km-unit-test-a"
  private[this] val createTopicNameB = "km-unit-test-b"
  private[this] val createLogkafkaLogkafkaId = "km-unit-test-logkafka-logkafka_id"
  private[this] val createLogkafkaLogPath = "/km-unit-test-logkafka-logpath"
  private[this] val createLogkafkaTopic = "km-unit-test-logkafka-topic"
  // Background consumer/producer keep the seeded topic active so offset and
  // consumer-identity tests see live data; hlShutdown stops both loops.
  private[this] var hlConsumer : Option[HighLevelConsumer] = None
  private[this] var hlConsumerThread : Option[Thread] = None
  private[this] val hlShutdown = new AtomicBoolean(false)
  private[this] var simpleProducer : Option[SimpleProducer] = None
  private[this] var simpleProducerThread : Option[Thread] = None

  // Starts the background consumer and producer threads before any test runs.
  override protected def beforeAll() : Unit = {
    super.beforeAll()
    Thread.sleep(2000)
    hlConsumer = Option(broker.getHighLevelConsumer)
    hlConsumerThread = Option(new Thread() {
      override def run(): Unit = {
        while(!hlShutdown.get()) {
          hlConsumer.map(_.read { ba =>
            Option(ba).map(asString).foreach( s => println(s"read message : $s"))
          })
          Thread.sleep(500)
        }
      }
    })
    hlConsumerThread.foreach(_.start())
    simpleProducer = Option(broker.getSimpleProducer)
    simpleProducerThread = Option(new Thread() {
      override def run(): Unit = {
        var count = 0
        while(!hlShutdown.get()) {
          simpleProducer.foreach { p =>
            p.send(s"simple message $count")
            count+=1
            Thread.sleep(500)
          }
        }
      }
    })
    simpleProducerThread.foreach(_.start())
    Thread.sleep(1000)
  }

  // Best-effort teardown: each step is wrapped in Try so one failure does not
  // prevent the remaining resources from being released.
  override protected def afterAll(): Unit = {
    Try(hlShutdown.set(true))
    Try(simpleProducerThread.foreach(_.interrupt()))
    Try(hlConsumerThread.foreach(_.interrupt()))
    Try(hlConsumer.foreach(_.close()))
    kafkaManager.shutdown()
    Try(broker.shutdown())
    super.afterAll()
  }

  // Convenience accessor; assumes the "dev" cluster exists (added by the first test).
  private[this] def getTopicList() : TopicList = {
    val future = kafkaManager.getTopicList("dev")
    val result = Await.result(future,duration)
    result.toOption.get
  }

  test("add cluster") {
    val future = kafkaManager.addCluster("dev","0.8.2.0",kafkaServerZkPath, jmxEnabled = false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxPass = None)
    val result = Await.result(future,duration)
    assert(result.isRight === true)
    Thread.sleep(2000)
  }

  test("create topic") {
    val futureA = kafkaManager.createTopic("dev",createTopicNameA,4,1)
    val resultA = Await.result(futureA,duration)
    val futureB = kafkaManager.createTopic("dev",createTopicNameB,4,1)
    val resultB = Await.result(futureB,duration)
    assert(resultA.isRight === true)
    assert(resultB.isRight === true)
    Thread.sleep(2000)
  }

  // Duplicate creation must be rejected.
  test("fail to create topic again") {
    val future = kafkaManager.createTopic("dev",createTopicNameA,4,1)
    val result = Await.result(future,duration)
    assert(result.isLeft === true)
    Thread.sleep(2000)
  }

  test("get topic list") {
    val future = kafkaManager.getTopicList("dev")
    val result = Await.result(future,duration)
    assert(result.isRight === true)
    assert(result.toOption.get.list.nonEmpty === true)
  }

  // Unknown cluster names must surface as a Left whose message names the cluster.
  test("query request for invalid cluster") {
    val future = kafkaManager.getTopicList("blah")
    val result = Await.result(future,duration)
    assert(result.isLeft === true)
    assert(result.swap.toOption.get.msg.contains("blah") === true)
  }

  test("get broker list") {
    val future = kafkaManager.getBrokerList("dev")
    val result = Await.result(future,duration)
    assert(result.isRight === true)
    assert(result.toOption.nonEmpty === true)
  }

  test("get topic identity") {
    val future = kafkaManager.getTopicList("dev")
    val result = Await.result(future,duration)
    assert(result.isRight === true)
    assert(result.toOption.get.list.nonEmpty === true)
    result.toOption.get.list.foreach { topic =>
      val future2 = kafkaManager.getTopicIdentity("dev",topic)
      val result2 = Await.result(future2, duration)
      assert(result2.isRight === true)
    }
    //seeded topic should have offsets
    val future2 = kafkaManager.getTopicIdentity("dev",seededTopic)
    val result2 = Await.result(future2, duration)
    assert(result2.isRight === true)
    assert(result2.toOption.get.summedTopicOffsets > 0)
  }

  test("get cluster list") {
    val future = kafkaManager.getClusterList
    val result = Await.result(future,duration)
    assert(result.isRight === true)
    assert(result.toOption.get.active.nonEmpty === true)
  }

  test("get cluster view") {
    val future = kafkaManager.getClusterView("dev")
    val result = Await.result(future,duration)
    assert(result.isRight === true)
  }

  test("get cluster config") {
    val future = kafkaManager.getClusterConfig("dev")
    val result = Await.result(future,duration)
    assert(result.isRight === true)
  }

  test("get cluster context") {
    val future = kafkaManager.getClusterContext("dev")
    val result = Await.result(future,duration)
    assert(result.isRight === true, s"Failed : ${result}")
    assert(result.toOption.get.clusterFeatures.features(KMDeleteTopicFeature))
  }

  test("get consumer list passive mode") {
    val future = kafkaManager.getConsumerListExtended("dev")
    val result = Await.result(future,duration)
    assert(result.isRight === true, s"Failed : ${result}")
    assert(result.toOption.get.clusterContext.config.activeOffsetCacheEnabled === false, s"Failed : ${result}")
    assert(result.toOption.get.list.head._1 === hlConsumer.get.groupId, s"Failed : ${result}")
  }

  test("get consumer identity passive mode") {
    val future = kafkaManager.getConsumerIdentity("dev", hlConsumer.get.groupId)
    val result = Await.result(future,duration)
    assert(result.isRight === true, s"Failed : ${result}")
    assert(result.toOption.get.clusterContext.config.activeOffsetCacheEnabled === false, s"Failed : ${result}")
    assert(result.toOption.get.topicMap.head._1 === seededTopic, s"Failed : ${result}")
  }

  test("run preferred leader election") {
    val topicList = getTopicList()
    val future = kafkaManager.runPreferredLeaderElection("dev",topicList.list.toSet)
    val result = Await.result(future,duration)
    //TODO: this is a failure since there is nothing to do, need a better test
    assert(result.isLeft === true)
    Thread.sleep(3000)
  }

  test("get preferred leader election") {
    val future = kafkaManager.getPreferredLeaderElection("dev")
    val result = Await.result(future,duration)
    assert(result.isRight === true)
    println(result.toOption.get)
  }

  test("generate partition assignments") {
    val topicList = getTopicList()
    val future = kafkaManager.generatePartitionAssignments("dev",topicList.list.toSet,Set(0))
    val result = Await.result(future,duration)
    assert(result.isRight === true)
  }

  test("run reassign partitions") {
    val topicList = getTopicList()
    val future = kafkaManager.runReassignPartitions("dev",topicList.list.toSet)
    val result = Await.result(future,duration)
    assert(result.isRight === true)
    Thread.sleep(3000)
  }

  test("get reassign partitions") {
    val future = kafkaManager.getReassignPartitions("dev")
    val result = Await.result(future,duration)
    assert(result.isRight === true)
  }

  test("add topic partitions") {
    val tiFuture= kafkaManager.getTopicIdentity("dev",createTopicNameA)
    val tiOrError = Await.result(tiFuture, duration)
    assert(tiOrError.isRight, "Failed to get topic identity!")
    val ti = tiOrError.toOption.get
    val future = kafkaManager.addTopicPartitions("dev",createTopicNameA,Seq(0),ti.partitions + 1,ti.readVersion)
    val result = Await.result(future,duration)
    assert(result.isRight === true)
    //check new partition num
    {
      val tiFuture= kafkaManager.getTopicIdentity("dev",createTopicNameA)
      val tiOrError = Await.result(tiFuture, duration)
      assert(tiOrError.isRight, "Failed to get topic identity!")
      val ti = tiOrError.toOption.get
      assert(ti.partitions === 5)
    }
  }

  test("add multiple topics partitions") {
    val tiFutureA = kafkaManager.getTopicIdentity("dev",createTopicNameA)
    val tiFutureB = kafkaManager.getTopicIdentity("dev",createTopicNameB)
    val tiOrErrorA = Await.result(tiFutureA,duration)
    val tiOrErrorB = Await.result(tiFutureB,duration)
    assert(tiOrErrorA.isRight, "Failed to get topic identity for topic A!")
    assert(tiOrErrorB.isRight, "Failed to get topic identity for topic B!")
    val tiA = tiOrErrorA.toOption.get
    val tiB = tiOrErrorB.toOption.get
    val newPartitionNum = tiA.partitions + 1
    val future = kafkaManager.addMultipleTopicsPartitions("dev",Seq(createTopicNameA, createTopicNameB),Set(0),newPartitionNum,Map(createTopicNameA->tiA.readVersion,createTopicNameB->tiB.readVersion))
    val result = Await.result(future,duration)
    assert(result.isRight === true)
    {
      val tiFutureA = kafkaManager.getTopicIdentity("dev",createTopicNameA)
      val tiFutureB = kafkaManager.getTopicIdentity("dev",createTopicNameB)
      val tiOrErrorA = Await.result(tiFutureA,duration)
      val tiOrErrorB = Await.result(tiFutureB,duration)
      assert(tiOrErrorA.isRight, "Failed to get topic identity for topic A!")
      assert(tiOrErrorB.isRight, "Failed to get topic identity for topic B!")
      val tiA = tiOrErrorA.toOption.get
      val tiB = tiOrErrorB.toOption.get
      assert(tiA.partitions === newPartitionNum)
      assert(tiB.partitions === newPartitionNum)
    }
  }

  test("update topic config") {
    val tiFuture= kafkaManager.getTopicIdentity("dev",createTopicNameA)
    val tiOrError = Await.result(tiFuture, duration)
    assert(tiOrError.isRight, "Failed to get topic identity!")
    val ti = tiOrError.toOption.get
    val config = new Properties()
    config.put(kafka.manager.utils.zero82.LogConfig.RententionMsProp,"1800000")
    val configReadVersion = ti.configReadVersion
    val future = kafkaManager.updateTopicConfig("dev",createTopicNameA,config,configReadVersion)
    val result = Await.result(future,duration)
    assert(result.isRight === true)
    //check new topic config
    {
      val tiFuture= kafkaManager.getTopicIdentity("dev",createTopicNameA)
      val tiOrError = Await.result(tiFuture, duration)
      assert(tiOrError.isRight, "Failed to get topic identity!")
      val ti = tiOrError.toOption.get
      assert(ti.configReadVersion > configReadVersion)
      assert(ti.config.toMap.apply(kafka.manager.utils.zero82.LogConfig.RententionMsProp) === "1800000")
    }
  }

  // Deletion is asynchronous: only membership in the delete set is asserted here.
  test("delete topic") {
    val futureA = kafkaManager.deleteTopic("dev",createTopicNameA)
    val resultA = Await.result(futureA,duration)
    assert(resultA.isRight === true, resultA)
    Thread.sleep(1000)
    val futureA2 = kafkaManager.getTopicList("dev")
    val resultA2 = Await.result(futureA2,duration)
    assert(resultA2.isRight === true, resultA2)
    assert(resultA2.toOption.get.deleteSet(createTopicNameA),"Topic not in delete set")
    val futureB = kafkaManager.deleteTopic("dev",createTopicNameB)
    val resultB = Await.result(futureB,duration)
    assert(resultB.isRight === true, resultB)
    Thread.sleep(1000)
    val futureB2 = kafkaManager.getTopicList("dev")
    val resultB2 = Await.result(futureB2,duration)
    assert(resultB2.isRight === true, resultB2)
    assert(resultB2.toOption.get.deleteSet(createTopicNameB),"Topic not in delete set")
  }

  test("fail to delete non-existent topic") {
    val future = kafkaManager.deleteTopic("dev","delete_me")
    val result = Await.result(future,duration)
    assert(result.isLeft === true)
  }

  test("update cluster zkhost") {
    val future = kafkaManager.updateCluster("dev","0.8.2.0",testServer.getConnectString, jmxEnabled = false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxPass = None)
    val result = Await.result(future,duration)
    assert(result.isRight === true)
    val future2 = kafkaManager.getClusterList
    val result2 = Await.result(future2,duration)
    assert(result2.isRight === true)
    // Either the update is still pending or the active config already reflects it.
    assert((result2.toOption.get.pending.nonEmpty === true) ||
      (result2.toOption.get.active.find(c => c.name == "dev").get.curatorConfig.zkConnect === testServer.getConnectString))
    Thread.sleep(5000)
  }

  test("disable cluster") {
    val future = kafkaManager.disableCluster("dev")
    val result = Await.result(future,duration)
    assert(result.isRight === true)
    val future2 = kafkaManager.getClusterList
    val result2 = Await.result(future2,duration)
    assert(result2.isRight === true)
    assert((result2.toOption.get.pending.nonEmpty === true) ||
      (result2.toOption.get.active.find(c => c.name == "dev").get.enabled === false))
    Thread.sleep(5000)
  }

  test("enable cluster") {
    val future = kafkaManager.enableCluster("dev")
    val result = Await.result(future,duration)
    assert(result.isRight === true)
    Thread.sleep(3000)
  }

  // Downgrades the cluster so the next test can verify the delete-topic guard.
  test("update cluster version") {
    val future = kafkaManager.updateCluster("dev","0.8.1.1",testServer.getConnectString, jmxEnabled = false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxPass = None)
    val result = Await.result(future,duration)
    assert(result.isRight === true)
    val future2 = kafkaManager.getClusterList
    val result2 = Await.result(future2,duration)
    assert(result2.isRight === true)
    assert((result2.toOption.get.pending.nonEmpty === true) ||
      (result2.toOption.get.active.find(c => c.name == "dev").get.version === Kafka_0_8_1_1))
    Thread.sleep(5000)
  }

  test("delete topic not supported prior to 0.8.2.0") {
    val future = kafkaManager.deleteTopic("dev",createTopicNameA)
    val result = Await.result(future,duration)
    assert(result.isLeft === true, result)
    assert(result.swap.toOption.get.msg.contains("not supported"))
    Thread.sleep(2000)
  }

  test("update cluster logkafka enabled and activeOffsetCache enabled") {
    val future = kafkaManager.updateCluster("dev","0.8.2.0",testServer.getConnectString, jmxEnabled = false, pollConsumers = true, filterConsumers = true, logkafkaEnabled = true, activeOffsetCacheEnabled = true, jmxUser = None, jmxPass = None)
    val result = Await.result(future,duration)
    assert(result.isRight === true)
    Thread.sleep(3000)
    val future2 = kafkaManager.getClusterList
    val result2 = Await.result(future2,duration)
    assert(result2.isRight === true)
    assert((result2.toOption.get.active.find(c => c.name == "dev").get.logkafkaEnabled === true) &&
      (result2.toOption.get.active.find(c => c.name == "dev").get.activeOffsetCacheEnabled === true))
    Thread.sleep(3000)
  }

  /*
  test("get consumer list active mode") {
    val future = kafkaManager.getConsumerListExtended("dev")
    val result = Await.result(future,duration)
    assert(result.isRight === true, s"Failed : ${result}")
    assert(result.toOption.get.clusterContext.config.activeOffsetCacheEnabled === false, s"Failed : ${result}")
    assert(result.toOption.get.list.head._1 === hlConsumer.get.groupId, s"Failed : ${result}")
  }
  test("get consumer identity active mode") {
    val future = kafkaManager.getConsumerIdentity("dev", hlConsumer.get.groupId)
    val result = Await.result(future,duration)
    assert(result.isRight === true, s"Failed : ${result}")
    assert(result.toOption.get.clusterContext.config.activeOffsetCacheEnabled === false, s"Failed : ${result}")
    assert(result.toOption.get.topicMap.head._1 === seededTopic, s"Failed : ${result}")
  }*/

  test("create logkafka") {
    val config = new Properties()
    config.put(kafka.manager.utils.logkafka82.LogConfig.TopicProp,createLogkafkaTopic)
    val future = kafkaManager.createLogkafka("dev",createLogkafkaLogkafkaId,createLogkafkaLogPath,config)
    val result = Await.result(future,duration)
    assert(result.isRight === true)
    Thread.sleep(2000)
  }

  test("get logkafka identity") {
    val future = kafkaManager.getLogkafkaLogkafkaIdList("dev")
    val result = Await.result(future,duration)
    assert(result.isRight === true)
    assert(result.toOption.get.list.nonEmpty === true)
    result.toOption.get.list.foreach { logkafka_id =>
      val future2 = kafkaManager.getLogkafkaIdentity("dev",logkafka_id)
      val result2 = Await.result(future2, duration)
      assert(result2.isRight === true)
    }
  }

  test("update logkafka config") {
    val liFuture= kafkaManager.getLogkafkaIdentity("dev",createLogkafkaLogkafkaId)
    val liOrError = Await.result(liFuture, duration)
    assert(liOrError.isRight, "Failed to get logkafka identity!")
    val li = liOrError.toOption.get
    val config = new Properties()
    config.put(kafka.manager.utils.logkafka82.LogConfig.TopicProp,createLogkafkaTopic)
    config.put(kafka.manager.utils.logkafka82.LogConfig.PartitionProp,"1")
    val future = kafkaManager.updateLogkafkaConfig("dev",createLogkafkaLogkafkaId,createLogkafkaLogPath,config)
    val result = Await.result(future,duration)
    assert(result.isRight === true)
    Thread.sleep(1000)
    //check new logkafka config
    {
      val liFuture= kafkaManager.getLogkafkaIdentity("dev",createLogkafkaLogkafkaId)
      val liOrError = Await.result(liFuture, duration)
      assert(liOrError.isRight, "Failed to get logkafka identity!")
      val li = liOrError.toOption.get
      assert(li.identityMap.get(createLogkafkaLogPath).get._1.get.apply(kafka.manager.utils.logkafka82.LogConfig.PartitionProp) === "1")
    }
  }

  test("delete logkafka") {
    val future = kafkaManager.deleteLogkafka("dev",createLogkafkaLogkafkaId,createLogkafkaLogPath)
    val result = Await.result(future,duration)
    assert(result.isRight === true, result)
    val liFuture= kafkaManager.getLogkafkaIdentity("dev",createLogkafkaLogkafkaId)
    val liOrError = Await.result(liFuture, duration)
    assert(liOrError.isRight, "Failed to get logkafka identity!")
    val li = liOrError.toOption.get
    assert(li.identityMap.get(createLogkafkaLogPath) === None)
    Thread.sleep(2000)
  }

  test("delete cluster") {
    //first have to disable in order to delete
    {
      val future = kafkaManager.disableCluster("dev")
      val result = Await.result(future, duration)
      assert(result.isRight === true)
      Thread.sleep(3000)
    }
    val future = kafkaManager.deleteCluster("dev")
    val result = Await.result(future,duration)
    assert(result.isRight === true)
    Thread.sleep(3000)
    val future2 = kafkaManager.getClusterList
    val result2 = Await.result(future2,duration)
    assert(result2.isRight === true)
    assert(result2.toOption.get.pending.isEmpty === true)
    assert(result2.toOption.get.active.isEmpty === true)
  }
}
| xuwei-k/kafka-manager | test/kafka/manager/TestKafkaManager.scala | Scala | apache-2.0 | 20,320 |
/*
* This file is part of Apparat.
*
* Copyright (C) 2010 Joa Ebert
* http://www.joa-ebert.com/
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
package apparat.bytecode.optimization
import apparat.bytecode.Bytecode
import apparat.bytecode.operations._
import apparat.abc.{AbcNamespace, AbcQName, AbcNamespaceKind}
/**
* @author Joa Ebert
*/
// Peephole transform that inlines calls on the apparat Memory facade into the
// corresponding memory opcodes (SetByte/GetInt/...), removing the facade lookup.
object InlineMemory extends (Bytecode => Boolean) {
  // Current location of the Memory facade.
  private val namespace0 = AbcNamespace(AbcNamespaceKind.Package, Symbol("apparat.memory"))
  private val memory0 = AbcQName('Memory, namespace0)

  // Backwards compatibility: older package locations of the same facade.
  private val namespace1 = AbcNamespace(AbcNamespaceKind.Package, Symbol("com.joa_ebert.apparat.memory"))
  private val memory1 = AbcQName('Memory, namespace1)

  private val namespace2 = AbcNamespace(AbcNamespaceKind.Package, Symbol("com.joa_ebert.abc.bytecode.asbridge"))
  private val memory2 = AbcQName('Memory, namespace2)

  /**
   * Scans the bytecode once and rewrites Memory.* calls in place.
   * @return true if the bytecode was modified
   */
  override def apply(bytecode: Bytecode): Boolean = {
    var removes = List.empty[AbstractOp]                  // ops to delete afterwards
    var replacements = Map.empty[AbstractOp, AbstractOp]  // call op -> memory op
    var modified = false
    // Set after a write through CallProperty: that form leaves a value on the
    // stack, so the following Pop must be deleted together with the call.
    var removePop = false
    // Number of GetLex Memory receivers seen whose call has not been matched yet.
    var balance = 0
    for(op <- bytecode.ops) op match {
      case Pop() if removePop => {
        removes = op :: removes
        removePop = false
      }
      // The facade lookup itself is always removed; the memory opcode that
      // replaces the call does not need a receiver on the stack.
      case GetLex(typeName) if typeName == memory0 || typeName == memory1 || typeName == memory2 => {
        removes = op :: removes
        balance += 1
      }
      // void-call form: writes discard their result, so no Pop follows.
      case CallPropVoid(property, numArguments) if balance > 0 => property match {
        case AbcQName(name, _) => {
          (name match {
            case 'writeByte => Some(SetByte())
            case 'writeShort => Some(SetShort())
            case 'writeInt => Some(SetInt())
            case 'writeFloat => Some(SetFloat())
            case 'writeDouble => Some(SetDouble())
            case 'select => {
              // Memory.select keeps its receiver: undo the scheduled GetLex removal.
              removes = removes.tail
              balance -= 1
              None
            }
            case _ => None
          }) match {
            case Some(replacement) => {
              balance -= 1
              replacements += op -> replacement
              modified = true
            }
            case None =>
          }
        }
        case _ =>
      }
      // value-call form: reads map directly; writes additionally schedule the
      // removal of the Pop that discards their (unused) return value.
      case CallProperty(property, numArguments) if balance > 0 => property match {
        case AbcQName(name, _) => {
          (name match {
            case 'readUnsignedByte => Some(GetByte())
            case 'readUnsignedShort => Some(GetShort())
            case 'readInt => Some(GetInt())
            case 'readFloat => Some(GetFloat())
            case 'readDouble => Some(GetDouble())
            case 'signExtend1 => Some(Sign1())
            case 'signExtend8 => Some(Sign8())
            case 'signExtend16 => Some(Sign16())
            case 'writeByte => {
              removePop = true
              Some(SetByte())
            }
            case 'writeShort => {
              removePop = true
              Some(SetShort())
            }
            case 'writeInt => {
              removePop = true
              Some(SetInt())
            }
            case 'writeFloat => {
              removePop = true
              Some(SetFloat())
            }
            case 'writeDouble => {
              removePop = true
              Some(SetDouble())
            }
            case 'select => {
              removes = removes.tail
              balance -= 1
              None
            }
            case _ => None
          }) match {
            case Some(replacement) => {
              balance -= 1
              replacements += op -> replacement
              modified = true
            }
            case None =>
          }
        }
        case _ =>
      }
      case _ =>
    }
    // Apply the collected edits only if at least one call was rewritten.
    if(modified) {
      removes foreach { bytecode remove _ }
      replacements.iterator foreach { bytecode replace _ }
      true
    } else {
      false
    }
  }
}
| joa/apparat | apparat-core/src/main/scala/apparat/bytecode/optimization/InlineMemory.scala | Scala | lgpl-2.1 | 4,117 |
package section3
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
// Teaching example: protecting a shared mutable counter with `synchronized`.
object Synchronized {
  def main(args: Array[String]) {
    // Two threads read and write this single counter, so access must be
    // synchronized to prevent a lost update (read-sleep-write race).
    var count: Int = 0

    // Every Scala object exposes `synchronized`, which takes a code block.
    // The lock is the receiver of the call -- here the `Synchronized` object
    // itself -- so both futures below contend on the same monitor.
    def increment(): Unit = Synchronized.synchronized {
      println(s"Accessing increment from thread ${Thread.currentThread().getId}")
      val tmp = count + 1
      Thread.sleep(1000) // widen the critical section to make the race observable without the lock
      count = tmp
    }

    // Two futures, each incrementing the shared counter exactly once.
    val f1 = Future(increment())
    val f2 = Future(increment())

    // Block until both increments have completed before printing the result.
    Await.ready(f1, Duration.Inf)
    Await.ready(f2, Duration.Inf)

    println(count)
  }
}
| DarkToast/scala-workshop | workshop/src/main/scala/section3/Synchronized.scala | Scala | mit | 1,015 |
package com.gilt.handlebars.scala.context
import com.gilt.handlebars.scala.binding.{ Binding, VoidBinding }
import com.gilt.handlebars.scala.logging.Loggable
import com.gilt.handlebars.scala.parser.IdentifierNode
/** A non-root context node: wraps a binding and keeps a link to its parent. */
class ChildContext[T](val binding: Binding[T], val parent: Context[T]) extends Context[T] {
  def isRoot = false
  // Void when the underlying binding has no defined value.
  def isVoid = !binding.isDefined

  override def toString = s"Child context: binding[$binding] parent[$parent]"
}
/** The top of a context chain; its parent is the shared VoidContext sentinel. */
class RootContext[T](val binding: Binding[T]) extends Context[T] {
  def isRoot = true
  def isVoid = !binding.isDefined
  def parent = VoidContext[T]

  override def toString = s"Root context: binding[$binding]"
}
/**
 * A node in a Handlebars evaluation chain: a [[Binding]] plus a parent link,
 * supporting path lookups ("a.b", "..", "this") against the bound value.
 */
trait Context[T] extends Loggable {
  def isRoot: Boolean
  def isVoid: Boolean
  def binding: Binding[T]
  def parent: Context[T]

  // Some(this) when the binding has a value, None otherwise.
  def asOption: Option[Context[T]] = binding.asOption map { t => this }

  // String rendering of the bound value, delegated to the binding.
  def render: String = binding.render

  // This context if it has a value, otherwise the given fallback.
  def notEmpty[A](fallback: Context[A]): Context[A] = if (isVoid) fallback else this.asInstanceOf[Context[A]]

  /** Resolves an identifier path (e.g. `a.b.c`) starting from this context. */
  def lookup(path: IdentifierNode, args: Seq[Binding[T]] = Seq()): Context[T] =
    lookup(path.value, args, dictionaryFallbackFlag = false)

  /** Mimics the "falsy" semantics of Handlebars.js (and treats empty Options as false). */
  def truthValue = binding.isTruthy

  /**
   * Returns the parent of the provided context, but skips artificial levels in the hierarchy
   * introduced by Iterable, Option, etc.
   */
  def safeParent: Context[T] = {
    if (isRoot || isVoid)
      this
    else if (parent.binding.isDictionary)
      this.parent
    else if (parent.binding.isCollection)
      // Collection wrappers are synthetic levels: keep climbing.
      this.parent.safeParent
    else
      this.parent
  }

  // dictionaryFallbackFlag is a work-around for the case in which a context is used to
  // iterate a dictionary: after a ".." hop, a failed key lookup in a dictionary retries
  // against the next ancestor. It would be preferable to not create a context for the
  // dictionary (preventing the need to skip it), or to capture signal somehow that the
  // binding is being used that way.
  @scala.annotation.tailrec
  final def lookup(path: Seq[String], args: Seq[Binding[T]], dictionaryFallbackFlag: Boolean): Context[T] =
    if (path.isEmpty || isVoid) this
    else if (path.head == "." || path.head == "this") {
      // "." / "this" refer to the current context; continue with the rest of the path.
      if (path.size == 1) this else lookup(path.tail, args, dictionaryFallbackFlag = false)
    } else if (path.head == "..") {
      // ".." climbs to the (synthetic-level-skipping) parent.
      safeParent.lookup(path.tail, args, dictionaryFallbackFlag = true)
    } else {
      if (dictionaryFallbackFlag && binding.isDictionary) {
        val nextChild = childContext(binding.traverse(path.head, args))
        // Key missing in this dictionary: retry the same path one level up.
        if (nextChild.isVoid) safeParent.lookup(path, args, dictionaryFallbackFlag = false)
        else nextChild.lookup(path.tail, args, dictionaryFallbackFlag = false)
      } else childContext(binding.traverse(path.head, args)).lookup(path.tail, args, dictionaryFallbackFlag = false)
    }

  /** Wraps a binding as a child of this context. */
  def childContext(binding: Binding[T]): Context[T] =
    new ChildContext[T](binding, this)

  // Maps over the elements of a collection binding (with their index), or over
  // just this context (index None) when the binding is not a collection.
  def map[R](mapFn: (Context[T], Option[Int]) => R): Iterable[R] =
    if (binding.isCollection) binding.asCollection.zipWithIndex.toSeq.map { tu => mapFn(childContext(tu._1), Some(tu._2)) }
    else Seq(mapFn(this, None))
}
object Context {
  /** Creates a root context for the given binding. */
  def apply[T](binding: Binding[T]): Context[T] =
    new RootContext(binding)
}
/** Shared sentinel for "no context": void, parentless (its parent is itself). */
object VoidContext extends Context[Any] {
  def binding = VoidBinding[Any]
  def parent = VoidContext
  def isRoot = false
  def isVoid = true

  override def asOption = None
  override def toString = "Void"

  // Unchecked cast so the single sentinel can stand in for any Context[T].
  def apply[T] = this.asInstanceOf[Context[T]]
}
| QiaoBuTang/handlebars.scala | src/main/scala/com/gilt/handlebars/scala/context/Context.scala | Scala | apache-2.0 | 3,505 |
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.util
import scala.reflect.ClassTag
/**
*/
/**
 * A fixed-capacity circular (ring) buffer. Insertions via `+=` overwrite the
 * oldest slot once the buffer is full and return the evicted element (the
 * type's default value -- null/zero -- while the buffer is still filling).
 *
 * @param size capacity of the buffer; must be positive
 * @tparam A element type (ClassTag needed to allocate the backing array)
 */
class CircularBuffer[A](val size : Int)(implicit m : ClassTag[A]) {
  // Backing storage; slot `position` is the next one to be overwritten.
  private val buffer = new Array[A](size)
  private var position = 0
  flush()

  /** Resets every slot to the default value for A (null for AnyRef, zero for primitives). */
  def flush() : Unit = {
    var i = 0
    while (i < size) {
      buffer(i) = null.asInstanceOf[A]
      i += 1
    }
  }

  /**
   * Inserts `item` at the current position and advances the cursor.
   * @return the element that was evicted from that slot
   */
  def +=(item : A) : A = {
    val evicted = buffer(position)
    buffer(position) = item
    position = (position + 1) % size
    evicted
  }

  /** Raw access to slot `i` of the backing array (not age-ordered). */
  def apply(i : Int) : A = buffer(i)

  /** Index of the slot the next insertion will write to. */
  def getPosition() : Int = position

  /**
   * The most recently inserted element, or the default value if nothing has
   * been inserted yet (position 0 also occurs after exactly `size` inserts,
   * in which case the last slot genuinely holds the newest element).
   */
  def getLast() : A =
    if (position == 0) apply(size - 1)
    else apply(position - 1) // bug fix: was (position-1)%4, wrong for any size != 4

  /** Debug rendering: "(index,value)" pairs for every slot, "Null" for empty ones. */
  override def toString() : String = {
    val builder = new StringBuffer()
    var i = 0
    while (i < size) {
      builder.append("(")
      builder.append(i)
      builder.append(",")
      if (buffer(i) != null) {
        builder.append(buffer(i).toString())
      } else {
        builder.append("Null")
      }
      builder.append(")")
      i += 1
    }
    builder.toString()
  }
}
| Craigacp/factorie | src/main/scala/cc/factorie/util/CircularBuffer.scala | Scala | apache-2.0 | 1,889 |
package lila.relay
package actorApi
import lila.socket.SocketMember
// A connected relay-socket member: its push channel, the (optional) user id,
// and whether the user is flagged as a troll (controls message visibility).
private[relay] case class Member(
  channel: JsChannel,
  userId: Option[String],
  troll: Boolean) extends SocketMember
private[relay] object Member {
  // Builds a Member from an optional authenticated user: anonymous members
  // get no userId and are treated as non-troll (user.??(_.troll) defaults false).
  def apply(channel: JsChannel, user: Option[lila.user.User]): Member = Member(
    channel = channel,
    userId = user map (_.id),
    troll = user.??(_.troll))
}
private[relay] case class Messadata(trollish: Boolean = false)
private[relay] case class Join(
uid: String,
user: Option[lila.user.User],
version: Int)
private[relay] case class Talk(tourId: String, u: String, t: String, troll: Boolean)
private[relay] case class Connected(enumerator: JsEnumerator, member: Member)
private[relay] case object Reload
private[relay] case object NotifyCrowd
| Happy0/lila | modules/relay/src/main/actorApi.scala | Scala | mit | 785 |
/**
* Copyright (C) 2009 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.util
import java.io._
import java.lang.{Long β JLong}
import org.apache.log4j.Level
import org.orbeon.oxf.common.Defaults
import org.orbeon.oxf.http.{HttpStatusCodeException, StatusCode, StreamedContent, Headers β HttpHeaders}
import org.orbeon.oxf.util.IOUtils._
import org.orbeon.oxf.xml.XMLParsing
import scala.util.Try
import scala.util.control.NonFatal
/**
 * Result of opening a connection: final URL, status code, response headers,
 * and (streamed) response content.
 *
 * Fix: this file's non-ASCII arrow characters (`=>`, `<-`, `->`) had been
 * corrupted by a bad encoding conversion (they appeared as a garbled letter),
 * which made the source uncompilable. All arrows are restored as plain ASCII;
 * no logic is changed.
 */
case class ConnectionResult(
  url               : String,
  statusCode        : Int,
  headers           : Map[String, List[String]],
  content           : StreamedContent,
  hasContent        : Boolean,
  dontHandleResponse: Boolean // TODO: Should be outside of ConnectionResult.
) extends Logging {

  import ConnectionResult._

  // Parsed value of the Last-Modified header, if present.
  val lastModified     = HttpHeaders.firstDateHeaderIgnoreCase(headers, HttpHeaders.LastModified)
  def lastModifiedJava = lastModified map (_.asInstanceOf[JLong]) orNull

  def close() = content.close()

  def isSuccessResponse = NetUtils.isSuccessCode(statusCode)

  // Mediatype and charset extracted from the Content-Type of the response, if any.
  val mediatype =
    content.contentType flatMap (ct => ContentTypes.getContentTypeMediaType(ct))

  val charset =
    content.contentType flatMap (ct => ContentTypes.getContentTypeCharset(ct))

  def mediatypeOrDefault(default: String) =
    mediatype getOrElse default

  def charsetJava =
    charset.orNull

  /** Case-insensitive header lookup; returns Nil when the header is absent. */
  def getHeaderIgnoreCase(name: String) = {
    val nameLowercase = name.toLowerCase
    headers collectFirst { case (k, v) if k.toLowerCase == nameLowercase => v } getOrElse Nil
  }

  /** Reads the body as text for XML and text/JSON mediatypes; None otherwise. */
  def readTextResponseBody = mediatype collect {
    case mediatype if ContentTypes.isXMLMediatype(mediatype) =>
      // TODO: RFC 7303 says that the content type charset must take precedence
      // with any XML mediatype: a BOM is authoritative if present; in its
      // absence the charset parameter is authoritative if present.
      // (RFC 7231 removed the ISO-8859-1 default of RFC 2616; RFC 6657
      // removed the US-ASCII default of RFC 2046.)
      useAndClose(XMLParsing.getReaderFromXMLInputStream(content.inputStream)) { reader =>
        NetUtils.readStreamAsString(reader)
      }
    case mediatype if ContentTypes.isTextOrJSONContentType(mediatype) =>
      readStreamAsText(content.inputStream, charset)
  }

  private var _didLogResponseDetails = false

  // See https://github.com/orbeon/orbeon-forms/issues/1900
  def logResponseDetailsOnce(logLevel: Level)(implicit logger: IndentedLogger): Unit = {
    if (! _didLogResponseDetails) {
      log(logLevel, "response", Seq("status code" -> statusCode.toString))
      if (headers.nonEmpty) {
        val headersToLog =
          for ((name, values) <- headers; value <- values)
            yield name -> value
        log(logLevel, "response headers", headersToLog.toList)
      }
      _didLogResponseDetails = true
    }
  }

  // See https://github.com/orbeon/orbeon-forms/issues/1900
  // NOTE(review): the `logBody` parameter is currently unused — only content
  // presence is logged, never the body itself. Kept for API compatibility;
  // confirm with callers whether body logging was intended.
  def logResponseBody(logLevel: Level, logBody: Boolean)(implicit logger: IndentedLogger): Unit =
    if (hasContent)
      log(logLevel, "response has content")
    else
      log(logLevel, "response has no content")
}
object ConnectionResult {

  /**
   * Builds a `ConnectionResult`, eagerly probing the content stream (via
   * mark/read/reset of a single byte) to determine whether content is present.
   *
   * Fix: restored the ASCII `=>` arrows that had been corrupted by a bad
   * encoding conversion; no behavior change.
   */
  def apply(
    url               : String,
    statusCode        : Int,
    headers           : Map[String, List[String]],
    content           : StreamedContent,
    dontHandleResponse: Boolean = false // TODO: Should be outside of ConnectionResult.
  ): ConnectionResult = {

    val (hasContent, resetInputStream) = {

      // Ensure mark/reset support so we can peek at the first byte without
      // consuming it; wrap in a BufferedInputStream only when needed.
      val bis =
        if (content.inputStream.markSupported)
          content.inputStream
        else
          new BufferedInputStream(content.inputStream)

      def hasContent(bis: InputStream) = {
        bis.mark(1)
        val result = bis.read != -1
        bis.reset()
        result
      }

      (hasContent(bis), bis)
    }

    ConnectionResult(
      url                = url,
      statusCode         = statusCode,
      headers            = headers,
      content            = content.copy(inputStream = resetInputStream),
      hasContent         = hasContent,
      dontHandleResponse = dontHandleResponse
    )
  }

  /** Like `tryWithSuccessConnection` but rethrows any failure. */
  def withSuccessConnection[T](cxr: ConnectionResult, closeOnSuccess: Boolean)(body: InputStream => T): T =
    tryWithSuccessConnection(cxr, closeOnSuccess)(body).get

  /**
   * Runs `body` on the content stream of a successful response. A non-success
   * status yields a failed `Try` with an `HttpStatusCodeException`; the
   * connection is always closed on error, and optionally on success.
   */
  def tryWithSuccessConnection[T](cxr: ConnectionResult, closeOnSuccess: Boolean)(body: InputStream => T): Try[T] = Try {
    try {
      cxr match {
        case ConnectionResult(_, _, _, StreamedContent(inputStream, _, _, _), _, _) if cxr.isSuccessResponse =>
          val result = body(inputStream)
          if (closeOnSuccess)
            cxr.close() // this eventually calls InputStream.close()
          result
        case ConnectionResult(_, statusCode, _, _, _, _) =>
          throw HttpStatusCodeException(if (statusCode != StatusCode.Ok) statusCode else StatusCode.InternalServerError)
      }
    } catch {
      case NonFatal(t) =>
        cxr.close()
        throw t
    }
  }

  // TODO: Move to some IOUtils object.
  /** Reads a stream fully as text, defaulting to UTF-8 when no charset is given. */
  def readStreamAsText(is: InputStream, charset: Option[String]): String = {
    // - JSON: "JSON text SHALL be encoded in Unicode. The default encoding is UTF-8."
    //   http://www.ietf.org/rfc/rfc4627.txt
    // - other: we pick UTF-8 anyway (2014-09-18)
    useAndClose(new InputStreamReader(is, charset getOrElse Defaults.DefaultEncodingForModernUse)) { reader =>
      NetUtils.readStreamAsString(reader)
    }
  }
}
| brunobuzzi/orbeon-forms | src/main/scala/org/orbeon/oxf/util/ConnectionResult.scala | Scala | lgpl-2.1 | 6,574 |
// here we test unpickling a sealed child in another tasty file
package dottyi3149
object TestFooMatch {
  // Matches a Foo against its sealed child Foo.Bar unpickled from a separate
  // TASTy file. NOTE: this lives under a negative-test directory
  // (src-2/TestFooMatch_fail.scala), so this match is expected to be rejected
  // by the compiler — do not "fix" it.
  def foo(f: Foo): Unit = f match {
    case f: Foo.Bar => ()
  }
}
| lrytz/scala | test/tasty/neg/src-2/TestFooMatch_fail.scala | Scala | apache-2.0 | 174 |
package org.bitcoins.crypto
/** Sanity check for the JS crypto runtime's random byte generator. */
class RandomTest extends BitcoinSCryptoTest {

  it should "generate random bytes" in {
    // Draw 16 independent 32-byte random values; all draws must be distinct.
    val draws = (1 to 16).map(_ => BCryptoCryptoRuntime.randomBytes(32))
    assert(draws.length == 16)
    assert(draws.distinct.length == draws.length)
  }
}
| bitcoin-s/bitcoin-s | crypto-test/.js/src/test/scala/org/bitcoins/crypto/RandomTest.scala | Scala | mit | 263 |
package mesosphere.marathon.state
import mesosphere.marathon.MarathonSpec
import mesosphere.marathon.Protos
import org.scalatest.Matchers
import org.apache.mesos.{ Protos => mesos }
import scala.collection.immutable.Seq
import scala.collection.JavaConverters._
/**
 * Round-trip tests for [[Container]]: conversion to/from the Marathon
 * protobuf, conversion to the Mesos protobuf, and JSON (de)serialization
 * via Jackson.
 */
class ContainerTest extends MarathonSpec with Matchers {
  // Shared sample containers covering volumes, bridge-networked port
  // mappings, docker parameters (including a repeated key) and forcePullImage.
  class Fixture {
    lazy val volumes = Seq(
      Container.Volume("/etc/a", "/var/data/a", mesos.Volume.Mode.RO),
      Container.Volume("/etc/b", "/var/data/b", mesos.Volume.Mode.RW)
    )
    // Docker container with volumes only.
    lazy val container = Container(
      `type` = mesos.ContainerInfo.Type.DOCKER,
      volumes = volumes,
      docker = Some(Container.Docker(image = "group/image"))
    )
    // Bridge-networked docker container with two port mappings.
    lazy val container2 = Container(
      `type` = mesos.ContainerInfo.Type.DOCKER,
      volumes = Nil,
      docker = Some(
        Container.Docker(
          image = "group/image",
          network = Some(mesos.ContainerInfo.DockerInfo.Network.BRIDGE),
          portMappings = Some(Seq(
            Container.Docker.PortMapping(8080, 32001, 9000, "tcp"),
            Container.Docker.PortMapping(8081, 32002, 9001, "udp")
          ))
        )
      )
    )
    // Privileged container with docker parameters.
    lazy val container3 = Container(
      `type` = mesos.ContainerInfo.Type.DOCKER,
      volumes = Nil,
      docker = Some(
        Container.Docker(
          image = "group/image",
          network = Some(mesos.ContainerInfo.DockerInfo.Network.NONE),
          privileged = true,
          parameters = Seq(
            Parameter("abc", "123"),
            Parameter("def", "456")
          )
        )
      )
    )
    // Like container3 but with a repeated parameter key and forcePullImage.
    lazy val container4 = Container(
      `type` = mesos.ContainerInfo.Type.DOCKER,
      volumes = Nil,
      docker = Some(
        Container.Docker(
          image = "group/image",
          network = Some(mesos.ContainerInfo.DockerInfo.Network.NONE),
          privileged = true,
          parameters = Seq(
            Parameter("abc", "123"),
            Parameter("def", "456"),
            Parameter("def", "789")
          ),
          forcePullImage = true
        )
      )
    )
  }

  def fixture(): Fixture = new Fixture

  // Entity -> Marathon protobuf: all fields must survive the conversion.
  test("ToProto") {
    val f = fixture()
    val proto = f.container.toProto
    assert(mesos.ContainerInfo.Type.DOCKER == proto.getType)
    assert("group/image" == proto.getDocker.getImage)
    assert(f.container.volumes == proto.getVolumesList.asScala.map(Container.Volume(_)))
    assert(proto.getDocker.hasForcePullImage)
    assert(f.container.docker.get.forcePullImage == proto.getDocker.getForcePullImage)

    val proto2: mesosphere.marathon.Protos.ExtendedContainerInfo = f.container2.toProto
    assert(mesos.ContainerInfo.Type.DOCKER == proto2.getType)
    assert("group/image" == proto2.getDocker.getImage)
    assert(f.container2.docker.get.network == Some(proto2.getDocker.getNetwork))
    val portMappings = proto2.getDocker.getPortMappingsList.asScala
    assert(f.container2.docker.get.portMappings == Some(portMappings.map(Container.Docker.PortMapping.apply)))
    assert(proto2.getDocker.hasForcePullImage)
    assert(f.container2.docker.get.forcePullImage == proto2.getDocker.getForcePullImage)

    val proto3 = f.container3.toProto
    assert(mesos.ContainerInfo.Type.DOCKER == proto3.getType)
    assert("group/image" == proto3.getDocker.getImage)
    assert(f.container3.docker.get.network == Some(proto3.getDocker.getNetwork))
    assert(f.container3.docker.get.privileged == proto3.getDocker.getPrivileged)
    assert(f.container3.docker.get.parameters.map(_.key) == proto3.getDocker.getParametersList.asScala.map(_.getKey))
    assert(f.container3.docker.get.parameters.map(_.value) == proto3.getDocker.getParametersList.asScala.map(_.getValue))
    assert(proto3.getDocker.hasForcePullImage)
    assert(f.container3.docker.get.forcePullImage == proto3.getDocker.getForcePullImage)
  }

  // Entity -> Mesos protobuf. Note: the expected Mesos port mappings carry
  // no servicePort (they only set containerPort, hostPort and protocol).
  test("ToMesos") {
    val f = fixture()
    val proto = f.container.toMesos
    assert(mesos.ContainerInfo.Type.DOCKER == proto.getType)
    assert("group/image" == proto.getDocker.getImage)
    assert(f.container.volumes == proto.getVolumesList.asScala.map(Container.Volume(_)))
    assert(proto.getDocker.hasForcePullImage)
    assert(f.container.docker.get.forcePullImage == proto.getDocker.getForcePullImage)

    val proto2 = f.container2.toMesos
    assert(mesos.ContainerInfo.Type.DOCKER == proto2.getType)
    assert("group/image" == proto2.getDocker.getImage)
    assert(f.container2.docker.get.network == Some(proto2.getDocker.getNetwork))
    val expectedPortMappings = Seq(
      mesos.ContainerInfo.DockerInfo.PortMapping.newBuilder
        .setContainerPort(8080)
        .setHostPort(32001)
        .setProtocol("tcp")
        .build,
      mesos.ContainerInfo.DockerInfo.PortMapping.newBuilder
        .setContainerPort(8081)
        .setHostPort(32002)
        .setProtocol("udp")
        .build
    )
    assert(expectedPortMappings == proto2.getDocker.getPortMappingsList.asScala)
    assert(proto2.getDocker.hasForcePullImage)
    assert(f.container2.docker.get.forcePullImage == proto2.getDocker.getForcePullImage)

    val proto3 = f.container3.toMesos
    assert(mesos.ContainerInfo.Type.DOCKER == proto3.getType)
    assert("group/image" == proto3.getDocker.getImage)
    assert(f.container3.docker.get.network == Some(proto3.getDocker.getNetwork))
    assert(f.container3.docker.get.privileged == proto3.getDocker.getPrivileged)
    assert(f.container3.docker.get.parameters.map(_.key) == proto3.getDocker.getParametersList.asScala.map(_.getKey))
    assert(f.container3.docker.get.parameters.map(_.value) == proto3.getDocker.getParametersList.asScala.map(_.getValue))
    assert(proto3.getDocker.hasForcePullImage)
    assert(f.container3.docker.get.forcePullImage == proto3.getDocker.getForcePullImage)
  }

  // Marathon protobuf -> entity: round-tripping reproduces the fixtures.
  test("ConstructFromProto") {
    val f = fixture()
    val containerInfo = Protos.ExtendedContainerInfo.newBuilder
      .setType(mesos.ContainerInfo.Type.DOCKER)
      .addAllVolumes(f.volumes.map(_.toProto).asJava)
      .setDocker(f.container.docker.get.toProto)
      .build
    val container = Container(containerInfo)
    assert(container == f.container)

    val containerInfo2 = Protos.ExtendedContainerInfo.newBuilder
      .setType(mesos.ContainerInfo.Type.DOCKER)
      .setDocker(f.container2.docker.get.toProto)
      .build
    val container2 = Container(containerInfo2)
    assert(container2 == f.container2)

    val containerInfo3 = Protos.ExtendedContainerInfo.newBuilder
      .setType(mesos.ContainerInfo.Type.DOCKER)
      .setDocker(f.container3.docker.get.toProto)
      .build
    val container3 = Container(containerInfo3)
    assert(container3 == f.container3)
  }

  // JSON <-> entity via Jackson, including parsing of hand-written JSON
  // documents (volumes, port mappings, repeated parameter keys).
  test("SerializationRoundtrip") {
    import com.fasterxml.jackson.databind.ObjectMapper
    import com.fasterxml.jackson.module.scala.DefaultScalaModule
    import mesosphere.jackson.CaseClassModule
    import mesosphere.marathon.api.v2.json.MarathonModule
    val f = fixture()
    val mapper = new ObjectMapper
    mapper.registerModule(DefaultScalaModule)
    mapper.registerModule(new MarathonModule)
    mapper.registerModule(CaseClassModule)

    val container1 = Container(`type` = mesos.ContainerInfo.Type.DOCKER)
    val json1 = mapper.writeValueAsString(container1)
    val readResult1 = mapper.readValue(json1, classOf[Container])
    assert(readResult1 == container1)

    val json2 = mapper.writeValueAsString(f.container)
    val readResult2 = mapper.readValue(json2, classOf[Container])
    assert(readResult2 == f.container)

    val json3 =
      """
      {
        "type": "DOCKER",
        "docker": {
          "image": "group/image"
        },
        "volumes": [
          {
            "containerPath": "/etc/a",
            "hostPath": "/var/data/a",
            "mode": "RO"
          },
          {
            "containerPath": "/etc/b",
            "hostPath": "/var/data/b",
            "mode": "RW"
          }
        ]
      }
      """
    val readResult3 = mapper.readValue(json3, classOf[Container])
    assert(readResult3 == f.container)

    val json4 =
      """
      {
        "type": "DOCKER",
        "docker": {
          "image": "group/image",
          "network": "BRIDGE",
          "portMappings": [
            { "containerPort": 8080, "hostPort": 32001, "servicePort": 9000, "protocol": "tcp"},
            { "containerPort": 8081, "hostPort": 32002, "servicePort": 9001, "protocol": "udp"}
          ]
        }
      }
      """
    val readResult4 = mapper.readValue(json4, classOf[Container])
    assert(readResult4 == f.container2)

    val json5 = mapper.writeValueAsString(f.container3)
    val readResult5 = mapper.readValue(json5, classOf[Container])
    assert(readResult5 == f.container3)

    val json6 =
      """
      {
        "type": "DOCKER",
        "docker": {
          "image": "group/image",
          "network": "NONE",
          "privileged": true,
          "parameters": [
            { "key": "abc", "value": "123" },
            { "key": "def", "value": "456" }
          ]
        }
      }
      """
    val readResult6 = mapper.readValue(json6, classOf[Container])
    assert(readResult6 == f.container3)

    // Multiple values for a given key.
    val json7 =
      """
      {
        "type": "DOCKER",
        "docker": {
          "image": "group/image",
          "network": "NONE",
          "privileged": true,
          "parameters": [
            { "key": "abc", "value": "123" },
            { "key": "def", "value": "456" },
            { "key": "def", "value": "789" }
          ],
          "forcePullImage": true
        }
      }
      """
    val readResult7 = mapper.readValue(json7, classOf[Container])
    assert(readResult7 == f.container4)
  }
}
| quamilek/marathon | src/test/scala/mesosphere/marathon/state/ContainerTest.scala | Scala | apache-2.0 | 9,738 |
// Copyright (c) 2012 Thomas Suckow
// All rights reserved. This program and the accompanying materials
// are made available under the terms of the Eclipse Public License v1.0
// which accompanies this distribution, and is available at
// http://www.eclipse.org/legal/epl-v10.html
package net.codingwell.weave
import com.google.inject.Inject
import com.google.inject.name._
import scala.collection.JavaConversions._
import scala.collection.{mutable => mu, immutable => im}
import akka.actor._
/** Fans compilation work notifications out to every registered language compiler.
  *
  * Fix: corrected the "recieved" typo in the unexpected-message log string.
  *
  * @param compilers the set of language-compiler actors bound under "LangCompilers"
  */
class LocalExecutor @Inject() ( @Named("LangCompilers") val compilers:im.Set[ActorRef] ) extends Actor {
  def receive = {
    case msg @ WeaveCompiler.NotifyWork(actor,source,target) =>
      // Use forward (not !) so compilers see the original sender for replies.
      compilers foreach ( _.forward(msg) )
    case unknown =>
      println(this.toString() + " received unexpected message.")
  }
}
| codingwell/Weave | core/src/main/scala/net/codingwell/weave/LocalExecutor.scala | Scala | epl-1.0 | 821 |
package info.armado.ausleihe.admin.util
import info.armado.ausleihe.database.barcode.Barcode
import info.armado.ausleihe.database.dataobjects.{GameDuration, PlayerCount}
import info.armado.ausleihe.database.entities._
import info.armado.ausleihe.admin.transport.dataobjects._
import java.time.Year
/** Implicit conversions between persistence entities and their transport DTOs. */
object DTOExtensions {

  implicit class EnvelopeExtension(envelope: Envelope) {
    /** Maps an [[Envelope]] entity to its transport DTO. */
    def toEnvelopeDTO: EnvelopeDTO = {
      val dto = new EnvelopeDTO
      dto.barcode = envelope.barcode.toString
      dto.activated = envelope.available
      dto
    }
  }

  implicit class EnvelopeDTOExtension(envelope: EnvelopeDTO) {
    /** Maps a transport DTO back to an [[Envelope]] entity; the activation
      * flag is only applied when present (non-null) on the DTO. */
    def toEnvelope: Envelope = {
      val entity = Envelope(Barcode(envelope.barcode))
      Option(envelope.activated).foreach(flag => entity.available = flag)
      entity
    }
  }

  implicit class GameExtension(game: Game) {
    /** Maps a [[Game]] entity to its transport DTO; optional attributes are
      * copied only when present on the entity. */
    def toGameDTO: GameDTO = {
      val dto = new GameDTO
      dto.barcode = game.barcode.toString
      dto.title = game.title
      dto.author = game.author
      dto.publisher = game.publisher
      dto.minAge = game.minimumAge
      Option(game.gameDuration).foreach(d => dto.duration = DurationDTO(d.minDuration, d.maxDuration))
      Option(game.playerCount).foreach(pc => dto.playerCount = PlayerCountDTO(pc.minPlayerCount, pc.maxPlayerCount))
      Option(game.releaseYear).foreach(year => dto.releaseYear = year.getValue)
      dto.activated = game.available
      dto
    }
  }

  implicit class GameDTOExtension(game: GameDTO) {
    /** Maps a transport DTO back to a [[Game]] entity; null DTO fields
      * leave the corresponding entity attributes untouched. */
    def toGame: Game = {
      val entity = Game(Barcode(game.barcode))
      entity.title = game.title
      Option(game.author).foreach(a => entity.author = a)
      Option(game.publisher).foreach(p => entity.publisher = p)
      Option(game.comment).foreach(c => entity.comment = c)
      Option(game.minAge).foreach(age => entity.minimumAge = age)
      Option(game.playerCount).foreach(pc => entity.playerCount = PlayerCount(pc.min, pc.max))
      Option(game.duration).foreach(d => entity.gameDuration = GameDuration(d.min, d.max))
      Option(game.releaseYear).foreach(year => entity.releaseYear = Year.of(year))
      Option(game.activated).foreach(flag => entity.available = flag)
      entity
    }
  }

  implicit class IdentityCardExtension(identityCard: IdentityCard) {
    /** Maps an [[IdentityCard]] entity to its transport DTO. */
    def toIdentityCardDTO: IdentityCardDTO = {
      val dto = new IdentityCardDTO
      dto.barcode = identityCard.barcode.toString
      dto.activated = identityCard.available
      dto
    }
  }

  implicit class IdentityCardDTOExtension(identityCard: IdentityCardDTO) {
    /** Maps a transport DTO back to an [[IdentityCard]] entity. */
    def toIdentityCard: IdentityCard = {
      val entity = IdentityCard(Barcode(identityCard.barcode))
      Option(identityCard.activated).foreach(flag => entity.available = flag)
      entity
    }
  }

  implicit class LendGameExtension(lendGame: LendGame) {
    /** Maps a [[LendGame]] to a DTO carrying the game barcode and lend time. */
    def toLendGameDTO: LendGameDTO = {
      val dto = new LendGameDTO
      dto.barcode = lendGame.game.barcode.toString
      dto.lendTime = lendGame.lendTime.toString
      dto
    }
  }

  implicit class LendIdentityCardExtension(lendIdentityCard: LendIdentityCard) {
    /** Maps a lend identity card to the grouped DTO including its lend games. */
    def toLendIdentityCardGroupDTO: LendIdentityCardGroupDTO = {
      val dto = new LendIdentityCardGroupDTO
      dto.barcode = lendIdentityCard.identityCard.barcode.toString
      dto.lendGames = lendIdentityCard.currentLendGames.map(_.toLendGameDTO).toArray
      dto
    }

    /** Maps a lend identity card to the flat DTO with summary information. */
    def toLendIdentityCardDTO: LendIdentityCardDTO = {
      val dto = new LendIdentityCardDTO
      dto.identityCardBarcode = lendIdentityCard.identityCard.barcode.toString
      dto.envelopeBarcode = lendIdentityCard.envelope.barcode.toString
      dto.lendTime = lendIdentityCard.lendTime.toString
      dto.numberOfLendGames = lendIdentityCard.currentLendGames.length
      dto.owner = lendIdentityCard.owner
      dto
    }
  }
}
| Spielekreis-Darmstadt/lending | lending-admin-backend/src/main/scala/info/armado/ausleihe/admin/util/DTOExtensions.scala | Scala | apache-2.0 | 4,136 |
package io.continuum.bokeh
// Bokeh model declaration; the `@model` macro generates the field plumbing.
// NOTE(review): field semantics presumably mirror Bokeh's ImageSource
// (a URL template plus extra substitution variables) — confirm against the
// BokehJS ImageSource reference.
@model class ImageSource extends Model {
    object url extends Field[String]
    object extra_url_vars extends Field[Map[String, String]]
}
| bokeh/bokeh-scala | bokeh/src/main/scala/models/Images.scala | Scala | mit | 169 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel
package scala.dsl
import org.apache.camel.Predicate
import org.apache.camel.util.ObjectHelper.evaluateValuePredicate
/** Adapts a Scala function over an [[Exchange]] into a Camel [[Predicate]].
  *
  * The function's result is interpreted with Camel's truth-value semantics
  * via `evaluateValuePredicate`.
  */
class ScalaPredicate(function: Exchange => Any) extends Predicate {
  override def matches(exchange: Exchange): Boolean =
    evaluateValuePredicate(function(exchange))
}
| kingargyle/turmeric-bot | components/camel-scala/src/main/scala/org/apache/camel/scala/ScalaPredicate.scala | Scala | apache-2.0 | 1,118 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.util.Properties
import org.mockito.Matchers.any
import org.mockito.Mockito._
import org.scalatest.BeforeAndAfter
import org.apache.spark._
import org.apache.spark.executor.{Executor, TaskMetrics, TaskMetricsSuite}
import org.apache.spark.memory.TaskMemoryManager
import org.apache.spark.metrics.source.JvmSource
import org.apache.spark.network.util.JavaUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.util._
/**
 * Tests for [[TaskContext]]: metrics sources, completion/failure listeners,
 * attempt numbers, accumulator update semantics on success/failure, and
 * propagation of local properties to executors.
 */
class TaskContextSuite extends SparkFunSuite with BeforeAndAfter with LocalSparkContext {

  test("provide metrics sources") {
    val filePath = getClass.getClassLoader.getResource("test_metrics_config.properties").getFile
    val conf = new SparkConf(loadDefaults = false)
      .set("spark.metrics.conf", filePath)
    sc = new SparkContext("local", "test", conf)
    val rdd = sc.makeRDD(1 to 1)
    val result = sc.runJob(rdd, (tc: TaskContext, it: Iterator[Int]) => {
      tc.getMetricsSources("jvm").count {
        case source: JvmSource => true
        case _ => false
      }
    }).sum
    assert(result > 0)
  }

  test("calls TaskCompletionListener after failure") {
    TaskContextSuite.completed = false
    sc = new SparkContext("local", "test")
    // RDD whose compute registers a completion listener, then fails.
    val rdd = new RDD[String](sc, List()) {
      override def getPartitions = Array[Partition](StubPartition(0))
      override def compute(split: Partition, context: TaskContext) = {
        context.addTaskCompletionListener(context => TaskContextSuite.completed = true)
        sys.error("failed")
      }
    }
    val closureSerializer = SparkEnv.get.closureSerializer.newInstance()
    val func = (c: TaskContext, i: Iterator[String]) => i.next()
    val taskBinary = sc.broadcast(JavaUtils.bufferToArray(closureSerializer.serialize((rdd, func))))
    val task = new ResultTask[String, String](
      0, 0, taskBinary, rdd.partitions(0), Seq.empty, 0, new Properties,
      closureSerializer.serialize(TaskMetrics.registered).array())
    intercept[RuntimeException] {
      task.run(0, 0, null)
    }
    // The listener must have fired even though the task body threw.
    assert(TaskContextSuite.completed === true)
  }

  test("calls TaskFailureListeners after failure") {
    TaskContextSuite.lastError = null
    sc = new SparkContext("local", "test")
    val rdd = new RDD[String](sc, List()) {
      override def getPartitions = Array[Partition](StubPartition(0))
      override def compute(split: Partition, context: TaskContext) = {
        context.addTaskFailureListener((context, error) => TaskContextSuite.lastError = error)
        sys.error("damn error")
      }
    }
    val closureSerializer = SparkEnv.get.closureSerializer.newInstance()
    val func = (c: TaskContext, i: Iterator[String]) => i.next()
    val taskBinary = sc.broadcast(JavaUtils.bufferToArray(closureSerializer.serialize((rdd, func))))
    val task = new ResultTask[String, String](
      0, 0, taskBinary, rdd.partitions(0), Seq.empty, 0, new Properties,
      closureSerializer.serialize(TaskMetrics.registered).array())
    intercept[RuntimeException] {
      task.run(0, 0, null)
    }
    // The failure listener must have received the original error.
    assert(TaskContextSuite.lastError.getMessage == "damn error")
  }

  test("all TaskCompletionListeners should be called even if some fail") {
    val context = TaskContext.empty()
    val listener = mock(classOf[TaskCompletionListener])
    context.addTaskCompletionListener(_ => throw new Exception("blah"))
    context.addTaskCompletionListener(listener)
    context.addTaskCompletionListener(_ => throw new Exception("blah"))
    intercept[TaskCompletionListenerException] {
      context.markTaskCompleted()
    }
    verify(listener, times(1)).onTaskCompletion(any())
  }

  test("all TaskFailureListeners should be called even if some fail") {
    val context = TaskContext.empty()
    val listener = mock(classOf[TaskFailureListener])
    context.addTaskFailureListener((_, _) => throw new Exception("exception in listener1"))
    context.addTaskFailureListener(listener)
    context.addTaskFailureListener((_, _) => throw new Exception("exception in listener3"))
    val e = intercept[TaskCompletionListenerException] {
      context.markTaskFailed(new Exception("exception in task"))
    }
    // Make sure listener 2 was called.
    verify(listener, times(1)).onTaskFailure(any(), any())
    // also need to check failure in TaskFailureListener does not mask earlier exception
    assert(e.getMessage.contains("exception in listener1"))
    assert(e.getMessage.contains("exception in listener3"))
    assert(e.getMessage.contains("exception in task"))
  }

  test("TaskContext.attemptNumber should return attempt number, not task id (SPARK-4014)") {
    sc = new SparkContext("local[1,2]", "test") // use maxRetries = 2 because we test failed tasks
    // Check that attemptIds are 0 for all tasks' initial attempts
    val attemptIds = sc.parallelize(Seq(1, 2), 2).mapPartitions { iter =>
      Seq(TaskContext.get().attemptNumber).iterator
    }.collect()
    assert(attemptIds.toSet === Set(0))

    // Test a job with failed tasks
    val attemptIdsWithFailedTask = sc.parallelize(Seq(1, 2), 2).mapPartitions { iter =>
      val attemptId = TaskContext.get().attemptNumber
      if (iter.next() == 1 && attemptId == 0) {
        throw new Exception("First execution of task failed")
      }
      Seq(attemptId).iterator
    }.collect()
    assert(attemptIdsWithFailedTask.toSet === Set(0, 1))
  }

  test("accumulators are updated on exception failures") {
    // This means use 1 core and 4 max task failures
    sc = new SparkContext("local[1,4]", "test")
    // Create 2 accumulators, one that counts failed values and another that doesn't
    val acc1 = AccumulatorSuite.createLongAccum("x", true)
    val acc2 = AccumulatorSuite.createLongAccum("y", false)
    // Fail first 3 attempts of every task. This means each task should be run 4 times.
    sc.parallelize(1 to 10, 10).map { i =>
      acc1.add(1)
      acc2.add(1)
      if (TaskContext.get.attemptNumber() <= 2) {
        throw new Exception("you did something wrong")
      } else {
        0
      }
    }.count()
    // The one that counts failed values should be 4x the one that didn't,
    // since we ran each task 4 times
    assert(AccumulatorContext.get(acc1.id).get.value === 40L)
    assert(AccumulatorContext.get(acc2.id).get.value === 10L)
  }

  test("failed tasks collect only accumulators whose values count during failures") {
    sc = new SparkContext("local", "test")
    val acc1 = AccumulatorSuite.createLongAccum("x", false)
    val acc2 = AccumulatorSuite.createLongAccum("y", true)
    acc1.add(1)
    acc2.add(1)
    // Create a dummy task. We won't end up running this; we just want to collect
    // accumulator updates from it.
    val taskMetrics = TaskMetrics.empty
    val task = new Task[Int](0, 0, 0) {
      context = new TaskContextImpl(0, 0, 0L, 0,
        new TaskMemoryManager(SparkEnv.get.memoryManager, 0L),
        new Properties,
        SparkEnv.get.metricsSystem,
        taskMetrics)
      taskMetrics.registerAccumulator(acc1)
      taskMetrics.registerAccumulator(acc2)
      override def runTask(tc: TaskContext): Int = 0
    }
    // First, simulate task success. This should give us all the accumulators.
    val accumUpdates1 = task.collectAccumulatorUpdates(taskFailed = false)
    TaskMetricsSuite.assertUpdatesEquals(accumUpdates1.takeRight(2), Seq(acc1, acc2))
    // Now, simulate task failures. This should give us only the accums that count failed values.
    val accumUpdates2 = task.collectAccumulatorUpdates(taskFailed = true)
    TaskMetricsSuite.assertUpdatesEquals(accumUpdates2.takeRight(1), Seq(acc2))
  }

  test("only updated internal accumulators will be sent back to driver") {
    sc = new SparkContext("local", "test")
    // Create a dummy task. We won't end up running this; we just want to collect
    // accumulator updates from it.
    val taskMetrics = TaskMetrics.empty
    val task = new Task[Int](0, 0, 0) {
      context = new TaskContextImpl(0, 0, 0L, 0,
        new TaskMemoryManager(SparkEnv.get.memoryManager, 0L),
        new Properties,
        SparkEnv.get.metricsSystem,
        taskMetrics)
      taskMetrics.incMemoryBytesSpilled(10)
      override def runTask(tc: TaskContext): Int = 0
    }
    val updatedAccums = task.collectAccumulatorUpdates()
    assert(updatedAccums.length == 2)
    // the RESULT_SIZE accumulator will be sent back anyway.
    assert(updatedAccums(0).name == Some(InternalAccumulator.RESULT_SIZE))
    assert(updatedAccums(0).value == 0)
    assert(updatedAccums(1).name == Some(InternalAccumulator.MEMORY_BYTES_SPILLED))
    assert(updatedAccums(1).value == 10)
  }

  test("localProperties are propagated to executors correctly") {
    sc = new SparkContext("local", "test")
    sc.setLocalProperty("testPropKey", "testPropValue")
    // Observe the property both from the TaskContext and from the
    // deserialization-time properties captured by the Executor.
    val res = sc.parallelize(Array(1), 1).map(i => i).map(i => {
      val inTask = TaskContext.get().getLocalProperty("testPropKey")
      val inDeser = Executor.taskDeserializationProps.get().getProperty("testPropKey")
      s"$inTask,$inDeser"
    }).collect()
    assert(res === Array("testPropValue,testPropValue"))
  }

  test("immediately call a completion listener if the context is completed") {
    var invocations = 0
    val context = TaskContext.empty()
    context.markTaskCompleted()
    context.addTaskCompletionListener(_ => invocations += 1)
    assert(invocations == 1)
    context.markTaskCompleted()
    assert(invocations == 1)
  }

  test("immediately call a failure listener if the context has failed") {
    var invocations = 0
    var lastError: Throwable = null
    val error = new RuntimeException
    val context = TaskContext.empty()
    context.markTaskFailed(error)
    context.addTaskFailureListener { (_, e) =>
      lastError = e
      invocations += 1
    }
    assert(lastError == error)
    assert(invocations == 1)
    context.markTaskFailed(error)
    assert(lastError == error)
    assert(invocations == 1)
  }
}
// Shared mutable state observed across task threads, hence @volatile.
private object TaskContextSuite {
  @volatile var completed = false
  @volatile var lastError: Throwable = _
}

// Minimal Partition stub carrying only an index.
private case class StubPartition(index: Int) extends Partition
| jianran/spark | core/src/test/scala/org/apache/spark/scheduler/TaskContextSuite.scala | Scala | apache-2.0 | 10,924 |
package toguru
import _root_.play.api.mvc._
import toguru.api.{Activations, ClientInfo, Toggling, ToguruClient}
package object play {
  // Aliases specializing the generic toguru client to Play's RequestHeader.
  type PlayToguruClient = ToguruClient[RequestHeader]
  type PlayClientProvider = ClientInfo.Provider[RequestHeader]

  /** A Play request wrapper that additionally carries the toguru client info
    * and the toggle activation state (exposed via the [[Toggling]] mixin). */
  final class ToggledRequest[A](val client: ClientInfo, val activations: Activations, request: Request[A])
      extends WrappedRequest[A](request)
      with Toggling
}
| AutoScout24/toguru-scala-client | play/src/main/scala/toguru/play/package.scala | Scala | mit | 426 |
/*
* Copyright 2015 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the License);
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.jobs.scalding.serialization
import com.esotericsoftware.kryo.io.{Input, Output}
import com.twitter.chill.config.Config
import org.junit.runner.RunWith
import org.locationtech.geomesa.feature.{ScalaSimpleFeature, ScalaSimpleFeatureFactory}
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class Kryo2SimpleFeatureSerializerTest extends Specification {

  "Kryo2SimpleFeatureSerializer" should {
    "read and write simple features" in {
      // Kryo instance obtained through a no-op chill Config (no external
      // configuration is needed for this round-trip test).
      val kryo = new SimpleFeatureKryoHadoop(new Config(){
        override def set(key: String, value: String) = {}
        override def get(key: String) = null
      }).newKryo()

      val sft = SimpleFeatureTypes.createType("test", "name:String,dtg:Date,*geom:Point:srid=4326")
      val sf = ScalaSimpleFeatureFactory.buildFeature(sft, Seq("myname", "2014-01-10T00:00:00.000Z", "POINT(45 46)"), "fid-1")

      // Serialize into a growable output buffer, then feed the bytes back in.
      val output = new Output(1024, -1)
      val input = new Input(Array.empty[Byte])
      kryo.writeObject(output, sf)
      input.setBuffer(output.toBytes)

      // Round-trip: both the feature and its feature type must be preserved.
      val deserialized = kryo.readObject(input, classOf[ScalaSimpleFeature])
      deserialized mustEqual(sf)
      deserialized.getFeatureType mustEqual(sft)
    }
  }
}
| jnh5y/geomesa | geomesa-jobs/src/test/scala/org/locationtech/geomesa/jobs/scalding/serialization/Kryo2SimpleFeatureSerializerTest.scala | Scala | apache-2.0 | 1,952 |
package japgolly.scalajs.react.vdom
import scala.scalajs.LinkingInfo.developmentMode
/**
* Represents a value that can be nested within a [[TagOf]]. This can be
* another [[TagMod]], but can also be a CSS style or HTML attribute binding,
* which will add itself to the node's attributes but not appear in the final
* `children` list.
*/
trait TagMod {

  /**
   * Applies this modifier to the specified [[Builder]], such that when
   * rendering is complete the effect of adding this modifier can be seen.
   */
  def applyTo(b: Builder): Unit

  /** Returns this modifier when `condition` holds, otherwise the no-op modifier. */
  final def when(condition: Boolean): TagMod =
    if (condition) this else TagMod.empty

  /** Returns this modifier unless `condition` holds, otherwise the no-op modifier. */
  final def unless(condition: Boolean): TagMod =
    when(!condition)

  /** Combines this modifier with `ms` into a single composite modifier,
    * preserving order (this first). */
  def apply(ms: TagMod*): TagMod =
    TagMod.Composite((Vector.newBuilder[TagMod] += this ++= ms).result())

  /**
   * Converts this VDOM and all its potential children into raw JS values.
   *
   * Meant for very advanced usage.
   *
   * Do not use this unless you know what you're doing (and you're doing something very funky)!
   */
  final def toJs: Builder.ToJs = {
    val t = new Builder.ToJs {}
    applyTo(t)
    t
  }
}
object TagMod {

  /** Creates a modifier from a raw function over the builder. */
  def fn(f: Builder => Unit): TagMod =
    new TagMod {
      override def applyTo(b: Builder): Unit =
        f(b)
    }

  /** Combines several modifiers into one. */
  def apply(ms: TagMod*): TagMod =
    fromTraversableOnce(ms)

  /** Collapses a collection of modifiers: empty becomes the no-op modifier,
    * a singleton is returned as-is, anything larger becomes a [[Composite]]. */
  def fromTraversableOnce(t: TraversableOnce[TagMod]): TagMod = {
    val v = t.toVector
    v.length match {
      case 1 => v.head
      case 0 => empty
      case _ => Composite(v)
    }
  }

  /** A modifier that applies each of `mods` in order. */
  final case class Composite(mods: Vector[TagMod]) extends TagMod {
    override def applyTo(b: Builder): Unit =
      mods.foreach(_ applyTo b)
    override def apply(ms: TagMod*) =
      Composite(mods ++ ms)
  }

  /** The no-op modifier: applying it has no effect on the builder. */
  val empty: TagMod =
    new TagMod {
      override def applyTo(b: Builder) = ()
      override def apply(ms: TagMod*) = TagMod.fromTraversableOnce(ms)
    }

  /** Evaluates `m` only when linked in development mode; no-op in production. */
  def devOnly(m: => TagMod): TagMod =
    if (developmentMode)
      m
    else
      empty

  /** `t` when `cond` holds, otherwise the no-op modifier (lazy in `t`). */
  def when(cond: Boolean)(t: => TagMod): TagMod =
    if (cond) t else empty

  @inline def unless(cond: Boolean)(t: => TagMod): TagMod =
    when(!cond)(t)

  /** Intersperses `sep` between the elements of `as`.
    * NOTE(review): queries `as` via both isEmpty and toIterator — assumes `as`
    * may be traversed after the isEmpty check (true for strict collections and
    * plain Iterators); confirm for lazy/one-shot sources. */
  def intercalate(as: TraversableOnce[TagMod], sep: TagMod): TagMod =
    if (as.isEmpty)
      empty
    else {
      val it = as.toIterator
      val first = it.next()
      if (it.isEmpty)
        first
      else {
        val b = Vector.newBuilder[TagMod]
        b += first
        for (a <- it) {
          b += sep
          b += a
        }
        Composite(b.result())
      }
    }
}
| matthughes/scalajs-react | core/src/main/scala/japgolly/scalajs/react/vdom/TagMod.scala | Scala | apache-2.0 | 2,576 |
package threesixty.visualizer.util.param
import threesixty.data.Data.Timestamp
/** Fully-resolved (non-optional) axis rendering configuration. */
case class AxisParam(
  val label: String,
  val labelSize: Int,
  val labelFontFamily: String,
  val minPxBetweenGridPoints: Int,
  val unitLabelSize: Int,
  val unitLabelFontFamily: String,
  val showGrid: Boolean,
  val showLabels: Boolean,
  val arrowSize: Int,
  val arrowFilled: Boolean) {

  // Grid points must be strictly separated; zero/negative spacing is invalid.
  require(minPxBetweenGridPoints > 0, "Value for minPxBetweenGridPoints must be greater than 0.")
}
/** Base class for partially-specified axis configuration; every setting is
  * optional so callers can override only what they need. */
abstract class OptAxisParam(
  val label: Option[String],
  val labelSize: Option[Int],
  val labelFontFamily: Option[String],
  val minPxBetweenGridPoints: Option[Int],
  val unitLabelSize: Option[Int],
  val unitLabelFontFamily: Option[String],
  val showGrid: Option[Boolean],
  val showLabels: Option[Boolean],
  val arrowSize: Option[Int],
  val arrowFilled: Option[Boolean]) {
}
/** Optional configuration for a value (numeric) axis.
  *
  * All parameters default to `None` so callers only need to name the settings
  * they want to override. (Fix: `labelSize` previously lacked the `= None`
  * default that every sibling parameter — and [[OptTimeAxisParam]] — has,
  * forcing callers to pass it explicitly; adding the default is backward
  * compatible.)
  */
case class OptValueAxisParam(override val label: Option[String] = None,
                             override val labelSize: Option[Int] = None,
                             override val labelFontFamily: Option[String] = None,
                             val min: Option[Double] = None,
                             val max: Option[Double] = None,
                             override val minPxBetweenGridPoints: Option[Int] = None,
                             val unit: Option[Double] = None,
                             override val unitLabelSize: Option[Int] = None,
                             override val unitLabelFontFamily: Option[String] = None,
                             override val showGrid: Option[Boolean] = None,
                             override val showLabels: Option[Boolean] = None,
                             override val arrowSize: Option[Int] = None,
                             override val arrowFilled: Option[Boolean] = None) extends OptAxisParam(
  label,
  labelSize,
  labelFontFamily,
  minPxBetweenGridPoints,
  unitLabelSize,
  unitLabelFontFamily,
  showGrid,
  showLabels,
  arrowSize,
  arrowFilled) {
}
/** Optional configuration for a time axis; bounds are [[Timestamp]]s and the
  * unit is a named time unit rather than a numeric step. */
case class OptTimeAxisParam(override val label: Option[String] = None,
                            override val labelSize: Option[Int] = None,
                            override val labelFontFamily: Option[String] = None,
                            val min: Option[Timestamp] = None,
                            val max: Option[Timestamp] = None,
                            override val minPxBetweenGridPoints: Option[Int] = None,
                            val unit: Option[String] = None,
                            override val unitLabelSize: Option[Int] = None,
                            override val unitLabelFontFamily: Option[String] = None,
                            override val showGrid: Option[Boolean] = None,
                            override val showLabels: Option[Boolean] = None,
                            override val arrowSize: Option[Int] = None,
                            override val arrowFilled: Option[Boolean] = None) extends OptAxisParam(
  label,
  labelSize,
  labelFontFamily,
  minPxBetweenGridPoints,
  unitLabelSize,
  unitLabelFontFamily,
  showGrid,
  showLabels,
  arrowSize,
  arrowFilled) {
}
| elordin/threesixty | src/main/scala/threesixty/visualizer/util/param/AxisParam.scala | Scala | mit | 3,243 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples.mllib
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
// $example on$
import org.apache.spark.mllib.feature.ChiSqSelector
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.util.MLUtils
// $example off$
/** Example: chi-squared feature selection over libsvm data with Spark MLlib. */
object ChiSqSelectorExample {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("ChiSqSelectorExample")
    val sc = new SparkContext(conf)

    // $example on$
    // Load some data in libsvm format
    val data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")
    // Discretize data in 16 equal bins since ChiSqSelector requires categorical features
    // Even though features are doubles, the ChiSqSelector treats each unique value as a category
    val discretizedData = data.map { lp =>
      LabeledPoint(lp.label, Vectors.dense(lp.features.toArray.map { x => (x / 16).floor }))
    }
    // Create ChiSqSelector that will select top 50 of 692 features
    val selector = new ChiSqSelector(50)
    // Create ChiSqSelector model (selecting features)
    val transformer = selector.fit(discretizedData)
    // Filter the top 50 features from each feature vector
    val filteredData = discretizedData.map { lp =>
      LabeledPoint(lp.label, transformer.transform(lp.features))
    }
    // $example off$

    println("filtered data: ")
    filteredData.foreach(x => println(x))

    sc.stop()
  }
}
// scalastyle:on println
| mrchristine/spark-examples-dbc | src/main/scala/org/apache/spark/examples/mllib/ChiSqSelectorExample.scala | Scala | apache-2.0 | 2,351 |
package cpup.mc.oldenMagic.content.runes
import cpup.mc.oldenMagic.api.oldenLanguage.runeParsing.TVerbRune
import cpup.mc.oldenMagic.api.oldenLanguage.runes.TRuneType
import cpw.mods.fml.relauncher.{Side, SideOnly}
import net.minecraft.util.IIcon
import cpup.mc.oldenMagic.OldenMagicMod
import net.minecraft.nbt.NBTTagCompound
import cpup.mc.oldenMagic.api.oldenLanguage.casting.{CastingRegistry, TCancellableAction, TAction, CastingContext}
import cpup.mc.lib.util.pos.BlockPos
import net.minecraft.entity.{EntityLiving, Entity}
import cpup.mc.oldenMagic.api.oldenLanguage.textParsing.{TextRune, TParsingContext, TTransform}
import net.minecraftforge.event.entity.living.LivingSetAttackTargetEvent
import cpup.mc.oldenMagic.content.targets.EntityCaster
import net.minecraft.client.renderer.texture.IIconRegister
import cpup.mc.lib.targeting.TargetingRegistry
/** The "seen" verb rune. It carries no state and performs no direct action;
  * presumably its effect is realised via [[SeenAction]] — confirm against the
  * rune-dispatch code. */
class SeenRune extends TVerbRune {
  def runeType = SeenRune

  // Stateless rune: nothing to persist.
  def writeToNBT(nbt: NBTTagCompound) {}

  // No direct effect on blocks or entities when cast.
  def act(context: CastingContext, pos: BlockPos) {}
  def act(context: CastingContext, entity: Entity) {}

  @SideOnly(Side.CLIENT)
  def icons = List(SeenRune.icon)
}
/** Rune-type registration for [[SeenRune]]: naming, NBT reconstruction and
  * client-side icon loading. */
object SeenRune extends TRuneType {
  def mod = OldenMagicMod
  def name = s"${mod.ref.modID}:seen"
  def runeClass = classOf[SeenRune]

  // SeenRune is stateless, so deserialization ignores the NBT payload.
  def readFromNBT(nbt: NBTTagCompound) = new SeenRune

  @SideOnly(Side.CLIENT)
  var icon: IIcon = null

  @SideOnly(Side.CLIENT)
  def registerIcons(register: IIconRegister) {
    icon = register.registerIcon(s"${mod.ref.modID}:runes/seen")
  }
}
/** Text-parsing transform that yields a [[SeenRune]] regardless of content. */
object SeenTransform extends TTransform {
  def transform(context: TParsingContext, content: String) = new SeenRune
}
/** Cancellable action wrapping a [[LivingSetAttackTargetEvent]]: cancelling
  * clears the attacker's target, un-cancelling restores it. */
class SeenAction(val e: LivingSetAttackTargetEvent) extends TAction with TCancellableAction {
  // Wraps the event's target as a casting target; .get assumes the wrap
  // succeeds for any entity that fires this event — TODO confirm.
  val affectedTarget = TargetingRegistry.wrapEntity(e.target).flatMap(CastingRegistry.wrap(_)).get

  override def runeType = SeenRune

  def cancel {
    // e.entityLiving.setRevengeTarget(null)
    // e.entityLiving.setLastAttacker(null)
    // Only EntityLiving exposes setAttackTarget; other entity types are left untouched.
    if(e.entityLiving.isInstanceOf[EntityLiving]) {
      e.entityLiving.asInstanceOf[EntityLiving].setAttackTarget(null)
    }
  }

  def uncancel {
    // e.entityLiving.setRevengeTarget(e.target)
    // e.entityLiving.setLastAttacker(e.target)
    if(e.entityLiving.isInstanceOf[EntityLiving]) {
      e.entityLiving.asInstanceOf[EntityLiving].setAttackTarget(e.target)
    }
  }

  def isCanceled = e.isCanceled
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
/*
package integration.socket
import java.io.File
import akka.actor.{ActorRef, ActorSystem, Props}
import akka.pattern.ask
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import akka.util.Timeout
import org.apache.toree.kernel.protocol.v5.client.ZMQMessage
import org.apache.toree.kernel.protocol.v5.SocketType
import org.apache.toree.kernel.protocol.v5.socket._
import org.apache.toree.kernel.protocol.v5.socket.SocketConfig
import com.typesafe.config.ConfigFactory
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{FunSpecLike, Matchers}
import scala.concurrent.duration._
class ClientToHeartbeatSpecForIntegration extends TestKit(ActorSystem("HeartbeatActorSpec"))
with ImplicitSender with FunSpecLike with Matchers with MockitoSugar {
describe("HeartbeatActor") {
implicit val timeout = Timeout(1.minute)
val clientSocketFactory = mock[ClientSocketFactory]
val serverSocketFactory = mock[ServerSocketFactory]
val probe: TestProbe = TestProbe()
val probeClient: TestProbe = TestProbe()
when(serverSocketFactory.Heartbeat(any(classOf[ActorSystem]), any(classOf[ActorRef]))).thenReturn(probe.ref)
when(clientSocketFactory.HeartbeatClient(any(classOf[ActorSystem]), any(classOf[ActorRef]))).thenReturn(probeClient.ref)
val heartbeat = system.actorOf(Props(classOf[Heartbeat], serverSocketFactory))
val heartbeatClient = system.actorOf(Props(classOf[HeartbeatClient], clientSocketFactory))
describe("send heartbeat") {
it("should send and receive same ZMQMessage") {
heartbeatClient ? HeartbeatMessage
probeClient.expectMsgClass(classOf[ZMQMessage])
probeClient.forward(heartbeat)
probe.expectMsgClass(classOf[ZMQMessage])
probe.forward(heartbeatClient)
}
}
describe("send heartbeat") {
it("should work with real actorsystem and no probes") {
val system = ActorSystem("iopubtest")
val socketConfig = SocketConfig.fromConfig(ConfigFactory.parseString(
"""
{
"stdin_port": 8000,
"ip": "127.0.0.1",
"control_port": 8001,
"hb_port": 8002,
"signature_scheme": "hmac-sha256",
"key": "",
"shell_port": 8003,
"transport": "tcp",
"iopub_port": 8004
}
""".stripMargin)
)
val clientSocketFactory = new ClientSocketFactory(socketConfig)
val ioPUB = system.actorOf(Props(classOf[ActorRef], serverSocketFactory), name = SocketType.IOPub.toString)
}
}
}
}
*/ | chipsenkbeil/incubator-toree | client/src/test/scala/integration/unused.integration.socket/ClientToHeartbeatSpecForIntegration.scala | Scala | apache-2.0 | 3,496 |
package com.thisisfranklin.crdt
object BooleanLattice {
  /** The least element of the lattice: `false`. */
  def bottom = BooleanLattice(false)
}
/** A join-semilattice over Boolean where `true` dominates `false`:
  * the join is logical disjunction and the order is `false < true`. */
case class BooleanLattice(value: Boolean) extends Lattice[Boolean, BooleanLattice] {

  /** Least upper bound of the two values (logical or). */
  def merge(other: BooleanLattice): BooleanLattice = copy(value = value || other.value)

  /** Booleans are totally ordered, so a comparison always exists:
    * true > false, equal values compare as 0. */
  def tryCompareTo(other: BooleanLattice): Option[Int] =
    (value, other.value) match {
      case (true, false) => Some(1)
      case (false, true) => Some(-1)
      case _             => Some(0)
    }
}
| franklinhu/crdt | src/main/scala/com/thisisfranklin/crdt/BooleanLattice.scala | Scala | apache-2.0 | 461 |
package spotlight.publish
import java.io.ByteArrayOutputStream
import java.net.{ InetAddress, InetSocketAddress, Socket }
import java.util.concurrent.atomic.{ AtomicBoolean, AtomicInteger }
import javax.net._
import javax.script.{ Compilable, ScriptEngineManager, SimpleBindings }
import akka.actor.{ ActorSystem, Props }
import akka.testkit.{ TestActorRef, TestProbe }
import akka.util.ByteString
import com.typesafe.config.{ Config, ConfigFactory }
import spotlight.model.outlier.{ NoOutliers, AnalysisPlan, SeriesOutliers }
import spotlight.model.timeseries.{ DataPoint, ThresholdBoundary, TimeSeries, Topic }
import spotlight.protocol.PythonPickleProtocol
import spotlight.testkit.ParallelAkkaSpec
import org.joda.{ time => joda }
import org.mockito.Mockito._
import org.mockito.ArgumentMatchers._
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.python.core.{ PyList, PyTuple }
import org.scalatest.Tag
import org.scalatest.mockito.MockitoSugar
import omnibus.commons.log.Trace
/** Created by rolfsd on 12/31/15.
*/
object GraphitePublisherSpec {
  // Jython engine used to unpickle the byte stream the publisher writes, so
  // tests can assert on graphite's pickle wire format.
  val engine = new ScriptEngineManager().getEngineByName( "python" )
  val compilable = engine.asInstanceOf[Compilable]

  // Unpickles a single length-prefixed pickle batch from `payload`; the
  // decoded `metrics` and consumed `batchLength` are read back out of the
  // script bindings by the caller.
  val unpickleScript = compilable.compile(
    """
      |import pickle
      |import struct
      |format = '!L'
      |headerLength = struct.calcsize(format)
      |print "D headerLength=%d" % headerLength
      |payloadLength, = struct.unpack(format, payload[:headerLength])
      |print "E payloadLength=%d" % payloadLength
      |batchLength = headerLength + payloadLength
      |print "F batchLength=%d" % batchLength
      |metrics = pickle.loads(payload[headerLength:batchLength])
      |print "G metrics=%s" % metrics
    """.stripMargin
  )
}
/**
 * Spec for [[GraphitePublisher]]: drives the publisher against a mocked
 * socket, captures the bytes it writes, and unpickles them (via Jython) to
 * assert on the exact metrics sent to graphite.
 *
 * NOTE(review): the source text was mangled by an encoding pass that replaced
 * every `=>` / `->` arrow with `β` and doubled the `\n` escapes; both are
 * restored here — no logic has been changed.
 */
class GraphitePublisherSpec
  extends ParallelAkkaSpec
  with MockitoSugar {
  import GraphitePublisher._
  import GraphitePublisherSpec._
  import OutlierPublisher._

  override val trace = Trace[GraphitePublisherSpec]

  override def createAkkaFixture( test: OneArgTest, config: Config, system: ActorSystem, slug: String ): Fixture = {
    new Fixture( config, system, slug )
  }

  /** Per-test fixture: a mocked socket whose output stream is captured, plus
    * a GraphitePublisher wired to it. */
  class Fixture( _config: Config, _system: ActorSystem, _slug: String ) extends AkkaFixture( _config, _system, _slug ) { outer =>
    val senderProbe = TestProbe( "test-sender" )

    // Mutable socket state, shared with the mock answers below.
    val connected: AtomicBoolean = new AtomicBoolean( true )
    val closed: AtomicBoolean = new AtomicBoolean( false )
    val openCount: AtomicInteger = new AtomicInteger( 0 )

    val address: InetSocketAddress = new InetSocketAddress( "example.com", 1234 )
    val output: ByteArrayOutputStream = spy( new ByteArrayOutputStream )
    val socketFactory: SocketFactory = mock[SocketFactory]

    val plan = mock[AnalysisPlan]
    when( plan.name ) thenReturn "plan"
    // when( plan.algorithmConfig ) thenReturn ConfigFactory.parseString( "" )

    val socket: Socket = mock[Socket]
    when( socket.isConnected ).thenAnswer(
      new Answer[Boolean] {
        override def answer( invocation: InvocationOnMock ): Boolean = connected.get
      }
    )
    when( socket.isClosed ).thenAnswer(
      new Answer[Boolean] {
        override def answer( invocation: InvocationOnMock ): Boolean = closed.get
      }
    )

    doAnswer(
      new Answer[Unit] {
        override def answer( invocation: InvocationOnMock ): Unit = {
          connected set false
          closed set true
        }
      }
    ).when( socket ).close

    when( socket.getOutputStream ).thenReturn( output )

    // Mock behavior of socket.getOutputStream().close() calling socket.close();
    doAnswer(
      new Answer[Unit] {
        override def answer( invocation: InvocationOnMock ): Unit = {
          invocation.callRealMethod()
          socket.close
        }
      }
    ).when( output ).close()

    when( socketFactory.createSocket( any( classOf[InetAddress] ), anyInt ) ).thenReturn( socket )

    val publisherProps = Props(
      new GraphitePublisher with GraphitePublisher.PublishProvider {
        import scala.concurrent.duration._
        override lazy val maxOutstanding: Int = 1000000
        override lazy val separation: FiniteDuration = 1.second
        override def initializeMetrics(): Unit = {}
        override lazy val batchSize: Int = 100
        override lazy val destinationAddress: InetSocketAddress = outer.address
        override def createSocket( address: InetSocketAddress ): Socket = {
          openCount.incrementAndGet()
          outer.socket
        }
        override def publishingTopic( p: AnalysisPlan, t: Topic ): Topic = t
      }
    )

    val graphite = TestActorRef[GraphitePublisher]( publisherProps )

    val dp1 = DataPoint( new joda.DateTime( 100000L ), 17D )
    val dp1b = DataPoint( new joda.DateTime( 103000L ), 19D )
    val dp2 = DataPoint( new joda.DateTime( 117000L ), 3.1415926D )
    val dp3 = DataPoint( new joda.DateTime( 9821000L ), 983.120D )

    /** Unpickles everything written to `output` into "name value timestamp\n" lines. */
    def unpickleOutput( pickle: ByteString = ByteString( output.toByteArray ) ): String = {
      import scala.collection.mutable
      val results = mutable.StringBuilder.newBuilder
      // the charset is important. if the GraphitePickleReporter and this test
      // don't agree, the header is not always correctly unpacked.
      val payload = pickle.decodeString( "UTF-8" )
      trace( s"payload = $payload" )
      val result = new PyList
      var nextIndex = 0
      // Each iteration consumes one length-prefixed pickle batch.
      while ( nextIndex < payload.length ) {
        val bindings = new SimpleBindings
        bindings.put( "payload", payload substring nextIndex )
        unpickleScript eval bindings
        result.addAll( result.size, bindings.get( "metrics" ).asInstanceOf[java.util.Collection[_]] )
        nextIndex += bindings.get( "batchLength" ).asInstanceOf[java.math.BigInteger].intValue()
      }
      import scala.collection.JavaConverters._
      result.iterator.asScala.foreach {
        case datapoint: PyTuple =>
          val name = datapoint.get( 0 ).toString
          val valueTuple = datapoint.get( 1 ).asInstanceOf[PyTuple]
          val timestamp = valueTuple get 0
          val value = valueTuple get 1
          results.append( name ).append( " " ).append( value ).append( " " ).append( timestamp ).append( "\n" )
      }
      results.toString()
    }
  }

  val DONE = Tag( "done" )

  "GraphitePublisher" should {
    "disconnects from graphite" in { f: Fixture =>
      import f._
      graphite ! Close
      // graphite.receive( Close )
      verify( socket ).close()
    }

    "first replicate python protocol test" taggedAs WIP in { f: Fixture =>
      import f._
      import org.joda.{ time => joda }
      import spotlight.model.timeseries._
      val batch = Seq(
        ( "foo".toTopic, new joda.DateTime( 100000L ), 1D ),
        ( "bar".toTopic, new joda.DateTime( 117000L ), 0D ),
        ( "zed".toTopic, new joda.DateTime( 9821000L ), 0D )
      )
      unpickleOutput( new PythonPickleProtocol().pickleFlattenedTimeSeries( batch: _* ) ) mustBe {
        // timestamp long are be divided by 1000L to match graphite's epoch time
        "foo 1.0 100\nbar 0.0 117\nzed 0.0 9821\n"
      }
    }

    "write one-point batch" in { f: Fixture =>
      import f._
      val outliers = NoOutliers(
        algorithms = Set( "dbscan" ),
        source = TimeSeries( "foo", Seq( dp1 ) ),
        plan = plan
      )
      graphite.receive( Publish( outliers ) )
      graphite.receive( Flush, senderProbe.ref )
      senderProbe.expectMsg( GraphitePublisher.Flushed( true ) )
      val actual = ByteString( output.toByteArray )
      unpickleOutput( actual ) mustBe "foo 0.0 100\n"
    }

    "write full batch" in { f: Fixture =>
      import f._
      val outliers = SeriesOutliers(
        algorithms = Set( "dbscan" ),
        source = TimeSeries( "foo", Seq( dp1, dp2 ) ),
        outliers = Seq( dp2 ),
        plan = plan
      )
      // NoOutlier pickle will be include 0.0 for each second in source range
      graphite.receive( Publish( outliers ) )
      graphite.receive( Flush, senderProbe.ref )
      senderProbe.expectMsg( GraphitePublisher.Flushed( true ) )
      unpickleOutput() mustBe "foo 0.0 100\nfoo 1.0 117\n"
    }

    "write past full batch" in { f: Fixture =>
      import f._
      // Publisher variant that prefixes topics with "spotlight.outlier.".
      val graphite2 = TestActorRef[GraphitePublisher](
        Props(
          new GraphitePublisher with GraphitePublisher.PublishProvider {
            import scala.concurrent.duration._
            override val maxOutstanding: Int = 1000000
            override val separation: FiniteDuration = 1.second
            override def initializeMetrics(): Unit = {}
            override val batchSize: Int = 100
            override def destinationAddress: InetSocketAddress = f.address
            override def createSocket( address: InetSocketAddress ): Socket = {
              openCount.incrementAndGet()
              f.socket
            }
            override def publishingTopic( p: AnalysisPlan, t: Topic ): Topic = "spotlight.outlier." + super.publishingTopic( p, t )
          }
        )
      )
      val outliers = SeriesOutliers(
        algorithms = Set( "dbscan" ),
        source = TimeSeries( "foo", Seq( dp1, dp2, dp3 ) ),
        outliers = Seq( dp1 ),
        plan = plan
      )
      graphite2.receive( Publish( outliers ) )
      graphite2.receive( Flush, senderProbe.ref )
      senderProbe.expectMsg( GraphitePublisher.Flushed( true ) )
      unpickleOutput() mustBe "spotlight.outlier.plan.foo 1.0 100\nspotlight.outlier.plan.foo 0.0 117\nspotlight.outlier.plan.foo 0.0 9821\n"
    }

    "write full no-outlier batch" in { f: Fixture =>
      import f._
      val outliers = NoOutliers(
        algorithms = Set( "dbscan" ),
        source = TimeSeries( "foo", Seq( dp1, dp1b ) ),
        plan = plan
      )
      // NoOutlier pickle will be include 0.0 for each second in source range
      graphite.receive( Publish( outliers ) )
      graphite.receive( Flush, senderProbe.ref )
      senderProbe.expectMsg( GraphitePublisher.Flushed( true ) )
      unpickleOutput() mustBe "foo 0.0 100\nfoo 0.0 101\nfoo 0.0 102\nfoo 0.0 103\n"
    }

    "write sanitize names" in { f: Fixture =>
      import f._
      val outliers = SeriesOutliers(
        algorithms = Set( "dbscan" ),
        source = TimeSeries( "foo bar", Seq( dp1, dp2, dp3 ) ),
        outliers = Seq( dp1 ),
        plan = plan
      )
      graphite.receive( Publish( outliers ) )
      graphite.receive( Flush, senderProbe.ref )
      senderProbe.expectMsg( GraphitePublisher.Flushed( true ) )
      // Spaces in topic names become dashes on the graphite wire.
      unpickleOutput() mustBe "foo-bar 1.0 100\nfoo-bar 0.0 117\nfoo-bar 0.0 9821\n"
    }

    "ignores double opens" in { f: Fixture =>
      import f._
      graphite.receive( Open )
      graphite.receive( Open )
      openCount.get mustBe 1
    }

    "write past full batch with threshold boundaries" in { f: Fixture =>
      import f._
      val algConfig = ConfigFactory.parseString( "publish-threshold: yes" )
      val algos = Map( "dbscan" -> algConfig, "x" -> algConfig, "y" -> algConfig )
      when( plan.algorithms ) thenReturn algos
      algos foreach { case ( a, c ) => c.getBoolean( s"publish-threshold" ) mustBe true }
      // batchSize = 2 forces multiple pickle batches on the wire.
      val graphite2 = TestActorRef[GraphitePublisher](
        Props(
          new GraphitePublisher with GraphitePublisher.PublishProvider {
            import scala.concurrent.duration._
            override val maxOutstanding: Int = 1000000
            override val separation: FiniteDuration = 1.second
            override def initializeMetrics(): Unit = {}
            override val batchSize: Int = 2
            override def destinationAddress: InetSocketAddress = f.address
            override def createSocket( address: InetSocketAddress ): Socket = {
              openCount.incrementAndGet()
              f.socket
            }
            override def publishingTopic( p: AnalysisPlan, t: Topic ): Topic = "spotlight.outlier." + super.publishingTopic( p, t )
          }
        )
      )
      val controlBoundaries = Map(
        "x" -> Seq(
          ThresholdBoundary.fromExpectedAndDistance( dp1.timestamp, 1, 0.1 ),
          ThresholdBoundary.fromExpectedAndDistance( dp2.timestamp, 1, 0.25 ),
          ThresholdBoundary.fromExpectedAndDistance( dp3.timestamp, 1, 0.3 )
        ),
        "y" -> Seq(
          ThresholdBoundary.fromExpectedAndDistance( dp1.timestamp, 3, 0.3 ),
          ThresholdBoundary.fromExpectedAndDistance( dp2.timestamp, 3, 0.5 ),
          ThresholdBoundary.fromExpectedAndDistance( dp3.timestamp, 3, 0.7 )
        )
      )
      val outliers = SeriesOutliers(
        algorithms = algos.keySet,
        source = TimeSeries( "foo", Seq( dp1, dp2, dp3 ) ),
        outliers = Seq( dp1 ),
        plan = plan,
        thresholdBoundaries = controlBoundaries
      )
      graphite2.receive( Publish( outliers ) )
      graphite2.receive( Flush, senderProbe.ref )
      senderProbe.expectMsg( GraphitePublisher.Flushed( true ) )
      unpickleOutput() mustBe (
        "spotlight.outlier.plan.foo 1.0 100\nspotlight.outlier.plan.foo 0.0 117\nspotlight.outlier.plan.foo 0.0 9821\n" +
          "spotlight.outlier.plan.x.floor.foo 0.9 100\nspotlight.outlier.plan.x.expected.foo 1.0 100\nspotlight.outlier.plan.x.ceiling.foo 1.1 100\n" +
          "spotlight.outlier.plan.x.floor.foo 0.75 117\nspotlight.outlier.plan.x.expected.foo 1.0 117\nspotlight.outlier.plan.x.ceiling.foo 1.25 117\n" +
          "spotlight.outlier.plan.x.floor.foo 0.7 9821\nspotlight.outlier.plan.x.expected.foo 1.0 9821\nspotlight.outlier.plan.x.ceiling.foo 1.3 9821\n" +
          "spotlight.outlier.plan.y.floor.foo 2.7 100\nspotlight.outlier.plan.y.expected.foo 3.0 100\nspotlight.outlier.plan.y.ceiling.foo 3.3 100\n" +
          "spotlight.outlier.plan.y.floor.foo 2.5 117\nspotlight.outlier.plan.y.expected.foo 3.0 117\nspotlight.outlier.plan.y.ceiling.foo 3.5 117\n" +
          "spotlight.outlier.plan.y.floor.foo 2.3 9821\nspotlight.outlier.plan.y.expected.foo 3.0 9821\nspotlight.outlier.plan.y.ceiling.foo 3.7 9821\n"
      )
    }
  }
}
| dmrolfs/lineup | core/src/test/scala/spotlight/publish/GraphitePublisherSpec.scala | Scala | mit | 14,055 |
package ru.zconstz.shortener.service
import akka.actor.Actor
import scala.slick.driver.PostgresDriver.simple._
import ru.zconstz.shortener.db.{DbHolder, DataBaseSchema}
import DataBaseSchema._
import DbHolder._
import ru.zconstz.shortener.http.HttpEntities.{TokenGetResponse, TokenGetRequest}
/** Actor answering token lookups: resolves a user's token from the database
  * and replies with Option[TokenGetResponse] (None when the user id is unknown). */
class TokenActor extends Actor {
  def receive = {
    case TokenGetRequest(userId, secret) => {
      // Synchronous Slick session; the reply is computed before `sender` is
      // used, so there is no closed-over-sender hazard here.
      // NOTE(review): `secret` is never validated — confirm whether the token
      // should only be returned when the secret matches.
      sender ! dataBase.withSession { implicit session:Session =>
        Query(Users).where(_.id === userId).firstOption.map(user => TokenGetResponse(user._2))
      }
    }
  }
}
| konstantin-zlobin/url-shortener | src/main/scala/ru/zconstz/shortener/service/TokenActor.scala | Scala | apache-2.0 | 573 |
package pl.writeonly.son2.path.glue
import pl.writeonly.son2.funs.glue.CreatorConverterOr
/** Converter creator wired with the standard path notation configuration. */
class CreatorConverterOrPath
  extends CreatorConverterOr(
    new ChainNotationConfigPath().get,
    new ChainNotationRWTPath()
  )
/** Converter creator wired with the "main" path notation configuration. */
class CreatorConverterOrPathMain
  extends CreatorConverterOr(
    new ChainNotationConfigPathMain().get,
    new ChainNotationRWTPath()
  )
| writeonly/son2 | scallions-impl/scallions-path/src/main/scala/pl/writeonly/son2/path/glue/CreatorConverterOrPath.scala | Scala | apache-2.0 | 383 |
package org.infinispan.demo
import scala.io.Source
import org.infinispan.client.hotrod.RemoteCacheManager
import org.infinispan.client.hotrod.configuration.ConfigurationBuilder
import org.infinispan.client.hotrod.impl.ConfigurationProperties
/** Uploads each line of [[TextSearch.filePath]] into the default remote
  * Infinispan cache, keyed by its 1-based line number. */
object IspnFileUpload {
  /** Hot Rod server address. */
  val ISPN_IP = "127.0.0.1";

  def main(args: Array[String]) {
    val builder = new ConfigurationBuilder()
    builder.addServer().host(ISPN_IP).port(ConfigurationProperties.DEFAULT_HOTROD_PORT)
    val cacheManager = new RemoteCacheManager(builder.build())
    try {
      val cache = cacheManager.getCache[String, String]()
      val input = Source.fromFile(TextSearch.filePath)
      try {
        var count = 0
        input.getLines().foreach { line =>
          count += 1
          cache.put(count.toString, line)
        }
        println("Uploaded lines: " + count)
      } finally {
        input.close()
      }
    } finally {
      // Fix: the RemoteCacheManager was previously never stopped, leaking its
      // connection pool and threads.
      cacheManager.stop()
    }
  }
}
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.consumers
import monix.execution.Callback
import monix.execution.Ack.Stop
import monix.execution.{Ack, Scheduler}
import monix.execution.cancelables.AssignableCancelable
import scala.util.control.NonFatal
import monix.reactive.Consumer
import monix.reactive.observers.Subscriber
import scala.concurrent.Future
/** Implementation for [[monix.reactive.Consumer.contramap]]. */
private[reactive]
final class ContraMapConsumer[In2, -In, +R](source: Consumer[In, R], f: In2 => In)
  extends Consumer[In2, R] {

  // Wraps the subscriber created by `source`, mapping each incoming In2
  // through `f` before pushing it downstream.
  def createSubscriber(cb: Callback[Throwable, R], s: Scheduler): (Subscriber[In2], AssignableCancelable) = {
    val (out, c) = source.createSubscriber(cb, s)

    val out2 = new Subscriber[In2] {
      implicit val scheduler = out.scheduler
      // For protecting the contract
      private[this] var isDone = false

      def onError(ex: Throwable): Unit =
        if (!isDone) { isDone = true; out.onError(ex) }

      def onComplete(): Unit =
        if (!isDone) { isDone = true; out.onComplete() }

      def onNext(elem2: In2): Future[Ack] = {
        // Protects calls to user code from within the operator and
        // stream the error downstream if it happens, but if the
        // error happens because of calls to `onNext` or other
        // protocol calls, then the behavior should be undefined.
        var streamErrors = true
        try {
          val elem = f(elem2)
          streamErrors = false
          out.onNext(elem)
        } catch {
          case NonFatal(ex) if streamErrors =>
            onError(ex)
            Stop
        }
      }
    }

    (out2, c)
  }
}
| Wogan/monix | monix-reactive/shared/src/main/scala/monix/reactive/internal/consumers/ContraMapConsumer.scala | Scala | apache-2.0 | 2,315 |
package dk.bayes.dsl.variable.gaussian.univariate
import dk.bayes.dsl.InferEngine
import dk.bayes.dsl.factor.DoubleFactor
import dk.bayes.math.gaussian.Gaussian
import dk.bayes.dsl.epnaivebayes.inferPosterior
/** EP naive-Bayes inference for a univariate Gaussian variable whose children
  * are all leaf likelihood factors. */
object inferUnivariateGaussianEPNaiveBayes extends InferEngine[UnivariateGaussian, UnivariateGaussian] {

  /** Supported iff `x` is a root with at least one child and every child is a
    * childless [[DoubleFactor]]. */
  def isSupported(x: UnivariateGaussian): Boolean = {
    val children = x.getChildren
    !x.hasParents &&
      children.nonEmpty &&
      children.forall(c => !c.hasChildren) &&
      children.forall(c => c.isInstanceOf[DoubleFactor[_, _]])
  }

  /** Runs EP with `x` as the prior and its children as likelihood factors,
    * returning the posterior as a fresh UnivariateGaussian. */
  def infer(x: UnivariateGaussian): UnivariateGaussian = {
    val likelihoods = x.getChildren.map(_.asInstanceOf[DoubleFactor[Gaussian, _]])
    val result = inferPosterior(x, likelihoods)
    UnivariateGaussian(result.m, result.v)
  }
}
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.