code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
package com.ponkotuy.intercept
import com.netaporter.uri.Uri
import org.jboss.netty.handler.codec.http.{HttpResponse, HttpRequest}
/**
 * Callback interface invoked for each intercepted HTTP exchange.
 *
 * @author ponkotuy
 * Date: 14/02/18.
 */
trait Intercepter {
  /** Receives one completed request/response pair together with the request URI. */
  def input(req: HttpRequest, res: HttpResponse, uri: Uri): Unit
}
| nekoworkshop/MyFleetGirls | client/src/main/scala/com/ponkotuy/intercept/Intercepter.scala | Scala | mit | 270 |
package io.buoyant.namerd.storage.etcd
import com.twitter.finagle.Path
import io.buoyant.config.Parser
import io.buoyant.config.types.Port
import io.buoyant.namerd.DtabStoreConfig
import org.scalatest.{FunSuite, OptionValues}
/** Unit tests for the etcd dtab-store configuration parsing. */
class EtcdConfigTest extends FunSuite with OptionValues {

  // Smoke test: building the store from a minimal config must not throw.
  test("sanity") {
    val store = EtcdConfig(None, None, Some(Path.read("/foo/bar"))).mkDtabStore
  }

  // Round-trips a YAML config through the Jackson mapper and checks each field.
  test("parse config") {
    val yaml =
      """|kind: io.l5d.etcd
         |experimental: true
         |pathPrefix: /foo/bar
         |host: etcd.dentist
         |port: 80
      """.stripMargin

    val mapper = Parser.objectMapper(yaml, Iterable(Seq(EtcdDtabStoreInitializer)))
    val etcd = mapper.readValue[DtabStoreConfig](yaml).asInstanceOf[EtcdConfig]
    assert(etcd.host.value == "etcd.dentist")
    assert(etcd.port.value == Port(80))
    assert(etcd.pathPrefix == Some(Path.read("/foo/bar")))
  }
}
| hhtpcd/linkerd | namerd/storage/etcd/src/test/scala/io/buoyant/namerd/storage/etcd/EtcdConfigTest.scala | Scala | apache-2.0 | 904 |
package io.fintrospect.formats
import org.scalatest.{FunSpec, Matchers}
/** Round-trip serialisation tests for MsgPackMsg. */
class MsgPackMsgTest extends FunSpec with Matchers {

  describe("MsgPackMsg") {
    it("can round trip to bytes and back again") {
      // Letter and StreetAddress are test fixtures defined elsewhere in this module.
      val letter = Letter(StreetAddress("bob"), StreetAddress("jim"), "rita")
      MsgPackMsg(letter).as[Letter] shouldBe letter
    }
  }
}
| daviddenton/fintrospect | msgpack/src/test/scala/io/fintrospect/formats/MsgPackMsgTest.scala | Scala | apache-2.0 | 349 |
package com.geishatokyo.diffsql.ast
import com.geishatokyo.diffsql.Name
import scala.language.implicitConversions
/**
* Created by takeshita on 14/02/14.
*/
/**
 * A column data type: a type name plus its optional integer arguments
 * (e.g. VARCHAR(255) or DECIMAL(10,2)).
 *
 * @param name the type name
 * @param args optional numeric arguments; empty when the type takes none
 */
case class DataType(name: Name, args: List[Int] = Nil) {

  /** First type argument, read as a length. Fails if no arguments were given. */
  def length: Int = args.head

  /** First two type arguments, read as (precision, scale). */
  def precision: Tuple2[Int, Int] = (args(0), args(1))

  /** Equality under the supplied comparison strategy; a null operand is never equal. */
  def ===(dataType: DataType)(implicit eq: DataTypeEquality) =
    dataType != null && eq.equal(this, dataType)

  /** Negation of the strategy-based equality above. */
  def !==(dataType: DataType)(implicit eq: DataTypeEquality) =
    !(this === dataType)

  /** Renders the type the way it appears in DDL, e.g. "INT" or "DECIMAL(10,2)". */
  override def toString: String =
    if (args.isEmpty) name.name
    else name.name + args.mkString("(", ",", ")")
}
object DataType {
  /**
   * Implicitly lifts a bare type-name string into a [[DataType]] with no arguments.
   * Implicit definitions should carry an explicit result type: inference here is
   * fragile and triggers compiler warnings under -Xlint.
   */
  implicit def fromString(simpleType: String): DataType = DataType(simpleType)
}
/**
 * Strategy for deciding whether two [[DataType]]s are equivalent.
 * Both operands are passed through [[normalize]] before being compared.
 */
trait DataTypeEquality{
  /** Hook for canonicalising a type before comparison; identity by default. */
  def normalize(d: DataType): DataType = d

  /** Normalizes both operands, then delegates to [[managedEqual]]. */
  def equal(d1 : DataType , d2 : DataType) : Boolean = {
    managedEqual( normalize(d1),normalize(d2))
  }

  /**
   * Normalized data types are passed.
   * @param d1
   * @param d2
   * @return
   */
  def managedEqual(d1 : DataType,d2 : DataType) : Boolean
}
object DataTypeEquality{
  /** Compares types by case-insensitive name only, ignoring any arguments. */
  trait OnlyName extends DataTypeEquality{
    def managedEqual(d1: DataType, d2: DataType) = {
      // Use the paren-less accessor form on both sides for consistency
      // (the original mixed `toLowerCase()` and `toLowerCase`).
      d1.name.toLowerCase == d2.name.toLowerCase
    }
  }
  object OnlyName extends OnlyName

  /**
   * Compares types by case-insensitive name and by arguments, where an empty
   * argument list on either side is treated as a wildcard match.
   */
  trait NameAndLength extends DataTypeEquality{
    def managedEqual(d1: DataType, d2: DataType): Boolean = {
      d1.name.toLowerCase == d2.name.toLowerCase && {
        d1.args == d2.args ||
        d1.args.isEmpty || d2.args.isEmpty
      }
    }
  }
  object NameAndLength extends NameAndLength
} | geishatokyo/diff-sql-table | parser/src/main/scala/com/geishatokyo/diffsql/ast/DataTypes.scala | Scala | mit | 1,629 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package scalaguide.http.scalabodyparsers {
import akka.stream.ActorMaterializer
import play.api.http.Writeable
import play.api.libs.json.{Json, JsValue}
import play.api.mvc._
import play.api.test._
import play.api.test.Helpers._
import org.specs2.mutable.Specification
import org.junit.runner.RunWith
import org.specs2.runner.JUnitRunner
import scala.concurrent.Future
import java.io.File
import org.specs2.execute.AsResult
@RunWith(classOf[JUnitRunner])
// Documentation-sample spec: the //#… markers delimit snippets that are
// extracted verbatim into the Play docs, so no comments are added inside them.
class ScalaBodyParsersSpec extends Specification with Controller {

  // Shared fixture: a POST request whose body is a small JSON object.
  def helloRequest = FakeRequest("POST", "/").withJsonBody(Json.obj("name" -> "foo"))

  "A scala body parser" should {

    "parse request as json" in {
      import scala.concurrent.ExecutionContext.Implicits.global
      //#access-json-body
      def save = Action { request =>
        val body: AnyContent = request.body
        val jsonBody: Option[JsValue] = body.asJson
        // Expecting json body
        jsonBody.map { json =>
          Ok("Got: " + (json \\ "name").as[String])
        }.getOrElse {
          BadRequest("Expecting application/json request body")
        }
      }
      //#access-json-body
      testAction(save, helloRequest)
    }

    "body parser json" in {
      //#body-parser-json
      def save = Action(parse.json) { request =>
        Ok("Got: " + (request.body \\ "name").as[String])
      }
      //#body-parser-json
      testAction(save, helloRequest)
    }

    "body parser tolerantJson" in {
      //#body-parser-tolerantJson
      def save = Action(parse.tolerantJson) { request =>
        Ok("Got: " + (request.body \\ "name").as[String])
      }
      //#body-parser-tolerantJson
      testAction(save, helloRequest)
    }

    "body parser file" in {
      //#body-parser-file
      def save = Action(parse.file(to = new File("/tmp/upload"))) { request =>
        Ok("Saved the request content to " + request.body)
      }
      //#body-parser-file
      testAction(save, helloRequest.withSession("username" -> "player"))
    }

    "body parser combining" in {
      // Reuses the session-driven parser defined in the `full` package below.
      val save = scalaguide.http.scalabodyparsers.full.Application.save
      testAction(save, helloRequest.withSession("username" -> "player"))
    }

    "body parser limit text" in {
      val text = "hello"
      //#body-parser-limit-text
      // Accept only 10KB of data.
      def save = Action(parse.text(maxLength = 1024 * 10)) { request =>
        Ok("Got: " + text)
      }
      //#body-parser-limit-text
      testAction(save, FakeRequest("POST", "/").withTextBody("foo"))
    }

    "body parser limit file" in {
      running() { app =>
        implicit val mat = ActorMaterializer()(app.actorSystem)
        val storeInUserFile = scalaguide.http.scalabodyparsers.full.Application.storeInUserFile
        //#body-parser-limit-file
        // Accept only 10KB of data.
        def save = Action(parse.maxLength(1024 * 10, storeInUserFile)) { request =>
          Ok("Saved the request content to " + request.body)
        }
        //#body-parser-limit-file
        val result = call(save, helloRequest.withSession("username" -> "player"))
        status(result) must_== OK
      }
    }

    // Compiles-only demonstration of streaming a request body straight into a
    // WS call; the trailing `ok` is the spec's actual assertion.
    "forward the body" in new WithApplication() {
      //#forward-body
      import javax.inject._
      import play.api.mvc._
      import play.api.libs.streams._
      import play.api.libs.ws._
      import scala.concurrent.ExecutionContext
      import akka.util.ByteString
      class MyController @Inject() (ws: WSClient)(implicit ec: ExecutionContext) {
        def forward(request: WSRequest): BodyParser[WSResponse] = BodyParser { req =>
          Accumulator.source[ByteString].mapFuture { source =>
            request
              .withBody(StreamedBody(source))
              .execute()
              .map(Right.apply)
          }
        }
        def myAction = Action(forward(ws.url("https://example.com"))) { req =>
          Ok("Uploaded")
        }
      }
      //#forward-body
      ok
    }

    "parse the body as csv" in new WithApplication() {
      import scala.concurrent.ExecutionContext.Implicits.global
      //#csv
      import play.api.mvc._
      import play.api.libs.streams._
      import akka.util.ByteString
      import akka.stream.scaladsl._
      val csv: BodyParser[Seq[Seq[String]]] = BodyParser { req =>
        // A flow that splits the stream into CSV lines
        val sink: Sink[ByteString, Future[Seq[Seq[String]]]] = Flow[ByteString]
          // We split by the new line character, allowing a maximum of 1000 characters per line
          .via(Framing.delimiter(ByteString("\\n"), 1000, allowTruncation = true))
          // Turn each line to a String and split it by commas
          .map(_.utf8String.trim.split(",").toSeq)
          // Now we fold it into a list
          .toMat(Sink.fold(Seq.empty[Seq[String]])(_ :+ _))(Keep.right)
        // Convert the body to a Right either
        Accumulator(sink).map(Right.apply)
      }
      //#csv
      testAction(Action(csv)(req => Ok(req.body(1)(2))), FakeRequest("POST", "/").withTextBody("1,2\\n3,4,foo\\n5,6"))
    }
  }

  // Runs the action against the request and only checks the status code.
  def testAction[A: Writeable](action: EssentialAction, request: => FakeRequest[A], expectedResponse: Int = OK) = {
    assertAction(action, request, expectedResponse) { result => success }
  }

  // Boots a throwaway application, calls the action, asserts the status, then
  // applies the extra assertions to the resulting Future[Result].
  def assertAction[A: Writeable, T: AsResult](action: EssentialAction, request: => FakeRequest[A], expectedResponse: Int = OK)(assertions: Future[Result] => T) = {
    running() { app =>
      implicit val mat = ActorMaterializer()(app.actorSystem)
      val result = call(action, request)
      status(result) must_== expectedResponse
      assertions(result)
    }
  }
}
package scalaguide.http.scalabodyparsers.full {
import akka.util.ByteString
import play.api.libs.streams.Accumulator
import play.api.mvc._
// Companion sample code referenced from the spec above; the //#… markers
// delimit a snippet extracted into the Play docs.
object Application extends Controller {

  // Helper: a body parser that writes the raw request body into the given file.
  def file(to: File) = parse.file(to)

  //#body-parser-combining
  val storeInUserFile = parse.using { request =>
    request.session.get("username").map { user =>
      file(to = new File("/tmp/" + user + ".upload"))
    }.getOrElse {
      sys.error("You don't have the right to upload here")
    }
  }

  def save = Action(storeInUserFile) { request =>
    Ok("Saved the request content to " + request.body)
  }
  //#body-parser-combining
}
// Simplified trait definitions shown verbatim in the documentation; they
// mirror (but do not replace) Play's real abstractions.
object CodeShow {
  //#action
  trait Action[A] extends (Request[A] => Result) {
    def parser: BodyParser[A]
  }
  //#action

  //#request
  trait Request[+A] extends RequestHeader {
    def body: A
  }
  //#request

  //#body-parser
  trait BodyParser[+A] extends (RequestHeader => Accumulator[ByteString, Either[Result, A]])
  //#body-parser
}
}
}
| aradchykov/playframework | documentation/manual/working/scalaGuide/main/http/code/ScalaBodyParsers.scala | Scala | apache-2.0 | 7,096 |
package org.veripacks.reader.dependencies
import org.objectweb.asm.{Opcodes, ClassVisitor}
/**
 * ASM class visitor that captures the source file name recorded in a class
 * file's SourceFile attribute.
 */
class SourceFileNameVisitor extends ClassVisitor(Opcodes.ASM5) {
  // Remains null until visitSource is called (the attribute is optional in class files).
  var sourceFileName: String = _

  override def visitSource(source: String, debug: String) {
    sourceFileName = source
  }
}
| adamw/veripacks | verifier/src/main/scala/org/veripacks/reader/dependencies/SourceFileNameVisitor.scala | Scala | apache-2.0 | 286 |
/*
* Copyright (C) 2007-2008 Artima, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Example code from:
*
* Programming in Scala (First Edition, Version 6)
* by Martin Odersky, Lex Spoon, Bill Venners
*
* http://booksites.artima.com/programming_in_scala
*/
package org.stairwaybook.simulation
//
// Same code as Simulator1, but built from code extracted from small
// snippets within chapter instead of from long listings.
//
// Discrete-event simulation engine plus digital-circuit components built on
// top of it, assembled into a single namespace object.
object Simulator2 {

  /**
   * Gate-level circuit simulation: wires plus inverter/and/or gates.
   * Concrete subclasses supply the gate propagation delays.
   */
  abstract class BasicCircuitSimulation extends Simulation {

    def InverterDelay: Int
    def AndGateDelay: Int
    def OrGateDelay: Int

    /** A signal carrier; re-runs registered actions whenever its value changes. */
    class Wire {

      private var sigVal = false
      private var actions: List[Action] = List()

      def getSignal = sigVal

      def setSignal(s: Boolean) =
        if (s != sigVal) {
          sigVal = s
          // Notify every attached gate action so outputs are rescheduled.
          actions foreach (_ ())
        }

      def addAction(a: Action) = {
        actions = a :: actions
        a() // run once immediately so outputs reflect the current input state
      }
    }

    /** Wires an inverter from input to output with InverterDelay latency. */
    def inverter(input: Wire, output: Wire) = {
      def invertAction() {
        val inputSig = input.getSignal
        afterDelay(InverterDelay) {
          output setSignal !inputSig
        }
      }
      input addAction invertAction
    }

    /** Wires an AND gate driving output from a1 and a2. */
    def andGate(a1: Wire, a2: Wire, output: Wire) = {
      def andAction() = {
        val a1Sig = a1.getSignal
        val a2Sig = a2.getSignal
        afterDelay(AndGateDelay) {
          output setSignal (a1Sig & a2Sig)
        }
      }
      a1 addAction andAction
      a2 addAction andAction
    }

    /** Wires an OR gate driving output from o1 and o2. */
    def orGate(o1: Wire, o2: Wire, output: Wire) {
      def orAction() {
        val o1Sig = o1.getSignal
        val o2Sig = o2.getSignal
        afterDelay(OrGateDelay) {
          output setSignal (o1Sig | o2Sig)
        }
      }
      o1 addAction orAction
      o2 addAction orAction
    }

    /** Prints the wire's value, with the current simulated time, on every change. */
    def probe(name: String, wire: Wire) {
      def probeAction() {
        println(name +" "+ currentTime +
          " new-value = "+ wire.getSignal)
      }
      wire addAction probeAction
    }
  }

  /** Minimal discrete-event simulation engine driven by a time-ordered agenda. */
  abstract class Simulation {

    type Action = () => Unit

    case class WorkItem(time: Int, action: Action)

    private var curtime: Int = 0
    def currentTime: Int = curtime

    // Pending work items, kept sorted by ascending time via insert().
    private var agenda: List[WorkItem] = List()

    private def insert(ag: List[WorkItem],
        item: WorkItem): List[WorkItem] = {
      if (ag.isEmpty || item.time < ag.head.time) item :: ag
      else ag.head :: insert(ag.tail, item)
    }

    /** Schedules `block` to run `delay` time units after the current time. */
    def afterDelay(delay: Int)(block: => Unit) {
      val item = WorkItem(currentTime + delay, () => block)
      agenda = insert(agenda, item)
    }

    private def next() {
      (agenda: @unchecked) match {
        case item :: rest =>
          agenda = rest
          curtime = item.time // advance the clock to the item's scheduled time
          item.action()
      }
    }

    /** Runs until the agenda is empty, printing a banner at start time. */
    def run() {
      afterDelay(0) {
        println("*** simulation started, time = "+
          currentTime +" ***")
      }
      while (!agenda.isEmpty) next()
    }
  }

  /** Adds half- and full-adder circuits on top of the basic gates. */
  abstract class CircuitSimulation
      extends BasicCircuitSimulation {

    /** Half adder: s = a XOR b, c = a AND b, built from or/and/inverter gates. */
    def halfAdder(a: Wire, b: Wire, s: Wire, c: Wire) {
      val d, e = new Wire
      orGate(a, b, d)
      andGate(a, b, c)
      inverter(c, e)
      andGate(d, e, s)
    }

    /** One-bit full adder with carry-in (cin) and carry-out (cout). */
    def fullAdder(a: Wire, b: Wire, cin: Wire,
        sum: Wire, cout: Wire) {
      val s, c1, c2 = new Wire
      halfAdder(a, cin, s, c1)
      halfAdder(b, s, sum, c2)
      orGate(c1, c2, cout)
    }
  }
}
/** Concrete half-adder demo: probes the sum and carry wires and runs twice. */
object MySimulation2 extends Simulator2.CircuitSimulation {
  // Gate propagation delays, in simulated time units.
  def InverterDelay = 1
  def AndGateDelay = 3
  def OrGateDelay = 5

  def main(args: Array[String]) {
    val input1, input2, sum, carry = new Wire
    probe("sum", sum)
    probe("carry", carry)
    halfAdder(input1, input2, sum, carry)
    // Drive the first input high and run the agenda to quiescence…
    input1 setSignal true
    run()
    // …then drive the second input high and run again.
    input2 setSignal true
    run()
  }
}
| peachyy/scalastu | stateful-objects/Simulator2.scala | Scala | apache-2.0 | 4,394 |
package fr.univ_lille.cristal.emeraude.chasqui.core.synchronization
import akka.actor.{Actor, ActorRef, ActorSystem, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider, Props}
import akka.pattern.ask
import akka.util.Timeout
import fr.univ_lille.cristal.emeraude.chasqui.core.Node._
import fr.univ_lille.cristal.emeraude.chasqui.core._
import fr.univ_lille.cristal.emeraude.chasqui.core.synchronization.GlobalSynchronizerWithLocalQueueStrategy.{NotifyFinishedTime, RegisterNode}
import scala.collection.mutable
import scala.concurrent.{ExecutionContext, Future}
import scala.util._
/** Factory for building a SynchronizerStrategy bound to a given actor system. */
abstract class SynchronizerStrategyCompanion {
  def buildFor(system: ActorSystem): SynchronizerStrategy
}
/**
 * Factory for [[GlobalSynchronizerWithLocalQueueStrategy]] plus the message
 * protocol understood by its singleton synchronizer actor.
 */
object GlobalSynchronizerWithLocalQueueStrategy extends SynchronizerStrategyCompanion {
  override def buildFor(system: ActorSystem): SynchronizerStrategy = new GlobalSynchronizerWithLocalQueueStrategy(system)

  /** Registers a node actor with the global synchronizer. */
  case class RegisterNode(node: ActorRef)
  /** Reports that a node finished a quantum, with its local queue/traffic stats. */
  case class NotifyFinishedTime(node: ActorRef, finishedQuantum: Long, messageQueueSize: Int, messageDelta: Int, incomingQuantum: Option[Long])
}
/**
 * SynchronizerStrategy that queues future-timestamped messages locally while
 * delegating quantum-advance decisions to a per-actor-system singleton actor.
 */
class GlobalSynchronizerWithLocalQueueStrategy(system: ActorSystem) extends SynchronizerStrategy {

  // NOTE(review): these counters are written below but never read or reset in
  // this class — possibly leftover instrumentation; confirm before removing.
  private var sentMessagesInQuantum = 0
  private var receivedMessagesInQuantum = 0

  // Pending messages ordered by ascending timestamp (earliest dequeued first).
  private val messageQueue = scala.collection.mutable.PriorityQueue[Message]()(Ordering.fromLessThan((s1, s2) => s1.getTimestamp > s2.getTimestamp))

  def registerNode(node: Node): Unit = {
    this.getSynchronizerActor() ! RegisterNode(node.getActorRef)
  }

  def notifyFinishedTime(nodeActorRef: ActorRef, node: Node, t: Long, messageDelta: Int): Unit = {
    this.getSynchronizerActor() ! NotifyFinishedTime(nodeActorRef, t, this.messageQueue.size, messageDelta, node.getRealIncomingQuantum)
  }

  /** Looks up the singleton synchronizer actor through the Akka extension. */
  def getSynchronizerActor(): ActorRef = {
    GlobalSynchronizerWithLocalQueueStrategyAccessor(system).instance
  }

  override def handleSynchronizationMessage(message: SynchronizationMessage, sender: ActorRef, receiver: Node, t: Long): Unit = {
    //Nothing
  }

  override def sendMessage(senderNode: Node, receiverActor: ActorRef, messageTimestamp: Long, message: Any): Unit = {
    this.sentMessagesInQuantum += 1
    receiverActor ! ScheduleMessage(message, messageTimestamp, senderNode.getActorRef)
  }

  override def scheduleMessage(receiverNode: Node, senderActor: ActorRef, messageTimestamp: Long, message: Any): Unit = {
    this.receivedMessagesInQuantum += 1
    if (messageTimestamp < receiverNode.getCurrentSimulationTime) {
      //The message is in the past.
      //This is a Causality error unless it is a SynchronizationMessages
      if (!message.isInstanceOf[SynchronizationMessage]){
        receiverNode.getCausalityErrorStrategy.handleCausalityError(messageTimestamp, receiverNode.getCurrentSimulationTime, receiverNode, senderActor, message)
      }
      return
    }
    if (receiverNode.getCurrentSimulationTime == messageTimestamp){
      // Message for the current quantum: deliver immediately.
      receiverNode.handleIncomingMessage(message, senderActor)
    } else {
      // Message for a future quantum: hold it in the local queue.
      this.queueMessage(senderActor, messageTimestamp, message)
    }
    receiverNode.notifyFinishedQuantum()
  }

  private def queueMessage(senderActor: ActorRef, messageTimestamp: Long, message: Any) = {
    messageQueue += new Message(message, messageTimestamp, senderActor)
  }

  override def getMessageQueue: scala.collection.mutable.PriorityQueue[Message] = this.messageQueue
}
/**
 * Base actor for the global synchronizer: tracks registered node actors and
 * routes the synchronization protocol messages to the subclass hook.
 */
abstract class AbstractGlobalSynchronizerWithLocalQueueSingletonActor extends Actor {

  val nodes = new collection.mutable.HashSet[ActorRef]()

  def registerNode(node: ActorRef): Unit = {
    nodes += node
  }

  /** Invoked when a node reports it finished the given quantum. */
  def notifyFinishedTime(nodeActorRef: ActorRef, t: Long, queueSize: Int, messageDelta: Int, incomingQuantum: Option[Long]): Unit

  override def receive: Receive = {
    case RegisterNode(node) => this.registerNode(node)
    case NotifyFinishedTime(node: ActorRef, finishedQuantum: Long, messageQueueSize: Int, messageDelta: Int, incomingQuantum: Option[Long]) =>
      this.notifyFinishedTime(node, finishedQuantum, messageQueueSize, messageDelta, incomingQuantum)
  }
}
/**
 * Singleton synchronizer actor. Waits until every registered node has finished
 * the current quantum and no in-quantum messages remain unprocessed, then
 * advances all nodes to the smallest reported incoming quantum and tells them
 * to process it.
 */
class GlobalSynchronizerWithLocalQueueSingletonActor extends AbstractGlobalSynchronizerWithLocalQueueSingletonActor {
  import scala.concurrent.duration._

  implicit val ec = ExecutionContext.Implicits.global
  implicit lazy val timeout = Timeout(5 seconds)

  // Per-node bookkeeping for the quantum currently being synchronized.
  val nodeMessageDeltas = new mutable.HashMap[ActorRef, Int]()
  val nodeIncomingQuantums = new mutable.HashMap[ActorRef, Option[Long]]()
  val nodesFinishedThisQuantum = new collection.mutable.HashSet[ActorRef]()
  var messagesToBeProcessedFollowingQuantums: Int = 0

  /** True when every registered node has reported in for this quantum. */
  protected def allNodesAreReady(): Boolean = {
    nodes.forall(this.nodesFinishedThisQuantum.contains)
  }

  /** Smallest incoming quantum reported by any node, or None if none reported one. */
  def getNextQuantum(): Option[Long] = {
    nodeIncomingQuantums.values.foldLeft[Option[Long]](None)( (accum, each) =>
      if (accum.isEmpty) {
        each
      } else if (each.isEmpty) {
        accum
      } else {
        Some(accum.get.min(each.get))
      }
    )
  }

  // Sum of per-node send/receive deltas; zero means every message sent during
  // this quantum has been received somewhere.
  def numberOfPendingMessagesInQuantum(): Int = nodeMessageDeltas.values.sum

  def setMessageDelta(nodeActorRef: ActorRef, messageDelta: Int) = {
    nodeMessageDeltas(nodeActorRef) = messageDelta
  }

  def setIncomingQuantum(nodeActorRef: ActorRef, incomingQuantum: Option[Long]) = {
    nodeIncomingQuantums(nodeActorRef) = incomingQuantum
  }

  def notifyFinishedTime(nodeActorRef: ActorRef, t: Long, queueSize: Int, messageDelta: Int, incomingQuantum: Option[Long]): Unit = {
    this.nodesFinishedThisQuantum += nodeActorRef
    this.setMessageDelta(nodeActorRef, messageDelta)
    this.setIncomingQuantum(nodeActorRef, incomingQuantum)
    this.messagesToBeProcessedFollowingQuantums += queueSize

    val allNodesReady = this.allNodesAreReady()
    val numberOfPendingMessagesInQuantum = this.numberOfPendingMessagesInQuantum()
    val existPendingMessages = this.messagesToBeProcessedFollowingQuantums != 0
    if (allNodesReady && numberOfPendingMessagesInQuantum == 0 && existPendingMessages) {
      val maybeNextQuantum = this.getNextQuantum()
      if (maybeNextQuantum.isDefined) {
        // mapTo instead of asInstanceOf: asInstanceOf[Future[Int]] on the
        // Future[Any] from ask is an unchecked erased cast; mapTo fails the
        // future with a clear ClassCastException on an unexpected reply type.
        val sequence = this.nodes.map(node => (node ? AdvanceSimulationTime(maybeNextQuantum.get)).mapTo[Int])
        Future.sequence(sequence)
          .onComplete {
            case Success(result) => {
              //TODO: println(s"Quantum achieved: ${maybeNextQuantum.get} with: $result")
              this.nodes.foreach(n => n ! ProcessNextQuantum)
            }
            case Failure(e) => {
              // Report the cause instead of silently swallowing it.
              println(s"Error while resuming next quantum: $e")
            }
          }
        //Cleanup local state
        this.nodesFinishedThisQuantum.clear()
        this.nodeIncomingQuantums.clear()
        this.nodeMessageDeltas.clear()
        this.messagesToBeProcessedFollowingQuantums = 0
      } else {
        //Finished?
      }
    } else {
      //println(s"Not ready to advance yet at t=$t. Nodes ready: $allNodesReady, all messages in quantum processed: $numberOfPendingMessagesInQuantum, existing pending messages: $existPendingMessages")
    }
  }
}
/** Akka extension holding one synchronizer actor instance per actor system. */
class GlobalSynchronizerWithLocalQueueSingleton(system: ActorSystem, props: Props, name: String) extends Extension {
  // Created eagerly when the extension is first instantiated for the system.
  val instance: ActorRef = system.actorOf(props, name)
}
/** ExtensionId giving per-actor-system access to the singleton synchronizer actor. */
object GlobalSynchronizerWithLocalQueueStrategyAccessor extends ExtensionId[GlobalSynchronizerWithLocalQueueSingleton] with ExtensionIdProvider {
  final override def lookup = this
  final override def createExtension(system: ExtendedActorSystem) = new GlobalSynchronizerWithLocalQueueSingleton(system, instanceProps, instanceName)

  lazy val instanceProps = Props[GlobalSynchronizerWithLocalQueueSingletonActor]
  lazy val instanceName = "global-synchronizer-actor"
} | guillep/chasqui | src/main/scala/fr/univ_lille/cristal/emeraude/chasqui/core/synchronization/GlobalSynchronizerWithLocalQueueStrategy.scala | Scala | mit | 7,769 |
package pl.touk.nussknacker.ui.security.oauth2
import cats.data.NonEmptyList.one
import com.typesafe.scalalogging.LazyLogging
import io.circe.Decoder
import io.circe.generic.extras.{Configuration, ConfiguredJsonCodec, JsonKey}
import pl.touk.nussknacker.ui.security.oauth2.OAuth2ErrorHandler.{OAuth2AccessTokenRejection, OAuth2CompoundException}
import sttp.client.{NothingT, SttpBackend}
import scala.concurrent.duration.{Deadline, FiniteDuration}
import scala.concurrent.{ExecutionContext, Future}
/**
 * Basic OAuth2 service: exchanges an authorization code for tokens and fetches
 * the user profile through the configured client API. Token introspection is
 * a hook for subclasses (or a caching wrapper).
 */
class BaseOAuth2Service[
  UserInfoData,
  AuthorizationData <: OAuth2AuthorizationData
](protected val clientApi: OAuth2ClientApi[UserInfoData, AuthorizationData])
 (implicit ec: ExecutionContext) extends OAuth2Service[UserInfoData, AuthorizationData] with LazyLogging {

  /** Exchanges the authorization code for tokens, then fetches the user profile. */
  final def obtainAuthorizationAndUserInfo(authorizationCode: String, redirectUri: String): Future[(AuthorizationData, UserInfoData)] = {
    for {
      authorizationData <- obtainAuthorization(authorizationCode, redirectUri)
      userInfo <- obtainUserInfo(authorizationData)
    } yield (authorizationData, userInfo)
  }

  /** Validates the access token, then fetches the user profile and token deadline. */
  final def checkAuthorizationAndObtainUserinfo(accessToken: String): Future[(UserInfoData, Option[Deadline])] =
    for {
      deadline <- introspectAccessToken(accessToken)
      userInfo <- obtainUserInfo(accessToken)
    } yield (userInfo, deadline)

  protected def obtainAuthorization(authorizationCode: String, redirectUri: String): Future[AuthorizationData] =
    clientApi.accessTokenRequest(authorizationCode, redirectUri)

  /*
  Override this method in a subclass making use of signed tokens or an introspection endpoint
  or use a CachingOAuthService wrapper so that only previously-stored (immediately after retrieval) tokens are accepted
  or do both.
  */
  protected def introspectAccessToken(accessToken: String): Future[Option[Deadline]] = {
    // Default implementation rejects every token; subclasses must provide real validation.
    Future.failed(OAuth2CompoundException(one(OAuth2AccessTokenRejection("The access token cannot be validated"))))
  }

  /*
  OAuth2 itself is not an authentication framework. However, we can treat it so. All we need is a restricted resource
  that provides information about a user only with his valid access token.
  The following two methods shall call such a resource.
  */
  protected def obtainUserInfo(authorizationData: AuthorizationData): Future[UserInfoData] =
    obtainUserInfo(authorizationData.accessToken)

  protected def obtainUserInfo(accessToken: String): Future[UserInfoData] =
    clientApi.profileRequest(accessToken)
}
/** Standard OAuth2 token response; JSON field names follow the RFC 6749 wire format. */
@ConfiguredJsonCodec case class DefaultOAuth2AuthorizationData
(
  @JsonKey("access_token") accessToken: String,
  @JsonKey("token_type") tokenType: String,
  @JsonKey("refresh_token") refreshToken: Option[String] = None,
  // "expires_in" on the wire; decoded as a duration via the codecs mixed
  // into the companion object.
  @JsonKey("expires_in") expirationPeriod: Option[FiniteDuration] = None
) extends OAuth2AuthorizationData
// Supplies the circe Configuration and the FiniteDuration codecs required by
// the @ConfiguredJsonCodec-generated codec.
object DefaultOAuth2AuthorizationData extends EpochSecondsCodecs {
  implicit val config: Configuration = Configuration.default
}
object BaseOAuth2Service {
  /** Builds a BaseOAuth2Service using the default RFC 6749 token payload shape. */
  def apply[
    UserInfoData: Decoder
  ](configuration: OAuth2Configuration)(implicit ec: ExecutionContext, backend: SttpBackend[Future, Nothing, NothingT]): BaseOAuth2Service[UserInfoData, DefaultOAuth2AuthorizationData] =
    new BaseOAuth2Service(OAuth2ClientApi[UserInfoData, DefaultOAuth2AuthorizationData](configuration))
} | TouK/nussknacker | security/src/main/scala/pl/touk/nussknacker/ui/security/oauth2/BaseOAuth2Service.scala | Scala | apache-2.0 | 3,329 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.util
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml._
import org.apache.spark.ml.evaluation.Evaluator
import org.apache.spark.ml.feature.{Instance, LabeledPoint}
import org.apache.spark.ml.linalg.{Vector, Vectors, VectorUDT}
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.param.shared.{HasFeaturesCol, HasLabelCol, HasWeightCol}
import org.apache.spark.ml.recommendation.{ALS, ALSModel}
import org.apache.spark.ml.tree.impl.TreeTests
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
object MLTestingUtils extends SparkFunSuite {
/**
 * Verifies that a fitted model shares its uid with the estimator that produced
 * it, and that copying the model preserves its parent estimator.
 */
def checkCopyAndUids[T <: Estimator[_]](estimator: T, model: Model[_]): Unit = {
  assert(estimator.uid === model.uid, "Model uid does not match parent estimator")
  // A copy with no param overrides must still point at the same parent.
  val duplicate = model.copy(ParamMap.empty).asInstanceOf[Model[_]]
  assert(duplicate.parent == model.parent)
  assert(duplicate.parent.uid == model.parent.uid)
}
/**
 * Checks that an estimator (classifier or regressor) accepts every numeric
 * label/weight type, producing an equivalent model to the DoubleType fit, and
 * rejects string-typed label and weight columns with informative errors.
 */
def checkNumericTypes[M <: Model[M], T <: Estimator[M]](
    estimator: T,
    spark: SparkSession,
    isClassification: Boolean = true)(check: (M, M) => Unit): Unit = {
  val dfs = if (isClassification) {
    genClassifDFWithNumericLabelCol(spark)
  } else {
    genRegressionDFWithNumericLabelCol(spark)
  }
  // If the estimator supports weights, exercise the generated weight column too.
  val finalEstimator = estimator match {
    case weighted: Estimator[M] with HasWeightCol =>
      weighted.set(weighted.weightCol, "weight")
      weighted
    case _ => estimator
  }
  // Fit on the DoubleType frame as the reference, then on every other numeric type.
  val expected = finalEstimator.fit(dfs(DoubleType))
  val actuals = dfs.keys.filter(_ != DoubleType).map { t =>
    finalEstimator.fit(dfs(t))
  }
  actuals.foreach(actual => check(expected, actual))

  // A string label column must be rejected with a clear type error.
  val dfWithStringLabels = spark.createDataFrame(Seq(
    ("0", 1, Vectors.dense(0, 2, 3), 0.0)
  )).toDF("label", "weight", "features", "censor")
  val thrown = intercept[IllegalArgumentException] {
    estimator.fit(dfWithStringLabels)
  }
  assert(thrown.getMessage.contains(
    "Column label must be of type NumericType but was actually of type StringType"))

  // Likewise, weighted estimators must reject a string weight column.
  estimator match {
    case weighted: Estimator[M] with HasWeightCol =>
      val dfWithStringWeights = spark.createDataFrame(Seq(
        (0, "1", Vectors.dense(0, 2, 3), 0.0)
      )).toDF("label", "weight", "features", "censor")
      weighted.set(weighted.weightCol, "weight")
      val thrown = intercept[IllegalArgumentException] {
        weighted.fit(dfWithStringWeights)
      }
      assert(thrown.getMessage.contains(
        "Column weight must be of type NumericType but was actually of type StringType"))
    case _ =>
  }
}
/**
 * Checks that an evaluator accepts every numeric label type (producing the
 * same metric as with doubles) and rejects string labels with a clear error.
 */
def checkNumericTypes[T <: Evaluator](evaluator: T, spark: SparkSession): Unit = {
  val framesByType = genEvaluatorDFWithNumericLabelCol(spark, "label", "prediction")
  val baseline = evaluator.evaluate(framesByType(DoubleType))
  for ((numericType, frame) <- framesByType if numericType != DoubleType) {
    assert(baseline === evaluator.evaluate(frame))
  }

  // String labels must be rejected with an informative type error.
  val stringLabelDF = spark.createDataFrame(Seq(
    ("0", 0d)
  )).toDF("label", "prediction")
  val err = intercept[IllegalArgumentException] {
    evaluator.evaluate(stringLabelDF)
  }
  assert(err.getMessage.contains(
    "Column label must be of type NumericType but was actually of type StringType"))
}
/**
 * Generates one small binary-classification DataFrame per numeric type, with
 * the label column cast to that type, classification metadata attached, and a
 * random 0/1 weight column cast to the same type.
 */
def genClassifDFWithNumericLabelCol(
    spark: SparkSession,
    labelColName: String = "label",
    featuresColName: String = "features",
    weightColName: String = "weight"): Map[NumericType, DataFrame] = {
  val df = spark.createDataFrame(Seq(
    (0, Vectors.dense(0, 2, 3)),
    (1, Vectors.dense(0, 3, 1)),
    (0, Vectors.dense(0, 2, 2)),
    (1, Vectors.dense(0, 3, 9)),
    (0, Vectors.dense(0, 2, 6))
  )).toDF(labelColName, featuresColName)

  val types =
    Seq(ShortType, LongType, IntegerType, FloatType, ByteType, DoubleType, DecimalType(10, 0))
  types.map { t =>
    val castDF = df.select(col(labelColName).cast(t), col(featuresColName))
    // numClasses = 2 metadata so tree-based estimators treat the label as categorical.
    t -> TreeTests.setMetadata(castDF, 2, labelColName, featuresColName)
      .withColumn(weightColName, round(rand(seed = 42)).cast(t))
  }.toMap
}
/**
 * Generates one small regression DataFrame per numeric type, with the label
 * cast to that type, a constant-zero censor column, and a random 0/1 weight
 * column cast to the same type.
 */
def genRegressionDFWithNumericLabelCol(
    spark: SparkSession,
    labelColName: String = "label",
    weightColName: String = "weight",
    featuresColName: String = "features",
    censorColName: String = "censor"): Map[NumericType, DataFrame] = {
  val df = spark.createDataFrame(Seq(
    (1, Vectors.dense(1)),
    (2, Vectors.dense(2)),
    (3, Vectors.dense(3)),
    (4, Vectors.dense(4))
  )).toDF(labelColName, featuresColName)

  val types =
    Seq(ShortType, LongType, IntegerType, FloatType, ByteType, DoubleType, DecimalType(10, 0))
  types.map { t =>
    val castDF = df.select(col(labelColName).cast(t), col(featuresColName))
    // numClasses = 0 marks the label as continuous in the tree metadata.
    t -> TreeTests.setMetadata(castDF, 0, labelColName, featuresColName)
      .withColumn(censorColName, lit(0.0))
      .withColumn(weightColName, round(rand(seed = 42)).cast(t))
  }.toMap
}
/**
 * Generates one evaluator-input DataFrame per numeric type, each with the
 * label column cast to that type and an identical double prediction column.
 */
def genEvaluatorDFWithNumericLabelCol(
    spark: SparkSession,
    labelColName: String = "label",
    predictionColName: String = "prediction"): Map[NumericType, DataFrame] = {
  val baseDF = spark.createDataFrame(Seq(
    (0, 0d),
    (1, 1d),
    (2, 2d),
    (3, 3d),
    (4, 4d)
  )).toDF(labelColName, predictionColName)

  val numericTypes: Seq[NumericType] =
    Seq(ShortType, LongType, IntegerType, FloatType, ByteType, DoubleType, DecimalType(10, 0))
  (for (t <- numericTypes)
    yield t -> baseDF.select(col(labelColName).cast(t), col(predictionColName))).toMap
}
/**
 * Given a DataFrame, generate two output DataFrames: one having the original rows oversampled
 * an integer number of times, and one having the original rows but with a column of weights
 * proportional to the number of oversampled instances in the oversampled DataFrames.
 */
def genEquivalentOversampledAndWeightedInstances(
    data: Dataset[LabeledPoint],
    seed: Long): (Dataset[Instance], Dataset[Instance]) = {
  import data.sparkSession.implicits._
  val rng = new scala.util.Random(seed)
  // Random multiplicity in [1, 10] per row.
  val sample: () => Int = () => rng.nextInt(10) + 1
  val sampleUDF = udf(sample)
  val rawData = data.select("label", "features").withColumn("samples", sampleUDF())
  // Each row duplicated n times with unit weight.
  val overSampledData = rawData.rdd.flatMap { case Row(label: Double, features: Vector, n: Int) =>
    Iterator.fill(n)(Instance(label, 1.0, features))
  }.toDS()
  // The seed is reset before the second materialization — presumably so both
  // passes draw identical multiplicities; confirm against UDF evaluation order.
  rng.setSeed(seed)
  // The same rows once each, carrying weight n instead.
  val weightedData = rawData.rdd.map { case Row(label: Double, features: Vector, n: Int) =>
    Instance(label, n.toDouble, features)
  }.toDS()
  (overSampledData, weightedData)
}
/**
 * Helper function for testing sample weights. Tests that oversampling each point is equivalent
 * to assigning a sample weight proportional to the number of samples for each point.
 */
def testOversamplingVsWeighting[M <: Model[M], E <: Estimator[M]](
    data: Dataset[LabeledPoint],
    estimator: E with HasWeightCol,
    modelEquals: (M, M) => Unit,
    seed: Long): Unit = {
  val (overSampledData, weightedData) = genEquivalentOversampledAndWeightedInstances(
    data, seed)
  // Fit once with the weight column and once on the duplicated rows
  // (empty weightCol means unweighted); the models must agree.
  val weightedModel = estimator.set(estimator.weightCol, "weight").fit(weightedData)
  val overSampledModel = estimator.set(estimator.weightCol, "").fit(overSampledData)
  modelEquals(weightedModel, overSampledModel)
}
/**
 * Helper function for testing sample weights. Tests that injecting a large number of outliers
 * with very small sample weights does not affect fitting. The predictor should learn the true
 * model despite the outliers.
 */
def testOutliersWithSmallWeights[M <: Model[M], E <: Estimator[M]](
    data: Dataset[LabeledPoint],
    estimator: E with HasWeightCol,
    numClasses: Int,
    modelEquals: (M, M) => Unit,
    outlierRatio: Int): Unit = {
  // Use sparkSession.implicits for consistency with the other helpers in this
  // object (sqlContext is the legacy entry point kept only for compatibility).
  import data.sparkSession.implicits._
  val outlierDS = data.withColumn("weight", lit(1.0)).as[Instance].flatMap {
    case Instance(l, w, f) =>
      // For classification, flip the label to the "opposite" class;
      // for regression (numClasses == 0), negate it.
      val outlierLabel = if (numClasses == 0) -l else numClasses - l - 1
      List.fill(outlierRatio)(Instance(outlierLabel, 0.0001, f)) ++ List(Instance(l, w, f))
  }
  val trueModel = estimator.set(estimator.weightCol, "").fit(data)
  val outlierModel = estimator.set(estimator.weightCol, "weight").fit(outlierDS)
  modelEquals(trueModel, outlierModel)
}
/**
* Helper function for testing sample weights. Tests that giving constant weights to each data
* point yields the same model, regardless of the magnitude of the weight.
*/
def testArbitrarilyScaledWeights[M <: Model[M], E <: Estimator[M]](
data: Dataset[LabeledPoint],
estimator: E with HasWeightCol,
modelEquals: (M, M) => Unit): Unit = {
estimator.set(estimator.weightCol, "weight")
val models = Seq(0.001, 1.0, 1000.0).map { w =>
val df = data.withColumn("weight", lit(w))
estimator.fit(df)
}
models.sliding(2).foreach { case Seq(m1, m2) => modelEquals(m1, m2)}
}
/**
* Helper function for testing different input types for "features" column. Given a DataFrame,
* generate three output DataFrames: one having vector "features" column with float precision,
* one having double array "features" column with float precision, and one having float array
* "features" column.
*/
def generateArrayFeatureDataset(dataset: Dataset[_],
featuresColName: String = "features"): (Dataset[_], Dataset[_], Dataset[_]) = {
val toFloatVectorUDF = udf { (features: Vector) =>
Vectors.dense(features.toArray.map(_.toFloat.toDouble))}
val toDoubleArrayUDF = udf { (features: Vector) => features.toArray}
val toFloatArrayUDF = udf { (features: Vector) => features.toArray.map(_.toFloat)}
val newDataset = dataset.withColumn(featuresColName, toFloatVectorUDF(col(featuresColName)))
val newDatasetD = newDataset.withColumn(featuresColName, toDoubleArrayUDF(col(featuresColName)))
val newDatasetF = newDataset.withColumn(featuresColName, toFloatArrayUDF(col(featuresColName)))
assert(newDataset.schema(featuresColName).dataType.equals(new VectorUDT))
assert(newDatasetD.schema(featuresColName).dataType.equals(new ArrayType(DoubleType, false)))
assert(newDatasetF.schema(featuresColName).dataType.equals(new ArrayType(FloatType, false)))
(newDataset, newDatasetD, newDatasetF)
}
}
| bravo-zhang/spark | mllib/src/test/scala/org/apache/spark/ml/util/MLTestingUtils.scala | Scala | apache-2.0 | 11,403 |
/*
* Shadowsocks - A shadowsocks client for Android
* Copyright (C) 2014 <max.c.lv@gmail.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
* ___====-_ _-====___
* _--^^^#####// \\\\#####^^^--_
* _-^##########// ( ) \\\\##########^-_
* -############// |\\^^/| \\\\############-
* _/############// (@::@) \\\\############\\_
* /#############(( \\\\// ))#############\\
* -###############\\\\ (oo) //###############-
* -#################\\\\ / VV \\ //#################-
* -###################\\\\/ \\//###################-
* _#/|##########/\\######( /\\ )######/\\##########|\\#_
* |/ |#/\\#/\\#/\\/ \\#/\\##\\ | | /##/\\#/ \\/\\#/\\#/\\#| \\|
* ` |/ V V ` V \\#\\| | | |/#/ V ' V V \\| '
* ` ` ` ` / | | | | \\ ' ' ' '
* ( | | | | )
* __\\ | | | | /__
* (vvv(VVV)(VVV)vvv)
*
* HERE BE DRAGONS
*
*/
package com.github.shadowsocks
import com.biganiseed.reindeer.R
import java.util
import java.util.concurrent.TimeUnit
import android.app.Application
import com.github.shadowsocks.database.DBHelper
import com.github.shadowsocks.utils.Utils
import com.google.android.gms.analytics.GoogleAnalytics
import com.google.android.gms.common.api.ResultCallback
import com.google.android.gms.tagmanager.Container.FunctionCallMacroCallback
import com.google.android.gms.tagmanager.{ContainerHolder, TagManager}
/**
 * Application singleton: owns the DB helper and analytics tracker, and registers the
 * "getSignature" macro with Google Tag Manager at startup.
 */
class ShadowsocksApplication extends Application {
  lazy val dbHelper = new DBHelper(this)
  // Name of the custom function-call macro registered with the Tag Manager container.
  lazy val SIG_FUNC = "getSignature"
  var containerHolder: ContainerHolder = null
  lazy val tracker = GoogleAnalytics.getInstance(this).newTracker(R.xml.tracker)

  override def onCreate() {
    // The Android contract requires chaining to Application.onCreate().
    super.onCreate()
    val tm = TagManager.getInstance(this)
    val pending = tm.loadContainerPreferNonDefault("GTM-NT8WS8", R.raw.gtm_default_container)
    val callback = new ResultCallback[ContainerHolder] {
      override def onResult(holder: ContainerHolder): Unit = {
        if (!holder.getStatus.isSuccess) {
          return
        }
        containerHolder = holder
        val container = holder.getContainer
        container.registerFunctionCallMacroCallback(SIG_FUNC, new FunctionCallMacroCallback {
          override def getValue(functionName: String, parameters: util.Map[String, AnyRef]): AnyRef = {
            // Bug fix: the signature used to be computed but discarded, with null always
            // returned. Return the signature when our macro is invoked, null otherwise.
            if (functionName == SIG_FUNC) Utils.getSignature(getApplicationContext) else null
          }
        })
      }
    }
    // Give the container load up to 2 seconds before the callback fires with defaults.
    pending.setResultCallback(callback, 2, TimeUnit.SECONDS)
  }
}
| alexliao/reshadow | src/main/scala/com/github/shadowsocks/ShadowsocksApplication.scala | Scala | gpl-3.0 | 3,403 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.sql.Timestamp
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.analysis.SimpleAnalyzer
import org.apache.spark.sql.catalyst.expressions.codegen.CodegenContext
import org.apache.spark.sql.catalyst.expressions.objects.AssertNotNull
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, Project}
import org.apache.spark.sql.types._
// Unit tests for Catalyst's null-handling expressions: IsNull/IsNotNull, IsNaN, NaNvl,
// Coalesce, Nvl type coercion, and AtLeastNNonNulls.
class NullExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper {
  // Runs the given check against a representative literal of every common Catalyst type.
  def testAllTypes(testFunc: (Any, DataType) => Unit): Unit = {
    testFunc(false, BooleanType)
    testFunc(1.toByte, ByteType)
    testFunc(1.toShort, ShortType)
    testFunc(1, IntegerType)
    testFunc(1L, LongType)
    testFunc(1.0F, FloatType)
    testFunc(1.0, DoubleType)
    testFunc(Decimal(1.5), DecimalType(2, 1))
    testFunc(new java.sql.Date(10), DateType)
    testFunc(new java.sql.Timestamp(10), TimestampType)
    testFunc("abcd", StringType)
  }
  test("isnull and isnotnull") {
    testAllTypes { (value: Any, tpe: DataType) =>
      checkEvaluation(IsNull(Literal.create(value, tpe)), false)
      checkEvaluation(IsNotNull(Literal.create(value, tpe)), true)
      checkEvaluation(IsNull(Literal.create(null, tpe)), true)
      checkEvaluation(IsNotNull(Literal.create(null, tpe)), false)
    }
  }
  // AssertNotNull must fail at runtime when handed a null literal.
  test("AssertNotNUll") {
    val ex = intercept[RuntimeException] {
      evaluateWithoutCodegen(AssertNotNull(Literal(null)))
    }.getMessage
    assert(ex.contains("Null value appeared in non-nullable field"))
  }
  test("IsNaN") {
    checkEvaluation(IsNaN(Literal(Double.NaN)), true)
    checkEvaluation(IsNaN(Literal(Float.NaN)), true)
    checkEvaluation(IsNaN(Literal(math.log(-3))), true)
    // IsNaN is false (not null) for a null input.
    checkEvaluation(IsNaN(Literal.create(null, DoubleType)), false)
    checkEvaluation(IsNaN(Literal(Double.PositiveInfinity)), false)
    checkEvaluation(IsNaN(Literal(Float.MaxValue)), false)
    checkEvaluation(IsNaN(Literal(5.5f)), false)
  }
  // NaNvl replaces NaN in the first operand with the second; nulls propagate.
  test("nanvl") {
    checkEvaluation(NaNvl(Literal(5.0), Literal.create(null, DoubleType)), 5.0)
    checkEvaluation(NaNvl(Literal.create(null, DoubleType), Literal(5.0)), null)
    checkEvaluation(NaNvl(Literal.create(null, DoubleType), Literal(Double.NaN)), null)
    checkEvaluation(NaNvl(Literal(Double.NaN), Literal(5.0)), 5.0)
    checkEvaluation(NaNvl(Literal(Double.NaN), Literal.create(null, DoubleType)), null)
    assert(NaNvl(Literal(Double.NaN), Literal(Double.NaN)).
      eval(EmptyRow).asInstanceOf[Double].isNaN)
  }
  test("coalesce") {
    testAllTypes { (value: Any, tpe: DataType) =>
      val lit = Literal.create(value, tpe)
      val nullLit = Literal.create(null, tpe)
      checkEvaluation(Coalesce(Seq(nullLit)), null)
      checkEvaluation(Coalesce(Seq(lit)), value)
      checkEvaluation(Coalesce(Seq(nullLit, lit)), value)
      checkEvaluation(Coalesce(Seq(nullLit, lit, lit)), value)
      checkEvaluation(Coalesce(Seq(nullLit, nullLit, lit)), value)
    }
    // The result type's containsNull is the union of the children's nullability.
    val coalesce = Coalesce(Seq(
      Literal.create(null, ArrayType(IntegerType, containsNull = false)),
      Literal.create(Seq(1, 2, 3), ArrayType(IntegerType, containsNull = false)),
      Literal.create(Seq(1, 2, 3, null), ArrayType(IntegerType, containsNull = true))))
    assert(coalesce.dataType === ArrayType(IntegerType, containsNull = true))
    checkEvaluation(coalesce, Seq(1, 2, 3))
  }
  // Verifies the result type chosen by the analyzer for every operand-type pairing of Nvl.
  test("SPARK-16602 Nvl should support numeric-string cases") {
    def analyze(expr: Expression): Expression = {
      val relation = LocalRelation()
      SimpleAnalyzer.execute(Project(Seq(Alias(expr, "c")()), relation)).expressions.head
    }
    val intLit = Literal.create(1, IntegerType)
    val doubleLit = Literal.create(2.2, DoubleType)
    val stringLit = Literal.create("c", StringType)
    val nullLit = Literal.create(null, NullType)
    val floatNullLit = Literal.create(null, FloatType)
    val floatLit = Literal.create(1.01f, FloatType)
    val timestampLit = Literal.create(Timestamp.valueOf("2017-04-12 00:00:00"), TimestampType)
    val decimalLit = Literal.create(BigDecimal.valueOf(10.2), DecimalType(20, 2))
    assert(analyze(new Nvl(decimalLit, stringLit)).dataType == StringType)
    assert(analyze(new Nvl(doubleLit, decimalLit)).dataType == DoubleType)
    assert(analyze(new Nvl(decimalLit, doubleLit)).dataType == DoubleType)
    assert(analyze(new Nvl(decimalLit, floatLit)).dataType == DoubleType)
    assert(analyze(new Nvl(floatLit, decimalLit)).dataType == DoubleType)
    assert(analyze(new Nvl(timestampLit, stringLit)).dataType == StringType)
    assert(analyze(new Nvl(intLit, doubleLit)).dataType == DoubleType)
    assert(analyze(new Nvl(intLit, stringLit)).dataType == StringType)
    assert(analyze(new Nvl(stringLit, doubleLit)).dataType == StringType)
    assert(analyze(new Nvl(doubleLit, stringLit)).dataType == StringType)
    assert(analyze(new Nvl(nullLit, intLit)).dataType == IntegerType)
    assert(analyze(new Nvl(doubleLit, nullLit)).dataType == DoubleType)
    assert(analyze(new Nvl(nullLit, stringLit)).dataType == StringType)
    assert(analyze(new Nvl(floatLit, stringLit)).dataType == StringType)
    assert(analyze(new Nvl(floatLit, doubleLit)).dataType == DoubleType)
    assert(analyze(new Nvl(floatNullLit, intLit)).dataType == FloatType)
  }
  // NaN counts as non-null; only actual nulls reduce the non-null count.
  test("AtLeastNNonNulls") {
    val mix = Seq(Literal("x"),
      Literal.create(null, StringType),
      Literal.create(null, DoubleType),
      Literal(Double.NaN),
      Literal(5f))
    val nanOnly = Seq(Literal("x"),
      Literal(10.0),
      Literal(Float.NaN),
      Literal(math.log(-2)),
      Literal(Double.MaxValue))
    val nullOnly = Seq(Literal("x"),
      Literal.create(null, DoubleType),
      Literal.create(null, DecimalType.USER_DEFAULT),
      Literal(Float.MaxValue),
      Literal(false))
    checkEvaluation(AtLeastNNonNulls(2, mix), true, EmptyRow)
    checkEvaluation(AtLeastNNonNulls(3, mix), false, EmptyRow)
    checkEvaluation(AtLeastNNonNulls(3, nanOnly), true, EmptyRow)
    checkEvaluation(AtLeastNNonNulls(4, nanOnly), false, EmptyRow)
    checkEvaluation(AtLeastNNonNulls(3, nullOnly), true, EmptyRow)
    checkEvaluation(AtLeastNNonNulls(4, nullOnly), false, EmptyRow)
  }
  // Regression tests: very wide expressions must not overflow the JVM's 64KiB method limit
  // during code generation.
  test("Coalesce should not throw 64KiB exception") {
    val inputs = (1 to 2500).map(x => Literal(s"x_$x"))
    checkEvaluation(Coalesce(inputs), "x_1")
  }
  test("SPARK-22705: Coalesce should use less global variables") {
    val ctx = new CodegenContext()
    Coalesce(Seq(Literal("a"), Literal("b"))).genCode(ctx)
    assert(ctx.inlinedMutableStates.size == 1)
  }
  test("AtLeastNNonNulls should not throw 64KiB exception") {
    val inputs = (1 to 4000).map(x => Literal(s"x_$x"))
    checkEvaluation(AtLeastNNonNulls(1, inputs), true)
  }
}
| pgandhi999/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/NullExpressionsSuite.scala | Scala | apache-2.0 | 7,564 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.utils.stats
import java.io.{InputStream, FilterInputStream, IOException}
/**
 * An InputStream decorator that counts the bytes read through it (ported from Guava),
 * extended with the ability to reset the running count.
 */
class CountingInputStream(in: InputStream) extends FilterInputStream(in) {
  // Running total of bytes consumed from the underlying stream.
  private var bytesRead: Long = 0L
  // Count recorded at the last mark(); -1 means no mark has been set.
  private var markedAt: Long = -1

  /** Returns the number of bytes read. */
  def getCount: Long = bytesRead

  /** Resets the running byte count to zero without touching the underlying stream. */
  def resetCount(): Unit = bytesRead = 0L

  override def read: Int = {
    val b = in.read
    if (b != -1) bytesRead += 1
    b
  }

  override def read(b: Array[Byte], off: Int, len: Int): Int = {
    val n = in.read(b, off, len)
    if (n != -1) bytesRead += n
    n
  }

  override def skip(n: Long): Long = {
    val skipped = in.skip(n)
    bytesRead += skipped
    skipped
  }

  override def mark(readlimit: Int): Unit = {
    in.mark(readlimit)
    markedAt = bytesRead
  }

  override def reset(): Unit = {
    if (!in.markSupported) throw new IOException("Mark not supported")
    if (markedAt == -1) throw new IOException("Mark not set")
    in.reset()
    // Rewind the count to where the mark was taken, mirroring the stream position.
    bytesRead = markedAt
  }
}
| locationtech/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/stats/CountingInputStream.scala | Scala | apache-2.0 | 2,230 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.manager.utils.zero81
import kafka.common.TopicAndPartition
import kafka.manager.utils._
import kafka.manager.ActorModel.{TopicPartitionIdentity, TopicIdentity}
import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.KeeperException.NodeExistsException
import org.slf4j.LoggerFactory
import scala.util.Try
/**
* Borrowed from kafka 0.8.1.1, adapted to use curator framework
* https://git-wip-us.apache.org/repos/asf?p=kafka.git;a=blob;f=core/src/main/scala/kafka/admin/ReassignPartitionsCommand.scala
*/
import kafka.manager.utils.zero81.ReassignPartitionErrors._
// Generates, validates and writes Kafka partition-reassignment plans to ZooKeeper.
class ReassignPartitionCommand(adminUtils: AdminUtils) {
  private[this] val logger = LoggerFactory.getLogger(this.getClass)
  /**
   * Computes a fresh replica assignment for every partition of the topic across the given
   * brokers, preserving the topic's partition count and replication factor. Fails if any
   * partition is missing an assignment or an assignment contains duplicate brokers.
   */
  def generateAssignment(brokerList: Seq[Int], currentTopicIdentity: TopicIdentity): Try[TopicIdentity] = {
    Try {
      val assignedReplicas = adminUtils.assignReplicasToBrokers(
        brokerList,
        currentTopicIdentity.partitions,
        currentTopicIdentity.replicationFactor)
      val newTpi: Map[Int, TopicPartitionIdentity] = currentTopicIdentity.partitionsIdentity.map { case (part, tpi) =>
        val newReplicaSeq = assignedReplicas.get(part)
        checkCondition(newReplicaSeq.isDefined, MissingReplicaSetForPartition(part))
        // A replica set must not name the same broker twice; compare sizes via a Set.
        val newReplicaSet = newReplicaSeq.get.toSet
        checkCondition(newReplicaSeq.get.size == newReplicaSet.size,
          DuplicateFoundInReplicaSetForPartition(newReplicaSeq.get,part,currentTopicIdentity.topic))
        (part, tpi.copy(replicas = newReplicaSeq.get))
      }
      logger.info(s"Generated topic replica assignment topic=${currentTopicIdentity.topic}, $newTpi")
      // Rebuild the identity with only the partition replica map replaced.
      TopicIdentity(
        currentTopicIdentity.topic,
        currentTopicIdentity.readVersion,
        currentTopicIdentity.partitions,
        newTpi,
        currentTopicIdentity.numBrokers,
        currentTopicIdentity.configReadVersion,
        currentTopicIdentity.config,
        currentTopicIdentity.deleteSupported,
        currentTopicIdentity.clusterConfig
      )
    }
  }
  // Throws (via checkCondition) if the generated plan is empty or disagrees with the current
  // topic on partition count or replication factor.
  def validateAssignment(current: TopicIdentity, generated: TopicIdentity): Unit = {
    //perform validation
    checkCondition(generated.partitionsIdentity.nonEmpty, ReassignmentDataEmptyForTopic(current.topic))
    checkCondition(current.partitions == generated.partitions, PartitionsOutOfSync(current.partitions, generated.partitions))
    checkCondition(current.replicationFactor == generated.replicationFactor, ReplicationOutOfSync(current.replicationFactor, generated.replicationFactor))
  }
  /**
   * Joins current topics with their generated plans, validates each pair, and flattens the
   * result to a (topic, partition) -> replica-list map. Topics with no generated plan are
   * skipped rather than failed.
   */
  def getValidAssignments(currentTopicIdentity: Map[String, TopicIdentity],
                          generatedTopicIdentity: Map[String, TopicIdentity]): Try[Map[TopicAndPartition, Seq[Int]]] = {
    Try {
      currentTopicIdentity.flatMap { case (topic, current) =>
        generatedTopicIdentity.get(topic).fold {
          logger.info(s"No generated assignment found for topic=$topic, skipping")
          Map.empty[TopicAndPartition, Seq[Int]]
        } { generated =>
          validateAssignment(current, generated)
          for {
          //match up partitions from current to generated
            (currentPart, currentTpi) <- current.partitionsIdentity
            generatedTpi <- generated.partitionsIdentity.get(currentPart)
          } yield {
            logger.info("Reassigning replicas for topic=%s, partition=%s, current=%s, generated=%s"
              .format(topic, currentPart, current.partitionsIdentity, generated.partitionsIdentity))
            (TopicAndPartition(topic, currentPart), generatedTpi.replicas.toSeq)
          }
        }
      }
    }
  }
  /**
   * Validates the assignments and writes them as JSON to the ZooKeeper reassignment path.
   * Fails with AlreadyInProgress if the path already exists (a reassignment is running).
   */
  def executeAssignment(curator: CuratorFramework,
                        currentTopicIdentity: Map[String, TopicIdentity],
                        generatedTopicIdentity: Map[String, TopicIdentity]): Try[Unit] = {
    getValidAssignments(currentTopicIdentity, generatedTopicIdentity).flatMap {
      validAssignments =>
        Try {
          checkCondition(validAssignments.nonEmpty, NoValidAssignments)
          val jsonReassignmentData = ZkUtils.getPartitionReassignmentZkData(validAssignments)
          try {
            logger.info(s"Creating reassign partitions path ${ZkUtils.ReassignPartitionsPath} : $jsonReassignmentData")
            //validate parsing of generated json
            ReassignPartitionCommand.parsePartitionReassignmentZkData(jsonReassignmentData)
            ZkUtils.createPersistentPath(curator, ZkUtils.ReassignPartitionsPath, jsonReassignmentData)
          } catch {
            case ze: NodeExistsException =>
              throwError(AlreadyInProgress)
            case e: Throwable =>
              throwError(FailedToReassignPartitionReplicas(e))
          }
        }
    }
  }
}
object ReassignPartitionCommand {
  /**
   * Parses the JSON payload stored under the ZooKeeper reassignment path back into a
   * (topic, partition) -> replica-list map. Fails (via checkCondition/throwError) when no
   * assignments are present or any replica list is empty.
   */
  def parsePartitionReassignmentZkData(json : String) : Map[TopicAndPartition, Seq[Int]] = {
    import org.json4s.JsonAST._
    parseJson(json).findField(_._1 == "partitions") match {
      case Some((_, arr)) =>
        // json4s extraction via for-comprehension: each generator both pattern-matches and
        // filters, so malformed entries are silently dropped rather than failing the parse.
        val result : List[(TopicAndPartition, Seq[Int])] = for {
          JArray(elements) <- arr
          JObject(children) <- elements
          JField("topic", JString(t)) <- children
          JField("partition", JInt(i)) <- children
          JField("replicas", arr2) <- children
          JArray(assignments) <- arr2
        } yield (TopicAndPartition(t,i.toInt),assignments.map(_.extract[Int]))
        checkCondition(result.nonEmpty, NoValidAssignments)
        result.foreach { case (tAndP, a) =>
          checkCondition(a.nonEmpty, ReassignmentDataEmptyForTopic(tAndP.topic))
        }
        result.toMap
      case None =>
        throwError(NoValidAssignments)
    }
  }
}
// Error hierarchy for partition reassignment. Constructors are private to this object;
// callers build instances through the factory methods/vals below.
object ReassignPartitionErrors {
  class MissingReplicaSetForPartition private[ReassignPartitionErrors](part: Int) extends UtilError(s"Failed to find new replica set for partition $part")
  class ReassignmentDataEmptyForTopic private[ReassignPartitionErrors](topic: String) extends UtilError(s"Partition reassignment data is empty for topic $topic")
  class PartitionsOutOfSync private[ReassignPartitionErrors](current: Int, generated: Int) extends UtilError(
    "Current partitions and generated partition replicas are out of sync current=%s, generated=%s , please regenerate"
      .format(current, generated))
  class ReplicationOutOfSync private[ReassignPartitionErrors](current: Int, generated: Int) extends UtilError(
    "Current replication factor and generated replication factor for replicas are out of sync current=%s, generated=%s , please regenerate"
      .format(current, generated))
  class NoValidAssignments private[ReassignPartitionErrors] extends UtilError("Cannot reassign partitions with no valid assignments!")
  class ReassignmentAlreadyInProgress private[ReassignPartitionErrors] extends UtilError("Partition reassignment currently in " +
    "progress for. Aborting operation")
  class FailedToReassignPartitionReplicas private[ReassignPartitionErrors] (t: Throwable) extends UtilError(
    s"Failed to reassign partition replicas ${t.getStackTrace.mkString("[","\\n","]")}")
  class DuplicateFoundInReplicaSetForPartition private[ReassignPartitionErrors](replicas: Seq[Int], part: Int, topic: String) extends UtilError(
    s"Duplicate found in replica set $replicas for partition $part for topic $topic"
  )
  // Factory methods — the public construction surface for the private error classes above.
  def MissingReplicaSetForPartition(part: Int) = new MissingReplicaSetForPartition(part)
  def ReassignmentDataEmptyForTopic(topic: String) = new ReassignmentDataEmptyForTopic(topic)
  def PartitionsOutOfSync(current: Int, generated: Int) = new PartitionsOutOfSync(current,generated)
  def ReplicationOutOfSync(current: Int, generated: Int) = new ReplicationOutOfSync(current,generated)
  // Stateless errors are shared singletons.
  val NoValidAssignments = new NoValidAssignments
  val AlreadyInProgress = new ReassignmentAlreadyInProgress
  def FailedToReassignPartitionReplicas(t: Throwable) = new FailedToReassignPartitionReplicas(t)
  def DuplicateFoundInReplicaSetForPartition(replicas: Seq[Int], part: Int, topic: String) =
    new DuplicateFoundInReplicaSetForPartition(replicas,part,topic)
}
| cvcal/kafka-manager | app/kafka/manager/utils/zero81/ReassignPartitionCommand.scala | Scala | apache-2.0 | 8,918 |
package controllers
import play.api.mvc._
class Application extends Controller {

  /** Root endpoint: replies 200 OK with a fixed startup message. */
  def index: Action[AnyContent] = Action { _ =>
    Ok("Application started.")
  }
}
| SBP07/backend | app/controllers/Application.scala | Scala | gpl-2.0 | 165 |
/* *\\
** Squants **
** **
** Scala Quantities and Units of Measure Library and DSL **
** (c) 2013-2015, Gary Keorkunian **
** **
\\* */
package squants.thermal
import squants._
import squants.Platform.crossFormat
import squants.energy.Joules
import scala.util.{ Failure, Success, Try }
/**
* Represents a quantity of temperature
*
* Temperatures are somewhat unique in the world of quantities for a couple of reasons.
*
* First, different units (scales) have different "zero" values. This means that these scales
* are not simple multiples of the others. There is a "zero offset" that must be applied to conversions
* from one scale to another.
*
* Second, temperatures are often quoted as though they were quantities, when in fact they are just points
* on a scale. Similar to a mile marker on a highway, the quantity represented is the number degrees (miles)
* from a specific "zero" value on the scale.
*
* In fact an absolute quantity of thermodynamic temperature should be measured from absolute zero.
* Thus, Kelvin, is the SI Base unit for temperature.
*
* The other scales supported here, Celsius and Fahrenheit, are known as empirical scales.
* Of course, these scales set their respective zero values well above absolute zero.
* This is done to provide a granular and reasonably sized ranges of values for dealing with everyday temperatures.
*
* This library supports another absolute scale, the Rankine scale. Rankine sets its zero at absolute zero,
* but degrees are measure in Fahrenheit (as opposed to Celsius, as the Kelvin scale uses).
*
* In consideration of these more unique scale conversions, two conversion types are supported: Degrees and Scale.
*
* Scale based conversions DO adjust for the zero offset.
* Thus 5 degrees C is the same as 41 degrees F on the thermometer.
*
* Degrees based conversions DO NOT adjust for the zero point.
* Thus 5 degrees C|K is the same amount of temperature as 9 degrees F|R.
*
* When creating a temperature it is not important to consider these differences.
* It is also irrelevant when performing operation on temperatures in the same scale.
* However, when performing operations on two temperatures of different scales these factors do become important.
*
* The Quantity.to(unit) and Quantity.in(unit) methods are overridden to use Scale conversions for convenience
*
* The Ordered.compare method is implemented to use Scale conversions
*
* The Quantity.plus and Quantity.minus methods are implemented to treat right operands as Quantity of Degrees and not a scale Temperature.
* Operands that differ in scale will use Degree conversions.
* This supports mixed scale expressions:
*
* val temp = Fahrenheit(100) - Celsius(5) // returns Fahrenheit(91)
*
* This also supports declaring temperature ranges using typical nomenclature:
*
* val tempRange = 65.F +- 5.C // returns QuantityRange(56.0°F,74.0°F)
*
* The toDegrees(unit) methods are implemented to use Degree conversions.
*
* @author garyKeorkunian
* @since 0.1
*
* @param value the value of the temperature
*/
final class Temperature private (val value: Double, val unit: TemperatureScale)
    extends Quantity[Temperature] {

  def dimension = Temperature

  // Addition and subtraction treat the right-hand operand as a quantity of *degrees* (no
  // zero-offset adjustment), which makes mixed-scale expressions such as
  // Fahrenheit(100) - Celsius(5) behave as documented on the class.
  override def plus(that: Temperature): Temperature =
    Temperature(value + that.convert(unit, withOffset = false).value, unit)

  override def minus(that: Temperature): Temperature =
    Temperature(value - that.convert(unit, withOffset = false).value, unit)

  def *(that: ThermalCapacity) = Joules(toKelvinScale * that.toJoulesPerKelvin)

  override def toString: String = unit match {
    case Kelvin => super.toString
    // Non-Kelvin units are treated in a special manner, they do not get a space between the value and symbol.
    case _ => crossFormat(value) + unit.symbol
  }

  def toString(unit: TemperatureScale): String = in(unit).toString

  // Scale conversions (withOffset = true) adjust for each scale's zero point; degree
  // conversions (withOffset = false) only rescale the magnitude (the 9:5 factor).
  private def convert(toScale: TemperatureScale, withOffset: Boolean = true): Temperature =
    if (unit == toScale) {
      this
    } else if (withOffset) {
      (unit, toScale) match {
        case (Fahrenheit, Celsius) => Celsius(TemperatureConversions.fahrenheitToCelsiusScale(value))
        case (Celsius, Fahrenheit) => Fahrenheit(TemperatureConversions.celsiusToFahrenheitScale(value))
        case (Celsius, Kelvin) => Kelvin(TemperatureConversions.celsiusToKelvinScale(value))
        case (Kelvin, Celsius) => Celsius(TemperatureConversions.kelvinToCelsiusScale(value))
        case (Fahrenheit, Kelvin) => Kelvin(TemperatureConversions.fahrenheitToKelvinScale(value))
        case (Kelvin, Fahrenheit) => Fahrenheit(TemperatureConversions.kelvinToFahrenheitScale(value))
        case (Fahrenheit, Rankine) => Rankine(TemperatureConversions.fahrenheitToRankineScale(value))
        case (Rankine, Fahrenheit) => Fahrenheit(TemperatureConversions.rankineToFahrenheitScale(value))
        case (Celsius, Rankine) => Rankine(TemperatureConversions.celsiusToRankineScale(value))
        case (Rankine, Celsius) => Celsius(TemperatureConversions.rankineToCelsiusScale(value))
        case (Kelvin, Rankine) => Rankine(TemperatureConversions.kelvinToRankineScale(value))
        case (Rankine, Kelvin) => Kelvin(TemperatureConversions.rankineToKelvinScale(value))
      }
    } else {
      (unit, toScale) match {
        case (Fahrenheit, Celsius) => Celsius(TemperatureConversions.fahrenheitToCelsiusDegrees(value))
        case (Celsius, Fahrenheit) => Fahrenheit(TemperatureConversions.celsiusToFahrenheitDegrees(value))
        case (Celsius, Kelvin) => Kelvin(TemperatureConversions.celsiusToKelvinDegrees(value))
        case (Kelvin, Celsius) => Celsius(TemperatureConversions.kelvinToCelsiusDegrees(value))
        case (Fahrenheit, Kelvin) => Kelvin(TemperatureConversions.fahrenheitToKelvinDegrees(value))
        case (Kelvin, Fahrenheit) => Fahrenheit(TemperatureConversions.kelvinToFahrenheitDegrees(value))
        case (Fahrenheit, Rankine) => Rankine(TemperatureConversions.fahrenheitToRankineDegrees(value))
        case (Rankine, Fahrenheit) => Fahrenheit(TemperatureConversions.rankineToFahrenheitDegrees(value))
        case (Celsius, Rankine) => Rankine(TemperatureConversions.celsiusToRankineDegrees(value))
        case (Rankine, Celsius) => Celsius(TemperatureConversions.rankineToCelsiusDegrees(value))
        case (Kelvin, Rankine) => Rankine(TemperatureConversions.kelvinToRankineDegrees(value))
        case (Rankine, Kelvin) => Kelvin(TemperatureConversions.rankineToKelvinDegrees(value))
      }
    }

  def in(unit: TemperatureScale) = convert(unit, withOffset = true)
  def inFahrenheit: Temperature = convert(Fahrenheit)
  def inCelsius: Temperature = convert(Celsius)
  def inKelvin: Temperature = convert(Kelvin)

  def to(unit: TemperatureScale) = toScale(unit)
  def toScale(unit: TemperatureScale) = convert(unit, withOffset = true).value
  def toFahrenheitScale = toScale(Fahrenheit)
  def toCelsiusScale = toScale(Celsius)
  def toKelvinScale = toScale(Kelvin)

  def toDegrees(unit: TemperatureScale) = convert(unit, withOffset = false).value
  def toFahrenheitDegrees = toDegrees(Fahrenheit)
  def toCelsiusDegrees = toDegrees(Celsius)
  def toKelvinDegrees = toDegrees(Kelvin)
}
/**
 * Temperature companion object
 */
object Temperature extends Dimension[Temperature] with BaseDimension {

  def apply[A](n: A, scale: TemperatureScale)(implicit num: Numeric[A]) = new Temperature(num.toDouble(n), scale)

  /** Parses strings such as "98.6 F", "37°C" or "310K" into the matching scale. */
  def apply(s: String): Try[Temperature] = {
    val regex = "([-+]?[0-9]*\\\\.?[0-9]+(?:[eE][-+]?[0-9]+)?) *°? *(f|F|c|C|k|K|r|R)".r
    s match {
      case regex(value, unit) =>
        val magnitude = value.toDouble
        // The regex restricts the unit letter, so this match is exhaustive in practice.
        unit.toLowerCase match {
          case "f" => Success(Fahrenheit(magnitude))
          case "c" => Success(Celsius(magnitude))
          case "k" => Success(Kelvin(magnitude))
          case "r" => Success(Rankine(magnitude))
        }
      case _ => Failure(QuantityParseException("Unable to parse Temperature", s))
    }
  }

  def name = "Temperature"
  def primaryUnit = Kelvin
  def siUnit = Kelvin
  def units = Set(Kelvin, Fahrenheit, Celsius, Rankine)
  def dimensionSymbol = "Θ"
}
/**
 * Base trait for units of [[squants.thermal.Temperature]]
 */
sealed trait TemperatureScale extends UnitOfMeasure[Temperature] {
  // Each concrete scale singleton returns itself, letting callers recover the scale object.
  def self: TemperatureScale
  // Builds a Temperature from any numeric value, expressed in this scale.
  def apply[A](n: A)(implicit num: Numeric[A]) = Temperature(num.toDouble(n), this)
}
// The Celsius (centigrade) scale; converts through Kelvin, the primary unit.
object Celsius extends TemperatureScale {
  val symbol = "°C"
  val self = this
  protected def converterFrom = TemperatureConversions.celsiusToKelvinScale
  protected def converterTo = TemperatureConversions.kelvinToCelsiusScale
  // Re-expresses an existing temperature on the Celsius scale (offset-adjusted).
  def apply(temperature: Temperature): Temperature = temperature.inCelsius
}
// The Fahrenheit scale; converts through Kelvin, the primary unit.
object Fahrenheit extends TemperatureScale {
  val symbol = "°F"
  val self = this
  protected def converterFrom = TemperatureConversions.fahrenheitToKelvinScale
  protected def converterTo = TemperatureConversions.kelvinToFahrenheitScale
  // Re-expresses an existing temperature on the Fahrenheit scale (offset-adjusted).
  def apply(temperature: Temperature): Temperature = temperature.inFahrenheit
}
// Kelvin is the primary (and SI base) unit, so no converters are needed.
object Kelvin extends TemperatureScale with PrimaryUnit with SiBaseUnit {
  val symbol = "K"
  val self = this
  def apply(temperature: Temperature): Temperature = temperature.inKelvin
}
// Rankine scale: pure 5:9 ratio to Kelvin with no additive offset (both
// scales share absolute zero) — see TemperatureConversions.
object Rankine extends TemperatureScale {
  val symbol = "°R"
  val self = this
  protected def converterFrom = TemperatureConversions.rankineToKelvinScale
  protected def converterTo = TemperatureConversions.kelvinToRankineScale
  // Re-expresses an existing temperature on the Rankine scale.
  def apply(temperature: Temperature): Temperature = temperature.in(Rankine)
}
object TemperatureConversions {
  // One-unit constants, handy for unit arithmetic and tests.
  lazy val kelvin = Kelvin(1)
  lazy val fahrenheit = Fahrenheit(1)
  lazy val celsius = Celsius(1)
  lazy val rankine = Rankine(1)

  /*
   * Degree conversions are used to convert a quantity of degrees from one scale to another.
   * These conversions do not adjust for the zero offset.
   * Essentially they only do the 9:5 conversion between F degrees and C|K degrees
   */
  def celsiusToFahrenheitDegrees(celsius: Double) = celsius * 9d / 5d
  def fahrenheitToCelsiusDegrees(fahrenheit: Double) = fahrenheit * 5d / 9d
  def celsiusToKelvinDegrees(celsius: Double) = celsius
  def kelvinToCelsiusDegrees(kelvin: Double) = kelvin
  def fahrenheitToKelvinDegrees(fahrenheit: Double) = fahrenheit * 5d / 9d
  def kelvinToFahrenheitDegrees(kelvin: Double) = kelvin * 9d / 5d
  def celsiusToRankineDegrees(celsius: Double) = celsius * 9d / 5d
  def rankineToCelsiusDegrees(rankine: Double) = rankine * 5d / 9d
  def fahrenheitToRankineDegrees(fahrenheit: Double) = fahrenheit
  def rankineToFahrenheitDegrees(rankine: Double) = rankine
  def kelvinToRankineDegrees(kelvin: Double) = kelvin * 9d / 5d
  def rankineToKelvinDegrees(rankine: Double) = rankine * 5d / 9d

  /*
   * Scale conversions are used to convert a "thermometer" temperature from one scale to another.
   * These conversions will adjust the result by the zero offset.
   * They are used to find the equivalent absolute temperature in the other scale.
   */
  def celsiusToFahrenheitScale(celsius: Double) = celsius * 9d / 5d + 32d
  def fahrenheitToCelsiusScale(fahrenheit: Double) = (fahrenheit - 32d) * 5d / 9d
  def celsiusToKelvinScale(celsius: Double) = celsius + 273.15
  def kelvinToCelsiusScale(kelvin: Double) = kelvin - 273.15
  def fahrenheitToKelvinScale(fahrenheit: Double) = (fahrenheit + 459.67) * 5d / 9d
  def kelvinToFahrenheitScale(kelvin: Double) = kelvin * 9d / 5d - 459.67
  def celsiusToRankineScale(celsius: Double) = (celsius + 273.15) * 9d / 5d
  def rankineToCelsiusScale(rankine: Double) = (rankine - 491.67) * 5d / 9d
  def fahrenheitToRankineScale(fahrenheit: Double) = fahrenheit + 459.67
  def rankineToFahrenheitScale(rankine: Double) = rankine - 459.67
  def kelvinToRankineScale(kelvin: Double) = kelvin * 9d / 5d
  def rankineToKelvinScale(rankine: Double) = rankine * 5d / 9d

  // Enables numeric literal syntax such as 21.5.C, 70.F, 300.K, 460.R.
  implicit class TemperatureConversions[A](n: A)(implicit num: Numeric[A]) {
    def C = Celsius(n)
    def celsius = Celsius(n)
    def degreesCelsius = Celsius(n)
    def F = Fahrenheit(n)
    def Fah = Fahrenheit(n) // F conflicts with (Float) in the console; Fah is provided as an alternative
    def fahrenheit = Fahrenheit(n)
    def degreesFahrenheit = Fahrenheit(n)
    def K = Kelvin(n)
    def kelvin = Kelvin(n)
    def degreesKelvin = Kelvin(n)
    def R = Rankine(n)
    def rankine = Rankine(n)
    def degreesRankine = Rankine(n)
  }

  // Enables "21.5 °C".toTemperature, delegating to Temperature(String).
  implicit class TemperatureStringConversion(s: String) {
    def toTemperature = Temperature(s)
  }
}
| garyKeorkunian/squants | shared/src/main/scala/squants/thermal/Temperature.scala | Scala | apache-2.0 | 13,103 |
package com.emajliramokade
package services
package interfaces
import api.model.EmailProvjera.Odgovor
import email.Email
import scala.concurrent.Future
trait EmailSender {
  // Sends the given email; the future completes with the provider's
  // Odgovor ("response") once the send attempt finishes.
  def send(email: Email): Future[Odgovor]
}
| element-doo/ekade | code/scala/Services/src/main/scala/com/emajliramokade/services/interfaces/EmailSender.scala | Scala | bsd-3-clause | 219 |
// Mutable single-field holder used by the pattern-matching regression test.
case class B(var x: Int) {
  /** Increments the mutable field in place. */
  def succ(): Unit = x += 1
}
object Test {
  def main(args: Array[String]): Unit = {
    val b = B(0)
    b match {
      case B(x) =>
        //println(x)
        // `x` was bound to b.x (0) when the pattern matched; mutating the
        // field afterwards must not affect the already-extracted local,
        // so this prints 0, not 1.
        b.succ()
        println(x)
    }
  }
}
| yusuke2255/dotty | tests/run/t5158.scala | Scala | bsd-3-clause | 246 |
package gitbucket.core.view
import java.text.Normalizer
import java.util.regex.Pattern
import java.util.Locale
import gitbucket.core.controller.Context
import gitbucket.core.service.{RepositoryService, RequestCache}
import gitbucket.core.util.StringUtil
import io.github.gitbucket.markedj._
import io.github.gitbucket.markedj.Utils._
object Markdown {

  /**
   * Converts Markdown of Wiki pages to HTML.
   *
   * @param repository the repository which contains the markdown
   * @param enableWikiLink if true then wiki style link is available in markdown
   * @param enableRefsLink if true then issue reference (e.g. #123) is rendered as link
   * @param enableAnchor if true then anchor for headline is generated
   * @param enableTaskList if true then task list syntax is available
   * @param hasWritePermission true if user has writable to the given repository
   * @param pages the list of existing Wiki pages
   */
  def toHtml(markdown: String,
             repository: RepositoryService.RepositoryInfo,
             enableWikiLink: Boolean,
             enableRefsLink: Boolean,
             enableAnchor: Boolean,
             enableTaskList: Boolean = false,
             hasWritePermission: Boolean = false,
             pages: List[String] = Nil)(implicit context: Context): String = {

    // escape issue id: "#123" becomes the intermediate "issue:123" marker,
    // expanded back into a link by text() below
    val s = if(enableRefsLink){
      markdown.replaceAll("(?<=(\\\\W|^))#(\\\\d+)(?=(\\\\W|$))", "issue:$2")
    } else markdown

    // escape task list
    val source = if(enableTaskList){
      escapeTaskList(s)
    } else s

    val options = new Options()
    options.setSanitize(true)
    val renderer = new GitBucketMarkedRenderer(options, repository, enableWikiLink, enableRefsLink, enableAnchor, enableTaskList, hasWritePermission, pages)
    Marked.marked(source, options, renderer)
  }

  /**
   * Extends markedj Renderer for GitBucket
   */
  class GitBucketMarkedRenderer(options: Options, repository: RepositoryService.RepositoryInfo,
                                enableWikiLink: Boolean, enableRefsLink: Boolean, enableAnchor: Boolean, enableTaskList: Boolean, hasWritePermission: Boolean,
                                pages: List[String])
                               (implicit val context: Context) extends Renderer(options) with LinkConverter with RequestCache {

    // Headings get a stable id and, when enabled, hidden anchor elements
    // for deep linking.
    override def heading(text: String, level: Int, raw: String): String = {
      val id = generateAnchorName(text)
      val out = new StringBuilder()

      out.append("<h" + level + " id=\\"" + options.getHeaderPrefix + id + "\\" class=\\"markdown-head\\">")
      if(enableAnchor){
        out.append("<a class=\\"markdown-anchor-link\\" href=\\"#" + id + "\\"></a>")
        out.append("<a class=\\"markdown-anchor\\" name=\\"" + id + "\\"></a>")
      }
      out.append(text)
      out.append("</h" + level + ">\\n")
      out.toString()
    }

    // Fenced/indented code blocks rendered for google-code-prettify.
    override def code(code: String, lang: String, escaped: Boolean): String = {
      "<pre class=\\"prettyprint" + (if(lang != null) s" ${options.getLangPrefix}${lang}" else "" )+ "\\">" +
        (if(escaped) code else escape(code, true)) + "</pre>"
    }

    // Lists containing rendered task-list checkboxes get the "task-list" class.
    override def list(body: String, ordered: Boolean): String = {
      var listType: String = null
      if (ordered) {
        listType = "ol"
      }
      else {
        listType = "ul"
      }
      if(body.contains("""class="task-list-item-checkbox"""")){
        return "<" + listType + " class=\\"task-list\\">\\n" + body + "</" + listType + ">\\n"
      } else {
        return "<" + listType + ">\\n" + body + "</" + listType + ">\\n"
      }
    }

    override def listitem(text: String): String = {
      if(text.contains("""class="task-list-item-checkbox" """)){
        return "<li class=\\"task-list-item\\">" + text + "</li>\\n"
      } else {
        return "<li>" + text + "</li>\\n"
      }
    }

    override def text(text: String): String = {
      // convert commit id and username to link.
      val t1 = if(enableRefsLink) convertRefsLinks(text, repository, "issue:", false) else text
      // convert task list to checkbox.
      val t2 = if(enableTaskList) convertCheckBox(t1, hasWritePermission) else t1
      t2
    }

    override def link(href: String, title: String, text: String): String = {
      super.link(fixUrl(href, false), title, text)
    }

    override def image(href: String, title: String, text: String): String = {
      super.image(fixUrl(href, true), title, text)
    }

    // Handles "[[page]]" / "[[label|page]]" wiki links; pages that do not
    // exist yet are rendered with the "absent" class.
    override def nolink(text: String): String = {
      if(enableWikiLink && text.startsWith("[[") && text.endsWith("]]")){
        val link = text.replaceAll("(^\\\\[\\\\[|\\\\]\\\\]$)", "")

        val (label, page) = if(link.contains('|')){
          val i = link.indexOf('|')
          (link.substring(0, i), link.substring(i + 1))
        } else {
          (link, link)
        }

        val url = repository.httpUrl.replaceFirst("/git/", "/").stripSuffix(".git") + "/wiki/" + StringUtil.urlEncode(page)
        if(pages.contains(page)){
          "<a href=\\"" + url + "\\">" + escape(label) + "</a>"
        } else {
          "<a href=\\"" + url + "\\" class=\\"absent\\">" + escape(label) + "</a>"
        }
      } else {
        escape(text)
      }
    }

    // Resolves relative URLs against the repository (blob view) or the wiki,
    // leaving absolute and fragment URLs untouched.
    private def fixUrl(url: String, isImage: Boolean = false): String = {
      if(url.startsWith("http://") || url.startsWith("https://") || url.startsWith("/")){
        url
      } else if(url.startsWith("#")){
        ("#" + generateAnchorName(url.substring(1)))
      } else if(!enableWikiLink){
        if(context.currentPath.contains("/blob/")){
          url + (if(isImage) "?raw=true" else "")
        } else if(context.currentPath.contains("/tree/")){
          val paths = context.currentPath.split("/")
          val branch = if(paths.length > 3) paths.drop(4).mkString("/") else repository.repository.defaultBranch
          repository.httpUrl.replaceFirst("/git/", "/").stripSuffix(".git") + "/blob/" + branch + "/" + url + (if(isImage) "?raw=true" else "")
        } else {
          val paths = context.currentPath.split("/")
          val branch = if(paths.length > 3) paths.last else repository.repository.defaultBranch
          repository.httpUrl.replaceFirst("/git/", "/").stripSuffix(".git") + "/blob/" + branch + "/" + url + (if(isImage) "?raw=true" else "")
        }
      } else {
        repository.httpUrl.replaceFirst("/git/", "/").stripSuffix(".git") + "/wiki/_blob/" + url
      }
    }
  }

  // Rewrites "- [x] " / "- [ ] " task markers into intermediate "task:…:"
  // form so the sanitizer does not strip them; expanded by convertCheckBox.
  def escapeTaskList(text: String): String = {
    Pattern.compile("""^( *)- \\[([x| ])\\] """, Pattern.MULTILINE).matcher(text).replaceAll("$1* task:$2: ")
  }

  // Builds a stable heading anchor: strip tags, dash-join whitespace,
  // NFD-normalize, URL-encode and lower-case.
  def generateAnchorName(text: String): String = {
    val normalized = Normalizer.normalize(text.replaceAll("<.*>", "").replaceAll("[\\\\s]", "-"), Normalizer.Form.NFD)
    val encoded = StringUtil.urlEncode(normalized)
    encoded.toLowerCase(Locale.ENGLISH)
  }

  // Expands intermediate "task:…:" markers into checkbox inputs; users
  // without write permission get disabled checkboxes.
  def convertCheckBox(text: String, hasWritePermission: Boolean): String = {
    val disabled = if (hasWritePermission) "" else "disabled"
    text.replaceAll("task:x:", """<input type="checkbox" class="task-list-item-checkbox" checked="checked" """ + disabled + "/>")
      .replaceAll("task: :", """<input type="checkbox" class="task-list-item-checkbox" """ + disabled + "/>")
  }
}
| uli-heller/gitbucket | src/main/scala/gitbucket/core/view/Markdown.scala | Scala | apache-2.0 | 7,218 |
import scala.quoted.*
object SourceFiles {

  // A macro body: a context function from (by-name) Quotes to an Expr[X].
  type Macro[X] = (=> Quotes) ?=> Expr[X]

  // Inline macro: expands to the name of the source file being compiled
  // at the expansion site.
  implicit inline def getThisFile: String =
    ${getThisFileImpl}

  def getThisFileImpl: Macro[String] =
    val q = quotes // Quotes is ByName and hence not stable (q stabilizes it)
    Expr(q.reflect.SourceFile.current.name)

}
| dotty-staging/dotty | tests/run-macros/tasty-getfile-implicit-by-name-fun-context/Macro_1.scala | Scala | apache-2.0 | 320 |
package is.hail.expr.ir.analyses
import is.hail.HailContext
import is.hail.expr.ir._
object ComputeMethodSplits {

  /**
   * Marks IR nodes at which code generation should emit a separate method,
   * keeping each generated method under the configured size limit.
   * Nodes listed in `controlFlowPreventsSplit` are never split.
   */
  def apply(ir: IR, controlFlowPreventsSplit: Memo[Unit]): Memo[Unit] = {
    val splits = Memo.empty[Unit]

    val splitThreshold = HailContext.getFlag("method_split_ir_limit").toInt
    require(splitThreshold > 0, "invalid method_split_ir_limit")

    // Returns the size of the subtree rooted at `node` that stays inlined in
    // the enclosing method (subtrees split into their own method count as 0).
    def visit(node: IR): Int = {
      val inlinedChildSize = node.children.iterator.map { case child: IR => visit(child) }.sum

      val forcesSplit = node match {
        case _: TailLoop => true
        // stream consumers always get their own method
        case _: ToArray | _: ToSet | _: ToDict => true
        case _: StreamFold | _: StreamFold2 | _: StreamLen | _: StreamFor => true
        case _ => false
      }
      val shouldSplit =
        !controlFlowPreventsSplit.contains(node) &&
          (forcesSplit || inlinedChildSize > splitThreshold)

      if (shouldSplit) {
        splits.bind(node, ())
        0 // a method call contributes almost nothing to the caller's size
      } else {
        // references, inputs and constants are free; everything else costs 1
        val ownCost = node match {
          case _: Ref => 0
          case _: In => 0
          case _ if IsConstant(node) => 0
          case _ => 1
        }
        inlinedChildSize + ownCost
      }
    }

    visit(ir)
    splits
  }
}
| hail-is/hail | hail/src/main/scala/is/hail/expr/ir/analyses/ComputeMethodSplits.scala | Scala | mit | 1,283 |
package sample
import language._
import scala.concurrent.duration._
import scala.collection.mutable.ListBuffer
import akka.actor._
import akka.cluster._
import akka.cluster.ClusterEvent._
import akka.pattern._
import akka.util._
import akka.event._
import akka.event.Logging._
import akka.io._
import com.typesafe.config.ConfigFactory
import scala.concurrent._
import spray.can._
import spray.can.server._
import spray.util._
import spray.http._
import HttpMethods._
import MediaTypes._
//#messages
// Work request sent from the frontend to a backend worker.
case class TransformationJob(text: String)
// Successful reply carrying the transformed text.
case class TransformationResult(text: String)
// Reply sent by the frontend when no backend is currently registered.
case class JobFailed(reason: String, job: TransformationJob)
// Sent by backends to frontends to announce themselves.
case object BackendRegistration
//#messages
object ConsoleSample {
  def main(args: Array[String]): Unit = {
    // Port can be overridden via the first program argument.
    val portOverride =
      if (args.nonEmpty) ConfigFactory.parseString(s"akka.remote.netty.tcp.port=${args(0)}")
      else ConfigFactory.empty
    // Tag this node with the "console" role, then fall back to application.conf.
    val config = portOverride
      .withFallback(ConfigFactory.parseString("akka.cluster.roles = [console]"))
      .withFallback(ConfigFactory.load())

    val system = ActorSystem("ClusterSystem", config)
    val console = system.actorOf(Props[Manager], name = "console")
  }
}
// Console actor; behavior is inherited entirely from ClusterConsole.
class Manager extends ClusterConsole
object TransformationFrontend {
  def main(args: Array[String]): Unit = {
    // Port can be overridden via the first program argument.
    val portOverride =
      if (args.nonEmpty) ConfigFactory.parseString(s"akka.remote.netty.tcp.port=${args(0)}")
      else ConfigFactory.empty
    // Tag this node with the "frontend" role, then fall back to application.conf.
    val config = portOverride
      .withFallback(ConfigFactory.parseString("akka.cluster.roles = [frontend]"))
      .withFallback(ConfigFactory.load())

    val system = ActorSystem("ClusterSystem", config)
    val frontend = system.actorOf(Props[TransformationFrontend], name = "frontend")

    import system.dispatcher
    implicit val timeout = Timeout(5 seconds)
    for (n <- 1 to 120) {
      (frontend ? TransformationJob("hello-" + n)) onSuccess {
        case result => println(result)
      }
      // throttle requests so the console output stays readable
      Thread.sleep(2000)
    }
    system.shutdown()
  }
}
//#frontend
class TransformationFrontend extends Actor {

  var backends = IndexedSeq.empty[ActorRef] // currently registered backend workers
  var jobCounter = 0 // drives round-robin backend selection

  override def receive = {
    // No workers registered yet: fail fast instead of queueing.
    case job: TransformationJob if backends.isEmpty =>
      sender ! JobFailed("Service unavailable, try again later", job)

    // Round-robin dispatch; `forward` preserves the original sender.
    case job: TransformationJob =>
      jobCounter += 1
      backends(jobCounter % backends.size) forward job

    case BackendRegistration if !backends.contains(sender) =>
      // Watch the backend so we receive Terminated when it goes away.
      context watch sender
      backends = backends :+ sender

    case Terminated(a) =>
      backends = backends.filterNot(_ == a)
  }
}
//#frontend
object TransformationBackend {
  def main(args: Array[String]): Unit = {
    // Port can be overridden via the first program argument.
    val portOverride =
      if (args.nonEmpty) ConfigFactory.parseString(s"akka.remote.netty.tcp.port=${args(0)}")
      else ConfigFactory.empty
    // Tag this node with the "backend" role, then fall back to application.conf.
    val config = portOverride
      .withFallback(ConfigFactory.parseString("akka.cluster.roles = [backend]"))
      .withFallback(ConfigFactory.load())

    val system = ActorSystem("ClusterSystem", config)
    system.actorOf(Props[TransformationBackend], name = "backend")
  }
}
//#backend
class TransformationBackend extends Actor {

  val cluster = Cluster(context.system)

  // subscribe to cluster changes, MemberUp
  // re-subscribe when restart
  override def preStart(): Unit = cluster.subscribe(self, classOf[MemberUp])
  override def postStop(): Unit = cluster.unsubscribe(self)

  def receive = {
    // The actual work: upper-case the payload and reply to the requester.
    case TransformationJob(text) => sender ! TransformationResult(text.toUpperCase)
    // On (re)subscription the current cluster state is delivered first:
    // register with every frontend that is already Up.
    case state: CurrentClusterState =>
      state.members.filter(_.status == MemberStatus.Up) foreach register
    case MemberUp(m) => register(m)
  }

  // Announce this backend to the frontend actor on a newly discovered node.
  def register(member: Member): Unit =
    if (member.hasRole("frontend"))
      context.actorSelection(RootActorPath(member.address) / "user" / "frontend") !
        BackendRegistration
}
//#backend | ograycode/akka-console | src/main/scala/Sample.scala | Scala | apache-2.0 | 4,132 |
package reopp.common.guardedcommands.dataconnectors
import reopp.common.{Predicate, Utils}
import Utils._
import reopp.common.guardedcommands._
import reopp.common.IntPredicate
/**
* Creates a Filter: a lossy sync that loses data exactly when a predicate does not hold.
*
* Created by jose on 07/06/12.
*/
class GCFilter(a: String, b: String,p: Predicate, positive:Boolean = true) extends GCConnector(List(a,b)) {

//  /**
//   * Build guard (formula) from a Predicate
//   * @param a source end
//   * @param b sink end
//   * @param uid unique channel id
//   * @param p predicate
//   */
//  def this(a: String, b:String, uid: Int, p: Predicate) {
//    this(a,b,uid,Pred(dataVar(a,uid),p))
//  }
//  /**
//   * Build guard (formula) from a Predicate
//   * @param a source end
//   * @param b sink end
//   * @param uid unique channel id
//   * @param p predicate
//   * @param positive if false consider the negation of the predicate
//   */
//  def this(a: String, b:String, uid: Int, p: Predicate, positive: Boolean) {
//    this(a, b, uid, if (positive) Pred(dataVar(a,uid),p)
//                    else Neg(Pred(dataVar(a,uid),p)))
//}

  // Guard over the data flowing through end `a`; negated when `positive`
  // is false. Subclasses override this to change the predicate encoding.
  protected def guard: Guard = if (positive) Pred(a.data,p)
                               else Neg(Pred(a.data,p))

  // Filter semantics as guarded commands:
  //  - flow on `b` requires flow on `a` and copies a's data to b
  //  - flow on `b` implies the predicate holds
  //  - if `a` flows and the predicate holds, `b` must flow (no silent loss)
  def getConstraints = Formula(
    b --> a,
    b --> (b := a), //VarAssgn(dataVar(b,uid),dataVar(a,uid)),
//    bv := av ,
    b --> guard,
    (a /\\ guard) --> b
  )

  // Constructor-time sanity checks on the solver configuration.
  if (!useData) throw new Exception("Filter requires 'useData' option")
  if (useCC3) throw new Exception("CC3 not implemented")
}
//object GCFilter {
//
//}
// Filter whose predicate is supplied as a plain Scala function over the data.
class GCTFilter[A](a: String, b: String, filter: (A) => Boolean)
  extends GCFilter(a, b, Predicate()(filter))
// Filter over integer data: overrides the guard to use IntPred instead of Pred.
class GCIFilter[A](a: String, b: String, p: IntPredicate, positive:Boolean = true)
  extends GCFilter(a, b, p, positive) {
  override def guard = if (positive) IntPred(a.data,p)
                       else Neg(IntPred(a.data,p))
}
// Filter with a fully custom guard built from the data variable of end `a`.
// The parent's predicate is passed as null but is never used, because
// `guard` (the only consumer of `p`) is overridden here.
class GCGenFilter(a: String, b:String, gfunc: Var => Guard)
  extends GCFilter(a,b,null) {
  override def guard: Guard = gfunc.apply( Var(Utils.mkDataVar(a)) )
}
| joseproenca/ip-constraints | code/src/main/scala/reopp/common/guardedcommands/dataconnectors/GCFilter.scala | Scala | mit | 2,137 |
package org.elasticsearch.spark.sql
import scala.collection.Map
import org.apache.commons.logging.Log
import org.apache.commons.logging.LogFactory
import org.apache.spark.Partition
import org.apache.spark.SparkContext
import org.apache.spark.TaskContext
import org.apache.spark.sql.api.java.Row
import org.elasticsearch.hadoop.cfg.Settings
import org.elasticsearch.hadoop.rest.InitializationUtils
import org.elasticsearch.hadoop.rest.RestService.PartitionDefinition
import org.elasticsearch.spark.rdd.AbstractEsRDD
import org.elasticsearch.spark.rdd.AbstractEsRDDIterator
import org.elasticsearch.spark.rdd.EsPartition
// see the comments in ScalaEsRowRDD — this is the Java-API counterpart,
// producing Spark SQL Rows from Elasticsearch partitions.
private[spark] class JavaEsRowRDD(
  @transient sc: SparkContext,
  params: Map[String, String] = Map.empty,
  schema: MappingUtils.Schema)
  extends AbstractEsRDD[Row](sc, params) {

  // One iterator per Elasticsearch shard/partition.
  override def compute(split: Partition, context: TaskContext): JavaEsRowRDDIterator = {
    new JavaEsRowRDDIterator(context, split.asInstanceOf[EsPartition].esPartition, schema)
  }
}
private[spark] class JavaEsRowRDDIterator(
  context: TaskContext,
  partition: PartitionDefinition,
  schema: MappingUtils.Schema)
  extends AbstractEsRDDIterator[Row](context, partition) {

  override def getLogger() = LogFactory.getLog(classOf[JavaEsRowRDD])

  override def initReader(settings: Settings, log: Log) = {
    InitializationUtils.setValueReaderIfNotSet(settings, classOf[JavaEsRowValueReader], log)
    // parse the structure and save the order (requested by Spark) for each Row (root and nested)
    // since the data returned from Elastic is likely to not be in the same order
    MappingUtils.setRowOrder(settings, schema.struct)
  }

  override def createValue(value: Array[Object]): Row = {
    // drop the ID
    // NOTE(review): value(0) appears to carry the document id and value(1)
    // the row itself — confirm against AbstractEsRDDIterator's contract.
    value(1).asInstanceOf[JavaEsRow]
  }
}
} | huangll/elasticsearch-hadoop | spark/sql-12/src/main/scala/org/elasticsearch/spark/sql/JavaEsRowRDD.scala | Scala | apache-2.0 | 1,805 |
package com.tajpure.scheme.compiler.value
import com.tajpure.scheme.compiler.ast.Node
import com.tajpure.scheme.compiler.exception.RunTimeException
/**
 * Runtime value wrapping a 64-bit integer.
 *
 * Each operator dynamically dispatches on the right-hand operand's runtime
 * type (the interpreter is dynamically typed); any unsupported operand type
 * raises a RunTimeException("type error"). The former isInstanceOf /
 * asInstanceOf chains are replaced by equivalent pattern matches with the
 * same case order and identical behavior.
 */
class IntValue(val value: Long) extends Value {

  /** Addition; also supports string/char concatenation like the original. */
  override def +(that: Value): Value = that match {
    case i: IntValue    => new IntValue(value + i.value)
    case f: FloatValue  => new FloatValue(value + f.value)
    case c: CharValue   => new CharValue(value + c.value)
    case s: StringValue => new StringValue(value + s.value)
    case _              => throw new RunTimeException("type error")
  }

  override def -(that: Value): Value = that match {
    case i: IntValue   => new IntValue(value - i.value)
    case f: FloatValue => new FloatValue(value - f.value)
    case _             => throw new RunTimeException("type error")
  }

  override def *(that: Value): Value = that match {
    case i: IntValue   => new IntValue(value * i.value)
    case f: FloatValue => new FloatValue(value * f.value)
    case _             => throw new RunTimeException("type error")
  }

  /** Division; integer division for Int operands (may throw on zero). */
  override def /(that: Value): Value = that match {
    case i: IntValue   => new IntValue(value / i.value)
    case f: FloatValue => new FloatValue(value / f.value)
    case _             => throw new RunTimeException("type error")
  }

  // Comparisons yield BoolValue; chars compare via their first character,
  // matching the original asInstanceOf-based implementation.

  override def >(that: Value): Value = that match {
    case i: IntValue   => new BoolValue(value > i.value)
    case f: FloatValue => new BoolValue(value > f.value)
    case c: CharValue  => new BoolValue(value > c.value.charAt(0))
    case _             => throw new RunTimeException("type error")
  }

  override def <(that: Value): Value = that match {
    case i: IntValue   => new BoolValue(value < i.value)
    case f: FloatValue => new BoolValue(value < f.value)
    case c: CharValue  => new BoolValue(value < c.value.charAt(0))
    case _             => throw new RunTimeException("type error")
  }

  override def <=(that: Value): Value = that match {
    case i: IntValue   => new BoolValue(value <= i.value)
    case f: FloatValue => new BoolValue(value <= f.value)
    case c: CharValue  => new BoolValue(value <= c.value.charAt(0))
    case _             => throw new RunTimeException("type error")
  }

  override def >=(that: Value): Value = that match {
    case i: IntValue   => new BoolValue(value >= i.value)
    case f: FloatValue => new BoolValue(value >= f.value)
    case c: CharValue  => new BoolValue(value >= c.value.charAt(0))
    case _             => throw new RunTimeException("type error")
  }

  override def ==(that: Value): Value = that match {
    case i: IntValue   => new BoolValue(value == i.value)
    case f: FloatValue => new BoolValue(value == f.value)
    case c: CharValue  => new BoolValue(value == c.value.charAt(0))
    case _             => throw new RunTimeException("type error")
  }

  override def toString(): String = value.toString()
}
// Companion object, currently unused (example constant left commented out).
object IntValue {
//  val intVal = new IntValue(10)
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.utils
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.table.api.TableSchema
import org.apache.flink.table.descriptors.ConnectorDescriptorValidator.{CONNECTOR_PROPERTY_VERSION, CONNECTOR_TYPE}
import org.apache.flink.table.descriptors.DescriptorProperties._
import org.apache.flink.table.descriptors.Rowtime._
import org.apache.flink.table.descriptors.Schema._
import org.apache.flink.table.descriptors.{DescriptorProperties, SchemaValidator}
import org.apache.flink.table.factories.{StreamTableSinkFactory, StreamTableSourceFactory, TableFactory}
import org.apache.flink.table.sinks.StreamTableSink
import org.apache.flink.table.sources.StreamTableSource
import org.apache.flink.table.types.logical.LogicalTypeRoot
import org.apache.flink.table.types.utils.TypeConversions.fromDataTypeToLegacyInfo
import org.apache.flink.types.Row
import java.sql.Timestamp
import java.util
/**
* Factory for creating stream table sources and sinks.
*
* See [[MemoryTableSourceSinkUtil.UnsafeMemoryTableSource]] and
* [[MemoryTableSourceSinkUtil.UnsafeMemoryAppendTableSink]].
*
* @param terminationCount determines when to shutdown the streaming source function
*/
class InMemoryTableFactory(terminationCount: Int)
  extends TableFactory
  with StreamTableSourceFactory[Row]
  with StreamTableSinkFactory[Row] {

  /** Builds the in-memory append sink described by `properties`. */
  override def createStreamTableSink(
      properties: util.Map[String, String])
    : StreamTableSink[Row] = {
    val params: DescriptorProperties = new DescriptorProperties(true)
    params.putProperties(properties)

    // validate
    new SchemaValidator(true, true, true).validate(params)

    val tableSchema = SchemaValidator.deriveTableSinkSchema(params)

    val fieldTypes = tableSchema.getFieldDataTypes.map(t => {
      if (t.getLogicalType.getTypeRoot == LogicalTypeRoot.TIMESTAMP_WITHOUT_TIME_ZONE) {
        // force to use Timestamp because old planner only support Timestamp
        t.bridgedTo(classOf[Timestamp])
      } else {
        t
      }
    })

    new MemoryTableSourceSinkUtil.UnsafeMemoryAppendTableSink()
      .configure(tableSchema.getFieldNames, fromDataTypeToLegacyInfo(fieldTypes))
      .asInstanceOf[StreamTableSink[Row]]
  }

  /** Builds the in-memory source described by `properties`, including
   *  proctime/rowtime attribute handling. */
  override def createStreamTableSource(
      properties: util.Map[String, String])
    : StreamTableSource[Row] = {
    val params: DescriptorProperties = new DescriptorProperties(true)
    params.putProperties(properties)

    // validate
    new SchemaValidator(true, true, true).validate(params)

    val tableSchema = params.getTableSchema(SCHEMA)

    // proctime
    val proctimeAttributeOpt = SchemaValidator.deriveProctimeAttribute(params)

    val fieldTypes = tableSchema.getFieldDataTypes.map(t => {
      if (t.getLogicalType.getTypeRoot == LogicalTypeRoot.TIMESTAMP_WITHOUT_TIME_ZONE) {
        // force to use Timestamp because old planner only support Timestamp
        t.bridgedTo(classOf[Timestamp])
      } else {
        t
      }
    })

    // the proctime column is synthetic, so exclude it from the physical row type
    val (names, types) = tableSchema.getFieldNames.zip(fromDataTypeToLegacyInfo(fieldTypes))
      .filter(_._1 != proctimeAttributeOpt.get()).unzip

    // rowtime
    val rowtimeDescriptors = SchemaValidator.deriveRowtimeAttributes(params)
    new MemoryTableSourceSinkUtil.UnsafeMemoryTableSource(
      TableSchema.builder().fields(tableSchema.getFieldNames, fieldTypes).build(),
      new RowTypeInfo(types, names),
      rowtimeDescriptors,
      proctimeAttributeOpt.get(),
      terminationCount)
  }

  /** Matched against table properties to select this factory. */
  override def requiredContext(): util.Map[String, String] = {
    val context: util.Map[String, String] = new util.HashMap[String, String]
    context.put(CONNECTOR_TYPE, "memory")
    context.put(CONNECTOR_PROPERTY_VERSION, "1") // backwards compatibility
    context
  }

  /** Whitelist of property keys this factory understands. */
  override def supportedProperties(): util.List[String] = {
    val properties = new util.ArrayList[String]()

    // schema
    properties.add(SCHEMA + ".#." + SCHEMA_TYPE)
    properties.add(SCHEMA + ".#." + SCHEMA_DATA_TYPE)
    properties.add(SCHEMA + ".#." + SCHEMA_NAME)
    properties.add(SCHEMA + ".#." + SCHEMA_FROM)

    // time attributes
    properties.add(SCHEMA + ".#." + SCHEMA_PROCTIME)
    properties.add(SCHEMA + ".#." + ROWTIME_TIMESTAMPS_TYPE)
    properties.add(SCHEMA + ".#." + ROWTIME_TIMESTAMPS_FROM)
    properties.add(SCHEMA + ".#." + ROWTIME_TIMESTAMPS_CLASS)
    properties.add(SCHEMA + ".#." + ROWTIME_TIMESTAMPS_SERIALIZED)
    properties.add(SCHEMA + ".#." + ROWTIME_WATERMARKS_TYPE)
    properties.add(SCHEMA + ".#." + ROWTIME_WATERMARKS_CLASS)
    properties.add(SCHEMA + ".#." + ROWTIME_WATERMARKS_SERIALIZED)
    properties.add(SCHEMA + ".#." + ROWTIME_WATERMARKS_DELAY)

    // watermark
    properties.add(SCHEMA + "." + WATERMARK + ".#." + WATERMARK_ROWTIME);
    properties.add(SCHEMA + "." + WATERMARK + ".#." + WATERMARK_STRATEGY_EXPR);
    properties.add(SCHEMA + "." + WATERMARK + ".#." + WATERMARK_STRATEGY_DATA_TYPE);

    // computed column
    properties.add(SCHEMA + ".#." + TABLE_SCHEMA_EXPR)

    properties
  }
}
| GJL/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/utils/InMemoryTableFactory.scala | Scala | apache-2.0 | 5,862 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2003-2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala
package collection
import generic._
import mutable.{ Builder }
import scala.annotation.migration
import scala.annotation.unchecked.{ uncheckedVariance => uV }
import parallel.ParIterable
import scala.language.higherKinds
/** A template trait for traversable collections of type `Traversable[A]`.
*
* $traversableInfo
* @define mutability
* @define traversableInfo
* This is a base trait of all kinds of $mutability Scala collections. It
* implements the behavior common to all collections, in terms of a method
* `foreach` with signature:
* {{{
* def foreach[U](f: Elem => U): Unit
* }}}
* Collection classes mixing in this trait provide a concrete
* `foreach` method which traverses all the
* elements contained in the collection, applying a given function to each.
* They also need to provide a method `newBuilder`
* which creates a builder for collections of the same kind.
*
* A traversable class might or might not have two properties: strictness
* and orderedness. Neither is represented as a type.
*
* The instances of a strict collection class have all their elements
* computed before they can be used as values. By contrast, instances of
* a non-strict collection class may defer computation of some of their
* elements until after the instance is available as a value.
* A typical example of a non-strict collection class is a
* [[scala.collection.immutable.Stream]].
* A more general class of examples are `TraversableViews`.
*
* If a collection is an instance of an ordered collection class, traversing
* its elements with `foreach` will always visit elements in the
* same order, even for different runs of the program. If the class is not
* ordered, `foreach` can visit elements in different orders for
* different runs (but it will keep the same order in the same run).'
*
* A typical example of a collection class which is not ordered is a
* `HashMap` of objects. The traversal order for hash maps will
* depend on the hash codes of its elements, and these hash codes might
* differ from one run to the next. By contrast, a `LinkedHashMap`
* is ordered because its `foreach` method visits elements in the
* order they were inserted into the `HashMap`.
*
* @author Martin Odersky
* @version 2.8
* @since 2.8
* @tparam A the element type of the collection
* @tparam Repr the type of the actual collection containing the elements.
*
* @define Coll Traversable
* @define coll traversable collection
*/
trait TraversableLike[+A, +Repr] extends Any
                                    with HasNewBuilder[A, Repr]
                                    with FilterMonadic[A, Repr]
                                    with TraversableOnce[A]
                                    with GenTraversableLike[A, Repr]
                                    with Parallelizable[A, ParIterable[A]]
{
  self =>
  import Traversable.breaks._
  /** The type implementing this traversable */
  protected[this] type Self = Repr
  /** The collection of type $coll underlying this `TraversableLike` object.
   *  By default this is implemented as the `TraversableLike` object itself,
   *  but this can be overridden.
   */
  def repr: Repr = this.asInstanceOf[Repr]
  // A Traversable can always be traversed repeatedly, so this is always `true`.
  final def isTraversableAgain: Boolean = true
  /** The underlying collection seen as an instance of `$Coll`.
   *  By default this is implemented as the current collection object itself,
   *  but this can be overridden.
   */
  protected[this] def thisCollection: Traversable[A] = this.asInstanceOf[Traversable[A]]
  /** A conversion from collections of type `Repr` to `$Coll` objects.
   *  By default this is implemented as just a cast, but this can be overridden.
   */
  protected[this] def toCollection(repr: Repr): Traversable[A] = repr.asInstanceOf[Traversable[A]]
  /** Creates a new builder for this collection type.
   */
  protected[this] def newBuilder: Builder[A, Repr]
  // Default combiner used when this collection participates in parallel operations.
  protected[this] def parCombiner = ParIterable.newCombiner[A]
  /** Applies a function `f` to all elements of this $coll.
   *
   *  @param  f   the function that is applied for its side-effect to every element.
   *              The result of function `f` is discarded.
   *
   *  @tparam  U  the type parameter describing the result of function `f`.
   *              This result will always be ignored. Typically `U` is `Unit`,
   *              but this is not necessary.
   *
   *  @usecase def foreach(f: A => Unit): Unit
   *    @inheritdoc
   *
   *    Note: this method underlies the implementation of most other bulk operations.
   *    It's important to implement this method in an efficient way.
   *
   */
  def foreach[U](f: A => U): Unit
  /** Tests whether this $coll is empty.
   *
   *  @return    `true` if the $coll contain no elements, `false` otherwise.
   */
  def isEmpty: Boolean = {
    var result = true
    breakable {
      for (x <- this) {
        result = false
        break
      }
    }
    result
  }
  // By default a $coll is assumed to have a definite (finite) size.
  def hasDefiniteSize = true
  /** Returns a new collection containing the elements of this $coll followed by
   *  the elements of `that`. A size hint is given to the builder when `that` is
   *  an indexed sequence, so the result can be sized up front.
   */
  def ++[B >: A, That](that: GenTraversableOnce[B])(implicit bf: CanBuildFrom[Repr, B, That]): That = {
    val b = bf(repr)
    if (that.isInstanceOf[IndexedSeqLike[_, _]]) b.sizeHint(this, that.seq.size)
    b ++= thisCollection
    b ++= that.seq
    b.result
  }
  /** As with `++`, returns a new collection containing the elements from the left operand followed by the
   *  elements from the right operand.
   *
   *  It differs from `++` in that the right operand determines the type of
   *  the resulting collection rather than the left one.
   *  Mnemonic: the COLon is on the side of the new COLlection type.
   *
   *  @param that   the traversable to append.
   *  @tparam B     the element type of the returned collection.
   *  @tparam That  $thatinfo
   *  @param bf     $bfinfo
   *  @return       a new collection of type `That` which contains all elements
   *                of this $coll followed by all elements of `that`.
   *
   *  @usecase def ++:[B](that: TraversableOnce[B]): $Coll[B]
   *    @inheritdoc
   *
   *    Example:
   *    {{{
   *      scala> val x = List(1)
   *      x: List[Int] = List(1)
   *
   *      scala> val y = LinkedList(2)
   *      y: scala.collection.mutable.LinkedList[Int] = LinkedList(2)
   *
   *      scala> val z = x ++: y
   *      z: scala.collection.mutable.LinkedList[Int] = LinkedList(1, 2)
   *    }}}
   *
   *    @return       a new $coll which contains all elements of this $coll
   *                  followed by all elements of `that`.
   */
  def ++:[B >: A, That](that: TraversableOnce[B])(implicit bf: CanBuildFrom[Repr, B, That]): That = {
    val b = bf(repr)
    if (that.isInstanceOf[IndexedSeqLike[_, _]]) b.sizeHint(this, that.size)
    b ++= that
    b ++= thisCollection
    b.result
  }
  /** As with `++`, returns a new collection containing the elements from the
   *  left operand followed by the elements from the right operand.
   *
   *  It differs from `++` in that the right operand determines the type of
   *  the resulting collection rather than the left one.
   *  Mnemonic: the COLon is on the side of the new COLlection type.
   *
   *  Example:
   *  {{{
   *     scala> val x = List(1)
   *     x: List[Int] = List(1)
   *
   *     scala> val y = LinkedList(2)
   *     y: scala.collection.mutable.LinkedList[Int] = LinkedList(2)
   *
   *     scala> val z = x ++: y
   *     z: scala.collection.mutable.LinkedList[Int] = LinkedList(1, 2)
   *  }}}
   *
   * This overload exists because: for the implementation of `++:` we should
   *  reuse that of `++` because many collections override it with more
   *  efficient versions.
   *
   *  Since `TraversableOnce` has no `++` method, we have to implement that
   *  directly, but `Traversable` and down can use the overload.
   *
   *  @param that   the traversable to append.
   *  @tparam B     the element type of the returned collection.
   *  @tparam That  $thatinfo
   *  @param bf     $bfinfo
   *  @return       a new collection of type `That` which contains all elements
   *                of this $coll followed by all elements of `that`.
   */
  def ++:[B >: A, That](that: Traversable[B])(implicit bf: CanBuildFrom[Repr, B, That]): That =
    (that ++ seq)(breakOut)
  /** Builds a new collection by applying a function to all elements of this $coll.
   *
   *  @param f      the function to apply to each element.
   *  @tparam B     the element type of the returned collection.
   *  @tparam That  $thatinfo
   *  @param bf     $bfinfo
   *  @return       a new collection of type `That` resulting from applying the
   *                given function `f` to each element of this $coll and
   *                collecting the results.
   */
  def map[B, That](f: A => B)(implicit bf: CanBuildFrom[Repr, B, That]): That = {
    def builder = { // extracted to keep method size under 35 bytes, so that it can be JIT-inlined
      val b = bf(repr)
      b.sizeHint(this)
      b
    }
    val b = builder
    for (x <- this) b += f(x)
    b.result
  }
  /** Builds a new collection by applying a function to all elements of this $coll
   *  and using the elements of the resulting collections.
   *
   *  @param f      the function to apply to each element.
   *  @tparam B     the element type of the returned collection.
   *  @tparam That  $thatinfo
   *  @param bf     $bfinfo
   *  @return       a new collection of type `That` resulting from applying the
   *                given collection-valued function `f` to each element of this
   *                $coll and concatenating the results.
   */
  def flatMap[B, That](f: A => GenTraversableOnce[B])(implicit bf: CanBuildFrom[Repr, B, That]): That = {
    def builder = bf(repr) // extracted to keep method size under 35 bytes, so that it can be JIT-inlined
    val b = builder
    for (x <- this) b ++= f(x).seq
    b.result
  }
  // Shared implementation of `filter` and `filterNot`: an element is kept when
  // its predicate result differs from `isFlipped`.
  private[scala] def filterImpl(p: A => Boolean, isFlipped: Boolean): Repr = {
    val b = newBuilder
    for (x <- this)
      if (p(x) != isFlipped) b += x
    b.result
  }
  /** Selects all elements of this $coll which satisfy a predicate.
   *
   *  @param p     the predicate used to test elements.
   *  @return      a new $coll consisting of all elements of this $coll that satisfy the given
   *               predicate `p`. The order of the elements is preserved.
   */
  def filter(p: A => Boolean): Repr = filterImpl(p, isFlipped = false)
  /** Selects all elements of this $coll which do not satisfy a predicate.
   *
   *  @param p     the predicate used to test elements.
   *  @return      a new $coll consisting of all elements of this $coll that do not satisfy the given
   *               predicate `p`. The order of the elements is preserved.
   */
  def filterNot(p: A => Boolean): Repr = filterImpl(p, isFlipped = true)
  /** Builds a new collection by applying a partial function to all elements of
   *  this $coll on which the function is defined.
   *
   *  @param pf     the partial function which filters and maps the $coll.
   *  @tparam B     the element type of the returned collection.
   *  @tparam That  $thatinfo
   *  @param bf     $bfinfo
   *  @return       a new collection of type `That` resulting from applying `pf`
   *                to each element on which it is defined and collecting the
   *                results. The order of the elements is preserved.
   */
  def collect[B, That](pf: PartialFunction[A, B])(implicit bf: CanBuildFrom[Repr, B, That]): That = {
    val b = bf(repr)
    foreach(pf.runWith(b += _))
    b.result
  }
  /** Builds a new collection by applying an option-valued function to all
   *  elements of this $coll on which the function is defined.
   *
   *  @param f      the option-valued function which filters and maps the $coll.
   *  @tparam B     the element type of the returned collection.
   *  @tparam That  $thatinfo
   *  @param bf     $bfinfo
   *  @return       a new collection of type `That` resulting from applying the option-valued function
   *                `f` to each element and collecting all defined results.
   *                The order of the elements is preserved.
   *
   *  @usecase def filterMap[B](f: A => Option[B]): $Coll[B]
   *    @inheritdoc
   *
   *    @param pf     the partial function which filters and maps the $coll.
   *    @return       a new $coll resulting from applying the given option-valued function
   *                  `f` to each element and collecting all defined results.
   *                  The order of the elements is preserved.
  def filterMap[B, That](f: A => Option[B])(implicit bf: CanBuildFrom[Repr, B, That]): That = {
    val b = bf(repr)
    for (x <- this)
      f(x) match {
        case Some(y) => b += y
        case _ =>
      }
    b.result
  }
   */
  /** Partitions this $coll in two ${coll}s according to a predicate.
   *
   *  @param p the predicate on which to partition.
   *  @return  a pair of ${coll}s: the first $coll consists of all elements that
   *           satisfy the predicate `p` and the second $coll consists of all elements
   *           that don't. The relative order of the elements in the resulting ${coll}s
   *           is the same as in the original $coll.
   */
  def partition(p: A => Boolean): (Repr, Repr) = {
    val l, r = newBuilder
    for (x <- this) (if (p(x)) l else r) += x
    (l.result, r.result)
  }
  /** Partitions this $coll into a map of ${coll}s according to a discriminator
   *  function `f`: each key `k` is mapped to a $coll of those elements `x` for
   *  which `f(x)` equals `k`. Elements keep their relative order within each group.
   */
  def groupBy[K](f: A => K): immutable.Map[K, Repr] = {
    val m = mutable.Map.empty[K, Builder[A, Repr]]
    for (elem <- this) {
      val key = f(elem)
      val bldr = m.getOrElseUpdate(key, newBuilder)
      bldr += elem
    }
    val b = immutable.Map.newBuilder[K, Repr]
    for ((k, v) <- m)
      b += ((k, v.result))
    b.result
  }
  /** Tests whether a predicate holds for all elements of this $coll.
   *
   *  @param p    the predicate used to test elements.
   *  @return     `true` if this $coll is empty or `p` holds for all its
   *              elements, `false` otherwise.
   */
  def forall(p: A => Boolean): Boolean = {
    var result = true
    breakable {
      for (x <- this)
        if (!p(x)) { result = false; break }
    }
    result
  }
  /** Tests whether a predicate holds for at least one element of this $coll.
   *
   *  $mayNotTerminateInf
   *
   *  @param   p     the predicate used to test elements.
   *  @return        `false` if this $coll is empty, otherwise `true` if the given predicate `p`
   *                 holds for some of the elements of this $coll, otherwise `false`
   */
  def exists(p: A => Boolean): Boolean = {
    var result = false
    breakable {
      for (x <- this)
        if (p(x)) { result = true; break }
    }
    result
  }
  /** Finds the first element of the $coll satisfying a predicate, if any.
   *
   *  @param p    the predicate used to test elements.
   *  @return     an option value containing the first element that satisfies
   *              `p`, or `None` if none exists.
   */
  def find(p: A => Boolean): Option[A] = {
    var result: Option[A] = None
    breakable {
      for (x <- this)
        if (p(x)) { result = Some(x); break }
    }
    result
  }
  // Computes a prefix scan of the elements; implemented in terms of `scanLeft`.
  def scan[B >: A, That](z: B)(op: (B, B) => B)(implicit cbf: CanBuildFrom[Repr, B, That]): That = scanLeft(z)(op)
  /** Produces a collection containing cumulative results of applying the
   *  operator `op` going left to right, starting with the value `z`.
   */
  def scanLeft[B, That](z: B)(op: (B, A) => B)(implicit bf: CanBuildFrom[Repr, B, That]): That = {
    val b = bf(repr)
    b.sizeHint(this, 1)
    var acc = z
    b += acc
    for (x <- this) { acc = op(acc, x); b += acc }
    b.result
  }
  /** Produces a collection containing cumulative results of applying the
   *  operator `op` going right to left, starting with the value `z`.
   */
  @migration("The behavior of `scanRight` has changed. The previous behavior can be reproduced with scanRight.reverse.", "2.9.0")
  def scanRight[B, That](z: B)(op: (A, B) => B)(implicit bf: CanBuildFrom[Repr, B, That]): That = {
    var scanned = List(z)
    var acc = z
    for (x <- reversed) {
      acc = op(x, acc)
      scanned ::= acc
    }
    val b = bf(repr)
    for (elem <- scanned) b += elem
    b.result
  }
  /** Selects the first element of this $coll.
   *  $orderDependent
   *  @return  the first element of this $coll.
   *  @throws NoSuchElementException if the $coll is empty.
   */
  def head: A = {
    var result: () => A = () => throw new NoSuchElementException
    breakable {
      for (x <- this) {
        result = () => x
        break
      }
    }
    result()
  }
  /** Optionally selects the first element.
   *  $orderDependent
   *  @return  the first element of this $coll if it is nonempty,
   *           `None` if it is empty.
   */
  def headOption: Option[A] = if (isEmpty) None else Some(head)
  /** Selects all elements except the first.
   *  $orderDependent
   *  @return  a $coll consisting of all elements of this $coll
   *           except the first one.
   *  @throws `UnsupportedOperationException` if the $coll is empty.
   */
  override def tail: Repr = {
    if (isEmpty) throw new UnsupportedOperationException("empty.tail")
    drop(1)
  }
  /** Selects the last element.
    * $orderDependent
    * @return The last element of this $coll.
    * @throws NoSuchElementException If the $coll is empty.
    */
  def last: A = {
    var lst = head
    for (x <- this)
      lst = x
    lst
  }
  /** Optionally selects the last element.
   *  $orderDependent
   *  @return  the last element of this $coll$ if it is nonempty,
   *           `None` if it is empty.
   */
  def lastOption: Option[A] = if (isEmpty) None else Some(last)
  /** Selects all elements except the last.
   *  $orderDependent
   *  @return  a $coll consisting of all elements of this $coll
   *           except the last one.
   *  @throws UnsupportedOperationException if the $coll is empty.
   */
  def init: Repr = {
    if (isEmpty) throw new UnsupportedOperationException("empty.init")
    var lst = head
    var follow = false
    val b = newBuilder
    b.sizeHint(this, -1)
    for (x <- this) {
      if (follow) b += lst
      else follow = true
      lst = x
    }
    b.result
  }
  /** Selects the first ''n'' elements of this $coll. */
  def take(n: Int): Repr = slice(0, n)
  /** Selects all elements of this $coll except the first ''n'' ones. */
  def drop(n: Int): Repr =
    if (n <= 0) {
      val b = newBuilder
      b.sizeHint(this)
      (b ++= thisCollection).result
    }
    else sliceWithKnownDelta(n, Int.MaxValue, -n)
  /** Selects an interval of elements: those with index at least `from`
   *  and less than `until`.
   */
  def slice(from: Int, until: Int): Repr =
    sliceWithKnownBound(scala.math.max(from, 0), until)
  // Precondition: from >= 0, until > 0, builder already configured for building.
  private[this] def sliceInternal(from: Int, until: Int, b: Builder[A, Repr]): Repr = {
    var i = 0
    breakable {
      for (x <- this) {
        if (i >= from) b += x
        i += 1
        if (i >= until) break
      }
    }
    b.result
  }
  // Precondition: from >= 0
  private[scala] def sliceWithKnownDelta(from: Int, until: Int, delta: Int): Repr = {
    val b = newBuilder
    if (until <= from) b.result
    else {
      b.sizeHint(this, delta)
      sliceInternal(from, until, b)
    }
  }
  // Precondition: from >= 0
  private[scala] def sliceWithKnownBound(from: Int, until: Int): Repr = {
    val b = newBuilder
    if (until <= from) b.result
    else {
      b.sizeHintBounded(until - from, this)
      sliceInternal(from, until, b)
    }
  }
  /** Takes the longest prefix of elements that satisfy a predicate. */
  def takeWhile(p: A => Boolean): Repr = {
    val b = newBuilder
    breakable {
      for (x <- this) {
        if (!p(x)) break
        b += x
      }
    }
    b.result
  }
  /** Drops the longest prefix of elements that satisfy a predicate. */
  def dropWhile(p: A => Boolean): Repr = {
    val b = newBuilder
    var go = false
    for (x <- this) {
      if (!go && !p(x)) go = true
      if (go) b += x
    }
    b.result
  }
  /** Splits this $coll into a prefix/suffix pair according to a predicate:
   *  the pair `(takeWhile(p), dropWhile(p))`, computed in a single traversal.
   */
  def span(p: A => Boolean): (Repr, Repr) = {
    val l, r = newBuilder
    var toLeft = true
    for (x <- this) {
      toLeft = toLeft && p(x)
      (if (toLeft) l else r) += x
    }
    (l.result, r.result)
  }
  /** Splits this $coll into two at a given position: the pair
   *  `(take(n), drop(n))`, computed in a single traversal.
   */
  def splitAt(n: Int): (Repr, Repr) = {
    val l, r = newBuilder
    l.sizeHintBounded(n, this)
    if (n >= 0) r.sizeHint(this, -n)
    var i = 0
    for (x <- this) {
      (if (i < n) l else r) += x
      i += 1
    }
    (l.result, r.result)
  }
  /** Iterates over the tails of this $coll. The first value will be this
   *  $coll and the final one will be an empty $coll, with the intervening
   *  values the results of successive applications of `tail`.
   *
   *  @return   an iterator over all the tails of this $coll
   *  @example  `List(1,2,3).tails = Iterator(List(1,2,3), List(2,3), List(3), Nil)`
   */
  def tails: Iterator[Repr] = iterateUntilEmpty(_.tail)
  /** Iterates over the inits of this $coll. The first value will be this
   *  $coll and the final one will be an empty $coll, with the intervening
   *  values the results of successive applications of `init`.
   *
   *  @return  an iterator over all the inits of this $coll
   *  @example  `List(1,2,3).inits = Iterator(List(1,2,3), List(1,2), List(1), Nil)`
   */
  def inits: Iterator[Repr] = iterateUntilEmpty(_.init)
  /** Copies at most `len` elements of this $coll to an array, starting at
   *  index `start`; copying also stops when the end of the array is reached.
   */
  def copyToArray[B >: A](xs: Array[B], start: Int, len: Int) {
    var i = start
    val end = (start + len) min xs.length
    breakable {
      for (x <- this) {
        if (i >= end) break
        xs(i) = x
        i += 1
      }
    }
  }
  @deprecatedOverriding("Enforce contract of toTraversable that if it is Traversable it returns itself.", "2.11.0")
  def toTraversable: Traversable[A] = thisCollection
  // Conversions go through an intermediate buffer/stream: a Traversable only
  // supports `foreach`, so an iterator cannot be produced directly.
  def toIterator: Iterator[A] = toStream.iterator
  def toStream: Stream[A] = toBuffer.toStream
  // Override to provide size hint.
  override def to[Col[_]](implicit cbf: CanBuildFrom[Nothing, A, Col[A @uV]]): Col[A @uV] = {
    val b = cbf()
    b.sizeHint(this)
    b ++= thisCollection
    b.result
  }
  /** Converts this $coll to a string.
   *
   *  @return   a string representation of this collection. By default this
   *            string consists of the `stringPrefix` of this $coll, followed
   *            by all elements separated by commas and enclosed in parentheses.
   */
  override def toString = mkString(stringPrefix + "(", ", ", ")")
  /** Defines the prefix of this object's `toString` representation.
   *
   *  @return  a string representation which starts the result of `toString`
   *           applied to this $coll. By default the string prefix is the
   *           simple name of the collection class $coll.
   */
  def stringPrefix: String = {
    /* This method is written in a style that avoids calling `String.split()`
     * as well as methods of java.lang.Character that require the Unicode
     * database information. This is mostly important for Scala.js, so that
     * using the collection library does automatically bring java.util.regex.*
     * and the Unicode database in the generated code.
     *
     * This algorithm has the additional benefit that it won't allocate
     * anything except the result String in the common case, where the class
     * is not an inner class (i.e., when the result contains no '.').
     */
    val fqn = repr.getClass.getName
    var pos: Int = fqn.length - 1
    // Skip trailing $'s
    while (pos != -1 && fqn.charAt(pos) == '$') {
      pos -= 1
    }
    if (pos == -1 || fqn.charAt(pos) == '.') {
      return ""
    }
    var result: String = ""
    while (true) {
      // Invariant: if we enter the loop, there is a non-empty part
      // Look for the beginning of the part, remembering where was the last non-digit
      val partEnd = pos + 1
      while (pos != -1 && fqn.charAt(pos) <= '9' && fqn.charAt(pos) >= '0') {
        pos -= 1
      }
      val lastNonDigit = pos
      while (pos != -1 && fqn.charAt(pos) != '$' && fqn.charAt(pos) != '.') {
        pos -= 1
      }
      val partStart = pos + 1
      // A non-last part which contains only digits marks a method-local part -> drop the prefix
      if (pos == lastNonDigit && partEnd != fqn.length) {
        return result
      }
      // Skip to the next part, and determine whether we are the end
      while (pos != -1 && fqn.charAt(pos) == '$') {
        pos -= 1
      }
      val atEnd = pos == -1 || fqn.charAt(pos) == '.'
      // Handle the actual content of the part (we ignore parts that are likely synthetic)
      def isPartLikelySynthetic = {
        val firstChar = fqn.charAt(partStart)
        (firstChar > 'Z' && firstChar < 0x7f) || (firstChar < 'A')
      }
      if (atEnd || !isPartLikelySynthetic) {
        val part = fqn.substring(partStart, partEnd)
        result = if (result.isEmpty) part else part + '.' + result
        if (atEnd)
          return result
      }
    }
    // dead code
    result
  }
  /** Creates a non-strict view of this $coll.
   *
   *  @return a non-strict view of this $coll.
   */
  def view = new TraversableView[A, Repr] {
    protected lazy val underlying = self.repr
    override def foreach[U](f: A => U) = self foreach f
  }
  /** Creates a non-strict view of a slice of this $coll.
   *
   *  Note: the difference between `view` and `slice` is that `view` produces
   *        a view of the current $coll, whereas `slice` produces a new $coll.
   *
   *  Note: `view(from, to)` is equivalent to `view.slice(from, to)`
   *  $orderDependent
   *
   *  @param from   the index of the first element of the view
   *  @param until  the index of the element following the view
   *  @return a non-strict view of a slice of this $coll, starting at index `from`
   *  and extending up to (but not including) index `until`.
   */
  def view(from: Int, until: Int): TraversableView[A, Repr] = view.slice(from, until)
  /** Creates a non-strict filter of this $coll.
   *
   *  Note: the difference between `c filter p` and `c withFilter p` is that
   *        the former creates a new collection, whereas the latter only
   *        restricts the domain of subsequent `map`, `flatMap`, `foreach`,
   *        and `withFilter` operations.
   *  $orderDependent
   *
   *  @param p   the predicate used to test elements.
   *  @return    an object of class `WithFilter`, which supports
   *             `map`, `flatMap`, `foreach`, and `withFilter` operations.
   *             All these operations apply to those elements of this $coll
   *             which satisfy the predicate `p`.
   */
  def withFilter(p: A => Boolean): FilterMonadic[A, Repr] = new WithFilter(p)
  /** A class supporting filtered operations. Instances of this class are
   *  returned by method `withFilter`.
   */
  class WithFilter(p: A => Boolean) extends FilterMonadic[A, Repr] {
    /** Builds a new collection by applying a function to all elements of the
     *  outer $coll containing this `WithFilter` instance that satisfy predicate `p`.
     *
     *  @param f      the function to apply to each element.
     *  @tparam B     the element type of the returned collection.
     *  @tparam That  $thatinfo
     *  @param bf     $bfinfo
     *  @return       a new collection of type `That` resulting from applying
     *                the given function `f` to each element of the outer $coll
     *                that satisfies predicate `p` and collecting the results.
     *
     *  @usecase def map[B](f: A => B): $Coll[B]
     *    @inheritdoc
     *
     *    @return       a new $coll resulting from applying the given function
     *                  `f` to each element of the outer $coll that satisfies
     *                  predicate `p` and collecting the results.
     */
    def map[B, That](f: A => B)(implicit bf: CanBuildFrom[Repr, B, That]): That = {
      val b = bf(repr)
      for (x <- self)
        if (p(x)) b += f(x)
      b.result
    }
    /** Builds a new collection by applying a function to all elements of the
     *  outer $coll containing this `WithFilter` instance that satisfy
     *  predicate `p` and concatenating the results.
     *
     *  @param f      the function to apply to each element.
     *  @tparam B     the element type of the returned collection.
     *  @tparam That  $thatinfo
     *  @param bf     $bfinfo
     *  @return       a new collection of type `That` resulting from applying
     *                the given collection-valued function `f` to each element
     *                of the outer $coll that satisfies predicate `p` and
     *                concatenating the results.
     *
     *  @usecase def flatMap[B](f: A => TraversableOnce[B]): $Coll[B]
     *    @inheritdoc
     *
     *    The type of the resulting collection will be guided by the static type
     *    of the outer $coll.
     *
     *    @return       a new $coll resulting from applying the given
     *                  collection-valued function `f` to each element of the
     *                  outer $coll that satisfies predicate `p` and concatenating
     *                  the results.
     */
    def flatMap[B, That](f: A => GenTraversableOnce[B])(implicit bf: CanBuildFrom[Repr, B, That]): That = {
      val b = bf(repr)
      for (x <- self)
        if (p(x)) b ++= f(x).seq
      b.result
    }
    /** Applies a function `f` to all elements of the outer $coll containing
     *  this `WithFilter` instance that satisfy predicate `p`.
     *
     *  @param  f   the function that is applied for its side-effect to every element.
     *              The result of function `f` is discarded.
     *
     *  @tparam  U  the type parameter describing the result of function `f`.
     *              This result will always be ignored. Typically `U` is `Unit`,
     *              but this is not necessary.
     *
     *  @usecase def foreach(f: A => Unit): Unit
     *    @inheritdoc
     */
    def foreach[U](f: A => U): Unit =
      for (x <- self)
        if (p(x)) f(x)
    /** Further refines the filter for this $coll.
     *
     *  @param q   the predicate used to test elements.
     *  @return    an object of class `WithFilter`, which supports
     *             `map`, `flatMap`, `foreach`, and `withFilter` operations.
     *             All these operations apply to those elements of this $coll which
     *             satisfy the predicate `q` in addition to the predicate `p`.
     */
    def withFilter(q: A => Boolean): WithFilter =
      new WithFilter(x => p(x) && q(x))
  }
  // A helper for tails and inits.
  private def iterateUntilEmpty(f: Traversable[A @uV] => Traversable[A @uV]): Iterator[Repr] = {
    val it = Iterator.iterate(thisCollection)(f) takeWhile (x => !x.isEmpty)
    it ++ Iterator(Nil) map (x => (newBuilder ++= x).result)
  }
}
| felixmulder/scala | src/library/scala/collection/TraversableLike.scala | Scala | bsd-3-clause | 28,643 |
package org.dsa.test
/**
* Created by xubo on 2016/11/18.
*/
object test1118 {

  /** Prints the integer (ASCII) code points of 'a' (97) and 'A' (65),
   *  followed by an empty line.
   */
  def main(args: Array[String]): Unit = {
    println('a'.toInt)
    println('A'.toInt)
    println()
  }

  /** Returns the larger of `a` and `b`.
   *
   *  The original body was a commented-out Java-style ternary
   *  (`(a > b) ? a : b`), which is not valid Scala; the idiomatic
   *  equivalent is an `if` expression. Previously the method returned
   *  `Unit`, so returning `Int` is backward-compatible for callers
   *  that invoked it in statement position.
   */
  def compute(a: Int, b: Int): Int =
    if (a > b) a else b
}
| xubo245/CloudSW | src/test/scala/org/dsa/test/test1118.scala | Scala | gpl-2.0 | 246 |
package org.aja.tej.examples.sparksql.sql
import org.aja.tej.utils.TejUtils
import org.apache.spark.sql._
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._
import org.apache.spark.{SparkConf, SparkContext}
/*
An UDAF inherits the base class UserDefinedAggregateFunction and implements the following eight methods,
which are defined as:
*/
/**
* Created by mageswaran on 24/1/16.
*/
object UDAF {

  /** A UDAF that accumulates only the sales values strictly greater than $500. */
  private class ScalaAggregateFunction extends UserDefinedAggregateFunction {

    // The UDAF takes a single input argument: the sales amount.
    def inputSchema: StructType = new StructType().add("sales", DoubleType)

    // The intermediate state is a single value: the running sum of qualifying sales.
    def bufferSchema: StructType = new StructType().add("sumLargeSales", DoubleType)

    // The final result is a plain double.
    def dataType: DataType = DoubleType

    // The same inputs always produce the same sum.
    def deterministic: Boolean = true

    // Each partial sum starts at zero.
    def initialize(buffer: MutableAggregationBuffer): Unit =
      buffer.update(0, 0.0)

    // Fold one input row into the buffer, counting only sales above 500.
    def update(buffer: MutableAggregationBuffer, input: Row): Unit =
      if (!input.isNullAt(0)) {
        val sales = input.getDouble(0)
        if (sales > 500.0) buffer.update(0, buffer.getDouble(0) + sales)
      }

    // Two partial sums combine by simple addition.
    def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit =
      buffer1.update(0, buffer1.getDouble(0) + buffer2.getDouble(0))

    // The buffer holds exactly one value, which is the result.
    def evaluate(buffer: Row): Any = buffer.getDouble(0)
  }

  def main(args: Array[String]) {
    val sc = TejUtils.getSparkContext(this.getClass.getSimpleName)
    val sqlContext = new SQLContext(sc)
    import sqlContext.implicits._

    // Sample rows: (id, name, state, purchase amount, discount amount).
    val customerRows = Seq(
      (1, "Mageswaran", "TN", 15000.00, 150),
      (2, "Michael", "JR", 24000.00, 300),
      (3, "Antony Leo", "TN", 10000.00, 50),
      (4, "Arun", "TN", 18000.00, 90),
      (5, "Venkat", "ANDRA", 5000.00, 0),
      (6, "Sathis", "TN", 150000.00, 3000)
    )
    val customerDF = sc.parallelize(customerRows, 4).toDF("id", "name", "state", "purchaseAmt", "discountAmt")
    customerDF.printSchema()

    // Expose the DataFrame to SQL and register the UDAF under the name "mysum".
    customerDF.registerTempTable("customers")
    val sumOver500 = new ScalaAggregateFunction()
    sqlContext.udf.register("mysum", sumOver500)

    // Per-state sum of purchases above $500, via the registered UDAF.
    val sqlResult =
      sqlContext.sql(
        s"""
           | SELECT state, mysum(purchaseAmt) AS bigsales
           | FROM customers
           | GROUP BY state
        """.stripMargin)
    sqlResult.printSchema()
    println()
    sqlResult.show()
  }
}
| Mageswaran1989/aja | src/examples/scala/org/aja/tej/examples/sparksql/sql/UDAF.scala | Scala | apache-2.0 | 4,184 |
/*
* This software is licensed under the GNU Affero General Public License, quoted below.
*
* This file is a part of PowerAPI.
*
* Copyright (C) 2011-2016 Inria, University of Lille 1.
*
* PowerAPI is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* PowerAPI is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with PowerAPI.
*
* If not, please consult http://www.gnu.org/licenses/agpl-3.0.html.
*/
package org.powerapi.core
import Numeric._
import grizzled.math.stats._
package object power {
  /** A unit of power, as declared by [[org.powerapi.core.power.PowerConverter]]. */
  type PowerUnit = org.powerapi.core.power.PowerConverter.PowerUnitVal
  // Convenient aliases for the converter's unit values.
  final val MILLIWATTS = org.powerapi.core.power.PowerConverter.MILLIWATTS
  final val WATTS = org.powerapi.core.power.PowerConverter.WATTS
  final val KILOWATTS = org.powerapi.core.power.PowerConverter.KILOWATTS
  final val MEGAWATTS = org.powerapi.core.power.PowerConverter.MEGAWATTS
  // Aggregations over sequences of Power values. Each one converts to
  // milliwatts, aggregates, and wraps the result back as a milliwatt Power.
  // NOTE(review): MAX/MIN throw on an empty sequence (`max`/`min` of empty);
  // callers are expected to pass non-empty input — confirm.
  /** Maximum of a sequence of powers. */
  def MAX(s: Seq[Power]): Power = s.map(_.toMilliWatts).max.mW
  /** Minimum of a sequence of powers. */
  def MIN(s: Seq[Power]): Power = s.map(_.toMilliWatts).min.mW
  /** Sum of a sequence of powers. */
  def SUM(s : Seq[Power]): Power = s.map(_.toMilliWatts).sum.mW
  /** Arithmetic mean of a sequence of powers (via grizzled-scala's `mean`). */
  def MEAN(s: Seq[Power]): Power = mean(s.map(_.toMilliWatts): _*).mW
  /** Median of a sequence of powers (via grizzled-scala's `median`). */
  def MEDIAN(s: Seq[Power]): Power = median(s.map(_.toMilliWatts): _*).mW
  /** Unit-suffix syntax for `Double` values, e.g. `1.5.W` or `3.0.mW`. */
  implicit final class DoublePower(private val value: Double) extends AnyVal {
    def mW: Power = Power(value, MILLIWATTS)
    def W: Power = Power(value, WATTS)
    def kW: Power = Power(value, KILOWATTS)
    def MW: Power = Power(value, MEGAWATTS)
  }
  /** Unit-suffix syntax for `Long` values, e.g. `5L.kW`. */
  implicit final class LongPower(private val value: Long) extends AnyVal {
    def mW: Power = Power(value.toDouble, MILLIWATTS)
    def W: Power = Power(value.toDouble, WATTS)
    def kW: Power = Power(value.toDouble, KILOWATTS)
    def MW: Power = Power(value.toDouble, MEGAWATTS)
  }
  /** Unit-suffix syntax for `Int` values, e.g. `2.MW`. */
  implicit final class IntPower(private val value: Int) extends AnyVal {
    def mW: Power = Power(value.toDouble, MILLIWATTS)
    def W: Power = Power(value.toDouble, WATTS)
    def kW: Power = Power(value.toDouble, KILOWATTS)
    def MW: Power = Power(value.toDouble, MEGAWATTS)
  }
}
| Spirals-Team/powerapi | powerapi-core/src/main/scala/org/powerapi/core/power/package.scala | Scala | agpl-3.0 | 2,514 |
package scawler
import akka.actor.{ActorRef, ActorSystem}
import akka.util.Timeout
import scala.concurrent.Future
import scala.io.Source
/** A companion object to provide constants and/or factory methods for the DSL. */
private[scawler] object WebCrawlerDSL {
  // Scheme prefix used both to recognise absolute URLs ("http...") and to
  // build one ("http://" + host) from bare host lines in a URL source file.
  val HttpPrefix = "http"
}
/** Provides an easy-to-use mini DSL for crawling and scraping web pages. */
private[scawler] trait WebCrawlerDSL {
  import WebCrawlerDSL._
  import akka.pattern.ask
  import scala.concurrent.duration._

  /** Defines a new crawler with its own `ActorSystem`, passing the scatterer
   *  actor reference to the supplied function.
   */
  def newCrawler(systemName: String)(f: ActorRef => Unit): Unit =
    f(ActorSystem(systemName).actorOf(HtmlScrapeScatterer()))

  /** Dispatches the given `ScrapeRequest` to the recipient actor.
   *
   *  @return a `Future` holding the recipient's eventual reply
   *          (times out after the implicit `timeout`, default 5 seconds).
   */
  def scrape[T <: ScrapeRequest](req: T)(implicit recipient: ActorRef, timeout: Timeout = Timeout(5.seconds)): Future[Any] =
    recipient ? req

  /** Synchronously reads a file of URLs and applies `f` to each.
   *
   *  Lines already starting with "http" are used as-is; bare host lines are
   *  prefixed with "http://". Fix: the underlying file handle was previously
   *  leaked — it is now closed even if `f` throws.
   */
  def forAllURLs(source: String)(f: String => Unit): Unit = {
    val urlSource = Source.fromFile(source)
    try {
      // Every line matches one of the two cases, so `map` replaces the
      // original exhaustive `collect`.
      urlSource.getLines().map {
        case src if src.startsWith(HttpPrefix) => src
        case src => HttpPrefix + "://" + src
      } foreach f
    } finally urlSource.close()
  }
}
| scawler/scawler-core | src/main/scala/scawler/WebCrawlerDSL.scala | Scala | apache-2.0 | 1,198 |
package com.github.mdr.mash.integration
import com.github.mdr.mash.os.MockFileObject._
import com.github.mdr.mash.os.MockFileSystem
import com.github.mdr.mash.repl.LineBufferTestHelper._
import com.github.mdr.mash.repl.completions.IncrementalCompletionState
import com.github.mdr.mash.utils.Region
/**
 * Integration tests for incremental tab-completion in the REPL: the candidate set
 * is refined as characters are typed, and incremental mode is abandoned when an
 * exact match is typed or no candidate remains.
 */
class IncrementalCompletionIntegrationTest extends AbstractIntegrationTest {
  "Incremental completion" should "stay incremental as you type characters" in {
    val repl = makeRepl()
    repl.input("whe")
    repl.complete().text should equal("where")
    val Some(completionState: IncrementalCompletionState) = repl.state.completionStateOpt
    completionState.replacementLocation should equal(Region(0, "where".length))
    val completions = completionState.completions.map(_.displayText)
    completions should contain("where")
    completions should contain("whereNot")
    repl.input("N")
    // NOTE(review): the pattern match itself asserts the state is still incremental;
    // completionState2 carries no further assertions.
    val Some(completionState2: IncrementalCompletionState) = repl.state.completionStateOpt
  }
  it should "leave incremental completion mode if you type an exact match" in {
    val repl = makeRepl()
    repl.input("where").complete().input("Not")
    repl.state.completionStateOpt should equal(None)
  }
  it should "leave incremental completion mode if no longer have any matches" in {
    val repl = makeRepl()
    repl.input("where").complete().input("a")
    repl.state.completionStateOpt should equal(None)
  }
  it should "should remove added characters by pressing backspace" in {
    val repl = makeRepl()
    repl.input("where").complete().input("N").backspace()
    repl.text should equal("where")
    val completionState = repl.incrementalCompletionState
    val completions = completionState.completions.map(_.displayText)
    completions should contain("where")
    completions should contain("whereNot")
  }
  it should "should allow further tab completions" in {
    val repl = makeRepl()
    repl.input("where").complete().input("N").complete()
    repl.text should equal("whereNot")
    repl.state.completionStateOpt should equal(None)
  }
  it should "leave incremental completion mode if backspace past the first completion" in {
    val repl = makeRepl()
    repl.input("where").complete().input("N").backspace().backspace()
    repl.state.completionStateOpt should equal(None)
  }
  // The ` $ ` in file names must be backtick-escaped inside quoted completions.
  it should "partially complete a common prefix, handling any required escaping" in {
    val repl = makeRepl(
      new MockFileSystem(Directory(
        "foo$bar" -> File(),
        "foo$baz" -> File())))
    repl.input("foo")
    repl.complete()
    repl.lineBuffer should equal(lineBuffer(""""foo`$ba"◀"""))
    val Some(completionState: IncrementalCompletionState) = repl.state.completionStateOpt
    val completions = completionState.completions.map(_.displayText)
    completions should equal(Seq("foo$bar", "foo$baz"))
  }
  it should "partially complete a common fragment" in {
    val repl = makeRepl(
      new MockFileSystem(Directory(
        "---foobar---" -> File(),
        "--goobab--" -> File())))
    repl.input("ob")
    repl.complete()
    repl.lineBuffer should equal(lineBuffer(""""ooba"◀"""))
    val Some(completionState: IncrementalCompletionState) = repl.state.completionStateOpt
    val completions = completionState.completions.map(_.displayText)
    completions should equal(Seq("---foobar---", "--goobab--"))
    completionState.getCommonDisplayFragment.text should equal("ooba")
  }
  it should "handle escaped characters in a common fragment" in {
    val repl = makeRepl(new MockFileSystem(Directory(
      "---foob$ar---" -> File(),
      "--goob$ab--" -> File())))
    repl.input("ob")
    repl.complete()
    repl.lineBuffer should equal(lineBuffer(""""oob`$a"◀"""))
    val Some(completionState: IncrementalCompletionState) = repl.state.completionStateOpt
    val completions = completionState.completions.map(_.displayText)
    completions should equal(Seq("---foob$ar---", "--goob$ab--"))
    completionState.getCommonDisplayFragment.text should equal("oob$a")
  }
  it should "not have a bug after completing a path" in {
    val repl = makeRepl(new MockFileSystem(Directory(
      "etc" -> Directory(
        "foo.conf" -> File(),
        "bar.conf" -> File()))))
    repl.input("/etc/").complete()
    repl.lineBuffer should equal(lineBuffer(""""/etc/▶""""))
  }
  it should "substring complete with path prefixes" in {
    val repl = makeRepl(new MockFileSystem(Directory(
      "etc" -> Directory(
        "foobar" -> File(),
        "gooban" -> File()))))
    repl.input("/etc/ob").complete()
    repl.lineBuffer should equal(lineBuffer(""""/etc/ooba▶""""))
    val Some(completionState: IncrementalCompletionState) = repl.state.completionStateOpt
    completionState.getCommonDisplayFragment.prefix should equal("/etc/")
    completionState.getCommonDisplayFragment.text should equal("ooba")
    val completions = completionState.completions.map(_.displayText)
    completions should equal(Seq("/etc/foobar", "/etc/gooban"))
  }
}
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package com.ksmpartners.ernie.engine.report
import com.ksmpartners.ernie.model.{ ReportType, DefinitionEntity }
import org.joda.time.DateTime
/**
* Immutable wrapper class for sharing DefinitionEntity data
*/
class Definition protected[report] (defEntity: DefinitionEntity) {

  // Parameter names, materialised once; a null list on the entity becomes empty.
  private lazy val paramNames: Array[String] =
    Option(defEntity.getParamNames)
      .fold(Array.empty[String])(_.toArray.map(_.toString))

  // Unsupported output formats, likewise computed once and null-safe.
  private lazy val unsupportedReportTypes: Array[ReportType] =
    Option(defEntity.getUnsupportedReportTypes)
      .fold(Array.empty[ReportType])(_.toArray.map(_.asInstanceOf[ReportType]))

  // Read-only accessors delegating to the wrapped entity.
  def getCreatedDate: DateTime = defEntity.getCreatedDate
  def getDefId: String = defEntity.getDefId
  def getCreatedUser: String = defEntity.getCreatedUser
  def getParamNames: Array[String] = paramNames
  def getDefDescription: String = defEntity.getDefDescription
  def getUnsupportedReportTypes: Array[ReportType] = unsupportedReportTypes

  /**
   * Builds a fresh, mutable DefinitionEntity carrying this definition's state,
   * a representation that is serializable and used for persistence.
   */
  def getEntity: DefinitionEntity = {
    val entity = new DefinitionEntity()
    entity.setCreatedDate(defEntity.getCreatedDate)
    entity.setCreatedUser(defEntity.getCreatedUser)
    entity.setDefDescription(defEntity.getDefDescription)
    entity.setDefId(defEntity.getDefId)
    entity.setParamNames(defEntity.getParamNames)
    entity.setParams(defEntity.getParams)
    entity.setUnsupportedReportTypes(defEntity.getUnsupportedReportTypes)
    entity
  }
}
| ksmpartners/ernie | ernie-engine/src/main/scala/com/ksmpartners/ernie/engine/report/Definition.scala | Scala | apache-2.0 | 2,275 |
package com.amazon.datagen.api.rof
import com.amazon.mqa.datagen.supplier.{AlphanumericStringSupplier, MinMaxIntegerSupplier}
/**
* Created by chengpan on 4/20/15.
*/
object ObjectFactoryTest {
  // Manual smoke test: wires a ReflectionObjectFactory from primitive suppliers and
  // prints generated values. Run as a plain `main`; this is not an automated test.
  def main(args: Array[String]) {
    val factory = ReflectionObjectFactory(
      ReflectionObjectFactory.fromSupplier(new MinMaxIntegerSupplier(1, 11)),
      ReflectionObjectFactory.fromSupplier(new AlphanumericStringSupplier)
    )
    println(factory.create[String])
    println(factory.create[Integer])
    println(factory.create[Int])
    val x = factory.create[TestClassA]
    println(x)
    println(factory.create[List[Int]])
  }
}
| leakingtapan/rof-scala | src/test/scala/com/amazon/datagen/api/rof/ObjectFactoryTest.scala | Scala | apache-2.0 | 643 |
package com.eevolution.context.dictionary.infrastructure.service
import java.util.UUID
import akka.NotUsed
import com.eevolution.context.dictionary.domain._
import com.eevolution.context.dictionary.domain.model.WorkflowNodeTrl
import com.eevolution.utils.PaginatedSequence
import com.lightbend.lagom.scaladsl.api.{Service, ServiceCall}
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: eduardo.moreno@e-evolution.com, http://www.e-evolution.com , http://github.com/e-Evolution
* Created by eduardo.moreno@e-evolution.com , www.e-evolution.com
*/
/**
* Workflow Node Trl Service
*/
trait WorkflowNodeTrlService extends Service with api.service.WorkflowNodeTrlService {
  // Read-only operations exposed as Lagom service calls.
  override def getAll() : ServiceCall[NotUsed, List[WorkflowNodeTrl]]
  override def getById(id: Int): ServiceCall[NotUsed, WorkflowNodeTrl]
  override def getByUUID(uuid :UUID): ServiceCall[NotUsed, WorkflowNodeTrl]
  override def getAllByPage(pageNo: Option[Int], pageSize: Option[Int]): ServiceCall[NotUsed, PaginatedSequence[WorkflowNodeTrl]]
  // Maps the calls above onto REST paths.
  // NOTE(review): "/:id" and "/:uuid" have the identical URL shape, so the second
  // mapping appears unreachable by path alone — confirm the intended routing.
  def descriptor = {
    import Service._
    named("workflowNodeTrl").withCalls(
      pathCall("/api/v1_0_0/workflowNodeTrl/all", getAll _) ,
      pathCall("/api/v1_0_0/workflowNodeTrl/:id", getById _),
      pathCall("/api/v1_0_0/workflowNodeTrl/:uuid", getByUUID _) ,
      pathCall("/api/v1_0_0/workflowNodeTrl?pageNo&pageSize", getAllByPage _)
    )
  }
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/service/WorkflowNodeTrlService.scala | Scala | gpl-3.0 | 2,096 |
package nexus.diff.syntax
import nexus.diff._
/**
* @author Tongfei Chen
*/
trait TupleExprOpsMixin {
  /** Pipe-forward syntax for a pair of expressions: `(x1, x2) |> f`. */
  implicit class Tuple2ExprOps[F[_], X1, X2](val p: (F[X1], F[X2]))(implicit F: Algebra[F]) {
    // Apply a concrete binary function to both components.
    def |>[Y]
    (f: Func2[X1, X2, Y]): F[Y] =
      f(p._1, p._2)
    // Apply a polymorphic binary function; the concrete instance is resolved implicitly.
    def |>[Y]
    (f: PolyFunc2)
    (implicit ff: f.P[X1, X2, Y]): F[Y] =
      f(p._1, p._2)
  }
  /** Pipe-forward syntax for a triple of expressions: `(x1, x2, x3) |> f`. */
  implicit class Tuple3ExprOps[F[_], X1, X2, X3](val t: (F[X1], F[X2], F[X3]))(implicit F: Algebra[F]) {
    // Apply a concrete ternary function to all three components.
    def |>[Y]
    (f: Func3[X1, X2, X3, Y]): F[Y] =
      f(t._1, t._2, t._3)
    // Apply a polymorphic ternary function; the concrete instance is resolved implicitly.
    def |>[Y]
    (f: PolyFunc3)
    (implicit ff: f.P[X1, X2, X3, Y]): F[Y] =
      f(t._1, t._2, t._3)
  }
}
| ctongfei/nexus | diff/src/main/scala/nexus/diff/syntax/TupleExprOps.scala | Scala | mit | 671 |
package org.scalarules.finance.nl
import org.scalatest.{FlatSpec, Matchers}
/**
 * Tests for the `Per` rate type (amount per Termijn/period): arithmetic, period
 * conversion (maandelijks/jaarlijks), functor/monad operations and compile-time
 * safety of the period phantom types.
 */
class PerTest extends FlatSpec with Matchers {
  it should "do arithmetic when Termijn is not specified" in {
    val a: Bedrag Per Termijn = 10.euro per Maand
    val b: Bedrag Per Termijn = 2.euro per Maand
    a + b should be(12.euro per Maand)
  }
  it should "do addition on a number-like value" in {
    val a = 1 per Jaar
    val b = 2 per Jaar
    a + b should be(3 per Jaar)
  }
  it should "throw an exception when Termijnen don't match on addition" in {
    val a: Bedrag Per Termijn = 10.euro per Maand
    val b: Bedrag Per Termijn = 2.euro per Jaar
    intercept[IllegalArgumentException] { a + b }
  }
  it should "do subtraction on a number-like value" in {
    val a = 3.euro per Maand
    val b = 2.euro per Maand
    a - b should be(Bedrag(1) per Maand)
  }
  it should "throw an exception when Termijnen don't match on subtraction" in {
    val a: Bedrag Per Termijn = 10.euro per Maand
    val b: Bedrag Per Termijn = 2.euro per Jaar
    intercept[IllegalArgumentException] { a - b }
  }
  it should "do multiplication on a number-like value" in {
    val a = 2 per Halfjaar
    val b = 3
    a * b should be(6 per Halfjaar)
  }
  it should "do division on a number-like value" in {
    val a = 6.euro per Jaar
    val b = 3
    a / b should be(2.euro per Jaar)
  }
  // Compile-time safety checks: illegal combinations must not type-check.
  it should "not do arithmetic on a non-number" in {
    """("hello" per Maand) + ("world" per Maand)""" shouldNot typeCheck
  }
  it should "not mix Periodes" in {
    "val x: BigDecimal Per Jaar = 10 per Jaar" should compile
    "val x: BigDecimal Per Jaar = 10 per Maand" shouldNot typeCheck
  }
  it should "not mix specified and non-specified Periodes" in {
    "val x: BigDecimal Per Jaar = 10" shouldNot typeCheck
    "val x: BigDecimal = 10 per Jaar" shouldNot typeCheck
  }
  // Conversions to a monthly rate.
  it should "be convertable from Per Maand to Per Maand" in {
    (1 per Maand).maandelijks should be(1 per Maand)
    (3 per Maand).maandelijks should be(3 per Maand)
  }
  it should "be convertable from Per Kwartaal to Per Maand" in {
    (3 per Kwartaal).maandelijks should be(1 per Maand)
    (7 per Kwartaal).maandelijks should be((BigDecimal(7) / BigDecimal(3)) per Maand)
  }
  it should "be convertable from Per Halfjaar to Per Maand" in {
    (12 per Halfjaar).maandelijks should be(2 per Maand)
    (9 per Halfjaar).maandelijks should be(BigDecimal("1.5") per Maand)
  }
  it should "be convertable from Per Jaar to Per Maand" in {
    (12 per Jaar).maandelijks should be(1 per Maand)
    (13 per Jaar).maandelijks should be((BigDecimal(13) / BigDecimal(12)) per Maand)
  }
  // Conversions to a yearly rate.
  it should "be convertable from Per Maand to Per Jaar" in {
    (2 per Maand).jaarlijks should be(24 per Jaar)
  }
  it should "be convertable from Per Kwartaal to Per Jaar" in {
    (3 per Kwartaal).jaarlijks should be(12 per Jaar)
  }
  it should "be convertable from Per Halfjaar to Per Jaar" in {
    (11 per Halfjaar).jaarlijks should be(22 per Jaar)
  }
  it should "be convertable from Per Jaar to Per Jaar" in {
    (24 per Jaar).jaarlijks should be(24 per Jaar)
  }
  // Functor / monad behaviour of Per.
  it should "map over its contents, retaining the same type" in {
    (1 per Jaar) map (_ + 2) should be(3 per Jaar)
    ("123.456".euro per Maand) map (_.afgekaptOpEuros) should be("123".euro per Maand)
  }
  it should "map over its contents, changing to a different type" in {
    (1 per Maand) map (_.toString()) should be("1" per Maand)
    (10.procent per Jaar) map (_ * 10.euro) should be(1.euro per Jaar)
  }
  it should "flatMap over its contents, retaining the same type" in {
    (1 per Jaar) flatMap (_ + 2 per Jaar) should be(3 per Jaar)
    ("123.456".euro per Maand) flatMap (_.afgekaptOpEuros per Maand) should be("123".euro per Maand)
  }
  it should "flatMap over its contents, changing to a different type" in {
    (1 per Maand) flatMap (_.toString per Maand) should be("1" per Maand)
    (10.procent per Jaar) flatMap (_ * 10.euro per Jaar) should be(1.euro per Jaar)
  }
  it should "be possible to create a for comprehension using Pers" in {
    val xpm = 10 per Maand
    val ypm = 12 per Maand
    val result = for {
      x <- xpm
      y <- ypm
    } yield x + y
    result should be(22 per Maand)
  }
  it should "toString" in {
    (1 per Maand).toString should be(s"1 per ${Maand.toString}")
  }
}
/**
 * Tests for the implicit `Ordering`/`Numeric` instances of `Per`, including the
 * runtime failure when comparing rates with mismatching Termijnen.
 */
class PerImplicitsTest extends FlatSpec with Matchers {
  it should "work with boolean operators from Ordered trait" in {
    val a = Bedrag(10) per Kwartaal
    val b = Bedrag(2) per Kwartaal
    a > b should be(true)
    a < b should be(false)
  }
  it should "work with boolean operators from Ordered trait on types that don't directly extend Ordering" in {
    val a = 10 per Jaar
    val b = 2 per Jaar
    a > b should be(true)
    a < b should be(false)
  }
  it should "be Orderable for BigDecimal" in {
    val list = List(3 per Kwartaal, 2 per Kwartaal, 4 per Kwartaal)
    list.sorted should be(List(2 per Kwartaal, 3 per Kwartaal, 4 per Kwartaal))
  }
  it should "be Numeric for BigDecimal" in {
    val list = List(3 per Kwartaal, 2 per Kwartaal, 4 per Kwartaal)
    list.sum should be(9 per Kwartaal)
  }
  it should "be Orderable for Bedrag" in {
    val list = List(3.euro per Maand, 2.euro per Maand, 4.euro per Maand)
    list.sorted should be(List(2.euro per Maand, 3.euro per Maand, 4.euro per Maand))
  }
  it should "be Numeric for Bedrag" in {
    val list = List(3.euro per Maand, 2.euro per Maand, 4.euro per Maand)
    list.sum should be(9.euro per Maand)
  }
  it should "work with boolean operators from Ordered trait when Termijn is not specified" in {
    val a: Bedrag Per Termijn = 10.euro per Maand
    val b: Bedrag Per Termijn = 2.euro per Maand
    a > b should be(true)
  }
  // Comparing across different Termijnen is a programming error at runtime.
  it should "throw an exception when Termijnen don't match on boolean operations" in {
    val a: Bedrag Per Termijn = 10.euro per Maand
    val b: Bedrag Per Termijn = 2.euro per Jaar
    intercept[IllegalArgumentException] {
      a > b
    }
  }
}
/**
 * Tests the Numeric instance for `BigDecimal Per <concrete periode>`; multiplication
 * is deliberately unsupported because the resulting unit would be meaningless.
 */
class NumericPerPeriodeTest extends FlatSpec with Matchers {
  val evPeriode = implicitly[Numeric[BigDecimal Per Halfjaar]]
  val xp = 8 per Halfjaar
  val yp = 3 per Halfjaar
  it should "add" in {
    evPeriode.plus(xp, yp) should be(11 per Halfjaar)
  }
  it should "subtract" in {
    evPeriode.minus(xp, yp) should be(5 per Halfjaar)
  }
  it should "refuse to multiply because of the resulting unit" in {
    intercept[IllegalStateException] {
      evPeriode.times(xp, yp)
    }
  }
  it should "negate" in {
    evPeriode.negate(xp) should be(-8 per Halfjaar)
  }
  it should "convert from Int" in {
    evPeriode.fromInt(5) should be(5 per Halfjaar)
  }
  it should "convert to Int" in {
    evPeriode.toInt(xp) should be(8)
  }
  it should "convert to Long" in {
    evPeriode.toLong(xp) should be(8L)
  }
  it should "convert to Float" in {
    evPeriode.toFloat(xp) should be(8.0F)
  }
  it should "convert to Double" in {
    evPeriode.toDouble(xp) should be(8.0D)
  }
  it should "compare" in {
    evPeriode.compare(xp, yp) should be > 0
  }
}
/**
 * Tests the Numeric instance for the abstract `BigDecimal Per Termijn`; unlike the
 * concrete-periode instance, `fromInt` must fail because the Termijn is unknown.
 */
class NumericPerTermijnTest extends FlatSpec with Matchers {
  val evTermijn = implicitly[Numeric[BigDecimal Per Termijn]]
  val xt: BigDecimal Per Termijn = 8 per Halfjaar
  val yt: BigDecimal Per Termijn = 3 per Halfjaar
  it should "add" in {
    evTermijn.plus(xt, yt) should be (11 per Halfjaar)
  }
  it should "subtract" in {
    evTermijn.minus(xt, yt) should be (5 per Halfjaar)
  }
  it should "refuse to multiply because of the resulting unit" in {
    intercept[IllegalStateException] {
      evTermijn.times(xt, yt)
    }
  }
  it should "negate" in {
    evTermijn.negate(xt) should be (-8 per Halfjaar)
  }
  it should "refuse to convert from Int because we don't know the Termijn" in {
    intercept[IllegalStateException] {
      evTermijn.fromInt(5)
    }
  }
  it should "convert to Int" in {
    evTermijn.toInt(xt) should be (8)
  }
  it should "convert to Long" in {
    evTermijn.toLong(xt) should be (8L)
  }
  it should "convert to Float" in {
    evTermijn.toFloat(xt) should be (8.0F)
  }
  it should "convert to Double" in {
    evTermijn.toDouble(xt) should be (8.0D)
  }
  it should "compare" in {
    evTermijn.compare(xt, yt) should be > 0
  }
}
| scala-rules/finance-dsl | src/test/scala/org/scalarules/finance/nl/PerTests.scala | Scala | mit | 8,284 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.metrics
import java.io.{File, FileWriter, PrintWriter}
import scala.collection.mutable.ArrayBuffer
import org.apache.commons.lang3.RandomUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapred.{FileSplit => OldFileSplit, InputSplit => OldInputSplit, JobConf, LineRecordReader => OldLineRecordReader, RecordReader => OldRecordReader, Reporter, TextInputFormat => OldTextInputFormat}
import org.apache.hadoop.mapred.lib.{CombineFileInputFormat => OldCombineFileInputFormat, CombineFileRecordReader => OldCombineFileRecordReader, CombineFileSplit => OldCombineFileSplit}
import org.apache.hadoop.mapreduce.{InputSplit => NewInputSplit, RecordReader => NewRecordReader, TaskAttemptContext}
import org.apache.hadoop.mapreduce.lib.input.{CombineFileInputFormat => NewCombineFileInputFormat, CombineFileRecordReader => NewCombineFileRecordReader, CombineFileSplit => NewCombineFileSplit, FileSplit => NewFileSplit, TextInputFormat => NewTextInputFormat}
import org.apache.hadoop.mapreduce.lib.output.{TextOutputFormat => NewTextOutputFormat}
import org.scalatest.BeforeAndAfter
import org.apache.spark.{SharedSparkContext, SparkFunSuite}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd}
import org.apache.spark.util.Utils
/**
 * Verifies Spark task input/output metrics (bytes and records read/written) across
 * the old and new Hadoop APIs, caching, coalesce, shuffles and combine-file formats.
 * Metrics are collected by registering SparkListeners and summing per-task values.
 */
class InputOutputMetricsSuite extends SparkFunSuite with SharedSparkContext
  with BeforeAndAfter {
  @transient var tmpDir: File = _
  @transient var tmpFile: File = _
  @transient var tmpFilePath: String = _
  @transient val numRecords: Int = 100000
  @transient val numBuckets: Int = 10
  // Writes numRecords random single-digit lines to a fresh temp file before each test.
  before {
    tmpDir = Utils.createTempDir()
    val testTempDir = new File(tmpDir, "test")
    testTempDir.mkdir()
    tmpFile = new File(testTempDir, getClass.getSimpleName + ".txt")
    val pw = new PrintWriter(new FileWriter(tmpFile))
    for (x <- 1 to numRecords) {
      // scalastyle:off println
      pw.println(RandomUtils.nextInt(0, numBuckets))
      // scalastyle:on println
    }
    pw.close()
    // Path to tmpFile
    tmpFilePath = "file://" + tmpFile.getAbsolutePath
  }
  after {
    Utils.deleteRecursively(tmpDir)
  }
  test("input metrics for old hadoop with coalesce") {
    val bytesRead = runAndReturnBytesRead {
      sc.textFile(tmpFilePath, 4).count()
    }
    val bytesRead2 = runAndReturnBytesRead {
      sc.textFile(tmpFilePath, 4).coalesce(2).count()
    }
    assert(bytesRead != 0)
    assert(bytesRead == bytesRead2)
    assert(bytesRead2 >= tmpFile.length())
  }
  test("input metrics with cache and coalesce") {
    // prime the cache manager
    val rdd = sc.textFile(tmpFilePath, 4).cache()
    rdd.collect()
    val bytesRead = runAndReturnBytesRead {
      rdd.count()
    }
    val bytesRead2 = runAndReturnBytesRead {
      rdd.coalesce(4).count()
    }
    // for count and coalesce, the same bytes should be read.
    assert(bytesRead != 0)
    assert(bytesRead2 == bytesRead)
  }
  test("input metrics for new Hadoop API with coalesce") {
    val bytesRead = runAndReturnBytesRead {
      sc.newAPIHadoopFile(tmpFilePath, classOf[NewTextInputFormat], classOf[LongWritable],
        classOf[Text]).count()
    }
    val bytesRead2 = runAndReturnBytesRead {
      sc.newAPIHadoopFile(tmpFilePath, classOf[NewTextInputFormat], classOf[LongWritable],
        classOf[Text]).coalesce(5).count()
    }
    assert(bytesRead != 0)
    assert(bytesRead2 == bytesRead)
    assert(bytesRead >= tmpFile.length())
  }
  test("input metrics when reading text file") {
    val bytesRead = runAndReturnBytesRead {
      sc.textFile(tmpFilePath, 2).count()
    }
    assert(bytesRead >= tmpFile.length())
  }
  test("input metrics on records read - simple") {
    val records = runAndReturnRecordsRead {
      sc.textFile(tmpFilePath, 4).count()
    }
    assert(records == numRecords)
  }
  test("input metrics on records read - more stages") {
    val records = runAndReturnRecordsRead {
      sc.textFile(tmpFilePath, 4)
        .map(key => (key.length, 1))
        .reduceByKey(_ + _)
        .count()
    }
    assert(records == numRecords)
  }
  test("input metrics on records - New Hadoop API") {
    val records = runAndReturnRecordsRead {
      sc.newAPIHadoopFile(tmpFilePath, classOf[NewTextInputFormat], classOf[LongWritable],
        classOf[Text]).count()
    }
    assert(records == numRecords)
  }
  test("input metrics on records read with cache") {
    // prime the cache manager
    val rdd = sc.textFile(tmpFilePath, 4).cache()
    rdd.collect()
    val records = runAndReturnRecordsRead {
      rdd.count()
    }
    assert(records == numRecords)
  }
  /**
   * Tests the metrics from end to end.
   * 1) reading a hadoop file
   * 2) shuffle and writing to a hadoop file.
   * 3) writing to hadoop file.
   */
  test("input read/write and shuffle read/write metrics all line up") {
    var inputRead = 0L
    var outputWritten = 0L
    var shuffleRead = 0L
    var shuffleWritten = 0L
    sc.addSparkListener(new SparkListener() {
      override def onTaskEnd(taskEnd: SparkListenerTaskEnd) {
        val metrics = taskEnd.taskMetrics
        inputRead += metrics.inputMetrics.recordsRead
        outputWritten += metrics.outputMetrics.recordsWritten
        shuffleRead += metrics.shuffleReadMetrics.recordsRead
        shuffleWritten += metrics.shuffleWriteMetrics.recordsWritten
      }
    })
    // Note: this local val intentionally shadows the field tmpFile (output target).
    val tmpFile = new File(tmpDir, getClass.getSimpleName)
    sc.textFile(tmpFilePath, 4)
      .map(key => (key, 1))
      .reduceByKey(_ + _)
      .saveAsTextFile("file://" + tmpFile.getAbsolutePath)
    sc.listenerBus.waitUntilEmpty(500)
    assert(inputRead == numRecords)
    // Only supported on newer Hadoop
    if (SparkHadoopUtil.get.getFSBytesWrittenOnThreadCallback().isDefined) {
      assert(outputWritten == numBuckets)
    }
    assert(shuffleRead == shuffleWritten)
  }
  test("input metrics with interleaved reads") {
    val numPartitions = 2
    val cartVector = 0 to 9
    val cartFile = new File(tmpDir, getClass.getSimpleName + "_cart.txt")
    val cartFilePath = "file://" + cartFile.getAbsolutePath
    // write files to disk so we can read them later.
    sc.parallelize(cartVector).saveAsTextFile(cartFilePath)
    val aRdd = sc.textFile(cartFilePath, numPartitions)
    val tmpRdd = sc.textFile(tmpFilePath, numPartitions)
    val firstSize = runAndReturnBytesRead {
      aRdd.count()
    }
    val secondSize = runAndReturnBytesRead {
      tmpRdd.count()
    }
    val cartesianBytes = runAndReturnBytesRead {
      aRdd.cartesian(tmpRdd).count()
    }
    // Computing the amount of bytes read for a cartesian operation is a little involved.
    // Cartesian interleaves reads between two partitions eg. p1 and p2.
    // Here are the steps:
    //  1) First it creates an iterator for p1
    //  2) Creates an iterator for p2
    //  3) Reads the first element of p1 and then all the elements of p2
    //  4) proceeds to the next element of p1
    //  5) Creates a new iterator for p2
    //  6) rinse and repeat.
    // As a result we read from the second partition n times where n is the number of keys in
    // p1. Thus the math below for the test.
    assert(cartesianBytes != 0)
    assert(cartesianBytes == firstSize * numPartitions + (cartVector.length * secondSize))
  }
  // Helpers: run `job`, then sum the requested per-task metric from listener events.
  private def runAndReturnBytesRead(job: => Unit): Long = {
    runAndReturnMetrics(job, _.taskMetrics.inputMetrics.bytesRead)
  }
  private def runAndReturnRecordsRead(job: => Unit): Long = {
    runAndReturnMetrics(job, _.taskMetrics.inputMetrics.recordsRead)
  }
  private def runAndReturnRecordsWritten(job: => Unit): Long = {
    runAndReturnMetrics(job, _.taskMetrics.outputMetrics.recordsWritten)
  }
  private def runAndReturnMetrics(job: => Unit, collector: (SparkListenerTaskEnd) => Long): Long = {
    val taskMetrics = new ArrayBuffer[Long]()
    // Avoid receiving earlier taskEnd events
    sc.listenerBus.waitUntilEmpty(500)
    sc.addSparkListener(new SparkListener() {
      override def onTaskEnd(taskEnd: SparkListenerTaskEnd) {
        taskMetrics += collector(taskEnd)
      }
    })
    job
    sc.listenerBus.waitUntilEmpty(500)
    taskMetrics.sum
  }
  test("output metrics on records written") {
    // Only supported on newer Hadoop
    if (SparkHadoopUtil.get.getFSBytesWrittenOnThreadCallback().isDefined) {
      val file = new File(tmpDir, getClass.getSimpleName)
      val filePath = "file://" + file.getAbsolutePath
      val records = runAndReturnRecordsWritten {
        sc.parallelize(1 to numRecords).saveAsTextFile(filePath)
      }
      assert(records == numRecords)
    }
  }
  test("output metrics on records written - new Hadoop API") {
    // Only supported on newer Hadoop
    if (SparkHadoopUtil.get.getFSBytesWrittenOnThreadCallback().isDefined) {
      val file = new File(tmpDir, getClass.getSimpleName)
      val filePath = "file://" + file.getAbsolutePath
      val records = runAndReturnRecordsWritten {
        sc.parallelize(1 to numRecords).map(key => (key.toString, key.toString))
          .saveAsNewAPIHadoopFile[NewTextOutputFormat[String, String]](filePath)
      }
      assert(records == numRecords)
    }
  }
  test("output metrics when writing text file") {
    val fs = FileSystem.getLocal(new Configuration())
    val outPath = new Path(fs.getWorkingDirectory, "outdir")
    if (SparkHadoopUtil.get.getFSBytesWrittenOnThreadCallback().isDefined) {
      val taskBytesWritten = new ArrayBuffer[Long]()
      sc.addSparkListener(new SparkListener() {
        override def onTaskEnd(taskEnd: SparkListenerTaskEnd) {
          taskBytesWritten += taskEnd.taskMetrics.outputMetrics.bytesWritten
        }
      })
      val rdd = sc.parallelize(Array("a", "b", "c", "d"), 2)
      try {
        rdd.saveAsTextFile(outPath.toString)
        sc.listenerBus.waitUntilEmpty(500)
        assert(taskBytesWritten.length == 2)
        val outFiles = fs.listStatus(outPath).filter(_.getPath.getName != "_SUCCESS")
        taskBytesWritten.zip(outFiles).foreach { case (bytes, fileStatus) =>
          assert(bytes >= fileStatus.getLen)
        }
      } finally {
        fs.delete(outPath, true)
      }
    }
  }
  test("input metrics with old CombineFileInputFormat") {
    val bytesRead = runAndReturnBytesRead {
      sc.hadoopFile(tmpFilePath, classOf[OldCombineTextInputFormat], classOf[LongWritable],
        classOf[Text], 2).count()
    }
    assert(bytesRead >= tmpFile.length())
  }
  test("input metrics with new CombineFileInputFormat") {
    val bytesRead = runAndReturnBytesRead {
      sc.newAPIHadoopFile(tmpFilePath, classOf[NewCombineTextInputFormat], classOf[LongWritable],
        classOf[Text], new Configuration()).count()
    }
    assert(bytesRead >= tmpFile.length())
  }
}
/**
* Hadoop 2 has a version of this, but we can't use it for backwards compatibility
*/
// Old-API combine-file text input format; wires each split through the wrapper below.
class OldCombineTextInputFormat extends OldCombineFileInputFormat[LongWritable, Text] {
  override def getRecordReader(split: OldInputSplit, conf: JobConf, reporter: Reporter)
  : OldRecordReader[LongWritable, Text] = {
    new OldCombineFileRecordReader[LongWritable, Text](conf,
      split.asInstanceOf[OldCombineFileSplit], reporter, classOf[OldCombineTextRecordReaderWrapper]
      .asInstanceOf[Class[OldRecordReader[LongWritable, Text]]])
  }
}
// Adapts one file (index `idx`) of an old-API combine split to a plain line reader;
// every call is pure delegation to the underlying OldLineRecordReader.
class OldCombineTextRecordReaderWrapper(
  split: OldCombineFileSplit,
  conf: Configuration,
  reporter: Reporter,
  idx: Integer) extends OldRecordReader[LongWritable, Text] {
  val fileSplit = new OldFileSplit(split.getPath(idx),
    split.getOffset(idx),
    split.getLength(idx),
    split.getLocations())
  val delegate: OldLineRecordReader = new OldTextInputFormat().getRecordReader(fileSplit,
    conf.asInstanceOf[JobConf], reporter).asInstanceOf[OldLineRecordReader]
  override def next(key: LongWritable, value: Text): Boolean = delegate.next(key, value)
  override def createKey(): LongWritable = delegate.createKey()
  override def createValue(): Text = delegate.createValue()
  override def getPos(): Long = delegate.getPos
  override def close(): Unit = delegate.close()
  override def getProgress(): Float = delegate.getProgress
}
/**
* Hadoop 2 has a version of this, but we can't use it for backwards compatibility
*/
// New-API combine-file text input format; wires each split through the wrapper below.
class NewCombineTextInputFormat extends NewCombineFileInputFormat[LongWritable, Text] {
  def createRecordReader(split: NewInputSplit, context: TaskAttemptContext)
  : NewRecordReader[LongWritable, Text] = {
    new NewCombineFileRecordReader[LongWritable, Text](split.asInstanceOf[NewCombineFileSplit],
      context, classOf[NewCombineTextRecordReaderWrapper])
  }
}
// Adapts one file (index `idx`) of a new-API combine split to a plain text reader;
// every call is pure delegation to the underlying NewTextInputFormat reader.
class NewCombineTextRecordReaderWrapper(
  split: NewCombineFileSplit,
  context: TaskAttemptContext,
  idx: Integer) extends NewRecordReader[LongWritable, Text] {
  val fileSplit = new NewFileSplit(split.getPath(idx),
    split.getOffset(idx),
    split.getLength(idx),
    split.getLocations())
  val delegate = new NewTextInputFormat().createRecordReader(fileSplit, context)
  override def initialize(split: NewInputSplit, context: TaskAttemptContext): Unit = {
    delegate.initialize(fileSplit, context)
  }
  override def nextKeyValue(): Boolean = delegate.nextKeyValue()
  override def getCurrentKey(): LongWritable = delegate.getCurrentKey
  override def getCurrentValue(): Text = delegate.getCurrentValue
  override def getProgress(): Float = delegate.getProgress
  override def close(): Unit = delegate.close()
}
| Panos-Bletsos/spark-cost-model-optimizer | core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala | Scala | apache-2.0 | 14,397 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.csv
import java.util.Locale
import scala.util.control.Exception.allCatch
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.analysis.TypeCoercion
import org.apache.spark.sql.catalyst.expressions.ExprUtils
import org.apache.spark.sql.catalyst.util.TimestampFormatter
import org.apache.spark.sql.types._
/**
 * Infers a CSV schema from tokenized rows by promoting each column through
 * progressively wider types (Null -> Integer -> Long -> Decimal -> Double ->
 * Timestamp -> Boolean -> String) until every observed value fits. The order
 * of the tryParse* fall-through chain below encodes that promotion lattice.
 */
class CSVInferSchema(val options: CSVOptions) extends Serializable {
  // Parser for the user-configured timestamp format/zone/locale.
  private val timestampParser = TimestampFormatter(
    options.timestampFormat,
    options.zoneId,
    options.locale)
  private val decimalParser = if (options.locale == Locale.US) {
    // Special handling the default locale for backward compatibility
    s: String => new java.math.BigDecimal(s)
  } else {
    ExprUtils.getDecimalParser(options.locale)
  }
  /**
   * Similar to the JSON schema inference
   * 1. Infer type of each row
   * 2. Merge row types to find common type
   * 3. Replace any null types with string type
   */
  def infer(
      tokenRDD: RDD[Array[String]],
      header: Array[String]): StructType = {
    val fields = if (options.inferSchemaFlag) {
      // Per-partition fold with inferRowType, then cross-partition merge.
      val startType: Array[DataType] = Array.fill[DataType](header.length)(NullType)
      val rootTypes: Array[DataType] =
        tokenRDD.aggregate(startType)(inferRowType, mergeRowTypes)
      toStructFields(rootTypes, header)
    } else {
      // By default fields are assumed to be StringType
      header.map(fieldName => StructField(fieldName, StringType, nullable = true))
    }
    StructType(fields)
  }
  // Pair inferred column types with header names; a column that stayed
  // NullType (all nulls/empties) falls back to StringType.
  def toStructFields(
      fieldTypes: Array[DataType],
      header: Array[String]): Array[StructField] = {
    header.zip(fieldTypes).map { case (thisHeader, rootType) =>
      val dType = rootType match {
        case _: NullType => StringType
        case other => other
      }
      StructField(thisHeader, dType, nullable = true)
    }
  }
  // Fold one tokenized row into the running per-column type estimates.
  def inferRowType(rowSoFar: Array[DataType], next: Array[String]): Array[DataType] = {
    var i = 0
    while (i < math.min(rowSoFar.length, next.length)) { // May have columns on right missing.
      rowSoFar(i) = inferField(rowSoFar(i), next(i))
      i+=1
    }
    rowSoFar
  }
  // Column-wise merge of two partitions' estimates; incompatible pairs
  // collapse to NullType (later replaced by StringType in toStructFields).
  def mergeRowTypes(first: Array[DataType], second: Array[DataType]): Array[DataType] = {
    first.zipAll(second, NullType, NullType).map { case (a, b) =>
      compatibleType(a, b).getOrElse(NullType)
    }
  }
  /**
   * Infer type of string field. Given known type Double, and a string "1", there is no
   * point checking if it is an Int, as the final type must be Double or higher.
   */
  def inferField(typeSoFar: DataType, field: String): DataType = {
    if (field == null || field.isEmpty || field == options.nullValue) {
      // Null-like values never narrow or widen the estimate.
      typeSoFar
    } else {
      typeSoFar match {
        case NullType => tryParseInteger(field)
        case IntegerType => tryParseInteger(field)
        case LongType => tryParseLong(field)
        case _: DecimalType =>
          // DecimalTypes have different precisions and scales, so we try to find the common type.
          compatibleType(typeSoFar, tryParseDecimal(field)).getOrElse(StringType)
        case DoubleType => tryParseDouble(field)
        case TimestampType => tryParseTimestamp(field)
        case BooleanType => tryParseBoolean(field)
        case StringType => StringType
        case other: DataType =>
          throw new UnsupportedOperationException(s"Unexpected data type $other")
      }
    }
  }
  // True when the field matches one of the configured NaN/Infinity spellings.
  private def isInfOrNan(field: String): Boolean = {
    field == options.nanValue || field == options.negativeInf || field == options.positiveInf
  }
  private def tryParseInteger(field: String): DataType = {
    if ((allCatch opt field.toInt).isDefined) {
      IntegerType
    } else {
      tryParseLong(field)
    }
  }
  private def tryParseLong(field: String): DataType = {
    if ((allCatch opt field.toLong).isDefined) {
      LongType
    } else {
      tryParseDecimal(field)
    }
  }
  private def tryParseDecimal(field: String): DataType = {
    val decimalTry = allCatch opt {
      // The conversion can fail when the `field` is not a form of number.
      val bigDecimal = decimalParser(field)
      // Because many other formats do not support decimal, it reduces the cases for
      // decimals by disallowing values having scale (eg. `1.1`).
      if (bigDecimal.scale <= 0) {
        // `DecimalType` conversion can fail when
        //   1. The precision is bigger than 38.
        //   2. scale is bigger than precision.
        DecimalType(bigDecimal.precision, bigDecimal.scale)
      } else {
        tryParseDouble(field)
      }
    }
    decimalTry.getOrElse(tryParseDouble(field))
  }
  private def tryParseDouble(field: String): DataType = {
    if ((allCatch opt field.toDouble).isDefined || isInfOrNan(field)) {
      DoubleType
    } else {
      tryParseTimestamp(field)
    }
  }
  private def tryParseTimestamp(field: String): DataType = {
    // This case infers a custom `dataFormat` is set.
    if ((allCatch opt timestampParser.parse(field)).isDefined) {
      TimestampType
    } else {
      tryParseBoolean(field)
    }
  }
  private def tryParseBoolean(field: String): DataType = {
    if ((allCatch opt field.toBoolean).isDefined) {
      BooleanType
    } else {
      stringType()
    }
  }
  // Defining a function to return the StringType constant is necessary in order to work around
  // a Scala compiler issue which leads to runtime incompatibilities with certain Spark versions;
  // see issue #128 for more details.
  private def stringType(): DataType = {
    StringType
  }
  /**
   * Returns the common data type given two input data types so that the return type
   * is compatible with both input data types.
   */
  private def compatibleType(t1: DataType, t2: DataType): Option[DataType] = {
    TypeCoercion.findTightestCommonType(t1, t2).orElse(findCompatibleTypeForCSV(t1, t2))
  }
  /**
   * The following pattern matching represents additional type promotion rules that
   * are CSV specific.
   */
  private val findCompatibleTypeForCSV: (DataType, DataType) => Option[DataType] = {
    case (StringType, t2) => Some(StringType)
    case (t1, StringType) => Some(StringType)
    // These two cases below deal with when `IntegralType` is larger than `DecimalType`.
    case (t1: IntegralType, t2: DecimalType) =>
      compatibleType(DecimalType.forType(t1), t2)
    case (t1: DecimalType, t2: IntegralType) =>
      compatibleType(t1, DecimalType.forType(t2))
    // Double support larger range than fixed decimal, DecimalType.Maximum should be enough
    // in most case, also have better precision.
    case (DoubleType, _: DecimalType) | (_: DecimalType, DoubleType) =>
      Some(DoubleType)
    case (t1: DecimalType, t2: DecimalType) =>
      val scale = math.max(t1.scale, t2.scale)
      val range = math.max(t1.precision - t1.scale, t2.precision - t2.scale)
      if (range + scale > 38) {
        // DecimalType can't support precision > 38
        Some(DoubleType)
      } else {
        Some(DecimalType(range + scale, scale))
      }
    case _ => None
  }
}
| pgandhi999/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/csv/CSVInferSchema.scala | Scala | apache-2.0 | 7,905 |
package util
import org.scalajs.dom
import util.logger.log
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.control.NonFatal
// Helpers for pacing browser-side (Scala.js) futures with setTimeout delays.
object FutureUtils {
  // A future that does not start until the thunk is invoked, so tasks can be
  // launched one at a time instead of all firing eagerly on construction.
  type LazyFuture[T] = () => Future[T]
  // Execute a list of LazyFutures, continuing and ignoring if there is any error.
  // Tasks run strictly one after another, each preceded by an `ms` millisecond
  // delay. With ignoreErrors = true a failed task is logged and skipped;
  // otherwise the first failure fails the returned future. Results keep the
  // original task order.
  def sequenceWithDelay[T](tasks: List[LazyFuture[T]], ms: Int, ignoreErrors: Boolean)(implicit executionContext: ExecutionContext): Future[List[T]] = {
    val p = Promise[List[T]]()
    // Recursive worker: `acc` holds results so far (appended in order; O(n)
    // per append, acceptable for the small task lists used here).
    def exec(acc: List[T], remaining: List[LazyFuture[T]]): Unit = {
      remaining match {
        case head :: tail =>
          delay(ms) { () =>
            head().map { result =>
              exec(acc :+ result, tail)
            }.recover {
              // Guarded case first: skip-and-continue mode.
              case NonFatal(ex) if ignoreErrors =>
                log.info(s"Unable to process, Future Skipped - ${ex.getMessage}")
                exec(acc, tail)
              // Otherwise the whole sequence fails on the first error.
              case NonFatal(ex) =>
                log.info(s"Unable to process, Stopping - ${ex.getMessage}")
                p.failure(ex)
            }
          }
        case Nil => p.success(acc)
      }
    }
    exec(List.empty, tasks)
    p.future
  }
  // task like Image GC, need some delay to keep responsive the ui
  val LongDelay = 200
  val SmallDelay = 20
  // Schedule `task` on the browser event loop after `ms` milliseconds.
  def delay[T](ms: Int)(task: () => Unit) = dom.setTimeout(task, ms)
}
| felixgborrego/simple-docker-ui | src/main/scala/util/FutureUtils.scala | Scala | mit | 1,351 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.security.auth
import kafka.common.{BaseEnum, KafkaException}
import org.apache.kafka.common.acl.AclPermissionType
// Effect of an ACL rule (allow or deny), bridged to the Java client enum
// through `toJava`. Sealed so matches over permission types are exhaustive.
sealed trait PermissionType extends BaseEnum {
  val toJava: AclPermissionType
}
case object Allow extends PermissionType {
  val name = "Allow"
  val toJava = AclPermissionType.ALLOW
}
case object Deny extends PermissionType {
  val name = "Deny"
  val toJava = AclPermissionType.DENY
}
object PermissionType {

  /** Every supported permission type. */
  def values: Seq[PermissionType] = List(Allow, Deny)

  /**
   * Resolves a permission type from its name, case-insensitively.
   * Throws a KafkaException when the name matches no known type.
   */
  def fromString(permissionType: String): PermissionType =
    values
      .find(_.name.equalsIgnoreCase(permissionType))
      .getOrElse {
        throw new KafkaException(permissionType + " not a valid permissionType name. The valid names are " + values.mkString(","))
      }

  /** Maps the Java client's enum onto the broker-side representation. */
  def fromJava(permissionType: AclPermissionType): PermissionType = fromString(permissionType.toString)
}
| ollie314/kafka | core/src/main/scala/kafka/security/auth/PermissionType.scala | Scala | apache-2.0 | 1,708 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.physical.batch
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.plan.nodes.exec.batch.BatchExecOverAggregate
import org.apache.flink.table.planner.plan.nodes.exec.spec.{OverSpec, PartitionSpec}
import org.apache.flink.table.planner.plan.nodes.exec.{ExecNode, InputProperty}
import org.apache.flink.table.planner.plan.utils.OverAggregateUtil
import org.apache.calcite.plan._
import org.apache.calcite.rel._
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.Window
import scala.collection.JavaConversions._
/**
* Batch physical RelNode for sort-based over [[Window]] aggregate.
*/
// Planner node: sort-based OVER-window aggregation in batch mode. All window
// groups and constants come from the logical `logicWindow`; the heavy lifting
// lives in BatchPhysicalOverAggregateBase and the exec node built below.
class BatchPhysicalOverAggregate(
    cluster: RelOptCluster,
    traitSet: RelTraitSet,
    inputRel: RelNode,
    outputRowType: RelDataType,
    inputRowType: RelDataType,
    windowGroups: Seq[Window.Group],
    logicWindow: Window)
  extends BatchPhysicalOverAggregateBase(
    cluster,
    traitSet,
    inputRel,
    outputRowType,
    inputRowType,
    windowGroups,
    logicWindow) {
  // Output row type is pre-computed by the planner, not derived from input.
  override def deriveRowType: RelDataType = outputRowType
  // Standard Calcite copy contract: same configuration, new traits/input.
  override def copy(traitSet: RelTraitSet, inputs: java.util.List[RelNode]): RelNode = {
    new BatchPhysicalOverAggregate(
      cluster,
      traitSet,
      inputs.get(0),
      outputRowType,
      inputRowType,
      windowGroups,
      logicWindow)
  }
  // Translate this planner node into the executable over-aggregate node,
  // packaging partitioning, per-group specs and window constants as an
  // OverSpec.
  override def translateToExecNode(): ExecNode[_] = {
    new BatchExecOverAggregate(
      new OverSpec(
        new PartitionSpec(partitionKeyIndices),
        offsetAndInsensitiveSensitiveGroups.map(OverAggregateUtil.createGroupSpec(_, logicWindow)),
        logicWindow.constants,
        OverAggregateUtil.calcOriginalInputFields(logicWindow)),
      InputProperty.DEFAULT,
      FlinkTypeFactory.toLogicalRowType(getRowType),
      getRelDetailedDescription
    )
  }
}
| StephanEwen/incubator-flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchPhysicalOverAggregate.scala | Scala | apache-2.0 | 2,720 |
/*
* Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common
package enrichments
package registry
// Specs2
import org.specs2.Specification
import org.specs2.scalaz.ValidationMatchers
// Scalaz
import scalaz._
import Scalaz._
/**
* Tests CampaignAttributionEnrichment
*/
// specs2 acceptance spec: each fixture below is a querystring-parameter map,
// and each example checks which parameters extractMarketingFields picks up
// (and in which order of precedence) when building a MarketingCampaign.
class CampaignAttributionEnrichmentSpec extends Specification with ValidationMatchers {
  def is = s2"""
  This is a specification to test the CampaignAttributionEnrichment
  extractMarketingFields should create an empty MarketingCampaign if no campaign fields are specified $e1
  extractMarketingFields should create a MarketingCampaign using the standard Google-style settings   $e2
  extractMarketingFields should create a MarketingCampaign using the standard Omniture settings       $e3
  extractMarketingFields should create a MarketingCampaign using the correct order of precedence      $e4
  extractMarketingFields should create a MarketingCampaign with clickId and network fields            $e5
  """
  // Standard Google Analytics-style utm_* parameters.
  val google_uri = Map(
    "utm_source" -> "GoogleSearch",
    "utm_medium" -> "cpc",
    "utm_term" -> "native american tarot deck",
    "utm_content" -> "39254295088",
    "utm_campaign" -> "uk-tarot--native-american"
  )
  // Omniture-style: only a campaign id.
  val omniture_uri = Map("cid" -> "uk-tarot--native-american")
  // Mix of utm_*, legacy and Omniture params, used to test precedence order.
  val heterogeneous_uri = Map(
    "utm_source" -> "GoogleSearch",
    "source" -> "bad_source",
    "utm_medium" -> "cpc",
    "legacy_term" -> "bad_term",
    "utm_term" -> "native american tarot deck",
    "legacy_campaign" -> "bad_campaign",
    "cid" -> "uk-tarot--native-american"
  )
  // As above, plus a Microsoft click id (msclkid).
  val clickid_uri = Map(
    "utm_source" -> "GoogleSearch",
    "source" -> "bad_source",
    "utm_medium" -> "cpc",
    "legacy_term" -> "bad_term",
    "utm_term" -> "native american tarot deck",
    "legacy_campaign" -> "bad_campaign",
    "cid" -> "uk-tarot--native-american",
    "msclkid" -> "500"
  )
  // No configured field names -> every MarketingCampaign field stays None.
  def e1 = {
    val config = CampaignAttributionEnrichment(
      List(),
      List(),
      List(),
      List(),
      List(),
      List()
    )
    config.extractMarketingFields(google_uri) must beSuccessful(
      MarketingCampaign(None, None, None, None, None, None, None))
  }
  // Google-style configuration picks up all five utm_* fields.
  def e2 = {
    val config = CampaignAttributionEnrichment(
      List("utm_medium"),
      List("utm_source"),
      List("utm_term"),
      List("utm_content"),
      List("utm_campaign"),
      List()
    )
    config.extractMarketingFields(google_uri) must beSuccessful(
      MarketingCampaign(Some("cpc"),
                        Some("GoogleSearch"),
                        Some("native american tarot deck"),
                        Some("39254295088"),
                        Some("uk-tarot--native-american"),
                        None,
                        None))
  }
  // Omniture configuration: only the campaign is populated, from "cid".
  def e3 = {
    val config = CampaignAttributionEnrichment(
      List(),
      List(),
      List(),
      List(),
      List("cid"),
      List()
    )
    config.extractMarketingFields(omniture_uri) must beSuccessful(
      MarketingCampaign(None, None, None, None, Some("uk-tarot--native-american"), None, None))
  }
  // Earlier entries in each configured list win over later ones, so the
  // utm_*/cid values beat the legacy_* and "source" fallbacks.
  def e4 = {
    val config = CampaignAttributionEnrichment(
      List("utm_medium", "medium"),
      List("utm_source", "source"),
      List("utm_term", "legacy_term"),
      List("utm_content"),
      List("utm_campaign", "cid", "legacy_campaign"),
      List()
    )
    config.extractMarketingFields(heterogeneous_uri) must beSuccessful(
      MarketingCampaign(Some("cpc"),
                        Some("GoogleSearch"),
                        Some("native american tarot deck"),
                        None,
                        Some("uk-tarot--native-american"),
                        None,
                        None))
  }
  // Click-id mapping: msclkid=500 yields clickId "500" and network "Microsoft".
  def e5 = {
    val config = CampaignAttributionEnrichment(
      List("utm_medium", "medium"),
      List("utm_source", "source"),
      List("utm_term", "legacy_term"),
      List("utm_content"),
      List("utm_campaign", "cid", "legacy_campaign"),
      List(
        "gclid" -> "Google",
        "msclkid" -> "Microsoft",
        "dclid" -> "DoubleClick"
      )
    )
    config.extractMarketingFields(clickid_uri) must beSuccessful(
      MarketingCampaign(Some("cpc"),
                        Some("GoogleSearch"),
                        Some("native american tarot deck"),
                        None,
                        Some("uk-tarot--native-american"),
                        Some("500"),
                        Some("Microsoft")))
  }
}
| RetentionGrid/snowplow | 3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/enrichments/registry/CampaignAttributionEnrichmentSpec.scala | Scala | apache-2.0 | 5,287 |
/*
* Copyright 2013 Maurício Linhares
*
* Maurício Linhares licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.github.mauricio.async.db.mysql.encoder.auth
import java.nio.charset.Charset
// Registry of the MySQL authentication plugin names this client understands,
// each mapped to the handshake implementation that produces its auth bytes.
object AuthenticationMethod {
  final val Native = "mysql_native_password"
  final val Old = "mysql_old_password"
  final val Availables = Map(
    Native -> MySQLNativePasswordAuthentication,
    Old -> OldPasswordAuthentication
  )
}
// Strategy for encoding a password against the server-provided seed during
// the MySQL handshake.
trait AuthenticationMethod {
  // Returns the bytes to send in the auth response; `password` may be absent.
  def generateAuthentication( charset : Charset, password : Option[String], seed : Array[Byte] ) : Array[Byte]
}
| outbrain/postgresql-async | mysql-async/src/main/scala/com/github/mauricio/async/db/mysql/encoder/auth/AuthenticationMethod.scala | Scala | apache-2.0 | 1,111 |
package org.jetbrains.sbt
package project.structure
import com.intellij.execution.process.{ProcessAdapter, ProcessEvent, ProcessOutputTypes}
import com.intellij.openapi.util.Key
/**
 * Bridges IntelliJ process-output events to a simple `(OutputType, text)`
 * callback.
 *
 * @param listener invoked for every chunk of process output together with the
 *                 stream it came from
 * @author Pavel Fatin
 */
class ListenerAdapter(listener: (OutputType, String) => Unit) extends ProcessAdapter {
  override def onTextAvailable(event: ProcessEvent, outputType: Key[_]): Unit = {
    // Map the IDE stream key onto our output-type ADT. Previously every branch
    // was wrapped in Some(...) and immediately unwrapped with foreach; the
    // Option was dead code, so the listener is now invoked directly. Behavior
    // is unchanged.
    val textType = outputType match {
      case ProcessOutputTypes.STDOUT => OutputType.StdOut
      case ProcessOutputTypes.STDERR => OutputType.StdErr
      case ProcessOutputTypes.SYSTEM => OutputType.MySystem
      case other => OutputType.Other(other)
    }
    listener(textType, event.getText)
  }
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/sbt/project/structure/ListenerAdapter.scala | Scala | apache-2.0 | 748 |
package debop4s.data.orm.jpa.spring
import java.util.Properties
import javax.sql.DataSource
import debop4s.data.orm.DataConst._
import org.hibernate.cfg.AvailableSettings
/**
 * Spring configuration base for using PostgreSQL through JPA: a local
 * PostgreSQL data source (port 5432, UTF-8) plus the PostgreSQL Hibernate
 * dialect layered over the common JPA settings.
 * Created by debop on 2014. 1. 29..
 */
abstract class AbstractJpaPostgreSqlConfiguration extends AbstractJpaConfiguration {
  // NOTE(review): host and credentials are hard-coded (root/root@localhost) —
  // presumably for local development only; confirm before wider use.
  override def dataSource: DataSource = {
    buildDataSource(DRIVER_CLASS_POSTGRESQL,
      s"jdbc:postgresql://localhost:5432/$getDatabaseName?charSet=UTF8",
      "root",
      "root")
  }
  // Overlay the PostgreSQL dialect on top of the inherited JPA properties.
  override def jpaProperties: Properties = {
    val props = super.jpaProperties
    props.put(AvailableSettings.DIALECT, DIALECT_POSTGRESQL)
    props
  }
}
| debop/debop4s | debop4s-data-orm/src/main/scala/debop4s/data/orm/jpa/spring/AbstractJpaPostgreSqlConfiguration.scala | Scala | apache-2.0 | 718 |
package cz.kamenitxan.jakon.core.deploy.entity
import java.time.LocalDateTime
/**
 * A deployment target: its identity, location on disk and the moment it was
 * last deployed. `lastDeployed` is mutable so it can be re-stamped after each
 * successful deploy.
 */
class Server(
    val id: Int,
    val url: String,
    val path: String,
    var lastDeployed: LocalDateTime) {

  override def toString: String =
    "Server(id=" + id + ", url=" + url + ", path=" + path + ", lastDeployed=" + lastDeployed + ")"
}
| kamenitxan/Jakon | modules/backend/src/main/scala/cz/kamenitxan/jakon/core/deploy/entity/Server.scala | Scala | bsd-3-clause | 336 |
package uk.gov.digital.ho.proving.financialstatus.domain
import java.time.{Clock, LocalDate, Period}
/**
 * Calculates a student's "leave to remain" end date: the course end date
 * (or today, for courses that have already ended) plus a wrap-up period
 * whose length depends on the course duration.
 */
object LeaveToRemainCalculator {

  private val ONE_YEAR = 1
  private val SIX_MONTHS = 6

  /**
   * Wrap-up period granted after the course: 4 months for courses of a year
   * or more, 2 months for six months or more, otherwise 1 month for
   * pre-sessional courses and 7 days for everything else.
   */
  private def calcWrapUpPeriod(coursePeriod: Period, preSessional: Boolean) = {
    if (coursePeriod.getYears >= ONE_YEAR) Period.ofMonths(4)
    else if (coursePeriod.getMonths >= SIX_MONTHS) Period.ofMonths(2)
    else if (preSessional) Period.ofMonths(1) else Period.ofDays(7)
  }

  private def calculatePeriod(start: LocalDate, end: LocalDate, inclusive: Boolean = true) = {
    Period.between(start, end.plusDays(if (inclusive) 1 else 0))
  }

  private def calculatePeriodInclusive(start: LocalDate, end: LocalDate) = calculatePeriod(start, end)

  // Currently unused; kept for API symmetry with calculatePeriodInclusive.
  private def calculatePeriodExclusive(start: LocalDate, end: LocalDate) = calculatePeriod(start, end, inclusive = false)

  /**
   * Option-lifted variant: defined only when both course dates are present.
   * Delegates to the non-optional overload (which previously had its logic
   * duplicated here, along with stray debug println output — both removed).
   */
  def calculateLeaveToRemain(clock: Clock,
                             courseStartDate: Option[LocalDate],
                             courseEndDate: Option[LocalDate],
                             originalCourseStartDate: Option[LocalDate],
                             preSessional: Boolean): Option[LocalDate] = {
    for {
      start <- courseStartDate
      end <- courseEndDate
    } yield calculateLeaveToRemain(clock, start, end, originalCourseStartDate, preSessional)
  }

  /**
   * Course end date (or today via `clock`, when the course already ended)
   * plus the wrap-up period. The course length is measured end-date inclusive,
   * from the original course start date when one is supplied.
   */
  def calculateLeaveToRemain(clock: Clock,
                             courseStartDate: LocalDate,
                             courseEndDate: LocalDate,
                             originalCourseStartDate: Option[LocalDate],
                             preSessional: Boolean): LocalDate = {
    val startDate = originalCourseStartDate.getOrElse(courseStartDate)
    val considerationDate = LocalDate.now(clock)
    // Wrap-up is appended to whichever is later: the course end or today.
    val endDate = if (courseEndDate.isBefore(considerationDate)) considerationDate else courseEndDate
    val coursePeriod = calculatePeriodInclusive(startDate, courseEndDate)
    val wrapUpPeriod = calcWrapUpPeriod(coursePeriod, preSessional)
    endDate.plus(wrapUpPeriod)
  }

  /** Course end date plus a fixed, caller-supplied wrap-up period. */
  def calculateFixedLeaveToRemain(courseEndDate: LocalDate, period: Period): LocalDate = {
    courseEndDate.plus(period)
  }
}
| UKHomeOffice/pttg-fs-api | src/main/scala/uk/gov/digital/ho/proving/financialstatus/domain/LeaveToRemainCalculator.scala | Scala | mit | 2,908 |
package colossus.protocols.memcache
import org.scalatest._
import akka.util.ByteString
import colossus.protocols.memcache.MemcacheCommand._
// Verifies that every MemcacheCommand renders the correct memcached text
// protocol, both via toString and via the wire-level ByteString from
// memcacheCommandMsg. Storage commands carry the header
// "<cmd> <key> <flags> <exptime> <bytes>" followed by the payload line.
class MemcacheCommandSuite extends FlatSpec with Matchers {
  "MemcacheCommand" should "format a GET correctly" in {
    val experimental = Get(ByteString("test"))
    experimental.toString() should equal("get test\\r\\n")
    experimental.memcacheCommandMsg() should equal(ByteString("get test\\r\\n"))
  }
  it should "format a GETS correctly" in {
    val experimental = Gets(ByteString("test"))
    experimental.toString() should equal("gets test\\r\\n")
    experimental.memcacheCommandMsg() should equal(ByteString("gets test\\r\\n"))
  }
  // flags=0, exptime=30, bytes=5 ("value").
  it should "format a SET correctly" in {
    val experimental = Set(ByteString("key"), ByteString("value"), 30) // key, value, ttl
    experimental.toString() should equal("set key 0 30 5\\r\\nvalue\\r\\n")
    experimental.memcacheCommandMsg() should equal(ByteString("set key 0 30 5\\r\\nvalue\\r\\n"))
  }
  // CAS adds the unique token after the byte count.
  it should "format a CAS correctly" in {
    val experimental = Cas(ByteString("key"), ByteString("value"), 30, casUniqueMaybe = Some(1337)) // key, value, ttl, cas
    experimental.toString() should equal("cas key 0 30 5 1337\\r\\nvalue\\r\\n")
    experimental.memcacheCommandMsg() should equal(ByteString("cas key 0 30 5 1337\\r\\nvalue\\r\\n"))
  }
  it should "format an ADD correctly" in {
    val experimental = Add(ByteString("key"), ByteString("magic"), 30)
    experimental.toString() should equal("add key 0 30 5\\r\\nmagic\\r\\n")
    experimental.memcacheCommandMsg() should equal(ByteString("add key 0 30 5\\r\\nmagic\\r\\n"))
  }
  it should "format a REPLACE correctly" in {
    val experimental = Replace(ByteString("key"), ByteString("magic"), 30)
    experimental.toString() should equal("replace key 0 30 5\\r\\nmagic\\r\\n")
    experimental.memcacheCommandMsg() should equal(ByteString("replace key 0 30 5\\r\\nmagic\\r\\n"))
  }
  // Append/prepend take no TTL: exptime renders as 0.
  it should "format an APPEND correctly" in {
    val experimental = Append(ByteString("key"), ByteString("magic"))
    experimental.toString() should equal("append key 0 0 5\\r\\nmagic\\r\\n")
    experimental.memcacheCommandMsg() should equal(ByteString("append key 0 0 5\\r\\nmagic\\r\\n"))
  }
  it should "format a PREPEND correctly" in {
    val experimental = Prepend(ByteString("key"), ByteString("magic"))
    experimental.toString() should equal("prepend key 0 0 5\\r\\nmagic\\r\\n")
    experimental.memcacheCommandMsg() should equal(ByteString("prepend key 0 0 5\\r\\nmagic\\r\\n"))
  }
  it should "format DELETE correctly" in {
    val experimental = Delete(ByteString("key"))
    experimental.toString() should equal("delete key\\r\\n")
    experimental.memcacheCommandMsg() should equal(ByteString("delete key\\r\\n"))
  }
  it should "format INCR correctly" in {
    val experimental = Incr(ByteString("key"), 1L)
    experimental.toString() should equal("incr key 1\\r\\n")
    experimental.memcacheCommandMsg() should equal(ByteString("incr key 1\\r\\n"))
  }
  it should "format DECR correctly" in {
    val experimental = Decr(ByteString("key"), 1L)
    experimental.toString() should equal("decr key 1\\r\\n")
    experimental.memcacheCommandMsg() should equal(ByteString("decr key 1\\r\\n"))
  }
  it should "format TOUCH correctly" in {
    val experimental = Touch(ByteString("key"), 30)
    experimental.toString() should equal("touch key 30\\r\\n")
    experimental.memcacheCommandMsg() should equal(ByteString("touch key 30\\r\\n"))
  }
}
| tumblr/colossus | colossus-tests/src/test/scala/colossus/protocols/memcache/MemcacheCommandSpec.scala | Scala | apache-2.0 | 3,457 |
package sativum
import org.joda.time.LocalDate
import peapod.Task
/**
 * A [[peapod.Task]] bound to a calendar date — e.g. one run on a daily basis.
 * The partition date is folded into the task's name and its storage directory,
 * while the version name stays date-independent.
 */
trait DatedTask extends Task[Any] {
  val partition: LocalDate
  override lazy val name: String = s"$baseName/$partition"
  override lazy val versionName = baseName
  override lazy val dir =
    s"${p.path}/$baseName/${storageRecursiveVersion()}/${partition.toString("yyyy/MM/dd")}/"
}
| mindfulmachines/sativum | src/main/scala/sativum/DatedTask.scala | Scala | mit | 476 |
package controllers
import play.api.mvc._
import java.util.Date
import model.EstateProvider
import org.joda.time.DateTime
// Operational endpoints for monitoring: a staleness-based health check and a
// plain-text build manifest.
class Management(
  estateProvider: EstateProvider,
  controllerComponents: ControllerComponents
) extends AbstractController(controllerComponents) {
  // 500 when the estate snapshot is more than five minutes old, 200 otherwise.
  // NOTE(review): a missing lastUpdated (no snapshot loaded yet) currently
  // reports healthy — confirm that is intended.
  def healthcheck = Action {
    estateProvider().lastUpdated match {
      case Some(dt) if dt.isBefore(DateTime.now().minusMinutes(5)) => InternalServerError("Out of date data")
      case _ => Ok("")
    }
  }
  // Build metadata from sbt-buildinfo constants, one "Key: value" line each.
  def manifest() = Action {
    val data = Map(
      "Build" -> BuildInfo.buildNumber,
      "Commit" -> BuildInfo.gitCommitId,
      "Date" -> new Date(BuildInfo.buildTime).toString,
      "Dependencies" -> BuildInfo.libraryDependencies.mkString(", ")
    )
    Ok(data map { case (k, v) => s"$k: $v"} mkString "\\n")
  }
}
| guardian/status-app | app/controllers/Management.scala | Scala | apache-2.0 | 815 |
package main.scala.overlapping.timeSeriesOld.secondOrder.univariate.Procedures
import breeze.linalg._
import main.scala.overlapping.timeSeries._
import main.scala.overlapping.timeSeriesOld.SecondOrderSignature
/**
* Created by Francois Belletti on 7/14/15.
*/
/*
This calibrate one univariate AR model per columns.
Returns an array of calibrated parameters (Coeffs, variance of noise).
Check out Brockwell, Davis, Time Series: Theory and Methods, 1987 (p 234)
TODO: shield procedure against the following edge cases, autoCov.size < 1, autoCov(0) = 0.0
*/
object DurbinLevinson extends Serializable{
  /**
   * Durbin-Levinson recursion: from the autocovariances autoCov(0..h) of a
   * stationary series, computes the order-h AR coefficient vector together
   * with the corresponding innovation (noise) variance.
   */
  def apply(h: Int, autoCov: DenseVector[Double]): SecondOrderSignature ={
    // Order-1 initialisation: phi_11 = gamma(1)/gamma(0) and
    // v_1 = gamma(0) * (1 - phi_11^2).
    var prevPhiEst = DenseVector.zeros[Double](1)
    prevPhiEst(0) = autoCov(1) / autoCov(0)
    var prevVarEst: Double = autoCov(0) * (1.0 - prevPhiEst(0) * prevPhiEst(0))
    var newVarEst: Double = 0.0
    // Step from order m-1 up to order m (see the Brockwell & Davis reference
    // in the header): compute the new reflection coefficient phi_mm, then
    // revise the lower-order coefficients and the variance.
    for(m <- 2 to h){
      val newPhiEst = DenseVector.zeros[Double](m)
      // Reversed autocovariances gamma(m-1)..gamma(1) for the inner product.
      val temp = reverse(autoCov(1 until m))
      newPhiEst(m - 1) = (autoCov(m) - sum(prevPhiEst :* temp)) / prevVarEst
      newPhiEst(0 to (m - 2)) := prevPhiEst - (reverse(prevPhiEst) :* newPhiEst(m - 1))
      newVarEst = prevVarEst * (1.0 - newPhiEst(m - 1) * newPhiEst(m - 1))
      prevPhiEst = newPhiEst
      prevVarEst = newVarEst
    }
    SecondOrderSignature(prevPhiEst, prevVarEst)
  }
}
| bellettif/sparkGeoTS | sparkTS/src/main/scala/overlapping/timeSeriesOld/secondOrder/univariate/Procedures/DurbinLevinson.scala | Scala | bsd-3-clause | 1,471 |
package org.jetbrains.plugins.scala.lang.completion
import com.intellij.codeInsight.completion.{CompletionResultSet, InsertHandler}
import com.intellij.codeInsight.lookup.{AutoCompletionPolicy, LookupElement, LookupElementPresentation, LookupElementRenderer}
import com.intellij.openapi.application.ApplicationManager
import com.intellij.psi.search.searches.ClassInheritorsSearch
import com.intellij.psi.search.{GlobalSearchScope, LocalSearchScope}
import com.intellij.psi.{PsiClass, PsiDocCommentOwner, PsiElement, PsiNamedElement}
import com.intellij.util.Processor
import org.jetbrains.plugins.scala.extensions.{toPsiClassExt, toPsiMemberExt, toPsiModifierListOwnerExt, toPsiNamedElementExt}
import org.jetbrains.plugins.scala.lang.completion.handlers.ScalaConstructorInsertHandler
import org.jetbrains.plugins.scala.lang.completion.lookups.ScalaLookupItem
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScSimpleTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.base.{ScConstructor, ScStableCodeReferenceElement}
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScNewTemplateDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.{ScClassParents, ScExtendsBlock}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScObject, ScTrait}
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.resolve.ResolveUtils
import scala.collection.mutable
import com.intellij.ide.IconDescriptorUpdaters
/**
* @author Alefas
* @since 27.03.12
*/
object ScalaAfterNewCompletionUtil {
  // Matches a reference standing in the constructor position of a `new`
  // expression (new <ref>[...](...)), i.e. where class completions apply.
  lazy val afterNewPattern = ScalaSmartCompletionContributor.superParentsPattern(classOf[ScStableCodeReferenceElement],
    classOf[ScSimpleTypeElement], classOf[ScConstructor], classOf[ScClassParents], classOf[ScExtendsBlock], classOf[ScNewTemplateDefinition])
  // Builds the completion item shown for `clazz` after `new`. The class type
  // (with its type parameters left undefined) is unified against each expected
  // type; on the first success the inferred type arguments are baked into the
  // item, and any parameter the unification could not pin down sets
  // `typeParametersProblem` so explicit type arguments will be requested.
  def getLookupElementFromClass(expectedTypes: Array[ScType], clazz: PsiClass,
                                renamesMap: mutable.HashMap[String, (String, PsiNamedElement)]): LookupElement = {
    // One undefined placeholder per declared type parameter.
    val undefines: Seq[ScUndefinedType] = clazz.getTypeParameters.map(ptp =>
      new ScUndefinedType(new ScTypeParameterType(ptp, ScSubstitutor.empty))
    )
    // Class type with placeholders, used for conformance checking.
    // NOTE(review): only the single-type-parameter case is parameterized here —
    // presumably a deliberate limitation; classes with 2+ parameters fall back
    // to the raw designator type.
    val predefinedType =
      if (clazz.getTypeParameters.length == 1) {
        ScParameterizedType(ScDesignatorType(clazz), undefines)
      }
      else
        ScDesignatorType(clazz)
    // Same type, but with the real type-parameter types (for presentation).
    val noUndefType =
      if (clazz.getTypeParameters.length == 1) {
        ScParameterizedType(ScDesignatorType(clazz), clazz.getTypeParameters.map(ptp =>
          new ScTypeParameterType(ptp, ScSubstitutor.empty)
        ))
      }
      else
        ScDesignatorType(clazz)
    val iterator = expectedTypes.iterator
    while (iterator.hasNext) {
      val typez = iterator.next()
      if (predefinedType.conforms(typez)) {
        val undef = Conformance.undefinedSubst(typez, predefinedType)
        undef.getSubstitutor match {
          case Some(subst) =>
            val lookupElement = getLookupElementFromTypeAndClass(subst.subst(noUndefType), clazz,
              ScSubstitutor.empty, new AfterNewLookupElementRenderer(_, _, _), new ScalaConstructorInsertHandler, renamesMap)
            // Any placeholder still undefined after substitution means the
            // expected type did not determine that parameter.
            for (undefine <- undefines) {
              subst.subst(undefine) match {
                case ScUndefinedType(_) =>
                  lookupElement.typeParametersProblem = true
                case _ =>
              }
            }
            return lookupElement
          case _ =>
        }
      }
    }
    // No expected type matched: present the plain class type; any type
    // parameters at all will need to be supplied explicitly.
    val lookupElement = getLookupElementFromTypeAndClass(noUndefType, clazz, ScSubstitutor.empty,
      new AfterNewLookupElementRenderer(_, _, _), new ScalaConstructorInsertHandler, renamesMap)
    if (undefines.length > 0) {
      lookupElement.typeParametersProblem = true
    }
    lookupElement
  }
class AfterNewLookupElementRenderer(tp: ScType, psiClass: PsiClass,
subst: ScSubstitutor) extends LookupElementRenderer[LookupElement] {
def renderElement(ignore: LookupElement, presentation: LookupElementPresentation) {
var isDeprecated = false
psiClass match {
case doc: PsiDocCommentOwner if doc.isDeprecated => isDeprecated = true
case _ =>
}
var tailText: String = ""
val itemText: String = psiClass.name + (tp match {
case ScParameterizedType(_, tps) =>
tps.map(tp => ScType.presentableText(subst.subst(tp))).mkString("[", ", ", "]")
case _ => ""
})
psiClass match {
case clazz: PsiClass => {
if (psiClass.isInterface || psiClass.isInstanceOf[ScTrait] ||
psiClass.hasModifierPropertyScala("abstract")) {
tailText += " {...}"
}
val location: String = clazz.getPresentation.getLocationString
presentation.setTailText(tailText + " " + location, true)
}
case _ =>
}
presentation.setIcon(IconDescriptorUpdaters.getIcon(psiClass, 0))
presentation.setStrikeout(isDeprecated)
presentation.setItemText(itemText)
}
}
private def getLookupElementFromTypeAndClass(tp: ScType, psiClass: PsiClass, subst: ScSubstitutor,
renderer: (ScType, PsiClass, ScSubstitutor) => LookupElementRenderer[LookupElement],
insertHandler: InsertHandler[LookupElement],
renamesMap: mutable.HashMap[String, (String, PsiNamedElement)]): ScalaLookupItem = {
val name: String = psiClass.name
val isRenamed = renamesMap.filter {
case (aName, (renamed, aClazz)) => aName == name && aClazz == psiClass
}.map(_._2._1).headOption
val lookupElement: ScalaLookupItem = new ScalaLookupItem(psiClass, isRenamed.getOrElse(name)) {
override def renderElement(presentation: LookupElementPresentation) {
renderer(tp, psiClass, subst).renderElement(this, presentation)
isRenamed match {
case Some(name) => presentation.setItemText(name + " <= " + presentation.getItemText)
case _ =>
}
}
}
lookupElement.isRenamed = isRenamed
if (ApplicationManager.getApplication.isUnitTestMode || psiClass.isInterface ||
psiClass.isInstanceOf[ScTrait] || psiClass.hasModifierPropertyScala("abstract"))
lookupElement.setAutoCompletionPolicy(if (ApplicationManager.getApplication.isUnitTestMode) AutoCompletionPolicy.ALWAYS_AUTOCOMPLETE
else AutoCompletionPolicy.NEVER_AUTOCOMPLETE)
val qualName = psiClass.qualifiedName
if (ScalaCodeStyleSettings.getInstance(psiClass.getProject).hasImportWithPrefix(qualName)) {
lookupElement.prefixCompletion = true
}
lookupElement.setInsertHandler(new ScalaConstructorInsertHandler)
tp match {
case ScParameterizedType(_, tps) => lookupElement.typeParameters = tps
case _ =>
}
lookupElement
}
def convertTypeToLookupElement(tp: ScType, place: PsiElement, addedClasses: mutable.HashSet[String],
renderer: (ScType, PsiClass, ScSubstitutor) => LookupElementRenderer[LookupElement],
insertHandler: InsertHandler[LookupElement],
renamesMap: mutable.HashMap[String, (String, PsiNamedElement)]): ScalaLookupItem = {
ScType.extractClassType(tp, Some(place.getProject)) match {
case Some((clazz: PsiClass, subst: ScSubstitutor)) =>
//filter base types (it's important for scala 2.9)
clazz.qualifiedName match {
case "scala.Boolean" | "scala.Int" | "scala.Long" | "scala.Byte" | "scala.Short" | "scala.AnyVal" |
"scala.Char" | "scala.Unit" | "scala.Float" | "scala.Double" | "scala.Any" => return null
case _ =>
}
//todo: filter inner classes smarter (how? don't forget deep inner classes)
if (clazz.containingClass != null && (!clazz.containingClass.isInstanceOf[ScObject] ||
clazz.hasModifierPropertyScala("static"))) return null
if (!ResolveUtils.isAccessible(clazz, place, forCompletion = true)) return null
if (addedClasses.contains(clazz.qualifiedName)) return null
addedClasses += clazz.qualifiedName
getLookupElementFromTypeAndClass(tp, clazz, subst, renderer, insertHandler, renamesMap)
case _ => null
}
}
def collectInheritorsForType(typez: ScType, place: PsiElement, addedClasses: mutable.HashSet[String],
result: CompletionResultSet,
renderer: (ScType, PsiClass, ScSubstitutor) => LookupElementRenderer[LookupElement],
insertHandler: InsertHandler[LookupElement], renamesMap: mutable.HashMap[String, (String, PsiNamedElement)]) {
ScType.extractClassType(typez, Some(place.getProject)) match {
case Some((clazz, subst)) =>
//this change is important for Scala Worksheet/Script classes. Will not find inheritors, due to file copy.
val searchScope =
if (clazz.getUseScope.isInstanceOf[LocalSearchScope]) GlobalSearchScope.allScope(place.getProject)
else clazz.getUseScope
ClassInheritorsSearch.search(clazz, searchScope, true).forEach(new Processor[PsiClass] {
def process(clazz: PsiClass): Boolean = {
if (clazz.name == null || clazz.name == "") return true
val undefines: Seq[ScUndefinedType] = clazz.getTypeParameters.map(ptp =>
new ScUndefinedType(new ScTypeParameterType(ptp, ScSubstitutor.empty))
)
val predefinedType =
if (clazz.getTypeParameters.length >= 1) {
ScParameterizedType(ScDesignatorType(clazz), undefines)
}
else
ScDesignatorType(clazz)
val noUndefType =
if (clazz.getTypeParameters.length >= 1) {
ScParameterizedType(ScDesignatorType(clazz), clazz.getTypeParameters.map(ptp =>
new ScTypeParameterType(ptp, ScSubstitutor.empty)
))
}
else
ScDesignatorType(clazz)
if (!predefinedType.conforms(typez)) return true
val undef = Conformance.undefinedSubst(typez, predefinedType)
undef.getSubstitutor match {
case Some(undefSubst) =>
val lookupElement = convertTypeToLookupElement(undefSubst.subst(noUndefType), place, addedClasses,
renderer, insertHandler, renamesMap)
if (lookupElement != null) {
for (undefine <- undefines) {
undefSubst.subst(undefine) match {
case ScUndefinedType(_) =>
lookupElement.typeParametersProblem = true
case _ =>
}
}
result.addElement(lookupElement)
}
case _ =>
}
true
}
})
case _ =>
}
}
}
| consulo/consulo-scala | src/org/jetbrains/plugins/scala/lang/completion/ScalaAfterNewCompletionUtil.scala | Scala | apache-2.0 | 11,119 |
package com.sksamuel.elastic4s.akka
import java.util.concurrent.TimeUnit
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.settings.ConnectionPoolSettings
import com.typesafe.config.{Config, ConfigFactory}
import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.util.Try
object AkkaHttpClientSettings {

  // Reference defaults for this client; user-supplied config is always
  // merged over these via `withFallback`.
  private def defaultConfig: Config =
    ConfigFactory.load().getConfig("com.sksamuel.elastic4s.akka")

  // Settings built purely from the reference configuration.
  lazy val default: AkkaHttpClientSettings = apply(defaultConfig)

  /**
   * Builds client settings from `config`, falling back to the reference
   * defaults for any missing key.
   *
   * `username` and `password` are optional (absent -> None);
   * `verify-ssl-certificate` is optional and defaults to `true`.
   */
  def apply(config: Config): AkkaHttpClientSettings = {
    val cfg = config.withFallback(defaultConfig)

    val hosts = cfg.getStringList("hosts").asScala.toVector
    // Config.getString throws on a missing path; Try(...).toOption turns
    // that into None (idiomatic replacement for .map(Some(_)).getOrElse(None)).
    val username = Try(cfg.getString("username")).toOption
    val password = Try(cfg.getString("password")).toOption
    val queueSize = cfg.getInt("queue-size")
    val https = cfg.getBoolean("https")
    val verifySslCertificate = Try(cfg.getBoolean("verify-ssl-certificate")).toOption.getOrElse(true)

    // Bounds on how long an unresponsive host stays blacklisted.
    val blacklistMinDuration = Duration(
      cfg.getDuration("blacklist.min-duration", TimeUnit.MILLISECONDS),
      TimeUnit.MILLISECONDS
    )
    val blacklistMaxDuration = Duration(
      cfg.getDuration("blacklist.max-duration", TimeUnit.MILLISECONDS),
      TimeUnit.MILLISECONDS
    )

    val maxRetryTimeout = Duration(
      cfg.getDuration("max-retry-timeout", TimeUnit.MILLISECONDS),
      TimeUnit.MILLISECONDS
    )

    // Akka connection-pool settings parsed from the same config tree,
    // falling back to the full default application config.
    val poolSettings = ConnectionPoolSettings(
      cfg.withFallback(ConfigFactory.load())
    )

    AkkaHttpClientSettings(
      https,
      hosts,
      username,
      password,
      queueSize,
      poolSettings,
      verifySslCertificate,
      blacklistMinDuration,
      blacklistMaxDuration,
      maxRetryTimeout
    )
  }

  /** Settings built from the reference configuration only. */
  def apply(): AkkaHttpClientSettings = {
    default
  }

  /** Default settings, overriding only the target hosts. */
  def apply(hosts: Seq[String]): AkkaHttpClientSettings = {
    apply().copy(hosts = hosts.toVector)
  }
}
/**
 * Immutable configuration for the Akka-based Elasticsearch HTTP client.
 *
 * Field meanings (mirrors the `com.sksamuel.elastic4s.akka` config section):
 * `https` — use TLS for all requests; `hosts` — target host[:port] strings;
 * `username`/`password` — optional basic-auth credentials; `queueSize` —
 * request queue capacity; `poolSettings` — Akka HTTP connection-pool
 * settings; `verifySSLCertificate` — whether to validate server certs;
 * `blacklistMinDuration`/`blacklistMaxDuration` — bounds for host
 * blacklisting; `maxRetryTimeout` — total retry budget; `requestCallback` —
 * hook applied to every outgoing request (defaults to identity).
 */
case class AkkaHttpClientSettings(
    https: Boolean,
    hosts: Vector[String],
    username: Option[String],
    password: Option[String],
    queueSize: Int,
    poolSettings: ConnectionPoolSettings,
    verifySSLCertificate : Boolean,
    blacklistMinDuration: FiniteDuration =
      AkkaHttpClientSettings.default.blacklistMinDuration,
    blacklistMaxDuration: FiniteDuration =
      AkkaHttpClientSettings.default.blacklistMaxDuration,
    maxRetryTimeout: FiniteDuration =
      AkkaHttpClientSettings.default.maxRetryTimeout,
    requestCallback: HttpRequest => HttpRequest = identity
) {
  // True when both parts of the basic-auth credential pair are present.
  def hasCredentialsDefined: Boolean = username.isDefined && password.isDefined
}
| sksamuel/elastic4s | elastic4s-client-akka/src/main/scala/com/sksamuel/elastic4s/akka/AkkaHttpClientSettings.scala | Scala | apache-2.0 | 2,632 |
import scala.reflect.runtime.universe._
import scala.tools.reflect.ToolBox
object Test extends dotty.runtime.LegacyApp {
  // Toolbox used to typecheck the reified trees at runtime.
  private val toolbox = runtimeMirror(getClass.getClassLoader).mkToolBox()

  // Typechecks the expression's tree and prints its raw form including types.
  private def dump(expr: Expr[Any]): Unit =
    println(showRaw(toolbox.typecheck(expr.tree), printTypes = true))

  dump(reify(new collection.immutable.HashMap[String, String]))
  dump(reify(new collection.mutable.HashMap[String, String]))
}
| yusuke2255/dotty | tests/disabled/macro/run/showraw_tree_types_typed.scala | Scala | bsd-3-clause | 452 |
package com.github.vitalsoftware.scalaredox.receiver
import com.github.vitalsoftware.scalaredox.receiver.ReceiveController._
import com.github.vitalsoftware.util.JsonNaming.KebabCase
import play.api.libs.json._
import play.api.mvc._
import play.api.Logger
import play.api.libs.json.JsonConfiguration.Aux
import scala.concurrent.Future
/**
 * Play controller mixin for receiving Redox webhook posts.
 *
 * Implementors supply the shared secret via [[verificationToken]]; incoming
 * requests are either the one-time destination-verification challenge or a
 * normal event carrying the token in a header.
 */
trait ReceiveController extends BaseController {

  // Shared secret configured on the Redox destination record.
  protected def verificationToken: String
  val logger: Logger

  /**
   * Validate an initial challenge from Redox to authenticate our server as a
   * destination, or validate a normal Redox event request and delegate to the
   * verifiedAction
   *
   * Redox documentation says:
   * > Verification POSTs will include a challenge value and your destination’s
   * > verification token (that you specified when you set up the destination
   * > record) in the body of the POST. Non-verification POSTs from Redox will
   * > always include the verification token in the header of the message.
   *
   * @see http://developer.redoxengine.com/getting-started/create-a-destination/
   */
  protected def validatedAction(verifiedAction: RedoxRequest => Future[Result]): Action[JsValue] =
    Action(parse.json).async({ request: Request[JsValue] =>
      RedoxRequest(request).token match {
        // Header token matches: a normal event request — delegate.
        case Some(token) if token == verificationToken =>
          verifiedAction(RedoxRequest(request))
        case Some(token) =>
          // The verification token is incorrect
          logger.error(s"Redox webhook had an incorrect token from ${RedoxRequest(request)}")
          Future.successful(Forbidden(s"Validation failed."))
        // No header token: treat as a destination-verification challenge.
        case None =>
          challengeResponse(RedoxRequest(request))
      }
    })

  // Parses and answers a destination-verification challenge. On success the
  // response body must be exactly the challenge string.
  private def challengeResponse(request: RedoxRequest): Future[Result] =
    request.underlying.body
      .validate[Challenge]
      .fold(
        (errors: Seq[(JsPath, Seq[JsonValidationError])]) => {
          // The challenge has an invalid format
          logger.error(s"Redox webhook challenge had errors (${JsError.toJson(errors)}) from $request")
          logger.debug(s"Failed challenge body was: ${request.underlying.body.toString()}")
          Future.successful(Forbidden(s"Challenge failed."))
        },
        (challenge: Challenge) => {
          if (challenge.verificationToken == verificationToken) {
            // The challenge is successful: we must respond with the challenge token (only)
            logger.info(s"Redox endpoint initialized via challenge from $request")
            Future.successful(Ok(challenge.challenge))
          } else {
            // The challenge has an invalid validation token
            logger.error(s"Redox challenge had invalid token from $request")
            Future.successful(Forbidden(s"Challenge failed."))
          }
        }
      )
}
object ReceiveController {
  // Body of a Redox destination-verification POST.
  case class Challenge(verificationToken: String, challenge: String)

  // Redox sends kebab-case JSON keys ("verification-token", "challenge").
  implicit val config: Aux[Json.MacroOptions] = JsonConfiguration(KebabCase)
  implicit val challengeFormat: Format[Challenge] = Json.format[Challenge]
}
| vital-software/scala-redox | src/main/scala/com/github/vitalsoftware/scalaredox/receiver/ReceiveController.scala | Scala | mit | 3,061 |
package models
/**
 * Application-level settings extracted from Play configuration:
 * the app version string and the names of the bundled style/script assets.
 */
case class Configuration(
  version: String,
  styleName: String,
  scriptName: String) {
}
object Configuration {
  // Builds an application Configuration from Play's runtime configuration,
  // defaulting every missing key to the empty string.
  def apply(conf: play.api.Configuration) = {
    def stringOrEmpty(key: String): String = conf.getString(key).getOrElse("")
    new Configuration(
      version = stringOrEmpty("application.version"),
      styleName = stringOrEmpty("application.files.style"),
      scriptName = stringOrEmpty("application.files.script")
    )
  }
}
| bobeal/geebooks | app/models/Configuration.scala | Scala | agpl-3.0 | 440 |
package utils
object StringOpz {

  // Extension methods for String; value class keeps this allocation-free.
  implicit final class StrOpz(private val s: String) extends AnyVal {
    // Strips margins and then removes every occurrence of the two-character
    // sequence backslash + 'n'.
    // NOTE(review): the target is the literal "\n" escape sequence, not an
    // actual newline character — confirm that is intended.
    def inlined: String = {
      val stripped = s.stripMargin
      stripped.replace("\\n", "")
    }
  }
}
| falconepl/daarvin-protos | daarvin-proto/src/main/scala/utils/StringOpz.scala | Scala | gpl-2.0 | 162 |
// Sample adapters:

/** Entry-point adapter that logs every wrapped call: its arguments and result. */
class logged extends EntryPoint.Adapter:
  def wrapper(wrapped: EntryPoint.Wrapper): LoggedWrapper = LoggedWrapper(wrapped)
  class LoggedWrapper(val wrapped: EntryPoint.Wrapper) extends Wrapper:
    def adapt[A, R](op: A => R)(args: A): R =
      // Render the argument(s) uniformly regardless of their runtime shape.
      val argsString: String = args match
        case args: Array[_] => args.mkString(", ")
        case args: Seq[_] => args.mkString(", ")
        case args: Unit => "()"
        case args => args.toString
      val result = op(args)
      // Log after the call so the result can be included.
      println(s"[log] ${finalWrapped.entryPointName}($argsString) -> $result")
      result
  end LoggedWrapper
end logged
/** Adapter that splits a single space-separated string argument into an array. */
class split extends EntryPoint.Adapter:
  def wrapper(wrapped: EntryPoint.Wrapper): SplitWrapper = SplitWrapper(wrapped)
  class SplitWrapper(val wrapped: EntryPoint.Wrapper) extends Wrapper:
    def adapt[R](op: Array[String] => R)(args: String): R = op(args.split(" "))
end split
/** Adapter that joins an array of string arguments into one space-separated string. */
class join extends EntryPoint.Adapter:
  def wrapper(wrapped: EntryPoint.Wrapper): JoinWrapper = JoinWrapper(wrapped)
  class JoinWrapper(val wrapped: EntryPoint.Wrapper) extends Wrapper:
    def adapt[R](op: String => R)(args: Array[String]): R = op(args.mkString(" "))
end join
| dotty-staging/dotty | tests/run/decorators/sample-adapters.scala | Scala | apache-2.0 | 1,193 |
package pl.newicom.dddd.scheduling
import akka.actor.ActorPath
import akka.persistence.Recover
import org.json4s.JsonAST.JString
import org.json4s.{CustomSerializer, FullTypeHints}
import pl.newicom.dddd.actor.PassivationConfig
import pl.newicom.dddd.aggregate._
import pl.newicom.dddd.eventhandling.EventPublisher
import pl.newicom.dddd.messaging.event.EventMessage
import pl.newicom.dddd.scheduling.Scheduler.SchedulerState
import pl.newicom.dddd.serialization.JsonSerializationHints
object Scheduler {

  //
  // Serialization hints
  //

  // ActorPath has no default JSON representation; serialize it via its
  // canonical serialization-format string.
  object ActorPathSerializer extends CustomSerializer[ActorPath](format => (
    { case JString(s) => ActorPath.fromString(s) },
    { case x: ActorPath => JString(x.toSerializationFormat) }
    ))

  // Hints consumed by the journal serializer: fully-qualified type names for
  // polymorphic events plus the custom ActorPath serializer above.
  val serializationHints = new JsonSerializationHints {
    def typeHints = FullTypeHints(List(
      classOf[EventScheduled],
      classOf[EventMessage]
    ))
    def serializers = List(ActorPathSerializer)
  }

  //
  // State
  //

  // The scheduler keeps no data beyond the event log itself; applying an
  // EventScheduled leaves the state unchanged.
  case class SchedulerState() extends AggregateState {
    override def apply = {
      case e: EventScheduled => this
    }
  }
}
/**
 * Aggregate root that records scheduled events (one persistence stream per
 * business unit). It deliberately never replays its journal: state is not
 * needed to accept new ScheduleEvent commands.
 */
class Scheduler(val pc: PassivationConfig, businessUnit: String) extends AggregateRoot[SchedulerState] {
  this: EventPublisher =>

  override def persistenceId = s"${schedulingOffice.name}-$businessUnit"

  // Skip recovery
  override def preStart() = self ! Recover(toSequenceNr = 0L)

  // Disable automated recovery on restart
  override def preRestart(reason: Throwable, message: Option[Any]) = ()

  override val factory: AggregateRootFactory = {
    case EventScheduled(_, _, _, _, _) => SchedulerState()
  }

  override def handleCommand: Receive = {
    case ScheduleEvent(_, target, deadline, msg) =>
      raise(
        EventScheduled(
          businessUnit,
          target,
          // Bucket the deadline to whole minutes; the exact millisecond
          // deadline is preserved separately in the event.
          deadline.withSecondOfMinute(0).withMillisOfSecond(0),
          deadline.getMillis,
          msg)
      )
  }
}
| ahjohannessen/akka-ddd | akka-ddd-scheduling/src/main/scala/pl/newicom/dddd/scheduling/Scheduler.scala | Scala | mit | 1,923 |
/*
* Copyright 2017 TabMo http://tabmo.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.tabmo.avro
import java.io.Closeable
import scala.util.{ Failure, Success, Try }
/**
 * Minimal "try-with-resources" helpers for [[java.io.Closeable]] resources.
 *
 * Both helpers always close their resource(s), capture non-fatal exceptions
 * from the work function as a [[scala.util.Failure]], and swallow exceptions
 * raised while closing (best effort), so a close failure never masks the
 * primary result.
 */
object TWR {

  /**
   * Runs `doWork` with `resource`, closing the resource afterwards.
   *
   * @return Success of the work result, or Failure if `doWork` threw a
   *         non-fatal exception (fatal errors such as OutOfMemoryError
   *         propagate).
   */
  def twr[A <: Closeable, B](resource: A)(doWork: A => B): Try[B] =
    try {
      // Try(...) captures NonFatal throwables as Failure.
      Try(doWork(resource))
    } finally {
      closeQuietly(resource)
    }

  /**
   * Variant of [[twr]] for several resources: every resource is closed
   * (best effort) once `doWork` finishes or fails.
   */
  def twrs[A <: Closeable, B](resources: A*)(doWork: Seq[A] => B): Try[B] =
    try {
      Try(doWork(resources.toSeq))
    } finally {
      resources.foreach(closeQuietly)
    }

  // Closes a resource, tolerating a null reference and ignoring any
  // non-fatal exception thrown by close().
  private def closeQuietly(resource: Closeable): Unit =
    try {
      if (resource != null) {
        resource.close()
      }
    } catch {
      case NonFatal(_) => // deliberately swallowed; should be logged
    }
}
| tabmo/parquet-avro-shapeless | src/main/scala/io/tabmo/avro/TWR.scala | Scala | apache-2.0 | 1,545 |
package longRunning
import ilc.examples.MapIntIntBenchData
import ilc.examples.MapSuccBaseGenerated
import ilc.examples.ReplacementChangeBenchmark
/**
 * Benchmark generated derivative.
 *
 * Runs the ReplacementChangeBenchmark harness over the generated program
 * [[MapSuccBaseGenerated]] with Int-to-Int map benchmark data.
 */
object MapSuccBaseBenchmark extends ReplacementChangeBenchmark(new MapIntIntBenchData(MapSuccBaseGenerated))
| inc-lc/ilc-scala | clients/src/test/scala/longRunning/MapSuccBaseBenchmark.scala | Scala | mit | 303 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicInteger
import javax.annotation.concurrent.GuardedBy
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
import scala.concurrent.Future
import scala.concurrent.duration.Duration
import org.apache.spark.{ExecutorAllocationClient, SparkEnv, SparkException, TaskState}
import org.apache.spark.internal.Logging
import org.apache.spark.rpc._
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages._
import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend.ENDPOINT_NAME
import org.apache.spark.util.{RpcUtils, SerializableBuffer, ThreadUtils, Utils}
/**
 * A scheduler backend that waits for coarse-grained executors to connect.
 * This backend holds onto each executor for the duration of the Spark job rather than relinquishing
 * executors whenever a task is done and asking the scheduler to launch a new executor for
 * each new task. Executors may be launched in a variety of ways, such as Mesos tasks for the
 * coarse-grained Mesos mode or standalone processes for Spark's standalone deploy mode
 * (spark.deploy.*).
 */
private[spark]
class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: RpcEnv)
  extends ExecutorAllocationClient with SchedulerBackend with Logging
{
  // Use an atomic variable to track total number of cores in the cluster for simplicity and speed
  protected val totalCoreCount = new AtomicInteger(0)
  // Total number of executors that are currently registered
  protected val totalRegisteredExecutors = new AtomicInteger(0)
  protected val conf = scheduler.sc.conf
  // Upper bound on a single RPC message; serialized tasks above this abort the stage.
  private val maxRpcMessageSize = RpcUtils.maxMessageSizeBytes(conf)
  private val defaultAskTimeout = RpcUtils.askRpcTimeout(conf)
  // Submit tasks only after (registered resources / total expected resources)
  // is equal to at least this value, that is double between 0 and 1.
  private val _minRegisteredRatio =
    math.min(1, conf.getDouble("spark.scheduler.minRegisteredResourcesRatio", 0))
  // Submit tasks after maxRegisteredWaitingTime milliseconds
  // if minRegisteredRatio has not yet been reached
  private val maxRegisteredWaitingTimeMs =
    conf.getTimeAsMs("spark.scheduler.maxRegisteredResourcesWaitingTime", "30s")
  private val createTime = System.currentTimeMillis()

  // Accessing `executorDataMap` in `DriverEndpoint.receive/receiveAndReply` doesn't need any
  // protection. But accessing `executorDataMap` out of `DriverEndpoint.receive/receiveAndReply`
  // must be protected by `CoarseGrainedSchedulerBackend.this`. Besides, `executorDataMap` should
  // only be modified in `DriverEndpoint.receive/receiveAndReply` with protection by
  // `CoarseGrainedSchedulerBackend.this`.
  private val executorDataMap = new HashMap[String, ExecutorData]

  // Number of executors requested by the cluster manager, [[ExecutorAllocationManager]]
  @GuardedBy("CoarseGrainedSchedulerBackend.this")
  private var requestedTotalExecutors = 0

  // Number of executors requested from the cluster manager that have not registered yet
  @GuardedBy("CoarseGrainedSchedulerBackend.this")
  private var numPendingExecutors = 0

  private val listenerBus = scheduler.sc.listenerBus

  // Executors we have requested the cluster manager to kill that have not died yet; maps
  // the executor ID to whether it was explicitly killed by the driver (and thus shouldn't
  // be considered an app-related failure).
  @GuardedBy("CoarseGrainedSchedulerBackend.this")
  private val executorsPendingToRemove = new HashMap[String, Boolean]

  // A map to store hostname with its possible task number running on it
  @GuardedBy("CoarseGrainedSchedulerBackend.this")
  protected var hostToLocalTaskCount: Map[String, Int] = Map.empty

  // The number of pending tasks which is locality required
  @GuardedBy("CoarseGrainedSchedulerBackend.this")
  protected var localityAwareTasks = 0

  // The num of current max ExecutorId used to re-register appMaster
  @volatile protected var currentExecutorIdCounter = 0
  /**
   * RPC endpoint running on the driver. Executors register here, report task
   * status updates, and receive launch/kill/stop commands. All mutation of
   * `executorDataMap` happens inside this endpoint's message handling.
   */
  class DriverEndpoint(override val rpcEnv: RpcEnv, sparkProperties: Seq[(String, String)])
    extends ThreadSafeRpcEndpoint with Logging {

    // Executors that have been lost, but for which we don't yet know the real exit reason.
    protected val executorsPendingLossReason = new HashSet[String]

    protected val addressToExecutorId = new HashMap[RpcAddress, String]

    private val reviveThread =
      ThreadUtils.newDaemonSingleThreadScheduledExecutor("driver-revive-thread")

    override def onStart() {
      // Periodically revive offers to allow delay scheduling to work
      val reviveIntervalMs = conf.getTimeAsMs("spark.scheduler.revive.interval", "1s")

      reviveThread.scheduleAtFixedRate(new Runnable {
        override def run(): Unit = Utils.tryLogNonFatalError {
          Option(self).foreach(_.send(ReviveOffers))
        }
      }, 0, reviveIntervalMs, TimeUnit.MILLISECONDS)
    }

    // Fire-and-forget messages (no reply expected).
    override def receive: PartialFunction[Any, Unit] = {
      case StatusUpdate(executorId, taskId, state, data) =>
        scheduler.statusUpdate(taskId, state, data.value)
        if (TaskState.isFinished(state)) {
          executorDataMap.get(executorId) match {
            case Some(executorInfo) =>
              // Task finished: reclaim its cores and offer them again.
              executorInfo.freeCores += scheduler.CPUS_PER_TASK
              makeOffers(executorId)
            case None =>
              // Ignoring the update since we don't know about the executor.
              logWarning(s"Ignored task status update ($taskId state $state) " +
                s"from unknown executor with ID $executorId")
          }
        }

      case ReviveOffers =>
        makeOffers()

      case KillTask(taskId, executorId, interruptThread, reason) =>
        executorDataMap.get(executorId) match {
          case Some(executorInfo) =>
            executorInfo.executorEndpoint.send(
              KillTask(taskId, executorId, interruptThread, reason))
          case None =>
            // Ignoring the task kill since the executor is not registered.
            logWarning(s"Attempted to kill task $taskId for unknown executor $executorId.")
        }

      case KillExecutorsOnHost(host) =>
        scheduler.getExecutorsAliveOnHost(host).foreach { exec =>
          killExecutors(exec.toSeq, replace = true, force = true)
        }
    }

    // Request/reply messages (sender awaits the `context.reply`).
    override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {

      case RegisterExecutor(executorId, executorRef, hostname, cores, logUrls) =>
        if (executorDataMap.contains(executorId)) {
          executorRef.send(RegisterExecutorFailed("Duplicate executor ID: " + executorId))
          context.reply(true)
        } else if (scheduler.nodeBlacklist != null &&
          scheduler.nodeBlacklist.contains(hostname)) {
          // If the cluster manager gives us an executor on a blacklisted node (because it
          // already started allocating those resources before we informed it of our blacklist,
          // or if it ignored our blacklist), then we reject that executor immediately.
          logInfo(s"Rejecting $executorId as it has been blacklisted.")
          executorRef.send(RegisterExecutorFailed(s"Executor is blacklisted: $executorId"))
          context.reply(true)
        } else {
          // If the executor's rpc env is not listening for incoming connections, `hostPort`
          // will be null, and the client connection should be used to contact the executor.
          val executorAddress = if (executorRef.address != null) {
              executorRef.address
            } else {
              context.senderAddress
            }
          logInfo(s"Registered executor $executorRef ($executorAddress) with ID $executorId")
          addressToExecutorId(executorAddress) = executorId
          totalCoreCount.addAndGet(cores)
          totalRegisteredExecutors.addAndGet(1)
          val data = new ExecutorData(executorRef, executorRef.address, hostname,
            cores, cores, logUrls)
          // This must be synchronized because variables mutated
          // in this block are read when requesting executors
          CoarseGrainedSchedulerBackend.this.synchronized {
            executorDataMap.put(executorId, data)
            if (currentExecutorIdCounter < executorId.toInt) {
              currentExecutorIdCounter = executorId.toInt
            }
            if (numPendingExecutors > 0) {
              numPendingExecutors -= 1
              logDebug(s"Decremented number of pending executors ($numPendingExecutors left)")
            }
          }
          executorRef.send(RegisteredExecutor)
          // Note: some tests expect the reply to come after we put the executor in the map
          context.reply(true)
          listenerBus.post(
            SparkListenerExecutorAdded(System.currentTimeMillis(), executorId, data))
          makeOffers()
        }

      case StopDriver =>
        context.reply(true)
        stop()

      case StopExecutors =>
        logInfo("Asking each executor to shut down")
        for ((_, executorData) <- executorDataMap) {
          executorData.executorEndpoint.send(StopExecutor)
        }
        context.reply(true)

      case RemoveExecutor(executorId, reason) =>
        // We will remove the executor's state and cannot restore it. However, the connection
        // between the driver and the executor may be still alive so that the executor won't exit
        // automatically, so try to tell the executor to stop itself. See SPARK-13519.
        executorDataMap.get(executorId).foreach(_.executorEndpoint.send(StopExecutor))
        removeExecutor(executorId, reason)
        context.reply(true)

      case RetrieveSparkAppConfig =>
        val reply = SparkAppConfig(sparkProperties,
          SparkEnv.get.securityManager.getIOEncryptionKey())
        context.reply(reply)
    }

    // Make fake resource offers on all executors
    private def makeOffers() {
      // Make sure no executor is killed while some task is launching on it
      val taskDescs = CoarseGrainedSchedulerBackend.this.synchronized {
        // Filter out executors under killing
        val activeExecutors = executorDataMap.filterKeys(executorIsAlive)
        val workOffers = activeExecutors.map { case (id, executorData) =>
          new WorkerOffer(id, executorData.executorHost, executorData.freeCores)
        }.toIndexedSeq
        scheduler.resourceOffers(workOffers)
      }
      if (!taskDescs.isEmpty) {
        launchTasks(taskDescs)
      }
    }

    override def onDisconnected(remoteAddress: RpcAddress): Unit = {
      addressToExecutorId
        .get(remoteAddress)
        .foreach(removeExecutor(_, SlaveLost("Remote RPC client disassociated. Likely due to " +
          "containers exceeding thresholds, or network issues. Check driver logs for WARN " +
          "messages.")))
    }

    // Make fake resource offers on just one executor
    private def makeOffers(executorId: String) {
      // Make sure no executor is killed while some task is launching on it
      val taskDescs = CoarseGrainedSchedulerBackend.this.synchronized {
        // Filter out executors under killing
        if (executorIsAlive(executorId)) {
          val executorData = executorDataMap(executorId)
          val workOffers = IndexedSeq(
            new WorkerOffer(executorId, executorData.executorHost, executorData.freeCores))
          scheduler.resourceOffers(workOffers)
        } else {
          Seq.empty
        }
      }
      if (!taskDescs.isEmpty) {
        launchTasks(taskDescs)
      }
    }

    // An executor is "alive" when it is neither pending removal nor pending a
    // loss-reason resolution.
    private def executorIsAlive(executorId: String): Boolean = synchronized {
      !executorsPendingToRemove.contains(executorId) &&
        !executorsPendingLossReason.contains(executorId)
    }

    // Launch tasks returned by a set of resource offers
    private def launchTasks(tasks: Seq[Seq[TaskDescription]]) {
      for (task <- tasks.flatten) {
        val serializedTask = TaskDescription.encode(task)
        if (serializedTask.limit >= maxRpcMessageSize) {
          // Task too large to ship over RPC: abort its task set with guidance.
          scheduler.taskIdToTaskSetManager.get(task.taskId).foreach { taskSetMgr =>
            try {
              var msg = "Serialized task %s:%d was %d bytes, which exceeds max allowed: " +
                "spark.rpc.message.maxSize (%d bytes). Consider increasing " +
                "spark.rpc.message.maxSize or using broadcast variables for large values."
              msg = msg.format(task.taskId, task.index, serializedTask.limit, maxRpcMessageSize)
              taskSetMgr.abort(msg)
            } catch {
              case e: Exception => logError("Exception in error callback", e)
            }
          }
        }
        else {
          val executorData = executorDataMap(task.executorId)
          executorData.freeCores -= scheduler.CPUS_PER_TASK

          logDebug(s"Launching task ${task.taskId} on executor id: ${task.executorId} hostname: " +
            s"${executorData.executorHost}.")

          executorData.executorEndpoint.send(LaunchTask(new SerializableBuffer(serializedTask)))
        }
      }
    }

    // Remove a disconnected slave from the cluster
    private def removeExecutor(executorId: String, reason: ExecutorLossReason): Unit = {
      logDebug(s"Asked to remove executor $executorId with reason $reason")
      executorDataMap.get(executorId) match {
        case Some(executorInfo) =>
          // This must be synchronized because variables mutated
          // in this block are read when requesting executors
          val killed = CoarseGrainedSchedulerBackend.this.synchronized {
            addressToExecutorId -= executorInfo.executorAddress
            executorDataMap -= executorId
            executorsPendingLossReason -= executorId
            executorsPendingToRemove.remove(executorId).getOrElse(false)
          }
          totalCoreCount.addAndGet(-executorInfo.totalCores)
          totalRegisteredExecutors.addAndGet(-1)
          scheduler.executorLost(executorId, if (killed) ExecutorKilled else reason)
          listenerBus.post(
            SparkListenerExecutorRemoved(System.currentTimeMillis(), executorId, reason.toString))
        case None =>
          // SPARK-15262: If an executor is still alive even after the scheduler has removed
          // its metadata, we may receive a heartbeat from that executor and tell its block
          // manager to reregister itself. If that happens, the block manager master will know
          // about the executor, but the scheduler will not. Therefore, we should remove the
          // executor from the block manager when we hit this case.
          scheduler.sc.env.blockManager.master.removeExecutorAsync(executorId)
          logInfo(s"Asked to remove non-existent executor $executorId")
      }
    }

    /**
     * Stop making resource offers for the given executor. The executor is marked as lost with
     * the loss reason still pending.
     *
     * @return Whether executor should be disabled
     */
    protected def disableExecutor(executorId: String): Boolean = {
      val shouldDisable = CoarseGrainedSchedulerBackend.this.synchronized {
        if (executorIsAlive(executorId)) {
          executorsPendingLossReason += executorId
          true
        } else {
          // Returns true for explicitly killed executors, we also need to get pending loss reasons;
          // For others return false.
          executorsPendingToRemove.contains(executorId)
        }
      }

      if (shouldDisable) {
        logInfo(s"Disabling executor $executorId.")
        scheduler.executorLost(executorId, LossReasonPending)
      }

      shouldDisable
    }

    override def onStop() {
      reviveThread.shutdownNow()
    }
  }
  // Reference to the driver-side RPC endpoint, set in start().
  // NOTE(review): stays null until start() runs — callers appear to assume start() was
  // invoked first; confirm before relying on it elsewhere.
  var driverEndpoint: RpcEndpointRef = null
  // Minimum fraction of resources that must be registered before scheduling may begin.
  protected def minRegisteredRatio: Double = _minRegisteredRatio
override def start() {
val properties = new ArrayBuffer[(String, String)]
for ((key, value) <- scheduler.sc.conf.getAll) {
if (key.startsWith("spark.")) {
properties += ((key, value))
}
}
// TODO (prashant) send conf instead of properties
driverEndpoint = createDriverEndpointRef(properties)
}
  // Registers the driver endpoint with the RpcEnv under the well-known endpoint name.
  protected def createDriverEndpointRef(
      properties: ArrayBuffer[(String, String)]): RpcEndpointRef = {
    rpcEnv.setupEndpoint(ENDPOINT_NAME, createDriverEndpoint(properties))
  }
  // Factory hook: subclasses may override to provide a specialized DriverEndpoint.
  protected def createDriverEndpoint(properties: Seq[(String, String)]): DriverEndpoint = {
    new DriverEndpoint(rpcEnv, properties)
  }
def stopExecutors() {
try {
if (driverEndpoint != null) {
logInfo("Shutting down all executors")
driverEndpoint.askSync[Boolean](StopExecutors)
}
} catch {
case e: Exception =>
throw new SparkException("Error asking standalone scheduler to shut down executors", e)
}
}
override def stop() {
stopExecutors()
try {
if (driverEndpoint != null) {
driverEndpoint.askSync[Boolean](StopDriver)
}
} catch {
case e: Exception =>
throw new SparkException("Error stopping standalone scheduler's driver endpoint", e)
}
}
/**
* Reset the state of CoarseGrainedSchedulerBackend to the initial state. Currently it will only
* be called in the yarn-client mode when AM re-registers after a failure.
* */
protected def reset(): Unit = {
val executors = synchronized {
requestedTotalExecutors = 0
numPendingExecutors = 0
executorsPendingToRemove.clear()
Set() ++ executorDataMap.keys
}
// Remove all the lingering executors that should be removed but not yet. The reason might be
// because (1) disconnected event is not yet received; (2) executors die silently.
executors.foreach { eid =>
removeExecutor(eid, SlaveLost("Stale executor after cluster manager re-registered."))
}
}
  // Asks the driver endpoint to make a fresh round of resource offers to the scheduler.
  override def reviveOffers() {
    driverEndpoint.send(ReviveOffers)
  }
  // Forwards a task-kill request to the driver endpoint (fire-and-forget).
  override def killTask(
      taskId: Long, executorId: String, interruptThread: Boolean, reason: String) {
    driverEndpoint.send(KillTask(taskId, executorId, interruptThread, reason))
  }
  // Default parallelism: the configured value, otherwise total registered cores (minimum 2).
  override def defaultParallelism(): Int = {
    conf.getInt("spark.default.parallelism", math.max(totalCoreCount.get(), 2))
  }
  /**
   * Called by subclasses when notified of a lost worker. It just fires the message and returns
   * at once.
   */
  protected def removeExecutor(executorId: String, reason: ExecutorLossReason): Unit = {
    // Only log the failure since we don't care about the result.
    // The failure callback runs on the calling thread; logging is cheap enough for that.
    driverEndpoint.ask[Boolean](RemoveExecutor(executorId, reason)).onFailure { case t =>
      logError(t.getMessage, t)
    }(ThreadUtils.sameThread)
  }
  // Hook for subclasses: whether enough resources have registered to begin scheduling.
  def sufficientResourcesRegistered(): Boolean = true
override def isReady(): Boolean = {
if (sufficientResourcesRegistered) {
logInfo("SchedulerBackend is ready for scheduling beginning after " +
s"reached minRegisteredResourcesRatio: $minRegisteredRatio")
return true
}
if ((System.currentTimeMillis() - createTime) >= maxRegisteredWaitingTimeMs) {
logInfo("SchedulerBackend is ready for scheduling beginning after waiting " +
s"maxRegisteredResourcesWaitingTime: $maxRegisteredWaitingTimeMs(ms)")
return true
}
false
}
  /**
   * Return the number of executors currently registered with this backend.
   */
  private def numExistingExecutors: Int = executorDataMap.size
  // IDs of all executors currently registered with this backend.
  override def getExecutorIds(): Seq[String] = {
    executorDataMap.keySet.toSeq
  }
  /**
   * Request an additional number of executors from the cluster manager.
   * @return whether the request is acknowledged.
   */
  final override def requestExecutors(numAdditionalExecutors: Int): Boolean = {
    if (numAdditionalExecutors < 0) {
      throw new IllegalArgumentException(
        "Attempted to request a negative number of additional executor(s) " +
          s"$numAdditionalExecutors from the cluster manager. Please specify a positive number!")
    }
    logInfo(s"Requesting $numAdditionalExecutors additional executor(s) from the cluster manager")
    // Update the bookkeeping and issue the (asynchronous) request under the lock, so the
    // counters stay consistent with concurrent executor registration/removal.
    val response = synchronized {
      requestedTotalExecutors += numAdditionalExecutors
      numPendingExecutors += numAdditionalExecutors
      logDebug(s"Number of pending executors is now $numPendingExecutors")
      if (requestedTotalExecutors !=
          (numExistingExecutors + numPendingExecutors - executorsPendingToRemove.size)) {
        logDebug(
          s"""requestExecutors($numAdditionalExecutors): Executor request doesn't match:
             |requestedTotalExecutors = $requestedTotalExecutors
             |numExistingExecutors = $numExistingExecutors
             |numPendingExecutors = $numPendingExecutors
             |executorsPendingToRemove = ${executorsPendingToRemove.size}""".stripMargin)
      }
      // Account for executors pending to be added or removed
      doRequestTotalExecutors(requestedTotalExecutors)
    }
    // Block until the cluster manager acknowledges (or the ask times out).
    defaultAskTimeout.awaitResult(response)
  }
  /**
   * Update the cluster manager on our scheduling needs. Three bits of information are included
   * to help it make decisions.
   * @param numExecutors The total number of executors we'd like to have. The cluster manager
   *                     shouldn't kill any running executor to reach this number, but,
   *                     if all existing executors were to die, this is the number of executors
   *                     we'd want to be allocated.
   * @param localityAwareTasks The number of tasks in all active stages that have locality
   *                           preferences. This includes running, pending, and completed tasks.
   * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages
   *                             that would like to run on that host.
   *                             This includes running, pending, and completed tasks.
   * @return whether the request is acknowledged by the cluster manager.
   */
  final override def requestTotalExecutors(
      numExecutors: Int,
      localityAwareTasks: Int,
      hostToLocalTaskCount: Map[String, Int]
    ): Boolean = {
    if (numExecutors < 0) {
      throw new IllegalArgumentException(
        "Attempted to request a negative number of executor(s) " +
          s"$numExecutors from the cluster manager. Please specify a positive number!")
    }
    val response = synchronized {
      this.requestedTotalExecutors = numExecutors
      this.localityAwareTasks = localityAwareTasks
      this.hostToLocalTaskCount = hostToLocalTaskCount
      // Pending = target minus already-registered, plus those being removed; never negative.
      numPendingExecutors =
        math.max(numExecutors - numExistingExecutors + executorsPendingToRemove.size, 0)
      doRequestTotalExecutors(numExecutors)
    }
    // Block until the cluster manager acknowledges (or the ask times out).
    defaultAskTimeout.awaitResult(response)
  }
  /**
   * Request executors from the cluster manager by specifying the total number desired,
   * including existing pending and running executors.
   *
   * The semantics here guarantee that we do not over-allocate executors for this application,
   * since a later request overrides the value of any prior request. The alternative interface
   * of requesting a delta of executors risks double counting new executors when there are
   * insufficient resources to satisfy the first request. We make the assumption here that the
   * cluster manager will eventually fulfill all requests when resources free up.
   *
   * @return a future whose evaluation indicates whether the request is acknowledged.
   */
  // Default implementation: dynamic allocation is unsupported, so the request is declined.
  protected def doRequestTotalExecutors(requestedTotal: Int): Future[Boolean] =
    Future.successful(false)
  /**
   * Request that the cluster manager kill the specified executors.
   *
   * When asking the executor to be replaced, the executor loss is considered a failure, and
   * killed tasks that are running on the executor will count towards the failure limits. If no
   * replacement is being requested, then the tasks will not count towards the limit.
   *
   * @param executorIds identifiers of executors to kill
   * @param replace whether to replace the killed executors with new ones, default false
   * @param force whether to force kill busy executors, default false
   * @return the ids of the executors acknowledged by the cluster manager to be removed.
   */
  final override def killExecutors(
      executorIds: Seq[String],
      replace: Boolean,
      force: Boolean): Seq[String] = {
    logInfo(s"Requesting to kill executor(s) ${executorIds.mkString(", ")}")
    val response = synchronized {
      val (knownExecutors, unknownExecutors) = executorIds.partition(executorDataMap.contains)
      unknownExecutors.foreach { id =>
        logWarning(s"Executor to kill $id does not exist!")
      }
      // If an executor is already pending to be removed, do not kill it again (SPARK-9795)
      // If this executor is busy, do not kill it unless we are told to force kill it (SPARK-9552)
      val executorsToKill = knownExecutors
        .filter { id => !executorsPendingToRemove.contains(id) }
        .filter { id => force || !scheduler.isExecutorBusy(id) }
      // The map value records whether the kill counts as a loss (true when not replaced).
      executorsToKill.foreach { id => executorsPendingToRemove(id) = !replace }
      logInfo(s"Actual list of executor(s) to be killed is ${executorsToKill.mkString(", ")}")
      // If we do not wish to replace the executors we kill, sync the target number of executors
      // with the cluster manager to avoid allocating new ones. When computing the new target,
      // take into account executors that are pending to be added or removed.
      val adjustTotalExecutors =
        if (!replace) {
          requestedTotalExecutors = math.max(requestedTotalExecutors - executorsToKill.size, 0)
          if (requestedTotalExecutors !=
              (numExistingExecutors + numPendingExecutors - executorsPendingToRemove.size)) {
            logDebug(
              s"""killExecutors($executorIds, $replace, $force): Executor counts do not match:
                 |requestedTotalExecutors = $requestedTotalExecutors
                 |numExistingExecutors = $numExistingExecutors
                 |numPendingExecutors = $numPendingExecutors
                 |executorsPendingToRemove = ${executorsPendingToRemove.size}""".stripMargin)
          }
          doRequestTotalExecutors(requestedTotalExecutors)
        } else {
          numPendingExecutors += knownExecutors.size
          Future.successful(true)
        }
      // NOTE(review): this local val shadows the enclosing method name `killExecutors`.
      val killExecutors: Boolean => Future[Boolean] =
        if (!executorsToKill.isEmpty) {
          _ => doKillExecutors(executorsToKill)
        } else {
          _ => Future.successful(false)
        }
      // First adjust the target count, then issue the kill; both on the same (calling) thread.
      val killResponse = adjustTotalExecutors.flatMap(killExecutors)(ThreadUtils.sameThread)
      killResponse.flatMap(killSuccessful =>
        Future.successful (if (killSuccessful) executorsToKill else Seq.empty[String])
      )(ThreadUtils.sameThread)
    }
    defaultAskTimeout.awaitResult(response)
  }
  /**
   * Kill the given list of executors through the cluster manager.
   * @return whether the kill request is acknowledged.
   */
  // Default implementation: killing executors is unsupported, so the request is declined.
  protected def doKillExecutors(executorIds: Seq[String]): Future[Boolean] =
    Future.successful(false)
  /**
   * Request that the cluster manager kill all executors on a given host.
   * @return whether the kill request is acknowledged.
   */
  final override def killExecutorsOnHost(host: String): Boolean = {
    logInfo(s"Requesting to kill any and all executors on host ${host}")
    // A potential race exists if a new executor attempts to register on a host
    // that is on the blacklist and is no longer valid. To avoid this race,
    // all executor registration and killing happens in the event loop. This way, either
    // an executor will fail to register, or will be killed when all executors on a host
    // are killed.
    // Kill all the executors on this host in an event loop to ensure serialization.
    driverEndpoint.send(KillExecutorsOnHost(host))
    true
  }
}
private[spark] object CoarseGrainedSchedulerBackend {
  // Well-known RPC endpoint name executors use to locate the driver-side endpoint.
  val ENDPOINT_NAME = "CoarseGrainedScheduler"
}
| saturday-shi/spark | core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala | Scala | apache-2.0 | 28,972 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util.Properties
import junit.framework.Assert._
import kafka.api.{ApiVersion, KAFKA_082}
import kafka.message._
import kafka.utils.{CoreUtils, TestUtils}
import org.apache.kafka.common.config.ConfigException
import org.apache.kafka.common.protocol.SecurityProtocol
import org.junit.{Assert, Test}
import org.scalatest.Assertions.intercept
class KafkaConfigTest {
  // log.retention.hours alone: hours are converted to milliseconds.
  @Test
  def testLogRetentionTimeHoursProvided() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    props.put(KafkaConfig.LogRetentionTimeHoursProp, "1")
    val cfg = KafkaConfig.fromProps(props)
    assertEquals(60L * 60L * 1000L, cfg.logRetentionTimeMillis)
  }
  // log.retention.minutes alone: minutes are converted to milliseconds.
  @Test
  def testLogRetentionTimeMinutesProvided() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    props.put(KafkaConfig.LogRetentionTimeMinutesProp, "30")
    val cfg = KafkaConfig.fromProps(props)
    assertEquals(30 * 60L * 1000L, cfg.logRetentionTimeMillis)
  }
  // log.retention.ms alone is used as-is.
  @Test
  def testLogRetentionTimeMsProvided() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    props.put(KafkaConfig.LogRetentionTimeMillisProp, "1800000")
    val cfg = KafkaConfig.fromProps(props)
    assertEquals(30 * 60L * 1000L, cfg.logRetentionTimeMillis)
  }
  // Without any retention config the default retention is 7 days.
  @Test
  def testLogRetentionTimeNoConfigProvided() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    val cfg = KafkaConfig.fromProps(props)
    assertEquals(24 * 7 * 60L * 60L * 1000L, cfg.logRetentionTimeMillis)
  }
  // When both minutes and hours are given, the finer-grained minutes setting wins.
  @Test
  def testLogRetentionTimeBothMinutesAndHoursProvided() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    props.put(KafkaConfig.LogRetentionTimeMinutesProp, "30")
    props.put(KafkaConfig.LogRetentionTimeHoursProp, "1")
    val cfg = KafkaConfig.fromProps(props)
    assertEquals( 30 * 60L * 1000L, cfg.logRetentionTimeMillis)
  }
  // When both ms and minutes are given, the finer-grained ms setting wins.
  @Test
  def testLogRetentionTimeBothMinutesAndMsProvided() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    props.put(KafkaConfig.LogRetentionTimeMillisProp, "1800000")
    props.put(KafkaConfig.LogRetentionTimeMinutesProp, "10")
    val cfg = KafkaConfig.fromProps(props)
    assertEquals( 30 * 60L * 1000L, cfg.logRetentionTimeMillis)
  }
@Test
def testLogRetentionUnlimited() {
val props1 = TestUtils.createBrokerConfig(0,TestUtils.MockZkConnect, port = 8181)
val props2 = TestUtils.createBrokerConfig(0,TestUtils.MockZkConnect, port = 8181)
val props3 = TestUtils.createBrokerConfig(0,TestUtils.MockZkConnect, port = 8181)
val props4 = TestUtils.createBrokerConfig(0,TestUtils.MockZkConnect, port = 8181)
val props5 = TestUtils.createBrokerConfig(0,TestUtils.MockZkConnect, port = 8181)
props1.put("log.retention.ms", "-1")
props2.put("log.retention.minutes", "-1")
props3.put("log.retention.hours", "-1")
val cfg1 = KafkaConfig.fromProps(props1)
val cfg2 = KafkaConfig.fromProps(props2)
val cfg3 = KafkaConfig.fromProps(props3)
assertEquals("Should be -1", -1, cfg1.logRetentionTimeMillis)
assertEquals("Should be -1", -1, cfg2.logRetentionTimeMillis)
assertEquals("Should be -1", -1, cfg3.logRetentionTimeMillis)
props4.put("log.retention.ms", "-1")
props4.put("log.retention.minutes", "30")
val cfg4 = KafkaConfig.fromProps(props4)
assertEquals("Should be -1", -1, cfg4.logRetentionTimeMillis)
props5.put("log.retention.ms", "0")
intercept[IllegalArgumentException] {
val cfg5 = KafkaConfig.fromProps(props5)
}
}
@Test
def testLogRetentionValid {
val props1 = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
val props2 = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
val props3 = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
props1.put("log.retention.ms", "0")
props2.put("log.retention.minutes", "0")
props3.put("log.retention.hours", "0")
intercept[IllegalArgumentException] {
val cfg1 = KafkaConfig.fromProps(props1)
}
intercept[IllegalArgumentException] {
val cfg2 = KafkaConfig.fromProps(props2)
}
intercept[IllegalArgumentException] {
val cfg3 = KafkaConfig.fromProps(props3)
}
}
@Test
def testAdvertiseDefaults() {
val port = "9999"
val hostName = "fake-host"
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect)
props.remove(KafkaConfig.ListenersProp)
props.put(KafkaConfig.HostNameProp, hostName)
props.put(KafkaConfig.PortProp, port)
val serverConfig = KafkaConfig.fromProps(props)
val endpoints = serverConfig.advertisedListeners
val endpoint = endpoints.get(SecurityProtocol.PLAINTEXT).get
assertEquals(endpoint.host, hostName)
assertEquals(endpoint.port, port.toInt)
}
@Test
def testAdvertiseConfigured() {
val advertisedHostName = "routable-host"
val advertisedPort = "1234"
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect)
props.put(KafkaConfig.AdvertisedHostNameProp, advertisedHostName)
props.put(KafkaConfig.AdvertisedPortProp, advertisedPort)
val serverConfig = KafkaConfig.fromProps(props)
val endpoints = serverConfig.advertisedListeners
val endpoint = endpoints.get(SecurityProtocol.PLAINTEXT).get
assertEquals(endpoint.host, advertisedHostName)
assertEquals(endpoint.port, advertisedPort.toInt)
}
@Test
def testAdvertisePortDefault() {
val advertisedHostName = "routable-host"
val port = "9999"
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect)
props.put(KafkaConfig.AdvertisedHostNameProp, advertisedHostName)
props.put(KafkaConfig.PortProp, port)
val serverConfig = KafkaConfig.fromProps(props)
val endpoints = serverConfig.advertisedListeners
val endpoint = endpoints.get(SecurityProtocol.PLAINTEXT).get
assertEquals(endpoint.host, advertisedHostName)
assertEquals(endpoint.port, port.toInt)
}
@Test
def testAdvertiseHostNameDefault() {
val hostName = "routable-host"
val advertisedPort = "9999"
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect)
props.put(KafkaConfig.HostNameProp, hostName)
props.put(KafkaConfig.AdvertisedPortProp, advertisedPort)
val serverConfig = KafkaConfig.fromProps(props)
val endpoints = serverConfig.advertisedListeners
val endpoint = endpoints.get(SecurityProtocol.PLAINTEXT).get
assertEquals(endpoint.host, hostName)
assertEquals(endpoint.port, advertisedPort.toInt)
}
  // Listener lists must be unique in both port and protocol; duplicates are rejected.
  @Test
  def testDuplicateListeners() {
    val props = new Properties()
    props.put(KafkaConfig.BrokerIdProp, "1")
    props.put(KafkaConfig.ZkConnectProp, "localhost:2181")
    // listeners with duplicate port
    props.put(KafkaConfig.ListenersProp, "PLAINTEXT://localhost:9091,TRACE://localhost:9091")
    assert(!isValidKafkaConfig(props))
    // listeners with duplicate protocol
    props.put(KafkaConfig.ListenersProp, "PLAINTEXT://localhost:9091,PLAINTEXT://localhost:9092")
    assert(!isValidKafkaConfig(props))
    // advertised listeners with duplicate port
    props.put(KafkaConfig.AdvertisedListenersProp, "PLAINTEXT://localhost:9091,TRACE://localhost:9091")
    assert(!isValidKafkaConfig(props))
  }
  // A listener with an unknown security protocol must be rejected.
  @Test
  def testBadListenerProtocol() {
    val props = new Properties()
    props.put(KafkaConfig.BrokerIdProp, "1")
    props.put(KafkaConfig.ZkConnectProp, "localhost:2181")
    props.put(KafkaConfig.ListenersProp, "BAD://localhost:9091")
    assert(!isValidKafkaConfig(props))
  }
  // Listener/advertised-listener defaults derived from host.name/port settings.
  @Test
  def testListenerDefaults() {
    val props = new Properties()
    props.put(KafkaConfig.BrokerIdProp, "1")
    props.put(KafkaConfig.ZkConnectProp, "localhost:2181")
    // configuration with host and port, but no listeners
    props.put(KafkaConfig.HostNameProp, "myhost")
    props.put(KafkaConfig.PortProp, "1111")
    val conf = KafkaConfig.fromProps(props)
    assertEquals(CoreUtils.listenerListToEndPoints("PLAINTEXT://myhost:1111"), conf.listeners)
    // configuration with null host
    props.remove(KafkaConfig.HostNameProp)
    val conf2 = KafkaConfig.fromProps(props)
    assertEquals(CoreUtils.listenerListToEndPoints("PLAINTEXT://:1111"), conf2.listeners)
    assertEquals(CoreUtils.listenerListToEndPoints("PLAINTEXT://:1111"), conf2.advertisedListeners)
    assertEquals(null, conf2.listeners(SecurityProtocol.PLAINTEXT).host)
    // configuration with advertised host and port, and no advertised listeners
    props.put(KafkaConfig.AdvertisedHostNameProp, "otherhost")
    props.put(KafkaConfig.AdvertisedPortProp, "2222")
    val conf3 = KafkaConfig.fromProps(props)
    // NOTE(review): argument order here is (actual, expected) — reversed vs JUnit convention.
    assertEquals(conf3.advertisedListeners, CoreUtils.listenerListToEndPoints("PLAINTEXT://otherhost:2222"))
  }
  // inter.broker.protocol.version parsing: defaults to the latest version and maps
  // 0.8.2.0 / 0.8.2.1 to the same KAFKA_082 constant.
  @Test
  def testVersionConfiguration() {
    val props = new Properties()
    props.put(KafkaConfig.BrokerIdProp, "1")
    props.put(KafkaConfig.ZkConnectProp, "localhost:2181")
    val conf = KafkaConfig.fromProps(props)
    assertEquals(ApiVersion.latestVersion, conf.interBrokerProtocolVersion)
    props.put(KafkaConfig.InterBrokerProtocolVersionProp,"0.8.2.0")
    val conf2 = KafkaConfig.fromProps(props)
    assertEquals(KAFKA_082, conf2.interBrokerProtocolVersion)
    // check that 0.8.2.0 is the same as 0.8.2.1
    props.put(KafkaConfig.InterBrokerProtocolVersionProp,"0.8.2.1")
    val conf3 = KafkaConfig.fromProps(props)
    assertEquals(KAFKA_082, conf3.interBrokerProtocolVersion)
    //check that latest is newer than 0.8.2
    assert(ApiVersion.latestVersion.onOrAfter(conf3.interBrokerProtocolVersion))
  }
private def isValidKafkaConfig(props: Properties): Boolean = {
try {
KafkaConfig.fromProps(props)
true
} catch {
case e: IllegalArgumentException => false
}
}
@Test
def testUncleanLeaderElectionDefault() {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
val serverConfig = KafkaConfig.fromProps(props)
assertEquals(serverConfig.uncleanLeaderElectionEnable, true)
}
@Test
def testUncleanElectionDisabled() {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
props.put(KafkaConfig.UncleanLeaderElectionEnableProp, String.valueOf(false))
val serverConfig = KafkaConfig.fromProps(props)
assertEquals(serverConfig.uncleanLeaderElectionEnable, false)
}
@Test
def testUncleanElectionEnabled() {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
props.put(KafkaConfig.UncleanLeaderElectionEnableProp, String.valueOf(true))
val serverConfig = KafkaConfig.fromProps(props)
assertEquals(serverConfig.uncleanLeaderElectionEnable, true)
}
  // A non-boolean value for the unclean election flag must be rejected at parse time.
  @Test
  def testUncleanElectionInvalid() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    props.put(KafkaConfig.UncleanLeaderElectionEnableProp, "invalid")
    intercept[ConfigException] {
      KafkaConfig.fromProps(props)
    }
  }
  // log.roll.ms alone is used as-is.
  @Test
  def testLogRollTimeMsProvided() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    props.put(KafkaConfig.LogRollTimeMillisProp, "1800000")
    val cfg = KafkaConfig.fromProps(props)
    assertEquals(30 * 60L * 1000L, cfg.logRollTimeMillis)
  }
  // When both ms and hours are given, the finer-grained ms setting wins.
  @Test
  def testLogRollTimeBothMsAndHoursProvided() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    props.put(KafkaConfig.LogRollTimeMillisProp, "1800000")
    props.put(KafkaConfig.LogRollTimeHoursProp, "1")
    val cfg = KafkaConfig.fromProps(props)
    assertEquals( 30 * 60L * 1000L, cfg.logRollTimeMillis)
  }
  // Without any roll config the default is 7 days.
  @Test
  def testLogRollTimeNoConfigProvided() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    val cfg = KafkaConfig.fromProps(props)
    assertEquals(24 * 7 * 60L * 60L * 1000L, cfg.logRollTimeMillis )
  }
@Test
def testDefaultCompressionType() {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
val serverConfig = KafkaConfig.fromProps(props)
assertEquals(serverConfig.compressionType, "producer")
}
@Test
def testValidCompressionType() {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
props.put("compression.type", "gzip")
val serverConfig = KafkaConfig.fromProps(props)
assertEquals(serverConfig.compressionType, "gzip")
}
  // An unknown compression codec name must be rejected at parse time.
  @Test
  def testInvalidCompressionType() {
    val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181)
    props.put(KafkaConfig.CompressionTypeProp, "abc")
    intercept[IllegalArgumentException] {
      KafkaConfig.fromProps(props)
    }
  }
  // Exhaustively checks that every registered broker property rejects malformed values.
  // Iterates over all config names; each case supplies values that must fail validation,
  // while string-valued properties are explicitly skipped.
  @Test
  def testFromPropsInvalid() {
    // Minimal valid property set to which one invalid property at a time is added.
    def getBaseProperties(): Properties = {
      val validRequiredProperties = new Properties()
      validRequiredProperties.put(KafkaConfig.ZkConnectProp, "127.0.0.1:2181")
      validRequiredProperties
    }
    // to ensure a basis is valid - bootstraps all needed validation
    KafkaConfig.fromProps(getBaseProperties())
    KafkaConfig.configNames().foreach(name => {
      name match {
        case KafkaConfig.ZkConnectProp => // ignore string
        case KafkaConfig.ZkSessionTimeoutMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.ZkConnectionTimeoutMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.ZkSyncTimeMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.ZkEnableSecureAclsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_boolean")
        case KafkaConfig.BrokerIdProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.NumNetworkThreadsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.NumIoThreadsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.BackgroundThreadsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.QueuedMaxRequestsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.RequestTimeoutMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.AuthorizerClassNameProp => //ignore string
        case KafkaConfig.PortProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.HostNameProp => // ignore string
        case KafkaConfig.AdvertisedHostNameProp => //ignore string
        case KafkaConfig.AdvertisedPortProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.SocketSendBufferBytesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.SocketReceiveBufferBytesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.MaxConnectionsPerIpOverridesProp =>
          assertPropertyInvalid(getBaseProperties(), name, "127.0.0.1:not_a_number")
        case KafkaConfig.ConnectionsMaxIdleMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.NumPartitionsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.LogDirsProp => // ignore string
        case KafkaConfig.LogDirProp => // ignore string
        case KafkaConfig.LogSegmentBytesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", Message.MinHeaderSize - 1)
        case KafkaConfig.LogRollTimeMillisProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.LogRollTimeHoursProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.LogRetentionTimeMillisProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.LogRetentionTimeMinutesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.LogRetentionTimeHoursProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.LogRetentionBytesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.LogCleanupIntervalMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.LogCleanupPolicyProp => assertPropertyInvalid(getBaseProperties(), name, "unknown_policy", "0")
        case KafkaConfig.LogCleanerIoMaxBytesPerSecondProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.LogCleanerDedupeBufferSizeProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "1024")
        case KafkaConfig.LogCleanerDedupeBufferLoadFactorProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.LogCleanerEnableProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_boolean")
        case KafkaConfig.LogCleanerDeleteRetentionMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.LogCleanerMinCleanRatioProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.LogIndexSizeMaxBytesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "3")
        case KafkaConfig.LogFlushIntervalMessagesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.LogFlushSchedulerIntervalMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.LogFlushIntervalMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.NumRecoveryThreadsPerDataDirProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.AutoCreateTopicsEnableProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_boolean", "0")
        case KafkaConfig.MinInSyncReplicasProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.ControllerSocketTimeoutMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.DefaultReplicationFactorProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.ReplicaLagTimeMaxMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.ReplicaSocketTimeoutMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "-2")
        case KafkaConfig.ReplicaSocketReceiveBufferBytesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.ReplicaFetchMaxBytesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.ReplicaFetchWaitMaxMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.ReplicaFetchMinBytesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.NumReplicaFetchersProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.ReplicaHighWatermarkCheckpointIntervalMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.FetchPurgatoryPurgeIntervalRequestsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.ProducerPurgatoryPurgeIntervalRequestsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.AutoLeaderRebalanceEnableProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_boolean", "0")
        case KafkaConfig.LeaderImbalancePerBrokerPercentageProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.LeaderImbalanceCheckIntervalSecondsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.UncleanLeaderElectionEnableProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_boolean", "0")
        case KafkaConfig.ControlledShutdownMaxRetriesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.ControlledShutdownRetryBackoffMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.ControlledShutdownEnableProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_boolean", "0")
        case KafkaConfig.GroupMinSessionTimeoutMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.GroupMaxSessionTimeoutMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.OffsetMetadataMaxSizeProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number")
        case KafkaConfig.OffsetsLoadBufferSizeProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.OffsetsTopicReplicationFactorProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.OffsetsTopicPartitionsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.OffsetsTopicSegmentBytesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.OffsetsTopicCompressionCodecProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "-1")
        case KafkaConfig.OffsetsRetentionMinutesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.OffsetsRetentionCheckIntervalMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.OffsetCommitTimeoutMsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.OffsetCommitRequiredAcksProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "-2")
        case KafkaConfig.ProducerQuotaBytesPerSecondDefaultProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.ConsumerQuotaBytesPerSecondDefaultProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.NumQuotaSamplesProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.QuotaWindowSizeSecondsProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "0")
        case KafkaConfig.DeleteTopicEnableProp => assertPropertyInvalid(getBaseProperties(), name, "not_a_boolean", "0")
        case KafkaConfig.MetricNumSamplesProp => assertPropertyInvalid(getBaseProperties, name, "not_a_number", "-1", "0")
        case KafkaConfig.MetricSampleWindowMsProp => assertPropertyInvalid(getBaseProperties, name, "not_a_number", "-1", "0")
        case KafkaConfig.MetricReporterClassesProp => // ignore string
        //SSL Configs
        case KafkaConfig.PrincipalBuilderClassProp =>
        case KafkaConfig.SslProtocolProp => // ignore string
        case KafkaConfig.SslProviderProp => // ignore string
        case KafkaConfig.SslEnabledProtocolsProp =>
        case KafkaConfig.SslKeystoreTypeProp => // ignore string
        case KafkaConfig.SslKeystoreLocationProp => // ignore string
        case KafkaConfig.SslKeystorePasswordProp => // ignore string
        case KafkaConfig.SslKeyPasswordProp => // ignore string
        case KafkaConfig.SslTruststoreTypeProp => // ignore string
        case KafkaConfig.SslTruststorePasswordProp => // ignore string
        case KafkaConfig.SslTruststoreLocationProp => // ignore string
        case KafkaConfig.SslKeyManagerAlgorithmProp =>
        case KafkaConfig.SslTrustManagerAlgorithmProp =>
        case KafkaConfig.SslClientAuthProp => // ignore string
        case KafkaConfig.SslEndpointIdentificationAlgorithmProp => // ignore string
        case KafkaConfig.SslCipherSuitesProp => // ignore string
        //Sasl Configs
        case KafkaConfig.SaslKerberosServiceNameProp => // ignore string
        case KafkaConfig.SaslKerberosKinitCmdProp =>
        case KafkaConfig.SaslKerberosTicketRenewWindowFactorProp =>
        case KafkaConfig.SaslKerberosTicketRenewJitterProp =>
        case KafkaConfig.SaslKerberosMinTimeBeforeReloginProp =>
        case KafkaConfig.AuthToLocalProp => // ignore string
        // Catch-all: `nonNegativeIntProperty` is a lowercase pattern, i.e. a variable
        // binding that matches ANY remaining property name; all of those are expected
        // to reject non-numbers and -1.
        case nonNegativeIntProperty => assertPropertyInvalid(getBaseProperties(), name, "not_a_number", "-1")
      }
    })
  }
@Test
def testSpecificProperties(): Unit = {
val defaults = new Properties()
defaults.put(KafkaConfig.ZkConnectProp, "127.0.0.1:2181")
// For ZkConnectionTimeoutMs
defaults.put(KafkaConfig.ZkSessionTimeoutMsProp, "1234")
defaults.put(KafkaConfig.MaxReservedBrokerIdProp, "1")
defaults.put(KafkaConfig.BrokerIdProp, "1")
defaults.put(KafkaConfig.HostNameProp, "127.0.0.1")
defaults.put(KafkaConfig.PortProp, "1122")
defaults.put(KafkaConfig.MaxConnectionsPerIpOverridesProp, "127.0.0.1:2, 127.0.0.2:3")
defaults.put(KafkaConfig.LogDirProp, "/tmp1,/tmp2")
defaults.put(KafkaConfig.LogRollTimeHoursProp, "12")
defaults.put(KafkaConfig.LogRollTimeJitterHoursProp, "11")
defaults.put(KafkaConfig.LogRetentionTimeHoursProp, "10")
//For LogFlushIntervalMsProp
defaults.put(KafkaConfig.LogFlushSchedulerIntervalMsProp, "123")
defaults.put(KafkaConfig.OffsetsTopicCompressionCodecProp, SnappyCompressionCodec.codec.toString)
val config = KafkaConfig.fromProps(defaults)
Assert.assertEquals("127.0.0.1:2181", config.zkConnect)
Assert.assertEquals(1234, config.zkConnectionTimeoutMs)
Assert.assertEquals(1, config.maxReservedBrokerId)
Assert.assertEquals(1, config.brokerId)
Assert.assertEquals("127.0.0.1", config.hostName)
Assert.assertEquals(1122, config.advertisedPort)
Assert.assertEquals("127.0.0.1", config.advertisedHostName)
Assert.assertEquals(Map("127.0.0.1" -> 2, "127.0.0.2" -> 3), config.maxConnectionsPerIpOverrides)
Assert.assertEquals(List("/tmp1", "/tmp2"), config.logDirs)
Assert.assertEquals(12 * 60L * 1000L * 60, config.logRollTimeMillis)
Assert.assertEquals(11 * 60L * 1000L * 60, config.logRollTimeJitterMillis)
Assert.assertEquals(10 * 60L * 1000L * 60, config.logRetentionTimeMillis)
Assert.assertEquals(123L, config.logFlushIntervalMs)
Assert.assertEquals(SnappyCompressionCodec, config.offsetsTopicCompressionCodec)
}
private def assertPropertyInvalid(validRequiredProps: => Properties, name: String, values: Any*) {
values.foreach((value) => {
val props = validRequiredProps
props.setProperty(name, value.toString)
intercept[Exception] {
KafkaConfig.fromProps(props)
}
})
}
}
| bluebreezecf/kafka | core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala | Scala | apache-2.0 | 27,979 |
package com.eevolution.context.dictionary.infrastructure.repository
import com.eevolution.context.dictionary.domain.model.PinStance
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: emeris.hernandez@e-evolution.com, http://www.e-evolution.com , http://github.com/EmerisScala
* Created by emeris.hernandez@e-evolution.com , www.e-evolution.com on 26/10/17.
*/
/**
* Pin Stance Mapping
*/
trait PinStanceMapping {
  // Quill schema mapping for the AD_PinStance table: binds each field of the
  // PinStance model to its physical (Adempiere-style) column name.
  val queryPinStance = quote {
    querySchema[PinStance]("AD_PinStance",
      _.pinStanceId-> "AD_PinStance_ID",
      _.tenantId-> "AD_Client_ID",
      _.organizationId-> "AD_Org_ID",
      _.processId-> "AD_Process_ID",
      _.recordId-> "Record_ID",
      _.isProcessing-> "IsProcessing",
      _.created-> "Created",
      _.userId-> "AD_User_ID",
      _.updated-> "Updated",
      _.result-> "Result",
      _.errorMsg-> "ErrorMsg",
      _.createdBy-> "CreatedBy",
      _.updatedBy-> "UpdatedBy",
      _.isActive-> "IsActive",
      _.name-> "Name",
      _.reportType-> "ReportType",
      _.printFormatId-> "AD_PrintFormat_ID",
      _.uuid-> "UUID")
  }
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/PinStanceMapping.scala | Scala | gpl-3.0 | 1,869 |
package com.recursivity.bowler.stub
import com.recursivity.bowler.Component
/**
 * Test-support stub: a [[Component]] constructed with no backing model
 * (passes `None` to the parent constructor).
 *
 * @author wfaler
 */
class ListModelComponent extends Component(None){
} | rossabaker/Handlebar | src/test/scala/com/recursivity/bowler/stub/ListModelComponent.scala | Scala | bsd-3-clause | 294 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection
/**
* An append-only, non-threadsafe, array-backed vector that is optimized for primitive types.
*/
private[spark]
class PrimitiveVector[@specialized(Long, Int, Double) V: ClassManifest](initialSize: Int = 64) {
  private var _numElements = 0
  private var _array: Array[V] = _

  // NB: This must be separate from the declaration, otherwise the specialized parent class
  // will get its own array with the same initial size.
  _array = new Array[V](initialSize)

  /** Returns the element at `index`. Fails if `index` is out of [0, size). */
  def apply(index: Int): V = {
    require(index >= 0 && index < _numElements)
    _array(index)
  }

  /** Appends `value`, doubling the backing array's capacity when it is full. */
  def +=(value: V) {
    if (_numElements == _array.length) {
      // Grow to at least 1: with initialSize = 0 the previous `length * 2`
      // stayed at capacity 0 and the write below would always fail.
      resize(math.max(_array.length * 2, 1))
    }
    _array(_numElements) = value
    _numElements += 1
  }

  /** Current capacity of the backing array (>= size). */
  def capacity: Int = _array.length

  /** Number of elements stored. */
  def length: Int = _numElements

  /** Alias for [[length]]. */
  def size: Int = _numElements

  /** Gets the underlying array backing this vector. */
  def array: Array[V] = _array

  /** Trims this vector so that the capacity is equal to the size. */
  def trim(): PrimitiveVector[V] = resize(size)

  /** Resizes the array, dropping elements if the total length decreases. */
  def resize(newLength: Int): PrimitiveVector[V] = {
    val newArray = new Array[V](newLength)
    _array.copyToArray(newArray)
    _array = newArray
    if (newLength < _numElements) {
      _numElements = newLength
    }
    this
  }
}
| windeye/spark | core/src/main/scala/org/apache/spark/util/collection/PrimitiveVector.scala | Scala | apache-2.0 | 2,185 |
package util
import scala.concurrent._
import ExecutionContext.Implicits.global
import org.apache.commons.mail.{DefaultAuthenticator, HtmlEmail}
import org.slf4j.LoggerFactory
import app.Context
import service.{AccountService, RepositoryService, IssuesService, SystemSettingsService}
import servlet.Database
import SystemSettingsService.Smtp
import _root_.util.ControlUtil.defining
trait Notifier extends RepositoryService with AccountService with IssuesService {
  /** Renders `content` via `msg` and sends a notification about issue `issueId` of repository `r`. */
  def toNotify(r: RepositoryService.RepositoryInfo, issueId: Int, content: String)
    (msg: String => String)(implicit context: Context): Unit

  /**
   * Applies `notify` to the mail address of the issue's assignee, collecting
   * candidates (owner, collaborators, participants) but keeping only the
   * assigned user, excluding the acting user and group accounts.
   */
  protected def recipients(issue: model.Issue)(notify: String => Unit)(implicit context: Context) =
    (
      // individual repository's owner
      issue.userName ::
      // collaborators
      getCollaborators(issue.userName, issue.repositoryName) :::
      // participants
      issue.openedUserName ::
      getComments(issue.userName, issue.repositoryName, issue.issueId).map(_.commentedUserName)
    )
    .distinct
    .withFilter ( _ != context.loginAccount.get.userName ) // the operation in person is excluded
    .withFilter ( _ == issue.assignedUserName) // assigned user only.
    .foreach ( getAccountByUserName(_) filterNot (_.isGroupAccount) foreach (x => notify(x.mailAddress)) )
}
object Notifier {
  // TODO We want to be able to switch to mock.
  /** Returns an SMTP-backed Mailer when notifications are enabled in system settings, a no-op MockMailer otherwise. */
  def apply(): Notifier = new SystemSettingsService {}.loadSystemSettings match {
    case settings if settings.notification => new Mailer(settings.smtp.get)
    case _ => new MockMailer
  }

  // The msg* helpers below build HTML mail bodies; `content` is already-rendered HTML.
  def msgIssue(url: String) = (content: String) => s"""
    |${content}<br/>
    |--<br/>
    |<a href="${url}">View it on GitBucket</a>
    """.stripMargin

  def msgPullRequest(url: String) = (content: String) => s"""
    |${content}<hr/>
    |View, comment on, or merge it at:<br/>
    |<a href="${url}">${url}</a>
    """.stripMargin

  def msgComment(url: String) = (content: String) => s"""
    |${content}<br/>
    |--<br/>
    |<a href="${url}">View it on GitBucket</a>
    """.stripMargin

  // Links the issue/PR number, taken from the last path segment of `url`.
  def msgStatus(url: String) = (content: String) => s"""
    |${content} <a href="${url}">#${url split('/') last}</a>
    """.stripMargin
}
/** Sends issue notifications asynchronously over SMTP using the given server settings. */
class Mailer(private val smtp: Smtp) extends Notifier {
  private val logger = LoggerFactory.getLogger(classOf[Mailer])

  // Fire-and-forget: mail is composed and sent on a future; success/failure
  // is only logged, never propagated to the caller.
  def toNotify(r: RepositoryService.RepositoryInfo, issueId: Int, content: String)
      (msg: String => String)(implicit context: Context) = {
    val database = Database(context.request.getServletContext)

    val f = future {
      // TODO Can we use the Database Session in other than Transaction Filter?
      database withSession {
        getIssue(r.owner, r.name, issueId.toString) foreach { issue =>
          defining(
            s"[${r.name}] ${issue.title} (#${issueId})" ->
              msg(view.Markdown.toHtml(content, r, false, true))) { case (subject, msg) =>
            recipients(issue) { to =>
              val email = new HtmlEmail
              email.setHostName(smtp.host)
              email.setSmtpPort(smtp.port.get)
              smtp.user.foreach { user =>
                email.setAuthenticator(new DefaultAuthenticator(user, smtp.password.getOrElse("")))
              }
              smtp.ssl.foreach { ssl =>
                email.setSSLOnConnect(ssl)
              }
              // Use the configured from-address; otherwise fall back to a fixed
              // address labeled with the acting user's name.
              smtp.fromAddress
                .map (_ -> smtp.fromName.orNull)
                .orElse (Some("notifications@gitbucket.com" -> context.loginAccount.get.userName))
                .foreach { case (address, name) =>
                  email.setFrom(address, name)
                }
              email.setCharset("UTF-8")
              email.setSubject(subject)
              email.setHtmlMsg(msg)
              email.addTo(to).send
            }
          }
        }
      }
      "Notifications Successful."
    }
    f onSuccess {
      case s => logger.debug(s)
    }
    f onFailure {
      case t => logger.error("Notifications Failed.", t)
    }
  }
}
class MockMailer extends Notifier {
  /** No-op implementation used when notifications are disabled. */
  def toNotify(r: RepositoryService.RepositoryInfo, issueId: Int, content: String)
    (msg: String => String)(implicit context: Context): Unit = {}
} | toshi-saito/gitbucket | src/main/scala/util/Notifier.scala | Scala | apache-2.0 | 4,336 |
/*
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {2017} {Jamie Xia, Calvin Liu, Aditya Prasad}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import scala.language.postfixOps;
object test extends RapBattle{
  // Two "Rapper" personas that act as speakers in the RapBattle DSL.
  object DJKHALED extends Rapper{
    name = "DJ Khaled";
  }
  object SNOOPDOG extends Rapper{
    name = "Snoop Dog";
  }
  // The statements below are RapBattle DSL instructions (infix/postfix method
  // calls enabled by postfixOps). Order is significant: per the prompt strings,
  // the program presumably reads a number and runs a Collatz iteration count —
  // TODO confirm against the RapBattle trait's semantics.
  def main(args: Array[String]): Unit = {
    SNOOPDOG YO;
    SPIT VERSE "Gimme a number for collatz fam";
    I GET PAID;
    FO SHIZZLE;
    SPIT FIRE;
    EVERYBODY SQUAT 2;
    PEACE OUT;
    STRAIGHT OUTTA COMPTON;
    HALF A MIL;
    I GOT 2 PROBLEMS;
    STRAIGHT OUTTA COMPTON;
    I HAVE 3 STACKS;
    ANOTHER ONE;
    BOTTLES N MODELS;
    BOTTLES N MODELS;
    DJKHALED YO;
    ANOTHER ONE;
    MY NIZZLE;
    SPIT VERSE "It took this many turns to get to 1: "
    DJKHALED YO;
    SPIT FIRE;
  }
}
| xiazilla/Rap-Concert | src/test.scala | Scala | apache-2.0 | 1,916 |
package com.sksamuel.avro4s.schemas
import com.sksamuel.avro4s.typeutils.{CaseClassShape, DatatypeShape, SealedTraitShape}
import com.sksamuel.avro4s.{SchemaFor}
import magnolia1.{CaseClass, AutoDerivation, SealedTrait, TypeInfo}
import org.apache.avro.{Schema, SchemaBuilder}
import scala.deriving.Mirror
trait MagnoliaDerivedSchemas extends AutoDerivation[SchemaFor] :
  /** Derives an Avro schema for a case class; value-type derivation is not implemented yet. */
  def join[T](ctx: CaseClass[SchemaFor, T]): SchemaFor[T] =
    DatatypeShape.of(ctx) match {
      case CaseClassShape.Record => Records.schema(ctx)
      case CaseClassShape.ValueType => ??? // TODO: value-type schema derivation
    }
  /** Derives an Avro schema for a sealed trait: a type union for mixed subtypes, an enum for all-singleton cases. */
  override def split[T](ctx: SealedTrait[SchemaFor, T]): SchemaFor[T] =
    DatatypeShape.of[T](ctx) match {
      case SealedTraitShape.TypeUnion => TypeUnions.schema(ctx)
      case SealedTraitShape.Enum => SchemaFor[T](SealedTraits.schema(ctx))
} | sksamuel/avro4s | avro4s-core/src/main/scala/com/sksamuel/avro4s/schemas/magnolia.scala | Scala | apache-2.0 | 828 |
package org.jetbrains.plugins.scala
package extensions
import com.intellij.psi.PsiElement
/**
* Pavel Fatin
*/
object ElementText {
  /** Extractor that always matches, binding the PSI element's source text. */
  def unapply(e: PsiElement): Some[String] = Some(e.getText)
} | triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/extensions/ElementText.scala | Scala | apache-2.0 | 199 |
package com.scalableminds.webknossos.datastore.helpers
import com.scalableminds.util.tools.Fox
import com.typesafe.scalalogging.LazyLogging
import net.liftweb.common.Full
import java.io.File
import java.nio.file.{Files, Path}
import scala.annotation.tailrec
import scala.concurrent.ExecutionContext
trait DataSetDeleter extends LazyLogging with DirectoryConstants {
  // Base directory holding one subdirectory per organization.
  def dataBaseDir: Path

  /**
   * "Deletes" a dataset by moving its directory into the organization's trash
   * directory (no data is physically removed here). Name collisions in trash
   * are resolved by appending "(retryCount)" to the target name.
   */
  def deleteOnDisk(organizationName: String,
                   dataSetName: String,
                   isInConversion: Boolean = false,
                   reason: Option[String] = None)(implicit ec: ExecutionContext): Fox[Unit] = {
    // Retries with a new suffixed name while the target already exists;
    // tail-recursive (the recursive call sits in the catch handler).
    @tailrec
    def deleteWithRetry(sourcePath: Path, targetPath: Path, retryCount: Int = 0): Fox[Unit] =
      try {
        val deduplicatedTargetPath =
          if (retryCount == 0) targetPath else targetPath.resolveSibling(targetPath.getFileName + s"($retryCount)")
        val path = Files.move(sourcePath, deduplicatedTargetPath)
        if (path == null) {
          throw new Exception("Deleting dataset failed")
        }
        logger.info(s"Successfully moved dataset from $sourcePath to $targetPath...")
        Fox.successful(())
      } catch {
        case _: java.nio.file.FileAlreadyExistsException => deleteWithRetry(sourcePath, targetPath, retryCount + 1)
        case e: Exception => Fox.failure(s"Deleting dataset failed: ${e.toString}", Full(e))
      }

    // Datasets still being converted live under the forConversion subdirectory.
    val dataSourcePath =
      if (isInConversion) dataBaseDir.resolve(organizationName).resolve(forConversionDir).resolve(dataSetName)
      else dataBaseDir.resolve(organizationName).resolve(dataSetName)
    val trashPath: Path = dataBaseDir.resolve(organizationName).resolve(trashDir)
    val targetPath = trashPath.resolve(dataSetName)
    new File(trashPath.toString).mkdirs()

    logger.info(
      s"Deleting dataset by moving it from $dataSourcePath to $targetPath${if (reason.isDefined) s" because ${reason.getOrElse("")}"
      else "..."}")
    deleteWithRetry(dataSourcePath, targetPath)
  }
}
| scalableminds/webknossos | webknossos-datastore/app/com/scalableminds/webknossos/datastore/helpers/DataSetDeleter.scala | Scala | agpl-3.0 | 2,044 |
package com.twitter.finagle.thrift.exp.partitioning
import com.twitter.conversions.DurationOps._
import com.twitter.delivery.thriftscala.DeliveryService._
import com.twitter.delivery.thriftscala._
import com.twitter.finagle.Addr
import com.twitter.finagle.addr.WeightedAddress
import com.twitter.finagle.loadbalancer.BalancerRegistry
import com.twitter.finagle.param.CommonParams
import com.twitter.finagle.partitioning.ConsistentHashPartitioningService.NoPartitioningKeys
import com.twitter.finagle.partitioning.PartitionNodeManager.NoPartitionException
import com.twitter.finagle.partitioning.zk.ZkMetadata
import com.twitter.finagle.stats.InMemoryStatsReceiver
import com.twitter.finagle.thrift.exp.partitioning.PartitioningStrategy._
import com.twitter.finagle.thrift.exp.partitioning.ThriftPartitioningService.PartitioningStrategyException
import com.twitter.finagle.thrift.{ThriftRichClient, ThriftRichServer}
import com.twitter.finagle.{Address, ListeningServer, Name, Stack}
import com.twitter.scrooge.ThriftStructIface
import com.twitter.util.{Activity, Await, Awaitable, Duration, Future, Return, Throw, Var}
import java.net.{InetAddress, InetSocketAddress}
import org.scalatest.funsuite.AnyFunSuite
abstract class PartitionAwareClientEndToEndTest extends AnyFunSuite {
  // Blocks the test thread (default 5s) waiting for an async result.
  def await[T](a: Awaitable[T], d: Duration = 5.seconds): T =
    Await.result(a, d)

  // Concrete client/server types are supplied by protocol-specific subclasses.
  type ClientType <: Stack.Parameterized[ClientType] with WithThriftPartitioningStrategy[
    ClientType
  ] with ThriftRichClient

  type ServerType <: Stack.Parameterized[ServerType] with CommonParams[
    ServerType
  ] with ThriftRichServer

  def clientImpl(): ClientType

  def serverImpl(): ServerType

  // Wraps `inet` in a weighted Address whose ZK shard id is the port number,
  // letting tests use ephemeral ports as stable shard identifiers.
  def newAddress(inet: InetSocketAddress, weight: Int): Address = {
    val shardId = inet.getPort
    val md = ZkMetadata.toAddrMetadata(ZkMetadata(Some(shardId)))
    val addr = new Address.Inet(inet, md) {
      override def toString: String = s"Address(${inet.getPort})-($shardId)"
    }
    WeightedAddress(addr, weight)
  }
  // Shared fixture: an in-process DeliveryService implementation, five local
  // servers on ephemeral ports, and AddrInfo values used as request keys.
  trait Ctx {
    val iface = new DeliveryService.MethodPerEndpoint {
      def getBox(addrInfo: AddrInfo, passcode: Byte): Future[Box] =
        Future.value(Box(addrInfo, "hi"))

      def getBoxes(
        addrs: collection.Seq[AddrInfo],
        passcode: Byte
      ): Future[collection.Seq[Box]] =
        Future.value(
          addrs.map(Box(_, s"size: ${addrs.size}")) // shows how many sub-requests hit an endpoint
        )

      def sendBox(box: Box): Future[String] = Future.value(box.item)

      def sendBoxes(boxes: collection.Seq[Box]): Future[String] =
        Future.value(boxes.map(_.item).mkString(","))
    }

    // response merger functions: combine per-partition results, surfacing the
    // first failure only when no partition succeeded
    val getBoxesRepMerger: ResponseMerger[Seq[Box]] = (successes, failures) =>
      if (successes.nonEmpty) Return(successes.flatten)
      else Throw(failures.head)

    val sendBoxesRepMerger: ResponseMerger[String] = (successes, failures) =>
      if (successes.nonEmpty) Return(successes.mkString(";"))
      else Throw(failures.head)

    val inetAddresses: Seq[InetSocketAddress] =
      (0 until 5).map(_ => new InetSocketAddress(InetAddress.getLoopbackAddress, 0))

    val servers: Seq[ListeningServer] =
      inetAddresses.map(inet => serverImpl().serveIface(inet, iface))

    // bound (post-bind) socket addresses of the running servers
    def fixedInetAddresses = servers.map(_.boundAddress.asInstanceOf[InetSocketAddress])
    def addresses = servers.map { server =>
      val inet = server.boundAddress.asInstanceOf[InetSocketAddress]
      newAddress(inet, 1)
    }

    val addrInfo0 = AddrInfo("zero", 12345)
    val addrInfo1 = AddrInfo("one", 11111)
    val addrInfo11 = AddrInfo("one", 11112)
    val addrInfo2 = AddrInfo("two", 22222)
    val addrInfo3 = AddrInfo("three", 33333)
    val addrInfo4 = AddrInfo("four", 44444)
  }
  // Fixture adding a consistent-hashing strategy keyed on AddrInfo.name,
  // with request/response mergers registered for the batched endpoints.
  trait HashingPartitioningCtx extends Ctx {
    // request merger functions -- only used for hashing case when multiple keys fall in the same shard
    val getBoxesReqMerger: RequestMerger[GetBoxes.Args] = listGetBoxes =>
      GetBoxes.Args(listGetBoxes.map(_.listAddrInfo).flatten, listGetBoxes.head.passcode)
    val sendBoxesReqMerger: RequestMerger[SendBoxes.Args] = listSendBoxes =>
      SendBoxes.Args(listSendBoxes.flatMap(_.boxes))

    // maps each request to hashing-key -> sub-request; batched requests are
    // split per key so each shard sees only its own AddrInfos
    val hashingPartitioningStrategy = new ClientHashingStrategy({
      case getBox: GetBox.Args => Map(getBox.addrInfo.name -> getBox)
      case getBoxes: GetBoxes.Args =>
        getBoxes.listAddrInfo
          .groupBy {
            _.name
          }.map {
            case (hashingKey, subListAddrInfo) =>
              hashingKey -> GetBoxes.Args(subListAddrInfo, getBoxes.passcode)
          }
      case sendBoxes: SendBoxes.Args =>
        sendBoxes.boxes.groupBy(_.addrInfo.name).map {
          case (hashingKey, boxes) => hashingKey -> SendBoxes.Args(boxes)
        }
    })

    hashingPartitioningStrategy.requestMergerRegistry
      .add(GetBoxes, getBoxesReqMerger)
      .add(SendBoxes, sendBoxesReqMerger)

    hashingPartitioningStrategy.responseMergerRegistry
      .add(GetBoxes, getBoxesRepMerger)
      .add(SendBoxes, sendBoxesRepMerger)
  }
  // Baseline: with no partitioning strategy, a batched request is sent whole
  // to a single endpoint (all boxes report "size: 3").
  test("without partition strategy") {
    new Ctx {
      val client = clientImpl()
        .build[DeliveryService.MethodPerEndpoint](Name.bound(addresses: _*), "client")

      val expectedOneNode =
        Seq(Box(addrInfo1, "size: 3"), Box(addrInfo2, "size: 3"), Box(addrInfo3, "size: 3"))

      val result = await(client.getBoxes(Seq(addrInfo1, addrInfo2, addrInfo3), Byte.MinValue))
      assert(result == expectedOneNode)
      assert(await(client.sendBox(Box(addrInfo1, "test"))) == "test")
      assert(
        await(
          client.sendBoxes(Seq(Box(addrInfo1, "test1"), Box(addrInfo2, "test2")))) == "test1,test2")
      assert(await(client.getBox(addrInfo1, Byte.MinValue)) == Box(addrInfo1, "hi"))
      client.asClosable.close()
      servers.map(_.close())
    }
  }
  // Keys "one" (addrInfo1/addrInfo11) and "two" may hash to one or two shards
  // depending on the ephemeral ports, so both outcomes are accepted.
  test("with consistent hashing strategy") {
    new HashingPartitioningCtx {
      val client = clientImpl().withPartitioning
        .strategy(hashingPartitioningStrategy)
        .build[DeliveryService.MethodPerEndpoint](Name.bound(addresses: _*), "client")

      // addrInfo1 and addrInfo11 have the same key ("one")
      // addrInfo2 key ("two")
      val expectedTwoNodes =
        Seq(Box(addrInfo1, "size: 2"), Box(addrInfo11, "size: 2"), Box(addrInfo2, "size: 1")).toSet
      val expectedOneNode =
        Seq(Box(addrInfo1, "size: 3"), Box(addrInfo11, "size: 3"), Box(addrInfo2, "size: 3")).toSet

      val result =
        await(client.getBoxes(Seq(addrInfo1, addrInfo2, addrInfo11), Byte.MinValue)).toSet
      // if two keys hash to a singleton partition, expect one node, otherwise, two nodes.
      // local servers have random ports that are not consistent
      assert(result == expectedOneNode || result == expectedTwoNodes)
      client.asClosable.close()
      servers.map(_.close())
    }
  }
  // sendBox has no key extractor registered in the hashing strategy, so the
  // call fails with NoPartitioningKeys naming the endpoint.
  test("with consistent hashing strategy, unspecified endpoint returns error") {
    new HashingPartitioningCtx {
      val client = clientImpl().withPartitioning
        .strategy(hashingPartitioningStrategy)
        .build[DeliveryService.MethodPerEndpoint](Name.bound(addresses: _*), "client")

      val e = intercept[NoPartitioningKeys] {
        await(client.sendBox(Box(addrInfo1, "test")))
      }
      assert(e.getMessage.contains("sendBox"))
      client.asClosable.close()
      servers.map(_.close())
    }
  }
  // A key extractor that throws is surfaced to the caller wrapped in a
  // PartitioningStrategyException.
  test("with errored hashing strategy") {
    val erroredHashingPartitioningStrategy = new ClientHashingStrategy({
      case getBox: GetBox.Args => throw new Exception("something wrong")
    })
    new HashingPartitioningCtx {
      val client = clientImpl().withPartitioning
        .strategy(erroredHashingPartitioningStrategy)
        .build[DeliveryService.MethodPerEndpoint](Name.bound(addresses: _*), "client")

      intercept[PartitioningStrategyException] {
        await(client.getBox(addrInfo1, Byte.MinValue))
      }
      client.asClosable.close()
      servers.map(_.close())
    }
  }
  // Custom strategy where each server instance is its own partition; batched
  // requests are split per partition id (here: the server's port).
  test("with custom partitioning strategy, each shard is a partition") {
    new Ctx {
      // structs with key zero/two are routed to partition 0,
      // and structs with key one are routed to partition 1
      def lookUp(addrInfo: AddrInfo): Int = {
        addrInfo.name match {
          case "zero" | "two" => fixedInetAddresses(0).getPort
          case "one" => fixedInetAddresses(1).getPort
        }
      }

      val customPartitioningStrategy = ClientCustomStrategy.noResharding({
        case getBox: GetBox.Args => Future.value(Map(lookUp(getBox.addrInfo) -> getBox))
        case getBoxes: GetBoxes.Args =>
          val partitionIdAndRequest: Map[Int, ThriftStructIface] =
            getBoxes.listAddrInfo.groupBy(lookUp).map {
              case (partitionId, listAddrInfo) =>
                partitionId -> GetBoxes.Args(listAddrInfo, getBoxes.passcode)
            }
          Future.value(partitionIdAndRequest)
      })
      customPartitioningStrategy.responseMergerRegistry.add(GetBoxes, getBoxesRepMerger)

      val client = clientImpl().withPartitioning
        .strategy(customPartitioningStrategy)
        .build[DeliveryService.MethodPerEndpoint](Name.bound(addresses: _*), "client")

      val expectedTwoNodes =
        Seq(Box(addrInfo0, "size: 2"), Box(addrInfo2, "size: 2"), Box(addrInfo1, "size: 1")).toSet
      val result = await(client.getBoxes(Seq(addrInfo0, addrInfo1, addrInfo2), Byte.MinValue)).toSet
      assert(result == expectedTwoNodes)
    }
  }
  // Fixture with a custom strategy over three fixed logical partitions:
  // p0 = instances 0-1, p1 = instances 1-2, p2 = instances 3-4 (overlapping).
  // `lookUp` maps a request key to a logical partition id.
  class CustomPartitioningCtx(lookUp: AddrInfo => Int) extends Ctx {
    val customPartitioningStrategy = ClientCustomStrategy.noResharding(
      {
        case getBox: GetBox.Args => Future.value(Map(lookUp(getBox.addrInfo) -> getBox))
        case getBoxes: GetBoxes.Args =>
          val partitionIdAndRequest: Map[Int, ThriftStructIface] =
            getBoxes.listAddrInfo.groupBy(lookUp).map {
              case (partitionId, listAddrInfo) =>
                partitionId -> GetBoxes.Args(listAddrInfo, getBoxes.passcode)
            }
          Future.value(partitionIdAndRequest)
      },
      { instance: Int => // p0(0,1), p1(1,2), p2(3, 4)
        val partitionPositions = List(0.to(1), 1.to(2), 3.until(fixedInetAddresses.size))
        val position = fixedInetAddresses.indexWhere(_.getPort == instance)
        partitionPositions.zipWithIndex
          .filter { case (range, _) => range.contains(position) }.map(_._2)
      }
    )
    customPartitioningStrategy.responseMergerRegistry.add(GetBoxes, getBoxesRepMerger)
  }
  // Requests keyed to three logical partitions fan out so that each of the
  // five instances sees its partition's sub-request.
  test("with custom partitioning strategy, logical partition") {
    // struct keys to partition Ids
    def lookUp(addrInfo: AddrInfo): Int = {
      addrInfo.name match {
        case "four" => 0
        case "three" | "two" => 1
        case "one" | "zero" => 2
      }
    }
    new CustomPartitioningCtx(lookUp) {
      val client = clientImpl().withPartitioning
        .strategy(customPartitioningStrategy)
        .build[DeliveryService.MethodPerEndpoint](Name.bound(addresses: _*), "client")

      val expectedThreeNodes =
        Seq(
          Box(addrInfo0, "size: 2"),
          Box(addrInfo1, "size: 2"),
          Box(addrInfo2, "size: 2"),
          Box(addrInfo3, "size: 2"),
          Box(addrInfo4, "size: 1")).toSet

      val result = await(
        client.getBoxes(
          Seq(addrInfo0, addrInfo1, addrInfo2, addrInfo3, addrInfo4),
          Byte.MinValue)).toSet
      assert(result == expectedThreeNodes)
    }
  }
  // A custom partition-id function that throws surfaces as a
  // PartitioningStrategyException to the caller.
  test("with errored custom strategy") {
    val erroredCustomPartitioningStrategy = ClientCustomStrategy.noResharding({
      case getBox: GetBox.Args => throw new Exception("something wrong")
    })
    new Ctx {
      val client = clientImpl().withPartitioning
        .strategy(erroredCustomPartitioningStrategy)
        .build[DeliveryService.MethodPerEndpoint](Name.bound(addresses: _*), "client")

      intercept[PartitioningStrategyException] {
        await(client.getBox(addrInfo0, Byte.MinValue))
      }
      client.asClosable.close()
      servers.map(_.close())
    }
  }
  // Routing to partition id 4, which no instance maps to, fails with
  // NoPartitionException.
  test("with custom strategy, no logical partition") {
    def lookUp(addrInfo: AddrInfo): Int = {
      addrInfo.name match {
        case _ => 4
      }
    }
    new CustomPartitioningCtx(lookUp) {
      val client = clientImpl().withPartitioning
        .strategy(customPartitioningStrategy)
        .build[DeliveryService.MethodPerEndpoint](Name.bound(addresses: _*), "client")

      intercept[NoPartitionException] {
        await(
          client
            .getBoxes(Seq(addrInfo0, addrInfo1, addrInfo2, addrInfo3, addrInfo4), Byte.MinValue))
      }
    }
  }
  // sendBoxes has no mapping registered in the custom strategy, so calling it
  // fails with a PartitioningStrategyException.
  test("with custom strategy, unset endpoint") {
    // struct keys to partition Ids
    def lookUp(addrInfo: AddrInfo): Int = {
      addrInfo.name match {
        case "four" => 0
        case "three" | "two" => 1
        case "one" | "zero" => 2
      }
    }
    new CustomPartitioningCtx(lookUp) {
      val client = clientImpl().withPartitioning
        .strategy(customPartitioningStrategy)
        .build[DeliveryService.MethodPerEndpoint](Name.bound(addresses: _*), "client")

      intercept[PartitioningStrategyException] {
        await(client.sendBoxes(Seq(Box(addrInfo1, "test1"), Box(addrInfo2, "test2"))))
      }
    }
  }
  // Resharding driven by an observable Int state: requests go to the partition
  // whose id equals the current state, so bumping the state moves traffic from
  // server 0 to server 1 without creating new load balancers.
  test("with custom strategy, partitioning strategy dynamically changing") {
    new Ctx {
      val sr0 = new InMemoryStatsReceiver
      val sr1 = new InMemoryStatsReceiver
      var index = 0
      // for testing purpose, set separate statsReceivers for instance0 and instance1
      override val servers: Seq[ListeningServer] =
        inetAddresses.map { inet =>
          if (index == 0) {
            index = index + 1
            serverImpl().withStatsReceiver(sr0).serveIface(inet, iface)
          } else if (index == 1) {
            index = index + 1
            serverImpl().withStatsReceiver(sr1).serveIface(inet, iface)
          } else {
            serverImpl().serveIface(inet, iface)
          }
        }

      // the observable state is a dynamic Integer,
      // when state changed, we route requests to a partition with current Integer as the Id
      val dynamic = Var(0)
      val observable: Activity[Int] = Activity(dynamic.map(Activity.Ok(_)))

      val getPartitionIdAndRequest: Int => ClientCustomStrategy.ToPartitionedMap = { state =>
        {
          case sendBox: SendBox.Args =>
            Future.value(Map(state -> sendBox))
        }
      }

      // logical partition id == position of the instance in fixedInetAddresses
      val getLogicalPartitionId: Int => Int => Seq[Int] = {
        _ =>
          { instance: Int =>
            Seq(fixedInetAddresses.indexWhere(_.getPort == instance))
          }
      }

      val dynamicStrategy = ClientCustomStrategy.resharding[Int](
        getPartitionIdAndRequest,
        getLogicalPartitionId,
        observable
      )

      val client = clientImpl().withPartitioning
        .strategy(dynamicStrategy)
        .build[DeliveryService.MethodPerEndpoint](Name.bound(addresses: _*), "client")

      val numBalancersBeforeRepartition = BalancerRegistry.get.allMetadata.size
      await(client.sendBox(Box(addrInfo0, "")))
      dynamic() += 1
      // Ensure that the number of balancers remain the same after the repartition
      assert(numBalancersBeforeRepartition == BalancerRegistry.get.allMetadata.size)
      await(client.sendBox(Box(addrInfo0, "")))

      val server0Request =
        if (sr0.counters.isDefinedAt(Seq("requests"))) sr0.counters(Seq("requests"))
        else sr0.counters(Seq("thrift", "requests"))

      val server1Request =
        if (sr1.counters.isDefinedAt(Seq("requests"))) sr1.counters(Seq("requests"))
        else sr1.counters(Seq("thrift", "requests"))

      assert(server0Request == 1)
      assert(server1Request == 1)
    }
  }
  // Resharding driven by cluster membership: with 2 instances, partition 1 is
  // instance 1; after expanding to 5 instances, partition 1 becomes instances
  // 2-4, so server 1's request count stays at 1 and balancer count is stable.
  test("with cluster resharding, expanding cluster's instances") {
    new Ctx {
      val sr = new InMemoryStatsReceiver
      var index = 0
      // set instance 1 a stats receiver
      override val servers: Seq[ListeningServer] =
        inetAddresses.map { inet =>
          if (index == 0) {
            index = index + 1
            serverImpl().serveIface(inet, iface)
          } else if (index == 1) {
            index = index + 1
            serverImpl().withStatsReceiver(sr).serveIface(inet, iface)
          } else {
            serverImpl().serveIface(inet, iface)
          }
        }

      val getPartitionIdAndRequest: Set[Address] => ClientCustomStrategy.ToPartitionedMap = { _ =>
        {
          case sendBox: SendBox.Args =>
            Future.value(Map(1 -> sendBox)) // always route requests to partition 1
        }
      }

      val getLogicalPartitionId: Set[Address] => Int => Seq[Int] = {
        cluster =>
          { instance: Int =>
            if (cluster.size == 2) { // p0(0) p1(1)
              Seq(fixedInetAddresses.indexWhere(_.getPort == instance))
            } else { //p0(0,1) p1(2,3,4)
              val partitionPositions = List(0.to(1), 2.until(fixedInetAddresses.size))
              val position = fixedInetAddresses.indexWhere(_.getPort == instance)
              partitionPositions.zipWithIndex
                .filter { case (range, _) => range.contains(position) }.map(_._2)
            }
          }
      }
      val clusterResharding =
        ClientCustomStrategy.clusterResharding(getPartitionIdAndRequest, getLogicalPartitionId)

      val dynamicAddresses = Var(Addr.Bound(addresses.take(2): _*))
      val client = clientImpl().withPartitioning
        .strategy(clusterResharding)
        .build[DeliveryService.MethodPerEndpoint](
          Name.Bound(dynamicAddresses, dynamicAddresses()),
          "client")

      // pre-resharding, the request goes to server1,
      // post-resharding, the request goes to server2/3/4
      await(client.sendBox(Box(addrInfo0, "")))
      val server1Request =
        if (sr.counters.isDefinedAt(Seq("requests"))) sr.counters(Seq("requests"))
        else sr.counters(Seq("thrift", "requests"))
      val numBalancersBeforeRepartition = BalancerRegistry.get.allMetadata.size
      assert(server1Request == 1)
      dynamicAddresses() = Addr.Bound(addresses: _*)
      // Ensure that the number of balancers remain the same after the repartition
      assert(numBalancersBeforeRepartition == BalancerRegistry.get.allMetadata.size)
      await(client.sendBox(Box(addrInfo0, "")))
      assert(server1Request == 1)
    }
  }
}
| twitter/finagle | finagle-thrift/src/test/scala/com/twitter/finagle/thrift/exp/partitioning/PartitionAwareClientEndtoEndTest.scala | Scala | apache-2.0 | 18,436 |
package at.ac.tuwien.ifs.ir.model
/**
* Created by aldo on 10/10/14.
*/
/**
 * One line of a TREC-style run file: an iteration tag, the retrieved document,
 * its rank and its retrieval score.
 */
class RunRecord(val iteration: String, val document: Document, val rank: Int, val score: Float) {
  /** Renders the record back into its space-separated run-file form. */
  override def toString: String =
    List(iteration, document.id, rank, score).mkString(" ")
}
object RunRecord {
def apply(iteration: String, document: Document, rank: Int, score: Float) = new RunRecord(iteration, document, rank, score)
def fromItems(arr: Array[String]): RunRecord = try {
new RunRecord(arr(0), new Document(arr(1)), arr(2).toInt, arr(3).toFloat)
} catch {
case e: Exception => {
println(arr.mkString(" "));
throw e
}
}
} | aldolipani/PoolBiasEstimators | src/main/scala/at/ac/tuwien/ifs/ir/model/RunRecord.scala | Scala | apache-2.0 | 631 |
package com.mizhi.nlp.stemmers.huskpaice
import com.mizhi.nlp.stemmers.huskpaice.RuleAction._
import org.mockito.Mockito.{doReturn, spy}
/**
 * Unit tests for [[Rule]]: suffix application, the raw string transform,
 * intactness requirements and stem acceptability for the Husk/Paice stemmer.
 */
class RuleSpec extends UnitSpec {
  // Baseline rule: matches suffix "suffix", no replacement, intact-only, stop action.
  val rule = Rule("suffix", None, true, stop)
  describe("apply") {
    val state = StemmingState("asuffix", true, None)
    describe("when rule is applied") {
      // Spy so the applicability/acceptability checks can be forced to true.
      val spiedRule = spy(rule.copy(append=Some("thing")))
      doReturn(true, Array[Object](): _*).when(spiedRule).ruleApplies(state)
      doReturn(true, Array[Object](): _*).when(spiedRule).stemAcceptable(state.word)
      it("removes suffix") {
        spiedRule(state).word should be("athing")
      }
      it("sets intact to false") {
        spiedRule(state).intact should be(false)
      }
      it("sets the nextAction to the rule's action") {
        spiedRule(state).nextAction should be (Some(spiedRule.nextAction))
      }
    }
    describe("when rule is not applied") {
      val spiedRule = spy(rule.copy(append=Some("thing")))
      doReturn(false, Array[Object](): _*).when(spiedRule).ruleApplies(state)
      it("returns same state") {
        spiedRule(state) should be(state)
      }
    }
  }
  describe("applyStringTransform") {
    describe("when append is None") {
      it("leaves stem alone") {
        rule.applyStringTransform("prefixwordsuffix") should be("prefixwordsuffix")
      }
    }
    describe("when append is the empty string") {
      it("strips suffix") {
        rule.copy(append=Some("")).applyStringTransform("prefixwordsuffix") should be("prefixword")
      }
    }
    describe("when append is not empty") {
      it("replaces suffix") {
        rule.copy(append=Some("newstring")).applyStringTransform("prefixwordsuffix") should be("prefixwordnewstring")
      }
    }
  }
  describe("endingMatches") {
    it("detects when ending matches suffix") {
      rule.endingMatches("somefoolstringsuffix") should be(true)
    }
    it("detects when ending doesn't match suffix") {
      rule.endingMatches("somefoolstring") should be(false)
    }
  }
  describe("intactnessIsGood") {
    val intactState = StemmingState("foo", true, None)
    val modifiedStem = intactState.copy(intact=false)
    describe("when rule requires the word to be intact") {
      it("is true when stem is intact") {
        rule.intactnessIsGood(intactState) should be(true)
      }
      it("is false when stem is not intact") {
        rule.intactnessIsGood(modifiedStem) should be(false)
      }
    }
    describe("when rule does not require the word to be intact") {
      val modifiedRule = rule.copy(intactOnly = false)
      it("is true when stem is intact") {
        modifiedRule.intactnessIsGood(intactState) should be(true)
      }
      it("is true when stem is not intact") {
        modifiedRule.intactnessIsGood(modifiedStem) should be(true)
      }
    }
  }
  describe("stemAcceptable") {
    describe("when word starts with vowel") {
      it("returns true when length is >= 2") {
        rule.stemAcceptable("ace") should be(true)
      }
      it("returns false when length is < 2") {
        rule.stemAcceptable("a") should be(false)
      }
      it("works with empty string") {
        rule.stemAcceptable("") should be(false)
      }
    }
    describe("when word starts with consonant") {
      it("returns true when length is >= 3 and has a vowel") {
        rule.stemAcceptable("cad") should be(true)
      }
      it("returns true when vowel is a y") {
        rule.stemAcceptable("cry") should be(true)
      }
      it("returns false when length is < 3") {
        rule.stemAcceptable("cr") should be (false)
      }
      it("returns false when there are no vowels") {
        rule.stemAcceptable("str") should be (false)
      }
    }
  }
}
| mizhi/scala-stemmer | src/test/scala/com/mizhi/nlp/stemmers/huskpaice/RuleSpec.scala | Scala | mit | 3,755 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.feats
import io.truthencode.ddo.model.race.Race
import io.truthencode.ddo.model.race.Race.Warforged
import io.truthencode.ddo.support.requisite.{RaceRequisiteImpl, RequiresNoneOfRace}
/**
* Created by adarr on 4/6/2017.
*/
/**
 * Deity-feat prerequisite marker: the feat is available to every race
 * except Warforged (excluded at level 1 via the none-of-race requisite).
 */
trait EberronReligionNonWarforged
  extends RaceRequisiteImpl with RequiresNoneOfRace with EberronReligionBase {
  self: DeityFeat =>

  /** Races that must NOT be present for this feat to qualify. */
  override def noneOfRace: Seq[(Race, Int)] = Seq(Warforged -> 1)
}
| adarro/ddo-calc | subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/feats/EberronReligionNonWarforged.scala | Scala | apache-2.0 | 1,138 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iht.controllers.application.assets.stocksAndShares
import iht.config.AppConfig
import iht.controllers.application.ApplicationControllerTest
import iht.forms.ApplicationForms._
import iht.models.application.ApplicationDetails
import iht.testhelpers.CommonBuilder
import iht.testhelpers.TestHelper._
import iht.utils.CommonHelper
import iht.views.html.application.asset.stocksAndShares.stocks_and_shares_not_listed
import play.api.mvc.MessagesControllerComponents
import play.api.test.Helpers.{contentAsString, _}
import uk.gov.hmrc.play.bootstrap.frontend.controller.FrontendController
/**
 * Controller tests for the "stocks and shares not listed" page:
 * authentication redirects, page load, submission (including wiping a stale
 * value when the answer is No) and form validation behaviour.
 */
class StocksAndSharesNotListedControllerTest extends ApplicationControllerTest {

  // Overview page URL (with fragment anchor) the controller redirects to on success.
  lazy val submitUrl = CommonHelper.addFragmentIdentifierToUrl(routes.StocksAndSharesOverviewController.onPageLoad().url, AssetsStocksNotListedID)

  // Wires the mocked caching/IHT connectors to serve and accept the given application details.
  def setUpTests(applicationDetails: ApplicationDetails) = {
    createMocksForApplication(mockCachingConnector,
      mockIhtConnector,
      appDetails = Some(applicationDetails),
      getAppDetails = true,
      saveAppDetails = true,
      storeAppDetailsInCache = true)
  }

  protected abstract class TestController extends FrontendController(mockControllerComponents) with StocksAndSharesNotListedController {
    override val cc: MessagesControllerComponents = mockControllerComponents
    override implicit val appConfig: AppConfig = mockAppConfig
    override val stocksAndSharesNotListedView: stocks_and_shares_not_listed = app.injector.instanceOf[stocks_and_shares_not_listed]
  }

  // Controller under test with mocked connectors.
  def stocksAndSharesNotListedController = new TestController {
    val authConnector = mockAuthConnector

    override val cachingConnector = mockCachingConnector
    override val ihtConnector = mockIhtConnector
  }

  // Same controller; used for the unauthenticated-user scenarios.
  def stocksAndSharesNotListedControllerNotAuthorised = new TestController {
    val authConnector = mockAuthConnector

    override val cachingConnector = mockCachingConnector
    override val ihtConnector = mockIhtConnector
  }

  "StocksAndSharesNotListedController" must {
    "redirect to login page on PageLoad if the user is not logged in" in {
      val result = stocksAndSharesNotListedControllerNotAuthorised.onPageLoad(createFakeRequest(isAuthorised = false))
      status(result) must be(SEE_OTHER)
      redirectLocation(result) must be (Some(loginUrl))
    }

    "redirect to login page on Submit if the user is not logged in" in {
      val result = stocksAndSharesNotListedControllerNotAuthorised.onSubmit(createFakeRequest(isAuthorised = false))
      status(result) must be(SEE_OTHER)
      redirectLocation(result) must be (Some(loginUrl))
    }

    "respond with OK on page load" in {
      val applicationDetails = CommonBuilder.buildApplicationDetails

      setUpTests(applicationDetails)

      val result = stocksAndSharesNotListedController.onPageLoad(createFakeRequest())
      status(result) must be(OK)
    }

    "save application and go to stocksAndShares overview page on submit" in {
      val applicationDetails = CommonBuilder.buildApplicationDetails

      val formFill = stockAndShareNotListedForm.fill(CommonBuilder.buildStockAndShare.copy(isNotListed = Some(true),
        valueNotListed = Some(200)))
      implicit val request = createFakeRequest().withFormUrlEncodedBody(formFill.data.toSeq: _*)

      setUpTests(applicationDetails)

      val result = stocksAndSharesNotListedController.onSubmit()(request)
      status(result) must be (SEE_OTHER)
      redirectLocation(result) must be (Some(submitUrl))
    }

    "wipe out the sharesNotListed value if user selects No, save application and go to stocksAndShares overview page on submit" in {
      val sharesNotListed = CommonBuilder.buildStockAndShare.copy(isNotListed = Some(false), valueNotListed = Some(200))

      val applicationDetails = CommonBuilder.buildApplicationDetails.copy(
        allAssets = Some(CommonBuilder.buildAllAssets.copy(
          stockAndShare = Some(sharesNotListed))))

      val formFill = stockAndShareNotListedForm.fill(sharesNotListed)
      implicit val request = createFakeRequest().withFormUrlEncodedBody(formFill.data.toSeq: _*)

      setUpTests(applicationDetails)

      val result = stocksAndSharesNotListedController.onSubmit()(request)
      status(result) must be (SEE_OTHER)
      redirectLocation(result) must be (Some(submitUrl))

      // Answering "No" must clear the previously stored valueNotListed before saving.
      val capturedValue = verifyAndReturnSavedApplicationDetails(mockIhtConnector)
      val expectedAppDetails = applicationDetails.copy(allAssets = applicationDetails.allAssets.map(_.copy(
        stockAndShare = Some(CommonBuilder.buildStockAndShare.copy(valueNotListed = None, isNotListed = Some(false))))))

      capturedValue mustBe expectedAppDetails
    }

    "display validation message when form is submitted with no values entered" in {
      val applicationDetails = CommonBuilder.buildApplicationDetails

      implicit val request = createFakeRequest()

      setUpTests(applicationDetails)

      val result = stocksAndSharesNotListedController.onSubmit()(request)
      status(result) must be (BAD_REQUEST)
      contentAsString(result) must include (messagesApi("error.problem"))
    }

    "redirect to overview when form is submitted with answer yes and a value entered" in {
      val applicationDetails = CommonBuilder.buildApplicationDetails

      implicit val request = createFakeRequest().withFormUrlEncodedBody(("isNotListed", "true"), ("valueNotListed", "233"))

      setUpTests(applicationDetails)

      val result = stocksAndSharesNotListedController.onSubmit()(request)
      status(result) must be (SEE_OTHER)
      redirectLocation(result) must be (Some(submitUrl))
    }

    "respond with bad request when incorrect value are entered on the page" in {
     implicit val fakePostRequest = createFakeRequest().withFormUrlEncodedBody(("value", "utytyyterrrrrrrrrrrrrr"))

      createMockToGetRegDetailsFromCacheNoOption(mockCachingConnector)

      val result = stocksAndSharesNotListedController.onSubmit (fakePostRequest)
      status(result) mustBe (BAD_REQUEST)
    }

    behave like controllerOnPageLoadWithNoExistingRegistrationDetails(mockCachingConnector,
      stocksAndSharesNotListedController.onPageLoad(createFakeRequest()))
  }
}
| hmrc/iht-frontend | test/iht/controllers/application/assets/stocksAndShares/StocksAndSharesNotListedControllerTest.scala | Scala | apache-2.0 | 6,863 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util.Properties
import junit.framework.Assert._
import org.easymock.{Capture, EasyMock}
import org.junit.Test
import kafka.integration.KafkaServerTestHarness
import kafka.utils._
import kafka.common._
import kafka.log.LogConfig
import kafka.admin.{AdminOperationException, AdminUtils}
/**
 * Integration tests verifying that topic- and client-level config overrides written
 * through AdminUtils are picked up by a running broker without a restart, and that
 * DynamicConfigManager rejects malformed change notifications.
 */
class DynamicConfigChangeTest extends KafkaServerTestHarness {
  // Single broker backed by the harness ZooKeeper instance.
  def generateConfigs() = List(KafkaConfig.fromProps(TestUtils.createBrokerConfig(0, zkConnect)))

  @Test
  def testConfigChange() {
    assertTrue("Should contain a ConfigHandler for topics",
               this.servers(0).dynamicConfigHandlers.contains(ConfigType.Topic))
    val oldVal: java.lang.Long = 100000L
    val newVal: java.lang.Long = 200000L
    val tp = TopicAndPartition("test", 0)
    val logProps = new Properties()
    logProps.put(LogConfig.FlushMessagesProp, oldVal.toString)
    AdminUtils.createTopic(zkUtils, tp.topic, 1, 1, logProps)
    // Log creation is asynchronous; poll until the initial override is visible.
    TestUtils.retry(10000) {
      val logOpt = this.servers(0).logManager.getLog(tp)
      assertTrue(logOpt.isDefined)
      assertEquals(oldVal, logOpt.get.config.flushInterval)
    }

    logProps.put(LogConfig.FlushMessagesProp, newVal.toString)
    AdminUtils.changeTopicConfig(zkUtils, tp.topic, logProps)
    // The running broker must apply the new flush interval dynamically.
    TestUtils.retry(10000) {
      assertEquals(newVal, this.servers(0).logManager.getLog(tp).get.config.flushInterval)
    }
  }

  // For now client config changes do not do anything. Simply verify that the call was made
  @Test
  def testClientConfigChange() {
    assertTrue("Should contain a ConfigHandler for topics",
               this.servers(0).dynamicConfigHandlers.contains(ConfigType.Client))
    val clientId = "testClient"
    val props = new Properties()
    props.put("a.b", "c")
    props.put("x.y", "z")
    AdminUtils.changeClientIdConfig(zkUtils, clientId, props)
    TestUtils.retry(10000) {
      val configHandler = this.servers(0).dynamicConfigHandlers(ConfigType.Client).asInstanceOf[ClientIdConfigHandler]
      assertTrue("ClientId testClient must exist", configHandler.configPool.contains(clientId))
      assertEquals("ClientId testClient must be the only override", 1, configHandler.configPool.size)
      assertEquals("c", configHandler.configPool.get(clientId).getProperty("a.b"))
      assertEquals("z", configHandler.configPool.get(clientId).getProperty("x.y"))
    }
  }

  @Test
  def testConfigChangeOnNonExistingTopic() {
    val topic = TestUtils.tempTopic
    try {
      val logProps = new Properties()
      logProps.put(LogConfig.FlushMessagesProp, 10000: java.lang.Integer)
      AdminUtils.changeTopicConfig(zkUtils, topic, logProps)
      fail("Should fail with AdminOperationException for topic doesn't exist")
    } catch {
      case e: AdminOperationException => // expected
    }
  }

  @Test
  def testProcessNotification {
    val props = new Properties()
    props.put("a.b", "10")

    // Create a mock ConfigHandler to record config changes it is asked to process
    val entityArgument = new Capture[String]()
    val propertiesArgument = new Capture[Properties]()
    val handler = EasyMock.createNiceMock(classOf[ConfigHandler])
    handler.processConfigChanges(
      EasyMock.and(EasyMock.capture(entityArgument), EasyMock.isA(classOf[String])),
      EasyMock.and(EasyMock.capture(propertiesArgument), EasyMock.isA(classOf[Properties])))
    EasyMock.expectLastCall().once()
    EasyMock.replay(handler)

    val configManager = new DynamicConfigManager(zkUtils, Map(ConfigType.Topic -> handler))
    // Notifications created using the old TopicConfigManager are ignored.
    configManager.processNotification(Some("not json"))

    // Incorrect Map. No version
    try {
      val jsonMap = Map("v" -> 1, "x" -> 2)
      configManager.processNotification(Some(Json.encode(jsonMap)))
      fail("Should have thrown an Exception while parsing incorrect notification " + jsonMap)
    }
    catch {
      case t: Throwable =>
    }
    // Version is provided. EntityType is incorrect
    try {
      val jsonMap = Map("version" -> 1, "entity_type" -> "garbage", "entity_name" -> "x")
      configManager.processNotification(Some(Json.encode(jsonMap)))
      fail("Should have thrown an Exception while parsing incorrect notification " + jsonMap)
    }
    catch {
      case t: Throwable =>
    }

    // EntityName isn't provided
    try {
      val jsonMap = Map("version" -> 1, "entity_type" -> ConfigType.Topic)
      configManager.processNotification(Some(Json.encode(jsonMap)))
      fail("Should have thrown an Exception while parsing incorrect notification " + jsonMap)
    }
    catch {
      case t: Throwable =>
    }

    // Everything is provided
    val jsonMap = Map("version" -> 1, "entity_type" -> ConfigType.Topic, "entity_name" -> "x")
    configManager.processNotification(Some(Json.encode(jsonMap)))

    // Verify that processConfigChanges was only called once
    EasyMock.verify(handler)
  }
}
| vkroz/kafka | core/src/test/scala/unit/kafka/server/DynamicConfigChangeTest.scala | Scala | apache-2.0 | 5,736 |
package ch.wsl.box.rest.routes.enablers.twirl
import akka.http.scaladsl.marshalling.{Marshaller, _}
import akka.http.scaladsl.model.MediaType
import akka.http.scaladsl.model.MediaTypes._
import play.twirl.api.{Html, Txt, Xml}
/**
 * Akka HTTP marshallers for Play Twirl template result types, allowing
 * routes to complete directly with rendered [[Html]], [[Txt]] or [[Xml]].
 */
object Implicits {

  // Explicit type annotations on the implicit vals: implicit definitions with
  // inferred types are fragile (inference-order sensitive, disallowed in Scala 3).

  /** Marshals Twirl [[Html]] output as a `text/html` entity. */
  implicit val twirlHtmlMarshaller: ToEntityMarshaller[Html] = twirlMarshaller[Html](`text/html`)

  /** Marshals Twirl [[Txt]] output as a `text/plain` entity. */
  implicit val twirlTxtMarshaller: ToEntityMarshaller[Txt] = twirlMarshaller[Txt](`text/plain`)

  /** Marshals Twirl [[Xml]] output as a `text/xml` entity. */
  implicit val twirlXmlMarshaller: ToEntityMarshaller[Xml] = twirlMarshaller[Xml](`text/xml`)

  /** Builds a marshaller that renders the value with `toString` under the given media type. */
  def twirlMarshaller[A](contentType: MediaType): ToEntityMarshaller[A] = {
    Marshaller.StringMarshaller.wrap(contentType)(_.toString)
  }
}
| Insubric/box | server/src/main/scala/ch/wsl/box/rest/routes/enablers/twirl/Implicits.scala | Scala | apache-2.0 | 667 |
/*
* Copyright 2018 Michal Harish, michal.harish@gmail.com
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.amient.affinity.core.util
import java.util.concurrent.ConcurrentHashMap
import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpResponse
import com.codahale.metrics.{MetricRegistry, Timer}
import scala.concurrent.{Future, Promise}
import scala.util.{Failure, Success}
/**
 * Registry factory: one [[AffinityMetrics]] per ActorSystem, with globally
 * registered reporter hooks applied to every newly created registry.
 */
object AffinityMetrics {

  // Reporter hooks applied to each registry at creation time.
  private val reporters = scala.collection.mutable.ListBuffer[MetricRegistry => Unit]()

  /** Registers a reporter hook; it will be applied to every registry created afterwards. */
  def apply(f: MetricRegistry => Unit): Unit = reporters += f

  private val metricsRegistries = new ConcurrentHashMap[ActorSystem, AffinityMetrics]()

  /**
   * Returns the metrics registry for the given actor system, creating it on first use.
   * Creation is atomic via computeIfAbsent; the previous get-then-put sequence could
   * race under concurrent callers and hand out two different registries for one system.
   */
  def forActorSystem(system: ActorSystem): AffinityMetrics =
    metricsRegistries.computeIfAbsent(system, new java.util.function.Function[ActorSystem, AffinityMetrics] {
      override def apply(s: ActorSystem): AffinityMetrics = {
        val registry = new AffinityMetrics
        reporters.foreach(_(registry))
        registry
      }
    })
}
/**
 * A Dropwizard MetricRegistry extended with convenience helpers for timing
 * asynchronous operations (Futures/Promises) as success/failure meters plus a timer.
 */
class AffinityMetrics extends MetricRegistry {

  // Used by process() to register completion callbacks on the supplied futures.
  private implicit val executor = scala.concurrent.ExecutionContext.Implicits.global

  // One ProcessMetrics bundle per logical operation name.
  private val processMetricsMap = new ConcurrentHashMap[String, ProcessMetrics]()

  /**
   * Returns the [[ProcessMetrics]] bundle registered under `name`, creating it on first use.
   * Creation is atomic via computeIfAbsent; the previous get-then-put sequence could race
   * and briefly produce two wrapper instances for the same name.
   */
  def meterAndHistogram(name: String): ProcessMetrics =
    processMetricsMap.computeIfAbsent(name, new java.util.function.Function[String, ProcessMetrics] {
      override def apply(n: String): ProcessMetrics = new ProcessMetrics(n)
    })

  /** Times and counts completion of the future underlying the given promise. */
  def process(groupName: String, result: Promise[_]): Unit = process(groupName, result.future)

  /**
   * Times `result` under `groupName` and marks success or failure when it completes.
   * A successful HttpResponse with status >= 400 is counted as a failure.
   */
  def process(groupName: String, result: Future[Any]): Unit = {
    val metrics = meterAndHistogram(groupName)
    val startTime = metrics.markStart()
    result.onComplete {
      case Success(response: HttpResponse) => if (response.status.intValue() < 400) metrics.markSuccess(startTime) else metrics.markFailure(startTime)
      case Success(_) => metrics.markSuccess(startTime)
      case Failure(_) => metrics.markFailure(startTime)
    }
  }

  /** Timer plus success/failure meters for one named operation. */
  class ProcessMetrics(name: String) {
    val durations = timer(s"$name.timer")
    val successes = meter(s"$name.success")
    val failures = meter(s"$name.failure")

    /** Starts the latency timer; pass the returned context to markSuccess/markFailure. */
    def markStart(): Timer.Context = durations.time()

    /** Stops the timer and records `n` successes. */
    def markSuccess(context: Timer.Context, n: Long = 1): Unit = {
      context.stop
      successes.mark(n)
    }

    /** Stops the timer and records one failure. */
    def markFailure(context: Timer.Context): Unit = {
      context.stop
      failures.mark()
    }
  }
}
| amient/affinity | core/src/main/scala/io/amient/affinity/core/util/AffinityMetrics.scala | Scala | apache-2.0 | 3,192 |
//: ----------------------------------------------------------------------------
//: Copyright (C) 2015 Verizon. All Rights Reserved.
//:
//: Licensed under the Apache License, Version 2.0 (the "License");
//: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//: http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//:
//: ----------------------------------------------------------------------------
package funnel
package elastic
import argonaut._
import java.io.File
import journal.Logger
import java.util.Date
import java.text.SimpleDateFormat
import java.util.concurrent.{ExecutorService, Executors}
import scala.util.control.NonFatal
import scalaz.stream.Process
import scalaz.concurrent.{Strategy, Task}
import Process.constant
import scalaz._
import scalaz.stream.async.mutable.ScalazHack
object Elastic {
  import Argonaut._
  import Kleisli.ask
  import scalaz.syntax.kleisli._
  import scalaz.syntax.monad._ // pulls in *>
  import scala.concurrent.duration._
  // String aliases documenting the roles these values play throughout the module.
  type SourceURL = String
  type ExperimentID = String
  type GroupID = String
  type Window = String
  type Path = List[String]
  // All elastic operations run as a Kleisli: a Task that reads an ElasticCfg environment.
  type ES[A] = Kleisli[Task, ElasticCfg, A]
  private[this] val log = Logger[Elastic.type]
  // Dedicated daemon pool on which elastic publishing work is scheduled.
  val esPool: ExecutorService =
    Executors.newFixedThreadPool(8, Monitoring.daemonThreads("elastic-publisher-thread"))
  /**
   * read the ElasticCfg out of the Kleisli environment.
   */
  val getConfig: ES[ElasticCfg] = ask[Task, ElasticCfg]
  /**
   * Not in Scalaz until 7.2 so duplicating here.
   */
  def lower[M[_]:Monad,A,B](k: Kleisli[M,A,B]): Kleisli[M,A,M[B]] =
    Kleisli(a => Monad[M].pure(k(a)))
  /**
   * natural transformation between task and ES, which is a kleisli
   * in Task. provided for explicitness, instead of using the `liftKleisli`
   * syntax all through the code.
   */
  def lift: Task ~> ES = new (Task ~> ES) {
    def apply[A](t: Task[A]) = t.liftKleisli
  }
  /**
   * get a handle on the subscriptionTimeout without having an ES instance
   */
  def duration: Reader[ElasticCfg, FiniteDuration] = Reader { es => es.subscriptionTimeout }
  /**
   * build a function that sends string-like inputs to the specified request
   * using HTTP GET verb which will be handled using the customised `handler`.
   *
   * @see funnel.elastic.Elastic.handler
   */
  def elasticString(req: HttpOp)(H: HttpLayer): ES[String] = H.http(req)
  /**
   * send the supplied json string to the configured elastic search endpoint
   * using HTTP POST. failures are logged and swallowed rather than propagated.
   */
  def elastic(req: HttpOp)(H: HttpLayer): ES[Unit] = for {
    ta <- lower(elasticString(req)(H))
    _ <- lift(ta.attempt.map(_.fold(
      e => log.error(s"Unable to send document to elastic search due to '$e'. Request was: \\n $req"),
      _ => ())))
  } yield ()
  /**
   * retries any non-HTTP errors with exponential backoff
   * (2s doubling for up to 30 attempts); 4xx responses are never retried.
   */
  def retry[A](task: Task[A])(m: ElasticMetrics): Task[A] = {
    val schedule = Stream.iterate(2)(_ * 2).take(30).map(_.seconds)
    val t = task.attempt.flatMap(_.fold(
      {
        case e: HttpException =>
          // bump the appropriate response-class counter before re-raising
          Task.delay {
            if (e.serverError) m.HttpResponse5xx.increment
            else if (e.clientError) m.HttpResponse4xx.increment
          }.flatMap(_ => Task.fail(e))
        case e =>
          Task.delay {
            log.error(s"Error contacting ElasticSearch: $e. Retrying...")
            m.NonHttpErrors.increment
          } >>= (_ => Task.fail(e))
      },
      a => Task.now(a)
    ))
    t.retry(schedule, {
      case e: HttpException if e.clientError => false
      case e: HttpException => true //retry on server errors like "gateway timeout"
      case e@NonFatal(_) => true
      case _ => false
    })
  }
  // encodes each document as a bulk-API "create" action line followed by its source.
  private def toBulkPayload(jsons: Seq[Json]): String =
    jsons.map {
      js => s"""{ "create": {}}\\n${js.nospaces}\\n"""
    }.mkString
  /**
   * Publishes to an ElasticSearch URL at `esURL`.
   */
  def bufferAndPublish(
    flaskName: String,
    flaskCluster: String
  )(M: Monitoring, E: Strategy, H: HttpLayer, iselfie: ElasticMetrics
  )(jsonStream: ElasticCfg => Process[Task, Json]): ES[Unit] = {
    // drains batches from the buffer and POSTs them to the bulk endpoint with retries.
    def doPublish(de: Process[Task, Seq[Json]], cfg: ElasticCfg): Process[Task, Unit] =
      de to constant(
        (jsons: Seq[Json]) => retry(iselfie.HttpResponse2xx.timeTask(
          for {
            //delaying task is important here. Otherwise we will not really retry to send http request
            u <- Task.delay(esBulkURL)
            _ = log.debug(s"Posting ${jsons.size} docs.")
            _ <- H.http(POST(u, Reader((cfg: ElasticCfg) => toBulkPayload(jsons))))(cfg)
          } yield ()
        ))(iselfie).attempt.map {
          case \\/-(v) => ()
          case -\\/(t) =>
            //if we fail to publish metric, proceed to the next one
            // TODO: consider circuit breaker on ES failures (embed into HttpLayer)
            // TODO: how does publishing dies when flasks stops monitoring target? do we release resources?
            log.warn(s"[elastic] failed to publish. error=$t cnt=${jsons.size} data=$jsons")
            iselfie.BufferDropped.increment
            ()
        }
      )
    for {
      _ <- ensureTemplate(H)
      r <- Kleisli.kleisli[Task, ElasticCfg, Unit] {
        (cfg: ElasticCfg) =>
          log.info(s"Initializing Elastic buffer of size ${cfg.bufferSize}...")
          val buffer = ScalazHack.observableCircularBuffer[Json](
            cfg.bufferSize, iselfie.BufferDropped, iselfie.BufferUsed
          )(E)
          log.info(s"Started Elastic subscription (max batch size=${cfg.batchSize})")
          // Reads from the monitoring instance and posts to the publishing queue
          val read = jsonStream(cfg).to(buffer.enqueue)
          // Reads from the publishing queue and writes to ElasticSearch
          val write = doPublish(buffer.dequeueBatch(cfg.batchSize), cfg)
          // run producer and consumer concurrently until both terminate
          Nondeterminism[Task].reduceUnordered[Unit, Unit](Seq(read.run, write.run))
      }
    } yield r
  }
  /****************************** indexing ******************************/
  /**
   * construct the actual url to send documents to based on the configuration
   * parameters, taking into account the index data pattern and prefix.
   */
  def indexURL: Reader[ElasticCfg, String] = Reader { es =>
    val date = new SimpleDateFormat(es.dateFormat).format(new Date)
    s"${es.url}/${es.indexName}-$date"
  }
  /**
   * given the ES url, make sure that all our requests are properly set with
   * the application/json content mime type.
   */
  def esURL: Reader[ElasticCfg, String] = Reader {
    es => s"${indexURL(es)}/${es.typeName}"
  }
  // bulk-API endpoint for the current index/type.
  def esBulkURL: Reader[ElasticCfg, String] = Reader {
    es => s"${indexURL(es)}/${es.typeName}/_bulk"
  }
  private def esTemplateURL: Reader[ElasticCfg, String] = Reader {
    es => s"${es.url}/_template/${es.templateName}"
  }
  // loads the index-mapping template from disk; fails if none was configured.
  private def esTemplate: Reader[ElasticCfg, String] = Reader {
    cfg => cfg.templateLocation.map(
      f => scala.io.Source.fromFile(new File(f).getAbsolutePath).mkString
    ).getOrElse(
      sys.error("no index mapping template specified.")
    )
  }
  /**
   * returns true if the index was created. False if it already existed.
   */
  def ensureIndex(url: Reader[ElasticCfg, String])(H: HttpLayer): ES[Boolean] =
    ensureExists(
      HEAD(url),
      //create index
      PUT(url, Reader((cfg: ElasticCfg) => Json("settings" := Json("index.cache.query.enable" := true)).nospaces))
    )(H)
  /**
   * ensure the index we are trying to send documents too (defined in the config)
   * exists in the backend elastic search cluster. runs `action` only when the
   * `check` request comes back 404; any other failure is propagated.
   */
  def ensureExists(check: HttpOp, action: HttpOp)(H: HttpLayer): ES[Boolean] =
    for {
      s <- H.http(check).mapK(_.attempt)
      b <- s.fold(
        {
          case HttpException(404) => H.http(action).map(_ => true)
          case e => lift(Task.fail(e)) // SPLODE!
        },
        z => lift(Task.now(false))
      )
    } yield b
  /**
   * load the template specified in the configuration and send it to the index
   * to ensure that all the document fields we publish are correctly handled by
   * elastic search / kibana.
   */
  def ensureTemplate(H: HttpLayer): ES[Unit] = ensureExists(
    HEAD(
      esTemplateURL.map {s => log.info(s"Ensuring Elastic template $s exists..."); s }
    ),
    PUT(esTemplateURL, esTemplate)
  )(H).map(_ => ())
}
/*
* StructuredAlgorithm.scala
* Abstract class for algorithms that are structured
*
* Created By: Brian Ruttenberg (bruttenberg@cra.com)
* Creation Date: December 30, 2015
*
* Copyright 2015 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email figaro@cra.com for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.algorithm.structured.algorithm
import com.cra.figaro.algorithm.Algorithm
import com.cra.figaro.language._
import scala.collection.mutable.Map
import com.cra.figaro.algorithm.factored.factors.Factor
import com.cra.figaro.algorithm.factored.factors.Semiring
import com.cra.figaro.algorithm.structured.Problem
import com.cra.figaro.algorithm.structured.ComponentCollection
import com.cra.figaro.algorithm.OneTimeMPE
import com.cra.figaro.algorithm.AlgorithmException
/**
 * Base class for structured most-probable-explanation (MPE) algorithms.
 * On construction it builds a top-level Problem over all permanent elements
 * of the universe plus any evidence (conditioned/constrained) elements.
 *
 * @param universe the universe whose MPE is to be computed
 */
abstract class StructuredMPEAlgorithm(val universe: Universe) extends Algorithm with OneTimeMPE {

  // Executes the concrete solving strategy; implemented by subclasses.
  def run(): Unit

  // Semiring used by the underlying factored computation.
  val semiring: Semiring[Double]

  //val targetFactors: Map[Element[_], Factor[Double]] = Map()

  val cc: ComponentCollection = new ComponentCollection

  val problem = new Problem(cc, List())
  // We have to add all active elements to the problem since these elements, if they are every used, need to have components created at the top level problem
  universe.permanentElements.foreach(problem.add(_))
  val evidenceElems = universe.conditionedElements ::: universe.constrainedElements

  // Components for every permanent or evidence-bearing element (deduplicated).
  def initialComponents() = (universe.permanentElements ++ evidenceElems).distinct.map(cc(_))

  /**
   * Returns the most likely value for the target element.
   * Requires that run() has completed and the recording factor for the target
   * has been collapsed to a single entry; otherwise an AlgorithmException is thrown.
   */
  def mostLikelyValue[T](target: Element[T]): T = {
    val targetVar = cc(target).variable
    val factor = problem.recordingFactors(targetVar).asInstanceOf[Factor[T]]
    if (factor.size != 1) throw new AlgorithmException//("Final factor for most likely value has more than one entry")
    factor.get(List())
  }

}
| scottcb/figaro | Figaro/src/main/scala/com/cra/figaro/algorithm/structured/algorithm/StructuredMPEAlgorithm.scala | Scala | bsd-3-clause | 2,049 |
/*
* Copyright (C) 2012 The Regents of The University California.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shark.memstore2
import java.nio.ByteBuffer
import scala.collection.immutable.BitSet
import shark.memstore2.column.ColumnIterator
import shark.memstore2.column.ColumnIteratorFactory
/**
* An iterator for a partition of data. Each element returns a ColumnarStruct
* that can be read by a ColumnarStructObjectInspector.
*
* @param numRows: total number of rows in this partition.
* @param columnIterators: iterators for all columns.
* @param columnUsed: an optional bitmap indicating whether a column is used.
*/
/**
 * Iterates over the rows of one table partition. Each call to `next()` advances the
 * underlying column iterators and returns the same reusable [[ColumnarStruct]], which a
 * ColumnarStructObjectInspector can read.
 *
 * @param numRows         total number of rows in this partition.
 * @param columnIterators iterators for all columns.
 * @param columnUsed      optional bitmap of used column ids; when null, all columns advance.
 */
class TablePartitionIterator(
    val numRows: Long,
    val columnIterators: Array[ColumnIterator],
    val columnUsed: BitSet = null)
  extends Iterator[ColumnarStruct] {

  // Iterators that must be advanced on each next() call. Note: mapping over the BitSet
  // yields a Set, so the advance order is unspecified — harmless, since each used
  // iterator is advanced exactly once per row.
  private val _columnIteratorsToAdvance: Array[ColumnIterator] =
    if (columnUsed == null) columnIterators
    else columnUsed.map(colId => columnIterators(colId)).toArray

  // Single struct reused for every row; it reads values straight from columnIterators.
  private val _struct = new ColumnarStruct(columnIterators)

  // Number of rows consumed so far.
  private var _position: Long = 0

  def hasNext(): Boolean = _position < numRows

  def next(): ColumnarStruct = {
    _position += 1
    val iterators = _columnIteratorsToAdvance
    var idx = 0
    while (idx < iterators.length) {
      iterators(idx).next
      idx += 1
    }
    _struct
  }
}
| vax11780/shark | src/main/scala/shark/memstore2/TablePartitionIterator.scala | Scala | apache-2.0 | 1,956 |
package com.bradbrok.filmomatic.state
/**
 * Direction of flow for the state machine.
 *
 * Plain three-valued enumeration; ids follow declaration order
 * (In = 0, Out = 1, Alternate = 2).
 */
object Direction extends Enumeration {
  /** Alias so the member type can be referenced as `Direction.Direction`. */
  type Direction = Value

  val In: Direction = Value
  val Out: Direction = Value
  val Alternate: Direction = Value
}
| bradbrok/Film-O-Matic | core/src/main/scala/com/bradbrok/filmomatic/state/Direction.scala | Scala | mit | 138 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.kylin.cube.CubeInstance
import org.apache.kylin.cube.cuboid.Cuboid
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.execution.datasource.FilePruner
import org.apache.spark.sql.execution.datasources.HadoopFsRelation
import org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat
import scala.collection.mutable.{HashMap => MutableHashMap}
/**
 * Builder-style reader for Kylin cuboid data stored as Parquet.
 *
 * Options are accumulated via the `option(...)` overloads and then handed to a
 * [[FilePruner]]-backed [[HadoopFsRelation]] in [[cuboidTable]]. All setters return
 * `this` so calls can be chained.
 */
class KylinDataFrameManager(sparkSession: SparkSession) {
  // Accumulated read options, passed verbatim to FilePruner and HadoopFsRelation.
  private var extraOptions = new MutableHashMap[String, String]()
  // Set by schema(); not read anywhere in this class — presumably reserved for
  // future schema-override support. TODO confirm against callers.
  private var userSpecifiedSchema: Option[StructType] = None
  /** File format for table */
  def format(source: String): KylinDataFrameManager = {
    option("source", source)
    this
  }
  /** Add key-value to options */
  def option(key: String, value: String): KylinDataFrameManager = {
    this.extraOptions += (key -> value)
    this
  }
  /** Add boolean value to options, for compatibility with Spark */
  def option(key: String, value: Boolean): KylinDataFrameManager = {
    option(key, value.toString)
  }
  /** Add long value to options, for compatibility with Spark */
  def option(key: String, value: Long): KylinDataFrameManager = {
    option(key, value.toString)
  }
  /** Add double value to options, for compatibility with Spark */
  def option(key: String, value: Double): KylinDataFrameManager = {
    option(key, value.toString)
  }
  /** Toggles the fast-bitmap read path via the "isFastBitmapEnabled" option. */
  def isFastBitmapEnabled(isFastBitmapEnabled: Boolean): KylinDataFrameManager = {
    option("isFastBitmapEnabled", isFastBitmapEnabled.toString)
    this
  }
  /**
   * Builds a DataFrame over one cuboid layout of the given cube.
   * Records project/cube/cuboid identity as options, then wires a FilePruner
   * (handles partition pruning) into a Parquet-backed HadoopFsRelation.
   */
  def cuboidTable(cubeInstance: CubeInstance, layout: Cuboid): DataFrame = {
    option("project", cubeInstance.getProject)
    option("cubeId", cubeInstance.getUuid)
    option("cuboidId", layout.getId)
    val indexCatalog = new FilePruner(cubeInstance, layout, sparkSession, options = extraOptions.toMap)
    sparkSession.baseRelationToDataFrame(
      HadoopFsRelation(
        indexCatalog,
        partitionSchema = indexCatalog.partitionSchema,
        dataSchema = indexCatalog.dataSchema.asNullable,
        bucketSpec = None,
        new ParquetFileFormat,
        options = extraOptions.toMap)(sparkSession))
  }
  /**
   * Specifies the input schema. Some data sources (e.g. JSON) can infer the input schema
   * automatically from data. By specifying the schema here, the underlying data source can
   * skip the schema inference step, and thus speed up data loading.
   *
   * @since 1.4.0
   */
  def schema(schema: StructType): KylinDataFrameManager = {
    this.userSpecifiedSchema = Option(schema)
    this
  }
}
| apache/kylin | kylin-spark-project/kylin-spark-common/src/main/scala/org/apache/spark/sql/KylinDataFrameManager.scala | Scala | apache-2.0 | 3,414 |
import scala.reflect.runtime.universe._
import scala.reflect.runtime.{currentMirror => cm}
import scala.tools.reflect.ToolBox
// Compiler regression test (t5816): typechecks a reified expression that captures the
// free variable `y` and prints the resulting tree. The printed output is compared
// against a .check file, so the exact strings must not change.
object Test extends App {
  // Toolbox backed by the current runtime mirror; performs the typecheck below.
  val toolbox = cm.mkToolBox()

  // Typechecks the quoted expression's tree and prints its string form.
  def printSource[T](expr: Expr[T]) {
    val ttree = toolbox typecheck expr.tree
    println(ttree.toString)
  }

  // Must be a var: the test exercises reification of a captured mutable variable.
  var y = 3
  printSource(reify {
    5 + y
  })
} | felixmulder/scala | test/files/run/t5816.scala | Scala | bsd-3-clause | 350 |
/*
* Copyright (c) 2018. Yuriy Stul
*/
package com.stulsoft.ysps.pforcomprehansion
import scala.util.Try
/**
* @author Yuriy Stul
*/
/**
 * Demonstrates fail-fast composition of [[scala.util.Try]] values: as soon as one step
 * fails, the remaining steps are skipped and the first failure is propagated.
 */
object Example7 extends App {
  test1()
  test2()
  test3()
  test4()
  test5()

  /** Both steps succeed: prints Success("Everything is OK"). */
  def test1(): Unit = {
    println("==>test1")
    val result = f1(false)
      .flatMap(_ => f2(false))
      .map(_ => "Everything is OK")
    println(result)
    println("<==test1")
  }

  /** First step fails: f2 is skipped and Failure("f1 failed") is printed. */
  def test2(): Unit = {
    println("==>test2")
    val result = f1(true)
      .flatMap(_ => f2(false))
      .map(_ => "Everything is OK")
    println(result)
    println("<==test2")
  }

  /** Both steps would fail: only the first failure ("f1 failed") is reported. */
  def test3(): Unit = {
    println("==>test3")
    val result = f1(true)
      .flatMap(_ => f2(true))
      .map(_ => "Everything is OK")
    println(result)
    println("<==test3")
  }

  /** Second step fails: Failure("f2 failed") is printed. */
  def test4(): Unit = {
    println("==>test4")
    val result = f1(false)
      .flatMap(_ => f2(true))
      .map(_ => "Everything is OK")
    println(result)
    println("<==test4")
  }

  /** Same pattern against f3: one succeeding and one failing combination. */
  def test5(): Unit = {
    println("==>test5")
    val result1 = f1(false)
      .flatMap(_ => f3(false))
      .map(_ => "Everything is OK")
    println(s"result1=$result1")
    val result2 = f1(false)
      .flatMap(_ => f3(true))
      .map(_ => "Everything is OK")
    println(s"result2=$result2")
    println("<==test5")
  }

  /** Succeeds with true, or fails with RuntimeException("f1 failed"). */
  def f1(toFail: Boolean): Try[Boolean] = Try {
    if (toFail) throw new RuntimeException("f1 failed") else true
  }

  /** Succeeds with true, or fails with RuntimeException("f2 failed"). */
  def f2(toFail: Boolean): Try[Boolean] = Try {
    if (toFail) throw new RuntimeException("f2 failed") else true
  }

  /** Succeeds with true, or fails with RuntimeException("f3 failed"). */
  def f3(toFail: Boolean): Try[Boolean] = Try {
    if (toFail) throw new RuntimeException("f3 failed") else true
  }
}
| ysden123/ysps | src/main/scala/com/stulsoft/ysps/pforcomprehansion/Example7.scala | Scala | mit | 1,766 |
package de.sciss.cord
package incomplete
import de.sciss.cord.audio.AudioProcessingEvent
import org.scalajs.dom._
import org.scalajs.dom.html.Canvas
import scala.scalajs.js
import scalatags.JsDom.all._
import scala.scalajs.js.timers.SetIntervalHandle
/**
 * Peak/RMS audio level meter rendered onto an HTML canvas (Scala.js).
 *
 * Audio thread side: a ScriptProcessorNode accumulates the squared-sample sum and
 * maximum into `sqrSum`/`sqrMax`/`count`. UI side: a ~30 fps interval timer converts
 * the accumulators to dB, resets them, and paints peak (red) over RMS (blue) bars.
 */
class Meter { meter =>
  // Last computed levels, retained between frames so the display holds its value
  // when no new audio has arrived (count == 0).
  private var lastPeak = 0.0
  private var lastRMS = 0.0
  // Accumulators written from the audio callback, consumed and reset by animStep().
  private var sqrSum = 0.0
  private var sqrMax = 0.0
  private var count = 0
  // The canvas element this meter draws on; created once, on first access.
  lazy val render: Canvas = {
    val elem = canvas(cls := "meter").render
    elem
  }
  // Linear peak amplitude since the last reset (sqrt of the max squared sample).
  def peak: Double = {
    if (count > 0) lastPeak = js.Math.sqrt(sqrMax)
    lastPeak
  }
  // Linear RMS amplitude since the last reset.
  def rms: Double = {
    if (count > 0) lastRMS = js.Math.sqrt(sqrSum / count)
    lastRMS
  }
  // Clears the accumulators; lastPeak/lastRMS keep their values.
  def reset(): Unit = if (count > 0) {
    sqrSum = 0.0
    sqrMax = 0.0
    count = 0
  }
  /**
   * Builds the analysis node for this meter. Callers connect their signal into the
   * returned ScriptProcessorNode; its callback feeds the accumulators above.
   */
  def node(context: AudioContext): AudioNode = {
    // println("INIT METER NODE")
    // val inNode = context.createGain()
    // val squared = context.createGain()
    // val isFirefox = navigator.userAgent.toLowerCase.indexOf("firefox") > -1
    val blockSize = 512 // 0 // if (isFirefox) 512 else 0 // Chrome doesn't accept any blockSize by default (4096)
    val analyze = context.createScriptProcessor(blockSize, 1, 1)
    analyze.onaudioprocess = { e: AudioProcessingEvent =>
      // if (paintCount == 0) println("IN AUDIO LOOP")
      val input = e.inputBuffer.getChannelData(0)
      val len = input.length
      var i = 0; while (i < len) {
        val x0 = input(i)
        val x = x0 * x0
        sqrSum += x
        if (x > sqrMax) sqrMax = x
        i += 1
      }
      count += len
    }
    // inNode connect squared
    // inNode connect squared.gain
    // squared connect analyze
    // THIS IS NEEDED FOR CHROME
    // XXX TODO --- look into Tone.js, they
    // call some GC-prevention function that might serve the same purpose.
    val dummy = context.createGain()
    dummy.gain.value = 0.0
    analyze connect dummy
    dummy connect context.destination
    // inNode
    analyze
  }
  // Handle of the repaint timer; null when the animation is stopped.
  private var animHandle: SetIntervalHandle = null
  // Starts (or restarts) the ~30 fps repaint loop.
  private def startAnimation(): Unit = {
    stopAnimation()
    animHandle = js.timers.setInterval(33.3)(animStep _)
  }
  // Conversion factor for amplitude -> decibels: 20 / ln(10).
  private val ampdbFactor = 20 / js.Math.log(10)
  private def ampdb(in: Double): Double =
    js.Math.log(in) * ampdbFactor
  // Previously painted bar widths in pixels, used for decay and dirty-checking.
  private var lastPeakPx = 0.0
  private var lastRMSPx = 0.0
  // private var paintCount = 0
  // One repaint frame: map levels to [0, 1] over a -48 dB floor, apply a 4 px/frame
  // fall-back decay, and redraw only when a bar actually moved.
  private def animStep(): Unit = {
    val peakDB = ampdb(peak)
    val floorDB = -48
    val peakNorm = peakDB / -floorDB + 1
    val rmsDB = ampdb(rms)
    val rmsNorm = rmsDB / -floorDB + 1
    val elem = render
    // paintCount = (paintCount + 1) % 20
    // if (paintCount == 0) println(f"peak $peakDB%1.1f rms $rmsDB%1.1f sqrSum $sqrSum%1.2f count $count")
    reset()
    val w = elem.width
    val h = elem.height
    val px0 = js.Math.max(0, js.Math.min(w, peakNorm * w))
    val rx0 = js.Math.max(0, js.Math.min(w, rmsNorm * w))
    val px = js.Math.max(lastPeakPx - 4, px0)
    val rx = js.Math.max(lastRMSPx - 4, rx0)
    if (lastPeakPx != px || lastRMSPx != rx) {
      lastPeakPx = px
      lastRMSPx = rx
      val ctx = elem.getContext("2d").asInstanceOf[CanvasRenderingContext2D]
      ctx.fillStyle = "#000000"
      ctx.fillRect(0, 0, w , h)
      ctx.fillStyle = "#FF0000"
      ctx.fillRect(0, 0, px, h)
      ctx.fillStyle = "#0000FF"
      ctx.fillRect(0, 0, rx, h)
    }
  }
  // Cancels the repaint loop, if running.
  private def stopAnimation(): Unit = {
    if (animHandle != null) {
      js.timers.clearInterval(animHandle)
      animHandle = null
    }
  }
}
| Sciss/Cord | src/main/scala/de/sciss/cord/incomplete/Meter.scala | Scala | lgpl-2.1 | 3,611 |
/**
* Copyright (c) 2016 LIBBLE team supervised by Dr. Wu-Jun LI at Nanjing University.
* All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. */
package libble.generalizedLinear
import libble.linalg.{DenseVector, Vector}
import libble.linalg.implicits._
/**
* In this class, you should give your own gradient of function.
* We give some instances of different losses.
* If you want to run optimization on your own function, you should extend
* this class, and override functions.
*/
/**
 * Contract for per-example loss functions used by the generalized linear models.
 *
 * The gradient of each supported loss has the form deltaFactor * x, where x is the
 * data point; implementations therefore return only the delta factor (one entry per
 * non-pivot class). To optimize a custom function, extend this class and override
 * the methods below. Logistic, hinge and least-squares instances follow in this file.
 */
abstract class LossFunc extends Serializable {
  /**
   * The gradient of a convex function is obtained by a deltafactor*x (where x is the data point).
   * Here, we return this gradient factor.
   *
   * @param data    feature vector of one example
   * @param label   example label
   * @param weights model weights
   * @return delta factor
   *
   */
  def deltaF(data: Vector, label: Double, weights: WeightsVector): Vector
  // Same computation for a plain Vector of weights.
  def deltaF(data: Vector, label: Double, weights: Vector): Vector
  /**
   * Here we return the gradient factor and loss.
   *
   * @param data    feature vector of one example
   * @param label   example label
   * @param weights model weights
   * @return (delta factor, loss at this example)
   */
  def deltaFWithLoss(data: Vector, label: Double, weights: Vector): (Vector, Double)
  /**
   * Give the prediction on data with the weights.
   *
   * @param data    feature vector
   * @param weights model weights
   * @return predict Result
   */
  def predict(data: Vector, weights: Vector): Double
  // Numerically stable log(1 + e^x): for large positive x, exp(x) would overflow,
  // so compute x + log1p(e^-x) instead.
  def log1pExp(x: Double): Double = {
    if (x > 0) {
      x + math.log1p(math.exp(-x))
    } else {
      math.log1p(math.exp(x))
    }
  }
}
/**
* ******************Codes bellow has reference to mllib.optimization._
*/
/**
* Here we defined the logistic Loss.
*
* @param classNum
*/
/**
 * Logistic loss for binary and multinomial logistic regression.
 *
 * Binary case (classNum == 2): a single weight vector; the gradient factor is
 * sigmoid(w·x) - label and the loss is the standard log-loss.
 *
 * Multiclass case (classNum == K > 2): the weight vector is the concatenation of
 * (K - 1) per-class blocks of length data.size; class 0 is the pivot class with an
 * implicit margin of 0, and class l (l >= 1) is stored in block p = l - 1.
 *
 * Fixes relative to the previous revision:
 *  - binary predict returned 1 / log(1 + e^margin), which is not the sigmoid
 *    probability; it now returns 1 / (1 + e^margin) with margin = -(w·x).
 *  - multiclass predict iterated p in [0, classNum), reading one block past the end
 *    of the weights, and returned the block index instead of the class index; it now
 *    iterates the (classNum - 1) stored blocks and returns p + 1 (or 0 for the pivot).
 *
 * @param classNum number of classes (>= 2)
 */
class LogisticLoss(classNum: Int) extends LossFunc {
  /** Default to binary logistic regression. */
  def this() = this(2)

  /**
   * Shared multiclass gradient/loss computation. `weightAt` abstracts over the
   * concrete weight container so both the `Vector` and `WeightsVector` overloads
   * reuse the same code (both expose positional access).
   *
   * @return ((classNum - 1) gradient factors, log-loss at this example)
   */
  private def multiclassDeltaWithLoss(
      data: Vector,
      label: Double,
      weightAt: Int => Double): (Vector, Double) = {
    val dim = data.size
    var marginY = 0.0
    var maxIndex = 0
    var maxMargin = Double.NegativeInfinity
    // Margin of each non-pivot class: block p holds class p + 1.
    val margins = Array.tabulate(classNum - 1) { p =>
      var tMargin = 0.0
      data.foreachActive((i, v) => {
        if (v != 0.0) {
          tMargin += v * weightAt(p * dim + i)
        }
      })
      if (p == (label - 1)) marginY = tMargin
      if (tMargin > maxMargin) {
        maxIndex = p
        maxMargin = tMargin
      }
      tMargin
    }
    // Numerically stable sum of exp(margins): when the largest margin is positive,
    // shift all margins by it before exponentiating.
    val sum = {
      var temp = 0.0
      if (maxMargin > 0) {
        var i = 0
        while (i < classNum - 1) {
          margins(i) -= maxMargin
          if (i == maxIndex) {
            temp += math.exp(-maxMargin)
          } else {
            temp += math.exp(margins(i))
          }
          i += 1
        }
      } else {
        var i = 0
        while (i < classNum - 1) {
          temp += math.exp(margins(i))
          i += 1
        }
      }
      temp
    }
    // Gradient factor per non-pivot class: softmax probability minus the indicator
    // of this example's label.
    val deltaFactor = new Array[Double](classNum - 1)
    var i = 0
    while (i < classNum - 1) {
      val la = if (label != 0.0 && label == i + 1) 1.0 else 0.0
      deltaFactor(i) = math.exp(margins(i)) / (sum + 1.0) - la
      i += 1
    }
    // Log-loss; undo the maxMargin shift (marginY was captured before shifting).
    var loss = if (label > 0.0) math.log1p(sum) - marginY else math.log1p(sum)
    if (maxMargin > 0) loss += maxMargin
    (new DenseVector(deltaFactor), loss)
  }

  /**
   * Gradient factor(s) for one example.
   *
   * @return size-1 vector (binary) or size-(classNum - 1) vector (multiclass)
   */
  override def deltaF(data: Vector, label: Double, weights: WeightsVector): Vector = {
    require((data.size % weights.size) == 0 || classNum == (weights.size / data.size + 1), "weights size not match!!!")
    classNum match {
      case 2 =>
        val margin = -1.0 * (weights * data)
        val factor = (1.0 / (1.0 + math.exp(margin))) - label
        new DenseVector(Array(factor))
      case _ =>
        multiclassDeltaWithLoss(data, label, weights(_))._1
    }
  }

  /** Same as above, for a plain weight vector. */
  def deltaF(data: Vector, label: Double, weights: Vector): Vector = {
    require((data.size % weights.size) == 0 || classNum == (weights.size / data.size + 1), "weights size not match!!!")
    classNum match {
      case 2 =>
        val margin = -1.0 * (weights * data)
        val factor = (1.0 / (1.0 + math.exp(margin))) - label
        new DenseVector(Array(factor))
      case _ =>
        multiclassDeltaWithLoss(data, label, weights(_))._1
    }
  }

  /**
   * Gradient factor(s) together with the log-loss at this example.
   */
  override def deltaFWithLoss(data: Vector, label: Double, weights: Vector): (Vector, Double) = {
    require((data.size % weights.size) == 0 || classNum == (weights.size / data.size + 1), "weights size not match!!!")
    classNum match {
      case 2 =>
        val margin = -1.0 * (data * weights)
        val factor = (1.0 / (1.0 + math.exp(margin))) - label
        val loss =
          if (label > 0) log1pExp(margin)          // -log P(y = 1)
          else log1pExp(margin) - margin           // -log P(y = 0)
        (new DenseVector(Array(factor)), loss)
      case _ =>
        multiclassDeltaWithLoss(data, label, weights(_))
    }
  }

  /**
   * Prediction: the positive-class probability (binary) or the index of the most
   * likely class (multiclass; 0 is the pivot class).
   */
  override def predict(data: Vector, weights: Vector): Double = {
    require((data.size % weights.size) == 0 || classNum == (weights.size / data.size + 1), "weights size not match!!!")
    classNum match {
      case 2 =>
        // Sigmoid probability of class 1: 1 / (1 + e^{-w·x}).
        val margin = -(data * weights)
        1.0 / (1.0 + math.exp(margin))
      case _ =>
        val dim = data.size
        var best = 0
        var maxMargin = 0.0 // pivot class 0 has margin 0
        var p = 0
        while (p < classNum - 1) {
          var margin = 0.0
          data.foreachActive((i, v) => margin += v * weights(p * dim + i))
          if (margin > maxMargin) {
            maxMargin = margin
            best = p + 1
          }
          p += 1
        }
        best
    }
  }
}
/**
* Here we defined the hinge Loss
*/
/**
 * Hinge loss for binary classification with {0, 1} labels (internally mapped to
 * {-1, +1}): L = max(0, 1 - y * (w·x)).
 */
class HingeLoss extends LossFunc {

  /** Gradient factor given the inner product of weights and data. */
  private def deltaFactor(innerP: Double, label: Double): Vector = {
    val signedLabel = 2 * label - 1.0 // map {0, 1} to {-1, +1}
    if (signedLabel * innerP < 1.0) new DenseVector(Array(-signedLabel))
    else new DenseVector(1) // inside the margin: zero gradient factor
  }

  /**
   * The gradient of a convex function is obtained by a deltafactor*x (where x is the data point).
   * Here, we return this gradient factor.
   */
  override def deltaF(data: Vector, label: Double, weights: WeightsVector): Vector =
    deltaFactor(weights * data, label)

  override def deltaF(data: Vector, label: Double, weights: Vector): Vector =
    deltaFactor(weights * data, label)

  /** Gradient factor together with the hinge loss at this example. */
  override def deltaFWithLoss(data: Vector, label: Double, weights: Vector): (Vector, Double) = {
    val innerP = weights * data
    val signedLabel = 2 * label - 1.0
    if (signedLabel * innerP < 1.0)
      (new DenseVector(Array(-signedLabel)), 1.0 - signedLabel * innerP)
    else
      (new DenseVector(1), 0.0)
  }

  /** Raw decision value w·x (sign gives the predicted class). */
  override def predict(data: Vector, weights: Vector): Double =
    weights * data
}
/**
* Here we defined the least square Loss.
*/
/**
 * Squared-error loss for linear regression: L = (w·x - y)^2 / 2, so the gradient
 * factor is simply the residual w·x - y.
 */
class LeastSquareLoss extends LossFunc {

  /** Gradient factor: the residual of the current prediction. */
  override def deltaF(data: Vector, label: Double, weights: WeightsVector): Vector = {
    val residual = weights * data - label
    new DenseVector(Array(residual))
  }

  override def deltaF(data: Vector, label: Double, weights: Vector): Vector = {
    val residual = weights * data - label
    new DenseVector(Array(residual))
  }

  /** Gradient factor together with the squared-error loss at this example. */
  override def deltaFWithLoss(data: Vector, label: Double, weights: Vector): (Vector, Double) = {
    val residual = weights * data - label
    (new DenseVector(Array(residual)), residual * residual / 2.0)
  }

  /** Predicted value w·x. */
  override def predict(data: Vector, weights: Vector): Double =
    data * weights
}
/** ***********************************************************************************/ | syh6585/LIBBLE-Spark | src/main/scala/generalizedLinear/LossFunc.scala | Scala | apache-2.0 | 12,435 |
package smt.report
import smt.{UpMoveState, DownMoveState, MoveState}
object MoveStateDescription {

  /** Human-readable summary of how a migration move ended. */
  def describe(nms: MoveState): String = nms match {
    case dms: DownMoveState =>
      dms.crashedDown.fold("rolled back.")(s => "roll-back attempted, but crashed at: " + s.name)
    case ums: UpMoveState =>
      ums.crashedUp.fold("applied.")(s => "attempted, but crashed at: " + s.name)
  }

  /** Same summary, prefixed with the move's name. */
  def describe(name: String, nms: MoveState): String = s"$name: ${describe(nms)}"
} | davidpeklak/smt | src/main/scala/smt/report/MoveStateDescription.scala | Scala | mit | 598 |
import edu.uta.diql._
import org.apache.spark._
import org.apache.spark.rdd._
/**
 * Sparse matrix multiplication on Spark: reads two matrices in "row,col,value" triple
 * form from args(0) and args(1), computes their product, and prints the first 30
 * cells in (row, col) order.
 */
object Test {

  def main ( args: Array[String] ) {
    val conf = new SparkConf().setAppName("Test")
    val sc = new SparkContext(conf)

    explain(true)

    // Parses one "row,col,value" file into ((row, col), value) pairs.
    def readMatrix(path: String) =
      sc.textFile(path).map { line =>
        val fields = line.split(",")
        ((fields(0).toInt, fields(1).toInt), fields(2).toDouble)
      }

    val m = readMatrix(args(0))
    val n = readMatrix(args(1))

    // Key M by column and N by row so matching (M[i,k], N[k,j]) pairs meet in the join.
    val mByCol = m.map { case ((i, j), v) => (j, (i, v)) }
    val nByRow = n.map { case ((i, j), v) => (i, (j, v)) }

    val product = mByCol
      .join(nByRow)
      .map { case (_, ((i, mv), (j, nv))) => ((i, j), mv * nv) }
      .reduceByKey(_ + _)

    product.sortBy(_._1, true, 1).take(30).foreach(println)
  }
}
| fegaras/DIQL | tests/diablo/spark/MatrixMultiplicationSpark.scala | Scala | apache-2.0 | 785 |
package org.json4s
package native
import org.scalatest.wordspec.AnyWordSpec
import org.json4s.prefs.EmptyValueStrategy
import org.json4s.MonadicJValue._
/**
 * Tests for the native (non-Jackson) JsonMethods: parsing from the three input kinds
 * (String/Reader/Stream) with and without BigInt longs, and rendering with the two
 * empty-value strategies. The expected render results are exact Doc trees, so their
 * structure must not be reformatted.
 */
class NativeJsonMethodsSpec extends AnyWordSpec {
  import org.json4s.JsonDSL._
  import JsonMethods._
  "JsonMethods.parse" should {
    val stringJson = """{"number": 200}"""
    // Default parsing maps integer literals to JInt (BigInt-backed).
    "parse StringInput and produce JInt" in {
      assert((parse(stringJson) \\ "number").isInstanceOf[JInt])
    }
    "parse ReaderInput and produce JInt" in {
      assert((parse(new java.io.StringReader(stringJson)) \\ "number").isInstanceOf[JInt])
    }
    "parse StreamInput and produce JInt" in {
      assert((parse(new java.io.ByteArrayInputStream(stringJson.getBytes)) \\ "number").isInstanceOf[JInt])
    }
    // With useBigIntForLong = false, integer literals map to JLong instead.
    "parse StringInput and produce JLong" in {
      assert((parse(stringJson, useBigIntForLong = false) \\ "number").isInstanceOf[JLong])
    }
    "parse ReaderInput and produce JLong" in {
      assert((parse(new java.io.StringReader(stringJson), useBigIntForLong = false) \\ "number").isInstanceOf[JLong])
    }
    "parse StreamInput and produce AST using Long" in {
      assert(
        (parse(
          new java.io.ByteArrayInputStream(stringJson.getBytes),
          useBigIntForLong = false
        ) \\ "number").isInstanceOf[JLong]
      )
    }
  }
  "JsonMethods.write" should {
    // Default strategy (skip): None entries disappear from the rendered output.
    "produce JSON without empty fields" should {
      "from Seq(Some(1), None, None, Some(2))" in {
        val seq = Seq(Some(1), None, None, Some(2))
        val expected =
          DocCons(DocText("["), DocCons(DocCons(DocText("1"), DocCons(DocText(","), DocText("2"))), DocText("]")))
        assert(render(seq) == expected)
      }
      """from Map("a" -> Some(1), "b" -> None, "c" -> None, "d" -> Some(2))""" in {
        val map = Map("a" -> Some(1), "b" -> None, "c" -> None, "d" -> Some(2))
        val expected = DocCons(
          DocText("{"),
          DocCons(
            DocNest(
              2,
              DocCons(
                DocBreak,
                DocCons(
                  DocCons(DocText("\\"a\\":"), DocText("1")),
                  DocCons(DocCons(DocText(","), DocBreak), DocCons(DocText("\\"d\\":"), DocText("2")))
                )
              )
            ),
            DocCons(DocBreak, DocText("}"))
          )
        )
        assert(render(map) == expected)
      }
    }
    // EmptyValueStrategy.preserve: None entries render as explicit nulls.
    "produce JSON with empty fields preserved" should {
      "from Seq(Some(1), None, None, Some(2))" in {
        val seq = Seq(Some(1), None, None, Some(2))
        val expected = DocCons(
          DocText("["),
          DocCons(
            DocCons(
              DocCons(
                DocCons(DocText("1"), DocCons(DocText(","), DocText("null"))),
                DocCons(DocText(","), DocText("null"))
              ),
              DocCons(DocText(","), DocText("2"))
            ),
            DocText("]")
          )
        )
        assert(render(seq, emptyValueStrategy = EmptyValueStrategy.preserve) == expected)
      }
      """from Map("a" -> Some(1), "b" -> None, "c" -> None, "d" -> Some(2))""" in {
        val map = Map("a" -> Some(1), "b" -> None, "c" -> None, "d" -> Some(2))
        val expected = DocCons(
          DocText("{"),
          DocCons(
            DocNest(
              2,
              DocCons(
                DocBreak,
                DocCons(
                  DocCons(
                    DocCons(
                      DocCons(DocText("\\"a\\":"), DocText("1")),
                      DocCons(DocCons(DocText(","), DocBreak), DocCons(DocText("\\"b\\":"), DocText("null")))
                    ),
                    DocCons(DocCons(DocText(","), DocBreak), DocCons(DocText("\\"c\\":"), DocText("null")))
                  ),
                  DocCons(DocCons(DocText(","), DocBreak), DocCons(DocText("\\"d\\":"), DocText("2")))
                )
              )
            ),
            DocCons(DocBreak, DocText("}"))
          )
        )
        assert(render(map, emptyValueStrategy = EmptyValueStrategy.preserve) == expected)
      }
    }
  }
}
| xuwei-k/json4s | native-core/shared/src/test/scala/org/json4s/native/NativeJsonMethodsSpec.scala | Scala | apache-2.0 | 4,073 |
/*
* Copyright 2016 Carlo Micieli
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.hascalator.benchmarks
package lists
import io.hascalator.Prelude._
import org.openjdk.jmh.annotations.{ Benchmark, BenchmarkMode, Mode, OutputTimeUnit }
import java.util.concurrent.TimeUnit
/**
 * JMH throughput benchmarks comparing foldLeft/foldRight over an integer sum across
 * three containers: this library's list, Scala's standard List, and an Array. The
 * fixtures (`list`, `scalaList`, `array`) come from the shared BenchmarkState sample.
 * Annotations must stay exactly as written — JMH reads them reflectively.
 */
class FoldBenchmarks {
  import BenchmarkState.sample._
  @Benchmark @BenchmarkMode(Array(Mode.Throughput)) @OutputTimeUnit(TimeUnit.SECONDS)
  def foldLeftListBenchmark(): Int = {
    list.foldLeft(0)(_ + _)
  }
  @Benchmark @BenchmarkMode(Array(Mode.Throughput)) @OutputTimeUnit(TimeUnit.SECONDS)
  def foldLeftScalaListBenchmark(): Int = {
    scalaList.foldLeft(0)(_ + _)
  }
  @Benchmark @BenchmarkMode(Array(Mode.Throughput)) @OutputTimeUnit(TimeUnit.SECONDS)
  def foldLeftArrayBenchmark(): Int = {
    array.foldLeft(0)(_ + _)
  }
  @Benchmark @BenchmarkMode(Array(Mode.Throughput)) @OutputTimeUnit(TimeUnit.SECONDS)
  def foldRightListBenchmark(): Int = {
    list.foldRight(0)(_ + _)
  }
  @Benchmark @BenchmarkMode(Array(Mode.Throughput)) @OutputTimeUnit(TimeUnit.SECONDS)
  def foldRightScalaListBenchmark(): Int = {
    scalaList.foldRight(0)(_ + _)
  }
  @Benchmark @BenchmarkMode(Array(Mode.Throughput)) @OutputTimeUnit(TimeUnit.SECONDS)
  def foldRightArrayBenchmark(): Int = {
    array.foldRight(0)(_ + _)
  }
} | CarloMicieli/hascalator | bench/src/main/scala/io/hascalator/benchmarks/lists/FoldBenchmarks.scala | Scala | apache-2.0 | 1,836 |
package org.jetbrains.plugins.scala
package script
import com.intellij.execution.configurations._
import com.intellij.execution.filters._
import com.intellij.openapi.project.Project
import com.intellij.psi.{PsiElement, PsiManager}
import com.intellij.openapi.vfs.VirtualFile
import com.intellij.openapi.util.JDOMExternalizer
import com.intellij.execution.runners.ExecutionEnvironment
import com.intellij.execution.{ExecutionException, Executor}
import com.intellij.openapi.module.{ModuleUtilCore, Module}
import com.intellij.openapi.options.SettingsEditor
import lang.psi.api.ScalaFile
import com.intellij.vcsUtil.VcsUtil
import org.jdom.Element
import collection.JavaConversions._
import compiler.ScalacSettings
import config.{Libraries, CompilerLibraryData, ScalaFacet}
import com.intellij.refactoring.listeners.{RefactoringElementAdapter, RefactoringElementListener}
/**
* User: Alexander Podkhalyuzin
* Date: 04.02.2009
*/
class ScalaScriptRunConfiguration(val project: Project, val configurationFactory: ConfigurationFactory, val name: String)
extends ModuleBasedConfiguration[RunConfigurationModule](name, new RunConfigurationModule(project), configurationFactory) with RefactoringListenerProvider {
val SCALA_HOME = "-Dscala.home="
val CLASSPATH = "-Denv.classpath=\\"%CLASSPATH%\\""
val EMACS = "-Denv.emacs=\\"%EMACS%\\""
val MAIN_CLASS = "scala.tools.nsc.MainGenericRunner"
private var scriptPath = ""
private var scriptArgs = ""
private var javaOptions = ""
private var consoleArgs = ""
private var workingDirectory = {
val base = getProject.getBaseDir
if (base != null) base.getPath
else ""
}
def getScriptPath = scriptPath
def getScriptArgs = scriptArgs
def getJavaOptions = javaOptions
def getConsoleArgs = consoleArgs
def getWorkingDirectory: String = workingDirectory
def setScriptPath(s: String) {
scriptPath = s
}
def setScriptArgs(s: String) {
scriptArgs = s
}
def setJavaOptions(s: String) {
javaOptions = s
}
def setConsoleArgs(s: String) {
consoleArgs = s
}
def setWorkingDirectory(s: String) {
workingDirectory = s
}
def apply(params: ScalaScriptRunConfigurationForm) {
setScriptArgs(params.getScriptArgs)
setScriptPath(params.getScriptPath)
setJavaOptions(params.getJavaOptions)
setConsoleArgs(params.getConsoleArgs)
setWorkingDirectory(params.getWorkingDirectory)
}
def getState(executor: Executor, env: ExecutionEnvironment): RunProfileState = {
def fileNotFoundError() {
throw new ExecutionException("Scala script file not found.")
}
try {
val file: VirtualFile = VcsUtil.getVirtualFile(scriptPath)
PsiManager.getInstance(project).findFile(file) match {
case f: ScalaFile if f.isScriptFile() && !f.isWorksheetFile =>
case _ => fileNotFoundError()
}
}
catch {
case e: Exception => fileNotFoundError()
}
val module = getModule
if (module == null) throw new ExecutionException("Module is not specified")
val script = VcsUtil.getVirtualFile(scriptPath)
val state = new JavaCommandLineState(env) {
protected override def createJavaParameters: JavaParameters = {
val params = new JavaParameters()
params.setCharset(null)
params.getVMParametersList.addParametersString(getJavaOptions)
params.setWorkingDirectory(getWorkingDirectory)
// params.getVMParametersList.addParametersString("-Xnoagent -Djava.compiler=NONE -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5009")
// params.getVMParametersList.add(SCALA_HOME + scalaSdkPath)
params.getVMParametersList.add(CLASSPATH)
params.getVMParametersList.add(EMACS)
params.setMainClass(MAIN_CLASS)
params.getProgramParametersList.add("-nocompdaemon") //todo: seems to be a bug in scala compiler. Ticket #1498
params.getProgramParametersList.add("-classpath")
params.configureByModule(module, JavaParameters.JDK_AND_CLASSES_AND_TESTS)
params.getProgramParametersList.add(params.getClassPath.getPathsString)
ScalaFacet.findIn(module).foreach {
case facet =>
val files =
if (facet.fsc) {
val settings = ScalacSettings.getInstance(getProject)
val lib: Option[CompilerLibraryData] = Libraries.findBy(settings.COMPILER_LIBRARY_NAME,
settings.COMPILER_LIBRARY_LEVEL, getProject)
lib match {
case Some(libr) => libr.files
case _ => facet.files
}
} else facet.files
files.foreach(params.getClassPath.add)
}
val array = getConsoleArgs.trim.split("\\\\s+").filter(!_.trim().isEmpty)
params.getProgramParametersList.addAll(array: _*)
params.getProgramParametersList.add(scriptPath)
params.getProgramParametersList.addParametersString(scriptArgs)
params
}
}
val consoleBuilder = TextConsoleBuilderFactory.getInstance.createBuilder(getProject)
consoleBuilder.addFilter(getFilter(script))
state.setConsoleBuilder(consoleBuilder)
state
}
def getModule: Module = {
var module: Module = null
try {
val file: VirtualFile = VcsUtil.getVirtualFile(scriptPath)
module = ModuleUtilCore.findModuleForFile(file, getProject)
}
catch {
case e: Exception =>
}
if (module == null) module = getConfigurationModule.getModule
module
}
def getValidModules: java.util.List[Module] = ScalaFacet.findModulesIn(getProject).toList
def getConfigurationEditor: SettingsEditor[_ <: RunConfiguration] = new ScalaScriptRunConfigurationEditor(project, this)
override def writeExternal(element: Element) {
super.writeExternal(element)
writeModule(element)
JDOMExternalizer.write(element, "path", getScriptPath)
JDOMExternalizer.write(element, "vmparams", getJavaOptions)
JDOMExternalizer.write(element, "consoleargs", getConsoleArgs)
JDOMExternalizer.write(element, "params", getScriptArgs)
JDOMExternalizer.write(element, "workingDirectory", workingDirectory)
}
override def readExternal(element: Element) {
super.readExternal(element)
readModule(element)
scriptPath = JDOMExternalizer.readString(element, "path")
javaOptions = JDOMExternalizer.readString(element, "vmparams")
scriptArgs = JDOMExternalizer.readString(element, "params")
consoleArgs = JDOMExternalizer.readString(element, "consoleargs")
val pp = JDOMExternalizer.readString(element, "workingDirectory")
if (pp != null) workingDirectory = pp
}
  /**
   * Builds a console filter that turns interpreter "(fragment of ...)" error
   * lines into clickable hyperlinks opening `file` at the reported line.
   */
  private def getFilter(file: VirtualFile): Filter = {
    import Filter._
    new Filter {
      def applyFilter(line: String, entireLength: Int): Result = {
        // Offsets of this line within the whole console buffer.
        val start = entireLength - line.length
        var end = entireLength - line.length
        if (line.startsWith("(fragment of ")) {
          try {
            // Strip the "(fragment of " prefix, character by character.
            var cache = line.replaceFirst("[(][f][r][a][g][m][e][n][t][ ][o][f][ ]", "")
            // Drop everything up to and including the closing "):" of the fragment name.
            cache = cache.replaceFirst("[^)]*[)][:]", "")
            // The leading digits up to the next ':' are the 1-based line number.
            val lineNumber = Integer.parseInt(cache.substring(0, cache.indexOf(":")))
            // NOTE(review): this removes only the FIRST non-colon character —
            // presumably to adjust the hyperlink end offset; confirm intent.
            cache = cache.replaceFirst("[^:]", "")
            end += line.length - cache.length
            // Hyperlink target uses a 0-based line index.
            val hyperlink = new OpenFileHyperlinkInfo(getProject, file, lineNumber-1)
            new Result(start, end, hyperlink)
          }
          catch {
            // Malformed line: no hyperlink, let the console render it as-is.
            case _: Exception => return null
          }
        } else null
      }
    }
  }
def getRefactoringElementListener(element: PsiElement): RefactoringElementListener = element match {
case file: ScalaFile => new RefactoringElementAdapter {
def elementRenamedOrMoved(newElement: PsiElement) = {
newElement match {
case f: ScalaFile =>
val newPath = f.getVirtualFile.getPath
setScriptPath(newPath)
case _ =>
}
}
//todo this method does not called when undo of moving action executed
def undoElementMovedOrRenamed(newElement: PsiElement, oldQualifiedName: String) {
setScriptPath(oldQualifiedName)
}
}
case _ => RefactoringElementListener.DEAF
}
} | consulo/consulo-scala | src/org/jetbrains/plugins/scala/script/ScalaScriptRunConfiguration.scala | Scala | apache-2.0 | 8,237 |
package pl.umk.bugclassification.scmparser.training
import java.util
import java.util.ArrayList
import org.slf4j.LoggerFactory
import weka.classifiers.Classifier
import weka.core.{Attribute, DenseInstance, FastVector, Instance, Instances}
import scala.collection.mutable.ArrayBuilder.ofDouble
/**
 * Bridges bag-of-words feature vectors to Weka's attribute/instance model.
 *
 * Numeric attributes are created from a fixed vocabulary (`keys`); the last
 * attribute is always the nominal class attribute ("buggy"/"clean").
 */
trait WekaWrapper {
  val log = LoggerFactory.getLogger(classOf[WekaWrapper])

  /**
   * Builds the Weka attribute list for the given vocabulary.
   *
   * @param keys vocabulary; one numeric attribute is created per entry
   * @return the attributes plus the numeric indices of the "buggy" and
   *         "clean" class values (in that order), used to label instances
   */
  def generateAttributes(keys: Array[String]): (ArrayList[Attribute], Double, Double) = {
    log.info("generateAttributes before creation of attributes")
    val attributes: util.ArrayList[Attribute] = new util.ArrayList[Attribute]
    keys.foreach(key => attributes.add(new Attribute(key)))
    val classificationAttributeValues = new util.ArrayList[String]
    classificationAttributeValues.add("buggy")
    classificationAttributeValues.add("clean")
    // Nominal class attribute; must be last so setClassIndex(numAttributes-1) works.
    val classificationAttribute = new Attribute("WekaWrapperClassification", classificationAttributeValues)
    attributes.add(classificationAttribute)
    log.info("generateAttributes after creation of attributes")
    (attributes, classificationAttributeValues.indexOf("buggy").toDouble, classificationAttributeValues.indexOf("clean").toDouble)
  }

  /**
   * Converts labelled bags into a Weka training set.
   *
   * @param bags labelled bag-of-words vectors
   * @param keys vocabulary shared by all bags
   */
  def generateInstances(bags: List[ClassifiedBagOfWords], keys: Array[String]): Instances = {
    val (attributes, buggyValue, cleanValue) = generateAttributes(keys)
    log.info("generateInstances before createTrainingInstance for each instance ")
    val instances = new Instances("Training", attributes, 0)
    // Instance construction is parallelized; insertion stays sequential because
    // weka.core.Instances is not thread-safe.
    bags.par.
      map(bag => createTrainingInstance(bag, keys, buggyValue, cleanValue)).seq.
      foreach(instance => instances.add(instance))
    instances.setClassIndex(instances.numAttributes() - 1)
    log.info("generateInstances after createTrainingInstance for each instance ")
    instances
  }

  /** Fills a Double array builder with the bag's frequency for each key (0 when absent). */
  private def populateValues(bag: BagOfWords, keys: Array[String]): ofDouble = {
    val bagParMap = bag.map.par
    val arrayBuilderOfDouble = new ofDouble()
    arrayBuilderOfDouble.sizeHint(keys.length)
    var i = 0
    while (i < keys.length) {
      // Missing keys contribute a frequency of 0.
      arrayBuilderOfDouble += bagParMap.getOrElse(keys(i), 0).toDouble
      i += 1
    }
    arrayBuilderOfDouble
  }

  /** Builds one training instance: feature values followed by the class value. */
  def createTrainingInstance(bag: ClassifiedBagOfWords, keys: Array[String], buggyValue: Double, cleanValue: Double): Instance = {
    val b = populateValues(bag, keys)
    if (bag.isBug) {
      b += buggyValue
    } else {
      b += cleanValue
    }
    val values = b.result
    val instance = new DenseInstance(1.0, values)
    instance
  }

  /**
   * Builds a single-instance dataset for classifying an unlabelled bag.
   * The class value slot is filled with 0 as a placeholder.
   */
  def createClassificationInstances(bag: BagOfWords, keys: Array[String]): Instances = {
    val attributes = generateAttributes(keys)
    val values = (populateValues(bag, keys) += 0).result
    val instance = new DenseInstance(1.0, values)
    val instances = new Instances("Classification", attributes._1, 0)
    instances.setClassIndex(instances.numAttributes() - 1)
    instances.add(instance)
    instances
  }

  /** Trains the underlying classifier on the given dataset. */
  def train(instances: Instances)

  /** Returns the trained classifier, persisting it if the implementation does so. */
  def saveModel: Classifier

  /** Prints an evaluation of the current model against the given dataset. */
  def printEvaluation(instances: Instances)
}
| mfejzer/CommitClassification | src/main/scala/pl/umk/bugclassification/scmparser/training/WekaWrapper.scala | Scala | bsd-3-clause | 3,060 |
package unluac.parse
import java.nio.ByteBuffer
class LBooleanType extends BObjectType[LBoolean] {

  /** Reads one boolean constant from the chunk: byte 0 -> false, byte 1 -> true. */
  def parse(buffer: ByteBuffer, header: BHeader): LBoolean = {
    val raw: Int = buffer.get
    // Any byte other than 0x00 or 0x01 means the chunk is malformed.
    if ((raw & 0xFFFFFFFE) != 0) throw new IllegalStateException
    val bool = if (raw == 0) LBoolean.LFALSE else LBoolean.LTRUE
    if (header.debug) {
      System.out.println("-- parsed <boolean> " + bool)
    }
    bool
  }
}
} | danielwegener/unluac-scala | shared/src/main/scala/unluac/parse/LBooleanType.scala | Scala | mit | 482 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs10x.boxes
import org.mockito.Mockito._
import org.scalatest.BeforeAndAfterEach
import uk.gov.hmrc.ct.accounts.AccountStatementValidationFixture
import uk.gov.hmrc.ct.accounts.frs10x.retriever.Frs10xDormancyBoxRetriever
class ACQ8990Spec extends AccountStatementValidationFixture[Frs10xDormancyBoxRetriever] with BeforeAndAfterEach {

  // Smart-null mock: unstubbed calls fail with a descriptive error instead of an NPE.
  override val boxRetriever = mock[Frs10xDormancyBoxRetriever] (RETURNS_SMART_NULLS)

  // Default stubbing applied before each shared fixture test.
  override def setupMocks() = {
    when(boxRetriever.profitAndLossStatementRequired()).thenReturn(ProfitAndLossStatementRequired(true))
  }

  // Shared statement-validation tests for the ACQ8990 box; registered during class construction.
  doStatementValidationTests("ACQ8990", ACQ8990.apply)

  "ACQ8990 should" should {
    "validate successfully when not set and notTradedStatementRequired false" in {
      // When no profit-and-loss statement is required, an unset ACQ8990 is valid.
      when(boxRetriever.profitAndLossStatementRequired()).thenReturn(ProfitAndLossStatementRequired(false))
      ACQ8990(None).validate(boxRetriever) shouldBe Set.empty
    }
  }
}
| pncampbell/ct-calculations | src/test/scala/uk/gov/hmrc/ct/accounts/frs10x/boxes/ACQ8990Spec.scala | Scala | apache-2.0 | 1,554 |
/*
* Copyright 2015 Ark International Group
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spream
import scala.reflect.ClassTag
import scala.collection.mutable
import org.apache.spark.rdd._
import org.apache.spark.{RangePartitioning, Partitioner}
object PartitionLocation {
  // Why a record is present in a partition: it either belongs there (Current)
  // or was duplicated from a neighbouring partition to fill the past/future
  // overlap windows (Past / Future).
  sealed trait EnumVal
  case object Past extends EnumVal
  case object Current extends EnumVal
  case object Future extends EnumVal
}
/**
* Given a series indexed by K, it can be parallelised across partitions to a series indexed by PartitionedSeriesKey[K].
* @param partition the id of the partition containing this. This is used to implement efficient partitioning and
* probably should not be used by client code.
* @param key the underlying key for the series.
* @param location whether key is part of this current partition, or duplicated from a
* previous (past) or subsequent (future) partition in order to allow moving window computations.
* @tparam K
*/
case class PartitionedSeriesKey[K : Ordering : Numeric : ClassTag](partition : Int, key : K, location : PartitionLocation.EnumVal)

object PartitionedSeriesKey {
  // Order by partition first so that repartition-and-sort groups each
  // partition's records together, then by the underlying series key.
  // Note: `location` deliberately does not participate in the ordering.
  implicit def ordering[K : Ordering] : Ordering[PartitionedSeriesKey[K]] = Ordering.by(x => (x.partition,x.key))
}
//TODO write this like PairRDDFunctions and have an implicit conversion, so can convert ordered RDDs to this easily.
//TODO prevent the currently required type specification required when calling these functions (the above may help)
//TODO type the resulting RDD, e.g. SeriesRDD
//TODO implement faster ops on this, because we know it's a type of range partitioning so filtering can be made fast in the same way.
//TODO consider writing this ias a subclass of RangePartitioner somehow (though note the duplication cannot be done by getPartition).
/**
* In order to parallelise moving window computations on series, it is necessary to duplicate data so that there
* is an overlap between partitions.
* Specifically, a partition p must have enough data from partition p-1 and possibly earlier (and p+1 and possibly
* later) to fill the past (and future) windows.
*/
object MovingWindowPartitioning {

  // For each interval (up to the Option[K] upper bound; None = first interval's
  // lower-unbounded start), the set of (partition, location) labels to apply.
  type IntervalSets[K] = List[(Option[K],Set[(Int, PartitionLocation.EnumVal)])]

  /**
   * Given rangeBounds and desired past and future window widths, determine the (Past,Current,Future) labelling
   * that should be applied within each interval.
   * This allows efficient application of labeling.
   * @param rangeBounds must be sorted according to K
   * @param pastWindow
   * @param futureWindow
   * @return A list of (upper bound, labels), where labels are to be applied until the bound is hit.
   */
  def intervalSets[K : Ordering : Numeric](rangeBounds : Array[K], pastWindow : K, futureWindow : K): IntervalSets[K] = {

    var numeric = implicitly[Numeric[K]]

    // Event markers for the sweep below: a label becomes active at Start and
    // inactive at End.
    val Start = 's'
    val End = 'e'

    // For each range bound, emit the sweep events that open/close the Current,
    // Future (of partition p) and Past/Current (of partition p+1) label spans,
    // then sort all events by their key position.
    val allBounds = rangeBounds.zipWithIndex.flatMap{ case (ub,p) =>
      (p,PartitionLocation.Current,End,ub) ::
      (p,PartitionLocation.Future,Start,ub) ::
      (p,PartitionLocation.Future,End,numeric.plus(ub,futureWindow)) ::
      (p+1,PartitionLocation.Past,Start,numeric.minus(ub,pastWindow)) ::
      (p+1,PartitionLocation.Past,End,ub) ::
      (p+1,PartitionLocation.Current,Start,ub) :: Nil : List[(Int,PartitionLocation.EnumVal,Char,K)]
    }.sortBy(_._4)

    type ACC = IntervalSets[K]
    val empty : ACC = Nil

    //Anything lower than first split is in "this" of first partition
    //We start with bound being lower
    val initial : ACC = (Option.empty[K],Set((0,PartitionLocation.Current : PartitionLocation.EnumVal))) :: Nil

    // Sweep over the sorted events, maintaining the currently-active label set.
    // Each event either extends the head interval (same bound) or opens a new one.
    val intervalSets: ACC = allBounds.foldLeft(initial){
      case (acc,(p,what,se,bound)) =>
        val currBound = acc.head._1
        val currSet = acc.head._2
        val entry = (p,what)
        val ns = se match {
          case Start => currSet + entry
          case End => currSet - entry
        }
        //Note this reverses the list.
        if (currBound.map(_ == bound).getOrElse(false))
          (Some(bound), ns) :: acc.tail
        else
          (Some(bound), ns) :: acc
    }.foldLeft((Option.empty[K],empty)){
      //Note this reverses again. Here, we're making the bound upper, rather than lower.
      case ((prevBound,res),(bound,set)) => (bound,(prevBound,set) :: res)
    }._2

    // Sanity check: at least one interval per partition must have been produced.
    require(rangeBounds.size+1 <= intervalSets.size)

    intervalSets
  }

  //TODO can we enforce rdd ordered?
  /**
   * Applies duplication and labelling as defined by intervalSets in a streaming fashion over each partition.
   * @param rdd ordered by K
   * @param intervalSets
   * @return
   */
  def duplicateToIntervals[K : Ordering : Numeric : ClassTag, V : ClassTag, P <: Product2[K,V] : ClassTag](
    rdd: RDD[P], intervalSets : IntervalSets[K]): RDD[Product2[PartitionedSeriesKey[K], V]] = {
    def f(it : Iterator[P]) = new DuplicateToIntervalsIterator[K,V,P](it,intervalSets)
    // preservesPartitioning = true: keys are wrapped but partition placement is unchanged.
    rdd.mapPartitions(f,true)
  }

  /**
   * Apply the pre-computed partitioning.
   * @param rdd
   * @param rangeBounds used to pre-label partitions
   * @return
   */
  def applyPartitioning[K : Ordering : Numeric : ClassTag, V : ClassTag, P <: Product2[PartitionedSeriesKey[K],V] : ClassTag](
    rdd: RDD[P], rangeBounds : Array[K], ascending : Boolean): RDD[(PartitionedSeriesKey[K], V)] = {
    val f = new OrderedRDDFunctions[PartitionedSeriesKey[K], V, P](rdd)
    f.repartitionAndSortWithinPartitions(new PartitionedSeriesPartitioner(rangeBounds, ascending))
  }

  /**
   * Convert an RDD of (K,V) ordered by K (a series) into an RDD with appropriate overlaps and labels to allow
   * moving window computations to be applied. Data is distributed roughly evenly across the resulting partitions.
   * @param rdd
   * @param pastWidth defines window width in which past events will be made available in each partition.
   * @param futureWidth
   * @param partitions number of partitions desired
   * @tparam K
   * @tparam V
   * @tparam P
   * @return
   */
  def movingWindowPartitioned[K : Ordering : Numeric : ClassTag, V : ClassTag, P <: Product2[K,V] : ClassTag](
    rdd: RDD[P], pastWidth : K, futureWidth : K, partitions : Int): RDD[(PartitionedSeriesKey[K], V)] = {
    // 1) sample range bounds, 2) compute label intervals, 3) duplicate/label
    // records in a streaming pass, 4) repartition and sort by the wrapped key.
    val rb = RangePartitioning.rangeBounds[K,V](rdd,partitions)
    val is = intervalSets(rb,pastWidth,futureWidth)
    val d = duplicateToIntervals[K,V,P](rdd,is)
    applyPartitioning(d,rb,true)
  }
}
//TODO extend sRangPartitioner, and if not PartitionedSeriesKey, delegate to that... should work ??
/**
 * Partitioner for RDDs keyed by [[PartitionedSeriesKey]]: the target partition
 * is pre-computed and stored on the key itself, so `getPartition` is a field
 * read. `getPartitionTypesafe(k: K)` additionally allows range-based filtering
 * against the raw key space using the same bounds.
 */
class PartitionedSeriesPartitioner[K : Ordering : ClassTag](val rangeBounds : Array[K], val ascending : Boolean) extends Partitioner { //RangePartitioner[K,Unit](rangeBounds.length+1,null,true) {

  require(ascending,"Only ascending is completely supported at this stage")

  // Never reassigned, so a val (was a var, mirroring Spark's RangePartitioner
  // which uses vars only for its custom serialization — not needed here).
  private val ordering = implicitly[Ordering[K]]

  override def numPartitions: Int = rangeBounds.length + 1

  // These are used to filter by range
  def getPartitionTypesafe(key: K): Int = doGetPartition(key)
  def getPartitionTypesafe(key: PartitionedSeriesKey[K]): Int = key.partition

  // Used to perform partitioning. No control over the API to improve this...
  override def getPartition(key: Any): Int =
    getPartitionTypesafe(key.asInstanceOf[PartitionedSeriesKey[K]])

  // Never reassigned, so a val. Specialized binary search for K.
  private val binarySearch: ((Array[K], K) => Int) =
    org.apache.spark.util.CollectionsUtils.makeBinarySearch[K]

  // Unfortunately RangePartitioner isn't written in a way where you can inherit from it, so the following
  // is almost a cut-and-paste of functionality there.
  private def doGetPartition(k: K): Int = {
    var partition = 0
    if (rangeBounds.length <= 128) {
      // If we have less than 128 partitions naive search
      while (partition < rangeBounds.length && ordering.gt(k, rangeBounds(partition))) {
        partition += 1
      }
    } else {
      // Determine which binary search method to use only once.
      partition = binarySearch(rangeBounds, k)
      // binarySearch either returns the match location or -[insertion point]-1
      if (partition < 0) {
        partition = -partition-1
      }
      if (partition > rangeBounds.length) {
        partition = rangeBounds.length
      }
    }
    if (ascending) {
      partition
    } else {
      rangeBounds.length - partition
    }
  }
}
/**
* Applies interval mapping in a streaming fashion.
* @param it
* @param intervalSets
* @tparam K
* @tparam V
* @tparam P
*/
class DuplicateToIntervalsIterator[K : Ordering : Numeric : ClassTag,V,P <: Product2[K,V] : ClassTag](
  it : Iterator[P], intervalSets : List[(Option[K],Set[(Int, PartitionLocation.EnumVal)])]) extends Iterator[Product2[PartitionedSeriesKey[K],V]]() {

  private val ordering = implicitly[Ordering[K]]

  // Pending output: one upstream record can expand into several labelled records.
  private val queue = new mutable.Queue[Product2[PartitionedSeriesKey[K],V]]()

  // Intervals not yet passed; the head is the interval containing the current key.
  private val currentIntervals: mutable.Queue[(Option[K], Set[(Int, PartitionLocation.EnumVal)])] = mutable.Queue() ++ intervalSets

  private var prev : Option[P] = None //Only used to enforce ordering

  //Skip to correct interval for k (interval is closed below and open above).
  //In future, could be worth changing queue to an Array and then binary searching it to get to the initial location.
  //Note: Will probably make little difference in practice though as it will only happen once per partition.
  def skipTo(k : K) =
    while (currentIntervals.head._1.map(ordering.gteq(k, _)).getOrElse(false))
      currentIntervals.dequeue()

  override def hasNext: Boolean = !queue.isEmpty || it.hasNext

  override def next() = {
    // Refill the output queue from the next upstream record when needed.
    if (queue.isEmpty) {
      val n: P = it.next()
      // Input must be sorted by key; duplication relies on a single forward sweep.
      require(prev.map(p => ordering.gteq(n._1,p._1)).getOrElse(true),
        "Out of order: "+ n._1 + " not >= " + prev.get._1)
      skipTo(n._1)
      // Assign n to all currently active intervals
      queue ++= currentIntervals.head._2.map{ case (partition,what) =>
        (PartitionedSeriesKey(partition,n._1,what),n._2)
      }
      prev = Some(n)
    }
    queue.dequeue()
  }
}
| arkig/spream | src/main/scala/spream/MovingWindowPartitioning.scala | Scala | apache-2.0 | 10,584 |
package edu.osu.cse.groenkeb.logic
// Base type for all logical operators.
trait Operator {
  // Structural comparison used instead of equals: true when `op` has the same
  // operator shape as this one.
  def matches(op: Operator): Boolean
  override def toString(): String
}

// Marker trait for predicate operators (named predicates and identity).
trait Predicate extends Operator

trait Quantifier extends Operator {
  // Evaluates the quantified sentence `arg` over `domain`, using `functor`
  // as the truth assignment for sub-sentences.
  def evaluate(domain: Domain, functor: Sentence => Boolean, arg: Sentence): Boolean
}

trait Connective extends Operator {
  // Evaluates this connective applied to `args`, using `functor` as the
  // truth assignment for sub-sentences.
  def evaluate(functor: Sentence => Boolean, args: Sentence*): Boolean
}

// Arity markers for concrete connectives.
abstract class UnaryConnective extends Connective
abstract class BinaryConnective extends Connective
/**
 * A predicate identified purely by its name.
 * The identity predicate's reserved name may not be used here.
 */
case class NamedPredicate(name: String) extends Predicate {
  require(name != IdentityPredicate.name)
  // Two named predicates match exactly when their names coincide.
  def matches(op: Operator) = op match {
    case NamedPredicate(this.name) => true
    case _ => false
  }
  override def toString = name
}
/** The distinguished identity ("=") predicate. */
case class IdentityPredicate() extends Predicate {
  // Matches any other identity predicate instance.
  def matches(op: Operator) = op match {
    case _: IdentityPredicate => true
    case _ => false
  }
  override def toString = IdentityPredicate.name
}
/** Placeholder operator representing the absence of an operator. */
case class NullOp() extends Operator {
  // Matches any other null operator instance.
  def matches(op: Operator) = op match {
    case _: NullOp => true
    case _ => false
  }
  override def toString() = ""
}
object IdentityPredicate {
  // Reserved display name of the identity predicate; a constant, so a val
  // rather than a def (evaluated once at object initialization).
  val name = "I"
}
| bgroenks96/PropLogic | core/src/main/scala/edu/osu/cse/groenkeb/logic/operator.scala | Scala | mit | 1,169 |
package com.avsystem.scex
package compiler
import com.avsystem.scex.compiler.presentation.{CachingScexPresentationCompiler, ScexPresentationCompiler}
/**
 * Default [[ScexCompiler]] stack: wires together presentation-compiler support,
 * classfile reuse, template optimization, caching (both for compilation and the
 * presentation compiler) and weak-reference wrapping.
 *
 * NOTE(review): trait linearization order determines which mixin's override
 * wins for stackable behavior — confirm before reordering these mix-ins.
 *
 * Created: 17-10-2013
 * Author: ghik
 */
class DefaultScexCompiler(val settings: ScexSettings)
  extends ScexCompiler
  with ScexPresentationCompiler
  with ClassfileReusingScexCompiler
  with TemplateOptimizingScexCompiler
  with CachingScexCompiler
  with CachingScexPresentationCompiler
  with WeakReferenceWrappingScexCompiler
| AVSystem/scex | scex-core/src/main/scala/com/avsystem/scex/compiler/DefaultScexCompiler.scala | Scala | mit | 490 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of Scala code snippets matching specific criteria, giving a quick overview of the dataset's contents without deeper analysis.