code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
package cobase.play.post
import play.api.data.Form
import play.api.data.Forms._
/**
* The form which handles the creation of a post.
*/
object PostForm {

  /**
   * Play framework form backing post creation; rejects empty content
   * via the `nonEmptyText` constraint.
   */
  val form: Form[PostFormData] = Form(
    mapping("content" -> nonEmptyText)(PostFormData.apply)(PostFormData.unapply)
  )
}
case class PostFormData(content: String)
| Cobase/cobase-pro | app/cobase/play/post/PostForm.scala | Scala | mit | 359 |
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.hyperledger.network
import java.net.{InetAddress, InetSocketAddress}
import org.hyperledger.network.Version.{NETWORK_NODE, SIMPLE_NODE}
import org.scalatest.{FunSuite, Matchers}
import scodec.bits.BitVector
/**
*
*/
class ProtocolTests extends FunSuite with Matchers {
  val LOCAL_ADDR = new InetSocketAddress(InetAddress.getLocalHost, 8333)

  /** Builds a Version message advertising the given service bits. */
  def createVersion(services: BitVector) =
    Version.forNow(
      1,
      services,
      NetworkAddress.forVersion(services, LOCAL_ADDR),
      NetworkAddress.forVersion(services, LOCAL_ADDR),
      123,
      "Test",
      0,
      relay = false)

  test("VersionMessage#supportsServices") {
    // (advertised services, required services, expected support)
    val cases = Seq(
      (SIMPLE_NODE, SIMPLE_NODE, true),
      (SIMPLE_NODE, NETWORK_NODE, false),
      (NETWORK_NODE, SIMPLE_NODE, true),
      (NETWORK_NODE, NETWORK_NODE, true))
    for ((advertised, required, expected) <- cases) {
      createVersion(advertised).supportsServices(required) shouldBe expected
    }
  }
}
| DigitalAssetCom/hlp-candidate | server/network/src/test/scala/org/hyperledger/network/ProtocolTests.scala | Scala | apache-2.0 | 1,502 |
package chrome.sockets.tcpServer
import chrome.events.EventSource
import chrome.sockets.tcp
import chrome.sockets.tcpServer.bindings._
import scala.concurrent.Future
import scala.scalajs.concurrent.JSExecutionContext.Implicits.queue
import scala.scalajs.js
/** Typed, per-socket wrapper around the chrome.sockets.tcpServer API. */
class Socket(val socketId: SocketId) {

  /** Accept events for this socket only, carrying the connected TCP client. */
  val onAccept = TCPServer.onAccept
    .filter(event => event.socketId == socketId)
    .map(event => Socket.Accepted(tcp.Socket(event.clientSocketId)))

  /** Accept errors for this socket only. */
  val onAcceptError = TCPServer.onAcceptError
    .filter(event => event.socketId == socketId)
    .map(event => Socket.Error(event.resultCode))

  /** Accepts and accept-errors merged into one event stream. */
  val all: EventSource[Socket.AcceptEvent] = onAccept.merge(onAcceptError)

  def update(properties: SocketProperties): Future[Unit] =
    TCPServer.update(socketId, properties)

  def setPaused(paused: Boolean): Future[Unit] =
    TCPServer.setPaused(socketId, paused)

  def listen(address: String,
             port: Int,
             backlog: js.UndefOr[Int] = js.undefined): Future[Int] =
    TCPServer.listen(socketId, address, port, backlog)

  def disconnect: Future[Unit] = TCPServer.disconnect(socketId)

  def close: Future[Unit] = TCPServer.close(socketId)

  def getInfo: Future[SocketInfo] = TCPServer.getInfo(socketId)
}
object Socket {

  /** Outcome of an accept attempt on a server socket. */
  sealed trait AcceptEvent
  case class Accepted(client: chrome.sockets.tcp.Socket) extends AcceptEvent
  case class Error(code: Int) extends AcceptEvent

  /** Wraps an already-created socket id. */
  def apply(id: SocketId): Socket = new Socket(id)

  /** Creates a new server socket and wraps it once creation completes. */
  def apply(name: String = "", persistent: Boolean): Future[Socket] =
    TCPServer
      .create(SocketProperties(persistent, name))
      .map { info =>
        Socket(info.socketId)
      }
}
| lucidd/scala-js-chrome | bindings/src/main/scala/chrome/sockets/tcpServer/Socket.scala | Scala | mit | 1,662 |
package com.example
import akka.http.scaladsl.model.{StatusCodes, HttpEntity, ContentTypes}
import akka.http.scaladsl.server.{Directive1, Directives}
import com.example.model.Post
import com.example.service.{Health, HealthCheckService, PostService}
import scala.concurrent.Future
/**
 * HTTP routes for the example app: listing posts, creating a post, and a
 * health endpoint.
 *
 * NOTE(review): service calls are wrapped in Future(...) on the global
 * execution context — presumably because PostService blocks (e.g. JDBC);
 * confirm and consider a dedicated blocking dispatcher.
 */
class Route(postService: PostService, healthCheckService: HealthCheckService) extends Directives {
  // Fetches all posts asynchronously and provides them to the inner route.
  private[this] def fetchPosts: Directive1[Seq[Post]] = {
    import scala.concurrent.ExecutionContext.Implicits.global
    onSuccess(Future(postService.all()))
  }
  // Creates a post, then re-fetches the full post list for rendering.
  private[this] def createPost(title: String, content: String): Directive1[Seq[Post]] = {
    import scala.concurrent.ExecutionContext.Implicits.global
    onSuccess {
      Future(postService.create(title, content)).map(_ => postService.all())
    }
  }
  // Renders the Twirl index template with the given posts as text/html.
  private[this] def index(posts: Seq[Post]) = {
    complete(HttpEntity(ContentTypes.`text/html(UTF-8)`, html.index.render(posts).body))
  }
  // GET /            -> index with all posts
  // POST /post       -> create from form fields, then render index
  // GET /post        -> index with all posts
  // GET /health      -> "ok" or 503 depending on the health check
  val route = pathSingleSlash {
    fetchPosts(index)
  } ~ path("post") {
    post {
      formFields('title, 'content) { (title, content) =>
        createPost(title, content)(index)
      }
    } ~ get {
      fetchPosts(index)
    }
  } ~ path("health") {
    healthCheckService.health() match {
      case Health.Healthy => complete("ok")
      case Health.Unhealthy => complete(StatusCodes.ServiceUnavailable)
    }
  }
}
| ocadaruma/introduction-to-kubernetes | app/src/main/scala/com/example/Route.scala | Scala | mit | 1,377 |
package org.coursera.naptime.courier
import com.linkedin.data.DataMap
import org.coursera.courier.templates.DataTemplates
import org.scalatest.junit.AssertionsForJUnit
/**
 * JSON equality assertions for Courier tests: compares parsed DataMaps
 * rather than raw strings, so formatting/key order differences don't matter.
 */
object CourierAssertions extends AssertionsForJUnit {

  /** Asserts two JSON strings parse to structurally equal DataMaps. */
  def assertSameJson(json: String, expectedJson: String): Unit = {
    val actual = DataTemplates.readDataMap(json)
    val expected = DataTemplates.readDataMap(expectedJson)
    assert(actual === expected)
  }

  /** Asserts a DataMap equals the DataMap parsed from the expected JSON. */
  def assertSameJson(dataMap: DataMap, expectedJson: String): Unit = {
    val expected = DataTemplates.readDataMap(expectedJson)
    assert(dataMap === expected)
  }
}
| vkuo-coursera/naptime | naptime-models/src/test/scala/org/coursera/naptime/courier/CourierAssertions.scala | Scala | apache-2.0 | 527 |
package cfc.shale.http_server
import cfc.shale.core.ShaleSessionId
import cfc.shale.selenium.SeleniumSessionId
/**
 * A browser session tracked by the shale HTTP server.
 *
 * NOTE(review): field semantics inferred from names/types — confirm:
 * `webdriverId` is presumably None until a Selenium session is attached,
 * and `reserved` presumably marks the session as checked out by a client.
 */
case class Session(
  sessionId: ShaleSessionId,
  webdriverId: Option[SeleniumSessionId],
  tags: Set[String],
  reserved: Boolean,
  currentUrl: String,
  browserName: String,
  node: Node
)
| cardforcoin/shale-scala | shale-http-server/src/main/scala/cfc/shale/http_server/Session.scala | Scala | mit | 306 |
/*
* Copyright 2009-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package json
import util.control.Exception._
import org.specs2.mutable.Specification
/**
 * Regression tests for previously reported lift-json parser bugs.
 * Ticket numbers appear in parentheses where known.
 */
object ParserBugs extends Specification {
  "Unicode ffff is a valid char in string literal" in {
    parseOpt(""" {"x":"\\uffff"} """).isDefined mustEqual true
  }
  // Regression for the well-known "Java hangs on 2.2250738585072012e-308" bug:
  // both spellings of the value must fail to parse rather than loop forever.
  "Does not hang when parsing 2.2250738585072012e-308" in {
    (allCatch.opt(parse(""" [ 2.2250738585072012e-308 ] """)) mustEqual None) and
    (allCatch.opt(parse(""" [ 22.250738585072012e-309 ] """)) mustEqual None)
  }
  "Does not allow colon at start of array (1039)" in {
    parseOpt("""[:"foo", "bar"]""") mustEqual None
  }
  "Does not allow colon instead of comma in array (1039)" in {
    parseOpt("""["foo" : "bar"]""") mustEqual None
  }
  "Solo quote mark should fail cleanly (not StringIndexOutOfBoundsException) (1041)" in {
    JsonParser.parse("\\"", discardParser) must throwA[JsonParser.ParseException].like {
      case e => e.getMessage must startWith("unexpected eof")
    }
  }
  // Round-trip: a field name containing an escape must be quoted/escaped on
  // render and parse back to the same AST.
  "Field names must be quoted" in {
    val json = JObject(List(JField("foo\\nbar", JInt(1))))
    val s = compact(render(json))
    (s mustEqual """{"foo\\nbar":1}""") and
    (parse(s) mustEqual json)
  }
  "Double in scientific notation with + can be parsed" in {
    val json = JObject(List(JField("t", JDouble(12.3))))
    val s = """{"t" : 1.23e+1}"""
    parse(s) mustEqual json
  }
  // Parser callback that consumes and discards tokens until End; used to
  // drive JsonParser over input whose parsed values we don't care about.
  private val discardParser = (p : JsonParser.Parser) => {
    var token: JsonParser.Token = null
    do {
      token = p.nextToken
    } while (token != JsonParser.End)
  }
}
| sortable/framework | core/json/src/test/scala/net/liftweb/json/ParserBugs.scala | Scala | apache-2.0 | 2,172 |
import scala.tools.partest.DirectTest
/**
 * Partest DirectTest: compiles the snippet with -Vprint-pos/-Vprint:typer and
 * -Yrangepos so the check file can assert the positions assigned to the
 * synthetic selectDynamic("field") call generated for `d.field`.
 */
object Test extends DirectTest {

  // testOutput is on the classpath so the compiled snippet can see class D below.
  override def extraSettings: String =
    s"-usejavacp -Vprint-pos -Vprint:typer -Yrangepos -Ystop-after:typer -cp ${testOutput.path}"

  override def code = """
    object X {
      val d = new D
      d.field
    }
  """.trim

  override def show(): Unit = compile()
}
import language.dynamics
class D extends Dynamic {
def selectDynamic(name: String) = ???
}
| scala/scala | test/files/run/dynamic-selectDynamic.scala | Scala | apache-2.0 | 439 |
package beam.agentsim.agents.ridehail
import akka.actor.ActorRef
import beam.agentsim.agents.vehicles.PassengerSchedule
import org.matsim.api.core.v01.Id
import org.matsim.vehicles.Vehicle
/**
 * Request to reposition an idle ride-hail vehicle.
 *
 * NOTE(review): field meanings inferred from names — presumably the
 * `rideHailAgent` actor should drive `vehicleId` along `passengerSchedule`
 * starting at simulation `tick`; confirm against the sender/handler.
 */
case class RepositionVehicleRequest(
  passengerSchedule: PassengerSchedule,
  tick: Int,
  vehicleId: Id[Vehicle],
  rideHailAgent: ActorRef
)
| colinsheppard/beam | src/main/scala/beam/agentsim/agents/ridehail/RepositionVehicleRequest.scala | Scala | gpl-3.0 | 335 |
package lila.blog
import org.joda.time.DateTime
// Lightweight blog-post summary used for listings: id/slug for linking,
// title + shortlede for display, plus publication date and preview image URL.
case class MiniPost(
    id: String,
    slug: String,
    title: String,
    shortlede: String,
    date: DateTime,
    image: String)
object MiniPost {
  /**
   * Builds a MiniPost from a prismic document, reading fields under the given
   * collection prefix (e.g. s"$coll.title"). Returns None when the title or
   * date is missing; shortlede and image fall back to "" (the unary `~` is
   * presumably lila's Option-default operator — confirm).
   */
  def fromDocument(coll: String)(doc: io.prismic.Document): Option[MiniPost] = for {
    title <- doc getText s"$coll.title"
    shortlede = ~(doc getText s"$coll.shortlede")
    date <- doc getDate s"$coll.date" map (_.value)
    image = ~doc.getImage(s"$coll.image", "column").map(_.url)
  } yield MiniPost(doc.id, doc.slug, title, shortlede, date.toDateTimeAtStartOfDay, image)
}
| Happy0/lila | modules/blog/src/main/MiniPost.scala | Scala | mit | 577 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.fuberlin.wiwiss.silk.util.convert
import util.parsing.combinator.RegexParsers
import de.fuberlin.wiwiss.silk.util.ValidationException
import de.fuberlin.wiwiss.silk.entity.Path
import util.parsing.input.CharSequenceReader
import de.fuberlin.wiwiss.silk.entity.{Restriction, SparqlRestriction}
import de.fuberlin.wiwiss.silk.config.Prefixes
import de.fuberlin.wiwiss.silk.entity.Restriction.{Operator, Or, Condition, And}
/**
* Converts a SPARQL restriction to a Silk restriction.
*/
/**
 * Parses a SPARQL restriction (a graph pattern of UNION groups of triple
 * patterns) into a Silk Restriction tree of Or/And/Condition operators.
 */
class SparqlRestrictionParser(implicit prefixes: Prefixes) extends RegexParsers {
  /**
   * Parses the given SPARQL restriction.
   * @throws ValidationException if the input cannot be parsed.
   */
  def apply(sparqlRestriction: SparqlRestriction): Restriction = {
    parseAll(parser, new CharSequenceReader(sparqlRestriction.toString)) match {
      case Success(parsedPath, _) => parsedPath
      case error: NoSuccess => throw new ValidationException(error.toString)
    }
  }

  // Whitespace is handled explicitly in the grammar rules below.
  override val skipWhitespace = false

  // Entry rule: the whole restriction is one (possibly UNION-ed) pattern group.
  def parser: Parser[Restriction] = unionPatterns ^^ {
    r => Restriction(Some(r))
  }

  // One or more groups separated by UNION; a single group stays as-is,
  // multiple groups become an Or.
  def unionPatterns: Parser[Operator] = rep1(unionPattern <~ opt("UNION" <~ anyWhitespace) <~ opt(".")) ^^ {
    case operator :: Nil => operator
    case operators => Or(operators)
  }

  // A group of triple patterns, optionally wrapped in curly braces.
  def unionPattern = (anyWhitespace ~> opt(repsep(fowbrace, anyWhitespace)) ~> anyWhitespace) ~> triplePatterns <~ (anyWhitespace <~ opt(repsep(revbrace, anyWhitespace)) <~ anyWhitespace) ^^ {
    case patterns => patterns
  }

  //one or more whitespace
  def anyWhitespace = """\\s*""".r

  // curly brace forward
  def fowbrace = """\\{+""".r

  // curly brace reward
  def revbrace = """\\}+""".r

  // One or more triple patterns, each optionally terminated by '.'; a single
  // pattern stays a Condition, multiple patterns become an And.
  def triplePatterns: Parser[Operator] = rep1(triplePattern <~ anyWhitespace <~ opt(".")) ^^ {
    case condition :: Nil => condition
    case conditions => And(conditions)
  }

  // subject predicate object -> Condition over the path ?subject/predicate.
  def triplePattern = subject ~ predicate ~ objectt ^^ {
    case v ~ p ~ o => Condition.resolve(Path.parse("?" + v + "/" + p), o)
  }

  // Subjects must be variables (e.g. ?a).
  def subject = "?" ~> idChars ^^ {
    v => v
  }

  def predicate = " " ~> (prefixName | uri | rdfTypeReplacement)

  def objectt = " " ~> (variable | prefixName | uri)

  // Object variables are accepted but their name is discarded (mapped to "").
  def variable = "?" ~> idChars ^^ {
    case name => ""
  }

  // Full URIs are kept in angle brackets.
  def uri = "<" ~> uriChars <~ ">" ^^ {
    case uri => "<" + uri + ">"
  }

  // Prefixed names such as rdf:type are reassembled verbatim.
  def prefixName = idChars ~ ":" ~ idChars ^^ {
    case prefix ~ ":" ~ name => prefix + ":" + name
  }

  // The SPARQL shorthand 'a' expands to rdf:type.
  def rdfTypeReplacement = "a" ^^ {
    _ => "rdf:type"
  }

  val idChars = """[a-zA-Z_]\\w*""".r

  val uriChars = """[^>]+""".r
}
| fusepoolP3/p3-silk | silk-core/src/main/scala/de/fuberlin/wiwiss/silk/util/convert/SparqlRestrictionParser.scala | Scala | apache-2.0 | 3,011 |
package workflow
/**
* A rule to remove all nodes & sources in a graph that don't lead to any sink,
* and are effectively unused.
*/
/**
 * A rule to remove all nodes & sources in a graph that don't lead to any sink,
 * and are effectively unused.
 */
object UnusedBranchRemovalRule extends Rule {
  override def apply(plan: Graph, prefixes: Map[NodeId, Prefix]): (Graph, Map[NodeId, Prefix]) = {
    // Every graph id reachable backwards from some sink is "live".
    val liveIds = plan.sinks.foldLeft(Set[GraphId]()) { (acc, sink) =>
      acc ++ AnalysisUtils.getAncestors(plan, sink)
    }

    // Anything not live feeds no sink and can be pruned.
    val deadNodes = plan.nodes.filterNot(liveIds.contains)
    val deadSources = plan.sources.filterNot(liveIds.contains)

    val planWithoutDeadSources = deadSources.foldLeft(plan) { (graph, source) =>
      graph.removeSource(source)
    }
    val prunedPlan = deadNodes.foldLeft(planWithoutDeadSources) { (graph, node) =>
      graph.removeNode(node)
    }

    // Drop prefixes belonging to removed nodes as well.
    (prunedPlan, prefixes -- deadNodes)
  }
}
| tomerk/keystone | src/main/scala/workflow/UnusedBranchRemovalRule.scala | Scala | apache-2.0 | 970 |
/*
* Copyright 2017-2020 47 Degrees Open Source <https://www.47deg.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package higherkindness.mu.rpc.channel.okhttp
import higherkindness.mu.rpc.ChannelFor
import higherkindness.mu.rpc.channel._
import cats.effect.IO
import io.grpc.okhttp.OkHttpChannelBuilder
/**
 * Runs the shared ManagedChannelInterpreter test suite against the OkHttp
 * gRPC transport, supplying OkHttpChannelBuilder as the channel factory.
 */
class ManagedChannelInterpreterOkHttpTests extends ManagedChannelInterpreterTests {

  // Builds an interpreter whose channels are created via OkHttp, either by
  // host/port address or by target string.
  def mkInterpreter(
      channelFor: ChannelFor,
      channelConfigList: List[ManagedChannelConfig]
  ): ManagedChannelInterpreter[IO] =
    new ManagedChannelInterpreter[IO](
      channelFor,
      channelConfigList,
      OkHttpChannelBuilder.forAddress(_, _),
      OkHttpChannelBuilder.forTarget(_)
    )
}
| frees-io/freestyle-rpc | modules/tests/src/test/scala/higherkindness/mu/rpc/channel/okhttp/ManagedChannelInterpreterOkHttpTests.scala | Scala | apache-2.0 | 1,229 |
package pdf
import java.io.{ByteArrayOutputStream, File, FileNotFoundException, OutputStream}
import com.google.inject.Inject
import org.apache.pdfbox.Overlay
import org.apache.pdfbox.pdmodel.PDDocument
import org.apache.pdfbox.pdmodel.PDPage
import org.apache.pdfbox.pdmodel.edit.PDPageContentStream
import org.apache.pdfbox.pdmodel.font.PDFont
import org.apache.pdfbox.pdmodel.font.PDType1Font
import org.joda.time.DateTime
import org.joda.time.DateTimeZone
import pdf.PdfServiceImpl.blankPage
import pdf.PdfServiceImpl.fontDefaultSize
import pdf.PdfServiceImpl.v948Blank
import uk.gov.dvla.vehicles.presentation.common.clientsidesession.TrackingId
import uk.gov.dvla.vehicles.presentation.common.model.AddressModel
import uk.gov.dvla.vehicles.presentation.common.services.DateService
import uk.gov.dvla.vehicles.presentation.common.views.constraints.RegistrationNumber
import uk.gov.dvla.vehicles.presentation.common.views.models.DayMonthYear
import scala.util.{Failure, Success, Try}
/**
 * Generates the V948 "retention of registration number" PDF.
 *
 * Page one is drawn from scratch (customer name/address, registration mark,
 * transaction id, and the retention date/time stamps) and is then overlaid
 * with the blank V948 template loaded by the companion object.
 */
final class PdfServiceImpl @Inject()(dateService: DateService) extends PdfService {

  /**
   * Renders the V948 document and returns it as a byte array.
   *
   * @param transactionId transaction identifier printed on the form
   * @param name          customer name
   * @param address       optional customer address
   * @param prVrm         the registration mark being retained
   * @param trackingId    request tracking id used for logging
   */
  def create(transactionId: String,
             name: String,
             address: Option[AddressModel],
             prVrm: String,
             trackingId: TrackingId): Array[Byte] = {
    val output = new ByteArrayOutputStream()
    v948(transactionId, name, address, prVrm, output, trackingId)
    output.toByteArray
  }

  // Builds the complete document (generated page plus blank second page),
  // overlays it with the template and writes the result to `output`.
  private def v948(transactionId: String,
                   name: String,
                   address: Option[AddressModel],
                   prVrm: String,
                   output: OutputStream,
                   trackingId: TrackingId) = {
    // Create a document and add a page to it
    implicit val document = new PDDocument()
    document.addPage(page1(transactionId, name, address, prVrm, document, trackingId))
    blankPage match {
      case Success(pdPage) => document.addPage(pdPage)
      case Failure(ex) => logMessage(trackingId, Error, ex.getMessage)
    }
    var documentWatermarked: PDDocument = null
    try {
      documentWatermarked = combineWithOriginal(trackingId)
      // Save the results and ensure that the document is properly closed:
      documentWatermarked.save(output)
    } catch {
      case e: Exception => logMessage(trackingId, Error,
        s"PdfServiceImpl v948 error when combining and saving: ${e.getMessage}")
    } finally {
      // combineWithOriginal may throw before the assignment above, leaving
      // documentWatermarked null; an unguarded close() would then throw an
      // NPE from this finally block and mask the logged error.
      if (documentWatermarked != null) documentWatermarked.close()
    }
    documentWatermarked
  }

  // Draws the generated first page: customer details, VRN, transaction id
  // and the retention date/time stamps.
  private def page1(implicit transactionId: String,
                    name: String, address: Option[AddressModel],
                    prVrm: String,
                    document: PDDocument,
                    trackingId: TrackingId): PDPage = {
    val page = new PDPage()
    implicit var contentStream: PDPageContentStream = null
    try {
      // Start a new content stream which will "hold" the to be created content
      contentStream = new PDPageContentStream(document, page)
      writeCustomerNameAndAddress(name, address)
      writeVrn(prVrm)
      writeTransactionId(transactionId)
      writeDateOfRetention()
    } catch {
      case e: Exception => logMessage(trackingId, Error,
        s"PdfServiceImpl v948 page1 error when writing vrn and dateOfRetention: ${e.getMessage}")
    } finally {
      // The PDPageContentStream constructor itself may have thrown, leaving
      // contentStream null; guard so the original failure is not replaced
      // by an NPE from this finally block.
      if (contentStream != null) contentStream.close()
    }
    page
  }

  // Selects plain Helvetica at the given size on the current content stream.
  private def fontHelvetica(size: Int)(implicit contentStream: PDPageContentStream): PDFont = {
    // Create a new font object selecting one of the PDF base fonts
    val font: PDFont = PDType1Font.HELVETICA
    contentStream.setFont(font, size)
    font
  }

  // Selects bold Helvetica at the given size on the current content stream.
  private def fontHelveticaBold(size: Int)(implicit contentStream: PDPageContentStream): PDFont = {
    // Create a new font object selecting one of the PDF base fonts
    val font: PDFont = PDType1Font.HELVETICA_BOLD
    contentStream.setFont(font, size)
    font
  }

  // Width (in page units) of the bounding box that surrounds the string.
  private def width(font: PDFont, content: String, fontSize: Int) = {
    font.getStringWidth(content) / 1000 * fontSize
  }

  // Greedily wraps words into lines of at most ~30 characters (joined with
  // spaces). Always consumes at least one word per line so that a single
  // over-long word cannot cause infinite recursion.
  private def wrapText(words: List[String]): List[List[String]] = words match {
    case Nil => Nil
    case _ =>
      val fitting = words.inits.dropWhile { _.mkString(" ").length > 30 }.next()
      // BUG FIX: if the first word alone exceeds the limit, `fitting` is
      // empty and the old code recursed forever on the unchanged list.
      val line = if (fitting.isEmpty) words.take(1) else fitting
      line :: wrapText(words.drop(line.length))
  }

  // Writes the wrapped customer name followed by the address lines,
  // moving down 15 units per line starting at y=580.
  private def writeCustomerNameAndAddress(name: String, address: Option[AddressModel])
                                         (implicit contentStream: PDPageContentStream): Unit = {
    var positionY = 580
    wrapText(name.split(" ").toList) foreach {
      words => {
        contentStream.beginText()
        fontHelvetica(fontDefaultSize)
        contentStream.moveTextPositionByAmount(330, positionY)
        contentStream.drawString(words.mkString(" "))
        contentStream.endText()
        positionY = positionY - 15
      }
    }
    address.foreach { a =>
      for (line <- a.address) {
        contentStream.beginText()
        fontHelvetica(fontDefaultSize)
        contentStream.moveTextPositionByAmount(330, positionY)
        contentStream.drawString(line)
        contentStream.endText()
        positionY = positionY - 15
      }
    }
  }

  // Writes the formatted registration mark, centred within a 180-unit box.
  private def writeVrn(registrationNumber: String)
                      (implicit contentStream: PDPageContentStream, document: PDDocument): Unit = {
    contentStream.beginText()
    val formattedVrm = RegistrationNumber.formatVrm(registrationNumber)
    val size = 26
    val font = fontHelveticaBold(size = size)
    contentStream.moveTextPositionByAmount(45, 385)
    // Centre the text.
    contentStream.moveTextPositionByAmount((180 - width(font, formattedVrm, fontSize = size)) / 2, 0)
    contentStream.drawString(formattedVrm)
    contentStream.endText()
  }

  // Writes the transaction id, centred within a 200-unit box.
  private def writeTransactionId(transactionId: String)
                                (implicit contentStream: PDPageContentStream, document: PDDocument): Unit = {
    contentStream.beginText()
    val size = 18
    val font = fontHelveticaBold(size = 18)
    contentStream.moveTextPositionByAmount(321, 388)
    // Centre the text.
    contentStream.moveTextPositionByAmount((200 - width(font, transactionId, fontSize = size)) / 2, 0)
    contentStream.drawString(transactionId) // Transaction ID
    contentStream.endText()
  }

  // Stamps "DVLA", the retention date and the retention time (Europe/London).
  private def writeDateOfRetention()(implicit contentStream: PDPageContentStream): Unit = {
    val today = DayMonthYear.from(new DateTime(dateService.now, DateTimeZone.forID("Europe/London")))
    val dateStamp = today.`dd shortMonth yyyy`
    val timeStamp = today.`hh:mm a`
    val font = fontHelvetica(size = fontDefaultSize)
    contentStream.beginText()
    contentStream.moveTextPositionByAmount(50, 280)
    contentStream.moveTextPositionByAmount((110 - width(font, dateStamp, fontDefaultSize)) / 2, 0) // Centre the text.
    contentStream.drawString("DVLA")
    contentStream.endText()
    contentStream.beginText()
    contentStream.moveTextPositionByAmount(45, 260)
    contentStream.moveTextPositionByAmount((85 - width(font, dateStamp, fontDefaultSize)) / 2, 0) // Centre the text.
    contentStream.drawString(dateStamp) // Date of retention
    contentStream.endText()
    contentStream.beginText()
    contentStream.moveTextPositionByAmount(50, 240)
    contentStream.moveTextPositionByAmount((110 - width(font, dateStamp, fontDefaultSize)) / 2, 0) // Centre the text.
    contentStream.drawString(timeStamp) // Time of retention
    contentStream.endText()
  }

  // Overlays the generated document with the blank V948 template.
  private def combineWithOriginal(trackingId: TrackingId)(implicit document: PDDocument): PDDocument = {
    // https://stackoverflow.com/questions/8929954/watermarking-with-pdfbox
    // Caution: You should make sure you match the number of pages in both document. Otherwise, you would end up with a
    // document with number of pages matching the one which has least number of pages.
    v948Blank match {
      case Success(blankFile) =>
        // Load document containing just the watermark image.
        val blankDoc = PDDocument.load(blankFile)
        val overlay = new Overlay()
        overlay.overlay(document, blankDoc)
      case Failure(ex) =>
        logMessage(trackingId, Error, ex.getMessage)
        // Other file was not found so cannot combine with it.
        document
    }
  }
}
object PdfServiceImpl {

  // Default font size (points) for body text on the generated page.
  private val fontDefaultSize = 12

  // The blank V948 template PDF, resolved once from the working directory.
  private val v948Blank: Try[File] = {
    val filename = "vrm-assign-online-v948-blank.pdf"
    try {
      val file = new File(filename)
      if (file.exists()) {
        //`PDF/A validation`(file, "v948Blank")
        // Validate that the file we have loaded meets the specification,
        // otherwise we are writing on top of existing problems.
        Success(file)
      }
      else {
        Failure(new FileNotFoundException("PdfService could not find blank file for v948"))
      }
    } catch {
      case ex: Exception => Failure(ex)
    }
  }

  // Creates an empty page (with an initialised, immediately-closed content
  // stream) to append to the generated document.
  private def blankPage(implicit document: PDDocument): Try[PDPage] = {
    var contentStream: PDPageContentStream = null
    try {
      val page = new PDPage()
      // Start a new content stream which will "hold" the to be created content
      contentStream = new PDPageContentStream(document, page)
      Success(page)
    }
    catch {
      // Fixed message: was mislabelled "page1" and interpolated
      // e.getStackTrace (an array toString), which carried no information.
      case e: Exception => Failure(
        new Exception(s"PdfServiceImpl blankPage error when creating content stream: ${e.getMessage}")
      )
    } finally {
      // The constructor may have thrown, leaving contentStream null; an
      // unguarded close() would throw an NPE from this finally block and
      // replace the Failure result.
      if (contentStream != null) contentStream.close()
    }
  }
}
/*
* The MIT License (MIT)
*
* Copyright (c) 2017 Lars Kroll <bathtor@googlemail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
package com.lkroll.common.collections
import scala.collection.mutable;
/** An implementation of MultiMap[A,B] with a TreeMap as outer collection and HashSet as inner collection.
  *
  * Keys iterate in ascending `Ordering[A]` order, which `firstKey`/`lastKey`
  * and `floor`/`ceil` rely on. Not thread-safe.
  */
@SerialVersionUID(-1017000328094141241L)
class TreeSetMultiMap[A: Ordering, B] extends MultiMap[A, B] with Serializable {
  type InnerCollection = mutable.HashSet[B]
  type KeySet = collection.SortedSet[A]

  private val ord = implicitly[Ordering[A]]
  private val inner: mutable.TreeMap[A, mutable.HashSet[B]] = new mutable.TreeMap

  // Fetches the value set for `key`, creating and registering an empty one
  // if absent. Replaces the duplicated get-or-create logic that previously
  // appeared in both put and putAll.
  private def setFor(key: A): mutable.HashSet[B] =
    inner.getOrElseUpdate(key, new mutable.HashSet[B])

  /** Adds a single key → value binding. */
  override def put(kv: (A, B)): Unit = {
    setFor(kv._1) += kv._2
  }

  /** Adds every value to the binding set of the given key. */
  def putAll(kv: (A, IterableOnce[B])): Unit = {
    setFor(kv._1) ++= kv._2
  }

  /** Removes a single binding; returns true if it was present.
    * NOTE: an emptied set stays registered, so `get(key)` afterwards returns
    * Some(empty set) — preserved for behavioural compatibility.
    */
  override def remove(kv: (A, B)): Boolean = {
    inner.get(kv._1) match {
      case Some(set) => set.remove(kv._2)
      case None => false
    }
  }

  /** Removes all bindings for the key, returning them if present. */
  override def remove(key: A): Option[InnerCollection] = inner.remove(key)

  /** The binding set for `key`, if any. */
  def get(key: A): Option[InnerCollection] = {
    inner.get(key)
  }

  override def iterator: Iterator[(A, InnerCollection)] = inner.iterator

  override def contains(key: A): Boolean = inner.contains(key)

  /** True if some value bound to `key` satisfies the predicate. */
  override def entryExists(key: A, p: B => Boolean): Boolean =
    inner.get(key) match {
      case Some(set) => set.exists(p)
      case None => false
    }

  override def keySet: KeySet = inner.keySet

  def firstKey: A = inner.firstKey

  def lastKey: A = inner.lastKey

  /** Largest key <= needle, or None. Linear scan over the sorted keys. */
  def floor(needle: A): Option[A] = {
    val it = inner.keysIterator
    var last: Option[A] = None
    while (it.hasNext) {
      val k = it.next()
      if (ord.gt(k, needle)) {
        return last
      } else {
        last = Some(k)
      }
    }
    last
  }

  /** Smallest key >= needle, or None. */
  def ceil(needle: A): Option[A] = {
    val it = inner.keysIteratorFrom(needle)
    if (it.hasNext) {
      Some(it.next())
    } else {
      None
    }
  }
}
object TreeSetMultiMap {
  // Creates a fresh, empty multimap ordered by the implicit Ordering[A].
  def empty[A: Ordering, B]: TreeSetMultiMap[A, B] = new TreeSetMultiMap;
}
| Bathtor/common-utils | data-tools/shared/src/main/scala-2.13/com/lkroll/common/collections/TreeSetMultiMap.scala | Scala | mit | 3,471 |
package com.github.mrpowers.spark.spec.sql
import org.apache.spark.sql.Encoders
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.scalatest._
import com.github.mrpowers.spark.spec.SparkSessionTestWrapper
import com.github.mrpowers.spark.fast.tests.DatasetComparer
/**
 * Specification skeleton covering the SparkSession API surface.
 * Most members are `pending` placeholders; only createDataset, read (CSV)
 * and version are currently exercised.
 */
class SparkSessionSpec extends FunSpec with SparkSessionTestWrapper with DatasetComparer {
  describe("#baseRelationToDataFrame") {
    pending
  }
  describe("#builder") {
    pending
  }
  describe("#catalog") {
    pending
  }
  describe("#clearActiveSession") {
    pending
  }
  describe("#clearDefaultSession") {
    pending
  }
  describe("#close") {
    pending
  }
  describe("#conf") {
    pending
  }
  describe("#createDataFrame") {
    pending
  }
  describe("#createDataset") {
    it("creates a Dataset from data and encoders") {
      // Includes a null row to show the tuple encoder tolerates null elements.
      val data = Seq(
        (("a", "b"), "c"),
        (null, "d")
      )
      val encoders = Encoders.tuple(
        Encoders.tuple(Encoders.STRING, Encoders.STRING),
        Encoders.STRING
      )
      val ds = spark.createDataset(data)(encoders)
      assert(ds.getClass().getName() === "org.apache.spark.sql.Dataset")
    }
  }
  describe("#emptyDataFrame") {
    pending
  }
  describe("#emptyDataset") {
    pending
  }
  describe("#experimental") {
    pending
  }
  describe("#implicits") {
    pending
  }
  describe("#listenerManager") {
    pending
  }
  describe("#newSession") {
    pending
  }
  describe("#range") {
    pending
  }
  describe("#read") {
    it("reads a CSV file into a DataFrame") {
      // The fixture has missing values, so all columns parse as nullable strings.
      val path = new java.io.File("./src/test/resources/people.csv").getCanonicalPath
      val actualDF = spark.read.option("header", "true").csv(path)
      val expectedSchema = List(
        StructField("name", StringType, true),
        StructField("country", StringType, true),
        StructField("zip_code", StringType, true)
      )
      val expectedData = List(
        Row("joe", "usa", "89013"),
        Row("ravi", "india", null),
        Row(null, null, "12389")
      )
      val expectedDF = spark.createDataFrame(
        spark.sparkContext.parallelize(expectedData),
        StructType(expectedSchema)
      )
      assertSmallDatasetEquality(actualDF, expectedDF)
    }
  }
  describe("#readStream") {
    pending
  }
  describe("#setActiveSession") {
    pending
  }
  describe("#sql") {
    pending
  }
  describe("#sqlContext") {
    pending
  }
  describe("#stop") {
    pending
  }
  describe("#streams") {
    pending
  }
  describe("#table") {
    pending
  }
  describe("#time") {
    pending
  }
  describe("#udf") {
    pending
  }
  describe("version") {
    it("returns the version of Spark on which this application is running") {
      // Pinned to the Spark version this suite is built against.
      assert(spark.version === "3.0.1")
    }
  }
}
| MrPowers/spark-spec | src/test/scala/com/github/mrpowers/spark/spec/sql/SparkSessionSpec.scala | Scala | mit | 2,857 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.sj.common.engine.core.environment
import com.bwsw.common.file.utils.FileStorage
import com.bwsw.sj.common.dal.model.stream.StreamDomain
import com.bwsw.sj.common.engine.core.reporting.PerformanceMetrics
import com.bwsw.sj.common.engine.core.state.StateStorage
import com.bwsw.sj.common.utils.SjTimer
import scala.collection.mutable
/**
* Class allowing to manage environment of module that has got a state
*
* @param stateStorage storage of state of module [[com.bwsw.sj.common.engine.core.state.StateStorage]]
* @param options user defined options from instance
* [[com.bwsw.sj.common.dal.model.instance.InstanceDomain.options]]
* @param outputs set of output streams [[com.bwsw.sj.common.dal.model.stream.StreamDomain]]
* from instance [[com.bwsw.sj.common.dal.model.instance.InstanceDomain.outputs]]
* @param producerPolicyByOutput keeps a tag (partitioned or round-robin output) corresponding to the output for each
* output stream
* @param moduleTimer provides a possibility to set a timer inside a module
* @param performanceMetrics set of metrics that characterize performance
* of [[com.bwsw.sj.common.utils.EngineLiterals.regularStreamingType]]
* or [[com.bwsw.sj.common.utils.EngineLiterals.batchStreamingType]] module
* @param fileStorage file storage
* @param senderThread thread for sending data to the T-Streams service
* @author Kseniya Mikhaleva
*/
class StatefulModuleEnvironmentManager(stateStorage: StateStorage,
                                       options: String,
                                       outputs: Array[StreamDomain],
                                       producerPolicyByOutput: mutable.Map[String, ModuleOutput],
                                       moduleTimer: SjTimer,
                                       performanceMetrics: PerformanceMetrics,
                                       fileStorage: FileStorage,
                                       senderThread: TStreamsSenderThread)
  extends ModuleEnvironmentManager(
    options,
    outputs,
    producerPolicyByOutput,
    moduleTimer,
    performanceMetrics,
    fileStorage,
    senderThread) {

  /**
   * Returns the storage where this module's state is kept.
   *
   * @return the [[com.bwsw.sj.common.engine.core.state.StateStorage]] passed at construction time
   */
  override def getState: StateStorage = {
    // Constant message: the redundant `s` interpolator (no interpolated values) was dropped.
    logger.info("Get a storage where states are kept.")
    stateStorage
  }
}
| bwsw/sj-platform | core/sj-common/src/main/scala/com/bwsw/sj/common/engine/core/environment/StatefulModuleEnvironmentManager.scala | Scala | apache-2.0 | 3,314 |
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.tools
import org.scalatest._
import org.scalatest.events._
import DispatchReporter.propagateDispose
/**
* FiterReporter catches exceptions that may be thrown by custom reporters, and doesn't forward
* reports that were not selected by the passed configuration.
*
* @author Bill Venners
*/
private[tools] class FilterReporter(reporter: Reporter, configSet: Set[ReporterConfigParam]) extends ResourcefulReporter {

  /**
   * Keeps a recorded event unless the configuration filters out its kind
   * (InfoProvided / MarkupProvided); any other recorded event is always kept.
   */
  private def keepRecorded(recorded: Event): Boolean =
    recorded match {
      case _: InfoProvided => !configSet.contains(FilterInfoProvided)
      case _: MarkupProvided => !configSet.contains(FilterMarkupProvided)
      case _ => true
    }

  /** Forwards the (lazily built) event only when `param` is not in the filter configuration. */
  private def unlessFiltered(param: ReporterConfigParam)(forward: => Unit): Unit =
    if (!configSet.contains(param)) forward

  /**
   * Dispatches `event` to the wrapped reporter, dropping event kinds the
   * configuration filters out and trimming filtered recorded events from
   * test outcome events. Run-level and abort events are always forwarded.
   */
  override def apply(event: Event) {
    event match {
      case e: RunStarting => reporter(e)
      case e: RunCompleted => reporter(e)
      case e: RunAborted => reporter(e)
      case e: RunStopped => reporter(e)
      case e: SuiteAborted => reporter(e)
      // TestFailed is never filtered, but its recorded events still are.
      case e: TestFailed => reporter(e.copy(recordedEvents = e.recordedEvents.filter(keepRecorded)))
      case e: SuiteCompleted => unlessFiltered(FilterSuiteCompleted)(reporter(e))
      case e: SuiteStarting => unlessFiltered(FilterSuiteStarting)(reporter(e))
      case e: TestStarting => unlessFiltered(FilterTestStarting)(reporter(e))
      case e: TestSucceeded =>
        unlessFiltered(FilterTestSucceeded) {
          reporter(e.copy(recordedEvents = e.recordedEvents.filter(keepRecorded)))
        }
      case e: TestIgnored => unlessFiltered(FilterTestIgnored)(reporter(e))
      case e: TestCanceled =>
        unlessFiltered(FilterTestCanceled) {
          reporter(e.copy(recordedEvents = e.recordedEvents.filter(keepRecorded)))
        }
      case e: TestPending =>
        unlessFiltered(FilterTestPending) {
          reporter(e.copy(recordedEvents = e.recordedEvents.filter(keepRecorded)))
        }
      case e: InfoProvided => unlessFiltered(FilterInfoProvided)(reporter(e))
      case e: ScopeOpened => unlessFiltered(FilterScopeOpened)(reporter(e))
      case e: ScopeClosed => unlessFiltered(FilterScopeClosed)(reporter(e))
      case e: MarkupProvided => unlessFiltered(FilterMarkupProvided)(reporter(e))
    }
  }

  /** Propagates dispose to the wrapped reporter so it can release resources. */
  override def dispose() = propagateDispose(reporter)
}
// Have some methods that translate chars & strings to Opts things, and vice versa?
| hubertp/scalatest | src/main/scala/org/scalatest/tools/FilterReporter.scala | Scala | apache-2.0 | 3,343 |
package spray.contrib.socketio.mq
import akka.actor.Actor
import akka.actor.ActorRef
import akka.actor.Terminated
import akka.contrib.pattern.DistributedPubSubMediator.{ Publish, Subscribe, SubscribeAck, Unsubscribe, UnsubscribeAck }
import akka.event.LoggingAdapter
import akka.pattern.ask
import akka.routing.ActorRefRoutee
import akka.routing.Router
import scala.concurrent.duration._
trait Publishable extends Actor {

  // Every currently watched subscriber ActorRef, regardless of group membership.
  var subscribers = Set[ActorRef]() // ActorRef of subscriber
  // Routees per group. A `None` group is broadcast to directly; named groups
  // are routed through `groupRouter` (load-balanced) in `publish`.
  var groupToSubscribers: Map[Option[String], Set[ActorRefRoutee]] = Map.empty.withDefaultValue(Set.empty)

  def log: LoggingAdapter

  def groupRouter: Router

  /** The topic name is this actor's own name. */
  def topic = self.path.name

  def publishableBehavior: Receive = {
    case x @ Subscribe(_, group, subscriber) =>
      insertSubscription(group, subscriber)
      sender() ! SubscribeAck(x)
      log.info("{} successfully subscribed to topic(me) [{}] under group [{}]", subscriber, topic, group)

    case x @ Unsubscribe(_, group, subscriber) =>
      removeSubscription(group, subscriber)
      sender() ! UnsubscribeAck(x)
      log.info("{} successfully unsubscribed to topic(me) [{}] under group [{}]", subscriber, topic, group)

    case Publish(_, msg, _) => publish(msg)

    case Terminated(ref)    => removeSubscription(ref)
  }

  /** Broadcasts to ungrouped subscribers; routes once per named group via `groupRouter`. */
  def publish(x: Any) {
    groupToSubscribers foreach {
      case (None, subscribers) => subscribers foreach (_.ref ! x)
      case (_, subscribers)    => groupRouter.withRoutees(subscribers.toVector).route(x, self)
    }
  }

  /** True when the subscriber is still a routee of at least one group. */
  def existsSubscriber(subscriber: ActorRef) = {
    groupToSubscribers exists { case (group, subscribers) => subscribers.contains(ActorRefRoutee(subscriber)) }
  }

  def insertSubscription(group: Option[String], subscriber: ActorRef) {
    if (!subscribers.contains(subscriber)) {
      context watch subscriber
      subscribers += subscriber
    }
    groupToSubscribers = groupToSubscribers.updated(group, groupToSubscribers(group) + ActorRefRoutee(subscriber))
  }

  def removeSubscription(group: Option[String], subscriber: ActorRef) {
    // Remove the routee from the group FIRST, then check whether the subscriber
    // remains in any group. The previous code checked existence before the map
    // update, so a subscriber leaving its only group was never unwatched and
    // stayed in `subscribers` forever (a watch/set leak).
    groupToSubscribers = groupToSubscribers.updated(group, groupToSubscribers(group) - ActorRefRoutee(subscriber))
    if (!existsSubscriber(subscriber)) {
      context unwatch subscriber
      subscribers -= subscriber
    }
  }

  /** Full removal on Terminated: drop the subscriber from every group. */
  def removeSubscription(subscriber: ActorRef) {
    context unwatch subscriber
    subscribers -= subscriber
    groupToSubscribers = for {
      (group, subscribers) <- groupToSubscribers
    } yield (group -> (subscribers - ActorRefRoutee(subscriber)))
  }
}
| wandoulabs/spray-socketio | spray-socketio/src/main/scala/spray/contrib/socketio/mq/Publishable.scala | Scala | apache-2.0 | 2,561 |
package leo.datastructures
import java.util.concurrent.atomic.AtomicInteger
/**
* Created by mwisnie on 2/3/16.
*/
object Store {

  // Shared counter used to generate unique names for unnamed formulas.
  // Declared as `val` (was `var`): the reference is never reassigned; only the
  // AtomicInteger's internal value changes, and it does so thread-safely.
  val unnamedFormulas: AtomicInteger = new AtomicInteger(0)

  /** Creates a store with a generated unique name and the current time stamp. */
  def apply(cl: Clause, role: Role, status: Int, annotation: ClauseAnnotation = NoAnnotation): FormulaStore
  = new FormulaStore("gen_formula_"+unnamedFormulas.incrementAndGet(), cl, TimeStamp(), role, status, annotation)

  /** Creates a store with an explicit name and the current time stamp. */
  def apply(name: String, cl: Clause, role: Role, status: Int, annotation: ClauseAnnotation): FormulaStore
  = new FormulaStore(name, cl, TimeStamp(), role, status, annotation)

  /** Creates a store with a generated unique name and an explicit creation time. */
  def apply(cl: Clause, created : TimeStamp, role: Role, status: Int, annotation: ClauseAnnotation): FormulaStore
  = new FormulaStore("gen_formula_"+unnamedFormulas.incrementAndGet(), cl, created, role, status, annotation)
}
| Ryugoron/Leonora | src/main/scala/leo/datastructures/Store.scala | Scala | mit | 826 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.plugin.transformation.filter
import java.io.{Serializable => JSerializable}
import com.stratio.sparta.sdk.pipeline.filter.Filter
import com.stratio.sparta.sdk.pipeline.schema.TypeOp
import com.stratio.sparta.sdk.pipeline.schema.TypeOp._
import com.stratio.sparta.sdk.pipeline.transformation.Parser
import com.stratio.sparta.sdk.properties.ValidatingPropertyMap._
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.StructType
class FilterParser(order: Integer,
                   inputField: Option[String],
                   outputFields: Seq[String],
                   val schema: StructType,
                   properties: Map[String, JSerializable])
  extends Parser(order, inputField, outputFields, schema, properties) with Filter {

  /** Raw filter expression read from the "filters" property, if configured. */
  def filterInput: Option[String] = properties.getString("filters", None)

  /** Filter values are not cast to a specific type by default. */
  def defaultCastingFilterType: TypeOp = TypeOp.Any

  /**
   * Keeps the row only when it passes the configured filters; the input field
   * is removed from surviving rows. Rows failing the filters are dropped.
   */
  override def parse(row: Row): Seq[Row] =
    applyFilters(row) match {
      // The matched value itself is unused (`_`, was a dead binding):
      // passing the filter is the only thing that matters here.
      case Some(_) => Seq(Row.fromSeq(removeInputField(row)))
      case None => Seq.empty[Row]
    }
}
| fjsc/sparta | plugins/src/main/scala/com/stratio/sparta/plugin/transformation/filter/FilterParser.scala | Scala | apache-2.0 | 1,732 |
package lila.search
import play.api.libs.json._
import play.api.libs.ws._
import play.api.libs.ws.JsonBodyWritables._
import scala.annotation.nowarn
/**
 * Client interface to the external search service. Sealed: the only
 * implementations are the HTTP-backed [[ESClientHttp]] and the no-op
 * [[ESClientStub]] defined in this file.
 */
sealed trait ESClient {

  /** Runs a query and returns a page of results (`from`/`size` paging). */
  def search[Q: Writes](query: Q, from: From, size: Size): Fu[SearchResponse]

  /** Counts the documents matching the query. */
  def count[Q: Writes](query: Q): Fu[CountResponse]

  /** Stores (indexes) one document under the given id. */
  def store(id: Id, doc: JsObject): Funit

  /** Deletes the document with the given id. */
  def deleteById(id: Id): Funit

  /** Deletes every document whose id is in the list. */
  def deleteByIds(ids: List[Id]): Funit

  /** Asks the backend to refresh its index. */
  def refresh: Funit
}
/**
 * [[ESClient]] implementation that talks to the search service over HTTP
 * using play-ws. All payloads are JSON; endpoints are built from
 * `config.endpoint` plus the index name.
 */
final class ESClientHttp(
    ws: StandaloneWSClient,
    config: SearchConfig,
    val index: Index
)(implicit ec: scala.concurrent.ExecutionContext)
    extends ESClient {

  // `config.writeable ?? f` — lila helper: runs `f` only when writes are
  // enabled, otherwise yields a unit future. NOTE(review): semantics assumed
  // from usage; confirm against lila-common's boolean extension ops.
  def store(id: Id, doc: JsObject) =
    config.writeable ?? monitor("store") {
      HTTP(s"store/${index.name}/${id.value}", doc)
    }

  // `.dmap(~_)` presumably unwraps the Option with a zero/empty default
  // (empty SearchResponse on non-200) — TODO confirm against lila helpers.
  def search[Q: Writes](query: Q, from: From, size: Size) =
    monitor("search") {
      HTTP(s"search/${index.name}/${from.value}/${size.value}", query, SearchResponse.apply)
        .dmap(~_)
    }

  def count[Q: Writes](query: Q) =
    monitor("count") {
      HTTP(s"count/${index.name}", query, CountResponse.apply)
        .dmap(~_)
    }

  def deleteById(id: lila.search.Id) =
    config.writeable ??
      HTTP(s"delete/id/${index.name}/${id.value}", Json.obj())

  def deleteByIds(ids: List[lila.search.Id]) =
    config.writeable ??
      HTTP(s"delete/ids/${index.name}", Json.obj("ids" -> ids.map(_.value)))

  // (Re)creates the index mapping; not part of the ESClient trait.
  def putMapping =
    HTTP(s"mapping/${index.name}", Json.obj())

  // Bulk store: each document is sent as an id -> stringified-JSON pair.
  def storeBulk(docs: Seq[(Id, JsObject)]) =
    HTTP(
      s"store/bulk/${index.name}",
      JsObject(docs map { case (Id(id), doc) =>
        id -> JsString(Json.stringify(doc))
      })
    )

  def refresh =
    HTTP(s"refresh/${index.name}", Json.obj())

  // POSTs `data` as JSON; 200 -> parsed Some, 400 -> None, anything else fails
  // the future with "<url> <status>".
  private[search] def HTTP[D: Writes, R](url: String, data: D, read: String => R): Fu[Option[R]] =
    ws.url(s"${config.endpoint}/$url").post(Json toJson data) flatMap {
      case res if res.status == 200 => fuccess(read(res.body).some)
      case res if res.status == 400 => fuccess(none)
      case res                      => fufail(s"$url ${res.status}")
    }

  // Fire-and-forget variant: discards the parsed body.
  private[search] def HTTP(url: String, data: JsObject): Funit = HTTP(url, data, _ => ()).void

  // Records timing + success/failure metrics for the wrapped operation.
  private def monitor[A](op: String)(f: Fu[A]) =
    f.monTry(res => _.search.time(op, index.name, res.isSuccess))
}
/**
 * No-op [[ESClient]] for environments without a search service: reads return
 * empty results, writes succeed without doing anything.
 */
final class ESClientStub extends ESClient {
  def search[Q: Writes](query: Q, from: From, size: Size) = fuccess(SearchResponse(Nil))
  def count[Q: Writes](query: Q)                          = fuccess(CountResponse(0))
  def store(id: Id, doc: JsObject)                        = funit
  // `docs` is deliberately unused; @nowarn silences the unused-parameter warning.
  @nowarn("msg=parameter value")
  def storeBulk(docs: Seq[(Id, JsObject)]) = funit
  def deleteById(id: Id)                   = funit
  def deleteByIds(ids: List[Id])           = funit
  def putMapping                           = funit
  def refresh                              = funit
}
| luanlv/lila | modules/search/src/main/ESClient.scala | Scala | mit | 2,872 |
package se.ramn.bottfarmen.simulation
import collection.immutable.IndexedSeq
import TileMap.Tile
import se.ramn.bottfarmen.simulation.entity.Position
import se.ramn.bottfarmen.util.loadTextFileFromClassPath
/**
 * A rectangular tile map. Rows are indexed 0 until `rowCount`, columns
 * 0 until `colCount`; each cell is a [[TileMap.Tile]] (a Char, see [[Tiles]]).
 */
trait TileMap {
  val rowCount: Int
  val colCount: Int
  // Outer sequence = rows (row-major); rows(r)(c) is the tile at row r, column c.
  val rows: IndexedSeq[IndexedSeq[Tile]]
  // Bot starting positions parsed from digit cells, sorted by id.
  def startingPositions: Seq[StartingPosition]
  // True when the position's row/col fall inside the map bounds.
  def isWithinMap(position: Position): Boolean
  // True when the position is in bounds and its tile can be walked on.
  def isWalkable(position: Position): Boolean
  // The tile at the position, or None when out of bounds.
  def tile(position: Position): Option[Tile]
}

/** A bot's starting cell: `id` comes from the digit written on the map. */
case class StartingPosition(id: Int, row: Int, col: Int)
/** The characters a map cell may hold (same alphabet as the map file format). */
object Tiles {
  val Land = '.'
  val Water = '~'   // the only non-walkable tile (see TileMap.isWalkable)
  val Food = 'f'
  val Mountain = '^'
  // Digit cells double as bot home/starting positions.
  val HomeCommander0 = '0'
  val HomeCommander1 = '1'
}
object TileMap {
  type Tile = Char

  /** Loads and parses a map file bundled on the classpath. */
  def loadFromClassPath(path: String): TileMap = {
    parse(loadTextFileFromClassPath(path))
  }

  /** Uses the MAP_FILE environment variable when set, otherwise the given default path. */
  def fromEnvOrDefault(defaultMapPath: String) = {
    val mapPath = sys.env.getOrElse("MAP_FILE", defaultMapPath)
    val absoluteJarPath =
      if (mapPath.startsWith("/")) mapPath
      else "/" + mapPath
    loadFromClassPath(absoluteJarPath)
  }

  /*
   * The map file uses the same format as Ants AI Challenge.
   * Comment lines start with #
   * Then there are header rows, key/value pairs.
   * Then the actual map rows, prefixed with "m "
   */
  def parse(rawMap: String): TileMap = {
    def isComment(row: String) = row.trim.startsWith("#")
    def isMap(row: String) = row.trim.startsWith("m ")

    // Key/value headers: every non-comment, non-map line, split on a space.
    val headers: Map[String, String] = {
      for {
        row <- rawMap.lines
        if !isComment(row)
        if !isMap(row)
        pair = row.split(" ")
        key = pair(0)
        value = pair(1)
      } yield key -> value
    }.toMap

    // Map rows, with the "m " prefix stripped.
    val map = for {
      row <- rawMap.lines.toIndexedSeq
      if isMap(row)
    } yield row.trim.drop(2).toIndexedSeq

    // Digit cells mark starting positions; the digit is the bot id.
    val startingPos = {
      for {
        (rows, rowIx) <- map.zipWithIndex
        (cell, colIx) <- rows.zipWithIndex
        if cell.isDigit
        id = cell.toString.toInt
      } yield StartingPosition(id=id, row=rowIx, col=colIx)
    }.sortBy(_.id)

    new TileMap {
      override val rowCount = headers("rows").toInt
      override val colCount = headers("cols").toInt
      override val rows = map
      override val startingPositions = startingPos

      override def isWithinMap(position: Position): Boolean = {
        val (row, col) = (position.row, position.col)
        // Valid indices are 0 until rowCount/colCount. The previous check
        // (`rowCount >= row && ... colCount >= col`) was off by one: it
        // accepted row == rowCount / col == colCount, one past the last
        // index, which made `tile` throw IndexOutOfBoundsException.
        row >= 0 && row < rowCount && col >= 0 && col < colCount
      }

      override def tile(position: Position) = {
        if (isWithinMap(position)) {
          Some(rows(position.row)(position.col))
        } else {
          None
        }
      }

      override def isWalkable(position: Position): Boolean = {
        val nonWalkable = Set(Tiles.Water)
        tile(position).filterNot(nonWalkable).isDefined
      }
    }
  }
}
| ramn/bottfarmen | common/src/main/scala/simulation/TileMap.scala | Scala | gpl-3.0 | 2,839 |
package mesosphere.marathon.core.externalvolume
import com.wix.accord.Validator
import mesosphere.marathon.state.ExternalVolume
// Marker trait; carries no members in this stripped-down module.
trait ExternalVolumes

object ExternalVolumes {
  // Stub: `???` throws scala.NotImplementedError if called. Any caller of
  // validExternalVolume will fail at runtime until this is implemented.
  def validExternalVolume: Validator[ExternalVolume] = ???
}
| vivint-smarthome/ceph-on-mesos | marathon-submodule/src/main/scala/mesosphere/marathon/core/externalvolume/ExternalVolumes.scala | Scala | apache-2.0 | 238 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.io._
import java.lang.management.{LockInfo, ManagementFactory, MonitorInfo, ThreadInfo}
import java.lang.reflect.InvocationTargetException
import java.math.{MathContext, RoundingMode}
import java.net._
import java.nio.ByteBuffer
import java.nio.channels.{Channels, FileChannel}
import java.nio.charset.StandardCharsets
import java.nio.file.Files
import java.util.{Locale, Properties, Random, UUID}
import java.util.concurrent._
import java.util.concurrent.atomic.AtomicBoolean
import java.util.zip.GZIPInputStream
import javax.net.ssl.HttpsURLConnection
import scala.annotation.tailrec
import scala.collection.JavaConverters._
import scala.collection.Map
import scala.collection.mutable.ArrayBuffer
import scala.io.Source
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Try}
import scala.util.control.{ControlThrowable, NonFatal}
import scala.util.matching.Regex
import _root_.io.netty.channel.unix.Errors.NativeIoException
import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import com.google.common.io.{ByteStreams, Files => GFiles}
import com.google.common.net.InetAddresses
import org.apache.commons.lang3.SystemUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, FileUtil, Path}
import org.apache.hadoop.security.UserGroupInformation
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.eclipse.jetty.util.MultiException
import org.slf4j.Logger
import org.apache.spark._
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.launcher.SparkLauncher
import org.apache.spark.network.util.JavaUtils
import org.apache.spark.serializer.{DeserializationStream, SerializationStream, SerializerInstance}
import org.apache.spark.status.api.v1.{StackTrace, ThreadStackTrace}
/** CallSite represents a place in user code. It can have a short and a long form. */
private[spark] case class CallSite(shortForm: String, longForm: String)

private[spark] object CallSite {
  // Property keys under which the two forms are stored (e.g. in local properties).
  val SHORT_FORM = "callSite.short"
  val LONG_FORM = "callSite.long"
  // A call site with no information at all.
  val empty = CallSite("", "")
}
/**
* Various utility methods used by Spark.
*/
private[spark] object Utils extends Logging {
  // Shared RNG; seeded from the default Random() constructor (time-based).
  val random = new Random()

  private val sparkUncaughtExceptionHandler = new SparkUncaughtExceptionHandler

  /**
   * Define a default value for driver memory here since this value is referenced across the code
   * base and nearly all files already use Utils.scala
   */
  val DEFAULT_DRIVER_MEM_MB = JavaUtils.DEFAULT_DRIVER_MEM_MB.toInt

  // How many times createDirectory retries before giving up with an IOException.
  private val MAX_DIR_CREATION_ATTEMPTS: Int = 10
  // Cached local root dirs; volatile because it is read/written across threads.
  @volatile private var localRootDirs: Array[String] = null

  /**
   * The performance overhead of creating and logging strings for wide schemas can be large. To
   * limit the impact, we bound the number of fields to include by default. This can be overridden
   * by setting the 'spark.debug.maxToStringFields' conf in SparkEnv.
   */
  val DEFAULT_MAX_TO_STRING_FIELDS = 25

  // Reads the limit from the live SparkConf when a SparkEnv exists (e.g. on
  // executors); falls back to the default before the env is initialized.
  private def maxNumToStringFields = {
    if (SparkEnv.get != null) {
      SparkEnv.get.conf.getInt("spark.debug.maxToStringFields", DEFAULT_MAX_TO_STRING_FIELDS)
    } else {
      DEFAULT_MAX_TO_STRING_FIELDS
    }
  }
  /** Whether we have warned about plan string truncation yet. */
  private val truncationWarningPrinted = new AtomicBoolean(false)

  /**
   * Format a sequence with semantics similar to calling .mkString(). Any elements beyond
   * maxNumToStringFields will be dropped and replaced by a "... N more fields" placeholder.
   *
   * @return the trimmed and formatted string.
   */
  def truncatedString[T](
      seq: Seq[T],
      start: String,
      sep: String,
      end: String,
      maxNumFields: Int = maxNumToStringFields): String = {
    if (seq.length > maxNumFields) {
      // compareAndSet guarantees the warning is logged at most once per JVM.
      if (truncationWarningPrinted.compareAndSet(false, true)) {
        logWarning(
          "Truncated the string representation of a plan since it was too large. This " +
            "behavior can be adjusted by setting 'spark.debug.maxToStringFields' in SparkEnv.conf.")
      }
      // Reserve one slot for the "... N more fields" marker itself.
      val numFields = math.max(0, maxNumFields - 1)
      seq.take(numFields).mkString(
        start, sep, sep + "... " + (seq.length - numFields) + " more fields" + end)
    } else {
      seq.mkString(start, sep, end)
    }
  }

  /** Shorthand for calling truncatedString() without start or end strings. */
  def truncatedString[T](seq: Seq[T], sep: String): String = truncatedString(seq, "", sep, "")
/** Serialize an object using Java serialization */
def serialize[T](o: T): Array[Byte] = {
val bos = new ByteArrayOutputStream()
val oos = new ObjectOutputStream(bos)
oos.writeObject(o)
oos.close()
bos.toByteArray
}
/** Deserialize an object using Java serialization */
def deserialize[T](bytes: Array[Byte]): T = {
val bis = new ByteArrayInputStream(bytes)
val ois = new ObjectInputStream(bis)
ois.readObject.asInstanceOf[T]
}
/** Deserialize an object using Java serialization and the given ClassLoader */
def deserialize[T](bytes: Array[Byte], loader: ClassLoader): T = {
val bis = new ByteArrayInputStream(bytes)
val ois = new ObjectInputStream(bis) {
override def resolveClass(desc: ObjectStreamClass): Class[_] = {
// scalastyle:off classforname
Class.forName(desc.getName, false, loader)
// scalastyle:on classforname
}
}
ois.readObject.asInstanceOf[T]
}
/** Deserialize a Long value (used for [[org.apache.spark.api.python.PythonPartitioner]]) */
def deserializeLongValue(bytes: Array[Byte]) : Long = {
// Note: we assume that we are given a Long value encoded in network (big-endian) byte order
var result = bytes(7) & 0xFFL
result = result + ((bytes(6) & 0xFFL) << 8)
result = result + ((bytes(5) & 0xFFL) << 16)
result = result + ((bytes(4) & 0xFFL) << 24)
result = result + ((bytes(3) & 0xFFL) << 32)
result = result + ((bytes(2) & 0xFFL) << 40)
result = result + ((bytes(1) & 0xFFL) << 48)
result + ((bytes(0) & 0xFFL) << 56)
}
  /** Serialize via nested stream using specific serializer */
  def serializeViaNestedStream(os: OutputStream, ser: SerializerInstance)(
      f: SerializationStream => Unit): Unit = {
    // Wrap `os` so the serializer's own stream framing is layered on top of it
    // without taking ownership: closing the wrapper flushes the serializer's
    // state but the caller keeps responsibility for `os` itself.
    val osWrapper = ser.serializeStream(new OutputStream {
      override def write(b: Int): Unit = os.write(b)
      override def write(b: Array[Byte], off: Int, len: Int): Unit = os.write(b, off, len)
    })
    try {
      f(osWrapper)
    } finally {
      osWrapper.close()
    }
  }

  /** Deserialize via nested stream using specific serializer */
  def deserializeViaNestedStream(is: InputStream, ser: SerializerInstance)(
      f: DeserializationStream => Unit): Unit = {
    // Mirror of serializeViaNestedStream: the wrapper is closed here, the
    // underlying `is` is left open for the caller.
    val isWrapper = ser.deserializeStream(new InputStream {
      override def read(): Int = is.read()
      override def read(b: Array[Byte], off: Int, len: Int): Int = is.read(b, off, len)
    })
    try {
      f(isWrapper)
    } finally {
      isWrapper.close()
    }
  }
  /**
   * Get the ClassLoader which loaded Spark.
   */
  def getSparkClassLoader: ClassLoader = getClass.getClassLoader

  /**
   * Get the Context ClassLoader on this thread or, if not present, the ClassLoader that
   * loaded Spark.
   *
   * This should be used whenever passing a ClassLoader to Class.ForName or finding the currently
   * active loader when setting up ClassLoader delegation chains.
   */
  def getContextOrSparkClassLoader: ClassLoader =
    Option(Thread.currentThread().getContextClassLoader).getOrElse(getSparkClassLoader)

  /** Determines whether the provided class is loadable in the current thread. */
  def classIsLoadable(clazz: String): Boolean = {
    // scalastyle:off classforname
    // initialize = false: only checks resolvability, does not run static initializers.
    Try { Class.forName(clazz, false, getContextOrSparkClassLoader) }.isSuccess
    // scalastyle:on classforname
  }

  // scalastyle:off classforname
  /** Preferred alternative to Class.forName(className) */
  def classForName(className: String): Class[_] = {
    // initialize = true: static initializers run, matching Class.forName(name) semantics.
    Class.forName(className, true, getContextOrSparkClassLoader)
    // scalastyle:on classforname
  }
/**
* Primitive often used when writing [[java.nio.ByteBuffer]] to [[java.io.DataOutput]]
*/
def writeByteBuffer(bb: ByteBuffer, out: DataOutput): Unit = {
if (bb.hasArray) {
out.write(bb.array(), bb.arrayOffset() + bb.position(), bb.remaining())
} else {
val originalPosition = bb.position()
val bbval = new Array[Byte](bb.remaining())
bb.get(bbval)
out.write(bbval)
bb.position(originalPosition)
}
}
/**
* Primitive often used when writing [[java.nio.ByteBuffer]] to [[java.io.OutputStream]]
*/
def writeByteBuffer(bb: ByteBuffer, out: OutputStream): Unit = {
if (bb.hasArray) {
out.write(bb.array(), bb.arrayOffset() + bb.position(), bb.remaining())
} else {
val originalPosition = bb.position()
val bbval = new Array[Byte](bb.remaining())
bb.get(bbval)
out.write(bbval)
bb.position(originalPosition)
}
}
  /**
   * JDK equivalent of `chmod 700 file`.
   *
   * Each permission is first revoked for everyone, then granted back to the
   * owner only. Short-circuits on the first failing call.
   *
   * @param file the file whose permissions will be modified
   * @return true if the permissions were successfully changed, false otherwise.
   */
  def chmod700(file: File): Boolean = {
    file.setReadable(false, false) &&    // clear read for all
    file.setReadable(true, true) &&      // grant read to owner
    file.setWritable(false, false) &&    // clear write for all
    file.setWritable(true, true) &&      // grant write to owner
    file.setExecutable(false, false) &&  // clear execute for all
    file.setExecutable(true, true)       // grant execute to owner
  }
  /**
   * Create a directory inside the given parent directory. The directory is guaranteed to be
   * newly created, and is not marked for automatic deletion.
   */
  def createDirectory(root: String, namePrefix: String = "spark"): File = {
    var attempts = 0
    val maxAttempts = MAX_DIR_CREATION_ATTEMPTS
    var dir: File = null
    while (dir == null) {
      attempts += 1
      if (attempts > maxAttempts) {
        throw new IOException("Failed to create a temp directory (under " + root + ") after " +
          maxAttempts + " attempts!")
      }
      try {
        // UUID-based name: collisions are practically impossible, but mkdirs
        // can still fail (permissions, races), hence the retry loop.
        dir = new File(root, namePrefix + "-" + UUID.randomUUID.toString)
        if (dir.exists() || !dir.mkdirs()) {
          dir = null
        }
      } catch { case e: SecurityException => dir = null; }
    }

    dir.getCanonicalFile
  }

  /**
   * Create a temporary directory inside the given parent directory. The directory will be
   * automatically deleted when the VM shuts down.
   */
  def createTempDir(
      root: String = System.getProperty("java.io.tmpdir"),
      namePrefix: String = "spark"): File = {
    val dir = createDirectory(root, namePrefix)
    // Registering with the shutdown hook is what makes this "temp".
    ShutdownHookManager.registerShutdownDeleteDir(dir)
    dir
  }
  /**
   * Copy all data from an InputStream to an OutputStream. NIO way of file stream to file stream
   * copying is disabled by default unless explicitly set transferToEnabled as true,
   * the parameter transferToEnabled should be configured by spark.file.transferTo = [true|false].
   *
   * @return the number of bytes copied.
   */
  def copyStream(
      in: InputStream,
      out: OutputStream,
      closeStreams: Boolean = false,
      transferToEnabled: Boolean = false): Long = {
    tryWithSafeFinally {
      if (in.isInstanceOf[FileInputStream] && out.isInstanceOf[FileOutputStream]
        && transferToEnabled) {
        // When both streams are File stream, use transferTo to improve copy performance.
        val inChannel = in.asInstanceOf[FileInputStream].getChannel()
        val outChannel = out.asInstanceOf[FileOutputStream].getChannel()
        val size = inChannel.size()
        copyFileStreamNIO(inChannel, outChannel, 0, size)
        size
      } else {
        // Fallback: plain 8KB buffered copy, counting bytes as we go.
        var count = 0L
        val buf = new Array[Byte](8192)
        var n = 0
        while (n != -1) {
          n = in.read(buf)
          if (n != -1) {
            out.write(buf, 0, n)
            count += n
          }
        }
        count
      }
    } {
      if (closeStreams) {
        // Nested try/finally so `out` is closed even if closing `in` throws.
        try {
          in.close()
        } finally {
          out.close()
        }
      }
    }
  }
  /**
   * Copies `bytesToCopy` bytes from `input` (starting at `startPosition`) to
   * `output` using FileChannel.transferTo, looping because transferTo may
   * transfer fewer bytes than requested in one call.
   */
  def copyFileStreamNIO(
      input: FileChannel,
      output: FileChannel,
      startPosition: Long,
      bytesToCopy: Long): Unit = {
    val initialPos = output.position()
    var count = 0L
    // In case transferTo method transferred less data than we have required.
    while (count < bytesToCopy) {
      count += input.transferTo(count + startPosition, bytesToCopy - count, output)
    }
    assert(count == bytesToCopy,
      s"request to copy $bytesToCopy bytes, but actually copied $count bytes.")

    // Check the position after transferTo loop to see if it is in the right position and
    // give user information if not.
    // Position will not be increased to the expected length after calling transferTo in
    // kernel version 2.6.32, this issue can be seen in
    // https://bugs.openjdk.java.net/browse/JDK-7052359
    // This will lead to stream corruption issue when using sort-based shuffle (SPARK-3948).
    val finalPos = output.position()
    val expectedPos = initialPos + bytesToCopy
    assert(finalPos == expectedPos,
      s"""
         |Current position $finalPos do not equal to expected position $expectedPos
         |after transferTo, please check your kernel version to see if it is 2.6.32,
         |this is a kernel bug which will lead to unexpected behavior when using transferTo.
         |You can set spark.file.transferTo = false to disable this NIO feature.
       """.stripMargin)
  }
  /**
   * Construct a URI container information used for authentication.
   * This also sets the default authenticator to properly negotiation the
   * user/password based on the URI.
   *
   * Note this relies on the Authenticator.setDefault being set properly to decode
   * the user name and password. This is currently set in the SecurityManager.
   */
  def constructURIForAuthentication(uri: URI, securityMgr: SecurityManager): URI = {
    val userCred = securityMgr.getSecretKey()
    if (userCred == null) throw new Exception("Secret key is null with authentication on")
    // Embed "user:secret" as the userInfo component of the rebuilt URI.
    val userInfo = securityMgr.getHttpUser() + ":" + userCred
    new URI(uri.getScheme(), userInfo, uri.getHost(), uri.getPort(), uri.getPath(),
      uri.getQuery(), uri.getFragment())
  }
/**
* A file name may contain some invalid URI characters, such as " ". This method will convert the
* file name to a raw path accepted by `java.net.URI(String)`.
*
* Note: the file name must not contain "/" or "\\"
*/
def encodeFileNameToURIRawPath(fileName: String): String = {
require(!fileName.contains("/") && !fileName.contains("\\\\"))
// `file` and `localhost` are not used. Just to prevent URI from parsing `fileName` as
// scheme or host. The prefix "/" is required because URI doesn't accept a relative path.
// We should remove it after we get the raw path.
new URI("file", null, "localhost", -1, "/" + fileName, null, null).getRawPath.substring(1)
}
/**
* Get the file name from uri's raw path and decode it. If the raw path of uri ends with "/",
* return the name before the last "/".
*/
def decodeFileNameInURI(uri: URI): String = {
val rawPath = uri.getRawPath
val rawFileName = rawPath.split("/").last
new URI("file:///" + rawFileName).getPath.substring(1)
}
  /**
   * Download a file or directory to target directory. Supports fetching the file in a variety of
   * ways, including HTTP, Hadoop-compatible filesystems, and files on a standard filesystem, based
   * on the URL parameter. Fetching directories is only supported from Hadoop-compatible
   * filesystems.
   *
   * If `useCache` is true, first attempts to fetch the file to a local cache that's shared
   * across executors running the same application. `useCache` is used mainly for
   * the executors, and not in local mode.
   *
   * Throws SparkException if the target file already exists and has different contents than
   * the requested file.
   */
  def fetchFile(
      url: String,
      targetDir: File,
      conf: SparkConf,
      securityMgr: SecurityManager,
      hadoopConf: Configuration,
      timestamp: Long,
      useCache: Boolean): File = {
    val fileName = decodeFileNameInURI(new URI(url))
    val targetFile = new File(targetDir, fileName)
    val fetchCacheEnabled = conf.getBoolean("spark.files.useFetchCache", defaultValue = true)
    if (useCache && fetchCacheEnabled) {
      // Cache key is derived from the URL hash plus the timestamp, so a newer
      // version of the same URL gets a fresh cache entry.
      val cachedFileName = s"${url.hashCode}${timestamp}_cache"
      val lockFileName = s"${url.hashCode}${timestamp}_lock"
      val localDir = new File(getLocalDir(conf))
      val lockFile = new File(localDir, lockFileName)
      val lockFileChannel = new RandomAccessFile(lockFile, "rw").getChannel()
      // Only one executor entry.
      // The FileLock is only used to control synchronization for executors download file,
      // it's always safe regardless of lock type (mandatory or advisory).
      val lock = lockFileChannel.lock()
      val cachedFile = new File(localDir, cachedFileName)
      try {
        if (!cachedFile.exists()) {
          doFetchFile(url, localDir, cachedFileName, conf, securityMgr, hadoopConf)
        }
      } finally {
        lock.release()
        lockFileChannel.close()
      }
      copyFile(
        url,
        cachedFile,
        targetFile,
        conf.getBoolean("spark.files.overwrite", false)
      )
    } else {
      doFetchFile(url, targetDir, fileName, conf, securityMgr, hadoopConf)
    }

    // Decompress the file if it's a .tar or .tar.gz
    if (fileName.endsWith(".tar.gz") || fileName.endsWith(".tgz")) {
      logInfo("Untarring " + fileName)
      executeAndGetOutput(Seq("tar", "-xzf", fileName), targetDir)
    } else if (fileName.endsWith(".tar")) {
      logInfo("Untarring " + fileName)
      executeAndGetOutput(Seq("tar", "-xf", fileName), targetDir)
    }
    // Make the file executable - That's necessary for scripts
    FileUtil.chmod(targetFile.getAbsolutePath, "a+x")

    // Windows does not grant read permission by default to non-admin users
    // Add read permission to owner explicitly
    if (isWindows) {
      FileUtil.chmod(targetFile.getAbsolutePath, "u+r")
    }

    targetFile
  }
  /**
   * Download `in` to `tempFile`, then move it to `destFile`.
   *
   * If `destFile` already exists:
   *   - no-op if its contents equal those of `sourceFile`,
   *   - throw an exception if `fileOverwrite` is false,
   *   - attempt to overwrite it otherwise.
   *
   * @param url URL that `sourceFile` originated from, for logging purposes.
   * @param in InputStream to download.
   * @param destFile File path to move `tempFile` to.
   * @param fileOverwrite Whether to delete/overwrite an existing `destFile` that does not match
   *                      `sourceFile`
   */
  private def downloadFile(
      url: String,
      in: InputStream,
      destFile: File,
      fileOverwrite: Boolean): Unit = {
    // Temp file is created in destFile's own directory so the final move
    // stays on the same filesystem (cheap rename instead of a copy).
    val tempFile = File.createTempFile("fetchFileTemp", null,
      new File(destFile.getParentFile.getAbsolutePath))
    logInfo(s"Fetching $url to $tempFile")

    try {
      val out = new FileOutputStream(tempFile)
      Utils.copyStream(in, out, closeStreams = true)
      copyFile(url, tempFile, destFile, fileOverwrite, removeSourceFile = true)
    } finally {
      // Catch-all for the couple of cases where for some reason we didn't move `tempFile` to
      // `destFile`.
      if (tempFile.exists()) {
        tempFile.delete()
      }
    }
  }
  /**
   * Copy `sourceFile` to `destFile`.
   *
   * If `destFile` already exists:
   *   - no-op if its contents equal those of `sourceFile`,
   *   - throw an exception if `fileOverwrite` is false,
   *   - attempt to overwrite it otherwise.
   *
   * @param url URL that `sourceFile` originated from, for logging purposes.
   * @param sourceFile File path to copy/move from.
   * @param destFile File path to copy/move to.
   * @param fileOverwrite Whether to delete/overwrite an existing `destFile` that does not match
   *                      `sourceFile`
   * @param removeSourceFile Whether to remove `sourceFile` after / as part of moving/copying it to
   *                         `destFile`.
   */
  private def copyFile(
      url: String,
      sourceFile: File,
      destFile: File,
      fileOverwrite: Boolean,
      removeSourceFile: Boolean = false): Unit = {
    if (destFile.exists) {
      if (!filesEqualRecursive(sourceFile, destFile)) {
        if (fileOverwrite) {
          logInfo(
            s"File $destFile exists and does not match contents of $url, replacing it with $url"
          )
          // Remove the stale destination first; a failed delete must abort the copy.
          if (!destFile.delete()) {
            throw new SparkException(
              "Failed to delete %s while attempting to overwrite it with %s".format(
                destFile.getAbsolutePath,
                sourceFile.getAbsolutePath
              )
            )
          }
        } else {
          throw new SparkException(
            s"File $destFile exists and does not match contents of $url")
        }
      } else {
        // Do nothing if the file contents are the same, i.e. this file has been copied
        // previously.
        logInfo(
          "%s has been previously copied to %s".format(
            sourceFile.getAbsolutePath,
            destFile.getAbsolutePath
          )
        )
        return
      }
    }
    // The file does not exist in the target directory. Copy or move it there.
    if (removeSourceFile) {
      Files.move(sourceFile.toPath, destFile.toPath)
    } else {
      logInfo(s"Copying ${sourceFile.getAbsolutePath} to ${destFile.getAbsolutePath}")
      copyRecursive(sourceFile, destFile)
    }
  }
private def filesEqualRecursive(file1: File, file2: File): Boolean = {
if (file1.isDirectory && file2.isDirectory) {
val subfiles1 = file1.listFiles()
val subfiles2 = file2.listFiles()
if (subfiles1.size != subfiles2.size) {
return false
}
subfiles1.sortBy(_.getName).zip(subfiles2.sortBy(_.getName)).forall {
case (f1, f2) => filesEqualRecursive(f1, f2)
}
} else if (file1.isFile && file2.isFile) {
GFiles.equal(file1, file2)
} else {
false
}
}
private def copyRecursive(source: File, dest: File): Unit = {
if (source.isDirectory) {
if (!dest.mkdir()) {
throw new IOException(s"Failed to create directory ${dest.getPath}")
}
val subfiles = source.listFiles()
subfiles.foreach(f => copyRecursive(f, new File(dest, f.getName)))
} else {
Files.copy(source.toPath, dest.toPath)
}
}
  /**
   * Download a file or directory to target directory. Supports fetching the file in a variety of
   * ways, including HTTP, Hadoop-compatible filesystems, and files on a standard filesystem, based
   * on the URL parameter. Fetching directories is only supported from Hadoop-compatible
   * filesystems.
   *
   * Throws SparkException if the target file already exists and has different contents than
   * the requested file.
   *
   * @return the target file (`targetDir`/`filename`).
   */
  def doFetchFile(
      url: String,
      targetDir: File,
      filename: String,
      conf: SparkConf,
      securityMgr: SecurityManager,
      hadoopConf: Configuration): File = {
    val targetFile = new File(targetDir, filename)
    val uri = new URI(url)
    val fileOverwrite = conf.getBoolean("spark.files.overwrite", defaultValue = false)
    // Dispatch on the URI scheme; a missing scheme is treated as a local file path.
    Option(uri.getScheme).getOrElse("file") match {
      case "spark" =>
        // Fetch through Spark's own RPC file-serving channel; requires a live SparkEnv.
        if (SparkEnv.get == null) {
          throw new IllegalStateException(
            "Cannot retrieve files with 'spark' scheme without an active SparkEnv.")
        }
        val source = SparkEnv.get.rpcEnv.openChannel(url)
        val is = Channels.newInputStream(source)
        downloadFile(url, is, targetFile, fileOverwrite)
      case "http" | "https" | "ftp" =>
        var uc: URLConnection = null
        if (securityMgr.isAuthenticationEnabled()) {
          logDebug("fetchFile with security enabled")
          // Embed auth info in the URI when Spark authentication is on.
          val newuri = constructURIForAuthentication(uri, securityMgr)
          uc = newuri.toURL().openConnection()
          uc.setAllowUserInteraction(false)
        } else {
          logDebug("fetchFile not using security")
          uc = new URL(url).openConnection()
        }
        // The same timeout (configured in seconds) is applied to both connect and read.
        val timeoutMs =
          conf.getTimeAsSeconds("spark.files.fetchTimeout", "60s").toInt * 1000
        uc.setConnectTimeout(timeoutMs)
        uc.setReadTimeout(timeoutMs)
        uc.connect()
        val in = uc.getInputStream()
        downloadFile(url, in, targetFile, fileOverwrite)
      case "file" =>
        // In the case of a local file, copy the local file to the target directory.
        // Note the difference between uri vs url.
        val sourceFile = if (uri.isAbsolute) new File(uri) else new File(url)
        copyFile(url, sourceFile, targetFile, fileOverwrite)
      case _ =>
        // Any other scheme is assumed to be a Hadoop-compatible filesystem (hdfs, s3, ...).
        val fs = getHadoopFileSystem(uri, hadoopConf)
        val path = new Path(uri)
        fetchHcfsFile(path, targetDir, fs, conf, hadoopConf, fileOverwrite,
          filename = Some(filename))
    }
    targetFile
  }
  /**
   * Fetch a file or directory from a Hadoop-compatible filesystem.
   *
   * Directories are fetched recursively: each child of `path` is written under a directory
   * named after `path` (or `filename`, if given) inside `targetDir`.
   *
   * Visible for testing
   */
  private[spark] def fetchHcfsFile(
      path: Path,
      targetDir: File,
      fs: FileSystem,
      conf: SparkConf,
      hadoopConf: Configuration,
      fileOverwrite: Boolean,
      filename: Option[String] = None): Unit = {
    if (!targetDir.exists() && !targetDir.mkdir()) {
      throw new IOException(s"Failed to create directory ${targetDir.getPath}")
    }
    val dest = new File(targetDir, filename.getOrElse(path.getName))
    if (fs.isFile(path)) {
      val in = fs.open(path)
      try {
        downloadFile(path.toString, in, dest, fileOverwrite)
      } finally {
        in.close()
      }
    } else {
      // `path` is a directory: recurse into its children, placing them under `dest`.
      fs.listStatus(path).foreach { fileStatus =>
        fetchHcfsFile(fileStatus.getPath(), dest, fs, conf, hadoopConf, fileOverwrite)
      }
    }
  }
/**
* Validate that a given URI is actually a valid URL as well.
* @param uri The URI to validate
*/
@throws[MalformedURLException]("when the URI is an invalid URL")
def validateURL(uri: URI): Unit = {
Option(uri.getScheme).getOrElse("file") match {
case "http" | "https" | "ftp" =>
try {
uri.toURL
} catch {
case e: MalformedURLException =>
val ex = new MalformedURLException(s"URI (${uri.toString}) is not a valid URL.")
ex.initCause(e)
throw ex
}
case _ => // will not be turned into a URL anyway
}
}
/**
* Get the path of a temporary directory. Spark's local directories can be configured through
* multiple settings, which are used with the following precedence:
*
* - If called from inside of a YARN container, this will return a directory chosen by YARN.
* - If the SPARK_LOCAL_DIRS environment variable is set, this will return a directory from it.
* - Otherwise, if the spark.local.dir is set, this will return a directory from it.
* - Otherwise, this will return java.io.tmpdir.
*
* Some of these configuration options might be lists of multiple paths, but this method will
* always return a single directory.
*/
def getLocalDir(conf: SparkConf): String = {
getOrCreateLocalRootDirs(conf).headOption.getOrElse {
val configuredLocalDirs = getConfiguredLocalDirs(conf)
throw new IOException(
s"Failed to get a temp directory under [${configuredLocalDirs.mkString(",")}].")
}
}
private[spark] def isRunningInYarnContainer(conf: SparkConf): Boolean = {
// These environment variables are set by YARN.
conf.getenv("CONTAINER_ID") != null
}
  /**
   * Gets or creates the directories listed in spark.local.dir or SPARK_LOCAL_DIRS,
   * and returns only the directories that exist / could be created.
   *
   * If no directories could be created, this will return an empty list.
   *
   * This method will cache the local directories for the application when it's first invoked.
   * So calling it multiple times with a different configuration will always return the same
   * set of directories.
   */
  private[spark] def getOrCreateLocalRootDirs(conf: SparkConf): Array[String] = {
    // Double-checked locking over `localRootDirs` (declared elsewhere in this object).
    // NOTE(review): this pattern is only safe if `localRootDirs` is declared @volatile —
    // confirm at its declaration site.
    if (localRootDirs == null) {
      this.synchronized {
        if (localRootDirs == null) {
          localRootDirs = getOrCreateLocalRootDirsImpl(conf)
        }
      }
    }
    localRootDirs
  }
  /**
   * Return the configured local directories where Spark can write files. This
   * method does not create any directories on its own, it only encapsulates the
   * logic of locating the local directories according to deployment mode.
   *
   * Precedence (first match wins): YARN container dirs, SPARK_EXECUTOR_DIRS,
   * SPARK_LOCAL_DIRS, MESOS_DIRECTORY (only when the external shuffle service is
   * disabled), then spark.local.dir falling back to java.io.tmpdir.
   */
  def getConfiguredLocalDirs(conf: SparkConf): Array[String] = {
    val shuffleServiceEnabled = conf.getBoolean("spark.shuffle.service.enabled", false)
    if (isRunningInYarnContainer(conf)) {
      // If we are in yarn mode, systems can have different disk layouts so we must set it
      // to what Yarn on this system said was available. Note this assumes that Yarn has
      // created the directories already, and that they are secured so that only the
      // user has access to them.
      getYarnLocalDirs(conf).split(",")
    } else if (conf.getenv("SPARK_EXECUTOR_DIRS") != null) {
      // Note: SPARK_EXECUTOR_DIRS uses the platform path separator, unlike the others.
      conf.getenv("SPARK_EXECUTOR_DIRS").split(File.pathSeparator)
    } else if (conf.getenv("SPARK_LOCAL_DIRS") != null) {
      conf.getenv("SPARK_LOCAL_DIRS").split(",")
    } else if (conf.getenv("MESOS_DIRECTORY") != null && !shuffleServiceEnabled) {
      // Mesos already creates a directory per Mesos task. Spark should use that directory
      // instead so all temporary files are automatically cleaned up when the Mesos task ends.
      // Note that we don't want this if the shuffle service is enabled because we want to
      // continue to serve shuffle files after the executors that wrote them have already exited.
      Array(conf.getenv("MESOS_DIRECTORY"))
    } else {
      if (conf.getenv("MESOS_DIRECTORY") != null && shuffleServiceEnabled) {
        logInfo("MESOS_DIRECTORY available but not using provided Mesos sandbox because " +
          "spark.shuffle.service.enabled is enabled.")
      }
      // In non-Yarn mode (or for the driver in yarn-client mode), we cannot trust the user
      // configuration to point to a secure directory. So create a subdirectory with restricted
      // permissions under each listed directory.
      conf.get("spark.local.dir", System.getProperty("java.io.tmpdir")).split(",")
    }
  }
private def getOrCreateLocalRootDirsImpl(conf: SparkConf): Array[String] = {
val configuredLocalDirs = getConfiguredLocalDirs(conf)
val uris = configuredLocalDirs.filter { root =>
// Here, we guess if the given value is a URI at its best - check if scheme is set.
Try(new URI(root).getScheme != null).getOrElse(false)
}
if (uris.nonEmpty) {
logWarning(
"The configured local directories are not expected to be URIs; however, got suspicious " +
s"values [${uris.mkString(", ")}]. Please check your configured local directories.")
}
configuredLocalDirs.flatMap { root =>
try {
val rootDir = new File(root)
if (rootDir.exists || rootDir.mkdirs()) {
val dir = createTempDir(root)
chmod700(dir)
Some(dir.getAbsolutePath)
} else {
logError(s"Failed to create dir in $root. Ignoring this directory.")
None
}
} catch {
case e: IOException =>
logError(s"Failed to create local root dir in $root. Ignoring this directory.")
None
}
}
}
/** Get the Yarn approved local directories. */
private def getYarnLocalDirs(conf: SparkConf): String = {
val localDirs = Option(conf.getenv("LOCAL_DIRS")).getOrElse("")
if (localDirs.isEmpty) {
throw new Exception("Yarn Local dirs can't be empty")
}
localDirs
}
  /** Used by unit tests. Do not call from other places. */
  private[spark] def clearLocalRootDirs(): Unit = {
    // Drop the cached dirs so the next getOrCreateLocalRootDirs call re-reads the configuration.
    localRootDirs = null
  }
/**
* Shuffle the elements of a collection into a random order, returning the
* result in a new collection. Unlike scala.util.Random.shuffle, this method
* uses a local random number generator, avoiding inter-thread contention.
*/
def randomize[T: ClassTag](seq: TraversableOnce[T]): Seq[T] = {
randomizeInPlace(seq.toArray)
}
/**
* Shuffle the elements of an array into a random order, modifying the
* original array. Returns the original array.
*/
def randomizeInPlace[T](arr: Array[T], rand: Random = new Random): Array[T] = {
for (i <- (arr.length - 1) to 1 by -1) {
val j = rand.nextInt(i + 1)
val tmp = arr(j)
arr(j) = arr(i)
arr(i) = tmp
}
arr
}
  /**
   * Get the local host's IP address in dotted-quad format (e.g. 1.2.3.4).
   * Note, this is typically not used from within core spark.
   */
  private lazy val localIpAddress: InetAddress = findLocalInetAddress()

  // Resolve a non-loopback address for this host, honoring the SPARK_LOCAL_IP override.
  private def findLocalInetAddress(): InetAddress = {
    val defaultIpOverride = System.getenv("SPARK_LOCAL_IP")
    if (defaultIpOverride != null) {
      InetAddress.getByName(defaultIpOverride)
    } else {
      val address = InetAddress.getLocalHost
      if (address.isLoopbackAddress) {
        // Address resolves to something like 127.0.1.1, which happens on Debian; try to find
        // a better address using the local network interfaces
        // getNetworkInterfaces returns ifs in reverse order compared to ifconfig output order
        // on unix-like system. On windows, it returns in index order.
        // It's more proper to pick ip address following system output order.
        val activeNetworkIFs = NetworkInterface.getNetworkInterfaces.asScala.toSeq
        val reOrderedNetworkIFs = if (isWindows) activeNetworkIFs else activeNetworkIFs.reverse
        for (ni <- reOrderedNetworkIFs) {
          val addresses = ni.getInetAddresses.asScala
            .filterNot(addr => addr.isLinkLocalAddress || addr.isLoopbackAddress).toSeq
          if (addresses.nonEmpty) {
            // Prefer an IPv4 address; otherwise fall back to the first usable one.
            val addr = addresses.find(_.isInstanceOf[Inet4Address]).getOrElse(addresses.head)
            // because of Inet6Address.toHostName may add interface at the end if it knows about it
            val strippedAddress = InetAddress.getByAddress(addr.getAddress)
            // We've found an address that looks reasonable!
            logWarning("Your hostname, " + InetAddress.getLocalHost.getHostName + " resolves to" +
              " a loopback address: " + address.getHostAddress + "; using " +
              strippedAddress.getHostAddress + " instead (on interface " + ni.getName + ")")
            logWarning("Set SPARK_LOCAL_IP if you need to bind to another address")
            return strippedAddress
          }
        }
        logWarning("Your hostname, " + InetAddress.getLocalHost.getHostName + " resolves to" +
          " a loopback address: " + address.getHostAddress + ", but we couldn't find any" +
          " external IP address!")
        logWarning("Set SPARK_LOCAL_IP if you need to bind to another address")
      }
      address
    }
  }
  // Optional override for this machine's reported hostname; seeded from the
  // SPARK_LOCAL_HOSTNAME environment variable and settable via setCustomHostname().
  private var customHostname: Option[String] = sys.env.get("SPARK_LOCAL_HOSTNAME")

  /**
   * Allow setting a custom host name because when we run on Mesos we need to use the same
   * hostname it reports to the master.
   */
  def setCustomHostname(hostname: String) {
    // Reject strings that contain a port or are otherwise not plain hostnames.
    Utils.checkHost(hostname)
    customHostname = Some(hostname)
  }
/**
* Get the local machine's FQDN.
*/
def localCanonicalHostName(): String = {
customHostname.getOrElse(localIpAddress.getCanonicalHostName)
}
/**
* Get the local machine's hostname.
*/
def localHostName(): String = {
customHostname.getOrElse(localIpAddress.getHostAddress)
}
/**
* Get the local machine's URI.
*/
def localHostNameForURI(): String = {
customHostname.getOrElse(InetAddresses.toUriString(localIpAddress))
}
def checkHost(host: String) {
assert(host != null && host.indexOf(':') == -1, s"Expected hostname (not IP) but got $host")
}
def checkHostPort(hostPort: String) {
assert(hostPort != null && hostPort.indexOf(':') != -1,
s"Expected host and port but got $hostPort")
}
// Typically, this will be of order of number of nodes in cluster
// If not, we should change it to LRUCache or something.
private val hostPortParseResults = new ConcurrentHashMap[String, (String, Int)]()
def parseHostPort(hostPort: String): (String, Int) = {
// Check cache first.
val cached = hostPortParseResults.get(hostPort)
if (cached != null) {
return cached
}
val indx: Int = hostPort.lastIndexOf(':')
// This is potentially broken - when dealing with ipv6 addresses for example, sigh ...
// but then hadoop does not support ipv6 right now.
// For now, we assume that if port exists, then it is valid - not check if it is an int > 0
if (-1 == indx) {
val retval = (hostPort, 0)
hostPortParseResults.put(hostPort, retval)
return retval
}
val retval = (hostPort.substring(0, indx).trim(), hostPort.substring(indx + 1).trim().toInt)
hostPortParseResults.putIfAbsent(hostPort, retval)
hostPortParseResults.get(hostPort)
}
/**
* Return the string to tell how long has passed in milliseconds.
*/
def getUsedTimeMs(startTimeMs: Long): String = {
" " + (System.currentTimeMillis - startTimeMs) + " ms"
}
/**
* Delete a file or directory and its contents recursively.
* Don't follow directories if they are symlinks.
* Throws an exception if deletion is unsuccessful.
*/
def deleteRecursively(file: File): Unit = {
if (file != null) {
JavaUtils.deleteRecursively(file)
ShutdownHookManager.removeShutdownDeleteDir(file)
}
}
/**
* Determines if a directory contains any files newer than cutoff seconds.
*
* @param dir must be the path to a directory, or IllegalArgumentException is thrown
* @param cutoff measured in seconds. Returns true if there are any files or directories in the
* given directory whose last modified time is later than this many seconds ago
*/
def doesDirectoryContainAnyNewFiles(dir: File, cutoff: Long): Boolean = {
if (!dir.isDirectory) {
throw new IllegalArgumentException(s"$dir is not a directory!")
}
val filesAndDirs = dir.listFiles()
val cutoffTimeInMillis = System.currentTimeMillis - (cutoff * 1000)
filesAndDirs.exists(_.lastModified() > cutoffTimeInMillis) ||
filesAndDirs.filter(_.isDirectory).exists(
subdir => doesDirectoryContainAnyNewFiles(subdir, cutoff)
)
}
/**
* Convert a time parameter such as (50s, 100ms, or 250us) to microseconds for internal use. If
* no suffix is provided, the passed number is assumed to be in ms.
*/
def timeStringAsMs(str: String): Long = {
JavaUtils.timeStringAsMs(str)
}
/**
* Convert a time parameter such as (50s, 100ms, or 250us) to seconds for internal use. If
* no suffix is provided, the passed number is assumed to be in seconds.
*/
def timeStringAsSeconds(str: String): Long = {
JavaUtils.timeStringAsSec(str)
}
/**
* Convert a passed byte string (e.g. 50b, 100k, or 250m) to bytes for internal use.
*
* If no suffix is provided, the passed number is assumed to be in bytes.
*/
def byteStringAsBytes(str: String): Long = {
JavaUtils.byteStringAsBytes(str)
}
/**
* Convert a passed byte string (e.g. 50b, 100k, or 250m) to kibibytes for internal use.
*
* If no suffix is provided, the passed number is assumed to be in kibibytes.
*/
def byteStringAsKb(str: String): Long = {
JavaUtils.byteStringAsKb(str)
}
/**
* Convert a passed byte string (e.g. 50b, 100k, or 250m) to mebibytes for internal use.
*
* If no suffix is provided, the passed number is assumed to be in mebibytes.
*/
def byteStringAsMb(str: String): Long = {
JavaUtils.byteStringAsMb(str)
}
/**
* Convert a passed byte string (e.g. 50b, 100k, or 250m, 500g) to gibibytes for internal use.
*
* If no suffix is provided, the passed number is assumed to be in gibibytes.
*/
def byteStringAsGb(str: String): Long = {
JavaUtils.byteStringAsGb(str)
}
/**
* Convert a Java memory parameter passed to -Xmx (such as 300m or 1g) to a number of mebibytes.
*/
def memoryStringToMb(str: String): Int = {
// Convert to bytes, rather than directly to MB, because when no units are specified the unit
// is assumed to be bytes
(JavaUtils.byteStringAsBytes(str) / 1024 / 1024).toInt
}
/**
* Convert a quantity in bytes to a human-readable string such as "4.0 MB".
*/
def bytesToString(size: Long): String = bytesToString(BigInt(size))
def bytesToString(size: BigInt): String = {
val EB = 1L << 60
val PB = 1L << 50
val TB = 1L << 40
val GB = 1L << 30
val MB = 1L << 20
val KB = 1L << 10
if (size >= BigInt(1L << 11) * EB) {
// The number is too large, show it in scientific notation.
BigDecimal(size, new MathContext(3, RoundingMode.HALF_UP)).toString() + " B"
} else {
val (value, unit) = {
if (size >= 2 * EB) {
(BigDecimal(size) / EB, "EB")
} else if (size >= 2 * PB) {
(BigDecimal(size) / PB, "PB")
} else if (size >= 2 * TB) {
(BigDecimal(size) / TB, "TB")
} else if (size >= 2 * GB) {
(BigDecimal(size) / GB, "GB")
} else if (size >= 2 * MB) {
(BigDecimal(size) / MB, "MB")
} else if (size >= 2 * KB) {
(BigDecimal(size) / KB, "KB")
} else {
(BigDecimal(size), "B")
}
}
"%.1f %s".formatLocal(Locale.US, value, unit)
}
}
/**
* Returns a human-readable string representing a duration such as "35ms"
*/
def msDurationToString(ms: Long): String = {
val second = 1000
val minute = 60 * second
val hour = 60 * minute
val locale = Locale.US
ms match {
case t if t < second =>
"%d ms".formatLocal(locale, t)
case t if t < minute =>
"%.1f s".formatLocal(locale, t.toFloat / second)
case t if t < hour =>
"%.1f m".formatLocal(locale, t.toFloat / minute)
case t =>
"%.2f h".formatLocal(locale, t.toFloat / hour)
}
}
/**
* Convert a quantity in megabytes to a human-readable string such as "4.0 MB".
*/
def megabytesToString(megabytes: Long): String = {
bytesToString(megabytes * 1024L * 1024L)
}
  /**
   * Execute a command and return the process running the command.
   *
   * @param command command tokens, e.g. Seq("tar", "-xf", file)
   * @param workingDir working directory of the child process
   * @param extraEnvironment environment variables added on top of the inherited environment
   * @param redirectStderr if true, the child's stderr is drained on a daemon thread into our log
   */
  def executeCommand(
      command: Seq[String],
      workingDir: File = new File("."),
      extraEnvironment: Map[String, String] = Map.empty,
      redirectStderr: Boolean = true): Process = {
    val builder = new ProcessBuilder(command: _*).directory(workingDir)
    val environment = builder.environment()
    for ((key, value) <- extraEnvironment) {
      environment.put(key, value)
    }
    val process = builder.start()
    if (redirectStderr) {
      val threadName = "redirect stderr for command " + command(0)
      def log(s: String): Unit = logInfo(s)
      // Drain stderr so the child cannot block on a full pipe buffer.
      processStreamByLine(threadName, process.getErrorStream, log)
    }
    process
  }
  /**
   * Execute a command and get its output, throwing an exception if it yields a code other than 0.
   *
   * @param command command tokens, e.g. Seq("tar", "-xf", file)
   * @param workingDir working directory for the child process
   * @param extraEnvironment extra environment variables added to the child's environment
   * @param redirectStderr whether the child's stderr is forwarded to our logs
   * @return the child's complete stdout as one string
   * @throws SparkException if the process exits with a non-zero code
   */
  def executeAndGetOutput(
      command: Seq[String],
      workingDir: File = new File("."),
      extraEnvironment: Map[String, String] = Map.empty,
      redirectStderr: Boolean = true): String = {
    val process = executeCommand(command, workingDir, extraEnvironment, redirectStderr)
    val output = new StringBuilder
    val threadName = "read stdout for " + command(0)
    def appendToOutput(s: String): Unit = output.append(s).append("\\n")
    // Read stdout on a separate thread so the child cannot block on a full pipe buffer.
    val stdoutThread = processStreamByLine(threadName, process.getInputStream, appendToOutput)
    val exitCode = process.waitFor()
    stdoutThread.join() // Wait for it to finish reading output
    if (exitCode != 0) {
      logError(s"Process $command exited with code $exitCode: $output")
      throw new SparkException(s"Process $command exited with code $exitCode")
    }
    output.toString
  }
/**
* Return and start a daemon thread that processes the content of the input stream line by line.
*/
def processStreamByLine(
threadName: String,
inputStream: InputStream,
processLine: String => Unit): Thread = {
val t = new Thread(threadName) {
override def run() {
for (line <- Source.fromInputStream(inputStream).getLines()) {
processLine(line)
}
}
}
t.setDaemon(true)
t.start()
t
}
  /**
   * Execute a block of code that evaluates to Unit, forwarding any uncaught exceptions to the
   * default UncaughtExceptionHandler
   *
   * NOTE: This method is to be called by the spark-started JVM process.
   */
  def tryOrExit(block: => Unit) {
    try {
      block
    } catch {
      // ControlThrowable implements Scala control flow (e.g. non-local returns) and must
      // propagate untouched.
      case e: ControlThrowable => throw e
      case t: Throwable => sparkUncaughtExceptionHandler.uncaughtException(t)
    }
  }
  /**
   * Execute a block of code that evaluates to Unit, stop SparkContext if there is any uncaught
   * exception
   *
   * NOTE: This method is to be called by the driver-side components to avoid stopping the
   * user-started JVM process completely; in contrast, tryOrExit is to be called in the
   * spark-started JVM process .
   */
  def tryOrStopSparkContext(sc: SparkContext)(block: => Unit) {
    try {
      block
    } catch {
      // Let Scala's control-flow throwables pass through untouched.
      case e: ControlThrowable => throw e
      case t: Throwable =>
        val currentThreadName = Thread.currentThread().getName
        if (sc != null) {
          logError(s"uncaught error in thread $currentThreadName, stopping SparkContext", t)
          // Stop on a fresh thread: stopping from the failing thread itself could deadlock.
          sc.stopInNewThread()
        }
        if (!NonFatal(t)) {
          // Fatal errors (OOM etc.) are rethrown after the context has been told to stop.
          logError(s"throw uncaught fatal error in thread $currentThreadName", t)
          throw t
        }
    }
  }
  /**
   * Execute a block of code that returns a value, re-throwing any non-fatal uncaught
   * exceptions as IOException. This is used when implementing Externalizable and Serializable's
   * read and write methods, since Java's serializer will not report non-IOExceptions properly;
   * see SPARK-4080 for more context.
   */
  def tryOrIOException[T](block: => T): T = {
    try {
      block
    } catch {
      // IOExceptions already surface correctly through Java serialization; rethrow unchanged.
      case e: IOException =>
        logError("Exception encountered", e)
        throw e
      // Any other non-fatal error is wrapped so the serializer reports it.
      case NonFatal(e) =>
        logError("Exception encountered", e)
        throw new IOException(e)
    }
  }
/** Executes the given block. Log non-fatal errors if any, and only throw fatal errors */
def tryLogNonFatalError(block: => Unit) {
try {
block
} catch {
case NonFatal(t) =>
logError(s"Uncaught exception in thread ${Thread.currentThread().getName}", t)
}
}
  /**
   * Execute a block of code, then a finally block, but if exceptions happen in
   * the finally block, do not suppress the original exception.
   *
   * This is primarily an issue with `finally { out.close() }` blocks, where
   * close needs to be called to clean up `out`, but if an exception happened
   * in `out.write`, it's likely `out` may be corrupted and `out.close` will
   * fail as well. This would then suppress the original/likely more meaningful
   * exception from the original `out.write` call.
   */
  def tryWithSafeFinally[T](block: => T)(finallyBlock: => Unit): T = {
    var originalThrowable: Throwable = null
    try {
      block
    } catch {
      case t: Throwable =>
        // Purposefully not using NonFatal, because even fatal exceptions
        // we don't want to have our finallyBlock suppress
        originalThrowable = t
        throw originalThrowable
    } finally {
      try {
        finallyBlock
      } catch {
        // Attach the finally-block failure to the original exception instead of replacing it.
        case t: Throwable if (originalThrowable != null && originalThrowable != t) =>
          originalThrowable.addSuppressed(t)
          logWarning(s"Suppressing exception in finally: ${t.getMessage}", t)
          throw originalThrowable
      }
    }
  }
  /**
   * Execute a block of code and call the failure callbacks in the catch block. If exceptions occur
   * in either the catch or the finally block, they are appended to the list of suppressed
   * exceptions in original exception which is then rethrown.
   *
   * This is primarily an issue with `catch { abort() }` or `finally { out.close() }` blocks,
   * where the abort/close needs to be called to clean up `out`, but if an exception happened
   * in `out.write`, it's likely `out` may be corrupted and `abort` or `out.close` will
   * fail as well. This would then suppress the original/likely more meaningful
   * exception from the original `out.write` call.
   */
  def tryWithSafeFinallyAndFailureCallbacks[T](block: => T)
      (catchBlock: => Unit = (), finallyBlock: => Unit = ()): T = {
    var originalThrowable: Throwable = null
    try {
      block
    } catch {
      case cause: Throwable =>
        // Purposefully not using NonFatal, because even fatal exceptions
        // we don't want to have our finallyBlock suppress
        originalThrowable = cause
        try {
          // Mark the current task failed before running the caller's catch block.
          logError("Aborting task", originalThrowable)
          TaskContext.get().asInstanceOf[TaskContextImpl].markTaskFailed(originalThrowable)
          catchBlock
        } catch {
          case t: Throwable =>
            // The catch block itself failed; keep the original exception as primary.
            if (originalThrowable != t) {
              originalThrowable.addSuppressed(t)
              logWarning(s"Suppressing exception in catch: ${t.getMessage}", t)
            }
        }
        throw originalThrowable
    } finally {
      try {
        finallyBlock
      } catch {
        // Same policy as the catch block: never let cleanup mask the original failure.
        case t: Throwable if (originalThrowable != null && originalThrowable != t) =>
          originalThrowable.addSuppressed(t)
          logWarning(s"Suppressing exception in finally: ${t.getMessage}", t)
          throw originalThrowable
      }
    }
  }
/** Default filtering function for finding call sites using `getCallSite`. */
private def sparkInternalExclusionFunction(className: String): Boolean = {
// A regular expression to match classes of the internal Spark API's
// that we want to skip when finding the call site of a method.
val SPARK_CORE_CLASS_REGEX =
"""^org\\.apache\\.spark(\\.api\\.java)?(\\.util)?(\\.rdd)?(\\.broadcast)?\\.[A-Z]""".r
val SPARK_SQL_CLASS_REGEX = """^org\\.apache\\.spark\\.sql.*""".r
val SCALA_CORE_CLASS_PREFIX = "scala"
val isSparkClass = SPARK_CORE_CLASS_REGEX.findFirstIn(className).isDefined ||
SPARK_SQL_CLASS_REGEX.findFirstIn(className).isDefined
val isScalaClass = className.startsWith(SCALA_CORE_CLASS_PREFIX)
// If the class is a Spark internal class or a Scala class, then exclude.
isSparkClass || isScalaClass
}
  /**
   * When called inside a class in the spark package, returns the name of the user code class
   * (outside the spark package) that called into Spark, as well as which Spark method they called.
   * This is used, for example, to tell users where in their code each RDD got created.
   *
   * @param skipClass Function that is used to exclude non-user-code classes.
   */
  def getCallSite(skipClass: String => Boolean = sparkInternalExclusionFunction): CallSite = {
    // Keep crawling up the stack trace until we find the first function not inside of the spark
    // package. We track the last (shallowest) contiguous Spark method. This might be an RDD
    // transformation, a SparkContext function (such as parallelize), or anything else that leads
    // to instantiation of an RDD. We also track the first (deepest) user method, file, and line.
    var lastSparkMethod = "<unknown>"
    var firstUserFile = "<unknown>"
    var firstUserLine = 0
    var insideSpark = true
    // Slot 0 is reserved for the last Spark method; user frames are appended after it.
    val callStack = new ArrayBuffer[String]() :+ "<unknown>"
    Thread.currentThread.getStackTrace().foreach { ste: StackTraceElement =>
      // When running under some profilers, the current stack trace might contain some bogus
      // frames. This is intended to ensure that we don't crash in these situations by
      // ignoring any frames that we can't examine.
      if (ste != null && ste.getMethodName != null
        && !ste.getMethodName.contains("getStackTrace")) {
        if (insideSpark) {
          if (skipClass(ste.getClassName)) {
            lastSparkMethod = if (ste.getMethodName == "<init>") {
              // Spark method is a constructor; get its class name
              ste.getClassName.substring(ste.getClassName.lastIndexOf('.') + 1)
            } else {
              ste.getMethodName
            }
            callStack(0) = ste.toString // Put last Spark method on top of the stack trace.
          } else {
            // First non-Spark frame: this is the user call site.
            if (ste.getFileName != null) {
              firstUserFile = ste.getFileName
              if (ste.getLineNumber >= 0) {
                firstUserLine = ste.getLineNumber
              }
            }
            callStack += ste.toString
            insideSpark = false
          }
        } else {
          callStack += ste.toString
        }
      }
    }
    val callStackDepth = System.getProperty("spark.callstack.depth", "20").toInt
    val shortForm =
      if (firstUserFile == "HiveSessionImpl.java") {
        // To be more user friendly, show a nicer string for queries submitted from the JDBC
        // server.
        "Spark JDBC Server Query"
      } else {
        s"$lastSparkMethod at $firstUserFile:$firstUserLine"
      }
    val longForm = callStack.take(callStackDepth).mkString("\\n")
    CallSite(shortForm, longForm)
  }
  // Config key / default size for the cache mapping compressed log file paths to their
  // uncompressed lengths (see getFileLength below).
  private val UNCOMPRESSED_LOG_FILE_LENGTH_CACHE_SIZE_CONF =
    "spark.worker.ui.compressedLogFileLengthCacheSize"
  private val DEFAULT_UNCOMPRESSED_LOG_FILE_LENGTH_CACHE_SIZE = 100
  // Lazily-created cache; all access goes through the synchronized accessor below.
  private var compressedLogFileLengthCache: LoadingCache[String, java.lang.Long] = null
  private def getCompressedLogFileLengthCache(
      sparkConf: SparkConf): LoadingCache[String, java.lang.Long] = this.synchronized {
    if (compressedLogFileLengthCache == null) {
      val compressedLogFileLengthCacheSize = sparkConf.getInt(
        UNCOMPRESSED_LOG_FILE_LENGTH_CACHE_SIZE_CONF,
        DEFAULT_UNCOMPRESSED_LOG_FILE_LENGTH_CACHE_SIZE)
      compressedLogFileLengthCache = CacheBuilder.newBuilder()
        .maximumSize(compressedLogFileLengthCacheSize)
        .build[String, java.lang.Long](new CacheLoader[String, java.lang.Long]() {
          override def load(path: String): java.lang.Long = {
            // Cache miss: decompress the file once to measure its uncompressed length.
            Utils.getCompressedFileLength(new File(path))
          }
        })
    }
    compressedLogFileLengthCache
  }
  /**
   * Return the file length, if the file is compressed it returns the uncompressed file length.
   * It also caches the uncompressed file size to avoid repeated decompression. The cache size is
   * read from workerConf.
   */
  def getFileLength(file: File, workConf: SparkConf): Long = {
    if (file.getName.endsWith(".gz")) {
      // Compressed log: look up (or compute and cache) the uncompressed length.
      getCompressedLogFileLengthCache(workConf).get(file.getAbsolutePath)
    } else {
      file.length
    }
  }
  /** Return uncompressed file length of a compressed file. */
  private def getCompressedFileLength(file: File): Long = {
    var gzInputStream: GZIPInputStream = null
    try {
      // Uncompress .gz file to determine file size.
      var fileSize = 0L
      gzInputStream = new GZIPInputStream(new FileInputStream(file))
      val bufSize = 1024
      val buf = new Array[Byte](bufSize)
      // ByteStreams.read fills the buffer as far as possible; 0 bytes read means EOF.
      var numBytes = ByteStreams.read(gzInputStream, buf, 0, bufSize)
      while (numBytes > 0) {
        fileSize += numBytes
        numBytes = ByteStreams.read(gzInputStream, buf, 0, bufSize)
      }
      fileSize
    } catch {
      case e: Throwable =>
        logError(s"Cannot get file length of ${file}", e)
        throw e
    } finally {
      // Always release the stream, even when decompression failed part-way.
      if (gzInputStream != null) {
        gzInputStream.close()
      }
    }
  }
/** Return a string containing part of a file from byte 'start' to 'end'. */
def offsetBytes(path: String, length: Long, start: Long, end: Long): String = {
val file = new File(path)
val effectiveEnd = math.min(length, end)
val effectiveStart = math.max(0, start)
val buff = new Array[Byte]((effectiveEnd-effectiveStart).toInt)
val stream = if (path.endsWith(".gz")) {
new GZIPInputStream(new FileInputStream(file))
} else {
new FileInputStream(file)
}
try {
ByteStreams.skipFully(stream, effectiveStart)
ByteStreams.readFully(stream, buff)
} finally {
stream.close()
}
Source.fromBytes(buff).mkString
}
  /**
   * Return a string containing data across a set of files. The `startIndex`
   * and `endIndex` is based on the cumulative size of all the files take in
   * the given order. See figure below for more details.
   */
  def offsetBytes(files: Seq[File], fileLengths: Seq[Long], start: Long, end: Long): String = {
    assert(files.length == fileLengths.length)
    // Clamp the requested range to [0, total length of all files].
    val startIndex = math.max(start, 0)
    val endIndex = math.min(end, fileLengths.sum)
    val fileToLength = files.zip(fileLengths).toMap
    logDebug("Log files: \\n" + fileToLength.mkString("\\n"))
    val stringBuffer = new StringBuffer((endIndex - startIndex).toInt)
    // Cumulative offset of the current file's first byte within the concatenated stream.
    var sum = 0L
    files.zip(fileLengths).foreach { case (file, fileLength) =>
      val startIndexOfFile = sum
      val endIndexOfFile = sum + fileToLength(file)
      logDebug(s"Processing file $file, " +
        s"with start index = $startIndexOfFile, end index = $endIndex")
      /*
                                      ____________
       range 1:                      |            |
                                     |   case A   |
       files:   |==== file 1 ====|====== file 2 ======|===== file 3 =====|
                     |   case B  .       case C       .    case D    |
       range 2:      |___________.____________________.______________|
       */
      if (startIndex <= startIndexOfFile && endIndex >= endIndexOfFile) {
        // Case C: read the whole file
        stringBuffer.append(offsetBytes(file.getAbsolutePath, fileLength, 0, fileToLength(file)))
      } else if (startIndex > startIndexOfFile && startIndex < endIndexOfFile) {
        // Case A and B: read from [start of required range] to [end of file / end of range]
        val effectiveStartIndex = startIndex - startIndexOfFile
        val effectiveEndIndex = math.min(endIndex - startIndexOfFile, fileToLength(file))
        stringBuffer.append(Utils.offsetBytes(
          file.getAbsolutePath, fileLength, effectiveStartIndex, effectiveEndIndex))
      } else if (endIndex > startIndexOfFile && endIndex < endIndexOfFile) {
        // Case D: read from [start of file] to [end of require range]
        val effectiveStartIndex = math.max(startIndex - startIndexOfFile, 0)
        val effectiveEndIndex = endIndex - startIndexOfFile
        stringBuffer.append(Utils.offsetBytes(
          file.getAbsolutePath, fileLength, effectiveStartIndex, effectiveEndIndex))
      }
      sum += fileToLength(file)
      logDebug(s"After processing file $file, string built is ${stringBuffer.toString}")
    }
    stringBuffer.toString
  }
  /**
   * Clone an object using a Spark serializer.
   */
  // Produces a deep copy by round-tripping `value` through serialize/deserialize.
  def clone[T: ClassTag](value: T, serializer: SerializerInstance): T = {
    serializer.deserialize[T](serializer.serialize(value))
  }
private def isSpace(c: Char): Boolean = {
" \\t\\r\\n".indexOf(c) != -1
}
  /**
   * Split a string of potentially quoted arguments from the command line the way that a shell
   * would do it to determine arguments to a command. For example, if the string is 'a "b c" d',
   * then it would be parsed as three arguments: 'a', 'b c' and 'd'.
   */
  // Single forward pass with three state flags (inWord / inSingleQuote / inDoubleQuote).
  def splitCommandString(s: String): Seq[String] = {
    val buf = new ArrayBuffer[String]
    var inWord = false
    var inSingleQuote = false
    var inDoubleQuote = false
    val curWord = new StringBuilder
    // Flush the accumulated characters into the result buffer and reset the accumulator.
    def endWord() {
      buf += curWord.toString
      curWord.clear()
    }
    var i = 0
    while (i < s.length) {
      val nextChar = s.charAt(i)
      if (inDoubleQuote) {
        if (nextChar == '"') {
          inDoubleQuote = false
        } else if (nextChar == '\\\\') {
          if (i < s.length - 1) {
            // Append the next character directly, because only " and \\ may be escaped in
            // double quotes after the shell's own expansion
            curWord.append(s.charAt(i + 1))
            i += 1
          }
        } else {
          curWord.append(nextChar)
        }
      } else if (inSingleQuote) {
        if (nextChar == '\\'') {
          inSingleQuote = false
        } else {
          curWord.append(nextChar)
        }
        // Backslashes are not treated specially in single quotes
      } else if (nextChar == '"') {
        inWord = true
        inDoubleQuote = true
      } else if (nextChar == '\\'') {
        inWord = true
        inSingleQuote = true
      } else if (!isSpace(nextChar)) {
        curWord.append(nextChar)
        inWord = true
      } else if (inWord && isSpace(nextChar)) {
        // Whitespace outside quotes terminates the current word.
        endWord()
        inWord = false
      }
      i += 1
    }
    // Flush a trailing word (including one left open by an unterminated quote).
    if (inWord || inDoubleQuote || inSingleQuote) {
      endWord()
    }
    buf
  }
/* Calculates 'x' modulo 'mod', takes to consideration sign of x,
* i.e. if 'x' is negative, than 'x' % 'mod' is negative too
* so function return (x % mod) + mod in that case.
*/
def nonNegativeMod(x: Int, mod: Int): Int = {
val rawMod = x % mod
rawMod + (if (rawMod < 0) mod else 0)
}
// Handles idiosyncrasies with hash (add more as required)
// This method should be kept in sync with
// org.apache.spark.network.util.JavaUtils#nonNegativeHash().
def nonNegativeHash(obj: AnyRef): Int = {
// Required ?
if (obj eq null) return 0
val hash = obj.hashCode
// math.abs fails for Int.MinValue
val hashAbs = if (Int.MinValue != hash) math.abs(hash) else 0
// Nothing else to guard against ?
hashAbs
}
/**
* NaN-safe version of `java.lang.Double.compare()` which allows NaN values to be compared
* according to semantics where NaN == NaN and NaN is greater than any non-NaN double.
*/
def nanSafeCompareDoubles(x: Double, y: Double): Int = {
val xIsNan: Boolean = java.lang.Double.isNaN(x)
val yIsNan: Boolean = java.lang.Double.isNaN(y)
if ((xIsNan && yIsNan) || (x == y)) 0
else if (xIsNan) 1
else if (yIsNan) -1
else if (x > y) 1
else -1
}
/**
* NaN-safe version of `java.lang.Float.compare()` which allows NaN values to be compared
* according to semantics where NaN == NaN and NaN is greater than any non-NaN float.
*/
def nanSafeCompareFloats(x: Float, y: Float): Int = {
val xIsNan: Boolean = java.lang.Float.isNaN(x)
val yIsNan: Boolean = java.lang.Float.isNaN(y)
if ((xIsNan && yIsNan) || (x == y)) 0
else if (xIsNan) 1
else if (yIsNan) -1
else if (x > y) 1
else -1
}
/**
* Returns the system properties map that is thread-safe to iterator over. It gets the
* properties which have been set explicitly, as well as those for which only a default value
* has been defined.
*/
def getSystemProperties: Map[String, String] = {
System.getProperties.stringPropertyNames().asScala
.map(key => (key, System.getProperty(key))).toMap
}
/**
* Method executed for repeating a task for side effects.
* Unlike a for comprehension, it permits JVM JIT optimization
*/
def times(numIters: Int)(f: => Unit): Unit = {
var i = 0
while (i < numIters) {
f
i += 1
}
}
/**
* Timing method based on iterations that permit JVM JIT optimization.
*
* @param numIters number of iterations
* @param f function to be executed. If prepare is not None, the running time of each call to f
* must be an order of magnitude longer than one millisecond for accurate timing.
* @param prepare function to be executed before each call to f. Its running time doesn't count.
* @return the total time across all iterations (not counting preparation time)
*/
def timeIt(numIters: Int)(f: => Unit, prepare: Option[() => Unit] = None): Long = {
if (prepare.isEmpty) {
val start = System.currentTimeMillis
times(numIters)(f)
System.currentTimeMillis - start
} else {
var i = 0
var sum = 0L
while (i < numIters) {
prepare.get.apply()
val start = System.currentTimeMillis
f
sum += System.currentTimeMillis - start
i += 1
}
sum
}
}
/**
* Counts the number of elements of an iterator using a while loop rather than calling
* [[scala.collection.Iterator#size]] because it uses a for loop, which is slightly slower
* in the current version of Scala.
*/
def getIteratorSize(iterator: Iterator[_]): Long = {
var count = 0L
while (iterator.hasNext) {
count += 1L
iterator.next()
}
count
}
/**
* Generate a zipWithIndex iterator, avoid index value overflowing problem
* in scala's zipWithIndex
*/
def getIteratorZipWithIndex[T](iterator: Iterator[T], startIndex: Long): Iterator[(T, Long)] = {
new Iterator[(T, Long)] {
require(startIndex >= 0, "startIndex should be >= 0.")
var index: Long = startIndex - 1L
def hasNext: Boolean = iterator.hasNext
def next(): (T, Long) = {
index += 1L
(iterator.next(), index)
}
}
}
/**
* Creates a symlink.
*
* @param src absolute path to the source
* @param dst relative path for the destination
*/
def symlink(src: File, dst: File): Unit = {
if (!src.isAbsolute()) {
throw new IOException("Source must be absolute")
}
if (dst.isAbsolute()) {
throw new IOException("Destination must be relative")
}
Files.createSymbolicLink(dst.toPath, src.toPath)
}
/** Return the class name of the given object, removing all dollar signs */
def getFormattedClassName(obj: AnyRef): String = {
obj.getClass.getSimpleName.replace("$", "")
}
  /**
   * Return a Hadoop FileSystem with the scheme encoded in the given path.
   */
  def getHadoopFileSystem(path: URI, conf: Configuration): FileSystem = {
    FileSystem.get(path, conf)
  }
  /**
   * Return a Hadoop FileSystem with the scheme encoded in the given path.
   */
  // String overload: parses the path as a URI and delegates to the URI overload above.
  def getHadoopFileSystem(path: String, conf: Configuration): FileSystem = {
    getHadoopFileSystem(new URI(path), conf)
  }
  /**
   * Whether the underlying operating system is Windows.
   */
  val isWindows = SystemUtils.IS_OS_WINDOWS
  /**
   * Whether the underlying operating system is Mac OS X.
   */
  val isMac = SystemUtils.IS_OS_MAC_OSX
  /**
   * Pattern for matching a Windows drive, which contains only a single alphabet character.
   * Used e.g. by nonLocalPaths to avoid treating "C:" in "C:/path" as a URI scheme.
   */
  val windowsDrive = "([a-zA-Z])".r
/**
* Indicates whether Spark is currently running unit tests.
*/
def isTesting: Boolean = {
sys.env.contains("SPARK_TESTING") || sys.props.contains("spark.testing")
}
  /**
   * Terminates a process waiting for at most the specified duration.
   *
   * First attempts a graceful destroy(); if the process is still alive after `timeoutMs`,
   * escalates to destroyForcibly() and waits up to `timeoutMs` once more.
   *
   * @return the process exit value if it was successfully terminated, else None
   */
  def terminateProcess(process: Process, timeoutMs: Long): Option[Int] = {
    // Politely destroy first
    process.destroy()
    if (process.waitFor(timeoutMs, TimeUnit.MILLISECONDS)) {
      // Successful exit
      Option(process.exitValue())
    } else {
      try {
        process.destroyForcibly()
      } catch {
        // Best-effort kill: log and fall through to the final wait below.
        case NonFatal(e) => logWarning("Exception when attempting to kill process", e)
      }
      // Wait, again, although this really should return almost immediately
      if (process.waitFor(timeoutMs, TimeUnit.MILLISECONDS)) {
        Option(process.exitValue())
      } else {
        logWarning("Timed out waiting to forcibly kill process")
        None
      }
    }
  }
/**
* Return the stderr of a process after waiting for the process to terminate.
* If the process does not terminate within the specified timeout, return None.
*/
def getStderr(process: Process, timeoutMs: Long): Option[String] = {
val terminated = process.waitFor(timeoutMs, TimeUnit.MILLISECONDS)
if (terminated) {
Some(Source.fromInputStream(process.getErrorStream).getLines().mkString("\\n"))
} else {
None
}
}
  /**
   * Execute the given block, logging and re-throwing any uncaught exception.
   * This is particularly useful for wrapping code that runs in a thread, to ensure
   * that exceptions are printed, and to avoid having to catch Throwable.
   */
  def logUncaughtExceptions[T](f: => T): T = {
    try {
      f
    } catch {
      // ControlThrowable is used by Scala for control flow (e.g. non-local returns);
      // propagate it untouched and unlogged.
      case ct: ControlThrowable =>
        throw ct
      case t: Throwable =>
        logError(s"Uncaught exception in thread ${Thread.currentThread().getName}", t)
        throw t
    }
  }
  /** Executes the given block in a Try, logging any uncaught exceptions. */
  // Same contract as logUncaughtExceptions, except failures are captured in a Failure
  // instead of rethrown (ControlThrowable still always propagates).
  def tryLog[T](f: => T): Try[T] = {
    try {
      val res = f
      scala.util.Success(res)
    } catch {
      case ct: ControlThrowable =>
        throw ct
      case t: Throwable =>
        logError(s"Uncaught exception in thread ${Thread.currentThread().getName}", t)
        scala.util.Failure(t)
    }
  }
/** Returns true if the given exception was fatal. See docs for scala.util.control.NonFatal. */
def isFatalError(e: Throwable): Boolean = {
e match {
case NonFatal(_) |
_: InterruptedException |
_: NotImplementedError |
_: ControlThrowable |
_: LinkageError =>
false
case _ =>
true
}
}
/**
* Return a well-formed URI for the file described by a user input string.
*
* If the supplied path does not contain a scheme, or is a relative path, it will be
* converted into an absolute path with a file:// scheme.
*/
def resolveURI(path: String): URI = {
try {
val uri = new URI(path)
if (uri.getScheme() != null) {
return uri
}
// make sure to handle if the path has a fragment (applies to yarn
// distributed cache)
if (uri.getFragment() != null) {
val absoluteURI = new File(uri.getPath()).getAbsoluteFile().toURI()
return new URI(absoluteURI.getScheme(), absoluteURI.getHost(), absoluteURI.getPath(),
uri.getFragment())
}
} catch {
case e: URISyntaxException =>
}
new File(path).getAbsoluteFile().toURI()
}
/** Resolve a comma-separated list of paths. */
def resolveURIs(paths: String): String = {
if (paths == null || paths.trim.isEmpty) {
""
} else {
paths.split(",").filter(_.trim.nonEmpty).map { p => Utils.resolveURI(p) }.mkString(",")
}
}
  /** Return all non-local paths from a comma-separated list of paths. */
  // A path counts as local when its scheme is "local" or "file" (the default when no scheme
  // is present), or, on Windows, when the apparent scheme is really a one-letter drive
  // (e.g. "C:/..."). `testWindows` forces the Windows behavior for tests on other hosts.
  def nonLocalPaths(paths: String, testWindows: Boolean = false): Array[String] = {
    val windows = isWindows || testWindows
    if (paths == null || paths.trim.isEmpty) {
      Array.empty
    } else {
      paths.split(",").filter { p =>
        val uri = resolveURI(p)
        Option(uri.getScheme).getOrElse("file") match {
          case windowsDrive(d) if windows => false
          case "local" | "file" => false
          case _ => true
        }
      }
    }
  }
  /**
   * Load default Spark properties from the given file. If no file is provided,
   * use the common defaults file. This mutates state in the given SparkConf and
   * in this JVM's system properties if the config specified in the file is not
   * already set. Return the path of the properties file used.
   */
  def loadDefaultSparkProperties(conf: SparkConf, filePath: String = null): String = {
    val path = Option(filePath).getOrElse(getDefaultPropertiesFile())
    Option(path).foreach { confFile =>
      // Only "spark."-prefixed keys are loaded; pre-existing conf/system values win.
      getPropertiesFromFile(confFile).filter { case (k, v) =>
        k.startsWith("spark.")
      }.foreach { case (k, v) =>
        conf.setIfMissing(k, v)
        sys.props.getOrElseUpdate(k, v)
      }
    }
    path
  }
/**
* Updates Spark config with properties from a set of Properties.
* Provided properties have the highest priority.
*/
def updateSparkConfigFromProperties(
conf: SparkConf,
properties: Map[String, String]) : Unit = {
properties.filter { case (k, v) =>
k.startsWith("spark.")
}.foreach { case (k, v) =>
conf.set(k, v)
}
}
  /** Load properties present in the given file. */
  // Reads the file as UTF-8 java.util.Properties and returns a trimmed-value map. Fails fast
  // via require() when the path is missing or not a regular file; IO errors are wrapped in
  // SparkException. The reader is always closed.
  def getPropertiesFromFile(filename: String): Map[String, String] = {
    val file = new File(filename)
    require(file.exists(), s"Properties file $file does not exist")
    require(file.isFile(), s"Properties file $file is not a normal file")
    val inReader = new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8)
    try {
      val properties = new Properties()
      properties.load(inReader)
      properties.stringPropertyNames().asScala.map(
        k => (k, properties.getProperty(k).trim)).toMap
    } catch {
      case e: IOException =>
        throw new SparkException(s"Failed when loading Spark properties from $filename", e)
    } finally {
      inReader.close()
    }
  }
/** Return the path of the default Spark properties file. */
def getDefaultPropertiesFile(env: Map[String, String] = sys.env): String = {
env.get("SPARK_CONF_DIR")
.orElse(env.get("SPARK_HOME").map { t => s"$t${File.separator}conf" })
.map { t => new File(s"$t${File.separator}spark-defaults.conf")}
.filter(_.isFile)
.map(_.getAbsolutePath)
.orNull
}
/**
* Return a nice string representation of the exception. It will call "printStackTrace" to
* recursively generate the stack trace including the exception and its causes.
*/
def exceptionString(e: Throwable): String = {
if (e == null) {
""
} else {
// Use e.printStackTrace here because e.getStackTrace doesn't include the cause
val stringWriter = new StringWriter()
e.printStackTrace(new PrintWriter(stringWriter))
stringWriter.toString
}
}
private implicit class Lock(lock: LockInfo) {
def lockString: String = {
lock match {
case monitor: MonitorInfo =>
s"Monitor(${lock.getClassName}@${lock.getIdentityHashCode}})"
case _ =>
s"Lock(${lock.getClassName}@${lock.getIdentityHashCode}})"
}
}
}
  /** Return a thread dump of all threads' stacktraces.  Used to capture dumps for the web UI */
  // Threads are ordered: executor task-launch threads first, then case-insensitively by name,
  // ties broken by thread id.
  def getThreadDump(): Array[ThreadStackTrace] = {
    // We need to filter out null values here because dumpAllThreads() may return null array
    // elements for threads that are dead / don't exist.
    val threadInfos = ManagementFactory.getThreadMXBean.dumpAllThreads(true, true).filter(_ != null)
    threadInfos.sortWith { case (threadTrace1, threadTrace2) =>
        val v1 = if (threadTrace1.getThreadName.contains("Executor task launch")) 1 else 0
        val v2 = if (threadTrace2.getThreadName.contains("Executor task launch")) 1 else 0
        if (v1 == v2) {
          val name1 = threadTrace1.getThreadName().toLowerCase(Locale.ROOT)
          val name2 = threadTrace2.getThreadName().toLowerCase(Locale.ROOT)
          val nameCmpRes = name1.compareTo(name2)
          if (nameCmpRes == 0) {
            threadTrace1.getThreadId < threadTrace2.getThreadId
          } else {
            nameCmpRes < 0
          }
        } else {
          v1 > v2
        }
    }.map(threadInfoToThreadStackTrace)
  }
  /** Return the stack trace of a single thread, or None for an invalid/unknown thread id. */
  def getThreadDumpForThread(threadId: Long): Option[ThreadStackTrace] = {
    if (threadId <= 0) {
      None
    } else {
      // The Int.MaxValue here requests the entire untruncated stack trace of the thread:
      val threadInfo =
        Option(ManagementFactory.getThreadMXBean.getThreadInfo(threadId, Int.MaxValue))
      threadInfo.map(threadInfoToThreadStackTrace)
    }
  }
  // Convert a JMX ThreadInfo to Spark's ThreadStackTrace, annotating frames that hold
  // monitors and collecting all held locks.
  private def threadInfoToThreadStackTrace(threadInfo: ThreadInfo): ThreadStackTrace = {
    val monitors = threadInfo.getLockedMonitors.map(m => m.getLockedStackFrame -> m).toMap
    val stackTrace = StackTrace(threadInfo.getStackTrace.map { frame =>
      monitors.get(frame) match {
        case Some(monitor) =>
          monitor.getLockedStackFrame.toString + s" => holding ${monitor.lockString}"
        case None =>
          frame.toString
      }
    })
    // use a set to dedup re-entrant locks that are held at multiple places
    val heldLocks =
      (threadInfo.getLockedSynchronizers ++ threadInfo.getLockedMonitors).map(_.lockString).toSet
    ThreadStackTrace(
      threadId = threadInfo.getThreadId,
      threadName = threadInfo.getThreadName,
      threadState = threadInfo.getThreadState,
      stackTrace = stackTrace,
      blockedByThreadId =
        if (threadInfo.getLockOwnerId < 0) None else Some(threadInfo.getLockOwnerId),
      blockedByLock = Option(threadInfo.getLockInfo).map(_.lockString).getOrElse(""),
      holdingLocks = heldLocks.toSeq)
  }
/**
* Convert all spark properties set in the given SparkConf to a sequence of java options.
*/
def sparkJavaOpts(conf: SparkConf, filterKey: (String => Boolean) = _ => true): Seq[String] = {
conf.getAll
.filter { case (k, _) => filterKey(k) }
.map { case (k, v) => s"-D$k=$v" }
}
/**
* Maximum number of retries when binding to a port before giving up.
*/
def portMaxRetries(conf: SparkConf): Int = {
val maxRetries = conf.getOption("spark.port.maxRetries").map(_.toInt)
if (conf.contains("spark.testing")) {
// Set a higher number of retries for tests...
maxRetries.getOrElse(100)
} else {
maxRetries.getOrElse(16)
}
}
/**
* Returns the user port to try when trying to bind a service. Handles wrapping and skipping
* privileged ports.
*/
def userPort(base: Int, offset: Int): Int = {
(base + offset - 1024) % (65536 - 1024) + 1024
}
  /**
   * Attempt to start a service on the given port, or fail after a number of attempts.
   * Each subsequent attempt uses 1 + the port used in the previous attempt (unless the port is 0).
   *
   * @param startPort The initial port to start the service on.
   * @param startService Function to start service on a given port.
   *                     This is expected to throw java.net.BindException on port collision.
   * @param conf A SparkConf used to get the maximum number of retries when binding to a port.
   * @param serviceName Name of the service.
   * @return (service: T, port: Int)
   */
  def startServiceOnPort[T](
      startPort: Int,
      startService: Int => (T, Int),
      conf: SparkConf,
      serviceName: String = ""): (T, Int) = {
    require(startPort == 0 || (1024 <= startPort && startPort < 65536),
      "startPort should be between 1024 and 65535 (inclusive), or 0 for a random free port.")
    val serviceString = if (serviceName.isEmpty) "" else s" '$serviceName'"
    val maxRetries = portMaxRetries(conf)
    // Up to maxRetries + 1 attempts; only bind collisions (isBindCollision) are retried,
    // any other exception propagates immediately.
    for (offset <- 0 to maxRetries) {
      // Do not increment port if startPort is 0, which is treated as a special port
      val tryPort = if (startPort == 0) {
        startPort
      } else {
        userPort(startPort, offset)
      }
      try {
        val (service, port) = startService(tryPort)
        logInfo(s"Successfully started service$serviceString on port $port.")
        return (service, port)
      } catch {
        case e: Exception if isBindCollision(e) =>
          if (offset >= maxRetries) {
            // Out of retries: rethrow as a BindException carrying an actionable message.
            val exceptionMessage = if (startPort == 0) {
              s"${e.getMessage}: Service$serviceString failed after " +
                s"$maxRetries retries (on a random free port)! " +
                s"Consider explicitly setting the appropriate binding address for " +
                s"the service$serviceString (for example spark.driver.bindAddress " +
                s"for SparkDriver) to the correct binding address."
            } else {
              s"${e.getMessage}: Service$serviceString failed after " +
                s"$maxRetries retries (starting from $startPort)! Consider explicitly setting " +
                s"the appropriate port for the service$serviceString (for example spark.ui.port " +
                s"for SparkUI) to an available port or increasing spark.port.maxRetries."
            }
            val exception = new BindException(exceptionMessage)
            // restore original stack trace
            exception.setStackTrace(e.getStackTrace)
            throw exception
          }
          if (startPort == 0) {
            // As startPort 0 is for a random free port, it is most possibly binding address is
            // not correct.
            logWarning(s"Service$serviceString could not bind on a random free port. " +
              "You may check whether configuring an appropriate binding address.")
          } else {
            logWarning(s"Service$serviceString could not bind on port $tryPort. " +
              s"Attempting port ${tryPort + 1}.")
          }
      }
    }
    // Should never happen
    throw new SparkException(s"Failed to start service$serviceString on port $startPort")
  }
  /**
   * Return whether the exception is caused by an address-port collision when binding.
   */
  // Walks the cause chain recursively; also unwraps Jetty's MultiException and recognizes
  // Netty's NativeIoException bind failures by message prefix.
  def isBindCollision(exception: Throwable): Boolean = {
    exception match {
      case e: BindException =>
        if (e.getMessage != null) {
          return true
        }
        isBindCollision(e.getCause)
      case e: MultiException =>
        e.getThrowables.asScala.exists(isBindCollision)
      case e: NativeIoException =>
        (e.getMessage != null && e.getMessage.startsWith("bind() failed: ")) ||
          isBindCollision(e.getCause)
      case e: Exception => isBindCollision(e.getCause)
      case _ => false
    }
  }
  /**
   * Configure a new log4j level on the root logger (affects logging process-wide).
   */
  def setLogLevel(l: org.apache.log4j.Level) {
    org.apache.log4j.Logger.getRootLogger().setLevel(l)
  }
/**
* Return the current system LD_LIBRARY_PATH name
*/
def libraryPathEnvName: String = {
if (isWindows) {
"PATH"
} else if (isMac) {
"DYLD_LIBRARY_PATH"
} else {
"LD_LIBRARY_PATH"
}
}
/**
* Return the prefix of a command that appends the given library paths to the
* system-specific library path environment variable. On Unix, for instance,
* this returns the string LD_LIBRARY_PATH="path1:path2:$LD_LIBRARY_PATH".
*/
def libraryPathEnvPrefix(libraryPaths: Seq[String]): String = {
val libraryPathScriptVar = if (isWindows) {
s"%${libraryPathEnvName}%"
} else {
"$" + libraryPathEnvName
}
val libraryPath = (libraryPaths :+ libraryPathScriptVar).mkString("\\"",
File.pathSeparator, "\\"")
val ampersand = if (Utils.isWindows) {
" &"
} else {
""
}
s"$libraryPathEnvName=$libraryPath$ampersand"
}
  /**
   * Return the value of a config either through the SparkConf or the Hadoop configuration.
   * The key is checked in the SparkConf before looking at any Hadoop configuration: if it is
   * set in SparkConf, that value wins regardless of whether we run on YARN. Only when the key
   * is not set in SparkConf and the master is "yarn" does the value come from the Hadoop
   * (YARN) configuration; otherwise the supplied default is returned.
   */
  def getSparkOrYarnConfig(conf: SparkConf, key: String, default: String): String = {
    if (conf.contains(key)) {
      conf.get(key, default)
    } else if (conf.get(SparkLauncher.SPARK_MASTER, null) == "yarn") {
      new YarnConfiguration(SparkHadoopUtil.get.newConfiguration(conf)).get(key, default)
    } else {
      default
    }
  }
/**
* Return a pair of host and port extracted from the `sparkUrl`.
*
* A spark url (`spark://host:port`) is a special URI that its scheme is `spark` and only contains
* host and port.
*
* @throws org.apache.spark.SparkException if sparkUrl is invalid.
*/
@throws(classOf[SparkException])
def extractHostPortFromSparkUrl(sparkUrl: String): (String, Int) = {
try {
val uri = new java.net.URI(sparkUrl)
val host = uri.getHost
val port = uri.getPort
if (uri.getScheme != "spark" ||
host == null ||
port < 0 ||
(uri.getPath != null && !uri.getPath.isEmpty) || // uri.getPath returns "" instead of null
uri.getFragment != null ||
uri.getQuery != null ||
uri.getUserInfo != null) {
throw new SparkException("Invalid master URL: " + sparkUrl)
}
(host, port)
} catch {
case e: java.net.URISyntaxException =>
throw new SparkException("Invalid master URL: " + sparkUrl, e)
}
}
  /**
   * Returns the current user name. This is the currently logged in user, unless that's been
   * overridden by the `SPARK_USER` environment variable.
   */
  def getCurrentUserName(): String = {
    // SPARK_USER takes precedence over the Hadoop UGI-reported login user.
    Option(System.getenv("SPARK_USER"))
      .getOrElse(UserGroupInformation.getCurrentUser().getShortUserName())
  }
val EMPTY_USER_GROUPS = Set.empty[String]
// Returns the groups to which the current user belongs.
def getCurrentUserGroups(sparkConf: SparkConf, username: String): Set[String] = {
val groupProviderClassName = sparkConf.get("spark.user.groups.mapping",
"org.apache.spark.security.ShellBasedGroupsMappingProvider")
if (groupProviderClassName != "") {
try {
val groupMappingServiceProvider = classForName(groupProviderClassName).newInstance.
asInstanceOf[org.apache.spark.security.GroupMappingServiceProvider]
val currentUserGroups = groupMappingServiceProvider.getGroups(username)
return currentUserGroups
} catch {
case e: Exception => logError(s"Error getting groups for user=$username", e)
}
}
EMPTY_USER_GROUPS
}
/**
* Split the comma delimited string of master URLs into a list.
* For instance, "spark://abc,def" becomes [spark://abc, spark://def].
*/
def parseStandaloneMasterUrls(masterUrls: String): Array[String] = {
masterUrls.stripPrefix("spark://").split(",").map("spark://" + _)
}
/** An identifier that backup masters use in their responses. */
val BACKUP_STANDALONE_MASTER_PREFIX = "Current state is not alive"
/** Return true if the response message is sent from a backup Master on standby. */
def responseFromBackup(msg: String): Boolean = {
msg.startsWith(BACKUP_STANDALONE_MASTER_PREFIX)
}
  /**
   * To avoid calling `Utils.getCallSite` for every single RDD we create in the body,
   * set a dummy call site that RDDs use instead. This is for performance optimization.
   */
  def withDummyCallSite[T](sc: SparkContext)(body: => T): T = {
    // Save the current call-site local properties so they can be restored afterwards.
    val oldShortCallSite = sc.getLocalProperty(CallSite.SHORT_FORM)
    val oldLongCallSite = sc.getLocalProperty(CallSite.LONG_FORM)
    try {
      // Empty strings serve as the dummy call site while `body` runs.
      sc.setLocalProperty(CallSite.SHORT_FORM, "")
      sc.setLocalProperty(CallSite.LONG_FORM, "")
      body
    } finally {
      // Restore the old ones here
      sc.setLocalProperty(CallSite.SHORT_FORM, oldShortCallSite)
      sc.setLocalProperty(CallSite.LONG_FORM, oldLongCallSite)
    }
  }
/**
* Return whether the specified file is a parent directory of the child file.
*/
@tailrec
def isInDirectory(parent: File, child: File): Boolean = {
if (child == null || parent == null) {
return false
}
if (!child.exists() || !parent.exists() || !parent.isDirectory()) {
return false
}
if (parent.equals(child)) {
return true
}
isInDirectory(parent, child.getParentFile)
}
/**
*
* @return whether it is local mode
*/
def isLocalMaster(conf: SparkConf): Boolean = {
val master = conf.get("spark.master", "")
master == "local" || master.startsWith("local[")
}
  /**
   * Return whether dynamic allocation is enabled in the given conf.
   */
  // Requires a non-local master unless spark.dynamicAllocation.testing overrides that check.
  def isDynamicAllocationEnabled(conf: SparkConf): Boolean = {
    val dynamicAllocationEnabled = conf.getBoolean("spark.dynamicAllocation.enabled", false)
    dynamicAllocationEnabled &&
      (!isLocalMaster(conf) || conf.getBoolean("spark.dynamicAllocation.testing", false))
  }
  /**
   * Return the initial number of executors for dynamic allocation.
   */
  // Computed as max(min executors, configured initial executors, spark.executor.instances).
  // Inconsistent settings are only warned about, never corrected.
  def getDynamicAllocationInitialExecutors(conf: SparkConf): Int = {
    if (conf.get(DYN_ALLOCATION_INITIAL_EXECUTORS) < conf.get(DYN_ALLOCATION_MIN_EXECUTORS)) {
      logWarning(s"${DYN_ALLOCATION_INITIAL_EXECUTORS.key} less than " +
        s"${DYN_ALLOCATION_MIN_EXECUTORS.key} is invalid, ignoring its setting, " +
          "please update your configs.")
    }
    if (conf.get(EXECUTOR_INSTANCES).getOrElse(0) < conf.get(DYN_ALLOCATION_MIN_EXECUTORS)) {
      logWarning(s"${EXECUTOR_INSTANCES.key} less than " +
        s"${DYN_ALLOCATION_MIN_EXECUTORS.key} is invalid, ignoring its setting, " +
          "please update your configs.")
    }
    val initialExecutors = Seq(
      conf.get(DYN_ALLOCATION_MIN_EXECUTORS),
      conf.get(DYN_ALLOCATION_INITIAL_EXECUTORS),
      conf.get(EXECUTOR_INSTANCES).getOrElse(0)).max
    logInfo(s"Using initial executors = $initialExecutors, max of " +
      s"${DYN_ALLOCATION_INITIAL_EXECUTORS.key}, ${DYN_ALLOCATION_MIN_EXECUTORS.key} and " +
        s"${EXECUTOR_INSTANCES.key}")
    initialExecutors
  }
def tryWithResource[R <: Closeable, T](createResource: => R)(f: R => T): T = {
val resource = createResource
try f.apply(resource) finally resource.close()
}
/**
* Returns a path of temporary file which is in the same directory with `path`.
*/
def tempFileWith(path: File): File = {
new File(path.getAbsolutePath + "." + UUID.randomUUID())
}
/**
* Returns the name of this JVM process. This is OS dependent but typically (OSX, Linux, Windows),
* this is formatted as PID@hostname.
*/
def getProcessName(): String = {
ManagementFactory.getRuntimeMXBean().getName()
}
  /**
   * Utility function that should be called early in `main()` for daemons to set up some common
   * diagnostic state: logs the process name and registers the logger for signal handling.
   */
  def initDaemon(log: Logger): Unit = {
    log.info(s"Started daemon with process name: ${Utils.getProcessName()}")
    SignalUtils.registerLogger(log)
  }
/**
* Return the jar files pointed by the "spark.jars" property. Spark internally will distribute
* these jars through file server. In the YARN mode, it will return an empty list, since YARN
* has its own mechanism to distribute jars.
*/
def getUserJars(conf: SparkConf): Seq[String] = {
val sparkJars = conf.getOption("spark.jars")
sparkJars.map(_.split(",")).map(_.filter(_.nonEmpty)).toSeq.flatten
}
/**
* Return the local jar files which will be added to REPL's classpath. These jar files are
* specified by --jars (spark.jars) or --packages, remote jars will be downloaded to local by
* SparkSubmit at first.
*/
def getLocalUserJarsForShell(conf: SparkConf): Seq[String] = {
val localJars = conf.getOption("spark.repl.local.jars")
localJars.map(_.split(",")).map(_.filter(_.nonEmpty)).toSeq.flatten
}
  // Replacement text substituted for redacted configuration values.
  private[spark] val REDACTION_REPLACEMENT_TEXT = "*********(redacted)"
  /**
   * Redact the sensitive values in the given map. If a map key matches the redaction pattern then
   * its value is replaced with a dummy text. The pattern is read from the
   * SECRET_REDACTION_PATTERN config entry.
   */
  def redact(conf: SparkConf, kvs: Seq[(String, String)]): Seq[(String, String)] = {
    val redactionPattern = conf.get(SECRET_REDACTION_PATTERN)
    redact(redactionPattern, kvs)
  }
/**
* Redact the sensitive values in the given map. If a map key matches the redaction pattern then
* its value is replaced with a dummy text.
*/
def redact(regex: Option[Regex], kvs: Seq[(String, String)]): Seq[(String, String)] = {
regex match {
case None => kvs
case Some(r) => redact(r, kvs)
}
}
/**
* Redact the sensitive information in the given string.
*/
def redact(regex: Option[Regex], text: String): String = {
regex match {
case None => text
case Some(r) =>
if (text == null || text.isEmpty) {
text
} else {
r.replaceAllIn(text, REDACTION_REPLACEMENT_TEXT)
}
}
}
private def redact(redactionPattern: Regex, kvs: Seq[(String, String)]): Seq[(String, String)] = {
// If the sensitive information regex matches with either the key or the value, redact the value
// While the original intent was to only redact the value if the key matched with the regex,
// we've found that especially in verbose mode, the value of the property may contain sensitive
// information like so:
// "sun.java.command":"org.apache.spark.deploy.SparkSubmit ... \\
// --conf spark.executorEnv.HADOOP_CREDSTORE_PASSWORD=secret_password ...
//
// And, in such cases, simply searching for the sensitive information regex in the key name is
// not sufficient. The values themselves have to be searched as well and redacted if matched.
// This does mean we may be accounting more false positives - for example, if the value of an
// arbitrary property contained the term 'password', we may redact the value from the UI and
// logs. In order to work around it, user would have to make the spark.redaction.regex property
// more specific.
kvs.map { case (key, value) =>
redactionPattern.findFirstIn(key)
.orElse(redactionPattern.findFirstIn(value))
.map { _ => (key, REDACTION_REPLACEMENT_TEXT) }
.getOrElse((key, value))
}
}
  /**
   * Looks up the redaction regex from within the key value pairs and uses it to redact the rest
   * of the key value pairs. No care is taken to make sure the redaction property itself is not
   * redacted. So theoretically, the property itself could be configured to redact its own value
   * when printing.
   *
   * @param kvs key/value pairs that may themselves carry the redaction pattern under
   *            SECRET_REDACTION_PATTERN.key.
   */
  def redact(kvs: Map[String, String]): Seq[(String, String)] = {
    // Use the pattern carried in the map itself, falling back to the config entry's default.
    val redactionPattern = kvs.getOrElse(
      SECRET_REDACTION_PATTERN.key,
      SECRET_REDACTION_PATTERN.defaultValueString
    ).r
    redact(redactionPattern, kvs.toArray)
  }
def stringToSeq(str: String): Seq[String] = {
str.split(",").map(_.trim()).filter(_.nonEmpty)
}
  /**
   * Create instances of extension classes.
   *
   * The classes in the given list must:
   * - Be sub-classes of the given base class.
   * - Provide either a no-arg constructor, or a 1-arg constructor that takes a SparkConf.
   *
   * The constructors are allowed to throw "UnsupportedOperationException" if the extension does not
   * want to be registered; this allows the implementations to check the Spark configuration (or
   * other state) and decide they do not need to be added. A log message is printed in that case.
   * Other exceptions are bubbled up.
   */
  def loadExtensions[T](extClass: Class[T], classes: Seq[String], conf: SparkConf): Seq[T] = {
    classes.flatMap { name =>
      try {
        val klass = classForName(name)
        require(extClass.isAssignableFrom(klass),
          s"$name is not a subclass of ${extClass.getName()}.")
        // Prefer the 1-arg (SparkConf) constructor; fall back to the no-arg one. If neither
        // exists, getConstructor() throws NoSuchMethodException, handled below.
        val ext = Try(klass.getConstructor(classOf[SparkConf])) match {
          case Success(ctor) =>
            ctor.newInstance(conf)
          case Failure(_) =>
            klass.getConstructor().newInstance()
        }
        Some(ext.asInstanceOf[T])
      } catch {
        case _: NoSuchMethodException =>
          throw new SparkException(
            s"$name did not have a zero-argument constructor or a" +
            " single-argument constructor that accepts SparkConf. Note: if the class is" +
            " defined inside of another Scala class, then its constructors may accept an" +
            " implicit parameter that references the enclosing class; in this case, you must" +
            " define the class as a top-level class in order to prevent this extra" +
            " parameter from breaking Spark's ability to find a valid constructor.")
        case e: InvocationTargetException =>
          // Reflection wraps constructor failures; unwrap to apply the opt-out protocol.
          e.getCause() match {
            case uoe: UnsupportedOperationException =>
              // The extension declined to register itself; skip it without failing.
              logDebug(s"Extension $name not being initialized.", uoe)
              logInfo(s"Extension $name not being initialized.")
              None
            case null => throw e
            case cause => throw cause
          }
      }
    }
  }
/**
* Check the validity of the given Kubernetes master URL and return the resolved URL. Prefix
* "k8s://" is appended to the resolved URL as the prefix is used by KubernetesClusterManager
* in canCreate to determine if the KubernetesClusterManager should be used.
*/
def checkAndGetK8sMasterUrl(rawMasterURL: String): String = {
require(rawMasterURL.startsWith("k8s://"),
"Kubernetes master URL must start with k8s://.")
val masterWithoutK8sPrefix = rawMasterURL.substring("k8s://".length)
// To handle master URLs, e.g., k8s://host:port.
if (!masterWithoutK8sPrefix.contains("://")) {
val resolvedURL = s"https://$masterWithoutK8sPrefix"
logInfo("No scheme specified for kubernetes master URL, so defaulting to https. Resolved " +
s"URL is $resolvedURL.")
return s"k8s://$resolvedURL"
}
val masterScheme = new URI(masterWithoutK8sPrefix).getScheme
val resolvedURL = masterScheme.toLowerCase match {
case "https" =>
masterWithoutK8sPrefix
case "http" =>
logWarning("Kubernetes master URL uses HTTP instead of HTTPS.")
masterWithoutK8sPrefix
case null =>
val resolvedURL = s"https://$masterWithoutK8sPrefix"
logInfo("No scheme specified for kubernetes master URL, so defaulting to https. Resolved " +
s"URL is $resolvedURL.")
resolvedURL
case _ =>
throw new IllegalArgumentException("Invalid Kubernetes master scheme: " + masterScheme)
}
s"k8s://$resolvedURL"
}
}
private[util] object CallerContext extends Logging {
  /**
   * Whether Hadoop caller-context propagation can be used: requires the
   * "hadoop.caller.context.enabled" Hadoop setting to be true AND the
   * org.apache.hadoop.ipc.CallerContext classes (added in Hadoop 2.8) to be on the classpath.
   * Computed once at first access.
   */
  val callerContextSupported: Boolean = {
    SparkHadoopUtil.get.conf.getBoolean("hadoop.caller.context.enabled", false) && {
      try {
        // Probe for the Hadoop 2.8+ API by class name so older Hadoop versions still work.
        Utils.classForName("org.apache.hadoop.ipc.CallerContext")
        Utils.classForName("org.apache.hadoop.ipc.CallerContext$Builder")
        true
      } catch {
        case _: ClassNotFoundException =>
          // Expected on Hadoop < 2.8; silently disable the feature.
          false
        case NonFatal(e) =>
          logWarning("Fail to load the CallerContext class", e)
          false
      }
    }
  }
}
/**
 * An utility class used to set up Spark caller contexts to HDFS and Yarn. The `context` will be
 * constructed by parameters passed in.
 * When Spark applications run on Yarn and HDFS, its caller contexts will be written into Yarn RM
 * audit log and hdfs-audit.log. That can help users to better diagnose and understand how
 * specific applications impacting parts of the Hadoop system and potential problems they may be
 * creating (e.g. overloading NN). As HDFS mentioned in HDFS-9184, for a given HDFS operation, it's
 * very helpful to track which upper level job issues it.
 *
 * @param from who sets up the caller context (TASK, CLIENT, APPMASTER)
 *
 * The parameters below are optional:
 * @param upstreamCallerContext caller context the upstream application passes in
 * @param appId id of the app this task belongs to
 * @param appAttemptId attempt id of the app this task belongs to
 * @param jobId id of the job this task belongs to
 * @param stageId id of the stage this task belongs to
 * @param stageAttemptId attempt id of the stage this task belongs to
 * @param taskId task id
 * @param taskAttemptNumber task attempt id
 */
private[spark] class CallerContext(
   from: String,
   upstreamCallerContext: Option[String] = None,
   appId: Option[String] = None,
   appAttemptId: Option[String] = None,
   jobId: Option[Int] = None,
   stageId: Option[Int] = None,
   stageAttemptId: Option[Int] = None,
   taskId: Option[Long] = None,
   taskAttemptNumber: Option[Int] = None) extends Logging {
  // Encode all available identifiers into one underscore-delimited string, e.g.
  // "SPARK_TASK_appId_1_JId_2_SId_3_0_TId_4_0"; absent Options contribute nothing.
  private val context = prepareContext("SPARK_" +
    from +
    appId.map("_" + _).getOrElse("") +
    appAttemptId.map("_" + _).getOrElse("") +
    jobId.map("_JId_" + _).getOrElse("") +
    stageId.map("_SId_" + _).getOrElse("") +
    stageAttemptId.map("_" + _).getOrElse("") +
    taskId.map("_TId_" + _).getOrElse("") +
    taskAttemptNumber.map("_" + _).getOrElse("") +
    upstreamCallerContext.map("_" + _).getOrElse(""))
  /**
   * Truncates the context string to Hadoop's configured maximum, logging when truncation
   * happens so the lost suffix is at least visible in Spark's own logs.
   */
  private def prepareContext(context: String): String = {
    // The default max size of Hadoop caller context is 128
    lazy val len = SparkHadoopUtil.get.conf.getInt("hadoop.caller.context.max.size", 128)
    if (context == null || context.length <= len) {
      context
    } else {
      val finalContext = context.substring(0, len)
      logWarning(s"Truncated Spark caller context from $context to $finalContext")
      finalContext
    }
  }
  /**
   * Set up the caller context [[context]] by invoking Hadoop CallerContext API of
   * [[org.apache.hadoop.ipc.CallerContext]], which was added in hadoop 2.8.
   */
  def setCurrentContext(): Unit = {
    if (CallerContext.callerContextSupported) {
      try {
        // Use reflection so this compiles against Hadoop versions older than 2.8;
        // equivalent to: CallerContext.setCurrent(new CallerContext.Builder(context).build())
        val callerContext = Utils.classForName("org.apache.hadoop.ipc.CallerContext")
        val builder = Utils.classForName("org.apache.hadoop.ipc.CallerContext$Builder")
        val builderInst = builder.getConstructor(classOf[String]).newInstance(context)
        val hdfsContext = builder.getMethod("build").invoke(builderInst)
        callerContext.getMethod("setCurrent", callerContext).invoke(null, hdfsContext)
      } catch {
        case NonFatal(e) =>
          // Best-effort: a failure to tag the context must not fail the caller.
          logWarning("Fail to set Spark caller context", e)
      }
    }
  }
}
/**
 * A utility class to redirect the child process's stdout or stderr.
 *
 * @param in           stream to read from (typically the child's stdout/stderr).
 * @param out          stream to copy the bytes into.
 * @param name         thread name, for diagnostics.
 * @param propagateEof when true, `out` is closed once `in` reaches end-of-stream.
 */
private[spark] class RedirectThread(
    in: InputStream,
    out: OutputStream,
    name: String,
    propagateEof: Boolean = false)
  extends Thread(name) {
  // Daemon so a pending copy never keeps the JVM alive.
  setDaemon(true)
  override def run() {
    // IOExceptions (e.g. the child dying and closing its end) terminate the copy silently.
    scala.util.control.Exception.ignoring(classOf[IOException]) {
      // FIXME: We copy the stream on the level of bytes to avoid encoding problems.
      Utils.tryWithSafeFinally {
        val buf = new Array[Byte](1024)
        var len = in.read(buf)
        while (len != -1) {
          out.write(buf, 0, len)
          // Flush per chunk so output appears promptly rather than on buffer boundaries.
          out.flush()
          len = in.read(buf)
        }
      } {
        if (propagateEof) {
          out.close()
        }
      }
    }
  }
}
/**
 * An [[OutputStream]] that retains only the most recent `sizeInBytes` bytes (10 KB by default)
 * written to it, overwriting the oldest data once the buffer wraps around. The current contents,
 * in write order, can be read back with `toString`.
 */
private[spark] class CircularBuffer(sizeInBytes: Int = 10240) extends java.io.OutputStream {
  // Backing storage; `writeIndex` is the next slot to fill, and `wrapped` records whether
  // we have written past the end of the array at least once.
  private val storage = new Array[Byte](sizeInBytes)
  private var writeIndex: Int = 0
  private var wrapped = false
  def write(input: Int): Unit = {
    storage(writeIndex) = input.toByte
    writeIndex += 1
    if (writeIndex == storage.length) {
      writeIndex = 0
      wrapped = true
    }
  }
  override def toString: String = {
    if (wrapped) {
      // Stitch [writeIndex..end) ++ [0..writeIndex) back into chronological order.
      val ordered = new Array[Byte](sizeInBytes)
      System.arraycopy(storage, writeIndex, ordered, 0, storage.length - writeIndex)
      System.arraycopy(storage, 0, ordered, storage.length - writeIndex, writeIndex)
      new String(ordered, StandardCharsets.UTF_8)
    } else {
      // Buffer never filled: only the first writeIndex bytes are valid.
      new String(storage, 0, writeIndex, StandardCharsets.UTF_8)
    }
  }
}
| brad-kaiser/spark | core/src/main/scala/org/apache/spark/util/Utils.scala | Scala | apache-2.0 | 104,424 |
// Scala traits bundle an interface with (optional) implementation, similar to Ruby
// mixins or Java interfaces with default methods.
class Person(val name: String)
trait Nice {
  // Prints a fixed greeting to stdout.
  def greet() = println("Howdily doodily.")
}
// A Character is a Person that also mixes in the Nice trait's behavior.
class Character(override val name:String) extends Person(name) with Nice
// Script-style top-level value (run this file as a Scala script, not compiled).
val flanders = new Character("Ned")
package com.twitter.finatra.http.tests.integration.tweetexample.main.services.admin
import com.twitter.finatra.http.tests.integration.tweetexample.main.services.TweetsRepository
import javax.inject.Inject
/**
 * Test service that looks up users by id through the injected [[DatabaseClient]].
 * The [[TweetsRepository]] is injected only to verify that constructor injection
 * wires it in; it is asserted non-null and otherwise unused here.
 */
class UserService @Inject() (database: DatabaseClient, tweetsRepository: TweetsRepository) {
  assert(tweetsRepository != null)
  // Returns whatever the backing database stores under `id`.
  def get(id: String): String = {
    database.get(id)
  }
}
| twitter/finatra | http-server/src/test/scala/com/twitter/finatra/http/tests/integration/tweetexample/main/services/admin/UserService.scala | Scala | apache-2.0 | 397 |
package com.twitter.inject
import org.scalacheck.Arbitrary
import org.scalatest.funsuite.AnyFunSuite
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks
import scala.reflect.runtime.universe._
// Marker traits used only to build an intersection type (A with B) whose runtime
// class cannot be materialized, exercising TypeUtils' fallback path.
trait A
trait B
class TypeUtilsTest extends AnyFunSuite with ScalaCheckDrivenPropertyChecks {
  test("asManifest handles AnyVal/Any/Null/Nothing") {
    // Property check over arbitrary AnyVal instances, plus the special bottom/top types.
    forAll(Arbitrary.arbAnyVal.arbitrary) { anyVal =>
      assert(manifestedTypesEquals(anyVal))
    }
    assert(TypeUtils.asManifest[AnyVal] == manifest[AnyVal])
    assert(TypeUtils.asManifest[Any] == manifest[Any])
    assert(TypeUtils.asManifest[Null] == manifest[Null])
    assert(TypeUtils.asManifest[Nothing] == manifest[Nothing])
  }
  test("asManifest handles NoClassDefFound exceptions") {
    val t = typeTag[A with B]
    // Demonstrate that resolving the runtime class of the intersection type fails...
    intercept[NoClassDefFoundError] {
      t.mirror.runtimeClass(t.tpe)
    }
    // ...and that TypeUtils degrades gracefully to manifest[Any] instead of throwing.
    assert(TypeUtils.asManifest[A with B] == manifest[Any])
  }
  test("asTypeTag handles classes") {
    typeTagEquals(classOf[String], TypeUtils.asTypeTag(classOf[String]))
    typeTagEquals(classOf[AnyVal], TypeUtils.asTypeTag(classOf[AnyVal]))
    typeTagEquals(classOf[Any], TypeUtils.asTypeTag(classOf[Any]))
    typeTagEquals(classOf[Null], TypeUtils.asTypeTag(classOf[Null]))
    typeTagEquals(classOf[Nothing], TypeUtils.asTypeTag(classOf[Nothing]))
  }
  // True when TypeUtils.asManifest agrees with the compiler-provided manifest for T.
  def manifestedTypesEquals[T: TypeTag: Manifest](a: T): Boolean = {
    TypeUtils.asManifest[T] == manifest[T]
  }
  // True when the tag's runtime class is assignment-compatible with the given class.
  // NOTE(review): the calls above discard this Boolean instead of asserting it — the
  // "asTypeTag handles classes" test would pass even if these returned false; confirm intent.
  def typeTagEquals[T](clazz: Class[T], typeTag: TypeTag[T]): Boolean = {
    clazz.isAssignableFrom(typeTag.mirror.runtimeClass(typeTag.tpe.typeSymbol.asClass))
  }
}
| twitter/util | util-inject/src/test/scala/com/twitter/util/inject/TypeUtilsTest.scala | Scala | apache-2.0 | 1,623 |
import test.BigFloat
// Negative compilation test: this file is EXPECTED to fail to compile. The
// "// error:" marker on the line below flags where the compiler must report the
// error (presumably enforced by the Dotty neg-test harness — do not "fix" this).
object Test extends App {
  val x: BigFloat = 1234.45e3333333333 // error: exponent too large
}
package play.api.libs.typedmap
/**
 * Binds a [[TypedKey]] to a value of the key's type. Entries like this can be placed
 * into a [[TypedMap]] or any other container of type-indexed values.
 *
 * @param key   The key for this entry.
 * @param value The value for this entry.
 * @tparam A The type of the value.
 */
final case class TypedEntry[A](key: TypedKey[A], value: A) {
  /** Returns this entry as an ordinary key/value pair. */
  def toPair: (TypedKey[A], A) = key -> value
}
| aradchykov/playframework | framework/src/play/src/main/scala/play/api/libs/typedmap/TypedEntry.scala | Scala | apache-2.0 | 474 |
package edu.gemini.pit.ui.editor
import edu.gemini.model.p1.immutable._
import javax.swing.BorderFactory
import edu.gemini.pit.ui.util.{Platform, StdModalEditor}
import swing._
import Swing._
import BorderPanel.Position._
object VisitorSelector {
  /**
   * Opens the modal visitor-selection dialog.
   *
   * @param is      investigators paired with whether each is currently marked as a visitor.
   * @param canEdit when false the list is shown read-only.
   * @param parent  UI element the dialog is positioned relative to.
   */
  def open(is:List[(Investigator, Boolean)], canEdit: Boolean, parent:UIElement) = new VisitorSelector(is, canEdit).open(parent)
}
/** Modal dialog that lets the user multi-select which investigators will visit the site. */
class VisitorSelector private (is:List[(Investigator, Boolean)], canEdit: Boolean) extends StdModalEditor[List[Investigator]]("Select Visitors") {
  override def header = new Label("Select site visitors:") {
    border = BorderFactory.createEmptyBorder(0, 0, 4, 0)
    horizontalAlignment = Alignment.Left
  }
  object editor extends BorderPanel {
    add(new ScrollPane(list), Center)
    add(footer, South)
    object list extends ListView[Investigator](is.map(_._1)) {
      enabled = canEdit
      preferredSize = (200, 200)
      // Pre-select the indices whose Boolean flag was true in the input list.
      is.zipWithIndex.filter(_._1._2).map(_._2).foreach(selection.indices += _)
    }
    object footer extends Label {
      // Platform-appropriate modifier key hint for multi-selection.
      val mod = if (Platform.IS_MAC) "Command" else "Ctrl"
      text = mod + "+Click to select/deselect."
      border = BorderFactory.createEmptyBorder(4, 0, 0, 0)
      horizontalAlignment = Alignment.Left
    }
  }
  // The dialog's result: the investigators selected when it closes.
  def value = editor.list.selection.items.toList
}
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.functions._
import scala.collection.mutable.WrappedArray
import org.apache.spark.sql._
import scala.util.parsing.json.JSONArray
import scala.util.parsing.json.JSONObject
import scala.collection.immutable.HashMap
/**
 * Scala script used to analyze the Yelp business data.
 * Please note that this script is built using various spark-scala API calls
 * made in the Spark Shell running on windows.
 *
 * NOTE(review): input/output paths are hard-coded Windows paths, and several
 * intermediate vals below are computed but never written out — this reads like a
 * transcript of an interactive exploration session; confirm before productionizing.
 */
object BusinessByDemographics {
  def main(args: Array[String]) {
    val sparkConf = new SparkConf().setAppName("BusinessByDemographics")
    val sc = new SparkContext(sparkConf)
    val sqlContext = new SQLContext(sc)
    //val businesses = sqlContext.read.json("C:/casnc/amit/installs/data/sample.json")
    val businesses = sqlContext.read.json("C:/amit/yelp/data/yelp_academic_dataset_business.json")
    // One row per (business, category): explode the categories array into a `category` column.
    val busWithCategory = businesses.select("business_id","categories","city","state","latitude","longitude","stars").
      explode("categories", "category") {
      categories: WrappedArray[String] => categories.mkString(",").split(",") }
    // List down distinct restaurant categories
    val distinctRestCategories = busWithCategory.distinct()
    // Filter type of restaurants Chinese,Indian, American, Mexican etc.
    // Overall Restaurants and average star rating per state
    val restaurants = busWithCategory.filter(upper($"category").equalTo("Restaurants".toUpperCase())).drop("categories")
    val restaurantsAvgStarsPerState = restaurants.groupBy($"state").avg("stars").select($"state",round($"avg(stars)",2).alias("avg_star"))
    val fastfoodRests = busWithCategory.filter(upper($"category").equalTo("fast food".toUpperCase()))
    val fastfoodRestAvgStarsPerState = fastfoodRests.groupBy($"state").avg("stars").select($"state",round($"avg(stars)",2).alias("avg_star"))
    val chineseRests = busWithCategory.filter(upper($"category").equalTo("chinese".toUpperCase())).drop("categories")
    val chineseRestsAvgStarsPerState = chineseRests.groupBy($"state").avg("stars").select($"state",round($"avg(stars)",2).alias("avg_star"))
    // Store the average calculated above in "state-cuisinetype-avg-rating.json"
    val cuisinetypeAvgRatingPerstate = sqlContext.read.json("C:/amit/yelp/data/state-cuisinetype-avg-rating.json").orderBy("state")
    // Merge the different cuisines for a State together in a JSON Array
    val stateCensusData = sqlContext.read.json("C:/amit/yelp/data/state-census-data.json")
    val requiredCensusData = stateCensusData.select("state","percentasian","percentblack","percentwhite")
    val censusWithCuisine = requiredCensusData.join(cuisinetypeAvgRatingPerstate,"state").orderBy("state")
    // Build per-state Sankey-diagram rows: one JSON object keyed by state, whose value is a
    // JSON array of [value, category, state, label] quadruples for cuisine rating and
    // asian/black/white population percentages.
    val censusWithCuisineSankeyData = censusWithCuisine.select("state","percentasian","percentblack","percentwhite","avg_rating","cuisine_type").map( row => {
      val cuisineType = JSONArray(List(((row.getDouble(4)*100)/5).toString(),row.getString(5),row.getString(0),"Average Rating".concat(row.getString(4))))
      val asian = JSONArray(List(row.getString(0),"percentasian",row.getString(1),"Percent Asian"))
      val black = JSONArray(List(row.getString(0),"percentblack",row.getString(2),"Percent Black"))
      val white = JSONArray(List(row.getString(0),"percentwhite",row.getString(3),"Percent White"))
      val sankeydata = JSONArray(List(cuisineType,asian,black,white))
      val jsonobj = JSONObject(Map(row.getString(0)->sankeydata))
      jsonobj
    })
    censusWithCuisineSankeyData.coalesce(1).saveAsTextFile("C:/amit/yelp/data/percentperstate")
    /* Schema for the joined DataFrame
      root
      |-- state: string (nullable = true)
      |-- avg_rating: string (nullable = true)
      |-- cuisine_type: string (nullable = true)
      |-- percentasian: string (nullable = true)
      |-- percentblack: string (nullable = true)
      |-- percentwhite: string (nullable = true)
      |-- state_code: string (nullable = true)
      |-- totalasian: string (nullable = true)
      |-- totalblack: string (nullable = true)
      |-- totalpop: string (nullable = true)
      |-- totalwhites: string (nullable = true)
    */
    // NOTE(review): this join result is never used or persisted.
    val cuisineRatingWithCenusData = cuisinetypeAvgRatingPerstate.join(stateCensusData,"state")
  }
}
/*
Output of Average rating per state
Average star rating for "Chinese"
['US-NC', 3.22],
['US-NV', 3.29],
['US-AZ', 3.32],
['US-IL', 3.41],
['US-PA', 3.28],
['US-WI', 3.34],
['US-SC', 3.73]
Average star rating for "Restaurants"
['US-NC',3.41],
['US-TX', 4.0],
['US-NM', 3.0],
['US-NV',3.41],
['US-AZ',3.42],
['US-NW', 4.5],
['US-IL',3.35],
['US-PA',3.56],
['US-WI',3.45],
['US-SC',3.34]
Average star rating for "Fast Food"
['US- NC', 3.04],
['US- NV', 2.89],
['US- AZ', 2.92],
['US- NW', 4.5],
['US- IL', 2.68],
['US- PA', 2.89],
['US- WI', 3.02],
['US- SC', 2.94]
*/ | amitlondhe/spark-app | spark-scala/BusinessByDemographics.scala | Scala | mit | 4,922 |
package ecdc.crypto
import java.io.{ InputStreamReader, InputStream }
import java.security.{ Security, PrivateKey }
import org.bouncycastle.asn1.DERNull
import org.bouncycastle.asn1.pkcs.{ RSAPrivateKey, PKCSObjectIdentifiers, PrivateKeyInfo }
import org.bouncycastle.asn1.x509.AlgorithmIdentifier
import org.bouncycastle.jce.provider.BouncyCastleProvider
import org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter
import org.bouncycastle.openssl.{ PEMKeyPair, PEMParser }
/**
 * Mixin that lazily parses an RSA private key from a PEM stream supplied by the
 * implementing class, using the BouncyCastle provider.
 */
trait SecretKeyProvider {
  // NOTE(review): runs in the trait constructor, i.e. once per mixed-in instance.
  // The JCA tolerates repeated registration, but a static init site would be clearer.
  Security.addProvider(new BouncyCastleProvider())
  // Parsed once on first access; the PEM stream is consumed at that point.
  final lazy val key: PrivateKey = {
    val pemParser = new PEMParser(new InputStreamReader(keyAsStream))
    // Assumes the stream's first PEM object is an RSA key pair — a cast failure here
    // means the PEM held something else (e.g. a certificate); TODO confirm inputs.
    val keyPair = pemParser.readObject().asInstanceOf[PEMKeyPair]
    val privateKeyInfoAsn1 = keyPair.getPrivateKeyInfo.parsePrivateKey()
    val converter = new JcaPEMKeyConverter().setProvider("BC")
    // Re-wrap the raw RSA structure with an explicit rsaEncryption algorithm identifier.
    val privateKeyInfo = new PrivateKeyInfo(new AlgorithmIdentifier(PKCSObjectIdentifiers.rsaEncryption, DERNull.INSTANCE),
      RSAPrivateKey.getInstance(privateKeyInfoAsn1))
    converter.getPrivateKey(privateKeyInfo)
  }
  /** Implementors supply the PEM-encoded private key bytes. */
  protected def keyAsStream: InputStream
}
| benfrasersimpson/ecdc | src/crypto/src/main/scala/ecdc/crypto/SecretKeyProvider.scala | Scala | isc | 1,131 |
package breeze.linalg
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import org.scalatest._
import org.scalatest.junit._
import org.scalatest.prop._
import org.junit.runner.RunWith
import breeze.math.Complex
import breeze.numerics._
import org.scalatest.matchers.ShouldMatchers
import breeze.util.DoubleImplicits
@RunWith(classOf[JUnitRunner])
class DenseMatrixTest extends FunSuite with Checkers with Matchers with DoubleImplicits {
  // --- Slicing and transpose: these tests rely on slices being live views that
  // write through to the backing matrix, so statement order matters. ---
  test("Slicing") {
    val m = DenseMatrix((0,1,2),
                        (3,4,5))
    // slice sub-matrix
    val s1 = m(0 to 1, 1 to 2)
    assert(s1 === DenseMatrix((1,2),(4,5)))
    s1 += 1
    assert(m === DenseMatrix((0,2,3),(3,5,6)))
    // slice row
    val s2 = m(0, ::)
    assert(s2 === DenseVector(0,2,3).t)
    s2 *= 2
    assert(m === DenseMatrix((0,4,6),(3,5,6)))
    // slice column
    val s3 : DenseVector[Int] = m(::, 1)
    assert(s3 === DenseVector(4,5))
    s3 -= 1
    assert(m === DenseMatrix((0,3,6),(3,4,6)))
    // slice rows
    val s4 = m(1 to 1, ::)
    assert(s4 === DenseMatrix((3,4,6)))
    val mbig = DenseMatrix(
      (0,1,2,3,4,5),
      (3,4,5,6,7,8),
      (3,4,5,6,7,8),
      (5,4,5,9,7,8)
    )
    // strided column slice (every second column of 0..2)
    val sbig1 = mbig(::, 0 to 2 by 2)
    assert(sbig1 === DenseMatrix(
      (0,2),
      (3,5),
      (3,5),
      (5,5)
    ))
    // slice columns
    val s5 = m(::, 1 to 2)
    assert(s5 === DenseMatrix((3,6),(4,6)))
    // slice part of a row
    val s6a = m(0, 1 to 2)
    s6a += 1
    assert(m === DenseMatrix((0,4,7),(3,4,6)))
    // slice part of a column
    val s7a = m(0 to 1, 0)
    s7a += 2
    val s7b = m(0 to 1,0)
    s7b += 1
    assert(m === DenseMatrix((3,4,7),(6,4,6)))
  }
  test("Multiple Slicing") {
    // Column-major fill: element (r, c) of m is 1 + r + 6*c.
    val m = new DenseMatrix[Int](6, 6, (1 to 36).toArray)
    val slice1 = m(1 to 3, 1 to 3)
    assert(slice1(::, 1) === DenseVector(14, 15, 16))
    assert(slice1(::, 1 to 2) === DenseMatrix((14, 20), (15, 21), (16, 22)))
  }
  test("Transpose") {
    val m = DenseMatrix((1,2,3),(4,5,6))
    // check that the double transpose gives us back the original
    assert(m.t.t == m)
    // check static type and write-through
    val t = m.t
    assert(t === DenseMatrix((1,4),(2,5),(3,6)))
    t(0,1) = 0
    assert(m === DenseMatrix((1,2,3),(0,5,6)))
  }
  test("Sliced Transpose") {
    val m = DenseMatrix((0, 1, 2),
                        (3, 4, 5))
    // column of original looks same as row of tranpose
    val sm1 = m(::, 1)
    val smt1 = m.t(1, ::)
    assert(sm1.t === smt1)
    val sm2 = m(::, 2)
    val smt2 = m.t(2, ::)
    assert(sm2.t === smt2)
    val sm1c = m(1, ::)
    val smt1c = m.t(::, 1)
    assert(sm1c === smt1c.t)
    val sm2c = m(0, ::)
    val smt2c = m.t(::, 0)
    assert(sm2c === smt2c.t)
    // slice sub-matrix
    val s1 = m(0 to 1, 1 to 2)
    assert(s1 === DenseMatrix((1, 2), (4, 5)))
    val t1 = s1.t
    assert(t1 === DenseMatrix((1, 4), (2, 5)))
    val t1b = m.t(1 to 2, 0 to 1)
    assert(t1 === t1b)
    val s2 = m(0 to 1, 1)
    val t2 = m.t(1, 0 to 1)
    assert(s2 === t2.t)
    val s3 = m(0, 0 to 1)
    val t3 = m.t(0 to 1, 0)
    assert(s3.t === t3)
    {
      val s2 = m(0 to 1, ::)
      val t2 = m.t(::, 0 to 1)
      assert(s2.t === t2)
      assert(s2 === t2.t)
      val s3 = m(::, 0 to 1)
      val t3 = m.t(0 to 1, ::)
      assert(s3.t === t3)
      assert(s3 === t3.t)
    }
  }
  test("Min/Max") {
    val m = DenseMatrix((1,0,0),(2,3,-1))
    // argmin/argmax return (row, col) coordinates.
    assert(argmin(m) === (1,2))
    assert(argmax(m) === (1,1))
    assert(min(m) === -1)
    assert(max(m) === 3)
    assert(minMax(m) === (-1, 3))
    assert(ptp(m) === 4)
  }
  // --- Element-wise min/max, typed min/max reductions, mapValues, and concatenation. ---
  test("elementwise max") {
    val v = DenseVector(2, 0, 3, 2, -1).asDenseMatrix
    val v2 = DenseVector(3, -1, 3, 4, -4).asDenseMatrix
    assert(max(v, v2) === DenseVector(3, 0, 3, 4, -1).asDenseMatrix)
    assert(max(v, 2) === DenseVector(2, 2, 3, 2, 2).asDenseMatrix)
    assert(min(v, 2) === DenseVector(2, 0, 2, 2, -1).asDenseMatrix)
  }
  test("Min/Max[Float]") {
    val m = convert(DenseMatrix((1,0,0),(2,3,-1)), Float)
    assert(argmin(m) === (1,2))
    assert(argmax(m) === (1,1))
    assert(min(m) === -1)
    assert(max(m) === 3)
    assert(minMax(m) === (-1.0f, 3.0f))
    assert(ptp(m) === 4)
  }
  test("Min/Max[Double]") {
    val m = convert(DenseMatrix((1,0,0),(2,3,-1)), Double)
    assert(argmin(m) === (1,2))
    assert(argmax(m) === (1,1))
    assert(min(m) === -1)
    assert(max(m) === 3)
    assert(minMax(m) === (-1.0, 3.0))
    assert(ptp(m) === 4)
  }
  test("Min/Max[Long]") {
    val m = convert(DenseMatrix((1,0,0),(2,3,-1)), Long)
    assert(argmin(m) === (1,2))
    assert(argmax(m) === (1,1))
    assert(min(m) === -1)
    assert(max(m) === 3)
    assert(minMax(m) === (-1L, 3L))
    assert(ptp(m) === 4)
  }
  test("MapValues") {
    val a : DenseMatrix[Int] = DenseMatrix((1,0,0),(2,3,-1))
    val b1 : DenseMatrix[Int] = a.mapValues(_ + 1)
    assert(b1 === DenseMatrix((2,1,1),(3,4,0)))
    // Mapping with a Double-returning function changes the element type.
    val b2 : DenseMatrix[Double] = a.mapValues(_ + 1.0)
    assert(b2 === DenseMatrix((2.0,1.0,1.0),(3.0,4.0,0.0)))
    // mapValues on a transposed view.
    val b3 = a.t.mapValues(_ + 1)
    assert(b3 === DenseMatrix((2,3), (1,4), (1,0)))
  }
  /*
  test("Map Triples") {
    val a : DenseMatrix[Int] = DenseMatrix((1,0,0),(2,3,-1))
    val b1 : DenseMatrix[Int] = a.mapTriples((i,j,v) => i + v)
    assert(b1 === DenseMatrix((1,0,0),(3,4,0)))
    val b2 : DenseMatrix[Double] = a.mapTriples((i,j,v) => j + v.toDouble)
    assert(b2 === DenseMatrix((1.0,1.0,2.0),(2.0,4.0,1.0)))
  }
  test("Triples") {
    val a : DenseMatrix[Int] = DenseMatrix((1,0,0),(2,3,-1))
    var s = 0
    // foreach
    s = 0
    for ((i,j,v) <- a.triples) s += v
    assert(s === sum(a))
    // filter
    s = 0
    for ((i,j,v) <- a.triples; if i % 2 == 0 || j % 2 == 0) s += v
    assert(s === 1+2-1)
//    // map
//    val b1 : DenseMatrix[Double] = for ((i,j,v) <- a) yield v * 2.0
//    assert(b1 === DenseMatrix((2.0,0.0,0.0),(4.0,6.0,-2.0)))
//
//    // map with filter
//    val b2 : DenseMatrix[Int] = for ((i,j,v) <- a; if j == 0) yield v * 2
//    assert(b2 === DenseMatrix((2,0,0),(4,0,0)))
  }
  */
  test("set") {
    {
      val a = DenseMatrix.zeros[Int](2,2)
      val b = DenseMatrix((1,0),(2,3))
      a := b
      assert(a === b)
    }
    val a = DenseMatrix.zeros[Int](2,3)
    val b = DenseMatrix((1,0,5),(2,3,-1))
    a := b
    assert(a === b)
  }
  test("horzcat") {
    val a : DenseMatrix[Int] = DenseMatrix((1,0,5),(2,3,-1))
    val result: DenseMatrix[Int] = DenseMatrix((1,0,5,1,0, 5),(2,3,-1,2,3,-1))
    assert(DenseMatrix.horzcat(a,a) === result)
  }
  test("vertcat") {
    val a : DenseMatrix[Int] = DenseMatrix((1,0,5),(2,3,-1))
    val result: DenseMatrix[Int] = DenseMatrix((1,0,5),(2,3,-1),(1,0,5),(2,3,-1))
    assert(DenseMatrix.vertcat(a,a) === result)
  }
  // --- Matrix/vector multiplication across element types (Double, Int, Boolean,
  // Float, Complex), including SparseVector operands and transposed forms. ---
  test("Multiply") {
    val a = DenseMatrix((1.0, 2.0, 3.0),(4.0, 5.0, 6.0))
    val b = DenseMatrix((7.0, -2.0, 8.0),(-3.0, -3.0, 1.0),(12.0, 0.0, 5.0))
    val c = DenseVector(6.0,2.0,3.0)
    val cs = SparseVector(6.0,2.0,3.0)
    assert(a * b === DenseMatrix((37.0, -8.0, 25.0), (85.0, -23.0, 67.0)))
    assert(a * c === DenseVector(19.0,52.0))
    assert(b * c === DenseVector(62.0, -21.0, 87.0))
    assert(a * cs === DenseVector(19.0,52.0))
    assert(b * cs === DenseVector(62.0, -21.0, 87.0))
    assert(b.t * c === DenseVector(72.0, -18.0, 65.0))
    assert(a.t * DenseVector(4.0, 3.0) === DenseVector(16.0, 23.0, 30.0))
    assert(c.t * a.t === (a * c).t)
    // should be dense
    val x:DenseMatrix[Double] = a * a.t
    assert(x === DenseMatrix((14.0,32.0),(32.0,77.0)))
    // should be dense
    val y:DenseMatrix[Double] = a.t * a
    assert(y === DenseMatrix((17.0,22.0,27.0),(22.0,29.0,36.0),(27.0,36.0,45.0)))
    val z : DenseMatrix[Double] = b * (b + 1.0)
    assert(z === DenseMatrix((164.0,5.0,107.0),(-5.0,10.0,-27.0),(161.0,-7.0,138.0)))
  }
  test("Multiply Int") {
    val a = DenseMatrix((1, 2, 3),(4, 5, 6))
    val b = DenseMatrix((7, -2, 8),(-3, -3, 1),(12, 0, 5))
    val c = DenseVector(6,2,3)
    assert(a * b === DenseMatrix((37, -8, 25), (85, -23, 67)))
    assert(a * c === DenseVector(19,52))
    assert(b * c === DenseVector(62, -21, 87))
    assert(b.t * c === DenseVector(72, -18, 65))
    assert(a.t * DenseVector(4, 3) === DenseVector(16, 23, 30))
    // should be dense
    val x = a * a.t
    assert(x === DenseMatrix((14,32),(32,77)))
    // should be dense
    val y = a.t * a
    assert(y === DenseMatrix((17,22,27),(22,29,36),(27,36,45)))
    val z : DenseMatrix[Int] = b * ((b + 1):DenseMatrix[Int])
    assert(z === DenseMatrix((164,5,107),(-5,10,-27),(161,-7,138)))
  }
  test("Multiply Boolean") {
    // Boolean product behaves like AND/OR semiring multiplication.
    val a = DenseMatrix((true, true, true),(true, true, true))
    val b = DenseMatrix((true, false, true),(true, false, true),(true, false, true))
    assert(a * b === DenseMatrix((true, false, true),(true, false, true)))
  }
  test("Multiply Float") {
    val a = DenseMatrix((1.0f, 2.0f, 3.0f),(4.0f, 5.0f, 6.0f))
    val b = DenseMatrix((7.0f, -2.0f, 8.0f),(-3.0f, -3.0f, 1.0f),(12.0f, 0.0f, 5.0f))
    val c = DenseVector(6.0f,2.0f,3.0f)
    val cs = SparseVector(6.0f,2.0f,3.0f)
    assert(a * b === DenseMatrix((37.0f, -8.0f, 25.0f), (85.0f, -23.0f, 67.0f)))
    assert(a * c === DenseVector(19.0f,52.0f))
    assert(b * c === DenseVector(62.0f, -21.0f, 87.0f))
    assert(a * cs === DenseVector(19.0f,52.0f))
    assert(b * cs === DenseVector(62.0f, -21.0f, 87.0f))
    assert(b.t * c === DenseVector(72.0f, -18.0f, 65.0f))
    assert(a.t * DenseVector(4.0f, 3.0f) === DenseVector(16.0f, 23.0f, 30.0f))
    // should be dense
    val x = a * a.t
    assert(x === DenseMatrix((14.0f,32.0f),(32.0f,77.0f)))
    // should be dense
    val y = a.t * a
    assert(y === DenseMatrix((17.0f,22.0f,27.0f),(22.0f,29.0f,36.0f),(27.0f,36.0f,45.0f)))
    val z : DenseMatrix[Float] = b * (b + 1.0f)
    assert(z === DenseMatrix((164.0f,5.0f,107.0f),(-5.0f,10.0f,-27.0f),(161.0f,-7.0f,138.0f)))
  }
  test("Multiply Complex") {
    val a = DenseMatrix((Complex(1,1), Complex(2,2), Complex(3,3)),
                        (Complex(4,4), Complex(5,5), Complex(6,6)))
    val b = DenseMatrix((Complex(7,7), Complex(-2,-2), Complex(8,8)),
                        (Complex(-3,-3), Complex(-3,-3), Complex(1,1)),
                        (Complex(12,12), Complex(0,0), Complex(5,5)))
    val c = DenseVector(Complex(6,0), Complex(2,0), Complex(3,0))
    val cs = SparseVector(Complex(6,0), Complex(2,0), Complex(3,0))
    val value: DenseMatrix[Complex] = a * b
    assert(value === DenseMatrix((Complex(0,74), Complex(0,-16), Complex(0,50)),
                                 (Complex(0,170), Complex(0,-46), Complex(0,134))))
    assert(b * c === DenseVector(Complex(62,62), Complex(-21,-21), Complex(87,87)))
    assert(b * cs === DenseVector(Complex(62,62), Complex(-21,-21), Complex(87,87)))
    assert(b.t * c === DenseVector(Complex(72,-72), Complex(-18,18), Complex(65,-65)))
  }
  test("toDenseVector") {
    // Flattening is column-major; transposed views flatten in their own orientation.
    val a = DenseMatrix((1,2,3), (4,5,6))
    val b = a(0 to 1, 1 to 2)
    val c = b.t
    assert(a.toDenseVector === DenseVector(1,4,2,5,3,6))
    assert(b.toDenseVector === DenseVector(2,5,3,6))
    assert(c.toDenseVector === DenseVector(2,3,5,6))
  }
  // --- flatten/reshape share the backing array (views), and `\\` solves linear
  // systems (square, over- and under-determined) via least squares. ---
  test("flattenView") {
    val a = DenseMatrix((1,2,3), (4,5,6))
    // flatten(true) returns a view, so writing through it mutates `a`.
    a.flatten(true)(2) = 4
    assert(a === DenseMatrix((1,4,3), (4,5,6)))
  }
  test("Trace") {
    assert(trace(DenseMatrix((1,2),(4,5))) === 1 + 5)
    assert(trace(DenseMatrix((1,2,3),(3,4,5),(5,6,7))) == 1 + 4 + 7)
    assert(trace(DenseMatrix((1,2,3),(4,5,6),(7,8,9))) === 1 + 5 + 9)
  }
  test("Reshape") {
    val m : DenseMatrix[Int] = DenseMatrix((1,2,3),(4,5,6))
    val r : DenseMatrix[Int] = m.reshape(3, 2, true)
    // The reshaped matrix is a view over the same backing array.
    assert(m.data eq r.data)
    assert(r.rows === 3)
    assert(r.cols === 2)
    assert(r === DenseMatrix((1,5),(4,3),(2,6)))
  }
  test("Reshape transpose") {
    val m : DenseMatrix[Int] = DenseMatrix((1,2,3),(4,5,6)).t
    val r : DenseMatrix[Int] = m.reshape(2, 3, true)
    assert(m.data eq r.data)
    assert(r.rows === 2)
    assert(r.cols === 3)
    assert(r === DenseMatrix((1,5),(4,3),(2,6)).t)
  }
  test("Solve") {
    // square solve
    val r1 : DenseMatrix[Double] = DenseMatrix((1.0,3.0),(2.0,0.0)) \\ DenseMatrix((1.0,2.0),(3.0,4.0))
    assert(r1 === DenseMatrix((1.5, 2.0), (-1.0/6, 0.0)))
    // matrix-vector solve
    val r2 : DenseVector[Double] = DenseMatrix((1.0,3.0,4.0),(2.0,0.0,6.0)) \\ DenseVector(1.0,3.0)
    assert( norm(r2 - DenseVector(0.1813186813186811, -0.3131868131868131, 0.43956043956043944), inf) < 1E-5)
    // wide matrix solve
    val r3 : DenseMatrix[Double] = DenseMatrix((1.0,3.0,4.0),(2.0,0.0,6.0)) \\ DenseMatrix((1.0,2.0),(3.0,4.0))
    matricesNearlyEqual(r3,
      DenseMatrix((0.1813186813186811, 0.2197802197802196),
                  (-0.3131868131868131, -0.1978021978021977),
                  (0.43956043956043944, 0.5934065934065933)))
    // tall matrix solve
    val r4 : DenseMatrix[Double] = DenseMatrix((1.0,3.0),(2.0,0.0),(4.0,6.0)) \\ DenseMatrix((1.0,4.0),(2.0,5.0),(3.0,6.0))
    assert( max(abs(r4 - DenseMatrix((0.9166666666666667, 1.9166666666666672),
      (-0.08333333333333352, -0.08333333333333436)))) < 1E-5)
  }
  test("Solve Float") {
    // square solve
    val r1 : DenseMatrix[Float] = DenseMatrix((1.0f,3.0f),(2.0f,0.0f)) \\ DenseMatrix((1.0f,2.0f),(3.0f,4.0f))
    assert(r1 === DenseMatrix((1.5f, 2.0f), (-1.0f/6, 0.0f)))
    // matrix-vector solve
    val r2 : DenseVector[Float] = DenseMatrix((1.0f,3.0f,4.0f),(2.0f,0.0f,6.0f)) \\ DenseVector(1.0f,3.0f)
    assert( norm(r2 - DenseVector(0.1813186813186811f, -0.3131868131868131f, 0.43956043956043944f)) < 1E-5)
    // wide matrix solve
    val r3 : DenseMatrix[Float] = DenseMatrix((1.0f,3.0f,4.0f),(2.0f,0.0f,6.0f)) \\ DenseMatrix((1.0f,2.0f),(3.0f,4.0f))
    assert( max(abs(r3 - DenseMatrix((0.1813186813186811f, 0.2197802197802196f),
      (-0.3131868131868131f, -0.1978021978021977f),
      (0.43956043956043944f, 0.5934065934065933f)))) < 1E-5)
    // tall matrix solve
    val r4 : DenseMatrix[Float] = DenseMatrix((1.0f,3.0f),(2.0f,0.0f),(4.0f,6.0f)) \\ DenseMatrix((1.0f,4.0f),(2.0f,5.0f),(3.0f,6.0f))
    assert( max(abs(r4 - DenseMatrix((0.9166666666666667f, 1.9166666666666672f),
      (-0.08333333333333352f, -0.08333333333333436f)))) < 1E-5)
  }
  test("GH#29 transpose solve is broken") {
    // Regression test: solving with a transposed view must use the transposed data.
    val A = DenseMatrix((1.0,0.0),(1.0,-1.0))
    val t = DenseVector(1.0,0.0)
    assert(A \\ t === DenseVector(1.0, 1.0))
    assert(A.t \\ t === DenseVector(1.0, 0.0))
  }
test("sum") {
// Test square and rectangular matrices
val A = DenseMatrix((1.0, 3.0), (2.0, 4.0))
assert(sum(A, Axis._0) === DenseVector(3.0, 7.0).t)
assert(sum(A(::, *)) === DenseVector(3.0, 7.0).t)
assert(sum(DenseMatrix((1.0,3.0,5.0),(2.0,4.0,6.0)), Axis._0) === DenseVector(3.0, 7.0,11.0).t)
assert(sum(DenseMatrix((1.0,3.0),(2.0,4.0),(5.0, 6.0)), Axis._0) === DenseVector(8.0, 13.0).t)
assert(sum(A, Axis._1) === DenseVector(4.0, 6.0))
assert(sum(DenseMatrix((1.0,3.0,5.0),(2.0,4.0,6.0)), Axis._1) === DenseVector(9.0, 12.0))
assert(sum(DenseMatrix((1.0,3.0),(2.0,4.0),(5.0, 6.0)), Axis._1) === DenseVector(4.0, 6.0, 11.0))
assert(sum(A) === 10.0)
}
test("normalize rows and columns") {
val A = DenseMatrix((1.0, 3.0), (2.0, 4.0))
assert(normalize(A, Axis._0, 1) === DenseMatrix((1.0/3.0, 3.0/7.0), (2.0/3.0,4.0/7.0)))
assert(normalize(A, Axis._1, 1) === DenseMatrix((1.0/4.0, 3.0/4.0), (2.0/6.0,4.0/6.0)))
// handle odd sized matrices (test for a bug.)
val dm = DenseMatrix.tabulate(2,5)( (i,j) => i * j * 1.0 + 1)
dm := normalize(dm, Axis._1, 2)
assert(abs(sum(dm(0,::).t.map(x => x * x)) - 1.0) < 1E-4, dm.toString + " not normalized!")
}
test("Generic Dense ops") {
// mostly for coverage
val a = DenseMatrix.create[String](1,1, Array("SSS"))
intercept[IndexOutOfBoundsException] {
a(3,3) = ":("
assert(false, "Shouldn't be here!")
}
assert(a(0,0) === "SSS")
intercept[IndexOutOfBoundsException] {
a(3,3)
assert(false, "Shouldn't be here!")
}
a(0,0) = ":("
assert(a(0,0) === ":(")
a := ":)"
assert(a(0,0) === ":)")
val b = DenseMatrix.zeros[String](1,1)
b := a
assert(b === a)
}
test("toString with no rows doesn't throw") {
DenseMatrix.zeros[Double](0, 2).toString
}
// Regression: shaped solve must work on transposed and sliced operands,
// not only on plain DenseMatrix/DenseVector.
test("GH #30: Shaped solve of transposed and slice matrix does not work") {
  val A=DenseMatrix((1.0,0.0),(1.0,-1.0))
  val i = DenseMatrix.eye[Double](2)
  val res = i \\ A.t(::,1)
  assert(res === DenseVector(1.0,-1.0))
  val res2 = i \\ A(1,::).t
  assert(res2 === DenseVector(1.0,-1.0))
}

// Regression: slicing past the matrix bounds must fail fast rather than
// silently return garbage.
test("GH #148: out of bounds slice throws") {
  val temp2 = DenseMatrix.tabulate(5,5)( (x: Int, y: Int) => x + y*10 )
  intercept[IndexOutOfBoundsException] {
    temp2( Range( 4, 6 ), 3 )
  }
}

// softmax of a single-element column slice is just that element.
test("softmax on dm slices") {
  val a = DenseMatrix((1.0, 2.0, 3.0))
  assert(softmax(a(::, 1)) === 2.0)
}

// delete removes rows (Axis._0) or columns (Axis._1), by single index or
// by a sequence of indices.
test("Delete") {
  val a = DenseMatrix((1, 2, 3),(4, 5, 6), (7,8,9))
  assert(a.delete(0, Axis._0) === DenseMatrix((4, 5, 6), (7,8,9)))
  assert(a.delete(1, Axis._0) === DenseMatrix((1, 2, 3), (7,8,9)))
  assert(a.delete(2, Axis._0) === DenseMatrix((1, 2, 3), (4,5,6)))
  assert(a.delete(0, Axis._1) === DenseMatrix((2, 3), (5,6), (8,9)))
  assert(a.delete(1, Axis._1) === DenseMatrix((1, 3), (4,6), (7,9)))
  assert(a.delete(2, Axis._1) === DenseMatrix((1, 2), (4,5), (7,8)))
  assert(a.delete(Seq(0,2), Axis._1) === DenseMatrix(2, 5, 8))
  assert(a.delete(Seq(1, 2), Axis._1) === DenseMatrix(1, 4, 7))
  assert(a.delete(Seq(0,2), Axis._0) === DenseMatrix((4, 5, 6)))
  assert(a.delete(Seq(1,2), Axis._0) === DenseMatrix((1, 2, 3)))
}

// zeros for a non-primitive numeric type must produce that type's zero,
// not null.
test("Big Int zeros are the right thing") {
  val dm = DenseMatrix.zeros[BigInt](1,1)
  assert(dm(0, 0) === BigInt(0))
}

// BigInt matrix multiply should agree with the Int result.
// NOTE(review): `convert(m2 * m2, Int)` converts an Int matrix to Int (a
// no-op); presumably `convert(..., BigInt)` was intended — the comparison
// relies on generic equality across element types. Verify intent.
test("BigInt multiply") {
  val m = DenseMatrix((BigInt(1), BigInt(1)), (BigInt(1), BigInt(0)))
  val m2 = DenseMatrix((1, 1), (1, 0))
  assert(m * m === convert(m2 * m2, Int))
}

// Element-wise :> produces a Boolean matrix.
test("comparisons") {
  val one = DenseMatrix.ones[Double](5, 6)
  val zero = DenseMatrix.zeros[Double](5, 6)
  assert( (one :> zero) === DenseMatrix.ones[Boolean](5, 6))
}

// Compile-time check (shapeless illTyped): adding a vector to a matrix
// must not type-check.
test("Some ill-typedness") {
  import shapeless.test.illTyped
  illTyped {
    """
    val one = DenseMatrix.ones[Double](5, 6)
    val z = DenseVector.zeros[Double](5)
    (z + one)
    """
  }
}

// Degenerate (zero-width) slices have unusual strides; multiplication must
// still produce correctly-shaped results instead of crashing.
test("ensure we don't crash on weird strides") {
  val dm = DenseMatrix.zeros[Double](3,3)
  assert( (dm(::, 0 until 0) * dm(0 until 0, ::)) === dm)
  assert( (dm(0 until 0, ::) * dm(::, 0 until 0)) === DenseMatrix.zeros[Double](0, 0))
  //  assert( (dm(::, 2 until 0 by -1) * dm(2 until 0 by -1, ::)) === dm)
}

// In-place add of a matrix's own transpose must not corrupt the result by
// reading already-updated cells (aliasing hazard).
test("Ensure a += a.t gives the right result") {
  val dm = DenseMatrix.rand[Double](3,3)
  val dmdmt = dm + dm.t
  dm += dm.t
  assert(dm === dmdmt)
}

// Regression: slicing a transposed matrix must track offset/majorStride
// correctly; checks both the slice contents and the computed offset.
test("#221") {
  val data = Array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
  val mat = new DenseMatrix(rows = 10, data, offset = 0).t
  val area = mat(3 until 6, 2 until 7)
  assert(area === DenseMatrix((3,4,5,6,7),
    (3,4,5,6,7),
    (3,4,5,6,7)))
  assert(area.t === DenseMatrix((3,4,5,6,7),
    (3,4,5,6,7),
    (3,4,5,6,7)).t)
  val sl2t = area.t(0 until area.cols, 1 until area.rows)
  assert(sl2t.offset === area.offset + area.majorStride, sl2t.data(area.offset + area.majorStride) + " " + area.offset)
  assert(sl2t.t === DenseMatrix( (3,4,5,6,7),
    (3,4,5,6,7)))
  val sl2 = area(1 until area.rows, 0 until area.cols)
  assert(sl2 === DenseMatrix( (3,4,5,6,7),
    (3,4,5,6,7)))
}

// Constructing from a varargs expansion of List[List[Int]] must compile
// and not throw (shape/row inference through the literal-row type class).
test("DenseMatrix construction with list of lists") {
  val dm = DenseMatrix(List(List(1, 2, 3, 0, 0, 0, 0, 0, 0), List(0, 0, 0, 1, 2, 3, 0, 0, 0), List(0, 0, 0, 0, 0, 0, 1, 2, 3)):_*)
}

// Mixed slicing with :: on one axis and an IndexedSeq of indices on the
// other; reversed index order is equivalent to fliplr/flipud.
test("#265: slices of :: and IndexedSeq") {
  val dm = DenseMatrix( (0, 1, 2), (3, 4, 5))
  assert(dm(::, IndexedSeq(2,1, 0)).toDenseMatrix === fliplr(dm))
  assert(dm(IndexedSeq(1, 0), ::).toDenseMatrix === flipud(dm))
}

// A 0x0 system has majorStride 0; solve must return the empty vector
// instead of crashing in the LAPACK shim.
test("#278: don't crash on solve when majorStride == 0") {
  val d = DenseVector[Double]()
  val m = DenseMatrix.tabulate(0,0) { case x => 0.0 }
  assert( m \\ d === d)
}

// Boolean-mask slicing: assigning through dm(dm :>= 2) updates exactly the
// masked cells.
test("#283: slice of dm by dm boolean") {
  val dm = DenseMatrix( (0, 1, 2), (3, 4, 5))
  dm(dm :>= 2) := 3
  assert(dm === DenseMatrix( (0, 1, 3), (3, 3, 3)))
}

// Regression for a diverging-implicit compile error; argsort on a matrix
// yields (row, col) index pairs in ascending value order.
test("#286: argsort diverging implicit") {
  val dm = DenseMatrix( (0.1f), (0.0f))
  assert(argsort(dm) === IndexedSeq((1, 0), (0, 0)))
}

// sigmoid must accept column-range slices of a matrix; sigmoid(0) == 0.5.
test("#289: sigmoid dm slice") {
  val m = DenseMatrix.zeros[Double](10, 10)
  assert(sigmoid(m(::,0 to 5)) === DenseMatrix.fill(10, 6)(0.5))
  assert(sigmoid(m(::,3 to 5)) === DenseMatrix.fill(10, 3)(0.5))
}
/**
 * Asserts element-wise near-equality of two matrices within `threshold`.
 *
 * Fails fast with a clear message if the dimensions differ (previously a
 * smaller `B` would throw an opaque out-of-bounds error, and a larger `B`
 * would be only partially checked). Also replaces the deprecated procedure
 * syntax with an explicit `: Unit =` result type.
 *
 * @param A         actual matrix
 * @param B         expected matrix
 * @param threshold maximum allowed absolute difference per element
 */
def matricesNearlyEqual(A: DenseMatrix[Double], B: DenseMatrix[Double], threshold: Double = 1E-6): Unit = {
  assert(A.rows === B.rows, s"row count mismatch: ${A.rows} vs ${B.rows}")
  assert(A.cols === B.cols, s"col count mismatch: ${A.cols} vs ${B.cols}")
  for (i <- 0 until A.rows; j <- 0 until A.cols)
    A(i, j) should be (B(i, j) +- threshold)
}
// argmax/max over a row slice (transposed to a column vector view).
test("#336 argmax for Dense Matrices") {
  val m = DenseMatrix.zeros[Double](3, 3)
  m(2, ::) := DenseVector(1.0, 2.0, 3.0).t
  assert(argmax(m(2, ::).t) === 2)
  assert(max(m(2, ::).t) === 3.0)
}

// Scalar-on-the-left element-wise operators (:/ and :-) against a matrix.
test("lhs scalars") {
  assert(1.0 :/ (DenseMatrix.fill(2,2)(10.0)) === DenseMatrix.fill(2,2)(1/10.0))
  assert(1.0 :- (DenseMatrix.fill(2,2)(10.0)) === DenseMatrix.fill(2,2)(-9.0))
}
// Applying a mapping ufunc (sin) to a matrix must equal mapping math.sin
// over the raw backing data, both out-of-place and in-place. Assertions
// now use ScalaTest's === (previously plain ==) for consistency with the
// rest of the suite and better failure messages.
test("mapping ufunc") {
  val r = DenseMatrix.rand(100, 100)
  val explicit = new DenseMatrix(100, 100, r.data.map(math.sin))
  assert(sin(r) === explicit)
  sin.inPlace(r)
  assert(explicit === r)
}

// Same check over a non-contiguous, strided slice: the expected matrix is
// built from the full backing array and then sliced identically, so both
// sides see the same strided layout.
test("mapping ufunc, strides") {
  val r = (DenseMatrix.rand(100, 100)).apply(10 until 27, 4 until 37 by 4)
  var explicit = new DenseMatrix(100, 100, r.data.map(math.sin))
  explicit = explicit(10 until 27, 4 until 37 by 4)
  assert(sin(r) === explicit)
  sin.inPlace(r)
  assert(explicit === r)
}
}
| doron123/breeze | math/src/test/scala/breeze/linalg/DenseMatrixTest.scala | Scala | apache-2.0 | 22,832 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import org.apache.spark.sql.ExperimentalMethods
import org.apache.spark.sql.catalyst.catalog.SessionCatalog
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.SparkOptimizer
import org.apache.spark.sql.internal.SQLConf
/**
 * Catalyst optimizer for Carbon: normalizes scalar sub-queries in the plan
 * via [[CarbonOptimizerUtil.transformForScalarSubQuery]] before running the
 * standard [[SparkOptimizer]] rule batches.
 */
class CarbonOptimizer(
    catalog: SessionCatalog,
    conf: SQLConf,
    experimentalMethods: ExperimentalMethods)
  extends SparkOptimizer(catalog, conf, experimentalMethods) {

  override def execute(plan: LogicalPlan): LogicalPlan = {
    // Rewrite scalar sub-queries first, then defer to Spark's optimizer.
    val rewritten: LogicalPlan = CarbonOptimizerUtil.transformForScalarSubQuery(plan)
    super.execute(rewritten)
  }
}
| manishgupta88/incubator-carbondata | integration/spark2/src/main/spark2.2/org/apache/spark/sql/hive/CarbonOptimizer.scala | Scala | apache-2.0 | 1,481 |
/* date: Aug 16, 2012 PARENTHESIZED
Used in: DisplayParser
The Display command may have Parenthesizedd Components.
The components are delimited by open and closed parentheses.
There are eight component types:
1. variables, e.g., (# $one)
2. contents of variables (% $one)
3. text to be modified (%% /color red/my text):
4. image, e.g., (@ <filename>.gif)
5. yes/no entry, e.g., (#yn $a)
6. multiple choice, e.g., (#<number of choices> $a)
7. audio, e.g., (& <filename>.au )
8. list box, e.g., (#list $a)
For example, the Display command 'd Enter gender (# $sex)'
presents the text 'Enter gender' along with a entry field,
and then stores the user's response entry in the variable
'sex'.
Purpose is to detect a paranthesize component rather than
to parse it. See: isParenthesizedComponent(..) .
*/
package com.script
/**
 * Detects (rather than fully parses) parenthesized components of a Display
 * command, e.g. '(# $sex)' in 'd Enter gender (# $sex)'. See the file
 * header for the eight component types.
 */
object Parenthesized {
  // Regexes keyed by component sigil. An earlier revision required at least
  // one space after '#':
  //	val variableRegex="""(\\(#\\s+.+\\)).*""" .r	// #
  val variableRegex="""(\\(#\\s*.+\\)).*""" .r	// # — entry field, e.g. (# $sex)
  val textDisplayRegex="""(\\(%%.+\\)).*""" .r	// %% — text to be modified
  val displayVariableRegex=""".*(\\(%.+\\)).*""" .r	// % — contents of a variable
  val imageRegex ="""(\\(@.+\\)).*""" .r	// @ — image file
  val yesNoRegex="""(\\(#yn.+\\)).*""" .r	// #yn — yes/no entry
  val choiceRegex="""(\\(#\\d+.+\\)).*""" .r	// #\\d — multiple choice
  val audioRegex ="""(\\(&.+\\)).*""" .r	// & — audio file
  val listRegex ="""(\\(#list.+\\)).*""" .r	// #list — list box
  // Matches the opening of any component: '(%' or '(#'.
  val parenthesisTag="""(\\([%#])""".r

  // Scan line for '(%' or '(#'. Return true if found.
  def isParenthesisTag(line:String)= {
    val tag=parenthesisTag findFirstIn(line)
    tag != None
  }

  // Search line for beginning parenthesized tags '(#', '(%'. Add each tag
  // found to the returned list (in order of appearance), skipping tags
  // escaped with a preceding backslash (e.g. the '(%%' inside escaped text).
  def listParenthesizedTags(line: String)={
    var list=List[String]()
    val size=line.size
    var flag=false
    for(i <- 0 until line.size) {
      flag=false
      if(isOpenParen(line(i)) )	//found '('
        if(i+1 < size)
          // see if '%' or '#' follows
          flag=line(i+1) match{
            case '#' | '%' => true
            case _=> false
          }
      // a preceding backslash escapes the tag — drop it
      if(i >0)
        if(isBackSlash(line(i-1)) )
          flag=false
      // '(%' or '(#' found with no preceding '\\': record it
      if(flag) {
        // capture '(#' or '(%'
        var str=line(i).toString+line(i+1).toString
        list= str :: list // prepend; reversed below to restore order
      }
    }
    list.reverse
  }

  def isOpenParen(letter:Char)= letter=='('
  def isBackSlash(letter:Char)= '\\\\'==letter

  // Example: 'd now (# $s) is (# $b) the (%% text) time' has the
  // parenthesized components '(# $a), (# $b), (%% text)'. Only the FIRST
  // component is extracted and returned.
  // Note: 'componentKey' ('(#' or '(%') is found by 'listParenthesizedTags'.
  def extractFirstParenthesizedComponent( componentKey:String,line:String) ={
    val beginIndex= line.indexOf(componentKey)
    val endIndex= indexOfClosingParenthesis( componentKey,
      line)
    val l=line.drop(beginIndex)
    l.take(endIndex + 1)	// returned parenthesized component, incl. ')'
  }

  // 'componentKey' is '(#' or '(%', and 'line' contains this key.
  // Returns true when a matching (unescaped) ')' follows the key.
  def isClosedParenthesize(componentKey:String,
                           line:String) ={
    if(indexOfClosingParenthesis(componentKey,
      line) != -1)
      true
    else
      false
  }

  // Line contains '(#' or '(%'. Find the index (relative to the key's
  // position) of the closing ')'. A ')' preceded by '\\' is escaped and
  // ignored. Returns -1 when no unescaped ')' exists.
  def indexOfClosingParenthesis(componentKey:String,
                                line:String) :Int={
    val beginIndex= line.indexOf(componentKey)
    // remove line preceding key
    val shortenLine= line.drop(beginIndex)
    var found= -1
    // Search for first closing ')'. Ignore '\\\\)'
    for(i <- 0 until shortenLine.size) {
      if( found == -1 && shortenLine(i)==')' ) {
        if(shortenLine(i-1) != '\\\\')
          found= i
      }
    }
    found // index of ')'
  }

  // Classifies a component '(#...)', '(%...)', '(%%...)', '(@...)',
  // '(#3..)', '(#yn..)', '(#list..)', '(&..)' and returns an identifying
  // tag string (variable, text, display, image, yesNo, multiple, audio,
  // list). Order of the cases matters: more specific patterns (e.g. #yn)
  // are shadowed by variableRegex only when they also match '#'.
  // Used in 'DisplayParser'; the tag is also used by DisplayScript.
  // Throws SyntaxException when no pattern matches.
  def extractParenthesizedTag(line:String):String={ //(String,String)={
    line match {
      case variableRegex(variable)=>	// .*(\\(#.+\\)).*
        "variable"
      case textDisplayRegex(text) =>	// """(\\(%%.+\\)).*""" .r
        "text"
      case displayVariableRegex(display)=>	//".*(\\(%.+\\)).
        "display"
      case imageRegex(image)=>
        "image"
      case yesNoRegex(yesNo)=>
        "yesNo"
      case choiceRegex(multiple)=>
        "multiple"
      case audioRegex(audio)=>
        "audio"
      case listRegex(list)=>
        "list"
      case _=>
        throw new SyntaxException("unknown parenthesized tag ")
    }
  }

  // Determine if Display command contains a component.
  // Invoked by DisplayParser.
  // NOTE(review): extractParenthesizedTag never returns "unknown" — it
  // throws instead — so this returns true or propagates SyntaxException.
  def isParenthesizedComponent(line: String): Boolean ={
    //val (target, xtype)= extractParenthesizedTag(line)
    val xtype= extractParenthesizedTag(line)
    xtype != "unknown"
  }

  // Returns (text before the component, text after the component),
  // splitting at the component's exact position and length.
  def extractLeadingTextAndShortenLine(line:String, component:String) :(String,String)= {
    val index=line.indexOf(component)
    val endSlash= index + component.length
    (line.take(index), line.drop(endSlash) )
  }

  // Capture text preceding and following the component. The trailing text
  // is taken after the FIRST ')' in the line.
  def extractLeadingAndTrailingText(line:String,
                                    component:String):(String,String)={
    val index=line.indexOf(component)
    val startOfIndex= index - component.length   // note: computed but unused
    val leadingText=line.take(index)
    val trailingText=line.drop(line.indexOf(")")+1) // removes component
    (leadingText, trailingText)
  }
}
| hangle/Script | src/Parenthesized.scala | Scala | apache-2.0 | 5,591 |
package org.hammerlab.guacamole.commands
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.rdd.ADAMContext
import org.bdgenomics.formats.avro.VariantAnnotation
import org.hammerlab.commands.Args
import org.hammerlab.guacamole.distributed.PileupFlatMapUtils.pileupFlatMapTwoSamples
import org.hammerlab.guacamole.filters.somatic.SomaticGenotypeFilter
import org.hammerlab.guacamole.filters.somatic.SomaticGenotypeFilter.SomaticGenotypeFilterArguments
import org.hammerlab.guacamole.likelihood.Likelihood
import org.hammerlab.guacamole.likelihood.Likelihood.probabilitiesOfGenotypes
import org.hammerlab.guacamole.logging.LoggingUtils.progress
import org.hammerlab.guacamole.pileup.Pileup
import org.hammerlab.guacamole.readsets.ReadSets
import org.hammerlab.guacamole.readsets.args.{ ReferenceArgs, TumorNormalReadsArgs }
import org.hammerlab.guacamole.readsets.rdd.{ PartitionedRegions, PartitionedRegionsArgs }
import org.hammerlab.guacamole.variants.{ Allele, AlleleEvidence, CalledSomaticAllele, Genotype, GenotypeOutputArgs, GenotypeOutputCaller }
import org.kohsuke.args4j.{ Option => Args4jOption }
import scala.math.{ exp, max }
/**
* Simple subtraction based somatic variant caller
*
* This takes two variant callers, calls variants on tumor and normal independently,
* and outputs the variants in the tumor sample BUT NOT the normal sample.
*
* This assumes that both read sets only contain a single sample, otherwise we should compare
* on a sample identifier when joining the genotypes
*
*/
object SomaticStandard {

  /**
   * Command-line arguments: mixes in tumor/normal input, partitioning,
   * genotype filtering/output and reference args, plus caller-specific
   * thresholds below.
   */
  class Arguments
    extends Args
      with TumorNormalReadsArgs
      with PartitionedRegionsArgs
      with SomaticGenotypeFilterArguments
      with GenotypeOutputArgs
      with ReferenceArgs {

    @Args4jOption(
      name = "--normal-odds",
      usage = "Minimum log odds threshold for possible normal-sample variant candidates"
    )
    var normalOddsThreshold: Int = 5

    @Args4jOption(
      name = "--tumor-odds",
      usage = "Minimum log odds threshold for possible tumor-sample variant candidates"
    )
    var tumorOddsThreshold: Int = 10

    @Args4jOption(
      name = "--max-normal-alternate-read-depth",
      usage = "Maximum number of alternates in the normal sample"
    )
    var maxNormalAlternateReadDepth: Int = 4

    // Expressed as a percentage on the CLI; divided by 100 before use.
    @Args4jOption(
      name = "--min-tumor-variant-allele-frequency",
      usage = "Minimum VAF at which to test somatic variants, as a percentage"
    )
    var minTumorVariantAlleleFrequency: Int = 3

    @Args4jOption(name = "--dbsnp-vcf", required = false, usage = "VCF file to identify DBSNP variants")
    var dbSnpVcf: String = ""
  }

  object Caller extends GenotypeOutputCaller[Arguments, CalledSomaticAllele] {
    override val name = "somatic-standard"
    override val description = "call somatic variants using independent callers on tumor and normal"

    /**
     * Builds the RDD of candidate somatic calls: pairs tumor/normal pileups
     * per locus, applies `findPotentialVariantAtLocus`, optionally joins
     * against a dbSNP VCF to populate rsIDs, and finally applies the
     * configured genotype filters.
     */
    override def computeVariants(args: Arguments, sc: SparkContext) = {
      val reference = args.reference(sc)
      val (readsets, loci) = ReadSets(sc, args)

      val partitionedReads =
        PartitionedRegions(
          readsets.allMappedReads,
          loci,
          args
        )

      // Destructure `args`' fields here to avoid serializing `args` itself.
      val normalOddsThreshold = args.normalOddsThreshold
      val tumorOddsThreshold = args.tumorOddsThreshold

      val maxTumorReadDepth = args.maxTumorReadDepth

      val normalSampleName = args.normalSampleName
      val tumorSampleName = args.tumorSampleName

      val maxNormalAlternateReadDepth = args.maxNormalAlternateReadDepth
      // CLI value is a percentage; convert to a fraction in [0, 1].
      val minTumorVariantAlleleFrequency = args.minTumorVariantAlleleFrequency / 100.0f

      var potentialGenotypes: RDD[CalledSomaticAllele] =
        pileupFlatMapTwoSamples[CalledSomaticAllele](
          partitionedReads,
          sample1Name = normalSampleName,
          sample2Name = tumorSampleName,
          skipEmpty = true, // skip empty pileups
          function = (pileupNormal, pileupTumor) =>
            findPotentialVariantAtLocus(
              pileupTumor,
              pileupNormal,
              normalOddsThreshold,
              tumorOddsThreshold,
              maxTumorReadDepth,
              maxNormalAlternateReadDepth,
              minTumorVariantAlleleFrequency
            ).iterator,
          reference = reference
        )

      // Cached because it is both counted (below) and filtered/joined later.
      potentialGenotypes.persist()
      progress("Computed %,d potential genotypes".format(potentialGenotypes.count))

      // Optionally annotate calls with dbSNP rsIDs via a left outer join on
      // the BDG variant key.
      if (args.dbSnpVcf != "") {
        val adamContext: ADAMContext = sc
        val dbSnpVariants = adamContext.loadVariantAnnotations(args.dbSnpVcf)

        potentialGenotypes =
          potentialGenotypes
            .keyBy(_.bdgVariant)
            .leftOuterJoin(dbSnpVariants.rdd.keyBy(_.getVariant))
            .values
            .map {
              case (calledAllele: CalledSomaticAllele, dbSnpVariantOpt: Option[VariantAnnotation]) =>
                calledAllele.copy(
                  rsID =
                    dbSnpVariantOpt.flatMap(
                      v =>
                        if (v.getDbSnp)
                          Some(
                            // assumes the first variant name is the numeric
                            // rsID — TODO confirm against the VCF source
                            v.getVariant.getNames.get(0).toInt
                          )
                        else
                          None
                    )
                )
            }
      }

      (
        SomaticGenotypeFilter(potentialGenotypes, args),
        readsets.sequenceDictionary,
        Vector(args.tumorSampleName)
      )
    }

    /**
     * Tests one locus for a somatic variant: requires strong evidence for
     * the alternate in the tumor (log-odds above `tumorOddsThreshold`) and
     * strong evidence for reference in the normal (log-odds above
     * `normalOddsThreshold`). Returns None when the locus is skipped or the
     * thresholds are not met.
     */
    def findPotentialVariantAtLocus(tumorPileup: Pileup,
                                    normalPileup: Pileup,
                                    normalOddsThreshold: Int,
                                    tumorOddsThreshold: Int,
                                    maxReadDepth: Int = Int.MaxValue,
                                    maxNormalAlternateReadDepth: Int = 5,
                                    minTumorVariantAlleleFrequency: Float = 0.05f): Option[CalledSomaticAllele] = {

      // For now, we skip loci that have no reads mapped. We may instead want to emit NoCall in this case.
      if (tumorPileup.elements.isEmpty
        || normalPileup.elements.isEmpty

        // skip abnormally deep pileups
        || tumorPileup.depth > maxReadDepth
        || normalPileup.depth > maxReadDepth
        || tumorPileup.referenceDepth == tumorPileup.depth // skip computation if no alternate reads
        || normalPileup.depth - normalPileup.referenceDepth > maxNormalAlternateReadDepth
      )
        return None

      val referenceAllele = Allele(tumorPileup.referenceBase, tumorPileup.referenceBase)
      val referenceGenotype = Genotype(Map(referenceAllele -> 1.0))

      val tumorDepth = tumorPileup.depth
      // Fraction of tumor reads supporting each variant allele.
      val variantAlleleFractions: Map[Allele, Double] =
        tumorPileup
          .elements
          .withFilter(_.allele.isVariant)
          .map(_.allele)
          .groupBy(identity)
          .map{ case(k, v) => k -> v.size / tumorDepth.toDouble }

      // Compute empirical frequency of alternate allele in the tumor sample
      // for the likelihood computation
      val (mostFrequentVariantAllele, highestFrequency) = variantAlleleFractions.maxBy(_._2)
      // Floor the VAF so very low observed frequencies are still testable.
      val empiricalVariantAlleleFrequency = max(minTumorVariantAlleleFrequency, highestFrequency)

      // Build a possible genotype where the alternate allele occurs at the
      // observed empirical VAF
      val somaticVariantGenotype =
        Genotype(
          Map(
            referenceAllele -> (1.0 - empiricalVariantAlleleFrequency),
            mostFrequentVariantAllele -> empiricalVariantAlleleFrequency
          )
        )

      val (tumorRefLogProb, tumorAltLogProb) =
        probabilitiesOfGenotypes(
          tumorPileup.elements,
          (referenceGenotype, somaticVariantGenotype),
          prior = Likelihood.uniformPrior,
          includeAlignment = false,
          logSpace = true
        )

      // Log-odds in favor of the somatic genotype over all-reference.
      val tumorAltLOD: Double = tumorAltLogProb - tumorRefLogProb

      // Germline heterozygote hypothesis for the normal sample.
      val germlineVariantGenotype =
        Genotype(
          Map(
            referenceAllele -> 0.5,
            mostFrequentVariantAllele -> 0.5
          )
        )

      val (normalRefLogProb, normalAltLogProb) =
        probabilitiesOfGenotypes(
          normalPileup.elements,
          (referenceGenotype, germlineVariantGenotype),
          prior = Likelihood.uniformPrior,
          includeAlignment = false,
          logSpace = true
        )

      // Log-odds in favor of all-reference over germline het in the normal.
      val normalRefLOD: Double = normalRefLogProb - normalAltLogProb

      if (tumorAltLOD > tumorOddsThreshold &&
        normalRefLOD > normalOddsThreshold &&
        mostFrequentVariantAllele.altBases.nonEmpty) {

        val allele = mostFrequentVariantAllele

        // NOTE(review): exp(-logProb) is 1/p for a log-probability; if
        // AlleleEvidence expects a probability, exp(logProb) may have been
        // intended — confirm the sign convention of probabilitiesOfGenotypes.
        val tumorVariantEvidence = AlleleEvidence(exp(-tumorAltLogProb), allele, tumorPileup)
        val normalReferenceEvidence = AlleleEvidence(exp(-normalRefLogProb), referenceAllele, normalPileup)

        Some(
          CalledSomaticAllele(
            tumorPileup.sampleName,
            tumorPileup.contigName,
            tumorPileup.locus,
            allele,
            tumorAltLOD,
            tumorVariantEvidence,
            normalReferenceEvidence
          )
        )
      } else
        None
    }
  }
}
| hammerlab/guacamole | src/main/scala/org/hammerlab/guacamole/commands/SomaticStandardCaller.scala | Scala | apache-2.0 | 9,290 |
package io.scalajs.nodejs.url
import io.scalajs.RawOptions
import scala.scalajs.js
import scala.scalajs.js.annotation.JSImport
import scala.scalajs.js.|
/**
* The URLSearchParams API provides read and write access to the query of a URL. The URLSearchParams
* class can also be used standalone with one of the four following constructors.
*
* The WHATWG URLSearchParams interface and the querystring module have similar purpose, but the purpose
* of the querystring module is more general, as it allows the customization of delimiter characters (& and =).
* On the other hand, this API is designed purely for URL query strings.
* @author lawrence.daniels@gmail.com
*/
@js.native
@JSImport("url", "URLSearchParams")
class URLSearchParams extends js.Object {

  /**
   * Parse the string as a query string, and use it to instantiate a new URLSearchParams object.
   * A leading '?', if present, is ignored.
   * @param queryString A query string
   */
  def this(queryString: String) = this()

  /**
   * Instantiate a new URLSearchParams object with a query hash map. The key and value of each property of
   * obj are always coerced to strings.
   * @param obj An object representing a collection of key-value pairs
   */
  def this(obj: js.Dictionary[_]) = this()

  /**
   * Instantiate a new URLSearchParams object with a query hash map. The key and value of each property of
   * obj are always coerced to strings.
   * @param obj An object representing a collection of key-value pairs
   */
  def this(obj: js.Object) = this()

  /**
   * Instantiate a new URLSearchParams object with an iterable map in a way that is similar to Map's constructor.
   * iterable can be an Array or any iterable object. That means iterable can be another URLSearchParams,
   * in which case the constructor will simply create a clone of the provided URLSearchParams. Elements of
   * iterable are key-value pairs, and can themselves be any iterable object.
   * @param it An iterable object whose elements are key-value pairs
   */
  def this(it: Iterator[_]) = this()

  /////////////////////////////////////////////////////////////////////////////////
  //      Methods
  /////////////////////////////////////////////////////////////////////////////////

  /**
   * Append a new name-value pair to the query string.
   * @param name  the name of the property
   * @param value the value of the property
   */
  def append(name: String, value: String): Unit = js.native

  /**
   * Remove all name-value pairs whose name is name.
   * @param name the name of the property
   */
  def delete(name: String): Unit = js.native

  /**
   * Returns an ES6 Iterator over each of the name-value pairs in the query. Each item of the iterator is
   * a JavaScript Array. The first item of the Array is the name, the second item of the Array is the value.
   * @return an iterable of an array of results
   */
  def entries(): Iterable[js.Array[String]] = js.native

  /**
   * Iterates over each name-value pair in the query and invokes the given function.
   * @param fn the function invoked for each name-value pair in the query.
   */
  def forEach(fn: js.Function3[String, String, URLSearchParams, Any]): Unit = js.native

  /**
   * Returns the value of the first name-value pair whose name is name. If there are no such pairs, null is returned.
   * @param name the name of the property
   * @return the value or null if there is no name-value pair with the given name.
   */
  def get(name: String): String = js.native

  /**
   * Returns the values of all name-value pairs whose name is name. If there are no such pairs, an empty array is returned.
   * @param name the name of the property
   * @return the array of values or null if there is no name-value pair with the given name.
   */
  def getAll(name: String): js.Array[String] = js.native

  /**
   * Returns true if there is at least one name-value pair whose name is name.
   * @param name the name of the property
   * @return true if there is at least one name-value pair whose name is name.
   */
  def has(name: String): Boolean = js.native

  /**
   * Returns an ES6 Iterator over the names of each name-value pair.
   * @return an [[Iterable Iterator]] over the names of each name-value pair.
   */
  def keys(): Iterable[String] = js.native

  /**
   * Sets the value in the URLSearchParams object associated with name to value. If there are any pre-existing
   * name-value pairs whose names are name, set the first such pair's value to value and remove all others.
   * If not, append the name-value pair to the query string.
   * @param name  the name of the property
   * @param value the value of the property
   */
  def set(name: String, value: String): Unit = js.native

  /**
   * Sort all existing name-value pairs in-place by their names. Sorting is done with a stable sorting algorithm,
   * so relative order between name-value pairs with the same name is preserved.
   */
  def sort(): Unit = js.native

  /**
   * Returns an ES6 Iterator over the values of each name-value pair.
   * @return an [[Iterable Iterator]] over the values of each name-value pair.
   */
  def values(): Iterable[String] = js.native

  // NOTE(review): in Node.js, domainToASCII/domainToUnicode/format are
  // functions of the `url` MODULE, not methods of URLSearchParams — the
  // three members below appear misplaced on this facade. Verify against the
  // Node.js url documentation before relying on them.

  /**
   * Returns the Punycode ASCII serialization of the domain. If domain is an invalid domain, the empty string is returned.
   * @param domain the domain
   * @return the Punycode ASCII serialization of the domain
   */
  def domainToASCII(domain: String): String = js.native

  /**
   * Returns the Unicode serialization of the domain. If domain is an invalid domain, the empty string is returned.
   * @param domain the domain
   * @return the Unicode serialization of the domain
   */
  def domainToUnicode(domain: String): String = js.native

  /**
   * Returns a customizable serialization of a URL String representation of a WHATWG URL object.
   * @param url     the given [[URL]]
   * @param options the given [[UrlFormatOptions options]]
   * @return a customizable serialization of a URL String representation of a WHATWG URL object.
   */
  def format(url: URL, options: UrlFormatOptions | RawOptions = js.native): String = js.native
}
/**
 * Options controlling [[URLSearchParams#format URL serialization]].
 *
 * @param auth     true if the serialized URL string should include the username and password,
 *                 false otherwise. Defaults to true.
 * @param fragment true if the serialized URL string should include the fragment, false otherwise.
 *                 Defaults to true.
 * @param search   true if the serialized URL string should include the search query, false otherwise.
 *                 Defaults to true.
 * @param unicode  true if Unicode characters appearing in the host component of the URL string
 *                 should be encoded directly as opposed to being Punycode encoded. Defaults to false.
 */
class UrlFormatOptions(val auth: js.UndefOr[Boolean] = js.undefined,
                       val fragment: js.UndefOr[Boolean] = js.undefined,
                       val search: js.UndefOr[Boolean] = js.undefined,
                       val unicode: js.UndefOr[Boolean] = js.undefined)
  extends js.Object
package domino.configuration_watching
import domino.capsule.Capsule
import org.osgi.service.metatype.{MetaTypeProvider => JMetaTypeProvider}
import domino.scala_osgi_metatype.adapters.MetaTypeProviderAdapter
import domino.scala_osgi_metatype.interfaces.MetaTypeProvider
/**
* Contains some common methods for both the configuration and factory configuration capsules.
*
* @constructor Initializes the capsule.
* @param metaTypeProvider Optional meta type provider
*/
abstract class AbstractConfigurationWatcherCapsule(
    metaTypeProvider: Option[MetaTypeProvider]) extends Capsule with JMetaTypeProvider {

  /**
   * Contains the adapter which translates the Scala OSGi metatype definition into a native OSGi metatype definition.
   * Lazy so the adapter is only built if the native interface is actually queried.
   */
  protected lazy val metaTypeProviderAdapter = metaTypeProvider map { new MetaTypeProviderAdapter(_) }

  // Delegates to the adapter when a provider was supplied; the Java
  // MetaTypeProvider contract expects null (orNull) when nothing is available.
  def getObjectClassDefinition(id: String, locale: String) = {
    metaTypeProviderAdapter map { _.getObjectClassDefinition(id, locale) } orNull
  }

  // Null when no provider was supplied, per the Java interface contract.
  def getLocales = metaTypeProviderAdapter map { _.getLocales } orNull
}
| helgoboss/domino | src/main/scala/domino/configuration_watching/AbstractConfigurationWatcherCapsule.scala | Scala | mit | 1,076 |
package io.ddf.spark.content
import io.ddf.content.Schema.{ColumnClass, ColumnType}
import io.ddf.spark.ATestSuite
import scala.collection.JavaConversions._
/**
*/
/**
 * Exercises factor (categorical) column support on the DDF schema handler:
 * computing factor levels and per-level counts over different backing
 * representations, a BIGINT column, string columns, and rows containing NAs.
 */
class FactorSuite extends ATestSuite {
  createTableMtcars()
  createTableAirlineWithNA()
  test("test get factors on DDF with TablePartition") {
    val ddf = manager.sql2ddf("select * from mtcars", "SparkSQL")
    val schemaHandler = ddf.getSchemaHandler
    // Mark columns 7..10 as factors, then compute their level counts.
    Array(7, 8, 9, 10).foreach {
      idx => schemaHandler.setAsFactor(idx)
    }
    schemaHandler.computeFactorLevelsAndLevelCounts()
    val cols = Array(7, 8, 9, 10).map {
      idx => schemaHandler.getColumn(schemaHandler.getColumnName(idx))
    }
    // Expected counts come from the fixed mtcars fixture loaded above.
    assert(cols(0).getOptionalFactor.getLevelCounts.get("1") === 14)
    assert(cols(0).getOptionalFactor.getLevelCounts.get("0") === 18)
    assert(cols(1).getOptionalFactor.getLevelCounts.get("1") === 13)
    assert(cols(2).getOptionalFactor.getLevelCounts.get("4") === 12)
    assert(cols(2).getOptionalFactor.getLevelCounts.get("3") === 15)
    assert(cols(2).getOptionalFactor.getLevelCounts.get("5") === 5)
    assert(cols(3).getOptionalFactor.getLevelCounts.get("1") === 7)
    assert(cols(3).getOptionalFactor.getLevelCounts.get("2") === 10)
  }
  test("test get factor with long column") {
    // A BIGINT column must keep its type while gaining the FACTOR class.
    val ddf = manager.sql2ddf("select mpg, cast(cyl as bigint) as cyl from mtcars", "SparkSQL")
    ddf.getSchemaHandler.setAsFactor("cyl")
    ddf.getSchemaHandler.computeFactorLevelsAndLevelCounts()
    assert(ddf.getSchemaHandler.getColumn("cyl").getType == ColumnType.BIGINT)
    assert(ddf.getSchemaHandler.getColumn("cyl").getColumnClass == ColumnClass.FACTOR)
    assert(ddf.getSchemaHandler.getColumn("cyl").getOptionalFactor.getLevelCounts.get("4") == 11)
    assert(ddf.getSchemaHandler.getColumn("cyl").getOptionalFactor.getLevelCounts.get("6") == 7)
    assert(ddf.getSchemaHandler.getColumn("cyl").getOptionalFactor.getLevelCounts.get("8") == 14)
  }
  test("test set factor for string columns") {
    // String columns start as CHARACTER and become FACTOR after the bulk call.
    val ddf = manager.sql2ddf("select * from airlineWithNA", "SparkSQL")
    assert(ddf.getSchemaHandler.getColumn("Origin").getType == ColumnType.STRING)
    assert(ddf.getSchemaHandler.getColumn("Origin").getColumnClass == ColumnClass.CHARACTER)
    ddf.getSchemaHandler.setFactorLevelsForStringColumns(ddf.getSchemaHandler.getColumns.map{col => col.getName}.toArray)
    ddf.getSchemaHandler.computeFactorLevelsAndLevelCounts()
    assert(ddf.getSchemaHandler.getColumn("Origin").getType == ColumnType.STRING)
    assert(ddf.getSchemaHandler.getColumn("Origin").getColumnClass == ColumnClass.FACTOR)
    assert(ddf.getSchemaHandler.getColumn("Origin").getOptionalFactor.getLevelCounts.size() == 3)
  }
  test("test get factors for DDF with RDD[Array[Object]]") {
    // Same expectations as the TablePartition test, over a different representation.
    val ddf = manager.sql2ddf("select * from mtcars", "SparkSQL")
    //    ddf.getRepresentationHandler.remove(classOf[RDD[_]], classOf[TablePartition])
    val schemaHandler = ddf.getSchemaHandler
    Array(7, 8, 9, 10).foreach {
      idx => schemaHandler.setAsFactor(idx)
    }
    schemaHandler.computeFactorLevelsAndLevelCounts()
    val cols2 = Array(7, 8, 9, 10).map {
      idx => schemaHandler.getColumn(schemaHandler.getColumnName(idx))
    }
    assert(cols2(0).getOptionalFactor.getLevelCounts.get("1") === 14)
    assert(cols2(0).getOptionalFactor.getLevelCounts.get("0") === 18)
    assert(cols2(1).getOptionalFactor.getLevelCounts.get("1") === 13)
    assert(cols2(2).getOptionalFactor.getLevelCounts.get("4") === 12)
    assert(cols2(2).getOptionalFactor.getLevelCounts.get("3") === 15)
    assert(cols2(2).getOptionalFactor.getLevelCounts.get("5") === 5)
    assert(cols2(3).getOptionalFactor.getLevelCounts.get("1") === 7)
    assert(cols2(3).getOptionalFactor.getLevelCounts.get("2") === 10)
  }
  test("test NA handling") {
    // Levels and counts must be computed over non-NA values only.
    val ddf = manager.sql2ddf("select * from airlineWithNA", "SparkSQL")
    val schemaHandler = ddf.getSchemaHandler
    Array(0, 8, 16, 17, 24, 25).foreach {
      idx => schemaHandler.setAsFactor(idx)
    }
    schemaHandler.computeFactorLevelsAndLevelCounts()
    val cols = Array(0, 8, 16, 17, 24, 25).map {
      idx => schemaHandler.getColumn(schemaHandler.getColumnName(idx))
    }
    assert(cols(0).getOptionalFactor.getLevels.contains("2008"))
    assert(cols(0).getOptionalFactor.getLevels.contains("2010"))
    assert(cols(0).getOptionalFactor.getLevelCounts.get("2008") === 28.0)
    assert(cols(0).getOptionalFactor.getLevelCounts.get("2010") === 1.0)
    assert(cols(1).getOptionalFactor.getLevelCounts.get("WN") === 28.0)
    assert(cols(2).getOptionalFactor.getLevelCounts.get("ISP") === 12.0)
    assert(cols(2).getOptionalFactor.getLevelCounts.get("IAD") === 2.0)
    assert(cols(2).getOptionalFactor.getLevelCounts.get("IND") === 17.0)
    assert(cols(3).getOptionalFactor.getLevelCounts.get("MCO") === 3.0)
    assert(cols(3).getOptionalFactor.getLevelCounts.get("TPA") === 3.0)
    assert(cols(3).getOptionalFactor.getLevelCounts.get("JAX") === 1.0)
    assert(cols(3).getOptionalFactor.getLevelCounts.get("LAS") === 3.0)
    assert(cols(3).getOptionalFactor.getLevelCounts.get("BWI") === 10.0)
    assert(cols(5).getOptionalFactor.getLevelCounts.get("0") === 9.0)
    assert(cols(4).getOptionalFactor.getLevelCounts.get("3") === 1.0)
    // Repeat on a second DDF instance to confirm the results are reproducible.
    val ddf2 = manager.sql2ddf("select * from airlineWithNA", "SparkSQL")
    //    ddf2.getRepresentationHandler.remove(classOf[RDD[_]], classOf[TablePartition])
    val schemaHandler2 = ddf2.getSchemaHandler
    Array(0, 8, 16, 17, 24, 25).foreach {
      idx => schemaHandler2.setAsFactor(idx)
    }
    schemaHandler2.computeFactorLevelsAndLevelCounts()
    val cols2 = Array(0, 8, 16, 17, 24, 25).map {
      idx => schemaHandler2.getColumn(schemaHandler2.getColumnName(idx))
    }
    assert(cols2(0).getOptionalFactor.getLevelCounts.get("2008") === 28.0)
    assert(cols2(0).getOptionalFactor.getLevelCounts.get("2010") === 1.0)
    assert(cols2(1).getOptionalFactor.getLevelCounts.get("WN") === 28.0)
    assert(cols2(2).getOptionalFactor.getLevelCounts.get("ISP") === 12.0)
    assert(cols2(2).getOptionalFactor.getLevelCounts.get("IAD") === 2.0)
    assert(cols2(2).getOptionalFactor.getLevelCounts.get("IND") === 17.0)
    assert(cols2(3).getOptionalFactor.getLevelCounts.get("MCO") === 3.0)
    assert(cols2(3).getOptionalFactor.getLevelCounts.get("TPA") === 3.0)
    assert(cols2(3).getOptionalFactor.getLevelCounts.get("JAX") === 1.0)
    assert(cols2(3).getOptionalFactor.getLevelCounts.get("LAS") === 3.0)
    assert(cols2(3).getOptionalFactor.getLevelCounts.get("BWI") === 10.0)
    assert(cols2(5).getOptionalFactor.getLevelCounts.get("0") === 9.0)
    assert(cols2(4).getOptionalFactor.getLevelCounts.get("3") === 1.0)
  }
}
| ubolonton/DDF | spark/src/test/scala/io/ddf/spark/content/FactorSuite.scala | Scala | apache-2.0 | 6,728 |
package edu.uw.at.iroberts.wirefugue.kafka.consumer
import akka.actor.ActorSystem
import akka.kafka.scaladsl.Consumer
import akka.kafka.{ConsumerSettings, Subscriptions}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import com.typesafe.config.ConfigFactory
import edu.uw.at.iroberts.wirefugue.kafka.producer.KafkaKey
import edu.uw.at.iroberts.wirefugue.kafka.serdes.{PacketDeserializer, PacketSerde}
import edu.uw.at.iroberts.wirefugue.pcap.Packet
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.common.serialization.IntegerDeserializer
import scala.concurrent.Await
import scala.concurrent.duration._
/**
* Created by Ian Robertson <iroberts@uw.edu> on 5/24/17.
*/
/**
 * Consumes captured packets from the Kafka "packets" topic, one stream per
 * partition, and prints each packet that carries an IP layer. Runs until the
 * consumer stream completes (normally never), then terminates the actor system.
 */
object PacketConsumer extends App {
  // NOTE(review): this alias is unused below, and its value type (Array[Byte])
  // disagrees with the ConsumerRecord[Integer, Packet] actually consumed — confirm
  // which key/value types the topic really carries.
  type PacketRecord = ConsumerRecord[KafkaKey, Array[Byte]]
  val config = ConfigFactory.load("application.conf")
  implicit val system = ActorSystem("stream-consumer-system", config)
  implicit val materializer = ActorMaterializer()
  // Deserialize keys as Integer and values as Packet; start from the latest offset.
  val consumerSettings = ConsumerSettings(system, new IntegerDeserializer, new PacketDeserializer)
    .withGroupId("group1")
    .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest")
  // Separate streams for each partition
  val maxPartitions = 100
  val consumerGroup = Consumer.plainPartitionedSource(consumerSettings, Subscriptions.topics("packets"))
  // For every assigned partition, run a sub-stream that prints IP packets
  // tagged with the partition number.
  val done = consumerGroup.map {
    case (topicPartition, source) =>
      val p: Int = topicPartition.partition
      source
        .map { (cr: ConsumerRecord[Integer, Packet]) => cr.value() }
        .filter(_.ip.isDefined)
        .toMat(Sink.foreach(packet => println(s"[$p] $packet")))(Keep.both)
        .run()
  }
    // Collapse the per-partition completion futures into one.
    .mapAsyncUnordered(maxPartitions)(_._2)
    .runWith(Sink.ignore)
  // Block the main thread until the stream terminates.
  Await.result(done, Duration.Inf)
  system.terminate()
}
| robertson-tech/wirefugue | sensor/src/main/scala/edu/uw/at/iroberts/wirefugue/kafka/consumer/PacketConsumer.scala | Scala | gpl-3.0 | 1,857 |
package edu.ucsb.apss.partitioning
import java.io.File
import java.nio.file.{Files, Paths}
import edu.ucsb.apss.Context
import edu.ucsb.apss.util.{VectorWithNorms, FileSystemManager}
import org.apache.commons.io.FileUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.SerializableWritable
import org.apache.spark.mllib.linalg.SparseVector
import org.scalatest.{Matchers, BeforeAndAfter, FlatSpec}
/**
* Created by dimberman on 4/14/16.
*/
/**
 * Integration-style tests for FileSystemManager: writing bucketized vector
 * partitions to /tmp, reading them back, idempotent writes, and RDD-driven
 * writes. Each test cleans up the files it creates; the `after` hook removes
 * the whole output directory.
 */
class FileSystemManagerTest extends FlatSpec with Matchers with BeforeAndAfter {
  val sc = Context.sc
  // Per-run scratch directory, keyed by the Spark application id.
  val outputDir = s"/tmp/filesystem/${sc.applicationId}"
  val manager = new FileSystemManager(outputDir = outputDir)
  // Builds the expected on-disk path for a (bucket, tiedLeader) partition key.
  val path = (s:String, sub:String, i:(Int,Int)) => s"$outputDir/$sub/${PartitionHasher.partitionHash(i)}"
  val k = (5, 5)
  // Single-entry fixture written and read back by every test.
  val bucketizedVector = Seq( new VectorWithNorms(1, 1, 1, new SparseVector(1, Array(2), Array(3)), 1)).toIterable
  after {
    val f = new File(outputDir)
    FileUtils.deleteDirectory(f)
  }
  "Partition Manager" should "write to file" in {
    manager.writeVecFile(k, bucketizedVector, sc.applicationId, sc.broadcast(new SerializableWritable(sc.hadoopConfiguration)))
    assert(Files.exists(Paths.get(path(sc.applicationId,"vec", k))))
    val f = new File(outputDir+"/"+PartitionHasher.partitionHash(k))
    f.delete()
  }
  it should "read from a file it wrote" in {
    manager.writeVecFile(k, bucketizedVector, sc.applicationId, sc.broadcast(new SerializableWritable(sc.hadoopConfiguration)))
    assert(Files.exists(Paths.get(path(sc.applicationId,"vec", k))))
    val BVConf = sc.broadcast(new SerializableWritable(sc.hadoopConfiguration))
    Files.exists(Paths.get(path(sc.applicationId,"vec", k))) shouldBe true
    val answer = manager.readVecFile(new Path(path(sc.applicationId,"vec", k)),BVConf, org.apache.spark.TaskContext.get())
    answer.next() shouldEqual bucketizedVector.head
    val f = new File(s"/tmp/${sc.applicationId}/"+PartitionHasher.partitionHash(k))
    f.delete()
  }
  it should "only write to a file once" in {
    // Writing the same partition twice must not append duplicate entries.
    manager.writeVecFile(k, bucketizedVector, sc.applicationId, sc.broadcast(new SerializableWritable(sc.hadoopConfiguration)))
    assert(Files.exists(Paths.get(path(sc.applicationId,"vec", k))))
    val BVConf = sc.broadcast(new SerializableWritable(sc.hadoopConfiguration))
    Files.exists(Paths.get(path(sc.applicationId,"vec", k))) shouldBe true
    val answer1 = manager.readInvFile(new Path(path(sc.applicationId,"vec", k)),BVConf, org.apache.spark.TaskContext.get()).toList
    val numlines = answer1.size
    answer1.head shouldEqual bucketizedVector.head
    manager.writeVecFile(k, bucketizedVector, sc.applicationId, sc.broadcast(new SerializableWritable(sc.hadoopConfiguration)))
    val answer2 = manager.readInvFile(new Path(path(sc.applicationId,"vec", k)),BVConf, org.apache.spark.TaskContext.get()).toList
    answer2.foreach(println)
    answer2.size shouldEqual numlines
    answer2.head shouldEqual bucketizedVector.head
    manager.cleanup(sc.applicationId, BVConf)
  }
  it should "handle RDDs" in {
    val rdd = sc.parallelize(Seq((k,bucketizedVector.head)))
    manager.writePartitionsToFile(rdd)
    val BVConf = sc.broadcast(new SerializableWritable(sc.hadoopConfiguration))
    // Force the lazy RDD so the write side effect actually happens.
    rdd.count()
    assert(Files.exists(Paths.get(path(sc.applicationId,"vec", k))))
    val f = new File(path(sc.applicationId,"vec", k))
    manager.readVecFile(new Path(path(sc.applicationId,"vec", k)), BVConf, org.apache.spark.TaskContext.get()).foreach(println)
    f
    f.delete()
  }
}
| danielryan2430/All-Pairs-Similarity-Spark | src/test/scala/edu/ucsb/apss/partitioning/FileSystemManagerTest.scala | Scala | apache-2.0 | 3,800 |
package org.leebli.connector.maven
import org.junit.Test
import org.junit.Before
import java.io.File
import java.nio.file.Files
import java.nio.file.Paths
import java.nio.file.Path
import org.apache.commons.io.FileUtils
// Empty placeholder for Maven-connector tests; no test methods are defined yet.
class MavenConnectorTestCase {
} | jonathan-macke/leebli | connectors/maven-connector/src/test/scala/org/leebli/connector/maven/MavenConnectorTestCase.scala | Scala | apache-2.0 | 257 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package bars.model
import play.api.libs.json.{Json, OFormat}
/** An error returned by the BARS API: a machine-readable code plus description. */
final case class BarsError(
  code: String,
  desc: String
)

object BarsError {
  // Play-JSON (de)serialization for BarsError payloads.
  implicit val format: OFormat[BarsError] = Json.format[BarsError]

  /**
   * Happens when HMRC's bank details are submitted
   */
  val sortCodeOnDenyList: String = "SORT_CODE_ON_DENY_LIST"
}
| hmrc/self-service-time-to-pay-frontend | app/bars/model/BarsError.scala | Scala | apache-2.0 | 948 |
/*
* Accio is a platform to launch computer science experiments.
* Copyright (C) 2016-2018 Vincent Primault <v.primault@ucl.ac.uk>
*
* Accio is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Accio is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Accio. If not, see <http://www.gnu.org/licenses/>.
*/
package fr.cnrs.liris.sparkle.filesystem
import java.io._
import java.net.URI
import java.nio.file.{Files, Path, Paths}
import fr.cnrs.liris.util.{FileUtils, NullInputStream}
import scala.collection.JavaConverters._
/**
* Default implementation of a filesystem, relying on Java NIO to implement all operations.
*
* This allows to support even non-POSIX filesystems, as long as the associated
* [[java.nio.file.spi.FileSystemProvider]] class is registered as a service.
*/
object DefaultFilesystem extends Filesystem {
/**
 * Opens a buffered stream over the file at `uri`; yields the shared
 * NullInputStream when the target is not an existing regular file.
 */
override def createInputStream(uri: String): InputStream = {
  val file = getFile(uri)
  if (!Files.isRegularFile(file)) NullInputStream
  else new BufferedInputStream(new FileInputStream(file.toFile))
}
/**
 * Opens a buffered output stream to the file at `uri`, creating parent
 * directories and the file itself up front.
 */
override def createOutputStream(uri: String): OutputStream = {
  val file = getFile(uri)
  // We first create the parent directory and the file. That way, even if no content at all is
  // written, the file will still exist.
  // NOTE(review): Files.createFile throws FileAlreadyExistsException when the target
  // exists — confirm callers never write to the same URI twice.
  Files.createDirectories(file.getParent)
  Files.createFile(file)
  new BufferedOutputStream(new FileOutputStream(file.toFile))
}
/** Best-effort delete of the path at `uri`, delegated to the project's FileUtils. */
override def delete(uri: String): Unit = FileUtils.safeDelete(getFile(uri))
/**
 * Recursively lists the URIs of all regular files under `uri`. Hidden
 * directories (and everything beneath them) are skipped; a non-directory
 * argument yields its own URI as the single element.
 */
override def list(uri: String): Iterable[String] = {
  def enumerate(file: Path): Seq[String] = {
    if (Files.isDirectory(file) && !Files.isHidden(file)) {
      // Files.list opens a directory stream that must be closed explicitly or
      // the OS directory handle leaks. Materialize eagerly with toList before
      // closing — Iterator.toSeq would produce a lazy Stream evaluated after
      // the handle is gone.
      val children = Files.list(file)
      try children.iterator.asScala.flatMap(enumerate).toList
      finally children.close()
    } else {
      Seq(file.toUri.toString)
    }
  }
  enumerate(getFile(uri))
}
/** True when `uri` resolves to an existing directory. */
override def isDirectory(uri: String): Boolean = Files.isDirectory(getFile(uri))

/** True when `uri` resolves to an existing regular file. */
override def isFile(uri: String): Boolean = Files.isRegularFile(getFile(uri))
/**
 * Resolves a `file:` URI — or, for this filesystem only, a raw path — to a
 * java.nio Path.
 */
private def getFile(uri: String): Path = {
  if (uri.startsWith("file:")) {
    // Paths.get does not accept URIs with an authority, even empty. However NIO APIs
    // (e.g., Files.list) generate URIs with such empty authorities...
    // Stripping "//" turns "file:///x" into "file:/x", which Paths.get accepts.
    Paths.get(new URI(uri.replace("://", ":")))
  } else {
    // We accept, for this filesystem only, a raw path to be passed as argument.
    Paths.get(uri)
  }
}
} | privamov/accio | accio/java/fr/cnrs/liris/sparkle/filesystem/DefaultFilesystem.scala | Scala | gpl-3.0 | 2,894 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.process
/** Constants shared across the tube process package. */
package object tube {
  // Default name of the date-time attribute used when none is supplied.
  val DEFAULT_DTG_FIELD = "dtg"
}
| nagavallia/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/process/tube/package.scala | Scala | apache-2.0 | 570 |
import sbt._
import de.heikoseeberger.sbtheader._
import scala.util.matching.Regex
/**
 * sbt auto-plugin configuring sbt-header to rewrite existing copyright/license
 * headers into the project's canonical form. Enabled automatically wherever
 * HeaderPlugin is active.
 */
object Copyright extends AutoPlugin {
  override def requires = HeaderPlugin
  override def trigger = allRequirements

  // Matches an existing two-line "// Copyright…\n// License…" header (group 1)
  // followed by the rest of the file (group 2); accepts both License/Licence.
  val HeaderRegex = "(?s)(// Copyright[^\\n]*[\\n]// Licen[cs]e[^\\n]*[\\n])(.*)".r

  val CopyrightHeader = "// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs"
  val ApacheHeader = "// License: http://www.apache.org/licenses/LICENSE-2.0"
  val GplHeader = "// License: http://www.gnu.org/licenses/gpl-3.0.en.html"
  val LgplHeader = "// License: http://www.gnu.org/licenses/lgpl-3.0.en.html"

  /** Pairs the header matcher with its replacement: copyright line + license line. */
  def LicenseWithCopyright(license: String) = HeaderRegex -> s"$CopyrightHeader\n$license\n"

  // The per-extension mapping is identical for every license, so build it once
  // instead of repeating the scala/java pair three times.
  private def licenseMap(license: String): Map[String, (Regex, String)] = Map(
    "scala" -> LicenseWithCopyright(license),
    "java" -> LicenseWithCopyright(license)
  )

  val ApacheMap = licenseMap(ApacheHeader)
  val GplMap = licenseMap(GplHeader)
  val LgplMap = licenseMap(LgplHeader)
}
| espinhogr/ensime-server | project/Copyright.scala | Scala | gpl-3.0 | 1,085 |
// load this script in `play console` via
// :load scripts/console.scala
// Boot a static (non-HTTP) Play application so app classes and configuration
// are usable from the REPL session.
val app = new play.core.StaticApplication(new java.io.File("."))

import play.api.Play.current

// Shortcut to the running application's configuration.
val conf = current.configuration

import keemun.models._

// Sample accounts, account settings and repos for interactive experimentation.
val userAccount = GithubUser("user", Some("http://example.com/u.png"))
val orgAccount = GithubOrg("org", Some("http://example.com/o.png"))

val userAccountSettings = new AccountSettings(userAccount, includePrivateRepos = false)
val userAccountSettings2 = new AccountSettings(userAccount, includePrivateRepos = false)
val orgAccountSettings = new AccountSettings(orgAccount, includePrivateRepos = true)
val accountSettings = userAccountSettings :: orgAccountSettings :: Nil

val userRepoA = new Repo("a", userAccount, Some("some description a"))
val userRepoB = new Repo("b", userAccount, Some("some description b"))
val userRepos = userRepoA :: userRepoB :: Nil
val orgRepoZ = new Repo("z", orgAccount, Some("some description z"), isPrivate = Some(true))
val orgRepoY = new Repo("y", userAccount, Some("some description y"), isPrivate = Some(false))
val orgRepos = orgRepoZ :: orgRepoY :: Nil
// NOTE(review): concatenates userRepos with itself; presumably orgRepos was
// intended as the second operand — confirm.
val repos = userRepos ++ userRepos

// Execution context and duration helpers for experimenting with futures.
import play.api.libs.concurrent.Execution.Implicits._
import scala.concurrent.duration._
import scala.concurrent._
| maizy/keemun | scripts/console.scala | Scala | mit | 1,270 |
/* Copyright 2015 Mario Pastorelli (pastorelli.mario@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package purecsv.safe.converter.defaults
import purecsv.safe.converter.StringConverter
import shapeless.{Generic, ::, HList, HNil}
import scala.util.{Failure, Success, Try}
/**
 * Shapeless-based derivation of RawFieldsConverter: converts between a row of
 * raw CSV fields (Seq[String]) and a case class, via its HList representation.
 * Parsing is total — failures surface as scala.util.Failure, never exceptions.
 */
package object rawfields {
  import purecsv.safe.converter.RawFieldsConverter

  // Uniform failure for inputs that cannot be decoded into the target type.
  // NOTE(review): the message says "converter" where "converted" was probably
  // intended; it is a runtime string, so left unchanged here.
  def illegalConversion(what: String, typ: String) = {
    Failure(new IllegalArgumentException(s"$what cannot be converter to a value of type $typ"))
  }

  // Base case: an empty HList corresponds to exactly zero fields.
  implicit val deriveHNil = new RawFieldsConverter[HNil] {
    override def tryFrom(s: Seq[String]): Try[HNil] = s match {
      case Nil => Success(HNil)
      case _ => illegalConversion(s.mkString("[",", ","]"), "HNil")
    }
    override def to(a: HNil): Seq[String] = Seq.empty
  }

  // Inductive case: decode the head field with its StringConverter, recurse on
  // the tail; the for-comprehension short-circuits on the first Failure.
  implicit def deriveHCons[V, T <: HList]
    (implicit sc: StringConverter[V],
              fto: RawFieldsConverter[T])
    : RawFieldsConverter[V :: T] = new RawFieldsConverter[V :: T] {
    override def tryFrom(s: Seq[String]): Try[V :: T] = s match {
      case Nil => illegalConversion("", classOf[V :: T].toString)
      case _ => for {
        head <- sc.tryFrom(s.head)
        tail <- fto.tryFrom(s.tail)
      } yield head :: tail
    }
    override def to(a: ::[V, T]): Seq[String] = sc.to(a.head) +: fto.to(a.tail)
  }

  // Bridges a case class A to its generic HList representation R and back.
  implicit def deriveClass[A, R](implicit gen: Generic.Aux[A, R],
                                 conv: RawFieldsConverter[R])
    : RawFieldsConverter[A] = new RawFieldsConverter[A] {
    override def tryFrom(s: Seq[String]): Try[A] = conv.tryFrom(s).map(gen.from)
    override def to(a: A): Seq[String] = conv.to(gen.to(a))
  }
}
| jyt109/PureCSV | src/main/scala/purecsv/safe/converter/defaults/rawfields/package.scala | Scala | apache-2.0 | 2,346 |
package models;
import scala.io.Source
import scala.collection.immutable.List
import scala.collection.mutable.ListBuffer
import org.apache.commons.vfs2._
import org.apache.commons.vfs2.auth.StaticUserAuthenticator
import org.apache.commons.vfs2.impl.DefaultFileSystemConfigBuilder
import org.apache.commons.vfs2.provider.ftp.FtpFileSystemConfigBuilder;
/**
 * Retrieves file listings and file contents from either an FTP server or the
 * local filesystem, via Apache Commons VFS. Listing errors yield an empty
 * list; content errors yield an empty string.
 */
object FileRetrival {

  /** Builds FTP FileSystemOptions with static credentials and a data timeout. */
  private def ftpOptions(ftpSite: String, userName: String, password: String,
                         timeoutMs: Int): FileSystemOptions = {
    val authenticator: StaticUserAuthenticator = new StaticUserAuthenticator(ftpSite, userName, password)
    val opts: FileSystemOptions = new FileSystemOptions()
    DefaultFileSystemConfigBuilder.getInstance().setUserAuthenticator(opts, authenticator)
    FtpFileSystemConfigBuilder.getInstance().setDataTimeout(opts, new Integer(timeoutMs))
    opts
  }

  /** Reads the full content of a file object, closing the stream afterwards. */
  private def readContents(fileObj: FileObject): String = {
    // The original code never closed this stream; close it to avoid leaking
    // connections/descriptors on every read.
    val inputStream = fileObj.getContent().getInputStream()
    try Source.fromInputStream(inputStream).mkString
    finally inputStream.close()
  }

  /** Lists all file paths reachable from the FTP root; Nil on connection failure. */
  def getFilePathsFromFTP(ftpSite: String, userName: String, password: String): List[String] = {
    try {
      val opts = ftpOptions(ftpSite, userName, password, 600)
      val fileObj = VFS.getManager().resolveFile("ftp://" + ftpSite, opts)
      getFilePathsFromRoot(fileObj).toList
    } catch {
      case ex: FileSystemException => List.empty[String]
    }
  }

  /** Reads one file's content from FTP; empty string on failure. */
  def getFileContentsFromFTP(ftpSite: String, userName: String, password: String, filePath: String): String = {
    try {
      // Content reads use a longer data timeout than directory listings.
      val opts = ftpOptions(ftpSite, userName, password, 10000)
      val fileObj = VFS.getManager().resolveFile("ftp://" + ftpSite + filePath, opts)
      readContents(fileObj)
    } catch {
      case ex: FileSystemException => ""
    }
  }

  /** Lists all file paths under a local directory; Nil (with a log line) on failure. */
  def getFilePathsFromLocalDir(dirPath: String): List[String] = {
    try {
      val fileObj = VFS.getManager().resolveFile("file://" + dirPath)
      getFilePathsFromRoot(fileObj).toList
    } catch {
      case ex: FileSystemException =>
        println("Error when attempting to access directory at: " + dirPath)
        println(ex)
        List.empty[String]
    }
  }

  /** Reads one local file's content; empty string on failure. */
  def getFileContentsFromLocal(dirPath: String): String = {
    try {
      val fileObj = VFS.getManager().resolveFile("file://" + dirPath)
      readContents(fileObj)
    } catch {
      case ex: FileSystemException => ""
    }
  }

  /**
   * Recursively collects the paths of all files under `fileObj`, retrying a
   * level whenever the VFS call times out with a FileSystemException.
   */
  def getFilePathsFromRoot(fileObj: FileObject): ListBuffer[String] = {
    var result: Option[ListBuffer[String]] = None
    while (result.isEmpty) {
      // Build into a fresh buffer on each attempt: the original appended into a
      // shared buffer, so a failure after a partial traversal duplicated
      // entries on retry.
      val attempt = new ListBuffer[String]()
      try {
        println(fileObj.getName().getPath())
        if (fileObj.getType() == FileType.FOLDER) {
          for (child: FileObject <- fileObj.getChildren()) {
            attempt.appendAll(getFilePathsFromRoot(child))
          }
        } else {
          attempt += fileObj.getName().getPath()
        }
        result = Some(attempt)
      } catch {
        case ex: FileSystemException =>
          // NOTE(review): matches the original's unbounded retry; consider
          // capping the number of attempts to avoid a potential infinite loop.
          println("Timed out when attempting to get children of " + fileObj.getName().getPath())
      }
    }
    result.get
  }
}
| seqprodbio/restoule | app/models/FileRetrival.scala | Scala | gpl-3.0 | 3,809 |
package org.apache.spark.ml.mleap.converter
import com.truecar.mleap.core.linalg
import org.apache.spark.mllib.linalg.{SparseVector, DenseVector, Vector}
/**
* Created by hwilkins on 11/18/15.
*/
/** Converts a Spark MLlib vector into the equivalent MLeap vector. */
case class VectorToMleap(vector: Vector) {
  // Both dense and sparse representations are preserved as-is.
  def toMleap: linalg.Vector = {
    vector match {
      case DenseVector(values) => linalg.Vector.dense(values)
      case SparseVector(size, indices, values) => linalg.Vector.sparse(size, indices, values)
    }
  }
}
| TrueCar/mleap | mleap-spark/src/main/scala/org/apache/spark/ml/mleap/converter/VectorToMleap.scala | Scala | apache-2.0 | 465 |
package scala.meta.internal.hosts.scalac
package reflect
import scala.tools.nsc.Global
import scala.reflect.internal.Flags
import scala.collection.mutable
import org.scalameta.invariants._
/**
 * Enrichments over scalac compiler Types used by the scalameta host:
 * eta-reduction of polymorphic type refs, vararg normalization, and a
 * heuristic cleanup of ClassInfoType parents.
 */
trait TypeHelpers {
  self: GlobalToolkit =>
  import global.{require => _, _}
  import definitions._
  implicit class RichHelperType(tpe: Type) {
    // Collapses `[A1..An] => C[A1..An]` to `C` when possible; identity otherwise.
    def etaReduce: Type = EtaReduce.unapply(tpe).getOrElse(tpe)
    // Rewrites a repeated-parameter type `T*` to `Seq[T]`.
    def normalizeVararg: Type = tpe match {
      case TypeRef(_, sym, List(arg)) if sym == RepeatedParamClass => appliedType(SeqClass, arg)
      case _ => tpe
    }
    // NOTE(review): unimplemented — calling this throws NotImplementedError.
    def directBaseTypes: List[Type] = ???
  }
  implicit class RichHelperClassInfoType(tpe: ClassInfoType) {
    // Drops a synthesized superclass when the first trait parent already
    // subsumes it, so the parent list reflects what the user wrote.
    def realParents: List[Type] = {
      tpe.parents match {
        case classTpe :: traitTpe :: rest if traitTpe <:< classTpe =>
          // NOTE: this is obviously imprecise, but at least it captures the common case
          traitTpe :: rest
        case other =>
          other
      }
    }
  }
  // Extractor recognizing a PolyType that merely re-applies its own type
  // parameters in order, i.e. an eta-expanded type constructor.
  object EtaReduce {
    def unapply(tpe: Type): Option[Type] = tpe match {
      case PolyType(tparams, TypeRef(pre, sym, targs)) =>
        val canReduce = tparams.zip(targs).forall({
          case (tparam, TypeRef(_, targsym, Nil)) => tparam == targsym
          case _ => false
        })
        if (canReduce) Some(TypeRef(pre, sym, Nil))
        else None
      case _ =>
        None
    }
  }
}
| beni55/scalameta | scalahost/src/main/scala/scala/meta/internal/hosts/scalac/reflect/TypeHelpers.scala | Scala | bsd-3-clause | 1,391 |
package pirc.kpi.impl
import scala.collection.JavaConverters._
import akka.actor.{Actor, ActorSystem, Props}
// ----------------------------------------------------------------------
//
// L O G
//
// T R A C K E R
//
// A tracker that accumulates log messages from the tracked application.
// ----------------------------------------------------------------------
object LogTracker {
  // Paths ending in "Log" select this tracker implementation.
  val Match = "(.*Log)$".r
  // Register factories so matching paths produce a LogTracker actor / client.
  Tracker.factories.push(
    { case Match(path) => Props[LogTracker] }
  )
  TrackerClient.factories.push(
    { case Match(path) => new LogTrackerClient(path) }
  )
  // Messages accepted by the tracker, one per severity level.
  case class Info(msg: String) extends TrackerMessage
  case class Warning(msg: String) extends TrackerMessage
  case class Error(msg: String) extends TrackerMessage
}
/**
 * Tracker actor that accumulates the most recent log messages (up to
 * bufferSize, newest first) and exposes them via `status`.
 */
class LogTracker extends Tracker {
  // Maximum number of retained messages; older entries are dropped.
  val bufferSize = 20
  // Newest-first list of received messages.
  var messages = Seq[Message]()
  // One retained log entry; Java-style getters mirror the fields for callers
  // consuming `status` from Java.
  case class Message(timestamp: java.util.Date, level: Int, message: String) {
    def getTimestamp = timestamp
    def getLevel = level
    def getMessage = message
  }
  override def receive = logReceive orElse super.receive
  // Prepends a timestamped entry and trims the buffer to bufferSize.
  private def addMessage(level: Int, msg: String): Seq[Message] = {
    // NOTE(review): debug println left in place; consider removing or routing
    // through a proper logger.
    println(s"add message ${msg}, level ${level} to the stack which has ${messages.size} messages.")
    (Message(new java.util.Date, level, msg) +: messages).take(bufferSize)
  }
  // Numeric levels: 3 = info, 2 = warning, 1 = error.
  def logReceive: Actor.Receive = {
    case LogTracker.Info(msg) => messages = addMessage(3, msg)
    case LogTracker.Warning(msg) => messages = addMessage(2, msg)
    case LogTracker.Error(msg) => messages = addMessage(1, msg)
  }
  // Exposes the retained messages as a java.util.List.
  override def status: Any = messages.asJava
}
/**
 * Client facade for a LogTracker actor at `path`: each call forwards one
 * severity-tagged message to the backing tracker, fire-and-forget.
 */
class LogTrackerClient(path: String)
    extends TrackerClient(path)
    with pirc.kpi.LogTrackerClient {

  def info(msg: String) = tracker ! LogTracker.Info(msg)

  def warning(msg: String) = tracker ! LogTracker.Warning(msg)

  def error(msg: String) = tracker ! LogTracker.Error(msg)
}
/**
 * Mixin variant of the client for actors: `tracker` is an Option here, so
 * messages are silently dropped until the tracker reference is resolved.
 */
trait LogTrackerClientActor
    extends TrackerClientActor
    with pirc.kpi.LogTrackerClient {
  def info(msg: String) = tracker.map { t => t ! LogTracker.Info(msg) }
  def warning(msg: String) = tracker.map { t => t ! LogTracker.Warning(msg) }
  def error(msg: String) = tracker.map { t => t ! LogTracker.Error(msg) }
}
| Pirc/kpi | src/main/scala/LogTracker.scala | Scala | apache-2.0 | 2,282 |
import sbt._
import org.scalajs.sbtplugin.ScalaJSPlugin.autoImport._
import sbt.Keys._
import scalajsbundler.sbtplugin.ScalaJSBundlerPlugin.autoImport._
/**
* Application settings. Configure the build for your application here.
* You normally don't have to touch the actual build definition after this.
*/
object Settings {
  /** The name of your application */
  val name = "scalajs-spa"
  /** The version of your application */
  val version = "0.0.1"
  /** Options for the scala compiler */
  val scalacOptions = Seq(
    "-Xlint",
    "-unchecked",
    "-deprecation",
    "-feature"
  )
  /** Declare global dependency versions here to avoid mismatches in multi part dependencies */
  object versions {
    val scala = "2.12.4"
    val scalaDom = "0.9.2"
    val scalajsReact = "1.0.1"
    val scalajsReactComponents = "0.8.0"
    val scalaCSS = "0.5.3"
    val log4js = "1.4.14"
    val autowire = "0.2.6"
    val booPickle = "1.2.5"
    val diode = "1.1.2"
    val uTest = "0.4.7"
    val react = "15.5.4"
    val jQuery = "1.11.1"
    val bootstrap = "3.3.6"
    val chartjs = "2.1.3"
    val scalajsScripts = "1.1.1"
    val playJson = "2.6.0"
    val Sttp = "0.0.14"
    val circe = "0.8.0"
    // js dependencies
    val MuiVersion = "0.19.4"
    val reactVersion = "15.5.0"
    val reactInfinite = "0.12.1"
  }
  /**
   * These dependencies are shared between JS and JVM projects
   * the special %%% function selects the correct version for each project
   */
  val sharedDependencies = Def.setting(
    Seq(
      "com.lihaoyi" %%% "autowire" % versions.autowire,
      "me.chrons" %%% "boopickle" % versions.booPickle
    ))
  /** Dependencies only used by the JVM project */
  val jvmDependencies = Def.setting(
    Seq(
      "com.vmunier" %% "scalajs-scripts" % versions.scalajsScripts,
      "com.typesafe.play" %% "play-json" % versions.playJson,
      "org.webjars" % "font-awesome" % "4.3.0-1" % Provided,
      "org.webjars" % "bootstrap" % versions.bootstrap % Provided,
      "io.circe" %% "circe-generic" % versions.circe,
      "com.softwaremill.sttp" %% "core" % versions.Sttp,
      "com.softwaremill.sttp" %% "akka-http-backend" % versions.Sttp,
      "com.softwaremill.sttp" %% "circe" % versions.Sttp,
      "com.lihaoyi" %% "utest" % versions.uTest % Test
    ))
  /** Dependencies only used by the JS project (note the use of %%% instead of %%) */
  val scalajsDependencies = Def.setting(
    Seq(
      "com.github.japgolly.scalajs-react" %%% "core" % versions.scalajsReact,
      "com.github.japgolly.scalajs-react" %%% "extra" % versions.scalajsReact,
      "com.github.japgolly.scalacss" %%% "ext-react" % versions.scalaCSS,
      "com.olvind" %%% "scalajs-react-components" % versions.scalajsReactComponents,
      "io.suzaku" %%% "diode" % versions.diode,
      "io.suzaku" %%% "diode-react" % versions.diode,
      "org.typelevel" %%% "cats-core" % "1.0.0-MF",
      "org.scala-js" %%% "scalajs-dom" % versions.scalaDom,
      "com.lihaoyi" %%% "utest" % versions.uTest % Test
    ))
}
| beikern/foulkon-ui | project/Settings.scala | Scala | apache-2.0 | 3,087 |
package daos.doobie
import doobie.specs2.imports.AnalysisSpec
import org.specs2.mutable.Specification
import testutil.TestUtil
import PagosDaoDoobie._
/**
 * Type-checks the PagosDaoDoobie queries against the test database schema
 * using doobie's AnalysisSpec (no data assertions, only query validity).
 */
object PagosDaoDoobieSpec extends Specification with AnalysisSpec {
  val transactor = TestUtil.transactor()
  check(qAddPago(0L, BigDecimal("0"), None))
  check(qTodosPendientes)
}
| kdoomsday/kaminalapp | test/daos/doobie/PagosDaoDoobieSpec.scala | Scala | mit | 336 |
import org.scalatest.FunSuite
import scala.reflect.runtime.universe._
// Regression placeholder for issue 10: the test passes as long as this file compiles.
class Issue10 extends FunSuite {
  test("compiles") {}
}
| hiroshi-cl/InverseMacros | paradise_tests/src/test/scala/annotations/run/Issue10.scala | Scala | bsd-2-clause | 128 |
package ml.combust.mleap.core.tree
import ml.combust.mleap.core.annotation.SparkCode
import org.apache.spark.ml.linalg.Vector
/** Trait for decision-tree split logic.
  *
  * A split decides, per instance, whether traversal continues into the left
  * or right child of a tree node.
  */
@SparkCode(uri = "https://github.com/apache/spark/blob/v2.0.0/mllib/src/main/scala/org/apache/spark/ml/tree/Split.scala")
sealed trait Split extends Serializable {
  /** Index of the feature to split on.
   *
   * @return index of split feature
   */
  def featureIndex: Int
  /** Whether to go left or not.
   *
   * @param features feature vector of the instance being routed
   * @return true if the instance goes left, false otherwise
   */
  def shouldGoLeft(features: Vector): Boolean
  /** Whether to go left or not.
   *
   * Uses binned feature values as an optimization.
   *
   * @param binnedFeature binned value of the feature at `featureIndex`
   * @param splits candidate splits for this feature (defines the bins)
   * @return true if the binned value routes left, false otherwise
   */
  def shouldGoLeft(binnedFeature: Int, splits: Array[Split]): Boolean
}
/** Split on a categorical feature.
  *
  * The instance goes left exactly when membership of its feature value in
  * `categories` agrees with `isLeft`.
  *
  * @param featureIndex index of the feature this split tests
  * @param numCategories number of potential categories for the feature
  * @param categories category values associated with this split's branch
  * @param isLeft whether `categories` selects the left branch
  */
@SparkCode(uri = "https://github.com/apache/spark/blob/v2.0.0/mllib/src/main/scala/org/apache/spark/ml/tree/Split.scala")
final case class CategoricalSplit(featureIndex: Int,
                                  numCategories: Int,
                                  categories: Array[Double],
                                  isLeft: Boolean) extends Split {

  // Membership sends the instance left iff this is a "left" split:
  // (isLeft, contains) -> left  ==  contains == isLeft.
  private def routesLeft(value: Double): Boolean =
    categories.contains(value) == isLeft

  override def shouldGoLeft(features: Vector): Boolean =
    routesLeft(features(featureIndex))

  override def shouldGoLeft(binnedFeature: Int, splits: Array[Split]): Boolean =
    routesLeft(binnedFeature.toDouble)
}
/** Split on a continuous feature.
  *
  * The instance goes left when its feature value is at most `threshold`.
  *
  * @param featureIndex index of the feature this split tests
  * @param threshold inclusive upper bound for taking the left branch
  */
@SparkCode(uri = "https://github.com/apache/spark/blob/v2.0.0/mllib/src/main/scala/org/apache/spark/ml/tree/Split.scala")
final case class ContinuousSplit(featureIndex: Int,
                                 threshold: Double) extends Split {

  override def shouldGoLeft(features: Vector): Boolean =
    features(featureIndex) <= threshold

  override def shouldGoLeft(binnedFeature: Int, splits: Array[Split]): Boolean =
    // A bin index equal to splits.length means the value exceeds every split
    // threshold, so it can never route left.
    binnedFeature != splits.length && {
      val binUpperBound = splits(binnedFeature).asInstanceOf[ContinuousSplit].threshold
      binUpperBound <= threshold
    }
}
| combust/mleap | mleap-core/src/main/scala/ml/combust/mleap/core/tree/Split.scala | Scala | apache-2.0 | 2,818 |
package composition
import play.api.GlobalSettings
import play.api.mvc.EssentialAction
import play.api.mvc.Filters
/** Mixes the application's filter chain (from `Composition.filters`) into
  * Play's global request handling by wrapping every action.
  */
trait WithFilters extends Composition with GlobalSettings {
  override def doFilter(a: EssentialAction): EssentialAction = {
    // Apply all configured filters around whatever the default pipeline built.
    Filters(super.doFilter(a), filters: _*)
  }
}
package mesosphere.marathon
package api.v2.json
import mesosphere.marathon.api.JsonTestHelper
import mesosphere.marathon.api.v2.{AppHelpers, AppNormalization, ValidationHelper}
import mesosphere.marathon.core.check.MesosCommandCheck
import mesosphere.marathon.core.health.{MarathonHttpHealthCheck, MesosCommandHealthCheck, MesosHttpHealthCheck, PortReference}
import mesosphere.marathon.core.plugin.PluginManager
import mesosphere.marathon.core.pod.{BridgeNetwork, ContainerNetwork}
import mesosphere.marathon.raml.{Raml, Resources, SecretDef}
import mesosphere.marathon.state.Container.{Docker, PortMapping}
import mesosphere.marathon.state.EnvVarValue._
import mesosphere.marathon.state.PathId._
import mesosphere.marathon.state._
import mesosphere.marathon.util.RoleSettings
import mesosphere.{UnitTest, ValidationTestLike}
import play.api.libs.json.Json
import scala.collection.immutable.Seq
import scala.concurrent.duration._
class AppDefinitionTest extends UnitTest with ValidationTestLike {
  // Feature flags enabled for the validators under test; "secrets" is needed
  // by several fixtures below.
  val enabledFeatures = Set("secrets")
  // NOTE(review): `enforcedrole` appears unused in this suite (and violates
  // camelCase naming) — confirm and consider removing.
  val enforcedrole = "*"
  // Validator with the default role settings.
  val validator = AppDefinition.validAppDefinition(enabledFeatures, ValidationHelper.roleSettings())(PluginManager.None)
  // Validator that only accepts apps whose role is "someRole".
  val validatorWithRole =
    AppDefinition.validAppDefinition(enabledFeatures, RoleSettings(validRoles = Set("someRole"), defaultRole = "someRole"))(
      PluginManager.None
    )
  /** Applies the standard app normalization (defaults, network/port expansion)
    * that the API layer would perform, using a fixed test configuration.
    */
  private[this] def appNormalization(app: raml.App): raml.App =
    AppHelpers
      .appNormalization(
        AppNormalization.Configuration(None, "mesos-bridge-name", enabledFeatures, ResourceRole.Unreserved),
        Set(ResourceRole.Unreserved)
      )
      .normalized(app)
  /** Parses raw JSON into a RAML app, normalizes it, and converts it to the
    * internal [[AppDefinition]] model — mirroring the API ingestion path.
    * Throws `ValidationFailedException` if normalization/validation fails.
    */
  private[this] def fromJson(json: String): AppDefinition = {
    val raw: raml.App = Json.parse(json).as[raml.App]
    Raml.fromRaml(appNormalization(raw))
  }
"AppDefinition" should {
"Validation" in {
var app = AppDefinition(id = "a b".toAbsolutePath, role = "*")
val idError =
"must fully match regular expression '^(([a-z0-9]|[a-z0-9][a-z0-9\\-]*[a-z0-9])\\.)*([a-z0-9]|[a-z0-9][a-z0-9\\-]*[a-z0-9])|(\\.|\\.\\.)$'"
validator(app) should haveViolations("/id" -> idError)
app = app.copy(id = "a#$%^&*b".toAbsolutePath)
validator(app) should haveViolations("/id" -> idError)
app = app.copy(id = "-dash-disallowed-at-start".toAbsolutePath)
validator(app) should haveViolations("/id" -> idError)
app = app.copy(id = "dash-disallowed-at-end-".toAbsolutePath)
validator(app) should haveViolations("/id" -> idError)
app = app.copy(id = "uppercaseLettersNoGood".toAbsolutePath)
validator(app) should haveViolations("/id" -> idError)
val correct = AppDefinition(id = "test".toAbsolutePath, role = "*")
app = correct.copy(
role = "aRole",
cmd = Some("cmd")
)
validatorWithRole(app) should haveViolations(
"/role" -> "got aRole, expected one of: [someRole]"
)
app = app.copy(
role = "someRole",
acceptedResourceRoles = Set("differentRole")
)
validatorWithRole(app) should haveViolations(
"/acceptedResourceRoles" -> "acceptedResourceRoles can only contain * and someRole"
)
app = correct.copy(
networks = Seq(ContainerNetwork("whatever")),
container = Some(
Docker(
image = "mesosphere/marathon",
portMappings = Seq(
PortMapping(8080, None, 0, "tcp", Some("foo"))
)
)
),
portDefinitions = Nil
)
validator(app) shouldNot haveViolations("/container/portMappings(0)" -> "hostPort is required for BRIDGE mode.")
val caught = intercept[IllegalArgumentException] {
correct.copy(
networks = Seq(BridgeNetwork()),
container = Some(
Docker(
image = "mesosphere/marathon",
portMappings = Seq(
PortMapping(8080, None, 0, "tcp", Some("foo"))
)
)
),
portDefinitions = Nil
)
}
caught.getMessage should include("bridge networking requires that every host-port in a port-mapping is non-empty (but may be zero)")
app = correct.copy(executor = "//cmd")
validator(app) shouldNot haveViolations("/executor" -> "{javax.validation.constraints.Pattern.message}")
app = correct.copy(executor = "some/relative/path.mte")
validator(app) shouldNot haveViolations("/executor" -> "{javax.validation.constraints.Pattern.message}")
app = correct.copy(executor = "/some/absolute/path")
validator(app) shouldNot haveViolations("/executor" -> "{javax.validation.constraints.Pattern.message}")
app = correct.copy(executor = "")
validator(app) shouldNot haveViolations("/executor" -> "{javax.validation.constraints.Pattern.message}")
app = correct.copy(executor = "/test/")
validator(app) should haveViolations("/executor" -> "must fully match regular expression '^(//cmd)|(/?[^/]+(/[^/]+)*)|$'")
app = correct.copy(executor = "/test//path")
validator(app) should haveViolations("/executor" -> "must fully match regular expression '^(//cmd)|(/?[^/]+(/[^/]+)*)|$'")
app = correct.copy(cmd = Some("command"), args = Seq("a", "b", "c"))
validator(app) should haveViolations("/" -> "AppDefinition must either contain one of 'cmd' or 'args', and/or a 'container'.")
app = correct.copy(cmd = None, args = Seq("a", "b", "c"))
validator(app) shouldNot haveViolations("/" -> "AppDefinition must either contain one of 'cmd' or 'args', and/or a 'container'.")
app = correct.copy(upgradeStrategy = UpgradeStrategy(1.2))
validator(app) should haveViolations("/upgradeStrategy/minimumHealthCapacity" -> "got 1.2, expected between 0.0 and 1.0")
app = correct.copy(upgradeStrategy = UpgradeStrategy(0.5, 1.2))
validator(app) should haveViolations("/upgradeStrategy/maximumOverCapacity" -> "got 1.2, expected between 0.0 and 1.0")
app = correct.copy(upgradeStrategy = UpgradeStrategy(-1.2))
validator(app) should haveViolations("/upgradeStrategy/minimumHealthCapacity" -> "got -1.2, expected between 0.0 and 1.0")
app = correct.copy(upgradeStrategy = UpgradeStrategy(0.5, -1.2))
validator(app) should haveViolations("/upgradeStrategy/maximumOverCapacity" -> "got -1.2, expected between 0.0 and 1.0")
app = correct.copy(
networks = Seq(BridgeNetwork()),
container = Some(
Docker(
portMappings = Seq(
PortMapping(8080, Some(0), 0, "tcp"),
PortMapping(8081, Some(0), 0, "tcp")
)
)
),
portDefinitions = Nil,
healthChecks = Set(MarathonHttpHealthCheck(portIndex = Some(PortReference(1))))
)
validator(app) shouldNot haveViolations(
"/healthChecks(0)" -> "Health check port indices must address an element of the ports array or container port mappings."
)
app = correct.copy(
networks = Seq(BridgeNetwork()),
container = Some(Docker()),
portDefinitions = Nil,
healthChecks = Set(MarathonHttpHealthCheck(port = Some(80)))
)
validator(app) shouldNot haveViolations(
"/healthChecks(0)" -> "Health check port indices must address an element of the ports array or container port mappings."
)
app = correct.copy(
healthChecks = Set(MarathonHttpHealthCheck(portIndex = Some(PortReference(1))))
)
validator(app) should haveViolations(
"/healthChecks(0)" -> "Health check port indices must address an element of the ports array or container port mappings."
)
app = correct.copy(
fetch = Seq(FetchUri(uri = "http://example.com/valid"), FetchUri(uri = "d://\not-a-uri"))
)
validator(app) should haveViolations("/fetch(1)/uri" -> "URI has invalid syntax.")
      // NOTE(review): dead assignment — `app` is overwritten on the next line
      // before any validator call, so the UnreachableDisabled case is never
      // actually exercised. Confirm intent and add a validation check if one
      // was meant to follow.
      app = correct.copy(unreachableStrategy = UnreachableDisabled)
      app = correct.copy(fetch = Seq(FetchUri(uri = "http://example.com/valid"), FetchUri(uri = "/root/file")))
      validator(app) shouldNot haveViolations("/fetch(1)" -> "URI has invalid syntax.")
validator(app.copy(resources = Resources(mem = -3.0))) should haveViolations("/mem" -> "got -3.0, expected 0.0 or more")
validator(app.copy(resources = Resources(cpus = -3.0))) should haveViolations("/cpus" -> "got -3.0, expected 0.0 or more")
validator(app.copy(resources = Resources(disk = -3.0))) should haveViolations("/disk" -> "got -3.0, expected 0.0 or more")
validator(app.copy(resources = Resources(gpus = -3))) should haveViolations("/gpus" -> "got -3, expected 0 or more")
validator(app.copy(instances = -3)) should haveViolations("/instances" -> "got -3, expected 0 or more")
validator(app.copy(resources = Resources(gpus = 1))) should haveViolations(
"/" -> "Feature gpu_resources is not enabled. Enable with --enable_features gpu_resources)"
)
{
val appValidator = AppDefinition.validAppDefinition(Set("gpu_resources"), ValidationHelper.roleSettings())(PluginManager.None)
appValidator(app.copy(resources = Resources(gpus = 1))) shouldNot haveViolations(
"/" -> "Feature gpu_resources is not enabled. Enable with --enable_features gpu_resources)"
)
}
app = correct.copy(resources = Resources(gpus = 1), container = Some(Container.Docker()))
validator(app) should haveViolations("/" -> "GPU resources only work with the Mesos containerizer")
app = correct.copy(
resources = Resources(gpus = 1),
container = Some(Container.Mesos())
)
validator(app) shouldNot haveViolations("/" -> "GPU resources only work with the Mesos containerizer")
app = correct.copy(
resources = Resources(gpus = 1),
container = Some(Container.MesosDocker())
)
validator(app) shouldNot haveViolations("/" -> "GPU resources only work with the Mesos containerizer")
app = correct.copy(
resources = Resources(gpus = 1),
container = None
)
validator(app) shouldNot haveViolations("/" -> "GPU resources only work with the Mesos containerizer")
app = correct.copy(check = Some(MesosCommandCheck(command = Command("foo"))), container = Some(Container.Docker()))
validator(app) should haveViolations("/" -> "AppDefinition must not use 'checks' if using docker")
app = correct.copy(
cmd = Some("cmd"),
check = Some(MesosCommandCheck(command = Command("foo"))),
container = Some(Container.Mesos())
)
validator(app) shouldNot haveViolations("/" -> "Validation succeeded, had no violations")
}
"SerializationRoundtrip empty" in {
val app1 = raml.App(id = "/test", cmd = Some("foo"))
assert(app1.args.isEmpty)
JsonTestHelper.assertSerializationRoundtripWorks(app1, appNormalization)
JsonTestHelper.assertSerializationRoundtripWithJacksonWorks(app1, appNormalization)
}
"Reading app definition with command health check" in {
val json2 =
"""
{
"id": "toggle",
"cmd": "python toggle.py $PORT0",
"cpus": 0.2,
"disk": 0.0,
"healthChecks": [
{
"protocol": "COMMAND",
"command": { "value": "env && http http://$HOST:$PORT0/" }
}
],
"instances": 2,
"mem": 32.0,
"ports": [0],
"uris": ["http://downloads.mesosphere.com/misc/toggle.tgz"]
}
"""
val readResult2 = fromJson(json2)
assert(readResult2.healthChecks.nonEmpty, readResult2)
assert(readResult2.healthChecks.head == MesosCommandHealthCheck(command = Command("env && http http://$HOST:$PORT0/")), readResult2)
}
"Reading app definition with command check" in {
val json2 =
"""
{
"id": "toggle",
"cmd": "python toggle.py $PORT0",
"cpus": 0.2,
"disk": 0.0,
"check": {
"exec": {
"command": {
"shell": "ls"
}
}
},
"instances": 2,
"mem": 32.0,
"ports": [0],
"uris": ["http://downloads.mesosphere.com/misc/toggle.tgz"]
}
"""
val readResult = fromJson(json2)
assert(readResult.check.nonEmpty, readResult)
assert(readResult.check.get == MesosCommandCheck(command = Command("ls")), readResult)
}
"SerializationRoundtrip with complex example" in {
val app3 = raml.App(
id = "/prod/product/my-app",
cmd = Some("sleep 30"),
user = Some("nobody"),
env = raml.Environment("key1" -> "value1", "key2" -> "value2"),
instances = 5,
cpus = 5.0,
mem = 55.0,
disk = 550.0,
constraints = Set(Seq("attribute", "GROUP_BY", "1")),
portDefinitions = Some(Seq(raml.PortDefinition(9001), raml.PortDefinition(9002))),
requirePorts = true,
backoffSeconds = 5,
backoffFactor = 1.5,
maxLaunchDelaySeconds = 180,
container = Some(raml.Container(`type` = raml.EngineType.Docker, docker = Some(raml.DockerContainer(image = "group/image")))),
healthChecks = Set(raml.AppHealthCheck(protocol = raml.AppHealthCheckProtocol.Http, portIndex = Some(0))),
check = Some(raml.AppCheck(exec = Some(raml.CommandCheck(raml.ShellCommand("ls"))))),
dependencies = Set("/prod/product/backend"),
upgradeStrategy = Some(raml.UpgradeStrategy(minimumHealthCapacity = 0.75, maximumOverCapacity = 1.0))
)
withValidationClue {
JsonTestHelper.assertSerializationRoundtripWorks(app3, appNormalization)
JsonTestHelper.assertSerializationRoundtripWithJacksonWorks(app3, appNormalization)
}
}
"SerializationRoundtrip preserves portIndex" in {
val app3 = raml.App(
id = "/prod/product/frontend/my-app",
cmd = Some("sleep 30"),
portDefinitions = Some(raml.PortDefinitions(9001, 9002)),
healthChecks = Set(raml.AppHealthCheck(protocol = raml.AppHealthCheckProtocol.Http, portIndex = Some(1)))
)
JsonTestHelper.assertSerializationRoundtripWorks(app3, appNormalization)
JsonTestHelper.assertSerializationRoundtripWithJacksonWorks(app3, appNormalization)
}
"Reading AppDefinition adds portIndex to a Marathon HTTP health check if the app has ports" in {
val app = AppDefinition(
id = AbsolutePathId("/prod/product/frontend/my-app"),
role = "*",
cmd = Some("sleep 30"),
portDefinitions = PortDefinitions(9001, 9002),
healthChecks = Set(MarathonHttpHealthCheck())
)
val json = Json.toJson(Raml.toRaml(app)).toString()
val reread = fromJson(json)
assert(reread.healthChecks.headOption.contains(MarathonHttpHealthCheck(portIndex = Some(PortReference(0)))), json)
}
"Reading AppDefinition does not add portIndex to a Marathon HTTP health check if the app doesn't have ports" in {
val app = AppDefinition(
id = AbsolutePathId("/prod/product/frontend/my-app"),
role = "*",
cmd = Some("sleep 30"),
portDefinitions = Seq.empty,
healthChecks = Set(MarathonHttpHealthCheck())
)
val json = Json.toJson(Raml.toRaml(app)).toString()
val ex = intercept[ValidationFailedException] {
fromJson(json)
}
ex.getMessage should include("Health check port indices must address an element of the ports array or container port mappings")
}
"Reading AppDefinition adds portIndex to a Marathon HTTP health check if it has at least one portMapping" in {
val app = AppDefinition(
id = AbsolutePathId("/prod/product/frontend/my-app"),
role = "*",
cmd = Some("sleep 30"),
portDefinitions = Seq.empty,
networks = Seq(ContainerNetwork("whatever")),
container = Some(
Docker(
image = "foo",
portMappings = Seq(Container.PortMapping(containerPort = 1))
)
),
healthChecks = Set(MarathonHttpHealthCheck())
)
val json = Json.toJson(Raml.toRaml(app))
val reread = fromJson(json.toString)
reread.healthChecks.headOption should be(Some(MarathonHttpHealthCheck(portIndex = Some(PortReference(0)))))
}
"Reading AppDefinition does not add portIndex to a Marathon HTTP health check if it has no ports nor portMappings" in {
val app = AppDefinition(
id = AbsolutePathId("/prod/product/frontend/my-app"),
role = "*",
cmd = Some("sleep 30"),
portDefinitions = Seq.empty,
container = Some(Docker(image = "foo")),
healthChecks = Set(MarathonHttpHealthCheck())
)
val json = Json.toJson(Raml.toRaml(app))
val ex = intercept[ValidationFailedException] {
fromJson(json.toString) withClue (json)
}
ex.getMessage should include("Health check port indices must address an element of the ports array or container port mappings")
}
"Reading AppDefinition does not add portIndex to a Mesos HTTP health check if the app doesn't have ports" in {
val app = AppDefinition(
id = AbsolutePathId("/prod/product/frontend/my-app"),
role = "*",
cmd = Some("sleep 30"),
portDefinitions = Seq.empty,
healthChecks = Set(MesosHttpHealthCheck())
)
val json = Json.toJson(Raml.toRaml(app))
val reread = fromJson(json.toString)
reread.healthChecks.headOption should be(Some(MesosHttpHealthCheck(portIndex = None)))
}
"Reading AppDefinition adds portIndex to a Mesos HTTP health check if it has at least one portMapping" in {
val app = AppDefinition(
id = AbsolutePathId("/prod/product/frontend/my-app"),
role = "*",
cmd = Some("sleep 30"),
portDefinitions = Seq.empty,
networks = Seq(ContainerNetwork("whatever")),
container = Some(
Docker(
image = "abc",
portMappings = Seq(Container.PortMapping(containerPort = 1))
)
),
healthChecks = Set(MesosHttpHealthCheck())
)
withValidationClue {
val json = Json.toJson(Raml.toRaml(app))
val reread = fromJson(json.toString)
reread.healthChecks.headOption should be(Some(MesosHttpHealthCheck(portIndex = Some(PortReference(0)))))
}
}
"Reading AppDefinition does not add portIndex to a Mesos HTTP health check if it has no ports nor portMappings" in {
val app = AppDefinition(
id = AbsolutePathId("/prod/product/frontend/my-app"),
role = "*",
cmd = Some("sleep 30"),
portDefinitions = Seq.empty,
container = Some(Docker(image = "foo")),
healthChecks = Set(MesosHttpHealthCheck())
)
val json = Json.toJson(Raml.toRaml(app))
val reread = fromJson(json.toString)
reread.healthChecks.headOption should be(Some(MesosHttpHealthCheck(portIndex = None)))
}
"Read app with container definition and port mappings" in {
val app4 = AppDefinition(
id = "bridged-webapp".toAbsolutePath,
role = "*",
cmd = Some("python3 -m http.server 8080"),
networks = Seq(BridgeNetwork()),
container = Some(
Docker(
image = "python:3",
portMappings = Seq(
PortMapping(containerPort = 8080, hostPort = Some(0), servicePort = 9000, protocol = "tcp")
)
)
)
)
val json4 =
"""
{
"id": "bridged-webapp",
"cmd": "python3 -m http.server 8080",
"container": {
"type": "DOCKER",
"docker": {
"image": "python:3",
"network": "BRIDGE",
"portMappings": [
{ "containerPort": 8080, "hostPort": 0, "servicePort": 9000, "protocol": "tcp" }
]
}
}
}
"""
val readResult4 = fromJson(json4)
assert(readResult4.copy(versionInfo = app4.versionInfo) == app4)
}
"Read app with fetch definition" in {
val app = AppDefinition(
id = "app-with-fetch".toAbsolutePath,
role = "*",
cmd = Some("brew update"),
fetch = Seq(
new FetchUri(uri = "http://example.com/file1", executable = false, extract = true, cache = true, outputFile = None),
new FetchUri(uri = "http://example.com/file2", executable = true, extract = false, cache = false, outputFile = None)
),
portDefinitions = Seq(state.PortDefinition(0, name = Some("default")))
)
val json =
"""
{
"id": "app-with-fetch",
"cmd": "brew update",
"fetch": [
{
"uri": "http://example.com/file1",
"executable": false,
"extract": true,
"cache": true
},
{
"uri": "http://example.com/file2",
"executable": true,
"extract": false,
"cache": false
}
]
}
"""
val readResult = fromJson(json)
assert(readResult.copy(versionInfo = app.versionInfo) == app)
}
"Transfer uris to fetch" in {
val json =
"""
{
"id": "app-with-fetch",
"cmd": "brew update",
"uris": ["http://example.com/file1.tar.gz", "http://example.com/file"]
}
"""
val app = fromJson(json)
assert(app.fetch(0).uri == "http://example.com/file1.tar.gz")
assert(app.fetch(0).extract)
assert(app.fetch(1).uri == "http://example.com/file")
assert(!app.fetch(1).extract)
}
"Validate URIs with query strings correctly" in {
val json =
"""
{
"id": "app-with-fetch",
"cmd": "brew update",
"uris": ["http://example.com/file1.tar.gz?foo=10&bar=meh", "http://example.com/file?foo=10&bar=meh"]
}
"""
val app = fromJson(json)
app.fetch(0).uri shouldBe "http://example.com/file1.tar.gz?foo=10&bar=meh"
app.fetch(0).extract shouldBe true
app.fetch(1).uri shouldBe "http://example.com/file?foo=10&bar=meh"
app.fetch(1).extract shouldBe false
}
"Serialize deserialize path with fetch" in {
val app = AppDefinition(
id = AbsolutePathId("/app-with-fetch"),
role = "*",
cmd = Some("brew update"),
fetch = Seq(
new FetchUri(
uri = "http://example.com/file1?foo=10&bar=meh",
executable = false,
extract = true,
cache = true,
outputFile = None
),
new FetchUri(uri = "http://example.com/file2", executable = true, extract = false, cache = false, outputFile = None)
)
)
val proto = app.toProto
val deserializedApp = AppDefinition.fromProto(proto)
assert(deserializedApp.fetch(0).uri == "http://example.com/file1?foo=10&bar=meh")
assert(deserializedApp.fetch(0).extract)
assert(!deserializedApp.fetch(0).executable)
assert(deserializedApp.fetch(0).cache)
assert(deserializedApp.fetch(1).uri == "http://example.com/file2")
assert(!deserializedApp.fetch(1).extract)
assert(deserializedApp.fetch(1).executable)
assert(!deserializedApp.fetch(1).cache)
}
"Read app with labeled virtual network and discovery info" in {
val app = AppDefinition(
id = "app-with-ip-address".toAbsolutePath,
role = "*",
cmd = Some("python3 -m http.server 8080"),
networks = Seq(
ContainerNetwork(
name = "whatever",
labels = Map(
"foo" -> "bar",
"baz" -> "buzz"
)
)
),
container = Some(
Container.Mesos(
portMappings = Seq(Container.PortMapping(name = Some("http"), containerPort = 80, protocol = "tcp"))
)
),
backoffStrategy = BackoffStrategy(maxLaunchDelay = 300.seconds)
)
val json =
"""
{
"id": "app-with-ip-address",
"cmd": "python3 -m http.server 8080",
"ipAddress": {
"networkName": "whatever",
"groups": ["a", "b", "c"],
"labels": {
"foo": "bar",
"baz": "buzz"
},
"discovery": {
"ports": [
{ "name": "http", "number": 80, "protocol": "tcp" }
]
}
},
"maxLaunchDelaySeconds": 300
}
"""
val readResult = fromJson(json)
assert(readResult.copy(versionInfo = app.versionInfo) == app)
}
"Read app with ip address without discovery info" in {
val app = AppDefinition(
id = "app-with-ip-address".toAbsolutePath,
role = "*",
cmd = Some("python3 -m http.server 8080"),
container = Some(state.Container.Mesos(portMappings = Seq(Container.PortMapping.defaultInstance))),
portDefinitions = Nil,
networks = Seq(
ContainerNetwork(
"whatever",
labels = Map(
"foo" -> "bar",
"baz" -> "buzz"
)
)
),
backoffStrategy = BackoffStrategy(maxLaunchDelay = 300.seconds)
)
val json =
"""
{
"id": "app-with-ip-address",
"cmd": "python3 -m http.server 8080",
"ipAddress": {
"networkName": "whatever",
"groups": ["a", "b", "c"],
"labels": {
"foo": "bar",
"baz": "buzz"
}
}
}
"""
val readResult = fromJson(json)
assert(readResult.copy(versionInfo = app.versionInfo) == app)
}
"Read app with ip address and an empty ports list" in {
val app = AppDefinition(
id = "app-with-network-isolation".toAbsolutePath,
role = "*",
cmd = Some("python3 -m http.server 8080"),
container = Some(state.Container.Mesos(portMappings = Seq(Container.PortMapping.defaultInstance))),
networks = Seq(ContainerNetwork("whatever"))
)
val json =
"""
{
"id": "app-with-network-isolation",
"cmd": "python3 -m http.server 8080",
"ports": [],
"ipAddress": {"networkName": "whatever"}
}
"""
val readResult = fromJson(json)
assert(readResult.copy(versionInfo = app.versionInfo) == app)
}
"App may not have non-empty ports and ipAddress" in {
val json =
"""
{
"id": "app-with-network-isolation",
"cmd": "python3 -m http.server 8080",
"ports": [0],
"ipAddress": {
"networkName": "whatever",
"groups": ["a", "b", "c"],
"labels": {
"foo": "bar",
"baz": "buzz"
}
}
}
"""
a[ValidationFailedException] shouldBe thrownBy(fromJson(json))
}
"App may not have both uris and fetch" in {
val json =
"""
{
"id": "app-with-network-isolation",
"uris": ["http://example.com/file1.tar.gz"],
"fetch": [{"uri": "http://example.com/file1.tar.gz"}]
}
"""
a[ValidationFailedException] shouldBe thrownBy(fromJson(json))
}
"SerializationRoundtrip preserves secret references in environment variables" in {
val app3 = raml.App(
id = "/prod/product/frontend/my-app",
cmd = Some("sleep 30"),
env = Map[String, raml.EnvVarValueOrSecret](
"FOO" -> raml.EnvVarValue("bar"),
"QAZ" -> raml.EnvVarSecret("james")
),
secrets = Map("james" -> SecretDef("somesource"))
)
JsonTestHelper.assertSerializationRoundtripWorks(app3, appNormalization)
JsonTestHelper.assertSerializationRoundtripWithJacksonWorks(app3, appNormalization)
}
"environment variables with secrets should parse" in {
val json =
"""
{
"id": "app-with-network-isolation",
"cmd": "python3 -m http.server 8080",
"env": {
"qwe": "rty",
"ssh": { "secret": "psst" }
},
"secrets": { "psst": { "source": "abc" } }
}
"""
val result = fromJson(json)
assert(
result.env.equals(
Map[String, EnvVarValue](
"qwe" -> "rty".toEnvVar,
"ssh" -> EnvVarSecretRef("psst")
)
),
result.env
)
}
"container port mappings when empty stays empty" in {
val appDef = AppDefinition(id = AbsolutePathId("/test"), container = Some(Docker()), role = "*")
val roundTripped = AppDefinition.fromProto(appDef.toProto)
roundTripped should equal(appDef)
roundTripped.container.map(_.portMappings) should equal(appDef.container.map(_.portMappings))
}
}
}
| mesosphere/marathon | src/test/scala/mesosphere/marathon/api/v2/json/AppDefinitionTest.scala | Scala | apache-2.0 | 28,955 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.types
import java.sql.Timestamp
import scala.collection.mutable.ArrayBuffer
import scala.math.Numeric.{FloatAsIfIntegral, DoubleAsIfIntegral}
import scala.reflect.ClassTag
import scala.reflect.runtime.universe.{TypeTag, runtimeMirror, typeTag}
import scala.util.parsing.combinator.RegexParsers
import org.json4s._
import org.json4s.JsonAST.JValue
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.apache.spark.SparkException
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.sql.catalyst.ScalaReflectionLock
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference, Expression}
import org.apache.spark.util.Utils
object DataType {
def fromJson(json: String): DataType = parseDataType(parse(json))
  /** Extractor that yields a JSON object's fields sorted by key, so the
    * `parseDataType` patterns below can match fields in a fixed order.
    */
  private object JSortedObject {
    def unapplySeq(value: JValue): Option[List[(String, JValue)]] = value match {
      case JObject(seq) => Some(seq.toList.sortBy(_._1))
      case _ => None
    }
  }
// NOTE: Map fields must be sorted in alphabetical order to keep consistent with the Python side.
private def parseDataType(json: JValue): DataType = json match {
case JString(name) =>
PrimitiveType.nameToType(name)
case JSortedObject(
("containsNull", JBool(n)),
("elementType", t: JValue),
("type", JString("array"))) =>
ArrayType(parseDataType(t), n)
case JSortedObject(
("keyType", k: JValue),
("type", JString("map")),
("valueContainsNull", JBool(n)),
("valueType", v: JValue)) =>
MapType(parseDataType(k), parseDataType(v), n)
case JSortedObject(
("fields", JArray(fields)),
("type", JString("struct"))) =>
StructType(fields.map(parseStructField))
case JSortedObject(
("class", JString(udtClass)),
("pyClass", _),
("sqlType", _),
("type", JString("udt"))) =>
Class.forName(udtClass).newInstance().asInstanceOf[UserDefinedType[_]]
}
private def parseStructField(json: JValue): StructField = json match {
case JSortedObject(
("metadata", metadata: JObject),
("name", JString(name)),
("nullable", JBool(nullable)),
("type", dataType: JValue)) =>
StructField(name, parseDataType(dataType), nullable, Metadata.fromJObject(metadata))
// Support reading schema when 'metadata' is missing.
case JSortedObject(
("name", JString(name)),
("nullable", JBool(nullable)),
("type", dataType: JValue)) =>
StructField(name, parseDataType(dataType), nullable)
}
@deprecated("Use DataType.fromJson instead", "1.2.0")
def fromCaseClassString(string: String): DataType = CaseClassStringParser(string)
  /** Combinator parser for the legacy case-class-style string form of a
    * DataType (the pre-JSON `toString` representation). Grammar mirrors the
    * `toString` output of each type exactly; used only by the deprecated
    * [[fromCaseClassString]].
    */
  private object CaseClassStringParser extends RegexParsers {
    protected lazy val primitiveType: Parser[DataType] =
      ( "StringType" ^^^ StringType
      | "FloatType" ^^^ FloatType
      | "IntegerType" ^^^ IntegerType
      | "ByteType" ^^^ ByteType
      | "ShortType" ^^^ ShortType
      | "DoubleType" ^^^ DoubleType
      | "LongType" ^^^ LongType
      | "BinaryType" ^^^ BinaryType
      | "BooleanType" ^^^ BooleanType
      | "DateType" ^^^ DateType
      | "DecimalType()" ^^^ DecimalType.Unlimited
      | fixedDecimalType
      | "TimestampType" ^^^ TimestampType
      )
    // "DecimalType(p,s)" with explicit precision and scale.
    protected lazy val fixedDecimalType: Parser[DataType] =
      ("DecimalType(" ~> "[0-9]+".r) ~ ("," ~> "[0-9]+".r <~ ")") ^^ {
        case precision ~ scale => DecimalType(precision.toInt, scale.toInt)
      }
    protected lazy val arrayType: Parser[DataType] =
      "ArrayType" ~> "(" ~> dataType ~ "," ~ boolVal <~ ")" ^^ {
        case tpe ~ _ ~ containsNull => ArrayType(tpe, containsNull)
      }
    protected lazy val mapType: Parser[DataType] =
      "MapType" ~> "(" ~> dataType ~ "," ~ dataType ~ "," ~ boolVal <~ ")" ^^ {
        case t1 ~ _ ~ t2 ~ _ ~ valueContainsNull => MapType(t1, t2, valueContainsNull)
      }
    protected lazy val structField: Parser[StructField] =
      ("StructField(" ~> "[a-zA-Z0-9_]*".r) ~ ("," ~> dataType) ~ ("," ~> boolVal <~ ")") ^^ {
        case name ~ tpe ~ nullable =>
          StructField(name, tpe, nullable = nullable)
      }
    protected lazy val boolVal: Parser[Boolean] =
      ( "true" ^^^ true
      | "false" ^^^ false
      )
    // Accepts "StructType(<CollectionName>(field, ...))" — the collection name
    // printed inside varies across Scala versions, hence the regex.
    protected lazy val structType: Parser[DataType] =
      "StructType\\\\([A-zA-z]*\\\\(".r ~> repsep(structField, ",") <~ "))" ^^ {
        case fields => StructType(fields)
      }
    protected lazy val dataType: Parser[DataType] =
      ( arrayType
      | mapType
      | structType
      | primitiveType
      )
    /**
     * Parses a string representation of a DataType.
     *
     * TODO: Generate parser as pickler...
     */
    def apply(asString: String): DataType = parseAll(dataType, asString) match {
      case Success(result, _) => result
      case failure: NoSuccess =>
        throw new IllegalArgumentException(s"Unsupported dataType: $asString, $failure")
    }
  }
/**
 * Appends a tree-formatted description of `dataType` to `builder`, delegating to the
 * container types that know how to print their children.
 *
 * @param dataType the type to describe
 * @param prefix   indentation prefix for the current nesting level
 * @param builder  accumulator the description is appended to
 */
protected[types] def buildFormattedString(
    dataType: DataType,
    prefix: String,
    builder: StringBuilder): Unit = {
  dataType match {
    case array: ArrayType =>
      array.buildFormattedString(prefix, builder)
    case struct: StructType =>
      struct.buildFormattedString(prefix, builder)
    case map: MapType =>
      map.buildFormattedString(prefix, builder)
    case _ => // leaf types contribute no extra lines
  }
}
/**
 * Compares two types, ignoring nullability of ArrayType, MapType, StructType.
 */
private[types] def equalsIgnoreNullability(left: DataType, right: DataType): Boolean =
  (left, right) match {
    case (ArrayType(leftElementType, _), ArrayType(rightElementType, _)) =>
      equalsIgnoreNullability(leftElementType, rightElementType)
    case (MapType(leftKeyType, leftValueType, _), MapType(rightKeyType, rightValueType, _)) =>
      equalsIgnoreNullability(leftKeyType, rightKeyType) &&
        equalsIgnoreNullability(leftValueType, rightValueType)
    case (StructType(leftFields), StructType(rightFields)) =>
      // `corresponds` is false when the lengths differ, so no separate size check is needed.
      leftFields.corresponds(rightFields) { (l, r) =>
        l.name == r.name && equalsIgnoreNullability(l.dataType, r.dataType)
      }
    case _ =>
      left == right
  }
/**
 * Compares two types, ignoring compatible nullability of ArrayType, MapType, StructType.
 *
 * Compatible nullability is defined as follows:
 *   - If `from` and `to` are ArrayTypes, `from` has a compatible nullability with `to`
 *   if and only if `to.containsNull` is true, or both of `from.containsNull` and
 *   `to.containsNull` are false.
 *   - If `from` and `to` are MapTypes, `from` has a compatible nullability with `to`
 *   if and only if `to.valueContainsNull` is true, or both of `from.valueContainsNull` and
 *   `to.valueContainsNull` are false.
 *   - If `from` and `to` are StructTypes, `from` has a compatible nullability with `to`
 *   if and only if for every pair of fields, `toField.nullable` is true, or both
 *   of `fromField.nullable` and `toField.nullable` are false.
 */
private[sql] def equalsIgnoreCompatibleNullability(from: DataType, to: DataType): Boolean = {
  (from, to) match {
    // (tn || !fn) encodes "to is nullable, or neither side is".
    case (ArrayType(fromElement, fn), ArrayType(toElement, tn)) =>
      (tn || !fn) && equalsIgnoreCompatibleNullability(fromElement, toElement)
    case (MapType(fromKey, fromValue, fn), MapType(toKey, toValue, tn)) =>
      (tn || !fn) &&
        equalsIgnoreCompatibleNullability(fromKey, toKey) &&
        equalsIgnoreCompatibleNullability(fromValue, toValue)
    case (StructType(fromFields), StructType(toFields)) =>
      fromFields.size == toFields.size &&
        fromFields.zip(toFields).forall {
          case (fromField, toField) =>
            fromField.name == toField.name &&
              (toField.nullable || !fromField.nullable) &&
              equalsIgnoreCompatibleNullability(fromField.dataType, toField.dataType)
        }
    case (fromDataType, toDataType) => fromDataType == toDataType
  }
}
}
/**
* :: DeveloperApi ::
*
* The base type of all Spark SQL data types.
*
* @group dataType
*/
@DeveloperApi
abstract class DataType {
  /** Matches any expression that evaluates to this DataType */
  def unapply(a: Expression): Boolean = a match {
    case e: Expression if e.dataType == this => true
    case _ => false
  }

  /** The default size of a value of this data type. */
  def defaultSize: Int

  /** Overridden to `true` by types mixing in [[PrimitiveType]]. */
  def isPrimitive: Boolean = false

  /**
   * Name used in the JSON representation, e.g. "integer" for `IntegerType`.
   * `stripSuffix("$")` handles the companion objects; `dropRight(4)` removes "Type".
   */
  def typeName: String = this.getClass.getSimpleName.stripSuffix("$").dropRight(4).toLowerCase

  private[sql] def jsonValue: JValue = typeName

  /** The compact JSON representation of this data type. */
  def json: String = compact(render(jsonValue))

  /** The pretty (indented) JSON representation of this data type. */
  def prettyJson: String = pretty(render(jsonValue))

  /** Human-readable name used in schema strings; subclasses may override (e.g. "bigint"). */
  def simpleString: String = typeName

  /** Check if `this` and `other` are the same data type when ignoring nullability
   * (`StructField.nullable`, `ArrayType.containsNull`, and `MapType.valueContainsNull`).
   */
  private[spark] def sameType(other: DataType): Boolean =
    DataType.equalsIgnoreNullability(this, other)

  /** Returns the same data type but set all nullability fields are true
   * (`StructField.nullable`, `ArrayType.containsNull`, and `MapType.valueContainsNull`).
   */
  private[spark] def asNullable: DataType
}
/**
* :: DeveloperApi ::
*
* The data type representing `NULL` values. Please use the singleton [[DataTypes.NullType]].
*
* @group dataType
*/
@DeveloperApi
class NullType private() extends DataType {
  // The companion object and this class is separated so the companion object also subclasses
  // this type. Otherwise, the companion object would be of type "NullType$" in byte code.
  // Defined with a private constructor so the companion object is the only possible instantiation.

  /** The default size of a value of the NullType is 1 byte. */
  override def defaultSize: Int = 1

  // Already the most permissive type; nothing to relax.
  private[spark] override def asNullable: NullType = this
}

case object NullType extends NullType
protected[sql] object NativeType {
  /** All concrete [[NativeType]]s that are also primitives (excludes Date/Timestamp/Decimal). */
  val all = Seq(
    IntegerType, BooleanType, LongType, DoubleType, FloatType, ShortType, ByteType, StringType)

  /** Matches any [[DataType]] contained in [[all]]. */
  def unapply(dt: DataType): Boolean = all.contains(dt)
}
/** Marker mixin: types extending this report `isPrimitive = true`. */
protected[sql] trait PrimitiveType extends DataType {
  override def isPrimitive = true
}
protected[sql] object PrimitiveType {
  private val nonDecimals = Seq(NullType, DateType, TimestampType, BinaryType) ++ NativeType.all
  private val nonDecimalNameToType = nonDecimals.map(t => t.typeName -> t).toMap

  // Hoisted out of nameToType so the pattern is compiled once instead of on every call.
  private val FIXED_DECIMAL = """decimal\\(\\s*(\\d+)\\s*,\\s*(\\d+)\\s*\\)""".r

  /**
   * Given the string representation of a type, return its DataType.
   * Accepts "decimal", "decimal(p,s)", and the `typeName` of every non-decimal primitive;
   * any other name raises a NoSuchElementException from the lookup map.
   */
  private[sql] def nameToType(name: String): DataType = {
    name match {
      case "decimal" => DecimalType.Unlimited
      case FIXED_DECIMAL(precision, scale) => DecimalType(precision.toInt, scale.toInt)
      case other => nonDecimalNameToType(other)
    }
  }
}
/** A SQL type backed directly by a single JVM type (`JvmType`). */
protected[sql] abstract class NativeType extends DataType {
  // The JVM representation of values of this SQL type.
  private[sql] type JvmType
  @transient private[sql] val tag: TypeTag[JvmType]
  private[sql] val ordering: Ordering[JvmType]

  // Derived from `tag`; guarded by ScalaReflectionLock like every other tag in this file.
  @transient private[sql] val classTag = ScalaReflectionLock.synchronized {
    val mirror = runtimeMirror(Utils.getSparkClassLoader)
    ClassTag[JvmType](mirror.runtimeClass(tag.tpe))
  }
}
/**
* :: DeveloperApi ::
*
* The data type representing `String` values. Please use the singleton [[DataTypes.StringType]].
*
* @group dataType
*/
@DeveloperApi
class StringType private() extends NativeType with PrimitiveType {
  // The companion object and this class is separated so the companion object also subclasses
  // this type. Otherwise, the companion object would be of type "StringType$" in byte code.
  // Defined with a private constructor so the companion object is the only possible instantiation.
  private[sql] type JvmType = String
  // Computed lazily under the shared reflection lock, like every tag in this file.
  @transient private[sql] lazy val tag = ScalaReflectionLock.synchronized { typeTag[JvmType] }
  private[sql] val ordering = implicitly[Ordering[JvmType]]

  /**
   * The default size of a value of the StringType is 4096 bytes.
   */
  override def defaultSize: Int = 4096

  private[spark] override def asNullable: StringType = this
}

case object StringType extends StringType
/**
* :: DeveloperApi ::
*
* The data type representing `Array[Byte]` values.
* Please use the singleton [[DataTypes.BinaryType]].
*
* @group dataType
*/
@DeveloperApi
class BinaryType private() extends NativeType with PrimitiveType {
  // The companion object and this class is separated so the companion object also subclasses
  // this type. Otherwise, the companion object would be of type "BinaryType$" in byte code.
  // Defined with a private constructor so the companion object is the only possible instantiation.
  private[sql] type JvmType = Array[Byte]
  @transient private[sql] lazy val tag = ScalaReflectionLock.synchronized { typeTag[JvmType] }

  // Byte-wise lexicographic comparison over the common prefix; ties broken by length.
  private[sql] val ordering = new Ordering[JvmType] {
    def compare(x: Array[Byte], y: Array[Byte]): Int = {
      val limit = math.min(x.length, y.length)
      var i = 0
      while (i < limit) {
        val res = x(i).compareTo(y(i))
        if (res != 0) return res
        i += 1
      }
      x.length - y.length
    }
  }

  /**
   * The default size of a value of the BinaryType is 4096 bytes.
   */
  override def defaultSize: Int = 4096

  private[spark] override def asNullable: BinaryType = this
}

case object BinaryType extends BinaryType
/**
* :: DeveloperApi ::
*
* The data type representing `Boolean` values. Please use the singleton [[DataTypes.BooleanType]].
*
* @group dataType
*/
@DeveloperApi
class BooleanType private() extends NativeType with PrimitiveType {
  // The companion object and this class is separated so the companion object also subclasses
  // this type. Otherwise, the companion object would be of type "BooleanType$" in byte code.
  // Defined with a private constructor so the companion object is the only possible instantiation.
  private[sql] type JvmType = Boolean
  // Computed lazily under the shared reflection lock, like every tag in this file.
  @transient private[sql] lazy val tag = ScalaReflectionLock.synchronized { typeTag[JvmType] }
  private[sql] val ordering = implicitly[Ordering[JvmType]]

  /**
   * The default size of a value of the BooleanType is 1 byte.
   */
  override def defaultSize: Int = 1

  private[spark] override def asNullable: BooleanType = this
}

case object BooleanType extends BooleanType
/**
* :: DeveloperApi ::
*
* The data type representing `java.sql.Timestamp` values.
* Please use the singleton [[DataTypes.TimestampType]].
*
* @group dataType
*/
@DeveloperApi
class TimestampType private() extends NativeType {
  // The companion object and this class is separated so the companion object also subclasses
  // this type. Otherwise, the companion object would be of type "TimestampType$" in byte code.
  // Defined with a private constructor so the companion object is the only possible instantiation.
  private[sql] type JvmType = Timestamp
  @transient private[sql] lazy val tag = ScalaReflectionLock.synchronized { typeTag[JvmType] }

  // Delegates to java.sql.Timestamp's own compareTo.
  private[sql] val ordering = new Ordering[JvmType] {
    def compare(x: Timestamp, y: Timestamp) = x.compareTo(y)
  }

  /**
   * The default size of a value of the TimestampType is 12 bytes.
   */
  override def defaultSize: Int = 12

  private[spark] override def asNullable: TimestampType = this
}

case object TimestampType extends TimestampType
/**
* :: DeveloperApi ::
*
* The data type representing `java.sql.Date` values.
* Please use the singleton [[DataTypes.DateType]].
*
* @group dataType
*/
@DeveloperApi
class DateType private() extends NativeType {
  // The companion object and this class is separated so the companion object also subclasses
  // this type. Otherwise, the companion object would be of type "DateType$" in byte code.
  // Defined with a private constructor so the companion object is the only possible instantiation.

  // Dates are carried as a raw Int internally (presumably days since epoch — confirm with
  // the date conversion utilities elsewhere in Spark SQL).
  private[sql] type JvmType = Int
  @transient private[sql] lazy val tag = ScalaReflectionLock.synchronized { typeTag[JvmType] }
  private[sql] val ordering = implicitly[Ordering[JvmType]]

  /**
   * The default size of a value of the DateType is 4 bytes.
   */
  override def defaultSize: Int = 4

  private[spark] override def asNullable: DateType = this
}

case object DateType extends DateType
/** Base class for all numeric SQL types; supplies a [[Numeric]] for the JVM representation. */
abstract class NumericType extends NativeType with PrimitiveType {
  // Unfortunately we can't get this implicitly as that breaks Spark Serialization. In order for
  // implicitly[Numeric[JvmType]] to be valid, we have to change JvmType from a type variable to a
  // type parameter and add a numeric annotation (i.e., [JvmType : Numeric]). This gets
  // desugared by the compiler into an argument to the objects constructor. This means there is no
  // longer a no-argument constructor and thus the JVM cannot serialize the object anymore.
  private[sql] val numeric: Numeric[JvmType]
}
protected[sql] object NumericType {
  /** Matches any expression whose data type is a [[NumericType]]. */
  def unapply(e: Expression): Boolean = e.dataType.isInstanceOf[NumericType]
}

/** Matcher for any expressions that evaluate to [[IntegralType]]s */
protected[sql] object IntegralType {
  def unapply(a: Expression): Boolean = a match {
    case e: Expression if e.dataType.isInstanceOf[IntegralType] => true
    case _ => false
  }
}

/** Whole-number numeric types: Byte, Short, Int, Long (see the subclasses below). */
protected[sql] sealed abstract class IntegralType extends NumericType {
  private[sql] val integral: Integral[JvmType]
}
/**
* :: DeveloperApi ::
*
* The data type representing `Long` values. Please use the singleton [[DataTypes.LongType]].
*
* @group dataType
*/
@DeveloperApi
class LongType private() extends IntegralType {
  // The companion object and this class is separated so the companion object also subclasses
  // this type. Otherwise, the companion object would be of type "LongType$" in byte code.
  // Defined with a private constructor so the companion object is the only possible instantiation.
  private[sql] type JvmType = Long
  @transient private[sql] lazy val tag = ScalaReflectionLock.synchronized { typeTag[JvmType] }
  private[sql] val numeric = implicitly[Numeric[Long]]
  private[sql] val integral = implicitly[Integral[Long]]
  private[sql] val ordering = implicitly[Ordering[JvmType]]

  /**
   * The default size of a value of the LongType is 8 bytes.
   */
  override def defaultSize: Int = 8

  // Name rendered in simple schema strings, e.g. struct<a:bigint>.
  override def simpleString = "bigint"

  private[spark] override def asNullable: LongType = this
}

case object LongType extends LongType
/**
* :: DeveloperApi ::
*
* The data type representing `Int` values. Please use the singleton [[DataTypes.IntegerType]].
*
* @group dataType
*/
@DeveloperApi
class IntegerType private() extends IntegralType {
  // The companion object and this class is separated so the companion object also subclasses
  // this type. Otherwise, the companion object would be of type "IntegerType$" in byte code.
  // Defined with a private constructor so the companion object is the only possible instantiation.
  private[sql] type JvmType = Int
  @transient private[sql] lazy val tag = ScalaReflectionLock.synchronized { typeTag[JvmType] }
  private[sql] val numeric = implicitly[Numeric[Int]]
  private[sql] val integral = implicitly[Integral[Int]]
  private[sql] val ordering = implicitly[Ordering[JvmType]]

  /**
   * The default size of a value of the IntegerType is 4 bytes.
   */
  override def defaultSize: Int = 4

  // Name rendered in simple schema strings, e.g. struct<a:int>.
  override def simpleString = "int"

  private[spark] override def asNullable: IntegerType = this
}

case object IntegerType extends IntegerType
/**
* :: DeveloperApi ::
*
* The data type representing `Short` values. Please use the singleton [[DataTypes.ShortType]].
*
* @group dataType
*/
@DeveloperApi
class ShortType private() extends IntegralType {
  // The companion object and this class is separated so the companion object also subclasses
  // this type. Otherwise, the companion object would be of type "ShortType$" in byte code.
  // Defined with a private constructor so the companion object is the only possible instantiation.
  private[sql] type JvmType = Short
  @transient private[sql] lazy val tag = ScalaReflectionLock.synchronized { typeTag[JvmType] }
  private[sql] val numeric = implicitly[Numeric[Short]]
  private[sql] val integral = implicitly[Integral[Short]]
  private[sql] val ordering = implicitly[Ordering[JvmType]]

  /**
   * The default size of a value of the ShortType is 2 bytes.
   */
  override def defaultSize: Int = 2

  // Name rendered in simple schema strings, e.g. struct<a:smallint>.
  override def simpleString = "smallint"

  private[spark] override def asNullable: ShortType = this
}

case object ShortType extends ShortType
/**
* :: DeveloperApi ::
*
* The data type representing `Byte` values. Please use the singleton [[DataTypes.ByteType]].
*
* @group dataType
*/
@DeveloperApi
class ByteType private() extends IntegralType {
  // The companion object and this class is separated so the companion object also subclasses
  // this type. Otherwise, the companion object would be of type "ByteType$" in byte code.
  // Defined with a private constructor so the companion object is the only possible instantiation.
  private[sql] type JvmType = Byte
  @transient private[sql] lazy val tag = ScalaReflectionLock.synchronized { typeTag[JvmType] }
  private[sql] val numeric = implicitly[Numeric[Byte]]
  private[sql] val integral = implicitly[Integral[Byte]]
  private[sql] val ordering = implicitly[Ordering[JvmType]]

  /**
   * The default size of a value of the ByteType is 1 byte.
   */
  override def defaultSize: Int = 1

  // Name rendered in simple schema strings, e.g. struct<a:tinyint>.
  override def simpleString = "tinyint"

  private[spark] override def asNullable: ByteType = this
}

case object ByteType extends ByteType
/** Matcher for any expressions that evaluate to [[FractionalType]]s */
protected[sql] object FractionalType {
  def unapply(a: Expression): Boolean = a match {
    case e: Expression if e.dataType.isInstanceOf[FractionalType] => true
    case _ => false
  }
}

/** Non-integral numeric types: Float, Double, Decimal (see the subclasses below). */
protected[sql] sealed abstract class FractionalType extends NumericType {
  private[sql] val fractional: Fractional[JvmType]
  // Integral view over the same values, used where whole-number semantics are required.
  private[sql] val asIntegral: Integral[JvmType]
}
/**
 * Precision parameters for a Decimal
 *
 * @param precision total number of digits
 * @param scale     number of digits to the right of the decimal point
 */
case class PrecisionInfo(precision: Int, scale: Int)
/**
* :: DeveloperApi ::
*
* The data type representing `java.math.BigDecimal` values.
* A Decimal that might have fixed precision and scale, or unlimited values for these.
*
* Please use [[DataTypes.createDecimalType()]] to create a specific instance.
*
* @group dataType
*/
@DeveloperApi
case class DecimalType(precisionInfo: Option[PrecisionInfo]) extends FractionalType {
  // `None` means an unlimited decimal (no fixed precision/scale).
  private[sql] type JvmType = Decimal
  @transient private[sql] lazy val tag = ScalaReflectionLock.synchronized { typeTag[JvmType] }
  private[sql] val numeric = Decimal.DecimalIsFractional
  private[sql] val fractional = Decimal.DecimalIsFractional
  private[sql] val ordering = Decimal.DecimalIsFractional
  private[sql] val asIntegral = Decimal.DecimalAsIfIntegral

  /** Precision in digits, or -1 when unlimited. */
  def precision: Int = precisionInfo.map(_.precision).getOrElse(-1)

  /** Scale in digits, or -1 when unlimited. */
  def scale: Int = precisionInfo.map(_.scale).getOrElse(-1)

  override def typeName: String = precisionInfo match {
    case Some(PrecisionInfo(precision, scale)) => s"decimal($precision,$scale)"
    case None => "decimal"
  }

  override def toString: String = precisionInfo match {
    case Some(PrecisionInfo(precision, scale)) => s"DecimalType($precision,$scale)"
    case None => "DecimalType()"
  }

  /**
   * The default size of a value of the DecimalType is 4096 bytes.
   */
  override def defaultSize: Int = 4096

  // Note: unlike `typeName`, the unlimited case renders with an explicit default
  // precision/scale here.
  override def simpleString = precisionInfo match {
    case Some(PrecisionInfo(precision, scale)) => s"decimal($precision,$scale)"
    case None => "decimal(10,0)"
  }

  private[spark] override def asNullable: DecimalType = this
}
/** Extra factory methods and pattern matchers for Decimals */
object DecimalType {
  /** The unbounded decimal type (no fixed precision/scale). */
  val Unlimited: DecimalType = DecimalType(None)

  /** Extracts (precision, scale) from a fixed-precision decimal; no match for unlimited. */
  object Fixed {
    def unapply(t: DecimalType): Option[(Int, Int)] =
      t.precisionInfo.map(p => (p.precision, p.scale))
  }

  /** Extracts (precision, scale) from an expression of fixed-precision decimal type. */
  object Expression {
    def unapply(e: Expression): Option[(Int, Int)] = e.dataType match {
      case t: DecimalType => t.precisionInfo.map(p => (p.precision, p.scale))
      case _ => None
    }
  }

  def apply(): DecimalType = Unlimited

  def apply(precision: Int, scale: Int): DecimalType =
    DecimalType(Some(PrecisionInfo(precision, scale)))

  def unapply(t: DataType): Boolean = t.isInstanceOf[DecimalType]

  def unapply(e: Expression): Boolean = e.dataType.isInstanceOf[DecimalType]

  /** True when `dataType` is a decimal with an explicit precision/scale. */
  def isFixed(dataType: DataType): Boolean = dataType match {
    case DecimalType.Fixed(_, _) => true
    case _ => false
  }
}
/**
* :: DeveloperApi ::
*
* The data type representing `Double` values. Please use the singleton [[DataTypes.DoubleType]].
*
* @group dataType
*/
@DeveloperApi
class DoubleType private() extends FractionalType {
  // The companion object and this class is separated so the companion object also subclasses
  // this type. Otherwise, the companion object would be of type "DoubleType$" in byte code.
  // Defined with a private constructor so the companion object is the only possible instantiation.
  private[sql] type JvmType = Double
  @transient private[sql] lazy val tag = ScalaReflectionLock.synchronized { typeTag[JvmType] }
  private[sql] val numeric = implicitly[Numeric[Double]]
  private[sql] val fractional = implicitly[Fractional[Double]]
  private[sql] val ordering = implicitly[Ordering[JvmType]]
  private[sql] val asIntegral = DoubleAsIfIntegral

  /**
   * The default size of a value of the DoubleType is 8 bytes.
   */
  override def defaultSize: Int = 8

  private[spark] override def asNullable: DoubleType = this
}

case object DoubleType extends DoubleType
/**
* :: DeveloperApi ::
*
* The data type representing `Float` values. Please use the singleton [[DataTypes.FloatType]].
*
* @group dataType
*/
@DeveloperApi
class FloatType private() extends FractionalType {
  // The companion object and this class is separated so the companion object also subclasses
  // this type. Otherwise, the companion object would be of type "FloatType$" in byte code.
  // Defined with a private constructor so the companion object is the only possible instantiation.
  private[sql] type JvmType = Float
  @transient private[sql] lazy val tag = ScalaReflectionLock.synchronized { typeTag[JvmType] }
  private[sql] val numeric = implicitly[Numeric[Float]]
  private[sql] val fractional = implicitly[Fractional[Float]]
  private[sql] val ordering = implicitly[Ordering[JvmType]]
  private[sql] val asIntegral = FloatAsIfIntegral

  /**
   * The default size of a value of the FloatType is 4 bytes.
   */
  override def defaultSize: Int = 4

  private[spark] override def asNullable: FloatType = this
}

case object FloatType extends FloatType
object ArrayType {
  /** Construct a [[ArrayType]] object with the given element type. The `containsNull` is true. */
  def apply(elementType: DataType): ArrayType =
    ArrayType(elementType, containsNull = true)
}
/**
* :: DeveloperApi ::
*
* The data type for collections of multiple values.
* Internally these are represented as columns that contain a ``scala.collection.Seq``.
*
* Please use [[DataTypes.createArrayType()]] to create a specific instance.
*
* An [[ArrayType]] object comprises two fields, `elementType: [[DataType]]` and
* `containsNull: Boolean`. The field of `elementType` is used to specify the type of
* array elements. The field of `containsNull` is used to specify if the array has `null` values.
*
* @param elementType The data type of values.
* @param containsNull Indicates if values have `null` values
*
* @group dataType
*/
@DeveloperApi
case class ArrayType(elementType: DataType, containsNull: Boolean) extends DataType {
  /** Appends one "-- element: ..." line (plus nested lines) to `builder` for tree printing. */
  private[sql] def buildFormattedString(prefix: String, builder: StringBuilder): Unit = {
    builder.append(
      s"$prefix-- element: ${elementType.typeName} (containsNull = $containsNull)\\n")
    DataType.buildFormattedString(elementType, s"$prefix |", builder)
  }

  override private[sql] def jsonValue =
    ("type" -> typeName) ~
      ("elementType" -> elementType.jsonValue) ~
      ("containsNull" -> containsNull)

  /**
   * The default size of a value of the ArrayType is 100 * the default size of the element type.
   * (We assume that there are 100 elements).
   */
  override def defaultSize: Int = 100 * elementType.defaultSize

  override def simpleString = s"array<${elementType.simpleString}>"

  // Recursively relaxes nullability; the element itself is made nullable too.
  private[spark] override def asNullable: ArrayType =
    ArrayType(elementType.asNullable, containsNull = true)
}
/**
* A field inside a StructType.
*
* @param name The name of this field.
* @param dataType The data type of this field.
* @param nullable Indicates if values of this field can be `null` values.
* @param metadata The metadata of this field. The metadata should be preserved during
* transformation if the content of the column is not modified, e.g, in selection.
*/
case class StructField(
    name: String,
    dataType: DataType,
    nullable: Boolean = true,
    metadata: Metadata = Metadata.empty) {

  /** Appends one "-- name: type (nullable = ...)" line (plus nested lines) to `builder`. */
  private[sql] def buildFormattedString(prefix: String, builder: StringBuilder): Unit = {
    builder.append(s"$prefix-- $name: ${dataType.typeName} (nullable = $nullable)\\n")
    DataType.buildFormattedString(dataType, s"$prefix |", builder)
  }

  // override the default toString to be compatible with legacy parquet files.
  // Note: deliberately omits `metadata`, unlike `jsonValue` below.
  override def toString: String = s"StructField($name,$dataType,$nullable)"

  private[sql] def jsonValue: JValue = {
    ("name" -> name) ~
      ("type" -> dataType.jsonValue) ~
      ("nullable" -> nullable) ~
      ("metadata" -> metadata.jsonValue)
  }
}
object StructType {
  /** Builds a schema that mirrors the given attributes (name, type, nullability, metadata). */
  protected[sql] def fromAttributes(attributes: Seq[Attribute]): StructType =
    StructType(attributes.map(a => StructField(a.name, a.dataType, a.nullable, a.metadata)))

  def apply(fields: Seq[StructField]): StructType = StructType(fields.toArray)

  def apply(fields: java.util.List[StructField]): StructType = {
    StructType(fields.toArray.asInstanceOf[Array[StructField]])
  }

  /**
   * Recursively merges two compatible data types: unions struct fields, ORs nullability
   * flags, and takes the max decimal precision/scale.
   *
   * @throws SparkException if the two types cannot be merged
   */
  private[sql] def merge(left: DataType, right: DataType): DataType =
    (left, right) match {
      case (ArrayType(leftElementType, leftContainsNull),
        ArrayType(rightElementType, rightContainsNull)) =>
        ArrayType(
          merge(leftElementType, rightElementType),
          leftContainsNull || rightContainsNull)

      case (MapType(leftKeyType, leftValueType, leftContainsNull),
        MapType(rightKeyType, rightValueType, rightContainsNull)) =>
        MapType(
          merge(leftKeyType, rightKeyType),
          merge(leftValueType, rightValueType),
          leftContainsNull || rightContainsNull)

      case (StructType(leftFields), StructType(rightFields)) =>
        val newFields = ArrayBuffer.empty[StructField]
        // Left-hand fields first, merged with their right-hand counterpart when present.
        leftFields.foreach {
          case leftField @ StructField(leftName, leftType, leftNullable, _) =>
            rightFields
              .find(_.name == leftName)
              .map { case rightField @ StructField(_, rightType, rightNullable, _) =>
                leftField.copy(
                  dataType = merge(leftType, rightType),
                  nullable = leftNullable || rightNullable)
              }
              .orElse(Some(leftField))
              .foreach(newFields += _)
        }
        // Then right-only fields. The name set is hoisted out of the loop: previously
        // `leftFields.map(_.name)` was rebuilt for every right-hand field (O(n * m)).
        val leftNames = leftFields.map(_.name).toSet
        rightFields
          .filterNot(f => leftNames.contains(f.name))
          .foreach(newFields += _)
        StructType(newFields)

      case (DecimalType.Fixed(leftPrecision, leftScale),
        DecimalType.Fixed(rightPrecision, rightScale)) =>
        DecimalType(leftPrecision.max(rightPrecision), leftScale.max(rightScale))

      case (leftUdt: UserDefinedType[_], rightUdt: UserDefinedType[_])
        if leftUdt.userClass == rightUdt.userClass => leftUdt

      case (leftType, rightType) if leftType == rightType =>
        leftType

      case _ =>
        throw new SparkException(s"Failed to merge incompatible data types $left and $right")
    }
}
/**
* :: DeveloperApi ::
*
* A [[StructType]] object can be constructed by
* {{{
* StructType(fields: Seq[StructField])
* }}}
* For a [[StructType]] object, one or multiple [[StructField]]s can be extracted by names.
* If multiple [[StructField]]s are extracted, a [[StructType]] object will be returned.
* If a provided name does not have a matching field, an `IllegalArgumentException` is
* thrown — both when extracting a single field by name and when extracting several
* fields by a set of names.
* Example:
* {{{
* import org.apache.spark.sql._
*
* val struct =
*   StructType(
*     StructField("a", IntegerType, true) ::
*     StructField("b", LongType, false) ::
*     StructField("c", BooleanType, false) :: Nil)
*
* // Extract a single StructField.
* val singleField = struct("b")
* // singleField: StructField = StructField(b,LongType,false)
*
* // This struct does not have a field called "d"; an IllegalArgumentException is thrown.
* val nonExisting = struct("d")
*
* // Extract multiple StructFields. Field names are provided in a set.
* // A StructType object will be returned.
* val twoFields = struct(Set("b", "c"))
* // twoFields: StructType =
* //   StructType(List(StructField(b,LongType,false), StructField(c,BooleanType,false)))
*
* // Names without matching fields are rejected as well: because "d" does not exist,
* // struct(Set("b", "c", "d")) also throws an IllegalArgumentException.
* }}}
* }}}
*
* A [[org.apache.spark.sql.Row]] object is used as a value of the StructType.
* Example:
* {{{
* import org.apache.spark.sql._
*
* val innerStruct =
* StructType(
* StructField("f1", IntegerType, true) ::
* StructField("f2", LongType, false) ::
* StructField("f3", BooleanType, false) :: Nil)
*
* val struct = StructType(
* StructField("a", innerStruct, true) :: Nil)
*
* // Create a Row with the schema defined by struct
* val row = Row(Row(1, 2, true))
* // row: Row = [[1,2,true]]
* }}}
*
* @group dataType
*/
@DeveloperApi
case class StructType(fields: Array[StructField]) extends DataType with Seq[StructField] {

  /** Returns all field names in an array. */
  def fieldNames: Array[String] = fields.map(_.name)

  private lazy val fieldNamesSet: Set[String] = fieldNames.toSet
  private lazy val nameToField: Map[String, StructField] = fields.map(f => f.name -> f).toMap

  /**
   * Extracts a [[StructField]] of the given name.
   *
   * @throws IllegalArgumentException if no field with that name exists
   */
  def apply(name: String): StructField = {
    // The second argument to getOrElse is by-name, so the throw only happens on a miss.
    nameToField.getOrElse(name,
      throw new IllegalArgumentException(s"""Field "$name" does not exist."""))
  }

  /**
   * Returns a [[StructType]] containing [[StructField]]s of the given names, preserving the
   * original order of fields.
   *
   * @throws IllegalArgumentException if any of the given names has no matching field
   */
  def apply(names: Set[String]): StructType = {
    val nonExistFields = names -- fieldNamesSet
    if (nonExistFields.nonEmpty) {
      throw new IllegalArgumentException(
        s"Field ${nonExistFields.mkString(",")} does not exist.")
    }
    // Preserve the original order of fields.
    StructType(fields.filter(f => names.contains(f.name)))
  }

  /** One fresh [[AttributeReference]] per field, in field order. */
  protected[sql] def toAttributes: Seq[AttributeReference] =
    map(f => AttributeReference(f.name, f.dataType, f.nullable, f.metadata)())

  /** Multi-line, indented rendering of the schema, rooted at "root". */
  def treeString: String = {
    val builder = new StringBuilder
    builder.append("root\\n")
    val prefix = " |"
    fields.foreach(field => field.buildFormattedString(prefix, builder))
    builder.toString()
  }

  def printTreeString(): Unit = println(treeString)

  private[sql] def buildFormattedString(prefix: String, builder: StringBuilder): Unit = {
    fields.foreach(field => field.buildFormattedString(prefix, builder))
  }

  override private[sql] def jsonValue =
    ("type" -> typeName) ~
      ("fields" -> map(_.jsonValue))

  // Seq[StructField] implementation: index, length, iteration delegate to `fields`.
  override def apply(fieldIndex: Int): StructField = fields(fieldIndex)

  override def length: Int = fields.length

  override def iterator: Iterator[StructField] = fields.iterator

  /**
   * The default size of a value of the StructType is the total default sizes of all field types.
   */
  override def defaultSize: Int = fields.map(_.dataType.defaultSize).sum

  override def simpleString = {
    val fieldTypes = fields.map(field => s"${field.name}:${field.dataType.simpleString}")
    s"struct<${fieldTypes.mkString(",")}>"
  }

  /**
   * Merges with another schema (`StructType`). For a struct field A from `this` and a struct field
   * B from `that`,
   *
   * 1. If A and B have the same name and data type, they are merged to a field C with the same name
   * and data type. C is nullable if and only if either A or B is nullable.
   * 2. If A doesn't exist in `that`, it's included in the result schema.
   * 3. If B doesn't exist in `this`, it's also included in the result schema.
   * 4. Otherwise, `this` and `that` are considered as conflicting schemas and an exception would be
   * thrown.
   */
  private[sql] def merge(that: StructType): StructType =
    StructType.merge(this, that).asInstanceOf[StructType]

  // Relaxes nullability recursively: every field becomes nullable and its type is relaxed too.
  private[spark] override def asNullable: StructType = {
    val newFields = fields.map {
      case StructField(name, dataType, nullable, metadata) =>
        StructField(name, dataType.asNullable, nullable = true, metadata)
    }
    StructType(newFields)
  }
}
object MapType {
  /**
   * Construct a [[MapType]] object with the given key type and value type.
   * The `valueContainsNull` is true.
   */
  def apply(keyType: DataType, valueType: DataType): MapType =
    MapType(keyType, valueType, valueContainsNull = true)
}
/**
* :: DeveloperApi ::
*
* The data type for Maps. Keys in a map are not allowed to have `null` values.
*
* Please use [[DataTypes.createMapType()]] to create a specific instance.
*
* @param keyType The data type of map keys.
* @param valueType The data type of map values.
* @param valueContainsNull Indicates if map values have `null` values.
*
* @group dataType
*/
case class MapType(
    keyType: DataType,
    valueType: DataType,
    valueContainsNull: Boolean) extends DataType {

  /** Appends "-- key: ..." and "-- value: ..." lines (plus nested lines) to `builder`. */
  private[sql] def buildFormattedString(prefix: String, builder: StringBuilder): Unit = {
    builder.append(s"$prefix-- key: ${keyType.typeName}\\n")
    builder.append(s"$prefix-- value: ${valueType.typeName} " +
      s"(valueContainsNull = $valueContainsNull)\\n")
    DataType.buildFormattedString(keyType, s"$prefix |", builder)
    DataType.buildFormattedString(valueType, s"$prefix |", builder)
  }

  override private[sql] def jsonValue: JValue =
    ("type" -> typeName) ~
      ("keyType" -> keyType.jsonValue) ~
      ("valueType" -> valueType.jsonValue) ~
      ("valueContainsNull" -> valueContainsNull)

  /**
   * The default size of a value of the MapType is
   * 100 * (the default size of the key type + the default size of the value type).
   * (We assume that there are 100 elements).
   */
  override def defaultSize: Int = 100 * (keyType.defaultSize + valueType.defaultSize)

  override def simpleString = s"map<${keyType.simpleString},${valueType.simpleString}>"

  // Relaxes nullability recursively; map keys stay non-null (only values may be null).
  private[spark] override def asNullable: MapType =
    MapType(keyType.asNullable, valueType.asNullable, valueContainsNull = true)
}
/**
* ::DeveloperApi::
* The data type for User Defined Types (UDTs).
*
* This interface allows a user to make their own classes more interoperable with SparkSQL;
* e.g., by creating a [[UserDefinedType]] for a class X, it becomes possible to create
* a `DataFrame` which has class X in the schema.
*
* For SparkSQL to recognize UDTs, the UDT must be annotated with
* [[SQLUserDefinedType]].
*
* The conversion via `serialize` occurs when instantiating a `DataFrame` from another RDD.
* The conversion via `deserialize` occurs when reading from a `DataFrame`.
*/
@DeveloperApi
abstract class UserDefinedType[UserType] extends DataType with Serializable {

  /** Underlying storage type for this UDT */
  def sqlType: DataType

  /** Paired Python UDT class, if exists. */
  // NOTE(review): returns null (not Option) when there is no Python class;
  // callers must null-check.
  def pyUDT: String = null

  /**
   * Convert the user type to a SQL datum
   *
   * TODO: Can we make this take obj: UserType?  The issue is in ScalaReflection.convertToCatalyst,
   * where we need to convert Any to UserType.
   */
  def serialize(obj: Any): Any

  /** Convert a SQL datum to the user type */
  def deserialize(datum: Any): UserType

  // JSON schema representation; "class" records the concrete UDT class so it
  // can be re-instantiated when the schema is read back.
  override private[sql] def jsonValue: JValue = {
    ("type" -> "udt") ~
      ("class" -> this.getClass.getName) ~
      ("pyClass" -> pyUDT) ~
      ("sqlType" -> sqlType.jsonValue)
  }

  /**
   * Class object for the UserType
   */
  def userClass: java.lang.Class[UserType]

  /**
   * The default size of a value of the UserDefinedType is 4096 bytes.
   */
  override def defaultSize: Int = 4096

  /**
   * For UDT, asNullable will not change the nullability of its internal sqlType and just returns
   * itself.
   */
  private[spark] override def asNullable: UserDefinedType[UserType] = this
}
| hengyicai/OnlineAggregationUCAS | sql/catalyst/src/main/scala/org/apache/spark/sql/types/dataTypes.scala | Scala | apache-2.0 | 42,226 |
package org.vitrivr.adampro.utils.exception
/**
* adamtwo
*
* Ivan Giangreco
* August 2015
*/
/** Raised when creation of an entity is attempted but an entity with that name already exists. */
case class EntityExistingException(message : String = "Entity exists already.") extends GeneralAdamException(message)
| dbisUnibas/ADAMpro | src/main/scala/org/vitrivr/adampro/utils/exception/EntityExistingException.scala | Scala | mit | 219 |
/*
Copyright 2013 Crossing-Tech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import sbt._
import Keys._
import com.typesafe.sbt.osgi.SbtOsgi.{OsgiKeys, osgiSettings}
/** sbt build definition for the akka OSGi sample: an aggregate of api, command, core and uncommons. */
object OsgiSampleBuild extends Build {

  override lazy val settings =
    super.settings ++
      buildSettings ++
      Seq(
        shellPrompt := {
          s => Project.extract(s).currentProject.id + " > "
        }
      )

  // Settings shared by every sub-project: Scala version, resolvers and version.
  lazy val buildSettings = Seq(
    scalaVersion := "2.10.0",
    resolvers ++= Seq("oss-sonatype-releases" at "https://oss.sonatype.org/content/repositories/releases",
      "JBoss Repo" at "http://repository.jboss.org/nexus/content/groups/public/",
      "Typesafe Repo" at "http://repo.typesafe.com/typesafe/releases/"),
    version := "2.2.0-SNAPSHOT"
  )

  // Root project only aggregates; it has no sources of its own.
  lazy val root = Project(id = "osgi-sample",
    base = file("."),
    settings = Project.defaultSettings ++ Seq(
      libraryDependencies ++= Seq()
    )
  ) aggregate(api, command, core, uncommons)

  lazy val api = Project(id = "api",
    base = file("./api"),
    settings = Project.defaultSettings ++ exports(Seq("akka.osgi.sample.api")) ++ Seq(libraryDependencies ++= Seq(Dependencies.akka_actor))
  )

  lazy val command = Project(id = "command",
    base = file("./command"),
    settings = Project.defaultSettings ++ exports(Seq("akka.osgi.sample.command"), Seq("akka.osgi.sample.api", "org.osgi.framework")) ++ Seq(
      libraryDependencies ++= Dependencies.command,
      OsgiKeys.bundleActivator := Option("akka.osgi.sample.command.Activator")
    )
  ) dependsOn (api)

  lazy val core = Project(id = "core",
    base = file("./core"),
    settings = Project.defaultSettings ++ exports(Seq("akka.osgi.sample.service", "akka.osgi.sample.activation"), defaultImports, Seq("akka.osgi.sample.internal")) ++ Seq(
      libraryDependencies ++= Dependencies.core,
      OsgiKeys.bundleActivator := Option("akka.osgi.sample.activation.Activator")
    )
  ) dependsOn (api)

  lazy val uncommons = Project(id = "uncommons",
    base = file("./uncommons"),
    settings = Project.defaultSettings ++ exports(Seq("org.uncommons.maths.random"), privates = Seq("org.uncommons.maths.binary", "org.uncommons.maths", "org.uncommons.maths.number")) ++ Seq(
      libraryDependencies ++= Dependencies.uncommons,
      version := "1.2.2"
    )
  )

  /** Builds the standard OSGi bundle settings for the given export/import/private package lists. */
  def exports(packages: Seq[String] = Seq(), imports: Seq[String] = Nil, privates: Seq[String] = Nil) = osgiSettings ++ Seq(
    OsgiKeys.importPackage := imports ++ Seq("*"),
    OsgiKeys.privatePackage := privates,
    OsgiKeys.exportPackage := packages
  )

  /**
   * Copies the file at `source` to `sink` using an NIO channel transfer.
   *
   * Fix: the previous version never closed either stream, leaking both file
   * handles on every call. Both channels are now closed in `finally` blocks,
   * even when the transfer fails.
   */
  def copyFile(source: String, sink: String) {
    val in = new java.io.FileInputStream(new java.io.File(source)).getChannel
    try {
      val out = new java.io.FileOutputStream(new java.io.File(sink)).getChannel
      try {
        out.transferFrom(in, 0, Long.MaxValue)
      } finally {
        out.close()
      }
    } finally {
      in.close()
    }
  }

  // Default OSGi Import-Package entries with version ranges pinned per dependency.
  def defaultImports = Seq("!sun.misc", akkaImport(), configImport(), scalaImport())

  def akkaImport(packageName: String = "akka.*") = "%s;version=\"[2.2,2.3)\"".format(packageName)

  def configImport(packageName: String = "com.typesafe.config.*") = "%s;version=\"[0.4.1,1.1.0)\"".format(packageName)

  def protobufImport(packageName: String = "com.google.protobuf.*") = "%s;version=\"[2.4.0,2.5.0)\"".format(packageName)

  def scalaImport(packageName: String = "scala.*") = "%s;version=\"[2.10,2.11)\"".format(packageName)
}
/** Library dependency declarations shared by the sub-projects. */
object Dependencies {
  // Akka 2.2 snapshot artifacts; `changing()` tells sbt to re-resolve the snapshot.
  val akka_actor = "com.typesafe.akka" % "akka-actor_2.10" % "2.2-SNAPSHOT" changing()
  val akka_osgi = "com.typesafe.akka" % "akka-osgi_2.10" % "2.2-SNAPSHOT" exclude("org.osgi.core", "org.osgi.compendium") changing()
  val akka_remote = "com.typesafe.akka" % "akka-remote_2.10" % "2.2-SNAPSHOT" changing()
  val akka_cluster = "com.typesafe.akka" % "akka-cluster-experimental_2.10" % "2.2-SNAPSHOT" changing()
  val config = "com.typesafe" % "config" % "1.0.0"
  // OSGi framework APIs, needed by the bundle activators.
  val osgiCore = "org.osgi" % "org.osgi.core" % "4.3.0"
  val osgiCompendium = "org.osgi" % "org.osgi.compendium" % "4.3.0"
  val core = Seq(akka_actor, akka_osgi, akka_remote, akka_cluster, config, osgiCore, osgiCompendium)
  val command = Seq(akka_actor, osgiCore, osgiCompendium)
  // Math/charting libraries used only by the "uncommons" sub-project.
  val uncommons_math = "org.uncommons.maths" % "uncommons-maths" % "1.2.2"
  val jcommon = "jfree" % "jcommon" % "1.0.16"
  val jfreechart = "jfree" % "jfreechart" % "1.0.13"
  val uncommons = Seq(uncommons_math, jcommon, jfreechart)
  val protobuf = "com.google.protobuf" % "protobuf-java" % "2.4.1"
}
| Crossing-Tech/akka-osgi-sample | project/Build.scala | Scala | apache-2.0 | 4,963 |
/**
* Copyright (c) 2012, www.quartzsource.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.quartzsource.meutrino.hglib
import org.junit.Assert._
import org.junit.Test
import org.quartzsource.meutrino.CommandException
import java.io.File
import org.quartzsource.meutrino.client.AbstractClientTest
/** Tests for the `incoming`/`outgoing` commands against a cloned repository. */
class OutgoingIncomingTest extends AbstractClientTest {
  // `incoming` without a default path configured and no path argument must fail.
  @Test(expected = classOf[CommandException])
  def testNoPath {
    client.incoming()
  }

  // Freshly cloned repositories have nothing incoming or outgoing.
  @Test
  def testEmpty {
    client.clone(dest = Some("other"))
    val other = factory.open(new File(rootFolder, "other"))
    assertEquals(Nil, other.incoming())
    assertEquals(Nil, other.outgoing())
  }

  // After a commit in the source repo, the clone sees exactly that changeset
  // incoming, and the source sees it outgoing.
  @Test
  def testBasic {
    append("a", "a")
    client.commit("first", addRemove = true)
    append("a", "a")
    client.commit("second")
    client.clone(dest = Some("other"))
    val other = factory.open(new File(rootFolder, "other"))

    assertEquals(client.log(), other.log())
    assertEquals(other.incoming(), client.outgoing(path = Some("other")))
    append("a", "a")
    val (rev, node) = client.commit("third")
    val out = client.outgoing(path = Some("other"))
    assertEquals(1, out.size)
    assertEquals(node, out(0).node)
  }

  // Bookmarks created after cloning are reported by incoming/outgoing-bookmarks.
  @Test
  def testBookmarks {
    append("a", "a")
    client.commit("first", addRemove = true)
    append("a", "a")
    client.commit("second")
    client.clone(dest = Some("other"))
    val other = factory.open(new File(rootFolder, "other"))

    client.bookmark("bm1", Some(1))
    assertEquals(List(("bm1", client.tip().node)), other.incomingBookmarks())
    assertEquals(List(("bm1", client.tip().node)), client.outgoingBookmarks(path = Some("other")))
  }
}
| cyberspinach/meutrino | src/test/scala/org/quartzsource/meutrino/hglib/OutgoingIncomingTest.scala | Scala | apache-2.0 | 2,206 |
package breeze.linalg
import org.scalatest.funsuite.AnyFunSuite
/**
* 2/21/15.
* @author Rakesh Chalasani
*/
/** Unit tests for `breeze.linalg.tile` on matrices and vectors. */
class tileTest extends AnyFunSuite {

  // Tiling a matrix vertically: rows are repeated `n` times (column-major storage).
  test("tile ( DenseMatrix , Int)") {
    val m = new DenseMatrix(2, 2, Array.range(0, 4))
    assert(tile(m, 2) == new DenseMatrix[Int](4, 2, Array(0, 1, 0, 1, 2, 3, 2, 3)))
  }

  // Tiling in both dimensions repeats the matrix in a grid.
  test("tile ( DenseMatrix , Int, Int)") {
    val m = new DenseMatrix(2, 2, Array.range(0, 4))
    assert(tile(m, 2, 2) == new DenseMatrix[Int](4, 4, Array(0, 1, 0, 1, 2, 3, 2, 3, 0, 1, 0, 1, 2, 3, 2, 3)))
  }

  // Non-square input: row and column repeat counts are applied independently.
  test("tile ( DenseMatrix , Int, Int) non-square matrix.") {
    val m = new DenseMatrix(1, 2, Array.range(0, 2))
    assert(tile(m, 2, 3) == new DenseMatrix[Int](2, 6, Array(0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1)))
  }

  // Tiling a vector with one count concatenates copies into a longer vector.
  test("tile (DenseVector, Int) ") {
    val v = DenseVector(1, 2, 3, 4)
    assert(tile(v, 2) == DenseVector[Int](1, 2, 3, 4, 1, 2, 3, 4))
  }

  // Tiling a vector in two dimensions produces a matrix of stacked copies.
  test("tile (DenseVector, Int, Int) ") {
    val v = DenseVector(1, 2, 3, 4)
    assert(
      tile(v, 2, 3) == new DenseMatrix[Int](
        8,
        3,
        Array(1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4)))
  }
}
| scalanlp/breeze | math/src/test/scala/breeze/linalg/functions/tileTest.scala | Scala | apache-2.0 | 1,158 |
/***
* Copyright 2017 Rackspace US, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rackspace.com.papi.components.checker.util
import net.sf.saxon.s9api.Processor
import net.sf.saxon.s9api.XQueryEvaluator
import net.sf.saxon.s9api.XQueryExecutable
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatestplus.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class XQueryEvaluatorPoolSuite extends FunSuite {
private val processor = new Processor(false)
private val compiler = {
val c = processor.newXQueryCompiler()
c
}
private val query = "/xq/load-json.xq"
private val executable = compiler.compile(getClass.getResourceAsStream(query))
test("The xquery pool should successfully create an xquery evaluator") {
var evaluator : XQueryEvaluator = null
try {
evaluator = XQueryEvaluatorPool.borrowEvaluator(query, executable)
assert (evaluator != null)
}finally {
if (evaluator != null) XQueryEvaluatorPool.returnEvaluator(query, evaluator)
}
}
test("NumIdle should not be zero soon after returning an evaluator") {
var evaluator : XQueryEvaluator = null
try {
evaluator = XQueryEvaluatorPool.borrowEvaluator(query, executable)
assert (evaluator != null)
}finally {
if (evaluator != null) XQueryEvaluatorPool.returnEvaluator(query, evaluator)
assert(XQueryEvaluatorPool.numIdle(query) != 0)
}
}
test("NumActive should increase/decrease as we borrow/return new evaluators") {
val NUM_INCREASE = 5
val initActive = XQueryEvaluatorPool.numActive(query)
val initIdle = XQueryEvaluatorPool.numIdle(query)
val evaluators = new Array[XQueryEvaluator](NUM_INCREASE)
for (i <- 0 to NUM_INCREASE-1) {
evaluators(i) = XQueryEvaluatorPool.borrowEvaluator(query, executable)
}
assert (XQueryEvaluatorPool.numActive(query) >= initActive+NUM_INCREASE)
val fullActive = XQueryEvaluatorPool.numActive(query)
for (i <- 0 to NUM_INCREASE-1) {
XQueryEvaluatorPool.returnEvaluator (query, evaluators(i))
}
assert (XQueryEvaluatorPool.numActive(query) <= fullActive-NUM_INCREASE)
}
}
| rackerlabs/api-checker | util/src/test/scala/com/rackspace/com/papi/components/checker/util/XQueryEvaluatorPoolSuite.scala | Scala | apache-2.0 | 2,719 |
/* *\\
** Squants **
** **
** Scala Quantities and Units of Measure Library and DSL **
** (c) 2013-2015, Gary Keorkunian **
** **
\\* */
package squants.electro
import squants._
import squants.time.{ Seconds, TimeDerivative }
import squants.energy.Watts
/**
* Represents a quantity of electric current
*
* @author garyKeorkunian
* @since 0.1
*
* @param value the amount of charge in [[squants.electro.Amperes]]'s
*/
final class ElectricCurrent private (val value: Double, val unit: ElectricCurrentUnit)
    extends Quantity[ElectricCurrent]
    with TimeDerivative[ElectricCharge] {

  def dimension = ElectricCurrent

  // Current is the time derivative of charge: integrating over 1 second
  // yields that many coulombs.
  protected[squants] def timeIntegrated = Coulombs(toAmperes)
  protected[squants] def time = Seconds(1)

  // Ohm's law: V = I * R
  def *(that: ElectricalResistance): ElectricPotential = Volts(toAmperes * that.toOhms)
  // Electrical power: P = I * V
  def *(that: ElectricPotential): Power = Watts(toAmperes * that.toVolts)
  // Magnetic flux through an inductor: Phi = I * L
  def *(that: Inductance): MagneticFlux = Webers(toAmperes * that.toHenry)
  // Conductance: G = I / V
  def /(that: ElectricPotential): ElectricalConductance = Siemens(toAmperes / that.toVolts)
  // Not yet implemented — calling these throws scala.NotImplementedError.
  def /(that: Length) = ??? // returns MagneticFieldStrength
  def /(that: Area) = ??? // returns ElectricCurrentDensity

  def toAmperes = to(Amperes)
  def toMilliamperes = to(Milliamperes)
}
/** Dimension companion: factory, unit set and SI metadata for [[ElectricCurrent]]. */
object ElectricCurrent extends Dimension[ElectricCurrent] with BaseDimension {
  private[electro] def apply[A](n: A, unit: ElectricCurrentUnit)(implicit num: Numeric[A]) = new ElectricCurrent(num.toDouble(n), unit)
  // Eta-expanded `parse` so `ElectricCurrent("10 A")`-style parsing works.
  def apply = parse _
  def name = "ElectricCurrent"
  def primaryUnit = Amperes
  def siUnit = Amperes
  def units = Set(Amperes, Milliamperes)
  // SI dimension symbol for electric current.
  def dimensionSymbol = "I"
}
}
/**
* Base trait for units of [[squants.electro.ElectricCurrent]]
*/
trait ElectricCurrentUnit extends UnitOfMeasure[ElectricCurrent] with UnitConverter {
  // Factory: builds an ElectricCurrent of magnitude `n` expressed in this unit.
  def apply[A](n: A)(implicit num: Numeric[A]) = ElectricCurrent(n, this)
}
/**
* Amperes
*/
// SI base unit and the dimension's primary unit (conversion factor 1).
object Amperes extends ElectricCurrentUnit with PrimaryUnit with SiBaseUnit {
  val symbol = "A"
}
/**
* Milliamperes
*/
// One thousandth of an ampere (MetricSystem.Milli = 1e-3).
object Milliamperes extends ElectricCurrentUnit {
  val symbol = "mA"
  val conversionFactor = MetricSystem.Milli
}
/** Implicit DSL conversions, e.g. `10.amps` or `5.mA`, plus handy unit constants. */
object ElectricCurrentConversions {
  lazy val ampere = Amperes(1)
  lazy val amp = Amperes(1)
  lazy val milliampere = Milliamperes(1)
  lazy val milliamp = Milliamperes(1)

  implicit class ElectricCurrentConversions[A](n: A)(implicit num: Numeric[A]) {
    def amperes = Amperes(n)
    def amps = Amperes(n)
    def A = Amperes(n)
    /** Correctly spelled alias; `milliampers` below is a typo kept for source compatibility. */
    def milliamperes = Milliamperes(n)
    def milliampers = Milliamperes(n)
    def milliamps = Milliamperes(n)
    def mA = Milliamperes(n)
  }

  // Allows ElectricCurrent values to be used where a Numeric is expected
  // (e.g. `seq.sum`), based on the primary unit.
  implicit object ElectricCurrentNumeric
    extends AbstractQuantityNumeric[ElectricCurrent](ElectricCurrent.primaryUnit)
}
| rmihael/squants | shared/src/main/scala/squants/electro/ElectricCurrent.scala | Scala | apache-2.0 | 3,102 |
/*
* Copyright 2013 Steve Vickers
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package reactivemongo.extensions.dsl.criteria
import scala.language.dynamics
import reactivemongo.bson._
/**
* A '''Term'' instance reifies the use of a MongoDB document field, both
* top-level or nested. Operators common to all ''T'' types are defined here
* with type-specific ones provided in the companion object below.
*
* @author svickers
*
*/
// Note: the field name is backticked (`_term$name`) to avoid colliding with
// user-defined property names resolved through the Dynamic trait below.
final case class Term[T] (`_term$name`: String)
	extends Dynamic
{
	/**
	 * Logical equality.
	 */
	def ===[U <: T : ValueBuilder] (rhs : U) : Expression = Expression (
		`_term$name`,
		`_term$name` -> implicitly[ValueBuilder[U]].bson (rhs)
		);

	/**
	 * Logical equality.
	 */
	def @==[U <: T : ValueBuilder] (rhs : U) : Expression = ===[U] (rhs);

	/**
	 * Logical inequality: '''$ne'''.
	 */
	def <>[U <: T : ValueBuilder] (rhs : U) : Expression = Expression (
		`_term$name`,
		"$ne" -> implicitly[ValueBuilder[U]].bson (rhs)
		);

	/**
	 * Logical inequality: '''$ne'''.
	 */
	def =/=[U <: T : ValueBuilder] (rhs : U) : Expression = <>[U] (rhs);

	/**
	 * Logical inequality: '''$ne'''.
	 */
	def !==[U <: T : ValueBuilder] (rhs : U) : Expression = <>[U] (rhs);

	/**
	 * Less-than comparison: '''$lt'''.
	 */
	def <[U <: T : ValueBuilder] (rhs : U) : Expression = Expression (
		`_term$name`,
		"$lt" -> implicitly[ValueBuilder[U]].bson (rhs)
		);

	/**
	 * Less-than or equal comparison: '''$lte'''.
	 */
	def <=[U <: T : ValueBuilder] (rhs : U) : Expression = Expression (
		`_term$name`,
		"$lte" -> implicitly[ValueBuilder[U]].bson (rhs)
		);

	/**
	 * Greater-than comparison: '''$gt'''.
	 */
	def >[U <: T : ValueBuilder] (rhs : U) : Expression = Expression (
		`_term$name`,
		"$gt" -> implicitly[ValueBuilder[U]].bson (rhs)
		);

	/**
	 * Greater-than or equal comparison: '''$gte'''.
	 */
	def >=[U <: T : ValueBuilder] (rhs : U) : Expression = Expression (
		`_term$name`,
		"$gte" -> implicitly[ValueBuilder[U]].bson (rhs)
		);

	/**
	 * Field existence: '''$exists'''.
	 */
	// Always tests for presence ($exists: true); there is no negated form here.
	def exists : Expression = Expression (
		`_term$name`,
		"$exists" -> BSONBoolean (true)
		);

	/**
	 * Field value equals one of the '''values''': '''$in'''.
	 */
	def in[U <: T : ValueBuilder] (values : Traversable[U])
		(implicit B : ValueBuilder[U])
		: Expression = Expression (
		`_term$name`,
		"$in" -> BSONArray (values map (B.bson))
		);

	/**
	 * Field value equals either '''head''' or one of the (optional)
	 * '''tail''' values: '''$in'''.
	 */
	def in[U <: T : ValueBuilder] (head : U, tail : U*)
		(implicit B : ValueBuilder[U])
		: Expression = Expression (
		`_term$name`,
		"$in" -> BSONArray (Seq (B.bson (head)) ++ tail.map (B.bson))
		);

	// Dynamic member selection enables nested-field access in the DSL:
	// `term.address.city` produces Term("address.city") (dot-joined path).
	def selectDynamic[U] (field : String) : Term[U] = Term[U] (
		`_term$name` + "." + field
		);
}
object Term
{
	/// Class Types
	/**
	 * The '''CollectionTermOps''' `implicit` provides EDSL functionality to
	 * `Seq` [[reactivemongo.extensions.dsl.criteria.Term]]s only.
	 */
	implicit class CollectionTermOps[T] (val term : Term[Seq[T]])
		extends AnyVal
	{
		// Array field contains every given value: '$all'.
		def all (values : Traversable[T])
			(implicit B : ValueBuilder[T])
			: Expression = Expression (
			term.`_term$name`,
			"$all" -> BSONArray (values map (B.bson))
			);
	}

	/**
	 * The '''StringTermOps''' `implicit` enriches
	 * [[reactivemongo.extensions.dsl.criteria.Term]]s for
	 * `String`-only operations.
	 */
	implicit class StringTermOps[T >: String](val term : Term[T])
		extends AnyVal
	{
		// Regex match with modifier flags: '$regex' with '$options'.
		def =~ (re : (String, RegexModifier)) : Expression = Expression (
			term.`_term$name`,
			"$regex" -> BSONRegex (re._1, re._2.value)
			);

		// Regex match without modifier flags.
		def =~ (re : String) : Expression = Expression (
			term.`_term$name`,
			"$regex" -> BSONRegex (re, "")
			);

		// Negated regex match: '$not' wrapping '$regex'.
		def !~ (re : (String, RegexModifier)) : Expression = Expression (
			term.`_term$name`,
			"$not" -> BSONDocument ("$regex" -> BSONRegex (re._1, re._2.value))
			);

		def !~ (re : String) : Expression = Expression (
			term.`_term$name`,
			"$not" -> BSONDocument ("$regex" -> BSONRegex (re, ""))
			);
	}
}
/**
* '''RegexModifier''' types provide the ability for developers to specify
* `$regex` modifiers using type-checked Scala types. For example, specifying
* a `$regex` which ignores case for the `surname` property can be written as:
*
* {{{
*
* criteria.surname =~ "smith" -> IgnoreCase;
*
* }}}
*
* Multiple modifiers can be combined using the or (`|`) operator,
* producing an implementation-defined ordering.
*
* @author svickers
*
*/
sealed trait RegexModifier
{
	/**
	 * Use the or operator to combine two or more '''RegexModifier'''s into
	 * one logical value.
	 */
	def | (other : RegexModifier) : RegexModifier =
		CombinedRegexModifier (this, other);

	/** The MongoDB `$options` flag character(s) contributed by this modifier. */
	def value () : String;
}
/**
 * A [[RegexModifier]] pairing two modifiers; its flag string is the
 * concatenation of both operands' flags (implementation-defined order).
 */
case class CombinedRegexModifier (lhs : RegexModifier, rhs : RegexModifier)
	extends RegexModifier
{
	override def value () : String = s"${lhs.value}${rhs.value}";
}
// '$options' flag "s": '.' also matches newline characters.
case object DotMatchesEverything
	extends RegexModifier
{
	override val value : String = "s";
}
// '$options' flag "x": extended mode, ignoring whitespace in the pattern.
case object ExtendedExpressions
	extends RegexModifier
{
	override val value : String = "x";
}
// '$options' flag "i": case-insensitive matching.
case object IgnoreCase
	extends RegexModifier
{
	override val value : String = "i";
}
// '$options' flag "m": '^' and '$' anchor at line boundaries.
case object MultilineMatching
	extends RegexModifier
{
	override val value : String = "m";
}
| osxhacker/ReactiveMongo-Criteria | src/main/scala/dsl/criteria/Term.scala | Scala | apache-2.0 | 5,799 |
/*
* Copyright 2012-2014 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.comcast.xfinity.sirius.uberstore.data
import java.io.RandomAccessFile
/**
* Trait providing low level File operations for an UberStore
*/
trait UberStoreFileOps {

  /**
   * Write body into writeHandle using standard entry encoding
   * procedures.
   *
   * Has the side effect of advancing writeHandle to the end
   * of the written data
   *
   * @param writeHandle the file to write the body into
   * @param body the body of what we want stored
   *
   * @return offset this object was stored at
   */
  // NOTE(review): the concrete entry encoding (framing/checksums) is defined
  // by implementations, not by this trait.
  def put(writeHandle: RandomAccessFile, body: Array[Byte]): Long

  /**
   * Read the next entry out of the file, from the current offset
   *
   * Has the side effect of advancing readHandle to the end of
   * the written data
   *
   * @param readHandle the RandomAccessFile to read from, at the
   *          current offset
   *
   * @return Some(bytes) or None if EOF encountered
   */
  def readNext(readHandle: RandomAccessFile): Option[Array[Byte]]
}
| weggert/sirius | src/main/scala/com/comcast/xfinity/sirius/uberstore/data/UberStoreFileOps.scala | Scala | apache-2.0 | 1,626 |
/*
* Copyright © 2014 TU Berlin (emma@dima.tu-berlin.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.emmalanguage
package compiler.opt
import compiler.Compiler
import compiler.SparkCompiler
/** Spark-specific optimizations. (Original comment said "Flink" — a copy-paste slip.) */
private[compiler] trait SparkOptimizations extends Compiler
  with SparkSpecializeOps {
  self: SparkCompiler =>

  object SparkOptimizations {

    /** Delegates to [[SparkSpecializeOps.specializeOps]]. */
    lazy val specializeOps = SparkSpecializeOps.specializeOps
  }
}
| emmalanguage/emma | emma-spark/src/main/scala/org/emmalanguage/compiler/opt/SparkOptimizations.scala | Scala | apache-2.0 | 1,036 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.libs.streams
import akka.stream.scaladsl.Flow
import akka.stream.stage._
import akka.stream._
import org.reactivestreams.{ Processor, Subscription, Subscriber, Publisher }
/**
* Probes, for debugging reactive streams.
*/
/**
 * Probes, for debugging reactive streams.
 *
 * Each `*Probe` factory wraps the given Reactive Streams participant so that
 * every signal crossing it is traced to stdout with a timestamp and thread name.
 */
object Probes {

  private trait Probe {

    /** Nanosecond timestamp captured when the probe was created. */
    def startTime: Long
    /** Nanoseconds elapsed since probe creation; printed in every trace line. */
    def time = System.nanoTime() - startTime

    /** Name identifying this probe in the trace output. */
    def probeName: String

    /**
     * Runs `block`, printing ENTER/LEAVE trace lines and a CATCH line on failure.
     *
     * Fix: the default for `logExtra` was `Unit` — the companion *object*, which
     * only type-checked via value discard and is deprecated as a value in modern
     * Scala. The proper "do nothing" unit value is `()`.
     */
    def log[T](method: String, message: String = "", logExtra: => Unit = ())(block: => T) = {
      val threadName = Thread.currentThread().getName
      try {
        println(s"ENTER $probeName.$method at $time in $threadName: $message")
        logExtra
        block
      } catch {
        case e: Exception =>
          println(s"CATCH $probeName.$method ${e.getClass}: ${e.getMessage}")
          throw e
      } finally {
        println(s"LEAVE $probeName.$method at $time")
      }
    }
  }

  /** Wraps `publisher`, tracing `subscribe` and wrapping each subscriber. */
  def publisherProbe[T](name: String, publisher: Publisher[T], messageLogger: T => String = (t: T) => t.toString): Publisher[T] = new Publisher[T] with Probe {
    val probeName = name
    val startTime = System.nanoTime()

    def subscribe(subscriber: Subscriber[_ >: T]) = {
      log("subscribe", subscriber.toString)(publisher.subscribe(subscriberProbe(name, subscriber, messageLogger, startTime)))
    }
  }

  /** Wraps `subscriber`, tracing every signal it receives and its subscription. */
  def subscriberProbe[T](name: String, subscriber: Subscriber[_ >: T], messageLogger: T => String = (t: T) => t.toString, start: Long = System.nanoTime()): Subscriber[T] = new Subscriber[T] with Probe {
    val probeName = name
    val startTime = start

    def onError(t: Throwable) = {
      log("onError", s"${t.getClass}: ${t.getMessage}", t.printStackTrace())(subscriber.onError(t))
    }
    def onSubscribe(subscription: Subscription) = log("onSubscribe", subscription.toString)(subscriber.onSubscribe(subscriptionProbe(name, subscription, start)))
    def onComplete() = log("onComplete")(subscriber.onComplete())
    def onNext(t: T) = log("onNext", messageLogger(t))(subscriber.onNext(t))
  }

  /** Wraps `subscription`, tracing `cancel` and `request(n)`. */
  def subscriptionProbe(name: String, subscription: Subscription, start: Long = System.nanoTime()): Subscription = new Subscription with Probe {
    val probeName = name
    val startTime = start

    def cancel() = log("cancel")(subscription.cancel())
    def request(n: Long) = log("request", n.toString)(subscription.request(n))
  }

  /** Wraps a processor with an "-in" subscriber probe and an "-out" publisher probe. */
  def processorProbe[In, Out](name: String, processor: Processor[In, Out],
    inLogger: In => String = (in: In) => in.toString, outLogger: Out => String = (out: Out) => out.toString): Processor[In, Out] = {
    val subscriber = subscriberProbe(name + "-in", processor, inLogger)
    val publisher = publisherProbe(name + "-out", processor, outLogger)

    new Processor[In, Out] {
      override def onError(t: Throwable): Unit = subscriber.onError(t)
      override def onSubscribe(s: Subscription): Unit = subscriber.onSubscribe(s)
      override def onComplete(): Unit = subscriber.onComplete()
      override def onNext(t: In): Unit = subscriber.onNext(t)
      override def subscribe(s: Subscriber[_ >: Out]): Unit = publisher.subscribe(s)
    }
  }

  /** An identity Flow stage that traces every push/pull and lifecycle event. */
  def flowProbe[T](name: String, messageLogger: T => String = (t: T) => t.toString): Flow[T, T, _] = {
    Flow[T].via(new GraphStage[FlowShape[T, T]] with Probe {
      val in = Inlet[T]("Probes.in")
      val out = Outlet[T]("Probes.out")
      override def shape: FlowShape[T, T] = FlowShape.of(in, out)
      override def startTime: Long = System.nanoTime()
      override def probeName: String = name

      override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
        new GraphStageLogic(shape) with OutHandler with InHandler {
          override def onPush(): Unit = {
            val elem = grab(in)
            log("onPush", messageLogger(elem))(push(out, elem))
          }

          override def onPull(): Unit = log("onPull")(pull(in))

          override def preStart() = log("preStart")(super.preStart())
          override def onUpstreamFinish() = log("onUpstreamFinish")(super.onUpstreamFinish())
          override def onDownstreamFinish() = log("onDownstreamFinish")(super.onDownstreamFinish())
          override def onUpstreamFailure(cause: Throwable) = log("onUpstreamFailure", s"${cause.getClass}: ${cause.getMessage}", cause.printStackTrace())(super.onUpstreamFailure(cause))
          override def postStop() = log("postStop")(super.postStop())

          setHandlers(in, out, this)
        }
    })
  }
}
| wsargent/playframework | framework/src/play-streams/src/main/scala/play/api/libs/streams/Probes.scala | Scala | apache-2.0 | 4,533 |
package mesosphere.marathon.state
import com.google.common.collect.Lists
import mesosphere.marathon.MarathonSpec
import mesosphere.marathon.Protos.MarathonTask
import mesosphere.mesos.protos.Implicits._
import mesosphere.mesos.protos.TextAttribute
import org.scalatest.{ GivenWhenThen, Matchers }
/** Tests for MarathonTaskState's proto round-tripping behavior. */
class MarathonTaskTest extends MarathonSpec with GivenWhenThen with Matchers {

  test("toProto returns the encapsulated MarathonTask") {
    Given("A state created from a task")
    val encapsulatedTask = makeTask("app/dummy", "dummyhost", 42000, version = Some("123"))
    val state = MarathonTaskState(encapsulatedTask)

    When("We call the toProto function")
    val proto = state.toProto

    Then("The returned proto equals the one passed in")
    proto shouldEqual encapsulatedTask
  }

  test("mergeFromProto returns a sane instance") {
    Given("A state created from a task with version")
    val dummy = makeTask("app/dummy", "dummyhost", 42000, version = Some("123"))
    val dummyState = MarathonTaskState(dummy)

    When("We call the mergeFromProto function on that state")
    val proto = makeTask("app/foo", "superhost", 23000, version = None)
    val merged = dummyState.mergeFromProto(proto)

    Then("The 'merged' state does not have a version because mergeFromProto does not merge but create a new instance based on the given proto")
    merged.toProto shouldEqual proto
  }

  test("mergeFromProto bytes returns a sane instance") {
    Given("A state created from a task with version")
    val dummy = makeTask("app/dummy", "dummyhost", 42000, version = Some("123"))
    val dummyState = MarathonTaskState(dummy)

    When("We call the mergeFromProto function using a byte array")
    val proto = makeTask("app/foo", "superhost", 23000, version = None)
    // Same as above, but exercising the byte-array overload.
    val merged = dummyState.mergeFromProto(proto.toByteArray)

    Then("The 'merged' state does not have a version because mergeFromProto does not merge but cerate a new instance based on the given proto")
    merged.toProto shouldEqual proto
  }

  // Builds a MarathonTask proto fixture; `version` is set only when provided.
  private[this] def makeTask(id: String, host: String, port: Int, version: Option[String]) = {
    val builder = MarathonTask.newBuilder()
      .setHost(host)
      .addAllPorts(Lists.newArrayList(port))
      .setId(id)
      .addAttributes(TextAttribute("attr1", "bar"))

    version.map(builder.setVersion)

    builder.build()
  }
}
| ss75710541/marathon | src/test/scala/mesosphere/marathon/state/MarathonTaskTest.scala | Scala | apache-2.0 | 2,365 |
package mesosphere.marathon.core.launchqueue.impl
import java.util.concurrent.TimeUnit
import mesosphere.marathon.core.base.Clock
import mesosphere.marathon.state.{ AppDefinition, PathId, Timestamp }
import org.apache.log4j.Logger
import scala.concurrent.duration._
/**
* Manages the task launch delays for every app and config version.
*
* We do not keep the delays for every version because that would include scaling changes or manual restarts.
*/
private[launchqueue] class RateLimiter(clock: Clock) {
  import RateLimiter._

  /** The task launch delays per app and their last config change. */
  private[this] var taskLaunchDelays = Map[(PathId, Timestamp), Delay]()

  // Drops all delays whose deadline has already passed.
  def cleanUpOverdueDelays(): Unit = {
    taskLaunchDelays = taskLaunchDelays.filter {
      case (_, delay) => delay.deadline > clock.now()
    }
  }

  // Returns the deadline for the app's current config version, or "now"
  // when no delay is recorded (i.e. launching is allowed immediately).
  def getDelay(app: AppDefinition): Timestamp =
    taskLaunchDelays.get(app.id -> app.versionInfo.lastConfigChangeVersion).map(_.deadline) getOrElse clock.now()

  // Increases the backoff delay for the app (or starts one at the app's
  // configured backoff) and returns the new deadline.
  def addDelay(app: AppDefinition): Timestamp = {
    setNewDelay(app, "Increasing delay") {
      case Some(delay) => Some(delay.increased(clock, app))
      case None => Some(Delay(clock, app))
    }
  }

  // Applies `calcDelay` to the currently stored delay; stores the result if
  // its deadline is in the future, otherwise resets the delay entirely.
  private[this] def setNewDelay(app: AppDefinition, message: String)(
    calcDelay: Option[Delay] => Option[Delay]): Timestamp = {
    val maybeDelay: Option[Delay] = taskLaunchDelays.get(app.id -> app.versionInfo.lastConfigChangeVersion)
    calcDelay(maybeDelay) match {
      case Some(newDelay) =>
        import mesosphere.util.DurationToHumanReadable
        val now: Timestamp = clock.now()
        val priorTimeLeft = (now until maybeDelay.map(_.deadline).getOrElse(now)).toHumanReadable
        val timeLeft = (now until newDelay.deadline).toHumanReadable

        if (newDelay.deadline <= now) {
          resetDelay(app)
        }
        else {
          log.info(s"$message. Task launch delay for [${app.id}] changed from [$priorTimeLeft] to [$timeLeft].")
          taskLaunchDelays += ((app.id, app.versionInfo.lastConfigChangeVersion) -> newDelay)
        }
        newDelay.deadline

      case None =>
        resetDelay(app)
        clock.now()
    }
  }

  // Removes any stored delay for the app's current config version.
  def resetDelay(app: AppDefinition): Unit = {
    if (taskLaunchDelays contains (app.id -> app.versionInfo.lastConfigChangeVersion)) {
      log.info(s"Task launch delay for [${app.id} - ${app.versionInfo.lastConfigChangeVersion}}] reset to zero")
      taskLaunchDelays -= (app.id -> app.versionInfo.lastConfigChangeVersion)
    }
  }
}
private object RateLimiter {
  private val log = Logger.getLogger(getClass.getName)
  // Constructors for Delay: from an app's configured initial backoff, or an explicit duration.
  private object Delay {
    def apply(clock: Clock, app: AppDefinition): Delay = Delay(clock, app.backoff)
    def apply(clock: Clock, delay: FiniteDuration): Delay = Delay(clock.now() + delay, delay)
  }
  // A back-off entry: when the next launch is allowed, and the duration used to compute it.
  private case class Delay(
      deadline: Timestamp,
      delay: FiniteDuration) {
    /** Multiplies the delay by `app.backoffFactor`, capped at `app.maxLaunchDelay`, anchored at "now". */
    def increased(clock: Clock, app: AppDefinition): Delay = {
      val newDelay: FiniteDuration =
        app.maxLaunchDelay min FiniteDuration((delay.toNanos * app.backoffFactor).toLong, TimeUnit.NANOSECONDS)
      Delay(clock, newDelay)
    }
  }
}
| EasonYi/marathon | src/main/scala/mesosphere/marathon/core/launchqueue/impl/RateLimiter.scala | Scala | apache-2.0 | 3,164 |
package pl.iterators.kebs_benchmarks
import java.time.format.DateTimeFormatter
import java.time.{LocalDate, LocalTime}
import java.util.concurrent.TimeUnit
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.marshalling.ToResponseMarshallable
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.model.{ContentTypes, HttpEntity}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.testkit.ScalatestRouteTest
import org.openjdk.jmh.annotations._
import org.scalatest.{FunSpec, Matchers}
import pl.iterators.kebs.json.KebsSpray
import spray.json._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Try
// --- Plain data transfer objects mirroring the reservation API's JSON payloads. ---
// Field names must match the JSON keys produced/consumed by the spray-json formats below.
case class Contact(phoneNumber: String, url: String)
case class LocationShort(city: String, latitude: BigDecimal, longitude: BigDecimal, neighborhood: String, timeZone: String)
case class LocationFull(city: String,
                        latitude: BigDecimal,
                        longitude: BigDecimal,
                        neighborhood: String,
                        timeZone: String,
                        address_1: String,
                        state: String,
                        postalCode: String)
case class Reservation(deepLink: String, id: Int, seatType: String, timeSlot: String, webLink: String)
case class TravelTime(distance: Double, driving: Int, walking: Int)
case class AvailableReservation(contact: Contact,
                                deepLink: String,
                                location: LocationShort,
                                images: Option[List[String]],
                                name: String,
                                priceRangeId: Int,
                                reservations: List[Reservation],
                                travelTime: TravelTime,
                                `type`: String,
                                webLink: String)
case class AvailableReservationsResponse(available: List[AvailableReservation])
case class Rater(name: String, score: Double, scale: Double, image: String)
case class Venue(location: LocationFull,
                 name: String,
                 priceRangeId: Int,
                 `type`: String,
                 images: List[String],
                 about: String,
                 tagline: String,
                 rater: Rater)
case class BookedReservation(deepLink: String, seatType: String, timeSlot: String)
case class PaymentDetails(fee: Option[BigDecimal], serviceCharge: Option[BigDecimal], tax: Option[BigDecimal], total: Option[BigDecimal])
case class Payment(details: Option[PaymentDetails])
case class ReservationDetailsResponse(venue: Venue, reservation: BookedReservation, payment: Option[Payment], token: String)
case class Reservations(reservations: List[RequestedReservation])
case class RequestedReservationDetails(day: LocalDate, timeSlot: LocalTime)
case class Fee(amount: BigDecimal)
case class Cancellation(fee: Option[Fee])
case class RequestedReservation(token: String, reservation: RequestedReservationDetails, cancellation: Option[Cancellation])
// Outcome of a reservation-details lookup, modeled as a sealed ADT for exhaustive matching.
sealed trait DetailsResult
object DetailsResult {
  case class Success(details: ReservationDetailsResponse) extends DetailsResult
  case object Expired extends DetailsResult
  case class Error(message: String) extends DetailsResult
}
/**
 * Base spray-json protocol adding formats for `java.time.LocalTime` and `java.time.LocalDate`.
 *
 * Both formats serialize to ISO-formatted JSON strings and report a deserialization error
 * for any non-string or unparsable input.
 */
trait Protocol extends DefaultJsonProtocol with SprayJsonSupport {
  /** JSON format for LocalTime using ISO_LOCAL_TIME, e.g. "10:15:30". */
  implicit val localTimeFormat = new JsonFormat[LocalTime] {
    override def write(obj: LocalTime): JsValue = JsString(formatter.format(obj))
    override def read(json: JsValue): LocalTime = {
      json match {
        case JsString(lTString) =>
          Try(LocalTime.parse(lTString, formatter)).getOrElse(deserializationError(deserializationErrorMessage))
        case _ => deserializationError(deserializationErrorMessage)
      }
    }
    private val formatter = DateTimeFormatter.ISO_LOCAL_TIME
    // Fixed: the message previously claimed "ISO offset date time format" although the
    // parser actually uses ISO_LOCAL_TIME (no offset, time only).
    private val deserializationErrorMessage =
      s"Expected time in ISO local time format ex. ${LocalTime.now().format(formatter)}"
  }
  /** JSON format for LocalDate using ISO_LOCAL_DATE, e.g. "2016-08-06". */
  implicit val localDateFormat = new JsonFormat[LocalDate] {
    override def write(obj: LocalDate): JsValue = JsString(formatter.format(obj))
    override def read(json: JsValue): LocalDate = {
      json match {
        case JsString(lDString) =>
          Try(LocalDate.parse(lDString, formatter)).getOrElse(deserializationError(deserializationErrorMessage))
        case _ => deserializationError(deserializationErrorMessage)
      }
    }
    private val formatter = DateTimeFormatter.ISO_LOCAL_DATE
    // Fixed: the message previously claimed "ISO offset date time format" although the
    // parser actually uses ISO_LOCAL_DATE (no offset, date only).
    private val deserializationErrorMessage =
      s"Expected date in ISO local date format ex. ${LocalDate.now().format(formatter)}"
  }
}
/** Asynchronous reservation backend; implementations are stubbed in the benchmark below. */
abstract class Service {
  def getAvailableReservations: Future[AvailableReservationsResponse]
  def getReservationDetails(id: Int): Future[DetailsResult]
  def getUserReservations(accessToken: String): Future[Reservations]
}
/**
 * Baseline variant: every spray-json format is written out by hand.
 * Used as the reference point for the kebs-derivation benchmark.
 */
object BeforeKebs {
  object Protocol extends Protocol {
    // Flattens a single-field product type to/from the JSON of its only field.
    // NOTE(review): appears unused in this object — possibly kept for API parity; confirm.
    def jsonFlatFormat[P, T <: Product](construct: P => T)(implicit jw: JsonWriter[P], jr: JsonReader[P]): JsonFormat[T] =
      new JsonFormat[T] {
        override def read(json: JsValue): T = construct(jr.read(json))
        override def write(obj: T): JsValue = jw.write(obj.productElement(0).asInstanceOf[P])
      }
    // Declaration order matters: each format must come after the formats of its field types.
    implicit val contactFormat = jsonFormat2(Contact.apply)
    implicit val locationShortFormat = jsonFormat5(LocationShort.apply)
    implicit val locationFullFormat = jsonFormat8(LocationFull.apply)
    implicit val reservationFormat = jsonFormat5(Reservation.apply)
    implicit val travelTimeFormat = jsonFormat3(TravelTime.apply)
    implicit val availableReservationFormat = jsonFormat10(AvailableReservation.apply)
    implicit val availableReservationResponseFormat = jsonFormat1(AvailableReservationsResponse.apply)
    implicit val raterFormat = jsonFormat4(Rater.apply)
    implicit val venueResponseFormat = jsonFormat8(Venue.apply)
    implicit val bookedReservationResponseFormat = jsonFormat3(BookedReservation.apply)
    implicit val paymentDetailsFormat = jsonFormat4(PaymentDetails.apply)
    implicit val paymentFormat = jsonFormat1(Payment.apply)
    implicit val reservationDetailsResponseFormat = jsonFormat4(ReservationDetailsResponse.apply)
    implicit val feeFormat = jsonFormat1(Fee.apply)
    // NOTE(review): private while the surrounding formats are public — intent unclear; confirm.
    private implicit val cancellationFormat = jsonFormat1(Cancellation.apply)
    implicit val requestedReservationDetailsFormat = jsonFormat2(RequestedReservationDetails.apply)
    implicit val requestedReservationFormat = jsonFormat3(RequestedReservation.apply)
    implicit val reservationsFormat = jsonFormat1(Reservations.apply)
  }
  /** Akka HTTP routes wired to the service, marshalling via the hand-written Protocol above. */
  class Router(service: Service)(implicit ec: ExecutionContext) {
    import Protocol._
    val getAvailableReservations = (get & pathEndOrSingleSlash) {
      complete(service.getAvailableReservations)
    }
    val getReservationDetails = (get & path(IntNumber)) { id =>
      complete {
        service.getReservationDetails(id).map[ToResponseMarshallable] {
          case DetailsResult.Success(res) => OK -> res
          case DetailsResult.Expired => NotFound
          case DetailsResult.Error(error) => BadRequest -> error
        }
      }
    }
    val getUserReservations = (get & parameters('token)) { token =>
      complete(service.getUserReservations(token))
    }
  }
}
/**
 * kebs variant: all spray-json formats are derived automatically by mixing in KebsSpray.
 * The Router is intentionally identical to BeforeKebs.Router so only format derivation differs.
 */
object AfterKebs {
  object Protocol extends Protocol with KebsSpray
  class Router(service: Service)(implicit ec: ExecutionContext) {
    import Protocol._
    val getAvailableReservations = (get & pathEndOrSingleSlash) {
      complete(service.getAvailableReservations)
    }
    val getReservationDetails = (get & path(IntNumber)) { id =>
      complete {
        service.getReservationDetails(id).map[ToResponseMarshallable] {
          case DetailsResult.Success(res) => OK -> res
          case DetailsResult.Expired => NotFound
          case DetailsResult.Error(error) => BadRequest -> error
        }
      }
    }
    val getUserReservations = (get & parameters('token)) { token =>
      complete(service.getUserReservations(token))
    }
  }
}
/** Shared benchmark fixtures: a canned in-memory Service plus the two route trees under test. */
object SprayJsonFormatBenchmark {
  val fakeService = new Service {
    val sampleAvailableReservationsResponse = AvailableReservationsResponse(
      List(
        AvailableReservation(
          Contact("12 270 24 88", "<none>"),
          "?",
          LocationShort("Czernichów", 49.9915924, 19.6754663, "Czernichów", "CET"),
          None,
          "RAPIO",
          1,
          List(
            Reservation("?", 1, "stolik", "20:00-21:00", "<none>"),
            Reservation("?", 2, "stolik", "20:00-21:00", "<none>"),
            Reservation("?", 3, "stolik", "20:00-21:00", "<none>")
          ),
          TravelTime(distance = 100, driving = 99, walking = 1),
          "pizzera",
          "<none>"
        )))
    // NOTE(review): name is misspelled ("Reservetion") but is internal to this anonymous class.
    val sampleReservetionDetailsResponse = ReservationDetailsResponse(
      Venue(
        LocationFull("Czernichów", 49.9915924, 19.6754663, "Czernichów", "CET", "Czernichów 232", "małopolskie", "32-071"),
        "RAPIO",
        1,
        "pizzeria",
        List.empty,
        "Pizzeria & Restauracja RAPIO",
        "Pizzeria & Restauracja RAPIO",
        Rater("?", 100.0, 1.0, "?")
      ),
      BookedReservation("?", "stolik", "20.00-21:00"),
      Some(Payment(Some(PaymentDetails(fee = None, serviceCharge = Some(100), tax = Some(0.08), total = Some(108))))),
      token = "abcdefgh"
    )
    val sampleReservations = Reservations(List(
      RequestedReservation("abcdefgh", RequestedReservationDetails(LocalDate.now(), LocalTime.now()), Some(Cancellation(Some(Fee(10)))))))
    // All operations answer immediately with the canned data above; no I/O is performed.
    override def getReservationDetails(id: Int) = Future.successful(DetailsResult.Success(sampleReservetionDetailsResponse))
    override def getUserReservations(accessToken: String) = Future.successful(sampleReservations)
    override def getAvailableReservations = Future.successful(sampleAvailableReservationsResponse)
  }
  import ExecutionContext.Implicits.global
  val beforeKebsRouter = new BeforeKebs.Router(fakeService)
  val afterKebsRouter = new AfterKebs.Router(fakeService)
  val beforeKebsRoutes = beforeKebsRouter.getUserReservations ~ beforeKebsRouter.getAvailableReservations ~ beforeKebsRouter.getReservationDetails
  val afterKebsRoutes = afterKebsRouter.getUserReservations ~ afterKebsRouter.getAvailableReservations ~ afterKebsRouter.getReservationDetails
}
@State(Scope.Benchmark)
@Warmup(iterations = 10, timeUnit = TimeUnit.MILLISECONDS)
@Measurement(iterations = 100, timeUnit = TimeUnit.MILLISECONDS)
@Fork(1)
// JMH benchmark comparing hand-written vs kebs-derived spray-json marshalling through the
// full Akka HTTP route stack. Extends ScalatestRouteTest to reuse its ~> test DSL inside JMH.
class SprayJsonFormatBenchmark extends FunSpec with Matchers with ScalatestRouteTest {
  // GET /?token=... — user reservations, hand-written formats.
  @Benchmark
  @BenchmarkMode(Array(Mode.AverageTime))
  @OutputTimeUnit(TimeUnit.MILLISECONDS)
  def sprayJsonCostBeforeKebs1 = Get("/?token=token") ~> SprayJsonFormatBenchmark.beforeKebsRoutes ~> check {
    status shouldEqual OK
  }
  // GET / — available reservations, hand-written formats.
  @Benchmark
  @BenchmarkMode(Array(Mode.AverageTime))
  @OutputTimeUnit(TimeUnit.MILLISECONDS)
  def sprayJsonCostBeforeKebs2 = Get("/") ~> SprayJsonFormatBenchmark.beforeKebsRoutes ~> check {
    status shouldEqual OK
  }
  // GET /1 — reservation details, hand-written formats.
  @Benchmark
  @BenchmarkMode(Array(Mode.AverageTime))
  @OutputTimeUnit(TimeUnit.MILLISECONDS)
  def sprayJsonCostBeforeKebs3 = Get("/1") ~> SprayJsonFormatBenchmark.beforeKebsRoutes ~> check {
    status shouldEqual OK
  }
  // Same three requests again, served by the kebs-derived formats.
  @Benchmark
  @BenchmarkMode(Array(Mode.AverageTime))
  @OutputTimeUnit(TimeUnit.MILLISECONDS)
  def sprayJsonCostAfterKebs1 = Get("/?token=token") ~> SprayJsonFormatBenchmark.afterKebsRoutes ~> check {
    status shouldEqual OK
  }
  @Benchmark
  @BenchmarkMode(Array(Mode.AverageTime))
  @OutputTimeUnit(TimeUnit.MILLISECONDS)
  def sprayJsonCostAfterKebs2 = Get("/") ~> SprayJsonFormatBenchmark.afterKebsRoutes ~> check {
    status shouldEqual OK
  }
  @Benchmark
  @BenchmarkMode(Array(Mode.AverageTime))
  @OutputTimeUnit(TimeUnit.MILLISECONDS)
  def sprayJsonCostAfterKebs3 = Get("/1") ~> SprayJsonFormatBenchmark.afterKebsRoutes ~> check {
    status shouldEqual OK
  }
}
| theiterators/kebs | benchmarks/src/main/scala/pl/iterators/kebs_benchmarks/SprayJsonFormatBenchmark.scala | Scala | mit | 12,390 |
package text.normalizer
import text.StringOption
/**
* @author ynupc
* Created on 2016/08/06
*/
object SentenceNormalizer {
  /** Normalizes a sentence: the beginning is normalized first, then the ending. */
  def normalize(str: StringOption): StringOption = {
    val beginningNormalized = SentenceBeginningNormalizer.normalize(str)
    SentenceEndingNormalizer.normalize(beginningNormalized)
  }
}
| ynupc/scalastringcourseday7 | src/main/scala/text/normalizer/SentenceNormalizer.scala | Scala | apache-2.0 | 289 |
/**
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*/
package controllers
import java.util.Properties
import kafka.manager.ActorModel.TopicIdentity
import kafka.manager.utils.TopicConfigs
import kafka.manager.{Kafka_0_8_2_1, ApiError, Kafka_0_8_2_0, Kafka_0_8_1_1}
import models.FollowLink
import models.form._
import models.navigation.Menus
import play.api.data.Form
import play.api.data.Forms._
import play.api.data.validation.{Valid, Invalid, Constraint}
import play.api.data.validation.Constraints._
import play.api.mvc._
import scala.concurrent.Future
import scala.util.{Success, Failure, Try}
import scalaz.{\\/-, -\\/}
/**
* @author hiral
*/
object Topic extends Controller{
  import play.api.libs.concurrent.Execution.Implicits.defaultContext
  private[this] val kafkaManager = KafkaManagerContext.getKafkaManager
  // Form-level constraint delegating topic-name validation to the kafka-manager utilities.
  val validateName : Constraint[String] = Constraint("validate name") { name =>
    Try {
      kafka.manager.utils.Topic.validate(name)
    } match {
      case Failure(t) => Invalid(t.getMessage)
      case Success(_) => Valid
    }
  }
  // Per-broker-version defaults: 1 partition, replication 1, and that version's config keys (all unset).
  val kafka_0_8_1_1_Default = CreateTopic("",1,1,TopicConfigs.configNames(Kafka_0_8_1_1).map(n => TConfig(n,None)).toList)
  val kafka_0_8_2_0_Default = CreateTopic("",1,1,TopicConfigs.configNames(Kafka_0_8_2_0).map(n => TConfig(n,None)).toList)
  val kafka_0_8_2_1_Default = CreateTopic("",1,1,TopicConfigs.configNames(Kafka_0_8_2_1).map(n => TConfig(n,None)).toList)
  // Form for creating a topic: name, partition count, replication factor and optional configs.
  val defaultCreateForm = Form(
    mapping(
      "topic" -> nonEmptyText.verifying(maxLength(250), validateName),
      "partitions" -> number(min = 1, max = 10000),
      "replication" -> number(min = 1, max = 1000),
      "configs" -> list(
        mapping(
          "name" -> nonEmptyText,
          "value" -> optional(text)
        )(TConfig.apply)(TConfig.unapply)
      )
    )(CreateTopic.apply)(CreateTopic.unapply)
  )
  // Form for deleting a topic by name.
  val defaultDeleteForm = Form(
    mapping(
      "topic" -> nonEmptyText.verifying(maxLength(250), validateName)
    )(DeleteTopic.apply)(DeleteTopic.unapply)
  )
  // Form for adding partitions; readVersion guards against concurrent modification.
  val defaultAddPartitionsForm = Form(
    mapping(
      "topic" -> nonEmptyText.verifying(maxLength(250), validateName),
      "brokers" -> seq {
        mapping(
          "id" -> number(min = 0),
          "host" -> nonEmptyText,
          "selected" -> boolean
        )(BrokerSelect.apply)(BrokerSelect.unapply)
      },
      "partitions" -> number(min = 1, max = 10000),
      "readVersion" -> number(min = 0)
    )(AddTopicPartitions.apply)(AddTopicPartitions.unapply)
  )
  // Form for updating topic configs; readVersion guards against concurrent modification.
  val defaultUpdateConfigForm = Form(
    mapping(
      "topic" -> nonEmptyText.verifying(maxLength(250), validateName),
      "configs" -> list(
        mapping(
          "name" -> nonEmptyText,
          "value" -> optional(text)
        )(TConfig.apply)(TConfig.unapply)
      ),
      "readVersion" -> number(min = 0)
    )(UpdateTopicConfig.apply)(UpdateTopicConfig.unapply)
  )
  // Pre-fills the create form with defaults matching the cluster's Kafka version.
  private def createTopicForm(clusterName: String) = {
    kafkaManager.getClusterConfig(clusterName).map { errorOrConfig =>
      errorOrConfig.map { clusterConfig =>
        clusterConfig.version match {
          case Kafka_0_8_1_1 => defaultCreateForm.fill(kafka_0_8_1_1_Default)
          case Kafka_0_8_2_0 => defaultCreateForm.fill(kafka_0_8_2_0_Default)
          case Kafka_0_8_2_1 => defaultCreateForm.fill(kafka_0_8_2_1_Default)
        }
      }
    }
  }
  /** GET: lists all topics of a cluster. */
  def topics(c: String) = Action.async {
    kafkaManager.getTopicListExtended(c).map { errorOrTopicList =>
      Ok(views.html.topic.topicList(c,errorOrTopicList))
    }
  }
  /** GET: shows a single topic's identity (partitions, configs, etc.). */
  def topic(c: String, t: String) = Action.async {
    kafkaManager.getTopicIdentity(c,t).map { errorOrTopicIdentity =>
      Ok(views.html.topic.topicView(c,t,errorOrTopicIdentity))
    }
  }
  /** GET: renders the create-topic form pre-filled for the cluster's Kafka version. */
  def createTopic(clusterName: String) = Action.async { implicit request =>
    createTopicForm(clusterName).map { errorOrForm =>
      Ok(views.html.topic.createTopic(clusterName, errorOrForm))
    }
  }
  /** POST: validates the create form and asks kafka-manager to create the topic. */
  def handleCreateTopic(clusterName: String) = Action.async { implicit request =>
    defaultCreateForm.bindFromRequest.fold(
      formWithErrors => Future.successful(BadRequest(views.html.topic.createTopic(clusterName,\\/-(formWithErrors)))),
      ct => {
        // Only configs with a value set are forwarded to Kafka.
        val props = new Properties()
        ct.configs.filter(_.value.isDefined).foreach(c => props.setProperty(c.name,c.value.get))
        kafkaManager.createTopic(clusterName,ct.topic,ct.partitions,ct.replication,props).map { errorOrSuccess =>
          Ok(views.html.common.resultOfCommand(
            views.html.navigation.clusterMenu(clusterName,"Topic","Create",Menus.clusterMenus(clusterName)),
            models.navigation.BreadCrumbs.withNamedViewAndCluster("Topics",clusterName,"Create Topic"),
            errorOrSuccess,
            "Create Topic",
            FollowLink("Go to topic view.",routes.Topic.topic(clusterName, ct.topic).toString()),
            FollowLink("Try again.",routes.Topic.createTopic(clusterName).toString())
          ))
        }
      }
    )
  }
  /** POST: validates the delete form and asks kafka-manager to delete the topic. */
  def handleDeleteTopic(clusterName: String, topic: String) = Action.async { implicit request =>
    defaultDeleteForm.bindFromRequest.fold(
      formWithErrors => Future.successful(
        BadRequest(views.html.topic.topicView(
          clusterName,
          topic,
          -\\/(ApiError(formWithErrors.error("topic").map(_.toString).getOrElse("Unknown error deleting topic!")))))),
      deleteTopic => {
        kafkaManager.deleteTopic(clusterName,deleteTopic.topic).map { errorOrSuccess =>
          Ok(views.html.common.resultOfCommand(
            views.html.navigation.clusterMenu(clusterName,"Topic","Topic View",Menus.clusterMenus(clusterName)),
            models.navigation.BreadCrumbs.withNamedViewAndClusterAndTopic("Topic View",clusterName,topic,"Delete Topic"),
            errorOrSuccess,
            "Delete Topic",
            FollowLink("Go to topic list.",routes.Topic.topics(clusterName).toString()),
            FollowLink("Try again.",routes.Topic.topic(clusterName, topic).toString())
          ))
        }
      }
    )
  }
  /** GET: renders the add-partitions form, pre-filled with current partitions and broker list. */
  def addPartitions(clusterName: String, topic: String) = Action.async { implicit request =>
    val errorOrFormFuture = kafkaManager.getTopicIdentity(clusterName, topic).flatMap { errorOrTopicIdentity =>
      errorOrTopicIdentity.fold( e => Future.successful(-\\/(e)),{ topicIdentity =>
        kafkaManager.getBrokerList(clusterName).map { errorOrBrokerList =>
          errorOrBrokerList.map { bl =>
            defaultAddPartitionsForm.fill(AddTopicPartitions(topic,bl.list.map(bi => BrokerSelect.from(bi)),topicIdentity.partitions,topicIdentity.readVersion))
          }
        }
      })
    }
    errorOrFormFuture.map { errorOrForm =>
      Ok(views.html.topic.addPartitions(clusterName, topic, errorOrForm))
    }
  }
  /** POST: validates the add-partitions form and applies it via kafka-manager. */
  def handleAddPartitions(clusterName: String, topic: String) = Action.async { implicit request =>
    defaultAddPartitionsForm.bindFromRequest.fold(
      formWithErrors => Future.successful(BadRequest(views.html.topic.addPartitions(clusterName, topic,\\/-(formWithErrors)))),
      addTopicPartitions => {
        kafkaManager.addTopicPartitions(clusterName,addTopicPartitions.topic,addTopicPartitions.brokers.filter(_.selected).map(_.id),addTopicPartitions.partitions,addTopicPartitions.readVersion).map { errorOrSuccess =>
          Ok(views.html.common.resultOfCommand(
            views.html.navigation.clusterMenu(clusterName,"Topic","Topic View",Menus.clusterMenus(clusterName)),
            models.navigation.BreadCrumbs.withNamedViewAndClusterAndTopic("Topic View",clusterName, topic,"Add Partitions"),
            errorOrSuccess,
            "Add Partitions",
            FollowLink("Go to topic view.",routes.Topic.topic(clusterName, addTopicPartitions.topic).toString()),
            FollowLink("Try again.",routes.Topic.addPartitions(clusterName, topic).toString())
          ))
        }
      }
    )
  }
  // Builds the update-config form: the version's default config keys merged with the topic's
  // current values (current values win).
  private def updateConfigForm(clusterName: String, ti: TopicIdentity) = {
    kafkaManager.getClusterConfig(clusterName).map { errorOrConfig =>
      errorOrConfig.map { clusterConfig =>
        val defaultConfigMap = clusterConfig.version match {
          case Kafka_0_8_1_1 => TopicConfigs.configNames(Kafka_0_8_1_1).map(n => (n,TConfig(n,None))).toMap
          case Kafka_0_8_2_0 => TopicConfigs.configNames(Kafka_0_8_2_0).map(n => (n,TConfig(n,None))).toMap
          case Kafka_0_8_2_1 => TopicConfigs.configNames(Kafka_0_8_2_1).map(n => (n,TConfig(n,None))).toMap
        }
        val combinedMap = defaultConfigMap ++ ti.config.toMap.map(tpl => tpl._1 -> TConfig(tpl._1,Option(tpl._2)))
        defaultUpdateConfigForm.fill(UpdateTopicConfig(ti.topic,combinedMap.toList.map(_._2),ti.configReadVersion))
      }
    }
  }
  /** GET: renders the update-config form for a topic. */
  def updateConfig(clusterName: String, topic: String) = Action.async { implicit request =>
    val errorOrFormFuture = kafkaManager.getTopicIdentity(clusterName, topic).flatMap { errorOrTopicIdentity =>
      errorOrTopicIdentity.fold( e => Future.successful(-\\/(e)) ,{ topicIdentity =>
        updateConfigForm(clusterName, topicIdentity)
      })
    }
    errorOrFormFuture.map { errorOrForm =>
      Ok(views.html.topic.updateConfig(clusterName, topic, errorOrForm))
    }
  }
  /** POST: validates the update-config form and applies the new config via kafka-manager. */
  def handleUpdateConfig(clusterName: String, topic: String) = Action.async { implicit request =>
    defaultUpdateConfigForm.bindFromRequest.fold(
      formWithErrors => Future.successful(BadRequest(views.html.topic.updateConfig(clusterName, topic,\\/-(formWithErrors)))),
      updateTopicConfig => {
        // Only configs with a value set are forwarded to Kafka.
        val props = new Properties()
        updateTopicConfig.configs.filter(_.value.isDefined).foreach(c => props.setProperty(c.name,c.value.get))
        kafkaManager.updateTopicConfig(clusterName,updateTopicConfig.topic,props,updateTopicConfig.readVersion).map { errorOrSuccess =>
          Ok(views.html.common.resultOfCommand(
            views.html.navigation.clusterMenu(clusterName,"Topic","Topic View",Menus.clusterMenus(clusterName)),
            models.navigation.BreadCrumbs.withNamedViewAndClusterAndTopic("Topic View",clusterName, topic,"Update Config"),
            errorOrSuccess,
            "Update Config",
            FollowLink("Go to topic view.",routes.Topic.topic(clusterName, updateTopicConfig.topic).toString()),
            FollowLink("Try again.",routes.Topic.updateConfig(clusterName, topic).toString())
          ))
        }
      }
    )
  }
}
| evertrue/kafka-manager | app/controllers/Topic.scala | Scala | apache-2.0 | 10,473 |
package com.javachen.grab
import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating}
import org.apache.spark.rdd.RDD
/**
*
* Created by <a href="mailto:junechen@163.com">june</a> on 2015-05-27 09:13.
*/
object EvaluateResult {
  /** Coverage: fraction of all training items that appear in at least one recommendation list. */
  def coverage(training: RDD[Rating],userRecommends:RDD[(Int, List[Int])])={
    userRecommends.flatMap(_._2).distinct().count.toDouble / training.map(_.product).distinct().count
  }
  /** Average popularity of recommended items, measured as mean of log(1 + #ratings in training). */
  def popularity(training: RDD[Rating],userRecommends:RDD[(Int, List[Int])])={
    var ret = 0.0
    var n=0
    // item -> number of users who rated it in the training set
    val item_popularity=training.map{ case Rating(user, product, rate) =>
      (product,(user, rate))
    }.groupByKey(4).map{case (product,list)=>
      (product,list.size)
    }.collectAsMap()
    // NOTE(review): .get(p).get throws NoSuchElementException if a recommended item never
    // occurs in training — confirm recommendations are restricted to training items.
    userRecommends.flatMap(_._2).collect().foreach { p =>
      ret = ret + math.log(1 + item_popularity.get(p).get)
      n = n + 1
    }
    ret/n
  }
  /** Computes (recall, precision, F1) of the recommendations against the training interactions. */
  def recallAndPrecisionAndF1(training: RDD[Rating],userRecommends:RDD[(Int, List[Int])]):(Double, Double,Double) = {
    val usersProducts: RDD[(Int, Int)] = training.map { case Rating(user, product, rate) =>
      (user, product)
    }
    // join recommended items with the user's actual items
    val groupData=userRecommends.join(usersProducts.groupByKey().map {case (k,v) => (k,v.toList)})
    // (hits, total actual items, total recommended items) summed over all users
    val (hit, testNum, recNum) = groupData.map{ case (user, (mItems, tItems)) =>
      var count = 0
      // precision: recommended items that were hit / number of items actually recommended; topN caps the list
      val precNum = mItems.length
      for (i <- 0 until precNum)
        if (tItems.contains(mItems(i)))
          count += 1
      (count, tItems.length, precNum) }.reduce( (t1, t2) => (t1._1 + t2._1, t1._2 + t2._2, t1._3 + t2._3) )
    val recall: Double = hit * 1.0 / testNum
    val precision: Double = hit * 1.0 / recNum
    val f1: Double = 2 * recall * precision / (recall + precision)
    println(s"$hit,$testNum,$recNum")
    (recall,precision,f1)
  }
  /** F1 of exact (user, product, rate) matches between test and result sets; also prints all three metrics. */
  def recallAndPrecision(test:RDD[Rating],result:RDD[Rating]):Double = {
    val numHit: Long = result.intersection(test).count
    val recall: Double = numHit * 1.0 / test.count
    val precision: Double = numHit * 1.0 / result.count
    val f1: Double = 2 * recall * precision / (recall + precision)
    System.out.println("recall : " + recall + "\\nprecision : " + precision + "\\nf1 : " + f1)
    f1
  }
}
| grayaaa/learning-spark | src/main/scala/com/javachen/spark/examples/mllib/EvaluateResult.scala | Scala | apache-2.0 | 2,322 |
package com.yukihirai0505.sInstagram.exceptions
/**
* Created by yukihirai on 2016/11/09.
*/
class OAuthException(message: String, e: Exception = null) extends RuntimeException(message, e)
| yukihirai0505/sInstagram | src/main/scala/com/yukihirai0505/sInstagram/exceptions/OAuthException.scala | Scala | mit | 194 |
package jsm4s.algorithm
import java.io.{ByteArrayOutputStream, OutputStream, OutputStreamWriter}
import com.typesafe.scalalogging.LazyLogging
import jsm4s.FIMI
import jsm4s.algorithm.Strategies._
import jsm4s.ds._
import jsm4s.processing.SortingProcessor
import jsm4s.property.{Composite, Property, PropertyFactory}
import scala.collection.mutable
/** Counters for algorithm events: closure computations and failed canonical tests. */
trait StatsCollector {
  def onClosure(): Unit
  def onCanonicalTestFailure(): Unit
  def printStats(): Unit
}
/** Counts events and logs totals. NOTE(review): counters are plain vars — not thread-safe; confirm single-threaded use. */
class SimpleCollector extends StatsCollector with LazyLogging {
  var closures = 0
  var canonicalTests = 0
  def onClosure(): Unit = closures += 1
  def onCanonicalTestFailure(): Unit = canonicalTests += 1
  def printStats(): Unit = {
    logger.info(s"Closures $closures")
    logger.info(s"Canonical test failures $canonicalTests")
  }
}
/** No-op collector for runs where statistics are not wanted. */
class NullCollector extends StatsCollector {
  def onClosure(): Unit = {}
  def onCanonicalTestFailure(): Unit = {}
  def printStats(): Unit = {}
}
/** Receiver of generated hypotheses; implementations must tolerate calls from multiple threads. */
trait Sink {
  def apply(hypothesis: Hypothesis):Unit
  def close():Unit
}
/** Writes hypotheses as "attrs | props" lines; the header line is written on construction. */
class StreamSink(header: String, factory: PropertyFactory, out: OutputStream) extends Sink {
  val writer = new OutputStreamWriter(out)
  writer.write(header + "\\n")
  override def apply(h: Hypothesis) = {
    // Format outside the lock; only the write itself is synchronized on the writer.
    val str = h.intent.mkString(" ") + " | " + factory.decode(h.props) + "\\n"
    writer.synchronized {
      writer.write(str)
    }
  }
  override def close(): Unit = {
    writer.close()
  }
}
/** Accumulates hypotheses in memory; appends are synchronized on the buffer. */
class ArraySink extends Sink {
  private val buffer = mutable.ArrayBuffer[Hypothesis]()
  override def apply(h: Hypothesis) = {
    buffer.synchronized {
      buffer += h
    }
  }
  override def close(): Unit = {}
  // NOTE(review): reads the buffer without synchronization — only safe after all producers finished.
  def hypotheses:Seq[Hypothesis] = buffer
}
// Everything an algorithm run needs: the formal context rows, per-object properties,
// attribute count, support threshold, stats/output sinks, set factories and merge strategy.
case class Context(rows: Seq[FcaSet],
                   props: Seq[Property],
                   attributes: Int,
                   minSupport: Int,
                   stats: StatsCollector,
                   sink: Sink,
                   ext: ExtentFactory,
                   int: IntentFactory,
                   strategy: MergeStrategy)
object Context {
  /** Builds a Context whose rows are pre-sorted by a SortingProcessor, which also wraps the sink. */
  def sorted(rows: Seq[FcaSet],
             props: Seq[Property],
             attributes: Int,
             minSupport: Int,
             stats: StatsCollector,
             sink: Sink,
             ext: ExtentFactory,
             int: IntentFactory,
             strategy: MergeStrategy): Context = {
    // The processor permutes attributes; its postProcess (applied via the sink) undoes the permutation.
    val proc = new SortingProcessor(rows, attributes, sink, int)
    val sorted = rows.map(intent => int.values(proc.preProcess(intent)))
    Context(sorted, props, attributes, minSupport, stats, proc, ext, int, strategy)
  }
}
/**
 * Base class for FCA lattice-building algorithms; concrete algorithms implement `perform`.
 * All context fields are unpacked for convenient access in subclasses.
 */
abstract class Algorithm(context: Context) {
  val rows = context.rows
  val props = context.props
  val attributes = context.attributes
  val minSupport = context.minSupport
  val stats = context.stats
  val sink = context.sink
  val ext = context.ext
  val int = context.int
  val strategy = context.strategy
  val emptyProperties = new Composite(Seq())
  // filter on extent-intent pair
  /** Merges the properties of all objects in the extent using the configured strategy. */
  def merge(extent: FcaSet, intent: FcaSet): Property = {
    if (props.isEmpty) emptyProperties
    else {
      val properties = strategy(extent.map(e => props(e)).toSeq)
      properties
    }
  }
  /** Emits a hypothesis for the concept if it meets min support and has non-empty merged properties. */
  def output(extent: FcaSet, intent: FcaSet):Unit = {
    if (extent.size >= minSupport) {
      val props = merge(extent, intent)
      if (!props.empty)
        sink(Hypothesis(intent, props))
    }
  }
  /**
   * Closes the concept (A, _) with attribute y: returns (meets min support,
   * new extent = objects in A containing y, new intent = intersection of their rows).
   */
  def closeConcept(A: FcaSet, y: Int) = {
    var C = ext.empty.dup
    var D = int.full.dup
    var cnt = 0
    for (i <- A) {
      if (rows(i) contains y) {
        C += i
        D &= rows(i)
        cnt += 1
      }
    }
    (cnt >= minSupport, C, D)
  }
  /** The algorithm body; implemented by concrete subclasses. */
  def perform(): Unit
  /** Runs the algorithm and closes the sink afterwards. */
  def run(): Unit = {
    perform()
    sink.close()
  }
}
/** Algorithms driven by a work queue of items of type T (e.g. the parallel CbO variants). */
trait QueueAlgorithm[T] extends Algorithm {
  def processQueue(value: T): Unit
}
object Algorithm extends LazyLogging {
  /**
   * Factory: builds the requested algorithm over the given FIMI data.
   *
   * @param name          one of cbo, fcbo, pcbo, pfcbo, dynsort-cbo
   * @param dataStructure "sparse" or "dense" intent representation
   * @param minSupport    minimum extent size for emitted hypotheses
   * @param threads       parallelism for the parallel variants
   */
  def apply(name: String, dataStructure: String, data: FIMI,
            minSupport: Int, threads: Int, stats:StatsCollector, sink:Sink): Algorithm = {
    // Density (% of set cells) is logged to help choose between sparse and dense representations.
    val total = data.intents.foldLeft(0L){(a,b) => a + b.size }
    val density = 100*total / (data.intents.size * data.attrs).toDouble
    logger.info("Context density is {}", density)
    val extFactory = new ArrayExt(data.intents.length)
    val context = dataStructure match {
      case "sparse" =>
        val intFactory = new SparseBitInt(data.attrs)
        logger.info("Using sparse data-structure")
        val sparseSets = data.intents.map(x => SparseBitSet(x))
        Context.sorted(sparseSets, data.props, data.attrs, minSupport, stats, sink, extFactory, intFactory, noCounterExamples)
      case "dense" =>
        logger.info("Using dense data-structure")
        val intFactory = new BitInt(data.attrs)
        Context.sorted(data.intents, data.props, data.attrs, minSupport, stats, sink, extFactory, intFactory, noCounterExamples)
    }
    val algo = name match {
      case "cbo" => new CbO(context)
      case "fcbo" => new FCbO(context)
      case "pcbo" =>
        new PCbO(context, threads)
      case "pfcbo" =>
        new PFCbO(context, threads)
      case "dynsort-cbo" =>
        new DynSortCbO(context)
      // NOTE(review): an unknown dataStructure also throws (MatchError above) — consider validating both inputs.
      case _ => throw new Exception(s"No algorithm ${name} is supported")
    }
    logger.info("Using {} algorithm", name)
    algo
  }
}
} | DmitryOlshansky/jsm4s | src/main/scala/jsm4s/algorithm/Algorithm.scala | Scala | gpl-2.0 | 5,297 |
package spire
package std
import spire.algebra.{Eq, EuclideanRing, IsIntegral, NRoot, Order, Signed, TruncatedDivisionCRing}
import spire.math.BitString
import spire.util.Opt
/** EuclideanRing instance for Short; all ops narrow the intermediate Int result back to Short (may wrap). */
trait ShortIsEuclideanRing extends EuclideanRing[Short] {
  override def minus(a:Short, b:Short): Short = (a - b).toShort
  def negate(a:Short): Short = (-a).toShort
  def one: Short = 1.toShort
  def plus(a:Short, b:Short): Short = (a + b).toShort
  // override def pow(a: Short, b:Int): Short = Math.pow(a, b).toShort TODO: does not obey laws
  override def times(a:Short, b:Short): Short = (a * b).toShort
  def zero: Short = 0.toShort
  override def fromInt(n: Int): Short = n.toShort
  // Euclidean function is |a| as a BigInt; quot/mod/gcd delegate to spire.math helpers.
  def euclideanFunction(a:Short): BigInt = BigInt(a).abs
  override def equotmod(a: Short, b: Short): (Short, Short) = spire.math.equotmod(a, b)
  def equot(a: Short, b: Short): Short = spire.math.equot(a, b)
  def emod(a: Short, b: Short): Short = spire.math.emod(a, b)
  def gcd(a: Short, b: Short)(implicit ev: Eq[Short]): Short = spire.math.gcd(a, b).toShort
  def lcm(a: Short, b: Short)(implicit ev: Eq[Short]): Short = spire.math.lcm(a, b).toShort
}
// Not included in Instances trait.
/** NRoot instance for Short; not part of the default instances (see note above). */
trait ShortIsNRoot extends NRoot[Short] {
  /** Integer n-th root via bitwise binary search from the high bit downwards. */
  def nroot(x: Short, n: Int): Short = {
    def findnroot(prev: Int, add: Int): Short = {
      // Tentatively set the next lower bit and keep it if prev^n does not overshoot x.
      val next = prev | add
      val e = Math.pow(next, n)
      if (e == x || add == 0) {
        next.toShort
      } else if (e <= 0 || e > x) {
        findnroot(prev, add >> 1)
      } else {
        findnroot(next, add >> 1)
      }
    }
    // NOTE(review): the 33-bit starting estimate mirrors the Int instance — confirm intended for 16-bit Short.
    findnroot(0, 1 << ((33 - n) / n))
  }
  // log/fpow compute in Double and truncate to Short.
  def log(a: Short): Short = Math.log(a.toDouble).toShort
  def fpow(a: Short, b: Short): Short = Math.pow(a, b).toShort
}
/** Total order on Short; compare widens to Int so the subtraction cannot overflow. */
trait ShortOrder extends Order[Short] {
  override def eqv(x:Short, y:Short): Boolean = x == y
  override def neqv(x:Short, y:Short): Boolean = x != y
  override def gt(x: Short, y: Short): Boolean = x > y
  override def gteqv(x: Short, y: Short): Boolean = x >= y
  override def lt(x: Short, y: Short): Boolean = x < y
  override def lteqv(x: Short, y: Short): Boolean = x <= y
  def compare(x: Short, y: Short): Int = java.lang.Integer.signum((x: Int) - (y: Int))
}
/** Signed instance for Short, layered on the total order above. */
trait ShortSigned extends Signed[Short] with ShortOrder {
  /** Sign of `a`: -1, 0 or 1, computed after widening to Int. */
  override def signum(a: Short): Int = java.lang.Integer.signum(a.toInt)
  /** Absolute value; note Short.MinValue maps back to itself because of the narrowing `.toShort`. */
  override def abs(a: Short): Short = java.lang.Math.abs(a.toInt).toShort
}
/** Truncated (round-toward-zero) division for Short, matching JVM / and % semantics. */
trait ShortTruncatedDivision extends TruncatedDivisionCRing[Short] with ShortSigned {
  def toBigIntOpt(x: Short): Opt[BigInt] = Opt(BigInt(x))
  def tquot(x: Short, y: Short): Short = (x / y).toShort
  def tmod(x: Short, y: Short): Short = (x % y).toShort
}
/** IsIntegral instance: exact conversions of Short to Double and BigInt. */
trait ShortIsReal extends IsIntegral[Short] with ShortTruncatedDivision {
  def toDouble(n: Short): Double = n.toDouble
  def toBigInt(n: Short): BigInt = BigInt(n)
}
@SerialVersionUID(0L)
// BitString instance treating Short as a 16-bit word. Values are masked with 0xffff so the
// Integer helpers operate on the unsigned 16-bit representation, then narrowed back to Short.
class ShortIsBitString extends BitString[Short] with Serializable {
  def one: Short = (-1: Short)
  def zero: Short = (0: Short)
  def and(a: Short, b: Short): Short = (a & b).toShort
  def or(a: Short, b: Short): Short = (a | b).toShort
  def complement(a: Short): Short = (~a).toShort
  override def xor(a: Short, b: Short): Short = (a ^ b).toShort
  def signed: Boolean = true
  def width: Int = 16
  def toHexString(n: Short): String = Integer.toHexString(n & 0xffff)
  def bitCount(n: Short): Int = Integer.bitCount(n & 0xffff)
  def highestOneBit(n: Short): Short = (Integer.highestOneBit(n & 0xffff) & 0xffff).toShort
  def lowestOneBit(n: Short): Short = (Integer.lowestOneBit(n & 0xffff) & 0xffff).toShort
  // Adjust Integer's 32-bit leading-zero count down to the 16-bit width.
  def numberOfLeadingZeros(n: Short): Int = Integer.numberOfLeadingZeros(n & 0xffff) - 16
  def numberOfTrailingZeros(n: Short): Int = if (n == 0) 16 else Integer.numberOfTrailingZeros(n & 0xffff)
  // Shift amounts are taken mod 16 (i & 15), mirroring the JVM's mod-32 behavior for Int.
  def leftShift(n: Short, i: Int): Short = (((n & 0xffff) << (i & 15)) & 0xffff).toShort
  def rightShift(n: Short, i: Int): Short = (((n & 0xffff) >>> (i & 15)) & 0xffff).toShort
  def signedRightShift(n: Short, i: Int): Short = ((n >> (i & 15)) & 0xffff).toShort
  def rotateLeft(n: Short, i: Int): Short = {
    val j = i & 15
    ((((n & 0xffff) << j) | ((n & 0xffff) >>> (16 - j))) & 0xffff).toShort
  }
  def rotateRight(n: Short, i: Int): Short = {
    val j = i & 15
    ((((n & 0xffff) >>> j) | ((n & 0xffff) << (16 - j))) & 0xffff).toShort
  }
}
@SerialVersionUID(0L)
/** Combined euclidean-ring + integral-real instance for `Short`. */
class ShortAlgebra extends ShortIsEuclideanRing with ShortIsReal with Serializable
/** Implicit typeclass instances for `Short`. */
trait ShortInstances {
  implicit final val ShortBitString = new ShortIsBitString
  implicit final val ShortAlgebra = new ShortAlgebra
  import spire.math.NumberTag._
  // Short is a bounded built-in integer: zero, min and max are exact.
  implicit final val ShortTag = new BuiltinIntTag[Short](0, Short.MinValue, Short.MaxValue)
}
| non/spire | core/src/main/scala/spire/std/short.scala | Scala | mit | 4,666 |
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.ejson
import slamdata.Predef._
import quasar.contrib.matryoshka.{project => projectg, _}
import quasar.contrib.iota.{copkTraverse, copkOrder}
import qdata.{QDataDecode, QDataEncode}
import matryoshka._
import matryoshka.implicits._
import scalaz.{==>>, Equal, Order}
import scalaz.std.list._
import scalaz.syntax.equal._
import scalaz.syntax.foldable._
import scalaz.syntax.std.option._
object implicits {
  // NB: This is defined here as we need to elide metadata from args before
  // comparing them.
  /** Structural ordering for EJson trees that ignores `Meta` nodes, so two
    * values that differ only in attached metadata compare as equal.
    */
  implicit def ejsonOrder[T](
    implicit
    TC: Corecursive.Aux[T, EJson],
    TR: Recursive.Aux[T, EJson]
  ): Order[T] =
    Order.order { (x, y) =>
      implicit val ordExt = Extension.structuralOrder
      // Strip metadata from both sides before the recursive comparison.
      OrderR.order[T, EJson](
        x.transCata[T](EJson.elideMetadata[T]),
        y.transCata[T](EJson.elideMetadata[T]))
    }

  implicit def ejsonQDataDecode[J](implicit T: Recursive.Aux[J, EJson]): QDataDecode[J] =
    QDataEJson.decode[J]

  implicit def ejsonQDataEncode[J](implicit T: Corecursive.Aux[J, EJson]): QDataEncode[J] =
    QDataEJson.encode[J]

  /** Convenience accessors on an EJson value `j`. */
  implicit final class EJsonOps[J](val j: J) extends scala.AnyVal {
    /** The elements of `j` if it is an array, `None` otherwise. */
    def array(implicit JR: Recursive.Aux[J, EJson]): Option[List[J]] =
      projectg[J, EJson].composePrism(optics.arr).headOption(j)

    /** The key/value pairs of `j` if it is a map, `None` otherwise. */
    def assoc(implicit JR: Recursive.Aux[J, EJson]): Option[List[(J, J)]] =
      projectg[J, EJson].composePrism(optics.map).headOption(j)

    def decodeAs[A](implicit JC: Corecursive.Aux[J, EJson], JR: Recursive.Aux[J, EJson], A: DecodeEJson[A]): Decoded[A] =
      A.decode[J](j)

    /** Looks up string key `k`, failing the decode when absent. */
    def decodeKeyS(k: String)(implicit JC: Corecursive.Aux[J, EJson], JR: Recursive.Aux[J, EJson]): Decoded[J] =
      Decoded.attempt(j, keyS(k) \\/> s"Map[$k]")

    /** Looks up string key `k` and decodes the value as an `A`. */
    def decodedKeyS[A: DecodeEJson](k: String)(implicit JC: Corecursive.Aux[J, EJson], JR: Recursive.Aux[J, EJson]): Decoded[A] =
      decodeKeyS(k) flatMap (_.decodeAs[A])

    /** Linear scan of the map entries for the first key equal to `k`. */
    def key(k: J)(implicit JR: Recursive.Aux[J, EJson], J: Equal[J]): Option[J] =
      assoc flatMap (_ findLeft (_._1 ≟ k)) map (_._2)

    def keyS(k: String)(implicit JC: Corecursive.Aux[J, EJson], JR: Recursive.Aux[J, EJson]): Option[J] =
      key(EJson.str(k))

    /** The entries of `j` as a scalaz ordered map, if `j` is a map. */
    def map(implicit JR: Recursive.Aux[J, EJson], J: Order[J]): Option[J ==>> J] =
      projectg[J, EJson].composePrism(optics.imap).headOption(j)
  }

  /** Syntax for encoding a value as EJson. */
  implicit final class EncodeEJsonOps[A](val self: A) extends scala.AnyVal {
    def asEJson[J](
      implicit
      A : EncodeEJson[A],
      JC: Corecursive.Aux[J, EJson],
      JR: Recursive.Aux[J, EJson]
    ): J =
      A.encode[J](self)
  }

  /** Syntax for encoding a functor-wrapped EJson value. */
  implicit final class EncodeEJsonKOps[F[_], J](val self: F[J]) extends scala.AnyVal {
    def asEJsonK(
      implicit
      F : EncodeEJsonK[F],
      JC: Corecursive.Aux[J, EJson],
      JR: Recursive.Aux[J, EJson]
    ): J =
      F.encodeK[J].apply(self)
  }
}
| slamdata/quasar | ejson/src/main/scala/quasar/ejson/implicits.scala | Scala | apache-2.0 | 3,472 |
package com.sohu.mrd.sonlp.core
import java.io.{File, PrintWriter}
import com.sohu.mrd.sonlp.SoNLP
import com.sohu.mrd.sonlp.util.FileProcessor
import scala.collection.mutable
/**
* Created by huangyu on 15/12/20.
*/
/** Naive keyword-based news-category classifier.
  *
  * Loads a word vocabulary and per-word category distributions from two
  * files under `parameterPath` ("word" and "word_dis"), normalizes each
  * word's per-category weight by the category's maximum weight, and
  * predicts categories by summing weighted keyword scores.
  *
  * Expected file layout (tab separated) — inferred from the parsing code,
  * TODO confirm against the data producer:
  *   word:     <word>\t<count>
  *   word_dis: <category>\t<word>\t<weight>
  */
class NewsClassification(_parameterPath: String) {

  // Vocabulary: only words seen at least 10 times are kept (see initParameter).
  val words: mutable.Set[String] = new mutable.HashSet[String]()
  // Normalize to a trailing slash so file names can be appended directly.
  val parameterPath = if (_parameterPath.endsWith("/")) _parameterPath else _parameterPath + "/"

  // word -> (category -> weight)
  val wordDistribute: mutable.Map[String, mutable.Map[String, Double]] = new mutable.HashMap[String, mutable.Map[String, Double]]()
  //  val classDistribute: mutable.Map[String, Double] = new mutable.HashMap[String, Double]()
  // category -> total weight over all words
  val wordSum: mutable.Map[String, Double] = new mutable.HashMap[String, Double]()
  // category -> maximum single-word weight (used for normalization)
  val wordMax: mutable.Map[String, Double] = new mutable.HashMap[String, Double]()

  initParameter()

  //  val classNum = classDistribute.keys.size
  val wordNum = wordDistribute.keys.size

  smooth()
  normalize()

  /** Loads the vocabulary and the raw word/category weight tables. */
  private[this] def initParameter(): Unit = {
    FileProcessor.processLine(new File(parameterPath + "word"), line => {
      val ss = line.split("\\t")
      // Frequency threshold: ignore rare words.
      if (ss(1).toDouble >= 10) {
        words.add(ss(0))
      }
    })
    FileProcessor.processLine(new File(parameterPath + "word_dis"), line => {
      val ss = line.split("\\t")
      if (words.contains(ss(1))) {
        val wd = wordDistribute.getOrElseUpdate(ss(1), new mutable.HashMap[String, Double]())
        wd.put(ss(0), ss(2).toDouble)
        wordSum.put(ss(0), wordSum.getOrElse(ss(0), 0.0) + ss(2).toDouble)
        wordMax.put(ss(0), Math.max(wordMax.getOrElse(ss(0), 0.0), ss(2).toDouble))
      }
    })
    //    FileProcessor.processLine(new File(parameterPath + "class_dis"), line => {
    //      val ss = line.split("\\t")
    //      classDistribute.put(ss(0), ss(1).toDouble)
    //    })
  }

  /** Divides every word's per-category weight by that category's max weight,
    * scaling weights into (0, 1]. (Alternative normalizations kept commented
    * out for reference.)
    */
  private[this] def normalize(): Unit = {
    //    wordDistribute.foreach(kv => {
    //      val dis = kv._2
    //      dis.keys.foreach(sch => {
    //        dis.put(sch, (dis.getOrElse(sch, 0.0)) / (wordSum(sch)))
    //      })
    //    })
    wordDistribute.foreach(kv => {
      val dis = kv._2
      dis.keys.foreach(sch => {
        dis.put(sch, (dis.getOrElse(sch, 0.0)) / (wordMax(sch)))
      })
    })
    //    wordDistribute.foreach(kv => {
    //      val dis = kv._2
    //      val sum = dis.values.sum
    //      dis.keys.foreach(sch => {
    //        dis.put(sch, dis(sch) / sum)
    //      })
    //    })
  }

  // NOTE: "getWeigth" (sic) — name kept as-is; currently only exercised by
  // the commented-out diagnostics in the companion object's main.
  private def getWeigth(word: String, sch: String): Double = {
    //    wordDistribute.get(word).map(_.getOrElse(sch, 1.0 / (wordSum(sch) + wordNum))).
    //      getOrElse(1.0 / (wordSum(sch) + wordNum))
    wordDistribute.get(word).map(_.getOrElse(sch, 0.0)).getOrElse(0.0)
  }

  // Placeholder: no smoothing is currently applied.
  private[this] def smooth(): Unit = {
  }

  /** Scores every category by summing keyword-weight x word/category weight,
    * returning the top 8 (category, score) pairs in descending score order.
    */
  def predict(keywords: Array[StringWeight]): Array[(String, Double)] = {
    val schWeight = new mutable.HashMap[String, Double]()
    keywords.foreach(kv => {
      wordDistribute.get(kv.key).foreach(_.foreach(schw => {
        schWeight.put(schw._1, schWeight.getOrElse(schw._1, 0.0) + kv.weigth * schw._2)
      }))
    })
    schWeight.toArray.sortBy(-_._2).take(8)
  }

  /** Dumps the (normalized) word distributions as
    * `<category>\t<word>\t<weight>` lines to `resultFile`.
    */
  def save(resultFile: String): Unit = {
    val p = new PrintWriter(resultFile, "utf-8")
    wordDistribute.foreach(wordChDis => {
      val word = wordChDis._1
      val dis = wordChDis._2
      dis.foreach(sw => {
        p.println(sw._1 + "\\t" + word + "\\t" + sw._2)
      })
    })
    p.close()
  }
}
/** Ad-hoc driver: classifies articles from a dump file and writes per-category
  * prediction reports under result/predict/. Commented-out lines are kept as
  * historical debugging probes.
  */
object NewsClassification {
  def main(args: Array[String]): Unit = {
    val nc = new NewsClassification("data/csh/")
    //    nc.save("result/word_dis")
    //    println(nc.wordSum("20200"))
    //    println(nc.wordSum("90300"))
    //    println(nc.wordSum("10100"))
    //    println(nc.wordSum("50100"))
    //    println(nc.wordDistribute("相机")("71300"))
    //    println(nc.getWeigth("挺住", "71400"))
    //    println(nc.getWeigth("挺住", "20200"))
    //    println(nc.getWeigth("南京", "120100"))
    //    println(nc.getWeigth("南京", "50100"))
    //    val pf = new PrintWriter("result/t")
    // One output writer per predicted category, created lazily.
    val pfs = new mutable.HashMap[String, PrintWriter]()
    FileProcessor.processLine(new File("data/20151223"), line => {
      val ss = line.split("\\t")
      //      val pf = pfs.getOrElseUpdate(ss(7), new PrintWriter("result/predict/" + ss(7)))
      // Columns 12/14 look like title/body; TODO confirm dump schema.
      val kws = SoNLP.keyword(ss(12), ss(14), num = 10)
      val p = nc.predict(kws)
      // Only accept the top category when its score clears 0.9.
      val name = if (p(0)._2 > 0.9) p(0)._1 else "unkown"
      val pf = pfs.getOrElseUpdate(name, new PrintWriter("result/predict/" + name))
      kws.foreach(kw => pf.print(kw.key + ":" + kw.weigth + ","))
      pf.println
      pf.println(ss(12) + "\\t" + ss(7))
      p.foreach(kv => pf.print(kv._1 + ":" + kv._2 + ","))
      pf.println
      pf.println("##############")
    })
    // Flush and close every per-category writer.
    pfs.values.foreach(_.close())
  }
}
package com.github.mogproject.redismock.util
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FunSpec, Matchers}
import org.scalatest.prop.GeneratorDrivenPropertyChecks
/** Specification for the `Bytes` value class: construction, comparison
  * (unsigned, lexicographic), concatenation, string rendering and the
  * companion builder.
  */
class BytesSpec extends FunSpec
  with Matchers
  with GeneratorDrivenPropertyChecks
  with BeforeAndAfterEach
  with BeforeAndAfterAll {

  describe("Bytes#seq") {
    it("should return sequence of Byte") {
      Bytes(1, 2, 3, -1, 0).seq shouldBe Bytes(1, 2, 3, -1, 0)
    }
    it("should describe its whole data") {
      // Property: round-trips an arbitrary Seq[Byte] unchanged.
      forAll { xs: Seq[Byte] =>
        Bytes(xs).seq shouldBe Bytes(xs)
      }
    }
  }

  describe("Bytes#length") {
    it("should return length of data") {
      Bytes(1, 2, 3, -1, 0).length shouldBe 5
    }
  }

  describe("Bytes#equals") {
    it("should return true when the data is same") {
      // Equality holds only against another Bytes, not raw Seq or other types.
      Bytes(1, 2, 3, -1, 0).equals(Bytes(1, 2, 3, -1, 0)) shouldBe true
      Bytes(1, 2, 3, -1, 0).equals(Seq[Byte](1, 2, 3, -1, 0)) shouldBe false
      Bytes(1, 2, 3, -1, 0).equals(1.23) shouldBe false
    }
  }

  describe("Bytes#compare") {
    it("should return the result of comparison") {
      Bytes().compare(Bytes()) shouldBe 0
      Bytes().compare(Bytes(0)) should be < 0
      Bytes(0).compare(Bytes()) should be > 0
    }
    it("should compare bits in unsigned") {
      // -1 is 0xff, which is greater than 0x00 when treated as unsigned.
      Bytes(-1).compare(Bytes(0)) should be > 0
      Bytes(0, 1, 2, 3).compare(Bytes(0, 1, 2, -3)) should be < 0
    }
    it("should enable comparison with Bytes") {
      Bytes(1, 2, 3) should be < Bytes(1, 2, 4)
      Bytes(1, 2, 3) should be > Bytes(1, 2)
    }
    it("should compare with MaxValue") {
      // MaxValue is the supremum: greater than any concrete byte sequence.
      Bytes().compare(Bytes.MaxValue) shouldBe -1
      Bytes.MaxValue.compare(Bytes(255, 255, 255)) shouldBe 1
      Bytes.MaxValue.compare(Bytes.MaxValue) shouldBe 0
    }
  }

  describe("Bytes#fill") {
    it("should fill value of the specified number") {
      // Non-positive counts yield the empty value rather than throwing.
      Bytes.fill(-1)(3.toByte) shouldBe Bytes.empty
      Bytes.fill(0)(3.toByte) shouldBe Bytes.empty
      Bytes.fill(1)(3.toByte) shouldBe Bytes(3)
      Bytes.fill(5)(3.toByte) shouldBe Bytes(3, 3, 3, 3, 3)
    }
  }

  describe("Bytes#++") {
    it("should append bytes") {
      Bytes(1) ++ Bytes(2, 3, 4) shouldBe Bytes(1, 2, 3, 4)
      Bytes(1) ++ Seq[Byte](2, 3, 4) shouldBe Bytes(1, 2, 3, 4)
    }
  }

  describe("Bytes#newString") {
    it("should make string from bytes") {
      Bytes.empty.newString shouldBe ""
      Bytes(97).newString shouldBe "a"
      Bytes(97, 98, 99, 100, 101).newString shouldBe "abcde"
    }
  }

  describe("Bytes#toString") {
    it("should describe contents") {
      // Bytes are rendered unsigned (-1 prints as 255).
      Bytes.empty.toString shouldBe "Bytes()"
      Bytes(97).toString shouldBe "Bytes(97)"
      Bytes(97, 98, 99, 100, 101, -1).toString shouldBe "Bytes(97, 98, 99, 100, 101, 255)"
    }
  }

  describe("Bytes#apply") {
    it("should construct with Vector[Byte]") {
      Bytes.apply(Vector.empty[Byte]) shouldBe Bytes.empty
      Bytes.apply(Vector[Byte](1.toByte)) shouldBe Bytes(1)
      Bytes.apply(Vector[Byte](1.toByte, 2.toByte, 3.toByte)) shouldBe Bytes(1, 2, 3)
    }
    it("should construct with empty") {
      Bytes.apply() shouldBe Bytes.empty
    }
    it("should construct with Int") {
      Bytes.apply(1) shouldBe Bytes(1)
      Bytes.apply(1, 2, 3) shouldBe Bytes(1, 2, 3)
      Bytes.apply(1, 2, 3.toByte) shouldBe Bytes(1, 2, 3)
    }
    it("should construct with Byte") {
      Bytes.apply(1.toByte) shouldBe Bytes(1)
      Bytes.apply(1.toByte, 2.toByte, 3.toByte) shouldBe Bytes(1, 2, 3)
    }
  }

  describe("ByteBuilder") {
    it("should build Bytes") {
      // clear() discards earlier contents before the final result is taken.
      val b = new BytesBuilder
      b += 1
      b.clear()
      b += 2
      b ++= Seq(3, 4)
      b.result() shouldBe Bytes(2, 3, 4)
    }
  }
}
| mogproject/scala-redis-mock | src/test/scala/com/github/mogproject/redismock/util/BytesSpec.scala | Scala | apache-2.0 | 3,686 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ly.stealth.mesos.kafka
import org.junit.{After, Before, Test}
import org.junit.Assert._
import java.util
import scala.collection.JavaConversions._
import java.io.{ByteArrayOutputStream, PrintStream}
import Util.Period
/** End-to-end tests for the CLI: each test starts the HTTP server, runs a
  * command line through [[Cli.exec]] with stdout captured into `out`, and
  * asserts on the captured output and scheduler state.
  */
class CliTest extends MesosTestCase {
  // Captures everything the CLI prints during a single exec() call.
  val out: ByteArrayOutputStream = new ByteArrayOutputStream()

  @Before
  override def before {
    super.before
    Config.api = "http://localhost:8000"
    Cli.api = Config.api
    HttpServer.start(resolveDeps = false)
    // Redirect CLI output into the in-memory buffer (autoflush on).
    Cli.out = new PrintStream(out, true)
  }

  @After
  override def after {
    Cli.out = System.out
    HttpServer.stop()
    super.after
  }

  @Test
  def help {
    exec("help")
    assertOutContains("Usage:")
    assertOutContains("scheduler")
    assertOutContains("start")
    assertOutContains("stop")

    // command help
    for (command <- "help scheduler status add update remove start stop".split(" ")) {
      exec("help " + command)
      assertOutContains("Usage: " + command)
    }
  }

  @Test
  def status {
    Scheduler.cluster.addBroker(new Broker("0"))
    Scheduler.cluster.addBroker(new Broker("1"))
    Scheduler.cluster.addBroker(new Broker("2"))

    exec("status")
    assertOutContains("status received")
    assertOutContains("id: 0")
    assertOutContains("id: 1")
    assertOutContains("id: 2")
  }

  @Test
  def add {
    exec("add 0 --cpus=0.1 --mem=128")
    assertOutContains("Broker added")
    assertOutContains("id: 0")
    assertOutContains("cpus:0.10, mem:128")

    assertEquals(1, Scheduler.cluster.getBrokers.size())
    val broker = Scheduler.cluster.getBroker("0")
    assertEquals(0.1, broker.cpus, 0.001)
    assertEquals(128, broker.mem)
  }

  @Test
  def update {
    val broker = Scheduler.cluster.addBroker(new Broker("0"))
    exec("update 0 --failover-delay=10s --failover-max-delay=20s --options=log.dirs=/tmp/kafka-logs")
    assertOutContains("Broker updated")
    assertOutContains("delay:10s, max-delay:20s")
    assertOutContains("options: log.dirs=/tmp/kafka-logs")

    assertEquals(new Period("10s"), broker.failover.delay)
    assertEquals(new Period("20s"), broker.failover.maxDelay)
    assertEquals(Util.parseMap("log.dirs=/tmp/kafka-logs"), broker.options)
  }

  @Test
  def remove {
    Scheduler.cluster.addBroker(new Broker("0"))
    exec("remove 0")
    assertOutContains("Broker 0 removed")
    assertNull(Scheduler.cluster.getBroker("0"))
  }

  @Test
  def start_stop {
    val broker0 = Scheduler.cluster.addBroker(new Broker("0"))
    val broker1 = Scheduler.cluster.addBroker(new Broker("1"))

    exec("start * --timeout=0")
    assertOutContains("Brokers 0,1")
    assertTrue(broker0.active)
    assertTrue(broker1.active)

    exec("stop 0 --timeout=0")
    assertOutContains("Broker 0")
    assertFalse(broker0.active)
    assertTrue(broker1.active)

    exec("stop 1 --timeout=0")
    assertOutContains("Broker 1")
    assertFalse(broker0.active)
    assertFalse(broker1.active)
  }

  @Test
  def start_stop_timeout {
    val broker = Scheduler.cluster.addBroker(new Broker("0"))
    try { exec("start 0 --timeout=1ms"); fail() }
    catch { case e: Cli.Error => assertTrue(e.getMessage, e.getMessage.contains("Got timeout")) }
    // Even though the wait timed out, the state change was requested.
    assertTrue(broker.active)

    broker.task = new Broker.Task("id", "slave", "executor", "host", _state = Broker.State.RUNNING)
    try { exec("stop 0 --timeout=1ms"); fail() }
    catch { case e: Cli.Error => assertTrue(e.getMessage, e.getMessage.contains("Got timeout")) }
    assertFalse(broker.active)
  }

  @Test
  def rebalance {
    val cluster: Cluster = Scheduler.cluster
    val rebalancer: Rebalancer = cluster.rebalancer

    cluster.addBroker(new Broker("0"))
    cluster.addBroker(new Broker("1"))
    assertFalse(rebalancer.running)

    exec("rebalance *")
    assertTrue(rebalancer.running)
    assertOutContains("Rebalance started")
  }

  @Test
  def usage_errors {
    // no command
    try { exec(""); fail() }
    catch { case e: Cli.Error => assertTrue(e.getMessage, e.getMessage.contains("command required")) }

    // no id
    try { exec("add"); fail() }
    catch { case e: Cli.Error => assertTrue(e.getMessage, e.getMessage.contains("argument required")) }

    // invalid command
    try { exec("unsupported 0"); fail() }
    catch { case e: Cli.Error => assertTrue(e.getMessage, e.getMessage.contains("unsupported command")) }
  }

  @Test
  def connection_refused {
    HttpServer.stop()
    try {
      try { exec("add 0"); fail() }
      catch { case e: Cli.Error => assertTrue(e.getMessage, e.getMessage.contains("Connection refused")) }
    } finally {
      HttpServer.start()
    }
  }

  private def assertOutContains(s: String): Unit = assertTrue("" + out, out.toString.contains(s))

  /** Splits `cmd` on spaces and feeds the non-empty tokens to [[Cli.exec]],
    * resetting the captured output first.
    */
  private def exec(cmd: String): Unit = {
    out.reset()
    val args = new util.ArrayList[String]()
    for (arg <- cmd.split(" "))
      // BUGFIX: previously tested `!cmd.isEmpty`, which let empty tokens
      // (e.g. from doubled spaces) through whenever the command was non-empty.
      if (!arg.isEmpty) args.add(arg)
    Cli.exec(args.toArray(new Array[String](args.length)))
  }
}
| serejja/kafka | src/test/ly/stealth/mesos/kafka/CliTest.scala | Scala | apache-2.0 | 5,780 |
/*
* Copyright 2020 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.coders.instances.kryo
import com.esotericsoftware.kryo.Kryo
import com.esotericsoftware.kryo.io.{Input, Output}
import com.twitter.chill.KSerializer
import io.grpc.{Metadata, Status, StatusRuntimeException}
/** Kryo serializers for gRPC types that are not serializable out of the box.
  * The read methods must mirror the field order written by the corresponding
  * write methods exactly.
  */
private[coders] object GrpcSerializers {

  /** Serializes a [[Status]] as (code value, description, cause). */
  class StatusSerializer extends KSerializer[Status] {

    override def write(kryo: Kryo, output: Output, status: Status): Unit = {
      output.writeInt(status.getCode().value())
      // Description may be absent; presumably writeString tolerates null —
      // TODO confirm against the Kryo Output API.
      output.writeString(status.getDescription)
      // writeClassAndObject records the runtime class, so arbitrary
      // Throwable subclasses (or null) round-trip.
      kryo.writeClassAndObject(output, status.getCause)
    }

    override def read(kryo: Kryo, input: Input, `type`: Class[Status]): Status = {
      val code = input.readInt()
      val description = input.readString()
      val cause = kryo.readClassAndObject(input).asInstanceOf[Throwable]

      // Rebuild from the canonical code, then re-attach the details.
      Status
        .fromCodeValue(code)
        .withDescription(description)
        .withCause(cause)
    }
  }

  /** Serializes a [[StatusRuntimeException]] as its status plus trailers. */
  class StatusRuntimeExceptionSerializer extends KSerializer[StatusRuntimeException] {
    // Shared delegate for the embedded Status; lazy to defer construction.
    lazy val statusSer = new StatusSerializer()

    override def write(kryo: Kryo, output: Output, e: StatusRuntimeException): Unit = {
      kryo.writeObject(output, e.getStatus, statusSer)
      // Trailers may be null, hence writeObjectOrNull with the explicit class.
      kryo.writeObjectOrNull(output, e.getTrailers, classOf[Metadata])
    }

    override def read(
      kryo: Kryo,
      input: Input,
      `type`: Class[StatusRuntimeException]
    ): StatusRuntimeException = {
      val status = kryo.readObject(input, classOf[Status], statusSer)
      val trailers = kryo.readObjectOrNull(input, classOf[Metadata])
      new StatusRuntimeException(status, trailers)
    }
  }
}
| spotify/scio | scio-core/src/main/scala/com/spotify/scio/coders/instances/kryo/GrpcSerializers.scala | Scala | apache-2.0 | 2,216 |
package rx.lang.scala
import org.junit.Test
import org.scalatest.junit.JUnitSuite
import rx.lang.scala.observers.TestSubscriber
import scala.concurrent.Future
import scala.util.{Failure, Success, Try}
/** Verifies the `toObservable` conversions from standard Scala containers
  * (Iterable, Try, Option, Future) to rx Observables, covering both the
  * success/value and empty/failure branches of each.
  */
class ScalaTypesConversionsTests extends JUnitSuite {

  @Test
  def testIterableConversion() = {
    val it = Seq("1", "2", "3")
    val observer = TestSubscriber[String]()

    it.toObservable.subscribe(observer)

    // Elements are emitted in iteration order, then the stream completes.
    observer.assertValues("1", "2", "3")
    observer.assertNoErrors()
    observer.assertCompleted()
  }

  @Test
  def testIterableEmptyConversion() = {
    val it = List[String]()
    val observer = TestSubscriber[String]()

    it.toObservable.subscribe(observer)

    observer.assertNoValues()
    observer.assertNoErrors()
    observer.assertCompleted()
  }

  @Test
  def testTrySuccessConversion() = {
    val success = Success("abc")
    val observer = TestSubscriber[String]()

    success.toObservable.subscribe(observer)

    observer.assertValue("abc")
    observer.assertNoErrors()
    observer.assertCompleted()
  }

  @Test
  def testTryFailureConversion() = {
    val error = new IllegalArgumentException("test error")
    val failure = Failure[String](error)
    val observer = TestSubscriber[String]()

    failure.toObservable.subscribe(observer)

    // A Failure surfaces as onError with the original exception; no onCompleted.
    observer.assertNoValues()
    observer.assertError(error)
    observer.assertNotCompleted()
  }

  @Test
  def testOptionSomeConversion() = {
    val some = Option("abc")
    val observer = TestSubscriber[String]()

    some.toObservable.subscribe(observer)

    observer.assertValue("abc")
    observer.assertNoErrors()
    observer.assertCompleted()
  }

  @Test
  def testOptionNoneConversion() = {
    val some = Option.empty[String]
    val observer = TestSubscriber[String]()

    some.toObservable.subscribe(observer)

    // None becomes an empty, successfully-completed stream (not an error).
    observer.assertNoValues()
    observer.assertNoErrors()
    observer.assertCompleted()
  }

  @Test
  def testFutureSuccessfulConversion() = {
    import scala.concurrent.ExecutionContext.Implicits.global
    val fut = Future.successful("abc")
    val observer = TestSubscriber[String]()

    fut.toObservable.subscribe(observer)

    // Futures are asynchronous: block until a terminal event before asserting.
    observer.awaitTerminalEvent()
    observer.assertValue("abc")
    observer.assertNoErrors()
    observer.assertCompleted()
  }

  @Test
  def testFutureSuccessfulConversion2() = {
    import scala.concurrent.ExecutionContext.Implicits.global
    val fut = Future { "abc" }
    val observer = TestSubscriber[String]()

    fut.toObservable.subscribe(observer)

    observer.awaitTerminalEvent()
    observer.assertValue("abc")
    observer.assertNoErrors()
    observer.assertCompleted()
  }

  @Test
  def testFutureFailedConversion() = {
    import scala.concurrent.ExecutionContext.Implicits.global
    val error = new IllegalArgumentException("test error")
    val fut = Future.failed[Unit](error)
    val observer = TestSubscriber[Unit]()

    fut.toObservable.subscribe(observer)

    observer.awaitTerminalEvent()
    observer.assertNoValues()
    observer.assertError(error)
    observer.assertNotCompleted()
  }
}
| joohnnie/RxScala | src/test/scala/rx/lang/scala/ScalaTypesConversionsTests.scala | Scala | apache-2.0 | 3,070 |
package hu.frankdavid.diss.util
import scala.collection.{Iterator, mutable}
import hu.frankdavid.diss.expression.Expression
/** Insertion-ordered set of jobs that can also be traversed newest-first.
  *
  * NOTE(review): `reverseIterator` reaches into LinkedHashSet's internal
  * doubly-linked entry chain (`lastEntry` / `earlier`), which are
  * implementation details of the Scala collections library — verify they
  * remain accessible when upgrading Scala versions.
  */
class JobList extends mutable.LinkedHashSet[Expression] {
  /** Iterates entries from most recently inserted to oldest. */
  def reverseIterator: Iterator[Expression] = new Iterator[Expression] {
    private var cur = lastEntry
    def hasNext = cur ne null
    def next =
      // Delegate to the empty iterator to raise the standard
      // NoSuchElementException when exhausted.
      if (hasNext) { val res = cur.key; cur = cur.earlier; res }
      else Iterator.empty.next
  }
}
| frankdavid/diss | src/main/scala/hu/frankdavid/diss/util/JobList.scala | Scala | apache-2.0 | 436 |
package mesosphere.marathon
package integration
import mesosphere.marathon.integration.facades.MarathonFacade._
import mesosphere.marathon.integration.setup.{ EmbeddedMarathonTest, MesosConfig }
import mesosphere.marathon.raml.{ App, Container, DockerContainer, EngineType }
import mesosphere.marathon.state.PathId._
import mesosphere.{ AkkaIntegrationTest, WhenEnvSet }
@IntegrationTest
/** Integration tests that deploy Docker-containerized apps against an
  * embedded Marathon/Mesos; gated behind the RUN_DOCKER_INTEGRATION_TESTS
  * environment variable because Docker Machine setups are unsupported.
  */
@IntegrationTest
class DockerAppIntegrationTest extends AkkaIntegrationTest with EmbeddedMarathonTest {

  // Enable both containerizer backends so Docker images can be launched.
  override lazy val mesosConfig = MesosConfig(containerizers = "docker,mesos")

  // FIXME (gkleiman): Docker tests don't work under Docker Machine yet. So they can be disabled through an env variable.
  val envVar = "RUN_DOCKER_INTEGRATION_TESTS"

  //clean up state before running the test case
  after(cleanUp())

  "DockerApp" should {
    "deploy a simple Docker app" taggedAs WhenEnvSet(envVar) in {
      Given("a new Docker app")
      val app = App(
        id = (testBasePath / "dockerapp").toString,
        cmd = Some("sleep 600"),
        container = Some(Container(`type` = EngineType.Docker, docker = Some(DockerContainer(image = "busybox")))),
        cpus = 0.2, mem = 16.0,
        instances = 1
      )

      When("The app is deployed")
      val result = marathon.createAppV2(app)

      Then("The app is created")
      result.code should be(201) // Created
      extractDeploymentIds(result) should have size 1
      waitForDeployment(result)
      waitForTasks(app.id.toPath, 1) // The app has really started
    }

    "create a simple docker app using http health checks with HOST networking" taggedAs WhenEnvSet(envVar) in {
      Given("a new app")
      val app = dockerAppProxy(testBasePath / "docker-http-app", "v1", instances = 1, healthCheck = Some(appProxyHealthCheck()))
      val check = appProxyCheck(app.id.toPath, "v1", state = true)

      When("The app is deployed")
      val result = marathon.createAppV2(app)

      Then("The app is created")
      result.code should be(201) //Created
      extractDeploymentIds(result) should have size 1
      waitForDeployment(result)
      // The proxy records pings; eventual consistency, hence `eventually`.
      eventually {
        check.pinged should be(true) withClue "Docker app has not been pinged."
      }
    }
  }
}
| natemurthy/marathon | src/test/scala/mesosphere/marathon/integration/DockerAppIntegrationTest.scala | Scala | apache-2.0 | 2,213 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.oap.execution
import com.intel.oap.ColumnarPluginConfig
import com.intel.oap.expression.ConverterUtils
import com.intel.oap.vectorized.CloseableColumnBatchIterator
import org.apache.spark._
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.metric.SQLMetric
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.vectorized.ColumnarBatch
// Trivial partition descriptor: identified solely by its index.
private final case class BroadcastColumnarRDDPartition(index: Int) extends Partition
/** An RDD that materializes a broadcast [[ColumnarHashedRelation]] on each of
  * `numPartitioning` partitions, exposing its contents as ColumnarBatches.
  */
case class BroadcastColumnarRDD(
    @transient private val sc: SparkContext,
    metrics: Map[String, SQLMetric],
    numPartitioning: Int,
    inputByteBuf: broadcast.Broadcast[ColumnarHashedRelation])
    extends RDD[ColumnarBatch](sc, Nil) {

  // One placeholder partition per requested partition count.
  override protected def getPartitions: Array[Partition] = {
    (0 until numPartitioning).map { index => new BroadcastColumnarRDDPartition(index) }.toArray
  }

  override def compute(split: Partition, context: TaskContext): Iterator[ColumnarBatch] = {
    // Reuse the broadcast timeout as the relation's close countdown.
    val timeout: Int = SQLConf.get.broadcastTimeout.toInt
    // Read-only copy so tasks on the same executor don't interfere.
    val relation = inputByteBuf.value.asReadOnlyCopy
    relation.countDownClose(timeout)
    new CloseableColumnBatchIterator(relation.getColumnarBatchAsIter)
  }
}
| Intel-bigdata/OAP | oap-native-sql/core/src/main/scala/com/intel/oap/execution/BroadcastColumnarRDD.scala | Scala | apache-2.0 | 2,106 |
package com.datastax.spark.connector.cluster
import java.net.InetSocketAddress
import com.datastax.oss.driver.api.core.Version
import com.datastax.spark.connector.ccm.{CcmBridge, CcmConfig}
/** Cluster facade used by test code. */
/** Cluster facade used by test code.
  *
  * Wraps a CCM-managed Cassandra cluster, exposing node addresses and
  * per-node Spark connection parameters.
  */
case class Cluster(
    name: String,
    private[cluster] val config: CcmConfig,
    private[cluster] val ccmBridge: CcmBridge,
    private val nodeConnectionParams: InetSocketAddress => Map[String, String]) {

  // Socket addresses of all nodes, in CCM configuration order.
  val addresses: Seq[InetSocketAddress] = config.nodeAddresses()

  /** Flushes memtables to disk on every node. */
  def flush(): Unit = config.nodes.foreach(ccmBridge.flush)

  def getCassandraVersion: Version = ccmBridge.getCassandraVersion

  def getDseVersion: Option[Version] = ccmBridge.getDseVersion

  /** Flushes first so size estimates reflect all written data. */
  def refreshSizeEstimates(): Unit = {
    flush()
    config.nodes.foreach(ccmBridge.refreshSizeEstimates)
  }

  // Host/port of the first node, used as the default contact point.
  def getConnectionHost: String = addresses.head.getHostName

  def getConnectionPort: String = addresses.head.getPort.toString

  /** Connection parameters for the first node. */
  def connectionParameters: Map[String, String] = {
    connectionParameters(nodeNo = 0)
  }

  /** Connection parameters for node `nodeNo` (0-based); fails fast on an
    * out-of-range index with a descriptive message.
    */
  def connectionParameters(nodeNo: Int): Map[String, String] = {
    if (nodeNo >= 0 && nodeNo < addresses.size)
      nodeConnectionParams(addresses(nodeNo))
    else
      throw new IllegalArgumentException(s"Cluster $name has ${addresses.size} nodes, node $nodeNo does not exist.")
  }
}
| datastax/spark-cassandra-connector | connector/src/it/scala/com/datastax/spark/connector/cluster/Cluster.scala | Scala | apache-2.0 | 1,331 |
package slaq.ql
import slaq.util.{Node, ValueLinearizer}
/**
* A packed value together with its unpacking
*/
/**
 * A packed value together with its unpacking
 *
 * @tparam T the packed representation type
 * @tparam U the unpacked value type produced by `unpack`
 */
case class Unpackable[T, U](value: T, unpack: Unpack[T, U]) {
  // Cast narrows the linearizer to U; presumably guaranteed by the Unpack
  // contract — confirm in Unpack's definition.
  def linearizer = unpack.linearizer(value).asInstanceOf[ValueLinearizer[U]]
  // Applies a node transformation while keeping the packed type T.
  def mapOp(f: Node => Node) = unpack.mapOp(value, f).asInstanceOf[T]
  def reifiedNode = Node(unpack.reify(value))
  /** Reifies both the value and its unpacking, witnessed by `Reify[T, R]`. */
  def reifiedUnpackable[R](using Reify[T, R]): Unpackable[R, U] =
    Unpackable(
      unpack.reify(value).asInstanceOf[R],
      unpack.reifiedUnpack.asInstanceOf[Unpack[R, U]]
    )
  /** Pairs two unpackables, pairing both the packed and unpacked types. */
  def zip[T2, U2](s2: Unpackable[T2, U2]) =
    Unpackable[(T, T2), (U, U2)](
      (value, s2.value), Unpack.unpackTuple2(using unpack, s2.unpack)
    )
}
| godenji/slaq | src/main/scala/slaq/scalaquery/ql/Unpackable.scala | Scala | bsd-2-clause | 711 |
package mesosphere.marathon.state
import mesosphere.marathon.StoreCommandFailedException
import mesosphere.marathon.metrics.Metrics.Histogram
import mesosphere.marathon.metrics.{ MetricPrefixes, Metrics }
import mesosphere.util.LockManager
import mesosphere.util.state.PersistentStore
import org.slf4j.LoggerFactory
import scala.concurrent.Future
import scala.reflect.ClassTag
import scala.util.control.NonFatal
/** Entity store that persists protobuf-backed `MarathonState` values under a
  * fixed key prefix, serializing writes per key and recording read/write
  * payload sizes as histograms.
  *
  * @param store    underlying key/value persistence layer
  * @param metrics  metrics registry used for the size histograms
  * @param newState factory for a default (empty) state
  * @param prefix   namespace prepended to every key
  */
class MarathonStore[S <: MarathonState[_, S]](
    store: PersistentStore,
    metrics: Metrics,
    newState: () => S,
    prefix: String)(implicit ct: ClassTag[S]) extends EntityStore[S] {

  import scala.concurrent.ExecutionContext.Implicits.global
  private[this] val log = LoggerFactory.getLogger(getClass)
  // Serializes modify/expunge operations on the same key.
  private[this] lazy val lockManager = LockManager.create()

  protected[this] def metricsPrefix = MetricPrefixes.SERVICE

  protected[this] val bytesRead: Histogram =
    metrics.histogram(metrics.name(metricsPrefix, getClass, s"${ct.runtimeClass.getSimpleName}.read-data-size"))
  protected[this] val bytesWritten: Histogram =
    metrics.histogram(metrics.name(metricsPrefix, getClass, s"${ct.runtimeClass.getSimpleName}.write-data-size"))

  /** Loads and deserializes the entity at `key`, `None` if absent. */
  def fetch(key: String): Future[Option[S]] = {
    log.debug(s"Fetch $prefix$key")
    store.load(prefix + key)
      .map {
        _.map { entity =>
          bytesRead.update(entity.bytes.length)
          stateFromBytes(entity.bytes.toArray)
        }
      }
      .recover(exceptionTransform(s"Could not fetch ${ct.runtimeClass.getSimpleName} with key: $key"))
  }

  /** Atomically updates (or creates) the entity at `key` by applying `f` to
    * the current (or default) state; `onSuccess` sees the stored result.
    * Operations on the same key are serialized via the lock manager.
    */
  def modify(key: String, onSuccess: (S) => Unit = _ => ())(f: Update): Future[S] = {
    lockManager.executeSequentially(key) {
      log.debug(s"Modify $prefix$key")
      val res = store.load(prefix + key).flatMap {
        case Some(entity) =>
          bytesRead.update(entity.bytes.length)
          val updated = f(() => stateFromBytes(entity.bytes.toArray))
          val updatedEntity = entity.withNewContent(updated.toProtoByteArray)
          bytesWritten.update(updatedEntity.bytes.length)
          store.update(updatedEntity)
        case None =>
          // No existing entity: seed the update function with a fresh state.
          val created = f(() => newState()).toProtoByteArray
          bytesWritten.update(created.length)
          store.create(prefix + key, created)
      }
      res.map { entity =>
        val result = stateFromBytes(entity.bytes.toArray)
        onSuccess(result)
        result
      }.recover(exceptionTransform(s"Could not modify ${ct.runtimeClass.getSimpleName} with key: $key"))
    }
  }

  /** Deletes the entity at `key`; `onSuccess` runs after a successful delete. */
  def expunge(key: String, onSuccess: () => Unit = () => ()): Future[Boolean] = lockManager.executeSequentially(key) {
    log.debug(s"Expunge $prefix$key")
    store.delete(prefix + key).map { result =>
      onSuccess()
      result
    }.recover(exceptionTransform(s"Could not expunge ${ct.runtimeClass.getSimpleName} with key: $key"))
  }

  /** Lists all entity names within this store's prefix, with the prefix
    * stripped off.
    */
  def names(): Future[Seq[String]] = {
    store.allIds()
      .map {
        _.collect {
          // BUGFIX: previously used name.replaceFirst(prefix, ""), which
          // interprets the prefix as a regex — a prefix containing
          // metacharacters ('.', '$', ...) would mis-match or throw.
          // stripPrefix removes the literal prefix only.
          case name: String if name startsWith prefix => name.stripPrefix(prefix)
        }
      }
      .recover(exceptionTransform(s"Could not list names for ${ct.runtimeClass.getSimpleName}"))
  }

  // Wraps any non-fatal failure into StoreCommandFailedException with context.
  private[this] def exceptionTransform[T](errorMessage: String): PartialFunction[Throwable, T] = {
    case NonFatal(ex) => throw new StoreCommandFailedException(errorMessage, ex)
  }

  // Protobuf bytes are merged into a fresh default state.
  private def stateFromBytes(bytes: Array[Byte]): S = {
    newState().mergeFromProto(bytes)
  }

  override def toString: String = s"MarathonStore($prefix)"
}
| yp-engineering/marathon | src/main/scala/mesosphere/marathon/state/MarathonStore.scala | Scala | apache-2.0 | 3,493 |
/*
* Copyright 2018 CJWW Development
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package common
import com.cjwwdev.auth.backend.BaseAuth
import com.cjwwdev.identifiers.IdentifierValidation
import com.cjwwdev.responses.ApiResponse
import com.cjwwdev.security.deobfuscation.DeObfuscator
import play.api.libs.json.Json
import play.api.mvc._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Try
/**
 * Base trait for backend controllers, mixing in authentication checks,
 * identifier validation and the standard API response envelope.
 */
trait BackendController
  extends BaseController
    with BaseAuth
    with IdentifierValidation
    with ApiResponse {

  // Execution context supplied by the concrete controller implementation.
  implicit val ec: ExecutionContext

  /**
   * Decrypts an obfuscated URL fragment and runs `f` on the decoded value.
   *
   * NOTE(review): the first fold branch receives the successfully decrypted value
   * and the second the decryption error — confirm against the DeObfuscator API.
   * On failure: if the error message parses as JSON it is reported as a JsError
   * ("missing field"); otherwise a generic decryption failure is returned.
   * Both failure paths answer with 400 Bad Request.
   */
  def withEncryptedUrl[T](enc: String)(f: T => Future[Result])(implicit request: Request[_], deObfuscation: DeObfuscator[T]): Future[Result] = {
    deObfuscation.decrypt(enc).fold(
      data => f(data),
      err => Try(Json.parse(err.message)).fold(
        // Error message was not JSON -> generic "could not decrypt" response.
        _ => withFutureJsonResponseBody(BAD_REQUEST, s"Couldn't decrypt request body on ${request.path}") { json =>
          Future.successful(BadRequest(json))
        },
        // Error message parsed as JSON -> report the missing-field JsError.
        jsError => withFutureJsonResponseBody(BAD_REQUEST, jsError, "Decrypted json was missing a field") { json =>
          Future.successful(BadRequest(json))
        }
      )
    )
  }
}
| cjww-development/auth-microservice | app/common/BackendController.scala | Scala | apache-2.0 | 1,711 |
package com.sksamuel.elastic4s.streams
import akka.actor.ActorSystem
import com.sksamuel.elastic4s.{ElasticClient, ElasticDsl, IndexAndTypes, SearchDefinition}
import scala.concurrent.duration._
import scala.language.implicitConversions
/**
 * Enriches [[ElasticClient]] with factories for reactive-streams integration:
 * bulk-indexing `Subscriber`s and scroll-search `Publisher`s.
 */
object ReactiveElastic {

  implicit class ReactiveElastic(client: ElasticClient) {

    import ElasticDsl._

    /** Creates a bulk-indexing subscriber from an explicit configuration. */
    def subscriber[T](config: SubscriberConfig)
                     (implicit builder: RequestBuilder[T], system: ActorSystem): BulkIndexingSubscriber[T] = {
      new BulkIndexingSubscriber[T](client, builder, config)
    }

    /**
     * Creates a bulk-indexing subscriber from individual settings; see
     * `SubscriberConfig` for the meaning of each parameter.
     */
    def subscriber[T](batchSize: Int = 100,
                      concurrentRequests: Int = 5,
                      refreshAfterOp: Boolean = false,
                      listener: ResponseListener = ResponseListener.noop,
                      completionFn: () => Unit = () => (),
                      errorFn: Throwable => Unit = _ => (),
                      flushInterval: Option[FiniteDuration] = None,
                      flushAfter: Option[FiniteDuration] = None,
                      failureWait: FiniteDuration = 2.seconds,
                      maxAttempts: Int = 5)
                     (implicit builder: RequestBuilder[T], system: ActorSystem): BulkIndexingSubscriber[T] = {
      val config = SubscriberConfig(
        batchSize = batchSize,
        concurrentRequests = concurrentRequests,
        refreshAfterOp = refreshAfterOp,
        listener = listener,
        completionFn = completionFn,
        errorFn = errorFn,
        failureWait = failureWait,
        flushInterval = flushInterval,
        flushAfter = flushAfter,
        maxAttempts = maxAttempts
      )
      subscriber(config)
    }

    /**
     * Creates a publisher over a match-all scroll query on the given index/type.
     *
     * Bug fix: `elements` was previously ignored (delegation fell through to the
     * single-argument overload, which hard-codes Long.MaxValue). It is now
     * forwarded so the requested element limit is honoured; the default value
     * preserves the old behaviour for existing callers.
     */
    def publisher(indexType: IndexAndTypes, elements: Long = Long.MaxValue, keepAlive: String = "1m")
                 (implicit system: ActorSystem): ScrollPublisher = {
      publisher(search in indexType query "*:*" scroll keepAlive, elements)
    }

    /** Publishes all hits of the given search (no element limit). */
    def publisher(q: SearchDefinition)(implicit system: ActorSystem): ScrollPublisher = publisher(q, Long.MaxValue)

    /** Publishes at most `elements` hits of the given search. */
    def publisher(q: SearchDefinition, elements: Long)
                 (implicit system: ActorSystem): ScrollPublisher = {
      new ScrollPublisher(client, q, elements)
    }
  }
}
| muuki88/elastic4s | elastic4s-streams/src/main/scala/com/sksamuel/elastic4s/streams/ReactiveElastic.scala | Scala | apache-2.0 | 2,249 |
package org.cloudio.morpheus.tutor.chat.frag.step7
import java.util.Locale
import org.morpheus._
import Morpheus._
/**
* Making the Contact entity immutable.
*
* Created by zslajchrt on 04/05/15.
*/
/** Intentionally empty in this tutorial step. */
object Session
// Morpheus dimension declaring the data every contact exposes.
// (@dimension comes from the morpheus library.)
@dimension
trait Contact {
  val firstName: String
  val lastName: String
  // true when the contact is male
  val male: Boolean
  val email: String
  val nationality: Locale
}
/** Plain immutable carrier implementing all [[Contact]] fields. */
case class ContactData(
  firstName: String,
  lastName: String,
  male: Boolean,
  email: String,
  nationality: Locale) extends Contact
// Fragment representing a contact that is currently offline; delegates the
// Contact data to the wrapped instance (morpheus `dlg`).
@fragment
trait OfflineContact extends dlg[Contact] {
  // Stub: always reports that the contact could not be brought online.
  def tryOnline(): Boolean = false // todo
}
// Fragment representing a contact that is currently online; delegates the
// Contact data to the wrapped instance.
@fragment
trait OnlineContact extends dlg[Contact] {
}
/**
 * Demo entry point: builds an immutable contact morph and re-morphs it through
 * different connectivity/printer/channel fragment combinations.
 */
object App {

  def main(args: Array[String]) {
    val contactData = ContactData("Pepa", "Novák", male = true, email="pepa@depo.cz", Locale.CANADA)
    // Fragment factories sharing the same immutable contact data.
    implicit val offlineContactFrag = single[OfflineContact, Contact](contactData)
    implicit val onlineContactFrag = single[OnlineContact, Contact](contactData)

    // Kernel over three independent dimensions: connectivity, printer, channel.
    val contactKernel = singleton[(OfflineContact or OnlineContact) with
      (ContactRawPrinter or ContactPrettyPrinter) with
      (StandardOutputChannel or MemoryOutputChannel)]

    val contact = contactKernel.~

    // A coordinate selects the active alternative within each dimension.
    var contactCoord: Int = 0
    val contactDimStr = promote[OfflineContact or OnlineContact](contactKernel.defaultStrategy, contactCoord)
    var printerCoord: Int = 0
    val printerDimStr = promote[ContactRawPrinter or ContactPrettyPrinter](contactDimStr, printerCoord)
    var channelCoord: Int = 0
    val channelDimStr = promote[StandardOutputChannel or MemoryOutputChannel](printerDimStr, channelCoord)

    contact.remorph(channelDimStr)
    contact.printContact()

    // Flip the coordinates and re-morph: the same proxy now exposes a different
    // combination of fragments.
    contactCoord = 1
    printerCoord = 1
    channelCoord = 0

    contact.remorph()
    contact.printContact()

//    // Note: the only mutable part is the buffer in the memory channel
//
//    altNum = 0
//    val contact3 = contactKernel.morph_~(morphStrategy)
//    contact3.printContact()
//    altNum = 1
//    contact3.remorph()
//    contact3.printContact()

    // Note: there is only one mutable part besides the memory channel's buffer, which is the atomic reference in
    // the contact3 morph proxy holding the immutable morph.

  }

//
//  /**
//   * Using a composite reference with a parameter to clone a prefabricated prototype composite.
//   */
//  def main2(args: Array[String]) {
//
//    type ContactAddOns = (ContactRawPrinter or ContactPrettyPrinter) with (StandardOutputChannel or MemoryOutputChannel)
//
//    val contactKernel = singleton_?[ContactAddOns]
//    val contactRef: &?[$[Contact] with ContactAddOns] = contactKernel
//
//    val contactCfg = ContactConfig("Pepa", "Novák", male = true, email = "pepa@gmail.com", Locale.CANADA)
//    val contactKernel1 = *(contactRef, single[Contact, ContactData](contactCfg))
//
//    val chinaConfig = contactCfg.copy(nationality = Locale.CHINA)
//    val contactFrag2 = single[Contact, ContactData](chinaConfig)
//    val contactKernel2 = *(contactRef, contactFrag2)

//    contactKernel1.!.printContact()
//    contactKernel2.!.printContact()
//  }
//
//}
//
//

}
// Dimension describing where printed text is sent.
@dimension
trait OutputChannel {
  /** Writes the given text to this channel. */
  def printText(text: String): Unit
}
// Output channel writing straight to standard out.
@fragment
trait StandardOutputChannel extends OutputChannel {
  /** Forwards the text verbatim to the console (Predef.print delegates to Console.print). */
  override def printText(text: String): Unit = Console.print(text)
}
// Output channel accumulating everything written into an in-memory buffer.
@fragment
trait MemoryOutputChannel extends OutputChannel {

  // NOTE: mutable state — this buffer is the channel's only mutable part.
  val outputBuffer = new StringBuilder()

  override def printText(text: String): Unit = outputBuffer.append(text)
}
// Dimension responsible for rendering a contact.
@dimension
trait ContactPrinter {
  /** Renders the contact; both implementations below write to a mixed-in OutputChannel. */
  def printContact(): Unit
}
// Minimal printer: one space-separated line of the core contact fields.
@fragment
trait ContactRawPrinter extends ContactPrinter {
  this: Contact with OutputChannel =>

  /** Prints first name, last name, nationality and the male flag on one line. */
  def printContact(): Unit =
    printText(Seq(firstName, lastName, nationality, male).mkString(" "))
}
// Pretty printer: renders the contact as a multi-line block and reports whether
// the contact is currently online or offline.
@fragment
trait ContactPrettyPrinter extends ContactPrinter {
  this: (OfflineContact or OnlineContact) with OutputChannel =>

  def printContact(): Unit = {
    // morpheus `select` extracts the Contact facet of this morph, if present.
    select[Contact](this) match {
      case None =>
      case Some(contact) =>
        printText(
          s"""
           First Name: ${contact.firstName}
           Second Name: ${contact.lastName}
           Male: ${contact.male}
           Nationality: ${contact.nationality}
        """)
        // Report connectivity based on which fragment is active in the morph.
        contact match {
          case offline: OfflineContact =>
            println("is offline")
          case online: OnlineContact =>
            println("is online")
        }
    }
  }
}
| zslajchrt/morpheus-tutor | src/main/scala/org/cloudio/morpheus/tutor/chat/frag/step7/Session.scala | Scala | apache-2.0 | 4,520 |
package api.domain
import spray.json.{DefaultJsonProtocol, JsonFormat}
// Aggregates the JSON protocols shared across the api package.
private[api]
trait CommonJsonProtocols extends ResponseJsonProtocol
/** Standard API response envelope: outcome flag, optional payload, optional redirect and error list. */
private[api]
case class RequestResponse[+T](success: Boolean, content: Option[T], redirect: Option[String], errors: List[String])
// Companion exposing the spray-json format for RequestResponse via ResponseJsonProtocol.
private[api]
object RequestResponse extends ResponseJsonProtocol
// spray-json protocol for the RequestResponse envelope.
private[api]
trait ResponseJsonProtocol extends DefaultJsonProtocol {
  // NOTE(review): implicit def without an explicit result type relies on
  // inference (jsonFormat4 yields a format for RequestResponse[T]); consider
  // annotating the result type explicitly.
  implicit def responseResultFormat[T:JsonFormat] = jsonFormat4(RequestResponse.apply[T])
}
} | onurzdg/spray-app | src/main/scala/api/domain/CommonJsonProtocols.scala | Scala | apache-2.0 | 500 |
package com.lunatic.mlx.kddcup99.mllib.modgen
import com.lunatic.mlx.kddcup99._
import com.lunatic.mlx.kddcup99.mllib.metadata._
import AnalysedPrediction._
import com.lunatic.mlx.kddcup99.mllib.Configuration
import org.apache.spark.SparkContext
import org.apache.spark.annotation.Experimental
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext
import org.elasticsearch.spark.sql._
import scala.util.Try
/**
 * For now this is not a true predictor, but rather a re-evaluator for the existing
 * model; useful for adding more meaningful information about the model in the
 * output (.results_1).
 */
@Experimental
class PredictKMeans extends SparkRunnable {

  import PredictKMeans._

  // Entry point invoked by the job runner.
  def run(implicit sc: SparkContext, appConf: Configuration) = {
    predict
    //predictAndSaveSmallSample
  }
}
object PredictKMeans {

  def apply() = new PredictKMeans

  /**
   * Re-runs the stored KMeans models over the train and test splits, writes a
   * human-readable prediction report to disk and best-effort indexes the raw
   * detections into Elasticsearch.
   */
  def predict(implicit sc: SparkContext, appConf: Configuration): Unit = {
    val k_indices = appConf.kmeansTrainingTemplate.cluster_range
    val iteration_indices = appConf.kmeansTrainingTemplate.iteration_range
    val epsilon_indices = appConf.kmeansTrainingTemplate.epsilon_range
    val runs = appConf.kmeansTrainingTemplate.runs

    val files = Seq((appConf.trainDataSplitPath, "train"), (appConf.testDataSplitPath, "test"))

    // One pass per (file, k, iterations, epsilon) combination; each combination
    // corresponds to a previously trained model persisted on disk.
    for (file <- files;
         kValue <- k_indices;
         iterations <- iteration_indices;
         epsilon <- epsilon_indices
    ) yield {
      val params = KMeansParams(kValue, epsilon, iterations, runs)
      val modelFile = stubFilePath(params) + ".model"
      val kMeansXMD = loadObjectFromFile[KMeansXMD](modelFile + ".xmd").get

      val filePath = file._1
      val fileLabel = file._2

      val rawData = sc.objectFile[Array[String]](filePath)

      val predictions = predict(kMeansXMD, rawData)

      val detections = detectAnomalies(predictions, kMeansXMD.anomalyThresholds)

      val results = toOutputLines(detections, kMeansXMD.anomalyThresholds)

      val predFile = stubFilePath(params) + s"_${fileLabel}.prediction"
      saveLinesToFile(results, predFile)

      // Try saving also to ES; a failure here is swallowed by design (best effort).
      val sqc = new SQLContext(sc)
      val schema = AnalysedPrediction.schema(kMeansXMD.anomalyThresholds.map(_._1))
      val dataDF = sqc.createDataFrame(detections, schema)
      Try(dataDF.saveToEs(appConf.esIndexRoot + "/predictions_stream"))
    }
  }

  /**
   * Assigns each record to its nearest cluster.
   *
   * @param mx  model plus pre-processor bundle
   * @param rdd input records
   * @return per-record cluster id, squared distance to the centroid, model id,
   *         original data and label ("???" when the record is unlabelled)
   */
  def predict(mx: KMeansXMD, rdd: RDD[KddRecord])
             (implicit sc: SparkContext, appConf: Configuration): RDD[Prediction] = {
    rdd.map { kddr =>
      val vector = mx.preProcessor.transform(kddr.data.map(_.toString))
      val model = mx.model
      val k = model.predict(vector)
      val distance = Vectors.sqdist(vector, model.clusterCenters(k))
      Prediction(k, distance, mx.id, kddr.data, kddr.label.getOrElse("???"))
    }
  }

  /**
   * Detect which record is an anomaly based on the given threshold (per cluster)
   *
   * @param rdd               predictions to analyse
   * @param anomalyThresholds per-strategy, per-cluster distance thresholds
   * @return each prediction annotated with one anomaly verdict per strategy
   */
  def detectAnomalies(rdd: RDD[Prediction], anomalyThresholds: Seq[(String, Seq[Double])]): RDD[AnalysedPrediction] = {
    rdd.map{ prediction =>
      val k = prediction.cluster
      val distance = prediction.distance
      val anomalyByStrategy = anomalyThresholds.map { case (name, thresholds) =>
        val threshold = thresholds(k)
        (name, distance > threshold)
      }
      AnalysedPrediction(anomalyByStrategy, prediction)
    }
  }

  /**
   * Print some details about anomalies detected
   *
   * @param apreds            analysed predictions
   * @param anomalyThresholds per-strategy thresholds (echoed in the summary)
   * @return report lines: summary, table header, then one row per anomaly
   */
  def toOutputLines(apreds: RDD[AnalysedPrediction], anomalyThresholds: Seq[(String, Seq[Double])]) = {

    val totalPredictions = apreds.count()

    // We consider it an anomaly if any of the thresholds we applied yield true (anomaly)
    val anomalies = apreds.filter { case (apred) =>
      apred.anomalyPredictions.map(_._2).foldLeft(false)((accu, ano) => accu || ano)
    }

    val totalAno = anomalies.count()

    // Per strategy, count how many records that strategy flagged as anomalous.
    val anomaliesByStrategy = anomalies
      .flatMap(x => x.anomalyPredictions).groupByKey()
      .map(pair => (pair._1, pair._2.count(_ == true))).collect().sortBy(_._1).toSeq

    val summary = "" +:
      f"Distance Thresholds: " +:
      f"${anomalyThresholds.map{ case(name, thresholds) =>
      val thrsStr = thresholds.mkString(", ")
      f"  ${name}%-28s : ${thrsStr}" }.
      mkString("\\n")}" +:
      f"Total predictions: $totalPredictions" +:
      f"Total anomalies: $totalAno" +:
      f"Anomalies By Strategy: " +:
      anomaliesByStrategy.map {
        pair => f"| ${pair._1}%-28s | ${pair._2}%8d |"
      }.mkString("\\n") +:
      "" +: Nil

    val anomaliesHeader = anomalyThresholds.map { case (name, thrs) => f"${name}%-28s" }.mkString(" | ")

    val header =
      f"| ${anomaliesHeader} | ${"Timestamp"}%-28s | ${"Label"}%-20s | ${"Clust"}%-5s | ${"Distance"}%-12s | ${"Model Id"}%-30s | Input Vector" +:
        Nil

    val table =
      anomalies.map { case (apred) =>
        val anoData = apred.anomalyPredictions.map{ case(name, ano) =>
          if(ano) f"${"ANOMALY"}%-28s" else f"${" "}%-28s"
        }.mkString(" | ")
        val prediction = apred.prediction
        f"| ${anoData} | ${prediction.timestamp}%-28s | ${prediction.label}%-20s | ${prediction.cluster}%5d | ${prediction.distance}%12.6f | ${prediction.modelId}%-30s | ${prediction.kddr.data.mkString(", ")} |"
      }.collect.toSeq

    summary ++ header ++ table
  }
}
| tupol/sparx-mllib | src/main/scala/com/lunatic/mlx/kddcup99/mllib/modgen/PredictKMeans.scala | Scala | apache-2.0 | 5,557 |
package com.vivint.ceph.model
/**
 * A network location that may be only partially known; concrete subtypes range
 * from a bare IP/port guess to a full service address with hostname.
 */
sealed trait Location {
  /** IP address, if known. */
  def ipOpt: Option[String]
  /** Port, if known. */
  def portOpt: Option[Int]
  /** Hostname, if known. */
  def hostnameOpt: Option[String]
  /** Returns a location with the IP replaced (or filled in). */
  def withIP(ip: String): Location
}
object Location {
  // A location with nothing known yet.
  val empty = PartialLocation(None, None)
}
/** A location where the IP and/or port may still be unknown. */
case class PartialLocation(ip: Option[String], port: Option[Int]) extends Location {
  def ipOpt: Option[String] = ip
  def portOpt: Option[Int] = port
  def hostnameOpt: Option[String] = None

  /** Fills in the IP; upgrades to an [[IPLocation]] once the port is also known. */
  def withIP(ip: String) =
    port.fold[Location](PartialLocation(Some(ip), None))(p => IPLocation(ip, p))
}
// Common behaviour for locations whose IP and port are definitely known.
sealed trait IPLocationLike extends Location {
  def ip: String
  def port: Int
  def ipOpt: Option[String] = Some(ip)
  def portOpt: Option[Int] = Some(port)
}
/** A fully-known IP endpoint (no hostname). */
case class IPLocation(ip: String, port: Int) extends Location with IPLocationLike {
  /** Replaces the IP while keeping the port. */
  def withIP(ip: String) = IPLocation(ip, port)
  def hostnameOpt = None
}
/** An IP endpoint that also carries the service's hostname. */
case class ServiceLocation(hostname: String, ip: String, port: Int) extends Location with IPLocationLike {
  /** Replaces the IP while keeping hostname and port. */
  def withIP(ip: String) = ServiceLocation(hostname, ip, port)
  def hostnameOpt = Some(hostname)
}
| vivint-smarthome/ceph-on-mesos | src/main/scala/com/vivint/ceph/model/Location.scala | Scala | apache-2.0 | 1,072 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.geotools.tools.export
import com.beust.jcommander.Parameters
import org.geotools.data.DataStore
import org.locationtech.geomesa.geotools.tools.GeoToolsDataStoreCommand
import org.locationtech.geomesa.geotools.tools.GeoToolsDataStoreCommand.GeoToolsDataStoreParams
import org.locationtech.geomesa.geotools.tools.export.GeoToolsPlaybackCommand.GeoToolsPlaybackParams
import org.locationtech.geomesa.tools.export.PlaybackCommand
import org.locationtech.geomesa.tools.export.PlaybackCommand.PlaybackParams
// Playback command wired to a plain GeoTools DataStore.
class GeoToolsPlaybackCommand extends PlaybackCommand[DataStore] with GeoToolsDataStoreCommand {
  override val params: GeoToolsPlaybackParams = new GeoToolsPlaybackParams
}
object GeoToolsPlaybackCommand {
  // JCommander parameter container combining the generic playback options with
  // the GeoTools data-store connection options.
  @Parameters(commandDescription = "Playback features from a data store, based on the feature date")
  class GeoToolsPlaybackParams extends PlaybackParams with GeoToolsDataStoreParams
}
| aheyne/geomesa | geomesa-gt/geomesa-gt-tools/src/main/scala/org/locationtech/geomesa/geotools/tools/export/GeoToolsPlaybackCommand.scala | Scala | apache-2.0 | 1,396 |
package org.otw.open.screens
import com.badlogic.gdx.scenes.scene2d.Actor
import com.badlogic.gdx.utils.Array
import org.otw.open.actors.{StaticAnimationActor, MenuButtonActor, BackgroundActor}
import org.otw.open.controllers._
import org.otw.open.listeners.DispatchEventListener
import org.otw.open.testconfig.UnitSpec
import org.scalatest.BeforeAndAfterEach
/**
 * Unit tests for ActionResultScreen: verify the actors placed on the stage,
 * their image/atlas assets, positions and attached listeners for both the
 * successful and unsuccessful game outcome.
 *
 * Created by eilievska on 2/22/2016.
 */
class ActionResultScreenTest extends UnitSpec with BeforeAndAfterEach {

  test("when build stage is invoked, the stage should have 6 actors") {
    val screen = new ActionResultScreen(true)
    screen.buildStage()
    val allActors: Array[Actor] = screen.getActors
    assert(allActors.size == 6)
  }

  test("when stage is build, the first actor should be the Background Actor.") {
    val screen = new ActionResultScreen(true)
    screen.buildStage()
    val allActors: Array[Actor] = screen.getActors
    // Only the type of the first actor matters here.
    allActors.get(0) match {
      case x: BackgroundActor => assert(true)
      case _ => assert(false)
    }
  }

  test("when stage is build and level is 4, the second actor should be disabled next level button") {
    GameState.setLevel(4)
    val screen = new ActionResultScreen(true)
    screen.buildStage()
    val actor = screen.getActors.get(2) match {
      case x: MenuButtonActor => x
      case _ => throw new ClassCastException
    }
    // The last level has no "next": no listener and a disabled image.
    assert(actor.getListeners.size == 0)
    assert(actor.imageFileName == "disabled-next-level.png")
    assert(actor.position.x == 722 && actor.position.y == 5)
  }

  test("when stage is build and level is NOT 4, the second actor should be next level button") {
    GameState.setLevel(2)
    val screen = new ActionResultScreen(true)
    screen.buildStage()
    val actor = screen.getActors.get(2) match {
      case x: MenuButtonActor => x
      case _ => throw new ClassCastException
    }
    assert(actor.imageFileName == "next-level.png")
    assert(actor.position.x == 722 && actor.position.y == 5)
    assert(actor.getListeners.size == 1)
    // The single listener must dispatch the NextLevel event.
    val listener = actor.getListeners.get(0) match {
      case x: DispatchEventListener => x
      case _ => throw new ClassCastException
    }
    assert(listener.screenChangeEvent == NextLevel)
  }

  test("when stage is build the third actor should be retry level button") {
    val screen = new ActionResultScreen(true)
    screen.buildStage()
    val actor = screen.getActors.get(3) match {
      case x: MenuButtonActor => x
      case _ => throw new ClassCastException
    }
    assert(actor.imageFileName == "retry-level.png")
    assert(actor.position.x == 535 && actor.position.y == 5)
    assert(actor.getListeners.size == 1)
    val listener = actor.getListeners.get(0) match {
      case x: DispatchEventListener => x
      case _ => throw new ClassCastException
    }
    assert(listener.screenChangeEvent == RetryLevel)
  }

  test("when stage is build the fourth actor should be to main menu button") {
    val screen = new ActionResultScreen(true)
    screen.buildStage()
    val actor = screen.getActors.get(4) match {
      case x: MenuButtonActor => x
      case _ => throw new ClassCastException
    }
    assert(actor.imageFileName == "to-main-menu.png")
    assert(actor.position.x == 348 && actor.position.y == 5)
    assert(actor.getListeners.size == 1)
    val listener = actor.getListeners.get(0) match {
      case x: DispatchEventListener => x
      case _ => throw new ClassCastException
    }
    assert(listener.screenChangeEvent == ToMainMenu)
  }

  test("when stage is build the fifth actor should be to other theme button") {
    val screen = new ActionResultScreen(true)
    screen.buildStage()
    val actor = screen.getActors.get(5) match {
      case x: MenuButtonActor => x
      case _ => throw new ClassCastException
    }
    assert(actor.imageFileName == "to-other-theme.png")
    assert(actor.position.x == 909 && actor.position.y == 5)
    assert(actor.getListeners.size == 1)
    val listener = actor.getListeners.get(0) match {
      case x: DispatchEventListener => x
      case _ => throw new ClassCastException
    }
    assert(listener.screenChangeEvent == OtherTheme)
  }

  test("when stage is build, and action IS SUCCESSFUL the sixth actor should be a static animation actor") {
    val screen = new ActionResultScreen(true)
    screen.buildStage()
    val actor = screen.getActors.get(1) match {
      case x: StaticAnimationActor => x
      case _ => throw new ClassCastException
    }
    // Success shows the happy animation.
    assert(actor.atlasFileName == "happy-animation.atlas")
    assert(actor.position.x == 464 && actor.position.y == 194)
  }

  test("when stage is build, and action IS NOT SUCCESSFUL the sixth actor should be a static animation actor") {
    val screen = new ActionResultScreen(false)
    screen.buildStage()
    val actor = screen.getActors.get(1) match {
      case x: StaticAnimationActor => x
      case _ => throw new ClassCastException
    }
    // Failure shows the unhappy animation.
    assert(actor.atlasFileName == "unhappy-animation.atlas")
    assert(actor.position.x == 464 && actor.position.y == 194)
  }

}
| danielEftimov/OPEN | core/test/org/otw/open/screens/ActionResultScreenTest.scala | Scala | apache-2.0 | 5,039 |
package com.ldaniels528.ricochet.entity
import java.awt.Color
/**
 * Represents a semi-stationary brick that slowly drifts downward.
 * @author "Lawrence Daniels" <lawrence.daniels@gmail.com>
 */
case class Brick(x: Double, var y: Double, width: Int, height: Int, color: Color)
  extends GenericBrick(width, height, color) {

  /** Advances the brick's vertical position in proportion to the elapsed tick. */
  override def update(tick: Double, maxX: Int, maxY: Int) = {
    y = y + tick / 50d
  }
}
} | ldaniels528/ricochet | src/main/scala/com/ldaniels528/ricochet/entity/Brick.scala | Scala | apache-2.0 | 387 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.