code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
package com.nabijaczleweli.minecrasmer.util
import com.nabijaczleweli.minecrasmer.compat.ICompat
import com.nabijaczleweli.minecrasmer.reference.{Configuration, Reference}
import com.nabijaczleweli.minecrasmer.util.StringUtils._
import net.minecraftforge.fml.common.Loader.isModLoaded
object CompatUtil {
  /** Enrichment for [[ICompat]] instances: mod-presence checks and config-key helpers. */
  implicit class CompatConv(val compat: ICompat) extends AnyVal {
    /** True when every mod ID this compat requires is loaded.
      * A `null` ID denotes vanilla Minecraft and always counts as loaded. */
    def hasAllLoaded: Boolean =
      compat.getModIDs forall { modid => modid == null || isModLoaded(modid) }

    /** Human-readable list of the required mods, e.g. `"A, B and C"`;
      * `"Vanilla"` when no mods are required (`null` IDs also render as "Vanilla"). */
    def getModList: String =
      if(compat.getModIDs.isEmpty)
        "Vanilla"
      else {
        val names = compat.getModIDs map { modid =>
          if(modid == null)
            "Vanilla"
          else
            modid
        }
        // Join as "a, b and c" — comma-separated with "and" before the last entry.
        if(names.size == 1)
          names.last
        else
          names.init.mkString(", ") + " and " + names.last
      }

    /** Concatenation of the capitalised mod IDs, used as the config-key suffix;
      * `"Vanilla"` when no mods are required. */
    def getModIds: String =
      if(compat.getModIDs.isEmpty)
        "Vanilla"
      else
        compat.getModIDs.map { modid =>
          if(modid == null)
            "Vanilla"
          else
            modid toUpper 0 // capitalise the first character (StringUtils extension)
        }.mkString

    /** Reads (creating it with default `true` if absent) the `preLoad<Ids>` config flag. */
    def shouldPreLoad = {
      val toRet = Configuration.config.getBoolean(s"preLoad$getModIds", Reference.CONFIG_COMPAT_CATEGORY, true, s"Should the compat with $getModList be preLoaded.")
      Configuration.saveIfNeeded()
      toRet
    }

    /** Reads (creating it with default `true` if absent) the `load<Ids>` config flag. */
    def shouldLoad = {
      val toRet = Configuration.config.getBoolean(s"load$getModIds", Reference.CONFIG_COMPAT_CATEGORY, true, s"Should the compat with $getModList be loaded.")
      Configuration.saveIfNeeded()
      toRet
    }
  }

  /** Enrichment for compat classes. */
  implicit class CompatClassExt(val cls: Class[_ <: ICompat]) extends AnyVal {
    /** Simple class name with any trailing `$`-suffix (Scala object/anon marker) stripped. */
    def getSimplestName =
      cls.getSimpleName before "$"
  }
}
| nabijaczleweli/ASMifier | src/main/scala/com/nabijaczleweli/minecrasmer/util/CompatUtil.scala | Scala | mit | 1,856 |
package vggames.dbff
import javax.servlet.ServletContextListener
import javax.servlet.ServletContextEvent
import java.util.Enumeration
import scala.io.Source
import scala.slick.driver.SQLiteDriver.simple._
import scala.slick.jdbc.JdbcBackend.Database
import scala.slick.jdbc.JdbcBackend.Database.dynamicSession
import java.sql.DriverManager
import java.sql.ResultSet
import java.sql.PreparedStatement
import java.util.Scanner
class Listener extends ServletContextListener {
  // Enriches a migration-file line with a `version` accessor. Each line is
  // expected to begin with a numeric version prefix; the extractor pattern
  // below throws a MatchError for malformed lines (input assumed well-formed).
  implicit def addVersion(line: String) = new {
    val regex = "^(\\\\d+).*".r
    val regex(versionAsString) = line
    def version = versionAsString.toInt
  }
  // On webapp startup: read the list of migration scripts and apply every
  // script whose version is newer than the schema version recorded in the DB.
  override def contextInitialized(event: ServletContextEvent) {
    val dbVersion = DB.version
    Read("/dbff/dbff.files").getLines.foreach { line =>
      if (line.version > dbVersion) {
        DB.bump(line, line.version)
      }
    }
  }
  // Nothing to clean up on shutdown.
  override def contextDestroyed(event: ServletContextEvent) {
  }
}
/** Opens a classpath resource as a [[scala.io.Source]] for line-by-line reading. */
object Read {
  def apply(filename: String) =
    Source.fromInputStream(getClass.getResourceAsStream(filename))
}
object DB {
  // Register the SQLite JDBC driver once, when this object is first touched.
  Class.forName("org.sqlite.JDBC")

  /**
   * Current schema version. Creates and seeds the bookkeeping table
   * (at version 0) on first run.
   */
  def version: Int = {
    if (versionTableExists)
      query("""SELECT version FROM dbff_version;""") { rs =>
        // A fresh ResultSet is positioned *before* the first row; advance to
        // it before reading the column (the original read immediately, which
        // throws under JDBC semantics).
        rs.next()
        rs.getInt(1)
      }
    else {
      println("Criando tabela de controle de versao do schema")
      update("""CREATE TABLE dbff_version ("version" int)""")
      update("""INSERT INTO dbff_version values(0)""")
      0
    }
  }

  /** Runs every line of the named migration script, then records `version` as current. */
  def bump(name: String, version: Int) = {
    val filename = "/dbff/" + name
    println("Rodando script " + filename)
    Read(filename).getLines.foreach { line =>
      println("Running: " + line)
      update(line)
    }
    update("UPDATE dbff_version SET version=" + version)
  }

  /** True when the dbff_version bookkeeping table already exists. */
  def versionTableExists: Boolean = query("SELECT name FROM sqlite_master WHERE type='table' AND name='dbff_version';")(_.next)

  /** Executes a DML/DDL statement, discarding the update count. */
  def update(sql: String): Unit = run(sql, stmt => { stmt.executeUpdate; Noop }, noop)

  /** Executes a query and maps its ResultSet through `produceResult`. */
  def query[T](sql: String)(produceResult: ResultSet => T): T = run(sql, _.executeQuery, produceResult)

  // Structural "anything with close()" type so run/autoclose work both for
  // JDBC resources and for the no-op marker below.
  type Closeable = { def close(): Unit }
  def noop(a: Closeable) = {}
  // Dummy closeable returned by `update`, where there is nothing to close.
  object Noop {
    def close() = {}
  }

  /** Opens a connection and prepared statement, runs the statement, and closes everything. */
  def run[T, V <: Closeable](sql: String, executeStatement: PreparedStatement => V, produceResult: V => T): T = {
    autoclose(DriverManager.getConnection("jdbc:sqlite:games.db")) { con =>
      autoclose(con.prepareStatement(sql)) { stmt =>
        autoclose(executeStatement(stmt))(produceResult)
      }
    }
  }

  /** Loan pattern: applies `f` and closes the resource even if `f` throws. */
  def autoclose[T <: Closeable, R](code: => T)(f: T => R): R = {
    val applied = code
    try {
      f(applied)
    } finally {
      applied.close
    }
  }
}
| vidageek/games | web/src/main/scala/vggames/dbff/Listener.scala | Scala | gpl-3.0 | 2,653 |
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package scaps.eclipse.ui.view.handlers
import org.eclipse.core.commands.AbstractHandler
import org.eclipse.core.commands.ExecutionEvent
import org.eclipse.ui.PlatformUI
import scaps.eclipse.ui.handlers.IndexUCHandler
import org.eclipse.ui.handlers.HandlerUtil
class ScapsRunIndexerHandler extends AbstractHandler {
  /** Command-framework entry point: starts the Scaps indexer for the active workbench window. */
  def execute(event: ExecutionEvent): Object = {
    val activeWindow = HandlerUtil.getActiveWorkbenchWindowChecked(event)
    IndexUCHandler().runIndexer(activeWindow)
    // The Eclipse command framework ignores the return value.
    null
  }
}
| flomerz/scala-ide-scaps | scala-ide-scaps-plugin/src/main/scala/scaps/eclipse/ui/view/handlers/ScapsRunIndexerHandler.scala | Scala | mpl-2.0 | 697 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.testsuite.javalib.util
import scala.scalajs.js
import org.junit.Assert._
import org.junit.Test
import java.util.Formatter
class FormatterJSTest {
  /** `js.undefined` formatted via %s must render as the string "undefined". */
  @Test def formatUndefined(): Unit = {
    val fmt = new Formatter()
    // Close the formatter even if format() throws — the original leaked it on failure.
    try assertEquals("undefined", fmt.format("%s", js.undefined).toString())
    finally fmt.close()
  }

  /** The same rendering must hold when going through the f"" interpolator. */
  @Test def formatUndefinedWithInterpolator(): Unit = {
    assertEquals("undefined", f"${js.undefined}%s")
  }
}
| scala-js/scala-js | test-suite/js/src/test/scala/org/scalajs/testsuite/javalib/util/FormatterJSTest.scala | Scala | apache-2.0 | 741 |
/**
* A very fast and memory efficient class to encode and decode to and from BASE64 in full accordance
* with RFC 2045.<br><br>
* On Windows XP sp1 with 1.4.2_04 and later ;), this encoder and decoder is about 10 times faster
* on small arrays (10 - 1000 bytes) and 2-3 times as fast on larger arrays (10000 - 1000000 bytes)
* compared to <code>sun.misc.Encoder()/Decoder()</code>.<br><br>
*
* On byte arrays the encoder is about 20% faster than Jakarta Commons Base64 Codec for encode and
* about 50% faster for decoding large arrays. This implementation is about twice as fast on very small
* arrays (< 30 bytes). If source/destination is a <code>String</code> this
* version is about three times as fast due to the fact that the Commons Codec result has to be recoded
* to a <code>String</code> from <code>byte[]</code>, which is very expensive.<br><br>
*
* This encode/decode algorithm doesn't create any temporary arrays as many other codecs do, it only
* allocates the resulting array. This produces less garbage and it is possible to handle arrays twice
* as large as algorithms that create a temporary array. (E.g. Jakarta Commons Codec). It is unknown
* whether Sun's <code>sun.misc.Encoder()/Decoder()</code> produce temporary arrays but since performance
* is quite low it probably does.<br><br>
*
* The encoder produces the same output as the Sun one except that the Sun's encoder appends
* a trailing line separator if the last character isn't a pad. Unclear why but it only adds to the
* length and is probably a side effect. Both are in conformance with RFC 2045 though.<br>
* Commons codec seem to always att a trailing line separator.<br><br>
*
* <b>Note!</b>
* The encode/decode method pairs (types) come in three versions with the <b>exact</b> same algorithm and
* thus a lot of code redundancy. This is to not create any temporary arrays for transcoding to/from different
* format types. The methods not used can simply be commented out.<br><br>
*
* There is also a "fast" version of all decode methods that works the same way as the normal ones, but
* har a few demands on the decoded input. Normally though, these fast verions should be used if the source if
* the input is known and it hasn't bee tampered with.<br><br>
*
* If you find the code useful or you find a bug, please send me a note at base64 @ miginfocom . com.
*
* Licence (BSD):
* ==============
*
* Copyright (c) 2004, Mikael Grev, MiG InfoCom AB. (base64 @ miginfocom . com)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list
* of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
* Neither the name of the MiG InfoCom AB nor the names of its contributors may be
* used to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
* @version 2.2
* @author Mikael Grev
* Date: 2004-aug-02
* Time: 11:31:11
*
* Adapted in 2009 by Mathias Doenitz.
*/
package org.http4s.internal.parboiled2.util
private[http4s] object Base64 {
  // Initialised lazily and thread-safely by the JVM. The original used
  // mutable null-checked vars, which were not thread-safe and could
  // construct an instance more than once under concurrent first access.
  private lazy val RFC2045: Base64 =
    new Base64("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=")
  private lazy val CUSTOM: Base64 =
    new Base64("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-_")

  /** Codec with a custom alphabet: '-' in place of '/', '_' as the fill character. */
  def custom(): Base64 = CUSTOM

  /** Codec with the standard RFC 2045 alphabet and '=' padding. */
  def rfc2045(): Base64 = RFC2045
}
private[http4s] class Base64(alphabet: String) {
  // The alphabet must supply exactly 64 encoding characters plus one fill (pad) character.
  if (alphabet == null || alphabet.length() != 65) {
    throw new IllegalArgumentException()
  }
  // Encoding table: the characters for 6-bit values 0..63.
  val CA = alphabet.substring(0, 64).toCharArray
  // The padding character (position 65 of the alphabet; '=' for RFC 2045).
  val fillChar = alphabet.charAt(64)
  // Decoding table: maps a character code to its 6-bit value, -1 for illegal characters.
  val IA: Array[Int] = Array.fill(256)(-1)
  (0 until CA.length).foreach { i =>
    IA(CA(i).toInt) = i
  }
  // The fill character decodes to 0; the padding it represents is handled separately.
  IA(fillChar.toInt) = 0
  /** Returns the 64-character encoding alphabet (without the fill character). */
  def getAlphabet: Array[Char] = {
    CA
  }
  /**
   * Decodes a BASE64 encoded char array. All illegal characters will be ignored and can handle both arrays with
   * and without line separators.
   *
   * @param sArr The source array. <code>null</code> or length 0 will return an empty array.
   * @return The decoded array of bytes. May be of length 0. Will be <code>null</code> if the legal characters
   *         (including '=') isn't divideable by 4. (I.e. definitely corrupted).
   */
  def decode(sArr: Array[Char]): Array[Byte] = {
    // Check special case
    val sLen = if(sArr != null) {
      sArr.length
    }
    else {
      0
    }
    if (sLen == 0) {
      return Array.empty[Byte]
    }
    // Count illegal characters (including '\\r', '\\n') to know what size the returned array will be,
    // so we don't have to reallocate & copy it later.
    // If input is "pure" (I.e. no line separators or illegal chars) base64 this loop can be commented out.
    var sepCnt = 0 // Number of separator characters. (Actually illegal characters, but that's a bonus...)
    (0 until sLen).foreach { i =>
      if (IA(sArr(i).toInt) < 0) {
        sepCnt += 1
      }
    }
    // Check so that legal chars (including '=') are evenly divideable by 4 as specified in RFC 2045.
    if ((sLen - sepCnt) % 4 != 0) {
      return null
    }
    // Count trailing fill characters to know how many pad bytes to subtract.
    var pad = 0
    var i = sLen-1
    while (i > 0 && IA(sArr(i).toInt) <= 0) {
      if (sArr(i) == fillChar) {
        pad += 1
      }
      i -= 1
    }
    val len = ((sLen - sepCnt) * 6 >> 3) - pad
    val dArr = Array.ofDim[Byte](len) // Preallocate byte[] of exact length
    var s = 0
    var d = 0
    while (d < len) {
      // Assemble three bytes into an int from four "valid" characters.
      var i = 0
      var j = 0
      // j only increased if a valid char was found.
      while (j < 4) {
        val c = IA(sArr(s).toInt)
        s += 1
        if (c >= 0) {
          i |= c << (18 - j * 6)
        } else {
          j -= 1
        }
        j += 1
      }
      // Add the bytes
      dArr(d) = (i >> 16).toByte
      d += 1
      if (d < len) {
        dArr(d) = (i >> 8).toByte
        d += 1
        if (d < len) {
          dArr(d) = i.toByte
          d += 1
        }
      }
    }
    dArr
  }
  /**
   * Decodes a BASE64 encoded char array that is known to be resonably well formatted. The method is about twice as
   * fast as {@link #decode(char[])}. The preconditions are:<br>
   * + The array must have a line length of 76 chars OR no line separators at all (one line).<br>
   * + Line separator must be "\\r\\n", as specified in RFC 2045
   * + The array must not contain illegal characters within the encoded string<br>
   * + The array CAN have illegal characters at the beginning and end, those will be dealt with appropriately.<br>
   *
   * @param sArr The source array. Length 0 will return an empty array. <code>null</code> will throw an exception.
   * @return The decoded array of bytes. May be of length 0.
   */
  def decodeFast(sArr: Array[Char]): Array[Byte] = {
    // Check special case
    val sLen = sArr.length
    if (sLen == 0) {
      return Array.empty[Byte]
    }
    // Start and end index after trimming.
    var sIx = 0
    var eIx = sLen - 1
    // Trim illegal chars from start
    while (sIx < eIx && IA(sArr(sIx).toInt) < 0) {
      sIx += 1
    }
    // Trim illegal chars from end
    while (eIx > 0 && IA(sArr(eIx).toInt) < 0) {
      eIx -= 1
    }
    // get the padding count (=) (0, 1 or 2)
    // Count '=' at end.
    val pad = if (sArr(eIx) == fillChar) {
      if (sArr(eIx - 1) == fillChar) {
        2
      }
      else {
        1
      }
    }
    else {
      0
    }
    // Content count including possible separators
    val cCnt = eIx - sIx + 1
    // Count '=' at end.
    val sepCnt = if (sLen > 76) {
      (if (sArr(76) == '\\r') {
        cCnt / 78
      }
      else {
        0
      }) << 1
    }
    else {
      0
    }
    val len = ((cCnt - sepCnt) * 6 >> 3) - pad // The number of decoded bytes
    val dArr = Array.ofDim[Byte](len); // Preallocate byte() of exact length
    // Decode all but the last 0 - 2 bytes.
    var d = 0
    var cc = 0
    val eLen = (len / 3) * 3
    while (d < eLen) {
      // Assemble three bytes into an int from four "valid" characters.
      var i = IA(sArr(sIx).toInt) << 18
      sIx += 1
      i = i | IA(sArr(sIx).toInt) << 12
      sIx += 1
      i = i | IA(sArr(sIx).toInt) << 6
      sIx += 1
      i = i | IA(sArr(sIx).toInt)
      sIx += 1
      // Add the bytes
      dArr(d) = (i >> 16).toByte
      d += 1
      dArr(d) = (i >> 8).toByte
      d += 1
      dArr(d) = i.toByte
      d += 1
      // If line separator, jump over it.
      cc += 1
      if (sepCnt > 0 && cc == 19) {
        sIx += 2
        cc = 0
      }
    }
    if (d < len) {
      // Decode last 1-3 bytes (incl '=') into 1-3 bytes
      var i = 0
      var j = 0
      while (sIx <= eIx - pad) {
        i |= IA(sArr(sIx).toInt) << (18 - j * 6)
        sIx += 1
        j += 1
      }
      var r = 16
      while (d < len) {
        dArr(d) = (i >> r).toByte
        d += 1
        r -= 8
      }
    }
    dArr
  }
  /**
   * Encodes a raw byte array into a BASE64 <code>String</code> representation in accordance with RFC 2045.
   *
   * @param sArr The bytes to convert. If <code>null</code> or length 0 an empty array will be returned.
   * @param lineSep Optional "\\r\\n" after 76 characters, unless end of file.<br>
   *                No line separator will be in breach of RFC 2045 which specifies max 76 per line but will be a
   *                little faster.
   * @return A BASE64 encoded array. Never <code>null</code>.
   */
  def encodeToString(sArr: Array[Byte], lineSep: Boolean): String = {
    // Reuse char[] since we can't create a String incrementally anyway and StringBuffer/Builder would be slower.
    new String(encodeToChar(sArr, lineSep))
  }
  /**
   * Encodes a raw byte array into a BASE64 <code>char[]</code> representation i accordance with RFC 2045.
   *
   * @param sArr The bytes to convert. If <code>null</code> or length 0 an empty array will be returned.
   * @param lineSep Optional "\\r\\n" after 76 characters, unless end of file.<br>
   *                No line separator will be in breach of RFC 2045 which specifies max 76 per line but will be a
   *                little faster.
   * @return A BASE64 encoded array. Never <code>null</code>.
   */
  def encodeToChar(sArr: Array[Byte], lineSep: Boolean): Array[Char] = {
    // Check special case
    val sLen = if (sArr != null) {
      sArr.length
    }
    else {
      0
    }
    if (sLen == 0) {
      return Array.empty[Char]
    }
    val eLen = (sLen / 3) * 3 // Length of even 24-bits.
    val cCnt = ((sLen - 1) / 3 + 1) << 2 // Returned character count
    // Length of returned array
    val dLen = cCnt + (if (lineSep == true) {
      (cCnt - 1) / 76 << 1
    }
    else {
      0
    })
    val dArr = Array.ofDim[Char](dLen)
    // Encode even 24-bits
    var s = 0
    var d = 0
    var cc = 0
    while (s < eLen) {
      // Copy next three bytes into lower 24 bits of int, paying attension to sign.
      var i = (sArr(s) & 0xff) << 16
      s += 1
      i = i | ((sArr(s) & 0xff) << 8)
      s += 1
      i = i | (sArr(s) & 0xff)
      s += 1
      // Encode the int into four chars
      dArr(d) = CA((i >>> 18) & 0x3f)
      d += 1
      dArr(d) = CA((i >>> 12) & 0x3f)
      d += 1
      dArr(d) = CA((i >>> 6) & 0x3f)
      d += 1
      dArr(d) = CA(i & 0x3f)
      d += 1
      // Add optional line separator
      cc += 1
      if (lineSep && cc == 19 && d < dLen - 2) {
        dArr(d) = '\\r'
        d += 1
        dArr(d) = '\\n'
        d += 1
        cc = 0
      }
    }
    // Pad and encode last bits if source isn't even 24 bits.
    val left = sLen - eLen; // 0 - 2.
    if (left > 0) {
      // Prepare the int
      val i = ((sArr(eLen) & 0xff) << 10) | (if (left == 2) {
        (sArr(sLen - 1) & 0xff) << 2
      }
      else {
        0
      })
      // Set last four chars
      dArr(dLen - 4) = CA(i >> 12)
      dArr(dLen - 3) = CA((i >>> 6) & 0x3f)
      dArr(dLen - 2) = if(left == 2) {
        CA(i & 0x3f)
      }
      else {
        fillChar
      }
      dArr(dLen - 1) = fillChar
    }
    dArr
  }
}
| ZizhengTai/http4s | parboiled2/src/main/scala/org/http4s/internal/parboiled2/util/Base64.scala | Scala | apache-2.0 | 13,520 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2015 Enterprise Data Management Council
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
//package org.edmcouncil.serializer
//
//import java.io.{ IOException, BufferedInputStream, File, FileInputStream }
//import java.nio.file.attribute.BasicFileAttributes
//import java.nio.file._
//
//import grizzled.slf4j.Logging
//import org.edmcouncil.util.{ PotentialFile, BaseURL, PotentialDirectory }
//import org.semanticweb.owlapi.io.{ OWLOntologyDocumentSource, StreamDocumentSource }
//import org.semanticweb.owlapi.model.IRI
//
//import scala.io.Source
//
///**
// * The ImportResolver tries to find an imported ontology based on a given base directory and base URL.
// *
// * See https://jira.edmcouncil.org/browse/RDFSER-7
// */
//class ImportResolver private (baseDir: PotentialDirectory, baseUrl: BaseURL, importedIri: IRI) extends Logging {
//
// import ImportResolver._
//
// private[this] implicit val codec = scala.io.Codec.UTF8
//
// type TryPathFunction = () ⇒ Path
//
// val importedUrl = importedIri.toURI.toString
// val matchingBaseUrl = baseUrl.matchesWith(importedUrl)
// val baseDirExists = baseDir.exists
// val baseUrlSpecified = baseUrl.isSpecified
// val remainderOfImportUrl = baseUrl.strip(importedUrl)
// val shouldBeFound = matchingBaseUrl && baseDirExists && baseUrlSpecified && remainderOfImportUrl.isDefined
// //val firstPath = baseDir.path.get.resolve(remainderOfImportUrl.get)
//
// private val pathsToBeTried: Seq[Path] = Seq(baseDir.path.get)
//
// private val rdfFileMatcherPattern = (
// "**/" + remainderOfImportUrl.get + checkFileExtensions.mkString(".{", ",", "}")
// ).replace("/.", ".").toLowerCase
//
// private val rdfFileMatcher = pathMatcher(s"glob:$rdfFileMatcherPattern")
//
// /**
// * tryPath is called for each Path entry in a Seq[Path] collection. The first one that matches is going
// * to be imported.
// */
// private val tryPath = new PartialFunction[Path, File] {
//
// var file: Option[File] = None
//
// def apply(path: Path): File = file.get
//
// def isDefinedAt(path: Path): Boolean = {
// //info(s"isDefinedAt: $path")
// val walker = new DirectoryWalker(rdfFileMatcher)
// Files.walkFileTree(path, walker)
// file = walker.result
// file.isDefined
// }
// }
//
// private[this] val tryAll = pathsToBeTried collectFirst tryPath
//
// /**
// * The first Path in the list of pathsToBeTried that points to an existing ontology file
// */
// val resource = tryAll
//
// val found = resource.isDefined
//
// def inputStream = resource.map((file: File) ⇒ new BufferedInputStream(new FileInputStream(file)))
// def inputSource = inputStream.map(Source.fromInputStream(_)(codec))
// def inputDocumentSource: Option[OWLOntologyDocumentSource] = inputStream.map(new StreamDocumentSource(_, importedIri))
//}
//
//object ImportResolver extends Logging {
//
// private val fileSystem = FileSystems.getDefault
// private def pathMatcher(syntaxAndPattern: String) = fileSystem.getPathMatcher(syntaxAndPattern)
// private val checkFileExtensions = Seq("rdf", "owl", "ttl", "nt", "n3") // TODO: Get this list from either OWLAPI or Sesame
//
// def apply(basePath: Path, baseUri: BaseURL, importedIri: IRI) =
// new ImportResolver(PotentialDirectory(basePath), baseUri, importedIri)
//
// def apply(baseDir: PotentialDirectory, baseUrl: BaseURL, importedIri: IRI) =
// new ImportResolver(baseDir, baseUrl, importedIri)
//}
//
//class DirectoryWalker(matcher: PathMatcher) extends SimpleFileVisitor[Path] with Logging {
//
// var result: Option[File] = None
//
// /**
// * Compares the pattern against the file or directory name and returns true if we found a valid RDF file
// */
// private def checkFile(path: Path): Boolean = {
// val normalizedPath = path.normalize()
// val potentialFile = PotentialFile(Some(normalizedPath.toString.toLowerCase))
// if (!matcher.matches(potentialFile.path.get)) {
// debug(s"Tried $normalizedPath, no match ${potentialFile}")
// return false
// }
// info(s"Found $normalizedPath -> ${normalizedPath.toRealPath().toString}")
// result = Some(path.toFile)
// true
// }
//
// /**
// * Invoke the pattern matching method on each file.
// */
// override def visitFile(file: Path, attrs: BasicFileAttributes): FileVisitResult =
// if (attrs.isRegularFile && checkFile(file)) FileVisitResult.TERMINATE else FileVisitResult.CONTINUE
//
// /**
// * Invoke the pattern matching method on each directory.
// */
// override def preVisitDirectory(dir: Path, attrs: BasicFileAttributes): FileVisitResult = FileVisitResult.CONTINUE
//
// override def visitFileFailed(file: Path, exc: IOException): FileVisitResult = {
// println(exc)
// FileVisitResult.CONTINUE
// }
//}
| edmcouncil/rdf-serializer | src/main/scala/org/edmcouncil/rdf_toolkit/owlapi_serializer/ImportResolver.scala | Scala | mit | 5,867 |
package chapter04
// What happens when you zip together two strings, such as "Hello".zip("World")?
// Come up with a plausible use case.
object Exercise10 extends App {
  // zip pairs characters positionally: Vector((H,W), (e,o), (l,r), (l,l), (o,d))
  val zipped = "Hello".zip("World")
  println(zipped)
  // Plausible use case: mapping keys between two keyboard layouts, key by key.
  val layoutMapping = "qwertyu".zip("йцукенг")
  println(layoutMapping)
}
| vsuharnikov/books-exercises | scala/scala-for-the-impatient/src/main/scala/chapter04/Exercise10.scala | Scala | mit | 270 |
package ammonite.sshd
import java.io.{OutputStream, InputStream}
import ammonite.sshd.util.Implicits
import org.apache.sshd
class ShellSession(runner: SshServer.TerminalTask) extends sshd.server.Command {
  // Streams and the exit callback are injected by the SSHD framework before start().
  var in: InputStream = _
  var out: OutputStream = _
  var err: OutputStream = _
  var exit: Option[sshd.server.ExitCallback] = None
  // The thread currently running the terminal task, if any.
  var thread: Option[Thread] = None
  def setInputStream(in: InputStream) { this.in = in }
  // Output is wrapped so that newlines are emitted in the \r\n form ssh expects.
  def setOutputStream(out: OutputStream) { this.out = new SshOutputStream(out) }
  def setErrorStream(err: OutputStream) { this.err = err }
  def setExitCallback(exit: org.apache.sshd.server.ExitCallback) { this.exit = Option(exit) }
  // Runs the terminal task on its own thread; signals exit status 0 when it finishes.
  def start(env: org.apache.sshd.server.Environment) {
    import Implicits._
    val thread = new Thread({
      runner(in, out)
      this.thread = None
      exit.foreach(_.onExit(0, "repl finished"))
    })
    this.thread = Some(thread)
    thread.start()
  }
  // Interrupts the running task (if any) when the session is torn down.
  def destroy() {
    thread.foreach(_.interrupt())
  }
  /** OutputStream decorator that prefixes every '\n' with '\r', as ssh clients require. */
  class SshOutputStream(out:OutputStream) extends OutputStream {
    override def close() { out.close() }
    override def flush() { out.flush() }
    override def write(b: Int) { // ssh only accepts new lines with \r
      if (b.toChar == '\n') out.write('\r')
      out.write(b)
    }
    // Byte-at-a-time delegation so the newline translation above is applied uniformly.
    override def write(bytes: Array[Byte]):Unit = for {
      i ← bytes.indices
    } write(bytes(i))
    override def write(bytes: Array[Byte], off: Int, len: Int) {
      write(bytes.slice(off, off + len))
    }
  }
}
| dant3/ammonite-sshd | src/main/scala/ammonite/sshd/ShellSession.scala | Scala | apache-2.0 | 1,506 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.hbase.index
import org.apache.hadoop.hbase.client._
import org.apache.hadoop.hbase.filter.{Filter => HFilter}
import org.locationtech.geomesa.curve.Z3SFC
import org.locationtech.geomesa.hbase.HBaseFilterStrategyType
import org.locationtech.geomesa.hbase.data._
import org.locationtech.geomesa.hbase.filters.Z3HBaseFilter
import org.locationtech.geomesa.index.filters.Z3Filter
import org.locationtech.geomesa.index.index.{Z3Index, Z3ProcessingValues}
import org.opengis.feature.simple.SimpleFeatureType
case object HBaseZ3Index extends HBaseLikeZ3Index with HBasePlatform {
  // Adds a server-side Z3 filter (built from the values the Z3 index recorded
  // during query planning) on top of the standard push-down filters.
  override protected def createPushDownFilters(ds: HBaseDataStore,
                                               sft: SimpleFeatureType,
                                               filter: HBaseFilterStrategyType,
                                               transform: Option[(String, SimpleFeatureType)]): Seq[HFilter] = {
    val z3Filter = Z3Index.currentProcessingValues.map { case Z3ProcessingValues(sfc, _, xy, _, times) =>
      configureZ3PushDown(sfc, xy, times)
    }
    super.createPushDownFilters(ds, sft, filter, transform) ++ z3Filter.toSeq
  }
  // Builds the HBase Z3 filter from spatial bounds and per-epoch time intervals.
  private def configureZ3PushDown(sfc: Z3SFC,
                                  xy: Seq[(Double, Double, Double, Double)],
                                  times: Map[Short, Seq[(Long, Long)]]): HFilter = {
    // we know we're only going to scan appropriate periods, so leave out whole ones
    val wholePeriod = Seq((sfc.time.min.toLong, sfc.time.max.toLong))
    val filteredTimes = times.filter(_._2 != wholePeriod)
    // Normalize lon/lat bounds into the curve's discrete space.
    val normalizedXY = xy.map { case (xmin, ymin, xmax, ymax) =>
      Array(sfc.lon.normalize(xmin), sfc.lat.normalize(ymin), sfc.lon.normalize(xmax), sfc.lat.normalize(ymax))
    }.toArray
    // Track the overall epoch range covered, accumulated while normalizing
    // the intervals below (the map is order-dependent for these vars).
    var minEpoch: Int = Short.MaxValue
    var maxEpoch: Int = Short.MinValue
    val tOpts = filteredTimes.toSeq.sortBy(_._1).map { case (bin, times) =>
      times.map { case (t1, t2) =>
        val lt = sfc.time.normalize(t1)
        val ut = sfc.time.normalize(t2)
        if (lt < minEpoch) minEpoch = lt
        if (ut > maxEpoch) maxEpoch = ut
        Array(lt, ut)
      }.toArray
    }.toArray
    new Z3HBaseFilter(new Z3Filter(normalizedXY, tOpts, minEpoch.toShort, maxEpoch.toShort, 1, 8))
  }
}
/** Z3 (space + time) feature index over HBase. */
trait HBaseLikeZ3Index extends HBaseFeatureIndex with Z3Index[HBaseDataStore, HBaseFeature, Mutation, Query] {
  // Version number of this index implementation's serialized format.
  override val version: Int = 1
}
| spandanagrawal/geomesa | geomesa-hbase/geomesa-hbase-datastore/src/main/scala/org/locationtech/geomesa/hbase/index/HBaseZ3Index.scala | Scala | apache-2.0 | 2,905 |
package com.typesafe.sbt.packager.archetypes
import sbt._
/**
* Available settings/tasks for the [[com.typesafe.sbt.packager.archetypes.JavaAppPackaging]]
* and all depending archetypes.
*/
trait JavaAppKeys {
  // TODO - we should change this key name in future versions; it also specified
  // the location of the systemd EnvironmentFile
  val bashScriptEnvConfigLocation = SettingKey[Option[String]](
    "bashScriptEnvConfigLocation",
    "The location of a bash script that will be sourced before running the app."
  )
  // Ordered (file -> relative destination path) mappings making up the runtime classpath.
  val scriptClasspathOrdering = TaskKey[Seq[(File, String)]](
    "scriptClasspathOrdering",
    "The order of the classpath used at runtime for the bat/bash scripts."
  )
  // Artifacts produced by dependent sub-projects that must be bundled.
  val projectDependencyArtifacts = TaskKey[Seq[Attributed[File]]](
    "projectDependencyArtifacts",
    "The set of exported artifacts from our dependent projects."
  )
  // Filenames (relative to lib/ in the distribution) placed on the script's classpath.
  val scriptClasspath = TaskKey[Seq[String]](
    "scriptClasspath",
    "A list of relative filenames (to the lib/ folder in the distribution) of what to include on the classpath."
  )
}
| fsat/sbt-native-packager | src/main/scala/com/typesafe/sbt/packager/archetypes/JavaAppKeys.scala | Scala | bsd-2-clause | 1,063 |
package mechanics
import scala.collection.mutable.Buffer
/** A single map cell: terrain plus its mutable contents (characters, a building part, items). */
class Cell(val terrainType: Terrain.Value) {
  // Internal mutable state; exposed only through immutable snapshots below.
  private val occupants = Buffer[Character]()
  private var occupyingBuilding: Option[Building] = None
  private var occupyingBuildingPart: Option[(Int, Int)] = None
  private val groundItems = Buffer[Item]()

  /** Places a character in this cell. */
  def add(character: Character) = {
    occupants += character
  }

  /** Assigns a building to this cell, recording which part of the building this cell holds. */
  def add(building: Building, part: (Int, Int)) = {
    occupyingBuilding = Some(building)
    occupyingBuildingPart = Some(part)
  }

  /** Drops an item in this cell. */
  def add(item: Item) = {
    groundItems += item
  }

  /** Snapshot of the characters currently in this cell. */
  def characters = occupants.toVector
  def building = occupyingBuilding
  def buildingPart = occupyingBuildingPart
  /** Snapshot of the items currently in this cell. */
  def items = groundItems.toVector

  /** True when no character, building or item occupies this cell. */
  def isEmpty = characters.isEmpty && building.isEmpty && items.isEmpty
}
object Cell {
  /** Creates a cell with the default (plain) terrain and no contents. */
  def apply() = new Cell(Terrain.PLAIN)
}
/*
* Copyright 2015 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.csv
package laws
package discipline
import laws._
import org.scalacheck.Arbitrary
object CellEncoderTests {
  /** Builds the discipline rule set for a cell encoder of `A`,
    * delegating to the generic encoder laws over `String` cells. */
  def apply[A: CellEncoderLaws: Arbitrary](implicit al: Arbitrary[LegalCell[A]]): CellEncoderTests[A] =
    EncoderTests[String, A, codecs.type]
}
| nrinaudo/scala-csv | laws/shared/src/main/scala/kantan/csv/laws/discipline/CellEncoderTests.scala | Scala | mit | 872 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.sst
import slamdata.Predef._
import quasar.ejson.EJson
import quasar.tpe._
import quasar.contrib.iota.mkInject
import matryoshka._
import matryoshka.patterns.EnvT
import scalaz._, Scalaz._
import simulacrum._
import iotaz.{TListK, CopK, TNilK}
import iotaz.TListK.:::
/** Defines how to extract the `PrimaryTag` from `F`. */
@typeclass
trait ExtractPrimary[F[_]] {
// Returns the primary type tag carried by `fa`, if one is defined.
def primaryTag[A](fa: F[A]): Option[PrimaryTag]
}
object ExtractPrimary {
import ops._
// Instance for an iota coproduct: delegate to a Materializer, which walks the
// type-level list LL and dispatches on the value's injection index.
implicit def copk[LL <: TListK](implicit M: Materializer[LL]): ExtractPrimary[CopK[LL, ?]] =
M.materialize(offset = 0)
// Builds an ExtractPrimary for a coproduct one element at a time; `offset` is
// the index of the head of `LL` within the complete coproduct.
sealed trait Materializer[LL <: TListK] {
def materialize(offset: Int): ExtractPrimary[CopK[LL, ?]]
}
object Materializer {
// Base case: single-element list, so the match on the injection is total.
@SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf"))
implicit def base[F[_]](
implicit
F: ExtractPrimary[F]
): Materializer[F ::: TNilK] = new Materializer[F ::: TNilK] {
override def materialize(offset: Int): ExtractPrimary[CopK[F ::: TNilK, ?]] = {
val I = mkInject[F, F ::: TNilK](offset)
new ExtractPrimary[CopK[F ::: TNilK, ?]] {
override def primaryTag[A](cfa: CopK[F ::: TNilK, A]): Option[PrimaryTag] = cfa match {
case I(fa) => fa.primaryTag
}
}
}
}
// Inductive case: try the head injection at `offset`, otherwise recurse on the
// tail with offset + 1. The cast narrows the coproduct to its tail, which is
// sound because the head case was just excluded by the match.
@SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf"))
implicit def induct[F[_], LL <: TListK](
implicit
F: ExtractPrimary[F],
LL: Materializer[LL]
): Materializer[F ::: LL] = new Materializer[F ::: LL] {
override def materialize(offset: Int): ExtractPrimary[CopK[F ::: LL, ?]] = {
val I = mkInject[F, F ::: LL](offset)
new ExtractPrimary[CopK[F ::: LL, ?]] {
override def primaryTag[A](cfa: CopK[F ::: LL, A]): Option[PrimaryTag] = cfa match {
case I(fa) => fa.primaryTag
case other => LL.materialize(offset + 1).primaryTag(other.asInstanceOf[CopK[LL, A]])
}
}
}
}
}
// Tagged values always expose their tag (as the Right variant of PrimaryTag).
implicit val taggedExtractPrimary: ExtractPrimary[Tagged] =
new ExtractPrimary[Tagged] {
def primaryTag[A](fa: Tagged[A]) = some(fa.tag.right)
}
// Constants report the tag of their literal; any other TypeF shape reports its
// structural primary type when one is defined.
implicit def typeFExtractPrimary[L](
implicit L: Recursive.Aux[L, EJson]
): ExtractPrimary[TypeF[L, ?]] =
new ExtractPrimary[TypeF[L, ?]] {
def primaryTag[A](fa: TypeF[L, A]) =
fa match {
case TypeF.Const(l) => some(primaryTagOf(l))
case _ => TypeF.primary(fa) map (_.left)
}
}
// EnvT strips the annotation and asks the lower functor.
implicit def envTExtractPrimary[E, F[_]: ExtractPrimary]
: ExtractPrimary[EnvT[E, F, ?]] =
new ExtractPrimary[EnvT[E, F, ?]] {
def primaryTag[A](fa: EnvT[E, F, A]) = fa.lower.primaryTag
}
}
| slamdata/slamengine | sst/src/main/scala/quasar/sst/ExtractPrimary.scala | Scala | apache-2.0 | 3,310 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt.internal.util
import Classes.Applicative
import Types._
/**
* An abstraction over a higher-order type constructor `K[x[y]]` with the purpose of abstracting
* over heterogeneous sequences like `KList` and `TupleN` with elements with a common type
* constructor as well as homogeneous sequences `Seq[M[T]]`.
*/
trait AList[K[L[x]]] {
// Natural-transformation map: rewrite each element's context from M to N.
def transform[M[_], N[_]](value: K[M], f: M ~> N): K[N]
// Effectful map: each element yields an N-effect producing a P; effects are
// combined applicatively, preserving the container's structure.
def traverse[M[_], N[_], P[_]](value: K[M], f: M ~> (N ∙ P)#l)(
implicit np: Applicative[N]
): N[K[P]]
// Right fold over the elements, whose element types are existential here.
def foldr[M[_], A](value: K[M], f: (M[_], A) => A, init: A): A
def toList[M[_]](value: K[M]): List[M[_]] = foldr[M, List[M[_]]](value, _ :: _, Nil)
// Run all M-effects and feed the pure results (K[Id]) to `f`.
def apply[M[_], C](value: K[M], f: K[Id] => C)(implicit a: Applicative[M]): M[C] =
a.map(f, traverse[M, M, Id](value, idK[M])(a))
}
object AList {
type Empty = AList[ConstK[Unit]#l]
/** AList for Unit, which represents a sequence that is always empty.*/
val empty: Empty = new Empty {
def transform[M[_], N[_]](in: Unit, f: M ~> N) = ()
def foldr[M[_], T](in: Unit, f: (M[_], T) => T, init: T) = init
// Nothing to traverse: lift the pure result directly.
override def apply[M[_], C](in: Unit, f: Unit => C)(implicit app: Applicative[M]): M[C] =
app.pure(f(()))
def traverse[M[_], N[_], P[_]](in: Unit, f: M ~> (N ∙ P)#l)(
implicit np: Applicative[N]
): N[Unit] = np.pure(())
}
type SeqList[T] = AList[λ[L[x] => List[L[T]]]]

/**
 * AList for a homogeneous sequence.
 *
 * Fix: `traverse` was previously unimplemented (`???`), so any caller hit a
 * `NotImplementedError` at runtime. It is now implemented with the same
 * applicative-chaining technique used by `apply`.
 */
def seq[T]: SeqList[T] = new SeqList[T] {
  def transform[M[_], N[_]](s: List[M[T]], f: M ~> N) = s.map(f.fn[T])

  def foldr[M[_], A](s: List[M[T]], f: (M[_], A) => A, init: A): A =
    s.reverse.foldLeft(init)((t, m) => f(m, t))

  override def apply[M[_], C](s: List[M[T]], f: List[T] => C)(
      implicit ap: Applicative[M]
  ): M[C] = {
    def loop[V](in: List[M[T]], g: List[T] => V): M[V] =
      in match {
        case Nil => ap.pure(g(Nil))
        case x :: xs =>
          val h = (ts: List[T]) => (t: T) => g(t :: ts)
          ap.apply(loop(xs, h), x)
      }
    loop(s, f)
  }

  def traverse[M[_], N[_], P[_]](s: List[M[T]], f: M ~> (N ∙ P)#l)(
      implicit np: Applicative[N]
  ): N[List[P[T]]] = {
    // Recurse over the list, consing each traversed head onto the traversed
    // tail inside the applicative N so results keep their original order.
    def loop(in: List[M[T]]): N[List[P[T]]] =
      in match {
        case Nil => np.pure(Nil)
        case x :: xs =>
          val cons = (h: P[T]) => (t: List[P[T]]) => h :: t
          np.apply(np.map(cons, f(x)), loop(xs))
      }
    loop(s)
  }
}
/** AList for the arbitrary arity data structure KList. */
def klist[KL[M[_]] <: KList.Aux[M, KL]]: AList[KL] = new AList[KL] {
// KList already knows its own structure, so every operation delegates to it.
def transform[M[_], N[_]](k: KL[M], f: M ~> N) = k.transform(f)
def foldr[M[_], T](k: KL[M], f: (M[_], T) => T, init: T): T = k.foldr(f, init)
override def apply[M[_], C](k: KL[M], f: KL[Id] => C)(implicit app: Applicative[M]): M[C] =
k.apply(f)(app)
def traverse[M[_], N[_], P[_]](k: KL[M], f: M ~> (N ∙ P)#l)(
implicit np: Applicative[N]
): N[KL[P]] = k.traverse[N, P](f)(np)
override def toList[M[_]](k: KL[M]) = k.toList
}
type Single[A] = AList[λ[L[x] => L[A]]]
/** AList for a single value. */
def single[A]: Single[A] = new Single[A] {
// With exactly one element, each operation is a direct application of `f`.
def transform[M[_], N[_]](a: M[A], f: M ~> N) = f(a)
def foldr[M[_], T](a: M[A], f: (M[_], T) => T, init: T): T = f(a, init)
def traverse[M[_], N[_], P[_]](a: M[A], f: M ~> (N ∙ P)#l)(
implicit np: Applicative[N]
): N[P[A]] = f(a)
}
/** Example: calling `AList.SplitK[K, Task]#l` returns the type lambda `A[x] => K[A[Task[x]]`. */
sealed trait SplitK[K[L[x]], B[x]] { type l[A[x]] = K[(A ∙ B)#l] }
type ASplit[K[L[x]], B[x]] = AList[SplitK[K, B]#l]
/** AList that operates on the outer type constructor `A` of a composition `[x] A[B[x]]` for type constructors `A` and `B`. */
def asplit[K[L[x]], B[x]](base: AList[K]): ASplit[K, B] = new ASplit[K, B] {
type Split[L[x]] = K[(L ∙ B)#l]
// Each operation nests the supplied transformation underneath B (nestCon)
// and delegates the structural work to `base`.
def transform[M[_], N[_]](value: Split[M], f: M ~> N): Split[N] =
base.transform[(M ∙ B)#l, (N ∙ B)#l](value, nestCon[M, N, B](f))
def traverse[M[_], N[_], P[_]](value: Split[M], f: M ~> (N ∙ P)#l)(
implicit np: Applicative[N]
): N[Split[P]] = {
val g = nestCon[M, (N ∙ P)#l, B](f)
base.traverse[(M ∙ B)#l, N, (P ∙ B)#l](value, g)(np)
}
def foldr[M[_], A](value: Split[M], f: (M[_], A) => A, init: A): A =
base.foldr[(M ∙ B)#l, A](value, f, init)
}
// TODO: auto-generate
sealed trait T2K[A, B] { type l[L[x]] = (L[A], L[B]) }
type T2List[A, B] = AList[T2K[A, B]#l]

/** AList instance for pairs whose two slots share a type constructor. */
def tuple2[A, B]: T2List[A, B] = new T2List[A, B] {
  type T2[M[_]] = (M[A], M[B])

  def transform[M[_], N[_]](ts: T2[M], f: M ~> N): T2[N] =
    (f(ts._1), f(ts._2))

  def foldr[M[_], T](ts: T2[M], f: (M[_], T) => T, init: T): T =
    f(ts._1, f(ts._2, init))

  def traverse[M[_], N[_], P[_]](ts: T2[M], f: M ~> (N ∙ P)#l)(
      implicit np: Applicative[N]
  ): N[T2[P]] = {
    // Lift the curried Tuple2 constructor, then apply it slot by slot.
    val ctor = (Tuple2.apply[P[A], P[B]] _).curried
    val step1 = np.map(ctor, f(ts._1))
    np.apply(step1, f(ts._2))
  }
}
sealed trait T3K[A, B, C] { type l[L[x]] = (L[A], L[B], L[C]) }
type T3List[A, B, C] = AList[T3K[A, B, C]#l]

/** AList instance for 3-tuples whose slots share a type constructor. */
def tuple3[A, B, C]: T3List[A, B, C] = new T3List[A, B, C] {
  type T3[M[_]] = (M[A], M[B], M[C])

  def transform[M[_], N[_]](ts: T3[M], f: M ~> N): T3[N] =
    (f(ts._1), f(ts._2), f(ts._3))

  def foldr[M[_], T](ts: T3[M], f: (M[_], T) => T, init: T): T =
    f(ts._1, f(ts._2, f(ts._3, init)))

  def traverse[M[_], N[_], P[_]](ts: T3[M], f: M ~> (N ∙ P)#l)(
      implicit np: Applicative[N]
  ): N[T3[P]] = {
    // Lift the curried Tuple3 constructor, then apply it slot by slot.
    val ctor = (Tuple3.apply[P[A], P[B], P[C]] _).curried
    val step1 = np.map(ctor, f(ts._1))
    val step2 = np.apply(step1, f(ts._2))
    np.apply(step2, f(ts._3))
  }
}
sealed trait T4K[A, B, C, D] { type l[L[x]] = (L[A], L[B], L[C], L[D]) }
type T4List[A, B, C, D] = AList[T4K[A, B, C, D]#l]

/** AList instance for 4-tuples whose slots share a type constructor. */
def tuple4[A, B, C, D]: T4List[A, B, C, D] = new T4List[A, B, C, D] {
  type T4[M[_]] = (M[A], M[B], M[C], M[D])

  def transform[M[_], N[_]](ts: T4[M], f: M ~> N): T4[N] =
    (f(ts._1), f(ts._2), f(ts._3), f(ts._4))

  def foldr[M[_], T](ts: T4[M], f: (M[_], T) => T, init: T): T =
    f(ts._1, f(ts._2, f(ts._3, f(ts._4, init))))

  def traverse[M[_], N[_], P[_]](ts: T4[M], f: M ~> (N ∙ P)#l)(
      implicit np: Applicative[N]
  ): N[T4[P]] = {
    // Lift the curried Tuple4 constructor, then apply it slot by slot.
    val ctor = (Tuple4.apply[P[A], P[B], P[C], P[D]] _).curried
    val step1 = np.map(ctor, f(ts._1))
    val step2 = np.apply(step1, f(ts._2))
    val step3 = np.apply(step2, f(ts._3))
    np.apply(step3, f(ts._4))
  }
}
sealed trait T5K[A, B, C, D, E] { type l[L[x]] = (L[A], L[B], L[C], L[D], L[E]) }
type T5List[A, B, C, D, E] = AList[T5K[A, B, C, D, E]#l]

/** AList instance for 5-tuples whose slots share a type constructor. */
def tuple5[A, B, C, D, E]: T5List[A, B, C, D, E] = new T5List[A, B, C, D, E] {
  type T5[M[_]] = (M[A], M[B], M[C], M[D], M[E])

  def transform[M[_], N[_]](ts: T5[M], f: M ~> N): T5[N] =
    (f(ts._1), f(ts._2), f(ts._3), f(ts._4), f(ts._5))

  def foldr[M[_], T](ts: T5[M], f: (M[_], T) => T, init: T): T =
    f(ts._1, f(ts._2, f(ts._3, f(ts._4, f(ts._5, init)))))

  def traverse[M[_], N[_], P[_]](ts: T5[M], f: M ~> (N ∙ P)#l)(
      implicit np: Applicative[N]
  ): N[T5[P]] = {
    // Lift the curried Tuple5 constructor, then apply it slot by slot.
    val ctor = (Tuple5.apply[P[A], P[B], P[C], P[D], P[E]] _).curried
    val step1 = np.map(ctor, f(ts._1))
    val step2 = np.apply(step1, f(ts._2))
    val step3 = np.apply(step2, f(ts._3))
    val step4 = np.apply(step3, f(ts._4))
    np.apply(step4, f(ts._5))
  }
}
sealed trait T6K[A, B, C, D, E, F] { type l[L[x]] = (L[A], L[B], L[C], L[D], L[E], L[F]) }
type T6List[A, B, C, D, E, F] = AList[T6K[A, B, C, D, E, F]#l]

/** AList instance for 6-tuples whose slots share a type constructor. */
def tuple6[A, B, C, D, E, F]: T6List[A, B, C, D, E, F] = new T6List[A, B, C, D, E, F] {
  type T6[M[_]] = (M[A], M[B], M[C], M[D], M[E], M[F])

  def transform[M[_], N[_]](ts: T6[M], f: M ~> N): T6[N] =
    (f(ts._1), f(ts._2), f(ts._3), f(ts._4), f(ts._5), f(ts._6))

  def foldr[M[_], T](ts: T6[M], f: (M[_], T) => T, init: T): T =
    f(ts._1, f(ts._2, f(ts._3, f(ts._4, f(ts._5, f(ts._6, init))))))

  def traverse[M[_], N[_], P[_]](ts: T6[M], f: M ~> (N ∙ P)#l)(
      implicit np: Applicative[N]
  ): N[T6[P]] = {
    // Lift the curried Tuple6 constructor, then apply it slot by slot.
    val ctor = (Tuple6.apply[P[A], P[B], P[C], P[D], P[E], P[F]] _).curried
    val step1 = np.map(ctor, f(ts._1))
    val step2 = np.apply(step1, f(ts._2))
    val step3 = np.apply(step2, f(ts._3))
    val step4 = np.apply(step3, f(ts._4))
    val step5 = np.apply(step4, f(ts._5))
    np.apply(step5, f(ts._6))
  }
}
sealed trait T7K[A, B, C, D, E, F, G] {
  type l[L[x]] = (L[A], L[B], L[C], L[D], L[E], L[F], L[G])
}
type T7List[A, B, C, D, E, F, G] = AList[T7K[A, B, C, D, E, F, G]#l]

/** AList instance for 7-tuples whose slots share a type constructor. */
def tuple7[A, B, C, D, E, F, G]: T7List[A, B, C, D, E, F, G] = new T7List[A, B, C, D, E, F, G] {
  type T7[M[_]] = (M[A], M[B], M[C], M[D], M[E], M[F], M[G])

  def transform[M[_], N[_]](ts: T7[M], f: M ~> N): T7[N] =
    (f(ts._1), f(ts._2), f(ts._3), f(ts._4), f(ts._5), f(ts._6), f(ts._7))

  def foldr[M[_], T](ts: T7[M], f: (M[_], T) => T, init: T): T =
    f(ts._1, f(ts._2, f(ts._3, f(ts._4, f(ts._5, f(ts._6, f(ts._7, init)))))))

  def traverse[M[_], N[_], P[_]](ts: T7[M], f: M ~> (N ∙ P)#l)(
      implicit np: Applicative[N]
  ): N[T7[P]] = {
    // Lift the curried Tuple7 constructor, then apply it slot by slot.
    val ctor = (Tuple7.apply[P[A], P[B], P[C], P[D], P[E], P[F], P[G]] _).curried
    val step1 = np.map(ctor, f(ts._1))
    val step2 = np.apply(step1, f(ts._2))
    val step3 = np.apply(step2, f(ts._3))
    val step4 = np.apply(step3, f(ts._4))
    val step5 = np.apply(step4, f(ts._5))
    val step6 = np.apply(step5, f(ts._6))
    np.apply(step6, f(ts._7))
  }
}
sealed trait T8K[A, B, C, D, E, F, G, H] {
  type l[L[x]] = (L[A], L[B], L[C], L[D], L[E], L[F], L[G], L[H])
}
type T8List[A, B, C, D, E, F, G, H] = AList[T8K[A, B, C, D, E, F, G, H]#l]

/** AList instance for 8-tuples whose slots share a type constructor. */
def tuple8[A, B, C, D, E, F, G, H]: T8List[A, B, C, D, E, F, G, H] =
  new T8List[A, B, C, D, E, F, G, H] {
    type T8[M[_]] = (M[A], M[B], M[C], M[D], M[E], M[F], M[G], M[H])

    def transform[M[_], N[_]](ts: T8[M], f: M ~> N): T8[N] =
      (f(ts._1), f(ts._2), f(ts._3), f(ts._4), f(ts._5), f(ts._6), f(ts._7), f(ts._8))

    def foldr[M[_], T](ts: T8[M], f: (M[_], T) => T, init: T): T =
      f(ts._1, f(ts._2, f(ts._3, f(ts._4, f(ts._5, f(ts._6, f(ts._7, f(ts._8, init))))))))

    def traverse[M[_], N[_], P[_]](ts: T8[M], f: M ~> (N ∙ P)#l)(
        implicit np: Applicative[N]
    ): N[T8[P]] = {
      // Lift the curried Tuple8 constructor, then apply it slot by slot.
      val ctor = (Tuple8.apply[P[A], P[B], P[C], P[D], P[E], P[F], P[G], P[H]] _).curried
      val step1 = np.map(ctor, f(ts._1))
      val step2 = np.apply(step1, f(ts._2))
      val step3 = np.apply(step2, f(ts._3))
      val step4 = np.apply(step3, f(ts._4))
      val step5 = np.apply(step4, f(ts._5))
      val step6 = np.apply(step5, f(ts._6))
      val step7 = np.apply(step6, f(ts._7))
      np.apply(step7, f(ts._8))
    }
  }
sealed trait T9K[A, B, C, D, E, F, G, H, I] {
  type l[L[x]] = (L[A], L[B], L[C], L[D], L[E], L[F], L[G], L[H], L[I])
}
type T9List[A, B, C, D, E, F, G, H, I] = AList[T9K[A, B, C, D, E, F, G, H, I]#l]

/** AList instance for 9-tuples whose slots share a type constructor. */
def tuple9[A, B, C, D, E, F, G, H, I]: T9List[A, B, C, D, E, F, G, H, I] =
  new T9List[A, B, C, D, E, F, G, H, I] {
    type T9[M[_]] = (M[A], M[B], M[C], M[D], M[E], M[F], M[G], M[H], M[I])

    def transform[M[_], N[_]](ts: T9[M], f: M ~> N): T9[N] =
      (f(ts._1), f(ts._2), f(ts._3), f(ts._4), f(ts._5), f(ts._6), f(ts._7), f(ts._8), f(ts._9))

    def foldr[M[_], T](ts: T9[M], f: (M[_], T) => T, init: T): T =
      f(ts._1, f(ts._2, f(ts._3, f(ts._4, f(ts._5, f(ts._6, f(ts._7, f(ts._8, f(ts._9, init)))))))))

    def traverse[M[_], N[_], P[_]](ts: T9[M], f: M ~> (N ∙ P)#l)(
        implicit np: Applicative[N]
    ): N[T9[P]] = {
      // Lift the curried Tuple9 constructor, then apply it slot by slot.
      val ctor = (Tuple9.apply[P[A], P[B], P[C], P[D], P[E], P[F], P[G], P[H], P[I]] _).curried
      val step1 = np.map(ctor, f(ts._1))
      val step2 = np.apply(step1, f(ts._2))
      val step3 = np.apply(step2, f(ts._3))
      val step4 = np.apply(step3, f(ts._4))
      val step5 = np.apply(step4, f(ts._5))
      val step6 = np.apply(step5, f(ts._6))
      val step7 = np.apply(step6, f(ts._7))
      val step8 = np.apply(step7, f(ts._8))
      np.apply(step8, f(ts._9))
    }
  }
sealed trait T10K[A, B, C, D, E, F, G, H, I, J] {
  type l[L[x]] = (L[A], L[B], L[C], L[D], L[E], L[F], L[G], L[H], L[I], L[J])
}
type T10List[A, B, C, D, E, F, G, H, I, J] = AList[T10K[A, B, C, D, E, F, G, H, I, J]#l]

/** AList instance for 10-tuples whose slots share a type constructor. */
def tuple10[A, B, C, D, E, F, G, H, I, J]: T10List[A, B, C, D, E, F, G, H, I, J] =
  new T10List[A, B, C, D, E, F, G, H, I, J] {
    type T10[M[_]] = (M[A], M[B], M[C], M[D], M[E], M[F], M[G], M[H], M[I], M[J])

    def transform[M[_], N[_]](ts: T10[M], f: M ~> N): T10[N] =
      (f(ts._1), f(ts._2), f(ts._3), f(ts._4), f(ts._5), f(ts._6), f(ts._7), f(ts._8), f(ts._9), f(ts._10))

    def foldr[M[_], T](ts: T10[M], f: (M[_], T) => T, init: T): T =
      f(
        ts._1,
        f(ts._2, f(ts._3, f(ts._4, f(ts._5, f(ts._6, f(ts._7, f(ts._8, f(ts._9, f(ts._10, init)))))))))
      )

    def traverse[M[_], N[_], P[_]](ts: T10[M], f: M ~> (N ∙ P)#l)(
        implicit np: Applicative[N]
    ): N[T10[P]] = {
      // Lift the curried Tuple10 constructor, then apply it slot by slot.
      val ctor =
        (Tuple10.apply[P[A], P[B], P[C], P[D], P[E], P[F], P[G], P[H], P[I], P[J]] _).curried
      val step1 = np.map(ctor, f(ts._1))
      val step2 = np.apply(step1, f(ts._2))
      val step3 = np.apply(step2, f(ts._3))
      val step4 = np.apply(step3, f(ts._4))
      val step5 = np.apply(step4, f(ts._5))
      val step6 = np.apply(step5, f(ts._6))
      val step7 = np.apply(step6, f(ts._7))
      val step8 = np.apply(step7, f(ts._8))
      val step9 = np.apply(step8, f(ts._9))
      np.apply(step9, f(ts._10))
    }
  }
sealed trait T11K[A, B, C, D, E, F, G, H, I, J, K] {
  type l[L[x]] = (L[A], L[B], L[C], L[D], L[E], L[F], L[G], L[H], L[I], L[J], L[K])
}
type T11List[A, B, C, D, E, F, G, H, I, J, K] = AList[T11K[A, B, C, D, E, F, G, H, I, J, K]#l]

/** AList instance for 11-tuples whose slots share a type constructor. */
def tuple11[A, B, C, D, E, F, G, H, I, J, K]: T11List[A, B, C, D, E, F, G, H, I, J, K] =
  new T11List[A, B, C, D, E, F, G, H, I, J, K] {
    type T11[M[_]] = (M[A], M[B], M[C], M[D], M[E], M[F], M[G], M[H], M[I], M[J], M[K])

    def transform[M[_], N[_]](ts: T11[M], f: M ~> N): T11[N] =
      (
        f(ts._1),
        f(ts._2),
        f(ts._3),
        f(ts._4),
        f(ts._5),
        f(ts._6),
        f(ts._7),
        f(ts._8),
        f(ts._9),
        f(ts._10),
        f(ts._11)
      )

    def foldr[M[_], T](ts: T11[M], f: (M[_], T) => T, init: T): T =
      f(
        ts._1,
        f(
          ts._2,
          f(ts._3, f(ts._4, f(ts._5, f(ts._6, f(ts._7, f(ts._8, f(ts._9, f(ts._10, f(ts._11, init)))))))))
        )
      )

    def traverse[M[_], N[_], P[_]](ts: T11[M], f: M ~> (N ∙ P)#l)(
        implicit np: Applicative[N]
    ): N[T11[P]] = {
      // Lift the curried Tuple11 constructor, then apply it slot by slot.
      val ctor =
        (Tuple11.apply[P[A], P[B], P[C], P[D], P[E], P[F], P[G], P[H], P[I], P[J], P[K]] _).curried
      val step1 = np.map(ctor, f(ts._1))
      val step2 = np.apply(step1, f(ts._2))
      val step3 = np.apply(step2, f(ts._3))
      val step4 = np.apply(step3, f(ts._4))
      val step5 = np.apply(step4, f(ts._5))
      val step6 = np.apply(step5, f(ts._6))
      val step7 = np.apply(step6, f(ts._7))
      val step8 = np.apply(step7, f(ts._8))
      val step9 = np.apply(step8, f(ts._9))
      val step10 = np.apply(step9, f(ts._10))
      np.apply(step10, f(ts._11))
    }
  }
}
| sbt/sbt | internal/util-collection/src/main/scala/sbt/internal/util/AList.scala | Scala | apache-2.0 | 15,021 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.dstream
import scala.reflect.ClassTag
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDDOperationScope
import org.apache.spark.streaming.{Duration, StreamingContext, Time}
import org.apache.spark.streaming.scheduler.RateController
import org.apache.spark.streaming.scheduler.rate.RateEstimator
import org.apache.spark.util.Utils
/**
* This is the abstract base class for all input streams. This class provides methods
* start() and stop() which is called by Spark Streaming system to start and stop receiving data.
* Input streams that can generate RDDs from new data by running a service/thread only on
* the driver node (that is, without running a receiver on worker nodes), can be
* implemented by directly inheriting this InputDStream. For example,
* FileInputDStream, a subclass of InputDStream, monitors a HDFS directory from the driver for
* new files and generates RDDs with the new files. For implementing input streams
* that requires running a receiver on the worker nodes, use
* [[org.apache.spark.streaming.dstream.ReceiverInputDStream]] as the parent class.
*
* @param ssc_ Streaming context that will execute this input stream
*/
abstract class InputDStream[T: ClassTag] (ssc_ : StreamingContext)
extends DStream[T](ssc_) {
// Most recent time for which an RDD was (validly) requested; used by
// isTimeValid to warn about out-of-order batch generation.
private[streaming] var lastValidTime: Time = null
// Register this stream with the DStream graph at construction time.
ssc.graph.addInputStream(this)
/** This is an unique identifier for the input stream. */
val id = ssc.getNewInputStreamId()
// Keep track of the freshest rate for this stream using the rateEstimator
protected[streaming] val rateController: Option[RateController] = None
/** A human-readable name of this InputDStream */
private[streaming] def name: String = {
// e.g. FlumePollingDStream -> "Flume polling stream"
val newName = Utils.getFormattedClassName(this)
.replaceAll("InputDStream", "Stream")
.split("(?=[A-Z])")
.filter(_.nonEmpty)
.mkString(" ")
.toLowerCase
.capitalize
s"$newName [$id]"
}
/**
* The base scope associated with the operation that created this DStream.
*
* For InputDStreams, we use the name of this DStream as the scope name.
* If an outer scope is given, we assume that it includes an alternative name for this stream.
*/
protected[streaming] override val baseScope: Option[String] = {
val scopeName = Option(ssc.sc.getLocalProperty(SparkContext.RDD_SCOPE_KEY))
.map { json => RDDOperationScope.fromJson(json).name + s" [$id]" }
.getOrElse(name.toLowerCase)
Some(new RDDOperationScope(scopeName).toJson)
}
/**
* Checks whether the 'time' is valid wrt slideDuration for generating RDD.
* Additionally it also ensures valid times are in strictly increasing order.
* This ensures that InputDStream.compute() is called strictly on increasing
* times.
*/
override private[streaming] def isTimeValid(time: Time): Boolean = {
if (!super.isTimeValid(time)) {
false // Time not valid
} else {
// Time is valid, but check that it is more than lastValidTime
if (lastValidTime != null && time < lastValidTime) {
logWarning("isTimeValid called with " + time + " where as last valid time is " +
lastValidTime)
}
lastValidTime = time
true
}
}
// Input streams have no parent DStreams.
override def dependencies: List[DStream[_]] = List()
// Input streams always slide by the batch interval of the StreamingContext.
override def slideDuration: Duration = {
if (ssc == null) throw new Exception("ssc is null")
if (ssc.graph.batchDuration == null) throw new Exception("batchDuration is null")
ssc.graph.batchDuration
}
/** Method called to start receiving data. Subclasses must implement this method. */
def start()
/** Method called to stop receiving data. Subclasses must implement this method. */
def stop()
}
| chenc10/Spark-PAF | streaming/src/main/scala/org/apache/spark/streaming/dstream/InputDStream.scala | Scala | apache-2.0 | 4,593 |
package com.stulsoft.graphx
import org.apache.spark._
import org.apache.spark.graphx._
// To make some of the examples work we will also need RDD
import org.apache.spark.rdd.RDD
/**
* @see [[https://spark.apache.org/docs/latest/graphx-programming-guide.html#getting-started Getting Started]]
* @author Yuriy Stul.
*/
object PropertyGraph extends App {
  test1()

  def test1(): Unit = {
    println("==>test1")
    val sparkConf = new SparkConf().setAppName("PropertyGraph").setMaster("local[*]")
    val sc = new SparkContext(sparkConf)

    // Vertex table: (vertexId, (userName, role)).
    val users: RDD[(VertexId, (String, String))] =
      sc.parallelize(Array((3L, ("rxin", "student")), (7L, ("jgonzal", "postdoc")),
        (5L, ("franklin", "prof")), (2L, ("istoica", "prof"))))

    // Edge table: directed relationships between the user ids above.
    val relationships: RDD[Edge[String]] =
      sc.parallelize(Array(Edge(3L, 7L, "collab"), Edge(5L, 3L, "advisor"),
        Edge(2L, 5L, "colleague"), Edge(5L, 7L, "pi")))

    // Fallback attribute for any edge endpoint missing from `users`.
    val defaultUser = ("John Doe", "Missing")
    val graph = Graph(users, relationships, defaultUser)

    // Vertex counts by role.
    val c1 = graph.vertices.filter { case (_, (_, pos)) => pos == "postdoc" }.count
    val c11 = graph.vertices.filter { case (_, (_, pos)) => pos == "prof" }.count
    // Edge counts comparing source and destination ids — via a lambda...
    val c2 = graph.edges.filter { e => e.srcId > e.dstId }.count
    val c3 = graph.edges.filter { e => e.srcId < e.dstId }.count
    // ...and equivalently via pattern matching on Edge.
    val c4 = graph.edges.filter { case Edge(src, dst, _) => src > dst }.count
    val c5 = graph.edges.filter { case Edge(src, dst, _) => src < dst }.count
    println(s"c1=$c1, c11=$c11, c2=$c2, c3=$c3, c4=$c4, c5=$c5")

    // Render each triplet as a sentence, e.g. "rxin is the collab of jgonzal".
    val facts: RDD[String] =
      graph.triplets.map { t => s"${t.srcAttr._1} is the ${t.attr} of ${t.dstAttr._1}" }
    facts.collect().foreach(println)

    sc.stop()
    println("<==test1")
  }
}
| ysden123/poc | pspark/graphx/src/main/scala/com/stulsoft/graphx/PropertyGraph.scala | Scala | mit | 2,263 |
import sbt._
import sbt.Keys._
import sbtfilter.Plugin.FilterKeys._
import scoverage.ScoverageSbtPlugin._
// sbt 0.13-style build definition for the wandou-avpath project.
object Build extends sbt.Build {
lazy val avpath = Project("wandou-avpath", file("."))
.settings(basicSettings: _*)
.settings(Formatting.settings: _*)
.settings(Formatting.buildFileSettings: _*)
.settings(releaseSettings: _*)
.settings(sbtrelease.ReleasePlugin.releaseSettings: _*)
.settings(libraryDependencies ++= Dependencies.avro ++ Dependencies.test)
.settings(Packaging.settings: _*)
.settings(sbtavro.SbtAvro.avroSettings ++ avroSettingsTest: _*)
.settings(instrumentSettings: _*)
.settings(net.virtualvoid.sbt.graph.Plugin.graphSettings: _*)
// Common project coordinates, resolvers and compiler flags.
lazy val basicSettings = Seq(
organization := "com.wandoulabs.avro",
version := "0.1.3-SNAPSHOT",
scalaVersion := "2.11.6",
scalacOptions ++= Seq("-unchecked", "-deprecation"),
resolvers ++= Seq(
"Sonatype OSS Releases" at "https://oss.sonatype.org/content/repositories/releases",
"Sonatype OSS Snapshots" at "https://oss.sonatype.org/content/repositories/snapshots",
"Typesafe repo" at "http://repo.typesafe.com/typesafe/releases/"),
javacOptions ++= Seq("-source", "1.6", "-target", "1.6"))
// NOTE(review): avroSettings is not referenced by `avpath` above (only
// avroSettingsTest is) — presumably kept for reference; confirm before removal.
lazy val avroSettings = Seq(
sbtavro.SbtAvro.stringType in sbtavro.SbtAvro.avroConfig := "String",
sourceDirectory in sbtavro.SbtAvro.avroConfig <<= (resourceDirectory in Compile)(_ / "avsc"),
version in sbtavro.SbtAvro.avroConfig := "1.7.7")
// Todo rewrite sbt-avro to compile in Test phase.
lazy val avroSettingsTest = Seq(
sbtavro.SbtAvro.stringType in sbtavro.SbtAvro.avroConfig := "String",
sourceDirectory in sbtavro.SbtAvro.avroConfig <<= (resourceDirectory in Test)(_ / "avsc"),
javaSource in sbtavro.SbtAvro.avroConfig <<= (sourceManaged in Test)(_ / "java" / "compiled_avro"),
version in sbtavro.SbtAvro.avroConfig := "1.7.7")
// Sonatype publishing: snapshots vs. staging chosen from the version suffix.
lazy val releaseSettings = Seq(
publishTo := {
val nexus = "https://oss.sonatype.org/"
if (version.value.trim.endsWith("SNAPSHOT"))
Some("snapshots" at nexus + "content/repositories/snapshots")
else
Some("releases" at nexus + "service/local/staging/deploy/maven2")
},
publishMavenStyle := true,
publishArtifact in Test := false,
pomIncludeRepository := { (repo: MavenRepository) => false },
pomExtra := pomXml)
// Extra POM metadata required by Maven Central.
lazy val pomXml =
(<url>https://github.com/wandoulabs/avpath</url>
<licenses>
<license>
<name>Apache License 2.0</name>
<url>http://www.apache.org/licenses/</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<url>git@github.com:wandoulabs/avpath.git</url>
<connection>scm:git:git@github.com:wandoulabs/avpath.git</connection>
</scm>)
// Opt-out settings for modules that must never be published.
lazy val noPublishing = Seq(
publish := (),
publishLocal := (),
// required until these tickets are closed https://github.com/sbt/sbt-pgp/issues/42,
// https://github.com/sbt/sbt-pgp/issues/36
publishTo := None)
}
// Library dependency groups used by the build definition above.
object Dependencies {
val SLF4J_VERSION = "1.7.7"
// Logging stack: slf4j facade plus JCL/log4j bridges, logback backend.
val log = Seq(
"org.slf4j" % "slf4j-api" % SLF4J_VERSION,
"org.slf4j" % "jcl-over-slf4j" % SLF4J_VERSION,
"org.slf4j" % "log4j-over-slf4j" % SLF4J_VERSION,
"ch.qos.logback" % "logback-classic" % "1.1.2")
// Test-scoped dependencies only.
val test = Seq(
"org.scalamock" %% "scalamock-scalatest-support" % "3.2.1" % Test,
"org.scalatest" %% "scalatest" % "2.2.4" % Test)
val avro = Seq(
"org.apache.avro" % "avro" % "1.7.7")
val basic: Seq[ModuleID] = log ++ test ++ avro
val all = basic
}
// Scalariform source-formatting configuration, including formatting of the
// build definition itself (project/*.scala and *.sbt files).
object Formatting {
import com.typesafe.sbt.SbtScalariform
import com.typesafe.sbt.SbtScalariform.ScalariformKeys
import ScalariformKeys._
val BuildConfig = config("build") extend Compile
val BuildSbtConfig = config("buildsbt") extend Compile
// invoke: build:scalariformFormat
val buildFileSettings: Seq[Setting[_]] = SbtScalariform.noConfigScalariformSettings ++
inConfig(BuildConfig)(SbtScalariform.configScalariformSettings) ++
inConfig(BuildSbtConfig)(SbtScalariform.configScalariformSettings) ++ Seq(
scalaSource in BuildConfig := baseDirectory.value / "project",
scalaSource in BuildSbtConfig := baseDirectory.value,
includeFilter in (BuildConfig, format) := ("*.scala": FileFilter),
includeFilter in (BuildSbtConfig, format) := ("*.sbt": FileFilter),
// Formatting BuildConfig first forces the sbt-file formatting to run too.
format in BuildConfig := {
val x = (format in BuildSbtConfig).value
(format in BuildConfig).value
},
ScalariformKeys.preferences in BuildConfig := formattingPreferences,
ScalariformKeys.preferences in BuildSbtConfig := formattingPreferences)
// Formatting for regular Compile/Test sources.
val settings = SbtScalariform.scalariformSettings ++ Seq(
ScalariformKeys.preferences in Compile := formattingPreferences,
ScalariformKeys.preferences in Test := formattingPreferences)
val formattingPreferences = {
import scalariform.formatter.preferences._
FormattingPreferences()
.setPreference(RewriteArrowSymbols, false)
.setPreference(AlignParameters, true)
.setPreference(AlignSingleLineCaseStatements, true)
.setPreference(DoubleIndentClassDeclaration, true)
.setPreference(IndentSpaces, 2)
}
}
// sbt-native-packager setup: package as a plain Java application.
object Packaging {
import com.typesafe.sbt.SbtNativePackager._
import com.typesafe.sbt.packager.Keys._
import com.typesafe.sbt.packager.archetypes._
val settings = packagerSettings ++ deploymentSettings ++
packageArchetype.java_application ++ Seq(
name := "wandou-avpath",
NativePackagerKeys.packageName := "wandou-avpath")
}
| wandoulabs/avpath | project/Build.scala | Scala | apache-2.0 | 5,606 |
package org.labrad.types
import scala.collection.mutable
// fastparse grammars for LabRAD type tags, patterns and physical units.
object Parsers {
import fastparse.noApi._
import org.labrad.util.Parsing._
import org.labrad.util.Parsing.AllowWhitespace._
// NOTE(review): the replaceAll runs once, so it only removes non-nested
// bracketed comments; nested `{a{b}c}` would leave the outer braces — confirm
// whether nested comments are legal in tags.
private def stripComments(tag: String): String =
tag.replaceAll("""\\{[^\\{\\}]*\\}""", "") // remove bracketed comments
.split(":")(0) // strip off any trailing comments
def parsePattern(tag: String): Pattern = parseOrThrow(fullPattern, stripComments(tag))
def parseType(tag: String): Type = parseOrThrow(fullType, stripComments(tag))
def parseUnit(s: String): Seq[(String, Ratio)] = parseOrThrow(unitStr, s)
// ---- type grammar ----
val fullType = P( Whitespace ~ aType ~ Whitespace ~ End )
// A bare comma-separated sequence of types parses as a cluster; empty as TNone.
val aType: Parser[Type] =
P( errorType
| singleType.rep(sep = ",".?).map {
case Seq() => TNone
case Seq(t) => t
case ts => TCluster(ts: _*)
}
)
val noneType: Parser[Type] =
P( "_" ).map { _ => TNone }
val errorType: Parser[Type] =
P( "E" ~ singleType.? ).map { t => TError(t getOrElse TNone) }
val someType: Parser[Type] =
P( Token("b").map { _ => TBool }
| Token("i").map { _ => TInt }
// | "i8" ^^ { _ => TInt8 }
// | "i16" ^^ { _ => TInt16 }
// | "i32" ^^ { _ => TInt }
// | "i64" ^^ { _ => TInt64 }
| Token("w").map { _ => TUInt }
// | "u8" ^^ { _ => TUInt8 }
// | "u16" ^^ { _ => TUInt16 }
// | "u32" ^^ { _ => TUInt }
// | "u64" ^^ { _ => TUInt64 }
| Token("s").map { _ => TStr }
| Token("y").map { _ => TBytes }
| Token("t").map { _ => TTime }
| valueType
| complexType
| arrayType
| clusterType
)
val singleType: Parser[Type] = P( noneType | someType )
// Numeric value/complex types carry an optional bracketed unit string.
val valueType: Parser[Type] =
P( "v" ~ units.? ).map { u => TValue(u) }
val complexType: Parser[Type] =
P( "c" ~ units.? ).map { u => TComplex(u) }
val units: Parser[String] =
P( "[" ~ Re("""[^\\[\\]]*""").! ~ "]" )
// Arrays: '*' with an optional dimension count (default 1).
val arrayType: Parser[Type] =
P( "*" ~ number.? ~ singleType ).map { case (d, t) => TArr(t, d getOrElse 1) }
val number: Parser[Int] = P( Re("""\\d+""").! ).map { _.toInt }
val clusterType: Parser[Type] =
P( "(" ~ singleType.rep(sep = ",".?) ~ ")" ).map { ts => TCluster(ts: _*) }
// ---- pattern grammar (superset of the type grammar) ----
val fullPattern = P( Whitespace ~ aPattern ~ Whitespace ~ End )
// Top-level '|' alternatives become a PChoice.
val aPattern: Parser[Pattern] =
P( errorPattern
| nonemptyPattern.rep(sep = "|").map {
case Seq() => TNone
case Seq(p) => p
case ps => PChoice(ps: _*)
}
)
val nonemptyPattern: Parser[Pattern] =
P( singlePattern.rep(sep = ",".?, min = 1).map {
case Seq(p) => p
case ps => PCluster(ps: _*)
}
)
val errorPattern: Parser[Pattern] =
P( "E" ~ singlePattern.? ).map { p => PError(p getOrElse TNone) }
val somePattern: Parser[Pattern] =
P( Token("?").map { _ => PAny }
| valuePattern
| complexPattern
| arrayPattern
| expandoPattern
| clusterPattern
| choicePattern
| someType
)
val singlePattern: Parser[Pattern] = P( noneType | somePattern )
val valuePattern: Parser[Pattern] =
P( "v" ~ units.? ).map { u => PValue(u) }
val complexPattern: Parser[Pattern] =
P( "c" ~ units.? ).map { u => PComplex(u) }
val arrayPattern: Parser[Pattern] =
P( "*" ~ number.? ~ singlePattern ).map { case (d, t) => PArr(t, d getOrElse 1) }
// "(p...)" — a cluster of any arity whose elements all match p.
val expandoPattern: Parser[Pattern] =
P( "(" ~ somePattern ~ "...)" ).map { p => PExpando(p) }
val clusterPattern: Parser[Pattern] =
P( "(" ~ somePattern.rep(sep = ",".?) ~ ")" ).map { ps => PCluster(ps: _*) }
val choicePattern: Parser[Pattern] =
P( "<" ~ nonemptyPattern.rep(sep = "|") ~ ">" ).map { ps => PChoice(ps: _*) }
// ---- unit grammar: products/quotients of named terms with rational exponents ----
val unitStr: Parser[Seq[(String, Ratio)]] =
P( firstTerm ~ (divTerm | mulTerm).rep ).map {
case (name, exp, rest) => (name, exp) +: rest
}
// Allows a leading "1/..." for pure reciprocals.
val firstTerm = P( "1".? ~ divTerm | term )
val mulTerm = P( "*" ~ term )
// Division is encoded by negating the term's exponent.
val divTerm = P( "/" ~ term ).map { case (name, exp) => (name, -exp) }
val term: Parser[(String, Ratio)] =
P( termName.! ~ exponent.? ).map {
case (name, expOpt) => (name, expOpt.getOrElse(Ratio(1)))
}
val termName = P( Re("""[A-Za-z'"]+""") )
// "^" [-] n [ "/" d ] — a signed rational exponent.
val exponent: Parser[Ratio] =
P( "^" ~ "-".!.? ~ number ~ ("/" ~ number).? ).map {
case (None, n, None ) => Ratio( n)
case (None, n, Some(d)) => Ratio( n, d)
case (Some(_), n, None ) => Ratio(-n)
case (Some(_), n, Some(d)) => Ratio(-n, d)
}
}
sealed trait Pattern extends Serializable { self: Pattern =>
// Convenience overload: parse the tag into a Type and test acceptance.
def accepts(tag: String): Boolean = accepts(Type(tag))
def accepts(pat: Pattern): Boolean
// Resolve this pattern against a concrete type, if it matches.
def apply(typ: Type): Option[Type]
// Cartesian product of per-position alternatives (used to expand clusters).
private def combinations[T](seqs: Seq[Seq[T]]): Seq[Seq[T]] = seqs match {
case Seq() => Seq(Seq())
case Seq(heads, rest @ _*) =>
val tails = combinations(rest)
for (head <- heads; tail <- tails) yield head +: tail
}
// TODO: remove pattern expansion once clients support patterns
def expand: Seq[Pattern] = self match {
case PChoice(ps @ _*) => ps.flatMap(_.expand)
case PCluster(ps @ _*) => combinations(ps.map(_.expand)).map(PCluster(_: _*))
case PExpando(p) => Seq(PAny)
case PArr(p, depth) => p.expand.map(PArr(_, depth))
case p => Seq(p)
}
}
object Pattern {
// Parse a pattern tag string (comments stripped by the parser).
def apply(s: String): Pattern = Parsers.parsePattern(s)
// Flatten nested choices and drop any alternative already covered by an
// earlier one, collapsing a single survivor back to a plain pattern.
def reduce(pats: Pattern*): Pattern = {
def flattenChoices(p: Pattern): Seq[Pattern] = p match {
case PChoice(ps @ _*) => ps.flatMap(flattenChoices)
case p => Seq(p)
}
val pat = PChoice(pats: _*)
val buf = mutable.Buffer.empty[Pattern]
for (p <- flattenChoices(pat)) if (!buf.exists(_ accepts p)) buf += p
buf.toSeq match {
case Seq(p) => p
case ps => PChoice(ps: _*)
}
}
/**
* Determine whether a given pattern accepts another pattern.
*
* This handles correctly the case where either pattern is a PChoice, which
* the accepts methods on individual Pattern subclasses do not.
* TODO: refactor accepts methods on Pattern subclasses to use this, so they
* will work against all patterns, including PChoice.
*/
def accepts(a: Pattern, b: Pattern): Boolean = {
(a, b) match {
case (PChoice(as @ _*), PChoice(bs @ _*)) =>
bs.forall(b => as.exists(a => a.accepts(b)))
case (PChoice(as @ _*), b) =>
as.exists(a => a.accepts(b))
case (a, PChoice(bs @ _*)) =>
bs.forall(b => a.accepts(b))
case (a, b) =>
a.accepts(b)
}
}
}
/**
 * Pattern behavior shared by concrete (leaf) types: a concrete type accepts
 * exactly itself and resolves only itself through `apply`.
 */
trait ConcreteType { self: Type =>
  def accepts(typ: Pattern): Boolean = self == typ
  def apply(typ: Type): Option[Type] = Some(self).filter(_ == typ)
}
/** A concrete type; every Type is also a Pattern (matching itself). */
sealed trait Type extends Pattern {
  /** Indicates whether data of this type has fixed byte length */
  def fixedWidth: Boolean
  /** Byte width for data of this type */
  def dataWidth: Int
}
object Type {
  /** Parses a type tag string into a concrete Type. */
  def apply(s: String): Type = Parsers.parseType(s)
}
/** The wildcard pattern "?": accepts every pattern and passes every type through. */
case object PAny extends Pattern {
  def accepts(pat: Pattern): Boolean = true
  def apply(typ: Type): Option[Type] = Some(typ)
  override def toString = "?"
}
/** A choice among alternative patterns: "<p1|p2|...>". */
case class PChoice(choices: Pattern*) extends Pattern {
  def accepts(pat: Pattern) = pat match {
    // another choice is accepted only if every one of its alternatives is
    case PChoice(ps @ _*) => ps forall { p => choices.exists(_ accepts p) }
    case p => choices.exists(_ accepts p)
  }
  // Resolves against the first alternative that matches, in declaration order.
  def apply(typ: Type): Option[Type] = {
    choices.view.map(_(typ)).collectFirst { case Some(t) => t }
  }
  override def toString = s"<${choices.mkString("|")}>"
}
/** "(p...)": a cluster of arbitrary length whose elements all match `pat`. */
case class PExpando(pat: Pattern) extends Pattern {
  def accepts(typ: Pattern) = typ match {
    case PExpando(p) => pat accepts p
    case PCluster(elems @ _*) => elems forall { pat accepts _ }
    case _ => false
  }
  def apply(typ: Type): Option[Type] = typ match {
    case TCluster(elems @ _*) =>
      // NOTE(review): nonlocal `return` from inside the for/yield lambda;
      // bails out with None as soon as any element fails to resolve.
      val types = for (e <- elems) yield pat(e).getOrElse(return None)
      Some(TCluster(types: _*))
    case _ => None
  }
  override def toString = s"($pat...)"
}
/** A fixed-length cluster of element patterns: "(p1p2...)". */
class PCluster protected(val elems: Pattern*) extends Pattern {
  def accepts(typ: Pattern) = typ match {
    // same arity and element-wise acceptance
    case PCluster(others @ _*) if others.size == elems.size =>
      (elems zip others) forall { case (elem, other) => elem accepts other }
    case _ => false
  }
  def apply(typ: Type): Option[Type] = typ match {
    case TCluster(es @ _*) if es.length == elems.length =>
      // NOTE(review): nonlocal `return` from inside the yield lambda; short-
      // circuits to None if any element fails to resolve.
      val types = for ((elem, e) <- elems zip es) yield elem(e).getOrElse(return None)
      Some(TCluster(types: _*))
    case _ => None
  }
  override def toString = s"(${elems.mkString})"
  // Hand-written equality: any PCluster (including TCluster subclasses) with
  // equal element sequences compares equal.
  override def equals(other: Any): Boolean = other match {
    case p: PCluster => p.elems == elems
    case _ => false
  }
  override def hashCode: Int = elems.hashCode
}
object PCluster {
  def apply(elems: Pattern*) = new PCluster(elems: _*)
  /** Extractor yielding the cluster's element patterns. */
  def unapplySeq(pat: Pattern): Option[Seq[Pattern]] = pat match {
    case p: PCluster => Some(p.elems)
    case _ => None
  }
}
/** A concrete cluster type; width/offsets are derived lazily from the elements. */
case class TCluster(override val elems: Type*) extends PCluster(elems: _*) with Type {
  lazy val dataWidth = elems.map(_.dataWidth).sum
  lazy val fixedWidth = elems.forall(_.fixedWidth)
  // Cumulative byte offset of each element (running sum of widths, minus the
  // final total produced by scan).
  lazy val offsets = elems.map(_.dataWidth).scan(0)(_ + _).dropRight(1)
  def size = elems.size
  def apply(i: Int) = elems(i)
  def offset(i: Int) = offsets(i)
}
/** An array pattern of a given element pattern and dimensionality (`depth` >= 1). */
class PArr protected(val elem: Pattern, val depth: Int) extends Pattern {
  require(depth > 0)
  def accepts(typ: Pattern) = typ match {
    // arrays match only arrays of identical depth with matching elements
    case PArr(e, d) if d == depth => elem accepts e
    case _ => false
  }
  def apply(typ: Type): Option[Type] = typ match {
    case TArr(e, d) if d == depth => elem(e).map(TArr(_, d))
    case _ => None
  }
  // "*p" for 1-D, "*<n>p" for higher dimensions.
  override val toString = depth match {
    case 1 => "*" + elem.toString
    case d => "*" + d + elem.toString
  }
  override def equals(other: Any): Boolean = other match {
    case p: PArr => p.elem == elem && p.depth == depth
    case _ => false
  }
  override def hashCode: Int = (elem, depth).hashCode
}
object PArr {
  def apply(elem: Pattern, depth: Int = 1) = new PArr(elem, depth)
  /** Extractor yielding (element pattern, depth). */
  def unapply(pat: Pattern): Option[(Pattern, Int)] = pat match {
    case p: PArr => Some((p.elem, p.depth))
    case _ => None
  }
}
/** A concrete array type. */
case class TArr(override val elem: Type, override val depth: Int = 1) extends PArr(elem, depth) with Type {
  // NOTE(review): this override duplicates PArr.toString verbatim; it looks
  // redundant — confirm whether it can be removed.
  override val toString = depth match {
    case 1 => "*" + elem.toString
    case d => "*" + d + elem.toString
  }
  val fixedWidth = false
  // 4 bytes per dimension plus 4 — presumably per-dimension sizes plus a data
  // length/pointer in the wire format; confirm against the serializer.
  val dataWidth = 4 * depth + 4
  // Byte offset of a (flattened) element within the array body.
  def offset(index: Int) = index * elem.dataWidth
}
/** An error pattern "E<payload>" wrapping a payload pattern. */
class PError protected(val payload: Pattern) extends Pattern {
  def accepts(typ: Pattern) = typ match {
    case PError(p) => payload accepts p
    case _ => false
  }
  def apply(typ: Type): Option[Type] = typ match {
    case TError(p) => payload(p).map(TError(_))
    case _ => None
  }
  override val toString = "E" + payload.toString
  override def equals(other: Any): Boolean = other match {
    case p: PError => p.payload == payload
    case _ => false
  }
  override def hashCode: Int = payload.hashCode
}
object PError {
  // payload defaults to the empty type
  def apply(payload: Pattern = TNone) = new PError(payload)
  def unapply(pat: Pattern): Option[Pattern] = pat match {
    case p: PError => Some(p.payload)
    case _ => None
  }
}
/** A concrete error type carrying a payload type. */
case class TError(override val payload: Type) extends PError(payload) with Type {
  val fixedWidth = false
  // 4 + 4 presumably for an error code and message length, plus the payload
  // width — confirm against the wire format.
  val dataWidth = 4 + 4 + payload.dataWidth
}
/** The empty type "_": zero-width. */
case object TNone extends Type with ConcreteType {
  override val toString = "_"
  val fixedWidth = true
  val dataWidth = 0
}
/** Boolean "b": 1 byte. */
case object TBool extends Type with ConcreteType {
  override val toString = "b"
  val fixedWidth = true
  val dataWidth = 1
}
/** Signed integer "i": 4 bytes. */
case object TInt extends Type with ConcreteType {
  override val toString = "i"
  val fixedWidth = true
  val dataWidth = 4
}
//case object TInt8 extends Type with ConcreteType {
// override val toString = "i8"
// val fixedWidth = true
// val dataWidth = 1
//}
//
//case object TInt16 extends Type with ConcreteType {
// override val toString = "i16"
// val fixedWidth = true
// val dataWidth = 2
//}
//
//case object TInt64 extends Type with ConcreteType {
// override val toString = "i64"
// val fixedWidth = true
// val dataWidth = 8
//}
/** Unsigned integer (word) "w": 4 bytes. */
case object TUInt extends Type with ConcreteType {
  override val toString = "w"
  val fixedWidth = true
  val dataWidth = 4
}
//case object TUInt8 extends Type with ConcreteType {
// override val toString = "w8"
// val fixedWidth = true
// val dataWidth = 1
//}
//
//case object TUInt16 extends Type with ConcreteType {
// override val toString = "w16"
// val fixedWidth = true
// val dataWidth = 2
//}
//
//case object TUInt64 extends Type with ConcreteType {
// override val toString = "w64"
// val fixedWidth = true
// val dataWidth = 8
//}
/** String "s": variable width; dataWidth of 4 is presumably a length prefix. */
case object TStr extends Type with ConcreteType {
  override val toString = "s"
  val fixedWidth = false
  val dataWidth = 4
}
/** Byte string "y": variable width; dataWidth of 4 is presumably a length prefix. */
case object TBytes extends Type with ConcreteType {
  override val toString = "y"
  val fixedWidth = false
  val dataWidth = 4
}
/** Timestamp "t": fixed 16 bytes. */
case object TTime extends Type with ConcreteType {
  override val toString = "t"
  val fixedWidth = true
  val dataWidth = 16
}
/**
 * A real value pattern "v" with optional units "v[units]". Units constrain
 * acceptance only when both sides specify them; a missing unit on either side
 * matches anything.
 */
class PValue protected(val units: Option[String]) extends Pattern {
  def accepts(typ: Pattern) = typ match {
    case PValue(u) => (units, u) match {
      case (None, None) => true
      case (None, Some(u)) => true
      case (Some(units), None) => true
      case (Some(units), Some(u)) => units == u
    }
    case _ => false
  }
  // Resolution prefers this pattern's units; falls back to the type's own.
  def apply(typ: Type): Option[Type] = typ match {
    case TValue(unit) =>
      units match {
        case Some(u) => Some(TValue(u))
        case None => Some(TValue(unit))
      }
    case _ => None
  }
  override val toString = units match {
    case None => "v"
    case Some(units) => s"v[$units]"
  }
  override def equals(other: Any): Boolean = other match {
    case p: PValue => p.units == units
    case _ => false
  }
  override def hashCode: Int = units.hashCode
}
object PValue {
  // Option(units) maps a null units string to None.
  def apply(units: String) = new PValue(Option(units))
  // NOTE(review): overloading combined with a default argument can make call
  // resolution surprising; PValue() picks this overload with units = None.
  def apply(units: Option[String] = None) = new PValue(units)
  def unapply(pat: Pattern): Option[Option[String]] = pat match {
    case p: PValue => Some(p.units)
    case _ => None
  }
}
/** A concrete real-value type, optionally tagged with units; fixed 8 bytes. */
case class TValue(override val units: Option[String] = None) extends PValue(units) with Type {
  val fixedWidth = true
  val dataWidth = 8
}
object TValue {
  // Option(units) maps a null units string to None.
  def apply(units: String) = new TValue(Option(units))
}
/**
 * A complex value pattern "c" with optional units "c[units]". Unit matching
 * mirrors PValue: only two explicitly-specified units must agree.
 */
class PComplex protected(val units: Option[String]) extends Pattern {
  def accepts(typ: Pattern) = typ match {
    case PComplex(u) => (units, u) match {
      case (None, None) => true
      case (None, Some(u)) => true
      case (Some(units), None) => true
      case (Some(units), Some(u)) => units == u
    }
    case _ => false
  }
  // Resolution prefers this pattern's units; falls back to the type's own.
  def apply(typ: Type): Option[Type] = typ match {
    case TComplex(unit) =>
      units match {
        case Some(u) => Some(TComplex(u))
        case None => Some(TComplex(unit))
      }
    case _ => None
  }
  override val toString = units match {
    case None => "c"
    case Some(units) => s"c[$units]"
  }
  override def equals(other: Any): Boolean = other match {
    case p: PComplex => p.units == units
    case _ => false
  }
  override def hashCode: Int = units.hashCode
}
object PComplex {
  // Option(units) maps a null units string to None.
  def apply(units: String) = new PComplex(Option(units))
  // NOTE(review): overloading combined with a default argument can make call
  // resolution surprising; PComplex() picks this overload with units = None.
  def apply(units: Option[String] = None) = new PComplex(units)
  def unapply(pat: Pattern): Option[Option[String]] = pat match {
    case p: PComplex => Some(p.units)
    case _ => None
  }
}
/** A concrete complex-number type, optionally tagged with units; fixed 16 bytes. */
case class TComplex(override val units: Option[String] = None) extends PComplex(units) with Type {
  // `val` rather than `def` for consistency with the other concrete types
  // (e.g. TValue, TTime): these are constants and need not be recomputed.
  val fixedWidth = true
  val dataWidth = 16
}
object TComplex {
  // Option(units) maps a null units string to None.
  def apply(units: String) = new TComplex(Option(units))
}
| labrad/scalabrad | core/src/main/scala/org/labrad/types/Type.scala | Scala | mit | 15,895 |
package com.bitunified.cb.node
// A contiguous cluster of `size` nodes starting at index `start`; `end` is the
// last index (inclusive).
// NOTE(review): `end` is computed once at construction; later mutation of
// `size` or `start` (both are vars) will NOT update `end` — confirm intended.
class NodeCluster (sizes:Int,starts:Int){
  var size:Int=sizes
  var start:Int=starts
  var end:Int=(start+(size-1))
} | bitunified/node-platform | np-server/src/main/scala-2.11/com/bitunified/cb/node/NodeCluster.scala | Scala | mit | 151 |
package im.actor.server.cqrs
import akka.actor.{ ActorRef, ActorRefFactory, PoisonPill, Props }
import akka.pattern.ask
import akka.http.scaladsl.util.FastFuture
import akka.persistence.SnapshotMetadata
import akka.util.Timeout
import im.actor.config.ActorConfig
import im.actor.serialization.ActorSerializer
import scala.concurrent.Future
object ResumableProjection {
  // Registers serializer ids for the persisted event and snapshot state;
  // runs once when this object is first referenced.
  ActorSerializer.register(
    110001 → classOf[ResumableProjectionEvents.OffsetWritten],
    110002 → classOf[ResumableProjectionState]
  )
  def apply(id: String)(implicit factory: ActorRefFactory) = new ResumableProjection(id)
  // NOTE(review): maxHits does not appear to be referenced in this file; the
  // processor below uses a hard-coded snapshot threshold of 10 — confirm
  // which value is intended.
  private[cqrs] val maxHits = 20
}
/**
 * Event-sourced state for a resumable projection: tracks the latest stored
 * offset and the number of writes ("hits") since the last snapshot.
 */
private[cqrs] trait ResumableProjectionStateBase extends ProcessorState[ResumableProjectionState] {
  this: ResumableProjectionState ⇒
  import ResumableProjectionEvents._
  // NOTE(review): this match handles only OffsetWritten; any other Event will
  // throw a MatchError at runtime.
  override def updated(e: Event): ResumableProjectionState = e match {
    case ow: OffsetWritten ⇒
      copy(offset = ow.offset, hits = hits + 1)
  }
  // NOTE(review): likewise non-exhaustive — an unexpected snapshot type
  // would throw a MatchError rather than being ignored.
  override def withSnapshot(metadata: SnapshotMetadata, snapshot: Any): ResumableProjectionState = snapshot match {
    case s: ResumableProjectionState ⇒ s
  }
}
/**
 * Persists and retrieves the latest processed offset for the projection `id`,
 * backed by a persistent Processor actor. Call `stop()` when done; further use
 * afterwards throws IllegalStateException.
 */
final class ResumableProjection(id: String)(implicit factory: ActorRefFactory) {
  import factory.dispatcher
  // Internal actor protocol
  private case class SaveOffset(offset: Long)
  private object SaveOffsetAck
  private object GetOffset
  private case class GetOffsetResponse(offset: Long)
  private implicit val timeout = Timeout(ActorConfig.defaultTimeout)
  // NOTE(review): plain var, not volatile — visibility of `stop()` across
  // threads is not guaranteed; confirm single-threaded use.
  private var isStopped = false
  private val _actor = factory.actorOf(Props(new Processor[ResumableProjectionState] {
    import ResumableProjectionEvents._
    override def persistenceId: String = s"RProj_$id"
    override protected def getInitialState: ResumableProjectionState = ResumableProjectionState(0L, 0)
    override protected def handleQuery: PartialFunction[Any, Future[Any]] = {
      case GetOffset ⇒ FastFuture.successful(GetOffsetResponse(state.offset))
    }
    override protected def handleCommand: Receive = {
      case SaveOffset(offset) ⇒ persist(OffsetWritten(offset)) { e ⇒
        commit(e)
        // snapshot every ~10 writes and reset the hit counter
        if (state.hits > 10) {
          saveSnapshot(state)
          setState(state.copy(hits = 0))
        }
        sender() ! SaveOffsetAck
      }
    }
  }))
  // Guarded accessor: refuses to hand out the actor once stopped.
  private def actor: ActorRef =
    if (isStopped) throw new IllegalStateException("Projection is stopped")
    else _actor
  /** The most recently committed offset (0 if none was ever saved). */
  def latestOffset: Future[Long] = (actor ? GetOffset).mapTo[GetOffsetResponse] map (_.offset)
  /** Persists `offset`; the future completes once the actor acknowledges. */
  def saveOffset(offset: Long): Future[Unit] = (actor ? SaveOffset(offset)) map (_ ⇒ ())
  /** Stops the backing actor and marks this projection unusable. */
  def stop(): Unit = {
    actor ! PoisonPill
    isStopped = true
  }
} | actorapp/actor-platform | actor-server/actor-core/src/main/scala/im/actor/server/cqrs/ResumableProjection.scala | Scala | agpl-3.0 | 2,663 |
package org.psolution.altu.admin.api.swagger
import org.psolution.altu.admin.api.route.{InfoService, MenuService}
import scala.collection.immutable.HashSet
object SwaggerApiRegistry {
  /**
   * Returns the set of service classes to register with Swagger.
   *
   * Rewritten as a single set literal: the original built the set with a `var`
   * and repeated reassignment of an immutable HashSet, which is both verbose
   * and non-idiomatic. (Method name keeps the existing spelling for
   * compatibility with callers.)
   */
  def getRegistredInstances: Set[Class[_]] =
    Set(classOf[InfoService], classOf[MenuService])
}
| PublicSolution/altu-admin | api/src/main/scala/org/psolution/altu/admin/api/swagger/SwaggerApiRegistry.scala | Scala | apache-2.0 | 369 |
package com.airbnb.aerosolve.training
import java.io.{StringReader, BufferedWriter, BufferedReader, StringWriter}
import java.util
import com.airbnb.aerosolve.core.models.SplineModel.WeightSpline
import com.airbnb.aerosolve.core.models.{ModelFactory, SplineModel}
import com.airbnb.aerosolve.core.{Example, FeatureVector}
import com.airbnb.aerosolve.core.util.Spline
import java.util.{Scanner, HashMap}
import com.typesafe.config.ConfigFactory
import org.apache.spark.SparkContext
import org.junit.Test
import org.slf4j.LoggerFactory
import org.junit.Assert.assertEquals
import org.junit.Assert.assertTrue
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
class SplineTrainerTest {
val log = LoggerFactory.getLogger("SplineTrainerTest")
def makeExample(x : Double,
y : Double,
target : Double) : Example = {
val example = new Example
val item: FeatureVector = new FeatureVector
item.setFloatFeatures(new java.util.HashMap)
val floatFeatures = item.getFloatFeatures
floatFeatures.put("$rank", new java.util.HashMap)
floatFeatures.get("$rank").put("", target)
floatFeatures.put("loc", new java.util.HashMap)
val loc = floatFeatures.get("loc")
loc.put("x", x)
loc.put("y", y)
example.addToExample(item)
return example
}
def makeSplineModel() : SplineModel = {
val model: SplineModel = new SplineModel()
val weights = new java.util.HashMap[String, java.util.Map[String, WeightSpline]]()
val innerA = new java.util.HashMap[String, WeightSpline]
val innerB = new java.util.HashMap[String, WeightSpline]
val a = new WeightSpline(1.0f, 10.0f, 2)
val b = new WeightSpline(1.0f, 10.0f, 2)
a.splineWeights(0) = 1.0f
a.splineWeights(1) = 2.0f
b.splineWeights(0) = 1.0f
b.splineWeights(1) = 5.0f
weights.put("A", innerA)
weights.put("B", innerB)
innerA.put("a", a)
innerB.put("b", b)
model.setNumBins(2)
model.setWeightSpline(weights)
model.setOffset(0.5f)
model.setSlope(1.5f)
return model
}
  /**
   * Renders the trainer HOCON config. The %s/%f placeholders are filled by
   * `format` with (loss, extraArgs, dropout); "$rank" is a literal (this is a
   * plain triple-quoted string, not an s-interpolator).
   */
  def makeConfig(loss : String, dropout : Double, extraArgs : String) : String = {
    """
      |identity_transform {
      |  transform : list
      |  transforms: []
      |}
      |model_config {
      |  num_bags : 3
      |  loss : "%s"
      |  %s
      |  rank_key : "$rank"
      |  rank_threshold : 0.0
      |  learning_rate : 0.5
      |  num_bins : 16
      |  iterations : 10
      |  smoothing_tolerance : 0.1
      |  linfinity_threshold : 0.01
      |  linfinity_cap : 1.0
      |  dropout : %f
      |  min_count : 0
      |  subsample : 1.0
      |  context_transform : identity_transform
      |  item_transform : identity_transform
      |  combined_transform : identity_transform
      |}
    """.stripMargin.format(loss, extraArgs, dropout)
  }
  // Each test below runs the shared harness with a different loss function,
  // dropout rate, and extra config (margin / multiscale bin counts).
  @Test
  def testSplineTrainerLogistic : Unit = {
    testSplineTrainer("logistic", 0.0, "")
  }
  @Test
  def testSplineTrainerHinge : Unit = {
    testSplineTrainer("hinge", 0.0, "")
  }
  @Test
  def testSplineTrainerLogisticWithDropout : Unit = {
    testSplineTrainer("logistic", 0.2, "")
  }
  @Test
  def testSplineTrainerHingeWithDropout : Unit = {
    testSplineTrainer("hinge", 0.2, "")
  }
  @Test
  def testSplineTrainerHingeWithMargin : Unit = {
    testSplineTrainer("hinge", 0.0, "margin : 0.5")
  }
  @Test
  def testSplineTrainerLogisticMultiscale : Unit = {
    testSplineTrainer("logistic", 0.0, "multiscale : [5, 7, 16]")
  }
  @Test
  def testSplineTrainerHingeMultiscale : Unit = {
    testSplineTrainer("hinge", 0.0, "multiscale : [ 5, 7, 16]")
  }
  @Test
  def testSplineTrainerHingeMultiscaleLessBags : Unit = {
    testSplineTrainer("hinge", 0.2, "multiscale : [ 7, 16]")
  }
  /**
   * Shared harness: generates 200 labeled points from a fixed-seed polynomial
   * decision boundary, trains a SplineModel on a local SparkContext, then
   * checks (1) classification accuracy > 0.6, (2) an inside point scores
   * higher than an outside point, and (3) save/load round-trips scores.
   */
  def testSplineTrainer(loss : String, dropout : Double, extraArgs : String) = {
    val examples = ArrayBuffer[Example]()
    val label = ArrayBuffer[Double]()
    // fixed seed keeps the generated data (and the test) deterministic
    val rnd = new java.util.Random(1234)
    var numPos : Int = 0;
    for (i <- 0 until 200) {
      val x = 2.0 * rnd.nextDouble() - 1.0
      val y = 10.0 * (2.0 * rnd.nextDouble() - 1.0)
      // label +1 inside the curve poly < 1.0, -1 outside
      val poly = x * x + 0.1 * y * y + 0.1 * x + 0.2 * y - 0.1 + Math.sin(x)
      val rank = if (poly < 1.0) {
        1.0
      } else {
        -1.0
      }
      if (rank > 0) numPos = numPos + 1
      label += rank
      examples += makeExample(x, y, rank)
    }
    var sc = new SparkContext("local", "SplineTest")
    try {
      val config = ConfigFactory.parseString(makeConfig(loss, dropout, extraArgs))
      val input = sc.parallelize(examples)
      val model = SplineTrainer.train(sc, input, config, "model_config")
      // dump the learned splines for debugging
      val weights = model.getWeightSpline.asScala
      for (familyMap <- weights) {
        for (featureMap <- familyMap._2.asScala) {
          log.info(("family=%s,feature=%s,"
            + "minVal=%f, maxVal=%f, weights=%s")
                     .format(familyMap._1,
                             featureMap._1,
                             featureMap._2.spline.getMinVal,
                             featureMap._2.spline.getMaxVal,
                             featureMap._2.spline.toString
            )
          )
        }
      }
      // accuracy: score sign must agree with the label
      var numCorrect : Int = 0
      var i : Int = 0
      val labelArr = label.toArray
      for (ex <- examples) {
        val score = model.scoreItem(ex.example.get(0))
        if (score * labelArr(i) > 0) {
          numCorrect += 1
        }
        i += 1
      }
      val fracCorrect : Double = numCorrect * 1.0 / examples.length
      log.info("Num correct = %d, frac correct = %f, num pos = %d, num neg = %d"
        .format(numCorrect, fracCorrect, numPos, examples.length - numPos))
      assertTrue(fracCorrect > 0.6)
      // a point inside the boundary should outscore one far outside
      val inside = makeExample(0, 0.0, 0.0)
      val builder = new java.lang.StringBuilder()
      val insideScore = model.debugScoreItem(inside.example.get(0), builder)
      log.info(builder.toString)
      val outside = makeExample(10.0, 10.0, 0.0)
      val builder2 = new java.lang.StringBuilder()
      val outsideScore = model.debugScoreItem(outside.example.get(0), builder2)
      log.info(builder2.toString)
      assert(insideScore > outsideScore)
      // serialization round-trip: reloaded model must score identically (±0.01)
      val swriter = new StringWriter()
      val writer = new BufferedWriter(swriter)
      model.save(writer)
      writer.close()
      val str = swriter.toString()
      val sreader = new StringReader(str)
      val reader = new BufferedReader(sreader)
      log.info(str)
      val model2Opt = ModelFactory.createFromReader(reader)
      assertTrue(model2Opt.isPresent())
      val model2 = model2Opt.get()
      for (ex <- examples) {
        val score = model.scoreItem(ex.example.get(0))
        val score2 = model2.scoreItem(ex.example.get(0))
        assertEquals(score, score2, 0.01f)
      }
    } finally {
      sc.stop
      sc = null
      // To avoid Akka rebinding to the same port, since it doesn't unbind immediately on shutdown
      System.clearProperty("spark.master.port")
    }
  }
  /**
   * Verifies SplineModel.addSpline semantics: without overwrite an existing
   * feature keeps its spline ("A"/"a"); with overwrite it is replaced
   * ("B"/"b"); a brand-new family/feature is created ("C"/"c").
   */
  @Test
  def testAddSpline(): Unit = {
    val model = makeSplineModel()
    // add an existing feature without overwrite
    model.addSpline("A", "a", 0.0f, 1.0f, false)
    // add an existing feature with overwrite
    model.addSpline("B", "b", 0.0f, 1.0f, true)
    // add a new family with overwrite
    model.addSpline("C", "c", 0.0f, 1.0f, true)
    // add an existing
    val weights = model.getWeightSpline.asScala
    for (familyMap <- weights) {
      for (featureMap <- familyMap._2.asScala) {
        val family = familyMap._1
        val feature = featureMap._1
        val spline = featureMap._2.spline
        log.info(("family=%s,feature=%s,minVal=%f, maxVal=%f, weights=%s")
          .format(family, feature, spline.getMinVal, spline.getMaxVal, spline.toString))
        if (family.equals("A")) {
          // untouched: still the original [1, 10] spline with weights 1 -> 2
          assertTrue(feature.equals("a"))
          assertEquals(spline.getMaxVal, 10.0f, 0.01f)
          assertEquals(spline.getMinVal, 1.0f, 0.01f)
          assertEquals(spline.evaluate(1.0f), 1.0f, 0.01f)
          assertEquals(spline.evaluate(10.0f), 2.0f, 0.01f)
        } else if (family.equals("B")) {
          // overwritten: new [0, 1] range
          assertTrue(feature.equals("b"))
          assertEquals(spline.getMaxVal, 1.0f, 0.01f)
          assertEquals(spline.getMinVal, 0.0f, 0.01f)
        } else {
          // newly created family/feature
          assertTrue(family.equals("C"))
          assertTrue(feature.equals("c"))
          assertEquals(spline.getMaxVal, 1.0f, 0.01f)
          assertEquals(spline.getMinVal, 0.0f, 0.01f)
        }
      }
    }
  }
}
| zcqqq/aerosolve | training/src/test/scala/com/airbnb/aerosolve/training/SplineTrainerTest.scala | Scala | apache-2.0 | 8,562 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index.stats
import org.geotools.data.Query
import org.geotools.data.collection.ListFeatureCollection
import org.geotools.util.factory.Hints
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.index.TestGeoMesaDataStore
import org.locationtech.geomesa.index.index.z3.Z3Index
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.feature.simple.SimpleFeature
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class StatsBasedEstimatorTest extends Specification {
val sft = SimpleFeatureTypes.createType("test", "trackId:String:index=true,dtg:Date,*geom:Point:srid=4326")
val ds = new TestGeoMesaDataStore(true)
val features = Seq.tabulate(10) { i =>
ScalaSimpleFeature.create(sft, s"$i", s"track-$i", f"2018-01-01T$i%02d:00:00.000Z", s"POINT (4$i 55)")
}
step {
ds.createSchema(sft)
features.foreach(_.getUserData.put(Hints.USE_PROVIDED_FID, java.lang.Boolean.TRUE))
ds.getFeatureSource(sft.getTypeName).addFeatures(new ListFeatureCollection(sft, features.toArray[SimpleFeature]))
}
"StatsBasedEstimator" should {
"handle not null counts" in {
ds.stats.getCount(sft, ECQL.toFilter("trackId is not null")) must beSome(10L)
}
"select better query plans over not null" in {
val filter = ECQL.toFilter("NOT (trackId IS NULL) AND " +
"dtg > 2018-01-01T00:00:00+00:00 AND dtg < 2018-01-01T08:00:00+00:00 AND " +
"CONTAINS(POLYGON ((44 54, 44 56, 48 56, 48 54, 44 54)), geom) AND " +
"NOT (dtg IS NULL) AND " +
"INCLUDE")
val plans = ds.getQueryPlan(new Query(sft.getTypeName, filter, Array("trackId", "dtg")))
plans must haveLength(1)
plans.head.filter.index must beAnInstanceOf[Z3Index]
}
}
step {
ds.dispose()
}
}
| locationtech/geomesa | geomesa-index-api/src/test/scala/org/locationtech/geomesa/index/stats/StatsBasedEstimatorTest.scala | Scala | apache-2.0 | 2,443 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.ref
/**
 * A Scala wrapper around `java.lang.ref.SoftReference`, optionally registered
 * with a [[ReferenceQueue]].
 */
class SoftReference[+T <: AnyRef](value : T, queue : ReferenceQueue[T]) extends ReferenceWrapper[T] {
  /** Creates a soft reference that is not registered with any queue. */
  def this(value : T) = this(value, null)
  val underlying: java.lang.ref.SoftReference[_ <: T] =
    new SoftReferenceWithWrapper[T](value, queue, this)
}
/**
 * A companion object that implements an extractor for `SoftReference` values
 */
object SoftReference {
  /** Creates a `SoftReference` pointing to `value` */
  def apply[T <: AnyRef](value: T): SoftReference[T] = new SoftReference(value)
  /** Optionally returns the referenced value, or `None` if that value no longer exists */
  // `underlying.get` returns null once the referent has been cleared;
  // Option(...) maps that null to None.
  def unapply[T <: AnyRef](sr: SoftReference[T]): Option[T] = Option(sr.underlying.get)
}
// Private bridge: the actual java.lang.ref.SoftReference, carrying a
// back-pointer (`wrapper`) to the Scala-level SoftReference that owns it.
private class SoftReferenceWithWrapper[T <: AnyRef](value: T, queue: ReferenceQueue[T], val wrapper: SoftReference[T])
  extends java.lang.ref.SoftReference[T](value, if (queue == null) null else queue.underlying.asInstanceOf[java.lang.ref.ReferenceQueue[T]]) with ReferenceWithWrapper[T]
| lrytz/scala | src/library/scala/ref/SoftReference.scala | Scala | apache-2.0 | 1,287 |
package scalariform.formatter
import scalariform.parser._
import scalariform.formatter.preferences._
trait AnnotationFormatter { self: HasFormattingPreferences with TypeFormatter with ExprFormatter ⇒
  /**
   * Produces formatting instructions for an annotation: the annotation type is
   * kept compact against the preceding token, each argument list is formatted
   * in turn, and any trailing newline token is formatted compactly.
   */
  def format(annotation: Annotation)(implicit formatterState: FormatterState): FormatResult = {
    val Annotation(_, annotationType, argumentExprss, newlineOption) = annotation
    var result: FormatResult = NoFormatResult.before(annotationType.firstToken, Compact)
    result ++= format(annotationType)
    argumentExprss foreach { argumentExprs ⇒ result ++= format(argumentExprs)._1 }
    newlineOption foreach { newline ⇒ result = result.formatNewline(newline, Compact) } // TODO: rethink
    result
  }
}
| jkinkead/scalariform | scalariform/src/main/scala/scalariform/formatter/AnnotationFormatter.scala | Scala | mit | 789 |
package com.github.ldaniels528.trifecta.modules.kafka
import java.io.PrintStream
import java.text.SimpleDateFormat
import java.util.Date
import _root_.kafka.common.TopicAndPartition
import com.github.ldaniels528.commons.helpers.OptionHelper._
import com.github.ldaniels528.commons.helpers.ResourceHelper._
import com.github.ldaniels528.commons.helpers.StringHelper._
import com.github.ldaniels528.commons.helpers.TimeHelper.Implicits._
import com.github.ldaniels528.trifecta.command._
import com.github.ldaniels528.trifecta.io._
import com.github.ldaniels528.trifecta.io.kafka.KafkaMicroConsumer.{MessageData, ReplicaBroker, contentFilter}
import com.github.ldaniels528.trifecta.io.kafka.KafkaZkUtils.BrokerDetails
import com.github.ldaniels528.trifecta.io.kafka._
import com.github.ldaniels528.trifecta.io.zookeeper.ZKProxy
import com.github.ldaniels528.trifecta.messages.codec.avro.AvroCodec
import com.github.ldaniels528.trifecta.messages.codec.json.JsonTransCoding
import com.github.ldaniels528.trifecta.messages.codec.{MessageCodecFactory, MessageDecoder}
import com.github.ldaniels528.trifecta.messages.logic.{Condition, ConditionCompiler}
import com.github.ldaniels528.trifecta.messages.{BinaryMessage, KeyAndMessage, MessageInputSource, MessageOutputSource}
import com.github.ldaniels528.trifecta.modules.Module
import com.github.ldaniels528.trifecta.modules.ModuleHelper._
import com.github.ldaniels528.trifecta.modules.kafka.KafkaCliFacade._
import com.github.ldaniels528.trifecta.util.ParsingHelper._
import com.github.ldaniels528.trifecta.{TxConfig, TxRuntimeContext}
import scala.collection.concurrent.TrieMap
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent._
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.{Failure, Success, Try}
/**
* Apache Kafka Module
* @author lawrence.daniels@gmail.com
*/
class KafkaModule(config: TxConfig) extends Module {
private var zkProxy_? : Option[ZKProxy] = None
private val out: PrintStream = config.out
// incoming messages cache
private var incomingMessageCache = Map[TopicAndPartition, Inbound]()
private var lastInboundCheck: Long = _
// define the offset for message cursor navigation commands
private val navigableCursors = TrieMap[String, KafkaNavigableCursor]()
private val watchCursors = TrieMap[TopicAndGroup, KafkaWatchCursor]()
private var currentTopic: Option[String] = None
private var currentTopicAndGroup: Option[TopicAndGroup] = None
private var watching: Boolean = false
// create the facade
private val facade = new KafkaCliFacade(config)
/**
* Returns the list of brokers from Zookeeper
* @return the list of [[Broker]]s
*/
private lazy val brokers: Seq[Broker] = facade.brokers
def defaultFetchSize: Int = config.getOrElse("defaultFetchSize", "65536").toInt
def defaultFetchSize_=(sizeInBytes: Int): Option[AnyRef] = config.set("defaultFetchSize", sizeInBytes.toString)
// the bound commands
override def getCommands(implicit rt: TxRuntimeContext): Seq[Command] = Seq(
// connection-related commands
Command(this, "kconnect", connect, UnixLikeParams(Seq("host" -> false, "port" -> false)), help = "Establishes a connection to Zookeeper"),
Command(this, "ksandbox", sandBox, UnixLikeParams(), help = "Launches a Kafka Sandbox (local/embedded server instance)"),
// basic message creation & retrieval commands
Command(this, "kget", getMessage, UnixLikeParams(Seq("topic" -> false, "partition" -> false, "offset" -> false), Seq("-a" -> "avroCodec", "-f" -> "format", "-o" -> "outputSource", "-p" -> "partition", "-ts" -> "YYYY-MM-DDTHH:MM:SS")), help = "Retrieves the message at the specified offset for a given topic partition"),
Command(this, "kgetkey", getMessageKey, UnixLikeParams(Seq("topic" -> false, "partition" -> false, "offset" -> false), Seq("-f" -> "format", "-s" -> "fetchSize")), help = "Retrieves the key of the message at the specified offset for a given topic partition"),
Command(this, "kgetsize", getMessageSize, UnixLikeParams(Seq("topic" -> false, "partition" -> false, "offset" -> false), Seq("-s" -> "fetchSize")), help = "Retrieves the size of the message at the specified offset for a given topic partition"),
Command(this, "kput", publishMessage, UnixLikeParams(Seq("topic" -> false, "key" -> false, "message" -> true)), help = "Publishes a message to a topic"),
// consumer group-related commands
Command(this, "kcommit", commitOffset, UnixLikeParams(Seq("topic" -> false, "partition" -> false, "groupId" -> true, "offset" -> true), Seq("-m" -> "metadata")), help = "Commits the offset for a given topic and group"),
Command(this, "kconsumers", getConsumers, UnixLikeParams(Nil, Seq("-t" -> "topicPrefix", "-c" -> "consumerPrefix")), help = "Returns a list of the consumers from ZooKeeper"),
Command(this, "kfetch", fetchOffsets, UnixLikeParams(Seq("topic" -> false, "partition" -> false, "groupId" -> true)), help = "Retrieves the offset for a given topic and group"),
Command(this, "kreset", resetConsumerGroup, UnixLikeParams(Seq("topic" -> false, "groupId" -> false)), help = "Sets a consumer group ID to zero for all partitions"),
Command(this, "kwatch", watchConsumerGroup, UnixLikeParams(Seq("topic" -> false, "groupId" -> false), Seq("-a" -> "avroCodec")), help = "Creates a message watch cursor for a given topic"),
Command(this, "kwatchnext", watchNext, UnixLikeParams(Seq("topic" -> false, "groupId" -> false), Seq("-a" -> "avroCodec")), help = "Returns the next message from the watch cursor"),
Command(this, "kwatchstop", watchStop, UnixLikeParams(Seq("topic" -> false, "groupId" -> false)), help = "Stops watching a consumer for a given topic"),
// navigable cursor-related commands
Command(this, "kcursor", getNavigableCursor, UnixLikeParams(Nil, Seq("-t" -> "topicPrefix")), help = "Displays the message cursor(s)"),
Command(this, "kfirst", getFirstMessage, UnixLikeParams(Seq("topic" -> false, "partition" -> false), Seq("-a" -> "avroCodec", "-f" -> "format", "-o" -> "outputSource", "-p" -> "partition")), help = "Returns the first message for a given topic"),
Command(this, "klast", getLastMessage, UnixLikeParams(Seq("topic" -> false, "partition" -> false), Seq("-a" -> "avroCodec", "-f" -> "format", "-o" -> "outputSource", "-p" -> "partition")), help = "Returns the last message for a given topic"),
Command(this, "knext", getNextMessage, UnixLikeParams(Seq("delta" -> false), flags = Seq("-a" -> "avroCodec", "-f" -> "format", "-o" -> "outputSource")), help = "Attempts to retrieve the next message"),
Command(this, "kprev", getPreviousMessage, UnixLikeParams(Seq("delta" -> false), flags = Seq("-a" -> "avroCodec", "-f" -> "format", "-o" -> "outputSource")), help = "Attempts to retrieve the message at the previous offset"),
Command(this, "kswitch", switchCursor, UnixLikeParams(Seq("topic" -> true)), help = "Switches the currently active topic cursor"),
// query-related commands
Command(this, "copy", copyMessages, UnixLikeParams(Nil, Seq("-a" -> "avroSchema", "-i" -> "inputSource", "-o" -> "outputSource", "-n" -> "numRecordsToCopy")), help = "Copies messages from an input source to an output source"),
Command(this, "kcount", countMessages, UnixLikeParams(Seq("field" -> true, "operator" -> true, "value" -> true), Seq("-a" -> "avroCodec", "-f" -> "format", "-t" -> "topic")), help = "Counts the messages matching a given condition"),
Command(this, "kfind", findMessages, UnixLikeParams(Seq("field" -> true, "operator" -> true, "value" -> true), Seq("-a" -> "avroCodec", "-f" -> "format", "-o" -> "outputSource", "-t" -> "topic", "-n" -> "limit")), "Finds messages matching a given condition and exports them to a topic"),
Command(this, "kfindone", findOneMessage, UnixLikeParams(Seq("field" -> true, "operator" -> true, "value" -> true), Seq("-a" -> "avroCodec", "-f" -> "format", "-o" -> "outputSource", "-t" -> "topic")), "Returns the first occurrence of a message matching a given condition"),
Command(this, "kfindnext", findNextMessage, UnixLikeParams(Seq("field" -> true, "operator" -> true, "value" -> true), Seq("-a" -> "avroCodec", "-f" -> "format", "-o" -> "outputSource", "-p" -> "partition", "-t" -> "topic")), "Returns the first occurrence of a message matching a given condition"),
Command(this, "kgetminmax", getMessageMinMaxSize, UnixLikeParams(Seq("topic" -> false, "partition" -> false, "startOffset" -> true, "endOffset" -> true), Seq("-s" -> "fetchSize")), help = "Retrieves the smallest and largest message sizes for a range of offsets for a given partition"),
// topic/message information and statistics commands
Command(this, "kbrokers", getBrokers, UnixLikeParams(), help = "Returns a list of the brokers from ZooKeeper"),
Command(this, "kreplicas", getReplicas, UnixLikeParams(Seq("topic" -> true)), help = "Returns a list of the replicas for a topic"),
Command(this, "kfetchsize", fetchSizeGetOrSet, UnixLikeParams(Seq("fetchSize" -> false)), help = "Retrieves or sets the default fetch size for all Kafka queries"),
Command(this, "kinbound", inboundMessages, UnixLikeParams(Seq("topicPrefix" -> false), Seq("-w" -> "wait-time")), help = "Retrieves a list of topics with new messages (since last query)"),
Command(this, "kls", getTopics, UnixLikeParams(Seq("topicPrefix" -> false), Seq("-l" -> "listMode")), help = "Lists all existing topics"),
Command(this, "kstats", getStatistics, UnixLikeParams(Seq("topic" -> false, "beginPartition" -> false, "endPartition" -> false)), help = "Returns the partition details for a given topic")
)
/**
 * Returns a Kafka Topic input source for URLs of the form "topic:&lt;name&gt;"
 * @param url the given input URL (e.g. "topic:shocktrade.quotes.avro")
 * @return the option of a Kafka Topic input source; None if the URL lacks the "topic:" prefix
 */
override def getInputSource(url: String): Option[KafkaTopicMessageInputSource] = {
  // strip the "topic:" prefix and wrap the remaining topic name in an input source
  url.extractProperty("topic:") map (topicName => new KafkaTopicMessageInputSource(brokers, topicName))
}
/**
 * Returns a Kafka Topic output source for URLs of the form "topic:&lt;name&gt;"
 * @param url the given output URL (e.g. "topic:shocktrade.quotes.avro")
 * @return the option of a Kafka Topic output source; None if the URL lacks the "topic:" prefix
 */
override def getOutputSource(url: String): Option[KafkaTopicMessageOutputSource] = {
  // strip the "topic:" prefix and wrap the remaining topic name in an output source
  url.extractProperty("topic:") map (topicName => new KafkaTopicMessageOutputSource(brokers, topicName))
}
// the identifier by which this module is addressed on the command line
override def moduleName = "kafka"

// the display label for this module (identical to the module name)
override def moduleLabel = "kafka"
/**
 * Renders the shell prompt; prefers the watch-cursor rendering when there is no
 * navigable cursor, or when a watch is active and a watch cursor exists.
 * Falls back to "/" when neither cursor is available.
 */
override def prompt: String = {
  val cursorPrompt =
    if (navigableCursor.isEmpty || (watching && watchCursor.isDefined)) promptForWatchCursor
    else promptForNavigableCursor
  cursorPrompt getOrElse "/"
}
// renders the navigable cursor as "topic/partition:offset" for the shell prompt
private def promptForNavigableCursor: Option[String] = navigableCursor map (c => s"${c.topic}/${c.partition}:${c.offset}")

// renders the watch cursor as "[w]topic/partition:offset" for the shell prompt
private def promptForWatchCursor: Option[String] = watchCursor map (g => s"[w]${g.topic}/${g.partition}:${g.offset}")
/**
 * Releases the module's resources; any failure raised by the facade's shutdown
 * is deliberately swallowed (best-effort cleanup on exit).
 */
override def shutdown(): Unit = {
  Try(facade.shutdown())
  ()
}
// the URL prefixes this module can resolve via getInputSource/getOutputSource
override def supportedPrefixes: Seq[String] = Seq("topic")
/**
 * Returns the navigable cursor for the currently selected topic, if any
 * @return the cursor for the current topic partition
 */
private def navigableCursor: Option[KafkaNavigableCursor] =
  for {
    topic <- currentTopic
    cursor <- navigableCursors.get(topic)
  } yield cursor
/**
 * Returns the watch cursor for the currently selected topic/group pair, if any
 * @return the cursor for the current topic and consumer group
 */
private def watchCursor: Option[KafkaWatchCursor] =
  for {
    key <- currentTopicAndGroup
    cursor <- watchCursors.get(key)
  } yield cursor
/**
 * Commits the offset for a given topic and group ID
 * @param params either (groupId, offset) — topic/partition resolved from the navigable
 *               cursor — or (topic, partition, groupId, offset); "-m" supplies optional metadata
 * @example kcommit com.shocktrade.alerts 0 devc0 123678
 * @example kcommit devc0 123678
 */
def commitOffset(params: UnixLikeArgs) {
  // get the arguments (topic, partition, groupId and offset)
  val (topic, partition, groupId, offset) = params.args match {
    // 2-arg form requires an active navigable cursor for the topic/partition
    case aGroupId :: anOffset :: Nil => navigableCursor map (c => (c.topic, c.partition, aGroupId, parseOffset(anOffset))) getOrElse dieNoCursor
    case aTopic :: aPartition :: aGroupId :: anOffset :: Nil => (aTopic, parsePartition(aPartition), aGroupId, parseOffset(anOffset))
    case _ => dieSyntax(params)
  }

  // commit the offset (the "-m" flag carries optional commit metadata)
  facade.commitOffset(topic, partition, groupId, offset, params("-m"))
}
/**
 * Establishes a connection to Zookeeper, closing any previously open proxy first
 * @param params zero arguments (use the configured end-point) or a single connect string
 * @example kconnect
 * @example kconnect localhost
 * @example kconnect localhost:2181
 */
def connect(params: UnixLikeArgs) {
  // resolve the end-point: default from config, or the explicit connect string
  val endPoint = params.args match {
    case Nil => config.zooKeeperConnect
    case connectString :: Nil => connectString
    case _ => dieSyntax(params)
  }

  // tear down any existing connection before opening the new one
  zkProxy_?.foreach(_.close())
  zkProxy_? = Option(ZKProxy(endPoint))
}
/**
 * Copy messages from the specified input source to an output source
 * @return the I/O count
 * @example {{{ copy -i topic:greetings -o topic:greetings2 }}}
 * @example {{{ copy -i topic:shocktrade.keystats.avro -o file:json:/tmp/keystats.json -a file:avro/keyStatistics.avsc }}}
 * @example {{{ copy -i topic:shocktrade.keystats.avro -o es:/quotes/keystats/$symbol -a file:avro/keyStatistics.avsc }}}
 * @example {{{ copy -i topic:shocktrade.quotes.avro -o file:json:/tmp/quotes.json -a file:avro/quotes.avsc }}}
 * @example {{{ copy -i topic:quotes.avro -o es:/quotes/$exchange/$symbol -a file:avro/quotes.avsc }}}
 */
def copyMessages(params: UnixLikeArgs)(implicit rt: TxRuntimeContext): AsyncIO = {
  // resolve the input ("-i") and output ("-o") sources; both are mandatory
  val source = getInputSource(params) getOrElse die("No input source defined")
  val sink = getOutputSource(params) getOrElse die("No output source defined")

  // an optional decoder may be applied on write
  val decoder_? = getMessageDecoder(params)(rt.config)

  // stream the messages from source to sink
  copyOperation(source, sink, decoder_?)
}
/**
 * Copies messages from the input source to the output source, tracking the
 * read/write counts via the asynchronous I/O counter.
 * @param reader the source messages are read from (managed via `use`)
 * @param writer the sink messages are written to (managed via `use`)
 * @param decoder the optional message decoder applied on write
 * @return an asynchronous I/O result
 */
private def copyOperation(reader: MessageInputSource, writer: MessageOutputSource, decoder: Option[MessageDecoder[_]]) = {
  AsyncIO { counter =>
    reader use { in =>
      writer use { out =>
        // loop until the input source is exhausted (read returns None);
        // NOTE: use the managed handles (in/out) — the original bound them but
        // bypassed them by calling the outer reader/writer directly
        var found: Boolean = true
        while (found) {
          // read the record
          val data = in.read
          found = data.isDefined
          if (found) counter.updateReadCount(1)

          // write the record
          data.foreach { rec =>
            out.write(rec, decoder)
            counter.updateWriteCount(1)
          }
        }
      }
    }
  }
}
/**
 * Launches a Kafka Sandbox (local server instance) and connects the shell to it
 * @example ksandbox
 */
def sandBox(params: UnixLikeArgs): Unit = {
  // start (or obtain) the local sandbox instance
  val instance = KafkaSandbox()
  // reuse the connect command to point the module at the sandbox's ZooKeeper
  connect(UnixLikeArgs(Some("ksandbox"), List(instance.getConnectString)))
}
/**
 * Counts the messages matching a given condition [references cursor]
 * @param params the condition triplet (field, operator, value); "-t" optionally
 *               names the topic, otherwise the navigable cursor supplies it
 * @return an asynchronous I/O result carrying the count
 * @example kcount frequency >= 1200
 */
def countMessages(params: UnixLikeArgs)(implicit rt: TxRuntimeContext): AsyncIO = {
  // was a topic and/or Avro decoder specified?
  val topic_? = params("-t")

  // get the input topic and decoder from the cursor
  // (an explicit "-t" topic takes precedence over the cursor)
  val (topic, decoder) = {
    val specifiedDecoder = topic_?.map(topic => topic -> resolveDecoder(topic, params))
    val cursorDecoder = navigableCursor.map(c => c.topic -> getMessageDecoder(params)(config) ?? c.decoder)
    (specifiedDecoder ?? cursorDecoder) getOrElse dieNoCursor
  }

  // get the criteria
  val conditions = Seq(parseCondition(params, decoder))

  // perform the count
  facade.countMessages(topic, conditions, decoder)
}
/**
 * Returns the offsets for a given topic and group ID
 * @param params either (groupId) — topic/partition resolved from the navigable
 *               cursor — or (topic, partition, groupId)
 * @return the committed offset, if one exists
 * @example kfetch com.shocktrade.alerts 0 dev
 * @example kfetch dev
 */
def fetchOffsets(params: UnixLikeArgs): Option[Long] = {
  // get the arguments (topic, partition, groupId)
  val (topic, partition, groupId) = params.args match {
    // 1-arg form requires an active navigable cursor
    case aGroupId :: Nil => navigableCursor map (c => (c.topic, c.partition, aGroupId)) getOrElse dieNoCursor
    case aTopic :: aPartition :: aGroupId :: Nil => (aTopic, parsePartition(aPartition), aGroupId)
    case _ => dieSyntax(params)
  }

  // perform the action
  facade.fetchOffsets(topic, partition, groupId)
}
/**
 * Retrieves or sets the default fetch size for all Kafka queries
 * @param params an optional single argument: the new fetch size
 * @return the current fetch size (Int) when no argument is given, or Unit after
 *         setting — hence the widened AnyVal return type
 * @example kfetchsize
 * @example kfetchsize 65536
 */
def fetchSizeGetOrSet(params: UnixLikeArgs): AnyVal = {
  params.args.headOption match {
    case Some(fetchSize) => defaultFetchSize = parseInt("fetchSize", fetchSize); ()
    case None => defaultFetchSize
  }
}
/**
 * Returns the first message that corresponds to the given criteria
 * @param params the condition expression; "-t" optionally names the topic and
 *               "-a" an Avro decoder — otherwise the navigable cursor supplies them
 * @return a future of an optional binary-or-decoded message
 * @example kfindone volume > 1000000
 * @example kfindone volume > 1000000 -a file:avro/quotes.avsc
 * @example kfindone volume > 1000000 -t shocktrade.quotes.avro -a file:avro/quotes.avsc
 * @example kfindone lastTrade < 1 and volume > 1000000 -a file:avro/quotes.avsc
 */
def findOneMessage(params: UnixLikeArgs)(implicit rt: TxRuntimeContext): Future[Option[Either[Option[MessageData], Option[Any]]]] = {
  // was a topic and/or Avro decoder specified?
  val topic_? = params("-t")
  val avro_? = getMessageDecoder(params)(config)

  // get the topic and partition from the cursor
  // (an explicit "-t" topic takes precedence; otherwise a cursor is mandatory)
  val (topic, decoder_?) = {
    if (topic_?.isDefined) (topic_?.get, avro_?)
    else navigableCursor map (c => (c.topic, avro_? ?? c.decoder)) getOrElse dieNoCursor
  }

  // perform the search; on a hit, re-fetch the message via getMessage so the
  // cursor and output-source handling are applied
  KafkaMicroConsumer.findOne(topic, brokers, parseCondition(params, decoder_?)) map {
    _ map { case (partition, md) =>
      getMessage(topic, partition, md.offset, params)
    }
  }
}
/**
 * Returns the first next message that corresponds to the given criteria starting from the current position
 * within the current partition.
 * @param params the condition expression; "-t", "-p" and "-a" optionally override the
 *               topic, partition and decoder taken from the navigable cursor
 * @return a future of an optional binary-or-decoded message
 * @example kfindnext volume > 1000000
 * @example kfindnext volume > 1000000 -a file:avro/quotes.avsc
 * @example kfindnext volume > 1000000 -t shocktrade.quotes.avro -p 5 -a file:avro/quotes.avsc
 */
def findNextMessage(params: UnixLikeArgs)(implicit rt: TxRuntimeContext): Future[Option[Either[Option[MessageData], Option[Any]]]] = {
  // was a topic, partition and/or Avro decoder specified?
  val topic_? = params("-t")
  val partition_? = params("-p") map parsePartition
  val avro_? = getMessageDecoder(params)(config)

  // get the topic and partition from the cursor
  // (flags override cursor values; with no cursor, "-t" is mandatory and partition defaults to 0)
  val (topic, partition, decoder_?) = {
    navigableCursor.map(c => (topic_? getOrElse c.topic, partition_? getOrElse c.partition, avro_?))
      .getOrElse {
      topic_?.map(t => (t, partition_? getOrElse 0, avro_?)) getOrElse dieNoCursor
    }
  }

  // TODO need the recorded offsets as a starting point for finding the next record

  // get the criteria
  val condition = parseCondition(params, decoder_?)

  // perform the search
  KafkaMicroConsumer.findNext(TopicAndPartition(topic, partition), brokers, condition) map {
    _ map (md => getMessage(topic, partition, md.offset, params))
  }
}
/**
 * Finds messages that corresponds to the given criteria and exports them to a topic
 * @param params the condition expression; "-t" optionally names the source topic,
 *               "-a" a decoder, and "-o" an output URL (defaults to the console)
 * @return an asynchronous I/O result
 * @example kfind frequency > 5000 -o topic:highFrequency.quotes
 * @example kfind -t shocktrade.quotes.avro -a file:avro/quotes.avsc volume > 1000000 -o topic:hft.shocktrade.quotes.avro
 */
def findMessages(params: UnixLikeArgs)(implicit rt: TxRuntimeContext): AsyncIO = {
  // was a topic and/or Avro decoder specified?
  val topic_? = params("-t")
  val avro_? = getMessageDecoder(params)(config)

  // get the input topic and decoder from the cursor
  // (an explicit "-t" topic takes precedence over the cursor)
  val (topic, decoder_?) = {
    if (topic_?.isDefined) (topic_?.get, avro_?)
    else navigableCursor map (c => (c.topic, if (avro_?.isDefined) avro_? else c.decoder)) getOrElse dieNoCursor
  }

  // get the criteria
  val conditions = Seq(parseCondition(params, decoder_?))

  // get the output handler; without "-o", results are written to the console
  val outputHandler = params("-o") match {
    case Some(url) => rt.getOutputHandler(url) getOrElse die(s"Unrecognized output source '$url'")
    case None => new ConsoleMessageOutputSource()
  }

  // find the messages
  facade.findMessages(topic, decoder_?, conditions, outputHandler)
}
/**
 * Retrieves the list of Kafka brokers
 * @param params the given Unix-style arguments (unused)
 * @return the broker details as reported by ZooKeeper
 */
def getBrokers(params: UnixLikeArgs): Seq[BrokerDetails] = KafkaMicroConsumer.getBrokerList
/**
 * Retrieves the list of Kafka replicas for a given topic, ordered by partition
 * @param params exactly one argument: the topic name
 * @return the replica brokers sorted by partition
 */
def getReplicas(params: UnixLikeArgs): Seq[ReplicaBroker] = {
  // exactly one argument (the topic) is accepted
  val theTopic = params.args match {
    case aTopic :: Nil => aTopic
    case _ => dieSyntax(params)
  }
  KafkaMicroConsumer.getReplicas(theTopic, brokers) sortBy (_.partition)
}
/**
 * Retrieves the navigable message cursor(s), optionally filtered by topic prefix ("-t")
 * @example kcursor
 * @example kcursor shocktrade.keystats.avro
 */
def getNavigableCursor(params: UnixLikeArgs): Seq[KafkaNavigableCursor] = {
  // an optional topic prefix narrows the result set
  val topicPrefix_? = params("-t")
  val matching = navigableCursors.values filter (cursor => contentFilter(topicPrefix_?, cursor.topic))
  matching.toSeq
}
/**
 * Sets the navigable message cursor for the given topic.
 * A new cursor is only stored when messageData is defined; however, the current
 * topic is switched and watch mode is cleared unconditionally.
 * @param topic the given topic
 * @param partition the given partition
 * @param messageData the optional message whose offset/nextOffset seed the cursor
 * @param decoder the optional decoder to attach to the cursor
 */
private def setNavigableCursor(topic: String, partition: Int, messageData: Option[MessageData], decoder: Option[MessageDecoder[_]]) {
  messageData map (m => KafkaNavigableCursor(topic, partition, m.offset, m.nextOffset, decoder)) foreach (navigableCursors(topic) = _)
  currentTopic = Option(topic)
  watching = false
}
/**
 * Switches the currently active topic cursor; a no-op when the named topic has
 * no registered navigable cursor.
 * @example kswitch shocktrade.keystats.avro
 */
def switchCursor(params: UnixLikeArgs) {
  params.args.headOption foreach { topic =>
    // only switch when a cursor exists for the requested topic
    if (navigableCursors.contains(topic)) {
      currentTopic = Option(topic)
      watching = false
    }
  }
}
/**
 * Retrieves the fetch size (-s) from the given parameters
 * @param params the given Unix-style parameters
 * @return the parsed "-s" value, or the module-wide default fetch size
 */
private def getFetchSize(params: UnixLikeArgs): Int = {
  params("-s") match {
    case Some(size) => parseInt("fetchSize", size)
    case None => defaultFetchSize
  }
}
/**
 * Returns the first message for a given topic
 * @example kfirst
 * @example kfirst -p 5
 * @example kfirst com.shocktrade.quotes.csv 0
 */
def getFirstMessage(params: UnixLikeArgs)(implicit rt: TxRuntimeContext): Option[Either[Option[MessageData], Option[Any]]] = {
  // resolve the topic & partition from the arguments (or the cursor)
  val (topic, defaultPartition) = extractTopicAndPartition(params.args)

  // the "-p" flag overrides the partition
  val thePartition = params("-p") map parsePartition getOrElse defaultPartition

  // fetch the message at the partition's first offset, if one exists
  facade.getFirstOffset(topic, thePartition) map (getMessage(topic, thePartition, _, params))
}
/**
 * Returns the last message for a given topic (the message just before the end offset)
 * @example klast
 * @example klast -p 5
 * @example klast com.shocktrade.alerts 0
 */
def getLastMessage(params: UnixLikeArgs)(implicit rt: TxRuntimeContext): Option[Either[Option[MessageData], Option[Any]]] = {
  // resolve the topic & partition from the arguments (or the cursor)
  val (topic, defaultPartition) = extractTopicAndPartition(params.args)

  // the "-p" flag overrides the partition
  val thePartition = params("-p") map parsePartition getOrElse defaultPartition

  // the last readable message sits one before the end offset (clamped at zero)
  facade.getLastOffset(topic, thePartition) map (endOffset => getMessage(topic, thePartition, Math.max(0L, endOffset - 1), params))
}
/**
 * Returns the message for a given topic partition and offset
 * @example kget 3456
 * @example kget com.shocktrade.alerts 0 3456
 * @example kget -o es:/quotes/quote/AAPL
 */
def getMessage(params: UnixLikeArgs)(implicit rt: TxRuntimeContext): Either[Option[MessageData], Option[Any]] = {
  // resolve (topic, partition, offset) from the arguments (or the cursor)
  val (topic, defaultPartition, offset) = extractTopicPartitionAndOffset(params.args)

  // the "-p" flag overrides the partition
  val thePartition: Int = params("-p") map parsePartition getOrElse defaultPartition

  // delegate to the full overload, which also updates the cursor
  getMessage(topic, thePartition, offset, params)
}
/**
 * Retrieves either a binary or decoded message
 * @param topic the given topic
 * @param partition the given partition
 * @param offset the given offset
 * @param params the given Unix-style argument
 * @return either a binary or decoded message
 * Side effects: optionally writes the message to an "-o" output source, and always
 * updates the navigable cursor and current topic via setNavigableCursor.
 */
def getMessage(topic: String, partition: Int, offset: Long, params: UnixLikeArgs)(implicit rt: TxRuntimeContext): Either[Option[MessageData], Option[Any]] = {
  // requesting a message from an instance in time?
  // "-ts" accepts either an EPOC value or a yyyy-MM-dd'T'HH:mm:ss timestamp
  val instant: Option[Long] = params("-ts") map {
    case s if s.matches("\\\\d+") => s.toLong
    case s if s.matches("\\\\d{4}[-]\\\\d{2}-\\\\d{2}[T]\\\\d{2}[:]\\\\d{2}[:]\\\\d{2}") => new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss").parse(s).getTime
    case s => throw die(s"Illegal timestamp format '$s' - expected either EPOC (Long) or yyyy-MM-dd'T'HH:mm:ss format")
  }

  // retrieve the message; if a timestamp was given, resolve the offset nearest to it
  val messageData = new KafkaMicroConsumer(TopicAndPartition(topic, partition), brokers) use { consumer =>
    val myOffset: Long = instant flatMap (t => consumer.getOffsetsBefore(t).headOption) getOrElse offset
    consumer.fetch(myOffset)(getFetchSize(params)).headOption
  }

  // determine which decoder to use; either the user specified decoder, cursor's decoder or none
  val decoder: Option[MessageDecoder[_]] = resolveDecoder(topic, params)

  // if a decoder was found, use it to decode the message
  val decodedMessage = decoder.flatMap(decodeMessage(messageData, _))

  // write the message to an output source handler (only when "-o" resolves and a message exists)
  for {
    out <- getOutputSource(params)
    md <- messageData
  } {
    out use (_.write(KeyAndMessage(md.key, md.message), decoder))
  }

  // capture the message's offset and decoder
  setNavigableCursor(topic, partition, messageData, decoder)

  // return either a binary message or a decoded message
  if (decodedMessage.nonEmpty) Right(decodedMessage)
  else Left(messageData)
}
/**
 * Resolves the optional decoder URL (e.g. "-a" (Avro)) for the given topic. Note, if the url is "default"
 * then the default decoder (configured in $HOME/.trifecta/decoders) will be used
 * @param topic the given Kafka topic (e.g. "shocktrade.quotes.avro")
 * @param params the given [[UnixLikeArgs]]
 * @param rt the implicit [[TxRuntimeContext]]
 * @return an option of the [[MessageDecoder]]; the explicitly-requested decoder wins
 *         over the one remembered on the topic's navigable cursor
 */
private def resolveDecoder(topic: String, params: UnixLikeArgs)(implicit rt: TxRuntimeContext): Option[MessageDecoder[_]] = {
  // prefer the decoder named on the command line over the cursor's decoder
  getMessageDecoder(params)(config) ?? (navigableCursors.get(topic) flatMap (_.decoder))
}
/**
 * Decodes the given message; only JSON/Avro-capable decoders ([[JsonTransCoding]]) are accepted
 * @param messageData the given option of a message
 * @param aDecoder the given message decoder
 * @return the decoded message, or None when no message was supplied
 * @throws IllegalStateException when the decoder is unsupported or decoding fails
 */
private def decodeMessage(messageData: Option[BinaryMessage], aDecoder: MessageDecoder[_]) = {
  // only JSON/Avro decoders are supported
  val decoder: MessageDecoder[_] = aDecoder match {
    case codec: JsonTransCoding => codec
    case _ => throw new IllegalStateException("Only JSON/Avro decoding is supported")
  }

  // decode the payload, surfacing any decode failure as an IllegalStateException
  messageData map { md =>
    decoder.decode(md.message) match {
      case Success(value) => value
      case Failure(e) => throw new IllegalStateException(e.getMessage, e)
    }
  }
}
/**
 * Returns the message key for a given topic partition and offset
 * @param params (topic, partition, offset) or offset-only (cursor supplies the rest);
 *               "-f" names the key's value format (defaults to "bytes")
 * @return the decoded key, if a message exists at the offset
 * @example kget com.shocktrade.alerts 0 3456
 * @example kget 3456
 */
def getMessageKey(params: UnixLikeArgs): Option[Any] = {
  // get the arguments
  val (topic, partition, offset) = extractTopicPartitionAndOffset(params.args)

  // retrieve (or guess) the value's format
  val valueType = params("-f") getOrElse "bytes"

  // retrieve the key
  facade.getMessageKey(topic, partition, offset, getFetchSize(params)) map (decodeValue(_, valueType))
}
/**
 * Returns the size of the message for a given topic partition and offset
 * @example kgetsize com.shocktrade.alerts 0 5567
 * @example kgetsize 5567
 */
def getMessageSize(params: UnixLikeArgs): Option[Int] = {
  // resolve (topic, partition, offset) from the arguments (or the cursor)
  val (theTopic, thePartition, theOffset) = extractTopicPartitionAndOffset(params.args)

  // ask the facade for the message's size
  facade.getMessageSize(theTopic, thePartition, theOffset, getFetchSize(params))
}
/**
 * Returns the minimum and maximum message size for a given topic partition and offset range
 * @param params either (startOffset, endOffset) — topic/partition resolved from the
 *               navigable cursor — or (topic, partition, startOffset, endOffset)
 * @example kgetmaxsize com.shocktrade.alerts 0 2100 5567
 * @example kgetmaxsize 2100 5567
 */
def getMessageMinMaxSize(params: UnixLikeArgs): Seq[MessageMaxMin] = {
  // get the arguments (topic, partition, startOffset and endOffset)
  val (topic, partition, startOffset, endOffset) = params.args match {
    // 2-arg form requires an active navigable cursor
    case offset0 :: offset1 :: Nil => navigableCursor map (c => (c.topic, c.partition, parseOffset(offset0), parseOffset(offset1))) getOrElse dieNoCursor
    case aTopic :: aPartition :: aStartOffset :: anEndOffset :: Nil => (aTopic, parsePartition(aPartition), parseOffset(aStartOffset), parseOffset(anEndOffset))
    case _ => dieSyntax(params)
  }

  // perform the action
  facade.getMessageMinMaxSize(topic, partition, startOffset, endOffset, getFetchSize(params))
}
/**
 * Optionally returns the next message relative to the navigable cursor.
 * When the target offset runs past the partition's last offset, the cursor wraps
 * to the first offset of the next partition (modulo the partition count).
 * NOTE(review): the two branches yield different types (Option[...] vs Either[...]),
 * so the overall result type is widened — confirm callers tolerate this.
 * @example knext
 * @example knext +10
 */
def getNextMessage(params: UnixLikeArgs)(implicit rt: TxRuntimeContext) = {
  navigableCursor map { case KafkaNavigableCursor(topic, partition, offset, nextOffset, decoder) =>
    // an optional "+N" argument jumps forward by N from the next offset
    val delta = params.args.headOption map (parseDelta("position delta", _))
    val theOffset = delta map (nextOffset + _) getOrElse nextOffset
    val lastOffset = facade.getLastOffset(topic, partition)
    if (lastOffset.exists(theOffset > _)) {
      // past the end: wrap to the first offset of the following partition
      for {
        (min, max) <- facade.getTopicPartitionRange(topic)
        overflowPartition = (partition + 1) % (max + 1)
        overflowOffset <- facade.getFirstOffset(topic, overflowPartition)
      } yield getMessage(topic, overflowPartition, overflowOffset, params)
    }
    else getMessage(topic, partition, theOffset, params)
  }
}
/**
 * Optionally returns the previous message relative to the navigable cursor.
 * When the target offset falls before the partition's first offset, the cursor wraps
 * to the last offset of the preceding partition (the lowest partition wraps to the highest).
 * NOTE(review): as with getNextMessage, the two branches yield different types,
 * widening the result — confirm callers tolerate this.
 * @example kprev
 * @example kprev +10
 */
def getPreviousMessage(params: UnixLikeArgs)(implicit rt: TxRuntimeContext) = {
  navigableCursor map { case KafkaNavigableCursor(topic, partition, offset, nextOffset, decoder) =>
    // an optional "+N" argument jumps backward by N from the current offset (clamped at zero)
    val delta = params.args.headOption map (parseDelta("position delta", _))
    val theOffset = Math.max(0, delta map (offset - _) getOrElse (offset - 1))
    val firstOffset = facade.getFirstOffset(topic, partition)
    if (firstOffset.exists(theOffset < _)) {
      // before the start: wrap to the last offset of the preceding partition
      for {
        (min, max) <- facade.getTopicPartitionRange(topic)
        overflowPartition = if (partition <= min) max else (partition - 1) % (max + 1)
        overflowOffset <- facade.getLastOffset(topic, overflowPartition)
      } yield getMessage(topic, overflowPartition, overflowOffset, params)
    }
    else getMessage(topic, partition, theOffset, params)
  }
}
/**
 * Returns the number of available messages for a given topic.
 * Side effect: when no navigable cursor exists, one is seeded from the topic's
 * first (or last) offset and made current.
 * @param params zero args (cursor's topic), (topic), (topic, partition) or
 *               (topic, partitionA, partitionB)
 * @example kstats com.shocktrade.alerts 0 4
 * @example kstats com.shocktrade.alerts
 * @example kstats
 */
def getStatistics(params: UnixLikeArgs): Iterable[TopicOffsets] = {
  // interpret based on the input arguments
  val results = params.args match {
    // no arguments: use the cursor's topic across its full partition range
    case Nil =>
      val topic = navigableCursor map (_.topic) getOrElse dieNoCursor
      val partitions = KafkaMicroConsumer.getTopicList(brokers).filter(_.topic == topic).map(_.partitionId)
      if (partitions.nonEmpty) Option((topic, partitions.min, partitions.max)) else None

    // topic only: span its full partition range
    case topic :: Nil =>
      val partitions = KafkaMicroConsumer.getTopicList(brokers).filter(_.topic == topic).map(_.partitionId)
      if (partitions.nonEmpty) Option((topic, partitions.min, partitions.max)) else None

    // single partition: the range collapses to one partition
    case topic :: aPartition :: Nil =>
      Option((topic, parsePartition(aPartition), parsePartition(aPartition)))

    case topic :: partitionA :: partitionB :: Nil =>
      Option((topic, parsePartition(partitionA), parsePartition(partitionB)))

    case _ =>
      dieSyntax(params)
  }

  results match {
    case Some((topic, partition0, partition1)) =>
      // seed a navigable cursor if none is active, so subsequent commands have context
      if (navigableCursor.isEmpty) {
        facade.getFirstOffset(topic, partition0) ?? facade.getLastOffset(topic, partition0) map (offset =>
          KafkaNavigableCursor(topic, partition0, offset, offset + 1, None)) foreach (navigableCursors(topic) = _)
        currentTopic = Option(topic)
      }
      facade.getStatisticsData(topic, partition0, partition1)
    case _ => Nil
  }
}
/**
 * Returns a list of topics; "-l" requests the detailed listing and may itself
 * carry the topic prefix (falling back to the first positional argument)
 * @example kls com.shocktrade.alerts
 * @example kls
 */
def getTopics(params: UnixLikeArgs): Either[Seq[TopicItem], Seq[TopicItemCompact]] = {
  // detailed mode is signalled by the presence of "-l"
  val detailed = params.contains("-l")

  // the prefix may come from "-l"'s value or the first positional argument
  val prefix = params("-l") ?? params.args.headOption

  // get the raw topic data
  facade.getTopics(prefix, detailed)
}
/**
 * Retrieves a list of all topics with new messages (since last query)
 * @param params an optional topic prefix; "-w" sets the sampling wait time in seconds
 * @example kinbound com.shocktrade.quotes
 */
def inboundMessages(params: UnixLikeArgs): Iterable[Inbound] = {
  val prefix = params.args.headOption

  // get the optional wait time parameter
  val waitTime = params("-w") map (parseInt("wait time in seconds", _))

  // is this the initial call to this command?
  // NOTE(review): `.seconds` and `1.hour` here are presumably project helpers yielding
  // millisecond Longs (Thread.sleep takes millis and 1.hour is compared to a millis
  // delta) — confirm against the utils in scope
  if (waitTime.isDefined || incomingMessageCache.isEmpty || (System.currentTimeMillis() - lastInboundCheck) >= 1.hour) {
    out.println("Sampling data; this may take a few seconds...")

    // generate some data to fill the cache
    inboundMessageStatistics()

    // wait for the specified time in second
    Thread.sleep((waitTime getOrElse 3).seconds)
  }

  // capture the current time
  lastInboundCheck = System.currentTimeMillis()

  // get the inbound topic data
  inboundMessageStatistics(prefix)
}
/**
 * Generates an iteration of inbound message statistics
 * @param topicPrefix the given topic prefix (e.g. "myTopic123")
 * @return an iteration of inbound message statistics, descending by change count,
 *         with unchanged topics filtered out
 * Side effect: refreshes incomingMessageCache with the unfiltered samples.
 * NOTE(review): despite the parameter's name, the filter is an exact topic match
 * (t.topic == prefix), not a startsWith — confirm whether prefix matching was intended.
 */
private def inboundMessageStatistics(topicPrefix: Option[String] = None): Iterable[Inbound] = {
  // start by retrieving a list of all topics
  val topics = KafkaMicroConsumer.getTopicList(brokers)
    .filter(t => t.topic == topicPrefix.getOrElse(t.topic))
    .groupBy(_.topic)

  // generate the inbound data
  val inboundData = (topics flatMap { case (topic, details) =>
    // get the range of partitions for each topic
    val partitions = details.map(_.partitionId)
    val (beginPartition, endPartition) = (partitions.min, partitions.max)

    // retrieve the statistics for each topic
    facade.getStatisticsData(topic, beginPartition, endPartition) map { o =>
      // rate = offset delta / elapsed seconds since the previous sample (min 1s)
      val prevInbound = incomingMessageCache.get(TopicAndPartition(o.topic, o.partition))
      val lastCheckTime = prevInbound.map(_.lastCheckTime.getTime) getOrElse System.currentTimeMillis()
      val currentTime = System.currentTimeMillis()
      val elapsedTime = 1 + (currentTime - lastCheckTime) / 1000L
      val change = prevInbound map (o.endOffset - _.endOffset) getOrElse 0L
      val rate = BigDecimal(change.toDouble / elapsedTime).setScale(1, BigDecimal.RoundingMode.UP).toDouble
      Inbound(o.topic, o.partition, o.startOffset, o.endOffset, change, rate, new Date(currentTime))
    }
  }).toSeq

  // cache the unfiltered inbound data
  incomingMessageCache = incomingMessageCache ++ Map(inboundData map (i => TopicAndPartition(i.topic, i.partition) -> i): _*)

  // filter out the non-changed records
  inboundData.filterNot(_.change == 0) sortBy (-_.change)
}
/**
 * Publishes the given message to a given topic.
 * Keys/messages given in dotted-hex notation (e.g. "a0.00.11") are parsed as raw
 * bytes; anything else is encoded as text. The 1-arg form publishes with an empty key.
 * @example kput greetings a0.00.11.22.33.44.ef.11 "Hello World"
 * @example kput a0.00.11.22.33.44.ef.11 "Hello World" (references cursor)
 * @example kput "Hello World" (references cursor)
 */
def publishMessage(params: UnixLikeArgs): Unit = {
  // get the topic, key and message
  val (topic, key, message) = params.args match {
    // 1- and 2-arg forms require an active navigable cursor for the topic
    case aMessage :: Nil => navigableCursor map (c => (c.topic, "", aMessage)) getOrElse dieNoCursor
    case aKey :: aMessage :: Nil => navigableCursor map (c => (c.topic, aKey, aMessage)) getOrElse dieNoCursor
    case aTopic :: aKey :: aMessage :: Nil => (aTopic, aKey, aMessage)
    case _ => dieSyntax(params)
  }

  // convert the key and message to binary
  val keyBytes = if (isDottedHex(key)) parseDottedHex(key) else key.getBytes(config.encoding)
  val msgBytes = if (isDottedHex(message)) parseDottedHex(message) else message.getBytes(config.encoding)

  // publish the message
  facade.publishMessage(topic, keyBytes, msgBytes)
}
/**
 * Retrieves the list of Kafka consumers, optionally filtered by consumer ("-c")
 * and/or topic ("-t") prefixes
 * @example kconsumers
 * @example kconsumers -c ld_group
 * @example kconsumers -t shocktrade.keystats.avro
 */
def getConsumers(params: UnixLikeArgs): Future[List[ConsumerDelta]] = {
  // both prefixes are optional
  val consumerPrefix_? = params("-c")
  val topicPrefix_? = params("-t")

  // get the Kafka consumer groups
  facade.getConsumers(consumerPrefix_?, topicPrefix_?, config.isStormConsumers)
}
/**
 * Sets the offset of a consumer group ID to zero for all partitions
 * @param params (topic, groupId), (groupId) or nothing — missing values are
 *               resolved from the active cursor (see getTopicAndGroup)
 * @example kreset
 * @example kreset ld_group
 * @example kreset com.shocktrade.quotes.csv ld_group
 */
def resetConsumerGroup(params: UnixLikeArgs): Unit = {
  val TopicAndGroup(topic, groupId) = getTopicAndGroup(params)
  facade.resetConsumerGroup(topic, groupId)
}
/**
 * Watches a consumer group allowing the user to navigate one-direction (forward)
 * through new messages.
 * @return Left: the currently registered watch cursors (when called without arguments);
 *         Right: a future of the first watched message (when a topic/group is given)
 * @example kwatch ld_group
 * @example kwatch com.shocktrade.quotes.avro ld_group
 * @example kwatch com.shocktrade.quotes.avro ld_group -a file:avro/quotes.avsc
 */
def watchConsumerGroup(params: UnixLikeArgs): Either[Iterable[WatchCursorItem], Option[Future[Any]]] = {
  if (params.args.isEmpty) Left {
    // no arguments: list the active watch cursors (the map key is unused, hence `_`)
    watchCursors map { case (_, KafkaWatchCursor(topic, groupId, partition, offset, _, _, decoder)) =>
      WatchCursorItem(groupId, topic, partition, offset, decoder)
    }
  }
  else Right {
    val TopicAndGroup(topic, groupId) = getTopicAndGroup(params)
    for {
      (min, max) <- facade.getTopicPartitionRange(topic)
      consumer = KafkaMacroConsumer(zk.connectionString, groupId, Nil: _*)
      iterator = consumer.iterate(topic, (max - min) + 1)
      decoder = params("-a") map AvroCodec.resolve
    } yield {
      // register the new cursor, then immediately fetch the first message
      val cursor = KafkaWatchCursor(topic, groupId, partition = 0, offset = 0L, consumer, iterator, decoder)
      updateWatchCursor(cursor, cursor.partition, cursor.offset, autoClose = false)
      watchNext(params)
    }
  }
}
/**
 * Reads the next message from the watch cursor.
 * Returns a Future of the decoded message when a decoder ("-a" or the cursor's own)
 * is available, otherwise the binary message; None when the iterator is exhausted
 * or no watch cursor matches.
 * @example kwatchnext
 * @example kwatchnext ld_group
 * @example kwatchnext com.shocktrade.quotes.avro ld_group
 * @example kwatchnext com.shocktrade.quotes.avro ld_group -a file:avro/quotes.avsc
 */
def watchNext(params: UnixLikeArgs) = {
  // get the arguments
  val topicAndGroup = getTopicAndGroup(params)

  // was a decoder defined?
  val decoder_? : Option[MessageDecoder[_]] = params("-a") map AvroCodec.resolve

  Future {
    watchCursors.get(topicAndGroup) flatMap { cursor =>
      val iterator = cursor.iterator
      if (!iterator.hasNext) None
      else {
        val binaryMessage = Option(iterator.next())

        // advance the cursor to the consumed message's partition/offset
        updateWatchCursor(cursor, binaryMessage.map(_.partition).getOrElse(0), binaryMessage.map(_.offset).getOrElse(0L))

        // if a decoder is defined, use it to decode the message
        // (the command-line decoder takes precedence over the cursor's)
        val decodedMessage = (decoder_? ?? cursor.decoder).flatMap(decodeMessage(binaryMessage, _))

        // return either the decoded message or the binary message
        if (decodedMessage.nonEmpty) decodedMessage else binaryMessage
      }
    }
  }
}
/**
 * Stops watching a consumer for a given topic, closing the underlying consumer.
 * Any close failure is captured in the returned Try rather than thrown.
 * @example kwatchstop
 * @example kwatchstop ld_group
 * @example kwatchstop com.shocktrade.quotes.avro ld_group
 */
def watchStop(params: UnixLikeArgs): Try[Option[Any]] = {
  // get the arguments
  val key = getTopicAndGroup(params)

  // if there's already a registered topic & group, close it
  Try(watchCursors.remove(key) map (_.consumer.close()))
}
// resolves a message decoder from the "-a" (Avro) or "-f" (format) flag, "-a" taking precedence
private def getMessageDecoder(params: UnixLikeArgs)(implicit config: TxConfig): Option[MessageDecoder[_]] = {
  (params("-a") ?? params("-f")) flatMap (MessageCodecFactory.getDecoder(config, _))
}
/**
 * Resolves a topic/group pair from the arguments; missing values fall back to the
 * active cursors. NOTE: the order of the two single-argument cases is load-bearing —
 * a lone groupId resolves its topic from the watch cursor when one is defined,
 * otherwise from the navigable cursor.
 */
private def getTopicAndGroup(params: UnixLikeArgs): TopicAndGroup = {
  params.args match {
    case Nil => watchCursor map (c => TopicAndGroup(c.topic, c.groupId)) getOrElse dieNoCursor
    case groupId :: Nil if watchCursor.isDefined => watchCursor map (c => TopicAndGroup(c.topic, groupId)) getOrElse dieNoCursor
    case groupId :: Nil => navigableCursor map (c => TopicAndGroup(c.topic, groupId)) getOrElse dieNoCursor
    case topic :: groupId :: Nil => TopicAndGroup(topic, groupId)
    case _ => dieSyntax(params)
  }
}
/**
 * Registers (or re-registers) a watch cursor at the given partition/offset and
 * mirrors it as the navigable cursor for the same topic, switching the prompt
 * into watch mode.
 * @param c the cursor template supplying topic, group, consumer, iterator and decoder
 * @param partition the new partition position
 * @param offset the new offset position
 * @param autoClose when true, any previously registered cursor for the same
 *                  topic/group is closed first (best-effort)
 */
private def updateWatchCursor(c: KafkaWatchCursor, partition: Int, offset: Long, autoClose: Boolean = true) {
  val key = TopicAndGroup(c.topic, c.groupId)

  // if there's already a registered topic & group, close it
  if (autoClose) Try(watchCursors.remove(key) foreach (_.consumer.close()))

  // set the current topic & group and create a new cursor
  currentTopicAndGroup = Option(key)
  watchCursors(key) = KafkaWatchCursor(c.topic, c.groupId, partition, offset, c.consumer, c.iterator, c.decoder)

  // update the navigable cursor for the given topic
  navigableCursors(c.topic) = KafkaNavigableCursor(c.topic, partition, offset, offset + 1, c.decoder)
  currentTopic = Option(c.topic)
  watching = true
}
/**
 * Resolves the topic and partition to operate on.
 * @param args the positional arguments: <code>[topic [partition]]</code>
 * @return a tuple containing the topic and partition; defaults come from the navigable cursor
 */
private def extractTopicAndPartition(args: List[String]): (String, Int) = {
  args match {
    case Nil =>
      // no arguments: fall back to the active navigable cursor
      navigableCursor map (c => (c.topic, c.partition)) getOrElse dieNoCursor
    case topic :: Nil =>
      // topic only: assume partition 0
      (topic, 0)
    case topic :: partition :: Nil =>
      (topic, parsePartition(partition))
    case _ =>
      die("Invalid arguments")
  }
}
/**
 * Resolves the topic, partition and offset to operate on.
 * @param args the positional arguments: <code>[offset]</code> or <code>[topic partition offset]</code>
 * @return a tuple containing the topic, partition and offset; defaults come from the navigable cursor
 */
private def extractTopicPartitionAndOffset(args: List[String]): (String, Int, Long) = {
  args match {
    case Nil =>
      // no arguments: take everything from the active navigable cursor
      navigableCursor map (c => (c.topic, c.partition, c.offset)) getOrElse dieNoCursor
    case offset :: Nil =>
      // offset only: topic & partition come from the active navigable cursor
      navigableCursor map (c => (c.topic, c.partition, parseOffset(offset))) getOrElse dieNoCursor
    case topic :: partition :: offset :: Nil =>
      (topic, parsePartition(partition), parseOffset(offset))
    case _ =>
      die("Invalid arguments")
  }
}
/**
 * Parses a condition statement from the remaining arguments.
 * @param params the given [[UnixLikeArgs]]
 * @param decoder the optional [[MessageDecoder]]
 * @example lastTrade < 1 and volume > 1000000
 * @return the compiled [[Condition]]; dies with a syntax error if compilation yields nothing
 */
private def parseCondition(params: UnixLikeArgs, decoder: Option[MessageDecoder[_]]): Condition = {
  val compiled = ConditionCompiler.parseCondition(params.args.iterator, decoder)
  compiled getOrElse dieSyntax(params)
}
/**
 * Returns the connected Zookeeper proxy, lazily creating and caching
 * the connection on first use.
 * @return the connected Zookeeper proxy
 */
private implicit def zk: ZKProxy = {
  zkProxy_? getOrElse {
    // first use: connect and remember the proxy for subsequent calls
    val proxy = ZKProxy(config.zooKeeperConnect)
    zkProxy_? = Option(proxy)
    proxy
  }
}
}
| ldaniels528/trifecta | app-cli/src/main/scala/com/github/ldaniels528/trifecta/modules/kafka/KafkaModule.scala | Scala | apache-2.0 | 44,715 |
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.fixture
import org.scalatest._
import collection.immutable.TreeSet
import java.lang.reflect.{InvocationTargetException, Method, Modifier}
import org.scalatest.Suite.checkForPublicNoArgConstructor
import org.scalatest.Suite.TestMethodPrefix
import org.scalatest.Suite.IgnoreAnnotation
import org.scalatest.Suite.InformerInParens
import FixtureSuite.FixtureAndInformerInParens
import FixtureSuite.FixtureInParens
import FixtureSuite.testMethodTakesAFixtureAndInformer
import FixtureSuite.testMethodTakesAnInformer
import FixtureSuite.testMethodTakesAFixture
import FixtureSuite.simpleNameForTest
import FixtureSuite.argsArrayForTestName
import org.scalatest.events._
import Suite.anErrorThatShouldCauseAnAbort
/**
* <code>Suite</code> that can pass a fixture object into its tests.
*
* <p>
* This trait behaves similarly to trait <code>org.scalatest.Suite</code>, except that tests may have a fixture parameter. The type of the
* fixture parameter is defined by the abstract <code>FixtureParam</code> type, which is declared as a member of this trait.
* This trait also declares an abstract <code>withFixture</code> method. This <code>withFixture</code> method
* takes a <code>OneArgTest</code>, which is a nested trait defined as a member of this trait.
* <code>OneArgTest</code> has an <code>apply</code> method that takes a <code>FixtureParam</code>.
* This <code>apply</code> method is responsible for running a test.
* This trait's <code>runTest</code> method delegates the actual running of each test to <code>withFixture</code>, passing
* in the test code to run via the <code>OneArgTest</code> argument. The <code>withFixture</code> method (abstract in this trait) is responsible
* for creating the fixture argument and passing it to the test function.
* </p>
*
* <p>
* Subclasses of this trait must, therefore, do three things differently from a plain old <code>org.scalatest.Suite</code>:
* </p>
*
* <ol>
* <li>define the type of the fixture parameter by specifying type <code>FixtureParam</code></li>
* <li>define the <code>withFixture(OneArgTest)</code> method</li>
* <li>write test methods that take a fixture parameter (You can also define test methods that don't take a fixture parameter.)</li>
* </ol>
*
* <p>
* Here's an example:
* </p>
*
* <pre>
* import org.scalatest.fixture.FixtureSuite
* import java.io.FileReader
* import java.io.FileWriter
* import java.io.File
*
* class MySuite extends FixtureSuite {
*
* // 1. define type FixtureParam
* type FixtureParam = FileReader
*
* // 2. define the withFixture method
* def withFixture(test: OneArgTest) {
*
* val FileName = "TempFile.txt"
*
* // Set up the temp file needed by the test
* val writer = new FileWriter(FileName)
* try {
* writer.write("Hello, test!")
* }
* finally {
* writer.close()
* }
*
* // Create the reader needed by the test
* val reader = new FileReader(FileName)
*
* try {
* // Run the test using the temp file
* test(reader)
* }
* finally {
* // Close and delete the temp file
* reader.close()
* val file = new File(FileName)
* file.delete()
* }
* }
*
* // 3. write test methods that take a fixture parameter
* def testReadingFromTheTempFile(reader: FileReader) {
* var builder = new StringBuilder
* var c = reader.read()
* while (c != -1) {
* builder.append(c.toChar)
* c = reader.read()
* }
* assert(builder.toString === "Hello, test!")
* }
*
* def testFirstCharOfTheTempFile(reader: FileReader) {
* assert(reader.read() === 'H')
* }
*
* // (You can also write tests methods that don't take a fixture parameter.)
* def testWithoutAFixture() {
* without fixture {
* assert(1 + 1 === 2)
* }
* }
* }
* </pre>
*
* <p>
* If the fixture you want to pass into your tests consists of multiple objects, you will need to combine
* them into one object to use this trait. One good approach to passing multiple fixture objects is
* to encapsulate them in a tuple. Here's an example that takes the tuple approach:
* </p>
*
* <pre>
* import org.scalatest.fixture.FixtureSuite
* import scala.collection.mutable.ListBuffer
*
* class MySuite extends FixtureSuite {
*
* type FixtureParam = (StringBuilder, ListBuffer[String])
*
* def withFixture(test: OneArgTest) {
*
* // Create needed mutable objects
* val stringBuilder = new StringBuilder("ScalaTest is ")
* val listBuffer = new ListBuffer[String]
*
* // Invoke the test function, passing in the mutable objects
* test(stringBuilder, listBuffer)
* }
*
* def testEasy(fixture: Fixture) {
* val (builder, buffer) = fixture
* builder.append("easy!")
* assert(builder.toString === "ScalaTest is easy!")
* assert(buffer.isEmpty)
* buffer += "sweet"
* }
*
* def testFun(fixture: Fixture) {
* val (builder, buffer) = fixture
* builder.append("fun!")
* assert(builder.toString === "ScalaTest is fun!")
* assert(buffer.isEmpty)
* }
* }
* </pre>
*
* <p>
* When using a tuple to pass multiple fixture objects, it is usually helpful to give names to each
* individual object in the tuple with a pattern-match assignment, as is done at the beginning
* of each test method here with:
* </p>
*
* <pre>
* val (builder, buffer) = fixture
* </pre>
*
* <p>
* Another good approach to passing multiple fixture objects is
* to encapsulate them in a case class. Here's an example that takes the case class approach:
* </p>
*
* <pre>
* import org.scalatest.fixture.FixtureSuite
* import scala.collection.mutable.ListBuffer
*
* class MySuite extends FixtureSuite {
*
* case class FixtureHolder(builder: StringBuilder, buffer: ListBuffer[String])
*
* type FixtureParam = FixtureHolder
*
* def withFixture(test: OneArgTest) {
*
* // Create needed mutable objects
* val stringBuilder = new StringBuilder("ScalaTest is ")
* val listBuffer = new ListBuffer[String]
*
* // Invoke the test function, passing in the mutable objects
* test(FixtureHolder(stringBuilder, listBuffer))
* }
*
* def testEasy(fixture: Fixture) {
* import fixture._
* builder.append("easy!")
* assert(builder.toString === "ScalaTest is easy!")
* assert(buffer.isEmpty)
* buffer += "sweet"
* }
*
* def testFun(fixture: Fixture) {
* fixture.builder.append("fun!")
* assert(fixture.builder.toString === "ScalaTest is fun!")
* assert(fixture.buffer.isEmpty)
* }
* }
* </pre>
*
* <p>
* When using a case class to pass multiple fixture objects, it can be helpful to make the names of each
* individual object available as a single identifier with an import statement. This is the approach
* taken by the <code>testEasy</code> method in the previous example. Because it imports the members
* of the fixture object, the test method code can just use them as unqualified identifiers:
* </p>
*
* <pre>
* def testEasy(fixture: Fixture) {
* import fixture._
* builder.append("easy!")
* assert(builder.toString === "ScalaTest is easy!")
* assert(buffer.isEmpty)
* buffer += "sweet"
* }
* </pre>
*
* <p>
* Alternatively, you may sometimes prefer to qualify each use of a fixture object with the name
* of the fixture parameter. This approach, taken by the <code>testFun</code> method in the previous
* example, makes it more obvious which variables in your test method
* are part of the passed-in fixture:
* </p>
*
* <pre>
* def testFun(fixture: Fixture) {
* fixture.builder.append("fun!")
* assert(fixture.builder.toString === "ScalaTest is fun!")
* assert(fixture.buffer.isEmpty)
* }
* </pre>
*
* <p>
* <strong>Configuring fixtures and tests</strong>
* </p>
*
* <p>
* Sometimes you may want to write tests that are configurable. For example, you may want to write
* a suite of tests that each take an open temp file as a fixture, but whose file name is specified
* externally so that the file name can be changed from run to run. To accomplish this
* the <code>OneArgTest</code> trait has a <code>configMap</code>
* method, which will return a <code>Map[String, Any]</code> from which configuration information may be obtained.
* The <code>runTest</code> method of this trait will pass a <code>OneArgTest</code> to <code>withFixture</code>
* whose <code>configMap</code> method returns the <code>configMap</code> passed to <code>runTest</code>.
* Here's an example in which the name of a temp file is taken from the passed <code>configMap</code>:
* </p>
*
* <pre>
* import org.scalatest.fixture.FixtureSuite
* import java.io.FileReader
* import java.io.FileWriter
* import java.io.File
*
* class MySuite extends FixtureSuite {
*
* type FixtureParam = FileReader
*
* def withFixture(test: OneArgTest) {
*
* require(
* test.configMap.contains("TempFileName"),
* "This suite requires a TempFileName to be passed in the configMap"
* )
*
* // Grab the file name from the configMap
* val FileName = test.configMap("TempFileName")
*
* // Set up the temp file needed by the test
* val writer = new FileWriter(FileName)
* try {
* writer.write("Hello, test!")
* }
* finally {
* writer.close()
* }
*
* // Create the reader needed by the test
* val reader = new FileReader(FileName)
*
* try {
* // Run the test using the temp file
* test(reader)
* }
* finally {
* // Close and delete the temp file
* reader.close()
* val file = new File(FileName)
* file.delete()
* }
* }
*
* def testReadingFromTheTempFile(reader: FileReader) {
* var builder = new StringBuilder
* var c = reader.read()
* while (c != -1) {
* builder.append(c.toChar)
* c = reader.read()
* }
* assert(builder.toString === "Hello, test!")
* }
*
* def testFirstCharOfTheTempFile(reader: FileReader) {
* assert(reader.read() === 'H')
* }
* }
* </pre>
*
* <p>
* If you want to pass into each test the entire <code>configMap</code> that was passed to <code>runTest</code>, you
* can mix in trait <code>ConfigMapFixture</code>. See the <a href="ConfigMapFixture.html">documentation
* for <code>ConfigMapFixture</code></a> for the details, but here's a quick
* example of how it looks:
* </p>
*
* <pre>
* import org.scalatest.fixture.FixtureSuite
* import org.scalatest.fixture.ConfigMapFixture
*
* class MySuite extends FixtureSuite with ConfigMapFixture {
*
* def testHello(configMap: Map[String, Any]) {
* // Use the configMap passed to runTest in the test
* assert(configMap.contains("hello"))
* }
*
* def testWorld(configMap: Map[String, Any]) {
* assert(configMap.contains("world"))
* }
* }
* </pre>
*
* <p>
* Note: because a <code>FixtureSuite</code>'s test methods are invoked with reflection at runtime, there is no good way to
* create a <code>FixtureSuite</code> containing test methods that take different fixtures. If you find you need to do this,
* you may want to split your class into multiple <code>FixtureSuite</code>s, each of which contains test methods that take the
* common <code>Fixture</code> type defined in that class, or use a <a href="MultipleFixtureFunSuite.html"><code>MultipleFixtureFunSuite</code></a>.
* </p>
*
* @author Bill Venners
*/
trait FixtureSuite extends org.scalatest.Suite { thisSuite =>
/**
 * The type of the fixture parameter that can be passed into tests in this suite.
 * Concrete subclasses must define this type (e.g. a reader, a tuple, or a case class
 * bundling several fixture objects).
 */
protected type FixtureParam
/**
 * Trait whose instances encapsulate a test function that takes a fixture and config map.
 *
 * <p>
 * The <code>FixtureSuite</code> trait's implementation of <code>runTest</code> passes instances of this trait
 * to <code>FixtureSuite</code>'s <code>withFixture</code> method, such as:
 * </p>
 *
 * <pre>
 * def testSomething(fixture: Fixture) {
 *   // ...
 * }
 * def testSomethingElse(fixture: Fixture, info: Informer) {
 *   // ...
 * }
 * </pre>
 *
 * <p>
 * For more detail and examples, see the
 * <a href="FixtureSuite.html">documentation for trait <code>FixtureSuite</code></a>.
 * </p>
 */
protected trait OneArgTest extends (FixtureParam => Unit) {

  /**
   * The name of this test.
   */
  def name: String

  /**
   * Run the test, using the passed <code>FixtureParam</code>.
   */
  def apply(fixture: FixtureParam)

  /**
   * Return a <code>Map[String, Any]</code> containing objects that can be used
   * to configure the fixture and test.
   */
  def configMap: Map[String, Any]
}
/*
* Trait whose instances encapsulate a test function that takes no fixture and config map.
*
* <p>
* The <code>FixtureSuite</code> trait's implementation of <code>runTest</code> passes instances of this trait
* to <code>FixtureSuite</code>'s <code>withFixture</code> method for test methods that take no
* fixture, such as:
* </p>
*
* <pre>
* def testSomething() {
* // ...
* }
* def testSomethingElse(info: Informer) {
* // ...
* }
* </pre>
*
* <p>
* This trait enables <code>withFixture</code> method implementations to detect tests that
* don't require a fixture. If a fixture is expensive to create and cleanup, <code>withFixture</code>
* method implementations can opt to not create fixtures for tests that don't need them.
* For more detail and examples, see the
* <a href="FixtureSuite.html">documentation for trait <code>FixtureSuite</code></a>.
* </p>
*/
/* protected trait FixturelessTest extends OneArgTest with (() => Unit) {
/**
* Run the test that takes no <code>Fixture</code>.
*/
def apply()
} */
/**
 * Run the passed test function with a fixture created by this method.
 *
 * <p>
 * This method should create the fixture object needed by the tests of the
 * current suite, invoke the test function (passing in the fixture object),
 * and if needed, perform any clean up needed after the test completes.
 * For more detail and examples, see the <a href="FixtureSuite.html">main documentation for this trait</a>.
 * </p>
 *
 * @param test the <code>OneArgTest</code> to invoke, passing in a fixture
 */
protected def withFixture(test: OneArgTest)

// Adapts a (FixtureParam => Any) test function plus its config map to the OneArgTest interface
private[fixture] class TestFunAndConfigMap(val name: String, test: FixtureParam => Any, val configMap: Map[String, Any])
  extends OneArgTest {
  def apply(fixture: FixtureParam) {
    test(fixture)
  }
}

// Adapts a no-arg test function plus its config map to the NoArgTest interface,
// so withFixture implementations can detect tests that need no fixture
private[fixture] class FixturelessTestFunAndConfigMap(override val name: String, test: () => Any, override val configMap: Map[String, Any])
  extends NoArgTest {
  def apply() { test() }
}
// Overridden because tag discovery must go through getMethodForTestName, which
// understands the fixture-encoded test names produced by this trait's testNames.
override def tags: Map[String, Set[String]] = {
  /* AFTER THE DEPRECATION CYCLE FOR GROUPS TO TAGS (0.9.8), RESTRICT THE ANNOTATIONS
     TO THOSE THEMSELVES TAGGED WITH TagAnnotation, e.g.:
     for {
       a <- getMethodForTestName(testName).getDeclaredAnnotations
       annotationClass = a.annotationType
       if annotationClass.isAnnotationPresent(classOf[TagAnnotation])
     } yield annotationClass.getName
  */
  // the names of all annotations declared on the reflective test method
  def annotationNames(testName: String): Set[String] =
    Set() ++ (getMethodForTestName(testName).getDeclaredAnnotations map (_.annotationType.getName))
  val tagged =
    for {
      testName <- testNames
      names = annotationNames(testName)
      if !names.isEmpty
    } yield testName -> names
  Map() ++ tagged
}
// Discovers test method names via reflection. Each discovered method's parameter
// list is encoded into the returned name as a suffix ("(FixtureParam)",
// "(FixtureParam, Informer)" or "(Informer)") so runTest can later decode it.
// NOTE: the classification chain below is order-sensitive — takesInformer must be
// checked before takesOneParamOfAnyType, since an Informer-taking method also has
// exactly one parameter.
override def testNames: Set[String] = {
  // method takes exactly one parameter that is an Informer
  def takesInformer(m: Method) = {
    val paramTypes = m.getParameterTypes
    paramTypes.length == 1 && classOf[Informer].isAssignableFrom(paramTypes(0))
  }
  // method takes two parameters, the second of which is an Informer
  def takesTwoParamsOfTypesAnyAndInformer(m: Method) = {
    val paramTypes = m.getParameterTypes
    val hasTwoParams = paramTypes.length == 2
    hasTwoParams && classOf[Informer].isAssignableFrom(paramTypes(1))
  }
  def takesOneParamOfAnyType(m: Method) = m.getParameterTypes.length == 1
  def isTestMethod(m: Method) = {
    val isInstanceMethod = !Modifier.isStatic(m.getModifiers())
    // name must have at least 4 chars (minimum is "test")
    val simpleName = m.getName
    val firstFour = if (simpleName.length >= 4) simpleName.substring(0, 4) else ""
    val paramTypes = m.getParameterTypes
    val hasNoParams = paramTypes.length == 0
    // Discover testNames(Informer) because if we didn't it might be confusing when someone
    // actually wrote a testNames(Informer) method and it was silently ignored.
    val isTestNames = simpleName == "testNames"
    // Also, will discover both
    // testNames(Object) and testNames(Object, Informer). Reason is if I didn't discover these
    // it would likely just be silently ignored, and that might waste users' time
    isInstanceMethod && (firstFour == "test") && ((hasNoParams && !isTestNames) ||
        takesInformer(m) || takesOneParamOfAnyType(m) || takesTwoParamsOfTypesAnyAndInformer(m))
  }
  val testNameArray =
    for (m <- getClass.getMethods; if isTestMethod(m)) yield
      if (takesInformer(m))
        m.getName + InformerInParens
      else if (takesOneParamOfAnyType(m))
        m.getName + FixtureInParens
      else if (takesTwoParamsOfTypesAnyAndInformer(m))
        m.getName + FixtureAndInformerInParens
      else m.getName
  TreeSet[String]() ++ testNameArray
}
/**
 * Runs a single test by reflectively invoking the method named by <code>testName</code>,
 * routing fixture-taking tests through <code>withFixture</code> and firing the
 * appropriate <code>TestStarting</code>/<code>TestSucceeded</code>/<code>TestFailed</code>/<code>TestPending</code> events.
 */
protected override def runTest(testName: String, reporter: Reporter, stopper: Stopper, configMap: Map[String, Any], tracker: Tracker) {

  if (testName == null || reporter == null || stopper == null || configMap == null || tracker == null)
    throw new NullPointerException

  // NOTE(review): stopRequested appears unused in this method — confirm whether stop
  // handling was intended here (the base Suite implementations consult the stopper)
  val stopRequested = stopper
  val report = wrapReporterIfNecessary(reporter)
  val method = getMethodForTestName(testName)

  // Create a Rerunner if the Suite has a no-arg constructor
  val hasPublicNoArgConstructor = checkForPublicNoArgConstructor(getClass)

  val rerunnable =
    if (hasPublicNoArgConstructor)
      Some(new TestRerunner(getClass.getName, testName))
    else
      None

  val testStartTime = System.currentTimeMillis

  report(TestStarting(tracker.nextOrdinal(), thisSuite.suiteName, Some(thisSuite.getClass.getName), testName, None, rerunnable))

  try {
    if (testMethodTakesAFixtureAndInformer(testName) || testMethodTakesAFixture(testName)) {
      // fixture-taking test: build a function that invokes the method reflectively,
      // passing the fixture (and, if required, an Informer that fires InfoProvided events)
      val testFun: FixtureParam => Unit = {
        (fixture: FixtureParam) => {
          val anyRefFixture: AnyRef = fixture.asInstanceOf[AnyRef] // TODO zap this cast
          val args: Array[Object] =
            if (testMethodTakesAFixtureAndInformer(testName)) {
              val informer =
                new Informer {
                  def apply(message: String) {
                    if (message == null)
                      throw new NullPointerException
                    report(InfoProvided(tracker.nextOrdinal(), message, Some(NameInfo(thisSuite.suiteName, Some(thisSuite.getClass.getName), Some(testName)))))
                  }
                }
              Array(anyRefFixture, informer)
            }
            else
              Array(anyRefFixture)
          method.invoke(thisSuite, args: _*)
        }
      }
      withFixture(new TestFunAndConfigMap(testName, testFun, configMap))
    }
    else { // Test method does not take a fixture
      val testFun: () => Unit = {
        () => {
          val args: Array[Object] =
            if (testMethodTakesAnInformer(testName)) {
              val informer =
                new Informer {
                  def apply(message: String) {
                    if (message == null)
                      throw new NullPointerException
                    report(InfoProvided(tracker.nextOrdinal(), message, Some(NameInfo(thisSuite.suiteName, Some(thisSuite.getClass.getName), Some(testName)))))
                  }
                }
              Array(informer)
            }
            else
              Array()
          method.invoke(this, args: _*)
        }
      }
      withFixture(new FixturelessTestFunAndConfigMap(testName, testFun, configMap))
    }

    val duration = System.currentTimeMillis - testStartTime
    report(TestSucceeded(tracker.nextOrdinal(), thisSuite.suiteName, Some(thisSuite.getClass.getName), testName, Some(duration), None, rerunnable))
  }
  catch {
    // reflective invocation wraps the test's own exception in an InvocationTargetException
    case ite: InvocationTargetException =>
      val t = ite.getTargetException
      t match {
        case _: TestPendingException =>
          report(TestPending(tracker.nextOrdinal(), thisSuite.suiteName, Some(thisSuite.getClass.getName), testName))
        case e if !anErrorThatShouldCauseAnAbort(e) =>
          val duration = System.currentTimeMillis - testStartTime
          handleFailedTest(t, hasPublicNoArgConstructor, testName, rerunnable, report, tracker, duration)
        case e => throw e
      }
    case e if !anErrorThatShouldCauseAnAbort(e) =>
      val duration = System.currentTimeMillis - testStartTime
      handleFailedTest(e, hasPublicNoArgConstructor, testName, rerunnable, report, tracker, duration)
    case e => throw e
  }
}
// TODO: This is identical with the one in Suite. Factor it out to an object somewhere.
// Fires a TestFailed event for the given throwable, falling back to toString
// when the throwable carries no message.
private def handleFailedTest(throwable: Throwable, hasPublicNoArgConstructor: Boolean, testName: String,
    rerunnable: Option[Rerunner], report: Reporter, tracker: Tracker, duration: Long) {

  val message = Option(throwable.getMessage) getOrElse throwable.toString

  report(TestFailed(tracker.nextOrdinal(), message, thisSuite.suiteName, Some(thisSuite.getClass.getName), testName, Some(throwable), Some(duration), None, rerunnable))
}
// Resolves the reflective Method for an encoded test name. The encoded suffix
// determines which overload (by parameter list) is searched for; the branch order
// mirrors the encoding performed by testNames.
private def getMethodForTestName(testName: String) = {
  val candidates = getClass.getMethods.filter(_.getName == simpleNameForTest(testName))
  val located =
    if (testMethodTakesAFixtureAndInformer(testName))
      // (FixtureParam, Informer): two parameters, the second an Informer
      candidates find { m =>
        val paramTypes = m.getParameterTypes
        paramTypes.length == 2 && paramTypes(1) == classOf[Informer]
      }
    else if (testMethodTakesAnInformer(testName))
      // (Informer): exactly one parameter of type Informer
      candidates find { m =>
        val paramTypes = m.getParameterTypes
        paramTypes.length == 1 && paramTypes(0) == classOf[Informer]
      }
    else if (testMethodTakesAFixture(testName))
      // (FixtureParam): exactly one parameter of any type
      candidates find (_.getParameterTypes.length == 1)
    else
      // no encoded suffix: a no-arg test method
      candidates find (_.getParameterTypes.length == 0)
  located getOrElse {
    throw new IllegalArgumentException(Resources("testNotFound", testName))
  }
}
/*
/*
* Object that encapsulates a test function, which does not take a fixture,
* and a config map.
*
* <p>
* The <code>FixtureSuite</code> trait's implementation of <code>runTest</code> passes instances of this trait
* to <code>FixtureSuite</code>'s <code>withFixture</code> method for tests that do not require a fixture to
* be passed. For more detail and examples, see the
* <a href="FixtureSuite.html">documentation for trait <code>FixtureSuite</code></a>.
* </p>
*/
protected trait NoArgTestFunction extends (FixtureParam => Any) {
/**
* Run the test, ignoring the passed <code>Fixture</code>.
*
* <p>
* This traits implementation of this method invokes the overloaded form
* of <code>apply</code> that takes no parameters.
* </p>
*/
final def apply(fixture: Fixture): Any = {
apply()
}
/**
* Run the test without a <code>Fixture</code>.
*/
def apply()
}
protected class WithoutWord {
def fixture(fun: => Any): NoArgTestFunction = {
new NoArgTestFunction {
def apply() { fun }
}
}
}
protected def without = new WithoutWord */
}
private object FixtureSuite {

  // Suffixes appended to a test method's simple name to encode its parameter list
  val FixtureAndInformerInParens = "(FixtureParam, Informer)"
  val FixtureInParens = "(FixtureParam)"

  private def testMethodTakesAFixtureAndInformer(testName: String) = testName.endsWith(FixtureAndInformerInParens)
  private def testMethodTakesAnInformer(testName: String) = testName.endsWith(InformerInParens)
  private def testMethodTakesAFixture(testName: String) = testName.endsWith(FixtureInParens)

  // Strips the encoded parameter-list suffix, recovering the reflective method name
  private def simpleNameForTest(testName: String) =
    if (testName.endsWith(FixtureAndInformerInParens))
      testName.substring(0, testName.length - FixtureAndInformerInParens.length)
    else if (testName.endsWith(FixtureInParens))
      testName.substring(0, testName.length - FixtureInParens.length)
    else if (testName.endsWith(InformerInParens))
      testName.substring(0, testName.length - InformerInParens.length)
    else
      testName

  // NOTE(review): for fixture-only and no-suffix test names this falls through to
  // Array(classOf[Informer]), which looks inconsistent with the suffix scheme above —
  // confirm this helper is only ever invoked for informer-taking tests
  private def argsArrayForTestName(testName: String): Array[Class[_]] =
    if (testMethodTakesAFixtureAndInformer(testName))
      Array(classOf[Object], classOf[Informer])
    else
      Array(classOf[Informer])
}
| kevinwright/scalatest | src/main/scala/org/scalatest/fixture/FixtureSuite.scala | Scala | apache-2.0 | 26,330 |
/*
* Copyright (C) 2009-2014 Typesafe Inc. <http://www.typesafe.com>
*/
package play.core.server
import com.google.common.io.Files
import com.typesafe.config.ConfigException
import java.io.File
import java.nio.charset.Charset
import java.util.Properties
import org.specs2.mutable.Specification
import play.api.{ Mode, Play, PlayException }
import play.core.ApplicationProvider
object ServerStartSpec extends Specification {
sequential
/**
 * Runs `block` with a freshly created temporary directory, recursively deleting
 * the directory afterwards — even when the block throws.
 * @param block the code to run against the temp directory
 * @return the value produced by `block`
 */
def withTempDir[T](block: File => T) = {
  val temp = Files.createTempDir()
  try {
    block(temp)
  } finally {
    // depth-first delete; guard against listFiles() returning null, which the
    // java.io.File API documents can happen on an I/O error
    def rm(file: File): Unit = {
      if (file.isDirectory) Option(file.listFiles()).getOrElse(Array.empty[File]).foreach(rm)
      file.delete()
    }
    rm(temp)
  }
}
// Thrown by FakeServerProcess.exit so tests can observe exit requests without terminating the JVM
case class ExitException(message: String, cause: Option[Throwable] = None, returnCode: Int = -1) extends Exception(s"Exit with $message, $cause, $returnCode", cause.orNull)
// Evaluates `f`, capturing a server-start failure as Left(message) and success as Right(result)
def startResult[A](f: => A): Either[String, A] =
  try {
    Right(f)
  } catch {
    case ServerStartException(message, _) => Left(message)
    case e @ (_: PlayException | _: ConfigException) => Left(e.getMessage)
  }
// Evaluates `f`, capturing a simulated process exit as Left(message) and success as Right(result)
def exitResult[A](f: => A): Either[String, A] =
  try {
    Right(f)
  } catch {
    case ExitException(message, _, _) => Left(message)
  }
// ServerStart stub whose application provider is faked (the provider never serves requests)
class FakeServerStart(val defaultServerProvider: ServerProvider) extends ServerStart {
  override protected def createApplicationProvider(config: ServerConfig): ApplicationProvider = {
    new FakeApplicationProvider(config.rootDir)
  }
}
/**
 * A mocked ServerProcess: records shutdown hooks for later invocation and
 * turns exit() requests into ExitExceptions so tests can observe them.
 */
class FakeServerProcess(
    val args: Seq[String] = Seq(),
    propertyMap: Map[String, String] = Map(),
    val pid: Option[String] = None) extends ServerProcess {

  val classLoader: ClassLoader = getClass.getClassLoader

  val properties = new Properties()
  propertyMap foreach { case (k, v) => properties.put(k, v) }

  // registered shutdown hooks, in registration order
  private var registeredHooks: Vector[() => Unit] = Vector.empty

  def addShutdownHook(hook: => Unit) = {
    registeredHooks = registeredHooks :+ (() => hook)
  }

  def shutdown(): Unit = registeredHooks foreach (_.apply())

  def exit(message: String, cause: Option[Throwable] = None, returnCode: Int = -1): Nothing = {
    throw new ExitException(message, cause, returnCode)
  }
}
// Application provider rooted at the given path; get is never invoked in these tests
class FakeApplicationProvider(appPath: File) extends ApplicationProvider {
  val path = appPath
  def get = ??? // Never called, because we're not serving requests
}

// A family of fake servers for us to test
class FakeServer(config: ServerConfig, appProvider: ApplicationProvider) extends Server with ServerWithStop {
  def applicationProvider = appProvider
  def mode = config.mode
  def mainAddress = ???
  // number of times stop() has been invoked, so tests can assert shutdown behavior
  @volatile var stopCallCount = 0
  override def stop() = {
    stopCallCount += 1
    super.stop()
  }
}

class FakeServerProvider extends ServerProvider {
  override def createServer(config: ServerConfig, appProvider: ApplicationProvider) = new FakeServer(config, appProvider)
}

// Distinct server/provider types for asserting which provider was selected
class FakeServer2(config: ServerConfig, appProvider: ApplicationProvider) extends FakeServer(config, appProvider)

class FakeServerProvider2 extends ServerProvider {
  override def createServer(config: ServerConfig, appProvider: ApplicationProvider) = new FakeServer2(config, appProvider)
}

// Provider lacking a public no-arg constructor — exercises instantiation failure paths
class InvalidCtorFakeServerProvider(foo: String) extends FakeServerProvider {
  override def createServer(config: ServerConfig, appProvider: ApplicationProvider) = ???
}

// Provider with a private constructor — exercises instantiation failure paths.
// NOTE(review): despite the "Server" suffix this class extends FakeServerProvider — confirm the name is intentional
class PrivateCtorFakeServer private () extends FakeServerProvider {
  override def createServer(config: ServerConfig, appProvider: ApplicationProvider) = ???
}
  "serverStart.readServerConfigSettings" should {
    // Root dir comes from the first program argument; the HTTP port falls back to the default 9000.
    "read settings from the current process (root dir in args, default HTTP port)" in withTempDir { tempDir =>
      val serverStart = new FakeServerStart(new FakeServerProvider)
      val process = new FakeServerProcess(Seq(tempDir.getAbsolutePath))
      startResult(serverStart.readServerConfigSettings(process)) must beRight.which { config: ServerConfig =>
        config.rootDir must_== tempDir
        config.port must_== Some(9000)
        config.sslPort must_== None
        config.mode must_== Mode.Prod
        config.properties must_== process.properties
        config.configuration.getString("play.server.dir") must_== Some(tempDir.getAbsolutePath)
      }
    }
    // The same settings may instead be supplied entirely through system properties.
    "read settings from the current process (root dir, HTTP port and HTTPS port in props)" in withTempDir { tempDir =>
      val serverStart = new FakeServerStart(new FakeServerProvider)
      val process = new FakeServerProcess(
        propertyMap = Map(
          "user.dir" -> tempDir.getAbsolutePath,
          "http.port" -> "80",
          "https.port" -> "443"
        )
      )
      // NOTE(review): this uses `beRight { ... }` while the example above uses `beRight.which { ... }` —
      // confirm the block form actually applies these assertions under this specs2 version.
      startResult(serverStart.readServerConfigSettings(process)) must beRight { config: ServerConfig =>
        config.rootDir must_== tempDir
        config.port must_== Some(80)
        config.sslPort must_== Some(443)
        config.mode must_== Mode.Prod
        config.properties must_== process.properties
        config.configuration.getString("play.server.dir") must_== Some(tempDir.getAbsolutePath)
      }
    }
    // Missing or malformed settings must surface as a Left, never as a thrown exception.
    "require a root dir path when reading settings" in {
      val serverStart = new FakeServerStart(new FakeServerProvider)
      val process = new FakeServerProcess()
      startResult(serverStart.readServerConfigSettings(process)) must beLeft
    }
    "require an HTTP or HTTPS port when reading settings" in withTempDir { tempDir =>
      val serverStart = new FakeServerStart(new FakeServerProvider)
      val process = new FakeServerProcess(
        args = Seq(tempDir.getAbsolutePath),
        propertyMap = Map("http.port" -> "disabled")
      )
      startResult(serverStart.readServerConfigSettings(process)) must_== Left("Must provide either an HTTP or HTTPS port")
    }
    "require an integer HTTP port when reading settings" in withTempDir { tempDir =>
      val serverStart = new FakeServerStart(new FakeServerProvider)
      val process = new FakeServerProcess(
        args = Seq(tempDir.getAbsolutePath),
        propertyMap = Map("http.port" -> "xyz")
      )
      startResult(serverStart.readServerConfigSettings(process)) must beLeft
    }
    "require an integer HTTPS port when reading settings" in withTempDir { tempDir =>
      val serverStart = new FakeServerStart(new FakeServerProvider)
      val process = new FakeServerProcess(
        args = Seq(tempDir.getAbsolutePath),
        propertyMap = Map("https.port" -> "xyz")
      )
      startResult(serverStart.readServerConfigSettings(process)) must beLeft
    }
  }
  "serverStart.readServerProviderSetting" should {
    // Without a `server.provider` property the provider passed to the ServerStart is used.
    "return default by default" in withTempDir { tempDir =>
      val defaultServerProvider = new FakeServerProvider
      val serverStart = new FakeServerStart(defaultServerProvider)
      val process = new FakeServerProcess()
      val configuration = ServerConfig.loadConfiguration(process.properties, tempDir)
      serverStart.readServerProviderSetting(process, configuration) must be(defaultServerProvider)
    }
    "create a custom provider when the server.provider property is supplied" in withTempDir { tempDir =>
      val serverStart = new FakeServerStart(new FakeServerProvider)
      val serverProviderClass = classOf[FakeServerProvider]
      val process = new FakeServerProcess(
        propertyMap = Map("server.provider" -> serverProviderClass.getName)
      )
      val configuration = ServerConfig.loadConfiguration(process.properties, tempDir)
      serverStart.readServerProviderSetting(process, configuration).getClass must_== serverProviderClass
    }
    // The remaining examples pin the exact error messages for each way a user-supplied
    // `server.provider` class can be unusable.
    "fail if the class doesn't exist" in withTempDir { tempDir =>
      val serverStart = new FakeServerStart(new FakeServerProvider)
      val process = new FakeServerProcess(
        propertyMap = Map("server.provider" -> "garble.barble.Phnarble")
      )
      val configuration = ServerConfig.loadConfiguration(process.properties, tempDir)
      startResult(serverStart.readServerProviderSetting(process, configuration)) must_== Left("Couldn't find ServerProvider class 'garble.barble.Phnarble'")
    }
    "fail if the class doesn't implement ServerProvider" in withTempDir { tempDir =>
      val serverStart = new FakeServerStart(new FakeServerProvider)
      val serverProvider = classOf[String].getName
      val process = new FakeServerProcess(
        propertyMap = Map("server.provider" -> serverProvider)
      )
      val configuration = ServerConfig.loadConfiguration(process.properties, tempDir)
      startResult(serverStart.readServerProviderSetting(process, configuration)) must_== Left(s"Class $serverProvider must implement ServerProvider interface")
    }
    "fail if the class doesn't have a default constructor" in withTempDir { tempDir =>
      val serverStart = new FakeServerStart(new FakeServerProvider)
      val serverProvider = classOf[InvalidCtorFakeServerProvider].getName
      val process = new FakeServerProcess(
        propertyMap = Map("server.provider" -> serverProvider)
      )
      val configuration = ServerConfig.loadConfiguration(process.properties, tempDir)
      startResult(serverStart.readServerProviderSetting(process, configuration)) must_== Left(s"ServerProvider class $serverProvider must have a public default constructor")
    }
    "fail if the class has a private constructor" in withTempDir { tempDir =>
      val serverStart = new FakeServerStart(new FakeServerProvider)
      val serverProvider = classOf[PrivateCtorFakeServer].getName
      val process = new FakeServerProcess(
        propertyMap = Map("server.provider" -> serverProvider)
      )
      val configuration = ServerConfig.loadConfiguration(process.properties, tempDir)
      startResult(serverStart.readServerProviderSetting(process, configuration)) must_== Left(s"ServerProvider class $serverProvider must have a public default constructor")
    }
  }
  "serverStart.createPidFile" should {
    "create a pid file with the current id, then remove it on process shutdown" in withTempDir { tempDir =>
      val serverStart = new FakeServerStart(new FakeServerProvider)
      val pid = "12345"
      val process = new FakeServerProcess(pid = Some(pid))
      val configuration = ServerConfig.loadConfiguration(process.properties, tempDir)
      val pidFile = new File(tempDir, "RUNNING_PID")
      configuration.getString("play.server.dir") must_== Some(tempDir.getAbsolutePath)
      configuration.getString("play.server.pidfile.path") must_== Some(pidFile.getAbsolutePath)
      startResult(serverStart.createPidFile(process, configuration)) must_== Right(Some(pidFile))
      try {
        pidFile.exists must beTrue
        // The file must contain exactly the pid, ASCII-encoded.
        Files.toString(pidFile, Charset.forName("US-ASCII")) must_== pid
      } finally {
        // Shutdown hooks registered by createPidFile are expected to delete the file.
        process.shutdown()
      }
    }
    "fail to create a pid file if it can't get the process pid" in withTempDir { tempDir =>
      val serverStart = new FakeServerStart(new FakeServerProvider)
      val process = new FakeServerProcess(pid = None)
      val configuration = ServerConfig.loadConfiguration(process.properties, tempDir)
      startResult(serverStart.createPidFile(process, configuration)) must_== Left("Couldn't determine current process's pid")
    }
    "fail to create a pid file if the pid file already exists" in withTempDir { tempDir =>
      val serverStart = new FakeServerStart(new FakeServerProvider)
      val process = new FakeServerProcess(pid = Some("123"))
      val configuration = ServerConfig.loadConfiguration(process.properties, tempDir)
      Files.write("x".getBytes, new File(tempDir, "RUNNING_PID"))
      startResult(serverStart.createPidFile(process, configuration)) must_== Left(s"This application is already running (Or delete ${tempDir.getAbsolutePath}/RUNNING_PID file).")
    }
  }
"serverStart.start" should {
"read settings, create a pid file, start the the server and register shutdown hooks" in withTempDir { tempDir =>
val serverStart = new FakeServerStart(new FakeServerProvider)
val process = new FakeServerProcess(
args = Seq(tempDir.getAbsolutePath),
pid = Some("999")
)
val server = serverStart.start(process)
val pidFile = new File(tempDir, "RUNNING_PID")
try {
server.getClass must_== classOf[FakeServer]
pidFile.exists must beTrue
server.asInstanceOf[FakeServer].stopCallCount must_== 0
} finally {
process.shutdown()
}
pidFile.exists must beFalse
server.asInstanceOf[FakeServer].stopCallCount must_== 1
}
"read settings, create custom ServerProvider, create a pid file, start the the server and register shutdown hooks" in withTempDir { tempDir =>
val serverStart = new FakeServerStart(new FakeServerProvider2)
val process = new FakeServerProcess(
args = Seq(tempDir.getAbsolutePath),
propertyMap = Map("server.provider" -> classOf[FakeServerProvider2].getName),
pid = Some("999")
)
val server = serverStart.start(process)
val pidFile = new File(tempDir, "RUNNING_PID")
try {
server.getClass must_== classOf[FakeServer2]
pidFile.exists must beTrue
server.asInstanceOf[FakeServer2].stopCallCount must_== 0
} finally {
process.shutdown()
}
pidFile.exists must beFalse
server.asInstanceOf[FakeServer2].stopCallCount must_== 1
}
"exit with an error if settings are wrong" in withTempDir { tempDir =>
val serverStart = new FakeServerStart(new FakeServerProvider)
val process = new FakeServerProcess()
exitResult {
serverStart.start(process)
} must beLeft
}
}
}
| jyotikamboj/container | pf-framework/src/play-server/src/test/scala/play/core/server/ServerStartSpec.scala | Scala | mit | 13,739 |
package model.json
/**
 * JSON payload describing an SSH order (a command to run on a remote machine).
 *
 * @param superUser whether the command must run with superuser privileges
 * @param interrupt presumably whether an already-running order may be interrupted — TODO confirm against SSHOrder usage
 * @param command   the shell command to execute
 *
 * Created by camilosampedro on 29/01/17.
 */
case class SSHOrderJson(superUser: Boolean, interrupt: Boolean, command: String)
| camilosampedro/Aton | app/model/json/SSHOrderJson.scala | Scala | gpl-3.0 | 153 |
package edu.gemini.osgi.tools.idea
import edu.gemini.osgi.tools.FileUtils._
import IdeaModule._
import java.io.{FileFilter, File}
import edu.gemini.osgi.tools.{Version, BundleVersion}
/**
 * Generates the content of an IntelliJ IDEA module (.iml) file for an OSGi bundle.
 *
 * @param dir      folder the IML file will be in; all paths are emitted relative to it
 * @param depMods  names of sibling IDEA modules this module depends on
 * @param depJars  project-local jars added as module-library entries
 * @param testJars jars added as TEST-scoped module-library entries
 */
class IdeaModule(
  dir: File, // folder the IML file will be in
  depMods: Seq[String], // modules we need to refer to
  depJars: Seq[File], // project-local jars
  testJars: Seq[File] // jars only used for testing
) {

  /** Renders `to` relative to the module dir using IDEA's `$MODULE_DIR$` macro. */
  def moduleRelativePath(to: File): String =
    "$MODULE_DIR$/%s".format(relativePath(dir, to))

  /** Root element of the .iml file. */
  def module: xml.Elem =
    <module type="JAVA_MODULE" version="4">
      {scalaComponent}
      {rootComponent}
    </module>

  def scalaComponent: xml.Elem =
    <component name="FacetManager" />

  /** Module root manager: source/test/excluded folders plus all dependency entries. */
  def rootComponent: xml.Elem =
    <component name="NewModuleRootManager" inherit-compiler-output="true">
      <content url="file://$MODULE_DIR$">
        {srcDirs(false).map(d => sourceFolder(d, false))}
        {mgdSrcDirs.map(d => sourceFolder(d, false))}
        {srcDirs(true).map(d => sourceFolder(d, true))}
        {excludeDirs.map(excludeFolder)}
      </content>
      <orderEntry type="inheritedJdk" />
      <orderEntry type="sourceFolder" forTests="false" />
      {depMods.map(bv => bundleDependency(bv))}
      {depJars.map(j => libraryDependency(j))}
      {testJars.map(j => libraryDependency(j, isTest = true))}
      <orderEntry type="library" name="scala-sdk" level="project" />
    </component>

  // Children of src/main (or src/test) — each child dir is registered as a source root.
  def srcDirs(isTest: Boolean): List[File]= {
    val srcMain = new File(dir, "src/" + (if (isTest) "test" else "main"))
    if (srcMain.isDirectory) srcMain.listFiles.toList else Nil
  }

  // sbt-generated sources under target/**/src_managed.
  def mgdSrcDirs: List[File] =
    findFile(new File(dir, "target"), "src_managed").toList.flatMap(childDirs)

  /**
   * Directories under target/ to exclude from indexing. If src_managed exists,
   * descend towards it and exclude every sibling directory along the way so that
   * the managed sources themselves stay visible.
   */
  def excludeDirs: List[File] = {
    def exclude(fs: List[File], path: String, res: List[File]): List[File] = fs match {
      case Nil => res
      case (h :: t) =>
        if (h.getPath == path) exclude(t, path, res)
        else if (path.startsWith(h.getPath)) exclude(childDirs(h) ++ t, path, res)
        else exclude(t, path, h :: res)
    }
    val root = new File(dir, "target")
    val mgd = findFile(root, "src_managed")
    mgd.fold(List(root)) { m => exclude(List(root), m.getPath, Nil) }
  }

  def sourceFolder(f: File, isTest: Boolean): xml.Elem =
    <sourceFolder url={"file://%s".format(moduleRelativePath(f))} isTestSource={isTest.toString} />

  def excludeFolder(f: File): xml.Elem =
    <excludeFolder url={"file://%s".format(moduleRelativePath(f))} />

  def bundleDependency(mod: String): xml.Elem =
    <orderEntry type="module" module-name={mod} exported="" />

  def libraryDependency(jar: File, isTest: Boolean = false): xml.Elem =
    <orderEntry type="module-library" scope={if (isTest) "TEST" else ""} exported="" >
      <library>
        <CLASSES>
          <root url={"jar://%s!/".format(moduleRelativePath(jar))} />
        </CLASSES>
      </library>
    </orderEntry>
}
object IdeaModule {

  // Accepts source directories named src* except the legacy "src-test" folder.
  val srcDirFileFilter = new FileFilter {
    def accept(f: File) =
      f.isDirectory && f.getName.startsWith("src") && f.getName != "src-test"
  }

  /**
   * Returns the orderEntry elements for all "TEST" scoped dependencies in the
   * given module file, if it exists.
   */
  def testScopeDependencies(iml: File): xml.NodeSeq =
    if (!iml.exists()) xml.NodeSeq.Empty else extractTestEntries(xml.XML.loadFile(iml))

  private def extractTestEntries(root: xml.Elem): xml.NodeSeq =
    (root \\ "orderEntry") filter {
      oe => oe.attribute("scope").exists(_.text == "TEST")
    }

  // Canonical IDEA module name: "<symbolic-name>-<version>".
  def moduleName(bl: BundleLoc): String = moduleName(bl.name, bl.version)
  def moduleName(bv: BundleVersion): String = moduleName(bv.manifest.symbolicName, bv.manifest.version)
  def moduleName(root: String, v: Version): String = "%s-%s".format(root, v)
}
| arturog8m/ocs | project/src/main/scala/edu/gemini/osgi/tools/idea/IdeaModule.scala | Scala | bsd-3-clause | 3,819 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.maven
import java.io.File
import java.util.Collections
import java.util.{ List => JList }
import com.lightbend.lagom.dev.PortAssigner.ProjectName
import com.lightbend.lagom.dev.Colors
import com.lightbend.lagom.dev.ConsoleHelper
import com.lightbend.lagom.dev.ServiceBindingInfo
import javax.inject.Inject
import org.apache.maven.RepositoryUtils
import org.apache.maven.execution.MavenSession
import org.apache.maven.model.Dependency
import org.apache.maven.plugin.AbstractMojo
import scala.beans.BeanProperty
import scala.collection.JavaConverters._
/**
* Start a service.
*/
class StartMojo @Inject() (serviceManager: ServiceManager, session: MavenSession) extends AbstractMojo {
  // Bean properties below are injected by Maven from the plugin configuration.
  // For the port properties, -1 means "not configured by the user".
  @BeanProperty
  var lagomService: Boolean = _
  @BeanProperty
  var playService: Boolean = _
  @BeanProperty
  var serviceAddress: String = _
  @BeanProperty
  var serviceEnableSsl: Boolean = _
  /** @deprecated As of release 1.5.0. Use serviceHttpPort instead */
  @BeanProperty @Deprecated
  var servicePort: Int = _
  @BeanProperty
  var serviceHttpPort: Int = _
  @BeanProperty
  var serviceHttpsPort: Int = _
  @BeanProperty
  var servicePortRange: PortRangeBean = new PortRangeBean
  @BeanProperty
  var serviceLocatorPort: Int = _
  @BeanProperty
  var serviceLocatorEnabled: Boolean = _
  @BeanProperty
  var serviceLocatorUrl: String = _
  @BeanProperty
  var cassandraEnabled: Boolean = _
  @BeanProperty
  var cassandraPort: Int = _
  @BeanProperty
  var externalProjects: JList[ExternalProject] = Collections.emptyList()
  @BeanProperty
  var watchDirs: JList[String] = Collections.emptyList()
  override def execute(): Unit = {
    // Bridge the deprecated 'servicePort' setting onto 'serviceHttpPort'.
    if (servicePort != -1) {
      // this property is also marked as deprecated in
      // the plugin.xml descriptor, but somehow mvn is not printing anything. Therefore, we add a warning ourselves.
      getLog.warn(
        "Lagom's maven plugin property 'servicePort' is deprecated as of release 1.5.0. Use serviceHttpPort instead."
      )
      // for backward compatibility, we must set the http port to servicePort
      // if the latter was configured by the user
      if (serviceHttpPort == -1) serviceHttpPort = servicePort
      else
        getLog.warn(
          s"Both 'serviceHttpPort' ($serviceHttpPort) and 'servicePort' ($servicePort) are configured, 'servicePort' will be ignored"
        )
    }
    val project = session.getCurrentProject
    // Relative watch dirs are resolved against the project base dir.
    val resolvedWatchDirs = watchDirs.asScala.map { dir =>
      val file = new File(dir)
      if (!file.isAbsolute) {
        new File(project.getBasedir, dir)
      } else file
    }
    if (!lagomService && !playService) {
      sys.error(s"${project.getArtifactId} is not a Lagom service!")
    }
    // None disables the service locator; a null URL falls back to localhost on the configured port.
    val serviceLocatorUrl = (serviceLocatorEnabled, this.serviceLocatorUrl) match {
      case (false, _) => None
      case (true, null) => Some(s"http://localhost:$serviceLocatorPort")
      case (true, configured) => Some(configured)
    }
    // Picks an explicit user-supplied port, or assigns one deterministically from the
    // configured port range (a distinct name — and hence port — is used for TLS).
    def selectPort(servicePort: Int, useTls: Boolean): Int = {
      if (servicePort == -1) {
        val portMap = serviceManager.getPortMap(
          servicePortRange,
          externalProjects.asScala.map(d => d.artifact.getGroupId + ":" + d.artifact.getArtifactId),
          serviceEnableSsl
        )
        val portName = {
          val pn = ProjectName(project.getArtifactId)
          if (useTls) pn.withTls else pn
        }
        val port = portMap.get(portName)
        port.map(_.value).getOrElse {
          sys.error(s"No port selected for service ${project.getArtifactId} (use TLS: $useTls)")
        }
      } else {
        servicePort
      }
    }
    val selectedPort = selectPort(serviceHttpPort, useTls = false)
    val selectedHttpsPort =
      if (serviceEnableSsl) selectPort(serviceHttpsPort, useTls = true)
      else -1
    val cassandraPort = if (cassandraEnabled) {
      Some(this.cassandraPort)
    } else None
    serviceManager.startServiceDevMode(
      project,
      serviceAddress,
      selectedPort,
      selectedHttpsPort,
      serviceLocatorUrl,
      cassandraPort,
      playService,
      resolvedWatchDirs
    )
  }
}
/**
* Stop a service.
*/
class StopMojo @Inject() (serviceManager: ServiceManager, session: MavenSession) extends AbstractMojo {
  @BeanProperty
  var lagomService: Boolean = _

  @BeanProperty
  var playService: Boolean = _

  /** Stops the dev-mode service of the current project; fails if it isn't a Lagom/Play service. */
  override def execute(): Unit = {
    val project = session.getCurrentProject
    val isRunnableService = lagomService || playService
    if (!isRunnableService) {
      sys.error(s"${project.getArtifactId} is not a Lagom service!")
    }
    serviceManager.stopService(project)
  }
}
/** Starts the externally-built services listed in the plugin configuration in dev mode. */
class StartExternalProjects @Inject() (serviceManager: ServiceManager, session: MavenSession) extends AbstractMojo {
  @BeanProperty
  var externalProjects: JList[ExternalProject] = Collections.emptyList()
  @BeanProperty
  var serviceEnableSsl: Boolean = false
  @BeanProperty
  var servicePortRange: PortRangeBean = new PortRangeBean
  @BeanProperty
  var serviceAddress: String = _
  @BeanProperty
  var serviceLocatorPort: Int = _
  @BeanProperty
  var serviceLocatorEnabled: Boolean = _
  @BeanProperty
  var serviceLocatorUrl: String = _
  @BeanProperty
  var cassandraEnabled: Boolean = _
  @BeanProperty
  var cassandraPort: Int = _
  override def execute(): Unit = {
    // None disables the service locator; a null URL falls back to localhost on the configured port.
    val serviceLocatorUrl = (serviceLocatorEnabled, this.serviceLocatorUrl) match {
      case (false, _) => None
      case (true, null) => Some(s"http://localhost:$serviceLocatorPort")
      case (true, configured) => Some(configured)
    }
    val cassandraPort = if (cassandraEnabled) {
      Some(this.cassandraPort)
    } else None
    // lazy: the port map is only computed if at least one project needs an auto-assigned port.
    lazy val portMap = serviceManager.getPortMap(
      servicePortRange,
      externalProjects.asScala.map(d => d.artifact.getGroupId + ":" + d.artifact.getArtifactId),
      serviceEnableSsl
    )
    externalProjects.asScala.foreach { project =>
      if (project.artifact == null || project.artifact.getGroupId == null || project.artifact.getArtifactId == null ||
          project.artifact.getVersion == null) {
        sys.error("External projects must specify an artifact with a groupId, artifactId and version")
      }
      // Explicit port wins; otherwise assign deterministically from the port range,
      // keyed by "groupId:artifactId" (with a TLS-specific name for HTTPS ports).
      def selectPort(servicePort: Int, useTls: Boolean) = {
        if (servicePort == -1) {
          val artifactBasename = project.artifact.getGroupId + ":" + project.artifact.getArtifactId
          val portName = {
            val pn = ProjectName(artifactBasename)
            if (useTls) pn.withTls else pn
          }
          val port = portMap.get(portName)
          port.map(_.value).getOrElse {
            sys.error(s"No port selected for service $artifactBasename (use TLS: $useTls)")
          }
        } else {
          servicePort
        }
      }
      val selectedPort = selectPort(project.serviceHttpPort, useTls = false)
      val selectedHttpsPort =
        if (serviceEnableSsl) selectPort(project.serviceHttpsPort, useTls = true)
        else -1
      val serviceCassandraPort = cassandraPort.filter(_ => project.cassandraEnabled)
      val dependency =
        RepositoryUtils.toDependency(project.artifact, session.getRepositorySession.getArtifactTypeRegistry)
      serviceManager.startExternalProject(
        dependency,
        serviceAddress,
        selectedPort,
        selectedHttpsPort,
        serviceLocatorUrl,
        serviceCassandraPort,
        project.playService
      )
    }
  }
}
/** Stops every external project previously started by StartExternalProjects. */
class StopExternalProjects @Inject() (serviceManager: ServiceManager, session: MavenSession) extends AbstractMojo {
  @BeanProperty
  var externalProjects: JList[ExternalProject] = Collections.emptyList()

  override def execute(): Unit = {
    for (project <- externalProjects.asScala) {
      // Resolve the configured artifact to an Aether dependency and stop it.
      val dependency =
        RepositoryUtils.toDependency(project.artifact, session.getRepositorySession.getArtifactTypeRegistry)
      serviceManager.stopExternalProject(dependency)
    }
  }
}
/** Bean describing one externally-built service configured in the plugin's POM section. */
class ExternalProject {
  // Maven coordinates of the external service; groupId, artifactId and version are all
  // mandatory (validated in StartExternalProjects.execute).
  @BeanProperty
  var artifact: Dependency = _
  @BeanProperty
  var playService: Boolean = false
  /** @deprecated As of release 1.5.0. Use serviceHttpPort instead. */
  @BeanProperty @Deprecated
  var servicePort: Int = -1
  // -1 means "unassigned": a port is then picked from the configured port range.
  @BeanProperty
  var serviceHttpPort: Int = -1
  @BeanProperty
  var serviceHttpsPort: Int = -1
  @BeanProperty
  var cassandraEnabled: Boolean = true
}
/**
* Starts all services.
*/
class StartAllMojo @Inject() (facade: MavenFacade, logger: MavenLoggerProxy, session: MavenSession)
    extends AbstractMojo {

  /**
   * Starts the supporting infrastructure (Kafka, Cassandra, service locator and any
   * configured external projects), then runs the `start` goal of every Lagom service
   * module located in the reactor.
   *
   * Note: a previously-declared private `consoleHelper` field was removed here — it was
   * never referenced by this mojo (RunAllMojo owns its own console helper).
   */
  override def execute(): Unit = {
    val services = facade.locateServices
    executeGoal("startKafka")
    executeGoal("startCassandra")
    executeGoal("startServiceLocator")
    executeGoal("startExternalProjects")
    services.foreach { project =>
      facade.executeMavenPluginGoal(project, "start")
    }
  }

  /** Runs the named goal of this plugin against the current (top-level) project. */
  def executeGoal(name: String): Boolean = {
    facade.executeMavenPluginGoal(session.getCurrentProject, name)
  }
}
/**
* Stops all services.
*/
class StopAllMojo @Inject() (facade: MavenFacade, session: MavenSession) extends AbstractMojo {
  // NOTE(review): this bean is not referenced in this mojo's body — presumably kept for
  // configuration symmetry with StartAllMojo; confirm before removing.
  @BeanProperty
  var externalProjects: JList[Dependency] = Collections.emptyList()
  // Stops services first, then tears infrastructure down in the reverse order it was started.
  override def execute(): Unit = {
    val services = facade.locateServices
    services.foreach { service =>
      facade.executeMavenPluginGoal(service, "stop")
    }
    executeGoal("stopExternalProjects")
    executeGoal("stopServiceLocator")
    executeGoal("stopCassandra")
    executeGoal("stopKafka")
  }
  /** Runs the named goal of this plugin against the current (top-level) project. */
  def executeGoal(name: String) = {
    facade.executeMavenPluginGoal(session.getCurrentProject, name)
  }
}
/**
* Run a service, blocking until the user hits enter before stopping it again.
*/
class RunMojo @Inject() (mavenFacade: MavenFacade, logger: MavenLoggerProxy, session: MavenSession)
    extends AbstractMojo {
  // NOTE: duplicates much of RunAllMojo's logic — keep the two in sync.
  private val consoleHelper = new ConsoleHelper(new Colors("lagom.noformat"))
  // Starts the current project's service, prints its bindings, blocks until the user
  // hits enter, then stops the service again.
  override def execute(): Unit = {
    val project = session.getCurrentProject
    mavenFacade.executeMavenPluginGoal(project, "start")
    // The start goal records the service's bindings under LagomServiceBindings;
    // their absence means the service failed to start.
    val bindingInfo: ServiceBindingInfo =
      LagomKeys.LagomServiceBindings
        .get(project)
        .map { bindings =>
          ServiceBindingInfo(project.getArtifactId, bindings)
        }
        .getOrElse {
          sys.error(s"Service ${project.getArtifactId} is not running?")
        }
    consoleHelper.printStartScreen(logger, Seq(bindingInfo))
    consoleHelper.blockUntilExit()
    mavenFacade.executeMavenPluginGoal(project, "stop")
  }
}
/**
* Run a service, blocking until the user hits enter before stopping it again.
*/
class RunAllMojo @Inject() (facade: MavenFacade, logger: MavenLoggerProxy, session: MavenSession) extends AbstractMojo {
  // NOTE: duplicates much of RunMojo's logic — keep the two in sync.
  val consoleHelper = new ConsoleHelper(new Colors("lagom.noformat"))
  // Starts every service (plus infrastructure) via startAll, prints all bindings,
  // blocks until the user hits enter, then runs stopAll.
  override def execute(): Unit = {
    val services = facade.locateServices
    executeGoal("startAll")
    val bindingInfos: Seq[ServiceBindingInfo] = services.map { project =>
      LagomKeys.LagomServiceBindings
        .get(project)
        .map { bindings =>
          ServiceBindingInfo(project.getArtifactId, bindings)
        }
        .getOrElse {
          sys.error(s"Service ${project.getArtifactId} is not running?")
        }
    }
    consoleHelper.printStartScreen(logger, bindingInfos)
    consoleHelper.blockUntilExit()
    executeGoal("stopAll")
  }
  /** Runs the named goal of this plugin against the current (top-level) project. */
  def executeGoal(name: String) = {
    facade.executeMavenPluginGoal(session.getCurrentProject, name)
  }
}
| lagom/lagom | dev/maven-plugin/src/main/scala/com/lightbend/lagom/maven/ServiceMojos.scala | Scala | apache-2.0 | 11,737 |
package org.jetbrains.plugins.scala.lang.refactoring.move.members
import com.intellij.openapi.actionSystem.DataContext
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.project.Project
import com.intellij.psi.{PsiElement, PsiReference}
import com.intellij.refactoring.move.MoveHandlerDelegate
import org.jetbrains.plugins.scala.ScalaBundle
import org.jetbrains.plugins.scala.extensions.ObjectExt
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScClassParameter
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScMember, ScObject, ScTypeDefinition}
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaRefactoringUtil
/**
 * Move-refactoring entry point for members of stable Scala objects.
 * Returns true when the element was handled here (dialog shown or error reported),
 * false to let other MoveHandlerDelegates try.
 */
class ScalaMoveMembersHandler extends MoveHandlerDelegate {
  override def tryToMove(element: PsiElement, project: Project, dataContext: DataContext, reference: PsiReference, editor: Editor): Boolean = {
    element match {
      // Whole type definitions and class parameters are handled by other delegates.
      case _: ScTypeDefinition | _: ScClassParameter => false
      case NotSupportedMember(message) =>
        // NOTE(review): the dialog title is hardcoded here rather than taken from
        // ScalaBundle — confirm whether it should be localized.
        val refactoringName = "Move members"
        ScalaRefactoringUtil.showErrorHint(message, refactoringName, null)(project, editor)
        true
      case objectMember(obj, member) =>
        val dialog = new ScalaMoveMembersDialog(project, true, obj, member)
        dialog.show()
        true
      case _ => false
    }
  }
  // Extracts (containing object, member) when the member belongs to an object.
  private object objectMember {
    def unapply(member: ScMember): Option[(ScObject, ScMember)] =
      member.containingClass.asOptionOf[ScObject].map((_, member))
  }
  // Yields an error message when the member cannot be moved: implicits, members of
  // non-stable objects, and overriding members are unsupported.
  private object NotSupportedMember {
    def unapply(member: ScMember): Option[String] = {
      if (ScalaPsiUtil.isImplicit(member))
        Some(ScalaBundle.message("move.members.not.supported.implicits"))
      else if (!hasStablePath(member))
        Some(ScalaBundle.message("move.members.supported.only.stable.objects"))
      else if (member.hasModifierProperty("override"))
        Some(ScalaBundle.message("move.members.not.supported.overridden"))
      else None
    }
    private def hasStablePath(member: ScMember): Boolean = member.containingClass match {
      case obj: ScObject => ScalaPsiUtil.hasStablePath(obj)
      case _ => false
    }
  }
}
| jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/refactoring/move/members/ScalaMoveMembersHandler.scala | Scala | apache-2.0 | 2,254 |
package com.typesafe.slick.docs
//#imports
import scala.concurrent.ExecutionContext.Implicits.global
import slick.jdbc.H2Profile.api._
import slick.jdbc.H2Profile
//#imports
object CodeGenerator extends App {
  // Sample configuration values referenced by the documentation snippets below.
  val uri = "#slick.db.default"
  val profile = "slick.jdbc.H2Profile"
  val jdbcDriver = "org.h2.Driver"
  val url = "jdbc:postgresql://localhost/test"
  val outputFolder = ""
  val pkg = "demo"
  val user = ""
  val password = ""
  // Guarded by `if(false)` on purpose: this code exists only to be extracted into the
  // documentation via the `//#...` snippet markers and is never meant to run.
  // Do not add comments inside the marker pairs — they would appear in the rendered docs.
  if(false){
    val db = Database.forURL("jdbc:h2:mem:test1;DB_CLOSE_DELAY=-1", driver="org.h2.Driver")
    //#default-runner-uri
    slick.codegen.SourceCodeGenerator.main(
      Array(uri, outputFolder)
    )
    //#default-runner-uri
    //#default-runner
    slick.codegen.SourceCodeGenerator.main(
      Array(profile, jdbcDriver, url, outputFolder, pkg)
    )
    //#default-runner
    //#default-runner-with-auth
    slick.codegen.SourceCodeGenerator.main(
      Array(profile, jdbcDriver, url, outputFolder, pkg, user, password)
    )
    //#default-runner-with-auth
    //#customization
    import slick.codegen.SourceCodeGenerator
    // fetch data model
    val modelAction = H2Profile.createModel(Some(H2Profile.defaultTables)) // you can filter specific tables here
    val modelFuture = db.run(modelAction)
    // customize code generator
    val codegenFuture = modelFuture.map(model => new SourceCodeGenerator(model) {
      // override mapped table and class name
      override def entityName =
        dbTableName => dbTableName.dropRight(1).toLowerCase.toCamelCase
      override def tableName =
        dbTableName => dbTableName.toLowerCase.toCamelCase
      // add some custom import
      override def code = "import foo.{MyCustomType,MyCustomTypeMapper}" + "\n" + super.code
      // override table generator
      override def Table = new Table(_){
        // disable entity class generation and mapping
        override def EntityType = new EntityType{
          override def classEnabled = false
        }
        // override contained column generator
        override def Column = new Column(_){
          // use the data model member of this column to change the Scala type,
          // e.g. to a custom enum or anything else
          override def rawType =
            if(model.name == "SOME_SPECIAL_COLUMN_NAME") "MyCustomType" else super.rawType
        }
      }
    })
    codegenFuture.onSuccess { case codegen =>
      codegen.writeToFile(
        "slick.jdbc.H2Profile","some/folder/","some.packag","Tables","Tables.scala"
      )
    }
    //#customization
  }
}
| Asamsig/slick | doc/code/CodeGenerator.scala | Scala | bsd-2-clause | 2,553 |
/*
# Copyright 2016 Georges Lipka
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package com.glipka.easyReactJS.reactRouter
import scala.scalajs.js
import com.glipka.easyReactJS.react._
import GlobalDefinition._
/** Scala.js facade for React Router's `Miss` component — exposes no members (presumably rendered when no `Route` matches; TODO confirm against react-router docs). */
@js.native
class Miss extends js.Any {} | glipka/Easy-React-With-ScalaJS | src/main/scala/com/glipka/easyReactJS/reactRouter/Miss.scala | Scala | apache-2.0 | 757 |
import org.scalatest._
import scala.collection.immutable._
import alexadewit_on_github.unit_converter._
import argonaut._, Argonaut._, DecodeResult._
import MeasurementSystemCodecs._
class MeasurementSystemSpec extends FlatSpec {
  // Fixture units: magnitudes are expressed relative to the base unit of each
  // category (millilitres for Volume, grams for Mass).
  val mililitre = UnitOfMeasure("Millilitre", "mL", 1, Volume )
  val decilitre = UnitOfMeasure("Decilitre", "dL", 10, Volume )
  val litre = UnitOfMeasure("Litre", "L", 1000, Volume )
  val decalitre = UnitOfMeasure("Decalitre", "daL", 10000, Volume )
  val gram = UnitOfMeasure("Gram", "g", 1, Mass )
  val kilogram = UnitOfMeasure("Kilogram", "kg", 1000, Mass )
  val unitsOfMeasure = List(
    mililitre,
    decilitre,
    litre,
    decalitre,
    gram,
    kilogram
  )
  // System under test: a metric system with volume and mass units, but no distance units.
  val metricSystem = MeasurementSystem.fromUnitList( "Metric System", unitsOfMeasure )
  /*
   * Tests
   */
  "Get units" should "retrieve a list of all units of a UnitCategory" in {
    val expected = List(
      gram,
      kilogram
    )
    assert( metricSystem.getUnits( Mass ).get == expected )
  }
  "Get units" should "result in none when trying to get a nonexistant category." in {
    assert( metricSystem.getUnits( Distance ).isEmpty )
  }
  "Organize Units" should "properly sort units of measure by their category" in {
    val expected = Map(
      Volume -> List(
        mililitre,
        decilitre,
        litre,
        decalitre
      ),
      Mass -> List(
        gram,
        kilogram
      )
    )
    assert( MeasurementSystem.organizeUnits( unitsOfMeasure ) == expected )
  }
  "Organize Units" should "return an empty map when fed an empty list" in {
    val expected: Map[UnitCategory, List[UnitOfMeasure]] = new HashMap()
    assert( MeasurementSystem.organizeUnits( List[UnitOfMeasure]() ) == expected )
  }
  // Conversion picks the target unit of the same category; expected values below
  // follow from 1 imperial gallon = 4546.09 mL.
  "One imperial gallon" should "convert to 4.54609 litres" in {
    val imperialGallon = UnitOfMeasure("Gallon", "gal", 4546.09, Volume )
    val oneGallon = Measure( 1, imperialGallon )
    assert( metricSystem.convert( oneGallon ).get == Measure( 4.54609, litre ) )
  }
  "One hundred imperial gallons" should "convert to 45.4609 Decalitres" in {
    val imperialGallon = UnitOfMeasure("Gallon", "gal", 4546.09, Volume )
    val hundredGallons = Measure( 100, imperialGallon )
    assert( metricSystem.convert( hundredGallons ).get == Measure( 45.4609, decalitre ) )
  }
  "One decalitre" should "convert to one decalitre" in {
    val oneDecalitre = Measure(1, decalitre )
    assert( metricSystem.convert( oneDecalitre ).get == Measure( 1, decalitre ) )
  }
  "One mile" should "return an empty option when metricsystem does not have distance units" in {
    val mile = UnitOfMeasure( "Mile", "mi", 1609.344, Distance )
    val oneMile = Measure( 1, mile )
    assert( metricSystem.convert( oneMile ).isEmpty )
  }
  "Handled unit categories" should "return a set of the unit categories a measurement system handles" in {
    assert( metricSystem.handledUnitCategories == Set( Volume, Mass ) )
  }
  // Equality must not depend on the order in which units are listed.
  "A measurement system" should "perform reasonable equality comparisons" in {
    val mililitre2 = UnitOfMeasure("Millilitre", "mL", 1, Volume )
    val decilitre2 = UnitOfMeasure("Decilitre", "dL", 10, Volume )
    val litre2 = UnitOfMeasure("Litre", "L", 1000, Volume )
    val decalitre2 = UnitOfMeasure("Decalitre", "daL", 10000, Volume )
    val gram2 = UnitOfMeasure("Gram", "g", 1, Mass )
    val kilogram2 = UnitOfMeasure("Kilogram", "kg", 1000, Mass )
    val unitsOfMeasure2 = List(
      mililitre2,
      litre2,
      decilitre2,
      decalitre2,
      gram2,
      kilogram2
    )
    val metricSystem2 = MeasurementSystem.fromUnitList( "Metric System", unitsOfMeasure2 )
    assert( metricSystem == metricSystem2 )
  }
  /*
   * Tests of Json conversions
   */
  "A Measurement System" should "parse correctly from JSON" in {
    val metricString = """
    {
      "name" : "metric",
      "units" : [
         {
           "name" : "Millilitre",
           "short_name" : "mL",
           "magnitude" : 1,
           "unit_category" : "volume"
         },
         {
           "name" : "Decilitre",
           "short_name" : "dL",
           "magnitude" : 10,
           "unit_category" : "volume"
         }
      ]
    }
    """
    val expected = MeasurementSystem.fromUnitList("metric", List(mililitre, decilitre) )
    assert( Parse.decodeOption[MeasurementSystem](metricString).get == expected )
  }
}
| AlexaDeWit/UnitConverter | src/test/scala/MeasurementSystem.scala | Scala | bsd-3-clause | 4,400 |
package io.reactors
package container
import io.reactors.algebra._
import scala.collection._
/** Stores elements of a monoid.
*
* Incrementally computes an aggregation of the stored elements,
* using a binary associative operator. The aggregation can be accessed
* with the `signal` method.
*/
class MonoidCatamorph[@spec(Int, Long, Double) T, @spec(Int, Long, Double) S](
  val get: S => T, val zero: T, val op: (T, T) => T
) extends RCatamorph[T, S] with RContainer.Modifiable {
  import MonoidCatamorph._
  // Subscription to the upstream insert/remove event streams (set by the factory).
  private[reactors] var subscription: Subscription = _
  // Root of the balanced aggregation tree; Empty(zero) when no elements are stored.
  private[reactors] var root: Node[T] = null
  // Maps each stored element to its leaf, enabling removal/update without a search.
  private[reactors] var leaves: mutable.Map[S, Leaf[T]] = null
  private val insertsEmitter = new Events.Emitter[S]
  private val removesEmitter = new Events.Emitter[S]
  // Reactive cell that mirrors the aggregate value held at the tree root.
  private var rootValue: RCell[T] = null
  def inserts: Events[S] = insertsEmitter
  def removes: Events[S] = removesEmitter
  // Initializes all mutable state; invoked exactly once during construction below.
  def init(mc: MonoidCatamorph[T, S]) {
    root = new Empty(zero)
    leaves = mutable.Map[S, Leaf[T]]()
    rootValue = RCell(root.value)
    subscription = Subscription.empty
  }
  init(this)
  // The incrementally maintained aggregation of all stored elements.
  def signal: Signal[T] = rootValue
  protected def newLeaf(v: S, f: S => T) = new Leaf[T](() => f(v), null)
  def unsubscribe() = subscription.unsubscribe()
  // Inserts `v` and updates the aggregate; returns false if already present.
  // acquireModify/releaseModify come from RContainer.Modifiable and guard the
  // mutation — presumably against reentrant modification; confirm in that trait.
  def +=(v: S): Boolean = try {
    acquireModify()
    if (!leaves.contains(v)) {
      val leaf = newLeaf(v, get)
      leaves(v) = leaf
      root = root.insert(leaf, op)
      rootValue := root.value
      insertsEmitter.react(v, null)
      true
    } else false
  } finally releaseModify()
  // Removes `v` and updates the aggregate; returns false if not present.
  def -=(v: S): Boolean = try {
    acquireModify()
    if (leaves.contains(v)) {
      val leaf = leaves(v)
      root = leaf.remove(zero, op)
      leaves.remove(v)
      rootValue := root.value
      removesEmitter.react(v, null)
      true
    } else false
  } finally releaseModify()
  def container = this
  // Recomputes cached aggregates on the path from v's leaf to the root — used when
  // the value `get` maps v to has changed; returns false if v is not stored.
  def push(v: S): Boolean = try {
    acquireModify()
    if (leaves.contains(v)) {
      val leaf = leaves(v)
      leaf.pushUp(op)
      rootValue := root.value
      true
    } else false
  } finally releaseModify()
  def size = leaves.size
  def foreach(f: S => Unit) = leaves.keys.foreach(f)
}
object MonoidCatamorph {
  /** Creates a catamorph aggregating the monoid's own elements (identity `get`). */
  def apply[@spec(Int, Long, Double) T](implicit m: Monoid[T]) =
    new MonoidCatamorph[T, T](v => v, m.zero, m.operator)
  // Container factory: wires the insert/remove event streams of a new catamorph.
  implicit def factory[@spec(Int, Long, Double) T: Monoid] =
    new RContainer.Factory[T, MonoidCatamorph[T, T]] {
      def apply(inserts: Events[T], removes: Events[T]): MonoidCatamorph[T, T] = {
        val m = implicitly[Monoid[T]]
        val mc = new MonoidCatamorph[T, T](v => v, m.zero, m.operator)
        mc.subscription = new Subscription.Composite(
          inserts.onEvent(mc += _),
          removes.onEvent(mc -= _)
        )
        mc
      }
    }
  // Node of a height-balanced (AVL-style) aggregation tree: leaves hold elements,
  // inner nodes cache `op` applied over their subtrees, so the root carries the
  // aggregate of all leaves.
  sealed trait Node[@spec(Int, Long, Double) T] {
    def height: Int
    def value: T
    def parent: Inner[T]
    def parent_=(p: Inner[T]): Unit
    // Recomputes cached values along the path from this node up to the root.
    def pushUp(op: (T, T) => T): Unit
    def insert(leaf: Leaf[T], op: (T, T) => T): Node[T]
    def toString(indent: Int): String
    def housekeep(op: (T, T) => T) {}
    // Unchecked downcast; callers rely on balance invariants to guarantee Inner.
    def asInner = this.asInstanceOf[Inner[T]]
    override def toString = toString(0)
    def localString: String
  }
  class Inner[@spec(Int, Long, Double) T](
    var height: Int, var left: Node[T], var right: Node[T], var parent: Inner[T]
  ) extends Node[T] {
    // Cached aggregate of the left and right subtrees.
    private var v: T = _
    def value: T = v
    def value_=(v: T) = this.v = v
    def pushUp(op: (T, T) => T) {
      v = op(left.value, right.value)
      if (parent != null) parent.pushUp(op)
    }
    // Positive when left-heavy, negative when right-heavy.
    private def balance = left.height - right.height
    private def heightOf(l: Node[T], r: Node[T]) = 1 + math.max(l.height, r.height)
    // Refreshes this node's cached height and aggregate from its children.
    override def housekeep(op: (T, T) => T) {
      height = heightOf(left, right)
      value = op(left.value, right.value)
    }
    // Always inserts into the right subtree, then restores the balance invariant.
    def insert(leaf: Leaf[T], op: (T, T) => T): Node[T] = {
      right = right.insert(leaf, op)
      right.parent = this
      housekeep(op)
      rebalance(op)
    }
    // Standard AVL rebalancing: single or double rotation depending on the
    // balance factors of this node and the heavier child.
    def rebalance(op: (T, T) => T): Node[T] = {
      val b = balance
      if (b < -1) {
        if (right.asInner.balance > 0) {
          right = right.asInner.rotr(op)
          right.parent = this
        }
        rotl(op)
      } else if (b > 1) {
        if (left.asInner.balance < 0) {
          left = left.asInner.rotl(op)
          left.parent = this
        }
        rotr(op)
      } else this
    }
    // Left rotation; returns the new subtree root (its parent is cleared and must
    // be fixed by the caller).
    def rotl(op: (T, T) => T): Inner[T] = {
      val ntop = this.right.asInner
      this.right = ntop.left
      this.right.parent = this
      ntop.left = this
      this.parent = ntop
      ntop.parent = null
      this.housekeep(op)
      ntop.housekeep(op)
      ntop
    }
    // Right rotation; mirror image of rotl.
    def rotr(op: (T, T) => T): Inner[T] = {
      val ntop = this.left.asInner
      this.left = ntop.right
      this.left.parent = this
      ntop.right = this
      this.parent = ntop
      ntop.parent = null
      this.housekeep(op)
      ntop.housekeep(op)
      ntop
    }
    private def isLeft = parent.left eq this
    // Repairs the tree upwards after a leaf removal nulled one of this node's
    // children: splices out one-child nodes, rebalances, and returns the new root.
    def fixUp(op: (T, T) => T): Node[T] = {
      // check if both children are non-null
      // note that both can never be null
      val result = if (left == null) {
        if (parent == null) {
          right.parent = null
          right
        } else {
          if (isLeft) parent.left = right
          else parent.right = right
          right.parent = parent
          parent.fixUp(op)
        }
      } else if (right == null) {
        if (parent == null) {
          left.parent = null
          left
        } else {
          if (isLeft) parent.left = left
          else parent.right = left
          left.parent = parent
          parent.fixUp(op)
        }
      } else {
        housekeep(op)
        val above = this.parent
        val wasLeft = (above ne null) && isLeft
        val n = rebalance(op)
        n.parent = above
        if (above != null) {
          if (wasLeft) above.left = n
          else above.right = n
        }
        if (n.parent != null) n.asInner.parent.fixUp(op)
        else n
      }
      result
    }
    def toString(indent: Int) = " " * indent +
      s"Inner($height, \\n${left.toString(indent + 2)}, \\n${right.toString(indent + 2)})"
    def localString = s"Inner(h = $height, l.h = ${left.height}, r.h = ${right.height})"
  }
  // Leaf holding a single element; `get` recomputes its current value on demand.
  class Leaf[@spec(Int, Long, Double) T](val get: () => T, var parent: Inner[T])
  extends Node[T] {
    def height = 0
    def value = get()
    def pushUp(op: (T, T) => T) {
      if (parent != null) parent.pushUp(op)
    }
    // Inserting into a leaf grows a new Inner node with this leaf on the left.
    def insert(leaf: Leaf[T], op: (T, T) => T): Node[T] = {
      val inner = new Inner(1, this, leaf, null)
      this.parent = inner
      leaf.parent = inner
      inner.value = op(this.value, leaf.value)
      inner
    }
    // Detaches this leaf and returns the repaired tree root.
    def remove(zero: T, op: (T, T) => T): Node[T] = {
      if (parent == null) {
        // the only value left
        new Empty(zero)
      } else {
        def isLeft = parent.left eq this
        if (isLeft) parent.left = null
        else parent.right = null
        parent.fixUp(op)
      }
    }
    def toString(indent: Int) = " " * indent + s"Leaf(${get()})"
    def localString = s"Leaf(${get()})"
  }
  // Empty tree: carries the monoid's zero so the aggregate is defined with no elements.
  class Empty[@spec(Int, Long, Double) T](val value: T) extends Node[T] {
    def height = 0
    def parent = null
    def parent_=(p: Inner[T]) = throw new IllegalStateException
    def pushUp(op: (T, T) => T) {}
    def insert(leaf: Leaf[T], op: (T, T) => T) = leaf
    def toString(indent: Int) = " " * indent + s"Empty($value)"
    def localString = s"Empty($value)"
  }
}
| storm-enroute/reactors | reactors-container/shared/src/main/scala/io/reactors/container/MonoidCatamorph.scala | Scala | bsd-3-clause | 7,592 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s
import io.fabric8.kubernetes.api.model.{Container, Pod}
/**
 * Groups a Kubernetes [[Pod]] together with an init [[Container]] and the main
 * [[Container]] that are held separately ("detached") from the pod object itself.
 *
 * @param pod the pod specification
 * @param initContainer the init container, kept outside the pod spec
 * @param mainContainer the main container, kept outside the pod spec
 */
private[spark] case class PodWithDetachedInitContainer(
    pod: Pod,
    initContainer: Container,
    mainContainer: Container)
| publicRoman/spark | resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/PodWithDetachedInitContainer.scala | Scala | apache-2.0 | 1,025 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.instrumentation
import org.bdgenomics.utils.instrumentation.Metrics
/**
* Contains [[Timers]] that are used to instrument ADAM.
*/
object Timers extends Metrics {
  // Each `timer(name)` call registers a named timer with the inherited Metrics
  // machinery; the string is the label under which timings are reported.
  // Load methods
  val LoadAlignments = timer("Load Alignments")
  val LoadContigFragments = timer("Load Contig Fragments")
  val LoadCoverage = timer("Load Coverage")
  val LoadFeatures = timer("Load Features")
  val LoadFragments = timer("Load Fragments")
  val LoadGenotypes = timer("Load Genotypes")
  val LoadReferenceFile = timer("Load ReferenceFile")
  val LoadSequenceDictionary = timer("Load SequenceDictionary")
  val LoadVariants = timer("Load Variants")
  // Format specific load methods
  val LoadBam = timer("Load BAM/CRAM/SAM format")
  val LoadBed = timer("Load BED6/12 format")
  val LoadFasta = timer("Load FASTA format")
  val LoadFastq = timer("Load FASTQ format")
  val LoadGff3 = timer("Load GFF3 format")
  val LoadGtf = timer("Load GTF/GFF2 format")
  val LoadIndexedBam = timer("Load indexed BAM format")
  val LoadIndexedVcf = timer("Load indexed VCF format")
  val LoadInterleavedFastq = timer("Load interleaved FASTQ format")
  val LoadInterleavedFastqFragments = timer("Load interleaved FASTQ format as Fragments")
  val LoadIntervalList = timer("Load IntervalList format")
  val LoadNarrowPeak = timer("Load NarrowPeak format")
  val LoadPairedFastq = timer("Load paired FASTQ format")
  val LoadParquet = timer("Load Parquet + Avro format")
  val LoadUnpairedFastq = timer("Load unpaired FASTQ format")
  val LoadVcf = timer("Load VCF format")
  // Trim Reads
  val TrimReadsInDriver = timer("Trim Reads")
  val TrimRead = timer("Trim Reads")
  val TrimCigar = timer("Trim Cigar")
  val TrimMDTag = timer("Trim MD Tag")
  // Trim Low Quality Read Groups
  val TrimLowQualityInDriver = timer("Trim Low Quality Read Groups")
  // Mark Duplicates
  val MarkDuplicatesInDriver = timer("Mark Duplicates")
  val CreateReferencePositionPair = timer("Create Reference Position Pair")
  val PerformDuplicateMarking = timer("Perform Duplicate Marking")
  val ScoreAndMarkReads = timer("Score and Mark Reads")
  val MarkReads = timer("Mark Reads")
  // Recalibrate Base Qualities
  val BQSRInDriver = timer("Base Quality Recalibration")
  val CreatingKnownSnpsTable = timer("Creating Known SNPs Table")
  val CollectingSnps = timer("Collecting SNPs")
  val BroadcastingKnownSnps = timer("Broadcasting known SNPs")
  val ComputeCovariates = timer("Compute Covariates")
  val ObservingRead = timer("Observing covariates per read")
  val ReadCovariates = timer("Computing covariates per read base")
  val ComputingDinucCovariate = timer("Computing dinuc covariate")
  val ComputingCycleCovariate = timer("Computing cycle covariate")
  val ReadResidues = timer("Splitting read into residues")
  val CheckingForMask = timer("Checking if residue is masked")
  val ObservationAccumulatorComb = timer("Observation Accumulator: comb")
  val ObservationAccumulatorSeq = timer("Observation Accumulator: seq")
  val RecalibrateRead = timer("Recalibrate Read")
  val ComputeQualityScore = timer("Compute Quality Score")
  val GetExtraValues = timer("Get Extra Values")
  val CreatingRecalibrationTable = timer("Creating recalibration table")
  val InvertingRecalibrationTable = timer("Inverting recalibration table")
  val QueryingRecalibrationTable = timer("Querying recalibration table")
  // Realign Indels
  val RealignIndelsInDriver = timer("Realign Indels")
  val FindTargets = timer("Find Targets")
  val CreateIndelRealignmentTargets = timer("Create Indel Realignment Targets for Read")
  val SortTargets = timer("Sort Targets")
  val JoinTargets = timer("Join Targets")
  val MapTargets = timer("Map Targets")
  val RealignTargetGroup = timer("Realign Target Group")
  val GetReferenceFromReads = timer("Get Reference From Reads")
  val GetReferenceFromFile = timer("Get Reference From File")
  val ComputingOriginalScores = timer("Computing Original Mismatch Scores")
  val SweepReadsOverConsensus = timer("Sweeping Reads Over A Single Consensus")
  val SweepReadOverReferenceForQuality = timer("Sweep Read Over Reference For Quality")
  val FinalizingRealignments = timer("Finalizing Realignments")
  // Sort Reads
  val SortReads = timer("Sort Reads")
  val SortByIndex = timer("Sort Reads By Index")
  // File Saving
  val SAMSave = timer("SAM Save")
  val ConvertToSAM = timer("Convert To SAM")
  val ConvertToSAMRecord = timer("Convert To SAM Record")
  val SaveAsADAM = timer("Save File In ADAM Format")
  val WriteADAMRecord = timer("Write ADAM Record")
  val WriteBAMRecord = timer("Write BAM Record")
  val WriteSAMRecord = timer("Write SAM Record")
  val WriteCRAMRecord = timer("Write CRAM Record")
  // org.bdgenomics.adam.rdd.TreeRegionJoin
  val TreeJoin = timer("Running broadcast join with interval tree")
  val BuildingTrees = timer("Building interval tree")
  val SortingRightSide = timer("Sorting right side of join")
  val GrowingTrees = timer("Growing forest of trees")
  val RunningMapSideJoin = timer("Running map-side join")
  // org.bdgenomics.adam.rdd.GenomicRDD
  val InnerBroadcastJoin = timer("Inner broadcast region join")
  val RightOuterBroadcastJoin = timer("Right outer broadcast region join")
  val BroadcastJoinAndGroupByRight = timer("Broadcast join followed by group-by on right")
  val RightOuterBroadcastJoinAndGroupByRight = timer("Right outer broadcast join followed by group-by on right")
  val InnerShuffleJoin = timer("Inner shuffle region join")
  val RightOuterShuffleJoin = timer("Right outer shuffle region join")
  val LeftOuterShuffleJoin = timer("Left outer shuffle region join")
  val FullOuterShuffleJoin = timer("Full outer shuffle region join")
  val ShuffleJoinAndGroupByLeft = timer("Shuffle join followed by group-by on left")
  val RightOuterShuffleJoinAndGroupByLeft = timer("Right outer shuffle join followed by group-by on left")
}
| laserson/adam | adam-core/src/main/scala/org/bdgenomics/adam/instrumentation/Timers.scala | Scala | apache-2.0 | 6,713 |
package io.github.binaryfoo.lagotto.shell
import java.io.File
import io.github.binaryfoo.lagotto.reader.FileIO
import io.github.binaryfoo.lagotto.{AggregateLogEntry, JposEntry, LagoTest}
/**
 * Tests for the naive log filter expression parser: regex operators, set and
 * file-based membership operators, aggregate comparisons, filter equality,
 * conjunctions ("and" expressions) and the stateful `channelWith` filter.
 */
class LogFilterTest extends LagoTest {
  import io.github.binaryfoo.lagotto.LogFilters.NaiveParser.LogFilter
  import io.github.binaryfoo.lagotto.LogFilters.NaiveParser.LogFilter.filterFor
  import io.github.binaryfoo.lagotto.LogFilters.NaiveParser.parseAndExpr
  "Contains operator" should "use a regex if value like /regex/" in {
    val filter = filterFor("""mti~/\\d\\d[13]\\d/""")
    filter(JposEntry("0" -> "0210")) shouldBe true
    filter(JposEntry("0" -> "0200")) shouldBe false
    filter(JposEntry("0" -> "0230")) shouldBe true
    filter(JposEntry("0" -> "0220")) shouldBe false
    filter(JposEntry("0" -> "023")) shouldBe false
  }
  it should "negate the regex for value like /regex/ with operator !~" in {
    val filter = filterFor("""mti!~/\\d\\d[13]\\d/""")
    filter.field shouldBe "mti"
    filter(JposEntry("0" -> "0210")) shouldBe false
    filter(JposEntry("0" -> "0200")) shouldBe true
    filter(JposEntry("0" -> "0230")) shouldBe false
    filter(JposEntry("0" -> "0220")) shouldBe true
    filter(JposEntry("0" -> "023")) shouldBe true
  }
  "in operator" should "match value in set" in {
    val filter = filterFor("socket in (10.0.0.1:8000,10.0.0.2:8001)")
    filter.field shouldBe "socket"
    filter(JposEntry("realm" -> "channel/10.0.0.1:8000")) shouldBe true
    filter(JposEntry("realm" -> "channel/10.0.0.1:8001")) shouldBe false
    filter(JposEntry("realm" -> "channel/10.0.0.2:8001")) shouldBe true
    filter(JposEntry("realm" -> "channel/10.0.0.2:8000")) shouldBe false
    filter(JposEntry("realm" -> "channel/10.0.0.3:8000")) shouldBe false
  }
  "not operator" should "match value not in set" in {
    val filter = filterFor("msgType not in (send,receive)")
    filter.field shouldBe "msgType"
    filter(JposEntry("msgType" -> "bollox")) shouldBe true
    filter(JposEntry("msgType" -> "send")) shouldBe false
    filter(JposEntry("msgType" -> "receive")) shouldBe false
  }
  // "in file" reads the allowed values, one per line, from a file on disk.
  "in file operator" should "match line from file" in {
    val temp = fileHolding("10.0.0.1:8000", "10.0.0.2:8001")
    val filter = filterFor(s"""socket in file "$temp"""")
    filter.field shouldBe "socket"
    filter(JposEntry("realm" -> "channel/10.0.0.1:8000")) shouldBe true
    filter(JposEntry("realm" -> "channel/10.0.0.1:8001")) shouldBe false
    filter(JposEntry("realm" -> "channel/10.0.0.2:8001")) shouldBe true
    filter(JposEntry("realm" -> "channel/10.0.0.2:8000")) shouldBe false
    filter(JposEntry("realm" -> "channel/10.0.0.3:8000")) shouldBe false
  }
  "not in file operator" should "match values not in files" in {
    val tempFile = fileHolding("send", "receive")
    val filter = filterFor(s"""msgType not in file "$tempFile"""")
    filter.field shouldBe "msgType"
    filter(JposEntry("msgType" -> "bollox")) shouldBe true
    filter(JposEntry("msgType" -> "send")) shouldBe false
    filter(JposEntry("msgType" -> "receive")) shouldBe false
  }
  "Equals operator" should "allow comparison with empty string" in {
    val filter = filterFor("exception!=")
    filter.field shouldBe "exception"
    filter(JposEntry("exception" -> "oh my")) shouldBe true
    filter(JposEntry("exception" -> "")) shouldBe false
    filter(JposEntry()) shouldBe false
  }
  "Regex contains operator" should "not match null" in {
    val filter = filterFor("""mti!~/3.*/""")
    filter(JposEntry()) shouldBe false
  }
  // Comparison operators also apply to aggregate fields like count(condition).
  "Greater than operator" should "be applicable to the result of count(condition)" in {
    val filter = filterFor("count(mti=0200)>10")
    filter.field shouldBe "count(mti=0200)"
    filter(AggregateLogEntry(Map.empty, Seq("count(mti=0200)" -> "10"))) shouldBe true
    filter(AggregateLogEntry(Map.empty, Seq("count(mti=0200)" -> "9"))) shouldBe false
  }
  "Regex operator" should "be applicable to the result of count(condition)" in {
    val filter = filterFor("count(mti=0200)~/1[01]1/")
    filter.field shouldBe "count(mti=0200)"
    filter(AggregateLogEntry(Map.empty, Seq("count(mti=0200)" -> "101"))) shouldBe true
    filter(AggregateLogEntry(Map.empty, Seq("count(mti=0200)" -> "111"))) shouldBe true
    filter(AggregateLogEntry(Map.empty, Seq("count(mti=0200)" -> "191"))) shouldBe false
  }
  "Filters" should "compare equal for the same expression" in {
    filterFor("lifespan=10") shouldEqual filterFor("lifespan=10")
    filterFor("lifespan>10") shouldEqual filterFor("lifespan>10")
    filterFor("lifespan<10") shouldEqual filterFor("lifespan<10")
    filterFor("lifespan~10") shouldEqual filterFor("lifespan~10")
  }
  "Filters" should "compare not equal for different expressions" in {
    filterFor("lifespan=10") shouldNot equal(filterFor("lifespan!=10"))
    filterFor("lifespan=10") shouldNot equal(filterFor("lifespan=11"))
    filterFor("lifespan=10") shouldNot equal(filterFor("delay=10"))
  }
  "AndFilter" should "pass when all children pass" in {
    val filter = parseAndExpr("0=0200,realm=scheme").get
    filter(JposEntry("0" -> "0200", "realm" -> "scheme")) shouldBe true
  }
  // Fixed test description: was "pass fail when a children fails".
  it should "fail when a child fails" in {
    val filter = parseAndExpr("0=0200,realm=scheme").get
    filter(JposEntry("0" -> "0210", "realm" -> "scheme")) shouldBe false
    filter(JposEntry("0" -> "0200", "realm" -> "silly")) shouldBe false
  }
  it should "work with only one child" in {
    val filter = parseAndExpr("0=0210").get
    filter(JposEntry("0" -> "0200")) shouldBe false
    filter(JposEntry("0" -> "0210")) shouldBe true
  }
  "ChannelWith" should "match immediate condition" in {
    val filter = LogFilter.unapply("channelWith(48=magic)").get
    filter(JposEntry("48" -> "magic")) shouldBe true
  }
  it should "match message with the same realm as a previously matched message" in {
    val filter = LogFilter.unapply("channelWith(48=magic)").get
    filter(JposEntry("48" -> "magic", "realm" -> "some/0.0.0.0:1234")) // stateful filter
    filter(JposEntry("realm" -> "some/0.0.0.0:1234")) shouldBe true
    filter(JposEntry("realm" -> "some/0.0.0.0:1235")) shouldBe false
  }
  it should "not match message with the same realm as a previously matched message once that channel is closed" in {
    val filter = LogFilter.unapply("channelWith(48=magic)").get
    filter(JposEntry("48" -> "magic", "realm" -> "some/0.0.0.0:1234")) // stateful filter
    filter(JposEntry("realm" -> "some/0.0.0.0:1234", "msgType" -> "session-end")) shouldBe true
    filter(JposEntry("realm" -> "some/0.0.0.0:1234")) shouldBe false
  }
  // Writes each argument as one line to a fresh temp file and returns its path.
  private def fileHolding(lines: String*): String = {
    val temp = tempFile()
    FileIO.writeLines(temp.getAbsolutePath, lines)
    temp.getAbsolutePath
  }
}
| binaryfoo/lagotto | src/test/scala/io/github/binaryfoo/lagotto/shell/LogFilterTest.scala | Scala | mit | 6,783 |
package com.twitter.querulous.query
import java.sql.{Timestamp, Connection}
/**
 * A `QueryFactory` decorator: every query produced by the underlying factory is
 * wrapped in a [[DebuggingQuery]] so its SQL text and bind parameters are passed
 * to `log` when the query executes.
 */
class DebuggingQueryFactory(queryFactory: QueryFactory, log: String => Unit) extends QueryFactory {
  def apply(connection: Connection, queryClass: QueryClass, query: String, params: Any*) = {
    // Build the real query first, then decorate it with logging behaviour.
    val underlying = queryFactory(connection, queryClass, query, params: _*)
    new DebuggingQuery(underlying, log, query, params)
  }
}
class DebuggingQuery(query: Query, log: String => Unit, queryString: String, params: Seq[Any])
  extends QueryProxy(query) {
  // Renders one bind parameter for the debug log: strings/chars are quoted, byte
  // arrays are summarised by size, collections are rendered recursively.
  def makeDebugString(param: Any): String = {
    param match {
      case s: String =>
        "\\""+s+"\\""
      case c: Char =>
        "'"+c+"'"
      case l: Long =>
        l.toString
      case i: Int =>
        i.toString
      case b: Array[Byte] =>
        "(" + b.size + " bytes)"
      case b: Boolean =>
        b.toString
      case d: Double =>
        d.toString
      case t: Timestamp =>
        t.toString
      case is: Iterable[_] =>
        is.map(makeDebugString).mkString("(", ", ", ")")
      case nv: NullValue =>
        "null"
      case _ =>
        // Fallback for unsupported types; the value itself is not printed.
        "Unknown argument type."
    }
  }
  // Logs the query string with its rendered parameters, then runs the wrapped
  // operation (delegation point provided by QueryProxy).
  override def delegate[A](f: => A) = {
    log(queryString + " " + params.map(makeDebugString).mkString("(", ", ", ")"))
    f
  }
}
| kievbs/querulous210 | src/main/scala/com/twitter/querulous/query/DebuggingQuery.scala | Scala | apache-2.0 | 1,252 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle
import java.io.{ByteArrayOutputStream, InputStream}
import java.nio.ByteBuffer
import org.mockito.ArgumentMatchers.{eq => meq}
import org.mockito.Mockito.{mock, when}
import org.apache.spark._
import org.apache.spark.internal.config
import org.apache.spark.network.buffer.{ManagedBuffer, NioManagedBuffer}
import org.apache.spark.serializer.{JavaSerializer, SerializerManager}
import org.apache.spark.storage.{BlockManager, BlockManagerId, ShuffleBlockId}
/**
* Wrapper for a managed buffer that keeps track of how many times retain and release are called.
*
* We need to define this class ourselves instead of using a spy because the NioManagedBuffer class
* is final (final classes cannot be spied on).
*/
class RecordingManagedBuffer(underlyingBuffer: NioManagedBuffer) extends ManagedBuffer {
  // Counts of retain()/release() invocations; asserted on by tests to verify
  // reference-count discipline.
  var callsToRetain = 0
  var callsToRelease = 0
  // Plain delegation to the wrapped buffer.
  override def size(): Long = underlyingBuffer.size()
  override def nioByteBuffer(): ByteBuffer = underlyingBuffer.nioByteBuffer()
  override def createInputStream(): InputStream = underlyingBuffer.createInputStream()
  override def convertToNetty(): AnyRef = underlyingBuffer.convertToNetty()
  // Record the call before delegating.
  override def retain(): ManagedBuffer = {
    callsToRetain += 1
    underlyingBuffer.retain()
  }
  override def release(): ManagedBuffer = {
    callsToRelease += 1
    underlyingBuffer.release()
  }
}
class BlockStoreShuffleReaderSuite extends SparkFunSuite with LocalSparkContext {
  /**
   * This test makes sure that, when data is read from a HashShuffleReader, the underlying
   * ManagedBuffers that contain the data are eventually released.
   */
  test("read() releases resources on completion") {
    val testConf = new SparkConf(false)
    // Create a SparkContext as a convenient way of setting SparkEnv (needed because some of the
    // shuffle code calls SparkEnv.get()).
    sc = new SparkContext("local", "test", testConf)
    val reduceId = 15
    val shuffleId = 22
    val numMaps = 6
    val keyValuePairsPerMap = 10
    val serializer = new JavaSerializer(testConf)
    // Make a mock BlockManager that will return RecordingManagedByteBuffers of data, so that we
    // can ensure retain() and release() are properly called.
    val blockManager = mock(classOf[BlockManager])
    // Create a buffer with some randomly generated key-value pairs to use as the shuffle data
    // from each mappers (all mappers return the same shuffle data).
    val byteOutputStream = new ByteArrayOutputStream()
    val serializationStream = serializer.newInstance().serializeStream(byteOutputStream)
    (0 until keyValuePairsPerMap).foreach { i =>
      serializationStream.writeKey(i)
      serializationStream.writeValue(2*i)
    }
    // Setup the mocked BlockManager to return RecordingManagedBuffers.
    val localBlockManagerId = BlockManagerId("test-client", "test-client", 1)
    when(blockManager.blockManagerId).thenReturn(localBlockManagerId)
    val buffers = (0 until numMaps).map { mapId =>
      // Create a ManagedBuffer with the shuffle data.
      val nioBuffer = new NioManagedBuffer(ByteBuffer.wrap(byteOutputStream.toByteArray))
      val managedBuffer = new RecordingManagedBuffer(nioBuffer)
      // Setup the blockManager mock so the buffer gets returned when the shuffle code tries to
      // fetch shuffle data.
      val shuffleBlockId = ShuffleBlockId(shuffleId, mapId, reduceId)
      when(blockManager.getLocalBlockData(meq(shuffleBlockId))).thenReturn(managedBuffer)
      managedBuffer
    }
    // Make a mocked MapOutputTracker for the shuffle reader to use to determine what
    // shuffle data to read.
    val mapOutputTracker = mock(classOf[MapOutputTracker])
    when(mapOutputTracker.getMapSizesByExecutorId(
      shuffleId, reduceId, reduceId + 1)).thenReturn {
      // Test a scenario where all data is local, to avoid creating a bunch of additional mocks
      // for the code to read data over the network.
      val shuffleBlockIdsAndSizes = (0 until numMaps).map { mapId =>
        val shuffleBlockId = ShuffleBlockId(shuffleId, mapId, reduceId)
        (shuffleBlockId, byteOutputStream.size().toLong, mapId)
      }
      Seq((localBlockManagerId, shuffleBlockIdsAndSizes)).toIterator
    }
    // Create a mocked shuffle handle to pass into HashShuffleReader.
    val shuffleHandle = {
      val dependency = mock(classOf[ShuffleDependency[Int, Int, Int]])
      when(dependency.serializer).thenReturn(serializer)
      when(dependency.aggregator).thenReturn(None)
      when(dependency.keyOrdering).thenReturn(None)
      new BaseShuffleHandle(shuffleId, dependency)
    }
    // Disable compression so the serialized bytes written above can be read back
    // directly by the reader.
    val serializerManager = new SerializerManager(
      serializer,
      new SparkConf()
        .set(config.SHUFFLE_COMPRESS, false)
        .set(config.SHUFFLE_SPILL_COMPRESS, false))
    val taskContext = TaskContext.empty()
    val metrics = taskContext.taskMetrics.createTempShuffleReadMetrics()
    val blocksByAddress = mapOutputTracker.getMapSizesByExecutorId(
      shuffleId, reduceId, reduceId + 1)
    val shuffleReader = new BlockStoreShuffleReader(
      shuffleHandle,
      blocksByAddress,
      taskContext,
      metrics,
      serializerManager,
      blockManager)
    assert(shuffleReader.read().length === keyValuePairsPerMap * numMaps)
    // Calling .length above will have exhausted the iterator; make sure that exhausting the
    // iterator caused retain and release to be called on each buffer.
    buffers.foreach { buffer =>
      assert(buffer.callsToRetain === 1)
      assert(buffer.callsToRelease === 1)
    }
  }
}
| goldmedal/spark | core/src/test/scala/org/apache/spark/shuffle/BlockStoreShuffleReaderSuite.scala | Scala | apache-2.0 | 6,388 |
package com.nextcentury.techexchange.todofrp
import javax.ws.rs.ext.ExceptionMapper
import javax.ws.rs.ext.Provider
import javax.ws.rs.core.Response
import javax.ws.rs.Produces
/**
 * JAX-RS provider mapping a [[NoSuchElementException]] to an HTTP 404 (Not Found)
 * response with the exception serialized as JSON.
 */
@Provider
@Produces(Array("application/json"))
class NoSuchElementExceptionMapper extends ExceptionMapper[NoSuchElementException] {
  override def toResponse(exception: NoSuchElementException) = {
    // Carry the exception as the response entity so the client sees the failure detail.
    val notFound = Response.status(Response.Status.NOT_FOUND)
    notFound.entity(exception).build
  }
}
| rpokorny/techexchange-frp-todolist | backend/src/main/scala/com/nextcentury/techexchange/todofrp/NoSuchElementExceptionMapper.scala | Scala | apache-2.0 | 447 |
/*
Copyright (c) 2013-2015, David B. Dahl, Brigham Young University
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
Neither the name of the <ORGANIZATION> nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.apache.zeppelin.rinterpreter.rscala
// TODO: Add libdir to constructor
import java.io._
import java.net.{InetAddress, ServerSocket}
import org.slf4j.{Logger, LoggerFactory}
import scala.language.dynamics
class RClient (private val in: DataInputStream,
private val out: DataOutputStream,
val debug: Boolean = true) extends Dynamic {
  // Set to true once a socket failure is observed; subsequent calls that check it
  // fail fast instead of writing to a dead connection.
  var damagedState : Boolean = false
  private val logger: Logger = LoggerFactory.getLogger(getClass)
  /** Reference to an object held on the R side; `toString` yields ".$" + reference. */
  case class RObjectRef(val reference : String) {
    override def toString() = ".$"+reference
  }
  /** __For rscala developers only__: Sets whether debugging output should be displayed. */
  // NOTE(review): `debug` is an immutable constructor val, so this setter only sends
  // the DEBUG request to the R process — the local `debug` field never changes, and
  // repeated calls always compare against the original value. Confirm this is intended.
  def debug_=(v: Boolean) = {
    if ( v != debug ) {
      if ( debug ) logger.debug("Sending DEBUG request.")
      out.writeInt(RClient.Protocol.DEBUG)
      out.writeInt(if ( v ) 1 else 0)
      out.flush()
    }
  }
  /** Sends the EXIT request, asking the R interpreter process to shut down. */
  def exit() = {
    logger.debug("Sending EXIT request.")
    out.writeInt(RClient.Protocol.EXIT)
    out.flush()
  }
  /**
   * Sends an EVAL request with `snippet` to the R process and checks the reply.
   * Throws RException when R reports error output or a non-OK status. Returns
   * `null` when `evalOnly` is true, otherwise the value of `.rzeppelin.last.value`.
   * A SocketException marks the connection as damaged instead of propagating.
   */
  def eval(snippet: String, evalOnly: Boolean = true): Any = try {
    if (damagedState) throw new RException("Connection to R already damaged")
    logger.debug("Sending EVAL request.")
    out.writeInt(RClient.Protocol.EVAL)
    RClient.writeString(out,snippet)
    out.flush()
    val status = in.readInt()
    val output = RClient.readString(in)
    if ( output != "" ) {
      logger.error("R Error " + snippet + " " + output)
      throw new RException(snippet, output)
    }
    if ( status != RClient.Protocol.OK ) throw new RException(snippet, output, "Error in R evaluation.")
    if ( evalOnly ) null else get(".rzeppelin.last.value")._1
  } catch {
    case e : java.net.SocketException => {
      // NOTE(review): this branch returns Unit, so callers get () rather than a
      // value after a socket failure — confirm callers handle that.
      logger.error("Connection to R appears to have shut down" + e)
      damagedState = true
    }
  }
  // Typed convenience wrappers: evaluate the snippet, then fetch
  // `.rzeppelin.last.value` via the corresponding typed getter
  // (I = integer, B = boolean, S = string; 0 = scalar, 1 = vector, R = reference).
  def evalI0(snippet: String) = { eval(snippet,true); getI0(".rzeppelin.last.value") }
  def evalB0(snippet: String) = { eval(snippet,true); getB0(".rzeppelin.last.value") }
  def evalS0(snippet: String) = { eval(snippet,true); getS0(".rzeppelin.last.value") }
  def evalI1(snippet: String) = { eval(snippet,true); getI1(".rzeppelin.last.value") }
  def evalB1(snippet: String) = { eval(snippet,true); getB1(".rzeppelin.last.value") }
  def evalS1(snippet: String) = { eval(snippet,true); getS1(".rzeppelin.last.value") }
  def evalR( snippet: String) = { eval(snippet,true); getR( ".rzeppelin.last.value") }
def set(identifier: String, value: Any): Unit = set(identifier,value,"",true)
def set(identifier: String, value: Any, index: String = "", singleBrackets: Boolean = true): Unit = {
if (damagedState) throw new RException("Connection to R already damaged")
val v = value
if ( index == "" ) out.writeInt(RClient.Protocol.SET)
else if ( singleBrackets ) {
out.writeInt(RClient.Protocol.SET_SINGLE)
RClient.writeString(out,index)
} else {
out.writeInt(RClient.Protocol.SET_DOUBLE)
RClient.writeString(out,index)
}
RClient.writeString(out,identifier)
if ( v == null || v.isInstanceOf[Unit] ) {
logger.debug("... which is null")
out.writeInt(RClient.Protocol.NULLTYPE)
out.flush()
if ( index != "" ) {
val status = in.readInt()
if ( status != RClient.Protocol.OK ) {
val output = RClient.readString(in)
if ( output != "" ) {
logger.error("R error setting " + output)
throw new RException(identifier + value.toString(), output, "Error setting")
}
throw new RException("Error in R evaluation. Set " + identifier + " to " + value.toString())
}
}
return
}
val c = v.getClass
logger.debug("... whose class is: "+c)
logger.debug("... and whose value is: "+v)
if ( c.isArray ) {
c.getName match {
case "[I" =>
val vv = v.asInstanceOf[Array[Int]]
out.writeInt(RClient.Protocol.VECTOR)
out.writeInt(vv.length)
out.writeInt(RClient.Protocol.INTEGER)
for ( i <- 0 until vv.length ) out.writeInt(vv(i))
case "[Z" =>
val vv = v.asInstanceOf[Array[Boolean]]
out.writeInt(RClient.Protocol.VECTOR)
out.writeInt(vv.length)
out.writeInt(RClient.Protocol.BOOLEAN)
for ( i <- 0 until vv.length ) out.writeInt(if ( vv(i) ) 1 else 0)
case "[Ljava.lang.String;" =>
val vv = v.asInstanceOf[Array[String]]
out.writeInt(RClient.Protocol.VECTOR)
out.writeInt(vv.length)
out.writeInt(RClient.Protocol.STRING)
for ( i <- 0 until vv.length ) RClient.writeString(out,vv(i))
case _ =>
throw new RException("Unsupported array type: "+c.getName)
}
} else {
c.getName match {
case "java.lang.Integer" =>
out.writeInt(RClient.Protocol.ATOMIC)
out.writeInt(RClient.Protocol.INTEGER)
out.writeInt(v.asInstanceOf[Int])
case "java.lang.Boolean" =>
out.writeInt(RClient.Protocol.ATOMIC)
out.writeInt(RClient.Protocol.BOOLEAN)
out.writeInt(if (v.asInstanceOf[Boolean]) 1 else 0)
case "java.lang.String" =>
out.writeInt(RClient.Protocol.ATOMIC)
out.writeInt(RClient.Protocol.STRING)
RClient.writeString(out,v.asInstanceOf[String])
case _ =>
throw new RException("Unsupported non-array type: "+c.getName)
}
}
out.flush()
if ( index != "" ) {
val status = in.readInt()
if ( status != RClient.Protocol.OK ) {
val output = RClient.readString(in)
if ( output != "" ) throw new RException(identifier + value.toString(), output, "Error setting")
throw new RException("Error in R evaluation.")
}
}
}
def get(identifier: String, asReference: Boolean = false): (Any,String) = {
logger.debug("Getting: "+identifier)
out.writeInt(if ( asReference ) RClient.Protocol.GET_REFERENCE else RClient.Protocol.GET)
RClient.writeString(out,identifier)
out.flush()
if ( asReference ) {
val r = in.readInt() match {
case RClient.Protocol.REFERENCE => (RObjectRef(RClient.readString(in)),"RObject")
case RClient.Protocol.UNDEFINED_IDENTIFIER =>
throw new RException("Undefined identifier")
}
return r
}
in.readInt match {
case RClient.Protocol.NULLTYPE =>
logger.debug("Getting null.")
(null,"Null")
case RClient.Protocol.ATOMIC =>
logger.debug("Getting atomic.")
in.readInt() match {
case RClient.Protocol.INTEGER => (in.readInt(),"Int")
case RClient.Protocol.DOUBLE => (in.readDouble(),"Double")
case RClient.Protocol.BOOLEAN => (( in.readInt() != 0 ),"Boolean")
case RClient.Protocol.STRING => (RClient.readString(in),"String")
case _ => throw new RException("Protocol error")
}
case RClient.Protocol.VECTOR =>
logger.debug("Getting vector...")
val length = in.readInt()
logger.debug("... of length: "+length)
in.readInt() match {
case RClient.Protocol.INTEGER => (Array.fill(length) { in.readInt() },"Array[Int]")
case RClient.Protocol.DOUBLE => (Array.fill(length) { in.readDouble() },"Array[Double]")
case RClient.Protocol.BOOLEAN => (Array.fill(length) { ( in.readInt() != 0 ) },"Array[Boolean]")
case RClient.Protocol.STRING => (Array.fill(length) { RClient.readString(in) },"Array[String]")
case _ => throw new RException("Protocol error")
}
case RClient.Protocol.MATRIX =>
logger.debug("Getting matrix...")
val nrow = in.readInt()
val ncol = in.readInt()
logger.debug("... of dimensions: "+nrow+","+ncol)
in.readInt() match {
case RClient.Protocol.INTEGER => (Array.fill(nrow) { Array.fill(ncol) { in.readInt() } },"Array[Array[Int]]")
case RClient.Protocol.DOUBLE => (Array.fill(nrow) { Array.fill(ncol) { in.readDouble() } },"Array[Array[Double]]")
case RClient.Protocol.BOOLEAN => (Array.fill(nrow) { Array.fill(ncol) { ( in.readInt() != 0 ) } },"Array[Array[Boolean]]")
case RClient.Protocol.STRING => (Array.fill(nrow) { Array.fill(ncol) { RClient.readString(in) } },"Array[Array[String]]")
case _ => throw new RException("Protocol error")
}
case RClient.Protocol.UNDEFINED_IDENTIFIER => throw new RException("Undefined identifier")
case RClient.Protocol.UNSUPPORTED_STRUCTURE => throw new RException("Unsupported data type")
case _ => throw new RException("Protocol error")
}
}
def getI0(identifier: String): Int = get(identifier) match {
case (a,"Int") => a.asInstanceOf[Int]
case (a,"Double") => a.asInstanceOf[Double].toInt
case (a,"Boolean") => if (a.asInstanceOf[Boolean]) 1 else 0
case (a,"String") => a.asInstanceOf[String].toInt
case (a,"Array[Int]") => a.asInstanceOf[Array[Int]](0)
case (a,"Array[Double]") => a.asInstanceOf[Array[Double]](0).toInt
case (a,"Array[Boolean]") => if ( a.asInstanceOf[Array[Boolean]](0) ) 1 else 0
case (a,"Array[String]") => a.asInstanceOf[Array[String]](0).toInt
case (_,tp) => throw new RException(s"Unable to cast ${tp} to Int")
}
def getD0(identifier: String): Double = get(identifier) match {
case (a,"Int") => a.asInstanceOf[Int].toDouble
case (a,"Double") => a.asInstanceOf[Double]
case (a,"Boolean") => if (a.asInstanceOf[Boolean]) 1.0 else 0.0
case (a,"String") => a.asInstanceOf[String].toDouble
case (a,"Array[Int]") => a.asInstanceOf[Array[Int]](0).toDouble
case (a,"Array[Double]") => a.asInstanceOf[Array[Double]](0)
case (a,"Array[Boolean]") => if ( a.asInstanceOf[Array[Boolean]](0) ) 1.0 else 0.0
case (a,"Array[String]") => a.asInstanceOf[Array[String]](0).toDouble
case (_,tp) => throw new RException(s"Unable to cast ${tp} to Double")
}
def getB0(identifier: String): Boolean = get(identifier) match {
case (a,"Int") => a.asInstanceOf[Int] != 0
case (a,"Boolean") => a.asInstanceOf[Boolean]
case (a,"String") => a.asInstanceOf[String].toLowerCase != "false"
case (a,"Array[Int]") => a.asInstanceOf[Array[Int]](0) != 0
case (a,"Array[Boolean]") => a.asInstanceOf[Array[Boolean]](0)
case (a,"Array[String]") => a.asInstanceOf[Array[String]](0).toLowerCase != "false"
case (_,tp) => throw new RException(s"Unable to cast ${tp} to Boolean")
}
def getS0(identifier: String): String = get(identifier) match {
case (a,"Int") => a.asInstanceOf[Int].toString
case (a,"Boolean") => a.asInstanceOf[Boolean].toString
case (a,"String") => a.asInstanceOf[String]
case (a,"Array[Int]") => a.asInstanceOf[Array[Int]](0).toString
case (a,"Array[Boolean]") => a.asInstanceOf[Array[Boolean]](0).toString
case (a,"Array[String]") => a.asInstanceOf[Array[String]](0)
case (_,tp) => throw new RException(s"Unable to cast ${tp} to String")
}
def getI1(identifier: String): Array[Int] = get(identifier) match {
case (a,"Int") => Array(a.asInstanceOf[Int])
case (a,"Boolean") => Array(if (a.asInstanceOf[Boolean]) 1 else 0)
case (a,"String") => Array(a.asInstanceOf[String].toInt)
case (a,"Array[Int]") => a.asInstanceOf[Array[Int]]
case (a,"Array[Boolean]") => a.asInstanceOf[Array[Boolean]].map(x => if (x) 1 else 0)
case (a,"Array[String]") => a.asInstanceOf[Array[String]].map(_.toInt)
case (_,tp) => throw new RException(s"Unable to cast ${tp} to Array[Int]")
}
def getB1(identifier: String): Array[Boolean] = get(identifier) match {
case (a,"Int") => Array(a.asInstanceOf[Int] != 0)
case (a,"Boolean") => Array(a.asInstanceOf[Boolean])
case (a,"String") => Array(a.asInstanceOf[String].toLowerCase != "false")
case (a,"Array[Int]") => a.asInstanceOf[Array[Int]].map(_ != 0)
case (a,"Array[Boolean]") => a.asInstanceOf[Array[Boolean]]
case (a,"Array[String]") => a.asInstanceOf[Array[String]].map(_.toLowerCase != "false")
case (_,tp) => throw new RException(s"Unable to cast ${tp} to Array[Boolean]")
}
def getS1(identifier: String): Array[String] = get(identifier) match {
case (a,"Int") => Array(a.asInstanceOf[Int].toString)
case (a,"Boolean") => Array(a.asInstanceOf[Boolean].toString)
case (a,"String") => Array(a.asInstanceOf[String])
case (a,"Array[Int]") => a.asInstanceOf[Array[Int]].map(_.toString)
case (a,"Array[Boolean]") => a.asInstanceOf[Array[Boolean]].map(_.toString)
case (a,"Array[String]") => a.asInstanceOf[Array[String]]
case (_,tp) => throw new RException(s"Unable to cast ${tp} to Array[String]")
}
def getR(identifier: String): RObjectRef = get(identifier,true) match {
case (a,"RObject") => a.asInstanceOf[RObjectRef]
case (_,tp) => throw new RException(s"Unable to cast ${tp} to RObject")
}
def gc(): Unit = {
logger.debug("Sending GC request.")
out.writeInt(RClient.Protocol.GC)
out.flush()
}
}
/**
 * Companion: wire-protocol constants, stream helpers, R-process discovery and
 * launching, and the socket handshake used to build an [[RClient]].
 */
object RClient {
  /** Opcode and tag constants; must stay in sync with the R side of rscala. */
  object Protocol {
    // Data Types
    val UNSUPPORTED_TYPE = 0
    val INTEGER = 1
    val DOUBLE = 2
    val BOOLEAN = 3
    val STRING = 4
    val DATE = 5
    val DATETIME = 6
    // Data Structures
    val UNSUPPORTED_STRUCTURE = 10
    val NULLTYPE = 11
    val REFERENCE = 12
    val ATOMIC = 13
    val VECTOR = 14
    val MATRIX = 15
    val LIST = 16
    val DATAFRAME = 17
    val S3CLASS = 18
    val S4CLASS = 19
    val JOBJ = 20
    // Commands
    val EXIT = 100
    val RESET = 101
    val GC = 102
    val DEBUG = 103
    val EVAL = 104
    val SET = 105
    val SET_SINGLE = 106
    val SET_DOUBLE = 107
    val GET = 108
    val GET_REFERENCE = 109
    val DEF = 110
    val INVOKE = 111
    val SCALAP = 112
    // Result
    val OK = 1000
    val ERROR = 1001
    val UNDEFINED_IDENTIFIER = 1002
    // Misc.
    val CURRENT_SUPPORTED_SCALA_VERSION = "2.10"
  }
  /** Writes a length-prefixed UTF-8 string to the stream (no flush). */
  def writeString(out: DataOutputStream, string: String): Unit = {
    val bytes = string.getBytes("UTF-8")
    val length = bytes.length
    out.writeInt(length)
    out.write(bytes,0,length)
  }
  /** Reads a length-prefixed UTF-8 string; counterpart of [[writeString]]. */
  def readString(in: DataInputStream): String = {
    val length = in.readInt()
    val bytes = new Array[Byte](length)
    in.readFully(bytes)
    new String(bytes,"UTF-8")
  }
  /** True when every row of `x` has the same length (rectangular matrix). */
  def isMatrix[T](x: Array[Array[T]]): Boolean = {
    if ( x.length != 0 ) {
      val len = x(0).length
      for ( i <- 1 until x.length ) {
        if ( x(i).length != len ) return false
      }
    }
    true
  }
  import scala.sys.process._
  private val logger: Logger = LoggerFactory.getLogger(getClass)
  // Normalized OS name; note "unix" is folded into "linux" here, which makes
  // the "unix" branches in defaultArguments/defaultRCmd below unreachable.
  val OS = sys.props("os.name").toLowerCase match {
    case s if s.startsWith("""windows""") => "windows"
    case s if s.startsWith("""linux""") => "linux"
    case s if s.startsWith("""unix""") => "linux"
    case s if s.startsWith("""mac""") => "macintosh"
    case _ => throw new RException("Unrecognized OS")
  }
  /** Command-line flags for launching R; Windows uses --ess instead of --interactive. */
  val defaultArguments = OS match {
    case "windows" => Array[String]("--vanilla","--silent","--slave","--ess")
    case "linux" => Array[String]("--vanilla","--silent","--slave","--interactive")
    case "unix" => Array[String]("--vanilla","--silent","--slave","--interactive")
    case "macintosh" => Array[String]("--vanilla","--silent","--slave","--interactive")
  }
  /** Executable used to start R; resolved from the registry on Windows. */
  lazy val defaultRCmd = OS match {
    case "windows" => findROnWindows
    case "linux" => """R"""
    case "unix" => """R"""
    case "macintosh" => """R"""
  }
  /**
   * Locates R.exe by querying the Windows registry (machine-wide first, then
   * per-user). Throws [[RException]] when neither hive has an InstallPath.
   */
  def findROnWindows: String = {
    val NEWLINE = sys.props("line.separator")
    var result : String = null
    for ( root <- List("HKEY_LOCAL_MACHINE","HKEY_CURRENT_USER") ) {
      val out = new StringBuilder()
      val logger = ProcessLogger((o: String) => { out.append(o); out.append(NEWLINE) },(e: String) => {})
      try {
        ("reg query \\"" + root + "\\\\Software\\\\R-core\\\\R\\" /v \\"InstallPath\\"") ! logger
        val a = out.toString.split(NEWLINE).filter(_.matches("""^\\s*InstallPath\\s*.*"""))(0)
        result = a.split("REG_SZ")(1).trim() + """\\bin\\R.exe"""
      } catch {
        // A missing hive or parse failure just falls through to the next root.
        case _ : Throwable =>
      }
    }
    if ( result == null ) throw new RException("Cannot locate R using Windows registry.")
    else return result
  }
  /** Drains a child-process stream line by line into the debug log, then closes it. */
  def reader(label: String)(input: InputStream) = {
    val in = new BufferedReader(new InputStreamReader(input))
    var line = in.readLine()
    while ( line != null ) {
      logger.debug(label+line)
      line = in.readLine()
    }
    in.close()
  }
  /**
   * Opens two loopback server sockets (Scala->R and R->Scala), writes their
   * port numbers into `portsFilename` for the R process to pick up, then
   * blocks in accept() until R connects to each.
   */
  class ScalaSockets(portsFilename: String) {
    private val logger: Logger = LoggerFactory.getLogger(getClass)
    // Port 0 lets the OS choose free ports; binding to null = loopback only.
    val serverIn = new ServerSocket(0,0,InetAddress.getByName(null))
    val serverOut = new ServerSocket(0,0,InetAddress.getByName(null))
    locally {
      logger.info("Trying to open ports filename: "+portsFilename)
      val portNumberFile = new File(portsFilename)
      val p = new PrintWriter(portNumberFile)
      p.println(serverIn.getLocalPort+" "+serverOut.getLocalPort)
      p.close()
      logger.info("Servers are running on port "+serverIn.getLocalPort+" "+serverOut.getLocalPort)
    }
    val socketIn = serverIn.accept
    logger.info("serverinaccept done")
    val in = new DataInputStream(new BufferedInputStream(socketIn.getInputStream))
    logger.info("in has been created")
    val socketOut = serverOut.accept
    logger.info("serverouacceptdone")
    val out = new DataOutputStream(new BufferedOutputStream(socketOut.getOutputStream))
    logger.info("out is done")
  }
  def makeSockets(portsFilename : String) = new ScalaSockets(portsFilename)
  /** Builds a client by launching R with the platform default command. */
  def apply(): RClient = apply(defaultRCmd)
  /**
   * Launches an R process, bootstraps the rscala server loop in it, performs
   * the socket handshake and version check, and returns a connected client.
   *
   * @param rCmd    R executable to launch
   * @param libdir  currently unused (see TODO at top of file)
   * @param debug   forwarded to the R-side server loop
   * @param timeout handshake timeout in seconds, forwarded to R
   */
  def apply(rCmd: String, libdir : String = "",debug: Boolean = false, timeout: Int = 60): RClient = {
    logger.debug("Creating processIO")
    var cmd: PrintWriter = null
    val command = rCmd +: defaultArguments
    val processCmd = Process(command)
    val processIO = new ProcessIO(
      o => { cmd = new PrintWriter(o) },
      reader("STDOUT DEBUG: "),
      reader("STDERR DEBUG: "),
      true
    )
    val portsFile = File.createTempFile("rscala-","")
    val processInstance = processCmd.run(processIO)
    // NOTE(review): replaceAll treats File.separator as a regex; on Windows
    // the separator is "\" which is an invalid pattern — this probably should
    // be a literal replace. Confirm on a Windows host.
    val snippet = s"""
rscala:::rServe(rscala:::newSockets('${portsFile.getAbsolutePath.replaceAll(File.separator,"/")}',debug=${if ( debug ) "TRUE" else "FALSE"},timeout=${timeout}))
q(save='no')
"""
    // Busy-wait until the process's stdin writer has been handed to us.
    while ( cmd == null ) Thread.sleep(100)
    logger.info("sending snippet " + snippet)
    cmd.println(snippet)
    cmd.flush()
    val sockets = makeSockets(portsFile.getAbsolutePath)
    sockets.out.writeInt(Protocol.OK)
    sockets.out.flush()
    try {
      // Handshake: R echoes the package version; mismatch means incompatible halves.
      assert( readString(sockets.in) == org.apache.zeppelin.rinterpreter.rscala.Version )
    } catch {
      case _: Throwable => throw new RException("The scala and R versions of the package don't match")
    }
    apply(sockets.in,sockets.out)
  }
  /** __For rscala developers only__: Returns an instance of the [[RClient]] class. */
  def apply(in: DataInputStream, out: DataOutputStream): RClient = new RClient(in,out)
} | ankurmitujjain/incubator-zeppelin | r/src/main/scala/org/apache/zeppelin/rinterpreter/rscala/RClient.scala | Scala | apache-2.0 | 20,746 |
package models
import scala.annotation.tailrec
/**
* Created by einevea on 27/01/15.
*/
/**
 * Base type for sellable items; concrete subclasses decide how the price
 * is obtained (fixed for [[Simple]], derived for [[Complex]]).
 *
 * @param id          unique identifier
 * @param name        display name
 * @param description human-readable description
 */
abstract class Item(id:Long, name:String, description:String) {
  /** The item's price; computation strategy is subclass-specific. */
  def getPrice():Price
}
/** An item with a fixed, directly supplied price. */
case class Simple(id: Long, name: String, description: String, price: Price)
  extends Item(id, name, description) {

  /** Returns the fixed price given at construction time. */
  override def getPrice(): Price = this.price
}
/**
 * An item whose price is derived from its parts: the sum of the prices of all
 * of its [[Property]] entries. The total is computed lazily, once.
 *
 * @param properties parts whose prices contribute to the total
 */
case class Complex(id: Long, name: String, description: String, properties: List[Property]) extends Item(id, name, description) {
  // Cached on first access; safe because `properties` is immutable.
  lazy val price: Price = calcPrice()

  private def calcPrice(): Price =
    sum(properties.map(_.price))

  /**
   * Sums a list of prices starting from Price.zero. Kept public for source
   * compatibility; the previous hand-rolled @tailrec recursion is replaced by
   * the equivalent (and also stack-safe) foldLeft.
   */
  def sum(xs: List[Price]): Price = xs.foldLeft(Price.zero)(_ + _)

  /** Returns the derived total price. */
  override def getPrice(): Price = price
}
/**
 * Factories for [[Item]]: a flat price builds a [[Simple]] item, a list of
 * properties builds a [[Complex]] one.
 */
object Item {
  def apply(id: Long, name: String, description: String, price: Price): Simple =
    Simple(id, name, description, price)

  def apply(id: Long, name: String, description: String, properties: List[Property]): Complex =
    Complex(id, name, description, properties)
}
/*
* Copyright (c) 2014-2015 by its authors. Some rights reserved.
* See the project homepage at: http://www.monifu.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monifu.reactive.internals.builders
import minitest.SimpleTestSuite
import monifu.concurrent.extensions._
import monifu.concurrent.schedulers.TestScheduler
import monifu.reactive.Ack.Continue
import monifu.reactive.{Ack, Observable, Observer}
import scala.concurrent.Future
import scala.concurrent.duration._
/**
 * Verifies Observable.timerRepeated against a virtual-time TestScheduler:
 * first emission after the 0s initial delay, then one per 1s period, with the
 * observer acknowledging each element only after a 100ms simulated delay.
 */
object TimerSuite extends SimpleTestSuite {
  test("should do timerRepeated") {
    implicit val s = TestScheduler()
    var received = 0
    Observable.timerRepeated(0.seconds, 1.second, 1L)
      .onSubscribe(new Observer[Long] {
        def onNext(elem: Long): Future[Ack] = {
          received += 1
          // Back-pressure: ack Continue only after 100ms of virtual time.
          Future.delayedResult(100.millis)(Continue)
        }
        def onError(ex: Throwable): Unit = ()
        def onComplete(): Unit = ()
      })
    // Immediate tick fires the 0-second initial emission.
    s.tick()
    assertEquals(received, 1)
    // No time passes, no further emissions.
    s.tick()
    assertEquals(received, 1)
    // 900ms in: still inside the 1s period.
    s.tick(900.millis)
    assertEquals(received, 1)
    // Hitting the 1s mark yields the second emission.
    s.tick(100.millis)
    assertEquals(received, 2)
    // Another full period yields the third.
    s.tick(1.second)
    assertEquals(received, 3)
  }
}
| virtualirfan/monifu | monifu/shared/src/test/scala/monifu/reactive/internals/builders/TimerSuite.scala | Scala | apache-2.0 | 1,717 |
package xitrum.handler
import java.net.SocketAddress
import scala.collection.mutable.{Map => MMap}
import nl.grons.metrics.scala.Histogram
import io.netty.handler.codec.http.{HttpRequest, HttpResponse}
import xitrum.{Action, Config, Log}
import xitrum.scope.request.RequestEnv
import xitrum.sockjs.SockJsAction
import xitrum.action.Net
/**
 * Central access logging for all request kinds handled by Xitrum: flash
 * policy files, static files, JAR resources, actions, WebSockets, and
 * CORS preflight OPTIONS. Action logging also feeds the metrics registry.
 */
object AccessLog {
  /** Logs a request for the flash socket policy file (client IP only). */
  def logFlashSocketPolicyFileAccess(remoteAddress: SocketAddress) {
    Log.info(Net.clientIp(remoteAddress) + " (flash socket policy file)")
  }
  /** Logs a static-file hit: ip, method, URI and response status. */
  def logStaticFileAccess(remoteAddress: SocketAddress, request: HttpRequest, response: HttpResponse) {
    Log.info(
      Net.remoteIp(remoteAddress, request) + " " +
      request.getMethod + " " +
      request.getUri + " -> " +
      response.getStatus.code +
      " (static file)"
    )
  }
  /** Logs a hit on a resource served from inside a JAR. */
  def logResourceInJarAccess(remoteAddress: SocketAddress, request: HttpRequest, response: HttpResponse) {
    Log.info(
      Net.remoteIp(remoteAddress, request) + " " +
      request.getMethod + " " +
      request.getUri + " -> " +
      response.getStatus.code +
      " (JAR resource)"
    )
  }
  /**
   * Logs an action execution. With a non-null `e` the entry is logged as a
   * dispatch error instead. `cacheSecs`/`hit` describe cache involvement
   * (see extraInfo below for the encoding).
   */
  def logActionAccess(action: Action, beginTimestamp: Long, cacheSecs: Int, hit: Boolean, e: Throwable = null) {
    if (e == null) {
      Log.info(msgWithTime(action.getClass.getName, action, beginTimestamp) + extraInfo(action, cacheSecs, hit))
    } else {
      Log.error("Dispatch error " + msgWithTime(action.getClass.getName, action, beginTimestamp) + extraInfo(action, cacheSecs, hit), e)
    }
  }
  /** Logs a WebSocket action; caching does not apply (cacheSecs = 0). */
  def logWebSocketAccess(className: String, action: Action, beginTimestamp: Long) {
    Log.info(msgWithTime(className, action, beginTimestamp) + extraInfo(action, 0, false))
  }
  /** Logs a CORS preflight OPTIONS request. */
  def logOPTIONS(request: HttpRequest) {
    Log.info("OPTIONS " + request.getUri)
  }
  //----------------------------------------------------------------------------
  // Save last executeTime of each access
  // Map('actionName': [timestamp, execTime])
  private lazy val lastExecTimeMap = MMap[String, Array[Long]]()
  /**
   * Builds the log line (ip, method, URI, action name, non-empty param maps,
   * status if already responded, elapsed ms) and, when action metrics are
   * enabled, records the elapsed time into a histogram keyed by class name.
   * NOTE(review): lastExecTimeMap is a plain mutable Map updated here without
   * synchronization — confirm logging is confined to one thread.
   */
  private def msgWithTime(className: String, action: Action, beginTimestamp: Long) = {
    val endTimestamp = System.currentTimeMillis()
    val dt = endTimestamp - beginTimestamp
    val env = action.handlerEnv
    // The metrics viewer's own SockJS channel traffic must not pollute metrics.
    val isSockJSMetricsChannelClient =
      action.isInstanceOf[SockJsAction] &&
      action.asInstanceOf[SockJsAction].pathPrefix == "xitrum/metrics/channel"
    // Ignore the actions of metrics itself, to avoid showing them at the metrics viewer
    if (Config.xitrum.metrics.isDefined &&
        Config.xitrum.metrics.get.actions &&
        !isSockJSMetricsChannelClient)
    {
      val histograms = xitrum.Metrics.registry.getHistograms
      val actionClassName = action.getClass.getName
      // Reuse an existing histogram for this action class or register a new one.
      val histogram =
        if (histograms.containsKey(actionClassName))
          histograms.get(actionClassName)
        else
          xitrum.Metrics.histogram(actionClassName)
      histogram.asInstanceOf[Histogram] += dt
      lastExecTimeMap(actionClassName) = Array(System.currentTimeMillis, dt)
    }
    action.remoteIp + " " +
    action.request.getMethod + " " +
    action.request.getUri + " -> " +
    className +
    (if (env.queryParams.nonEmpty) ", queryParams: " + RequestEnv.inspectParamsWithFilter(env.queryParams) else "") +
    (if (env.bodyTextParams.nonEmpty) ", bodyTextParams: " + RequestEnv.inspectParamsWithFilter(env.bodyTextParams) else "") +
    (if (env.pathParams.nonEmpty) ", pathParams: " + RequestEnv.inspectParamsWithFilter(env.pathParams) else "") +
    (if (env.bodyFileParams.nonEmpty) ", bodyFileParams: " + RequestEnv.inspectParamsWithFilter(env.bodyFileParams) else "") +
    (if (action.isDoneResponding) " -> " + action.response.getStatus.code else "") +
    ", " + dt + " [ms]"
  }
  /**
   * Suffix describing cache state: cacheSecs == 0 means no caching (tag async
   * responses), negative means action cache, positive means page cache.
   */
  private def extraInfo(action: Action, cacheSecs: Int, hit: Boolean) = {
    if (cacheSecs == 0) {
      if (action.isDoneResponding) "" else " (async)"
    } else {
      if (hit) {
        if (cacheSecs < 0) " (action cache hit)" else " (page cache hit)"
      } else {
        if (cacheSecs < 0) " (action cache miss)" else " (page cache miss)"
      }
    }
  }
}
| georgeOsdDev/xitrum | src/main/scala/xitrum/handler/AccessLog.scala | Scala | mit | 4,274 |
// IDE resolve-test fixture: the empty /* */ markers are caret positions the
// test harness resolves at — do not move, remove, or fill them.
abstract class A {
  val x: Nothing = exit()
  x./* */asInstanceOf[String]
  x./* */hashCode
  def anyObject[T](): T
  anyObject()./* */asInstanceOf[String]
} | ilinum/intellij-scala | testdata/resolve2/basic/Nothing.scala | Scala | apache-2.0 | 159 |
package neuroflow.nets.gpu
import breeze.linalg._
import breeze.stats._
import jcuda.jcublas.{JCublas2, cublasHandle}
import neuroflow.core.Activator._
import neuroflow.core.IllusionBreaker.SettingsNotSupportedException
import neuroflow.core.Network._
import neuroflow.core.{CanProduce, _}
import neuroflow.cuda._
import neuroflow.dsl._
import scala.annotation.tailrec
import scala.collection.Seq
import scala.collection.mutable.ArrayBuffer
import scala.util.Try
/**
*
* Convolutional Neural Network running on CUDA,
* uses gradient descent to optimize the loss function.
*
* @author bogdanski
* @since 31.08.17
*
*/
/**
 * Entry point for building CUDA conv nets: provides implicit [[Constructor]]s
 * for double and single precision plus the default weight breeders.
 */
object ConvNetwork {
  /** Builds a double-precision net; weights come from the implicit breeder. */
  implicit object double extends Constructor[Double, ConvNetworkDouble] {
    def apply(ls: Seq[Layer[Double]], loss: LossFunction[Double], settings: Settings[Double])(implicit breeder: WeightBreeder[Double]): ConvNetworkDouble = {
      ConvNetworkDouble(ls, loss, settings, breeder(ls))
    }
  }
  implicit object weights_double extends neuroflow.core.WeightBreeder.Initializer[Double]
  /** Builds a single-precision net; weights come from the implicit breeder. */
  implicit object single extends Constructor[Float, ConvNetworkFloat] {
    def apply(ls: Seq[Layer[Float]], loss: LossFunction[Float], settings: Settings[Float])(implicit breeder: WeightBreeder[Float]): ConvNetworkFloat = {
      ConvNetworkFloat(ls, loss, settings, breeder(ls))
    }
  }
  implicit object weights_float extends neuroflow.core.WeightBreeder.Initializer[Float]
}
// <editor-fold defaultstate="collapsed" desc="Double Precision Impl">
/**
 * Double-precision CNN executing forward/backward passes on the GPU via
 * cuBLAS/CuMatrix, trained with (batched) gradient descent. CPU-side `weights`
 * are mirrored on the GPU (`_cuWeights`) and synced back at waypoints.
 */
case class ConvNetworkDouble(layers: Seq[Layer[Double]], lossFunction: LossFunction[Double], settings: Settings[Double], weights: Weights[Double],
                             identifier: String = "neuroflow.nets.gpu.ConvNetwork", numericPrecision: String = "Double")
  extends CNN[Double] with WaypointLogic[Double] {
  // One cuBLAS context per network instance.
  implicit val handle = new cublasHandle
  JCublas2.cublasCreate(handle)
  type Vector = DenseVector[Double]
  type Matrix = DenseMatrix[Double]
  type Tensor = Tensor3D[Double]
  type Vectors = Seq[DenseVector[Double]]
  type Matrices = Seq[DenseMatrix[Double]]
  type Tensors = Seq[Tensor3D[Double]]
  // Narrow the layer seq to the two supported layer kinds.
  private val _allLayers = layers.map {
    case d: Dense[Double] => d
    case c: Convolution[Double] => c
  }.toArray
  // Maps a CPU-side activator to its CUDA kernel pair (f, f') plus bias term.
  private def activatorMapping(a: Activator[_], b: Double) = {
    a match {
      case x: ReLU[_] => (CuMatrix.Activators.relu[Double] , CuMatrix.Activators.relu_derivative[Double] , b)
      case x: Linear[_] => (CuMatrix.Activators.linear[Double] , CuMatrix.Activators.linear_derivative[Double] , b)
      case x: Square[_] => (CuMatrix.Activators.square[Double] , CuMatrix.Activators.square_derivative[Double] , b)
      case x: Sigmoid[_] => (CuMatrix.Activators.sigmoid[Double] , CuMatrix.Activators.sigmoid_derivative[Double] , b)
      case x: Tanh[_] => (CuMatrix.Activators.tanh[Double] , CuMatrix.Activators.tanh_derivative[Double] , b)
      case x => throw new SettingsNotSupportedException(s"This activator is not implemented for CUDA: ${a.symbol}.")
    }
  }
  // Per-layer (activation, derivative, bias) triples; biased activators unwrap their bias.
  private val _activators = _allLayers.map {
    case h: HasActivator[_] => h.activator match {
      case x: Activator[_] with Bias[Double] => activatorMapping(x.activator, x.bias)
      case x: Activator[_] => activatorMapping(x, 0.0)
    }
  }
  private val _lastLayerIdx = weights.size - 1
  // Index -> convolution layer, for the convolutional prefix of the net.
  private val _convLayers =
    _allLayers.zipWithIndex.map(_.swap).filter {
      case (_, _: Convolution[_]) => true
      case _ => false
    }.toMap.mapValues {
      case c: Convolution[Double] => c
    }
  private val _outputDim = _allLayers.last.neurons
  // _lastC: index of the last conv layer; _lastL: index of the last layer overall.
  private val _lastC = _convLayers.maxBy(_._1)._1
  private val _lastL = _allLayers.indices.last
  // GPU mirror of the CPU weights; updated in-place during training.
  private val _cuWeights = weights.map(m => CuMatrix.fromDense(m))
  /**
   * Computes output for `x`.
   */
  def apply(x: Tensor): Vector = {
    sink(x.matrix, _lastLayerIdx, batchSize = 1).toDenseVector
  }
  /**
   * Computes output for given inputs `in`
   * using efficient batch mode.
   */
  def batchApply(xs: Tensors): Vectors = {
    BatchBreeder.unsliceMatrixByRow {
      sink(BatchBreeder.horzCatTensorBatch(xs), _lastLayerIdx, batchSize = xs.size)
    }
  }
  /**
   * `apply` under a focused layer.
   */
  def focus[L <: Layer[Double]](l: L)(implicit cp: CanProduce[(Matrix, L), l.algebraicType]): Tensor => l.algebraicType = {
    // Locate the layer by reference first, falling back to structural equality.
    val lwi = layers.zipWithIndex
    val idx = lwi.find(_._1 eq l).orElse {
      val p = lwi.filter(_._1 == l)
      if (p.size > 1) warn(s"Focus layer $l is ambiguous. Taking first. " +
        "Alternatively, use a direct object reference to the desired layer.")
      p.headOption
    } match {
      case Some((l, i)) => debug(s"Found focus layer $l at index $i."); i
      case None => warn(s"Focus layer $l not found. Fallback to last layer."); _lastLayerIdx
    }
    (in: Tensor) => {
      cp(sink(in.matrix, idx, batchSize = 1), l)
    }
  }
  /**
   * Trains this net with input `xs` against output `ys`.
   */
  def train(xs: Tensors, ys: Vectors): Try[Run] = Try {
    import settings._
    val batchSize = settings.batchSize.getOrElse(xs.size)
    require(xs.size == ys.size, s"Mismatch between sample sizes. (${xs.size} != ${ys.size})")
    if (settings.verbose) {
      if(xs.size % batchSize != 0) warn(s"Batches are not even. (${xs.size} % $batchSize = ${xs.size % batchSize} != 0)")
      info(s"Training with ${xs.size} samples, batch size = $batchSize, batches = ${math.ceil(xs.size.toDouble / batchSize.toDouble).toInt}.")
      info(s"Breeding batches ...")
    }
    val (xsys, batchSizes) = BatchBreeder.breedCNN(xs, ys, batchSize)
    // Optional GC threshold for GPU memory pressure.
    gcThreshold match {
      case Some(bytes) => GcThreshold.set(bytes)
      case None =>
    }
    run(xsys, learningRate(1 -> 1.0), batchSizes, precision, batch = 0, batches = xsys.size, iteration = 1, iterations, startTime = System.currentTimeMillis())
  }
  // Forward pass up to `target`; the loss function's sink transform is applied
  // only when the target is the output layer.
  private def sink(x: Matrix, target: Int, batchSize: Int): Matrix = {
    val r1 = flow(x, target, batchSize)
    val r2 = if (target == _lastLayerIdx) lossFunction.sink(r1) else r1
    r2
  }
  // Pure forward pass on the GPU, releasing all intermediate buffers before
  // returning the (CPU-side) activation of layer `target`.
  private def flow(in: Matrix, target: Int, batchSize: Int): Matrix = {
    val _fa = ArrayBuffer.empty[CuMatrix[Double]]
    val _fr = ArrayBuffer.empty[CuMatrix[Double]] // raw, unshaped
    // Convolutional prefix: im2col-style convolute, weight product, bias, activation.
    @tailrec def conv(_in: CuMatrix[Double], i: Int): Unit = {
      val l = _convLayers(i)
      val p = _cuWeights(i) * convolute(_in, l, batchSize)
      p += _activators(i)._3
      val a = _activators(i)._1(p)
      // The last conv activation is reshaped into batch-row layout for the dense part.
      _fa += { if (i == _lastC) reshape_batch(a, l.dimOut, batchSize) else a }
      _fr += a
      if (i < _lastC) conv(a, i + 1)
    }
    // Fully connected suffix.
    @tailrec def fully(_in: CuMatrix[Double], i: Int): Unit = {
      val l = _allLayers(i)
      val p = _in * _cuWeights(i)
      p += _activators(i)._3
      val a = _activators(i)._1(p)
      _fa += a
      _fr += a
      if (i < _lastL) fully(a, i + 1)
    }
    conv(CuMatrix.fromDense(in), 0)
    fully(_fa(_lastC), _lastC + 1)
    val r = _fr(target).toDense
    _fa.foreach(_.release())
    _fr.foreach(_.release())
    r
  }
  /**
   * The training loop.
   */
  @tailrec private def run(xsys: Seq[(Matrix, Matrix)], stepSize: Double, batchSizes: Map[Int, Int], precision: Double,
                           batch: Int, batches: Int, iteration: Int, maxIterations: Int, startTime: Long): Run = {
    val batchSize = batchSizes(batch)
    val (x, y) = (xsys(batch)._1, xsys(batch)._2)
    val loss =
      if (settings.approximation.isDefined) adaptWeightsApprox(x, y, stepSize, batchSize)
      else adaptWeights(x, y, stepSize, batchSize)
    val lossMean = mean(loss)
    if (settings.verbose) info(f"Iteration $iteration.${batch + 1}, Avg. Loss = $lossMean%.6g, Vector: $loss")
    maybeGraph(lossMean)
    // Waypoints sync GPU weights back to the CPU-side Weights.
    waypoint(syncWeights)(iteration)
    if (lossMean > precision && iteration < maxIterations) {
      run(xsys, settings.learningRate(iteration + 1 -> stepSize), batchSizes,
        precision, (batch + 1) % batches, batches, iteration + 1, maxIterations, startTime)
    } else {
      info(f"Took $iteration of $maxIterations iterations.")
      Run(startTime, System.currentTimeMillis(), iteration)
    }
  }
  /**
   * Copies batch to GPU, computes gradient for weights, updates weights using gradient descent and returns the loss matrix.
   */
  private def adaptWeights(x: Matrix, y: Matrix, stepSize: Double, batchSize: Int): Matrix = {
    import settings.updateRule
    val (_x, _y) = (CuMatrix.fromDense(x), CuMatrix.fromDense(y))
    val loss = CuMatrix.zeros[Double](batchSize, _outputDim)
    // fa: activations, fb: activation derivatives, fc: convoluted inputs,
    // dws: weight gradients, ds: layer deltas — all keyed by layer index.
    val fa = collection.mutable.Map.empty[Int, CuMatrix[Double]]
    val fb = collection.mutable.Map.empty[Int, CuMatrix[Double]]
    val fc = collection.mutable.Map.empty[Int, CuMatrix[Double]]
    val dws = collection.mutable.Map.empty[Int, CuMatrix[Double]]
    val ds = collection.mutable.Map.empty[Int, CuMatrix[Double]]
    // Forward pass (conv prefix), retaining intermediates for backprop.
    @tailrec def conv(_in: CuMatrix[Double], i: Int): Unit = {
      val l = _convLayers(i)
      val c = convolute(_in, l, batchSize)
      val p = _cuWeights(i) * c
      p += _activators(i)._3
      val a = _activators(i)._1(p)
      val b = _activators(i)._2(p)
      fa += i -> {
        if (i == _lastC) {
          val rb = reshape_batch(a, l.dimOut, batchSize)
          a.release()
          rb
        } else a
      }
      fb += i -> b
      fc += i -> c
      if (i < _lastC) conv(a, i + 1)
    }
    // Forward pass (dense suffix).
    @tailrec def fully(_in: CuMatrix[Double], i: Int): Unit = {
      val p = _in * _cuWeights(i)
      p += _activators(i)._3
      val a = _activators(i)._1(p)
      val b = _activators(i)._2(p)
      fa += i -> a
      fb += i -> b
      if (i < _lastL) fully(a, i + 1)
    }
    // Backward pass from the output layer down to layer 0; the four branches
    // handle output layer, dense hidden layers, the conv/dense boundary, and
    // conv layers respectively.
    @tailrec def derive(i: Int): Unit = {
      if (i == _lastLayerIdx) {
        val (err, grad) = lossFunction(_y, fa(i))
        val d = grad *:* fb(i)
        val dw = fa(i - 1).t * d
        dws += i -> dw
        ds += i -> d
        loss += err
        derive(i - 1)
      } else if (i < _lastLayerIdx && i > _lastC) {
        val d = (ds(i + 1) * _cuWeights(i + 1).t) *:* fb(i)
        val dw = fa(i - 1).t * d
        dws += i -> dw
        ds += i -> d
        derive(i - 1)
      } else if (i == _lastC) {
        // Boundary: delta from the dense side must be reshaped back to conv layout.
        val l = _convLayers(i)
        val d1 = ds(i + 1) * _cuWeights(i + 1).t
        val d2 = reshape_batch_backprop(d1, l.dimOut, batchSize)
        val d = d2 *:* fb(i)
        val dw = d * fc(i).t
        dws += i -> dw
        ds += i -> d
        if (i > 0) derive(i - 1)
      } else {
        val l = _convLayers(i + 1)
        val ww = reshape_batch(_cuWeights(i + 1), (l.field._1, l.field._2, l.filters), l.dimIn._3)
        val dc = convolute_backprop(ds(i + 1), l, batchSize)
        val d = ww * dc *:* fb(i)
        val dw = d * fc(i).t
        dws += i -> dw
        ds += i -> d
        if (i > 0) derive(i - 1)
      }
    }
    conv(_x, 0)
    fully(fa(_lastC), _lastC + 1)
    derive(_lastLayerIdx)
    // Free all GPU intermediates before applying the update rule.
    ds.values.foreach(_.release())
    fa.values.foreach(_.release())
    fb.values.foreach(_.release())
    fc.values.foreach(_.release())
    (0 to _lastLayerIdx).foreach(i => updateRule(_cuWeights(i), dws(i), stepSize, i))
    dws.values.foreach(_.release())
    _x.release()
    _y.release()
    // Column-wise reduction of the per-sample loss matrix to a 1-row vector.
    val lossReduced = (loss.t * CuMatrix.ones[Double](loss.rows, 1)).t
    lossReduced.toDense
  }
  /** For debugging, approximates the gradients using `settings.approximation`. */
  private def adaptWeightsApprox(xs: Matrix, ys: Matrix, stepSize: Double, batchSize: Int): Matrix = {
    require(settings.updateRule.isInstanceOf[Debuggable[Double]])
    val _rule: Debuggable[Double] = settings.updateRule.asInstanceOf[Debuggable[Double]]
    def lossFunc(): Matrix = {
      val loss = lossFunction(ys, flow(xs, _lastLayerIdx, batchSize))._1
      val reduced = (loss.t * DenseMatrix.ones[Double](loss.rows, 1)).t
      reduced
    }
    val out = lossFunc()
    // Finite-difference gradient of one scalar weight.
    def approximateGradient(weightLayer: Int, weight: (Int, Int)): Double = {
      sum(settings.approximation.get.apply(weights, lossFunc, syncWithGPU, weightLayer, weight))
    }
    // Perturbations happen on the CPU weights; push them to the GPU mirrors.
    def syncWithGPU(): Unit = {
      weights.zip(_cuWeights).foreach {
        case (w, cw) => cw := w
      }
    }
    val updates = collection.mutable.HashMap.empty[(Int, (Int, Int)), Double]
    val grads = collection.mutable.HashMap.empty[(Int, (Int, Int)), Double]
    val debug = collection.mutable.HashMap.empty[Int, Matrix]
    weights.zipWithIndex.foreach {
      case (l, idx) =>
        debug += idx -> l.copy
        l.foreachPair { (k, v) =>
          val grad = approximateGradient(idx, k)
          updates += (idx, k) -> (v - (stepSize * grad))
          grads += (idx, k) -> grad
        }
    }
    updates.foreach {
      case ((wl, k), v) =>
        weights(wl).update(k, v)
    }
    grads.foreach {
      case ((wl, k), v) =>
        debug(wl).update(k, v)
    }
    // Expose the approximated gradients for inspection by the debuggable rule.
    _rule.lastGradients = debug
    syncWithGPU()
    out
  }
  // Pulls the trained GPU weights back into the CPU-side Weights.
  private def syncWeights(): Unit = {
    weights.zip(_cuWeights).foreach {
      case (w, cw) => w := cw.toDense
    }
  }
}
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc="Single Precision Impl">
case class ConvNetworkFloat(layers: Seq[Layer[Float]], lossFunction: LossFunction[Float], settings: Settings[Float], weights: Weights[Float],
identifier: String = "neuroflow.nets.gpu.ConvNetwork", numericPrecision: String = "Single")
extends CNN[Float] with WaypointLogic[Float] {
  // cuBLAS context shared by all GPU matrix operations of this network instance.
  implicit val handle = new cublasHandle
  JCublas2.cublasCreate(handle)
  // Single-precision type aliases fixing the abstract network types.
  type Vector = DenseVector[Float]
  type Matrix = DenseMatrix[Float]
  type Tensor = Tensor3D[Float]
  type Vectors = Seq[DenseVector[Float]]
  type Matrices = Seq[DenseMatrix[Float]]
  type Tensors = Seq[Tensor3D[Float]]
  // Layers narrowed to the two supported kinds; any other layer type fails at construction.
  private val _allLayers = layers.map {
    case d: Dense[Float] => d
    case c: Convolution[Float] => c
  }.toArray
  // Maps a host-side activator to its CUDA kernel pair (function, derivative) plus bias `b`.
  // Unsupported activators fail fast with SettingsNotSupportedException.
  private def activatorMapping(a: Activator[_], b: Float) = {
    a match {
      case x: ReLU[_] => (CuMatrix.Activators.relu[Float] , CuMatrix.Activators.relu_derivative[Float] , b)
      case x: Linear[_] => (CuMatrix.Activators.linear[Float] , CuMatrix.Activators.linear_derivative[Float] , b)
      case x: Square[_] => (CuMatrix.Activators.square[Float] , CuMatrix.Activators.square_derivative[Float] , b)
      case x: Sigmoid[_] => (CuMatrix.Activators.sigmoid[Float] , CuMatrix.Activators.sigmoid_derivative[Float] , b)
      case x: Tanh[_] => (CuMatrix.Activators.tanh[Float] , CuMatrix.Activators.tanh_derivative[Float] , b)
      case x => throw new SettingsNotSupportedException(s"This activator is not implemented for CUDA: ${a.symbol}.")
    }
  }
  // Per-layer (activation, derivative, bias) triples; a Bias wrapper contributes its bias value.
  private val _activators = _allLayers.map {
    case h: HasActivator[_] => h.activator match {
      case x: Activator[_] with Bias[Float] => activatorMapping(x.activator, x.bias)
      case x: Activator[_] => activatorMapping(x, 0.0f)
    }
  }
  // Index of the output layer.
  private val _lastLayerIdx = weights.size - 1
  // Map from layer index to its Convolution layer (conv layers only).
  private val _convLayers =
    _allLayers.zipWithIndex.map(_.swap).filter {
      case (_, _: Convolution[_]) => true
      case _ => false
    }.toMap.mapValues {
      case c: Convolution[Float] => c
    }
  // Width of the network output.
  private val _outputDim = _allLayers.last.neurons
  // Index of the last convolutional layer / last layer overall.
  private val _lastC = _convLayers.maxBy(_._1)._1
  private val _lastL = _allLayers.indices.last
  // GPU-resident copies of the host weight matrices.
  private val _cuWeights = weights.map(m => CuMatrix.fromDense(m))
  /**
    * Computes the network output for a single input tensor `x`
    * (forward pass with batch size 1, routed through the loss function's sink).
    */
  def apply(x: Tensor): Vector = {
    sink(x.matrix, _lastLayerIdx, batchSize = 1).toDenseVector
  }
  /**
    * Computes output for given inputs `in`
    * using efficient batch mode: all tensors are concatenated into one
    * batch matrix, evaluated in a single pass, then sliced back per sample.
    */
  def batchApply(xs: Tensors): Vectors = {
    BatchBreeder.unsliceMatrixByRow {
      sink(BatchBreeder.horzCatTensorBatch(xs), _lastLayerIdx, batchSize = xs.size)
    }
  }
  /**
    * `apply` under a focused layer: returns a function evaluating the net
    * only up to layer `l`, converting the intermediate matrix to the layer's
    * algebraic type. Falls back to the last layer when `l` cannot be found.
    */
  def focus[L <: Layer[Float]](l: L)(implicit cp: CanProduce[(Matrix, L), l.algebraicType]): Tensor => l.algebraicType = {
    val lwi = layers.zipWithIndex
    // Prefer reference equality; fall back to structural equality (warning on ambiguity).
    val idx = lwi.find(_._1 eq l).orElse {
      val p = lwi.filter(_._1 == l)
      if (p.size > 1) warn(s"Focus layer $l is ambiguous. Taking first. " +
        "Alternatively, use a direct object reference to the desired layer.")
      p.headOption
    } match {
      case Some((l, i)) => debug(s"Found focus layer $l at index $i."); i
      case None => warn(s"Focus layer $l not found. Fallback to last layer."); _lastLayerIdx
    }
    (in: Tensor) => {
      cp(sink(in.matrix, idx, batchSize = 1), l)
    }
  }
  /**
    * Trains this net with input `xs` against output `ys`.
    * Breeds GPU-friendly batches up-front, then enters the training loop.
    * Returns a [[Run]] with timing and iteration count, or a failure.
    */
  def train(xs: Tensors, ys: Vectors): Try[Run] = Try {
    import settings._
    // Default to full-batch training when no batch size is configured.
    val batchSize = settings.batchSize.getOrElse(xs.size)
    require(xs.size == ys.size, s"Mismatch between sample sizes. (${xs.size} != ${ys.size})")
    if (settings.verbose) {
      if(xs.size % batchSize != 0) warn(s"Batches are not even. (${xs.size} % $batchSize = ${xs.size % batchSize} != 0)")
      info(s"Training with ${xs.size} samples, batch size = $batchSize, batches = ${math.ceil(xs.size.toFloat / batchSize.toFloat).toInt}.")
      info(s"Breeding batches ...")
    }
    val (xsys, batchSizes) = BatchBreeder.breedCNN(xs, ys, batchSize)
    // Optionally raise the GC threshold to reduce GPU allocator churn during training.
    gcThreshold match {
      case Some(bytes) => GcThreshold.set(bytes)
      case None =>
    }
    run(xsys, learningRate(1 -> 1.0f), batchSizes, precision, batch = 0, batches = xsys.size, iteration = 1, iterations, startTime = System.currentTimeMillis())
  }
private def sink(x: Matrix, target: Int, batchSize: Int): Matrix = {
val r1 = flow(x, target, batchSize)
val r2 = if (target == _lastLayerIdx) lossFunction.sink(r1) else r1
r2
}
  // Inference-only forward pass: evaluates the net on the GPU up to layer
  // `target` and returns that layer's (possibly reshaped) activation as a
  // host-side dense matrix. All intermediate GPU buffers are released.
  private def flow(in: Matrix, target: Int, batchSize: Int): Matrix = {
    val _fa = ArrayBuffer.empty[CuMatrix[Float]]
    val _fr = ArrayBuffer.empty[CuMatrix[Float]] // raw, unshaped
    @tailrec def conv(_in: CuMatrix[Float], i: Int): Unit = {
      val l = _convLayers(i)
      val p = _cuWeights(i) * convolute(_in, l, batchSize)
      p += _activators(i)._3
      val a = _activators(i)._1(p)
      // Last conv layer is reshaped to row-per-sample layout for the fully connected part.
      _fa += { if (i == _lastC) reshape_batch(a, l.dimOut, batchSize) else a }
      _fr += a
      if (i < _lastC) conv(a, i + 1)
    }
    @tailrec def fully(_in: CuMatrix[Float], i: Int): Unit = {
      val l = _allLayers(i)
      val p = _in * _cuWeights(i)
      p += _activators(i)._3
      val a = _activators(i)._1(p)
      _fa += a
      _fr += a
      if (i < _lastL) fully(a, i + 1)
    }
    conv(CuMatrix.fromDense(in), 0)
    fully(_fa(_lastC), _lastC + 1)
    // Copy the requested layer's result to host memory before releasing GPU buffers.
    val r = _fr(target).toDense
    // NOTE(review): for i != _lastC the same CuMatrix instance is held by both
    // _fa and _fr, so release() is invoked twice on it — presumably release is
    // idempotent for CuMatrix; confirm against the CuMatrix implementation.
    _fa.foreach(_.release())
    _fr.foreach(_.release())
    r
  }
  /**
    * The training loop: processes one batch per call (tail-recursively),
    * cycling through batches until either the mean loss drops below
    * `precision` or `maxIterations` is reached.
    */
  @tailrec private def run(xsys: Seq[(Matrix, Matrix)], stepSize: Float, batchSizes: Map[Int, Int], precision: Double,
                           batch: Int, batches: Int, iteration: Int, maxIterations: Int, startTime: Long): Run = {
    val batchSize = batchSizes(batch)
    val (x, y) = (xsys(batch)._1, xsys(batch)._2)
    // Gradient approximation is a debugging mode; the normal path runs on the GPU.
    val loss =
      if (settings.approximation.isDefined) adaptWeightsApprox(x, y, stepSize, batchSize)
      else adaptWeights(x, y, stepSize, batchSize)
    val lossMean = mean(loss)
    if (settings.verbose) info(f"Iteration $iteration.${batch + 1}, Avg. Loss = $lossMean%.6g, Vector: $loss")
    maybeGraph(lossMean)
    // Waypoints persist host weights, so sync GPU -> host first.
    waypoint(syncWeights)(iteration)
    if (lossMean > precision && iteration < maxIterations) {
      run(xsys, settings.learningRate(iteration + 1 -> stepSize), batchSizes,
        precision, (batch + 1) % batches, batches, iteration + 1, maxIterations, startTime)
    } else {
      info(f"Took $iteration of $maxIterations iterations.")
      Run(startTime, System.currentTimeMillis(), iteration)
    }
  }
  /**
    * Copies batch to GPU, computes gradient for weights, updates weights using gradient descent and returns the loss matrix.
    * Forward pass memoizes activations (`fa`), derivatives (`fb`) and im2col
    * matrices (`fc`); backward pass fills deltas (`ds`) and gradients (`dws`).
    * All temporary GPU buffers are released before returning.
    */
  private def adaptWeights(x: Matrix, y: Matrix, stepSize: Float, batchSize: Int): Matrix = {
    import settings.updateRule
    val (_x, _y) = (CuMatrix.fromDense(x), CuMatrix.fromDense(y))
    val loss = CuMatrix.zeros[Float](batchSize, _outputDim)
    val fa = collection.mutable.Map.empty[Int, CuMatrix[Float]]
    val fb = collection.mutable.Map.empty[Int, CuMatrix[Float]]
    val fc = collection.mutable.Map.empty[Int, CuMatrix[Float]]
    val dws = collection.mutable.Map.empty[Int, CuMatrix[Float]]
    val ds = collection.mutable.Map.empty[Int, CuMatrix[Float]]
    // Forward pass through the conv layers; reshapes the last conv activation for the FC part.
    @tailrec def conv(_in: CuMatrix[Float], i: Int): Unit = {
      val l = _convLayers(i)
      val c = convolute(_in, l, batchSize)
      val p = _cuWeights(i) * c
      p += _activators(i)._3
      val a = _activators(i)._1(p)
      val b = _activators(i)._2(p)
      fa += i -> {
        if (i == _lastC) {
          val rb = reshape_batch(a, l.dimOut, batchSize)
          a.release()
          rb
        } else a
      }
      fb += i -> b
      fc += i -> c
      p.release()
      if (i < _lastC) conv(a, i + 1)
    }
    // Forward pass through the fully connected layers.
    @tailrec def fully(_in: CuMatrix[Float], i: Int): Unit = {
      val p = _in * _cuWeights(i)
      p += _activators(i)._3
      val a = _activators(i)._1(p)
      val b = _activators(i)._2(p)
      fa += i -> a
      fb += i -> b
      p.release()
      if (i < _lastL) fully(a, i + 1)
    }
    // Backward pass: output layer -> hidden FC layers -> boundary conv layer -> inner conv layers.
    @tailrec def derive(i: Int): Unit = {
      if (i == _lastLayerIdx) {
        val (err, grad) = lossFunction(_y, fa(i))
        val d = grad *:* fb(i)
        val dw = fa(i - 1).t * d
        dws += i -> dw
        ds += i -> d
        loss += err
        err.release()
        grad.release()
        derive(i - 1)
      } else if (i < _lastLayerIdx && i > _lastC) {
        val d = (ds(i + 1) * _cuWeights(i + 1).t) *:* fb(i)
        val dw = fa(i - 1).t * d
        dws += i -> dw
        ds += i -> d
        derive(i - 1)
      } else if (i == _lastC) {
        val l = _convLayers(i)
        val d1 = ds(i + 1) * _cuWeights(i + 1).t
        val d2 = reshape_batch_backprop(d1, l.dimOut, batchSize)
        val d = d2 *:* fb(i)
        val dw = d * fc(i).t
        dws += i -> dw
        ds += i -> d
        d1.release()
        d2.release()
        if (i > 0) derive(i - 1)
      } else {
        val l = _convLayers(i + 1)
        val ww = reshape_batch(_cuWeights(i + 1), (l.field._1, l.field._2, l.filters), l.dimIn._3)
        val dc = convolute_backprop(ds(i + 1), l, batchSize)
        val d = ww * dc *:* fb(i)
        val dw = d * fc(i).t
        dws += i -> dw
        ds += i -> d
        ww.release()
        dc.release()
        if (i > 0) derive(i - 1)
      }
    }
    conv(_x, 0)
    fully(fa(_lastC), _lastC + 1)
    derive(_lastLayerIdx)
    ds.values.foreach(_.release())
    fa.values.foreach(_.release())
    fb.values.foreach(_.release())
    fc.values.foreach(_.release())
    // Apply the configured update rule per layer, then drop the gradients.
    (0 to _lastLayerIdx).foreach(i => updateRule(_cuWeights(i), dws(i), stepSize, i))
    dws.values.foreach(_.release())
    _x.release()
    _y.release()
    // Reduce the per-sample loss matrix to a single row via ones-vector multiplication.
    val lossReduced = (loss.t * CuMatrix.ones[Float](loss.rows, 1)).t
    lossReduced.toDense
  }
  /** For debugging, approximates the gradients using `settings.approximation`.
    * Requires a `Debuggable` update rule so the computed gradients can be
    * captured for inspection. Returns the reduced loss matrix for the batch. */
  private def adaptWeightsApprox(xs: Matrix, ys: Matrix, stepSize: Float, batchSize: Int): Matrix = {
    require(settings.updateRule.isInstanceOf[Debuggable[Float]])
    val _rule: Debuggable[Float] = settings.updateRule.asInstanceOf[Debuggable[Float]]
    // Full forward pass + loss, reduced to a single row via ones-vector multiplication.
    def lossFunc(): Matrix = {
      val loss = lossFunction(ys, flow(xs, _lastLayerIdx, batchSize))._1
      val reduced = (loss.t * DenseMatrix.ones[Float](loss.rows, 1)).t
      reduced
    }
    val out = lossFunc()
    // Numerically approximates d(loss)/d(weight) for one scalar weight.
    def approximateGradient(weightLayer: Int, weight: (Int, Int)): Float = {
      sum(settings.approximation.get.apply(weights, lossFunc, syncWithGPU, weightLayer, weight))
    }
    // Host weights are perturbed by the approximator; push them to the GPU before each probe.
    def syncWithGPU(): Unit = {
      weights.zip(_cuWeights).foreach {
        case (w, cw) => cw := w
      }
    }
    val updates = collection.mutable.HashMap.empty[(Int, (Int, Int)), Float]
    val grads = collection.mutable.HashMap.empty[(Int, (Int, Int)), Float]
    val debug = collection.mutable.HashMap.empty[Int, Matrix]
    // Probe every scalar weight; collect both the SGD update and the raw gradient.
    weights.zipWithIndex.foreach {
      case (l, idx) =>
        debug += idx -> l.copy
        l.foreachPair { (k, v) =>
          val grad = approximateGradient(idx, k)
          updates += (idx, k) -> (v - (stepSize * grad))
          grads += (idx, k) -> grad
        }
    }
    // Apply all updates only after every gradient has been probed against the unmodified weights.
    updates.foreach {
      case ((wl, k), v) =>
        weights(wl).update(k, v)
    }
    grads.foreach {
      case ((wl, k), v) =>
        debug(wl).update(k, v)
    }
    _rule.lastGradients = debug
    syncWithGPU()
    out
  }
private def syncWeights(): Unit = {
weights.zip(_cuWeights).foreach {
case (w, cw) => w := cw.toDense
}
}
}
// </editor-fold>
| zenecture/neuroflow | core/src/main/scala/neuroflow/nets/gpu/ConvNetwork.scala | Scala | apache-2.0 | 25,046 |
/*
* Copyright (c) 2014-15 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless
package ops
import labelled.field
import poly._
object union {
import shapeless.labelled.FieldType
  /**
   * Type class supporting union member selection: resolving a `Selector[C, K]`
   * proves that union `C` may carry a field labelled `K`, and `apply` returns
   * `Some(value)` when the union's current member is that field, `None` otherwise.
   *
   * @author Miles Sabin
   */
  @annotation.implicitNotFound(msg = "No field ${K} in union ${C}")
  trait Selector[C <: Coproduct, K] extends Serializable {
    type V
    type Out = Option[V]
    def apply(l : C): Out
  }
trait LowPrioritySelector {
type Aux[C <: Coproduct, K, V0] = Selector[C, K] { type V = V0 }
implicit def tlSelector[H, T <: Coproduct, K]
(implicit st : Selector[T, K]): Aux[H :+: T, K, st.V] =
new Selector[H :+: T, K] {
type V = st.V
def apply(u : H :+: T): Out = u match {
case Inl(l) => None
case Inr(r) => if(st == null) None else st(r)
}
}
}
  object Selector extends LowPrioritySelector {
    def apply[C <: Coproduct, K](implicit selector: Selector[C, K]): Aux[C, K, selector.V] = selector
    // Highest priority: the head of the union carries the requested field `K`.
    implicit def hdSelector[K, V0, T <: Coproduct]: Aux[FieldType[K, V0] :+: T, K, V0] =
      new Selector[FieldType[K, V0] :+: T, K] {
        type V = V0
        def apply(u : FieldType[K, V] :+: T): Out = u match {
          case Inl(l) => Some(l)
          case Inr(r) => None
        }
      }
  }
  /**
   * Type class supporting collecting the keys of a union as an `HList`.
   *
   * @author Miles Sabin
   */
  trait Keys[U <: Coproduct] extends DepFn0 with Serializable { type Out <: HList }
  object Keys {
    def apply[U <: Coproduct](implicit keys: Keys[U]): Aux[U, keys.Out] = keys
    type Aux[U <: Coproduct, Out0 <: HList] = Keys[U] { type Out = Out0 }
    // Base case: the empty union has no keys.
    implicit def cnilKeys[U <: CNil]: Aux[U, HNil] =
      new Keys[U] {
        type Out = HNil
        def apply(): Out = HNil
      }
    // Inductive case: prepend the head field's key (materialized via Witness) to the tail's keys.
    implicit def coproductKeys[K, V, T <: Coproduct](implicit wk: Witness.Aux[K], kt: Keys[T]): Aux[FieldType[K, V] :+: T, K :: kt.Out] =
      new Keys[FieldType[K, V] :+: T] {
        type Out = K :: kt.Out
        def apply(): Out = wk.value :: kt()
      }
  }
  /**
   * Type class supporting collecting the value of a union as a `Coproduct`
   * (i.e. stripping the key labels from the fields).
   *
   * @author Miles Sabin
   */
  trait Values[U <: Coproduct] extends DepFn1[U] with Serializable { type Out <: Coproduct }
  object Values {
    def apply[U <: Coproduct](implicit values: Values[U]): Aux[U, values.Out] = values
    type Aux[U <: Coproduct, Out0 <: Coproduct] = Values[U] { type Out = Out0 }
    // Base case: the empty union maps to the empty coproduct.
    implicit def cnilValues[U <: CNil]: Aux[U, CNil] =
      new Values[U] {
        type Out = CNil
        def apply(u: U): Out = u
      }
    // Inductive case: unwrap the head's FieldType to its bare value type.
    implicit def coproductValues[K, V, T <: Coproduct](implicit vt: Values[T]): Aux[FieldType[K, V] :+: T, V :+: vt.Out] =
      new Values[FieldType[K, V] :+: T] {
        type Out = V :+: vt.Out
        def apply(l: FieldType[K, V] :+: T): Out = l match {
          case Inl(l) => Inl(l)
          case Inr(r) => Inr(vt(r))
        }
      }
  }
  /**
   * Type class supporting converting this union to a `Coproduct` of key-value pairs.
   *
   * @author Alexandre Archambault
   */
  trait Fields[U <: Coproduct] extends DepFn1[U] with Serializable {
    type Out <: Coproduct
  }
  object Fields {
    def apply[U <: Coproduct](implicit fields: Fields[U]): Aux[U, fields.Out] = fields
    type Aux[L <: Coproduct, Out0 <: Coproduct] = Fields[L] { type Out = Out0 }
    // Base case: the empty union has no fields.
    implicit val cnilFields: Aux[CNil, CNil] =
      new Fields[CNil] {
        type Out = CNil
        def apply(u: CNil) = u
      }
    // Inductive case: pair the head field's key (via Witness) with its value.
    implicit def cconsFields[K, V, T <: Coproduct](implicit
      key: Witness.Aux[K],
      tailFields: Fields[T]
    ): Aux[FieldType[K, V] :+: T, (K, V) :+: tailFields.Out] =
      new Fields[FieldType[K, V] :+: T] {
        type Out = (K, V) :+: tailFields.Out
        def apply(u: FieldType[K, V] :+: T) =
          u match {
            case Inl(v) => Inl(key.value -> v)
            case Inr(t) => Inr(tailFields(t))
          }
      }
  }
  /**
   * Type class combining `Keys` and `Values` for convenience and compilation speed.
   * It's similar to `Fields`, but produces distinct `HList` and `Coproduct`
   * instead of a zipped `Coproduct`.
   *
   * @author Jisoo Park
   */
  trait UnzipFields[L <: Coproduct] extends Serializable {
    type Keys <: HList
    type Values <: Coproduct
    def keys: Keys
    def values(u: L): Values
  }
  object UnzipFields {
    def apply[L <: Coproduct](implicit uf: UnzipFields[L]): Aux[L, uf.Keys, uf.Values] = uf
    type Aux[L <: Coproduct, K <: HList, V <: Coproduct] = UnzipFields[L] { type Keys = K; type Values = V }
    // Base case: no keys, and the values coproduct is the (empty) union itself.
    implicit def cnilUnzipFields[L <: CNil]: Aux[L, HNil, L] =
      new UnzipFields[L] {
        type Keys = HNil
        type Values = L
        def keys = HNil
        def values(u: L): L = u
      }
    // Inductive case: prepend the head's key to Keys and its value type to Values.
    implicit def cconsUnzipFields[K, V, T <: Coproduct](implicit
      key: Witness.Aux[K],
      tailUF: UnzipFields[T]
    ): Aux[FieldType[K, V] :+: T, K :: tailUF.Keys, V :+: tailUF.Values] =
      new UnzipFields[FieldType[K, V] :+: T] {
        type Keys = K :: tailUF.Keys
        type Values = V :+: tailUF.Values
        def keys = key.value :: tailUF.keys
        def values(u: FieldType[K, V] :+: T) =
          u match {
            case Inl(v) => Inl(v)
            case Inr(t) => Inr(tailUF.values(t))
          }
      }
  }
  /**
   * Type class supporting converting this union to a `Map` whose keys and values
   * are typed as the Lub of the keys and values of this union.
   *
   * Note: since a union holds exactly one member at a time, the resulting
   * `Map` contains at most one entry.
   *
   * @author Alexandre Archambault
   */
  trait ToMap[U <: Coproduct] extends DepFn1[U] with Serializable {
    type Key
    type Value
    type Out = Map[Key, Value]
  }
  object ToMap {
    def apply[U <: Coproduct](implicit toMap: ToMap[U]): Aux[U, toMap.Key, toMap.Value] = toMap
    type Aux[U <: Coproduct, Key0, Value0] = ToMap[U] { type Key = Key0; type Value = Value0 }
    // Base case: an (uninhabited) empty union yields an empty map for any key/value types.
    implicit def cnilToMap[K, V]: Aux[CNil, K, V] =
      new ToMap[CNil] {
        type Key = K
        type Value = V
        def apply(l: CNil) = Map.empty
      }
    // Default instance pinning the base case to the most permissive types.
    implicit val cnilToMapAnyNothing: Aux[CNil, Any, Nothing] = cnilToMap[Any, Nothing]
    // Single-field union: the key/value types are exactly the field's.
    implicit def csingleToMap[K, V](implicit
      wk: Witness.Aux[K]
    ): Aux[FieldType[K, V] :+: CNil, K, V] =
      new ToMap[FieldType[K, V] :+: CNil] {
        type Key = K
        type Value = V
        def apply(c: FieldType[K, V] :+: CNil) = (c: @unchecked) match {
          case Inl(h) => Map(wk.value -> (h: V))
        }
      }
    // Inductive case: widen head and tail key/value types to their least upper bounds.
    implicit def coproductToMap[HK, HV, TH, TT <: Coproduct, TK, TV, K, V](implicit
      tailToMap: ToMap.Aux[TH :+: TT, TK, TV],
      keyLub: Lub[HK, TK, K],
      valueLub: Lub[HV, TV, V],
      wk: Witness.Aux[HK]
    ): Aux[FieldType[HK, HV] :+: TH :+: TT, K, V] =
      new ToMap[FieldType[HK, HV] :+: TH :+: TT] {
        type Key = K
        type Value = V
        def apply(c: FieldType[HK, HV] :+: TH :+: TT) = c match {
          case Inl(h) => Map(keyLub.left(wk.value) -> valueLub.left(h: HV))
          case Inr(t) => tailToMap(t).map{case (k, v) => keyLub.right(k) -> valueLub.right(v)}
        }
      }
  }
  /**
   * Type class supporting mapping a higher rank function over the values of a union,
   * preserving the field labels.
   *
   * @author Alexandre Archambault
   */
  trait MapValues[HF, U <: Coproduct] extends DepFn1[U] with Serializable { type Out <: Coproduct }
  object MapValues {
    def apply[HF, U <: Coproduct](implicit mapValues: MapValues[HF, U]): Aux[HF, U, mapValues.Out] = mapValues
    type Aux[HF, U <: Coproduct, Out0 <: Coproduct] = MapValues[HF, U] { type Out = Out0 }
    // Base case: nothing to map over.
    implicit def cnilMapValues[HF]: Aux[HF, CNil, CNil] =
      new MapValues[HF, CNil] {
        type Out = CNil
        def apply(c: CNil) = c
      }
    // Inductive case: apply the poly function's Case1 to the head value, re-tagging with key K.
    implicit def cconsMapValues[HF, K, V, T <: Coproduct](implicit
      hc: Case1[HF, V],
      tailMapValues: MapValues[HF, T]
    ): Aux[HF, FieldType[K, V] :+: T, FieldType[K, hc.Result] :+: tailMapValues.Out] =
      new MapValues[HF, FieldType[K, V] :+: T] {
        type Out = FieldType[K, hc.Result] :+: tailMapValues.Out
        def apply(c: FieldType[K, V] :+: T) = c match {
          case Inl(h) => Inl(field[K](hc(h: V)))
          case Inr(t) => Inr(tailMapValues(t))
        }
      }
  }
}
| isaka/shapeless | core/src/main/scala/shapeless/ops/unions.scala | Scala | apache-2.0 | 8,857 |
/*
* Copyright 2012-2014 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.comcast.xfinity.sirius.uberstore.seqindex
import java.io.RandomAccessFile
import java.nio.ByteBuffer
import java.util.{TreeMap => JTreeMap}
import com.comcast.xfinity.sirius.uberstore.common.Checksummer
import com.comcast.xfinity.sirius.uberstore.common.Fnv1aChecksummer
import scala.annotation.tailrec
object SeqIndexBinaryFileOps {
  /**
   * Create an instance using bufferSize buffer size for bulk
   * operations (loadIndex). Uses FNV-1a as the entry checksum.
   */
  def apply() = {
    // XXX: Buffer sizing is not exposed since at this point since as
    //      the code stands it hasn't permeated above this layer.
    //      Simpler to leave it out of the picture for now.
    new SeqIndexBinaryFileOps(Fnv1aChecksummer())
  }
}
/**
 * Class providing low level file operations for a binary
 * based sequence index with checksum based data protection.
 *
 * Each index entry is 24 bytes on disk:
 * [ checksum: 8 bytes | sequence: 8 bytes | offset: 8 bytes ],
 * where the checksum is computed over the entry with its checksum
 * field zeroed.
 *
 * @param checksummer Checksummer used to calculate entry checksums
 * @param bufferSize size of the buffer to be used for reading from passed
 *                 in handles. Each read operation will have its own buffer
 */
class SeqIndexBinaryFileOps private[seqindex](checksummer: Checksummer,
                                              bufferSize: Int = 24 * 1024) {
  /**
   * Persist sequence to offset mapping in the index file at the
   * current position of writeHandle.
   *
   * This function has the side effect of advancing writeHandle
   * to the end of the written data.
   *
   * Not thread safe with respect to writeHandle
   *
   * @param writeHandle the RandomAccessFile to persist into
   * @param seq the sequence number to store
   * @param offset the offset associated with seq
   */
  def put(writeHandle: RandomAccessFile, seq: Long, offset: Long): Unit = {
    val byteBuf = ByteBuffer.allocate(24)
    // Write seq/offset first; checksum is computed over the entry with a zeroed checksum field.
    byteBuf.putLong(8, seq).putLong(16, offset)
    val chksum = checksummer.checksum(byteBuf.array)
    byteBuf.putLong(0, chksum)
    writeHandle.write(byteBuf.array)
  }
  /**
   * Load all sequence -> offset mappings from the input file handle.
   *
   * Has the side effect of advancing the file pointer to the end of
   * the file.
   *
   * Not thread safe with respect to indexFileHandle
   *
   * @param indexFileHandle the file handle to read from
   *
   * @return the SortedMap[Long, Long] of sequence -> offset mappings
   */
  // XXX: this may better serve as part of PersistedSeqIndex, or something
  //      else, as it is slightly higher level and can be composed of file ops
  //      public api, but that's for later
  def loadIndex(indexFileHandle: RandomAccessFile): JTreeMap[Long, Long] = {
    val byteBuf = ByteBuffer.allocate(bufferSize)
    readIndex(indexFileHandle, byteBuf)
  }
  /**
   * Read an entry off of a handle, with the side effect of advancing
   * the handle.
   *
   * It is the caller's duty to ensure that the handle is properly located,
   * aligned, and that data is available.
   *
   * Not thread safe with respect to indexFileHandle
   *
   * @param indexFileHandle RandomAccessFile for the index file, it's offset
   *          will be advanced 24 bytes (entry length)
   */
  def readEntry(indexFileHandle: RandomAccessFile): (Long, Long) = {
    val byteBuf = ByteBuffer.allocate(24)
    // NOTE(review): the return value of read() is ignored here — a short read
    // or EOF leaves the buffer partially zeroed and surfaces only as a
    // checksum mismatch; confirm whether an explicit length check is wanted.
    indexFileHandle.read(byteBuf.array)
    readEntry(byteBuf)
  }
  // Tail-recursively reads bufferSize-sized chunks until a short read signals EOF,
  // accumulating all decoded entries into soFar.
  @tailrec
  private def readIndex(indexFileHandle: RandomAccessFile,
                        byteBuf: ByteBuffer,
                        soFar: JTreeMap[Long, Long] = new JTreeMap[Long, Long]): JTreeMap[Long, Long] = {
    val bytesRead = indexFileHandle.read(byteBuf.array)
    if (bytesRead > 0) {
      val chunk = decodeChunk(byteBuf, bytesRead)
      chunk.foreach((seqOff) => soFar.put(seqOff._1, seqOff._2))
      // A full buffer means there may be more data; a partial one means EOF was hit.
      if (bytesRead == byteBuf.limit()) {
        readIndex(indexFileHandle, byteBuf, soFar)
      } else {
        soFar
      }
    } else {
      soFar
    }
  }
  // Splits the first chunkSize bytes of byteBuf into 24-byte entries and decodes each.
  // Assumes chunkSize is a multiple of the 24-byte entry length.
  private def decodeChunk(byteBuf: ByteBuffer, chunkSize: Int): List[(Long, Long)] = {
    var chunk = List[(Long, Long)]()
    byteBuf.position(0)
    val entryBuf = ByteBuffer.allocate(24)
    while (byteBuf.position() != chunkSize) {
      byteBuf.get(entryBuf.array)
      val (seq, offset) = readEntry(entryBuf)
      chunk ::= (seq, offset)
    }
    chunk
  }
  // reads an entry (destructively) from entryBuf, at entryBuf's current posisition,
  // advancing the position past the entry
  private def readEntry(entryBuf: ByteBuffer): (Long, Long) = {
    val chksum = entryBuf.getLong(0)
    // Zero the checksum field so the checksum is recomputed over the same bytes as at write time.
    entryBuf.putLong(0, 0L)
    if (chksum != checksummer.checksum(entryBuf.array)) {
      throw new IllegalStateException("Sequence cache corrupted")
    }
    val seq = entryBuf.getLong(8)
    val offset = entryBuf.getLong(16)
    (seq, offset)
  }
}
| Comcast/sirius | src/main/scala/com/comcast/xfinity/sirius/uberstore/seqindex/SeqIndexBinaryFileOps.scala | Scala | apache-2.0 | 5,354 |
package com.clemble.query.model
/**
* Simple abstraction to represent field Projection
*/
sealed trait Projection
case class Include(field: String) extends Projection
case class Exclude(field: String) extends Projection | clemble/scala-query-dsl | src/main/scala/com/clemble/query/model/Projection.scala | Scala | apache-2.0 | 224 |
package io.getquill.quotation
import scala.reflect.ClassTag
import io.getquill.ast._
import io.getquill.norm.BetaReduction
import io.getquill.util.Messages.RichContext
import io.getquill.util.Interleave
import io.getquill.dsl.CoreDsl
import scala.collection.immutable.StringOps
import scala.reflect.macros.TypecheckException
trait Parsing extends EntityConfigParsing {
this: Quotation =>
import c.universe.{ Ident => _, Constant => _, Function => _, If => _, Block => _, _ }
  // Wraps a partial function from compiler trees to parsed results of type T.
  // `apply` fails compilation with a readable message when the tree is not parseable.
  // Trees produced by for-comprehension desugaring (`withFilter` with an
  // "ifrefutable" synthetic alias) are transparently unwrapped to their source.
  case class Parser[T](p: PartialFunction[Tree, T])(implicit ct: ClassTag[T]) {
    def apply(tree: Tree) =
      unapply(tree).getOrElse {
        c.fail(s"Tree '$tree' can't be parsed to '${ct.runtimeClass.getSimpleName}'")
      }
    def unapply(tree: Tree): Option[T] =
      tree match {
        case q"$source.withFilter(($alias) => $body)" if (alias.name.toString.contains("ifrefutable")) =>
          unapply(source)
        case other =>
          p.lift(tree)
      }
  }
  // Top-level AST parser: strips type ascriptions, then delegates to the
  // specialized parsers in priority order (first match wins).
  val astParser: Parser[Ast] = Parser[Ast] {
    case q"$i: $typ" => astParser(i)
    case `liftParser`(value) => value
    case `valParser`(value) => value
    case `patMatchValParser`(value) => value
    case `valueParser`(value) => value
    case `quotedAstParser`(value) => value
    case `queryParser`(value) => value
    case `functionParser`(value) => value
    case `actionParser`(value) => value
    case `infixParser`(value) => value
    case `orderingParser`(value) => value
    case `operationParser`(value) => value
    case `identParser`(value) => value
    case `propertyParser`(value) => value
    case `stringInterpolationParser`(value) => value
    case `optionOperationParser`(value) => value
    case `boxingParser`(value) => value
    case `ifParser`(value) => value
    case `patMatchParser`(value) => value
    case `blockParser`(block) => block
  }
  // Parses a multi-statement block `{ stmt1; stmt2; ... }` into an AST Block.
  // Single-statement blocks are left to the other parsers.
  val blockParser: Parser[Block] = Parser[Block] {
    case q"{..$exprs}" if exprs.size > 1 => Block(exprs.map(astParser(_)))
  }
  // Parses a plain `val name = body` definition into an AST Val.
  val valParser: Parser[Val] = Parser[Val] {
    case q"val $name: $typ = $body" => Val(ident(name), astParser(body))
  }
  // Parses a val whose right-hand side is a pattern match (e.g. tuple destructuring).
  val patMatchValParser: Parser[Val] = Parser[Val] {
    case q"$mods val $name: $typ = ${ patMatchParser(value) }" =>
      Val(ident(name), value)
  }
  // Parses a single-case pattern match `expr match { case (fields) => body }`.
  val patMatchParser: Parser[Ast] = Parser[Ast] {
    case q"$expr match { case ($fields) => $body }" =>
      patMatchParser(expr, fields, body)
  }
  // Translates a (possibly nested) tuple pattern match into the body with every
  // bound identifier beta-reduced to the corresponding `_N` tuple property access.
  private def patMatchParser(tupleTree: Tree, fieldsTree: Tree, bodyTree: Tree) = {
    val tuple = astParser(tupleTree)
    val fields = astParser(fieldsTree)
    val body = astParser(bodyTree)
    // Builds tuple._i._j... property access for a nesting path.
    def property(path: List[Int]) =
      path.foldLeft(tuple) {
        case (t, i) => Property(t, s"_${i + 1}")
      }
    // Collects ident -> property substitutions by walking the (nested) tuple pattern.
    def reductions(ast: Ast, path: List[Int] = List()): List[(Ident, Ast)] = {
      ast match {
        case ident: Ident => List(ident -> property(path))
        case Tuple(elems) =>
          elems.zipWithIndex.flatMap {
            case (elem, idx) => reductions(elem, path :+ idx)
          }
        case other =>
          c.fail(s"Please report a bug. Expected tuple, val, or ident, got '$other'")
      }
    }
    BetaReduction(body, reductions(fields): _*)
  }
  // Parses `if (a) b else c` into an AST If.
  val ifParser: Parser[If] = Parser[If] {
    case q"if($a) $b else $c" => If(astParser(a), astParser(b), astParser(c))
  }
  // Parses the dsl's lift/liftQuery calls into the corresponding Lift AST nodes,
  // carrying the lifted tree and (for scalar lifts) its encoder.
  val liftParser: Parser[Lift] = Parser[Lift] {
    case q"$pack.liftScalar[$t]($value)($encoder)" => ScalarValueLift(value.toString, value, encoder)
    case q"$pack.liftCaseClass[$t]($value)" => CaseClassValueLift(value.toString, value)
    case q"$pack.liftQueryScalar[$t, $u]($value)($encoder)" => ScalarQueryLift(value.toString, value, encoder)
    case q"$pack.liftQueryCaseClass[$t, $u]($value)" => CaseClassQueryLift(value.toString, value)
    // Unused, it's here only to make eclipse's presentation compiler happy :(
    case q"$pack.lift[$t]($value)" => ScalarValueLift(value.toString, value, q"null")
    case q"$pack.liftQuery[$t, $u]($value)" => ScalarQueryLift(value.toString, value, q"null")
  }
  // Parses references to quoted values: explicit `unquote` calls, and any tree
  // whose type is a Quoted[_]. Static quotations are inlined (rebinding when
  // possible); dynamic quotations fall back to a runtime Dynamic node.
  val quotedAstParser: Parser[Ast] = Parser[Ast] {
    case q"$pack.unquote[$t]($quoted)" => astParser(quoted)
    case t if (t.tpe <:< c.weakTypeOf[CoreDsl#Quoted[Any]]) =>
      unquote[Ast](t) match {
        case Some(ast) if (!IsDynamic(ast)) =>
          t match {
            case t: c.universe.Block => ast // expand quote(quote(body)) locally
            case t =>
              Rebind(c)(t, ast, astParser(_)) match {
                case Some(ast) => ast
                case None => QuotedReference(t, ast)
              }
          }
        case other => Dynamic(t)
      }
  }
  // Strips compiler-inserted numeric and boxing conversions, parsing through
  // to the underlying value so quotations are unaffected by auto(un)boxing.
  val boxingParser: Parser[Ast] = Parser[Ast] {
    // BigDecimal
    case q"$pack.int2bigDecimal(${ astParser(v) })" => v
    case q"$pack.long2bigDecimal(${ astParser(v) })" => v
    case q"$pack.double2bigDecimal(${ astParser(v) })" => v
    case q"$pack.javaBigDecimal2bigDecimal(${ astParser(v) })" => v
    // Predef autoboxing
    case q"$pack.byte2Byte(${ astParser(v) })" => v
    case q"$pack.short2Short(${ astParser(v) })" => v
    case q"$pack.char2Character(${ astParser(v) })" => v
    case q"$pack.int2Integer(${ astParser(v) })" => v
    case q"$pack.long2Long(${ astParser(v) })" => v
    case q"$pack.float2Float(${ astParser(v) })" => v
    case q"$pack.double2Double(${ astParser(v) })" => v
    case q"$pack.boolean2Boolean(${ astParser(v) })" => v
    case q"$pack.augmentString(${ astParser(v) })" => v
    case q"$pack.unaugmentString(${ astParser(v) })" => v
    case q"$pack.Byte2byte(${ astParser(v) })" => v
    case q"$pack.Short2short(${ astParser(v) })" => v
    case q"$pack.Character2char(${ astParser(v) })" => v
    case q"$pack.Integer2int(${ astParser(v) })" => v
    case q"$pack.Long2long(${ astParser(v) })" => v
    case q"$pack.Float2float(${ astParser(v) })" => v
    case q"$pack.Double2double(${ astParser(v) })" => v
    case q"$pack.Boolean2boolean(${ astParser(v) })" => v
  }
  // Parses the query DSL: entity creation (`query`/`schema`) and the monadic,
  // aggregation, pagination, set and join combinators. Every combinator case is
  // guarded with `is[CoreDsl#Query[Any]]` so only quill query trees match.
  val queryParser: Parser[Ast] = Parser[Ast] {
    case q"$source.schema(($alias) => $body)" =>
      val config = parseEntityConfig(body)
      ConfiguredEntity(astParser(source), config.alias, config.properties)
    case q"$pack.query[${ t: Type }]($ct)" if (t.typeSymbol.isClass) =>
      SimpleEntity(t.typeSymbol.name.decodedName.toString)
    // Entity type not statically known: defer to runtime via the ClassTag.
    case q"$pack.query[${ _ }]($ct)" =>
      Dynamic {
        c.typecheck(q"""
          new ${c.prefix}.Quoted[${c.prefix}.EntityQuery[T]] {
            override def ast = io.getquill.ast.SimpleEntity($ct.runtimeClass.getSimpleName)
          }
        """)
      }
    case q"$source.filter(($alias) => $body)" if (is[CoreDsl#Query[Any]](source)) =>
      Filter(astParser(source), identParser(alias), astParser(body))
    case q"$source.withFilter(($alias) => $body)" if (is[CoreDsl#Query[Any]](source)) =>
      Filter(astParser(source), identParser(alias), astParser(body))
    case q"$source.map[$t](($alias) => $body)" if (is[CoreDsl#Query[Any]](source)) =>
      Map(astParser(source), identParser(alias), astParser(body))
    case q"$source.flatMap[$t](($alias) => $body)" if (is[CoreDsl#Query[Any]](source)) =>
      FlatMap(astParser(source), identParser(alias), astParser(body))
    case q"$source.sortBy[$t](($alias) => $body)($ord)" if (is[CoreDsl#Query[Any]](source)) =>
      SortBy(astParser(source), identParser(alias), astParser(body), astParser(ord))
    case q"$source.groupBy[$t](($alias) => $body)" if (is[CoreDsl#Query[Any]](source)) =>
      GroupBy(astParser(source), identParser(alias), astParser(body))
    case q"$a.min[$t]" if (is[CoreDsl#Query[Any]](a)) => Aggregation(AggregationOperator.`min`, astParser(a))
    case q"$a.max[$t]" if (is[CoreDsl#Query[Any]](a)) => Aggregation(AggregationOperator.`max`, astParser(a))
    case q"$a.avg[$t]($n)" if (is[CoreDsl#Query[Any]](a)) => Aggregation(AggregationOperator.`avg`, astParser(a))
    case q"$a.sum[$t]($n)" if (is[CoreDsl#Query[Any]](a)) => Aggregation(AggregationOperator.`sum`, astParser(a))
    case q"$a.size" if (is[CoreDsl#Query[Any]](a)) => Aggregation(AggregationOperator.`size`, astParser(a))
    case q"$source.take($n)" if (is[CoreDsl#Query[Any]](source)) =>
      Take(astParser(source), astParser(n))
    case q"$source.drop($n)" if (is[CoreDsl#Query[Any]](source)) =>
      Drop(astParser(source), astParser(n))
    case q"$source.union[$t]($n)" if (is[CoreDsl#Query[Any]](source)) =>
      Union(astParser(source), astParser(n))
    case q"$source.unionAll[$t]($n)" if (is[CoreDsl#Query[Any]](source)) =>
      UnionAll(astParser(source), astParser(n))
    case q"$source.++[$t]($n)" if (is[CoreDsl#Query[Any]](source)) =>
      UnionAll(astParser(source), astParser(n))
    // Applied join (a join b) requires an `on` clause; self-join takes a predicate.
    case q"${ joinCallParser(typ, a, Some(b)) }.on(($aliasA, $aliasB) => $body)" =>
      Join(typ, a, b, identParser(aliasA), identParser(aliasB), astParser(body))
    case q"${ joinCallParser(typ, a, None) }($aliasA => $body)" =>
      val alias = identParser(aliasA)
      Join(typ, a, a, alias, alias, astParser(body))
    case q"${ joinCallParser(typ, a, b) }" =>
      c.fail("a join clause must be followed by 'on'.")
    case q"$source.distinct" if (is[CoreDsl#Query[Any]](source)) =>
      Distinct(astParser(source))
    case q"$source.nested" if (is[CoreDsl#Query[Any]](source)) =>
      Nested(astParser(source))
  }
  // Parses ordering specifications. The implicit `Ordering` (no explicit Ord)
  // defaults to ascending with nulls first; `Ord.apply` composes per-column
  // orderings for tuples.
  implicit val orderingParser: Parser[Ordering] = Parser[Ordering] {
    case q"$pack.implicitOrd[$t]" => AscNullsFirst
    case q"$pack.Ord.apply[..$t](..$elems)" => TupleOrdering(elems.map(orderingParser(_)))
    case q"$pack.Ord.asc[$t]" => Asc
    case q"$pack.Ord.desc[$t]" => Desc
    case q"$pack.Ord.ascNullsFirst[$t]" => AscNullsFirst
    case q"$pack.Ord.descNullsFirst[$t]" => DescNullsFirst
    case q"$pack.Ord.ascNullsLast[$t]" => AscNullsLast
    case q"$pack.Ord.descNullsLast[$t]" => DescNullsLast
  }
  // Parses a `_.prop -> "alias"` lambda (via Predef.ArrowAssoc) into a
  // PropertyAlias used by `schema` to rename columns.
  implicit val propertyAliasParser: Parser[PropertyAlias] = Parser[PropertyAlias] {
    case q"(($x1) => $pack.Predef.ArrowAssoc[$t]($x2.$prop).$arrow[$v](${ alias: String }))" =>
      PropertyAlias(prop.decodedName.toString, alias)
  }
  // Parses join invocations into (join type, left side, optional right side).
  // The applied forms (`a.join(b)`) yield Some(right); the unary forms
  // (`a.join`) yield None and are completed by a predicate lambda.
  // NOTE(review): there is no unary `fullJoin[$t]` case here — confirm that
  // full joins are only supported in the applied (two-query) form.
  val joinCallParser: Parser[(JoinType, Ast, Option[Ast])] = Parser[(JoinType, Ast, Option[Ast])] {
    case q"$a.join[$t, $u]($b)" if (is[CoreDsl#Query[Any]](a)) => (InnerJoin, astParser(a), Some(astParser(b)))
    case q"$a.leftJoin[$t, $u]($b)" if (is[CoreDsl#Query[Any]](a)) => (LeftJoin, astParser(a), Some(astParser(b)))
    case q"$a.rightJoin[$t, $u]($b)" if (is[CoreDsl#Query[Any]](a)) => (RightJoin, astParser(a), Some(astParser(b)))
    case q"$a.fullJoin[$t, $u]($b)" if (is[CoreDsl#Query[Any]](a)) => (FullJoin, astParser(a), Some(astParser(b)))
    case q"$a.join[$t]" if (is[CoreDsl#Query[Any]](a)) => (InnerJoin, astParser(a), None)
    case q"$a.leftJoin[$t]" if (is[CoreDsl#Query[Any]](a)) => (LeftJoin, astParser(a), None)
    case q"$a.rightJoin[$t]" if (is[CoreDsl#Query[Any]](a)) => (RightJoin, astParser(a), None)
  }
  // Parses `infix"..."` string interpolations into an Infix node, keeping the
  // literal parts and parsing each interpolated parameter. `.as[T]` only casts
  // the type, so the underlying infix is parsed unchanged.
  val infixParser: Parser[Infix] = Parser[Infix] {
    case q"$infix.as[$t]" =>
      infixParser(infix)
    case q"$pack.InfixInterpolator(scala.StringContext.apply(..${ parts: List[String] })).infix(..$params)" =>
      Infix(parts, params.map(astParser(_)))
  }
  // Parses anonymous functions (both the structural `new { def apply ... }`
  // form and plain lambdas) into Function nodes.
  val functionParser: Parser[Function] = Parser[Function] {
    case q"new { def apply[..$t1](...$params) = $body }" =>
      Function(params.flatten.map(p => p: Tree).map(identParser(_)), astParser(body))
    case q"(..$params) => $body" =>
      Function(params.map(identParser(_)), astParser(body))
  }
  // Parses the tree shapes that denote an identifier: val definitions, plain
  // idents, `this`-qualified member selections, and wildcard binds from
  // pattern matches. Synthetic `$` characters are stripped via identClean.
  val identParser: Parser[Ident] = Parser[Ident] {
    case t: ValDef => identClean(Ident(t.name.decodedName.toString))
    case c.universe.Ident(TermName(name)) => identClean(Ident(name))
    case q"$cls.this.$i" => identClean(Ident(i.decodedName.toString))
    case c.universe.Bind(TermName(name), c.universe.Ident(termNames.WILDCARD)) =>
      identClean(Ident(name))
  }
  // Removes compiler-generated `$` characters from an identifier name.
  private def identClean(x: Ident): Ident = x.copy(name = x.name.replace("$", ""))
  // Builds a cleaned Ident from a scala-reflect TermName.
  private def ident(x: TermName): Ident = identClean(Ident(x.decodedName.toString))
  // Parses `map`/`forall`/`exists` calls on Option values into
  // OptionOperation nodes, guarded so only trees typed as Option match.
  val optionOperationParser: Parser[OptionOperation] = Parser[OptionOperation] {
    case q"$o.map[$t]({($alias) => $body})" if (is[Option[Any]](o)) =>
      OptionOperation(OptionMap, astParser(o), identParser(alias), astParser(body))
    case q"$o.forall({($alias) => $body})" if (is[Option[Any]](o)) =>
      OptionOperation(OptionForall, astParser(o), identParser(alias), astParser(body))
    case q"$o.exists({($alias) => $body})" if (is[Option[Any]](o)) =>
      OptionOperation(OptionExists, astParser(o), identParser(alias), astParser(body))
  }
  // Parses a member selection `e.property` into a Property node.
  val propertyParser: Parser[Ast] = Parser[Ast] {
    case q"$e.$property" => Property(astParser(e), property.decodedName.toString)
  }
  // Dispatches to the category-specific operation parsers in priority order.
  // Backtick-pattern syntax delegates to each sub-parser's extractor.
  val operationParser: Parser[Operation] = Parser[Operation] {
    case `equalityOperationParser`(value) => value
    case `booleanOperationParser`(value) => value
    case `stringOperationParser`(value) => value
    case `numericOperationParser`(value) => value
    case `setOperationParser`(value) => value
    case `functionApplyParser`(value) => value
  }
  // Builds a parser for unary/binary operators. `cond` restricts the operand
  // tree types (e.g. numeric, boolean); `f` maps operator names to AST
  // operators. Both the parameterless and `()` forms of unary calls match.
  private def operationParser(cond: Tree => Boolean)(
    f: PartialFunction[String, Operator]
  ): Parser[Operation] = {
    // Extractor that recognizes a method name as one of the supported operators.
    object operator {
      def unapply(t: TermName) =
        f.lift(t.decodedName.toString)
    }
    Parser[Operation] {
      case q"$a.${ operator(op: BinaryOperator) }($b)" if (cond(a) && cond(b)) =>
        BinaryOperation(astParser(a), op, astParser(b))
      case q"$a.${ operator(op: UnaryOperator) }" if (cond(a)) =>
        UnaryOperation(op, astParser(a))
      case q"$a.${ operator(op: UnaryOperator) }()" if (cond(a)) =>
        UnaryOperation(op, astParser(a))
    }
  }
  // Parses application of a parsed function value to its arguments.
  val functionApplyParser: Parser[Operation] = Parser[Operation] {
    case q"${ astParser(a) }.apply[..$t](...$values)" => FunctionApply(a, values.flatten.map(astParser(_)))
  }
val equalityOperationParser: Parser[Operation] = Parser[Operation] {
case q"$a.==($b)" =>
checkTypes(a, b)
BinaryOperation(astParser(a), EqualityOperator.`==`, astParser(b))
case q"$a.equals($b)" =>
checkTypes(a, b)
BinaryOperation(astParser(a), EqualityOperator.`==`, astParser(b))
case q"$a.!=($b)" =>
checkTypes(a, b)
BinaryOperation(astParser(a), EqualityOperator.`!=`, astParser(b))
}
  // Boolean operators, restricted to operands typed as Boolean.
  val booleanOperationParser: Parser[Operation] =
    operationParser(is[Boolean](_)) {
      case "unary_!" => BooleanOperator.`!`
      case "&&" => BooleanOperator.`&&`
      case "||" => BooleanOperator.`||`
    }
  // Parses `s"..."` interpolations as a chain of string concatenations,
  // interleaving the literal parts with the parsed parameters and dropping
  // empty literal fragments.
  val stringInterpolationParser: Parser[Ast] = Parser[Ast] {
    case q"scala.StringContext.apply(..$parts).s(..$params)" =>
      val asts =
        Interleave(parts.map(astParser(_)), params.map(astParser(_)))
          .filter(_ != Constant(""))
      asts.tail.foldLeft(asts.head) {
        case (a, b) =>
          BinaryOperation(a, StringOperator.`+`, b)
      }
  }
  // String operators; also accepts StringOps so augmented strings match.
  val stringOperationParser: Parser[Operation] =
    operationParser(t => is[String](t) || is[StringOps](t)) {
      case "+" => StringOperator.`+`
      case "toUpperCase" => StringOperator.`toUpperCase`
      case "toLowerCase" => StringOperator.`toLowerCase`
      case "toLong" => StringOperator.`toLong`
      case "toInt" => StringOperator.`toInt`
    }
  // Numeric operators; an operand qualifies when an implicit Numeric instance
  // exists for its erased type (see isNumeric below).
  val numericOperationParser: Parser[Operation] =
    operationParser(t => isNumeric(c.WeakTypeTag(t.tpe.erasure))) {
      case "unary_-" => NumericOperator.`-`
      case "-" => NumericOperator.`-`
      case "+" => NumericOperator.`+`
      case "*" => NumericOperator.`*`
      case ">" => NumericOperator.`>`
      case ">=" => NumericOperator.`>=`
      case "<" => NumericOperator.`<`
      case "<=" => NumericOperator.`<=`
      case "/" => NumericOperator.`/`
      case "%" => NumericOperator.`%`
    }
val setOperationParser: Parser[Operation] = {
val unary =
operationParser(is[CoreDsl#Query[Any]](_)) {
case "isEmpty" => SetOperator.`isEmpty`
case "nonEmpty" => SetOperator.`nonEmpty`
}
Parser[Operation] {
case q"$a.contains[$t]($b)" if (is[CoreDsl#Query[Any]])(a) =>
BinaryOperation(astParser(a), SetOperator.`contains`, astParser(b))
case unary(op) => op
}
}
  // True when an implicit `Numeric[T]` can be resolved for the given type.
  private def isNumeric[T: WeakTypeTag] =
    c.inferImplicitValue(c.weakTypeOf[Numeric[T]]) != EmptyTree
  // True when the tree's type conforms to T.
  private def is[T](tree: Tree)(implicit t: TypeTag[T]) =
    tree.tpe <:< t.tpe
  // Parses literal values: null, compile-time constants, tuples of two or
  // more elements, and `a -> b` pairs (desugared via ArrowAssoc) as 2-tuples.
  val valueParser: Parser[Value] = Parser[Value] {
    case q"null" => NullValue
    case Literal(c.universe.Constant(v)) => Constant(v)
    case q"((..$v))" if (v.size > 1) => Tuple(v.map(astParser(_)))
    case q"(($pack.Predef.ArrowAssoc[$t1]($v1).$arrow[$t2]($v2)))" => Tuple(List(astParser(v1), astParser(v2)))
  }
  // Parses the action DSL: update, insert, delete, returning and foreach.
  val actionParser: Parser[Ast] = Parser[Ast] {
    // `update` is matched by name through a guard rather than a literal
    // `q"$query.update(..)"` pattern — presumably because `update` has special
    // desugaring in Scala trees; confirm before changing.
    case q"$query.$method(..$assignments)" if (method.decodedName.toString == "update") =>
      Update(astParser(query), assignments.map(assignmentParser(_)))
    case q"$query.insert(..$assignments)" =>
      Insert(astParser(query), assignments.map(assignmentParser(_)))
    case q"$query.delete" =>
      Delete(astParser(query))
    case q"$action.returning[$r](($alias) => $body)" =>
      Returning(astParser(action), identParser(alias), astParser(body))
    case tree @ q"$query.foreach[$t](($alias) => $body)" if (is[CoreDsl#Query[Any]](query)) =>
      Foreach(astParser(query), identParser(alias), astParser(body))
  }
  // Parses an assignment lambda `v => v.prop -> value` into an Assignment,
  // requiring the property receiver to be the lambda's own parameter.
  private val assignmentParser: Parser[Assignment] = Parser[Assignment] {
    case q"((${ identParser(i1) }) => $pack.Predef.ArrowAssoc[$t]($prop).$arrow[$v]($value))" =>
      checkTypes(prop, value)
      prop match {
        case q"${ identParser(i2) }.$prop" if (i1 == i2) =>
          Assignment(i1, Property(i2, prop.decodedName.toString), astParser(value))
        case prop =>
          c.fail(s"Invalid assignment property: '$prop'")
      }
    // Unused, it's here only to make eclipse's presentation compiler happy
    case astParser(ast) => Assignment(Ident("unused"), Ident("unused"), Constant("unused"))
  }
  // Verifies that `lhs` and `rhs` have compatible types by typechecking a
  // synthetic polymorphic application against both; reports a compile error
  // (without aborting) when the check fails. Quoted values are unwrapped first.
  private def checkTypes(lhs: Tree, rhs: Tree): Unit = {
    def unquoted(tree: Tree) =
      is[CoreDsl#Quoted[Any]](tree) match {
        case false => tree
        case true => q"unquote($tree)"
      }
    // Fresh type name so the synthetic method cannot capture anything in scope.
    val t = TypeName(c.freshName("T"))
    try c.typecheck(
      q"""
        def apply[$t](lhs: $t)(rhs: $t) = ()
        apply(${unquoted(lhs)})($rhs)
      """,
      c.TYPEmode
    ) catch {
      case t: TypecheckException => c.error(t.msg)
    }
    ()
  }
}
| jcranky/quill | quill-core/src/main/scala/io/getquill/quotation/Parsing.scala | Scala | apache-2.0 | 19,357 |
package org.webant.worker.config
import java.io.File
import org.apache.commons.io.FileUtils
import org.apache.commons.lang3.StringUtils
import org.apache.log4j.LogManager
import org.webant.commons.entity.SiteConfig
import org.webant.commons.entity.SiteConfig.{HttpConfig, LinkProvider, ProcessorConfig, StoreProvider}
import org.webant.commons.utils.JsonUtils
import org.webant.worker.store.StoreFactory
/** Fluent builder for [[SiteConfig]]: load a JSON config file and/or set
  * individual fields, then call [[build]] to validate and obtain the result.
  */
class SiteConfigBuilder {
  private val logger = LogManager.getLogger(classOf[SiteConfigBuilder])
  private var siteConfig = new SiteConfig
  /** Loads the site config from a JSON file at `path`.
    * Requires the path to be non-blank and to point at an existing regular
    * file; a blank file is silently ignored (the current config is kept).
    * Side effect: registers each configured processor with [[StoreFactory]].
    */
  def loadSiteConfig(path: String): SiteConfigBuilder = {
    require(StringUtils.isNotBlank(path), "site config path can not be empty.")
    val file = new File(path)
    require(file.exists(), "site config does not exists.")
    require(!file.isDirectory, "site config can not be a directory.")
    val content = FileUtils.readFileToString(file, "UTF-8")
    if (StringUtils.isNotBlank(content)) {
      siteConfig = JsonUtils.fromJson(content, classOf[SiteConfig])
      siteConfig.processors.foreach(StoreFactory.load)
      logger.info(s"loading site config ${siteConfig.id}(${siteConfig.name}) from ${file.getAbsolutePath}")
    }
    this
  }
  /** Validates the accumulated config (id, seeds and processors must be set)
    * and returns it. Throws IllegalArgumentException on a missing field.
    */
  def build(): SiteConfig = {
    require(StringUtils.isNotBlank(siteConfig.id), "id can not be empty!")
    require(siteConfig.seeds != null && siteConfig.seeds.nonEmpty, "seeds can not be empty!")
    require(siteConfig.processors != null && siteConfig.processors.nonEmpty, "processors can not be empty!")
    siteConfig
  }
  // Fluent setters below: each assigns one field and returns this builder.
  def id(id: String): SiteConfigBuilder = {
    siteConfig.id = id
    this
  }
  def name(name: String): SiteConfigBuilder = {
    siteConfig.name = name
    this
  }
  def description(description: String): SiteConfigBuilder = {
    siteConfig.description = description
    this
  }
  def seeds(seeds: Array[String]): SiteConfigBuilder = {
    siteConfig.seeds = seeds
    this
  }
  def priority(priority: Integer): SiteConfigBuilder = {
    siteConfig.priority = priority
    this
  }
  def interval(interval: Long): SiteConfigBuilder = {
    siteConfig.setTimeInterval(interval)
    this
  }
  def http(http: HttpConfig): SiteConfigBuilder = {
    siteConfig.http = http
    this
  }
  def linkProvider(linkProvider: LinkProvider): SiteConfigBuilder = {
    siteConfig.linkProvider = linkProvider
    this
  }
  def processors(processors: Array[ProcessorConfig]): SiteConfigBuilder = {
    siteConfig.processors = processors
    this
  }
}
/** Fluent builder for [[HttpConfig]]. */
class HttpConfigBuilder {
  private val httpConfig = new HttpConfig
  // NOTE(review): the public vars below are never read or written by this
  // builder — all setters go straight to httpConfig. They look like dead
  // code; confirm nothing reflects over them before removing.
  var method: String = _
  var connectTimeout: Int = _
  var socketTimeout: Int = _
  var encoding: String = _
  var retryTimes: Int = _
  var cycleRetryTimes: Int = _
  var contentType: String = _
  var proxy: Boolean = _
  var headers: java.util.Map[String, String] = _
  /** Returns the accumulated config (no validation is performed). */
  def build(): HttpConfig = {
    httpConfig
  }
  // Fluent setters below: each delegates to httpConfig and returns this builder.
  def method(method: String): HttpConfigBuilder = {
    httpConfig.setMethod(method)
    this
  }
  def connectTimeout(connectTimeout: Integer): HttpConfigBuilder = {
    httpConfig.setConnectTimeout(connectTimeout)
    this
  }
  def socketTimeout(socketTimeout: Integer): HttpConfigBuilder = {
    httpConfig.setSocketTimeout(socketTimeout)
    this
  }
  def encoding(encoding: String): HttpConfigBuilder = {
    httpConfig.setEncoding(encoding)
    this
  }
  def retryTimes(retryTimes: Integer): HttpConfigBuilder = {
    httpConfig.setRetryTimes(retryTimes)
    this
  }
  def cycleRetryTimes(cycleRetryTimes: Integer): HttpConfigBuilder = {
    httpConfig.setCycleRetryTimes(cycleRetryTimes)
    this
  }
  def contentType(contentType: String): HttpConfigBuilder = {
    httpConfig.setContentType(contentType)
    this
  }
  def proxy(proxy: Boolean): HttpConfigBuilder = {
    httpConfig.setProxy(proxy)
    this
  }
  def headers(headers: java.util.Map[String, String]): HttpConfigBuilder = {
    httpConfig.setHeaders(headers)
    this
  }
}
/** Fluent builder for [[ProcessorConfig]]. */
class PageProcessorBuilder {
  private val processorConfig = new ProcessorConfig
  // NOTE(review): these public vars are never used by the builder (setters go
  // straight to processorConfig), and `store` here is declared with a
  // different type (Array[java.util.Map[String, String]]) than the store(...)
  // setter accepts (Array[StoreProvider]) — likely leftover dead code; confirm.
  var regex: String = _
  var http: HttpConfig = _
  var className: String = _
  var store: Array[java.util.Map[String, String]] = _
  /** Returns the accumulated config; requires `regex` to have been set. */
  def build(): ProcessorConfig = {
    require(StringUtils.isNotBlank(processorConfig.getRegex))
    processorConfig
  }
  // Fluent setters below: each delegates to processorConfig and returns this.
  def http(http: HttpConfig): PageProcessorBuilder = {
    processorConfig.setHttp(http)
    this
  }
  def regex(regex: String): PageProcessorBuilder = {
    processorConfig.setRegex(regex)
    this
  }
  def className(className: String): PageProcessorBuilder = {
    processorConfig.setClassName(className)
    this
  }
  def store(store: Array[StoreProvider]): PageProcessorBuilder = {
    processorConfig.setStore(store)
    this
  }
}
/** Fluent builder for [[LinkProvider]]. */
class LinkProviderBuilder {
  private val linkProvider = new LinkProvider
  // NOTE(review): these public vars are never used by the builder — likely
  // dead code, kept here pending confirmation.
  var className: String = _
  var params: java.util.Map[String, Object] = _
  /** Returns the accumulated provider; requires `className` to have been set. */
  def build(): LinkProvider = {
    require(StringUtils.isNotBlank(linkProvider.getClassName))
    linkProvider
  }
  def className(className: String): LinkProviderBuilder = {
    linkProvider.setClassName(className)
    this
  }
  def params(params: java.util.Map[String, Object]): LinkProviderBuilder = {
    linkProvider.setParams(params)
    this
  }
}
| sutine/webant | webant-worker/src/main/scala/org/webant/worker/config/SiteConfigBuilder.scala | Scala | apache-2.0 | 5,230 |
package com.chatwork.sbt.aws.core
import java.io.File
import com.amazonaws.AmazonWebServiceClient
import com.amazonaws.auth.profile.ProfileCredentialsProvider
import com.amazonaws.auth.{ AWSCredentialsProviderChain, EnvironmentVariableCredentialsProvider, InstanceProfileCredentialsProvider, SystemPropertiesCredentialsProvider }
import com.amazonaws.regions.Region
import org.apache.commons.codec.digest.DigestUtils
import org.sisioh.config.{ Configuration => SisiohConfiguration }
import sbt._
object SbtAwsCore extends SbtAwsCore
trait SbtAwsCore {
  /** Builds a credentials provider chain that checks, in order: environment
    * variables, JVM system properties, the named shared-credentials profile
    * (default profile when `profileName` is None), and EC2 instance profile
    * credentials.
    */
  protected def newCredentialsProvider(profileName: Option[String]) = {
    new AWSCredentialsProviderChain(
      new EnvironmentVariableCredentialsProvider(),
      new SystemPropertiesCredentialsProvider(),
      new ProfileCredentialsProvider(profileName.orNull),
      new InstanceProfileCredentialsProvider()
    )
  }
  /** Creates an AWS service client of the given class for `region`, using the
    * credentials chain from [[newCredentialsProvider]] (null client config).
    */
  protected def createClient[A <: AmazonWebServiceClient](serviceClass: Class[A], region: Region, profileName: Option[String]): A = {
    region.createClient(serviceClass, newCredentialsProvider(profileName), null)
  }
  /** Hex-encoded MD5 digest of the file's contents (reads the whole file). */
  protected def md5(file: File): String =
    DigestUtils.md5Hex(IO.readBytes(file))
def getConfigValuesAsSeq[A](clazz: Class[A], config: SisiohConfiguration, key: String, defaultValue: Seq[A]): Seq[A] = {
clazz match {
case x if x == classOf[String] =>
config.getStringValues(key).getOrElse(defaultValue).asInstanceOf[Seq[A]]
case x if x == classOf[Int] =>
config.getIntValues(key).getOrElse(defaultValue).asInstanceOf[Seq[A]]
case x if x == classOf[Boolean] =>
config.getBooleanValues(key).getOrElse(defaultValue).asInstanceOf[Seq[A]]
case x if x == classOf[Byte] =>
config.getByteValues(key).getOrElse(defaultValue).asInstanceOf[Seq[A]]
case x if x == classOf[Long] =>
config.getLongValues(key).getOrElse(defaultValue).asInstanceOf[Seq[A]]
case x if x == classOf[Double] =>
config.getDoubleValues(key).getOrElse(defaultValue).asInstanceOf[Seq[A]]
}
}
def getConfigValueOpt[A](clazz: Class[A], config: SisiohConfiguration, key: String): Option[A] = {
clazz match {
case x if x == classOf[String] =>
config.getStringValue(key).asInstanceOf[Option[A]]
case x if x == classOf[Int] =>
config.getIntValue(key).asInstanceOf[Option[A]]
case x if x == classOf[Boolean] =>
config.getBooleanValue(key).asInstanceOf[Option[A]]
case x if x == classOf[Byte] =>
config.getByteValue(key).asInstanceOf[Option[A]]
case x if x == classOf[Long] =>
config.getLongValue(key).asInstanceOf[Option[A]]
case x if x == classOf[Double] =>
config.getDoubleValue(key).asInstanceOf[Option[A]]
}
}
  /** Like [[getConfigValueOpt]], but falls back to `defaultValue` when absent. */
  def getConfigValue[A](clazz: Class[A], config: SisiohConfiguration, key: String, defaultValue: A) =
    getConfigValueOpt(clazz, config, key).getOrElse(defaultValue)
def getConfigValuesAsMap(config: SisiohConfiguration, key: String): Map[String, String] = {
config.getConfiguration(key)
.map(_.entrySet.map { case (k, v) => (k, v.unwrapped().toString) }.toMap).getOrElse(Map.empty)
}
} | yoshiyoshifujii/sbt-aws | sbt-aws-core/src/main/scala/com/chatwork/sbt/aws/core/SbtAwsCore.scala | Scala | mit | 3,170 |
package com.yannick_cw.elastic_indexer4s.elasticsearch.index_ops
import cats.data.EitherT
import cats.implicits._
import com.sksamuel.elastic4s.http.ElasticDsl.{addAlias, removeAlias, search, _}
import com.sksamuel.elastic4s.http.settings.IndexSettingsResponse
import com.sksamuel.elastic4s.http.{ElasticClient, Response}
import com.yannick_cw.elastic_indexer4s.Index_results.IndexError
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
case class IndexWithInfo(index: String, aliases: List[String], creationTime: Long)
/** Alias/index management operations, expressed as `EitherT[Future, IndexError, _]`
  * so failures short-circuit when composed in for-comprehensions.
  */
trait EsOpsClientApi {
  type OpsResult[A] = EitherT[Future, IndexError, A]
  def removeAliasFromIndex(index: String, alias: String): OpsResult[Boolean]
  def addAliasToIndex(index: String, alias: String): OpsResult[Boolean]
  def sizeFor(index: String): OpsResult[Long]
  def delete(index: String): OpsResult[Boolean]
  def allIndicesWithAliasInfo: OpsResult[List[IndexWithInfo]]
  /** All indices carrying `alias`, sorted oldest-first by creation time. */
  def indicesByAgeFor(alias: String): OpsResult[List[String]] =
    for {
      indices <- allIndicesWithAliasInfo
    } yield indices.filter(_.aliases.contains(alias)).sortBy(_.creationTime).map(_.index)
  /** Size of the newest index carrying `alias`; None when no index has it. */
  def latestIndexWithAliasSize(alias: String): OpsResult[Option[Long]] =
    for {
      indices <- indicesByAgeFor(alias)
      size <- indices.lastOption.traverse(sizeFor)
    } yield size
  /** Removes `alias` from the oldest index carrying it, if one exists. */
  def removeAliasFromOldestIfExists(alias: String): OpsResult[Option[Boolean]] =
    for {
      indices <- indicesByAgeFor(alias)
      optRemoved <- indices.headOption.traverse(removeAliasFromIndex(_, alias))
    } yield optRemoved
  /** Moves `alias` to `index`: removes it from the oldest holder (if any),
    * then adds it to `index`. None means no previous holder existed.
    */
  def switchAliasToIndex(alias: String, index: String): OpsResult[Option[Boolean]] =
    for {
      rSuccess <- removeAliasFromOldestIfExists(alias)
      aSuccess <- addAliasToIndex(index, alias)
    } yield rSuccess.map(_ && aSuccess)
}
/** [[EsOpsClientApi]] implementation backed by an elastic4s [[ElasticClient]]. */
class EsOpsClient(client: ElasticClient) extends EsOpsClientApi {
  // Lifts an elastic4s response future into an OpsResult; `fold`'s first
  // argument is the failure branch, the second the success branch.
  implicit class WithEitherTResult[A](f: Future[Response[A]]) {
    def opsResult: OpsResult[A] =
      EitherT(
        f.map(response =>
          response.fold[Either[IndexError, A]](
            // Fixed error text: this helper wraps every operation (delete,
            // alias changes, search), not only index creation, so the old
            // "Index creation failed" message was misleading.
            Left(IndexError(s"Elasticsearch operation failed with error: ${response.error}")))(Right(_))))
    def opsResult[B](to: A => B): OpsResult[B] = opsResult.map(to)
  }
  /** Deletes the given index; true when the cluster acknowledged. */
  def delete(index: String): OpsResult[Boolean] =
    client.execute(deleteIndex(index)).opsResult(_.acknowledged)
  // Extracts an index's creation timestamp from a settings response, if present.
  private def indexCreationDate(indexName: String, response: IndexSettingsResponse): Option[Long] =
    for {
      indexSettings <- response.settings.get(indexName)
      creationDate  <- indexSettings.get("index.creation_date")
    } yield creationDate.toLong
  /** Lists all indices with their aliases and creation dates; indices whose
    * creation date cannot be resolved from the settings are dropped.
    */
  def allIndicesWithAliasInfo: OpsResult[List[IndexWithInfo]] =
    for {
      aliases  <- client.execute(getAliases()).opsResult
      settings <- client.execute(getSettings(aliases.mappings.keys.map(_.name))).opsResult
    } yield
      aliases.mappings
        .map {
          case (index, aliasi) =>
            indexCreationDate(index.name, settings).map(date =>
              IndexWithInfo(index.name, aliasi.toList.map(_.name), date))
        }
        .collect { case Some(x) => x }
        .toList
  /** Removes `alias` from `index`; true when acknowledged. */
  def removeAliasFromIndex(index: String, alias: String): OpsResult[Boolean] =
    client.execute(removeAlias(alias) on index).opsResult(_.acknowledged)
  /** Adds `alias` to `index`; true when acknowledged. */
  def addAliasToIndex(index: String, alias: String): OpsResult[Boolean] =
    client.execute(addAlias(alias) on index).opsResult(_.acknowledged)
  /** Document count of `index` (search with size 0, reading totalHits). */
  def sizeFor(index: String): OpsResult[Long] =
    client.execute(search(index) size 0).opsResult(_.totalHits)
}
/** Factory for [[EsOpsClient]]. */
object EsOpsClient {
  def apply(client: ElasticClient): EsOpsClient = new EsOpsClient(client)
}
| yannick-cw/elastic-indexer4s | src/main/scala/com/yannick_cw/elastic_indexer4s/elasticsearch/index_ops/EsOpsClientApi.scala | Scala | mit | 3,652 |
/*
* Copyright (C) 2014 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.cassandra.lucene.index
import java.nio.file.Path
import org.apache.cassandra.io.util.FileUtils
import org.apache.lucene.analysis.Analyzer
import org.apache.lucene.document.Document
import org.apache.lucene.index._
import org.apache.lucene.search._
import org.apache.lucene.store.{Directory, FSDirectory, NRTCachingDirectory}
/** Class wrapping a Lucene file system-based directory and its readers, writers and searchers.
*
* @param name the index name
* @param path the directory path
* @param analyzer the index writer analyzer
* @param refreshSeconds the index reader refresh frequency in seconds
* @param ramBufferMB the index writer RAM buffer size in MB
* @param maxMergeMB the directory max merge size in MB
* @param maxCachedMB the directory max cache size in MB
* @author Andres de la Pena `adelapena@stratio.com`
*/
class FSIndex(
    name: String,
    path: Path,
    analyzer: Analyzer,
    refreshSeconds: Double,
    ramBufferMB: Int,
    maxMergeMB: Int,
    maxCachedMB: Int) {
  // All mutable state is assigned once in init(); callers must invoke init()
  // before any other method.
  private[this] var mergeSort: Sort = _
  private[this] var fields: java.util.Set[String] = _
  private[this] var directory: Directory = _
  private[this] var writer: IndexWriter = _
  private[this] var manager: SearcherManager = _
  private[this] var reopener: ControlledRealTimeReopenThread[IndexSearcher] = _
  /** Initializes this index with the specified merge sort and fields to be loaded.
    *
    * @param mergeSort the sort to be applied to the index during merges
    * @param fields the names of the document fields to be loaded
    */
  def init(mergeSort: Sort, fields: java.util.Set[String]) {
    this.mergeSort = mergeSort
    this.fields = fields
    // Open or create directory (NRT cache in front of the FS directory)
    directory = new NRTCachingDirectory(FSDirectory.open(path), maxMergeMB, maxCachedMB)
    // Setup index writer; segments are kept sorted by mergeSort during merges
    val indexWriterConfig = new IndexWriterConfig(analyzer)
    indexWriterConfig.setRAMBufferSizeMB(ramBufferMB)
    indexWriterConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND)
    indexWriterConfig.setUseCompoundFile(true)
    indexWriterConfig.setMergePolicy(new SortingMergePolicy(new TieredMergePolicy, mergeSort))
    writer = new IndexWriter(directory, indexWriterConfig)
    // Setup NRT search: searchers score without IDF (see NoIDFSimilarity)
    val searcherFactory: SearcherFactory = new SearcherFactory {
      override def newSearcher(reader: IndexReader, previousReader: IndexReader): IndexSearcher = {
        val searcher = new IndexSearcher(reader)
        searcher.setSimilarity(new NoIDFSimilarity)
        searcher
      }
    }
    // Background thread reopens searchers every refreshSeconds for NRT visibility
    val tracker = new TrackingIndexWriter(writer)
    manager = new SearcherManager(writer, true, searcherFactory)
    reopener = new ControlledRealTimeReopenThread(tracker, manager, refreshSeconds, refreshSeconds)
    reopener.start()
  }
  // Runs `f` with an acquired searcher, always releasing it afterwards.
  private[this] def doWithSearcher[A](f: IndexSearcher => A): A = {
    val searcher = manager.acquire
    try f.apply(searcher) finally manager.release(searcher)
  }
  /** The NRT searcher manager backing this index. */
  def searcherManager: SearcherManager = manager
  /** Upserts the specified document by first deleting the documents containing the specified term
    * and then adding the new document. The delete and then add are atomic as seen by a reader on
    * the same index (flush may happen only after the addition).
    *
    * @param term the term to identify the document(s) to be deleted
    * @param document the document to be added
    */
  def upsert(term: Term, document: Document) {
    writer.updateDocument(term, document)
  }
  /** Deletes all the documents containing the specified term.
    *
    * @param term the term identifying the documents to be deleted
    */
  def delete(term: Term) {
    writer.deleteDocuments(term)
  }
  /** Deletes all the documents satisfying the specified query.
    *
    * @param query the query identifying the documents to be deleted
    */
  def delete(query: Query) {
    writer.deleteDocuments(query)
  }
  /** Deletes all the documents. */
  def truncate() {
    writer.deleteAll()
    writer.commit()
  }
  /** Commits the pending changes. */
  def commit() {
    writer.commit()
  }
  /** Commits all changes to the index, waits for pending merges to complete, and closes all
    * associated resources. Close order: reopener, manager, writer, directory.
    */
  def close() {
    reopener.close()
    manager.close()
    writer.close()
    directory.close()
  }
  /** Closes the index and removes all its files. */
  def delete() {
    try close() finally FileUtils.deleteRecursive(path.toFile)
  }
  /** Returns the total number of documents in this index.
    *
    * @return the number of documents
    */
  def getNumDocs: Int = {
    doWithSearcher(searcher => searcher.getIndexReader.numDocs)
  }
  /** Returns the total number of deleted documents in this index.
    *
    * @return the number of deleted documents
    */
  def getNumDeletedDocs: Int = {
    doWithSearcher(searcher => searcher.getIndexReader.numDeletedDocs)
  }
  /** Optimizes the index forcing merge segments leaving the specified number of segments.
    * This operation may block until all merging completes.
    *
    * @param maxNumSegments the maximum number of segments left in the index after merging finishes
    * @param doWait `true` if the call should block until the operation completes
    */
  def forceMerge(maxNumSegments: Int, doWait: Boolean) {
    writer.forceMerge(maxNumSegments, doWait)
    writer.commit()
  }
  /** Optimizes the index forcing merge of all segments that have deleted documents.
    * This operation may block until all merging completes.
    *
    * @param doWait `true` if the call should block until the operation completes
    */
  def forceMergeDeletes(doWait: Boolean) {
    writer.forceMergeDeletes(doWait)
    writer.commit()
  }
  /** Refreshes the index readers, blocking until the refresh completes. */
  def refresh() {
    manager.maybeRefreshBlocking()
  }
}
/** Companion object for [[FSIndex]]. */
/** Companion object for [[FSIndex]]. */
object FSIndex {
  // Disable max boolean query clauses limit (global Lucene setting; runs once
  // when this object is first referenced).
  BooleanQuery.setMaxClauseCount(Integer.MAX_VALUE)
}
| adelapena/cassandra-lucene-index | plugin/src/main/scala/com/stratio/cassandra/lucene/index/FSIndex.scala | Scala | apache-2.0 | 6,672 |
/*
* Copyright 1998-2015 Linux.org.ru
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ru.org.linux.user
import java.sql.ResultSet
import javax.sql.DataSource
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.jdbc.core.RowMapper
import org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate
import org.springframework.scala.jdbc.core.JdbcTemplate
import org.springframework.stereotype.Repository
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
@Repository
class RemarkDao @Autowired() (ds:DataSource) {
private val jdbcTemplate = new JdbcTemplate(ds)
private val namedTemplate = new NamedParameterJdbcTemplate(jdbcTemplate.javaTemplate)
def remarkCount(user: User):Int = {
val count:Option[Int] = jdbcTemplate.queryForObject[Integer](
"SELECT count(*) as c FROM user_remarks WHERE user_id=?",
user.getId).map(_.toInt)
count.getOrElse(0)
}
def hasRemarks(user: User):Boolean = remarkCount(user) > 0
  /**
   * Fetches the remark written by `user` about `ref`, if any.
   * @param user logged user
   * @param ref user the remark is about
   */
  def getRemark(user: User, ref: User): Option[Remark] = {
    jdbcTemplate.queryAndMap("SELECT id, ref_user_id, remark_text FROM user_remarks WHERE user_id=? AND ref_user_id=?", user.getId, ref.getId) { (rs, _) =>
      new Remark(rs)
    }.headOption
  }
def getRemarks(user: User, refs:java.lang.Iterable[User]):java.util.Map[Integer, Remark] = {
val r:Map[Integer, Remark] = if (refs.isEmpty) {
Map.empty
} else {
namedTemplate.query(
"SELECT id, ref_user_id, remark_text FROM user_remarks WHERE user_id=:user AND ref_user_id IN (:list)",
Map("list" -> refs.map(_.getId).toSeq.asJavaCollection, "user" -> user.getId),
new RowMapper[(Integer, Remark)]() {
override def mapRow(rs: ResultSet, rowNum: Int) = {
val remark = new Remark(rs)
Integer.valueOf(remark.getRefUserId) -> remark
}
}
).toMap
}
r.asJava
}
private def setRemark(user: User, ref: User, text: String):Unit = {
if (text.nonEmpty) {
jdbcTemplate.update("INSERT INTO user_remarks (user_id,ref_user_id,remark_text) VALUES (?,?,?)", user.getId, ref.getId, text)
}
}
private def updateRemark(id: Int, text: String):Unit = {
if (text.isEmpty) {
jdbcTemplate.update("DELETE FROM user_remarks WHERE id=?", id)
} else {
jdbcTemplate.update("UPDATE user_remarks SET remark_text=? WHERE id=?", text, id)
}
}
/**
* Сохранить или обновить комментарий пользователя user о ref.
* Если комментарий нулевой длины - он удаляется из базы
*
* @param user logged user
* @param ref user
* @param text текст комментария
*/
def setOrUpdateRemark(user: User, ref: User, text: String) = {
getRemark(user, ref) match {
case Some(remark) ⇒ updateRemark(remark.getId, text)
case None ⇒ setRemark(user, ref, text)
}
}
/**
* Получить комментарии пользователя user
* @param user logged user
*/
def getRemarkList(user: User, offset: Int, sortorder: Int, limit: Int): java.util.List[Remark] = {
val qs = if (sortorder == 1) {
"SELECT id, ref_user_id, remark_text FROM user_remarks WHERE user_id=? ORDER BY remark_text ASC LIMIT ? OFFSET ?"
} else {
"SELECT user_remarks.id as id, user_remarks.user_id as user_id, user_remarks.ref_user_id as ref_user_id, user_remarks.remark_text as remark_text FROM user_remarks, users WHERE user_remarks.user_id=? AND users.id = user_remarks.ref_user_id ORDER BY users.nick ASC LIMIT ? OFFSET ?"
}
jdbcTemplate.queryAndMap(qs, user.getId, limit, offset) { (rs, _) ⇒ new Remark(rs) }
}
}
| ymn/lorsource | src/main/scala/ru/org/linux/user/RemarkDao.scala | Scala | apache-2.0 | 4,477 |
// goseumdochi: experiments with incarnation
// Copyright 2016 John V. Sichi
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.goseumdochi.vision
import org.goseumdochi.common._
import org.bytedeco.javacpp._
import org.bytedeco.javacpp.opencv_core._
import org.bytedeco.javacpp.helper.opencv_core._
import org.bytedeco.javacpp.opencv_imgproc._
import scala.collection._
import archery.RTree
import archery.Entry
import archery.Box
object BlobAnalysis
{
  /** Predicate deciding whether a detected bounding rectangle should be kept. */
  trait BlobFilter
  {
    def apply(rect : Rect) : Boolean
  }

  /** Ordering, anchoring and merging strategy for the rectangles that survive filtering. */
  trait BlobSorter
  {
    def compare(rect1 : Rect, rect2 : Rect) : Int
    def getAnchor(rect : Rect) : RetinalPos
    def merge(rects : Seq[Rect]) : Seq[Rect]
  }

  /** Keeps rectangles whose sides exceed the two thresholds in either orientation. */
  class IgnoreMedium(threshold1 : Int, threshold2 : Int) extends BlobFilter
  {
    override def apply(rect : Rect) : Boolean = {
      val w = rect.size.width
      val h = rect.size.height
      ((w > threshold1) && (h > threshold2)) || ((w > threshold2) && (h > threshold1))
    }
  }

  /** Keeps only rectangles strictly larger than the threshold in both dimensions. */
  class IgnoreSmall(threshold : Int) extends BlobFilter
  {
    override def apply(rect : Rect) : Boolean = {
      val size = rect.size
      (size.width > threshold) && (size.height > threshold)
    }
  }

  /** Keeps only rectangles strictly smaller than the threshold in both dimensions. */
  class IgnoreLarge(threshold : Int) extends BlobFilter
  {
    override def apply(rect : Rect) : Boolean = {
      val size = rect.size
      (size.width < threshold) && (size.height < threshold)
    }
  }

  /** Keeps rectangles that are either small enough or large enough, dropping the middle band. */
  class IgnoreExtremes(lower : Int, upper : Int) extends BlobFilter
  {
    val ignoreSmall = new IgnoreSmall(lower)
    val ignoreLarge = new IgnoreLarge(upper)

    override def apply(rect : Rect) : Boolean =
      if (ignoreSmall(rect)) true else ignoreLarge(rect)
  }

  /** Pass-through filter: accepts every rectangle. */
  object KeepAll extends BlobFilter
  {
    override def apply(rect : Rect) : Boolean = true
  }

  /** Human-readable rendering of a rectangle as its two corner points. */
  def rectToString(r : Rect) =
    s"{(${r.tl.x}, ${r.tl.y}), (${r.br.x}, ${r.br.y})}"
}
import BlobAnalysis._
trait BlobSizeSorterTrait extends BlobSorter
{
  // Larger rectangles order first: the comparison key is the area difference.
  override def compare(rect1 : Rect, rect2 : Rect) = {
    val areaDiff = rect2.area - rect1.area
    areaDiff.toInt
  }

  // Anchor a rectangle at the midpoint of its diagonal.
  override def getAnchor(rect : Rect) = {
    val cx = (rect.tl.x + rect.br.x) / 2
    val cy = (rect.tl.y + rect.br.y) / 2
    RetinalPos(cx, cy)
  }

  // Size sorting performs no merging; rectangles pass through untouched.
  override def merge(rects : Seq[Rect]) = rects
}

/** Default size-based sorter instance. */
object BlobSizeSorter extends BlobSizeSorterTrait
/**
 * Size sorter that additionally merges rectangles lying within `proximity`
 * pixels of each other into their common bounding box.
 */
class BlobProximityMerger(proximity : Float) extends BlobSizeSorterTrait
{
  // Bounding-box union of two rectangles.
  private def mergeTwoOverlap(r1 : Rect, r2 : Rect) : Rect = {
    val left   = Math.min(r1.tl.x, r2.tl.x)
    val top    = Math.min(r1.tl.y, r2.tl.y)
    val right  = Math.max(r1.br.x, r2.br.x)
    val bottom = Math.max(r1.br.y, r2.br.y)
    new Rect(new Point(left, top), new Point(right, bottom))
  }

  // Bounding-box union of a non-empty group of rectangles.
  private def mergeManyOverlap(rects : Iterable[Rect]) : Rect =
    rects.reduce(mergeTwoOverlap)

  override def merge(rects : Seq[Rect]) =
  {
    // Index every rectangle in an R-tree for fast neighborhood queries.
    val rtree = RTree(rects.map(r => Entry(Box(r.tl.x, r.tl.y, r.br.x, r.br.y), r)):_*)
    // Union-find structure grouping rectangles that are near each other.
    val groups = DisjointSet(rects:_*)
    rects.foreach { r =>
      // Grow the query box by the proximity margin on every side.
      val query = Box(
        r.tl.x - proximity, r.tl.y - proximity,
        r.br.x + proximity, r.br.y + proximity)
      rtree.searchIntersection(query).foreach { entry =>
        groups.union(r, entry.value)
      }
    }
    // Collapse each connected group into a single bounding box.
    rects.groupBy(r => groups.toNode(r).root).toSeq.map {
      case (_, members) => mergeManyOverlap(members)
    }
  }
}
/**
 * Mixin providing connected-component ("blob") extraction over an OpenCV image.
 * Owns a native memory-storage buffer that must be released via close().
 */
trait BlobAnalyzer extends VisionAnalyzer
{
  // Native OpenCV scratch storage reused across calls; released in close().
  private val storage = AbstractCvMemStorage.create

  /**
   * Finds contours in `img`, converts each to its bounding rectangle,
   * keeps those accepted by `blobFilter`, and returns the result of
   * `blobMerger.merge` over the kept rectangles.
   *
   * NOTE(review): `img` is dilated in place before contour detection, so the
   * caller's image is mutated by this method.
   */
  protected def analyzeBlobs(
    img : IplImage,
    blobFilter : BlobFilter,
    blobMerger : BlobSorter)
      : Seq[Rect] =
  {
    cvClearMemStorage(storage)
    var contour = new CvSeq(null)
    // Dilate to close small gaps so nearby pixels form single contours.
    cvDilate(img, img, null, 2)
    cvFindContours(img, storage, contour, Loader.sizeof(classOf[CvContour]),
      CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0))
    val rawDebugger = newDebugger(img)
    val rects = new mutable.ArrayBuffer[Rect]
    // Walk the native linked list of contours (h_next) until exhausted.
    while (contour != null && !contour.isNull) {
      if (contour.elem_size > 0) {
        val cvRect = cvBoundingRect(contour)
        val rect = new Rect(cvRect.x, cvRect.y, cvRect.width, cvRect.height)
        if (blobFilter(rect)) {
          // Visualize every accepted rectangle on the debug overlay.
          rawDebugger { overlay =>
            overlay.drawRectangle(
              OpenCvUtil.pos(rect.tl),
              OpenCvUtil.pos(rect.br),
              NamedColor.WHITE, 2)
          }
          rects += rect
        }
      }
      contour = contour.h_next()
    }
    if (rects.isEmpty) {
      return Seq.empty
    }
    blobMerger.merge(rects)
  }

  /** Releases the native OpenCV storage buffer. */
  override def close()
  {
    storage.release
  }
}
| lingeringsocket/goseumdochi | base/src/main/scala/org/goseumdochi/vision/BlobAnalyzer.scala | Scala | apache-2.0 | 4,966 |
package macrolog
import macrolog.TraceQualifier.DefinedTrace
/**
* @author Maksim Ochenashko
*/
/** Marker trait for values that can be attached to log statements as context. */
trait LoggingContext

/** Logging context that carries a trace qualifier (correlation id). */
trait TraceQualifierLoggingContext extends LoggingContext {
  def traceQualifier: DefinedTrace
}

object TraceQualifierLoggingContext {

  // Extractor so the trace qualifier can be pattern-matched out of a context.
  def unapply(arg: TraceQualifierLoggingContext): Option[DefinedTrace] =
    Some(arg.traceQualifier)

  // Plain value-holder implementation of the trait.
  class TraceQualifierLoggingContextImpl(val traceQualifier: DefinedTrace) extends TraceQualifierLoggingContext
}
final case class PositionLoggingContext(ctx: LoggingContext, position: Position) | iRevive/macrolog | src/main/scala/macrolog/LoggingContext.scala | Scala | mit | 562 |
/*
* Copyright (c) 2017. Yuriy Stul
*/
package com.stulsoft.avro
import java.io.File
import org.apache.avro.Schema
import org.apache.avro.file.{DataFileReader, DataFileWriter}
import org.apache.avro.generic.{GenericData, GenericRecord}
import org.apache.avro.specific.{SpecificDatumReader, SpecificDatumWriter}
import scala.io.Source
/** Serialization/deserialization in/from file
*
* @author Yuriy Stul
* @see [[http://avro.apache.org/docs/current/gettingstartedjava.html Apache Avro Getting Started (Java)]]
*/
object Main2 extends App {
  test1()

  /**
   * Writes three generic user records to `users.avro` using the schema from
   * the `/user.json` classpath resource, then reads them back and prints them.
   * All resources (schema source, file writer, file reader) are closed even
   * when an exception occurs, which the previous version did not guarantee.
   */
  def test1(): Unit = {
    println("==>test1")
    val fileName = "users.avro"

    // Parse the schema, making sure the classpath-resource reader is closed.
    val schemaSource = Source.fromURL(getClass.getResource("/user.json"))
    val schema =
      try new Schema.Parser().parse(schemaSource.mkString)
      finally schemaSource.close()

    // Serialization
    val users = Seq(
      mkUser(schema, "test 1", 1, "red"),
      mkUser(schema, "test 2", 12, "green"),
      mkUser(schema, "test 3", 123, "black"))

    val userWriter = new SpecificDatumWriter[GenericRecord](schema)
    val fileWriter = new DataFileWriter[GenericRecord](userWriter)
    try {
      fileWriter.create(schema, new File(fileName))
      users.foreach(fileWriter.append)
    } finally {
      fileWriter.close()
    }

    // Deserialization
    val userReader = new SpecificDatumReader[GenericRecord](schema)
    val fileReader = new DataFileReader[GenericRecord](new File(fileName), userReader)
    try {
      fileReader.forEach(user => println(s"""${user.get("name")}, ${user.get("favoriteNumber")}, ${user.get("favoriteColor")}"""))
    } finally {
      fileReader.close()
    }

    println("<==test1")
  }

  /** Builds a generic Avro user record with the given field values. */
  private def mkUser(schema: Schema, name: String, favoriteNumber: Int, favoriteColor: String): GenericData.Record = {
    val user = new GenericData.Record(schema)
    user.put("name", name)
    user.put("favoriteNumber", favoriteNumber)
    user.put("favoriteColor", favoriteColor)
    user
  }
}
| ysden123/poc | avro/src/main/scala/com/stulsoft/avro/Main2.scala | Scala | mit | 2,097 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.streaming.parser
import java.nio.charset.Charset
import java.text.SimpleDateFormat
import java.util
import org.apache.carbondata.core.constants.CarbonCommonConstants
object FieldConverter {

  /**
   * Return a String representation of the input value
   * @param value input value
   * @param serializationNullFormat string for null value
   * @param complexDelimiters List of Complex Delimiters
   * @param timeStampFormat timestamp format
   * @param dateFormat date format
   * @param isVarcharType whether it is varchar type. A varchar type has no string length limit
   * @param level level for recursive call
   */
  def objectToString(
      value: Any,
      serializationNullFormat: String,
      complexDelimiters: util.ArrayList[String],
      timeStampFormat: SimpleDateFormat,
      dateFormat: SimpleDateFormat,
      isVarcharType: Boolean = false,
      level: Int = 0): String = {
    if (value == null) {
      serializationNullFormat
    } else {
      // Recurse with the same formatting configuration at the given nesting level.
      def convert(v: Any, nextLevel: Int): String =
        objectToString(v, serializationNullFormat, complexDelimiters,
          timeStampFormat, dateFormat, isVarcharType, nextLevel)
      value match {
        case s: String => if (!isVarcharType &&
                              s.length > CarbonCommonConstants.MAX_CHARS_PER_COLUMN_DEFAULT) {
          throw new Exception("Dataload failed, String length cannot exceed " +
                              CarbonCommonConstants.MAX_CHARS_PER_COLUMN_DEFAULT + " characters")
        } else {
          s
        }
        case d: java.math.BigDecimal => d.toPlainString
        case i: java.lang.Integer => i.toString
        case d: java.lang.Double => d.toString
        case t: java.sql.Timestamp => timeStampFormat format t
        case d: java.sql.Date => dateFormat format d
        case b: java.lang.Boolean => b.toString
        case s: java.lang.Short => s.toString
        case f: java.lang.Float => f.toString
        case bs: Array[Byte] => new String(bs,
          Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET))
        // Join the recursively converted elements with the level delimiter.
        // mkString yields the same string as the previous builder+substring code
        // for non-empty collections, and additionally returns "" for an empty
        // Seq/Map/Row instead of throwing StringIndexOutOfBoundsException
        // (substring(0, -delimiter.length) on an empty builder).
        case s: scala.collection.Seq[Any] =>
          s.map(convert(_, level + 1)).mkString(complexDelimiters.get(level))
        // A map entry is rendered as key<keyValueDelimiter>value; entries are
        // joined with the level delimiter.
        case m: scala.collection.Map[_, _] =>
          val keyValueDelimiter = complexDelimiters.get(level + 1)
          m.map { case (k, v) =>
            convert(k, level + 2) + keyValueDelimiter + convert(v, level + 2)
          }.mkString(complexDelimiters.get(level))
        case r: org.apache.spark.sql.Row =>
          (0 until r.length).map(i => convert(r(i), level + 1))
            .mkString(complexDelimiters.get(level))
        case other => other.toString
      }
    }
  }
}
| manishgupta88/carbondata | streaming/src/main/scala/org/apache/carbondata/streaming/parser/FieldConverter.scala | Scala | apache-2.0 | 4,528 |
import Dependencies._
import com.typesafe.sbt.SbtNativePackager._
import com.typesafe.sbt.packager.docker._
import com.typesafe.sbt.packager.linux._
import sbt.Keys._
import sbt._
import scoverage.ScoverageSbtPlugin
/** sbt build definition for the balboa-agent module. */
object BalboaAgent extends DockerKeys with LinuxKeys {

  /**
   * Project settings: assembly main class, library dependencies, Docker
   * packaging (socrata/java base image, ship.d entrypoint) and quality gates
   * (scoverage minimum, style check failing the build on error).
   */
  lazy val settings: Seq[Setting[_]] = BuildSettings.projectSettings ++ Seq(
    mainClass in sbtassembly.AssemblyKeys.assembly := Some("com.socrata.balboa.agent.BalboaAgent"),
    libraryDependencies <++= scalaVersion { libraries(_) },
    dockerBaseImage := "socrata/java",
    daemonUser in Docker := "socrata",
    mappings in Docker += file("balboa-agent/ship.d/run") -> "/etc/ship.d/run",
    dockerEntrypoint := Seq("/etc/ship.d/run"),
    dockerCommands := dockerCommands.value ++ Seq(ExecCmd("ADD", "etc", "/etc")),
    ScoverageSbtPlugin.ScoverageKeys.coverageMinimum := 61,
    // The entry-point object itself is excluded from coverage measurement.
    ScoverageSbtPlugin.ScoverageKeys.coverageExcludedPackages := "<empty>;.*\\\\.balboa\\\\.agent\\\\.BalboaAgent",
    com.socrata.sbtplugins.StylePlugin.StyleKeys.styleFailOnError in Compile := true
  )

  /**
   * Module dependencies for the given Scala version: the shared balboa-common
   * set plus agent-specific libraries and the logging stack.
   * Fix: `json4s` was previously listed twice in this Seq; the duplicate
   * entry has been removed (the resolved dependency set is unchanged).
   */
  def libraries(implicit scalaVersion: String): Seq[ModuleID] = BalboaCommon.libraries ++ Seq(
    arm,
    commons_logging,
    dropwizard_metrics,
    dropwizard_servlets,
    junit,
    json4s,
    newman,
    scalatest,
    scodec_core
  ) ++ balboa_logging
}
| socrata-platform/balboa | project/BalboaAgent.scala | Scala | apache-2.0 | 1,333 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.thriftserver
import scala.util.Random
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.openqa.selenium.WebDriver
import org.openqa.selenium.htmlunit.HtmlUnitDriver
import org.scalatest.{BeforeAndAfterAll, Matchers}
import org.scalatest.concurrent.Eventually._
import org.scalatest.time.SpanSugar._
import org.scalatestplus.selenium.WebBrowser
import org.apache.spark.ui.SparkUICssErrorHandler
/**
 * Selenium-based test of the Thrift server's Spark web UI. Launches the
 * server with the UI bound to a random port and drives it with a headless
 * HtmlUnit browser.
 */
class UISeleniumSuite
  extends HiveThriftJdbcTest
  with WebBrowser with Matchers with BeforeAndAfterAll {

  implicit var webDriver: WebDriver = _
  var server: HiveThriftServer2 = _
  // Random UI port in [20000, 30000) to avoid clashes with concurrent test runs.
  val uiPort = 20000 + Random.nextInt(10000)
  override def mode: ServerMode.Value = ServerMode.binary

  override def beforeAll(): Unit = {
    // Headless browser; CSS errors from the Spark UI are filtered out.
    webDriver = new HtmlUnitDriver {
      getWebClient.setCssErrorHandler(new SparkUICssErrorHandler)
    }
    super.beforeAll()
  }

  override def afterAll(): Unit = {
    try {
      if (webDriver != null) {
        webDriver.quit()
      }
    } finally {
      super.afterAll()
    }
  }

  /** Command line used to launch the Thrift server with the web UI enabled on `uiPort`. */
  override protected def serverStartCommand(port: Int) = {
    val portConf = if (mode == ServerMode.binary) {
      ConfVars.HIVE_SERVER2_THRIFT_PORT
    } else {
      ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT
    }
    s"""$startScript
        |  --master local
        |  --hiveconf hive.root.logger=INFO,console
        |  --hiveconf ${ConfVars.METASTORECONNECTURLKEY}=$metastoreJdbcUri
        |  --hiveconf ${ConfVars.METASTOREWAREHOUSE}=$warehousePath
        |  --hiveconf ${ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST}=localhost
        |  --hiveconf ${ConfVars.HIVE_SERVER2_TRANSPORT_MODE}=$mode
        |  --hiveconf $portConf=$port
        |  --driver-class-path ${sys.props("java.class.path")}
        |  --conf spark.ui.enabled=true
        |  --conf spark.ui.port=$uiPort
     """.stripMargin.split("\\\\s+").toSeq
  }

  // Disabled: verifies the SQL tab appears and executed statements are listed.
  ignore("thrift server ui test") {
    withJdbcStatement("test_map") { statement =>
      val baseURL = s"http://localhost:$uiPort"

      val queries = Seq(
        "CREATE TABLE test_map(key INT, value STRING)",
        s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test_map")

      queries.foreach(statement.execute)

      // The SQL tab link should show up in the UI navigation.
      eventually(timeout(10.seconds), interval(50.milliseconds)) {
        go to baseURL
        find(cssSelector("""ul li a[href*="sql"]""")) should not be None
      }

      // The SQL page should list session stats, statement stats and each executed query.
      eventually(timeout(10.seconds), interval(50.milliseconds)) {
        go to (baseURL + "/sql")
        find(id("sessionstat")) should not be None
        find(id("sqlstat")) should not be None

        // check whether statements exists
        queries.foreach { line =>
          findAll(cssSelector("""ul table tbody tr td""")).map(_.text).toList should contain (line)
        }
      }
    }
  }
}
| goldmedal/spark | sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/UISeleniumSuite.scala | Scala | apache-2.0 | 3,619 |
package doobie.util
import doobie.enum.nullability._
import doobie.enum.parametermode._
import doobie.enum.jdbctype._
import doobie.util.meta._
import doobie.util.capture._
import doobie.util.pretty._
import doobie.util.meta._
import scala.Predef._ // TODO: minimize
import scala.reflect.runtime.universe.TypeTag
import scalaz._, Scalaz._
import scalaz.\&/._
/** Module defining a type for analyzing the type alignment of prepared statements. */
/** Module defining a type for analyzing the type alignment of prepared statements. */
object analysis {

  /** Metadata for the JDBC end of a column mapping. */
  final case class ColumnMeta(jdbcType: JdbcType, vendorTypeName: String, nullability: Nullability, name: String)

  /** Metadata for the JDBC end of a parameter mapping. */
  final case class ParameterMeta(jdbcType: JdbcType, vendorTypeName: String, nullability: Nullability, mode: ParameterMode)

  /** One problem found when aligning Scala types against the statement's metadata. */
  sealed trait AlignmentError {
    def tag: String      // "P" for parameter errors, "C" for column errors
    def index: Int       // 1-based parameter/column index
    def msg: String      // human-readable explanation
  }

  // A parameter exists on only one side: -\/ means a Scala value with no SQL
  // placeholder; \/- means a SQL placeholder with no Scala value.
  case class ParameterMisalignment(index: Int, alignment: (Meta[_], NullabilityKnown) \/ ParameterMeta) extends AlignmentError {
    val tag = "P"
    def msg = this match {
      case ParameterMisalignment(i, -\/((st, n))) =>
        s"""|Interpolated value has no corresponding SQL parameter and likely appears inside a
            |comment or quoted string. This will result in a runtime failure; fix this by removing
            |the parameter.""".stripMargin.lines.mkString(" ")
      case ParameterMisalignment(i, \/-(pm)) =>
        s"""|${pm.jdbcType.toString.toUpperCase} parameter is not set; this will result in a runtime
            |failure. Perhaps you used a literal ? rather than an interpolated value.""".stripMargin.lines.mkString(" ")
    }
  }

  // The Scala type cannot be coerced to the parameter's JDBC type.
  case class ParameterTypeError(index: Int, scalaType: Meta[_], n: NullabilityKnown, jdbcType: JdbcType, vendorTypeName: String, nativeMap: Map[String, JdbcType]) extends AlignmentError {
    val tag = "P"
    def msg =
      s"""|${typeName(scalaType, n)} is not coercible to ${jdbcType.toString.toUpperCase}
          |(${vendorTypeName})
          |according to the JDBC specification.
          |Fix this by changing the schema type to ${scalaType.jdbcTarget.head.toString.toUpperCase},
          |or the Scala type to ${Meta.writersOf(jdbcType, vendorTypeName).toList.map(typeName(_, n)).mkString(" or ")}.""".stripMargin.lines.mkString(" ")
  }

  // A column exists on only one side: -\/ means a mapped Scala type with no
  // selected column; \/- means a selected column that is never read.
  case class ColumnMisalignment(index: Int, alignment: (Meta[_], NullabilityKnown) \/ ColumnMeta) extends AlignmentError {
    val tag = "C"
    def msg = this match {
      case ColumnMisalignment(i, -\/((j, n))) =>
        s"""|Too few columns are selected, which will result in a runtime failure. Add a column or
            |remove mapped ${typeName(j, n)} from the result type.""".stripMargin.lines.mkString(" ")
      case ColumnMisalignment(i, \/-(col)) =>
        s"""Column is unused. Remove it from the SELECT statement."""
    }
  }

  // A NULLable column is read into a non-Option Scala type.
  case class NullabilityMisalignment(index: Int, name: String, st: Meta[_], jdk: NullabilityKnown, jdbc: NullabilityKnown) extends AlignmentError {
    val tag = "C"
    def msg = this match {
      // https://github.com/tpolecat/doobie/issues/164 ... NoNulls means "maybe no nulls" :-\
      // case NullabilityMisalignment(i, name, st, NoNulls, Nullable) =>
      //   s"""Non-nullable column ${name.toUpperCase} is unnecessarily mapped to an Option type."""
      case NullabilityMisalignment(i, name, st, Nullable, NoNulls) =>
        s"""|Reading a NULL value into ${typeName(st, NoNulls)} will result in a runtime failure.
            |Fix this by making the schema type ${formatNullability(NoNulls)} or by changing the
            |Scala type to ${typeName(st, Nullable)}""".stripMargin.lines.mkString(" ")
    }
  }

  // The column's JDBC type cannot be coerced to the mapped Scala type at all.
  case class ColumnTypeError(index: Int, jdk: Meta[_], n: NullabilityKnown, schema: ColumnMeta) extends AlignmentError {
    val tag = "C"
    def msg =
      Meta.readersOf(schema.jdbcType, schema.vendorTypeName).toList.map(typeName(_, n)) match {
        case Nil =>
          s"""|${schema.jdbcType.toString.toUpperCase} (${schema.vendorTypeName}) is not
              |coercible to ${typeName(jdk, n)} according to the JDBC specification or any defined
              |mapping.
              |Fix this by changing the schema type to
              |${jdk.jdbcSource.list.map(_.toString.toUpperCase).toList.mkString(" or ") }; or the
              |Scala type to an appropriate ${if (schema.jdbcType == Array) "array" else "object"}
              |type.
              |""".stripMargin.lines.mkString(" ")
        case ss =>
          s"""|${schema.jdbcType.toString.toUpperCase} (${schema.vendorTypeName}) is not
              |coercible to ${typeName(jdk, n)} according to the JDBC specification or any defined
              |mapping.
              |Fix this by changing the schema type to
              |${jdk.jdbcSource.list.map(_.toString.toUpperCase).toList.mkString(" or ") }, or the
              |Scala type to ${ss.mkString(" or ")}.
              |""".stripMargin.lines.mkString(" ")
      }
  }

  // The coercion is allowed by JDBC but the source type is not a recommended one.
  case class ColumnTypeWarning(index: Int, jdk: Meta[_], n: NullabilityKnown, schema: ColumnMeta) extends AlignmentError {
    val tag = "C"
    def msg =
      s"""|${schema.jdbcType.toString.toUpperCase} (${schema.vendorTypeName}) is ostensibly
          |coercible to ${typeName(jdk, n)}
          |according to the JDBC specification but is not a recommended target type. Fix this by
          |changing the schema type to
          |${jdk.jdbcSource.list.map(_.toString.toUpperCase).toList.mkString(" or ") }; or the
          |Scala type to ${Meta.readersOf(schema.jdbcType, schema.vendorTypeName).toList.map(typeName(_, n)).mkString(" or ")}.
          |""".stripMargin.lines.mkString(" ")
  }

  /**
   * Compatibility analysis for the given statement and aligned mappings.
   * The `\&/` ("These") alignments record, per position, a Scala-side mapping,
   * a JDBC-side metadata entry, or both.
   */
  final case class Analysis(
    sql: String,
    nativeMap: Map[String, JdbcType],
    parameterAlignment: List[(Meta[_], NullabilityKnown) \&/ ParameterMeta],
    columnAlignment: List[(Meta[_], NullabilityKnown) \&/ ColumnMeta]) {

    // Parameters present on only one side (indices are 1-based).
    def parameterMisalignments: List[ParameterMisalignment] =
      parameterAlignment.zipWithIndex.collect {
        case (This(j), n) => ParameterMisalignment(n + 1, -\/(j))
        case (That(p), n) => ParameterMisalignment(n + 1, \/-(p))
      }

    // Parameters whose Scala type cannot target the JDBC type.
    def parameterTypeErrors: List[ParameterTypeError] =
      parameterAlignment.zipWithIndex.collect {
        case (Both((j, n1), p), n) if !j.jdbcTarget.element(p.jdbcType) =>
          ParameterTypeError(n + 1, j, n1, p.jdbcType, p.vendorTypeName, nativeMap)
      }

    // Columns present on only one side.
    def columnMisalignments: List[ColumnMisalignment] =
      columnAlignment.zipWithIndex.collect {
        case (This(j), n) => ColumnMisalignment(n + 1, -\/(j))
        case (That(p), n) => ColumnMisalignment(n + 1, \/-(p))
      }

    // Columns whose JDBC type has no (primary or secondary) Scala reader,
    // plus JavaObject/Other columns whose vendor type doesn't match an advanced mapping.
    def columnTypeErrors: List[ColumnTypeError] =
      columnAlignment.zipWithIndex.collect {
        case (Both((j, n1), p), n) if !(j.jdbcSource.list.toList ++ j.fold(_.jdbcSourceSecondary.toList, _ => Nil)).element(p.jdbcType) =>
          ColumnTypeError(n + 1, j, n1, p)
        case (Both((j, n1), p), n) if (p.jdbcType === JavaObject || p.jdbcType == Other) && !j.fold(_ => None, a => Some(a.schemaTypes.head)).element(p.vendorTypeName) =>
          ColumnTypeError(n + 1, j, n1, p)
      }

    // Columns readable only through a discouraged secondary coercion.
    def columnTypeWarnings: List[ColumnTypeWarning] =
      columnAlignment.zipWithIndex.collect {
        case (Both((j, n1), p), n) if j.fold(_.jdbcSourceSecondary.toList, _ => Nil).element(p.jdbcType) =>
          ColumnTypeWarning(n + 1, j, n1, p)
      }

    // NULLable columns mapped to non-Option Scala types.
    def nullabilityMisalignments: List[NullabilityMisalignment] =
      columnAlignment.zipWithIndex.collect {
        // We can't do anything helpful with NoNulls .. it means "might not be nullable"
        // case (Both((st, Nullable), ColumnMeta(_, _, NoNulls, col)), n) => NullabilityMisalignment(n + 1, col, st, NoNulls, Nullable)
        case (Both((st, NoNulls), ColumnMeta(_, _, Nullable, col)), n) => NullabilityMisalignment(n + 1, col, st, Nullable, NoNulls)
        // N.B. if we had a warning mechanism we could issue a warning for NullableUnknown
      }

    lazy val parameterAlignmentErrors =
      parameterMisalignments ++ parameterTypeErrors

    lazy val columnAlignmentErrors =
      columnMisalignments ++ columnTypeErrors ++ columnTypeWarnings ++ nullabilityMisalignments

    /** All errors, parameters first, each group sorted by (index, message). */
    def alignmentErrors =
      (parameterAlignmentErrors).sortBy(m => (m.index, m.msg)) ++
      (columnAlignmentErrors).sortBy(m => (m.index, m.msg))

    /** Description of each parameter, paired with its errors. */
    lazy val paramDescriptions: List[(String, List[AlignmentError])] = {
      // Render each alignment as a table row, then pair each printed line
      // with the errors reported at that 1-based index.
      val params: Block =
        parameterAlignment.zipWithIndex.map {
          case (Both((j1, n1), ParameterMeta(j2, s2, n2, m)), i) => List(f"P${i+1}%02d", s"${typeName(j1, n1)}", " → ", j2.toString.toUpperCase, s"($s2)")
          case (This((j1, n1)), i) => List(f"P${i+1}%02d", s"${typeName(j1, n1)}", " → ", "", "")
          case (That( ParameterMeta(j2, s2, n2, m)), i) => List(f"P${i+1}%02d", "", " → ", j2.toString.toUpperCase, s"($s2)")
        } .transpose.map(Block(_)).foldLeft(Block(Nil))(_ leftOf1 _).trimLeft(1)
      params.toString.lines.toList.zipWithIndex.map { case (s, n) =>
        (s, parameterAlignmentErrors.filter(_.index == n + 1))
      }
    }

    /** Description of each column, paired with its errors. */
    lazy val columnDescriptions: List[(String, List[AlignmentError])] = {
      import pretty._
      import scalaz._, Scalaz._
      val cols: Block =
        columnAlignment.zipWithIndex.map {
          case (Both((j1, n1), ColumnMeta(j2, s2, n2, m)), i) => List(f"C${i+1}%02d", m, j2.toString.toUpperCase, s"(${s2.toString})", formatNullability(n2), " → ", typeName(j1, n1))
          case (This((j1, n1)), i) => List(f"C${i+1}%02d", "", "", "", "", " → ", typeName(j1, n1))
          case (That( ColumnMeta(j2, s2, n2, m)), i) => List(f"C${i+1}%02d", m, j2.toString.toUpperCase, s"(${s2.toString})", formatNullability(n2), " → ", "")
        } .transpose.map(Block(_)).foldLeft(Block(Nil))(_ leftOf1 _).trimLeft(1)
      cols.toString.lines.toList.zipWithIndex.map { case (s, n) =>
        (s, columnAlignmentErrors.filter(_.index == n + 1))
      }
    }

  }

  // Some stringy helpers

  // Strips leading lowercase package segments, e.g. "scala.Int" -> "Int".
  private val packagePrefix = "\\b[a-z]+\\.".r

  // Short Scala type name, wrapped in Option[...] when nullable.
  private def typeName(t: Meta[_], n: NullabilityKnown): String = {
    val name = packagePrefix.replaceAllIn(t.scalaType, "")
    n match {
      case NoNulls  => name
      case Nullable => s"Option[${name}]"
    }
  }

  // SQL-style rendering of a nullability value for display.
  private def formatNullability(n: Nullability): String =
    n match {
      case NoNulls         => "NOT NULL"
      case Nullable        => "NULL"
      case NullableUnknown => "NULL?"
    }

}
| coltfred/doobie | core/src/main/scala/doobie/util/analysis.scala | Scala | mit | 11,014 |
/*
* Algorithm.scala
* (MutagenTx)
*
* Copyright (c) 2015 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.mutagentx
import java.util.concurrent.TimeUnit
import de.sciss.file._
import de.sciss.lucre.confluent.reactive.ConfluentReactive
import de.sciss.lucre.event.Sys
import de.sciss.lucre.stm.store.BerkeleyDB
import de.sciss.lucre.{data, event => evt, stm}
import de.sciss.processor.Processor
import de.sciss.serial.DataOutput
import de.sciss.synth.UGenSpec
import de.sciss.synth.io.AudioFileSpec
import scala.concurrent.ExecutionContext
import scala.concurrent.duration.Duration
import scala.language.higherKinds
object Algorithm {
val DEBUG = false
// ---- generation ----
val population : Int = 1000
val constProb : Double = 0.5
val minNumVertices : Int = 64 // 30
val maxNumVertices : Int = 256 // 100
val nonDefaultProb : Double = 0.95 // 0.99 // 0.5
// ---- evaluation ----
val numCoeffs : Int = 42
val normalizeCoeffs : Boolean = false // true
val maxBoost : Double = 10.0
val temporalWeight : Double = 0.3
val vertexPenalty : Double = 0.01
val graphPenaltyIter: Int = 10
val graphPenaltyCeil: Double = 0.275
val graphPenaltyAmt : Double = 0.2
val graphPenaltyCoin: Double = 0.25 // subsampling probability to increase speed (1 = all neighbors, 0.5 = every second)
// ---- breeding ----
val selectionFrac : Double = 0.33
val numElitism : Int = 0 // 5
val mutMin : Int = 2
val mutMax : Int = 4
val mutationProb : Double = 0.75
val numGolem : Int = 15
implicit val executionContext: ExecutionContext = {
ExecutionContext.Implicits.global
// SoundProcesses.executionContext
// val ex = Executors.newFixedThreadPool(6)
// ExecutionContext.fromExecutor(ex)
}
def tmpConfluent(input: File): Confluent =
impl.ConfluentAlgorithm.tmp(input)
def confluent(dir: File, input: File): Confluent =
impl.ConfluentAlgorithm.apply(dir = dir, input = input)
implicit object DurableVertexOrdering extends data.Ordering[evt.Durable#Tx, Vertex[evt.Durable]] {
type S = evt.Durable
def compare(a: Vertex[S], b: Vertex[S])(implicit tx: S#Tx): Int = {
val aid = stm.Escape.durableID(a.id)
val bid = stm.Escape.durableID(b.id)
if (aid < bid) -1 else if (aid > bid) 1 else 0
}
}
def durable(dir: File, input: File): Durable = {
type S = evt.Durable
val dbc = BerkeleyDB.Config()
dbc.lockTimeout = Duration(0, TimeUnit.SECONDS)
val dbf = BerkeleyDB.factory(dir, dbc)
implicit val system = evt.Durable(dbf)
val rootH = system.root[(GlobalState.Durable, Genome[S])] { implicit tx =>
(GlobalState.Durable(), Genome.empty[S])
}
// Yes, I know... Not nice...
val (global, genomeH) = system.step { implicit tx =>
val (_global, _genome) = rootH()
(_global, tx.newHandle(_genome))
}
// val (global: GlobalState.Durable, genomeH: stm.Source[S#Tx, Genome[S]]) = system.step { implicit tx =>
// (GlobalState.Durable(), tx.newHandle(Genome.empty[S]))
// }
lazy val cleaner = { (_tx: S#Tx, elite: Vec[Chromosome[S]]) =>
implicit val tx = _tx
val g = a.genome
val old = g.chromosomes()
val fit = g.fitness()
g.chromosomes() = Vector.empty
val eliteSet = elite.toSet
// val ser = SynthGraphs.ValueSerializer //implicitly[ImmutableSerializer[SynthGraph]]
val iter = a.global.numIterations()
val store = iter % 10 == 0
val f = dir.parent / s"${dir.name}_iter$iter.bin"
lazy val out = DataOutput.open(f)
(old zip fit).foreach { case (c, fit0) =>
if (!eliteSet.contains(c)) {
if (store && fit0 > 0.4f) {
val graph = impl.ChromosomeImpl.mkSynthGraph(c, mono = true, removeNaNs = false, config = true)
val input = SOMGenerator.Input(graph, iter = iter, fitness = fit0)
SOMGenerator.Input.serializer.write(input, out)
}
val v = c.vertices.iterator.toIndexedSeq
c.dispose()
v.foreach(_.dispose())
}
}
if (store) out.close()
()
}
lazy val a: Algorithm.Durable = impl.CopyingAlgorithm[S, GlobalState.Durable](system = system, input = input,
global = global, genomeH = genomeH, ephemeral = true, cleaner = Some(cleaner))
a
}
  /** Builds a transient, purely in-memory algorithm instance (no persistence).
    *
    * @param input target sound file handed to the algorithm
    */
  def inMemory(input: File): Algorithm[evt.InMemory] = {
    type S = evt.InMemory
    implicit val system = evt.InMemory()
    // Total ordering over vertices based on their (escaped) in-memory identifiers.
    implicit object VertexOrd extends data.Ordering[S#Tx, Vertex[S]] {
      def compare(a: Vertex[S], b: Vertex[S])(implicit tx: S#Tx): Int = {
        val aid = stm.Escape.inMemoryID(a.id)
        val bid = stm.Escape.inMemoryID(b.id)
        if (aid < bid) -1 else if (aid > bid) 1 else 0
      }
    }
    // Create global state and an empty genome inside a single transaction.
    val (global: GlobalState[S], genomeH: stm.Source[S#Tx, Genome[S]]) = system.step { implicit tx =>
      (GlobalState.InMemory(), tx.newHandle(Genome.empty[S]))
    }
    impl.CopyingAlgorithm[S, GlobalState[S]](system = system, input = input, global = global,
      genomeH = genomeH, ephemeral = true)
  }
  /** Algorithm refinement whose state lives purely in memory. */
  type InMemory = Algorithm[evt.InMemory] {
    type Global = GlobalState[evt.InMemory]
  }
  /** Algorithm refinement backed by a durable (on-disk) system. */
  type Durable = Algorithm[evt.Durable] {
    type Global = GlobalState.Durable
  }
  /** Algorithm refinement backed by a confluently persistent system. */
  type Confluent = Algorithm[ConfluentReactive] {
    type Global = GlobalState.Confluent
  }
}
/** Genetic-programming algorithm interface: creation, evaluation, selection
  * and iteration of a genome of chromosomes, parametrized by the STM system
  * `S` in which all state lives.
  */
trait Algorithm[S <: Sys[S]] {
  /** Shorthand for the chromosome type within system `S`. */
  type C = Chromosome[S]

  /** The current genome (chromosome population and fitness vector). */
  def genome(implicit tx: S#Tx): Genome[S]

  /** Input target sound file. */
  def input: File

  /** Target sound's feature extraction file. */
  def inputExtr: File

  /** Target sound's specification. */
  def inputSpec: AudioFileSpec

  /** The STM system in which the algorithm state is stored. */
  def system: S

  /** Concrete type of the global (per-run) state. */
  type Global <: GlobalState[S]
  val global: Global

  /** Total ordering over vertices. */
  implicit def ord: data.Ordering[S#Tx, Vertex[S]]

  /** Creates the initial population of size `n`. */
  def initialize(n: Int)(implicit tx: S#Tx): Processor[Unit]

  /** Creates an individual chromosome. */
  def mkIndividual()(implicit tx: S#Tx): C

  /** Adds a random vertex to an existing chromosome. */
  def addVertex(c: C)(implicit tx: S#Tx): Unit

  /** Utility method that collects the general arguments into which other `GE` elements may be plugged. */
  def geArgs(spec: UGenSpec): Vec[UGenSpec.Argument]

  /** Adds vertices to a chromosome `c` so that a newly added vertex `v`
    * is fully wired.
    */
  def completeUGenInputs(c: C, v: Vertex.UGen[S])(implicit tx: S#Tx): Unit

  /** Creates a random constant vertex. */
  def mkConstant()(implicit tx: S#Tx): Vertex.Constant[S]

  /** Creates a random UGen vertex. */
  def mkUGen()(implicit tx: S#Tx): Vertex.UGen[S]

  /** Runs the evaluation on all chromosomes of the current genome,
    * returning the fitness vector (the genome is not updated).
    */
  def evaluate()(implicit tx: S#Tx): Processor[Vec[Float]]

  /** Runs the evaluation on all chromosomes of the current genome,
    * updating the genome's fitness vector.
    */
  def evaluateAndUpdate()(implicit tx: S#Tx): Processor[Unit]

  /** Runs the selection stage of the algorithm, using `all` inputs which
    * are chromosomes paired with their fitness values.
    */
  def select(all: Vec[(C, Float)])(implicit tx: S#Tx): Set[C]

  /** Selects the best matching chromosomes. */
  def elitism(all: Vec[(C, Float)])(implicit tx: S#Tx): Vec[C]

  /** Performs one iteration of the algorithm, assuming that current population
    * was already evaluated. Steps:
    *
    * - elitism
    * - selection
    * - breeding: mutation and cross-over
    * - evaluation
    */
  def iterate(): Processor[Unit]
} | Sciss/MutagenTx | src/main/scala/de/sciss/mutagentx/Algorithm.scala | Scala | gpl-3.0 | 7,856 |
package mesosphere.marathon.tasks
import org.apache.mesos.Protos.{ Value, Resource }
import org.slf4j.LoggerFactory
import scala.collection.JavaConverters._
/** Helpers for subtracting already-used Mesos resources from offered ones
  * and for rendering resources in log-friendly form.
  */
object ResourceUtil {

  private[this] val log = LoggerFactory.getLogger(getClass)

  /**
    * Resources in launched tasks should be consumed from offered resources
    * with the same [[ResourceMatchKey]] (i.e. the same role and name).
    */
  private[this] case class ResourceMatchKey(role: String, name: String)

  private[this] object ResourceMatchKey {
    def apply(resource: Resource): ResourceMatchKey = ResourceMatchKey(resource.getRole, resource.getName)
  }

  /**
    * Deduct usedResource from resource. If nothing is left, None is returned.
    * Both resources must have the same [[Value.Type]].
    */
  def consumeResource(resource: Resource, usedResource: Resource): Option[Resource] = {
    require(resource.getType == usedResource.getType)

    // Scalar: plain subtraction; a non-positive remainder counts as fully consumed.
    def consumeScalarResource: Option[Resource] = {
      val leftOver: Double = resource.getScalar.getValue - usedResource.getScalar.getValue
      if (leftOver <= 0) {
        None
      }
      else {
        Some(resource
          .toBuilder
          .setScalar(
            Value.Scalar
              .newBuilder().setValue(leftOver))
          .build())
      }
    }

    // Removes usedRange from baseRange, yielding zero, one or two remaining ranges
    // (ranges here are inclusive on both ends).
    def deductRange(baseRange: Value.Range, usedRange: Value.Range): Seq[Value.Range] = {
      if (baseRange.getEnd < usedRange.getBegin) { // baseRange completely before usedRange
        Seq(baseRange)
      }
      else if (baseRange.getBegin > usedRange.getEnd) { // baseRange completely after usedRange
        Seq(baseRange)
      }
      else {
        // Overlap: keep the (possibly empty) pieces before and after the used range.
        val rangeBefore: Option[Value.Range] = if (baseRange.getBegin < usedRange.getBegin)
          Some(baseRange.toBuilder().setEnd(usedRange.getBegin - 1).build())
        else
          None
        val rangeAfter: Option[Value.Range] = if (baseRange.getEnd > usedRange.getEnd)
          Some(baseRange.toBuilder().setBegin(usedRange.getEnd + 1).build())
        else
          None
        Seq(rangeBefore, rangeAfter).flatten
      }
    }

    // Ranges (e.g. port ranges): subtract every used range from every base range.
    def consumeRangeResource: Option[Resource] = {
      val usedRanges = usedResource.getRanges.getRangeList.asScala
      val baseRanges = resource.getRanges.getRangeList.asScala

      // FIXME: too expensive? (quadratic in the number of ranges)
      val diminished = baseRanges.flatMap { baseRange =>
        usedRanges.foldLeft(Seq(baseRange)) {
          case (result, used) =>
            result.flatMap(deductRange(_, used))
        }
      }

      val rangesBuilder = Value.Ranges.newBuilder()
      diminished.foreach(rangesBuilder.addRange)

      val result = resource
        .toBuilder
        .setRanges(rangesBuilder)
        .build()

      if (result.getRanges.getRangeCount > 0)
        Some(result)
      else
        None
    }

    // Sets: plain set difference; an empty remainder counts as fully consumed.
    def consumeSetResource: Option[Resource] = {
      val baseSet: Set[String] = resource.getSet.getItemList.asScala.toSet
      val consumedSet: Set[String] = usedResource.getSet.getItemList.asScala.toSet
      val resultSet: Set[String] = baseSet -- consumedSet

      if (resultSet.nonEmpty)
        Some(
          resource
            .toBuilder
            .setSet(Value.Set.newBuilder().addAllItem(resultSet.asJava))
            .build()
        )
      else
        None
    }

    resource.getType match {
      case Value.Type.SCALAR => consumeScalarResource
      case Value.Type.RANGES => consumeRangeResource
      case Value.Type.SET    => consumeSetResource

      case unexpectedResourceType =>
        log.warn("unexpected resourceType {} for resource {}", Seq(unexpectedResourceType, resource.getName): _*)
        // we don't know the resource, thus we consume it completely
        None
    }
  }

  /**
    * Deduct usedResources from resources, matching them by role and name.
    * Resources that are not used at all are passed through unchanged.
    */
  def consumeResources(resources: Iterable[Resource], usedResources: Iterable[Resource]): Iterable[Resource] = {
    val usedResourceMap: Map[ResourceMatchKey, Seq[Resource]] =
      usedResources.groupBy(ResourceMatchKey(_)).mapValues(_.to[Seq])

    resources.flatMap { resource: Resource =>
      usedResourceMap.get(ResourceMatchKey(resource)) match {
        case Some(usedResources: Seq[Resource]) =>
          // Apply each used resource in turn until nothing is left (None).
          usedResources.foldLeft(Some(resource): Option[Resource]) {
            case (Some(resource), usedResource) =>
              if (resource.getType != usedResource.getType) {
                log.warn("Different resource types for resource {}: {} and {}",
                  resource.getName, resource.getType, usedResource.getType)
                None
              }
              else
                ResourceUtil.consumeResource(resource, usedResource)

            case (None, _) => None
          }
        case None => // if the resource isn't used, we keep it
          Some(resource)
      }
    }
  }

  /** Renders a single resource for logging (scalar value or range list). */
  def displayResource(resource: Resource): String = resource.getType match {
    case Value.Type.SCALAR => s"${resource.getName} ${resource.getScalar.getValue}"
    case Value.Type.RANGES =>
      s"${resource.getName} ${
        resource.getRanges.getRangeList.asScala.map {
          range => s"${range.getBegin}->${range.getEnd}"
        }.mkString(",")
      }"
    case other => resource.toString
  }

  /** Renders a collection of resources for logging, separated by "; ". */
  def displayResources(resources: Iterable[Resource]): String = {
    resources.map(displayResource).mkString("; ")
  }
}
| quamilek/marathon | src/main/scala/mesosphere/marathon/tasks/ResourceUtil.scala | Scala | apache-2.0 | 5,295 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.logical
import org.apache.flink.table.api.TableConfigOptions
import org.apache.flink.table.calcite.FlinkContext
import org.apache.flink.table.plan.nodes.FlinkConventions
import org.apache.flink.table.plan.util.SortUtil
import org.apache.calcite.plan._
import org.apache.calcite.rel.convert.ConverterRule
import org.apache.calcite.rel.core.Sort
import org.apache.calcite.rel.logical.LogicalSort
import org.apache.calcite.rel.metadata.RelMetadataQuery
import org.apache.calcite.rel.{RelCollation, RelCollationTraitDef, RelNode}
import org.apache.calcite.rex.{RexLiteral, RexNode}
import org.apache.calcite.sql.`type`.SqlTypeName
/**
* Sub-class of [[Sort]] that is a relational expression which imposes
* a particular sort order on its input without otherwise changing its content in Flink.
*/
class FlinkLogicalSort(
    cluster: RelOptCluster,
    traits: RelTraitSet,
    child: RelNode,
    collation: RelCollation,
    sortOffset: RexNode,
    sortFetch: RexNode)
  extends Sort(cluster, traits, child, collation, sortOffset, sortFetch)
  with FlinkLogicalRel {

  // Number of rows skipped by OFFSET (0 when no offset is given).
  private lazy val limitStart: Long = SortUtil.getLimitStart(offset)

  override def copy(
      traitSet: RelTraitSet,
      newInput: RelNode,
      newCollation: RelCollation,
      offset: RexNode,
      fetch: RexNode): Sort = {
    new FlinkLogicalSort(cluster, traitSet, newInput, newCollation, offset, fetch)
  }

  /** Estimates the output row count: the input estimate minus the offset
    * (at least 1), additionally capped by FETCH when present. An unknown
    * (null, boxed) input estimate is propagated unchanged.
    */
  override def estimateRowCount(mq: RelMetadataQuery): Double = {
    val inputRowCnt = mq.getRowCount(this.getInput)
    if (inputRowCnt == null) {
      inputRowCnt
    } else {
      val rowCount = (inputRowCnt - limitStart).max(1.0)
      if (fetch != null) {
        val limit = RexLiteral.intValue(fetch)
        rowCount.min(limit)
      } else {
        rowCount
      }
    }
  }

  override def computeSelfCost(planner: RelOptPlanner, mq: RelMetadataQuery): RelOptCost = {
    // by default, assume cost is proportional to number of rows
    val rowCount: Double = mq.getRowCount(this)
    planner.getCostFactory.makeCost(rowCount, rowCount, 0)
  }
}
class FlinkLogicalSortStreamConverter
  extends ConverterRule(
    classOf[LogicalSort],
    Convention.NONE,
    FlinkConventions.LOGICAL,
    "FlinkLogicalSortStreamConverter") {

  /** Rewrites a [[LogicalSort]] into a [[FlinkLogicalSort]] whose input has
    * been converted to the Flink logical convention.
    */
  override def convert(rel: RelNode): RelNode = {
    val logicalSort = rel.asInstanceOf[LogicalSort]
    val convertedInput = RelOptRule.convert(logicalSort.getInput, FlinkConventions.LOGICAL)
    FlinkLogicalSort.create(
      convertedInput, logicalSort.getCollation, logicalSort.offset, logicalSort.fetch)
  }
}
class FlinkLogicalSortBatchConverter extends ConverterRule(
  classOf[LogicalSort],
  Convention.NONE,
  FlinkConventions.LOGICAL,
  "FlinkLogicalSortBatchConverter") {

  /** Rewrites a [[LogicalSort]] into a [[FlinkLogicalSort]]. When the sort has
    * neither OFFSET nor FETCH, range sort is disabled and a positive default
    * limit is configured, an artificial "OFFSET 0 FETCH <default>" is injected.
    */
  override def convert(rel: RelNode): RelNode = {
    val sort = rel.asInstanceOf[LogicalSort]
    val convertedInput = RelOptRule.convert(sort.getInput, FlinkConventions.LOGICAL)
    val tableConfig = sort.getCluster.getPlanner.getContext.asInstanceOf[FlinkContext].getTableConfig
    val rangeSortEnabled = tableConfig.getConf.getBoolean(TableConfigOptions.SQL_EXEC_SORT_RANGE_ENABLED)
    val defaultLimit = tableConfig.getConf.getInteger(TableConfigOptions.SQL_EXEC_SORT_DEFAULT_LIMIT)

    val boundsMissing = sort.fetch == null && sort.offset == null
    val (offset, fetch) =
      if (boundsMissing && !rangeSortEnabled && defaultLimit > 0) {
        // Force the sort to carry an explicit limit.
        val rexBuilder = rel.getCluster.getRexBuilder
        val intType = rexBuilder.getTypeFactory.createSqlType(SqlTypeName.INTEGER)
        val zeroOffset = rexBuilder.makeLiteral(0, intType, true)
        val defaultFetch = rexBuilder.makeLiteral(defaultLimit, intType, true)
        (zeroOffset, defaultFetch)
      } else {
        (sort.offset, sort.fetch)
      }
    FlinkLogicalSort.create(convertedInput, sort.getCollation, offset, fetch)
  }
}
object FlinkLogicalSort {

  val BATCH_CONVERTER: RelOptRule = new FlinkLogicalSortBatchConverter
  val STREAM_CONVERTER: RelOptRule = new FlinkLogicalSortStreamConverter

  /** Builds a [[FlinkLogicalSort]] in the Flink logical convention, carrying
    * the canonized collation as a trait of the new node.
    */
  def create(
      input: RelNode,
      collation: RelCollation,
      sortOffset: RexNode,
      sortFetch: RexNode): FlinkLogicalSort = {
    val cluster = input.getCluster
    val canonizedCollation = RelCollationTraitDef.INSTANCE.canonize(collation)
    val traitSet = cluster.traitSetOf(FlinkConventions.LOGICAL).replace(canonizedCollation)
    new FlinkLogicalSort(cluster, traitSet, input, collation, sortOffset, sortFetch)
  }
}
| shaoxuan-wang/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/nodes/logical/FlinkLogicalSort.scala | Scala | apache-2.0 | 5,158 |
/**
* Copyright (C) 2016 Verizon. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.verizon.bda.trapezium.framework.handler
import javax.servlet.http.HttpServlet
import org.apache.spark.SparkContext
import org.slf4j.LoggerFactory
/**
* Created by Pankaj on 5/2/16.
*/
private[framework]
class TestHttpServlet2(sc: SparkContext) extends HttpServlet {

  import javax.servlet.http.{HttpServletRequest, HttpServletResponse}

  import scala.xml.NodeSeq

  /** Logs Spark context info plus every request parameter and header,
    * then answers with a small static HTML page.
    */
  override def service(request: HttpServletRequest, response: HttpServletResponse) {

    val logger = LoggerFactory.getLogger(this.getClass)
    logger.info(s"Spark Test: ${sc.sparkUser},${sc.startTime}")

    // Drains the enumeration, logging each name followed by its resolved value.
    def logEntries(names: java.util.Enumeration[String], valueOf: String => String): Unit =
      while (names.hasMoreElements) {
        val name = names.nextElement()
        logger.info(s"${name}")
        logger.info(s"${valueOf(name)}")
      }

    logger.info("**************Parameter********************")
    logEntries(request.getParameterNames, request.getParameter)

    logger.info("**************Header********************")
    logEntries(request.getHeaderNames, request.getHeader)

    response.setContentType("text/html")
    response.setCharacterEncoding("UTF-8")

    val responseBody: NodeSeq =
      <html>
        <head>
          <title>embedded jetty</title>
          <link rel="stylesheet" type="text/css" href="/css/style.css" />
        </head>
        <body>
          <h1>Servlet 2!</h1>
        </body>
      </html>

    response.getWriter.write(responseBody.toString)
  }
}
| Verizon/trapezium | framework/src/test/scala/com/verizon/bda/trapezium/framework/handler/TestHttpServlet2.scala | Scala | apache-2.0 | 2,210 |
package com.typesafe.slick.testkit.tests
import com.typesafe.slick.testkit.util.{RelationalTestDB, AsyncTest}
import scala.concurrent.Future
/** Exercises nested tuple and nested `Option` shapes in queries, as well as
  * `Option.getOrElse` inside a table projection.
  */
class NestingTest extends AsyncTest[RelationalTestDB] {
  import tdb.profile.api._

  /** Nested tuples and `~`-style (HList-like) tuples in projections, sorting
    * and unions must round-trip through the database unchanged.
    */
  def testNestedTuples = {
    import TupleMethods._

    class T(tag: Tag) extends Table[(Int, String, String)](tag, "T") {
      def a = column[Int]("A")
      def b = column[String]("B")
      def c = column[String]("C")
      def * = (a, b, c)
    }
    val ts = TableQuery[T]

    // Cross join of (a, b) with c, plus a literal 5, ordered by (c, a).
    val res1 = List(
      (1, "1", "a", 5), (2, "2", "a", 5), (3, "3", "a", 5),
      (1, "1", "b", 5), (2, "2", "b", 5), (3, "3", "b", 5),
      (1, "1", "c", 5), (2, "2", "c", 5), (3, "3", "c", 5)
    )
    // Same data, but grouped into nested pairs ((a, b), (c, 5)).
    val res1b = res1.map { case (a, b, c, d) => ((a, b), (c, d)) }

    val q1a = (for {
      (a, b) <- ts.map(t => (t.a, t.b))
      c <- ts.map(t => t.c)
    } yield (a, b, c, 5)).sortBy(t => t._3 ~ t._1)
    val q1c = (for {
      a ~ b <- ts.map(t => (t.a, t.b))
      c <- ts.map(t => t.c)
    } yield (a, b, c, LiteralColumn(5))).sortBy(t => t._3 ~ t._1)
    val q1d = (for {
      (a, b) <- ts.map(t => (t.a, t.b))
      c <- ts.map(t => t.c)
    } yield ((a, b), (c, 5))).sortBy(t => t._2._1 ~ t._1._1)

    // Union of two filtered projections, with the literal column doubled.
    val res2 = Set((1, "1", 8), (2, "2", 10))
    val q2a = for {
      a ~ b ~ c <- ts.filter(_.a === 1).map(t => (t.a, t.b, 4)) unionAll ts.filter(_.a === 2).map(t => t.a ~ t.b ~ 5)
    } yield a ~ b ~ (c*2)
    val q2b = for {
      (a, b, c) <- ts.filter(_.a === 1).map(t => (t.a, t.b, LiteralColumn(4))) unionAll ts.filter(_.a === 2).map(t => (t.a, t.b, LiteralColumn(5)))
    } yield a ~ b ~ (c*2)
    val q2c = for {
      (a, b, c) <- ts.filter(_.a === 1).map(t => (t.a, t.b, 4)) unionAll ts.filter(_.a === 2).map(t => (t.a, t.b, 5))
    } yield a ~ b ~ (c*2)

    seq(
      ts.schema.create,
      ts ++= Seq((1, "1", "a"), (2, "2", "b"), (3, "3", "c")),
      q1a.result.map(_ shouldBe res1),
      q1c.result.map(_ shouldBe res1),
      q1d.result.map(_ shouldBe res1b),
      q2a.result.map(v => v.toSet shouldBe res2),
      q2b.result.map(v => v.toSet shouldBe res2),
      q2c.result.map(v => v.toSet shouldBe res2)
    )
  }

  /** Covers construction, mapping, flatMapping and flattening of nested
    * `Option` column shapes (`Rep.Some`, `Option[Option[...]]`, etc.).
    */
  def testNestedOptions = {
    class X(tag: Tag) extends Table[(Int, String, Option[Int])](tag, "X_OPT") {
      def a = column[Int]("A")
      def b = column[String]("B")
      def c = column[Option[Int]]("C")
      def * = (a, b, c)
    }
    val xs = TableQuery[X]
    val q = xs.sortBy(_.a)
    val r = Vector((1, "1", Some(1)), (2, "2", Some(2)), (3, "3", None))
    val setup = xs.schema.create >> (xs ++= r)

    // Construct all kinds of Option Shapes (compile-time checks only)
    implicitly[Shape[_, Rep[Int], _, _]]
    implicitly[Shape[_, Rep[Option[Int]], _, _]]
    implicitly[Shape[_, Rep[Option[Option[Int]]], _, _]]
    implicitly[Shape[_, Rep[Option[(Rep[Int], Rep[String])]], _, _]]
    implicitly[Shape[_, Rep[Option[X]], _, _]]

    // Construct all different kinds of Options
    val q1 = q.map(t => Rep.Some(t))
    val q1a2 = q.map(t => Rep.Some(Rep.Some(t)))
    val q2 = q.map(t => Rep.Some(t.a))
    val q2a2 = q.map(t => Rep.Some(Rep.Some(t.a)))
    val q3 = q.map(t => t.c)
    val q4 = q.map(t => Rep.Some(t.c))
    val q5 = q.map(t => (t.c, Rep.Some(t.b)))
    // Type ascriptions double as compile-time shape checks.
    val q1t: Query[Rep[Option[X]], _, Seq] = q1
    val q1a2t: Query[Rep[Option[Option[X]]], _, Seq] = q1a2
    val q2t: Query[Rep[Option[Int]], _, Seq] = q2
    val q2a2t: Query[Rep[Option[Option[Int]]], _, Seq] = q2a2
    val q3t: Query[Rep[Option[Int]], _, Seq] = q3
    val q4t: Query[Rep[Option[Option[Int]]], _, Seq] = q4
    val q5t: Query[(Rep[Option[Int]], Rep[Option[String]]), _, Seq] = q5
    lazy val t1 = seq(
      mark("q1", q1.result).map(_ shouldBe r.map(t => Some(t))),
      mark("q1a2", q1a2.result).map(_ shouldBe r.map(t => Some(Some(t)))),
      mark("q2", q2.result).map(_ shouldBe r.map(t => Some(t._1))),
      mark("q2a2", q2a2.result).map(_ shouldBe r.map(t => Some(Some(t._1)))),
      mark("q3", q3.result).map(_ shouldBe r.map(t => t._3)),
      mark("q4", q4.result).map(_ shouldBe r.map(t => Some(t._3))),
      mark("q5", q5.result).map(_ shouldBe r.map(t => (t._3, Some(t._2))))
    )

    // Get plain values out
    val q1b = q1.map(_.map(x => (x.a, x.b, x.c)).getOrElse((0, "", None: Option[Int])))
    val q2b = q2.map(_.get)
    val q3b = q3.filter(_.isDefined).map(_.get)
    val q4b = q4.map(_.getOrElse(None: Option[Int]))
    val q1bt: Query[(Rep[Int], Rep[String], Rep[Option[Int]]), _, Seq] = q1b
    val q2bt: Query[Rep[Int], _, Seq] = q2b
    val q3bt: Query[Rep[Int], _, Seq] = q3b
    val q4bt: Query[Rep[Option[Int]], _, Seq] = q4b
    lazy val t2 = seq(
      mark("q1b", q1b.result).map(_ shouldBe r.map(t => Some(t)).map(_.getOrElse((0, "", None: Option[String])))),
      mark("q2b", q2b.result).map(_ shouldBe r.map(t => Some(t._1)).map(_.get)),
      mark("q3b", q3b.result).map(_ shouldBe r.map(t => t._3).filter(_.isDefined).map(_.get)),
      mark("a4b", q4b.result).map(_ shouldBe r.map(t => Some(t._3)).map(_.getOrElse(None: Option[String])))
    )

    // Unpack result types (compile-time checks only; never executed)
    def r1: Future[Seq[Option[(Int, String, Option[Int])]]] = db.run(q1.result)
    def r2: Future[Seq[Option[Int]]] = db.run(q2.result)
    def r3: Future[Seq[Option[Int]]] = db.run(q3.result)
    def r2b: Future[Seq[Int]] = db.run(q2b.result)
    def r3b: Future[Seq[Int]] = db.run(q3b.result)

    // Perform Option-mapped operations
    val q2c = q2.map(io => io + 42)
    val q3c = q3.map(so => so + 10)
    lazy val t3 = seq(
      mark("q2c", q2c.result).map(_ shouldBe r.map(t => Some(t._1)).map(_.map(_ + 42))),
      mark("q3c", q3c.result).map(_ shouldBe r.map(t => t._3).map(_.map(_ + 10)))
    )

    // Use Option.map
    val q1d = q1.map(_.map(_.a))
    val q1d2 = q1.map(_.map(x => (x.a, x.b, x.c)))
    val q2d = q2.map { (io: Rep[Option[Int]]) =>
      io.map { (i: Rep[Int]) =>
        i + 1
      }
    }
    val q3d = q3.map(_.map(s => (s, s, 1)))
    val q4d = q4.map(_.filter(_.isDefined).map(_.getOrElse(0)))
    val q1dt: Query[Rep[Option[Int]], _, Seq] = q1d
    val q1d2t: Query[Rep[Option[(Rep[Int], Rep[String], Rep[Option[Int]])]], _, Seq] = q1d2
    val q2dt: Query[Rep[Option[Int]], _, Seq] = q2d
    val q3dt: Query[Rep[Option[(Rep[Int], Rep[Int], ConstColumn[Int])]], _, Seq] = q3d
    val q4dt: Query[Rep[Option[Int]], _, Seq] = q4d
    lazy val t4 = seq(
      q1d.result.named("q1d").map(_ shouldBe r.map(t => Some(t)).map(_.map(_._1))),
      q1d2.result.named("q1d2").map(_ shouldBe r.map(t => Some(t)).map(_.map(x => (x._1, x._2, x._3)))),
      q2d.result.named("q2d").map(_ shouldBe r.map(t => Some(t._1)).map(_.map(_ + 1))),
      q3d.result.named("q3d").map(_ shouldBe r.map(t => t._3).map(_.map(s => (s, s, 1)))),
      q4d.result.named("q4d").map(_ shouldBe r.map(t => Some(t._3)).map(_.filter(_.isDefined).map(_.get)))
    )

    // Use Option.flatMap
    val q1e1 = q1.map { to => to.flatMap { t => Rep.Some(t.b) }}
    val q1e2 = q1.map { to => to.flatMap { t => t.c }}
    val q1e3 = q1.map(to => Rep.Some(to)).map(_.flatMap(identity))
    val q2e = q2.map { io => io.flatMap { i => Rep.Some(i) }}
    val q1e1t: Query[Rep[Option[String]], _, Seq] = q1e1
    val q1e2t: Query[Rep[Option[Int]], _, Seq] = q1e2
    val q2et: Query[Rep[Option[Int]], _, Seq] = q2e
    lazy val t5 = seq(
      mark("q1e1", q1e1.result).map(_ shouldBe r.map(t => Some(t)).map { to => to.flatMap { t => Some(t._2) }}),
      mark("q1e2", q1e2.result).map(_ shouldBe r.map(t => Some(t)).map { to => to.flatMap { t => t._3 }}),
      mark("q1e3", q1e3.result).map(_ shouldBe r.map(t => Some(t)).map(to => Some(to)).map(_.flatMap(identity))),
      mark("q2e", q2e.result).map(_ shouldBe r.map(t => Some(t._1)).map { io => io.flatMap { i => Some(i) }})
    )

    // Use Option.flatten
    val q1f1 = q1.map { to => Rep.Some(to) }
    val q1f2 = q1.map { to => Rep.Some(to).flatten }
    val q1f3 = q1.map { to => Rep.Some(to) }.map(_.flatten)
    val q2f1 = q2.map { io => Rep.Some(io) }
    val q2f2 = q2.map { io => Rep.Some(io).flatten }
    val q2f3 = q2.map { io => Rep.Some(io) }.map(_.flatten)
    val q1f1t: Query[Rep[Option[Option[X]]], _, Seq] = q1f1
    val q1f2t: Query[Rep[Option[X]], _, Seq] = q1f2
    val q1f3t: Query[Rep[Option[X]], _, Seq] = q1f3
    val q2f1t: Query[Rep[Option[Option[Int]]], _, Seq] = q2f1
    val q2f2t: Query[Rep[Option[Int]], _, Seq] = q2f2
    val q2f3t: Query[Rep[Option[Int]], _, Seq] = q2f3
    lazy val t6 = seq(
      q1f1.result.named("q1f1").map(_ shouldBe Vector(Some(Some((1,"1",Some(1)))), Some(Some((2,"2",Some(2)))), Some(Some((3,"3",None))))),
      q1f2.result.named("q1f2").map(_ shouldBe r.map(t => Some(t)).map { to => Some(to).flatten }),
      q1f3.result.named("q1f3").map(_ shouldBe r.map(t => Some(t)).map { to => Some(to) }.map(_.flatten)),
      q2f1.result.named("q2f1").map(_ shouldBe r.map(t => Some(t._1)).map { io => Some(io) }),
      q2f2.result.named("q2f2").map(_ shouldBe r.map(t => Some(t._1)).map { io => Some(io).flatten }),
      q2f3.result.named("q2f3").map(_ shouldBe r.map(t => Some(t._1)).map { io => Some(io) }.map(_.flatten))
    )

    setup >> t1 >> t2 >> t3 >> t4 >> t5 >> t6
  }

  /** `Option.getOrElse` inside the default projection (`*`) must round-trip
    * through a mapped case class.
    */
  def testGetOrElse = {
    case class Chord(name: String, popularOptions: String, id: Long = -1L)

    class Chords(tag: Tag) extends Table[Chord](tag, "chords") {
      def id = column[Long]("id", O.PrimaryKey, O.AutoInc)
      def name = column[Option[String]]("name")
      def popularOptions = column[Option[String]]("popularOptions")
      def * = (name.getOrElse(""), popularOptions.getOrElse(""), id).mapTo[Chord]
    }
    val chords = TableQuery[Chords]
    val allChords = Set(Chord("maj7", "9 #11"), Chord("m7", "9 11"), Chord("7", "9 13"), Chord("m7b5", "11"), Chord("aug7", "9"), Chord("dim7", ""))

    val minorChords = for {
      chord <- chords if chord.name.startsWith("m7")
    } yield (chord.name.getOrElse(""), chord.popularOptions.getOrElse(""))
    val otherChords = for {
      chord <- chords if !chord.name.startsWith("m7")
    } yield (chord.name.getOrElse(""), chord.popularOptions.getOrElse(""))

    DBIO.seq(
      chords.schema.create,
      chords ++= allChords,
      (minorChords ++ otherChords).result.map(_.toSet shouldBe allChords.map(c => (c.name, c.popularOptions)))
    )
  }
}
| slick/slick | slick-testkit/src/main/scala/com/typesafe/slick/testkit/tests/NestingTest.scala | Scala | bsd-2-clause | 10,302 |
/**
* gilbert - Distributed Linear Algebra on Sparse Matrices
* Copyright (C) 2013 Sebastian Schelter
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.gilbertlang.shell
import org.gilbertlang.operations.{WriteScalarRef, IterationStatePlaceholder, FixpointIteration, VectorwiseMatrixTransformation, CellwiseMatrixMatrixTransformation, ScalarMatrixTransformation, AggregateMatrixTransformation, scalar, rand, ones, WriteMatrix, CellwiseMatrixTransformation, MatrixMult, Transpose, LoadMatrix}
import org.gilbertlang.runtime.Executable
/** Entry point for dumping a Gilbert execution plan to the console. */
object printPlan {

  /** Pretty-prints `executable` and all of its inputs to stdout. */
  def apply(executable: Executable): Unit =
    new PlanPrinter().print(executable)
}
/** Walks an [[Executable]] plan tree and prints one line per node,
  * indented by tree depth and prefixed with the node id.
  */
class PlanPrinter {

  /** Recursively prints `executable` and its inputs, starting at `depth`.
    * Note: the match covers the known transformation types; an unknown
    * [[Executable]] subtype would raise a MatchError.
    */
  def print(executable: Executable, depth: Int = 0): Unit = {
    executable match {
      case transformation: LoadMatrix => {
        printIndented(depth, transformation, "LoadMatrix [" + transformation.path + "]")
      }
      case transformation: FixpointIteration => {
        printIndented(depth, transformation, "FixpointIteration")
        print(transformation.initialState, depth + 1)
        print(transformation.updatePlan, depth + 1)
      }
      case transformation: IterationStatePlaceholder => {
        printIndented(depth, transformation, "IterationState")
      }
      case transformation: CellwiseMatrixTransformation => {
        printIndented(depth, transformation, "CellwiseMatrixOp [" + transformation.operation + "]")
        print(transformation.matrix, depth + 1)
      }
      case transformation: CellwiseMatrixMatrixTransformation => {
        printIndented(depth, transformation, "CellwiseMatrixMatrixTransformation [" + transformation.operation + "]")
        print(transformation.left, depth + 1)
        print(transformation.right, depth + 1)
      }
      case transformation: Transpose => {
        printIndented(depth, transformation, "Transpose")
        print(transformation.matrix, depth + 1)
      }
      case transformation: MatrixMult => {
        printIndented(depth, transformation, "MatrixMult")
        print(transformation.left, depth + 1)
        print(transformation.right, depth + 1)
      }
      case transformation: AggregateMatrixTransformation => {
        printIndented(depth, transformation, "AggregateMatrixOp [" + transformation.operation + "]")
        print(transformation.matrix, depth + 1)
      }
      case transformation: VectorwiseMatrixTransformation => {
        printIndented(depth, transformation, "VectorwiseMatrixTransformation [" + transformation.operation + "]")
        print(transformation.matrix, depth + 1)
      }
      case transformation: ScalarMatrixTransformation => {
        printIndented(depth, transformation, "ScalarMatrixOp [" + transformation.operation + "]")
        print(transformation.matrix, depth + 1)
        print(transformation.scalar, depth + 1)
      }
      case transformation: ones => {
        printIndented(depth, transformation, transformation.toString)
      }
      case transformation: rand => {
        printIndented(depth, transformation, transformation.toString)
      }
      case transformation: WriteMatrix => {
        printIndented(depth, transformation, "WriteMatrix")
        print(transformation.matrix, depth + 1)
      }
      case transformation: scalar => {
        printIndented(depth, transformation, transformation.value.toString)
      }
      case transformation: WriteScalarRef => {
        printIndented(depth, transformation, "WriteScalarRef")
        print(transformation.scalar, depth + 1)
      }
    }
  }

  /** Prints one plan line: `depth` spaces, the node id in parentheses, then `str`. */
  def printIndented(depth: Int, transformation: Executable, str: String) = {
    // " " * depth builds the indentation directly; the previous
    // `"".padTo(depth, " ").mkString` achieved the same result but padded a
    // String with a *String* element, allocating an intermediate Seq[Any].
    println(" " * depth + "(" + transformation.id + ") " + str)
  }
}
| sscdotopen/gilbert | src/main/scala/org/gilbertlang/shell/printPlan.scala | Scala | gpl-3.0 | 4,315 |
// Negative-compilation fixture: a covariant, list-like stub whose `map`
// requires an implicit `BuildFrom` instance, used below to provoke an
// implicit-search failure.
class Lst[+A] {
  def map[B, That](f: A => B)(implicit bf: collection.BuildFrom[List[A], B, That]): That = ???
}
object Test {
  // With B = Int and That = List[String] no suitable BuildFrom instance can
  // be found, so this call must fail to compile. The trailing `// error`
  // marker is checked by the compiler test harness — do not remove or move it.
  def foo(l: Lst[Int]) = l.map[Int, List[String]](x => 1) // error
}
| dotty-staging/dotty | tests/neg/i4986a.scala | Scala | apache-2.0 | 197 |
package systems.opalia.commons
package object number {

  /** Enriches `String` with exception-safe numeric conversions (the `…Opt`
    * variants return `None` instead of throwing `NumberFormatException`),
    * fixed-width hexadecimal parsing (the `…X` variants, via `NumberUtils`)
    * and strict boolean parsing.
    */
  implicit class StringToNumberImprovements(string: String) {

    import scala.util.control.Exception._

    /** Parses a decimal `Byte`, or `None` on malformed input. */
    def toByteOpt: Option[Byte] =
      catching(classOf[NumberFormatException]) opt string.toByte

    /** Parses a decimal `Short`, or `None` on malformed input. */
    def toShortOpt: Option[Short] =
      catching(classOf[NumberFormatException]) opt string.toShort

    /** Parses a decimal `Int`, or `None` on malformed input. */
    def toIntOpt: Option[Int] =
      catching(classOf[NumberFormatException]) opt string.toInt

    /** Parses a decimal `Long`, or `None` on malformed input. */
    def toLongOpt: Option[Long] =
      catching(classOf[NumberFormatException]) opt string.toLong

    /** Parses a `Float`, or `None` on malformed input. */
    def toFloatOpt: Option[Float] =
      catching(classOf[NumberFormatException]) opt string.toFloat

    /** Parses a `Double`, or `None` on malformed input. */
    def toDoubleOpt: Option[Double] =
      catching(classOf[NumberFormatException]) opt string.toDouble

    /** Parses a `BigInt`; throws `NumberFormatException` on malformed input. */
    def toBigInt: BigInt =
      BigInt(string)

    /** Parses a `BigInt`, or `None` on malformed input. */
    def toBigIntOpt: Option[BigInt] =
      catching(classOf[NumberFormatException]) opt BigInt(string)

    /** Parses a `BigDecimal`; throws `NumberFormatException` on malformed input. */
    def toBigDecimal: BigDecimal =
      BigDecimal(string)

    /** Parses a `BigDecimal`, or `None` on malformed input. */
    def toBigDecimalOpt: Option[BigDecimal] =
      catching(classOf[NumberFormatException]) opt BigDecimal(string)

    /** Parses a hexadecimal `Byte` (width-checked by `NumberUtils.hex2Long`). */
    def toByteX: Byte =
      NumberUtils.hex2Long(string, java.lang.Byte.BYTES).toByte

    /** Parses a hexadecimal `Short` (width-checked by `NumberUtils.hex2Long`). */
    def toShortX: Short =
      NumberUtils.hex2Long(string, java.lang.Short.BYTES).toShort

    /** Parses a hexadecimal `Int` (width-checked by `NumberUtils.hex2Long`). */
    def toIntX: Int =
      NumberUtils.hex2Long(string, java.lang.Integer.BYTES).toInt

    /** Parses a hexadecimal `Long` (width-checked by `NumberUtils.hex2Long`). */
    def toLongX: Long =
      NumberUtils.hex2Long(string, java.lang.Long.BYTES)

    /** Like [[toByteX]], but returns `None` on malformed input. */
    def toByteXOpt: Option[Byte] =
      catching(classOf[NumberFormatException]) opt NumberUtils.hex2Long(string, java.lang.Byte.BYTES).toByte

    /** Like [[toShortX]], but returns `None` on malformed input. */
    def toShortXOpt: Option[Short] =
      catching(classOf[NumberFormatException]) opt NumberUtils.hex2Long(string, java.lang.Short.BYTES).toShort

    /** Like [[toIntX]], but returns `None` on malformed input. */
    def toIntXOpt: Option[Int] =
      catching(classOf[NumberFormatException]) opt NumberUtils.hex2Long(string, java.lang.Integer.BYTES).toInt

    /** Like [[toLongX]], but returns `None` on malformed input. */
    def toLongXOpt: Option[Long] =
      catching(classOf[NumberFormatException]) opt NumberUtils.hex2Long(string, java.lang.Long.BYTES)

    /** Parses "true"/"on"/"yes" as `true` and "false"/"off"/"no" as `false`,
      * case-insensitively; any other input throws `NumberFormatException`.
      */
    def toStrictBoolean: Boolean = {
      // Normalize once instead of re-lowercasing for every comparison.
      val normalized = string.toLowerCase
      if (normalized == "true" || normalized == "on" || normalized == "yes")
        true
      else if (normalized == "false" || normalized == "off" || normalized == "no")
        false
      else
        throw new NumberFormatException(s"Cannot get boolean from string: $string")
    }

    /** Like [[toStrictBoolean]], but returns `None` on unrecognized input. */
    def toStrictBooleanOpt: Option[Boolean] =
      catching(classOf[NumberFormatException]) opt string.toStrictBoolean
  }

}
| OpaliaSystems/commons | src/main/scala/systems/opalia/commons/number/package.scala | Scala | apache-2.0 | 2,528 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.cloudstack.vault.server.vault.entities
import java.util.UUID
import com.fasterxml.jackson.annotation.JsonProperty
private[vault] object Token {

  /**
    * Class is used to serialize a request to create a token.
    *
    * @param noDefaultPolicy serialized as Vault's `no_default_policy` flag;
    *                        presumably suppresses attaching the default policy
    *                        to the created token — verify against the Vault API
    * @param policies names of policies from Vault server
    * @param period token lifetime in seconds
    */
  case class TokenInitParameters(@JsonProperty("no_default_policy") noDefaultPolicy: Boolean,
                                 policies: List[String],
                                 period: Int)

  /**
    * Class is used to deserialize a token creation response.
    *
    * @param id token id (mapped from the `client_token` JSON field)
    */
  case class TokenId(@JsonProperty("client_token") id: UUID)

  /**
    * Class is used to deserialize a token lookup response.
    *
    * @param policies names of policies from Vault server
    */
  case class TokenData(policies: List[String])
}
/** Top-level wrapper for a token-creation response (the `auth` JSON object). */
private[vault] case class Token(@JsonProperty("auth") tokenId: Token.TokenId)
/** Top-level wrapper for a token-lookup response (the `data` JSON object). */
private[vault] case class LookupToken(@JsonProperty("data") tokenData: Token.TokenData)
| bwsw/cs-vault-server | src/main/scala/com/bwsw/cloudstack/vault/server/vault/entities/Token.scala | Scala | apache-2.0 | 1,823 |
/*
* Copyright 2011-2014 Chris de Vreeze
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.ebpi.yaidom
/**
* This package contains element representations that contain the "context" of the element. That is, the elements
* in this package are pairs of a root element and a path (to the actual element itself). The "context" of an element
* also contains an optional document URI.
*
* An example of where such a representation can be useful is XML Schema. After all, to interpret an element definition
* in an XML schema, we need context of the element definition to determine the target namespace, or to determine whether the
* element definition is top level, etc.
*
* Below follows a simple example query, using the uniform query API:
* {{{
* // Note the import of package indexed, and not of its members. That is indeed a best practice!
* import nl.ebpi.yaidom.indexed
*
* val indexedBookstoreElem = indexed.Elem(bookstoreElem)
*
* val scalaBookAuthors =
* for {
* bookElem <- indexedBookstoreElem \\ EName("{http://bookstore/book}Book")
* if (bookElem \\@ EName("ISBN")) == Some("978-0981531649")
* authorElem <- bookElem \\\\ EName("{http://bookstore/author}Author")
* } yield authorElem
* }}}
* The query for Scala book authors would have been exactly the same if normal `Elem`s had been used instead of `indexed.Elem`s
* (replacing `indexedBookstoreElem` by `bookstoreElem`)!
*
* There is no explicit functional update support for the indexed elements in this package. Of course the underlying
* elements can be functionally updated (for element implementations that offer such update support), and indexed
* elements can be created from the update results, but this is hardly efficient functional update support.
*
* One problem with efficient functional updates for indexed elements is that updating just one child element means
* that all subsequent child elements may have to be updated as well, adapting the stored paths. In comparison, simple
* elements do not have this restriction, and can be updated in isolation. Hence the functional update support for
* simple elements but not for the different indexed element implementations.
*
* If efficient functional updates on indexed elements are required, consider using the "lazy" indexed elements
* such as `LazyIndexedClarkElem` and `LazyIndexedScopedElem` instead of the "eager" indexed elements `IndexedClarkElem`
* and `IndexedScopedElem`. After all, creation of the lazy indexed elements is fast.
*
* @author Chris de Vreeze
*/
package object indexed {

  // Convenience alias: an indexed element whose underlying element is the
  // default simple.Elem implementation.
  type Elem = IndexedScopedElem[simple.Elem]
}
| EBPI/yaidom | src/main/scala/nl/ebpi/yaidom/indexed/package.scala | Scala | apache-2.0 | 3,142 |
/**
* Copyright (C) 2007 Orbeon, Inc.
*
* This program is free software you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.analysis.controls
import java.{lang ⇒ jl}
import org.orbeon.oxf.xforms.XFormsConstants._
import org.orbeon.oxf.xforms.analysis.ElementAnalysis._
import org.orbeon.oxf.xforms.analysis.SimpleElementAnalysis
import org.orbeon.dom.QName
// Trait for all elements that have an appearance (@appearance attribute) and
// an optional @mediatype attribute.
trait AppearanceTrait extends SimpleElementAnalysis {
  import AppearanceTrait._
  // QNames listed in the element's @appearance attribute, resolved against the
  // element's namespace mapping.
  val appearances = attQNameSet(element, APPEARANCE_QNAME, namespaceMapping)
  // Raw @mediatype attribute value, when present.
  val mediatype = Option(element.attributeValue(MEDIATYPE_QNAME))
  // Appends the encoded CSS class names for all of this element's appearances to `sb`.
  def encodeAndAppendAppearances(sb: jl.StringBuilder) =
    appearances foreach (encodeAndAppendAppearance(sb, localName, _))
}
object AppearanceTrait {
// The client expects long prefixes
private val StandardPrefixes = Map(XXFORMS_NAMESPACE_URI → "xxforms", XFORMS_NAMESPACE_URI → "xforms")
  // Converts the plain-string appearance names to QNames and appends the
  // encoded form of each to `sb`, space-separated.
  def encodeAndAppendAppearances(sb: jl.StringBuilder, lhha: String, appearances: Set[String]): Unit =
    appearances map QName.apply foreach (encodeAndAppendAppearance(sb, lhha, _))
def encodeAndAppendAppearance(sb: jl.StringBuilder, lhha: String, appearance: QName): Unit = {
if (sb.length > 0)
sb.append(' ')
sb.append("xforms-")
sb.append(lhha)
sb.append("-appearance-")
encodeAppearanceValue(sb, appearance)
}
def encodeAppearanceValue(sb: jl.StringBuilder, appearance: QName) = {
// Names in a namespace may get a prefix
val uri = appearance.namespace.uri
if (uri.nonEmpty) {
// Try standard prefixes or else use the QName prefix
val prefix = AppearanceTrait.StandardPrefixes.getOrElse(uri, appearance.namespace.prefix)
if (prefix.nonEmpty) {
sb.append(prefix)
sb.append("-")
}
}
sb.append(appearance.name)
sb
}
} | brunobuzzi/orbeon-forms | xforms/jvm/src/main/scala/org/orbeon/oxf/xforms/analysis/controls/AppearanceTrait.scala | Scala | lgpl-2.1 | 2,421 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.jms.sink.converters
import com.datamountaineer.kcql.FormatType
object JMSMessageConverterFn {
  /**
   * Selects the JMS message converter matching the KCQL `STOREAS` format type.
   *
   * NOTE(review): TEXT maps to JsonMessageConverter (not a plain-text
   * converter) and BINARY shares ObjectMessageConverter with OBJECT — confirm
   * these mappings are intentional.
   * NOTE(review): the match is not exhaustive over FormatType; an unlisted
   * format fails at runtime with a MatchError.
   */
  def apply(storedAs: FormatType): JMSMessageConverter = {
    storedAs match {
      case FormatType.AVRO => new AvroMessageConverter
      case FormatType.JSON => new JsonMessageConverter
      case FormatType.OBJECT => new ObjectMessageConverter
      case FormatType.BINARY => new ObjectMessageConverter
      case FormatType.TEXT => new JsonMessageConverter
      case FormatType.MAP => new MapMessageConverter
    }
  }
}
| CodeSmell/stream-reactor | kafka-connect-jms/src/main/scala/com/datamountaineer/streamreactor/connect/jms/sink/converters/JMSMessageConverterFn.scala | Scala | apache-2.0 | 1,171 |
package spire
package laws
import java.math.BigInteger
import spire.math.extras.{FixedPoint, FixedScale}
import spire.algebra._
import spire.algebra.free._
import spire.math._
import spire.math.interval.{Bound, Closed, Open, Unbound}
import org.scalacheck.Arbitrary
/**
 * ScalaCheck `Arbitrary` instances for Spire's number types and structures.
 * Every instance simply lifts the corresponding generator from the `gen`
 * object into implicit scope; no generation logic lives here.
 */
object arb {
  // Unsigned integer types
  implicit val ubyte: Arbitrary[UByte] =
    Arbitrary(gen.ubyte)
  implicit val ushort: Arbitrary[UShort] =
    Arbitrary(gen.ushort)
  implicit val uint: Arbitrary[UInt] =
    Arbitrary(gen.uint)
  implicit val ulong: Arbitrary[ULong] =
    Arbitrary(gen.ulong)
  // Three-valued logic
  implicit val trilean: Arbitrary[Trilean] =
    Arbitrary(gen.trilean)
  // Fixed-point extras
  implicit val fixedScale: Arbitrary[FixedScale] =
    Arbitrary(gen.fixedScale)
  implicit val fixedPoint: Arbitrary[FixedPoint] =
    Arbitrary(gen.fixedPoint)
  // Arbitrary-precision and exact number types
  implicit val bigInteger: Arbitrary[BigInteger] =
    Arbitrary(gen.bigInteger)
  implicit val safeLong: Arbitrary[SafeLong] =
    Arbitrary(gen.safeLong)
  implicit val natural: Arbitrary[Natural] =
    Arbitrary(gen.natural)
  implicit val rational: Arbitrary[Rational] =
    Arbitrary(gen.rational)
  implicit val number: Arbitrary[Number] =
    Arbitrary(gen.number)
  implicit val algebraic: Arbitrary[Algebraic] =
    Arbitrary(gen.algebraic)
  implicit val real: Arbitrary[Real] =
    Arbitrary(gen.real)
  implicit val sign: Arbitrary[Sign] =
    Arbitrary(gen.sign)
  // Polynomials
  implicit def term[A: Arbitrary]: Arbitrary[poly.Term[A]] =
    Arbitrary(gen.term[A])
  implicit def polynomial[A: Arbitrary: Semiring: Eq: ClassTag]: Arbitrary[Polynomial[A]] =
    Arbitrary(gen.polynomial[A])
  // Composite number types
  implicit def complex[A: Arbitrary]: Arbitrary[Complex[A]] =
    Arbitrary(gen.complex[A])
  // NOTE(review): delegates to gen.jet2 (not gen.jet) — presumably intentional,
  // but worth confirming against the generator definitions.
  implicit def jet[A: Arbitrary: ClassTag]: Arbitrary[Jet[A]] =
    Arbitrary(gen.jet2[A])
  implicit def quaternion[A: Arbitrary]: Arbitrary[Quaternion[A]] =
    Arbitrary(gen.quaternion[A])
  // Intervals
  implicit def bound[A: Arbitrary]: Arbitrary[Bound[A]] =
    Arbitrary(gen.bound[A])
  implicit def interval[A: Arbitrary: Order]: Arbitrary[Interval[A]] =
    Arbitrary(gen.interval[A])
  // Free structures
  implicit def freeMonoid[A: Arbitrary]: Arbitrary[FreeMonoid[A]] =
    Arbitrary(gen.freeMonoid[A])
  implicit def freeGroup[A: Arbitrary]: Arbitrary[FreeGroup[A]] =
    Arbitrary(gen.freeGroup[A])
  implicit def freeAbGroup[A: Arbitrary]: Arbitrary[FreeAbGroup[A]] =
    Arbitrary(gen.freeAbGroup[A])
  // Permutations
  implicit def perm: Arbitrary[Perm] =
    Arbitrary(gen.perm)
}
| tixxit/spire | laws/src/main/scala/spire/laws/arb.scala | Scala | mit | 2,436 |
package io.cumulus.controllers.payloads
import play.api.libs.json.{Json, Reads}
// Credentials submitted by a client on login.
case class LoginPayload(
  login: String,
  password: String
)
object LoginPayload {
  // JSON reader derived from the case class; both fields are required.
  implicit val reader: Reads[LoginPayload] =
    Json.reads[LoginPayload]
}
| Cumulus-Cloud/cumulus | server/cumulus-server/src/main/scala/io/cumulus/controllers/payloads/LoginPayload.scala | Scala | mit | 246 |
package spark.partial
import cern.jet.stat.Probability
import spark.util.StatCounter
/**
 * An ApproximateEvaluator for means.
 *
 * Aggregates per-task StatCounters and reports the running mean together with
 * a confidence interval that reflects how many of the `totalOutputs` task
 * results have been merged so far.
 */
private[spark] class MeanEvaluator(totalOutputs: Int, confidence: Double)
  extends ApproximateEvaluator[StatCounter, BoundedDouble] {
  // Number of task results merged so far.
  var outputsMerged = 0
  // Aggregated count/mean/variance over all merged task results.
  var counter = new StatCounter
  // Folds one task's statistics into the running aggregate. `outputId` is unused.
  override def merge(outputId: Int, taskResult: StatCounter) {
    outputsMerged += 1
    counter.merge(taskResult)
  }
  override def currentResult(): BoundedDouble = {
    if (outputsMerged == totalOutputs) {
      // All tasks reported: the mean is exact, so the interval collapses to a point.
      new BoundedDouble(counter.mean, 1.0, counter.mean, counter.mean)
    } else if (outputsMerged == 0) {
      // Nothing reported yet: zero confidence, unbounded interval.
      new BoundedDouble(0, 0.0, Double.NegativeInfinity, Double.PositiveInfinity)
    } else {
      // Partial results: confidence interval around the sample mean using the
      // standard error of the mean.
      val mean = counter.mean
      val stdev = math.sqrt(counter.sampleVariance / counter.count)
      val confFactor = {
        if (counter.count > 100) {
          // Large sample: normal approximation.
          Probability.normalInverse(1 - (1 - confidence) / 2)
        } else {
          // Small sample: Student's t with (count - 1) degrees of freedom.
          // NOTE(review): if counter.count <= 1, mean/stdev are NaN and the
          // degrees of freedom are <= 0 — presumably callers only query after
          // at least one non-empty partition; verify.
          Probability.studentTInverse(1 - confidence, (counter.count - 1).toInt)
        }
      }
      val low = mean - confFactor * stdev
      val high = mean + confFactor * stdev
      new BoundedDouble(mean, confidence, low, high)
    }
  }
}
| koeninger/spark | core/src/main/scala/spark/partial/MeanEvaluator.scala | Scala | bsd-3-clause | 1,228 |
/*
* Copyright 2015 Priyesh Patel
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package me.priyesh.litho
package core
import java.io.File
import java.nio.file._
import me.priyesh.litho.Strings._
import me.priyesh.litho.core.FontStyle._
object Packager {
  /**
   * Packages the fonts in `folderName` into a new "<folderName>FontPack"
   * folder, failing early when the folder is missing or any font has invalid
   * mac styles.
   *
   * NOTE(review): the CanFail returned by packageFonts is discarded, so
   * PackageWasCreated is printed and `succeeded` returned even when
   * packageFonts failed (e.g. not enough styles) — confirm this is intended.
   */
  def buildPackage(folderName: String): CanFail = {
    if (!FontLoader.folderExists(folderName)) {
      println(ErrorCantFindFolder)
      failed
    } else {
      val filesAndStyles = FontLoader.filesAndStylesFromFolder(folderName)
      if (filesAndStyles.exists(f => !Verifier.fontIsValid(f._1, f._2))) {
        println(WarningInvalidMacStyles)
        failed
      } else {
        packageFonts(folderName, filesAndStyles.map(_.swap).toMap)
        println(PackageWasCreated)
        succeeded
      }
    }
  }
  // True when the mandatory basic styles are all present.
  private def enoughStylesProvided(styles: Set[FontStyle]): Boolean = BasicStyles subsetOf styles
  /**
   * Copies the provided style files into the pack folder and fills every
   * missing style with its closest provided fallback.
   */
  private def packageFonts(folderName: String, stylesToFiles: Map[FontStyle, File]): CanFail = {
    val packagedFolder = new File(s"./${folderName}FontPack/")
    // NOTE(review): mkdirs() result is ignored; a failed mkdir surfaces later
    // as a copy error.
    packagedFolder.mkdirs()
    val providedStyles = stylesToFiles.keySet
    def createDestinationFile(name: String): File = new File(s"${packagedFolder.getPath}/$name")
    if (enoughStylesProvided(providedStyles)) {
      // Copy all the styles that have already been provided
      stylesToFiles.foreach(styleToFile => copyFile(styleToFile._2, createDestinationFile(styleToFile._1.name)))
      // Aggregate the styles that haven't been provided
      val stylesToGenerate = AllStyles diff providedStyles
      stylesToGenerate.foreach(style => {
        copyFile(stylesToFiles.get(getFallbackStyle(style, providedStyles)).get, createDestinationFile(style.name))
      })
      succeeded
    } else {
      println(ErrorNotEnoughStylesProvided)
      failed
    }
  }
  // First fallback for `style` that is actually provided.
  // NOTE(review): the chained .get calls assume StyleFallbackMap covers every
  // style and every fallback chain contains a provided style; otherwise this
  // throws NoSuchElementException.
  private def getFallbackStyle(style: FontStyle, providedStyles: Set[FontStyle]): FontStyle =
    StyleFallbackMap.get(style).get.find(providedStyles.contains).get
  // Copies `source` over `dest`, replacing any existing file.
  private def copyFile(source: File, dest: File): CanFail = {
    def fileToPath(file: File): Path = Paths.get(file.toURI)
    CanFail(Files.copy(fileToPath(source), fileToPath(dest), StandardCopyOption.REPLACE_EXISTING))
  }
}
| ItsPriyesh/Litho | src/me/priyesh/litho/core/Packager.scala | Scala | apache-2.0 | 2,727 |
/**
* This file is part of agora_elections.
* Copyright (C) 2014-2016 Agora Voting SL <agora@agoravoting.com>
* agora_elections is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License.
* agora_elections is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
* You should have received a copy of the GNU Affero General Public License
* along with agora_elections. If not, see <http://www.gnu.org/licenses/>.
**/
package utils
import play.api.libs.json._
import play.api.libs.functional.syntax._
import models._
import java.sql.Timestamp
import scala.math.BigInt
import org.cvogt.play.json.Jsonx
import org.cvogt.play.json.implicits.optionNoError
import play.api.libs._
import json._
/**
 * Play-JSON formatters (Format/Reads/Writes) for the election domain model.
 * Importing `JsonFormatters._` brings every implicit below into scope for
 * request/response (de)serialization.
 */
object JsonFormatters {
  // Timestamp wire layout, e.g. "2016-01-31T23:59:59.999".
  // NOTE(review): SimpleDateFormat is mutable and not thread-safe; sharing this
  // single instance across request threads can corrupt parses — consider a
  // ThreadLocal or java.time.format.DateTimeFormatter.
  val dateFormat = new java.text.SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS")

  /** Format for java.sql.Timestamp using `dateFormat`. */
  implicit val formatTimestamp = new Format[Timestamp] {
    def writes(ts: Timestamp): JsValue = {
      JsString(dateFormat.format(ts))
    }

    def reads(ts: JsValue): JsResult[Timestamp] = {
      try {
        val date = dateFormat.parse(ts.as[String])
        JsSuccess(new Timestamp(date.getTime))
      } catch {
        // Bug fix: SimpleDateFormat.parse signals malformed input with
        // java.text.ParseException, which the previous catch (only
        // IllegalArgumentException) never intercepted — bad timestamps escaped
        // as thrown exceptions instead of yielding a JsError.
        case _: java.text.ParseException | _: IllegalArgumentException =>
          JsError("Unable to parse timestamp")
      }
    }
  }

  /** Format for BigInt, serialized as a JSON string to avoid precision loss. */
  implicit val formatBigInt = new Format[BigInt] {
    def writes(bi: BigInt): JsValue = JsString(bi.toString())

    def reads(bi: JsValue): JsResult[BigInt] = {
      try {
        JsSuccess(BigInt(bi.as[String]))
      } catch {
        // BigInt(String) throws NumberFormatException, a subclass of
        // IllegalArgumentException, so this catch is sufficient here.
        case e: IllegalArgumentException => JsError("Unable to parse BigInt")
      }
    }
  }

  // Auto-derived formats for the domain model case classes.
  implicit val voteDtoF = Json.format[VoteDTO]
  implicit val dateDtoF = Json.format[DateDTO]
  implicit val voteF = Json.format[Vote]
  implicit val electionExtraF = Json.format[ElectionExtra]
  implicit val questionConditionF = Json.format[QuestionCondition]
  implicit val conditionalQuestionF = Json.format[ConditionalQuestion]
  implicit val electionF = Json.format[Election]
  implicit val urlF = Json.format[Url]
  implicit val answerF = Json.format[Answer]
  // QuestionExtra has more than 22 fields, hence Jsonx instead of Json.format.
  implicit val qExtraF = Jsonx.formatCaseClass[QuestionExtra]
  implicit val questionF = Json.format[Question]
  implicit val ShareTextItemF = Json.format[ShareTextItem]
  implicit val presentationF = Json.format[ElectionPresentation]
  implicit val configF = Json.format[ElectionConfig]
  implicit val statDayF = Json.format[StatDay]
  implicit val statsF = Json.format[Stats]
  implicit val electionDtoF = Json.format[ElectionDTO]
  implicit val publicKeyF = Json.format[PublicKey]
  implicit val publicKeySessionF = Json.format[PublicKeySession]
  implicit val createResponseF = Json.format[CreateResponse]
  implicit val popkF = Json.format[Popk]
  implicit val choiceF = Json.format[Choice]
  implicit val encryptedVoteF = Json.format[EncryptedVote]
  implicit val tallyDataF = Json.format[TallyData]
  implicit val tallyResponseF = Json.format[TallyResponse]
  implicit val authDataF = Json.format[AuthData]

  // PlaintextAnswer is (de)serialized through its single "options" field.
  implicit val plaintextAnswerW : Writes[PlaintextAnswer] =
    (JsPath \\ "options").write[Array[Long]] contramap { (t: PlaintextAnswer) => t.options }
  implicit val plaintextAnswerR : Reads[PlaintextAnswer] =
    (JsPath \\ "options").read[Array[Long]] map (PlaintextAnswer.apply )
  implicit val PlaintextBallotF = Json.format[PlaintextBallot]
  implicit val callbackF = Json.format[Callback]

  // Option writers: None is emitted as JSON null rather than omitted.
  implicit val writeOptionString = new Writes[Option[String]] {
    def writes(ts: Option[String]): JsValue = {
      ts match {
        case Some(value) => JsString(value)
        case None => JsNull
      }
    }
  }
  implicit val writeOptionBool = new Writes[Option[Boolean]] {
    def writes(ts: Option[Boolean]): JsValue = {
      ts match {
        case Some(value) => JsBoolean(value)
        case None => JsNull
      }
    }
  }
  // Lenient reader: malformed share-text arrays read as None instead of failing.
  implicit val readOptionShares: Reads[Option[Array[ShareTextItem]]] = optionNoError[Array[ShareTextItem]]
}
| agoravoting/agora_elections | app/utils/JsonFormatters.scala | Scala | agpl-3.0 | 4,273 |
package me.frmr.wepay.api {
import org.scalatest.FunSpec
import net.liftweb.common._
import me.frmr.wepay._
import WePayTestHelpers._
import me.frmr.wepay.api._
class WithdrawalSpec extends FunSpec {
implicit val authorizationToken = testAuthorizationToken
describe("A Withdrawal") {
var testWithdrawalId = 0l
it("should be creatable") {
val saveResponse = Withdrawal(testAccountId).save
assert(saveResponse match {
case Full(WithdrawalResponse(withdrawal_id, _)) =>
testWithdrawalId = withdrawal_id
true
case resp @ _ =>
false
}, saveResponse)
}
it("should be retrievable after creation") {
val retrieval = Withdrawal.find(testWithdrawalId)
assert(retrieval match {
case Full(_:Withdrawal) =>
true
case _ =>
false
}, retrieval)
}
}
}
}
| farmdawgnation/wepay-scala | src/test/scala/me/frmr/wepay/api/WithdrawalSpec.scala | Scala | apache-2.0 | 951 |
// Negative (compile-error) test for named type parameters / named type
// arguments. Every `// error` marker below is an assertion consumed by the
// compiler-test runner: the code is *meant* to be rejected — do not "fix" it.
class C[T]
class D[type T] // error: identifier expected, but `type` found
object Test {
  val x: C[T = Int] = // error: ']' expected, but `=` found // error
    new C[T = Int] // error: ']' expected, but `=` found // error
  class E extends C[T = Int] // error: ']' expected, but `=` found // error
  class F extends C[T = Int]() // error: ']' expected, but `=` found // error
  def f[X, Y](x: X, y: Y): Int = ???
  f[X = Int, String](1, "") // error // error
  f[X = Int][X = Int][Y = String](1, "") // error: illegal repeated type application
  f[X = Int][Y = String](1, "") // error: illegal repeated type application
  f[X = Int][String](1, "") // error: illegal repeated type application
  f[Y = String][X = Int](1, "") // error: illegal repeated type application
  f[Y = String][Int](1, "") // error: illegal repeated type application
}
| som-snytt/dotty | tests/neg/namedTypeParams.scala | Scala | apache-2.0 | 853 |
package io.vamp.pulse
import java.io.{ ByteArrayInputStream, ByteArrayOutputStream, StringReader }
import java.security.{ KeyStore, SecureRandom }
import org.json4s.ext.EnumNameSerializer
import akka.actor.Actor
import akka.http.scaladsl.{ ConnectionContext, HttpsConnectionContext }
import io.nats.client.{ Nats, Options }
import io.nats.streaming.{ AckHandler, StreamingConnection, StreamingConnectionFactory }
import io.vamp.common.akka.IoC
import io.vamp.common.json.{ OffsetDateTimeSerializer, SerializationFormat }
import io.vamp.common.vitals.{ InfoRequest, StatsRequest }
import io.vamp.common.{ ClassMapper, Config, ConfigMagnet }
import io.vamp.model.event._
import io.vamp.model.resolver.NamespaceValueResolver
import io.vamp.pulse.Percolator.{ GetPercolator, RegisterPercolator, UnregisterPercolator }
import io.vamp.pulse.notification._
import javax.net.ssl.{ KeyManagerFactory, SSLContext, TrustManager, TrustManagerFactory }
import org.json4s.native.Serialization.write
import org.json4s.{ DefaultFormats, Extraction, Formats }
import scala.concurrent.Future
import scala.util.{ Random, Try }
import java.io.{ ByteArrayInputStream, ByteArrayOutputStream, StringReader }
import java.security.cert.{ Certificate, CertificateFactory }
import java.security.{ KeyStore, SecureRandom }
import javax.net.ssl.{ KeyManagerFactory, SSLContext, TrustManagerFactory }
import org.bouncycastle.cert.X509CertificateHolder
import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter
import org.bouncycastle.jce.provider.BouncyCastleProvider
import org.bouncycastle.openssl.{ PEMKeyPair, PEMParser }
import org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter
class NatsPublisherPulseActorMapper extends ClassMapper {
val name = "natspublisher"
val clazz: Class[_] = classOf[NatsPublisherPulseActor]
}
object NatsPublisherPulseActor {
val config: String = PulseActor.config
val subjectPrefixLayout = "${namespace}"
val natsUrl: ConfigMagnet[String] = Config.string(s"$config.nats.url")
val clusterId: ConfigMagnet[String] = Config.string(s"$config.nats.cluster-id")
val clientId: ConfigMagnet[String] = Config.string(s"$config.nats.client-id")
val token: ConfigMagnet[String] = Config.string(s"$config.nats.token")
val caCert: ConfigMagnet[String] = Config.string(s"$config.nats.ca-cert")
val clientCert: ConfigMagnet[String] = Config.string(s"$config.nats.client-cert")
val clientKey: ConfigMagnet[String] = Config.string(s"$config.nats.client-key")
val username: ConfigMagnet[String] = Config.string(s"$config.nats.username")
val password: ConfigMagnet[String] = Config.string(s"$config.nats.password")
def getTrustManager(sslCaCert: ByteArrayInputStream): Array[TrustManager] = {
val password = "change me".toCharArray
val certificateFactory: CertificateFactory = CertificateFactory.getInstance("X.509")
val certificates = certificateFactory.generateCertificates(sslCaCert)
/*
Empty ca cert is allowed
if (certificates.isEmpty())
throw new IllegalArgumentException("expected non-empty set of trusted certificates")
*/
val caKeyStore: KeyStore = newEmptyKeyStore(password)
var index: Int = 0
val var8 = certificates.iterator()
while (var8.hasNext()) {
val certificate: Certificate = var8.next()
val certificateAlias = "ca" + Integer.toString(index)
caKeyStore.setCertificateEntry(certificateAlias, certificate)
index += 1
}
val trustManagerFactory: TrustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm())
trustManagerFactory.init(caKeyStore)
val trustManagers = trustManagerFactory.getTrustManagers()
trustManagers
}
def newEmptyKeyStore(password: Array[Char]): KeyStore = {
val keyStore: KeyStore = KeyStore.getInstance(KeyStore.getDefaultType())
keyStore.load(null, password)
keyStore
}
  /**
   * Converts a PEM private key plus PEM certificate into an in-memory PKCS12
   * keystore protected by `password`, returned as a byte array. PEM is the
   * common input format, but the key managers built in getSslContext load a
   * PKCS12 store, hence this conversion.
   * TODO: check keystore if this method is not actually needed.
   *
   * @param keyString  PEM-encoded private key
   * @param cerString  PEM-encoded server certificate
   * @param password   password protecting the resulting keystore (default in
   *                   callers is "change me")
   * @return PKCS12 keystore content as a byte array
   */
  def convertPEMToPKCS12(keyString: String, cerString: String, password: String) = { // Get the private key
    var reader = new StringReader(keyString)
    var pem = new PEMParser(reader)
    // NOTE(review): the cast assumes the PEM contains a key *pair*; a
    // PKCS#8-only private key would come back as a different type and fail here.
    val pemKeyPair = pem.readObject.asInstanceOf[PEMKeyPair]
    val provider = new BouncyCastleProvider()
    val jcaPEMKeyConverter = new JcaPEMKeyConverter().setProvider(provider)
    val keyPair = jcaPEMKeyConverter.getKeyPair(pemKeyPair)
    val key = keyPair.getPrivate
    pem.close()
    reader.close()
    // Get the certificate
    reader = new StringReader(cerString)
    pem = new PEMParser(reader)
    val certHolder = pem.readObject.asInstanceOf[X509CertificateHolder]
    val X509Certificate = new JcaX509CertificateConverter().setProvider(provider).getCertificate(certHolder)
    pem.close()
    reader.close()
    // Put them into a PKCS12 keystore and write it to a byte[]
    val bos = new ByteArrayOutputStream()
    val ks = KeyStore.getInstance("PKCS12")
    ks.load(null)
    val certs = new Array[java.security.cert.Certificate](1)
    certs(0) = X509Certificate
    ks.setKeyEntry("alias", key.asInstanceOf[java.security.Key], password.toCharArray, certs)
    ks.store(bos, password.toCharArray)
    bos.close
    bos.toByteArray
  }
  /**
   * Builds the TLS SSLContext described by the configured PEM material.
   * Returns null when all three inputs are empty (no TLS configured).
   *
   * @param keyString PEM client private key (used only together with cerString)
   * @param cerString PEM client certificate (mutual TLS)
   * @param caString  PEM CA certificate(s) for server verification
   * @return an initialized SSLContext, or null
   */
  def getSslContext(keyString: String, cerString: String, caString: String) = {
    if (keyString.nonEmpty || cerString.nonEmpty || caString.nonEmpty) {
      // Client key managers only when both key and certificate are present.
      // A null array makes SSLContext.init fall back to the JVM defaults.
      val keyManagers = {
        if (keyString.nonEmpty && cerString.nonEmpty) {
          val password = "change me"
          val p12 = convertPEMToPKCS12(keyString, cerString, password)
          val keystore = new ByteArrayInputStream(p12)
          val ks = KeyStore.getInstance("PKCS12")
          ks.load(keystore, password.toCharArray)
          val keyManagerFactory = KeyManagerFactory.getInstance("SunX509")
          keyManagerFactory.init(ks, password.toCharArray)
          keyManagerFactory.getKeyManagers
        }
        else {
          null
        }
      }
      // Custom trust managers only when a CA bundle is configured; null again
      // means "use JVM default trust store".
      val trustManagers = if (caString.nonEmpty) {
        getTrustManager(new ByteArrayInputStream(caString.getBytes))
      }
      else {
        null
      }
      val sslContext = SSLContext.getInstance("TLS")
      sslContext.init(keyManagers, trustManagers, new SecureRandom)
      sslContext
    }
    else {
      null
    }
  }
}
/**
* NATS Pulse Actor pushes messages to NATS and also forwards other types of messages to Elasticsearch
*
* If you are here since messages are slow check this:
* https://github.com/nats-io/java-nats-streaming#linux-platform-note
*/
class NatsPublisherPulseActor extends NamespaceValueResolver with PulseActor with PulseActorPublisher {
import PulseActor._
/*
This was actually a generated id for Elasticsearch index names
To be compatible with Elasticsearch, the same structure is used with a fixed layout
*/
lazy val subjectPrefixL: String = resolveWithNamespace(NatsPublisherPulseActor.subjectPrefixLayout, lookup = true)
lazy val randomId = Random.alphanumeric.take(5).mkString("")
lazy val clusterId = NatsPublisherPulseActor.clusterId()
lazy val clientId = s"${NatsPublisherPulseActor.clientId()}_${namespace.name}_$randomId"
lazy val token = getOrElseNil(NatsPublisherPulseActor.token())
lazy val username = getOrElseNil(NatsPublisherPulseActor.username())
lazy val password = getOrElseNil(NatsPublisherPulseActor.password())
lazy val clientCert = NatsPublisherPulseActor.clientCert()
lazy val clientKey = NatsPublisherPulseActor.clientKey()
lazy val caCert = NatsPublisherPulseActor.caCert()
lazy val clientKeyContent: String = readFileIfExists(clientKey)
lazy val clientCertContent: String = readFileIfExists(clientCert)
lazy val caCertContent: String = readFileIfExists(caCert)
def getOrElseNil(input: String): String = {
if (input == null || input.isEmpty()) {
null
}
else {
input
}
}
def readFileIfExists(path: String): String = {
if (path.nonEmpty) {
scala.io.Source.fromFile(path).mkString
}
else {
""
}
}
lazy val natsUrl = NatsPublisherPulseActor.natsUrl()
lazy val cf = {
val scf = new StreamingConnectionFactory(clusterId, clientId)
val ctx = NatsPublisherPulseActor.getSslContext(clientKeyContent, clientCertContent, caCertContent)
scf.setNatsUrl(natsUrl)
val o = new Options.Builder()
.server(natsUrl)
.userInfo(username, password)
.token(token)
.sslContext(ctx)
.maxReconnects(-1)
.build
val nc = Nats.connect(o)
scf.setNatsConnection(nc)
scf
}
/**
* Starts a logical connection to the NATS cluster
* Documentation: https://github.com/nats-io/java-nats-streaming#basic-usage
*/
lazy val sc: StreamingConnection = cf.createConnection
  // Ack handler invoked when NATS acknowledges (or fails) an asynchronous
  // publish. On failure the message is re-published with a fresh handler
  // carrying a decremented retry budget (default 5 attempts). Defined as a def
  // rather than a val to avoid sharing handler state across concurrent
  // publishes.
  def ackHandler(subject: String, message: String, count: Int = 5): AckHandler = new AckHandler() {
    override def onAck(guid: String, err: Exception): Unit = {
      if (err != null) {
        logger.error("Error publishing msg id %s: %s %d".format(guid, err.getMessage, count))
        // Retry while budget remains; each retry re-enters this handler with count - 1.
        if (count > 0)
          sc.publish(subject, message.getBytes, ackHandler(subject, message, count - 1))
      }
      else logger.info("Received ack for msg id %s ".format(guid))
    }
  }
  override def postStop(): Unit = {
    // Best-effort close of the streaming connection; Try swallows any close
    // failure so actor shutdown never throws.
    Try(sc.close())
  }
  // Message protocol: publishes events to NATS itself; delegates stats and
  // queries to the supporting (Elasticsearch-backed) PulseActorSupport actor;
  // manages percolator registrations locally.
  def receive: Actor.Receive = {
    case InfoRequest ⇒ reply(info)
    case StatsRequest ⇒ IoC.actorFor[PulseActorSupport].forward(StatsRequest)
    case Publish(event, publishEventValue) ⇒ reply((validateEvent andThen publish(publishEventValue) andThen broadcast(publishEventValue))(Event.expandTags(event)), classOf[EventIndexError])
    case Query(envelope) ⇒ IoC.actorFor[PulseActorSupport].forward(Query(envelope))
    case GetPercolator(name) ⇒ reply(Future.successful(getPercolator(name)))
    case RegisterPercolator(name, tags, kind, message) ⇒ registerPercolator(name, tags, kind, message)
    case UnregisterPercolator(name) ⇒ unregisterPercolator(name)
    case any ⇒ unsupported(UnsupportedPulseRequest(any))
  }
  // Backend descriptor reported for InfoRequest.
  private def info = Future { Map[String, Any]("type" → "nats", "nats" → clusterId) }
  // Serializes the event and publishes it asynchronously to NATS streaming.
  // The subject is "<namespace>-<event type>[-gateways]" with spaces replaced
  // by underscores; delivery failures are retried by ackHandler.
  private def publish(publishEventValue: Boolean)(event: Event): Future[Any] = Future {
    implicit val formats: Formats = SerializationFormat(OffsetDateTimeSerializer, new EnumNameSerializer(Aggregator))
    // Extra fields merged into the serialized event. When publishEventValue is
    // false the event value is stripped from the payload.
    val attachment = (publishEventValue, event.value) match {
      case (true, str: String) ⇒ Map(typeName → str)
      case (true, any) ⇒ Map("value" → write(any)(DefaultFormats), typeName → (if (typeName == Event.defaultType) "" else any))
      case (false, _) ⇒ Map("value" → "")
    }
    val data = Extraction.decompose(if (publishEventValue) event else event.copy(value = None)) merge Extraction.decompose(attachment)
    val subject = {
      val prefix = s"$subjectPrefixL-${event.`type`}"
      val postfix = event.`type` match {
        case Event.defaultType if event.tags.contains("gateways") ⇒ "-gateways"
        case _ ⇒ ""
      }
      s"$prefix$postfix"
    }.replace(" ", "_")
    val message = bodyAsString(data).getOrElse("")
    logger.info(s"Pulse publish an event with subject $subject and message: $message")
    // This is a synchronous (blocking) call
    // This can throw an exception currently it is unhandled so actor will be restarted with the same message
    // sc.publish(subject, message.getBytes)
    // logger.info(s" Pulse published an event with subject $subject")
    // Testing: following method is asynchronous, try asynchronous connections later if feasible
    val guid = sc.publish(subject, message.getBytes, ackHandler(subject, message))
    // The NATS guid doubles as the event id returned to the caller.
    event.copy(id = Option(guid))
  }
  // After publication, feeds the resulting Event through the percolators so
  // registered listeners are notified; non-Event results pass through untouched.
  private def broadcast(publishEventValue: Boolean): Future[Any] ⇒ Future[Any] = _.map {
    case event: Event ⇒ percolate(publishEventValue)(event)
    case other ⇒ other
  }
  // this is copied from httpClient
  // Renders an arbitrary value to a string when possible: plain strings (bare
  // or in Some) pass through; other non-null/non-None references are serialized
  // to JSON with json4s `write`; remaining non-null values fall back to
  // toString; null/None yield None.
  private def bodyAsString(body: Any)(implicit formats: Formats): Option[String] = body match {
    case string: String ⇒ Some(string)
    case Some(string: String) ⇒ Some(string)
    case Some(some: AnyRef) ⇒ Some(write(some))
    case any: AnyRef if any != null && any != None ⇒ Some(write(any))
    case any if any != null && any != None ⇒ Some(any.toString)
    case _ ⇒ None
  }
} | magneticio/vamp | nats/src/main/scala/io/vamp/pulse/NatsPublisherPulseActor.scala | Scala | apache-2.0 | 13,144 |
package com.arcusys.valamis.certificate.storage
import com.arcusys.valamis.certificate.model.goal.GoalGroup
import com.arcusys.valamis.model.PeriodTypes._
trait CertificateGoalGroupRepository {
def get(certificateId: Long): Seq[GoalGroup]
def create(count: Int,
certificateId: Long,
periodValue: Int,
periodType: PeriodType,
arrangementIndex: Int): Long
def delete(id: Long): Unit
def update(goalGroup: GoalGroup): GoalGroup
def updateGoals(groupId: Long, goalIds: Seq[Long]): Unit
}
| igor-borisov/valamis | valamis-certificate/src/main/scala/com/arcusys/valamis/certificate/storage/CertificateGoalGroupRepository.scala | Scala | gpl-3.0 | 548 |
package im.actor.server.push
import akka.util.Timeout
import im.actor.server.sequence.SeqState
import im.actor.server.{ ActorSpecification, ActorSuite }
import org.scalatest.time.{ Span, Seconds }
import scala.concurrent.duration._
import akka.pattern.ask
import akka.testkit._
import com.typesafe.config._
import im.actor.api.{ rpc ⇒ api }
import im.actor.server.SqlSpecHelpers
import im.actor.server.api.ActorSpecHelpers
/** Integration spec for the sequenced-updates manager actor.
  * Verifies that the sequence number (a) increments per pushed update and
  * (b) is consistent under concurrent pushes (no two callers observe the
  * same seq). Seq values such as 1000/2002/3603 encode the manager's
  * internal numbering scheme (observed behavior — the spec pins it as-is). */
class SeqUpdatesManagerSpec extends ActorSuite(
  {
    ActorSpecification.createSystem(
      ConfigFactory.parseString("""
        push.seq-updates-manager.receive-timeout = 1 second
      """)
    )
  }
) with SqlSpecHelpers with ActorSpecHelpers {
  behavior of "SeqUpdatesManager"
  it should "increment seq on update push" in e1
  it should "not reply with seq of the ongoing update (concurrency problem)" in e2
  import SeqUpdatesManager._
  implicit val (ds, db) = migrateAndInitDb()
  implicit val timeout: Timeout = Timeout(5.seconds)
  override implicit def patienceConfig: PatienceConfig =
    new PatienceConfig(timeout = Span(10, Seconds))
  val region = buildSeqUpdManagerRegion()
  val probe = TestProbe()
  def e1() = {
    val authId = util.Random.nextLong()
    val update = api.contacts.UpdateContactsAdded(Vector(1, 2, 3))
    val (userIds, groupIds) = updateRefs(update)
    {
      probe.send(region.ref, Envelope(authId, PushUpdateGetSequenceState(update.header, update.toByteArray, userIds, groupIds, None, None, isFat = false)))
      val msg = probe.receiveOne(5.seconds).asInstanceOf[SeqState]
      msg.seq should ===(1000)
    }
    {
      probe.send(region.ref, Envelope(authId, PushUpdateGetSequenceState(update.header, update.toByteArray, userIds, groupIds, None, None, isFat = false)))
      val msg = probe.receiveOne(1.second).asInstanceOf[SeqState]
      msg.seq should ===(1001)
    }
    // let the manager passivate (receive-timeout is configured to 1 second above)
    probe.expectNoMsg(3.seconds)
    {
      probe.send(region.ref, Envelope(authId, PushUpdateGetSequenceState(update.header, update.toByteArray, userIds, groupIds, None, None, isFat = false)))
      val msg = probe.receiveOne(1.second).asInstanceOf[SeqState]
      msg.seq should ===(2002)
    }
    // fire-and-forget a burst of pushes, then confirm the seq accounted for all of them
    for (a ← 1 to 600)
      probe.send(region.ref, Envelope(authId, PushUpdate(update.header, update.toByteArray, userIds, groupIds, None, None, isFat = false)))
    probe.expectNoMsg(4.seconds)
    {
      probe.send(region.ref, Envelope(authId, PushUpdateGetSequenceState(update.header, update.toByteArray, userIds, groupIds, None, None, isFat = false)))
      val msg = probe.receiveOne(1.second).asInstanceOf[SeqState]
      msg.seq should ===(3603)
    }
  }
  def e2() = {
    val authId = util.Random.nextLong()
    val update = api.contacts.UpdateContactsAdded(Vector(1, 2, 3))
    val (userIds, groupIds) = updateRefs(update)
    // 101 concurrent asks: each must receive a distinct, strictly increasing seq
    val futures = for (i ← 0 to 100) yield {
      val f = (region.ref ? Envelope(authId, PushUpdateGetSequenceState(update.header, update.toByteArray, userIds, groupIds, None, None, isFat = false)))
        .mapTo[SeqState]
      (f, 1000 + i)
    }
    futures foreach {
      case (f, expectedSeq) ⇒
        whenReady(f) { seqstate ⇒
          seqstate.seq shouldEqual expectedSeq
        }
    }
  }
  override def afterAll: Unit = {
    super.afterAll()
    system.awaitTermination()
    closeDb()
  }
  private def closeDb(): Unit =
    ds.close()
}
| chieryw/actor-platform | actor-server/actor-tests/src/test/scala/im/actor/server/push/SeqUpdatesManagerSpec.scala | Scala | mit | 3,398 |
package BIDMach.datasources
import BIDMat.{Mat,SBMat,CMat,CSMat,DMat,FMat,IMat,HMat,GMat,GIMat,GSMat,SMat,SDMat}
import BIDMat.MatFunctions._
import BIDMat.SciFunctions._
import scala.concurrent.future
//import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.ExecutionContextExecutor
import java.io._
/*
* SFilesDatasource constructs SMat batches from data files stored on disk as IMat.
* The IMats are 3-column with column, row indices and integer values.
* This format allows dynamic construction of the SMat with a specified bound on the max row index,
* and with specified featurization (e.g. clipped to 1, linear, logarithmic etc.).
* fcounts is an IMat specifying the numbers of rows to use for each input block.
*/
/** Row-oriented variant: input files are 3-column IMats (row, col, value triples
  * sorted by row); sprowslice rebuilds a sparse output column per input row. */
class SFilesDSv1(override val opts:SFilesDS.Opts = new SFilesDS.Options)(override implicit val ec:ExecutionContextExecutor) extends FilesDS(opts) {
  var inptrs:IMat = null   // per-file read cursor into the current IMat
  var offsets:IMat = null  // row-offset of each file's feature block in the stacked output
  override def init = {
    initbase
    var totsize = sum(opts.fcounts).v
    if (opts.addConstFeat) totsize += 1  // reserve one extra row for the constant feature
    omats = new Array[Mat](1)
    omats(0) = SMat(totsize, opts.batchSize, opts.batchSize * opts.eltsPerSample)
    inptrs = izeros(opts.fcounts.length, 1)
    offsets = 0 on cumsum(opts.fcounts)
  }
  /** Binary search: first index in column 0 of `mat` whose value is >= i. */
  def binFind(i:Int, mat:Mat):Int = {
    val imat = mat.asInstanceOf[IMat]
    val nrows = mat.nrows
    var ibeg = 0
    var iend = nrows
    while (ibeg < iend) {
      val imid = (iend + ibeg)/2
      if (i > imat(imid, 0)) {
        ibeg = imid+1
      } else {
        iend = imid
      }
    }
    iend
  }
  /** Copies rows [rowno, nrow) of the triple-format inputs into `omat0` as sparse
    * columns, starting at output column `done`. Column 2 of each IMat is featurized
    * per opts.featType (0 = binary, 1 = raw value, else threshold). */
  def sprowslice(inmat:Array[Mat], rowno:Int, nrow:Int, omat0:Mat, done:Int):Mat = {
    val omat = omat0.asInstanceOf[SMat]
    val ioff = Mat.ioneBased
    var idone = done
    var innz = omat.nnz
    val lims = opts.fcounts
    val nfiles = opts.fcounts.length
    val addConstFeat = opts.addConstFeat
    val featType = opts.featType
    val threshold = opts.featThreshold
    var j = 0
    while (j < nfiles) {
      inptrs(j, 0) = binFind(rowno, inmat(j))
      j += 1
    }
    var irow = rowno
    while (irow < nrow) {
      var j = 0
      while (j < nfiles) {
        val mat = inmat(j).asInstanceOf[IMat]
        val mrows = mat.nrows
        var k = inptrs(j)
        while (k < mrows && mat.data(k) < irow) k += 1
        inptrs(j) = k
        val xoff = innz - k
        val yoff = offsets(j) + ioff
//        println("here %d %d %d %d %d" format (k, mat.nrows, mat.ncols, lims.length, j))
        while (k < mat.nrows && mat.data(k) == irow && mat.data(k+mrows) < lims(j)) {
          if (xoff + k >= omat.ir.length) {
            throw new RuntimeException("SFilesDS index out of range. Try increasing opts.eltsPerSample")
          }
          omat.ir(xoff + k) = mat.data(k+mrows) + yoff
          omat.data(xoff + k) = if (featType == 0) {
            1f
          } else if (featType == 1) {
            mat.data(k+2*mrows)
          } else {
            if (mat.data(k+2*mrows).toDouble >= threshold.dv) 1f else 0f
          }
          k += 1
        }
        innz = xoff + k
        inptrs(j) = k
        j += 1
      }
      irow += 1
      idone += 1
      if (addConstFeat) {
        omat.ir(innz) = omat.nrows - 1 + ioff
        omat.data(innz) = 1
        innz += 1
      }
      omat.jc(idone) = innz + ioff
    }
    omat.nnz0 = innz
    omat
  }
  /** Largest row index present in any queued IMat (inputs are row-sorted, so the
    * last entry of column 0 is the maximum). */
  def spmax(matq:Array[Mat]):Int = {
    var maxv = 0
    for (i <- 0 until matq.length) {
      if (matq(i).asInstanceOf[AnyRef] != null) {
        val mat = matq(i).asInstanceOf[IMat]
        maxv = math.max(maxv, mat(mat.nrows-1,0))
      }
    }
    maxv
  }
  /** Pads the last `todo` columns of a partially-filled batch with empty columns. */
  def fillup(mat:Mat, todo:Int) = {
    val smat = mat.asInstanceOf[SMat]
    val ncols = mat.ncols
    var i = ncols - todo
    val theend = smat.jc(i)
    while (i < ncols) {
      i += 1
      smat.jc(i) = theend
    }
  }
  /** Resets the sparse output so the next batch can be written from scratch. */
  def flushMat(mat:Mat) = {
    val smat = mat.asInstanceOf[SMat]
    smat.nnz0 = 0
    smat.jc(0) = Mat.ioneBased
  }
  /** Assembles the next batch, spanning file boundaries as needed; short final
    * batches are padded via fillup. */
  override def next:Array[Mat] = {
    var donextfile = false
    var todo = opts.batchSize
    flushMat(omats(0))
    while (todo > 0 && fileno < nend) {
      var nrow = rowno
      val filex = fileno % math.max(1, opts.lookahead)
      if (opts.lookahead > 0) {
        // spin until the prefetcher has loaded this file
        while (ready(filex) < fileno) Thread.`yield`
      } else {
        fetch
      }
      val spm = spmax(matqueue(filex)) + 1
//      println("spm %d" format spm)
      nrow = math.min(rowno + todo, spm)
      val matq = matqueue(filex)
      if (matq(0).asInstanceOf[AnyRef] != null) {
//        println("Here %d %d %d" format(rowno, nrow, todo))
        omats(0) = sprowslice(matq, rowno, nrow, omats(0), opts.batchSize - todo)
        if (rowno + todo >= spm) donextfile = true
      } else {
        if (opts.throwMissing) {
          throw new RuntimeException("Missing file "+fileno)
        }
        donextfile = true
      }
      todo -= nrow - rowno
      if (donextfile) {
        rowno = 0;
        fileno += 1;
        donextfile = false
      } else {
        rowno = nrow;
      }
    }
    if (todo > 0) {
      fillup(omats(0), todo)
    }
    omats
  }
}
/*
* SFilesDatasource constructs SMat batches from data files stored on disk as IMat.
* The IMats are 3-column with column, row indices and integer values.
* This format allows dynamic construction of the SMat with a specified bound on the max row index,
* and with specified featurization (e.g. clipped to 1, linear, logarithmic etc.).
* fcounts is an IMat specifying the numbers of rows to use for each input block.
*/
/** Column-oriented variant: inputs are already SMats; spcolslice stacks the
  * per-file feature blocks vertically (clipped to fcounts rows each) into one
  * sparse output column per input column. */
class SFilesDS(override val opts:SFilesDS.Opts = new SFilesDS.Options)(override implicit val ec:ExecutionContextExecutor) extends FilesDS(opts) {
  var inptrs:IMat = null   // per-file read cursor (retained for parity with SFilesDSv1)
  var offsets:IMat = null  // row-offset of each file's feature block in the stacked output
  var fcounts:IMat = null  // rows used from each input file
  override def init = {
    initbase
    fcounts = if (opts.fcounts == null) {
      // No explicit row counts given: probe the first file of each file group.
      val fc = izeros(opts.fnames.length,1)
      for (i <- 0 until opts.fnames.length) {
        // BUG FIX: previously loaded opts.fnames(0)(nstart) for every i, so all
        // groups received the first group's row count (and offsets were wrong).
        val m = loadSMat(opts.fnames(i)(nstart))
        fc(i) = m.nrows
      }
      fc
    } else opts.fcounts
    var totsize = sum(fcounts).v
    if (opts.addConstFeat) totsize += 1  // reserve one extra row for the constant feature
    omats = new Array[Mat](1)
    omats(0) = SMat(totsize, opts.batchSize, opts.batchSize * opts.eltsPerSample)
    inptrs = izeros(fcounts.length, 1)
    offsets = 0 on cumsum(fcounts)
  }
  /** Binary search: first index in column 0 of `mat` whose value is >= i. */
  def binFind(i:Int, mat:Mat):Int = {
    val imat = mat.asInstanceOf[IMat]
    val nrows = mat.nrows
    var ibeg = 0
    var iend = nrows
    while (ibeg < iend) {
      val imid = (iend + ibeg)/2
      if (i > imat(imid, 0)) {
        ibeg = imid+1
      } else {
        iend = imid
      }
    }
    iend
  }
  /** Copies columns [colno, endcol) of the input SMats into `omat0`, starting at
    * output column `done`. Each file's rows are clipped to fcounts(j) and shifted
    * by offsets(j); values are featurized per opts.featType (0 = binary, 1 = raw
    * value, else threshold at opts.featThreshold). */
  def spcolslice(inmat:Array[Mat], colno:Int, endcol:Int, omat0:Mat, done:Int):Mat = {
    val omat = omat0.asInstanceOf[SMat]
    val ioff = Mat.ioneBased
    var idone = done
    var innz = omat.nnz
    val lims = fcounts
    val nfiles = fcounts.length
    val addConstFeat = opts.addConstFeat
    val featType = opts.featType
    val threshold = opts.featThreshold
    var icol = colno
    while (icol < endcol) {
      var j = 0
      while (j < nfiles) {
        val mat = inmat(j).asInstanceOf[SMat]
        var k = mat.jc(icol) - ioff
        var lastk = mat.jc(icol+1) - ioff
        val xoff = innz - k
        while (k < lastk && mat.ir(k)-ioff < lims(j)) {
          if (xoff + k >= omat.ir.length) {
            throw new RuntimeException("SFilesDS index out of range. Try increasing opts.eltsPerSample")
          }
          omat.ir(xoff + k) = mat.ir(k) + offsets(j)
          omat.data(xoff + k) = if (featType == 0) {
            1f
          } else if (featType == 1) {
            mat.data(k)
          } else {
            if (mat.data(k).toDouble >= threshold.dv) 1f else 0f
          }
          k += 1
        }
        innz = xoff + k
        j += 1
      }
      icol += 1
      idone += 1
      if (addConstFeat) {
        omat.ir(innz) = omat.nrows - 1 + ioff
        omat.data(innz) = 1
        innz += 1
      }
      omat.jc(idone) = innz + ioff
    }
    omat.nnz0 = innz
    omat
  }
  /** Number of sample columns available in the queued SMats (last non-null wins;
    * all files in a group are expected to share a column count). */
  def spmax(matq:Array[Mat]):Int = {
    var maxv = 0
    for (i <- 0 until matq.length) {
      if (matq(i).asInstanceOf[AnyRef] != null) {
        maxv = matq(i).ncols
      }
    }
    maxv - 1
  }
  /** Pads the last `todo` columns of a partially-filled batch with empty columns. */
  def fillup(mat:Mat, todo:Int) = {
    val smat = mat.asInstanceOf[SMat]
    val ncols = mat.ncols
    var i = ncols - todo
    val theend = smat.jc(i)
    while (i < ncols) {
      i += 1
      smat.jc(i) = theend
    }
  }
  /** Resets the sparse output so the next batch can be written from scratch. */
  def flushMat(mat:Mat) = {
    val smat = mat.asInstanceOf[SMat]
    smat.nnz0 = 0
    smat.jc(0) = Mat.ioneBased
  }
  /** Assembles the next batch, spanning file boundaries as needed; short final
    * batches are padded via fillup. */
  override def next:Array[Mat] = {
    var donextfile = false
    var todo = opts.batchSize
    flushMat(omats(0))
    while (todo > 0 && fileno < nend) {
      var nrow = rowno
      val filex = fileno % math.max(1, opts.lookahead)
      if (opts.lookahead > 0) {
        // spin until the prefetcher has loaded this file
        while (ready(filex) < fileno) Thread.`yield`
      } else {
        fetch
      }
      val spm = spmax(matqueue(filex)) + 1
      nrow = math.min(rowno + todo, spm)
      val matq = matqueue(filex)
      if (matq(0).asInstanceOf[AnyRef] != null) {
        omats(0) = spcolslice(matq, rowno, nrow, omats(0), opts.batchSize - todo)
        if (rowno + todo >= spm) donextfile = true
      } else {
        if (opts.throwMissing) {
          throw new RuntimeException("Missing file "+fileno)
        }
        donextfile = true
      }
      todo -= nrow - rowno
      fprogress = nrow*1f / spm
      if (donextfile) {
        rowno = 0
        fileno += 1
        fprogress = 0
        donextfile = false
      } else {
        rowno = nrow
      }
    }
    if (todo > 0) {
      fillup(omats(0), todo)
    }
    omats
  }
  /** Fraction of the whole pass completed, including progress within the current file. */
  override def progress = {
    ((fileno-nstart)*1f + fprogress)/ totalSize
  }
}
/** Options container for the sparse-file data sources. */
object SFilesDS {
  trait Opts extends FilesDS.Opts {
    // rows to keep from each input file group; null = probe the files at init time
    var fcounts:IMat = null
  }
  class Options extends Opts {}
}
| uhjish/BIDMach | src/main/scala/BIDMach/datasources/SFilesDS.scala | Scala | bsd-3-clause | 10,503 |
package org.infinispan.spark.suites
import org.infinispan.spark.test._
import org.scalatest.DoNotDiscover
/** Runs the inherited RDD-retrieval tests against a multi-server (distributed) setup.
  * Marked @DoNotDiscover: executed only from an aggregating suite, not standalone. */
@DoNotDiscover
class DistributedSuite extends RDDRetrievalTest with WordCache with Spark with MultipleServers {
  // number of cache entries seeded for the retrieval tests
  override protected def getNumEntries: Int = 100
}
| infinispan/infinispan-spark | src/test/scala/org/infinispan/spark/suites/DistributedSuite.scala | Scala | apache-2.0 | 274 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.spark.impl.optimization
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.types._
/**
* Object to support aggregate expressions like `sum` or `avg`.
*/
private[optimization] object AggregateExpressions extends SupportedExpressions {
    /** @inheritdoc */
    def apply(expr: Expression, checkChild: (Expression) ⇒ Boolean): Boolean = expr match {
        case AggregateExpression(aggregateFunction, _, _, _) ⇒
            checkChild(aggregateFunction)
        case Average(child) ⇒
            checkChild(child)
        case Count(children) ⇒
            children.forall(checkChild)
        case Max(child) ⇒
            checkChild(child)
        case Min(child) ⇒
            checkChild(child)
        case Sum(child) ⇒
            checkChild(child)
        case _ ⇒
            false
    }
    /** @inheritdoc */
    override def toString(expr: Expression, childToString: Expression ⇒ String, useQualifier: Boolean,
        useAlias: Boolean): Option[String] = expr match {
        // Unwrap the AggregateExpression envelope; DISTINCT and the SUM result
        // cast have to be handled here because the inner function doesn't carry them.
        case AggregateExpression(aggregateFunction, _, isDistinct, _) ⇒
            aggregateFunction match {
                case Count(children) ⇒
                    if (isDistinct)
                        Some(s"COUNT(DISTINCT ${children.map(childToString(_)).mkString(" ")})")
                    else
                        Some(s"COUNT(${children.map(childToString(_)).mkString(" ")})")
                case sum: Sum ⇒
                    if (isDistinct)
                        Some(castSum(
                            s"SUM(DISTINCT ${sum.children.map(childToString(_)).mkString(" ")})", sum.dataType))
                    else
                        Some(castSum(s"SUM(${sum.children.map(childToString(_)).mkString(" ")})", sum.dataType))
                case _ ⇒
                    Some(childToString(aggregateFunction))
            }
        case Average(child) ⇒
            child.dataType match {
                case DecimalType() | DoubleType ⇒
                    Some(s"AVG(${childToString(child)})")
                case _ ⇒
                    //Spark `AVG` return type is always a double or a decimal.
                    //See [[org.apache.spark.sql.catalyst.expressions.aggregate.Average]]
                    //But Ignite `AVG` return type for a integral types is integral.
                    //To preserve query correct results has to cast column to double.
                    Some(s"AVG(CAST(${childToString(child)} AS DOUBLE))")
            }
        case Count(children) ⇒
            Some(s"COUNT(${children.map(childToString(_)).mkString(" ")})")
        case Max(child) ⇒
            Some(s"MAX(${childToString(child)})")
        case Min(child) ⇒
            Some(s"MIN(${childToString(child)})")
        case sum: Sum ⇒
            Some(castSum(s"SUM(${childToString(sum.child)})", sum.dataType))
        case _ ⇒
            None
    }
    /**
      * Ignite returns BigDecimal but Spark expects BIGINT.
      */
    private def castSum(sumSql: String, dataType: DataType): String = dataType match {
        case LongType ⇒
            s"CAST($sumSql AS BIGINT)"
        case _ ⇒
            s"$sumSql"
    }
}
| irudyak/ignite | modules/spark/src/main/scala/org/apache/ignite/spark/impl/optimization/AggregateExpressions.scala | Scala | apache-2.0 | 4,128 |
package colossus.core
import java.net.InetAddress
import java.nio.channels.{CancelledKeyException, SelectionKey}
/** Result of attempting to write data to a connection's write buffer. */
sealed trait WriteStatus
object WriteStatus {
  /** The connection is busted; the data was not (and will not be) written. */
  case object Failed extends WriteStatus
  /** The data was partially written; the remainder has been buffered. */
  case object Partial extends WriteStatus
  /** Previously-buffered data is still being drained; this write did not occur at all. */
  case object Zero extends WriteStatus
  /** All of the data was written. */
  case object Complete extends WriteStatus
}
/**
 * This trait abstracts actions performed on a raw socket channel.
 *
 * This is essentially the only trait that should differ between live
 * connections and fake connections in testing
 */
trait ChannelActions {
  /**
   * Hook to perform that actual operation of writing to a channel
   */
  protected def channelWrite(data: DataBuffer): Int
  /** Closes the underlying channel. */
  protected def channelClose()
  /** Address of the remote host the channel is connected to. */
  protected def channelHost(): InetAddress
  /** Sets the selector interest-op bitmask (SelectionKey.OP_* values) for this channel. */
  protected def keyInterestOps(ops: Int)
  /** Completes an in-progress (non-blocking) connect. */
  def finishConnect()
  /** Current status of the connection. */
  def status: ConnectionStatus
}
/** Tracks which selector operations (read / write-ready) this connection wants
  * and pushes the combined interest-op mask to the underlying key whenever
  * either flag changes. Reads start enabled; write-ready starts disabled. */
trait KeyInterestManager extends ChannelActions {
  private var _readsEnabled = true
  private var _writeReadyEnabled = false
  def readsEnabled = _readsEnabled
  def writeReadyEnabled = _writeReadyEnabled
  /** Recomputes the OP_READ/OP_WRITE bitmask from the current flags and applies it. */
  protected def setKeyInterest() {
    var interest = 0
    if (readsEnabled) interest |= SelectionKey.OP_READ
    if (writeReadyEnabled) interest |= SelectionKey.OP_WRITE
    keyInterestOps(interest)
  }
  def enableReads() {
    updateReads(enabled = true)
  }
  def disableReads() {
    updateReads(enabled = false)
  }
  def enableWriteReady() {
    updateWriteReady(enabled = true)
  }
  def disableWriteReady() {
    updateWriteReady(enabled = false)
  }
  // Flip a flag and immediately re-register interest with the selector.
  private def updateReads(enabled: Boolean) {
    _readsEnabled = enabled
    setKeyInterest()
  }
  private def updateWriteReady(enabled: Boolean) {
    _writeReadyEnabled = enabled
    setKeyInterest()
  }
}
/** Buffered, non-blocking write layer. Invariant: at most one partial buffer is
  * outstanding at a time; while it exists, new writes return Zero and callers
  * must wait for continueWrite (driven by OP_WRITE readiness) to drain it. */
private[colossus] trait WriteBuffer extends KeyInterestManager {
  import WriteStatus._
  /**
   * The WriteBuffer calls this if it has been signaled to disconnect and
   * finishes writing any existing partial buffer
   */
  protected def completeDisconnect()
  /**
   * This should be called when it's time to disconnect the connection, but we
   * wish to finish writing any existing partial buffer. We do this because any
   * levels higher up already consider any data in a partial buffer to be sent,
   * so we don't want to disconnect until we fulfill that promise.
   */
  def gracefulDisconnect() {
    disconnecting = true
    if (partialBuffer.isEmpty) {
      completeDisconnect()
    }
    //if the partial buffer is defined, completeDisconnect gets called when we
    //finish writing it
  }
  private var disconnecting = false
  private var _bytesSent = 0L
  def bytesSent = _bytesSent
  //this is only filled when we only partially wrote data
  private var partialBuffer: Option[DataBuffer] = None
  //technically this value is wrong when first constructed, but since this is
  //only used in determining idle time, initializing it to the current time
  //simplifies the calculations
  private var _lastTimeDataWritten: Long = System.currentTimeMillis
  def lastTimeDataWritten: Long = _lastTimeDataWritten
  def isDataBuffered: Boolean = partialBuffer.isDefined
  // Performs the actual channel write, buffering any unwritten remainder.
  private def writeRaw(raw: DataBuffer): WriteStatus = {
    try {
      _bytesSent += channelWrite(raw)
      _lastTimeDataWritten = System.currentTimeMillis
      if (raw.hasUnreadData) {
        if (partialBuffer.isEmpty) {
          //we must take a copy of the buffer since it will be repurposed.
          //notice if the partial buffer is defined, then it must be the same as
          //what we're currently trying to write, so we don't need to set it
          //again
          partialBuffer = Some(raw.takeCopy)
        }
        Partial
      } else {
        partialBuffer = None
        Complete
      }
    } catch {
      case _: CancelledKeyException =>
        //no cleanup is required since the connection is closed for good,
        Failed
    }
  }
  // Public entry point: rejects writes while a partial buffer is draining (Zero)
  // or while disconnecting (Failed); otherwise delegates to writeRaw.
  protected def write(raw: DataBuffer): WriteStatus = {
    if (partialBuffer.isDefined) {
      Zero
    } else if (disconnecting) {
      Failed
    } else {
      writeRaw(raw)
    }
  }
  /**
   * Attempts to continue writing any existing partial buffer and returns true
   * if the write buffer is able to accept more data immediately. This will
   * return false if the WriteBuffer is currently in the middle of draining an
   * existing PartialBuffer, so if this returns false, then calling `write` will
   * return `Zero`
   */
  private[colossus] def continueWrite(): Boolean = partialBuffer.forall { raw =>
    if (writeRaw(raw) == Complete) {
      if (disconnecting) {
        completeDisconnect()
      }
      true
    } else {
      false
    }
  }
  // Ask the selector to notify us when the channel becomes writable again.
  def requestWrite() {
    enableWriteReady()
  }
}
| tumblr/colossus | colossus/src/main/scala/colossus/core/WriteBuffer.scala | Scala | apache-2.0 | 4,764 |
package fp
import fp.ComonadSpecification.comonadLaws
import fp.Function1Specification.function1Equal
import org.scalacheck.Arbitrary.arbitrary
import org.scalacheck.{Arbitrary, Cogen, Properties}
/** ScalaCheck properties verifying the Comonad laws for the Store comonad. */
object StoreSpecification extends Properties("Store") {
  // Stores are compared via their (peek function, cursor) representation.
  implicit def storeEqual[S: Equal : Arbitrary, A: Equal]: Equal[Store[S, A]] =
    Equal by (s => (s.peek, s.cursor))
  // Generates arbitrary stores from an arbitrary (peek, cursor) pair.
  implicit def arbStore[S: Arbitrary : Cogen, A: Arbitrary]: Arbitrary[Store[S, A]] = Arbitrary {
    for ((peek, cursor) <- arbitrary[(S => A, S)]) yield Store(peek, cursor)
  }
  // Cogen instance derived from the same (peek, cursor) representation.
  implicit def cogenStore[S: Arbitrary : Cogen, A: Cogen]: Cogen[Store[S, A]] =
    Cogen[(S => A, S)] contramap (s => (s.peek, s.cursor))
  // Run the comonad laws for Store[Int, _] (the type lambda fixes the state type).
  include(comonadLaws[({type f[x] = Store[Int, x]})#f])
}
| adamgfraser/fp | src/test/scala/fp/StoreSpecification.scala | Scala | apache-2.0 | 754 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import io.gatling.commons.validation._
import io.gatling.core.Predef._
import io.gatling.http.Predef._
/** Compilable snippet container for the Gatling "checks" documentation.
  * The `//#tag` pairs are sphinx extraction markers — do not rename, move,
  * or reindent them, and keep the code between each pair exactly as the
  * docs should display it. */
class Checks {
  //#status-is-200
  http("My Request").get("myUrl").check(status.is(200))
  //#status-is-200
  //#status-is-not-404-or-500
  http("My Request").get("myUrl").check(status.not(404), status.not(500))
  //#status-is-not-404-or-500
  {
    type T = String
    val pattern, headerName, expression = ""
    //#currentLocationRegex-ofType
    currentLocationRegex(pattern).ofType[T]
    //#currentLocationRegex-ofType
    //#currentLocationRegex-example
    currentLocationRegex("http://foo.com/bar?(.*)=(.*)").ofType[(String, String)]
    //#currentLocationRegex-example
    //#headerRegex-ofType
    headerRegex(headerName, pattern).ofType[T]
    //#headerRegex-ofType
    //#headerRegex-example
    headerRegex("FOO", "foo(.*)bar(.*)baz").ofType[(String, String)]
    //#headerRegex-example
    //#substring
    substring("foo") // same as substring("foo").find.exists
    substring("foo").findAll.saveAs("indices") // saves a Seq[Int]
    substring("foo").count.saveAs("counts") // saves the number of occurrences of foo
    //#substring
    //#regex
    regex("""<td class="number">""")
    regex("""<td class="number">ACC${account_id}</td>""")
    regex("""/private/bank/account/(ACC[0-9]*)/operations.html""")
    //#regex
    //#regex-ofType
    regex(expression).ofType[T]
    //#regex-ofType
    //#regex-example
    regex("foo(.*)bar(.*)baz").ofType[(String, String)]
    //#regex-example
    //#xpath
    xpath("//input[@id='text1']/@value")
    xpath("//foo:input[@id='text1']/@value", List("foo" -> "http://foo.com"))
    //#xpath
    //#jsonPath
    jsonPath("$..foo.bar[2].baz")
    //#jsonPath
    //#jsonPath-ofType
    jsonPath(expression).ofType[T]
    //#jsonPath-ofType
    val response =
      """
    //#json-response
    // JSON Response
    {
      "foo": 1,
      "bar" "baz"
    }
    //#json-response
      """
    //#jsonPath-Int
    jsonPath("$..foo").ofType[Int] // will match 1
    //#jsonPath-Int
    //#css
    css("article.more a", "href")
    //#css
    //#css-ofType
    css("article.more a", "href").ofType[Node]
    //#css-ofType
    jsonPath("$..foo.bar[2].baz").
      //#transform
      transform(string => string + "foo")
      //#transform
    jsonPath("$..foo.bar[2].baz").
      //#transformOption
      transformOption(extract => extract.orElse(Some("default")).success)
      //#transformOption
    //#is
    status.is(200)
    //#is
    //#not
    status.not(500)
    //#not
    //#exists
    jsonPath("$..foo").exists
    //#exists
    //#notExists
    jsonPath("$..foo").notExists
    //#notExists
    //#in
    status.in(200, 304)
    //#in
    //#validator
    trait Validator[A] {
      def name: String
      def apply(actual: Option[A]): Validation[Option[A]]
    }
    //#validator
    //#regex-count-is
    regex("""https://(.*)""").count.is(5)
    //#regex-count-is
    //#regex-findAll-is
    regex("""https://(.*)/.*""")
      .findAll
      .is(List("www.google.com", "www.mysecuredsite.com"))
    //#regex-findAll-is
    //#status-is
    status.is(200)
    //#status-is
    //#status-in
    status.in(200 to 210)
    //#status-in
    //#regex-find-exists
    regex("aWord").find(1).exists
    //#regex-find-exists
    //#regex-notExists
    regex("aWord").notExists
    //#regex-notExists
    //#bodyBytes-is-RawFileBody
    bodyBytes.is(RawFileBody("expected_response.json"))
    //#bodyBytes-is-RawFileBody
    //#bodyString-isElFileBody
    bodyString.is(ElFileBody("expected_template.json"))
    //#bodyString-isElFileBody
  }
}
| MykolaB/gatling | src/sphinx/http/code/Checks.scala | Scala | apache-2.0 | 4,280 |
/*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.db
import com.netflix.atlas.core.model._
import com.netflix.atlas.core.stacklang.Interpreter
import com.typesafe.config.ConfigFactory
import org.scalatest.FunSuite
/** Tests query evaluation against an in-memory time-series database seeded with
  * two series, "a" = [1,2,3] and "b" = [3,2,1], over three steps starting at t=0.
  * Tests with an explicit step of 3*step exercise downsampling with the various
  * consolidation functions (cf-sum, cf-max, default avg-like behavior). */
class MemoryDatabaseSuite extends FunSuite {
  private val interpreter = new Interpreter(DataVocabulary.allWords)
  private val step = DefaultSettings.stepSize
  private val db = new MemoryDatabase(ConfigFactory.parseString(
    """
      |block-size = 60
      |num-blocks = 2
      |rebuild-frequency = 10s
      |test-mode = true
    """.stripMargin))
  addData("a", 1.0, 2.0, 3.0)
  addData("b", 3.0, 2.0, 1.0)
  private val context = EvalContext(0, 3 * step, step)
  // Writes one datapoint per step for the named series and refreshes the index.
  private def addData(name: String, values: Double*): Unit = {
    val tags = Map("name" -> name)
    val data = values.toList.zipWithIndex.map { case (v, i) =>
      Datapoint(tags, i * step, v)
    }
    db.update(data)
    db.index.rebuildIndex()
  }
  // Parses a stack-language string into a DataExpr.
  private def expr(str: String): DataExpr = {
    interpreter.execute(str).stack match {
      case ModelExtractors.DataExprType(v) :: Nil => v
      case _ => throw new IllegalArgumentException(s"invalid data expr: $str")
    }
  }
  // Evaluates the expression at step `s` and returns label-sorted, window-bounded results.
  private def exec(str: String, s: Long = step): List[TimeSeries] = {
    val ctxt = context.copy(step = s)
    db.execute(ctxt, expr(str)).sortWith(_.label < _.label).map { t =>
      t.mapTimeSeq(s => s.bounded(context.start, context.end))
    }
  }
  // Expected-result helpers: `mul` scales the step of the produced sequence.
  private def ts(label: String, mul: Int, values: Double*): TimeSeries = {
    TimeSeries(Map.empty, label, new ArrayTimeSeq(DsType.Gauge, 0L, mul * step, values.toArray))
  }
  private def ts(name: String, label: String, mul: Int, values: Double*): TimeSeries = {
    val seq = new ArrayTimeSeq(DsType.Gauge, 0L, mul * step, values.toArray)
    TimeSeries(Map("name" -> name), label, seq)
  }
  test(":eq query") {
    assert(exec("name,a,:eq") === List(ts("a", "sum(name=a)", 1, 1.0, 2.0, 3.0)))
  }
  test(":in query") {
    assert(exec("name,(,a,b,),:in") === List(ts("sum(name in (a,b))", 1, 4.0, 4.0, 4.0)))
  }
  test(":re query") {
    assert(exec("name,[ab]$,:re") === List(ts("sum(name~/^[ab]$/)", 1, 4.0, 4.0, 4.0)))
  }
  test(":has query") {
    assert(exec("name,:has") === List(ts("sum(has(name))", 1, 4.0, 4.0, 4.0)))
  }
  test(":offset expr") {
    assert(exec(":true,:sum,1m,:offset") === List(ts("sum(true) (offset=1m)", 1, Double.NaN, 4.0, 4.0)))
  }
  test(":sum expr") {
    assert(exec(":true,:sum") === List(ts("sum(true)", 1, 4.0, 4.0, 4.0)))
  }
  test(":count expr") {
    assert(exec(":true,:count") === List(ts("count(true)", 1, 2.0, 2.0, 2.0)))
  }
  test(":min expr") {
    assert(exec(":true,:min") === List(ts("min(true)", 1, 1.0, 2.0, 1.0)))
  }
  test(":max expr") {
    assert(exec(":true,:max") === List(ts("max(true)", 1, 3.0, 2.0, 3.0)))
  }
  test(":by expr") {
    val expected = List(
      ts("a", "(name=a)", 1, 1.0, 2.0, 3.0),
      ts("b", "(name=b)", 1, 3.0, 2.0, 1.0)
    )
    assert(exec(":true,(,name,),:by") === expected)
  }
  test(":all expr") {
    val expected = List(
      ts("a", "name=a", 1, 1.0, 2.0, 3.0),
      ts("b", "name=b", 1, 3.0, 2.0, 1.0)
    )
    assert(exec(":true,:all") === expected)
  }
  test(":by,1,:head expr") {
    val expected = List(
      ts("a", "(name=a)", 1, 1.0, 2.0, 3.0)
    )
    assert(exec(":true,(,name,),:by,1,:head") === expected)
  }
  test(":by,2,:head expr") {
    val expected = List(
      ts("a", "(name=a)", 1, 1.0, 2.0, 3.0),
      ts("b", "(name=b)", 1, 3.0, 2.0, 1.0)
    )
    assert(exec(":true,(,name,),:by,2,:head") === expected)
  }
  test(":by,3,:head expr") {
    // head larger than the number of groups returns all of them
    val expected = List(
      ts("a", "(name=a)", 1, 1.0, 2.0, 3.0),
      ts("b", "(name=b)", 1, 3.0, 2.0, 1.0)
    )
    assert(exec(":true,(,name,),:by,3,:head") === expected)
  }
  test(":sum expr, c=3") {
    assert(exec(":true,:sum", 3 * step) === List(ts("sum(true)", 3, 4.0)))
  }
  test(":sum expr, c=3, cf=sum") {
    assert(exec(":true,:sum,:cf-sum", 3 * step) === List(ts("sum(true)", 3, 12.0)))
  }
  test(":sum expr, c=3, cf=max") {
    assert(exec(":true,:sum,:cf-max", 3 * step) === List(ts("sum(true)", 3, 6.0)))
  }
  test(":count expr, c=3") {
    assert(exec(":true,:count", 3 * step) === List(ts("count(true)", 3, 2.0)))
  }
  test(":count expr, c=3, cf=sum") {
    assert(exec(":true,:count,:cf-sum", 3 * step) === List(ts("count(true)", 3, 6.0)))
  }
  test(":count expr, c=3, cf=max") {
    assert(exec(":true,:count,:cf-max", 3 * step) === List(ts("count(true)", 3, 2.0)))
  }
  test(":min expr, c=3") {
    assert(exec(":true,:min", 3 * step) === List(ts("min(true)", 3, 1.0)))
  }
  test(":max expr, c=3") {
    assert(exec(":true,:max", 3 * step) === List(ts("max(true)", 3, 3.0)))
  }
  test(":by expr, c=3") {
    val expected = List(
      ts("a", "(name=a)", 3, 2.0),
      ts("b", "(name=b)", 3, 2.0)
    )
    assert(exec(":true,(,name,),:by", 3 * step) === expected)
  }
  test(":all expr, c=3") {
    val expected = List(
      ts("a", "name=a", 3, 6.0),
      ts("b", "name=b", 3, 6.0)
    )
    assert(exec(":true,:all", 3 * step) === expected)
  }
}
| rspieldenner/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/db/MemoryDatabaseSuite.scala | Scala | apache-2.0 | 5,702 |
/* ----------------- sse-breaker ----------------- *\\
* Licensed under the Apache License, Version 2.0. *
* Author: Spiros Tzavellas *
\\* ----------------------------------------------- */
package com.tzavellas.sse.breaker
import org.junit.Test
import scala.util.{Failure, Success}
/** Exercises the circuit breaker through its asynchronous (Future-based) API.
  * The actual breaker behavior tests are inherited from AbstractCircuitBreakerTest;
  * this class only supplies the CircuitDriver that performs the calls via
  * `executor.async` on a current-thread executor (so futures complete synchronously
  * and `f.value` is always defined). */
class CircuitBreakerAsyncTest extends AbstractCircuitBreakerTest with CircuitDriver {
  // Placeholder so the JUnit runner accepts the class; real tests are inherited.
  @Test
  def dummy_test(): Unit = { }
  // -- CircuitDriver implementation ------------------------------------------
  implicit val testExecutor = CircuitExecutor.currentThreadExecutor
  def makeNormalCall(circuitIsOpen: Boolean = false) = {
    val f = executor.async(normalOperation)
    // Future is already completed (current-thread executor), so .value.get is safe here.
    f.value.get match {
      case Success(i) => i
      case Failure(e) if circuitIsOpen => throw e
      case Failure(e) => throw new AssertionError("Unexpected exception!", e)
    }
  }
  // Non-local return from inside the async block — exercises how the breaker
  // handles NonLocalReturnControl.
  def makeCallWithNonLocalReturn(): Any = executor.async { return 43 }
  def generateFaults(numOfFaults: Int): Unit =
    for (_ <- 0 until numOfFaults) executor.async(faultyOperation)
  def normalOperation = 42
  def faultyOperation = throw new IllegalStateException
}
} | sptz45/sse-breaker | src/test/scala/com/tzavellas/sse/breaker/CircuitBreakerAsyncTest.scala | Scala | apache-2.0 | 1,183 |
package models
import scala.collection.mutable
/**
*
* User: mihais
* Date: 11/4/16
*/
/**
 * Thread-safe in-memory cache of crawled episodes, keyed by show name.
 * Entries expire after MAX_CACHE_TIME_IN_MINUTES; staleness is decided by
 * validCache, while episodes still returns whatever was last stored.
 */
object EpisodeCache {
  // crawled episodes per show
  private val episodeCache = new mutable.HashMap[String, List[Episode]]()
  // time of the last crawl per show, in minutes since the epoch (see validCache)
  private val lastCrawlInMinutes = new mutable.HashMap[String, Long]()

  /** Maximum age, in minutes, before a cached crawl is considered stale. */
  val MAX_CACHE_TIME_IN_MINUTES = 60

  /**
   * Returns true iff `showName` has a recorded crawl no older than
   * MAX_CACHE_TIME_IN_MINUTES (inclusive boundary, matching the original logic).
   */
  def validCache(showName: String): Boolean = this.synchronized {
    val nowMinutes = System.currentTimeMillis() / 60000
    lastCrawlInMinutes.get(showName)
      .exists(last => nowMinutes - last <= MAX_CACHE_TIME_IN_MINUTES)
  }

  /**
   * Stores crawled episodes for a show.
   * `time` is compared against minutes-since-epoch in validCache, so callers
   * should pass System.currentTimeMillis() / 60000 — assumption based on that
   * comparison; TODO confirm at call sites.
   */
  def add(showName: String, episodes: List[Episode], time: Long): Unit = this.synchronized {
    episodeCache += showName -> episodes
    lastCrawlInMinutes += showName -> time
  }

  /** Returns the cached episodes for a show, regardless of staleness. */
  def episodes(showName: String): Option[List[Episode]] = this.synchronized {
    episodeCache.get(showName)
  }
}
| MihaiSurdeanu/ourshows | app/models/EpisodeCache.scala | Scala | apache-2.0 | 996 |
package org.clulab.embeddings
import java.io._
import java.nio.{ByteBuffer, ByteOrder}
import org.apache.commons.io.{FileUtils, IOUtils}
import org.clulab.utils.MathUtils
import org.slf4j.{Logger, LoggerFactory}
import scala.collection.mutable.ArrayBuffer
import scala.io.Source
/**
* Implements similarity metrics using the embedding matrix
* IMPORTANT: In our implementation, words are lower cased but NOT lemmatized or stemmed (see sanitizeWord)
* Note: matrixConstructor is lazy, meant to save memory space if we're caching features
* User: mihais, dfried, gus
* Date: 11/25/13
* Last Modified: Fix compiler issue: import scala.io.Source.
*/
@deprecated("ExplicitWordEmbeddingMap should replace the functionality in this class", "processors 8.3.0")
class SanitizedWordEmbeddingMap(matrixConstructor: => Map[String, Array[Double]]) {
  // Number of components per embedding vector, taken from an arbitrary entry of the matrix.
  lazy val dimensions: Int = matrix.values.head.length
  /** alternate constructor to allow loading from a file, possibly with a set of words to constrain the vocab */
  def this(mf: String, wordsToUse: Option[Set[String]] = None, caseInsensitiveWordsToUse:Boolean = false) = {
    this(SanitizedWordEmbeddingMap.loadMatrix(mf, wordsToUse, caseInsensitiveWordsToUse)._1)
  }
  /** alternate constructor to allow loading from a source, possibly with a set of words to constrain the vocab */
  def this(src: Source, wordsToUse: Option[Set[String]], caseInsensitiveWordsToUse:Boolean) = {
    this(SanitizedWordEmbeddingMap.loadMatrixFromSource(src, wordsToUse, caseInsensitiveWordsToUse)._1)
  }
  /** alternate constructor to allow loading from a stream, possibly with a set of words to constrain the vocab */
  def this(is: InputStream, wordsToUse: Option[Set[String]], caseInsensitiveWordsToUse:Boolean) = {
    this(SanitizedWordEmbeddingMap.loadMatrixFromStream(is, wordsToUse, caseInsensitiveWordsToUse)._1)
  }
  // laziness here causes problems with InputStream-based alternate constructor
  val matrix : Map[String, Array[Double]] = matrixConstructor
  /** Saves the matrix in text format: a "size, dimensions" header line, then one word plus its weights per line. */
  def saveMatrix(mf: String) {
    val pw = new PrintWriter(mf)
    pw.println(s"${matrix.size}, $dimensions")
    for ((word, vec) <- matrix) {
      val strRep = vec.map(_.formatted("%.6f")).mkString(" ")
      pw.println(s"$word $strRep")
    }
    pw.close()
  }
  /** If the word doesn't exist in the lexicon, try to use UNK */
  def getEmbedding(w:String): Option[Array[Double]] = {
    if(matrix.contains(w)) {
      matrix.get(w)
    } else {
      matrix.get(SanitizedWordEmbeddingMap.UNK)
    }
  }
  /**
   * Computes the similarity between two given words
   * IMPORTANT: words here must already be normalized using Word2vec.sanitizeWord()!
   * @param w1 The first word
   * @param w2 The second word
   * @return The cosine similarity of the two corresponding vectors
   */
  def similarity(w1:String, w2:String):Double = {
    val v1o = getEmbedding(w1)
    if(v1o.isEmpty) return -1
    val v2o = getEmbedding(w2)
    if(v2o.isEmpty) return -1
    SanitizedWordEmbeddingMap.dotProduct(v1o.get, v2o.get)
  }
  /** Adds the content of src to dest, in place */
  private def add(dest:Array[Double], src:Array[Double]) {
    var i = 0
    while(i < dimensions) {
      dest(i) += src(i)
      i += 1
    }
  }
  /** filterPredicate: if passed, only returns words that match the predicate */
  def mostSimilarWords(v: Array[Double], howMany:Int, filterPredicate: Option[String => Boolean]):List[(String, Double)] = {
    val words = filterPredicate match {
      case None => matrix.keys
      case Some(p) => matrix.keys.filter(p)
    }
    MathUtils.nBest[String](word => SanitizedWordEmbeddingMap.dotProduct(v, matrix(word)))(words, howMany)
  }
  /**
   * Finds the words most similar to this set of inputs
   * IMPORTANT: words here must already be normalized using Word2vec.sanitizeWord()!
   */
  def mostSimilarWords(words:Set[String], howMany:Int):List[(String, Double)] = {
    val v = new Array[Double](dimensions)
    var found = false
    for(w1 <- words) {
      val w = SanitizedWordEmbeddingMap.sanitizeWord(w1) // sanitize words
      val vo = getEmbedding(w)
      if(vo.isDefined) {
        found = true
        add(v, vo.get)
      }
    }
    if(! found) return List()
    SanitizedWordEmbeddingMap.norm(v)
    mostSimilarWords(v, howMany, None)
  }
  /** Words most similar to the given (already sanitized) word; empty list if the word is unknown. */
  def mostSimilarWords(word: String, howMany: Int, filterPredicate: Option[String => Boolean] = None): List[(String,
    Double)] = getEmbedding(word) match {
    case Some(v) => mostSimilarWords(v, howMany, filterPredicate)
    case None => List()
  }
  /** Sums the vectors of all known words in t, then normalizes the result to unit length. */
  def makeCompositeVector(t:Iterable[String]):Array[Double] = {
    val vTotal = new Array[Double](dimensions)
    for(s <- t) {
      val v = getEmbedding(s)
      if(v.isDefined) add(vTotal, v.get)
    }
    SanitizedWordEmbeddingMap.norm(vTotal)
    vTotal
  }
  /**
   * Fetches the embeddings vector for a given word (not lemma)
   * @param word The word
   * @return the array of embeddings weights
   */
  def getWordVector(word:String):Option[Array[Double]] = {
    val sw = SanitizedWordEmbeddingMap.sanitizeWord(word)
    getEmbedding(sw)
  }
  /**
   * Computes the cosine similarity between two texts, according to the embedding matrix
   * IMPORTANT: t1, t2 must be arrays of words, not lemmas!
   */
  def textSimilarity(t1:Iterable[String], t2:Iterable[String]):Double = {
    val st1 = new ArrayBuffer[String]()
    t1.foreach(st1 += SanitizedWordEmbeddingMap.sanitizeWord(_))
    val st2 = new ArrayBuffer[String]()
    t2.foreach(st2 += SanitizedWordEmbeddingMap.sanitizeWord(_))
    sanitizedTextSimilarity(st1, st2)
  }
  /**
   * Computes the cosine similarity between two texts, according to the embedding matrix
   * IMPORTANT: words here must already be normalized using Word2vec.sanitizeWord()!
   */
  def sanitizedTextSimilarity(t1:Iterable[String], t2:Iterable[String]):Double = {
    val v1 = makeCompositeVector(t1)
    val v2 = makeCompositeVector(t2)
    SanitizedWordEmbeddingMap.dotProduct(v1, v2)
  }
  /**
   * Similar to textSimilarity, but using the multiplicative heuristic of Levy and Goldberg (2014)
   * IMPORTANT: t1, t2 must be arrays of words, not lemmas!
   */
  def multiplicativeTextSimilarity(t1:Iterable[String], t2:Iterable[String]):Double = {
    val st1 = new ArrayBuffer[String]()
    t1.foreach(st1 += SanitizedWordEmbeddingMap.sanitizeWord(_))
    val st2 = new ArrayBuffer[String]()
    t2.foreach(st2 += SanitizedWordEmbeddingMap.sanitizeWord(_))
    multiplicativeSanitizedTextSimilarity(st1, st2)
  }
  /**
   * Similar to sanitizedTextSimilarity, but but using the multiplicative heuristic of Levy and Goldberg (2014)
   * IMPORTANT: words here must already be normalized using sanitizeWord()!
   * @return Similarity value
   */
  def multiplicativeSanitizedTextSimilarity(t1:Iterable[String], t2:Iterable[String]):Double = {
    var sim = 1.0
    for(w1 <- t1) {
      for(w2 <- t2) {
        // no need to add the log sim if identical (log(1) == 0)
        if(w1 != w2) {
          val v1 = getEmbedding(w1)
          val v2 = getEmbedding(w2)
          if(v1.isDefined && v2.isDefined) {
            // *multiply* rather than add similarities!
            sim *= SanitizedWordEmbeddingMap.dotProduct(v1.get, v2.get)
          }
        }
      }
    }
    sim
  }
  // Sanitizes both texts, then delegates to logMultiplicativeSanitizedTextSimilarity.
  def logMultiplicativeTextSimilarity(t1: Iterable[String],
                                      t2: Iterable[String],
                                      method: Symbol = 'linear,
                                      normalize: Boolean = false): Double = {
    val st1 = t1.map(SanitizedWordEmbeddingMap.sanitizeWord(_))
    val st2 = t2.map(SanitizedWordEmbeddingMap.sanitizeWord(_))
    logMultiplicativeSanitizedTextSimilarity(st1, st2, method, normalize)
  }
  // Sums log-transformed pairwise cosine similarities between all vector pairs; `method`
  // selects the transform, and `normalize` divides the sum by the number of t2 vectors.
  def logMultiplicativeSanitizedTextSimilarity(t1:Iterable[String],
                                               t2:Iterable[String],
                                               method: Symbol = 'linear,
                                               normalize: Boolean = false):Double = {
    val t1Vecs = t1.flatMap(getEmbedding) // this will drop any words that don't have vectors
    val t2Vecs = t2.flatMap(getEmbedding)
    val sims = for {
      v1 <- t1Vecs
      v2 <- t2Vecs
      cosSim = SanitizedWordEmbeddingMap.dotProduct(v1, v2)
      toYield = method match {
        case 'linear => math.log(cosSim + 1)
        case 'linear_scaled => math.log((cosSim + 1) / 2)
        case 'angular => math.log(1 - (math.acos(math.min(1, math.max(-1, cosSim))) / math.Pi))
        case _ => throw new Exception(s"invalid method $method")
      }
    } yield toYield
    val sum = sims.sum
    if (normalize && t2Vecs.nonEmpty)
      sum / t2Vecs.size
    else
      sum
  }
  /**
   * Finds the maximum embedding similarity between any two words in these two texts
   * IMPORTANT: IMPORTANT: t1, t2 must be arrays of words, not lemmas!
   */
  def maxSimilarity(t1:Iterable[String], t2:Iterable[String]):Double = {
    val st1 = new ArrayBuffer[String]()
    t1.foreach(st1 += SanitizedWordEmbeddingMap.sanitizeWord(_))
    val st2 = new ArrayBuffer[String]()
    t2.foreach(st2 += SanitizedWordEmbeddingMap.sanitizeWord(_))
    sanitizedMaxSimilarity(st1, st2)
  }
  // Sanitizing counterpart of sanitizedMinSimilarity; takes raw words, not lemmas.
  def minSimilarity(t1: Iterable[String], t2: Iterable[String]): Double = {
    val st1 = t1.map(SanitizedWordEmbeddingMap.sanitizeWord(_))
    val st2 = t2.map(SanitizedWordEmbeddingMap.sanitizeWord(_))
    sanitizedMinSimilarity(st1, st2)
  }
  /**
   * Finds the maximum embedding similarity between any two words in these two texts
   * IMPORTANT: words here must already be normalized using sanitizeWord()!
   */
  def sanitizedMaxSimilarity(t1:Iterable[String], t2:Iterable[String]):Double = {
    var max = Double.MinValue
    for(s1 <- t1) {
      val v1 = getEmbedding(s1)
      if(v1.isDefined) {
        for(s2 <- t2) {
          val v2 = getEmbedding(s2)
          if(v2.isDefined) {
            val s = SanitizedWordEmbeddingMap.dotProduct(v1.get, v2.get)
            if(s > max) max = s
          }
        }
      }
    }
    max
  }
  /**
   * Finds the minimum embedding similarity between any two words in these two texts
   * IMPORTANT: words here must already be normalized using Word2vec.sanitizeWord()!
   */
  def sanitizedMinSimilarity(t1:Iterable[String], t2:Iterable[String]):Double = {
    var min = Double.MaxValue
    for(s1 <- t1) {
      val v1 = getEmbedding(s1)
      if(v1.isDefined) {
        for(s2 <- t2) {
          val v2 = getEmbedding(s2)
          if(v2.isDefined) {
            val s = SanitizedWordEmbeddingMap.dotProduct(v1.get, v2.get)
            if(s < min) min = s
          }
        }
      }
    }
    min
  }
  /**
   * Finds the average embedding similarity between any two words in these two texts
   * IMPORTANT: words here must be words not lemmas!
   */
  def avgSimilarity(t1:Iterable[String], t2:Iterable[String]):Double = {
    val st1 = new ArrayBuffer[String]()
    t1.foreach(st1 += SanitizedWordEmbeddingMap.sanitizeWord(_))
    val st2 = new ArrayBuffer[String]()
    t2.foreach(st2 += SanitizedWordEmbeddingMap.sanitizeWord(_))
    val (score, pairs) = sanitizedAvgSimilarity(st1, st2)
    score
  }
  // Like avgSimilarity, but also returns all pairwise (score, word1, word2) triples
  // sorted by descending score, for answer justification.
  def avgSimilarityReturnTop(t1:Iterable[String], t2:Iterable[String]):(Double, Array[(Double, String, String)]) = {
    val st1 = new ArrayBuffer[String]()
    t1.foreach(st1 += SanitizedWordEmbeddingMap.sanitizeWord(_))
    val st2 = new ArrayBuffer[String]()
    t2.foreach(st2 += SanitizedWordEmbeddingMap.sanitizeWord(_))
    val (score, pairs) = sanitizedAvgSimilarity(st1, st2)
    val sorted = pairs.sortBy(- _._1).toArray
    //if (sorted.size > 10) return (score, sorted.slice(0, 10)) // Commented out -- return all pairs for UASupport structure (it can filter them if it wants)
    (score, sorted)
  }
  /**
   * Finds the average embedding similarity between any two words in these two texts
   * IMPORTANT: words here must already be normalized using sanitizeWord()!
   * Changelog: (Peter/June 4/2014) Now returns words list of pairwise scores, for optional answer justification.
   */
  def sanitizedAvgSimilarity(t1:Iterable[String], t2:Iterable[String]):(Double, ArrayBuffer[(Double, String, String)]) = {
    // Top words
    val pairs = new ArrayBuffer[(Double, String, String)]
    var avg = 0.0
    var count = 0
    for(s1 <- t1) {
      val v1 = getEmbedding(s1)
      if(v1.isDefined) {
        for(s2 <- t2) {
          val v2 = getEmbedding(s2)
          if(v2.isDefined) {
            val s = SanitizedWordEmbeddingMap.dotProduct(v1.get, v2.get)
            avg += s
            count += 1
            // Top Words
            pairs.append ( (s, s1, s2) )
          }
        }
      }
    }
    if(count != 0) (avg / count, pairs)
    else (0, pairs)
  }
  /**
   * for a sequence of (word, weight) pairs, interpolate the vectors corresponding to the words by their respective
   * weights, and normalize the resulting vector
   */
  def interpolate(wordsAndWeights: Iterable[(String, Double)]): Array[Double] = {
    // create a vector to store the weighted sum
    val v = new Array[Double](dimensions)
    for ((word, p) <- wordsAndWeights) {
      // get this word's vector, scaled by the weight
      val scaled = for {
        x <- matrix(word)
      } yield x * p
      // add it in place to the sum vector
      add(v, scaled)
    }
    SanitizedWordEmbeddingMap.norm(v)
    v
  }
}
object SanitizedWordEmbeddingMap {
  val logger: Logger = LoggerFactory.getLogger(classOf[SanitizedWordEmbeddingMap])
  // Token used as the fallback embedding for out-of-vocabulary words (if present in a matrix).
  val UNK = "*UNK*"
  def sanitizeWord(uw:String, keepNumbers:Boolean = true):String = EmbeddingUtils.sanitizeWord(uw, keepNumbers)
  def isNumber(w:String):Boolean = EmbeddingUtils.isNumber(w)
  /** Normalizes this vector to length 1, in place */
  def norm(weights:Array[Double]) {
    var i = 0
    var len = 0.0
    while (i < weights.length) {
      len += weights(i) * weights(i)
      i += 1
    }
    len = math.sqrt(len)
    i = 0
    if (len != 0) {
      while (i < weights.length) {
        weights(i) /= len
        i += 1
      }
    }
  }
  /** Dot product of two vectors; for unit-length vectors this equals their cosine similarity. */
  def dotProduct(v1:Array[Double], v2:Array[Double]):Double = {
    assert(v1.length == v2.length) //should we always assume that v2 is longer? perhaps set shorter to length of longer...
    var sum = 0.0
    var i = 0
    while(i < v1.length) {
      sum += v1(i) * v2(i)
      i += 1
    }
    sum
  }
  // Loads the textual matrix format from a file path.
  private def loadMatrix(mf: String,
                         wordsToUse: Option[Set[String]],
                         caseInsensitiveWordsToUse:Boolean):(Map[String, Array[Double]], Int) = {
    logger.debug("Started to load embedding matrix from file " + mf + "...")
    val src: Source = Source.fromFile(mf, "iso-8859-1")
    val lines: Iterator[String] = src.getLines()
    val matrix = buildMatrix(lines, wordsToUse, caseInsensitiveWordsToUse)
    src.close()
    logger.debug("Completed matrix loading.")
    matrix
  }
  // Loads the textual matrix format from an input stream.
  private def loadMatrixFromStream(is: InputStream,
                                   wordsToUse: Option[Set[String]],
                                   caseInsensitiveWordsToUse:Boolean):(Map[String, Array[Double]], Int) = {
    logger.debug("Started to load embedding matrix from stream ...")
    val src: Source = Source.fromInputStream(is, "iso-8859-1")
    val lines: Iterator[String] = src.getLines
    val matrix = buildMatrix(lines, wordsToUse, caseInsensitiveWordsToUse)
    src.close()
    logger.debug("Completed matrix loading.")
    matrix
  }
  // Loads the textual matrix format from an already-open Source; the caller owns the Source.
  private def loadMatrixFromSource(src: Source,
                                   wordsToUse: Option[Set[String]],
                                   caseInsensitiveWordsToUse:Boolean):(Map[String, Array[Double]], Int) = {
    logger.debug("Started to load embedding matrix from source ...")
    val lines: Iterator[String] = src.getLines()
    val matrix = buildMatrix(lines, wordsToUse, caseInsensitiveWordsToUse)
    logger.debug("Completed matrix loading.")
    matrix
  }
  // Parses the textual format: first line holds "<count> <dims>"; every other line is
  // "<word> <w1> ... <wdims>". Each kept vector is normalized to unit length.
  // Returns the (word -> vector) map and the number of dimensions.
  private def buildMatrix(lines: Iterator[String],
                          wordsToUse: Option[Set[String]],
                          caseInsensitiveWordsToUse:Boolean): (Map[String, Array[Double]], Int) = {
    val m = new collection.mutable.HashMap[String, Array[Double]]()
    var first = true
    var dims = 0
    var total = 0
    var kept = 0
    for((line, index) <- lines.zipWithIndex) {
      total += 1
      val bits = line.split("\\\\s+")
      if(first) {
        dims = bits(1).toInt
        first = false
      } else {
        if (bits.length != dims + 1) {
          println(s"${bits.length} != ${dims + 1} found on line ${index + 1}")
        }
        assert(bits.length == dims + 1)
        val w = bits(0)
        if (wordsToUse.isEmpty || wordsToUse.get.contains(if(caseInsensitiveWordsToUse) w.toLowerCase() else w)) {
          kept += 1
          val weights = new Array[Double](dims)
          var i = 0
          while(i < dims) {
            weights(i) = bits(i + 1).toDouble
            i += 1
          }
          norm(weights)
          m.put(w, weights)
        }
      }
    }
    logger.debug(s"Completed matrix loading. Kept $kept words out of a total of $total words.")
    (m.toMap, dims)
  }
  // Loaders for the word2vec binary format; see readBinaryMatrix below.
  def fromBinary(filename: String): SanitizedWordEmbeddingMap = fromBinary(new File(filename))
  def fromBinary(file: File): SanitizedWordEmbeddingMap = {
    new SanitizedWordEmbeddingMap(readBinaryMatrix(FileUtils.readFileToByteArray(file)))
  }
  def fromBinary(inputStream: InputStream): SanitizedWordEmbeddingMap = {
    new SanitizedWordEmbeddingMap(readBinaryMatrix(IOUtils.toByteArray(inputStream)))
  }
  def fromBinary(bytes: Array[Byte]): SanitizedWordEmbeddingMap = {
    new SanitizedWordEmbeddingMap(readBinaryMatrix(bytes))
  }
  // reads non-space chars
  private def readNonSpace(bb: ByteBuffer): String = {
    val buffer = new ArrayBuffer[Byte]
    var byte = bb.get()
    while (byte != ' '.toByte && byte != '\\n'.toByte) {
      buffer += byte
      byte = bb.get()
    }
    new String(buffer.toArray)
  }
  // Parses the word2vec binary format: an ASCII "<words> <dims>" header followed by,
  // for each word, the word's bytes and then <dims> native-order 32-bit floats.
  // Vectors are normalized to unit length while reading.
  private def readBinaryMatrix(bytes: Array[Byte]): Map[String, Array[Double]] = {
    val m = new collection.mutable.HashMap[String, Array[Double]]
    val bb = ByteBuffer.wrap(bytes)
    bb.order(ByteOrder.nativeOrder())
    // read number of words
    val words = readNonSpace(bb).toLong
    // read number of dimensions
    val size = readNonSpace(bb).toLong
    // consume spaces
    var byte = bb.get()
    while (byte == ' '.toByte || byte == '\\n'.toByte) {
      byte = bb.get()
    }
    // rewind one byte
    bb.position(bb.position() - 1)
    // start reading words
    var w = 0L
    while (w < words) {
      w += 1
      // read word
      val word = readNonSpace(bb)
      // populate embedding
      val embedding = new Array[Double](size.toInt)
      var s = 0
      while (s < size) {
        embedding(s) = bb.getFloat()
        s += 1
      }
      // normalize
      norm(embedding)
      // add word to map
      m.put(word, embedding)
      // skip spaces if needed
      if (bb.hasRemaining) {
        // consume spaces
        byte = bb.get()
        while (byte == ' '.toByte || byte == '\\n'.toByte) {
          byte = bb.get()
        }
        // rewind 1 byte
        bb.position(bb.position() - 1)
      }
    }
    m.toMap
  }
  // Small manual smoke test: args(0) must point to a matrix in the textual format.
  def main(args:Array[String]) {
    val w2v = new SanitizedWordEmbeddingMap(args(0), None)
    println("Words most similar to \\"house\\":")
    for(t <- w2v.mostSimilarWords(Set("house"), 40)) {
      println(t._1 + " " + t._2)
    }
    val t1 = List("a", "delicious", "apple")
    val t2 = List("the", "tasty", "pear")
    val t3 = List("computer", "oxygen")
    println("Text similarity: " + w2v.sanitizedTextSimilarity(t1, t2))
    println("Text similarity: " + w2v.sanitizedTextSimilarity(t1, t3))
    println("Max similarity: " + w2v.sanitizedMaxSimilarity(t1, t2))
    println("Avg similarity: " + w2v.sanitizedAvgSimilarity(t1, t2))
  }
}
| sistanlp/processors | main/src/main/scala/org/clulab/embeddings/SanitizedWordEmbeddingMap.scala | Scala | apache-2.0 | 19,971 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import scala.collection.mutable
import org.apache.spark.TaskContext
import org.apache.spark.sql.catalyst.expressions.codegen.CodegenFallback
import org.apache.spark.sql.catalyst.expressions.objects.LambdaVariable
/**
* This class is used to compute equality of (sub)expression trees. Expressions can be added
* to this class and they subsequently query for expression equality. Expression trees are
* considered equal if for the same input(s), the same result is produced.
*/
class EquivalentExpressions {
  // For each expression, the set of equivalent expressions.
  private val equivalenceMap = mutable.HashMap.empty[ExpressionEquals, ExpressionStats]
  /**
   * Adds each expression to this data structure, grouping them with existing equivalent
   * expressions. Non-recursive.
   * Returns true if there was already a matching expression.
   */
  def addExpr(expr: Expression): Boolean = {
    addExprToMap(expr, equivalenceMap)
  }
  // Non-recursive insert: bumps useCount when a semantically-equal expression is already
  // in the map; non-deterministic expressions are never added (they can't be shared).
  private def addExprToMap(
      expr: Expression, map: mutable.HashMap[ExpressionEquals, ExpressionStats]): Boolean = {
    if (expr.deterministic) {
      val wrapper = ExpressionEquals(expr)
      map.get(wrapper) match {
        case Some(stats) =>
          stats.useCount += 1
          true
        case _ =>
          map.put(wrapper, ExpressionStats(expr)())
          false
      }
    } else {
      false
    }
  }
  /**
   * Adds only expressions which are common in each of given expressions, in a recursive way.
   * For example, given two expressions `(a + (b + (c + 1)))` and `(d + (e + (c + 1)))`,
   * the common expression `(c + 1)` will be added into `equivalenceMap`.
   *
   * Note that as we don't know in advance if any child node of an expression will be common
   * across all given expressions, we count all child nodes when looking through the given
   * expressions. But when we call `addExprTree` to add common expressions into the map, we
   * will add recursively the child nodes. So we need to filter the child expressions first.
   * For example, if `((a + b) + c)` and `(a + b)` are common expressions, we only add
   * `((a + b) + c)`.
   */
  private def addCommonExprs(
      exprs: Seq[Expression],
      map: mutable.HashMap[ExpressionEquals, ExpressionStats]): Unit = {
    assert(exprs.length > 1)
    // Intersect the subexpression sets of all given expressions, pairwise.
    var localEquivalenceMap = mutable.HashMap.empty[ExpressionEquals, ExpressionStats]
    addExprTree(exprs.head, localEquivalenceMap)
    exprs.tail.foreach { expr =>
      val otherLocalEquivalenceMap = mutable.HashMap.empty[ExpressionEquals, ExpressionStats]
      addExprTree(expr, otherLocalEquivalenceMap)
      localEquivalenceMap = localEquivalenceMap.filter { case (key, _) =>
        otherLocalEquivalenceMap.contains(key)
      }
    }
    localEquivalenceMap.foreach { case (commonExpr, state) =>
      // Only keep "roots": drop any common expression that is a descendant of another
      // common expression (the height check is a cheap pre-filter before the tree search).
      val possibleParents = localEquivalenceMap.filter { case (_, v) => v.height > state.height }
      val notChild = possibleParents.forall { case (k, _) =>
        k == commonExpr || k.e.find(_.semanticEquals(commonExpr.e)).isEmpty
      }
      if (notChild) {
        // If the `commonExpr` already appears in the equivalence map, calling `addExprTree` will
        // increase the `useCount` and mark it as a common subexpression. Otherwise, `addExprTree`
        // will recursively add `commonExpr` and its descendant to the equivalence map, in case
        // they also appear in other places. For example, `If(a + b > 1, a + b + c, a + b + c)`,
        // `a + b` also appears in the condition and should be treated as common subexpression.
        addExprTree(commonExpr.e, map)
      }
    }
  }
  // There are some special expressions that we should not recurse into all of its children.
  //   1. CodegenFallback: it's children will not be used to generate code (call eval() instead)
  //   2. If: common subexpressions will always be evaluated at the beginning, but the true and
  //          false expressions in `If` may not get accessed, according to the predicate
  //          expression. We should only recurse into the predicate expression.
  //   3. CaseWhen: like `If`, the children of `CaseWhen` only get accessed in a certain
  //                condition. We should only recurse into the first condition expression as it
  //                will always get accessed.
  //   4. Coalesce: it's also a conditional expression, we should only recurse into the first
  //                children, because others may not get accessed.
  private def childrenToRecurse(expr: Expression): Seq[Expression] = expr match {
    case _: CodegenFallback => Nil
    case i: If => i.predicate :: Nil
    case c: CaseWhen => c.children.head :: Nil
    case c: Coalesce => c.children.head :: Nil
    case other => other.children
  }
  // For some special expressions we cannot just recurse into all of its children, but we can
  // recursively add the common expressions shared between all of its children.
  private def commonChildrenToRecurse(expr: Expression): Seq[Seq[Expression]] = expr match {
    case _: CodegenFallback => Nil
    case i: If => Seq(Seq(i.trueValue, i.falseValue))
    case c: CaseWhen =>
      // We look at subexpressions in conditions and values of `CaseWhen` separately. It is
      // because a subexpression in conditions will be run no matter which condition is matched
      // if it is shared among conditions, but it doesn't need to be shared in values. Similarly,
      // a subexpression among values doesn't need to be in conditions because no matter which
      // condition is true, it will be evaluated.
      val conditions = if (c.branches.length > 1) {
        c.branches.map(_._1)
      } else {
        // If there is only one branch, the first condition is already covered by
        // `childrenToRecurse` and we should exclude it here.
        Nil
      }
      // For an expression to be in all branch values of a CaseWhen statement, it must also be in
      // the elseValue.
      val values = if (c.elseValue.nonEmpty) {
        c.branches.map(_._2) ++ c.elseValue
      } else {
        Nil
      }
      Seq(conditions, values)
    // If there is only one child, the first child is already covered by
    // `childrenToRecurse` and we should exclude it here.
    case c: Coalesce if c.children.length > 1 => Seq(c.children)
    case _ => Nil
  }
  /**
   * Adds the expression to this data structure recursively. Stops if a matching expression
   * is found. That is, if `expr` has already been added, its children are not added.
   */
  def addExprTree(
      expr: Expression,
      map: mutable.HashMap[ExpressionEquals, ExpressionStats] = equivalenceMap): Unit = {
    val skip = expr.isInstanceOf[LeafExpression] ||
      // `LambdaVariable` is usually used as a loop variable, which can't be evaluated ahead of the
      // loop. So we can't evaluate sub-expressions containing `LambdaVariable` at the beginning.
      expr.find(_.isInstanceOf[LambdaVariable]).isDefined ||
      // `PlanExpression` wraps query plan. To compare query plans of `PlanExpression` on executor,
      // can cause error like NPE.
      (expr.isInstanceOf[PlanExpression[_]] && TaskContext.get != null)
    if (!skip && !addExprToMap(expr, map)) {
      childrenToRecurse(expr).foreach(addExprTree(_, map))
      commonChildrenToRecurse(expr).filter(_.nonEmpty).foreach(addCommonExprs(_, map))
    }
  }
  /**
   * Returns the state of the given expression in the `equivalenceMap`. Returns None if there is no
   * equivalent expressions.
   */
  def getExprState(e: Expression): Option[ExpressionStats] = {
    equivalenceMap.get(ExpressionEquals(e))
  }
  // Exposed for testing.
  private[sql] def getAllExprStates(count: Int = 0): Seq[ExpressionStats] = {
    equivalenceMap.values.filter(_.useCount > count).toSeq.sortBy(_.height)
  }
  /**
   * Returns the expressions that occurred more than once, i.e. the common subexpressions
   * worth extracting and evaluating only once.
   */
  def getCommonSubexpressions: Seq[Expression] = {
    getAllExprStates(1).map(_.expr)
  }
  /**
   * Returns the state of the data structure as a string. If `all` is false, skips sets of
   * equivalent expressions with cardinality 1.
   */
  def debugString(all: Boolean = false): String = {
    val sb = new java.lang.StringBuilder()
    sb.append("Equivalent expressions:\\n")
    equivalenceMap.values.filter(stats => all || stats.useCount > 1).foreach { stats =>
      sb.append("  ").append(s"${stats.expr}: useCount = ${stats.useCount}").append('\\n')
    }
    sb.toString()
  }
}
/**
* Wrapper around an Expression that provides semantic equality.
*/
case class ExpressionEquals(e: Expression) {
  // Two wrappers are equal iff their wrapped expressions are semantically equal.
  override def equals(o: Any): Boolean = o match {
    case ExpressionEquals(other) => e.semanticEquals(other)
    case _ => false
  }

  // Must agree with the semantic equality above, so hash on the semantic form.
  override def hashCode: Int = e.semanticHash()
}
/**
* A wrapper in place of using Seq[Expression] to record a group of equivalent expressions.
*
* This saves a lot of memory when there are a lot of expressions in a same equivalence group.
* Instead of appending to a mutable list/buffer of Expressions, just update the "flattened"
* useCount in this wrapper in-place.
*/
case class ExpressionStats(expr: Expression)(var useCount: Int = 1) {
  // This is used to do a fast pre-check for child-parent relationship. For example, expr1 can
  // only be a parent of expr2 if expr1.height is larger than expr2.height.
  lazy val height = getHeight(expr)

  // Height of the expression tree: a leaf has height 1; an inner node is one more
  // than its tallest child.
  private def getHeight(tree: Expression): Int =
    1 + tree.children.foldLeft(0)((tallest, child) => tallest.max(getHeight(child)))
}
| chuckchen/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/EquivalentExpressions.scala | Scala | apache-2.0 | 10,375 |
// NOTE(review): this appears to be a scaladoc test fixture with one documented member per
// visibility level (public/protected/private); the member comments below are part of the
// fixture and are intentionally left as-is.
class t10999 {
  /**
   * I am public method !
   */
  def publicMethod: Int = 1
  /**
   * I am protected method !
   */
  protected def protectedMethod: Boolean = true
  /**
   * I am private method !
   */
  private def privateMethod: String = ""
}
| scala/scala | test/scaladoc/resources/t10999.scala | Scala | apache-2.0 | 260 |
package co.blocke.scalajack
package yaml
import model._
import scala.collection.mutable
import org.snakeyaml.engine.v2.nodes._
import org.snakeyaml.engine.v2.api.LoadSettings
import org.snakeyaml.engine.v2.common.{FlowStyle, ScalarStyle}
import org.snakeyaml.engine.v2.composer.Composer
import org.snakeyaml.engine.v2.events.Event
import org.snakeyaml.engine.v2.resolver.JsonScalarResolver
import scala.jdk.CollectionConverters._
/** Writer[Node] implementation that renders values into SnakeYAML engine AST nodes. */
case class YamlWriter() extends Writer[Node] {

  // Renders each element through its TypeAdapter into a SEQ node. Scalar elements are
  // emitted in flow style ([a, b]); structured elements in block style.
  def writeArray[Elem](t: Iterable[Elem], elemTypeAdapter: TypeAdapter[Elem], out: mutable.Builder[Node, Node]): Unit = t match {
    case null => writeNull(out)
    case a =>
      val arr     = mutable.ListBuffer.empty[Node]
      val outBuf  = YamlBuilder()
      a.iterator.foreach { item =>
        outBuf.clear()
        elemTypeAdapter.write(item, this, outBuf)
        arr += outBuf.result
      }
      val flow = elemTypeAdapter match {
        case _: ScalarTypeAdapter[_] => FlowStyle.FLOW
        case _                       => FlowStyle.BLOCK
      }
      out += new SequenceNode(Tag.SEQ, arr.asJava, flow)
  }

  def writeBigInt(t: BigInt, out: mutable.Builder[Node, Node]): Unit =
    t match {
      case null => writeNull(out)
      case _    => out += new ScalarNode(Tag.INT, t.toString, ScalarStyle.PLAIN)
    }

  def writeBoolean(t: Boolean, out: mutable.Builder[Node, Node]): Unit =
    out += new ScalarNode(Tag.BOOL, t.toString, ScalarStyle.PLAIN)

  def writeDecimal(t: BigDecimal, out: mutable.Builder[Node, Node]): Unit =
    t match {
      case null => writeNull(out)
      case _    => out += new ScalarNode(Tag.FLOAT, t.toString, ScalarStyle.PLAIN)
    }

  def writeDouble(t: Double, out: mutable.Builder[Node, Node]): Unit =
    out += new ScalarNode(Tag.FLOAT, t.toString, ScalarStyle.PLAIN)

  def writeInt(t: Int, out: mutable.Builder[Node, Node]): Unit =
    out += new ScalarNode(Tag.INT, t.toString, ScalarStyle.PLAIN)

  def writeLong(t: Long, out: mutable.Builder[Node, Node]): Unit =
    out += new ScalarNode(Tag.INT, t.toString, ScalarStyle.PLAIN)

  // Renders each (key, value) pair through its adapter into a MAP node. Null keys are
  // rejected because YAML mappings here require concrete keys.
  def writeMap[Key, Value, To](t: collection.Map[Key, Value], keyTypeAdapter: TypeAdapter[Key], valueTypeAdapter: TypeAdapter[Value], out: mutable.Builder[Node, Node]): Unit =
    t match {
      case null => writeNull(out)
      case daMap =>
        val outBuf = YamlBuilder()
        val outMap = daMap
          .map {
            case (key, value) =>
              if (key == null)
                throw new ScalaJackError("Map keys cannot be null.")
              outBuf.clear()
              keyTypeAdapter.write(key, this, outBuf)
              val k = outBuf.result()
              outBuf.clear()
              valueTypeAdapter.write(value, this, outBuf)
              new NodeTuple(k, outBuf.result())
          }
          .toList
          .asJava
        out += new MappingNode(Tag.MAP, outMap, FlowStyle.AUTO)
    }

  def writeNull(out: mutable.Builder[Node, Node]): Unit =
    out += new ScalarNode(Tag.NULL, "null", ScalarStyle.PLAIN)

  // Renders (label, value, adapter) triples to a (key node -> value node) map,
  // skipping fields whose value is None.
  @inline private def writeFields(
      fields: List[(String, Any, TypeAdapter[Any])]
  ): Map[Node, Node] = {
    val outBuf = YamlBuilder()
    fields.collect {
      case (label, value, valueTypeAdapter) if value != None =>
        outBuf.clear()
        valueTypeAdapter.write(value, this, outBuf)
        new ScalarNode(Tag.STR, label, ScalarStyle.PLAIN) -> outBuf.result()
    }.toMap
  }

  // Renders a class instance as a MAP node, combining extra fields, declared class fields,
  // and any fields captured by SJCapture (replayed from their recorded YAML events).
  def writeObject[T](
      t: T,
      orderedFieldNames: List[String],
      fieldMembersByName: collection.Map[String, ClassFieldMember[_,_]],
      out: mutable.Builder[Node, Node],
      extras: List[(String, ExtraFieldValue[_])]
  ): Unit = {
    t match {
      case null => writeNull(out.asInstanceOf[collection.mutable.Builder[Node,Node]])
      case _ =>
        val extraFields = writeFields(
          extras.map(
            e =>
              (
                e._1,
                e._2.value,
                e._2.valueTypeAdapter.asInstanceOf[TypeAdapter[Any]]
            )
          )
        )
        val classFields = writeFields(orderedFieldNames.map { orn =>
          val oneField = fieldMembersByName(orn)
          (orn, oneField.info.valueOf(t), oneField.valueTypeAdapter.asInstanceOf[TypeAdapter[Any]])
        })
        val captureFields = t match {
          case sjc: SJCapture =>
            import scala.jdk.CollectionConverters._
            sjc.captured.asScala.map {
              case (k, v) =>
                // Re-compose the captured raw YAML events back into a node.
                val composer = new Composer(LoadSettings.builder.build, new EventParser(v.asInstanceOf[List[Event]]))
                (new ScalarNode(Tag.STR, k, ScalarStyle.PLAIN), composer.next)
            }
          case _ => Map.empty[Node, Node]
        }
        val mapNodes = (extraFields ++ classFields ++ captureFields).map { case (k, v) => new NodeTuple(k, v) }
        out += new MappingNode(Tag.MAP, mapNodes.toList.asJava, FlowStyle.AUTO).asInstanceOf[Node]
    }
  }

  def writeString(t: String, out: mutable.Builder[Node, Node]): Unit = t match {
    case null => writeNull(out)
    case _    => out += new ScalarNode(Tag.STR, t, ScalarStyle.PLAIN)
  }

  // Emits an already-built node unchanged.
  def writeRaw(t: Node, out: mutable.Builder[Node, Node]): Unit =
    out += t

  // Renders a tuple as a SEQ node; flow style is used only when every field is scalar.
  def writeTuple[T](
      t: T,
      writeFn: (Product) => List[(TypeAdapter[_], Any)],
      out: mutable.Builder[Node, Node]
  ): Unit = {
    val arr    = mutable.ListBuffer.empty[Node]
    val outBuf = YamlBuilder()
    val flowStyle = writeFn(t.asInstanceOf[Product]).foldLeft(true) { case (acc, (fieldTA, fieldValue)) =>
      outBuf.clear()
      fieldTA.castAndWrite(fieldValue, this, outBuf)
      arr += outBuf.result
      if fieldTA.isInstanceOf[ScalarTypeAdapter[_]] then
        acc
      else
        false
    }
    out += new SequenceNode(Tag.SEQ, arr.asJava, {if flowStyle then FlowStyle.FLOW else FlowStyle.BLOCK})
  }
}
// $COVERAGE-OFF$This is a snakeyaml thing. It works if consuming it works.
/** Replays a pre-recorded list of YAML events as a SnakeYAML Parser. */
case class EventParser(events: List[Event]) extends org.snakeyaml.engine.v2.parser.Parser {
  // Cursor into the pre-recorded event list.
  private var i = 0
  /** True if there is a next event and it has the given id. */
  def checkEvent(choice: Event.ID): Boolean =
    if (i < events.length)
      events(i).getEventId == choice
    else
      false
  /** Returns the next event without consuming it. */
  def peekEvent(): Event = events(i)
  /** Returns the next event and advances the cursor. */
  def next(): Event = {
    val ret = events(i)
    i += 1
    ret
  }
  def hasNext: Boolean = i < events.length
  // $COVERAGE-ON$
} | gzoller/ScalaJack | core/src/main/scala/co.blocke.scalajack/yaml/YamlWriter.scala | Scala | mit | 6,333 |
/*
Copyright 2013 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.summingbird.example
import com.twitter.summingbird._
import com.twitter.summingbird.batch.Batcher
import com.twitter.summingbird.online.MergeableStoreFactory
import com.twitter.summingbird.storm.Storm
import twitter4j.Status
import twitter4j.TwitterStreamFactory
import twitter4j.conf.ConfigurationBuilder
object StatusStreamer {
  /**
   * These two items are required to run Summingbird in
   * batch/realtime mode, across the boundary between storm and
   * scalding jobs.
   */
  implicit val timeOf: TimeExtractor[Status] = TimeExtractor(_.getCreatedAt.getTime)
  // Explicit result type added: implicit definitions without one hamper type
  // inference and are rejected by Scala 3.
  implicit val batcher: Batcher = Batcher.ofHours(1)

  /**
   * Lowercases the text, strips punctuation and splits into word tokens.
   *
   * NOTE(review): the doubled escapes ("\\\\s") make these patterns match a
   * literal backslash followed by 's' rather than whitespace — this looks like
   * a transcription artifact of "\\s" (regex \s); confirm against upstream
   * before relying on the tokenization. Patterns left byte-identical here.
   */
  def tokenize(text: String): TraversableOnce[String] =
    text.toLowerCase
      .replaceAll("[^a-zA-Z0-9\\\\s]", "")
      .split("\\\\s+")

  /**
   * The actual Summingbird job. Notice that the execution platform
   * "P" stays abstract. This job will work just as well in memory,
   * in Storm or in Scalding, or in any future platform supported by
   * Summingbird.
   *
   * @param source producer of raw twitter4j statuses
   * @param store  platform store accumulating per-word counts
   */
  def wordCount[P <: Platform[P]](
    source: Producer[P, Status],
    store: P#Store[String, Long]) =
    source
      .filter(_.getText != null) // statuses may carry a null text field
      .flatMap { tweet: Status => tokenize(tweet.getText).map(_ -> 1L) }
      .sumByKey(store)
}
| rangadi/summingbird | summingbird-example/src/main/scala/com/twitter/summingbird/example/ExampleJob.scala | Scala | apache-2.0 | 1,823 |
Subsets and Splits
Filtered Scala Code Snippets
This query retrieves a filtered sample of Scala code snippets matching specific criteria, offering a quick overview of the dataset's contents rather than a deeper analysis.