code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.estimator
import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.dllib.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import org.slf4j.LoggerFactory
import scala.reflect.ClassTag
/**
* EstimateSupportive trait to provide some AOP methods as utils
*
*/
/**
 * EstimateSupportive trait providing AOP-style helper methods: timing and
 * throughput logging around arbitrary blocks, plus weight/bias management
 * utilities for BigDL modules (cloning, releasing and restoring parameters).
 */
trait EstimateSupportive {

  /**
   * run the process and log the elapsed time
   *
   * @param name the name of the process
   * @param f the process function
   * @tparam T the return type of the process function
   * @return the result of the process function
   */
  def timing[T](name: String)(f: => T): T = {
    val begin = System.currentTimeMillis
    val result = f
    val end = System.currentTimeMillis
    val cost = (end - begin)
    EstimateSupportive.logger.info(s"$name time elapsed [${cost / 1000} s, ${cost % 1000} ms].")
    result
  }

  /**
   * calculate and log the time and throughput
   *
   * @param name the name of the process
   * @param batch the number of the batch
   * @param f the process function
   * @tparam T the return of the process function
   * @return the result of the process function
   */
  def throughputing[T](name: String, batch: Int)(f: => T): T = {
    val begin = System.currentTimeMillis
    val result = f
    val end = System.currentTimeMillis
    val cost = (end - begin)
    // records per second: batch / millis * 1000
    val throughput = batch.toDouble / cost * 1000
    EstimateSupportive.logger.info(
      s"$name time elapsed [${cost / 1000} s, ${cost % 1000} ms], " +
        s"throughput: ${throughput} records/second.")
    result
  }

  /**
   * calculate and log the time and throughput and loss
   *
   * @param name the name of the process
   * @param batch the number of the batch
   * @param f the process function
   * @tparam T the return of the process function
   * @return the result of the process function
   */
  def throughputingWithLoss[T](name: String, batch: Int)(f: => T): T = {
    val begin = System.currentTimeMillis
    val result = f
    val end = System.currentTimeMillis
    val cost = (end - begin)
    val throughput = batch.toDouble / cost * 1000
    EstimateSupportive.logger.info(
      s"$name time elapsed [${cost / 1000} s, ${cost % 1000} ms], " +
        s"throughput: ${throughput} records/second, loss: ${result}.")
    result
  }

  /**
   * Resets the model, rebinds its weight/bias tensors to freshly cloned copies
   * and then releases the storage of the model's parameter tensors.
   *
   * @param model the model whose weights/biases are cleared
   */
  def clearWeightBias(model: Module[Float]): Unit = {
    model.reset()
    // deep-copy the current weight/bias tensors (the previous unused
    // `weightBias` alias of model.parameters()._1 has been removed)
    val clonedWeightBias = model.parameters()._1.map(tensor => {
      val newTensor = Tensor[Float]().resizeAs(tensor)
      newTensor.copy(tensor)
    })
    val localWeightBias = model.parameters()._1
    var i = 0
    while (i < localWeightBias.length) {
      if (localWeightBias(i) != null) {
        localWeightBias(i).set(clonedWeightBias(i))
      }
      i += 1
    }
    releaseTensors(model.parameters()._1)
    releaseTensors(model.parameters()._2)
  }

  /**
   * Clones the given module and strips its weights/biases, producing a
   * lightweight "meta" model that shares no parameter storage.
   *
   * @param original the module to clone
   * @return the cloned module with released parameters
   */
  def makeMetaModel(original: AbstractModule[Activity, Activity, Float]):
  AbstractModule[Activity, Activity, Float] = {
    val metaModel = original.cloneModule()
    releaseWeightBias(metaModel)
    metaModel
  }

  /**
   * Resets the model and releases the storage of all its parameter tensors.
   *
   * @param model the model whose parameter storage is released
   */
  def releaseWeightBias(model: Module[Float]): Unit = {
    model.reset()
    releaseTensors(model.parameters()._1)
    releaseTensors(model.parameters()._2)
  }

  // Detaches the storage of every non-null tensor in the array via Tensor.set().
  private def releaseTensors[T: ClassTag](tensors: Array[Tensor[T]])
                                         (implicit ev: TensorNumeric[T]) = {
    var i = 0
    while (i < tensors.length) {
      if (tensors(i) != null) {
        tensors(i).set()
      }
      i += 1
    }
  }

  /**
   * Installs the given weight/bias tensors into the cloned model and switches
   * it to evaluate (inference) mode.
   *
   * @param clonedModel the model to populate
   * @param weightBias the weight/bias tensors to install
   * @return the populated model in evaluate mode
   */
  def makeUpModel(clonedModel: Module[Float], weightBias: Array[Tensor[Float]]):
  AbstractModule[Activity, Activity, Float] = {
    putWeightBias(clonedModel, weightBias)
    clonedModel.evaluate()
    clonedModel
  }

  // Rebinds each non-null parameter tensor of `target` to the corresponding
  // tensor in `weightBias` (storage sharing via Tensor.set).
  private def putWeightBias(target: Module[Float], weightBias: Array[Tensor[Float]]):
  Module[Float] = {
    val localWeightBias = target.parameters()._1
    var i = 0
    while (i < localWeightBias.length) {
      if (localWeightBias(i) != null) {
        localWeightBias(i).set(weightBias(i))
      }
      i += 1
    }
    target
  }
}
object EstimateSupportive {
  // shared slf4j logger used by the trait's timing/throughput helpers
  val logger = LoggerFactory.getLogger(getClass)
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/estimator/EstimateSupportive.scala | Scala | apache-2.0 | 4,714 |
package utils
import play.api.mvc.RequestHeader
/**
 * Request helper utilities for extracting the URI, session values and
 * flash values from a Play HTTP request.
 */
object Helper {

  /**
   * Returns the URI of the given HTTP request.
   *
   * @param request the HTTP request
   */
  def findUri(request: RequestHeader): String = {
    request.uri
  }

  /**
   * Returns the session value stored under `element`,
   * or a single-space string when the key is absent.
   *
   * @param request the HTTP request
   * @param element the session key to look up
   */
  def findSessionElementValue(request: RequestHeader, element: String): String = {
    // getOrElse directly; the previous redundant identity .map was removed
    request.session.get(element).getOrElse(" ")
  }

  /**
   * Returns the flash value stored under `element`,
   * or a single-space string when the key is absent.
   *
   * @param request the HTTP request
   * @param element the flash key to look up
   */
  def findFlashElementValue(request: RequestHeader, element: String): String = {
    request.flash.get(element).getOrElse(" ")
  }
}
| knoldus/PlayingField | app/utils/Helper.scala | Scala | apache-2.0 | 804 |
package org.jetbrains.plugins.scala
package lang
package refactoring.extractMethod
import com.intellij.psi._
import org.jetbrains.annotations.Nullable
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import com.intellij.openapi.actionSystem.DataContext
import com.intellij.openapi.project.Project
import com.intellij.openapi.editor.{ScrollType, Editor}
import org.jetbrains.plugins.scala.ScalaBundle
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaRefactoringUtil
import com.intellij.openapi.application.ApplicationManager
import com.intellij.refactoring.{HelpID, RefactoringActionHandler}
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.lang.psi.dataFlow.impl.reachingDefs.ReachingDefintionsCollector
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScTemplateBody
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.{ScalaPsiElement, ScalaPsiUtil}
import scala.collection.mutable.ArrayBuffer
import org.jetbrains.plugins.scala.lang.psi.api.base.{ScPrimaryConstructor, ScReferenceElement}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScVariableDefinition, ScPatternDefinition, ScFunction, ScFunctionDefinition}
import org.jetbrains.plugins.scala.lang.psi.api.{ScControlFlowOwner, ScalaRecursiveElementVisitor, ScalaFile}
import psi.api.base.patterns.ScCaseClause
import psi.types.result.TypingContext
import com.intellij.refactoring.util.CommonRefactoringUtil
import com.intellij.psi.codeStyle.CodeStyleManager
import scala.annotation.tailrec
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import extensions._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.packaging.ScPackaging
import com.intellij.psi.search.searches.ReferencesSearch
import com.intellij.psi.search.LocalSearchScope
import org.jetbrains.plugins.scala.lang.refactoring.extractMethod.duplicates.DuplicatesUtil
import org.jetbrains.plugins.scala.lang.rearranger.ScalaRearranger
import com.intellij.internal.statistic.UsageTrigger
/**
* User: Alexander Podkhalyuzin
* Date: 11.01.2010
*/
class ScalaExtractMethodHandler extends RefactoringActionHandler {
  private val REFACTORING_NAME: String = ScalaBundle.message("extract.method.title")

  // Element-array entry point is intentionally a no-op; extraction only works
  // from an editor selection.
  def invoke(project: Project, elements: Array[PsiElement], dataContext: DataContext) {/*do nothing*/}

  // Editor entry point: scrolls to the caret, bails out for non-Scala files,
  // records a usage statistic and delegates to invokeOnEditor after the user
  // (optionally) chooses the expression to extract.
  def invoke(project: Project, editor: Editor, file: PsiFile, dataContext: DataContext) {
    editor.getScrollingModel.scrollToCaret(ScrollType.MAKE_VISIBLE)
    if (!file.isInstanceOf[ScalaFile]) return
    UsageTrigger.trigger(ScalaBundle.message("extract.method.id"))
    ScalaRefactoringUtil.afterExpressionChoosing(project, editor, file, dataContext, REFACTORING_NAME, ScalaRefactoringUtil.checkCanBeIntroduced(_)) {
      invokeOnEditor(project, editor, file.asInstanceOf[ScalaFile], dataContext)
    }
  }

  // Analyses the current selection (return statements, last-expression value,
  // enclosing scopes) and either shows the scope chooser or opens the dialog.
  private def invokeOnEditor(project: Project, editor: Editor, file: ScalaFile, dataContext: DataContext) {
    if (!ScalaRefactoringUtil.ensureFileWritable(project, file)) {
      showErrorMessage(ScalaBundle.message("file.is.not.writable"), project, editor)
      return
    }
    if (!editor.getSelectionModel.hasSelection) return
    ScalaRefactoringUtil.trimSpacesAndComments(editor, file, trimComments = false)
    val startElement: PsiElement = file.findElementAt(editor.getSelectionModel.getSelectionStart)
    val endElement: PsiElement = file.findElementAt(editor.getSelectionModel.getSelectionEnd - 1)
    val elements = ScalaPsiUtil.getElementsRange(startElement, endElement) match {
      case Seq(b: ScBlock) if !b.hasRBrace => b.children.toSeq
      case elems => elems
    }
    if (showNotPossibleWarnings(elements, project, editor)) return

    // true when every terminal path of `elem` ends in an explicit `return`
    def checkLastReturn(elem: PsiElement): Boolean = {
      elem match {
        case ret: ScReturnStmt => true
        case m: ScMatchStmt =>
          m.getBranches.forall(checkLastReturn(_))
        case f: ScIfStmt if f.elseBranch != None && f.thenBranch != None =>
          checkLastReturn(f.thenBranch.get) && checkLastReturn(f.elseBranch.get)
        case block: ScBlock if block.lastExpr != None => checkLastReturn(block.lastExpr.get)
        case _ => false
      }
    }

    // true when `elem`'s value is actually used by its context (i.e. it is the
    // result of the enclosing block/function, not a discarded statement)
    @tailrec
    def checkLastExpressionMeaningful(elem: PsiElement): Boolean = {
      if (!elem.isInstanceOf[ScExpression]) return false
      val expr = elem.asInstanceOf[ScExpression]
      expr.getParent match {
        case t: ScTryBlock if t.lastExpr == Some(expr) => checkLastExpressionMeaningful(t.getParent)
        case bl: ScBlock if bl.lastExpr == Some(expr) => checkLastExpressionMeaningful(bl)
        case bl: ScBlock => false
        case clause: ScCaseClause => checkLastExpressionMeaningful(clause.getParent.getParent)
        case d: ScDoStmt if d.getExprBody == Some(expr) => false
        case w: ScWhileStmt if w.body == Some(expr) => false
        case i: ScIfStmt if i.elseBranch == None && i.thenBranch == Some(expr) => false
        case fun: ScFunction if !fun.hasAssign => false
        case _ => true
      }
    }

    // return type of the enclosing function if the selection contains a
    // `return` targeting it; None otherwise
    def returnType: Option[ScType] = {
      val fun = PsiTreeUtil.getParentOfType(elements(0), classOf[ScFunctionDefinition])
      if (fun == null) return None
      var result: Option[ScType] = None
      val visitor = new ScalaRecursiveElementVisitor {
        override def visitReturnStatement(ret: ScReturnStmt) {
          val newFun = PsiTreeUtil.getParentOfType(ret, classOf[ScFunctionDefinition])
          if (newFun == fun) {
            result = Some(fun.returnType.getOrElse(psi.types.Unit))
          }
        }
      }
      for (element <- elements if result == None) {
        element.accept(visitor)
      }
      result
    }

    val lastScalaElem = elements.reverse.find(_.isInstanceOf[ScalaPsiElement]).get
    val lastReturn = checkLastReturn(lastScalaElem)
    val isLastExpressionMeaningful: Option[ScType] = {
      if (lastReturn) None
      else if (checkLastExpressionMeaningful(lastScalaElem))
        Some(lastScalaElem.asInstanceOf[ScExpression].getType(TypingContext.empty).getOrAny)
      else None
    }
    val hasReturn: Option[ScType] = returnType
    val stopAtScope: PsiElement = findScopeBound(elements).getOrElse(file)
    val siblings: Array[PsiElement] = getSiblings(elements(0), stopAtScope)
    if (siblings.length == 0) return
    val array = elements.toArray
    if (ApplicationManager.getApplication.isUnitTestMode && siblings.length > 0) {
      // tests always take the innermost candidate without showing the chooser
      invokeDialog(project, editor, array, hasReturn, lastReturn, siblings(0), siblings.length == 1,
        isLastExpressionMeaningful)
    } else if (siblings.length > 1) {
      ScalaRefactoringUtil.showChooser(editor, siblings, {selectedValue =>
        invokeDialog(project, editor, array, hasReturn, lastReturn, selectedValue,
          siblings(siblings.length - 1) == selectedValue, isLastExpressionMeaningful)
      }, "Choose level for Extract Method", getTextForElement, true)
      return
    }
    else if (siblings.length == 1) {
      invokeDialog(project, editor, array, hasReturn, lastReturn, siblings(0), smallestScope = true, isLastExpressionMeaningful)
    }
  }

  // Collects candidate anchor elements (one per enclosing block/template body),
  // walking up from `element` until `stopAtScope` no longer contains the parent.
  // Result is ordered outermost-first.
  private def getSiblings(element: PsiElement, @Nullable stopAtScope: PsiElement): Array[PsiElement] = {
    def isParentOk(parent: PsiElement): Boolean = {
      if (parent == null) return false
      assert(parent.getTextRange != null, "TextRange is null: " + parent.getText)
      stopAtScope == null || stopAtScope.getTextRange.contains(parent.getTextRange)
    }

    val res = new ArrayBuffer[PsiElement]
    var prev = element
    var parent = element.getParent
    while (isParentOk(parent)) {
      parent match {
        case file: ScalaFile if file.isScriptFile() => res += prev
        case block: ScBlock => res += prev
        case templ: ScTemplateBody => res += prev
        case _ =>
      }
      prev = parent
      parent = parent match {
        case file: ScalaFile =>
          null
        case _ => parent.getParent
      }
    }
    res.toArray.reverse
  }

  // Finds the outermost scope the extracted method may be placed in without
  // breaking resolution of references used inside the selection.
  private def findScopeBound(elements: Seq[PsiElement]): Option[PsiElement] = {
    val commonParent = PsiTreeUtil.findCommonParent(elements: _*)

    // scope that a single reference constrains the extraction to, if any
    def scopeBound(ref: ScReferenceElement): Option[PsiElement] = {
      val fromThisRef: Option[ScTemplateDefinition] = ref.qualifier match {
        case Some(thisRef: ScThisReference) => thisRef.refTemplate
        case Some(_) => return None
        case None => None
      }
      val defScope: Option[PsiElement] = fromThisRef.orElse {
        ref.resolve() match {
          case primConstr: ScPrimaryConstructor =>
            primConstr.containingClass match {
              case clazz: ScClass =>
                if (clazz.isLocal) clazz.parent
                else clazz.containingClass.toOption
              case _ => None
            }
          case member: ScMember if !member.isLocal => member.containingClass.toOption
          case td: ScTypeDefinition => td.parent
          case ScalaPsiUtil.inNameContext(varDef: ScVariableDefinition) if ScalaPsiUtil.isLValue(ref) => varDef.parent
          case member: PsiMember => member.containingClass.toOption
          case _ => return None
        }
      }
      defScope match {
        case Some(clazz: PsiClass) =>
          commonParent.parentsInFile.collectFirst {
            case td: ScTemplateDefinition if td == clazz || td.isInheritor(clazz, deep = true) => td
          }
        case local @ Some(_) => local
        case _ =>
          PsiTreeUtil.getParentOfType(commonParent, classOf[ScPackaging]).toOption
            .orElse(commonParent.containingFile)
      }
    }

    // narrow `result` to the innermost bound imposed by any reference
    var result: PsiElement = commonParent.getContainingFile
    val visitor = new ScalaRecursiveElementVisitor {
      override def visitReference(ref: ScReferenceElement) {
        scopeBound(ref) match {
          case Some(bound: PsiElement) if PsiTreeUtil.isAncestor(result, bound, true) => result = bound
          case _ =>
        }
      }
    }
    elements.foreach {
      case elem: ScalaPsiElement => elem.accept(visitor)
      case _ =>
    }
    Option(result)
  }

  // Computes input/output variables, builds the settings (via the dialog in
  // production, from document marker comments in unit-test mode) and performs
  // the refactoring, including duplicate processing.
  private def invokeDialog(project: Project, editor: Editor, elements: Array[PsiElement], hasReturn: Option[ScType],
                           lastReturn: Boolean, sibling: PsiElement, smallestScope: Boolean,
                           lastMeaningful: Option[ScType]) {
    val info = ReachingDefintionsCollector.collectVariableInfo(elements, sibling.asInstanceOf[ScalaPsiElement])
    val input = info.inputVariables
    val output = info.outputVariables
    if (output.exists(_.element.isInstanceOf[ScFunctionDefinition])) {
      showErrorMessage(ScalaBundle.message("cannot.extract.used.function.definition"), project, editor)
      return
    }
    val settings: ScalaExtractMethodSettings =
      if (!ApplicationManager.getApplication.isUnitTestMode) {
        val dialog = new ScalaExtractMethodDialog(project, elements, hasReturn, lastReturn, sibling,
          input.toArray, output.toArray, lastMeaningful)
        dialog.show()
        if (!dialog.isOK) return
        dialog.getSettings
      }
      else {
        // unit tests configure inner-class extraction via leading marker comments
        val innerClassSettings = {
          val text = editor.getDocument.getText
          val isCase = text.startsWith("//case class")
          val isInner = text.startsWith("//inner class")
          val out = output.map(ScalaExtractMethodUtils.convertVariableData(_, elements)).map(ExtractMethodOutput.from)
          InnerClassSettings(isCase || isInner, "TestMethodNameResult", out.toArray, isCase)
        }
        new ScalaExtractMethodSettings("testMethodName", ScalaExtractMethodUtils.getParameters(input.toArray, elements),
          ScalaExtractMethodUtils.getReturns(output.toArray, elements), "", sibling,
          elements, hasReturn, lastReturn, lastMeaningful, innerClassSettings)
      }
    val duplicates = DuplicatesUtil.findDuplicates(settings)
    performRefactoring(settings, editor)
    if (settings.returnType.isEmpty && settings.typeParameters.isEmpty) {
      if (duplicates.size > 0) DuplicatesUtil.processDuplicates(duplicates, settings, project, editor)
    }
  }

  // Human-readable label for one scope candidate shown in the chooser popup.
  private def getTextForElement(element: PsiElement): String = {
    def local(text: String) = ScalaBundle.message("extract.local.method", text)
    element.getParent match {
      case tbody: ScTemplateBody =>
        PsiTreeUtil.getParentOfType(tbody, classOf[ScTemplateDefinition]) match {
          case o: ScObject => s"Extract method to object ${o.name}"
          case c: ScClass => s"Extract method to class ${c.name}"
          case t: ScTrait => s"Extract method to trait ${t.name}"
          case n: ScNewTemplateDefinition => "Extract method to anonymous class"
        }
      case _: ScTryBlock => local("try block")
      case _: ScConstrBlock => local("constructor")
      case b: ScBlock =>
        b.getParent match {
          case f: ScFunctionDefinition => local(s"def ${f.name}")
          case p: ScPatternDefinition if p.bindings.nonEmpty => local(s"val ${p.bindings(0).name}")
          case v: ScVariableDefinition if v.bindings.nonEmpty => local(s"var ${v.bindings(0).name}")
          case _: ScCaseClause => local("case clause")
          case ifStmt: ScIfStmt =>
            if (ifStmt.thenBranch.exists(_ == b)) local("if block")
            else "Extract local method in else block"
          case forStmt: ScForStatement if forStmt.body.exists(_ == b) => local("for statement")
          case whileStmt: ScWhileStmt if whileStmt.body.exists(_ == b) => local("while statement")
          case doSttm: ScDoStmt if doSttm.getExprBody.exists(_ == b) => local("do statement")
          case funExpr: ScFunctionExpr if funExpr.result.exists(_ == b) => local("function expression")
          case _ => local("code block")
        }
      case _: ScalaFile => "Extract file method"
      case _ => "Unknown extraction"
    }
  }

  // Builds the new method PSI, inserts it (and the optional inner result
  // class), replaces the selection with a call, then reformats — all within a
  // single write command.
  private def performRefactoring(settings: ScalaExtractMethodSettings, editor: Editor) {
    val method = ScalaExtractMethodUtils.createMethodFromSettings(settings)
    if (method == null) return
    val ics = settings.innerClassSettings

    def newLine = ScalaPsiElementFactory.createNewLine(method.getManager)

    def addElementBefore(elem: PsiElement, nextSibling: PsiElement) = {
      val added = nextSibling.getParent.addBefore(elem, nextSibling)
      ScalaPsiUtil.adjustTypes(added)
      added
    }

    def insertInnerClassBefore(anchorNext: PsiElement) {
      if (!ics.needClass) return
      val classText = ics.classText(canonTextForTypes = true)
      val clazz = ScalaPsiElementFactory.createTemplateDefinitionFromText(classText, anchorNext.getContext, anchorNext)
      addElementBefore(clazz, anchorNext)
      addElementBefore(newLine, anchorNext)
    }

    def insertMethod() = {
      var insertedMethod: PsiElement = null
      settings.nextSibling match {
        case s childOf (_: ScTemplateBody) =>
          // put the extract method *below* the current code if it is added to a template body.
          val nextSibling = s.getNextSiblingNotWhitespaceComment
          addElementBefore(newLine, nextSibling)
          insertedMethod = addElementBefore(method, nextSibling)
          addElementBefore(newLine, nextSibling)
        case s =>
          insertedMethod = addElementBefore(method, s)
          addElementBefore(newLine, s)
      }
      insertedMethod
    }

    def insertMethodCall() =
      ScalaExtractMethodUtils.replaceWithMethodCall(settings, settings.elements, param => param.oldName, output => output.paramName)

    PsiDocumentManager.getInstance(editor.getProject).commitDocument(editor.getDocument)
    inWriteCommandAction(editor.getProject, REFACTORING_NAME) {
      val method = insertMethod()
      insertInnerClassBefore(method)
      insertMethodCall()
      val manager = CodeStyleManager.getInstance(method.getProject)
      manager.reformat(method)
      editor.getSelectionModel.removeSelection()
    }
  }

  // Validates the selection; shows an error and returns true when extraction
  // is impossible (non-statements selected, used type definitions, etc.).
  private def showNotPossibleWarnings(elements: Seq[PsiElement], project: Project, editor: Editor): Boolean = {
    def errors(elem: PsiElement): Option[String] = elem match {
      case _: ScBlockStatement => None
      case comm: PsiComment if !comm.getParent.isInstanceOf[ScMember] => None
      case _: PsiWhiteSpace => None
      case _ if ScalaTokenTypes.tSEMICOLON == elem.getNode.getElementType => None
      case typedef: ScTypeDefinition => checkTypeDefUsages(typedef)
      case _: ScSelfInvocation => ScalaBundle.message("cannot.extract.self.invocation").toOption
      case _ => ScalaBundle.message("cannot.extract.empty.message").toOption
    }

    // a type definition may only be extracted if all its usages are inside the
    // selection as well
    def checkTypeDefUsages(typedef: ScTypeDefinition): Option[String] = {
      val scope = new LocalSearchScope(PsiTreeUtil.getParentOfType(typedef, classOf[ScControlFlowOwner], true))
      val refs = ReferencesSearch.search(typedef, scope).findAll()
      import scala.collection.JavaConverters.collectionAsScalaIterableConverter
      for {
        ref <- refs.asScala
        if !elements.exists(PsiTreeUtil.isAncestor(_, ref.getElement, false))
      } {
        return ScalaBundle.message("cannot.extract.used.type.definition").toOption
      }
      None
    }

    val messages = elements.flatMap(errors)
    if (messages.nonEmpty) {
      showErrorMessage(messages.mkString("\\n"), project, editor)
      return true
    }
    if (elements.length == 0 || !elements.exists(_.isInstanceOf[ScBlockStatement])) {
      showErrorMessage(ScalaBundle.message("cannot.extract.empty.message"), project, editor)
      return true
    }
    var typeDefMessage: Option[String] = None
    for (element <- elements) {
      val visitor = new ScalaRecursiveElementVisitor {
        override def visitTypeDefintion(typedef: ScTypeDefinition) {
          typeDefMessage = checkTypeDefUsages(typedef)
        }
      }
      element.accept(visitor)
      typeDefMessage match {
        case Some(m) =>
          showErrorMessage(m, project, editor)
          return true
        case None =>
      }
    }
    false
  }

  // In unit-test mode errors are surfaced as exceptions so tests can assert
  // on them; otherwise a standard refactoring error hint is shown.
  private def showErrorMessage(text: String, project: Project, editor: Editor) {
    if (ApplicationManager.getApplication.isUnitTestMode) throw new RuntimeException(text)
    CommonRefactoringUtil.showErrorHint(project, editor, text, REFACTORING_NAME, HelpID.EXTRACT_METHOD)
  }
} | consulo/consulo-scala | src/org/jetbrains/plugins/scala/lang/refactoring/extractMethod/ScalaExtractMethodHandler.scala | Scala | apache-2.0 | 18,473 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.twitter.zipkin.web
import com.twitter.finatra.{Response, Controller, View, Request}
import com.twitter.logging.Logger
import com.twitter.util.Future
import com.twitter.zipkin.adapter.{JsonQueryAdapter, JsonAdapter, ThriftQueryAdapter, ThriftAdapter}
import com.twitter.zipkin.gen
import com.twitter.zipkin.config.ZipkinWebConfig
import java.nio.ByteBuffer
import java.text.SimpleDateFormat
import java.util.Calendar
/**
* Application that handles ZipkinWeb routes
* @param config ZipkinWebConfig
* @param client Thrift client to ZipkinQuery
*/
class App(config: ZipkinWebConfig, client: gen.ZipkinQuery.FinagledClient) extends Controller {
  val log = Logger.get()

  // formatters used to pre-populate the index page's query form
  val dateFormat = new SimpleDateFormat("MM-dd-yyyy")
  val timeFormat = new SimpleDateFormat("HH:mm:ss")

  def getDate = dateFormat.format(Calendar.getInstance().getTime)
  def getTime = timeFormat.format(Calendar.getInstance().getTime)

  /* Index page */
  get("/") { request =>
    render.view(wrapView(new IndexView(getDate, getTime))).toFuture
  }

  /* Trace page */
  get("/show/:id") { request =>
    render.view(wrapView(new ShowView(request.params("id")))).toFuture
  }

  /* Static page for render trace from JSON */
  get("/static") { request =>
    render.view(wrapView(new StaticView)).toFuture
  }

  /**
   * API: query
   * Returns query results that satisfy the request parameters in order of descending duration
   *
   * Required GET params:
   * - service_name: String
   * - end_date: date String formatted to `QueryRequest.fmt`
   *
   * Optional GET params:
   * - limit: Int, default 100
   * - span_name: String
   * - time_annotation: String
   * - annotation_key, annotation_value: String
   * - adjust_clock_skew = (true|false), default true
   */
  get("/api/query") { request =>
    /* Get trace ids */
    // dispatch on the parsed query type to the matching thrift lookup
    val traceIds = QueryRequest(request) match {
      case r: SpanQueryRequest => {
        client.getTraceIdsBySpanName(r.serviceName, r.spanName, r.endTimestamp, r.limit, r.order)
      }
      case r: AnnotationQueryRequest => {
        client.getTraceIdsByAnnotation(r.serviceName, r.annotation, null, r.endTimestamp, r.limit, r.order)
      }
      case r: KeyValueAnnotationQueryRequest => {
        client.getTraceIdsByAnnotation(r.serviceName, r.key, ByteBuffer.wrap(r.value.getBytes), r.endTimestamp, r.limit, r.order)
      }
      case r: ServiceQueryRequest => {
        client.getTraceIdsByServiceName(r.serviceName, r.endTimestamp, r.limit, r.order)
      }
    }
    val adjusters = getAdjusters(request)
    traceIds.map { ids =>
      ids match {
        case Nil => {
          Future.value(Seq.empty)
        }
        case _ => {
          // fetch summaries and convert thrift -> domain -> JSON
          client.getTraceSummariesByIds(ids, adjusters).map {
            _.map { summary =>
              JsonQueryAdapter(ThriftQueryAdapter(summary))
            }
          }
        }
      }
    }.flatten.map(render.json(_))
  }

  /**
   * API: services
   * Returns the total list of services Zipkin is aware of
   */
  get("/api/services") { request =>
    log.debug("/api/services")
    client.getServiceNames().map { services =>
      render.json(services.toSeq.sorted)
    }
  }

  /**
   * API: spans
   * Returns a list of spans for a particular service
   *
   * Required GET params:
   * - serviceName: String
   */
  get("/api/spans") { request =>
    log.debug("/api/spans")
    withServiceName(request) { serviceName =>
      client.getSpanNames(serviceName).map { spans =>
        render.json(spans.toSeq.sorted)
      }
    }
  }

  /**
   * API: top_annotations
   * Returns a list of top/popular time-based annotations for a particular service
   *
   * Required GET params:
   * - serviceName: string
   */
  get("/api/top_annotations") { request =>
    withServiceName(request) { serviceName =>
      client.getTopAnnotations(serviceName).map { anns =>
        render.json(anns.toSeq.sorted)
      }
    }
  }

  /**
   * API: top_kv_annotations
   * Returns a list of the top/popular keys for key-value annotations for a particular service
   *
   * Required GET params:
   * - serviceName: String
   */
  get("/api/top_kv_annotations") { request =>
    withServiceName(request) { serviceName =>
      client.getTopKeyValueAnnotations(serviceName).map { anns =>
        render.json(anns.toSeq.sorted)
      }
    }
  }

  /**
   * API: get
   * Returns the data for a particular trace
   *
   * Required GET params:
   * - id: Long
   *
   * Optional GET params:
   * - adjust_clock_skew: (true|false), default true
   */
  get("/api/get/:id") { request =>
    log.info("/api/get")
    val adjusters = getAdjusters(request)
    val ids = Seq(request.params("id").toLong)
    log.debug(ids.toString())
    client.getTraceCombosByIds(ids, adjusters).map { _.map { ThriftQueryAdapter(_) }.head }.map { combo =>
      render.json(JsonQueryAdapter(combo))
    }
  }

  /**
   * API: is_pinned
   * Returns whether a trace has been pinned
   *
   * Required GET params:
   * - id: Long
   */
  get("/api/is_pinned/:id") { request =>
    val id = request.params("id").toLong
    client.getTraceTimeToLive(id).map(render.json(_))
  }

  /**
   * API: pin
   * Pins a trace (sets its TTL)
   *
   * Required GET params:
   * - id: Long
   * - state: Boolean (true|false)
   */
  post("/api/pin/:id/:state") { request =>
    val id = request.params("id").toLong
    request.params("state").toLowerCase match {
      case "true" => {
        togglePinState(id, true).map(render.json(_))
      }
      case "false" => {
        togglePinState(id, false).map(render.json(_))
      }
      case _ => {
        render.status(400).body("Must be true or false").toFuture
      }
    }
  }

  // Runs `f` with the "serviceName" query param, or responds 401 when absent.
  private def withServiceName(request: Request)(f: String => Future[Response]): Future[Response] = {
    request.params.get("serviceName") match {
      case Some(s) => {
        f(s)
      }
      case None => {
        render.status(401).body("Invalid service name").toFuture
      }
    }
  }

  // Pinning sets the trace's TTL to the configured pin TTL; unpinning restores
  // the store-wide default data TTL.
  private def togglePinState(traceId: Long, state: Boolean): Future[Boolean] = {
    val ttl = state match {
      case true => {
        Future.value(config.pinTtl.inSeconds)
      }
      case false => {
        client.getDataTimeToLive()
      }
    }
    ttl.map { t =>
      client.setTraceTimeToLive(traceId, t).map(Unit => state)
    }.flatten
  }

  /**
   * Returns a sequence of adjusters based on the params for a request. Default is TimeSkewAdjuster
   */
  private def getAdjusters(request: Request) = {
    request.params.get("adjust_clock_skew") match {
      case Some(flag) => {
        flag match {
          case "false" => Seq.empty[gen.Adjust]
          case _ => Seq(gen.Adjust.TimeSkew)
        }
      }
      case _ => {
        Seq(gen.Adjust.TimeSkew)
      }
    }
  }

  // Wraps a page view in the shared application layout template.
  private def wrapView(v: View) = new View {
    val template = "templates/layouts/application.mustache"
    val rootUrl = config.rootUrl
    val innerView: View = v
    lazy val body = innerView.render
  }
}
// Index page view, pre-populated with the current date/time for the query form.
class IndexView(val endDate: String, val endTime: String) extends View {
  val template = "templates/index.mustache"
  val inlineJs = "$(Zipkin.Application.Index.initialize());"
}
// Trace detail view; the trace id is interpolated into the bootstrap JS call.
class ShowView(traceId: String) extends View {
  val template = "templates/show.mustache"
  val inlineJs = "$(Zipkin.Application.Show.initialize(\\"" + traceId + "\\"));"
}
// Static page view for rendering a trace supplied as JSON.
class StaticView extends View {
  val template = "templates/static.mustache"
  val inlineJs = "$(Zipkin.Application.Static.initialize());"
}
| dsias/zipkin | zipkin-finatra/src/main/scala/com/twitter/zipkin/web/App.scala | Scala | apache-2.0 | 8,075 |
/*
* MnoClassifier learns MSISDN-Operator combinations to afterwards predict Operators.
* Copyright (C) 2013 MACH Connectivity GmbH
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
package com.github.mkroli.mnoclassifier.service.http
import scala.Option.option2Iterable
import scala.concurrent.Future
import org.json4s.DefaultFormats
import org.json4s.jvalue2extractable
import org.json4s.native.JsonMethods.parse
import org.json4s.string2JsonInput
import com.github.mkroli.mnoclassifier.service.AkkaComponent
import com.github.mkroli.mnoclassifier.service.NetworkOperatorClassifierActorComponent
import com.github.mkroli.mnoclassifier.service.Train
import com.github.mkroli.mnoclassifier.service.helper.Logging
import com.github.mkroli.mnoclassifier.types.NetworkOperator
import com.github.mkroli.mnoclassifier.types.TelephoneNumber
import akka.pattern.ask
import unfiltered.kit.GZip
import unfiltered.netty.async.Planify
import unfiltered.request.Body
import unfiltered.request.POST
import unfiltered.request.Path
import unfiltered.request.RequestContentType
import unfiltered.request.Seg
import unfiltered.response.PlainTextContent
import unfiltered.response.ResponseString
trait TrainingPlanComponent extends Logging {
  self: AkkaComponent with NetworkOperatorClassifierActorComponent =>

  // Async HTTP plan accepting POST /api/train with training samples either as
  // a JSON array (content type application/json) or as tab-separated lines.
  lazy val trainingPlan = Planify(GZip.async {
    case req @ POST(Path(Seg("api" :: "train" :: Nil))) => {
      val body = Body.string(req)
      // one ask-future per successfully parsed sample; unparseable entries are
      // logged and skipped
      val samples = req match {
        case RequestContentType(ct) if ct startsWith "application/json" =>
          implicit val formats = DefaultFormats
          parse(body).children
            .flatMap { child =>
              child.extractOpt[NetworkOperatorTelephoneNumberMapping] match {
                case Some(NetworkOperatorTelephoneNumberMapping(o, t)) =>
                  val train = Train(TelephoneNumber(t), NetworkOperator(o))
                  debug("Will train using {}", train)
                  Some(networkOperatorClassifierActor ? train)
                case None =>
                  info("Could not parse training data: {}", child)
                  None
              }
            }
        case _ =>
          // plain-text fallback: operator<TAB>digits, one sample per line
          val TrainingLine = """([^\\t]*)\\t(\\d*)""".r
          body.split("""[\\r\\n]+""").toList.flatMap {
            case TrainingLine(o, t) =>
              val train = Train(TelephoneNumber(t), NetworkOperator(o))
              debug("Will train using {}", train)
              Some(networkOperatorClassifierActor ? train)
            case line =>
              info("Could not parse training data: {}", line)
              None
          }
      }
      // respond only after every training future has completed, chaining the
      // completions sequentially
      def onCompletion(l: List[Future[_]]): Unit = l match {
        case Nil => req.respond(PlainTextContent ~>
          ResponseString("learned from %d samples".format(samples.size)))
        case f :: tail =>
          f.onComplete(_ => onCompletion(tail))
      }
      onCompletion(samples)
    }
  })
}
| mkroli/mnoclassifier | src/main/scala/com/github/mkroli/mnoclassifier/service/http/TrainingPlanComponent.scala | Scala | gpl-2.0 | 3,612 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.calcite
import org.apache.flink.table.planner.calcite.FlinkRelFactories.{ExpandFactory, RankFactory}
import org.apache.flink.table.planner.plan.nodes.logical._
import org.apache.flink.table.planner.plan.schema.FlinkPreparingTableBase
import org.apache.flink.table.runtime.operators.rank.{RankRange, RankType}
import com.google.common.collect.ImmutableList
import org.apache.calcite.plan.RelOptTable.ToRelContext
import org.apache.calcite.plan.{Contexts, RelOptCluster, RelOptTable}
import org.apache.calcite.rel.`type`.{RelDataType, RelDataTypeField}
import org.apache.calcite.rel.core.RelFactories._
import org.apache.calcite.rel.core._
import org.apache.calcite.rel.hint.RelHint
import org.apache.calcite.rel.logical._
import org.apache.calcite.rel.{RelCollation, RelNode}
import org.apache.calcite.rex._
import org.apache.calcite.sql.SqlKind
import org.apache.calcite.sql.SqlKind.{EXCEPT, INTERSECT, UNION}
import org.apache.calcite.tools.{RelBuilder, RelBuilderFactory}
import org.apache.calcite.util.ImmutableBitSet
import java.util
import scala.collection.JavaConversions._
/**
* Contains factory interface and default implementation for creating various
* flink logical rel nodes.
*/
object FlinkLogicalRelFactories {

  // Singleton factory instances, one per logical operator kind. Each factory
  // creates the Flink-specific logical rel node instead of the Calcite default.
  val FLINK_LOGICAL_PROJECT_FACTORY = new ProjectFactoryImpl
  val FLINK_LOGICAL_FILTER_FACTORY = new FilterFactoryImpl
  val FLINK_LOGICAL_JOIN_FACTORY = new JoinFactoryImpl
  val FLINK_LOGICAL_CORRELATE_FACTORY = new CorrelateFactoryImpl
  val FLINK_LOGICAL_SORT_FACTORY = new SortFactoryImpl
  val FLINK_LOGICAL_AGGREGATE_FACTORY = new AggregateFactoryImpl
  val FLINK_LOGICAL_SET_OP_FACTORY = new SetOpFactoryImpl
  val FLINK_LOGICAL_VALUES_FACTORY = new ValuesFactoryImpl
  val FLINK_LOGICAL_TABLE_SCAN_FACTORY = new TableScanFactoryImpl
  val FLINK_LOGICAL_EXPAND_FACTORY = new ExpandFactoryImpl
  val FLINK_LOGICAL_RANK_FACTORY = new RankFactoryImpl

  /** A [[RelBuilderFactory]] that creates a [[RelBuilder]] that will
    * create logical relational expressions for everything. */
  // NOTE(review): FLINK_LOGICAL_CORRELATE_FACTORY is defined above but not
  // registered in this context — confirm whether correlate creation is meant
  // to fall back to the Calcite default factory here.
  val FLINK_LOGICAL_REL_BUILDER: RelBuilderFactory = FlinkRelBuilder.proto(
    Contexts.of(
      FLINK_LOGICAL_PROJECT_FACTORY,
      FLINK_LOGICAL_FILTER_FACTORY,
      FLINK_LOGICAL_JOIN_FACTORY,
      FLINK_LOGICAL_SORT_FACTORY,
      FLINK_LOGICAL_AGGREGATE_FACTORY,
      FLINK_LOGICAL_SET_OP_FACTORY,
      FLINK_LOGICAL_VALUES_FACTORY,
      FLINK_LOGICAL_TABLE_SCAN_FACTORY,
      FLINK_LOGICAL_EXPAND_FACTORY,
      FLINK_LOGICAL_RANK_FACTORY))

  /**
    * Implementation of [[ProjectFactory]] that returns a [[FlinkLogicalCalc]].
    */
  class ProjectFactoryImpl extends ProjectFactory {
    def createProject(
        input: RelNode,
        hints: util.List[RelHint],
        childExprs: util.List[_ <: RexNode],
        fieldNames: util.List[String]): RelNode = {
      // The projection is expressed as a Calc whose RexProgram contains only
      // project expressions (no condition). The hints argument is not
      // propagated to the created node.
      val rexBuilder = input.getCluster.getRexBuilder
      val inputRowType = input.getRowType
      val programBuilder = new RexProgramBuilder(inputRowType, rexBuilder)
      childExprs.zip(fieldNames).foreach {
        case (childExpr, fieldName) => programBuilder.addProject(childExpr, fieldName)
      }
      val program = programBuilder.getProgram
      FlinkLogicalCalc.create(input, program)
    }
  }

  /**
    * Implementation of [[SortFactory]] that returns a [[FlinkLogicalSort]].
    */
  class SortFactoryImpl extends SortFactory {
    def createSort(
        input: RelNode,
        collation: RelCollation,
        offset: RexNode,
        fetch: RexNode): RelNode = {
      FlinkLogicalSort.create(input, collation, offset, fetch)
    }
  }

  /**
    * Implementation of [[SetOpFactory]] that
    * returns a flink [[org.apache.calcite.rel.core.SetOp]] for the particular kind of set
    * operation (UNION, EXCEPT, INTERSECT).
    */
  class SetOpFactoryImpl extends SetOpFactory {
    def createSetOp(kind: SqlKind, inputs: util.List[RelNode], all: Boolean): RelNode = {
      kind match {
        case UNION =>
          FlinkLogicalUnion.create(inputs, all)
        case EXCEPT =>
          // Calcite's EXCEPT maps to a Minus rel node.
          FlinkLogicalMinus.create(inputs, all)
        case INTERSECT =>
          FlinkLogicalIntersect.create(inputs, all)
        case _ =>
          throw new AssertionError("not a set op: " + kind)
      }
    }
  }

  /**
    * Implementation of [[AggregateFactory]] that returns a [[FlinkLogicalAggregate]].
    */
  class AggregateFactoryImpl extends AggregateFactory {
    def createAggregate(
        input: RelNode,
        hints: util.List[RelHint],
        groupSet: ImmutableBitSet,
        groupSets: ImmutableList[ImmutableBitSet],
        aggCalls: util.List[AggregateCall]): RelNode = {
      // The hints argument is not propagated to the created node.
      FlinkLogicalAggregate.create(input, groupSet, groupSets, aggCalls)
    }
  }

  /**
    * Implementation of [[FilterFactory]] that returns a [[FlinkLogicalCalc]].
    */
  class FilterFactoryImpl extends FilterFactory {
    override def createFilter(
        input: RelNode,
        condition: RexNode,
        variablesSet: util.Set[CorrelationId]): RelNode = {
      // Create a program containing a filter.
      // Ignore the variablesSet for current implementation.
      val rexBuilder = input.getCluster.getRexBuilder
      val inputRowType = input.getRowType
      val programBuilder = new RexProgramBuilder(inputRowType, rexBuilder)
      // Identity projection of all input fields plus the filter condition.
      programBuilder.addIdentity()
      programBuilder.addCondition(condition)
      val program = programBuilder.getProgram
      FlinkLogicalCalc.create(input, program)
    }
  }

  /**
    * Implementation of [[JoinFactory]] that returns a [[FlinkLogicalJoin]].
    */
  class JoinFactoryImpl extends JoinFactory {
    def createJoin(
        left: RelNode,
        right: RelNode,
        hints: util.List[RelHint],
        condition: RexNode,
        variablesSet: util.Set[CorrelationId],
        joinType: JoinRelType,
        semiJoinDone: Boolean): RelNode = {
      // hints, variablesSet and semiJoinDone are not propagated to the
      // created node.
      FlinkLogicalJoin.create(left, right, condition, joinType)
    }
  }

  /**
    * Implementation of [[CorrelateFactory]] that returns a [[FlinkLogicalCorrelate]].
    */
  class CorrelateFactoryImpl extends CorrelateFactory {
    def createCorrelate(
        left: RelNode,
        right: RelNode,
        correlationId: CorrelationId,
        requiredColumns: ImmutableBitSet,
        joinType: JoinRelType): RelNode = {
      FlinkLogicalCorrelate.create(left, right, correlationId, requiredColumns, joinType)
    }
  }

  /**
    * Implementation of [[ValuesFactory]] that returns a [[FlinkLogicalValues]].
    */
  class ValuesFactoryImpl extends ValuesFactory {
    def createValues(
        cluster: RelOptCluster,
        rowType: RelDataType,
        tuples: util.List[ImmutableList[RexLiteral]]): RelNode = {
      FlinkLogicalValues.create(
        cluster, rowType, ImmutableList.copyOf[ImmutableList[RexLiteral]](tuples))
    }
  }

  /**
    * Implementation of [[TableScanFactory]] that returns a
    * [[FlinkLogicalLegacyTableSourceScan]] or [[FlinkLogicalDataStreamTableScan]].
    */
  class TableScanFactoryImpl extends TableScanFactory {
    def createScan(toRelContext: ToRelContext, table: RelOptTable): RelNode = {
      val cluster = toRelContext.getCluster
      val hints = toRelContext.getTableHints
      val tableScan = LogicalTableScan.create(cluster, table, hints)
      // NOTE(review): this match is non-exhaustive — a scan that is neither a
      // table-source scan nor a data-stream scan raises MatchError. Confirm
      // that cannot happen for tables reaching this factory.
      tableScan match {
        case s: LogicalTableScan if FlinkLogicalLegacyTableSourceScan.isTableSourceScan(s) =>
          FlinkLogicalLegacyTableSourceScan.create(
            cluster,
            s.getTable.asInstanceOf[FlinkPreparingTableBase])
        case s: LogicalTableScan if FlinkLogicalDataStreamTableScan.isDataStreamTableScan(s) =>
          FlinkLogicalDataStreamTableScan.create(
            cluster,
            s.getTable.asInstanceOf[FlinkPreparingTableBase])
      }
    }
  }

  /**
    * Implementation of [[FlinkRelFactories.ExpandFactory]] that returns a
    * [[FlinkLogicalExpand]].
    */
  class ExpandFactoryImpl extends ExpandFactory {
    def createExpand(
        input: RelNode,
        rowType: RelDataType,
        projects: util.List[util.List[RexNode]],
        expandIdIndex: Int): RelNode = {
      FlinkLogicalExpand.create(input, rowType, projects, expandIdIndex)
    }
  }

  /**
    * Implementation of [[FlinkRelFactories.RankFactory]] that returns a
    * [[FlinkLogicalRank]].
    */
  class RankFactoryImpl extends RankFactory {
    def createRank(
        input: RelNode,
        partitionKey: ImmutableBitSet,
        orderKey: RelCollation,
        rankType: RankType,
        rankRange: RankRange,
        rankNumberType: RelDataTypeField,
        outputRankNumber: Boolean): RelNode = {
      FlinkLogicalRank.create(input, partitionKey, orderKey, rankType, rankRange,
        rankNumberType, outputRankNumber)
    }
  }
}
| tzulitai/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/calcite/FlinkLogicalRelFactories.scala | Scala | apache-2.0 | 9,564 |
/*
* Copyright 2014 JHC Systems Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Some source in this file was developed in the Slick framework:
* - UntypedColumnHelpers.likeEncode
*
* Copyright 2011-2012 Typesafe, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package sqlest.untyped.ast.syntax
import org.joda.time.DateTime
import scala.reflect.runtime.{ universe => ru }
import scala.language.implicitConversions
import scala.util.Try
import sqlest.ast._
import sqlest.untyped.ast._
/** Helpers for the untyped column operators: each helper parses a raw string
 * argument according to a column's [[ColumnType]] and builds the
 * corresponding SQL expression, returning None when the string cannot be
 * interpreted as the required type.
 */
class UntypedColumnHelpers {
  // Per-type argument parsers: Some(parsed) on success, None on failure.
  def stringArgument(arg: String) = Some(arg)
  def intArgument(arg: String) = Try(arg.toInt).toOption
  def longArgument(arg: String) = Try(arg.toLong).toOption
  def doubleArgument(arg: String) = Try(arg.toDouble).toOption
  def booleanArgument(arg: String) = arg.trim.toLowerCase match {
    case "true" => Some(true)
    case "false" => Some(false)
    case _ => None
  }
  def bigDecimalArgument(arg: String) = Try(BigDecimal(arg)).toOption
  def dateTimeArgument(arg: String) = Iso8601.unapply(arg)

  /** Parses `arg` for a mapped column by dispatching on the mapped type's
   * TypeTag; fails fast (sys.error) for mapped types outside the standard
   * set. The cast is safe because every branch produces an Option of the
   * matched type. */
  def mappedArgument[A](arg: String, columnType: ColumnType[A]): Option[A] = (columnType.typeTag match {
    case typeTag if typeTag == ru.typeTag[Int] => intArgument(arg)
    case typeTag if typeTag == ru.typeTag[Long] => longArgument(arg)
    case typeTag if typeTag == ru.typeTag[Double] => doubleArgument(arg)
    case typeTag if typeTag == ru.typeTag[BigDecimal] => bigDecimalArgument(arg)
    case typeTag if typeTag == ru.typeTag[Boolean] => booleanArgument(arg)
    case typeTag if typeTag == ru.typeTag[String] => stringArgument(arg)
    case typeTag if typeTag == ru.typeTag[DateTime] => dateTimeArgument(arg)
    case _ => sys.error(s"Untyped operators are not implemented for non-standard mapped types: $columnType")
  }).asInstanceOf[Option[A]]

  /** Builds `left <op> right` with the right-hand string parsed per the
   * column type. Recurses through Option column types; for mapped columns
   * the parsed value is written back to its base database representation. */
  def infixExpression[A](op: String, left: Column[A], right: String, columnType: ColumnType[A]): Option[InfixFunctionColumn[Boolean]] = columnType match {
    case IntColumnType => intArgument(right).map(right => InfixFunctionColumn[Boolean](op, left, right))
    case LongColumnType => longArgument(right).map(right => InfixFunctionColumn[Boolean](op, left, right))
    case DoubleColumnType => doubleArgument(right).map(right => InfixFunctionColumn[Boolean](op, left, right))
    case BigDecimalColumnType => bigDecimalArgument(right).map(right => InfixFunctionColumn[Boolean](op, left, right))
    case BooleanColumnType => booleanArgument(right).map(right => InfixFunctionColumn[Boolean](op, left, right))
    case StringColumnType => stringArgument(right).map(right => InfixFunctionColumn[Boolean](op, left, right))
    case DateTimeColumnType => dateTimeArgument(right).map(right => InfixFunctionColumn[Boolean](op, left, right))
    case OptionColumnType(baseType) => infixExpression(op, left, right, baseType)
    case mappedColumnType: MappedColumnType[A, _] =>
      mappedArgument(right, columnType).map { right =>
        val mappedRight = mappedColumnType.write(right)
        InfixFunctionColumn[Boolean](op, left, LiteralColumn(mappedRight)(mappedColumnType.baseType))
      }
  }

  /** Builds a SQL `like` expression; only defined for string(-based)
   * columns, None otherwise. `formatArgument` wraps the encoded pattern
   * (e.g. adds leading/trailing '%'). */
  def likeExpression(left: Column[_], right: String, columnType: ColumnType[_], formatArgument: String => String): Option[InfixFunctionColumn[Boolean]] = columnType match {
    case StringColumnType => stringArgument(right).map(right => InfixFunctionColumn[Boolean]("like", left, formatArgument(right)))
    case OptionColumnType(baseType) => likeExpression(left, right, baseType, formatArgument)
    case _ => None
  }

  /** Escapes LIKE wildcards ('%', '_') and the escape character itself using
   * '^', the same scheme as Slick's likeEncode.
   * NOTE(review): assumes the generated SQL applies ESCAPE '^' to the
   * pattern — confirm in the statement builder. */
  def likeEncode(str: String) = {
    val b = new StringBuilder
    for (c <- str) c match {
      case '%' | '_' | '^' => b append '^' append c
      case _ => b append c
    }
    b.toString
  }
}
trait ColumnSyntax {
  /** Adds untyped comparison operators to any [[Column]]. The right-hand
    * argument is always a raw string, parsed according to the column's type;
    * each operator returns None when the string cannot be parsed.
    */
  implicit class UntypedColumnOps(left: Column[_]) {
    val helpers = new UntypedColumnHelpers

    def untypedEq(right: String) = helpers.infixExpression("=", left, right, left.columnType)
    def untypedNe(right: String) = helpers.infixExpression("<>", left, right, left.columnType)
    def untypedGt(right: String) = helpers.infixExpression(">", left, right, left.columnType)
    def untypedLt(right: String) = helpers.infixExpression("<", left, right, left.columnType)
    def untypedGte(right: String) = helpers.infixExpression(">=", left, right, left.columnType)
    def untypedLte(right: String) = helpers.infixExpression("<=", left, right, left.columnType)

    // FIX: the formatArgument lambdas previously ignored their parameter
    // (`str`) and closed over the outer `right` instead. Behaviour was
    // identical — likeExpression only ever applies the function to the parsed
    // right-hand string — but the unused parameter was misleading and fragile.
    // Use the parameter itself.
    def untypedContains(right: String): Option[InfixFunctionColumn[Boolean]] =
      helpers.likeExpression(left, right, left.columnType, str => s"%${helpers.likeEncode(str)}%")

    def untypedStartsWith(right: String): Option[InfixFunctionColumn[Boolean]] =
      helpers.likeExpression(left, right, left.columnType, str => s"${helpers.likeEncode(str)}%")

    def untypedEndsWith(right: String): Option[InfixFunctionColumn[Boolean]] =
      helpers.likeExpression(left, right, left.columnType, str => s"%${helpers.likeEncode(str)}")

    /** SQL `IS NULL` check; defined for every column type, hence Some. */
    def untypedIsNull = Some(PostfixFunctionColumn[Boolean]("is null", left))
  }
}
| andrewjskatz/sqlest | src/main/scala/sqlest/untyped/ast/syntax/ColumnSyntax.scala | Scala | apache-2.0 | 6,862 |
package shapeless.datatype.record
import org.scalacheck.Prop.forAll
import org.scalacheck._
import magnolify.scalacheck.auto._
import shapeless._
import scala.language.implicitConversions
/** Test fixtures for RecordMapperSpec.
 *
 * Records come in matched shapes: for every "A" record there is a "B" record
 * that is field-for-field identical except String fields become Array[Byte],
 * and (where present) a "C" record with every field lifted into Option, so a
 * RecordMapper between the shapes can be derived and round-tripped.
 * String-keyed maps of mapped value types are commented out (not supported by
 * the mapper under test).
 */
object RecordMapperRecords {
  // Flat records with only required fields.
  case class RequiredA(intField: Int, longField: Long, stringField: String)
  case class RequiredB(intField: Int, longField: Long, stringField: Array[Byte])
  // Flat records with every field optional.
  case class OptionalA(intField: Option[Int], longField: Option[Long], stringField: Option[String])
  case class OptionalB(
    intField: Option[Int],
    longField: Option[Long],
    stringField: Option[Array[Byte]]
  )
  // Repeated fields across List, Set and Map containers.
  case class RepeatedA(
    intList: List[Int],
    longList: List[Long],
    stringList: List[String],
    intSet: Set[Int],
    longSet: Set[Long],
    stringSet: Set[String],
    intMap: Map[String, Int],
    longMap: Map[String, Long] /*, stringMap: Map[String, String]*/
  )
  case class RepeatedB(
    intList: List[Int],
    longList: List[Long],
    stringList: List[Array[Byte]],
    intSet: Set[Int],
    longSet: Set[Long],
    stringSet: Set[Array[Byte]],
    intMap: Map[String, Int],
    longMap: Map[String, Long] /*, stringMap: Map[String, Array[Byte]]*/
  )
  // Mix of required, optional and repeated fields.
  case class MixedA(
    intField: Int,
    stringField: String,
    intFieldO: Option[Int],
    stringFieldO: Option[String],
    intList: List[Int],
    stringList: List[String],
    intSet: Set[Int],
    stringSet: Set[String],
    intMap: Map[String, Int] /*, stringMap: Map[String, String]*/
  )
  case class MixedB(
    intField: Int,
    stringField: Array[Byte],
    intFieldO: Option[Int],
    stringFieldO: Option[Array[Byte]],
    intList: List[Int],
    stringList: List[Array[Byte]],
    intSet: Set[Int],
    stringSet: Set[Array[Byte]],
    intMap: Map[String, Int] /*, stringMap: Map[String, String]*/
  )
  // MixedA with every field wrapped in Option (exercises unsafe extraction).
  case class MixedC(
    intField: Option[Int],
    stringField: Option[String],
    intFieldO: Option[Int],
    stringFieldO: Option[String],
    intList: Option[List[Int]],
    stringList: Option[List[String]],
    intSet: Option[Set[Int]],
    stringSet: Option[Set[String]],
    intMap: Option[Map[String, Int]] /*, stringMap: Option[Map[String, String]]*/
  )
  // Records containing nested case-class fields (MixedA / MixedB).
  case class NestedA(
    required: String,
    optional: Option[String],
    list: List[String],
    set: Set[String],
    map: Map[String, Int],
    requiredN: MixedA,
    optionalN: Option[MixedA],
    listN: List[MixedA],
    setN: Set[MixedA] /*, mapN: Map[String, MixedA]*/
  )
  case class NestedB(
    required: Array[Byte],
    optional: Option[Array[Byte]],
    list: List[Array[Byte]],
    set: Set[Array[Byte]],
    map: Map[String, Int],
    requiredN: MixedB,
    optionalN: Option[MixedB],
    listN: List[MixedB],
    setN: Set[MixedB] /*, mapN: Map[String, MixedB]*/
  )
  case class NestedC(
    required: Option[Array[Byte]],
    optional: Option[Array[Byte]],
    list: Option[List[String]],
    set: Option[Set[String]],
    map: Option[Map[String, Int]],
    requiredN: Option[MixedA],
    optionalN: Option[MixedA],
    listN: Option[List[MixedA]],
    setN: Option[Set[MixedA]] /*, mapN: Option[Map[String, MixedA]]*/
  )
}
/** Property-based round-trip tests for RecordMapper over the fixture records. */
object RecordMapperSpec extends Properties("RecordMapper") {
  import RecordMapperRecords._
  import shapeless.datatype.test.SerializableUtils._

  // String <-> Array[Byte] conversions picked up implicitly when deriving the
  // MapRecord instances between the A (String) and B (Array[Byte]) shapes.
  implicit def s2b(x: String): Array[Byte] = x.getBytes
  implicit def b2s(x: Array[Byte]): String = new String(x)

  /** Round-trips an A value through RecordMapper[A, B] and back, first
   * checking the derived mapper survives Java serialization. Split into a
   * class plus a `from` method so B can be supplied explicitly while A and
   * the HList representations are inferred. */
  class RoundTrip[B] {
    def from[A, LA <: HList, LB <: HList](a: A)(implicit
      genA: LabelledGeneric.Aux[A, LA],
      genB: LabelledGeneric.Aux[B, LB],
      mrA: MapRecord[LA, LB],
      mrB: MapRecord[LB, LA]
    ): Boolean = {
      val t = ensureSerializable(RecordMapper[A, B])
      t.from(t.to(a)) == a
    }
  }
  def roundTripTo[B]: RoundTrip[B] = new RoundTrip[B]

  property("required") = forAll { m: RequiredA => roundTripTo[RequiredB].from(m) }
  property("optional") = forAll { m: OptionalA => roundTripTo[OptionalB].from(m) }
  property("repeated") = forAll { m: RepeatedA => roundTripTo[RepeatedB].from(m) }
  property("mixed") = forAll { m: MixedA => roundTripTo[MixedB].from(m) }
  property("nested") = forAll { m: NestedA => roundTripTo[NestedB].from(m) }

  // The remaining properties map required fields into Option fields, which
  // requires the unsafe Option extraction implicits to map back.
  import UnsafeOptionExtractorImplicits._
  property("required to optional with unsafe option extraction") = forAll { m: RequiredA =>
    roundTripTo[OptionalB].from(m)
  }
  property("mixed with unsafe option extraction") = forAll { m: MixedA =>
    roundTripTo[MixedC].from(m)
  }
  property("nested with unsafe option extraction") = forAll { m: NestedA =>
    roundTripTo[NestedC].from(m)
  }
}
| nevillelyh/shapeless-datatype | core/src/test/scala/shapeless/datatype/record/RecordMapperSpec.scala | Scala | apache-2.0 | 4,558 |
package org.jetbrains.plugins.scala.lang.transformation.calls
import org.jetbrains.plugins.scala.lang.transformation.TransformerTest
/**
* @author Pavel Fatin
*/
// Tests for the ExpandVarargArgument transformation: rewrites calls that pass
// vararg elements individually into the explicit `Array(...): _*` splat form.
class ExpandVarargArgumentTest extends TransformerTest(ExpandVarargArgument,
"""
object O {
def f(v: A*) {}
def g(v1: A, v2: B*) {}
}
""") {
  // No vararg arguments: expands to an empty array splat.
  def testEmpty() = check(
    "O.f()",
    "O.f(Array(): _*)"
  )
  def testMultiple() = check(
    "O.f(A, A)",
    "O.f(Array(A, A): _*)"
  )
  // Only the trailing vararg portion is wrapped; leading fixed args are kept.
  def testTail() = check(
    "O.g(A, B, B)",
    "O.g(A, Array(B, B): _*)"
  )
  // TODO
  //  def testInfixSingle() = check(
  //    "O f A",
  //    "O f (Array(A, A): _*)"
  //  )
  // TODO
  //  def testInfixMultiple() = check(
  //    "O f (A, A)",
  //    "O f (Array(A, A): _*)"
  //  )
  // Synthetic apply methods of case classes are expanded too.
  def testSynthetic() = check(
    "case class T(v: A*)",
    "T.apply(A, A)",
    "T.apply(Array(A, A): _*)"
  )
  // TODO
  //  def testConstructor() = check(
  //    "class T(v: A*)",
  //    "new T(A, A)",
  //    "new T(Array(A, A): _*)"
  //  )
  // Already-splatted calls are left untouched (idempotence).
  def testExplicit() = check(
    "O.f(Array(A, A): _*)",
    "O.f(Array(A, A): _*)"
  )
  // the transformation is infinitely recursive, as there's no Object[] {} equivalent in Scala
  def testArray() = check(
    "Array(A, A)",
    "Array(A, A)"
  )
  // TODO rely on _* instead of Array to prevent recursion
  // TODO support Java methods
} | whorbowicz/intellij-scala | test/org/jetbrains/plugins/scala/lang/transformation/calls/ExpandVarargArgumentTest.scala | Scala | apache-2.0 | 1,351 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command.schema
import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer
import org.apache.spark.sql.{CarbonEnv, Row, SparkSession}
import org.apache.spark.sql.execution.command.{AlterTableDropColumnModel, MetadataCommand}
import org.apache.spark.sql.hive.CarbonSessionCatalog
import org.apache.spark.util.AlterTableUtil
import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
import org.apache.carbondata.common.logging.{LogService, LogServiceFactory}
import org.apache.carbondata.core.features.TableOperation
import org.apache.carbondata.core.locks.{ICarbonLock, LockUsage}
import org.apache.carbondata.core.metadata.converter.ThriftWrapperSchemaConverterImpl
import org.apache.carbondata.core.metadata.encoder.Encoding
import org.apache.carbondata.core.metadata.schema.table.CarbonTable
import org.apache.carbondata.core.util.CarbonUtil
import org.apache.carbondata.core.util.path.CarbonTablePath
import org.apache.carbondata.events.{AlterTableDropColumnPostEvent, AlterTableDropColumnPreEvent, OperationContext, OperationListenerBus}
import org.apache.carbondata.format.SchemaEvolutionEntry
import org.apache.carbondata.spark.rdd.AlterTableDropColumnRDD
private[sql] case class CarbonAlterTableDropColumnCommand(
    alterTableDropColumnModel: AlterTableDropColumnModel)
  extends MetadataCommand {

  /**
   * Drops the requested columns from a carbon table. This is a metadata-only
   * change: dropped columns are marked invisible in the thrift schema rather
   * than physically removed. The command acquires metadata/compaction locks,
   * validates the request (no index-datamap tables, no partition columns, no
   * complex-type columns, all columns must exist), records the change in the
   * schema-evolution history, updates the Spark catalog, and clears
   * dictionary files/cache for dropped dictionary-encoded columns. On any
   * failure the schema change is reverted; locks are always released.
   */
  override def processMetadata(sparkSession: SparkSession): Seq[Row] = {
    val LOGGER: LogService = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
    val tableName = alterTableDropColumnModel.tableName
    val dbName = alterTableDropColumnModel.databaseName
      .getOrElse(sparkSession.catalog.currentDatabase)
    LOGGER.audit(s"Alter table drop columns request has been received for $dbName.$tableName")
    var locks = List.empty[ICarbonLock]
    // timestamp of the schema-evolution entry; also used to revert on failure
    var timeStamp = 0L
    val locksToBeAcquired = List(LockUsage.METADATA_LOCK, LockUsage.COMPACTION_LOCK)
    // get the latest carbon table and check for column existence
    var carbonTable: CarbonTable = null
    try {
      locks = AlterTableUtil
        .validateTableAndAcquireLock(dbName, tableName, locksToBeAcquired)(sparkSession)
      val metastore = CarbonEnv.getInstance(sparkSession).carbonMetastore
      carbonTable = CarbonEnv.getCarbonTable(Some(dbName), tableName)(sparkSession)
      if (!carbonTable.canAllow(carbonTable, TableOperation.ALTER_DROP,
        alterTableDropColumnModel.columns.asJava)) {
        throw new MalformedCarbonCommandException(
          "alter table drop column is not supported for index datamap")
      }
      // Partition columns cannot be dropped.
      val partitionInfo = carbonTable.getPartitionInfo(tableName)
      if (partitionInfo != null) {
        val partitionColumnSchemaList = partitionInfo.getColumnSchemaList.asScala
          .map(_.getColumnName)
        // check each column existence in the table
        val partitionColumns = alterTableDropColumnModel.columns.filter {
          tableColumn => partitionColumnSchemaList.contains(tableColumn)
        }
        if (partitionColumns.nonEmpty) {
          throwMetadataException(dbName, tableName, "Partition columns cannot be dropped: " +
            s"$partitionColumns")
        }
      }
      // Check if column to be dropped is of complex dataType
      alterTableDropColumnModel.columns.foreach { column =>
        if (carbonTable.getColumnByName(alterTableDropColumnModel.tableName, column).getDataType
          .isComplexType) {
          val errMsg = "Complex column cannot be dropped"
          throw new MalformedCarbonCommandException(errMsg)
        }
      }
      val tableColumns = carbonTable.getCreateOrderColumn(tableName).asScala
      // Collects dictionary-encoded dimensions among the dropped columns so
      // their dictionary files can be deleted afterwards.
      var dictionaryColumns = Seq[org.apache.carbondata.core.metadata.schema.table.column
        .ColumnSchema]()
      // TODO: if deleted column list includes bucketted column throw an error
      alterTableDropColumnModel.columns.foreach { column =>
        var columnExist = false
        tableColumns.foreach { tableColumn =>
          // column should not be already deleted and should exist in the table
          if (!tableColumn.isInvisible && column.equalsIgnoreCase(tableColumn.getColName)) {
            if (tableColumn.isDimension) {
              if (tableColumn.hasEncoding(Encoding.DICTIONARY)) {
                dictionaryColumns ++= Seq(tableColumn.getColumnSchema)
              }
            }
            columnExist = true
          }
        }
        if (!columnExist) {
          throwMetadataException(dbName, tableName,
            s"Column $column does not exists in the table $dbName.$tableName")
        }
      }
      val operationContext = new OperationContext
      // event will be fired before dropping the columns
      val alterTableDropColumnPreEvent: AlterTableDropColumnPreEvent = AlterTableDropColumnPreEvent(
        carbonTable,
        alterTableDropColumnModel,
        sparkSession)
      OperationListenerBus.getInstance().fireEvent(alterTableDropColumnPreEvent, operationContext)
      // read the latest schema file
      val tableInfo: org.apache.carbondata.format.TableInfo =
        metastore.getThriftTableInfo(carbonTable)
      // maintain the deleted columns for schema evolution history
      var deletedColumnSchema = ListBuffer[org.apache.carbondata.format.ColumnSchema]()
      val columnSchemaList = tableInfo.fact_table.table_columns.asScala
      alterTableDropColumnModel.columns.foreach { column =>
        columnSchemaList.foreach { columnSchema =>
          if (!columnSchema.invisible && column.equalsIgnoreCase(columnSchema.column_name)) {
            // keep a copy for the evolution entry, then hide the column
            deletedColumnSchema += columnSchema.deepCopy
            columnSchema.invisible = true
          }
        }
      }
      // add deleted columns to schema evolution history and update the schema
      timeStamp = System.currentTimeMillis
      val schemaEvolutionEntry = new SchemaEvolutionEntry(timeStamp)
      schemaEvolutionEntry.setRemoved(deletedColumnSchema.toList.asJava)
      val schemaConverter = new ThriftWrapperSchemaConverterImpl
      val delCols = deletedColumnSchema.map { deleteCols =>
        schemaConverter.fromExternalToWrapperColumnSchema(deleteCols)
      }
      val (tableIdentifier, schemaParts, cols) = AlterTableUtil.updateSchemaInfo(
        carbonTable,
        schemaEvolutionEntry,
        tableInfo,
        Some(delCols))(sparkSession)
      sparkSession.sessionState.catalog.asInstanceOf[CarbonSessionCatalog]
        .alterDropColumns(tableIdentifier, schemaParts, cols)
      sparkSession.catalog.refreshTable(tableIdentifier.quotedString)
      // TODO: 1. add check for deletion of index tables
      // delete dictionary files for dictionary column and clear dictionary cache from memory
      new AlterTableDropColumnRDD(sparkSession.sparkContext,
        dictionaryColumns,
        carbonTable.getAbsoluteTableIdentifier).collect()
      // event fired after the columns have been dropped (the original comment
      // said "before", which was incorrect for the post event)
      val alterTableDropColumnPostEvent: AlterTableDropColumnPostEvent =
        AlterTableDropColumnPostEvent(
          carbonTable,
          alterTableDropColumnModel,
          sparkSession)
      OperationListenerBus.getInstance().fireEvent(alterTableDropColumnPostEvent, operationContext)
      LOGGER.info(s"Alter table for drop columns is successful for table $dbName.$tableName")
      LOGGER.audit(s"Alter table for drop columns is successful for table $dbName.$tableName")
    } catch {
      case e: Exception =>
        LOGGER.error("Alter table drop columns failed : " + e.getMessage)
        if (carbonTable != null) {
          // roll back the invisible flags set above
          AlterTableUtil.revertDropColumnChanges(dbName, tableName, timeStamp)(sparkSession)
        }
        throwMetadataException(dbName, tableName,
          s"Alter table drop column operation failed: ${e.getMessage}")
    } finally {
      // release lock after command execution completion
      AlterTableUtil.releaseLocks(locks)
    }
    Seq.empty
  }
}
| jatin9896/incubator-carbondata | integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/schema/CarbonAlterTableDropColumnCommand.scala | Scala | apache-2.0 | 8,798 |
/* Copyright 2009-2014 EPFL, Lausanne */
import leon._
import leon.lang._
import leon.annotation._
object Numerical {
  /** Fast exponentiation by repeated squaring: returns base^p.
   * Requires p >= 0. */
  def power(base: BigInt, p: BigInt): BigInt = {
    require(p >= BigInt(0))
    if (p == BigInt(0)) {
      BigInt(1)
    } else if (p % BigInt(2) == BigInt(0)) {
      power(base * base, p / BigInt(2))
    } else {
      base * power(base, p - BigInt(1))
    }
  } ensuring {
    res => ((base, p), res) passes {
      case (_, BigInt(0)) => BigInt(1)
      case (b, BigInt(1)) => b
      case (BigInt(2), BigInt(7)) => BigInt(128)
      case (BigInt(2), BigInt(10)) => BigInt(1024)
    }
  }

  /** Greatest common divisor by repeated subtraction (Euclid).
   * Requires a > 0 and b > 0. */
  def gcd(a: BigInt, b: BigInt): BigInt = {
    require(a > BigInt(0) && b > BigInt(0));
    if (a == b) {
      // BUG FIX: the base case returned BigInt(1) (flagged by the original
      // "fixme" comment); gcd(a, a) is a.
      a
    } else if (a > b) {
      gcd(a - b, b)
    } else {
      gcd(a, b - a)
    }
  } ensuring {
    // BUG FIX: the example for (120, 24) expected 12, but gcd(120, 24) = 24
    // (12 is merely a common divisor). With the old value the postcondition
    // could not be satisfied together with the (5, 5) => 5 case.
    res => (a % res == BigInt(0)) && (b % res == BigInt(0)) && (((a, b), res) passes {
      case (BigInt(120), BigInt(24)) => BigInt(24)
      case (BigInt(5), BigInt(7)) => BigInt(1)
      case (BigInt(5), BigInt(5)) => BigInt(5)
    })
  }
}
| ericpony/scala-examples | testcases/repair/Numerical/Numerical2.scala | Scala | mit | 1,092 |
/**
* @author Admin
*
*/
package ex8.cs
import scala.collection.mutable
class translator {
  var counter = 1 // shared id source backing trueLabalCounter
  var init = true // whether bootstrap code still needs to be emitted
  def initial = init = false // marks the bootstrap as done (mutates init)
  var scope = mutable.ListBuffer("") // stack of enclosing function names; "" = global scope
  // Label id generator: every assembly template interpolates this exactly
  // twice, and the increment-then-halve pairing makes both interpolations
  // within one command agree while successive commands get a fresh id.
  def trueLabalCounter: Int = { counter += 1; counter / 2 }
  /** Dispatches one VM command line to the matching translator based on its
   * arity (1, 2 or 3 space-separated tokens).
   * NOTE(review): only 3-token commands receive the file name, reduced to
   * whatever follows the last "//" — confirm callers really pass paths in a
   * form where "//" (not "/") is the separator.
   */
  def pars(s: String, file: String = "") = {
    val a = s.split(" ")
    a.length match {
      case 3 => findOpration3(a(0), a(1), a(2),file.split("//").last)
      case 2 => findOpration2(a(0), a(1))
      case 1 => findOpration1(a(0))
    }
  }
  // --- Stack arithmetic / logic commands (Hack assembly snippets) ---

  /** add/sub: pops y into D, applies `x <o> y` in place on the new top. */
  def arrOp(o: String) = f"""@SP \nAM=M-1\nD=M\n@SP\nA=M-1\nM=M$o%sD//end of arithmetic opp"""
  /** neg: arithmetic negation of the stack top in place. */
  def negOp = s"""@SP\nA=M-1\nM=-M//end of neg op"""
  /** eq/gt/lt: pops y, computes x - y and leaves -1 (true) / 0 (false); o is
   * the jump mnemonic (JEQ/JGT/JLT). trueLabalCounter is interpolated twice
   * here and yields the same id for both occurrences (see its definition). */
  def compOper(o: String) = s"""@SP\nAM=M-1\nD=M\n@SP\nA=M-1\nD=M-D\nM=-1\n@TRUE$trueLabalCounter\nD;$o\n@SP\nA=M-1\nM=0\n(TRUE$trueLabalCounter)//end of comm opp"""
  /** and/or: pops y, applies the bitwise operator in place. */
  def bitOper(o: String) = f"""@SP\nAM=M-1\nD=M\n@SP\nA=M-1\nM=D$o%sM//end of bitwish opp"""
  /** not: bitwise complement of the stack top in place. */
  def notOp = s"""@SP\nA=M-1\nM=!M\n//end of nor opp"""

  // --- push commands ---
  // Common tail appended to every push: *SP = D; SP++.
  val pushCommanOrder = s"""@SP\nA=M\nM=D\n@SP\nM=M+1//end of push"""
  /** push for pointer/temp/static: D = RAM[offset]. */
  def pushSigType(offset: String): String = s"""@$offset\nD=M\n""".concat(pushCommanOrder)
  /** push for local/argument/this/that: D = RAM[*typ + offset]. */
  def pushSigArgsLoc(typ: String, offset: Int): String = s"""@$typ\nD=M\n@$offset\nA=D+A\nD=M\n""".concat(pushCommanOrder)
  /** push constant: D = offset (immediate value). */
  def pushConstant(offset: String) = s"""@$offset \nD=A\n""".concat(pushCommanOrder)

  // --- pop commands ---
  /** pop for pointer/temp/static: RAM[offset] = popped value. */
  def popSigType(offset: String): String = s"""@SP\nAM=M-1\nD=M\n@$offset\nM=D //pop tmp and static end"""
  /** pop for local/argument/this/that: computes the target address into R13,
   * then stores the popped value through it. */
  def popSigArgsLoc(typ: String, offset: Int): String = s"""@$typ\nD=M\n@$offset\nD=D+A\n@13\nM=D\n@SP\nAM=M-1\nD=M\n@13\nA=M\nM=D //pop arg end"""
/**
* to project 8
*/
val scopename :String = if(scope.length>1)scope.last else ""
def makeLabal(st: String): String = s"""(${scopename + "$" + st})"""
val retFunction: String = {
//clear this function from scope
var scopelabale =scope.last
scope -= scope.last
def pointRetern(st: String): String = s"""@13\nAM=M-1\nD=M\n@$st\nM=D\n"""
val point = List[String]("THAT", "THIS", "ARG", "LCL")
s"""@LCL //farme and return point\nD=M\n@13\nM=D\n@5\nA=D-A\nD=M\n@14\nM=D\n""" +
s"""@SP\nAM=M-1\nD=M\n@ARG\nA=M\nM=D // SP = ARG +1\n@ARG\nD=M+1\n@SP\nM=D\n""" + point.foldLeft("") {
(acc, n) =>
acc.concat(pointRetern(n))
} +
s"""@14\nA=M\n0;JMP//end of retern : $scopelabale"""
}
def goTo(l: String): String = s"""@${scopename + "$" + l}\n0;JMP"""
def ifgoto(l: String): String = { s"""@SP\nAM=M-1\nD=M\n@${scopename+ "$" + l}\nD;JNE""" }
def doFunction(fName: String, nArgs: String) = {
//set the scope of this function
scope += fName
s"""($fName) //functoin:$fName\n""" + List.range(1, nArgs.toInt).foldLeft("") {
(acc, x) =>
acc + pars("push constant 0")
}
}
def initilize: String = s"""@256\nD=A\n@SP\nM=D\n""" + pars("call Sys.init 0")
def callFunction(fName: String, nArgs: String): String = {
val labal = fName + "$"
s"""@${labal + trueLabalCounter} //Start call function:$fName\nD=A\n@SP\nAM=M+1\nA=A-1\nM=D\n@LCL\nD=M\n@SP\nAM=M+1\nA=A-1\nM=D\n@ARG\nD=M\n@SP\nAM=M+1\nA=A-1\nM=D\n""" +
s"""@THIS\nD=M\n@SP\nAM=M+1\nA=A-1\nM=D\n@THAT\nD=M\n@SP\nAM=M+1\nA=A-1\nM=D\n@SP\nD=M\n@${5 + nArgs.toInt}\nD=D-A\n@ARG\nM=D\n""" +
s"""@SP\nD=M\n@LCL\nM=D\n@$fName\n0;JMP\n(${labal + trueLabalCounter}) //end of function:$fName"""
}
def findOpration1(s: String): String = {
s match {
case "add" => arrOp("+")
case "sub" => arrOp("-")
case "neg" => negOp
case "eq" => compOper("JEQ")
case "gt" => compOper("JGT")
case "lt" => compOper("JLT")
case "and" => bitOper("&")
case "or" => bitOper("|")
case "not" => notOp
case "return" => retFunction
}
}
def findOpration2(op: String, s: String): String = {
op match {
case "if-goto" => ifgoto(s)
case "goto" => goTo(s)
case "label" => makeLabal(s.toString())
}
}
def findOpration3(op: String, sigment: String, offset: String,fileName :String =""): String = {
op match {
case "push" => pushInstrac(sigment, offset,fileName)
case "pop" => popInstrac(sigment, offset,fileName)
case "call" => callFunction(sigment, offset)
case "function" => doFunction(sigment, offset)
}
}
def popInstrac(sigment: String, offset: String,fileName:String=""): String =
{
sigment match {
case "constant" => s"""@SP\nAM=M-1\nD=M\n@$offset\nM=D"""
case "this" => popSigArgsLoc("THIS", offset.toInt)
case "that" => popSigArgsLoc("THAT", offset.toInt)
case "pointer" => popSigType((offset.toInt + 3).toString)
case "temp" => popSigType((offset.toInt + 5).toString)
case "static" => popSigType(fileName+"."+offset)
case "argument" => popSigArgsLoc("ARG", offset.toInt)
case "local" => popSigArgsLoc("LCL", offset.toInt)
}
}
def pushInstrac(sigment: String, offset: String,fileName:String=""): String =
{
sigment match {
case "constant" => pushConstant(offset)
case "this" => pushSigArgsLoc("THIS", offset.toInt)
case "that" => pushSigArgsLoc("THAT", offset.toInt)
case "pointer" => pushSigType((offset.toInt + 3).toString)
case "temp" => pushSigType((offset.toInt + 5).toString)
case "static" => pushSigType(fileName+"."+offset)
case "argument" => pushSigArgsLoc("ARG", offset.toInt)
case "local" => pushSigArgsLoc("LCL", offset.toInt)
}
}
def transAll(file: String, arr: Array[String], f: (String, String) => String,fileName:String) = {
if (this.init) {
helpers.writeTranslatToFile(file, initilize)
this.initial
}
for (x <- arr filterNot (_ == "")) {
helpers.writeTranslatToFile(file, f(x.replaceAll("//.*", ""), fileName))
}
}
} | semlie/TheElemOfCompSys | ex8/cs/translator.scala | Scala | gpl-2.0 | 6,399 |
package com.tutuur.ducksoup.database
import com.tutuur.ducksoup.json.PostJsonProtocol
import com.tutuur.ducksoup.meta.Post
import slick.driver.JdbcDriver
import slick.profile.RelationalProfile.ColumnOption.Length
import spray.json._
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.language.implicitConversions
/**
* @author Zale
*/
/**
 * Slick-backed DAO for [[Post]] entities, persisted in the POSTS table.
 *
 * Covers are stored as a JSON-encoded string column and converted to/from
 * List[String] by the implicit mappers below. All public operations block on
 * the underlying Future with Await (Duration.Inf).
 */
class PostDao(val driver: JdbcDriver) extends Dao[Long, Post] with PostJsonProtocol {
  // internal constants.
  // Maximum size (in characters) of the CONTENT column.
  private[this] val MaxContentLength = 1024 * 1024
  // import slick classes.
  import driver.api._
  // Raw row shape: (id, authorId, title, description, covers, content, contentType).
  type PostEntry = (Option[Long], Option[Long], String, String, Option[String], Option[String], Option[String])
  // Slick table mapping for POSTS.
  class Posts(tag: Tag) extends Table[PostEntry](tag, "POSTS") {
    def id = column[Long]("ID", O.PrimaryKey, O.AutoInc)
    def authorId = column[Long]("AUTHOR_ID")
    def title = column[String]("TITLE", new Length(64, false))
    def description = column[String]("DESCRIPTION", new Length(256, false))
    def covers = column[Option[String]]("COVERS", new Length(128, false))
    def content = column[Option[String]]("CONTENT", new Length(MaxContentLength, true))
    def contentType = column[Option[String]]("CONTENT_TYPE", new Length(8, false))
    override def * = (id.?, authorId.?, title, description, covers, content, contentType)
  }
  // Decodes a raw row into a Post, parsing the covers JSON column.
  private implicit def toPost(entry: PostEntry): Post = {
    val covers = entry._5 match {
      case Some(s) => Some(s.parseJson.convertTo[List[String]])
      case None => None
    }
    Post(entry._1, entry._2, entry._3, entry._4, covers, entry._6, entry._7)
  }
  // Encodes a Post into a raw row; an empty covers list is stored as NULL.
  private implicit def fromPost(post: Post): PostEntry = {
    val covers = post.covers match {
      case Some(s) => if (s.nonEmpty) Some(s.toJson.compactPrint) else None
      case None => None
    }
    (post.id, post.authorId, post.title, post.description, covers, post.content, post.contentType)
  }
  private[this] val query = TableQuery[Posts]
  // NOTE(review): schema creation is fired at construction time and its
  // Future is never awaited or checked — it will fail silently if the table
  // already exists; confirm intended.
  DB.db.run(DBIO.seq(
    query.schema.create
  ))
  /**
   * Inserts a new post (which must not yet have an id) and returns the
   * database-generated id. Blocks until the insert completes.
   */
  override def store(t: Post): Option[Long] = {
    assert(t.id.isEmpty)
    val future = DB.db.run((query returning query.map(_.id)) += t)
    val id = Await.result(future, Duration.Inf)
    Some(id)
  }
  /** Loads the post with the given id, or None if it does not exist. Blocking. */
  override def retrieve(id: Long): Option[Post] = {
    val future = DB.db.run(query.filter(_.id === id).result)
    val posts = Await.result(future, Duration.Inf)
    if (posts.isEmpty) None else Option(posts.head)
  }
  /**
   * Lists up to `limit` posts without their CONTENT column (set to None).
   *
   * NOTE(review): the `fromId` parameter is never used — pagination appears
   * unimplemented; confirm against the Dao contract.
   */
  override def list(limit: Int, fromId: Long): List[Post] = {
    val future = DB.db.run(query.take(limit).map((e) =>
      (e.id, e.authorId, e.title, e.description, e.covers, e.contentType)
    ).result)
    val posts = Await.result(future, Duration.Inf)
    for {
      r <- posts.toList
    } yield toPost((Some(r._1), Some(r._2), r._3, r._4, r._5, None, r._6))
  }
}
| Thiross/ducksoup | web/src/main/scala/com/tutuur/ducksoup/database/PostDao.scala | Scala | gpl-3.0 | 2,916 |
package org.zalando.grafter
import scala.reflect.ClassTag
trait Reflect { outer =>

  /**
   * Checks whether a value is assignable to the class described by the given
   * class tag and to every interface that class directly declares.
   *
   * @param a  the value whose runtime class is inspected
   * @param ct class tag describing the required type
   * @return true if `a`'s class satisfies all of the required types
   */
  def implements(a: Any)(implicit ct: ClassTag[_]): Boolean = {
    val actual = a.getClass
    val required: List[Class[_]] =
      ct.runtimeClass :: ct.runtimeClass.getInterfaces.toList
    required.forall(_.isAssignableFrom(actual))
  }

  /** Syntax extension: `value.implements[SomeType]`. */
  implicit class ReflectOps(t: Any) {
    def implements[T : ClassTag]: Boolean =
      outer.implements(t)(implicitly[ClassTag[T]])
  }
}

object Reflect extends Reflect
| jcranky/grafter | core/src/main/scala/org/zalando/grafter/Reflect.scala | Scala | mit | 568 |
package org.scalafmt.util
import scala.meta.inputs.Input
import scala.collection.mutable
/** A parsed markdown document: an ordered sequence of text and code-fence parts. */
final case class MarkdownFile(parts: List[MarkdownPart]) {
  /** Renders every part back into a single markdown string. */
  def renderToString: String = {
    val sb = new StringBuilder()
    for (part <- parts) part.renderToString(sb)
    sb.result()
  }
}
object MarkdownFile {
  /** Parser state: either inside a code fence or in plain text. */
  sealed abstract class State
  object State {
    // `start` is the offset of the fence's opening line; `backticks` is the
    // run of backticks that opened it; `info` is the trailing info string.
    case class CodeFence(start: Int, backticks: String, info: String)
        extends State
    case object Text extends State
  }
  /** Single-pass, line-oriented parser that splits `input` into parts. */
  class Parser(input: Input) {
    private val text = input.text
    // Like text.substring, but clamps `end` so it is never below `start`.
    private def substringWithAdaptedEnd(start: Int, end: Int): String = {
      val adaptedEnd = math.max(start, end)
      text.substring(start, adaptedEnd)
    }
    // Builds a CodeFence part from the recorded open state and the offsets of
    // the closing backtick line ([backtickStart, backtickEnd)).
    private def newCodeFence(
        state: State.CodeFence,
        backtickStart: Int,
        backtickEnd: Int
    ): MarkdownPart.CodeFence = {
      val open = substringWithAdaptedEnd(
        state.start,
        state.start + state.backticks.length()
      )
      val lastIndexOfOpeningBackTicks = state.start + state.backticks.length()
      val info = substringWithAdaptedEnd(
        lastIndexOfOpeningBackTicks,
        lastIndexOfOpeningBackTicks + state.info.length()
      )
      // Step back one character so the body keeps its trailing newline intact.
      val adaptedBacktickStart = math.max(0, backtickStart - 1)
      val body = substringWithAdaptedEnd(
        lastIndexOfOpeningBackTicks + info.length(),
        adaptedBacktickStart
      )
      val close = substringWithAdaptedEnd(adaptedBacktickStart, backtickEnd)
      MarkdownPart.CodeFence(open, info, body, close)
    }
    /**
     * Walks the input line by line, toggling between Text and CodeFence
     * states, and returns the accumulated parts. An unclosed fence at EOF is
     * still emitted as a CodeFence (with an empty close).
     */
    def acceptParts(): List[MarkdownPart] = {
      var state: State = State.Text
      val parts = mutable.ListBuffer.empty[MarkdownPart]
      var curr = 0
      text.linesWithSeparators
        .foreach { line =>
          val end = curr + line.length()
          state match {
            case State.Text =>
              if (line.startsWith("```")) {
                val backticks = line.takeWhile(_ == '`')
                val info = line.substring(backticks.length())
                state = State.CodeFence(curr, backticks, info)
              } else {
                parts += MarkdownPart.Text(substringWithAdaptedEnd(curr, end))
              }
            case s: State.CodeFence =>
              // A closing line must start with at least as many backticks as
              // the opener and contain only backticks/whitespace.
              if (
                line.startsWith(s.backticks) &&
                line.forall(ch => ch == '`' || ch.isWhitespace)
              ) {
                parts += newCodeFence(s, curr, end)
                state = State.Text
              }
          }
          curr = end
        }
      // Flush an unterminated fence at end of input.
      state match {
        case s: State.CodeFence =>
          parts += newCodeFence(s, text.length(), text.length())
        case _ =>
      }
      parts.toList
    }
  }
  /** Parses `input` into a MarkdownFile. */
  def parse(input: Input): MarkdownFile =
    MarkdownFile(new Parser(input).acceptParts())
}
/** One component of a markdown document: plain text or a fenced code block. */
sealed abstract class MarkdownPart {
  /** Appends this part's markdown representation to `out`. For a code fence,
   *  `newBody` (if set) replaces the originally-parsed body. */
  final def renderToString(out: StringBuilder): Unit =
    this match {
      case MarkdownPart.Text(value) =>
        out.append(value)
      case fence: MarkdownPart.CodeFence =>
        out.append(fence.openBackticks)
        out.append(fence.info)
        fence.newBody match {
          case None =>
            out.append(fence.body)
          case Some(newBody) =>
            out.append(newBody)
        }
        out.append(fence.closeBackticks)
    }
}
object MarkdownPart {
  /** A run of plain (non-fence) markdown text, separators included. */
  final case class Text(value: String) extends MarkdownPart
  /**
   * A fenced code block split into its syntactic pieces so it can be
   * re-rendered verbatim: opening backticks, info string, body, and the
   * closing backtick line.
   */
  final case class CodeFence(
      openBackticks: String,
      info: String,
      body: String,
      closeBackticks: String
  ) extends MarkdownPart {
    // Optional replacement body used when rendering (e.g. after reformatting).
    // NOTE(review): mutable state on a case class — equals/hashCode ignore it.
    var newBody = Option.empty[String]
  }
}
| scalameta/scalafmt | scalafmt-core/shared/src/main/scala/org/scalafmt/util/MarkdownFile.scala | Scala | apache-2.0 | 3,581 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600a.v2
import org.joda.time.LocalDate
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtValue, Input}
import uk.gov.hmrc.ct.ct600a.v2.formats.Loans
import uk.gov.hmrc.ct.utils.DateImplicits._
/**
 * A loan made to a participator, with optional repayment details.
 *
 * @param id                   unique identifier of the loan
 * @param name                 name of the participator
 * @param amount               amount of the loan
 * @param repaid               whether any repayment has been made
 * @param lastRepaymentDate    date of the most recent repayment, if repaid
 * @param totalAmountRepaid    total repaid so far, if repaid
 * @param endDateOfRepaymentAP the end date of the accounting period in which the loan repayment was made
 */
case class Loan (id: String,
                 name: String,
                 amount: Int,
                 repaid: Boolean = false,
                 lastRepaymentDate: Option[LocalDate] = None,
                 totalAmountRepaid: Option[Int] = None,
                 endDateOfRepaymentAP: Option[LocalDate] = None) {

  // Relief is due "early" when the repayment falls within 9 months and a day
  // after the accounting period end.
  def isRepaymentReliefEarlierThanDue(acctPeriodEnd: LocalDate): Boolean = {
    val nineMonthsAndADayAfter: LocalDate = acctPeriodEnd.plusMonths(9).plusDays(1)
    repaid && lastRepaymentIsAfter(acctPeriodEnd) && lastRepaymentBefore(nineMonthsAndADayAfter)
  }

  // Later relief is due when repayment was late AND the return is being filed
  // after the later-relief due date.
  def isRepaymentLaterReliefNowDue(acctPeriodEnd: LocalDate, filingDate: LPQ07): Boolean = {
    repaid && !isRepaymentReliefEarlierThanDue(acctPeriodEnd) && isFilingAfterLaterReliefDueDate(filingDate)
  }

  // Later relief exists but is not yet claimable at the filing date.
  def isRepaymentLaterReliefNotYetDue(acctPeriodEnd: LocalDate, filingDate: LPQ07): Boolean = {
    repaid && !isRepaymentReliefEarlierThanDue(acctPeriodEnd) && !isFilingAfterLaterReliefDueDate(filingDate)
  }

  // True when the repaid total exactly equals the loan amount.
  val isFullyRepaid = {
    repaid && amount == totalAmountRepaid.getOrElse(0)
  }

  // Later relief becomes due 9 months after the end of the repayment AP.
  // Throws if the repayment AP end date is required but missing.
  private def isFilingAfterLaterReliefDueDate(filingDate: LPQ07) = endDateOfRepaymentAP match {
    case Some(date) => {
      val reliefDueDate = date.plusMonths(9)
      filingDate.value.map(_ > reliefDueDate).getOrElse(false) // LPQ07 None implies that filing is within 9 months of AP end date
    }
    case None => throw new IllegalArgumentException("As the repayment date is more than 9 months after the accounting period end date, the end date of the repayment accounting period must be provided")
  }

  private def lastRepaymentIsAfter(date: LocalDate) = lastRepaymentDate.fold(false)(x => x > date)

  private def lastRepaymentBefore(date: LocalDate) = lastRepaymentDate.fold(false)(x => x < date)
}
/** CT600A box LP02: the list of loans made to participators. */
case class LP02(loans: Option[List[Loan]]) extends CtBoxIdentifier(name = "Loan to participators - participants and amounts.") with CtValue[Option[List[Loan]]] with Input {

  // Merges two LP02 values by concatenating their loan lists.
  def +(other: LP02): LP02 = new LP02(Some(loans.getOrElse(Nil) ++ other.loans.getOrElse(Nil)))

  override def value = loans

  override def asBoxString = Loans.asBoxString(this)
}
| pncampbell/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600a/v2/LP02.scala | Scala | apache-2.0 | 3,087 |
package com.typesafe.sbt
package packager
package rpm
import linux.{LinuxPackageMapping,LinuxFileMetaData}
import sbt._
import com.typesafe.sbt.packager.linux.LinuxSymlink
/**
 * Required header metadata for an RPM package (Name/Version/Release/etc.
 * tags of the generated spec file).
 */
case class RpmMetadata(
  name: String,
  version: String,
  release: String,
  arch: String,
  vendor: String,
  os: String,
  summary: String,
  description: String,
  autoprov: String,
  autoreq: String) {
}
/**
 * Optional descriptive tags used to generate an RPM spec file
 * (License/Distribution/URL/Group/Packager/Icon). Unset fields are omitted
 * from the spec.
 */
case class RpmDescription(
  license: Option[String] = None,
  distribution: Option[String] = None,
  //vendor: Option[String] = None,
  url: Option[String] = None,
  group: Option[String] = None,
  packager: Option[String] = None,
  icon: Option[String] = None
  )
/**
 * Dependency declarations for an RPM package: what it provides, requires,
 * pre-requires, obsoletes and conflicts with.
 */
case class RpmDependencies(
  provides: Seq[String] = Seq.empty,
  requirements: Seq[String] = Seq.empty,
  prereq: Seq[String] = Seq.empty,
  obsoletes: Seq[String] = Seq.empty,
  conflicts: Seq[String] = Seq.empty) {

  /** Renders every declared dependency as a spec-file line, in tag order. */
  def contents: String = {
    val sections = Seq(
      "Provides: " -> provides,
      "Requires: " -> requirements,
      "PreReq: " -> prereq,
      "Obsoletes: " -> obsoletes,
      "Conflicts: " -> conflicts)
    sections
      .flatMap { case (prefix, values) => values.map(prefix + _ + "\\n") }
      .mkString
  }
}
/**
 * Optional RPM scriptlets (%pretrans, %pre, %post, %verifyscript,
 * %posttrans, %preun, %postun). Only defined scriptlets are rendered.
 */
case class RpmScripts(
  pretrans: Option[String] = None,
  pre: Option[String] = None,
  post: Option[String] = None,
  verifyscript: Option[String] = None,
  posttrans: Option[String] = None,
  preun: Option[String] = None,
  postun: Option[String] = None
  ) {

  /** Renders each defined scriptlet as its label followed by its body. */
  def contents(): String = {
    val scriptlets = Seq(
      "%pretrans" -> pretrans,
      "%pre" -> pre,
      "%post" -> post,
      "%verifyscript" -> verifyscript,
      "%posttrans" -> posttrans,
      "%preun" -> preun,
      "%postun" -> postun)
    scriptlets
      .flatMap { case (label, body) => body.map(label + "\\n" + _) }
      .mkString("\\n\\n")
  }
}
/**
 * Full description of an RPM package; `writeSpec` renders it as an RPM spec
 * file for consumption by rpmbuild.
 */
case class RpmSpec(meta: RpmMetadata,
                   desc: RpmDescription = RpmDescription(),
                   deps: RpmDependencies = RpmDependencies(),
                   scriptlets: RpmScripts = RpmScripts(),
                   mappings: Seq[LinuxPackageMapping] = Seq.empty,
                   symlinks: Seq[LinuxSymlink] = Seq.empty) {

  // Ensures the path is absolute and quotes it if it contains spaces.
  private[this] def fixFilename(n: String): String = {
    val tmp =
      if(n startsWith "/") n
      else "/" + n
    if(tmp.contains(' ')) "\\"%s\\"" format tmp
    else tmp
  }

  // Renders one %files entry with its %config/%doc/%dir/%attr decorations.
  private[this] def makeFilesLine(target: String, meta: LinuxFileMetaData, isDir: Boolean): String = {
    val sb = new StringBuilder
    meta.config.toLowerCase match {
      case "false" => ()
      case "true" => sb append "%config "
      case x => sb append ("%config("+x+") ")
    }
    if(meta.docs) sb append "%doc "
    if(isDir) sb append "%dir "
    // TODO - map dirs...
    sb append "%attr("
    sb append meta.permissions
    sb append ','
    sb append meta.user
    sb append ','
    sb append meta.group
    sb append ") "
    sb append fixFilename(target)
    sb append '\\n'
    sb.toString
  }

  // Renders the %files section from all package mappings and symlinks.
  private[this] def fileSection: String = {
    val sb = new StringBuilder
    sb append "\\n%files\\n"
    // TODO - default attribute string.
    for {
      mapping <- mappings
      (file, dest) <- mapping.mappings
    } sb append makeFilesLine(dest, mapping.fileData, file.isDirectory)
    for {
      link <- symlinks
    } sb append (fixFilename(link.link) + "\\n")
    sb.toString
  }

  // Renders the %install section: moves the staged tree under $RPM_BUILD_ROOT.
  private[this] def installSection(root: File): String = {
    val sb = new StringBuilder
    sb append "\\n"
    sb append "%install\\n"
    sb append "if [ -e \\"$RPM_BUILD_ROOT\\" ]; "
    sb append "then\\n"
    sb append "  mv \\""
    sb append root.getAbsolutePath
    sb append "\\"/* \\"$RPM_BUILD_ROOT\\"\\n"
    sb append "else\\n"
    sb append "  mv \\""
    sb append root.getAbsolutePath
    sb append "\\" \\"$RPM_BUILD_ROOT\\"\\n"
    sb append "fi\\n"
    sb.toString
  }

  // TODO - This is *very* tied to RPM helper, may belong *in* RpmHelper
  /**
   * Renders the complete spec file contents.
   *
   * @param rpmRoot the BuildRoot directory declared in the spec
   * @param tmpRoot the staging directory whose contents %install moves into
   *                $RPM_BUILD_ROOT
   */
  def writeSpec(rpmRoot: File, tmpRoot: File): String = {
    val sb = new StringBuilder
    sb append ("Name: %s\\n" format meta.name)
    sb append ("Version: %s\\n" format meta.version)
    sb append ("Release: %s\\n" format meta.release)
    sb append ("Summary: %s\\n" format meta.summary)
    desc.license foreach { v => sb append ("License: %s\\n" format v)}
    desc.distribution foreach { v => sb append ("Distribution: %s\\n" format v)}
    // TODO - Icon
    sb append ("Vendor: %s\\n" format meta.vendor)
    desc.url foreach { v => sb append ("URL: %s\\n" format v)}
    desc.group foreach { v => sb append ("Group: %s\\n" format v)}
    desc.packager foreach { v => sb append ("Packager: %s\\n" format v)}
    sb append deps.contents
    // TODO - autoprov + autoreq
    sb append ("autoprov: %s\\n" format meta.autoprov)
    sb append ("autoreq: %s\\n" format meta.autoreq)
    sb append ("BuildRoot: %s\\n\\n" format rpmRoot.getAbsolutePath)
    sb append "%description\\n"
    sb append meta.description
    sb append "\\n\\n"
    // write build as moving everything into RPM directory.
    sb append installSection(tmpRoot)
    // TODO - Allow symlinks
    // write scriptlets
    sb append scriptlets.contents()
    // Write file mappings
    sb append fileSection
    // TODO - Write triggers...
    // TODO - Write changelog...
    sb.toString
  }
}
| yanns/sbt-native-packager | src/main/scala/com/typesafe/sbt/packager/rpm/RpmMetadata.scala | Scala | bsd-2-clause | 5,442 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.utils.tf
import java.nio.ByteOrder
import com.intel.analytics.bigdl.nn._
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, DataFormat}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.T
import Tensorflow._
import BigDLToTensorflow._
import org.tensorflow.framework.{DataType, NodeDef}
import scala.collection.mutable.ArrayBuffer
/**
 * Wrapper of logic to convert a BigDL module to tensorflow node definitions.
 */
trait BigDLToTensorflow {
  /**
   * Convert the module to a tensorflow nodedef.
   *
   * @param module    the BigDL layer being converted
   * @param inputs    nodedefs feeding this layer
   * @param byteOrder byte order used when serializing constant tensors
   * @return Mapped nodedef list, the first is the output node
   */
  def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
              byteOrder: ByteOrder): Seq[NodeDef]
}
object BigDLToTensorflow {
  /**
   * This method is just for test purpose. Do not use the bigdl.enableNHWC
   * property for real use cases. Remaps an NCHW dimension index to its NHWC
   * equivalent when the property is set.
   * @return the (possibly remapped) dimension index
   */
  private[tf] def processSaveDim(dim: Int): Int = {
    if (System.getProperty("bigdl.enableNHWC", "false").toBoolean) {
      if (dim == 2) return 4
      if (dim == 3) return 2
      if (dim == 4) return 3
      dim
    } else {
      dim
    }
  }
  /**
   * This method is just for test purpose. Do not use the bigdl.enableNHWC
   * property for real use cases.
   * @return NHWC when the property is set, NCHW otherwise
   */
  private[tf] def getDataFormat(): TensorflowDataFormat = {
    if (System.getProperty("bigdl.enableNHWC", "false").toBoolean) {
      TensorflowDataFormat.NHWC
    } else {
      TensorflowDataFormat.NCHW
    }
  }
}
/** Maps a BigDL Input layer to a tensorflow Identity node. */
object InputToTF extends BigDLToTensorflow {
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
                       byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.length == 1, "Input only accept one input")
    Seq(identity(inputs(0), module.getName()))
  }
}

/** Maps BigDL ReLU to the tensorflow Relu op. */
object ReLUToTF extends BigDLToTensorflow {
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
                       byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.length == 1, "Relu only accept one input")
    Seq(relu(inputs(0), module.getName()))
  }
}

/**
 * Maps BigDL Linear to MatMul + BiasAdd. The weight is transposed because
 * BigDL stores (out, in) while tensorflow MatMul expects (in, out).
 */
object LinearToTF extends BigDLToTensorflow {
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
                       byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.length == 1, "Linear only accept one input")
    val linear = module.asInstanceOf[Linear[_]]
    val weight = const(linear.weight.t().contiguous(), linear.getName() + "/weight", byteOrder)
    val weightReader = identity(weight, linear.getName() + "/weightReader")
    val mm = matmul(inputs(0), weightReader, linear.getName() + "/matmul")
    val bias = const(linear.bias, linear.getName() + "/bias", byteOrder)
    val biasReader = identity(bias, linear.getName() + "/biasReader")
    val add = biasAdd(mm, biasReader, getDataFormat(), linear.getName() + "/biasAdd")
    Seq(add, biasReader, bias, mm, weightReader, weight)
  }
}
/**
 * Maps BigDL SpatialConvolution to Conv2D + BiasAdd. The weight tensor is
 * permuted from BigDL's GOIHW layout to tensorflow's HWIO when the layer is
 * NCHW. Grouped convolutions are not supported.
 */
object SpatialConvolutionToTF extends BigDLToTensorflow {
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
                       byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.length == 1, "SpatialConvolution only accept one input")
    val spatialConv = module.asInstanceOf[SpatialConvolution[_]]
    // squeeze will modify the weight tensor
    // GOIHW -> HWIO
    require(spatialConv.weight.size(1) == 1, "convolution group is not supported")
    val (dataFormat, filterTensor) = if (spatialConv.format == DataFormat.NCHW) {
      (TensorflowDataFormat.NCHW,
        spatialConv.weight.select(1, 1)
          .transpose(2, 3).transpose(3, 4)
          .transpose(1, 2).transpose(2, 3)
          .transpose(3, 4).contiguous())
    } else {
      (TensorflowDataFormat.NHWC, spatialConv.weight.select(1, 1))
    }
    val filter = const(filterTensor, spatialConv.getName() + "/filter", byteOrder)
    val filterReader = identity(filter, spatialConv.getName() + "/filterReader")
    val conv = conv2D(inputs(0), filterReader, spatialConv.strideW, spatialConv.strideH,
      spatialConv.kernelW, spatialConv.kernelH, spatialConv.padW, spatialConv.padH,
      dataFormat, spatialConv.getName() + "/conv2D")
    val bias = const(spatialConv.bias, spatialConv.getName() + "/bias", byteOrder)
    val biasReader = identity(bias, spatialConv.getName() + "/biasReader")
    val add = biasAdd(conv, biasReader, dataFormat,
      spatialConv.getName() + "/biasAdd")
    Seq(add, biasReader, bias, conv, filterReader, filter)
  }
}

/**
 * Maps BigDL TemporalConvolution to a 1-D convolution expressed as Conv2D:
 * both the input and the filter gain a singleton spatial dimension via
 * ExpandDims, and the extra dimension is squeezed away afterwards.
 */
object TemporalConvolutionToTF extends BigDLToTensorflow {
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
                       byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.length == 1, "SpatialConvolution only accept one input")
    val spatialConv = module.asInstanceOf[TemporalConvolution[_]]
    val const1 = const(Tensor.scalar[Int](1), spatialConv.getName() + "/dim1", byteOrder)
    val expandDimsInput = expandDims(inputs.head, const1,
      spatialConv.getName() + "/expandDimsInput")
    // (out, kW * in) -> (kW, in, out) for tensorflow's filter layout.
    val filterTensor = spatialConv.weight
      .view(spatialConv.outputFrameSize, spatialConv.kernelW, spatialConv.inputFrameSize)
      .transpose(2, 3).transpose(1, 3).contiguous()
    val filter = const(filterTensor, spatialConv.getName() + "/filter", byteOrder)
    val filterReader = identity(filter, spatialConv.getName() + "/filterReader")
    val const2 = const(Tensor.scalar[Int](0), spatialConv.getName() + "/dim2", byteOrder)
    val expandDimsWeight = expandDims(filterReader, const2,
      spatialConv.getName() + "/expandDimsWeight")
    val conv = conv2D(expandDimsInput, expandDimsWeight, spatialConv.strideW, 1,
      spatialConv.kernelW, 1, 0, 0,
      getDataFormat(), spatialConv.getName() + "/conv2D")
    val sq = squeeze(conv, Seq(1), spatialConv.getName() + "/squeeze")
    val bias = const(spatialConv.bias, spatialConv.getName() + "/bias", byteOrder)
    val biasReader = identity(bias, spatialConv.getName() + "/biasReader")
    val add = biasAdd(sq, biasReader, getDataFormat(),
      spatialConv.getName() + "/biasAdd")
    Seq(add, biasReader, bias, conv, filterReader, filter, sq,
      expandDimsInput, expandDimsWeight, const1, const2)
  }
}
/** Maps BigDL Squeeze to tensorflow Squeeze (dims converted to 0-based). */
object SqueezeToTF extends BigDLToTensorflow {
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
                       byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.length == 1, "Squeeze only accept one input")
    val sq = module.asInstanceOf[Squeeze[_]]
    Seq(squeeze(inputs(0), sq.dims.map(processSaveDim(_) - 1), sq.getName()))
  }
}

/** Maps BigDL Tanh to the tensorflow Tanh op. */
object TanhToTF extends BigDLToTensorflow {
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
                       byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.length == 1, "Tanh only accept one input")
    Seq(tanh(inputs(0), module.getName()))
  }
}

/** Maps BigDL Reshape to tensorflow Reshape with a constant shape tensor. */
object ReshapeToTF extends BigDLToTensorflow {
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
                       byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.length == 1, "Reshape only accept one input")
    val rh = module.asInstanceOf[Reshape[_]]
    val size = Tensor[Int](rh.size.length)
    var i = 0
    while(i < rh.size.length) {
      size.setValue(i + 1, rh.size(i))
      i += 1
    }
    val shape = const(size, rh.getName() + "/shape", byteOrder)
    val reshapeNode = reshape(inputs(0), shape, rh.getName())
    Seq(reshapeNode, shape)
  }
}

/** Maps BigDL View to tensorflow Reshape with a constant shape tensor. */
object ViewToTF extends BigDLToTensorflow {
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
                       byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.length == 1, "Reshape only accept one input")
    val viewLayer = module.asInstanceOf[View[_]]
    val size = Tensor[Int](viewLayer.sizes.length)
    var i = 0
    while(i < viewLayer.sizes.length) {
      size.setValue(i + 1, viewLayer.sizes(i))
      i += 1
    }
    val shape = const(size, viewLayer.getName() + "/shape", byteOrder)
    val reshapeNode = reshape(inputs(0), shape, viewLayer.getName())
    Seq(reshapeNode, shape)
  }
}
/** Maps BigDL SpatialMaxPooling to tensorflow MaxPool. */
object MaxpoolToTF extends BigDLToTensorflow {
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
                       byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.length == 1, "Maxpool only accept one input")
    val layer = module.asInstanceOf[SpatialMaxPooling[_]]
    val dataFormat = if (layer.format == DataFormat.NHWC) {
      TensorflowDataFormat.NHWC
    } else {
      TensorflowDataFormat.NCHW
    }
    Seq(maxPool(inputs(0), layer.kW, layer.kH, layer.padW, layer.padH,
      layer.dW, layer.dH, dataFormat, layer.getName()))
  }
}

/**
 * Maps BigDL Padding (single-index, explicit nInputDim) to tensorflow Pad.
 * A negative pad pads before the dimension; a positive pad pads after it.
 */
object PaddingToTF extends BigDLToTensorflow {
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
                       byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.length == 1, "Padding only accept one input")
    val layer = module.asInstanceOf[Padding[_]]
    require(layer.nIndex == 1, "only support padding nIndex == 1")
    require(layer.nInputDim > 0, "nInputDim must be explicit specified")
    // Build the (nInputDim x 2) paddings tensor expected by tf.pad.
    val padding = Tensor[Int](layer.nInputDim, 2).zero()
    if (layer.pad < 0) {
      padding.setValue(layer.dim, 1, -layer.pad)
    }
    else {
      padding.setValue(layer.dim, 2, layer.pad)
    }
    val paddingsNode = const(padding, layer.getName() + "/padding", byteOrder)
    val padNode = pad(inputs(0), paddingsNode, layer.getName() + "/output")
    Seq(padNode, paddingsNode)
  }
}

/** Maps BigDL SpatialAveragePooling to tensorflow AvgPool. */
object AvgpoolToTF extends BigDLToTensorflow {
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
                       byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.length == 1, "Avgpool only accept one input")
    val layer = module.asInstanceOf[SpatialAveragePooling[_]]
    val dataFormat = if (layer.format == DataFormat.NHWC) {
      TensorflowDataFormat.NHWC
    } else {
      TensorflowDataFormat.NCHW
    }
    Seq(avgPool(inputs(0), layer.kW, layer.kH, layer.padW, layer.padH,
      layer.dW, layer.dH, dataFormat, layer.getName()))
  }
}

/** Maps BigDL Sigmoid to the tensorflow Sigmoid op. */
object SigmoidToTF extends BigDLToTensorflow {
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
                       byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.length == 1, "Sigmoid only accept one input")
    Seq(sigmoid(inputs(0), module.getName()))
  }
}

/**
 * Maps BigDL Dropout in evaluation mode to an Identity node (dropout is a
 * no-op at inference time). Training-mode dropout is rejected.
 */
object DropoutToTF extends BigDLToTensorflow {
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
                       byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.length == 1, "Dropout only accept one input")
    val layer = module.asInstanceOf[Dropout[_]]
    require(layer.isTraining() == false, "only support evaluating mode dropout")
    require(inputs.length == 1, "require only one tensor input")
    Seq(identity(inputs(0), layer.getName()))
  }
}
/** Maps BigDL CAddTable to tensorflow AddN over all inputs. */
object CAddTableToTF extends BigDLToTensorflow {
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
                       byteOrder: ByteOrder): Seq[NodeDef] = {
    Seq(addN(inputs, module.getName()))
  }
}

/** Maps BigDL CMulTable (exactly two inputs) to tensorflow Mul. */
object CMultTableToTF extends BigDLToTensorflow {
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
                       byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.length == 2, "Tensorflow only support two tensor multiply together")
    Seq(multiply(inputs(0), inputs(1), module.getName()))
  }
}

/**
 * Maps BigDL JoinTable to tensorflow Concat along dimension - 1 (0-based).
 * NOTE(review): the inputs are appended in reverse order before the axis
 * node — presumably to match the const-graph traversal order; confirm this
 * matches tensorflow's expected operand order.
 */
object JoinTableToTF extends BigDLToTensorflow {
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
                       byteOrder: ByteOrder): Seq[NodeDef] = {
    val layer = module.asInstanceOf[JoinTable[_]]
    val axis = const(Tensor.scalar[Int](layer.dimension - 1), layer.getName() + "/axis", byteOrder)
    val updateInputs = new ArrayBuffer[NodeDef]()
    updateInputs ++= inputs.reverse
    updateInputs.append(axis)
    Seq(concat(updateInputs, layer.dimension - 1, layer.getName()), axis)
  }
}

/**
 * Maps BigDL Mean (squeeze mode) to tensorflow Mean over one dimension.
 * NOTE(review): the dims tensor is sized by `layer.dimension` but only its
 * first element is set — looks like it should be a 1-element tensor; confirm.
 */
object MeanToTF extends BigDLToTensorflow {
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
                       byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.length == 1, "Mean only accept one input")
    val layer = module.asInstanceOf[Mean[_, _]]
    require(layer.squeeze == true, "Mean must squeeze input")
    val dimsTensor = Tensor[Int](layer.dimension)
    dimsTensor.setValue(1, layer.dimension - 1)
    val dims = const(dimsTensor, layer.getName() + "/dims", byteOrder)
    val mean = reduceMean(inputs(0), dims, false, layer.getName() + "/output")
    Seq(mean, dims)
  }
}

/** Maps BigDL SoftMax to the tensorflow Softmax op. */
object SoftMaxToTF extends BigDLToTensorflow {
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
                       byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.length == 1, "Softmax only accept one input")
    Seq(softmax(inputs(0), module.getName()))
  }
}

/** Maps BigDL LogSoftMax to the tensorflow LogSoftmax op. */
object LogSoftMaxToTF extends BigDLToTensorflow {
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
                       byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.length == 1, "LogSoftmax only accept one input")
    Seq(logSoftmax(inputs(0), module.getName()))
  }
}
/**
 * Maps BigDL SpatialBatchNormalization (evaluation mode only) to a graph of
 * elementwise ops that folds the running statistics into the output:
 *   y = x * (scale * rsqrt(var)) + (offset - mean * scale * rsqrt(var)).
 * All statistics/parameters are reshaped to broadcast over the channel dim.
 */
object BatchNorm2DToTF extends BigDLToTensorflow {
  override def toTFDef(module: AbstractModule[_, _, _], inputs: Seq[NodeDef],
                       byteOrder: ByteOrder): Seq[NodeDef] = {
    require(inputs.length == 1, "BatchNorm only accept one input")
    val layer = module.asInstanceOf[SpatialBatchNormalization[_]]
    require(!layer.isTraining(), "Only support evaluate mode batch norm")
    // reshape to nchw: all-ones shape except the channel dimension.
    val size = Tensor[Int](layer.nDim)
    for (i <- 0 until layer.nDim) {
      size.setValue(i + 1, 1)
    }
    size(2) = layer.weight.size(1)
    val shapeVar = const(size, layer.getName() + "/reshape_1/shape", byteOrder)
    val shapeMean = const(size, layer.getName() + "/reshape_2/shape", byteOrder)
    val shapeScale = const(size, layer.getName() + "/reshape_3/shape", byteOrder)
    val shapeOffset = const(size, layer.getName() + "/reshape_4/shape", byteOrder)
    val varNode = const(layer.runningVar, layer.getName() + "/std", byteOrder)
    val mean = const(layer.runningMean, layer.getName() + "/mean", byteOrder)
    val scale = const(layer.weight, layer.getName() + "/scale", byteOrder)
    val offset = const(layer.bias, layer.getName() + "/offset", byteOrder)
    val reshapeVar = reshape(varNode, shapeVar, s"${layer.getName()}/reshape_1")
    val reshapeMean = reshape(mean, shapeMean, s"${layer.getName()}/reshape_2")
    val reshapeScale = reshape(scale, shapeScale, s"${layer.getName()}/reshape_3")
    val reshapeOffset = reshape(offset, shapeOffset, s"${layer.getName()}/reshape_4")
    // construct graph
    val sqrtVar = rsqrt(reshapeVar, layer.getName() + "/stdvar")
    val mul0 = multiply(reshapeScale, sqrtVar, layer.getName() + "/mul0")
    val mul1 = multiply(inputs(0), mul0, layer.getName() + "/mul1")
    val mul2 = multiply(reshapeMean, mul0, layer.getName() + "/mul2")
    val sub = subtract(reshapeOffset, mul2, layer.getName() + "/sub")
    val output = add(mul1, sub, layer.getName() + "/output")
    Seq(output, sub, mul2, mul1, mul0, reshapeOffset, reshapeMean, reshapeScale,
      shapeOffset, shapeMean, shapeScale, offset, scale, mean,
      sqrtVar, reshapeVar, shapeVar, varNode)
  }
}
| jenniew/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/utils/tf/BigDLToTensorflow.scala | Scala | apache-2.0 | 16,116 |
package jp.sf.amateras.solr.scala.async
import scala.concurrent.{Promise, Future}
import scala.util.{Failure, Success}
import jp.sf.amateras.solr.scala.query.{QueryTemplate, ExpressionParser}
import org.apache.solr.client.solrj.request.UpdateRequest
import jp.sf.amateras.solr.scala.CaseClassMapper
import org.apache.solr.common.SolrInputDocument
/**
 * Common asynchronous Solr client operations, implemented on top of the
 * abstract [[execute]] primitive that ships an [[UpdateRequest]] to Solr.
 */
trait IAsyncSolrClient {

  /** Parser used to expand query templates (see [[QueryTemplate]]). */
  protected implicit def parser: ExpressionParser

  /**
   * Execute given operation in the transaction.
   *
   * The transaction is committed if operation was successful.
   * But the transaction is rolled back if an error occurred.
   */
  def withTransaction[T](operations: => Future[T]): Future[T] = {
    // NOTE(review): relies on the global execution context inside a library
    // trait; consider accepting an implicit ExecutionContext from the caller.
    import scala.concurrent.ExecutionContext.Implicits.global
    val p = Promise[T]()
    operations onComplete {
      // Fulfil the promise only once the commit itself has succeeded.
      case Success(x) => commit() onComplete {
        case Success(_) => p success x
        case Failure(t) => p failure t
      }
      // Roll back, then surface the ORIGINAL failure (the rollback outcome is ignored).
      case Failure(t) => rollback() onComplete (_ => p failure t)
    }
    p.future
  }

  /** Sends the update request to Solr and completes the given promise with its outcome. */
  protected def execute(req: UpdateRequest, promise: Promise[Unit]): Future[Unit]

  /** Creates a query builder for the given Solr query string. */
  def query(query: String): AbstractAsyncQueryBuilder

  /**
   * Add the document.
   *
   * A `SolrInputDocument` is used as-is; any other value is mapped onto one
   * field by field via [[CaseClassMapper]].
   *
   * @param doc the document to register
   */
  def add(doc: Any): Future[Unit] = {
    val solrDoc = doc match {
      case sid: SolrInputDocument => sid
      case _ =>
        val ret = new SolrInputDocument
        // foreach, not map: addField is called purely for its side effect.
        CaseClassMapper.toMap(doc) foreach {
          case (key, value) => ret.addField(key, value)
        }
        ret
    }
    val req = new UpdateRequest()
    req.add(solrDoc)
    execute(req, Promise[Unit]())
  }

  /**
   * Add the document and commit them immediately.
   *
   * @param doc the document to register
   */
  def register(doc: Any): Future[Unit] = {
    withTransaction {
      add(doc)
    }
  }

  /**
   * Delete the document which has a given id.
   *
   * @param id the identifier of the document to delete
   */
  def deleteById(id: String): Future[Unit] = {
    val req = new UpdateRequest()
    req.deleteById(id)
    execute(req, Promise[Unit]())
  }

  /**
   * Delete documents by the given query.
   *
   * @param query the solr query to select documents which would be deleted
   * @param params the parameter map which would be given to the query
   */
  def deleteByQuery(query: String, params: Map[String, Any] = Map()): Future[Unit] = {
    val req = new UpdateRequest()
    req.deleteByQuery(new QueryTemplate(query).merge(params))
    execute(req, Promise[Unit]())
  }

  /** Commits all pending updates. */
  def commit(): Future[Unit]

  /** Rolls back all pending updates. */
  def rollback(): Future[Unit]

  /** Releases the underlying client resources. */
  def shutdown(): Unit
}
| matthewchartier/solr-scala-client | src/main/scala/jp/sf/amateras/solr/scala/async/IAsyncSolrClient.scala | Scala | apache-2.0 | 2,858 |
package mesosphere.marathon.core.appinfo.impl
import mesosphere.marathon.MarathonSchedulerService
import mesosphere.marathon.Protos.MarathonTask
import mesosphere.marathon.core.appinfo.{ AppInfo, EnrichedTask, TaskCounts, TaskStatsByVersion }
import mesosphere.marathon.core.base.Clock
import mesosphere.marathon.health.{ Health, HealthCheckManager }
import mesosphere.marathon.state._
import mesosphere.marathon.tasks.TaskTracker
import mesosphere.marathon.upgrade.DeploymentPlan
import org.slf4j.LoggerFactory
import scala.collection.immutable.Seq
import scala.concurrent.Future
import scala.util.control.NonFatal
/**
 * Assembles the optional parts of an [[AppInfo]] (counts, deployments, tasks,
 * task stats, last task failure) for a given app.
 *
 * `runningDeploymentsByAppFuture` is cached per instance (lazy val), so one
 * instance is expected to serve one request.
 */
class AppInfoBaseData(
    clock: Clock,
    taskTracker: TaskTracker,
    healthCheckManager: HealthCheckManager,
    marathonSchedulerService: MarathonSchedulerService,
    taskFailureRepository: TaskFailureRepository) {

  import AppInfoBaseData.log
  import scala.concurrent.ExecutionContext.Implicits.global

  if (log.isDebugEnabled) log.debug(s"new AppInfoBaseData $this")

  // Deployment ids grouped by app id; fetched at most once per instance.
  lazy val runningDeploymentsByAppFuture: Future[Map[PathId, Seq[Identifiable]]] = {
    log.debug("Retrieving running deployments")

    val allRunningDeploymentsFuture: Future[Seq[DeploymentPlan]] =
      for {
        stepInfos <- marathonSchedulerService.listRunningDeployments()
      } yield stepInfos.map(_.plan)

    allRunningDeploymentsFuture.map { allDeployments =>
      // Register every deployment plan under each app id it affects.
      val byApp = Map.empty[PathId, Vector[DeploymentPlan]].withDefaultValue(Vector.empty)
      val deploymentsByAppId = allDeployments.foldLeft(byApp) { (result, deploymentPlan) =>
        deploymentPlan.affectedApplicationIds.foldLeft(result) { (result, appId) =>
          val newEl = appId -> (result(appId) :+ deploymentPlan)
          result + newEl
        }
      }
      // NOTE(review): mapValues yields a lazy view in Scala 2.x, so the mapping
      // re-runs on each access -- confirm this is acceptable here.
      deploymentsByAppId
        .mapValues(_.map(deploymentPlan => Identifiable(deploymentPlan.id)))
        .withDefaultValue(Seq.empty)
    }
  }

  // Resolves the requested embeds sequentially, threading the AppInfo through
  // and filling in one optional section per embed flag.
  def appInfoFuture(app: AppDefinition, embed: Set[AppInfo.Embed]): Future[AppInfo] = {
    val appData = new AppData(app)
    embed.foldLeft(Future.successful(AppInfo(app))) { (infoFuture, embed) =>
      infoFuture.flatMap { info =>
        embed match {
          case AppInfo.Embed.Counts =>
            appData.taskCountsFuture.map(counts => info.copy(maybeCounts = Some(counts)))
          case AppInfo.Embed.Deployments =>
            runningDeploymentsByAppFuture.map(deployments => info.copy(maybeDeployments = Some(deployments(app.id))))
          case AppInfo.Embed.LastTaskFailure =>
            appData.maybeLastTaskFailureFuture.map { maybeLastTaskFailure =>
              info.copy(maybeLastTaskFailure = maybeLastTaskFailure)
            }
          case AppInfo.Embed.Tasks =>
            appData.enrichedTasksFuture.map(tasks => info.copy(maybeTasks = Some(tasks)))
          case AppInfo.Embed.TaskStats =>
            appData.taskStatsFuture.map(taskStats => info.copy(maybeTaskStats = Some(taskStats)))
        }
      }
    }
  }

  /**
   * Contains app-specific data that we need to retrieve.
   *
   * All data is lazy such that only data that is actually needed for the requested embedded information
   * gets retrieved.
   */
  private[this] class AppData(app: AppDefinition) {
    // Snapshot of "now", shared by all statistics of this instance.
    lazy val now: Timestamp = clock.now()

    lazy val tasks: Iterable[MarathonTask] = {
      log.debug(s"retrieving running tasks for app [${app.id}]")
      taskTracker.getTasks(app.id)
    }

    lazy val tasksFuture: Future[Iterable[MarathonTask]] = Future.successful(tasks)

    lazy val healthCountsFuture: Future[Map[String, Seq[Health]]] = {
      log.debug(s"retrieving health counts for app [${app.id}]")
      healthCheckManager.statuses(app.id)
    }.recover {
      case NonFatal(e) => throw new RuntimeException(s"while retrieving health counts for app [${app.id}]", e)
    }

    lazy val tasksForStats: Future[Iterable[TaskForStatistics]] = {
      for {
        tasks <- tasksFuture
        healthCounts <- healthCountsFuture
      } yield TaskForStatistics.forTasks(now, tasks, healthCounts)
    }.recover {
      case NonFatal(e) => throw new RuntimeException(s"while calculating tasksForStats for app [${app.id}]", e)
    }

    lazy val taskCountsFuture: Future[TaskCounts] = {
      log.debug(s"calculating task counts for app [${app.id}]")
      for {
        tasks <- tasksForStats
      } yield TaskCounts(tasks)
    }.recover {
      case NonFatal(e) => throw new RuntimeException(s"while calculating task counts for app [${app.id}]", e)
    }

    lazy val taskStatsFuture: Future[TaskStatsByVersion] = {
      log.debug(s"calculating task stats for app [${app.id}]")
      for {
        tasks <- tasksForStats
      } yield TaskStatsByVersion(app.versionInfo, tasks)
    }

    lazy val enrichedTasksFuture: Future[Seq[EnrichedTask]] = {
      // Join task ids with their health results; tasks without a status entry are dropped.
      def statusesToEnrichedTasks(
        tasksById: Map[String, MarathonTask],
        statuses: Map[String, collection.Seq[Health]]): Seq[EnrichedTask] = {
        for {
          (taskId, healthResults) <- statuses.to[Seq]
          task <- tasksById.get(taskId)
        } yield EnrichedTask(app.id, task, healthResults)
      }

      log.debug(s"assembling rich tasks for app [${app.id}]")
      val tasksById: Map[String, MarathonTask] = tasks.map(task => task.getId -> task).toMap
      healthCheckManager.statuses(app.id).map(statuses => statusesToEnrichedTasks(tasksById, statuses))
    }.recover {
      case NonFatal(e) => throw new RuntimeException(s"while assembling rich tasks for app [${app.id}]", e)
    }

    lazy val maybeLastTaskFailureFuture: Future[Option[TaskFailure]] = {
      log.debug(s"retrieving last task failure for app [${app.id}]")
      taskFailureRepository.current(app.id)
    }.recover {
      case NonFatal(e) => throw new RuntimeException(s"while retrieving last task failure for app [${app.id}]", e)
    }
  }
}
object AppInfoBaseData {
  // Shared logger for all AppInfoBaseData instances.
  private val log = LoggerFactory.getLogger(getClass)
}
| sledigabel/marathon | src/main/scala/mesosphere/marathon/core/appinfo/impl/AppInfoBaseData.scala | Scala | apache-2.0 | 5,917 |
package com.wavesplatform.it.sync.smartcontract
import com.typesafe.config.Config
import com.wavesplatform.api.http.ApiError.ScriptExecutionError
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.features.BlockchainFeatures
import com.wavesplatform.it.NodeConfigs
import com.wavesplatform.it.api.SyncHttpApi._
import com.wavesplatform.it.api.TransactionInfo
import com.wavesplatform.it.sync._
import com.wavesplatform.it.transactions.BaseTransactionSuite
import com.wavesplatform.lang.v1.compiler.Terms.CONST_BYTESTR
import com.wavesplatform.lang.v1.estimator.v2.ScriptEstimatorV2
import com.wavesplatform.lang.v1.estimator.v3.ScriptEstimatorV3
import com.wavesplatform.state._
import com.wavesplatform.test._
import com.wavesplatform.transaction.Asset.Waves
import com.wavesplatform.transaction.TxVersion
import com.wavesplatform.transaction.smart.InvokeScriptTransaction.Payment
import com.wavesplatform.transaction.smart.script.ScriptCompiler
import org.scalatest.CancelAfterFailure
import scala.concurrent.duration._
/**
 * Node integration tests for InvokeScriptTransaction: calling dApp functions
 * directly, by alias and via the default function across all supported
 * transaction versions, plus the empty-key restriction that applies to
 * tx version >= 2 after BlockV5 activation.
 */
class InvokeScriptTransactionSuite extends BaseTransactionSuite with CancelAfterFailure {

  // Height at which BlockV5 is pre-activated (see nodeConfigs below).
  val activationHeight = 8

  override protected def nodeConfigs: Seq[Config] =
    NodeConfigs
      .Builder(NodeConfigs.Default, 1, Seq.empty)
      .overrideBase(_.quorum(0))
      .overrideBase(
        _.preactivatedFeatures(
          (BlockchainFeatures.Ride4DApps.id, 0),
          (BlockchainFeatures.BlockV5.id, activationHeight)
        )
      )
      .withDefault(1)
      .buildNonConflicting()

  // Accounts hosting the dApp scripts: first/second get the STDLIB v3 script,
  // third gets the STDLIB v4 script once BlockV5 is active.
  private def firstContract = firstKeyPair
  private def secondContract = secondKeyPair
  private lazy val thirdContract = sender.createKeyPair()
  // Account that invokes the dApps.
  private def caller = thirdKeyPair

  private lazy val firstContractAddress: String = firstContract.toAddress.toString
  private lazy val secondContractAddress: String = secondContract.toAddress.toString

  protected override def beforeAll(): Unit = {
    super.beforeAll()
    // STDLIB v3 dApp installed on both the first and the second contract account.
    val scriptText =
      """
        |{-# STDLIB_VERSION 3 #-}
        |{-# CONTENT_TYPE DAPP #-}
        |
        | @Callable(inv)
        | func foo(a:ByteVector) = {
        |  WriteSet([DataEntry("a", a), DataEntry("sender", inv.caller.bytes)])
        | }
        | @Callable(inv)
        | func emptyKey() = {
        |  WriteSet([DataEntry("", "a")])
        | }
        |
        | @Callable(inv)
        | func baz() = {
        |  WriteSet([DataEntry("test", this.bytes)])
        | }
        |
        | @Callable(inv)
        | func default() = {
        |  WriteSet([DataEntry("a", "b"), DataEntry("sender", "senderId")])
        | }
        |
        | @Verifier(t)
        | func verify() = {
        |  true
        | }
        |
        |
      """.stripMargin
    val script = ScriptCompiler.compile(scriptText, ScriptEstimatorV2).explicitGet()._1.bytes().base64
    // Fund the third contract account so it can set its own script later.
    sender.transfer(firstKeyPair, thirdContract.toAddress.toString, 10.waves, minFee, waitForTx = true)
    val setScriptId = sender.setScript(firstContract, Some(script), setScriptFee, waitForTx = true).id
    val setScriptId2 = sender.setScript(secondContract, Some(script), setScriptFee, waitForTx = true).id

    val acc0ScriptInfo = sender.addressScriptInfo(firstContractAddress)
    val acc0ScriptInfo2 = sender.addressScriptInfo(secondContractAddress)

    // Alias used by the invoke-by-alias tests below.
    sender.createAlias(firstContract, "alias", fee = 1.waves, waitForTx = true)

    acc0ScriptInfo.script.isEmpty shouldBe false
    acc0ScriptInfo.scriptText.isEmpty shouldBe false
    acc0ScriptInfo.script.get.startsWith("base64:") shouldBe true

    acc0ScriptInfo2.script.isEmpty shouldBe false
    acc0ScriptInfo2.scriptText.isEmpty shouldBe false
    acc0ScriptInfo2.script.get.startsWith("base64:") shouldBe true

    sender.transactionInfo[TransactionInfo](setScriptId).script.get.startsWith("base64:") shouldBe true
    sender.transactionInfo[TransactionInfo](setScriptId2).script.get.startsWith("base64:") shouldBe true
  }

  ignore("""Allow to use "this" if DApp is called by alias""") {
    sender.invokeScript(
      caller,
      "alias:I:alias",
      func = Some("baz"),
      args = List(),
      payment = Seq(),
      fee = 1.waves,
      waitForTx = true
    )
    sender.getDataByKey(firstContractAddress, "test") shouldBe BinaryDataEntry("test", ByteStr(firstContract.toAddress.bytes))
  }

  test("Wait for activation") {
    // STDLIB v4 script; only accepted by the node once BlockV5 is active.
    val scriptTextV4 =
      """
        |{-# STDLIB_VERSION 4 #-}
        |{-# CONTENT_TYPE DAPP #-}
        |
        | @Callable(inv)
        |func foo() = [IntegerEntry("", 1)]
        |
        | @Callable(inv)
        |func bar() = [IntegerEntry("", 2)]
        |
        |@Callable(inv)
        | func biz() = [IntegerEntry("numb", 1)]
        |
      """.stripMargin
    val script2 = ScriptCompiler.compile(scriptTextV4, ScriptEstimatorV3(fixOverflow = true)).explicitGet()._1.bytes().base64
    sender.waitForHeight(activationHeight, 13.minute)
    val setScriptId3 = sender.setScript(thirdContract, Some(script2), setScriptFee, waitForTx = true).id
    sender.transactionInfo[TransactionInfo](setScriptId3).script.get.startsWith("base64:") shouldBe true
  }

  test("contract caller invokes a function on a contract") {
    val arg = ByteStr(Array(42: Byte))
    // v1 targets the first contract, later versions the second.
    for (v <- invokeScrTxSupportedVersions) {
      val contract = (if (v < 2) firstContract else secondContract).toAddress.toString
      val invokeScriptTx = sender.invokeScript(
        caller,
        contract,
        func = Some("foo"),
        args = List(CONST_BYTESTR(arg).explicitGet()),
        payment = Seq(Payment(1.waves, Waves)),
        fee = 1.waves,
        version = v,
        waitForTx = true
      )

      nodes.waitForHeightAriseAndTxPresent(invokeScriptTx._1.id)
      sender.getDataByKey(contract, "a") shouldBe BinaryDataEntry("a", arg)
      sender.getDataByKey(contract, "sender") shouldBe BinaryDataEntry("sender", ByteStr(caller.toAddress.bytes))
    }
  }

  test("contract caller invokes a function on a contract by alias") {
    val arg = ByteStr(Array(43: Byte))

    val _ = sender.invokeScript(
      caller,
      "alias:I:alias",
      func = Some("foo"),
      args = List(CONST_BYTESTR(arg).explicitGet()),
      payment = Seq(),
      fee = 1.waves,
      waitForTx = true
    )
    sender.getDataByKey(firstContractAddress, "a") shouldBe BinaryDataEntry("a", arg)
    sender.getDataByKey(firstContractAddress, "sender") shouldBe BinaryDataEntry("sender", ByteStr(caller.toAddress.bytes))
  }

  test("translate alias to the address") {
    sender.invokeScript(
      caller,
      "alias:I:alias",
      func = Some("baz"),
      args = List(),
      payment = Seq(),
      fee = 1.waves,
      waitForTx = true
    )
    // "baz" writes this.bytes, so the stored value must be the dApp's own address.
    sender.getDataByKey(firstContractAddress, "test") shouldBe BinaryDataEntry("test", ByteStr(firstContract.toAddress.bytes))
  }

  test("contract caller invokes a default function on a contract") {
    for (v <- invokeScrTxSupportedVersions) {
      val contract = (if (v < 2) firstContract else secondContract).toAddress.toString
      // func = None triggers the dApp's default() callable.
      val _ = sender.invokeScript(
        caller,
        contract,
        func = None,
        payment = Seq(),
        fee = 1.waves,
        version = v,
        waitForTx = true
      )
      sender.getDataByKey(contract, "a") shouldBe StringDataEntry("a", "b")
      sender.getDataByKey(contract, "sender") shouldBe StringDataEntry("sender", "senderId")
    }
  }

  test("verifier works") {
    // The installed @Verifier always returns true, so a data tx from the
    // contract account itself must pass.
    for (v <- invokeScrTxSupportedVersions) {
      val contract = if (v < 2) firstContract else secondContract
      val dataTxId = sender.putData(contract, data = List(StringDataEntry("a", "OOO")), fee = 1.waves, waitForTx = true).id

      nodes.waitForHeightAriseAndTxPresent(dataTxId)

      sender.getDataByKey(contract.toAddress.toString, "a") shouldBe StringDataEntry("a", "OOO")
    }
  }

  test("not able to set an empty key by InvokeScriptTransaction with version >= 2") {
    assertApiError(
      sender
        .invokeScript(
          caller,
          secondContractAddress,
          func = Some("emptyKey"),
          payment = Seq(),
          fee = 1.waves,
          version = TxVersion.V2
        ),
      AssertiveApiError(ScriptExecutionError.Id, "Error while executing account-script: Empty keys aren't allowed in tx version >= 2")
    )

    nodes.waitForHeightArise()
    // The rejected invoke must not have written anything.
    sender.getData(secondContractAddress).filter(_.key.isEmpty) shouldBe List.empty

    assertApiError(
      sender.invokeScript(
        caller,
        thirdContract.toAddress.toString,
        func = Some("bar"),
        payment = Seq(),
        fee = 1.waves,
        version = TxVersion.V2
      ),
      AssertiveApiError(ScriptExecutionError.Id, "Error while executing account-script: Empty keys aren't allowed in tx version >= 2")
    )

    nodes.waitForHeightArise()
    sender.getData(thirdContract.toAddress.toString).filter(_.key.isEmpty) shouldBe List.empty
  }

  test("invoke script via dApp alias") {
    sender.createAlias(thirdContract, "dappalias", smartMinFee, waitForTx = true)
    val dAppAlias = sender.aliasByAddress(thirdContract.toAddress.toString).find(_.endsWith("dappalias")).get
    for (v <- invokeScrTxSupportedVersions) {
      sender.invokeScript(caller, dAppAlias, fee = smartMinFee + smartFee, func = Some("biz"), version = v, waitForTx = true)
      sender.getDataByKey(thirdContract.toAddress.toString, "numb") shouldBe IntegerDataEntry("numb", 1)
    }
  }
}
| wavesplatform/Waves | node-it/src/test/scala/com/wavesplatform/it/sync/smartcontract/InvokeScriptTransactionSuite.scala | Scala | mit | 9,498 |
package org.vvcephei.rest
import java.io.ByteArrayInputStream
import java.net.{URI, URL}
import javax.ws.rs.core.MediaType
import com.fasterxml.jackson.annotation.JsonProperty
import com.fasterxml.jackson.databind.{JsonNode, ObjectMapper}
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.fasterxml.jackson.module.scala.experimental.ScalaObjectMapper
import com.google.common.io.ByteStreams
import com.sun.jersey.api.client.ClientResponse.Status
import com.sun.jersey.api.client.{WebResource, Client => JerseyClient, ClientResponse => JerseyClientResponse}
import com.sun.jersey.core.header.InBoundHeaders
import org.mockito.Matchers
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.FunSuite
import org.vvcephei.rest.Client.Executor
import org.vvcephei.rest.RestClient._
import scala.collection.JavaConversions._
import scala.reflect.classTag
/** Typed accessors used to pull a leaf value out of a [[Response]] via the `/` operator. */
sealed trait Accessor
case object TEXT extends Accessor
case object LONG extends Accessor
case object INT extends Accessor
//...
/** HTTP methods understood by [[Client]]'s executor. */
sealed trait Verb
case object GET extends Verb
case object PUT extends Verb
case object POST extends Verb
case object OPTIONS extends Verb
case object HEAD extends Verb
case object DELETE extends Verb
//...
/** A navigable view over an HTTP response body. */
trait Response {
  /** Returns the child value under `key`. */
  def get(key: String): Response
  /** Alias for [[get]], enabling `response / "key"` syntax. */
  def /(key: String): Response = get(key)
  /** Deserializes the current node as a `T`. */
  def as[T: Manifest]: T
  // Typed leaf accessors: response / "key" / TEXT, etc.
  def /(t: TEXT.type): String = as[String]
  def /(i: INT.type): Int = as[Int]
  def /(i: LONG.type): Long = as[Long]
}
/** Response wrapper for non-JSON payloads; keyed navigation is unsupported. */
case class UntypedResponse(response: JerseyClientResponse) extends Response {
  // Not implemented: an untyped body has no structure to navigate into.
  override def get(key: String) = ???
  override def as[T: Manifest] = response.getEntity(classTag[T].runtimeClass.asInstanceOf[Class[T]])
}
/** Response backed by an already-parsed JSON node. */
case class JsonResponse(json: JsonNode)(implicit mapper: ObjectMapper with ScalaObjectMapper) extends Response {
  override def get(key: String) = {
    // Propagate a null node instead of throwing, so deep chains stay safe.
    val child = if (json == null) null else json.path(key)
    JsonResponse(child)
  }

  override def as[T: Manifest] = mapper.convertValue[T](json)
}
/** Response that parses a Jersey response body as JSON on demand. */
case class JsonClientResponse(response: JerseyClientResponse)(implicit mapper: ObjectMapper with ScalaObjectMapper) extends Response {
  // The entity stream can only be read once, so buffer it so that both
  // get() and as[] (possibly called multiple times) can re-read the bytes.
  private lazy val cachedInputStream = ByteStreams.toByteArray(response.getEntityInputStream)

  def get(key: String) = JsonResponse(mapper.readTree(cachedInputStream).path(key))
  def as[T: Manifest] = mapper.readValue[T](cachedInputStream)
}
/**
 * Immutable builder for an HTTP request. Each combinator returns a new
 * [[Client]]; the actual request is issued by the implicit [[Client.Executor]]
 * when one of the `go` methods is called.
 */
case class Client(headers: Map[String, String] = Map(),
                  protocol: Option[String] = None,
                  host: Option[String] = None,
                  port: Option[Int] = None,
                  path: List[String] = Nil,
                  params: List[(String, String)] = Nil,
                  body: Option[Any] = None)(implicit val executor: Executor) {

  /** True when the content-type header or the last path segment indicates JSON. */
  def isJsonClient: Boolean = headers.getOrElse("content-type", "").contains("json") || path.lastOption.getOrElse("").endsWith(".json")

  /** Adds (or replaces) one header. */
  def header(h: (String, String)): Client = copy(headers = headers ++ Seq(h))

  def protocol(p: String): Client = copy(protocol = Some(p))
  def host(h: String): Client = copy(host = Some(h))
  def port(p: Int): Client = copy(port = Some(p))

  /** Appends path segments; `/` and `segment` are equivalent. */
  def /(segment: String*): Client = copy(path = path ++ segment)
  def segment(segment: String*): Client = this / (segment: _*)

  /** Appends query parameters; `&` and `param` are equivalent. */
  def &(param: (String, String)*): Client = copy(params = params ++ param)
  def param(param: (String, String)*): Client = copy(params = params ++ param)

  /** Sets the request body (used by PUT/POST/DELETE executors). */
  def body(body: Any): Client = copy(body = Some(body))

  // One overload per verb keeps the call site type-checked: client go GET.
  def go(v: GET.type) = executor(this, GET)
  def go(v: PUT.type) = executor(this, PUT)
  def go(v: POST.type) = executor(this, POST)
  def go(v: OPTIONS.type) = executor(this, OPTIONS)
  def go(v: HEAD.type) = executor(this, HEAD)
  // Added for consistency: DELETE exists as a Verb (and executors handle it),
  // but no overload allowed issuing it before.
  def go(v: DELETE.type) = executor(this, DELETE)
}
/** Factories for [[Client]] plus query-string parsing helpers. */
object Client {
  /** Performs the actual HTTP exchange for a fully-built client and a verb. */
  type Executor = (Client, Verb) => Response

  def apply(s: String)(implicit executor: Executor): Client = apply(new URL(s))

  /** Builds a client pre-populated from the URL's protocol/host/port/path/query. */
  def apply(uri: URL)(implicit executor: Executor): Client = new Client(
    protocol = Option(uri.getProtocol),
    host = Option(uri.getHost),
    port = if (uri.getPort == -1) None else Some(uri.getPort),
    path = if (uri.getPath.isEmpty) Nil else List(uri.getPath),
    params = extractParams(Option(uri.getQuery) getOrElse "").toList)

  /**
   * Splits one query-string pair on the FIRST occurrence of `on`.
   *
   * The limit-2 split fixes two crashes of the previous version:
   * "a=b=c" now yields ("a", "b=c") instead of throwing, and a valueless
   * parameter such as "?debug" yields ("debug", "") instead of throwing.
   */
  private def split(s: String, on: String) = {
    val list: List[String] = s.split(on, 2).toList
    list match {
      case Nil => None
      case List("") => None
      case k :: v :: Nil => Some((k, v))
      case k :: Nil => Some((k, "")) // parameter without a value
      case other => throw new IllegalArgumentException(other.toString()) // unreachable with limit 2
    }
  }

  /** Parses a raw query string ("a=1&b=2") into key/value pairs. */
  private def extractParams(s: String) = for {
    pairStr <- s.split("&")
    pair <- split(pairStr, "=")
  } yield {
    pair
  }
}
/** Unit tests for the REST client DSL, driven by mocked Jersey components. */
class RestClientTest extends FunSuite {
  // Shorthand for Mockito's eq-matcher (named ~= because Scala reserves `eq`).
  def ~=[T](a: T) = Matchers.eq(a)

  test("testing one url test") {
    val mockJersey = mock(classOf[JerseyClient])
    val mockResource = mock(classOf[WebResource])
    val mockBuilder = mock(classOf[WebResource#Builder])

    val jsonClient = client(mockJersey).setType(MediaType.APPLICATION_JSON_TYPE).header("X-Stream" -> true)
    val j2 = jsonClient.segment("http://api.tumblr.com", "v2", "blog").param("api_key" -> "fuiKNFp9vQFvjLNvx4sUwti4Yb5yGutBN4Xh10LXZhhRKjWlV4")

    val expectedResultMap: Map[String, String] = Map("stat" -> "ok")

    // Builds a fresh 200 response whose body is the expected map as JSON;
    // a new instance per call because the entity stream is single-use.
    def cr = {
      val mapper = new ObjectMapper() with ScalaObjectMapper
      mapper.registerModule(DefaultScalaModule)
      new JerseyClientResponse(200, new InBoundHeaders(), new ByteArrayInputStream(mapper.writeValueAsBytes(expectedResultMap)), null)
    }

    when(mockJersey.resource(~=("http://api.tumblr.com/v2/blog/scipsy.tumblr.com/info?api_key=fuiKNFp9vQFvjLNvx4sUwti4Yb5yGutBN4Xh10LXZhhRKjWlV4")))
      .thenReturn(mockResource)
    when(mockJersey.resource(~=("http://api.tumblr.com/v2/blog/good.tumblr.com/info?api_key=fuiKNFp9vQFvjLNvx4sUwti4Yb5yGutBN4Xh10LXZhhRKjWlV4")))
      .thenReturn(mockResource)
    when(mockResource.`type`(any(classOf[MediaType]))).thenReturn(mockBuilder, mockBuilder)
    when(mockBuilder.header(~=("X-Stream"), ~=(true))).thenReturn(mockBuilder, mockBuilder)
    when(mockBuilder.get(~=(classOf[JerseyClientResponse]))).thenReturn(cr, cr)

    assert(j2.segment("scipsy.tumblr.com", "info").get().entityAs[Map[String, Object]] === expectedResultMap)
    assert(j2.segment("good.tumblr.com", "info").get().entityAs[Map[String, Object]] === expectedResultMap)
  }

  test("toString works as expected") {
    val resp = RestClientResponse(Status.OK, Map(), new ByteArrayInputStream("The response goes here.".getBytes("UTF-8")))
    assert(resp.toString === """RestClientResponse(status="OK", headers=Map(), entity="The response goes here.")""")
    // An empty entity stream renders as an empty entity string.
    assert(resp.copy(entity = new ByteArrayInputStream(Array())).toString === """RestClientResponse(status="OK", headers=Map(), entity="")""")
  }

  test("new syntax") {
    implicit def jacksonObjectMapper(): ObjectMapper with ScalaObjectMapper = {
      val mapper = new ObjectMapper with ScalaObjectMapper
      mapper.registerModule(DefaultScalaModule)
      mapper
    }

    // Real executor backing the DSL: builds the URI, applies headers,
    // dispatches by verb, and wraps the response as JSON when appropriate.
    implicit def cachingJerseyExecutor(c: Client, verb: Verb): Response = {
      import c._
      val uri: URI = new URI(
        protocol.orNull,
        null,
        host.orNull,
        port getOrElse -1,
        if (path.isEmpty) null else path mkString "/",
        if (params.isEmpty) null else params map {p => p._1 + "=" + p._2} mkString "&",
        null)

      def addHeaders(resource: WebResource#Builder, headers: List[(String, String)]): WebResource#Builder = headers match {
        case Nil => resource
        case (k, v) :: rest => addHeaders(resource.header(k, v), rest)
      }

      val resource: WebResource#Builder = addHeaders(JerseyClient.create().resource(uri).getRequestBuilder, headers.toList)

      val response: JerseyClientResponse = verb match {
        case GET => resource.get(classOf[JerseyClientResponse])
        case PUT => resource.put(classOf[JerseyClientResponse], body)
        case POST => resource.post(classOf[JerseyClientResponse], body)
        case OPTIONS => resource.options(classOf[JerseyClientResponse])
        case HEAD => resource.head()
        case DELETE => resource.delete(classOf[JerseyClientResponse], body)
      }

      if (c.isJsonClient || response.getHeaders.get("content-type").mkString("").contains("json")) {
        JsonClientResponse(response)(jacksonObjectMapper())
      } else {
        UntypedResponse(response)
      }
    }

    /*
        val client: Client = Client(headers = Map("content-type", "application/json")).host("example.com").port(8080)
        val response: Response = client / "this" /("is", "a", "path") & ("an" -> "arg") & ("another" -> "arg") go GET
        val s: String = response / "json" / "keys" / "here" / TEXT
        val d: Double = (response / "json" / "keys" / "also" / "here").as[Double]
        val x = Client("http://www.example.com:8080").header("content-type" -> "application/json") / "path" / "to" body List("a","b") go PUT
    */

    // NOTE(review): these calls hit live *.jsontest.com services — the test
    // depends on network availability; confirm this is intended.
    val ip = Client("http://ip.jsontest.com/")
    val response: Response = ip go GET
    val s1: String = response / "ip" / TEXT
    val map: Map[String, Any] = response.as[Map[String,Any]]
    val echo = (Client("http://echo.jsontest.com") / "/key/value" / "one/two" go GET).as[Map[String,Any]]
    val valid = (Client("http://validate.jsontest.com/") & ("json" -> """{"key":"value"}""") go GET).as[Map[String,Any]]
    map
  }
}
| vvcephei/denim | src/test/scala/org/vvcephei/rest/RestClientTest.scala | Scala | apache-2.0 | 9,358 |
package org.jetbrains.plugins.scala
package lang
package structureView
package itemsPresentations
package impl
import javax.swing._
import com.intellij.openapi.editor.colors.{CodeInsightColors, TextAttributesKey}
import com.intellij.psi._
import org.jetbrains.plugins.scala.icons.Icons;
/**
 * Structure-view presentation for a Scala `val` member: text from the
 * element, the VAL icon, and grayed-out attributes when inherited.
 *
 * @author Alexander Podkhalyuzin
 * Date: 08.05.2008
 */
class ScalaValueItemPresentation(private val element: PsiElement, isInherited: Boolean) extends ScalaItemPresentation(element) {
  def getPresentableText: String = ScalaElementPresentation.getPresentableText(myElement)

  override def getIcon(open: Boolean): Icon = Icons.VAL

  override def getTextAttributesKey: TextAttributesKey =
    if (isInherited) CodeInsightColors.NOT_USED_ELEMENT_ATTRIBUTES else null
}
| triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/lang/structureView/itemsPresentations/impl/ScalaValueItemPresentation.scala | Scala | apache-2.0 | 788 |
/*
* Copyright 2015 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.csv
import kantan.codecs.laws.{CodecLaws, CodecValue, DecoderLaws, EncoderLaws}
import kantan.csv.ops._
package object laws {
  // Law sets specialized to kantan.csv's codecs companion:
  // cell-level laws operate on a single String field.
  type CellDecoderLaws[A] = DecoderLaws[String, A, DecodeError, codecs.type]
  type CellEncoderLaws[A] = EncoderLaws[String, A, codecs.type]
  type CellCodecLaws[A] = CodecLaws[String, A, DecodeError, codecs.type]
  // Row-level laws operate on a full row of String fields.
  type RowDecoderLaws[A] = DecoderLaws[Seq[String], A, DecodeError, codecs.type]
  type RowEncoderLaws[A] = EncoderLaws[Seq[String], A, codecs.type]
  type RowCodecLaws[A] = CodecLaws[Seq[String], A, DecodeError, codecs.type]

  // Generated values (legal and illegal) for cell and row codecs.
  type CellValue[A] = CodecValue[String, A, codecs.type]
  type LegalCell[A] = CodecValue.LegalValue[String, A, codecs.type]
  type IllegalCell[A] = CodecValue.IllegalValue[String, A, codecs.type]
  type RowValue[A] = CodecValue[Seq[String], A, codecs.type]
  type LegalRow[A] = CodecValue.LegalValue[Seq[String], A, codecs.type]
  type IllegalRow[A] = CodecValue.IllegalValue[Seq[String], A, codecs.type]

  /** Serializes the encoded side of each row value to CSV with the given configuration. */
  def asCsv[A](data: List[RowValue[A]], conf: CsvConfiguration): String =
    data.map(_.encoded).asCsv(conf)
}
| nrinaudo/tabulate | laws/shared/src/main/scala/kantan/csv/laws/package.scala | Scala | mit | 1,721 |
package com.twitter.finagle.httpx.netty
import org.jboss.netty.buffer.ChannelBuffer
import org.jboss.netty.handler.codec.http.{HttpHeaders, HttpMessage, HttpVersion}
/** Proxy for HttpMessage. Used by Request and Response. */
/**
 * Proxy for HttpMessage. Used by Request and Response.
 *
 * Every operation simply forwards to the underlying Netty [[HttpMessage]].
 */
private[finagle] trait HttpMessageProxy extends Proxy {
  protected[finagle] def httpMessage: HttpMessage
  protected[finagle] def getHttpMessage(): HttpMessage = httpMessage

  def self = httpMessage

  protected[finagle] def getProtocolVersion(): HttpVersion = httpMessage.getProtocolVersion()

  protected[finagle] def setProtocolVersion(version: HttpVersion): Unit = {
    httpMessage.setProtocolVersion(version)
  }

  protected[finagle] def headers(): HttpHeaders = httpMessage.headers()

  protected[finagle] def getContent(): ChannelBuffer = httpMessage.getContent()

  protected[finagle] def setContent(content: ChannelBuffer): Unit = {
    httpMessage.setContent(content)
  }

  def isChunked: Boolean = httpMessage.isChunked()

  def setChunked(chunked: Boolean): Unit = {
    httpMessage.setChunked(chunked)
  }
}
| suls/finagle | finagle-httpx/src/main/scala/com/twitter/finagle/httpx/netty/HttpMessageProxy.scala | Scala | apache-2.0 | 1,049 |
package scoreboards
import org.openqa.selenium.WebDriver
import org.openqa.selenium.htmlunit.HtmlUnitDriver
import org.openqa.selenium.remote.DesiredCapabilities
import scoreboards.Scoreboards._
import scala.annotation.tailrec
/**
 * Scrapes a paginated scoreboard: walks every page, accumulates rows,
 * then hands the sorted data (with the column names read from the first
 * page) to the collector.
 */
class Scoreboards(columnNameResolver: ColumnNamesResolver,
                  pageDataResolver: PageDataResolver,
                  nextPage: NextPage,
                  sort: Sorter,
                  collector: Collector) {

  def collect(): Unit = {
    // Pages are prepended (O(1) each) and flattened in original order at the
    // end; the previous `data ::: page` append was O(n^2) over many pages.
    // Argument evaluation order (resolve page, then advance) is preserved.
    @tailrec def extract(pages: List[PageData], driver: Option[WebDriver]): List[PageData] = driver match {
      case None => pages
      case Some(d) => extract(pageDataResolver(d) :: pages, nextPage(d))
    }

    // NOTE(review): firefox DesiredCapabilities paired with HtmlUnitDriver
    // looks inconsistent — confirm this is intended.
    val driver = nextPage(new HtmlUnitDriver(DesiredCapabilities.firefox()))
    val columnNames = driver map columnNameResolver
    val pageData = extract(Nil, driver).reverse.flatten
    collector(columnNames.getOrElse(Nil), sort(pageData))
  }
}
/** Type aliases describing the data flow of [[Scoreboards.collect]]. */
object Scoreboards {
  // Header labels, in display order.
  type ColumnNames = List[String]
  // All rows extracted from one page.
  type PageData = List[Row]
  // A single row: column name -> cell value.
  type Row = Map[String, String]

  // Reads the column names from the current page.
  type ColumnNamesResolver = (WebDriver) => ColumnNames
  // Reads the row data from the current page.
  type PageDataResolver = (WebDriver) => PageData
  // Advances to the next page, or None when there is no further page.
  type NextPage = (WebDriver) => Option[WebDriver]
  // Orders the accumulated rows.
  type Sorter = PageData => PageData
  // Consumes the final column names and sorted rows.
  type Collector = (ColumnNames, PageData) => Unit
}
package core.models.daos
import com.mohiva.play.silhouette.api.LoginInfo
import core.models.User
import core.utils.json.MongoFormats._
import core.utils.mongo.MongoModel
import javax.inject.Inject
import play.api.libs.json.Json
import play.modules.reactivemongo.ReactiveMongoApi
import play.modules.reactivemongo.json._
import reactivemongo.bson.BSONObjectID
import reactivemongo.play.json.BSONFormats.BSONObjectIDFormat
import reactivemongo.play.json.collection.JSONCollection
import scala.concurrent.{ ExecutionContext, Future }
/**
* Give access to the [[User]] object.
*
* @param reactiveMongoApi The ReactiveMongo API.
* @param ec The execution context.
*/
class UserDAOImpl @Inject() (reactiveMongoApi: ReactiveMongoApi)(
  implicit
  val ec: ExecutionContext
) extends UserDAO with MongoModel {

  /**
   * The MongoDB collection.
   */
  protected def collection = reactiveMongoApi.database.map(_.collection[JSONCollection]("users"))

  /**
   * Finds a user by its login info.
   *
   * @param loginInfo The login info of the user to find.
   * @return The found user or None if no user for the given login info could be found.
   */
  def find(loginInfo: LoginInfo): Future[Option[User]] =
    collection.flatMap(_.find(Json.obj("loginInfo" -> loginInfo)).one[User])

  /**
   * Finds a user by its user ID.
   *
   * @param userID The ID of the user to find.
   * @return The found user or None if no user for the given ID could be found.
   */
  def find(userID: BSONObjectID): Future[Option[User]] =
    collection.flatMap(_.find(Json.obj("_id" -> BSONObjectIDFormat.partialWrites(userID))).one[User])

  /**
   * Saves a user.
   *
   * Performs an upsert keyed on the user's `_id`: the document is inserted
   * when no match exists, otherwise replaced.
   *
   * @param user The user to save.
   * @return The saved user.
   */
  def save(user: User): Future[User] = {
    // onSuccess (from MongoModel) maps a successful write result back to the user.
    onSuccess(collection.flatMap(
      _.update(
        Json.obj("_id" -> BSONObjectIDFormat.partialWrites(user.id)),
        user,
        upsert = true
      )
    ), user)
  }
}
| akkie/silhouette-play-react-seed | app-core/src/main/scala/core/models/daos/UserDAOImpl.scala | Scala | mit | 1,962 |
/**********************************************************************************************************************
* This file is part of Scrupal, a Scalable Reactive Web Application Framework for Content Management *
* *
* Copyright (c) 2015, Reactific Software LLC. All Rights Reserved. *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance *
* with the License. You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed *
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for *
* the specific language governing permissions and limitations under the License. *
**********************************************************************************************************************/
package scrupal.storage.api
import java.net.URI
import play.api.Configuration
import scrupal.storage.impl.StorageConfigHelper
import scrupal.store.mem.MemoryStorageDriver
import scrupal.utils.{ConfigHelpers, ScrupalComponent}
import scala.concurrent.{ExecutionContext, Future}
/** Title Of Thing.
*
* Description of thing
*/
/** Entry point for obtaining [[StoreContext]] instances.
  *
  * A context can be built directly from a URI, from a configuration file, from a
  * [[play.api.Configuration]], or from a specific storage sub-configuration. All
  * configuration-based variants look up the `storage.<name>` section and delegate
  * to [[fromSpecificConfig]], which resolves the `uri` entry via [[fromURI]].
  */
object Storage extends ScrupalComponent {

  // The in-memory driver ships with scrupal and must always be available.
  require(MemoryStorageDriver.isRegistered, "MemoryStorageDriver failed to register")

  /** Open (optionally creating) the store identified by `uri`. */
  def apply(uri : URI, create: Boolean = true)(implicit ec: ExecutionContext) : Future[StoreContext] = {
    fromURI(uri, create)
  }

  /** Build a context from the storage configuration found in the file at `path`.
    *
    * @param path   location of the configuration file
    * @param name   the storage section to use (`storage.<name>`)
    * @param create whether a missing store should be created
    */
  def fromConfigFile(path: String, name : String = "scrupal", create: Boolean = false)(implicit ec: ExecutionContext) : Future[StoreContext] = {
    val helper = StorageConfigHelper.fromConfigFile(path)
    contextFromNamedConfig(helper.getStorageConfig, name, create)
  }

  /** Build a context from `conf`, or from the default configuration when `conf` is None. */
  def fromConfiguration(
    conf: Option[Configuration] = None, name : String = "scrupal", create: Boolean = false)(implicit ec: ExecutionContext) : Future[StoreContext] = {
    val config = conf.getOrElse {
      val dflt = ConfigHelpers.default()
      val helper = new StorageConfigHelper(dflt)
      helper.getStorageConfig
    }
    contextFromNamedConfig(config, name, create)
  }

  // Shared lookup of the "storage.<name>" section; this logic was previously
  // duplicated verbatim in fromConfigFile and fromConfiguration.
  private def contextFromNamedConfig(config: Configuration, name: String, create: Boolean)(implicit ec: ExecutionContext) : Future[StoreContext] = {
    val config_name = s"storage.$name"
    config.getConfig(config_name) match {
      case Some(cfg) ⇒
        fromSpecificConfig(cfg, create)
      case None ⇒
        // toss throws, so the Future completes as a failure.
        Future { toss(s"Failed to find storage configuration for $config_name") }
    }
  }

  /** Build a context from a specific storage sub-configuration; requires a "uri" entry. */
  def fromSpecificConfig(config : Configuration, create: Boolean = false)(implicit ec: ExecutionContext) : Future[StoreContext] = {
    config.getString("uri") match {
      case Some(uri) ⇒
        fromURI(uri, create)
      case None ⇒
        Future { toss("No 'uri' element of configuration") }
    }
  }

  /** Parse `uriStr` and delegate to the URI overload. */
  def fromURI(uriStr : String, create: Boolean)(implicit ec: ExecutionContext) : Future[StoreContext] = {
    fromURI(new URI(uriStr), create)
  }

  /** Resolve a driver for `uri`, open the store, and wrap both in a [[StoreContext]]. */
  def fromURI(uri: URI, create: Boolean = false)(implicit ec: ExecutionContext) : Future[StoreContext] = {
    StorageDriver(uri).flatMap { driver ⇒
      driver.open(uri, create) map { store ⇒
        StoreContext(driver, store)(ec)
      }
    }
  }
}
| scrupal/scrupal | scrupal-storage/src/main/scala/scrupal/storage/api/Storage.scala | Scala | apache-2.0 | 4,100 |
package spatutorial.shared
import boopickle._
// Priority levels for a todo item; sealed so the full set of cases is known
// at compile time (required for exhaustive matching and pickling below).
sealed trait TodoPriority
case object TodoLow extends TodoPriority
case object TodoNormal extends TodoPriority
case object TodoHigh extends TodoPriority

// A single todo entry shared between client and server.
case class TodoItem(id: String, timeStamp:Int, content: String, priority: TodoPriority, completed: Boolean)

object TodoPriority {
  // boopickle pickler for the sealed trait: every concrete subtype is registered
  // explicitly with the composite pickler.
  implicit val todoPriorityPickler = CompositePickler[TodoPriority].addConcreteType[TodoLow.type].addConcreteType[TodoNormal.type].addConcreteType[TodoHigh.type]
}
| kkasravi/scalajs-spa-tutorial | shared/src/main/scala/spatutorial/shared/TodoItem.scala | Scala | apache-2.0 | 500 |
package com.github.diegopacheco.sandbox.scala.akka.actor.persistence
import akka.persistence.SaveSnapshotFailure
import akka.persistence.SaveSnapshotSuccess
import akka.persistence.Processor
import akka.persistence.SnapshotOffer
import akka.persistence.Persistent
import akka.actor.ActorSystem
import akka.actor.Props
import akka.persistence.Recover
import akka.persistence.SnapshotSelectionCriteria
@deprecated("Processor will be removed. Instead extend `akka.persistence.PersistentActor` and use it's `persistAsync(command)(callback)` method to get equivalent semantics.", since = "2.3.4")
class MyProcessor extends Processor {

  /** Last state captured for / restored from a snapshot; untyped by design. */
  var state: Any = _

  def receive = {
    case "snap" =>
      // Ask the persistence layer to snapshot the current state asynchronously.
      saveSnapshot(state)
    case SaveSnapshotSuccess(metadata) =>
      println(s"meta: $metadata")
    case SaveSnapshotFailure(metadata, reason) =>
      println(s"meta: $metadata reason: $reason")
    case SnapshotOffer(metadata, offeredSnapshot) =>
      // Recovery path: adopt the snapshot offered by the persistence layer.
      state = offeredSnapshot
    case Persistent(payload, sequenceNr) =>
      println("persistent")
  }
}
// Demo entry point: starts an actor system, requests a snapshot of the
// processor's (initially empty) state, then explicitly triggers recovery.
object SnapshotsPersistenceApp extends App {
  val system = ActorSystem("SnapshotsAS")
  val processor = system.actorOf(Props[MyProcessor])
  // Handled by MyProcessor.receive; triggers saveSnapshot(state).
  processor ! "snap"
  // Start recovery, restricted to snapshots up to sequence number 457
  // and not newer than the current time.
  processor !
    Recover(fromSnapshot = SnapshotSelectionCriteria(maxSequenceNr = 457L,maxTimestamp = System.currentTimeMillis))
} | diegopacheco/scala-playground | scala_11_akka_23_full_playground/src/main/scala/com/github/diegopacheco/sandbox/scala/akka/actor/persistence/SnapshotsPersistenceApp.scala | Scala | unlicense | 1,415 |
// Copyright 2015-2016 Ricardo Gladwell.
// Licensed under the GNU Affero General Public License.
// See the LICENSE file for more information.
package com.is_hosted_by.api
import java.io.InputStream
import scala.concurrent.{ExecutionContext, Future}
// Abstraction over parsing a stream of network definitions into Network values.
trait NetworkParser {
  /**
   * Parse the given stream asynchronously.
   *
   * @param input stream containing the network definitions; closing it is not handled here
   * @param context execution context the parsing future runs on
   * @return the parsed networks
   */
  def parseNetwork(input: InputStream)(implicit context: ExecutionContext): Future[Seq[Network]]
}
| rgladwell/is-aws-api | src/main/scala/com/is_hosted_by/api/NetworkParser.scala | Scala | agpl-3.0 | 377 |
/*
* Copyright (C) 2009-2018 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.test
import org.openqa.selenium.WebDriver
import org.specs2.execute.{ AsResult, Result }
import org.specs2.mutable.Around
import org.specs2.specification.Scope
import play.api.inject.guice.{ GuiceApplicationBuilder, GuiceApplicationLoader }
import play.api.{ Application, ApplicationLoader, Environment, Mode }
import play.core.j.JavaContextComponents
import play.core.server.ServerProvider
// NOTE: Do *not* put any initialisation code in the below classes, otherwise delayedInit() gets invoked twice
// which means around() gets invoked twice and everything is not happy. Only lazy vals and defs are allowed, no vals
// or any other code blocks.
/**
* Used to run specs within the context of a running application loaded by the given `ApplicationLoader`.
*
* @param applicationLoader The application loader to use
* @param context The context supplied to the application loader
*/
abstract class WithApplicationLoader(applicationLoader: ApplicationLoader = new GuiceApplicationLoader(), context: ApplicationLoader.Context = ApplicationLoader.Context.create(new Environment(new java.io.File("."), ApplicationLoader.getClass.getClassLoader, Mode.Test))) extends Around with Scope {
  // lazy so the application is only loaded when first used — see the file-level
  // note: vals/initialisation code here would run twice via delayedInit.
  implicit lazy val app = applicationLoader.load(context)

  // Runs the spec body with the application started, stopping it afterwards.
  def around[T: AsResult](t: => T): Result = {
    Helpers.running(app)(AsResult.effectively(t))
  }
}
/**
* Used to run specs within the context of a running application.
*
* @param app The fake application
*/
abstract class WithApplication(val app: Application = GuiceApplicationBuilder().build()) extends Around with Scope {

  // Convenience constructor: customise the default GuiceApplicationBuilder.
  def this(builder: GuiceApplicationBuilder => GuiceApplicationBuilder) {
    this(builder(GuiceApplicationBuilder()).build())
  }

  // Expose the application and its materializer implicitly to the spec body.
  implicit def implicitApp = app
  implicit def implicitMaterializer = app.materializer

  // Runs the spec body with the application started, stopping it afterwards.
  override def around[T: AsResult](t: => T): Result = {
    Helpers.running(app)(AsResult.effectively(t))
  }
}
/**
* Used to run specs within the context of a running server.
*
* @param app The fake application
* @param port The port to run the server on
* @param serverProvider *Experimental API; subject to change* The type of
* server to use. Defaults to providing a Netty server.
*/
abstract class WithServer(
    val app: Application = GuiceApplicationBuilder().build(),
    val port: Int = Helpers.testServerPort,
    val serverProvider: Option[ServerProvider] = None) extends Around with Scope {
  // Expose the materializer, application and bound port implicitly to the spec body.
  implicit def implicitMaterializer = app.materializer
  implicit def implicitApp = app
  implicit def implicitPort: Port = port

  // Starts a TestServer around the spec body and stops it afterwards.
  override def around[T: AsResult](t: => T): Result =
    Helpers.running(TestServer(
      port = port,
      application = app,
      serverProvider = serverProvider))(AsResult.effectively(t))
}
/**
* Used to run specs within the context of a running server, and using a web browser
*
* @param webDriver The driver for the web browser to use
* @param app The fake application
* @param port The port to run the server on
*/
abstract class WithBrowser[WEBDRIVER <: WebDriver](
    val webDriver: WebDriver = WebDriverFactory(Helpers.HTMLUNIT),
    val app: Application = GuiceApplicationBuilder().build(),
    val port: Int = Helpers.testServerPort) extends Around with Scope {

  // Convenience constructor taking the WebDriver class rather than an instance.
  def this(
    webDriver: Class[WEBDRIVER],
    app: Application,
    port: Int) = this(WebDriverFactory(webDriver), app, port)

  implicit def implicitApp: Application = app
  implicit def implicitPort: Port = port

  // lazy so the browser is only created when the spec actually uses it.
  lazy val browser: TestBrowser = TestBrowser(webDriver, Some("http://localhost:" + port))

  override def around[T: AsResult](t: => T): Result = {
    try {
      Helpers.running(TestServer(port, app))(AsResult.effectively(t))
    } finally {
      // Always shut the browser down, even if the server or the spec body fails.
      browser.quit()
    }
  }
}
| Shenker93/playframework | framework/src/play-specs2/src/main/scala/play/api/test/Specs.scala | Scala | apache-2.0 | 3,822 |
/*
* Copyright 2012 IL <iron9light AT gmali DOT com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ideacolorschemes.commons.json
import net.liftweb.json._
import com.ideacolorschemes.commons.entities.Version
/**
 * lift-json serializer mapping a [[Version]] to and from its string form.
 *
 * @author il
 * @version 11/8/11 10:11 AM
 */
class VersionSerializer extends Serializer[Version] {

  // Target class, used as a stable identifier in the deserialize pattern.
  private val versionClass = classOf[Version]

  def deserialize(implicit format: Formats) = {
    case (TypeInfo(this.versionClass, _), json) =>
      json match {
        case JString(s) =>
          // Version(s) may reject malformed strings; surface that as a MappingException.
          try Version(s)
          catch {
            case e: Exception =>
              throw new MappingException("Can't convert %s to %s".format(json, versionClass), e)
          }
        case _ =>
          throw new MappingException("Can't convert %s to %s".format(json, versionClass))
      }
  }

  def serialize(implicit format: Formats) = {
    case version: Version => JString(version.toString)
  }
}
| iron9light/ideacolorschemes-commons | src/main/scala/com/ideacolorschemes/commons/json/VersionSerializer.scala | Scala | apache-2.0 | 1,418 |
package com.box.castle.router
/** Base runtime exception for router failures; `msg` is exposed as a field and `e` is the optional cause (null when absent). */
class RouterException(val msg: String, e: Throwable = null) extends RuntimeException(msg, e)
| Box-Castle/router | src/main/scala/com/box/castle/router/RouterException.scala | Scala | apache-2.0 | 124 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.spark.sql
import java.util.Calendar
import java.util.Date
import java.util.Locale
import scala.collection.JavaConverters.mapAsJavaMapConverter
import scala.collection.mutable.LinkedHashMap
import scala.collection.mutable.LinkedHashSet
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.Row
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.SaveMode
import org.apache.spark.sql.SaveMode.Append
import org.apache.spark.sql.SaveMode.ErrorIfExists
import org.apache.spark.sql.SaveMode.Ignore
import org.apache.spark.sql.SaveMode.Overwrite
import org.apache.spark.sql.sources.And
import org.apache.spark.sql.sources.BaseRelation
import org.apache.spark.sql.sources.CreatableRelationProvider
import org.apache.spark.sql.sources.EqualTo
import org.apache.spark.sql.sources.Filter
import org.apache.spark.sql.sources.GreaterThan
import org.apache.spark.sql.sources.GreaterThanOrEqual
import org.apache.spark.sql.sources.In
import org.apache.spark.sql.sources.InsertableRelation
import org.apache.spark.sql.sources.IsNotNull
import org.apache.spark.sql.sources.IsNull
import org.apache.spark.sql.sources.LessThan
import org.apache.spark.sql.sources.LessThanOrEqual
import org.apache.spark.sql.sources.Not
import org.apache.spark.sql.sources.Or
import org.apache.spark.sql.sources.PrunedFilteredScan
import org.apache.spark.sql.sources.RelationProvider
import org.apache.spark.sql.sources.SchemaRelationProvider
import org.apache.spark.sql.types.DateType
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.types.TimestampType
import org.elasticsearch.hadoop.EsHadoopIllegalArgumentException
import org.elasticsearch.hadoop.EsHadoopIllegalStateException
import org.elasticsearch.hadoop.cfg.ConfigurationOptions
import org.elasticsearch.hadoop.cfg.InternalConfigurationOptions
import org.elasticsearch.hadoop.rest.{InitializationUtils, RestClient, RestRepository}
import org.elasticsearch.hadoop.serialization.builder.JdkValueWriter
import org.elasticsearch.hadoop.serialization.json.JacksonJsonGenerator
import org.elasticsearch.hadoop.util.FastByteArrayOutputStream
import org.elasticsearch.hadoop.util.IOUtils
import org.elasticsearch.hadoop.util.SettingsUtils
import org.elasticsearch.hadoop.util.StringUtils
import org.elasticsearch.hadoop.util.Version
import org.elasticsearch.spark.cfg.SparkSettingsManager
import org.elasticsearch.spark.serialization.ScalaValueWriter
import javax.xml.bind.DatatypeConverter
import org.elasticsearch.hadoop.serialization.field.ConstantFieldExtractor
// Spark SQL data source entry point for Elasticsearch. Implements all three
// provider interfaces: schema discovery, user-supplied schema, and writes.
private[sql] class DefaultSource extends RelationProvider with SchemaRelationProvider with CreatableRelationProvider {

  Version.logVersion()

  // RelationProvider: the schema is discovered from the Elasticsearch mapping.
  override def createRelation(@transient sqlContext: SQLContext, parameters: Map[String, String]): BaseRelation = {
    ElasticsearchRelation(params(parameters), sqlContext)
  }

  // SchemaRelationProvider: the caller supplies the schema explicitly.
  override def createRelation(@transient sqlContext: SQLContext, parameters: Map[String, String], schema: StructType): BaseRelation = {
    ElasticsearchRelation(params(parameters), sqlContext, Some(schema))
  }

  // CreatableRelationProvider: write `data` out according to `mode`.
  override def createRelation(@transient sqlContext: SQLContext, mode: SaveMode, parameters: Map[String, String], data: DataFrame): BaseRelation = {
    val relation = ElasticsearchRelation(params(parameters), sqlContext, Some(data.schema))
    mode match {
      case Append => relation.insert(data, false)
      // Overwrite deletes the existing index content before writing.
      case Overwrite => relation.insert(data, true)
      case ErrorIfExists => {
        // Only write when the target index is empty; otherwise fail fast.
        if (relation.isEmpty()) relation.insert(data, false)
        else throw new EsHadoopIllegalStateException(s"SaveMode is set to ErrorIfExists and " +
          s"index ${relation.cfg.getResourceWrite} exists and contains data. Consider changing the SaveMode")
      }
      // Ignore: skip the write silently when the index already holds data.
      case Ignore => if (relation.isEmpty()) { relation.insert(data, false) }
    }
    relation
  }

  // Normalizes Spark data-source options into es-hadoop configuration keys.
  private def params(parameters: Map[String, String]) = {
    // '.' seems to be problematic when specifying the options
    // so underscores in option names are translated back to dots first,
    // then known short names are mapped to their full es-hadoop keys.
    val params = parameters.map { case (k, v) => (k.replace('_', '.'), v)}. map { case (k, v) =>
      if (k.startsWith("es.")) (k, v)
      else if (k == "path") (ConfigurationOptions.ES_RESOURCE, v)
      else if (k == "pushdown") (Utils.DATA_SOURCE_PUSH_DOWN, v)
      else if (k == "strict") (Utils.DATA_SOURCE_PUSH_DOWN_STRICT, v)
      else if (k == "double.filtering") (Utils.DATA_SOURCE_KEEP_HANDLED_FILTERS, v)
      else ("es." + k, v)
    }
    // validate path: either a read-specific or a generic resource must be present.
    params.getOrElse(ConfigurationOptions.ES_RESOURCE_READ,
      params.getOrElse(ConfigurationOptions.ES_RESOURCE, throw new EsHadoopIllegalArgumentException("resource must be specified for Elasticsearch resources.")))
    params
  }
}
/**
 * Spark SQL relation backed by an Elasticsearch index.
 *
 * Supports pruned/filtered scans — with optional push-down of Spark filters as
 * Elasticsearch query DSL — and inserts (with optional overwrite that deletes
 * the existing index content first).
 */
private[sql] case class ElasticsearchRelation(parameters: Map[String, String], @transient val sqlContext: SQLContext, userSchema: Option[StructType] = None)
  extends BaseRelation with PrunedFilteredScan with InsertableRelation
{
  // Merged Spark conf + data-source options; transient/lazy so it is rebuilt where used.
  @transient lazy val cfg = { new SparkSettingsManager().load(sqlContext.sparkContext.getConf).merge(parameters.asJava) }

  // Schema discovered from the Elasticsearch mapping (used when no user schema is given).
  @transient lazy val lazySchema = { SchemaUtils.discoverMapping(cfg) }

  @transient lazy val valueWriter = { new ScalaValueWriter }

  override def schema = userSchema.getOrElse(lazySchema.struct)

  // TableScan
  def buildScan(): RDD[Row] = buildScan(Array.empty)

  // PrunedScan
  def buildScan(requiredColumns: Array[String]): RDD[Row] = buildScan(requiredColumns, Array.empty)

  // PrunedFilteredScan: configure the scroll (projection + pushed-down filters) and
  // hand the work off to ScalaEsRowRDD.
  def buildScan(requiredColumns: Array[String], filters: Array[Filter]): RDD[Row] = {
    val paramWithScan = LinkedHashMap[String, String]() ++ parameters
    var filteredColumns = requiredColumns

    // scroll fields only apply to source fields; handle metadata separately
    if (cfg.getReadMetadata) {
      val metadata = cfg.getReadMetadataField
      // if metadata is not selected, don't ask for it
      if (!requiredColumns.contains(metadata)) {
        paramWithScan += (ConfigurationOptions.ES_READ_METADATA -> false.toString())
      }
      else {
        filteredColumns = requiredColumns.filter( _ != metadata)
      }
    }

    // Set fields to scroll over (_metadata is excluded, because it isn't a part of _source)
    val sourceCSV = StringUtils.concatenate(filteredColumns.asInstanceOf[Array[Object]], StringUtils.DEFAULT_DELIMITER)
    paramWithScan += (InternalConfigurationOptions.INTERNAL_ES_TARGET_FIELDS -> sourceCSV)

    // Keep the order of fields requested by user (we don't exclude _metadata here)
    val requiredCSV = StringUtils.concatenate(requiredColumns.asInstanceOf[Array[Object]], StringUtils.DEFAULT_DELIMITER)
    paramWithScan += (Utils.DATA_SOURCE_REQUIRED_COLUMNS -> requiredCSV)

    // If the only field requested by user is metadata, we don't want to fetch the whole document source
    if (requiredCSV == cfg.getReadMetadataField()) {
      paramWithScan += (InternalConfigurationOptions.INTERNAL_ES_EXCLUDE_SOURCE -> "true")
    }

    if (filters != null && filters.size > 0) {
      if (Utils.isPushDown(cfg)) {
        if (Utils.LOGGER.isDebugEnabled()) {
          Utils.LOGGER.debug(s"Pushing down filters ${filters.mkString("[", ",", "]")}")
        }
        // Translate Spark filters into ES query DSL and ship them (base64-encoded)
        // along with the scan configuration.
        val filterString = createDSLFromFilters(filters, Utils.isPushDownStrict(cfg), SettingsUtils.isEs50(cfg))

        if (Utils.LOGGER.isTraceEnabled()) {
          Utils.LOGGER.trace(s"Transformed filters into DSL ${filterString.mkString("[", ",", "]")}")
        }
        paramWithScan += (InternalConfigurationOptions.INTERNAL_ES_QUERY_FILTERS -> IOUtils.serializeToBase64(filterString))
      }
      else {
        if (Utils.LOGGER.isTraceEnabled()) {
          Utils.LOGGER.trace("Push-down is disabled; ignoring Spark filters...")
        }
      }
    }

    new ScalaEsRowRDD(sqlContext.sparkContext, paramWithScan, lazySchema)
  }

  // introduced in Spark 1.6
  // Reports which filters Spark must still evaluate itself after push-down.
  override def unhandledFilters(filters: Array[Filter]): Array[Filter] = {
    if (Utils.isKeepHandledFilters(cfg) || filters == null || filters.size == 0) {
      filters
    } else {
      // walk the filters (things like And / Or) and see whether we recognize all of them
      // if we do, skip the filter, otherwise let it in there even though we might push some of it
      def unhandled(filter: Filter): Boolean = {
        filter match {
          case EqualTo(_, _) => false
          case GreaterThan(_, _) => false
          case GreaterThanOrEqual(_, _) => false
          case LessThan(_, _) => false
          case LessThanOrEqual(_, _) => false
          // In is problematic - see translate, don't filter it
          case In(_, _) => true
          case IsNull(_) => false
          case IsNotNull(_) => false
          case And(left, right) => unhandled(left) || unhandled(right)
          case Or(left, right) => unhandled(left) || unhandled(right)
          case Not(pred) => unhandled(pred)
          // Spark 1.3.1+
          case f: Product if isClass(f, "org.apache.spark.sql.sources.StringStartsWith") => false
          case f: Product if isClass(f, "org.apache.spark.sql.sources.StringEndsWith") => false
          case f: Product if isClass(f, "org.apache.spark.sql.sources.StringContains") => false
          // Spark 1.5+
          case f: Product if isClass(f, "org.apache.spark.sql.sources.EqualNullSafe") => false
          // unknown
          case _ => true
        }
      }

      val filtered = filters.filter(unhandled)
      if (Utils.LOGGER.isTraceEnabled()) {
        Utils.LOGGER.trace(s"Unhandled filters from ${filters.mkString("[", ",", "]")} to ${filtered.mkString("[", ",", "]")}")
      }
      filtered
    }
  }

  // Translates each filter to DSL and drops the ones that produced nothing.
  private def createDSLFromFilters(filters: Array[Filter], strictPushDown: Boolean, isES50: Boolean) = {
    filters.map(filter => translateFilter(filter, strictPushDown, isES50)).filter(query => StringUtils.hasText(query))
  }

  // string interpolation FTW
  // Translates one Spark filter into an ES query/filter DSL fragment;
  // returns the empty string for filters that cannot be translated.
  private def translateFilter(filter: Filter, strictPushDown: Boolean, isES50: Boolean):String = {
    // the pushdown can be strict - i.e. use only filters and thus match the value exactly (works with non-analyzed)
    // or non-strict meaning queries will be used instead that is the filters will be analyzed as well
    filter match {
      case EqualTo(attribute, value) => {
        // if we get a null, translate it into a missing query (we're extra careful - Spark should translate the equals into isMissing anyway)
        // BUG FIX: previously the missing/must_not query built here was discarded
        // (the `if` had no `else`, so execution fell through to the term/match
        // below). Chained as if/else-if so the null branch is actually returned.
        if (value == null || value == None || value == Unit) {
          if (isES50) {
            s"""{"bool":{"must_not":{"exists":{"field":"$attribute"}}}}"""
          }
          else {
            s"""{"missing":{"field":"$attribute"}}"""
          }
        }
        else if (strictPushDown) s"""{"term":{"$attribute":${extract(value)}}}"""
        else {
          if (isES50) {
            s"""{"match":{"$attribute":${extract(value)}}}"""
          }
          else {
            s"""{"query":{"match":{"$attribute":${extract(value)}}}}"""
          }
        }
      }
      case GreaterThan(attribute, value) => s"""{"range":{"$attribute":{"gt" :${extract(value)}}}}"""
      case GreaterThanOrEqual(attribute, value) => s"""{"range":{"$attribute":{"gte":${extract(value)}}}}"""
      case LessThan(attribute, value) => s"""{"range":{"$attribute":{"lt" :${extract(value)}}}}"""
      case LessThanOrEqual(attribute, value) => s"""{"range":{"$attribute":{"lte":${extract(value)}}}}"""
      case In(attribute, values) => {
        // when dealing with mixed types (strings and numbers) Spark converts the Strings to null (gets confused by the type field)
        // this leads to incorrect query DSL hence why nulls are filtered
        val filtered = values filter (_ != null)
        if (filtered.isEmpty) {
          return ""
        }

        // further more, match query only makes sense with String types so for other types apply a terms query (aka strictPushDown)
        val attrType = lazySchema.struct(attribute).dataType
        val isStrictType = attrType match {
          case DateType |
               TimestampType => true
          case _ => false
        }

        if (!strictPushDown && isStrictType) {
          if (Utils.LOGGER.isDebugEnabled()) {
            Utils.LOGGER.debug(s"Attribute $attribute type $attrType not suitable for match query; using terms (strict) instead")
          }
        }

        if (strictPushDown || isStrictType) s"""{"terms":{"$attribute":${extractAsJsonArray(filtered)}}}"""
        else {
          if (isES50) {
            s"""{"bool":{"should":[${extractMatchArray(attribute, filtered)}]}}"""
          }
          else {
            s"""{"or":{"filters":[${extractMatchArray(attribute, filtered)}]}}"""
          }
        }
      }
      case IsNull(attribute) => {
        if (isES50) {
          s"""{"bool":{"must_not":{"exists":{"field":"$attribute"}}}}"""
        }
        else {
          s"""{"missing":{"field":"$attribute"}}"""
        }
      }
      case IsNotNull(attribute) => s"""{"exists":{"field":"$attribute"}}"""
      case And(left, right) => {
        if (isES50) {
          s"""{"bool":{"filter":[${translateFilter(left, strictPushDown, isES50)}, ${translateFilter(right, strictPushDown, isES50)}]}}"""
        }
        else {
          s"""{"and":{"filters":[${translateFilter(left, strictPushDown, isES50)}, ${translateFilter(right, strictPushDown, isES50)}]}}"""
        }
      }
      case Or(left, right) => {
        if (isES50) {
          s"""{"bool":{"should":[{"bool":{"filter":${translateFilter(left, strictPushDown, isES50)}}}, {"bool":{"filter":${translateFilter(right, strictPushDown, isES50)}}}]}}"""
        }
        else {
          s"""{"or":{"filters":[${translateFilter(left, strictPushDown, isES50)}, ${translateFilter(right, strictPushDown, isES50)}]}}"""
        }
      }
      case Not(filterToNeg) => {
        if (isES50) {
          s"""{"bool":{"must_not":${translateFilter(filterToNeg, strictPushDown, isES50)}}}"""
        }
        else {
          s"""{"not":{"filter":${translateFilter(filterToNeg, strictPushDown, isES50)}}}"""
        }
      }

      // the filter below are available only from Spark 1.3.1 (not 1.3.0)

      //
      // String Filter notes:
      //
      // the DSL will be quite slow (linear to the number of terms in the index) but there's no easy way around them
      // we could use regexp filter however it's a bit overkill and there are plenty of chars to escape
      // s"""{"regexp":{"$attribute":"$value.*"}}"""
      // as an alternative we could use a query string but still, the analyzed / non-analyzed is there as the DSL is slightly more complicated
      // s"""{"query":{"query_string":{"default_field":"$attribute","query":"$value*"}}}"""
      // instead wildcard query is used, with the value lowercased (to match analyzed fields)

      case f:Product if isClass(f, "org.apache.spark.sql.sources.StringStartsWith") => {
        val arg = {
          val x = f.productElement(1).toString()
          if (!strictPushDown) x.toLowerCase(Locale.ROOT) else x
        }
        if (isES50) {
          s"""{"wildcard":{"${f.productElement(0)}":"$arg*"}}"""
        }
        else {
          s"""{"query":{"wildcard":{"${f.productElement(0)}":"$arg*"}}}"""
        }
      }

      case f:Product if isClass(f, "org.apache.spark.sql.sources.StringEndsWith") => {
        val arg = {
          val x = f.productElement(1).toString()
          if (!strictPushDown) x.toLowerCase(Locale.ROOT) else x
        }
        if (isES50) {
          s"""{"wildcard":{"${f.productElement(0)}":"*$arg"}}"""
        }
        else {
          s"""{"query":{"wildcard":{"${f.productElement(0)}":"*$arg"}}}"""
        }
      }

      case f:Product if isClass(f, "org.apache.spark.sql.sources.StringContains") => {
        val arg = {
          val x = f.productElement(1).toString()
          if (!strictPushDown) x.toLowerCase(Locale.ROOT) else x
        }
        if (isES50) {
          s"""{"wildcard":{"${f.productElement(0)}":"*$arg*"}}"""
        }
        else {
          s"""{"query":{"wildcard":{"${f.productElement(0)}":"*$arg*"}}}"""
        }
      }

      // the filters below are available only from Spark 1.5.0

      case f:Product if isClass(f, "org.apache.spark.sql.sources.EqualNullSafe") => {
        val arg = extract(f.productElement(1))
        if (strictPushDown) s"""{"term":{"${f.productElement(0)}":$arg}}"""
        else {
          if (isES50) {
            s"""{"match":{"${f.productElement(0)}":$arg}}"""
          }
          else {
            s"""{"query":{"match":{"${f.productElement(0)}":$arg}}}"""
          }
        }
      }

      case _ => ""
    }
  }

  // Name-based check so filters introduced by newer Spark versions can be matched
  // without a compile-time dependency on them.
  private def isClass(obj: Any, className: String) = {
    className.equals(obj.getClass().getName())
  }

  // Render a single value as JSON.
  private def extract(value: Any):String = {
    extract(value, true, false)
  }

  // Render a collection of values as a JSON array.
  private def extractAsJsonArray(value: Any):String = {
    extract(value, true, true)
  }

  // Renders the values of an In filter: numbers go into one terms query,
  // strings into a (whitespace-joined) match query.
  private def extractMatchArray(attribute: String, ar: Array[Any]):String = {
    // use a set to avoid duplicate values
    // especially since Spark conversion might turn each user param into null
    val numbers = LinkedHashSet.empty[AnyRef]
    val strings = LinkedHashSet.empty[AnyRef]

    // move numbers into a separate list for a terms query combined with a bool
    for (i <- ar) i.asInstanceOf[AnyRef] match {
      case null     => // ignore
      case n:Number => numbers += extract(i, false, false)
      case _        => strings += extract(i, false, false)
    }

    if (numbers.isEmpty) {
      if (strings.isEmpty) {
        StringUtils.EMPTY
      } else {
        if (SettingsUtils.isEs50(cfg)) {
          s"""{"match":{"$attribute":${strings.mkString("\\"", " ", "\\"")}}}"""
        }
        else {
          s"""{"query":{"match":{"$attribute":${strings.mkString("\\"", " ", "\\"")}}}}"""
        }
      }
    } else {
      // translate the numbers into a terms query
      val str = s"""{"terms":{"$attribute":${numbers.mkString("[", ",", "]")}}}"""
      if (strings.isEmpty) {
        str
      // if needed, add the strings as a match query
      } else str + {
        if (SettingsUtils.isEs50(cfg)) {
          s""",{"match":{"$attribute":${strings.mkString("\\"", " ", "\\"")}}}"""
        }
        else {
          s""",{"query":{"match":{"$attribute":${strings.mkString("\\"", " ", "\\"")}}}}"""
        }
      }
    }
  }

  // Core value-to-JSON rendering; falls back to the ScalaValueWriter for
  // anything that is not a primitive, string, date or array.
  private def extract(value: Any, inJsonFormat: Boolean, asJsonArray: Boolean):String = {
    // common-case implies primitives and String so try these before using the full-blown ValueWriter
    value match {
      case null           => "null"
      case u: Unit        => "null"
      case b: Boolean     => b.toString
      case by: Byte       => by.toString
      case s: Short       => s.toString
      case i: Int         => i.toString
      case l: Long        => l.toString
      case f: Float       => f.toString
      case d: Double      => d.toString
      case bd: BigDecimal => bd.toString
      case _: Char |
           _: String |
           _: Array[Byte] => if (inJsonFormat) StringUtils.toJsonString(value.toString) else value.toString()
      // handle Timestamp also
      case dt: Date => {
        val cal = Calendar.getInstance()
        cal.setTime(dt)
        val str = DatatypeConverter.printDateTime(cal)
        if (inJsonFormat) StringUtils.toJsonString(str) else str
      }
      case ar: Array[Any] =>
        if (asJsonArray) (for (i <- ar) yield extract(i, true, false)).distinct.mkString("[", ",", "]")
        else (for (i <- ar) yield extract(i, false, false)).distinct.mkString("\\"", " ", "\\"")
      // new in Spark 1.4
      case utf if (isClass(utf, "org.apache.spark.sql.types.UTF8String")
      // new in Spark 1.5
                   || isClass(utf, "org.apache.spark.unsafe.types.UTF8String"))
                     => if (inJsonFormat) StringUtils.toJsonString(utf.toString()) else utf.toString()
      case a: AnyRef => {
        val storage = new FastByteArrayOutputStream()
        val generator = new JacksonJsonGenerator(storage)
        valueWriter.write(a, generator)
        generator.flush()
        generator.close()
        storage.toString()
      }
    }
  }

  // InsertableRelation: write `data` to the configured resource; when overwrite
  // is requested, delete the existing index content first.
  def insert(data: DataFrame, overwrite: Boolean): Unit = {
    if (overwrite) {
      Utils.LOGGER.info(s"Overwriting data for ${cfg.getResourceWrite}")

      // perform a scan-scroll delete
      val cfgCopy = cfg.copy()
      InitializationUtils.setValueWriterIfNotSet(cfgCopy, classOf[JdkValueWriter], null)
      InitializationUtils.setFieldExtractorIfNotSet(cfgCopy, classOf[ConstantFieldExtractor], null) //throw away extractor
      cfgCopy.setProperty(ConfigurationOptions.ES_BATCH_FLUSH_MANUAL, "false")
      cfgCopy.setProperty(ConfigurationOptions.ES_BATCH_SIZE_ENTRIES, "1000")
      cfgCopy.setProperty(ConfigurationOptions.ES_BATCH_SIZE_BYTES, "1mb")
      val rr = new RestRepository(cfgCopy)
      if (rr.indexExists(false)) {
        rr.delete()
      }
      rr.close()
    }
    EsSparkSQL.saveToEs(data, parameters)
  }

  // True when the target resource holds no documents.
  def isEmpty(): Boolean = {
    val rr = new RestRepository(cfg)
    val empty = rr.isEmpty(true)
    rr.close()
    empty
  }
} | wangcy6/storm_app | Elasticsearch/elasticsearch-hadoop-master/spark/sql-13/src/main/scala/org/elasticsearch/spark/sql/DefaultSource.scala | Scala | apache-2.0 | 23,062 |
package services.graphstages
import akka.stream._
import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
import drt.shared.CrunchApi.{MillisSinceEpoch, StaffMinute, StaffMinutes}
import drt.shared.Terminals.Terminal
import drt.shared.{SDateLike, _}
import org.slf4j.{Logger, LoggerFactory}
import services.SDate
import services.graphstages.Crunch.movementsUpdateCriteria
import services.metrics.{Metrics, StageTimer}
import scala.collection.immutable.SortedMap
/**
 * Fan-in graph stage combining the three staffing inputs — shift assignments,
 * fixed points and staff movements — into per-terminal, per-minute
 * [[StaffMinute]]s pushed downstream as [[StaffMinutes]] batches.
 *
 * When any input changes, only the affected minutes/terminals (an
 * [[UpdateCriteria]]) are recalculated from all three current sources.
 *
 * @param initialShifts            shift assignments to seed the stage with
 * @param initialFixedPoints       fixed point assignments to seed the stage with
 * @param optionalInitialMovements staff movements to seed the stage with, if any
 * @param now                      clock function (injectable for testing)
 * @param expireAfterMillis        age in millis after which staffing data is considered expired
 * @param numberOfDays             number of days ahead for which fixed points are projected
 */
class StaffGraphStage(initialShifts: ShiftAssignments,
                      initialFixedPoints: FixedPointAssignments,
                      optionalInitialMovements: Option[Seq[StaffMovement]],
                      now: () => SDateLike,
                      expireAfterMillis: Int,
                      numberOfDays: Int)
  extends GraphStage[FanInShape3[ShiftAssignments, FixedPointAssignments, Seq[StaffMovement], StaffMinutes]] {

  val inShifts: Inlet[ShiftAssignments] = Inlet[ShiftAssignments]("Shifts.in")
  val inFixedPoints: Inlet[FixedPointAssignments] = Inlet[FixedPointAssignments]("FixedPoints.in")
  val inMovements: Inlet[Seq[StaffMovement]] = Inlet[Seq[StaffMovement]]("Movements.in")
  val outStaffMinutes: Outlet[StaffMinutes] = Outlet[StaffMinutes]("StaffMinutes.out")
  val stageName = "staff"

  override def shape: FanInShape3[ShiftAssignments, FixedPointAssignments, Seq[StaffMovement], StaffMinutes] =
    new FanInShape3(inShifts, inFixedPoints, inMovements, outStaffMinutes)

  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
    // Mutable stage state: the latest value seen on each inlet, plus the
    // accumulated (not yet pushed) staff minute updates keyed by terminal+minute.
    var shifts: ShiftAssignments = ShiftAssignments.empty
    var fixedPoints: FixedPointAssignments = FixedPointAssignments.empty
    var movementsOption: Option[Seq[StaffMovement]] = None
    var staffMinuteUpdates: SortedMap[TM, StaffMinute] = SortedMap()

    val log: Logger = LoggerFactory.getLogger(getClass)

    // Data older than this is dropped when the staff sources are rebuilt.
    val expireBefore: MillisSinceEpoch = now().millisSinceEpoch - expireAfterMillis

    // Rebuilt on every call so it always reflects the current shifts /
    // fixed points / movements state.
    def staffSources: StaffSources = Staffing.staffAvailableByTerminalAndQueue(expireBefore, shifts, fixedPoints, movementsOption)

    override def preStart(): Unit = {
      // Seed state from the constructor arguments before any pushes arrive.
      shifts = initialShifts
      fixedPoints = initialFixedPoints
      movementsOption = optionalInitialMovements
      super.preStart()
    }

    // Each inlet handler follows the same pattern: grab the new value, work out
    // which minutes/terminals it affects, recompute those minutes from all
    // sources, try to push, and pull the inlet again.
    setHandler(inShifts, new InHandler {
      override def onPush(): Unit = {
        val timer = StageTimer(stageName, inShifts)
        val incomingShifts = grab(inShifts)
        log.info(s"Grabbed available inShifts")
        val updateCriteria = shiftsUpdateCriteria(shifts, incomingShifts)
        shifts = incomingShifts
        applyUpdatesFromSources(staffSources, updateCriteria)
        tryPush()
        pull(inShifts)
        timer.stopAndReport()
      }
    })

    setHandler(inFixedPoints, new InHandler {
      override def onPush(): Unit = {
        val timer = StageTimer(stageName, inFixedPoints)
        val incomingFixedPoints = grab(inFixedPoints)
        log.info(s"Grabbed available inFixedPoints")
        val updateCriteria = fixedPointsUpdateCriteria(fixedPoints, incomingFixedPoints)
        fixedPoints = incomingFixedPoints
        applyUpdatesFromSources(staffSources, updateCriteria)
        tryPush()
        pull(inFixedPoints)
        timer.stopAndReport()
      }
    })

    setHandler(inMovements, new InHandler {
      override def onPush(): Unit = {
        val timer = StageTimer(stageName, inMovements)
        val incomingMovements = grab(inMovements)
        log.info(s"Grabbed available inMovements")
        val existingMovements = movementsOption.map(_.toSet).getOrElse(Set())
        val updateCriteria: UpdateCriteria = movementsUpdateCriteria(existingMovements, incomingMovements)
        movementsOption = Option(incomingMovements)
        applyUpdatesFromSources(staffSources, updateCriteria)
        tryPush()
        pull(inMovements)
        timer.stopAndReport()
      }
    })

    /**
     * Minutes/terminals affected by a shifts change. Only assignments present
     * in the new set but not the old one are considered (NOTE(review): removed
     * assignments do not trigger a recalculation here — confirm intended).
     * Each affected assignment contributes every minute of its span.
     */
    def shiftsUpdateCriteria(oldShifts: ShiftAssignments, newShifts: ShiftAssignments): UpdateCriteria = {
      val oldAssignments = oldShifts.assignments.toSet
      val newAssignments = newShifts.assignments.toSet

      val diff = newAssignments -- oldAssignments

      val minuteMillis = diff.flatMap(a => {
        val startMillis = a.startDt.millisSinceEpoch
        val endMillis = a.endDt.millisSinceEpoch
        startMillis to endMillis by 60000 // one entry per minute of the assignment
      }).toSeq
      val terminalNames = diff.map(_.terminal)

      UpdateCriteria(minuteMillis, terminalNames)
    }

    /**
     * Minutes/terminals affected by a fixed points change. Fixed points apply
     * by time-of-day, so the changed minutes are reduced to minutes-of-day
     * (in Europe/London time) and then projected across every one of the next
     * `numberOfDays` days starting at the most recent local midnight.
     * Terminals are taken from the symmetric difference of old and new
     * assignments (additions and removals both count).
     */
    def fixedPointsUpdateCriteria(oldFixedPoints: FixedPointAssignments, newFixedPoints: FixedPointAssignments): UpdateCriteria = {
      val fpMinutesToUpdate = allMinuteMillis(newFixedPoints) union allMinuteMillis(oldFixedPoints)
      val fpMinutesOfDayToUpdate = fpMinutesToUpdate.map(m => {
        val date = SDate(m, Crunch.europeLondonTimeZone)
        val hours = date.getHours()
        val minutes = date.getMinutes()
        hours * 60 + minutes
      })
      val firstMinute = now().getLocalLastMidnight

      val minuteMillis = (0 until numberOfDays)
        .flatMap(d =>
          fpMinutesOfDayToUpdate
            .toSeq
            .sortBy(identity)
            .map(m => {
              val date = firstMinute
                .addDays(d)
                .addMinutes(m)
              date.millisSinceEpoch
            })
        )

      val oldAssignments = oldFixedPoints.assignments.toSet
      val newAssignments = newFixedPoints.assignments.toSet
      val terminalNames = ((newAssignments -- oldAssignments) union (oldAssignments -- newAssignments)).map(_.terminal)

      UpdateCriteria(minuteMillis, terminalNames)
    }

    setHandler(outStaffMinutes, new OutHandler {
      override def onPull(): Unit = {
        val start = SDate.now()
        log.info(s"outStaffMinutes onPull called")
        // Flush anything accumulated, then make sure every inlet has demand.
        tryPush()
        if (!hasBeenPulled(inShifts)) pull(inShifts)
        if (!hasBeenPulled(inFixedPoints)) pull(inFixedPoints)
        if (!hasBeenPulled(inMovements)) pull(inMovements)
        log.info(s"outStaffMinutes Took ${SDate.now().millisSinceEpoch - start.millisSinceEpoch}ms")
      }
    })

    /**
     * Recomputes the StaffMinute for every (terminal, minute) named by the
     * criteria, combining the three staff sources, and merges the results into
     * the pending update map. Note the asymmetry: shifts are looked up via the
     * implicit local-time SDate conversion while fixed points explicitly use
     * Europe/London time.
     */
    def applyUpdatesFromSources(staff: StaffSources, updateCriteria: UpdateCriteria): Unit = {
      log.info(s"about to update ${updateCriteria.minuteMillis.size} staff minutes for ${updateCriteria.terminalNames}")

      import SDate.implicits.sdateFromMilliDateLocal

      val updatedMinutes = SortedMap[TM, StaffMinute]() ++ updateCriteria.minuteMillis
        .flatMap(m => {
          updateCriteria.terminalNames.map { tn =>
            val shifts = staff.shifts.terminalStaffAt(tn, SDate(m))
            val fixedPoints = staff.fixedPoints.terminalStaffAt(tn, SDate(m, Crunch.europeLondonTimeZone))
            val movements = staff.movements.terminalStaffAt(tn, m)
            (TM(tn, m), StaffMinute(tn, m, shifts, fixedPoints, movements, lastUpdated = Option(SDate.now().millisSinceEpoch)))
          }
        })

      staffMinuteUpdates = staffMinuteUpdates ++ updatedMinutes
    }

    /** Pushes pending updates downstream if there is demand; clears them once pushed. */
    def tryPush(): Unit = {
      if (isAvailable(outStaffMinutes)) {
        if (staffMinuteUpdates.nonEmpty) {
          log.info(s"Pushing ${staffMinuteUpdates.size} staff minute updates")
          Metrics.counter(s"$stageName.minute-updates", staffMinuteUpdates.size)
          push(outStaffMinutes, StaffMinutes(staffMinuteUpdates))
          staffMinuteUpdates = SortedMap()
        }
      } else log.debug(s"outStaffMinutes not available to push")
    }
  }

  /** Every minute (as epoch millis, one per 60s step) covered by any fixed point assignment. */
  def allMinuteMillis(fixedPoints: FixedPointAssignments): Set[MillisSinceEpoch] = {
    fixedPoints.assignments
      .flatMap(a => {
        val startMillis = a.startDt.millisSinceEpoch
        val endMillis = a.endDt.millisSinceEpoch
        startMillis to endMillis by 60000
      })
      .toSet
  }
}
/** The minutes (epoch millis) and terminals whose StaffMinutes need recalculating. */
case class UpdateCriteria(minuteMillis: Seq[MillisSinceEpoch], terminalNames: Set[Terminal])
| UKHomeOffice/drt-scalajs-spa-exploration | server/src/main/scala/services/graphstages/StaffGraphStage.scala | Scala | apache-2.0 | 7,937 |
package io.finch.petstore
import _root_.argonaut._, Argonaut._
import com.twitter.finagle.{Service, SimpleFilter}
import com.twitter.finagle.httpx.{Request, Response}
import com.twitter.util.Future
import io.finch.argonaut._
import io.finch.request._
import io.finch.request.items._
import io.finch.response._
import io.finch.route._
/**
 * Tells the API how to respond when certain exceptions are thrown.
 */
trait ErrorHandling {
  /**
   * Tells the service how to handle certain types of servable errors (i.e. PetstoreError).
   *
   * Request-reader failures (missing, unparsable or invalid params/body) map to
   * 400 Bad Request with a small JSON error object; domain-level PetstoreErrors
   * map to 404 Not Found carrying the error message.
   */
  def errorHandler: PartialFunction[Throwable, Response] = {
    // A required request parameter was absent.
    case NotPresent(ParamItem(p)) => BadRequest(
      Map("error" -> "param_not_present", "param" -> p).asJson
    )
    // The request body was absent.
    case NotPresent(BodyItem) => BadRequest(
      Map("error" -> "body_not_present").asJson
    )
    // A parameter was present but could not be parsed into the expected type.
    case NotParsed(ParamItem(p), _, _) => BadRequest(
      Map("error" -> "param_not_parsed", "param" -> p).asJson
    )
    // The body was present but could not be parsed.
    case NotParsed(BodyItem, _, _) => BadRequest(
      Map("error" -> "body_not_parsed").asJson
    )
    // A parameter failed a validation rule; the rule text is echoed back.
    case NotValid(ParamItem(p), rule) => BadRequest(
      Map("error" -> "param_not_valid", "param" -> p, "rule" -> rule).asJson
    )
    // Domain errors
    case error: PetstoreError => NotFound(
      Map("error" -> error.message).asJson
    )
  }

  /**
   * A simple Finagle filter that handles all the exceptions, which might be thrown by
   * a request reader of one of the REST services.
   */
  def handleExceptions: SimpleFilter[Request,Response] = new SimpleFilter[Request, Response] {
    def apply(req: Request, service: Service[Request, Response]): Future[Response] =
      service(req).handle(errorHandler)
  }
}
| bthuillier/finch | petstore/src/main/scala/io/finch/petstore/ErrorHandling.scala | Scala | apache-2.0 | 1,670 |
package io.mpjsons.impl
/**
 * Runtime exception wrapping lower-level failures raised inside this library,
 * so callers deal with a single exception type.
 *
 * @param message description of the failure
 * @param cause   the underlying exception that triggered it
 * @author Marcin Pieciukiewicz
 */
class JsonInnerException(message: String, cause: Exception) extends RuntimeException(message, cause)
| marpiec/mpjsons | src/main/scala/io/mpjsons/impl/JsonInnerException.scala | Scala | apache-2.0 | 302 |
/**
* This code is generated using [[https://www.scala-sbt.org/contraband/ sbt-contraband]].
*/
// DO NOT EDIT MANUALLY
package sbt.internal.langserver.codec
import _root_.sjsonnew.{ Unbuilder, Builder, JsonFormat, deserializationError }
// JSON codec for the LSP `Location` data type: a document URI plus a Range.
// NOTE: this trait is generated by sbt-contraband — change the contraband
// schema rather than hand-editing, or edits will be lost on regeneration.
trait LocationFormats { self: sbt.internal.langserver.codec.RangeFormats with sjsonnew.BasicJsonProtocol =>
  implicit lazy val LocationFormat: JsonFormat[sbt.internal.langserver.Location] = new JsonFormat[sbt.internal.langserver.Location] {
    override def read[J](__jsOpt: Option[J], unbuilder: Unbuilder[J]): sbt.internal.langserver.Location = {
      __jsOpt match {
        case Some(__js) =>
          // Deserialise { "uri": <String>, "range": <Range> }.
          unbuilder.beginObject(__js)
          val uri = unbuilder.readField[String]("uri")
          val range = unbuilder.readField[sbt.internal.langserver.Range]("range")
          unbuilder.endObject()
          sbt.internal.langserver.Location(uri, range)
        case None =>
          deserializationError("Expected JsObject but found None")
      }
    }
    override def write[J](obj: sbt.internal.langserver.Location, builder: Builder[J]): Unit = {
      // Serialise as { "uri": ..., "range": ... }.
      builder.beginObject()
      builder.addField("uri", obj.uri)
      builder.addField("range", obj.range)
      builder.endObject()
    }
  }
}
| xuwei-k/xsbt | protocol/src/main/contraband-scala/sbt/internal/langserver/codec/LocationFormats.scala | Scala | apache-2.0 | 1,196 |
package com.airbnb.aerosolve.training
import java.io.{StringReader, BufferedWriter, BufferedReader, StringWriter}
import com.airbnb.aerosolve.core.models.{ModelFactory, MlpModel}
import com.airbnb.aerosolve.core.Example
import com.typesafe.config.ConfigFactory
import org.apache.spark.SparkContext
import org.junit.Test
import org.slf4j.LoggerFactory
import org.junit.Assert._
import scala.collection.mutable.ArrayBuffer
import scala.collection.JavaConverters._
class MlpModelTrainerTest {
  val log = LoggerFactory.getLogger("MlpModelTrainerTest")

  /**
   * Builds the HOCON configuration string consumed by MlpModelTrainer.
   *
   * @param dropout          dropout probability
   * @param momentumT        number of iterations over which momentum ramps from init to end
   * @param loss             loss name (e.g. "hinge" or "regression"), spliced in verbatim
   * @param extraArgs        extra config lines spliced verbatim into model_config
   * @param weightDecay      L2 weight decay coefficient
   * @param margin           hinge margin (doubles as the regression epsilon)
   * @param learningRateInit initial learning rate
   */
  def makeConfig(dropout : Double,
                 momentumT : Int,
                 loss : String,
                 extraArgs : String,
                 weightDecay : Double = 0.0,
                 margin : Double = 1.0,
                 learningRateInit: Double = 0.1) : String = {
    """
      |identity_transform {
      |  transform : list
      |  transforms: []
      |}
      |model_config {
      |  %s
      |  loss : %s
      |  rank_key : "$rank"
      |  rank_threshold : 0.0
      |  margin : %f
      |  learning_rate_init : %f
      |  learning_rate_decay : 0.95
      |  momentum_init : 0.5
      |  momentum_end : 0.9
      |  momentum_t : %d
      |  weight_decay : %f
      |  weight_init_std : 0.5
      |  iterations : 50
      |  dropout : %f
      |  min_count : 0
      |  subsample : 0.1
      |  cache : "cache"
      |  context_transform : identity_transform
      |  item_transform : identity_transform
      |  combined_transform : identity_transform
      |  activations : ["tanh", "identity"]
      |  node_number : [5, 1]
      |  model_output : ""
      |}
    """.stripMargin.format(extraArgs, loss, margin, learningRateInit, momentumT, weightDecay, dropout)
  }

  // TODO (peng): add more tests and gradient checks

  // Classification variants: hinge loss on non-linear ("poly") and linear
  // example sets, with and without dropout / momentum / weight decay.
  @Test
  def testModelTrainerHingeNonLinear() : Unit = {
    testMlpModelTrainer("hinge", 0.0, "", 0, 0.0, "poly")
  }

  @Test
  def testModelTrainerHingeLinear() : Unit = {
    testMlpModelTrainer("hinge", 0.0, "", 0, 0.0, "linear")
  }

  @Test
  def testModelTrainerHingeNonLinearWithDropout() : Unit = {
    testMlpModelTrainer("hinge", 0.1, "", 0, 0.0, "poly")
  }

  @Test
  def testModelTrainerHingeLinearWithDropout() : Unit = {
    testMlpModelTrainer("hinge", 0.1, "", 0, 0.0, "linear")
  }

  @Test
  def testModelTrainerHingeNonLinearWithMomentum() : Unit = {
    testMlpModelTrainer("hinge", 0.0, "", 50, 0.0, "poly")
  }

  @Test
  def testModelTrainerHingeLinearWithMomentum() : Unit = {
    testMlpModelTrainer("hinge", 0.0, "", 50, 0.0, "linear")
  }

  @Test
  def testModelTrainerHingeNonLinearWithWeightDecay() : Unit = {
    testMlpModelTrainer("hinge", 0.0, "", 0, weightDecay = 0.0001, "poly")
  }

  @Test
  def testModelTrainerHingeLinearWithWeightDecay() : Unit = {
    testMlpModelTrainer("hinge", 0.0, "", 0, 0.0001, "linear")
  }

  @Test
  def testRegression(): Unit = {
    testRegressionModel(0.0, "", 0, weightDecay = 0.0, epsilon = 0.1, learningRateInit = 0.2)
  }

  @Test
  def testRegressionWithDropout(): Unit = {
    testRegressionModel(0.1, "", 0, weightDecay = 0.01, epsilon = 0.1, learningRateInit = 0.2)
  }

  /**
   * Trains a classifier on generated examples (polynomial or linear decision
   * boundary) inside a local SparkContext and checks accuracy + serialization.
   */
  def testMlpModelTrainer(loss : String,
                          dropout : Double,
                          extraArgs : String,
                          momentumT : Int,
                          weightDecay : Double = 0.0,
                          exampleFunc: String = "poly") = {
    var sc = new SparkContext("local", "MlpModelTrainerTest")
    try {
      val (examples, label, numPos) = if (exampleFunc.equals("poly")) {
        TrainingTestHelper.makeClassificationExamples
      } else {
        TrainingTestHelper.makeLinearClassificationExamples
      }
      val config = ConfigFactory.parseString(makeConfig(dropout, momentumT, loss, extraArgs, weightDecay))
      val input = sc.parallelize(examples)
      val model = MlpModelTrainer.train(sc, input, config, "model_config")
      testClassificationModel(model, examples, label, numPos)
    } finally {
      sc.stop
      sc = null
      // To avoid Akka rebinding to the same port, since it doesn't unbind immediately on shutdown
      System.clearProperty("spark.master.port")
    }
  }

  /**
   * Asserts the trained model classifies > 80% of examples correctly, then
   * round-trips it through save/load and checks scores are preserved.
   */
  def testClassificationModel(model: MlpModel,
                              examples: ArrayBuffer[Example],
                              label: ArrayBuffer[Double],
                              numPos: Int): Unit = {
    var numCorrect : Int = 0
    var i : Int = 0
    val labelArr = label.toArray
    for (ex <- examples) {
      val score = model.scoreItem(ex.example.get(0))
      // Correct when score and label share the same sign.
      if (score * labelArr(i) > 0) {
        numCorrect += 1
      }
      i += 1
    }
    val fracCorrect : Double = numCorrect * 1.0 / examples.length
    log.info("Num correct = %d, frac correct = %f, num pos = %d, num neg = %d"
               .format(numCorrect, fracCorrect, numPos, examples.length - numPos))
    assertTrue(fracCorrect > 0.8)

    // Serialize to a string buffer and reload; scores must match closely.
    val swriter = new StringWriter()
    val writer = new BufferedWriter(swriter)
    model.save(writer)
    writer.close()
    val str = swriter.toString
    val sreader = new StringReader(str)
    val reader = new BufferedReader(sreader)
    log.info(str)
    val model2Opt = ModelFactory.createFromReader(reader)
    assertTrue(model2Opt.isPresent)
    val model2 = model2Opt.get()
    for (ex <- examples) {
      val score = model.scoreItem(ex.example.get(0))
      val score2 = model2.scoreItem(ex.example.get(0))
      assertEquals(score, score2, 0.01f)
    }
  }

  /**
   * Trains a regression model and checks mean absolute error on both the
   * training set and a freshly generated test set.
   */
  def testRegressionModel(dropout : Double,
                          extraArgs : String,
                          momentumT : Int,
                          weightDecay : Double,
                          epsilon: Double = 0.1,
                          learningRateInit: Double = 0.1): Unit = {
    val (trainingExample, trainingLabel) = TrainingTestHelper.makeRegressionExamples()
    var sc = new SparkContext("local", "MlpRegressionTest")
    try {
      val config = ConfigFactory.parseString(makeConfig(
        dropout, momentumT, "regression", extraArgs, weightDecay = weightDecay,
        margin = epsilon, learningRateInit = learningRateInit))
      val input = sc.parallelize(trainingExample)
      val model = MlpModelTrainer.train(sc, input, config, "model_config")
      val trainLabelArr = trainingLabel.toArray
      var trainTotalError : Double = 0
      var i = 0
      // compute training error
      for (ex <- trainingExample) {
        val score = model.scoreItem(ex.example.get(0))
        val label = trainLabelArr(i)
        trainTotalError += math.abs(score - label)
        i += 1
      }
      val trainError = trainTotalError / trainingExample.size.toDouble
      // compute testing error
      val (testingExample, testingLabel) = TrainingTestHelper.makeRegressionExamples(25)
      val testLabelArr = testingLabel.toArray
      var testTotalError : Double = 0
      // compute training error
      i = 0
      for (ex <- testingExample) {
        val score = model.scoreItem(ex.example.get(0))
        val label = testLabelArr(i)
        testTotalError += math.abs(score - label)
        i += 1
      }
      val testError = testTotalError / testingExample.size.toDouble
      log.info("Training: Average absolute error = %f".format(trainError))
      log.info("Testing: Average absolute error = %f".format(testError))
      // MAE thresholds: tighter on train, slightly looser on held-out data.
      assertTrue(trainError < 3.0)
      assertTrue(testError < 3.5)
    } finally {
      sc.stop
      sc = null
      // To avoid Akka rebinding to the same port, since it doesn't unbind immediately on shutdown
      System.clearProperty("spark.master.port")
    }
  }
}
| ralic/aerosolve | training/src/test/scala/com/airbnb/aerosolve/training/MlpModelTrainerTest.scala | Scala | apache-2.0 | 7,641 |
package com.novocode.squery
import java.sql.ResultSet
import com.novocode.squery.session.{Session, CloseableIterator, ReadAheadIterator, PositionedResult}
import com.novocode.squery.sql.QueryBuilder
/**
 * Executes a query against a JDBC session and maps each result row through
 * `mapper`. Instances are created via the companion object and further
 * transformed with [[mapResult]].
 *
 * @param q      the query to execute
 * @param mapper converts the raw column value of each row into the result type
 */
class QueryInvoker[T,R] private (q: Query[ConvertableColumn[T]], mapper: T => R) {

  /** The SELECT statement, built once (lazily) from the query AST. */
  lazy val selectStatement = new QueryBuilder(q).buildSelect

  /** Reads the current row of the positioned result and maps it to R. */
  def convertResult(rs: PositionedResult): R = {
    val qr = q.value.getResult(rs)
    mapper(qr)
  }

  /** Executes the query and returns the first row, or None if there are no rows. */
  def first(implicit session: Session): Option[R] = {
    var res: Option[R] = None
    foreach({ x => res = Some(x) }, 1)
    res
  }

  /** Executes the query and returns all rows, in result-set order. */
  def list(implicit session: Session): List[R] = {
    var xs:List[R] = Nil
    foreach({ x => xs = x :: xs }, 0)
    // BUG FIX: the accumulator is built by prepending, so it ends up in
    // reverse result-set order; reverse it to restore the order in which the
    // database returned the rows.
    xs.reverse
  }

  /** Executes the query, applying f to every row (no row limit). */
  def foreach(f: R => Unit)(implicit session: Session): Unit = foreach(f, 0)

  /**
   * Executes the query, applying f to up to `maxRows` rows (0 = unlimited).
   * If the statement is not a result-set query, f receives the update count.
   */
  private[this] def foreach(f: R => Unit, maxRows: Int)(implicit session: Session): Unit = {
    //TODO Support multiple results
    val st = session.allocPS(selectStatement)
    try {
      st.setMaxRows(maxRows)
      if(st.execute) {
        var count = 0
        val rs = new PositionedResult(st.getResultSet)
        while(rs.next && (maxRows == 0 || count < maxRows)) {
          f(convertResult(rs))
          count += 1
        }
      } else f(mapper(st.getUpdateCount.asInstanceOf[T]))
    } finally session.freePS(selectStatement, st)
  }

  /**
   * Executes the query and returns a lazy iterator over the rows. The
   * underlying statement is released either when the iterator is exhausted or
   * when close() is called explicitly; callers must do one of the two.
   */
  def elements(implicit session: Session): CloseableIterator[R] = {
    //TODO Support multiple results
    val st = session.allocPS(selectStatement)
    var doClose = true
    try {
      st.setMaxRows(0)
      if(st.execute) {
        val rs = new PositionedResult(st.getResultSet)
        // Ownership of the statement passes to the iterator below.
        doClose = false
        new ReadAheadIterator[R] with CloseableIterator[R] {
          def close() = session.freePS(selectStatement, st)
          protected def fetchNext() = {
            if(rs.next) Some(convertResult(rs))
            else { close(); None }
          }
        }
      } else {
        // Non-query statement: expose the update count as a one-element iterator.
        val r = mapper(st.getUpdateCount.asInstanceOf[T])
        new CloseableIterator[R] {
          private var hasnext = true
          def hasNext: Boolean = hasnext
          def next(): R =
            if (hasnext) { hasnext = false; r }
            else throw new NoSuchElementException("next on empty iterator")
          def close {}
        }
      }
    } finally if(doClose) session.freePS(selectStatement, st)
  }

  /** Returns a new invoker whose results are additionally mapped through f. */
  def mapResult[U](f: (R => U)) = new QueryInvoker[T,U](q, { v:T => f(mapper(v)) })
}

object QueryInvoker {
  /** Creates an invoker whose mapper is the identity function. */
  def apply[T](q: Query[ConvertableColumn[T]]) = new QueryInvoker[T,T](q, { v:T => v })
}
| gnufied/squery | src/com/novocode/squery/QueryInvoker.scala | Scala | bsd-2-clause | 2,559 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.T
import com.intel.analytics.bigdl.dllib.utils.serializer.ModuleSerializationTest
import scala.util.Random
/** Serialization round-trip test for the NarrowTable layer. */
class NarrowTableSerialTest extends ModuleSerializationTest {
  override def test(): Unit = {
    // NarrowTable(offset = 1, length = 1): narrows the input table to one element.
    val narrowTable = NarrowTable[Float](1, 1)
    val input = T()
    // NOTE(review): keys are Doubles (1.0, 2.0, 3.0) rather than Ints —
    // presumably the table treats them equivalently; confirm if touched.
    input(1.0) = Tensor[Float](2, 2).apply1(e => Random.nextFloat())
    input(2.0) = Tensor[Float](2, 2).apply1(e => Random.nextFloat())
    input(3.0) = Tensor[Float](2, 2).apply1(e => Random.nextFloat())
    // Serializes the module, reloads it and compares outputs on `input`.
    runSerializationTest(narrowTable, input)
  }
}
| intel-analytics/BigDL | scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/nn/NarrowTableSpec.scala | Scala | apache-2.0 | 1,273 |
package spinoco.fs2.cassandra.system
import java.nio.ByteBuffer
/**
 * C* 3+ schema for columns.
 *
 * Field names mirror the columns of Cassandra's `system_schema.columns` table.
 */
case class ColumnSchema(
  keyspace_name: String          // keyspace owning the table
  , table_name: String           // table the column belongs to
  , column_name: String          // column name as text
  , clustering_order: String     // clustering order ("asc"/"desc"/"none") — per system_schema
  , column_name_bytes: ByteBuffer // raw column name bytes
  , kind:String                  // column kind (e.g. partition_key / clustering / regular)
  , position: Int                // position within its kind
  , `type`: String               // CQL type of the column
)
/**
 * C* v2 columns.
 *
 * Field names mirror the columns of Cassandra 2.x's
 * `system.schema_columns` table, including its secondary-index metadata.
 */
case class ColumnSchemaV2(
  keyspace_name :String            // keyspace owning the column family
  , columnfamily_name :String      // column family (table) name
  , column_name : String           // column name
  , component_index : Option[Int]  // component position, when part of a composite
  , index_name :Option[String]     // secondary index name, if indexed
  , index_options :Option[String]  // secondary index options, if any
  , index_type :Option[String]     // secondary index type, if any
  , `type` :String                 // column kind in the v2 schema
  , validator :String              // validator class describing the value type
)
| Spinoco/fs2-cassandra | core/src/main/scala/spinoco/fs2/cassandra/system/ColumnSchema.scala | Scala | mit | 626 |
package io.youi.material
/** A Material Design icon referenced by its ligature name. */
case class MaterialIcon(name: String) {
  /** True when no icon name has been set. */
  def isEmpty: Boolean = name.isEmpty
  /** True when an icon name is present. */
  def nonEmpty: Boolean = !isEmpty
}
} | outr/youi | gui/src/main/scala/io/youi/material/MaterialIcon.scala | Scala | mit | 145 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.tracking
import java.util.NoSuchElementException
import akka.actor.ActorRef
import akka.pattern.ask
import cmwell.common.StatusTracking
import cmwell.ctrl.config.Jvms
import cmwell.domain.Infoton
import cmwell.driver.Dao
import cmwell.irw.IRWService
import cmwell.util.FullBox
import cmwell.util.concurrent.{retry, travector}
import cmwell.zcache.L1Cache
import cmwell.zstore.ZStore
import com.typesafe.scalalogging.LazyLogging
import k.grid.{Grid, GridJvm}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}
/**
* Created by yaakov on 3/15/17.
*/
/**
 * API for tracking the progress of ingest requests: each tracked request is
 * backed by a tracking actor identified by a [[TrackingId]], and paths move
 * through statuses (InProgress / PartialDone / Done / Evicted).
 */
trait TrackingUtil {
  val zStore: ZStore
  val irw: IRWService

  /** Spawns a tracking actor for a new request and returns its ref plus the TrackingId. */
  def spawn(actorName: String,
            data: Set[String] = Set.empty,
            createTime: Long = System.currentTimeMillis()): Future[(ActorRef, TrackingId)]

  /** Records one or more path status changes against the given tracking id. */
  def update(trackingId: TrackingId)(pss: PathStatus*): Future[TrackingOperationResponse]

  /** Reads the current per-path statuses for the given tracking id. */
  def readStatus(trackingId: TrackingId): Future[Seq[PathStatus]]

  def debug(trackingId: TrackingId)(msg: Any): Unit

  // Shared response for any string tid that does not parse into a TrackingId.
  private val malformedTidResponse = Future.successful[TrackingOperationResponse](LogicalFailure("malformed tid"))

  // done is syntactic sugar for update(Done) with Strings
  def done(tid: String, path: String): Future[TrackingOperationResponse] =
    TrackingId
      .unapply(tid)
      .fold(malformedTidResponse)(update(_)(PathStatus(path, Done)))

  /** Records one completed part (of st.numOfParts) for the given path. */
  def update(path: String, st: StatusTracking): Future[TrackingOperationResponse] =
    TrackingId
      .unapply(st.tid)
      .fold(malformedTidResponse)(update(_)(PathStatus(path, PartialDone(1, st.numOfParts))))

  def cleanDirtyData(data: Seq[PathStatus], createTime: Long): Future[Seq[PathStatus]]

  def updateEvicted(path: String, st: StatusTracking, reason: String): Future[TrackingOperationResponse] =
    updateEvicted(path, st.tid, reason)

  def updateEvicted(path: String, tid: String, reason: String): Future[TrackingOperationResponse] =
    TrackingId
      .unapply(tid)
      .fold(malformedTidResponse)(update(_)(PathStatus(path, Evicted(reason))))

  // todo merge PathsStatus and StatusTracking case classes...
  /** Applies all part-updates for a path; the first logical failure wins. */
  def updateSeq(path: String, statusTrackings: Seq[StatusTracking]): Future[TrackingOperationResponse] =
    statusTrackings
      .map(update(path, _))
      .foldLeft(Future.successful(Successful)) {
        case (f1, f2) => f1.zip(f2).map(_._1)
      }
      .recoverWith { case t: NoSuchElementException => Future.successful(LogicalFailure(t.getMessage)) }

  /**
   * Extracts the actor id (the last path segment, without any `#uid` suffix)
   * from a full actor path.
   *
   * {{{
   * scala> cmwell.tracking.TrackingUtil().actorIdFromActorPath("akka://tracking/user/LTIyNzkyMzc1Mg")
   * res0: String = LTIyNzkyMzc1Mg
   * }}}
   */
  def actorIdFromActorPath[T: Stringer](actorPath: T): String = {
    val stringActorPath = implicitly[Stringer[T]].stringify(actorPath)
    stringActorPath.drop(stringActorPath.lastIndexOf('/') + 1).takeWhile(_ != '#')
  }
}
/** Type class for obtaining a string form of a value (used for actor paths). */
trait Stringer[T] {
  def stringify(t: T): String
}

object Stringer {
  // Strings pass through unchanged.
  implicit val identityStringer = new Stringer[String] {
    override def stringify(s: String): String = s
  }
  // ActorRefs are rendered in full serialization format (address + path + uid).
  implicit val actorRefStringer = new Stringer[ActorRef] {
    override def stringify(ar: ActorRef): String = ar.path.toSerializationFormat
  }
}
/** Entry point exposing the default [[TrackingUtil]] implementation. */
object TrackingUtil {
  def zStore: ZStore = TrackingUtilImpl.zStore
  def apply(): TrackingUtil = TrackingUtilImpl
}
/**
 * Default [[TrackingUtil]] implementation backed by a Cassandra-based zStore/IRW
 * and grid actors. Tracking actors live on the WS JVMs and are resolved (and,
 * if dead, resurrected via the "Resurrector" service) on demand.
 */
object TrackingUtilImpl extends TrackingUtil with LazyLogging {

  private lazy val dao: Dao =
    Dao(Settings.irwServiceDaoClusterName, Settings.irwServiceDaoKeySpace2, Settings.irwServiceDaoHostName)
  override lazy val zStore: ZStore = ZStore.apply(dao)
  override lazy val irw: IRWService = IRWService.newIRW(dao)

  implicit val timeout = akka.util.Timeout(3.seconds)

  private lazy val system = Grid.system

  // Maps a fire-and-collect Future[Unit] into the public response type;
  // a NoSuchElementException (actor could not be resolved) becomes a LogicalFailure.
  private val toOpResponse: Future[Unit] => Future[TrackingOperationResponse] =
    _.map { _ =>
      Successful
    }.recover { case t: NoSuchElementException => LogicalFailure(t.getMessage) }

  /** Asks the Resurrector to spawn a new tracking actor and derives its TrackingId. */
  def spawn(actorName: String, initialData: Set[String], createTime: Long): Future[(ActorRef, TrackingId)] = {
    Try {
      (resurrector ? Spawn(actorName, initialData, Seq.empty[PathStatus], createTime)).map {
        case ta: ActorRef =>
          ta -> TrackingId(actorIdFromActorPath(ta), createTime)
      }
    } match {
      case Success(x) => x
      case Failure(e) => logger.error(s"Tracking: failed to spawn", e); throw e
    }
  }

  /** Resolves the tracking actor and fires each path status at it (fire-and-forget). */
  def update(trackingId: TrackingId)(pss: PathStatus*): Future[TrackingOperationResponse] = {
    val result = resolveActor(trackingId).map(actor => pss.foreach(actor.!))
    toOpResponse(result)
  }

  def readStatus(trackingId: TrackingId): Future[Seq[PathStatus]] =
    resolveActor(trackingId).flatMap { actor =>
      logger.debug(s"Tracking: Going to Read from $actor...")
      (actor ? Read).mapTo[Seq[PathStatus]]
    }

  def debug(trackingId: TrackingId)(msg: Any): Unit =
    resolveActor(trackingId).foreach(_ ! msg)

  // Actor resolution memoized by token via an in-process L1 cache (256 entries,
  // 8s TTL); on resolution failure the actor is resurrected from persisted state.
  private val resolveActor = {
    val task = (trackingId: TrackingId) => {
      val TrackingId(actorAddr, _) = trackingId
      logger.debug(s"Resolving Actor: $actorAddr")
      Grid.selectActor(actorIdFromActorPath(actorAddr), GridJvm(Jvms.WS)).resolveOne().recoverWith {
        case t: Throwable =>
          logger.warn(s"Tracking: Could not resolve TrackingActor($actorAddr), will try to resurrect.", t)
          ressurect(trackingId)
      }
    }
    L1Cache.memoize[TrackingId, ActorRef](task)(_.token)(l1Size = 256, ttlSeconds = 8)
  }

  /** Asks the Resurrector (up to 3 times, 10ms apart) to bring the actor back to life. */
  private def ressurect(trackingId: TrackingId): Future[ActorRef] = {
    retry(3, 10.millis)((resurrector ? Resurrect(trackingId)).mapTo[Option[ActorRef]].andThen {
      case Failure(t) =>
        logger.warn(
          s"Tracking: [ressurect.Failure] Could not resolve TrackingActor($trackingId), will try to resurrect.",
          t
        )
    }).flatMap {
      case Some(ref) => Future.successful(ref)
      case None => Future.failed(new NoSuchElementException("could not resolve actor"))
    }
  }

  /**
   * Reconciles InProgress entries against the actual infoton store: any path
   * whose infoton was modified after createTime and has an indexTime is
   * considered Done; everything else is returned unchanged.
   */
  def cleanDirtyData(data: Seq[PathStatus], createTime: Long): Future[Seq[PathStatus]] = {
    def isNew(i: Infoton) = (i.lastModified.isAfter(createTime)) && i.indexTime.isDefined

    val (inProgress, notInProgress) = data.partition(_.status == InProgress)

    if (inProgress.isEmpty) Future.successful(data)
    else {
      val inProgressPaths: Vector[String] = inProgress.map(_.path)(collection.breakOut)
      val infotonsFut = travector(inProgressPaths)(irw.readPathAsync(_))
      val alreadyDonePathsFut = infotonsFut.map { infotons =>
        infotons.collect {
          case FullBox(infoton) if isNew(infoton) => infoton.path
        }.toSet
      }
      alreadyDonePathsFut.map { alreadyDonePaths =>
        notInProgress ++ alreadyDonePaths.map(PathStatus(_, Done)) ++ inProgress.filterNot(
          ps => alreadyDonePaths(ps.path)
        )
      }
    }
  }

  // Grid-wide service responsible for spawning/resurrecting tracking actors.
  private def resurrector = Grid.serviceRef("Resurrector")
}
/** Result of a tracking operation: either it succeeded or failed for a stated reason. */
sealed trait TrackingOperationResponse
case object Successful extends TrackingOperationResponse // in future, might as well be case class Success(value), if needed
case class LogicalFailure(reason: String) extends TrackingOperationResponse
| bryaakov/CM-Well | server/cmwell-tracking/src/main/scala/cmwell/tracking/TrackingUtil.scala | Scala | apache-2.0 | 7,876 |
/**
* Copyright (C) 2013 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.fr.relational.crud
import java.sql.Connection
import org.orbeon.oxf.util.ScalaUtils._
import org.orbeon.oxf.fr.relational.{ForDocument, Specific, Next, Unspecified}
import org.orbeon.oxf.util.{LoggerFactory, IndentedLogger}
import org.orbeon.oxf.fr.{FormRunnerPersistence, FormRunner}
import org.orbeon.oxf.webapp.HttpStatusCodeException
import org.orbeon.scaxon.XML._
import org.orbeon.saxon.om.DocumentInfo
trait Common extends RequestResponse with FormRunnerPersistence {
implicit val Logger = new IndentedLogger(LoggerFactory.createLogger(classOf[CRUD]), "")
/**
* Finds in the database what form version is used for an app/form and optional document id:
*
* 1. If only the app/form are provided, it returns the latest version of a published, non-deleted, form.
* We wouldn't want to return a version of a deleted published form, as running a GET for the latest
* form definition for an app/form, we would here return the version of a deleted form (and then 404 as we
* can't find that form in the database).
* 2. If the document id is provided, it returns the form version used for that document. We could return the
* form version for a deleted document, but decided against it for consistency with what we do when returning
* the form version for app/form. (A benefit of returning the version of a deleted data is that this could
* allow Form Runner to return a 510 to the browser. Without it, since Form Runner starts by reading the form
* definition, it will fail if that version isn't found. But this isn't a real benefit since right now the
* Page Flow Controller doesn't know how to return a 510.)
*/
def formVersion(connection: Connection, app: String, form: String, docId: Option[String]): Option[Int] = {
val versionResult = {
val table = s"orbeon_form_${if (docId.isEmpty) "definition" else "data"}"
val ps = connection.prepareStatement(
s"""|SELECT max(t.form_version)
|FROM $table t,
| (
| SELECT max(last_modified_time) last_modified_time, app, form, form_version
| FROM $table
| WHERE app = ?
| AND form = ?
| ${docId.map(_ ⇒ "and document_id = ?").getOrElse("")}
| GROUP BY app, form, form_version
| ) m
|WHERE ${joinColumns(Seq("last_modified_time", "app", "form", "form_version"), "t", "m")}
| AND t.deleted = 'N'
|""".stripMargin)
ps.setString(1, app)
ps.setString(2, form)
docId.foreach(ps.setString(3, _))
val rs = ps.executeQuery()
rs.next(); rs
}
val version = versionResult.getInt(1)
if (versionResult.wasNull()) None else Some(version)
}
/**
* For every request, there is a corresponding specific form version number. In the request, that specific version
* can be specified, but the caller can also say that it wants the next version, the latest version, or the version
* of the form used to create a specific document. This function finds the specific form version corresponding to
* the request.
*/
def requestedFormVersion(connection: Connection, req: Request): Int = {
def latest = formVersion(connection, req.app, req.form, None)
req.version match {
case Unspecified ⇒ latest.getOrElse(1)
case Next ⇒ latest.map(_ + 1).getOrElse(1)
case Specific(v) ⇒ v
case ForDocument(docId) ⇒ formVersion(connection, req.app, req.form, Some(docId))
.getOrElse(throw new HttpStatusCodeException(404))
}
}
// List of columns that identify a row; always app/form, plus version/document/draft/file
// columns depending on whether the request targets a form, data, or an attachment
def idColumns(req: Request): List[String] = {
  val always = List(Some("app"), Some("form"))
  val conditional = List(
    req.forForm       option "form_version",
    req.forData       option "document_id",
    req.forData       option "draft",
    req.forAttachment option "file_name"
  )
  (always ::: conditional).flatten
}
// Same identifying columns, rendered comma-separated for interpolation into SQL
def idColumnsList(req: Request): String = idColumns(req).mkString(", ")
/** Equi-join predicate over `cols` between table aliases `t1` and `t2`, e.g. "t.a = m.a AND t.b = m.b". */
def joinColumns(cols: Seq[String], t1: String, t2: String) =
  (for (col <- cols) yield s"$t1.$col = $t2.$col").mkString(" AND ")
// Loads the form metadata for the request's app/form. The form definition is expected to
// exist at this point, so a missing one fails fast (ensuring throws AssertionError)
def readFormMetadata(req: Request): DocumentInfo =
  readFormMetadata(req.app, req.form).ensuring(_.isDefined, "can't find form metadata for data").get
// Given a user/group name coming from the data, tells us what operations we can do in this data, assuming that
// it is for the current request app/form
def authorizedOperations(formMetadata: DocumentInfo, dataUserGroup: Option[(String, String)]): Set[String] =
  (formMetadata / "forms" / "form" / "permissions").headOption match {
    case None ⇒
      // No permissions element in the metadata: everything is allowed
      Set("create", "read", "update", "delete")
    case Some(permissionsEl) ⇒
      dataUserGroup match {
        case Some((username, groupname)) ⇒
          FormRunner.allAuthorizedOperations(permissionsEl, username, groupname).toSet
        case None ⇒
          FormRunner.authorizedOperationsBasedOnRoles(permissionsEl).toSet
      }
  }
}
| martinluther/orbeon-forms | src/main/scala/org/orbeon/oxf/fr/relational/crud/Common.scala | Scala | lgpl-2.1 | 6,250 |
package dstructures
/** Maps features into original strings
  *
  * @constructor create a new FeatureMap with key
  * @param k key/feature
  */
class FeatureMap(k: String) extends KeyStore {
  val key = k
  // Cached element count, kept in sync by addString
  var listlength = 0
  // Strings are kept in insertion order
  var strings = List[String]()

  /** Appends a string and bumps the cached length. */
  def addString(s: String) = {
    strings = strings :+ s
    listlength += 1
  }

  def getStrings(): List[String] = strings

  /** All stored strings whose feature count equals `le`. */
  def getStringsOfLength(le: Int): List[String] =
    strings.filter(candidate => featureCount(candidate) == le)

  def length(): Int = listlength

  /** Number of features attributed to a string: its character length plus two. */
  def featureCount(s: String): Int = s.length + 2

  override def toString(): String = key
}
/** Maps keys and count of occurences
  *
  * @constructor create a key with count
  * @param k Key
  * @param c Count
  */
class CountMap(k: String, c: Int = 0) extends KeyStore with MaxHeapable {
  val key = k;
  var count = c;
  override def value = count;
  /** Increments the occurrence count by one. */
  def ++() = {
    count += 1;
  }
  def canEqual(a: Any) = a.isInstanceOf[CountMap]
  override def equals(a: Any) =
    a match {
      // canEqual was previously defined but never used; wire it into equals
      case a: CountMap => a.canEqual(this) && a.key == key && a.count == count;
      case _ => false
    }
  // Fix: equals was overridden without hashCode, violating the Object contract
  // (equal instances must have equal hash codes). NOTE: count is mutable, so a
  // CountMap must not be mutated while used as a key in a hash-based collection.
  override def hashCode(): Int = 31 * key.hashCode + count
  override def toString(): String = key+":"+count
}
} | theikkila/fuzzydb | src/main/scala/dstructures/FeatureMap.scala | Scala | apache-2.0 | 1,127 |
/*
* Copyright (c) 2013-2014 Erik van Oosten
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.grons.metrics.scala
import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.{Gauge => CHGauge}
/**
 * Builds and registers metrics against a Dropwizard `MetricRegistry`, scoping every
 * metric name under `baseName`.
 */
class MetricBuilder(val baseName: MetricName, val registry: MetricRegistry) {

  /**
   * Registers a new gauge metric.
   *
   * @param name the name of the gauge
   * @param scope the scope of the gauge or null for no scope
   */
  def gauge[A](name: String, scope: String = null)(f: => A): Gauge[A] = {
    val underlying = new CHGauge[A] { def getValue: A = f }
    new Gauge[A](registry.register(metricName(name, scope), underlying))
  }

  /**
   * Creates a new counter metric.
   *
   * @param name the name of the counter
   * @param scope the scope of the counter or null for no scope
   */
  def counter(name: String, scope: String = null): Counter = {
    val fullName = metricName(name, scope)
    new Counter(registry.counter(fullName))
  }

  /**
   * Creates a new histogram metric.
   *
   * @param name the name of the histogram
   * @param scope the scope of the histogram or null for no scope
   */
  def histogram(name: String, scope: String = null): Histogram = {
    val fullName = metricName(name, scope)
    new Histogram(registry.histogram(fullName))
  }

  /**
   * Creates a new meter metric.
   *
   * @param name the name of the meter
   * @param scope the scope of the meter or null for no scope
   */
  def meter(name: String, scope: String = null): Meter = {
    val fullName = metricName(name, scope)
    new Meter(registry.meter(fullName))
  }

  /**
   * Creates a new timer metric.
   *
   * @param name the name of the timer
   * @param scope the scope of the timer or null for no scope
   */
  def timer(name: String, scope: String = null): Timer = {
    val fullName = metricName(name, scope)
    new Timer(registry.timer(fullName))
  }

  // Fully-qualified metric name: baseName + name (+ optional scope)
  private[this] def metricName(name: String, scope: String = null): String =
    baseName.append(name, scope).name
}
| scullxbones/metrics-scala | src/main/scala/nl/grons/metrics/scala/MetricBuilder.scala | Scala | apache-2.0 | 2,402 |
/*
* Copyright 2015 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.csv.enumeratum.values
import kantan.codecs.enumeratum.laws.discipline.EnumeratedString
import kantan.csv.enumeratum.arbitrary._
import kantan.csv.laws.discipline.{CellCodecTests, DisciplineSuite, RowCodecTests}
// Law-checking suite: verifies that EnumeratedString round-trips through kantan.csv
// codecs, both as an individual cell and as a whole row
class StringEnumCodecTests extends DisciplineSuite {
  checkAll("CellCodec[EnumeratedString]", CellCodecTests[EnumeratedString].codec[String, Float])
  checkAll("RowCodec[EnumeratedString]", RowCodecTests[EnumeratedString].codec[String, Float])
}
| nrinaudo/tabulate | enumeratum/shared/src/test/scala/kantan/csv/enumeratum/values/StringEnumCodecTests.scala | Scala | mit | 1,077 |
package scala.tools.scalap
import java.io.ByteArrayInputStream
import java.nio.charset.StandardCharsets
import java.lang.StringBuilder
import org.apache.bcel.classfile._
import scala.reflect.internal.pickling.ByteCodecs
import scala.tools.scalap.scalax.rules.scalasig.{ByteCode, ScalaSig, ScalaSigAttributeParsers, ScalaSigPrinter}
/**
* @author Alefas
* @since 11/09/15
*/
object Decompiler {
  // Fix: removed the unused private constants UTF8 ("UTF-8") and SOURCE_FILE ("SourceFile");
  // nothing in this object referenced them.
  private val SCALA_SIG = "ScalaSig"
  private val SCALA_SIG_ANNOTATION = "Lscala/reflect/ScalaSignature;"
  private val SCALA_LONG_SIG_ANNOTATION = "Lscala/reflect/ScalaLongSignature;"
  private val BYTES_VALUE = "bytes"

  // Raw UTF-8 bytes of "ScalaSig": used as a cheap marker scan before any class file parsing
  private val scalaSigBytes = SCALA_SIG.getBytes(StandardCharsets.UTF_8)

  /** Cheap pre-check: does the raw class file even mention "ScalaSig"? */
  private def hasScalaSigBytes(content: Array[Byte]): Boolean = containsSubArray(content, scalaSigBytes)

  private def isScalaSignatureAnnotation(entry: AnnotationEntry) = {
    val annType = entry.getAnnotationType
    annType == SCALA_SIG_ANNOTATION || annType == SCALA_LONG_SIG_ANNOTATION
  }

  // Extracts the annotation's string payload as bytes. @ScalaSignature carries a single
  // string; @ScalaLongSignature carries an array of strings which are concatenated.
  private def toBytes(elemValue: ElementValue): Array[Byte] = {
    def simpleToBytes(sv: SimpleElementValue) = sv.getValueString.getBytes(StandardCharsets.UTF_8)
    elemValue match {
      case sv: SimpleElementValue => simpleToBytes(sv)
      case arr: ArrayElementValue =>
        val fromSimpleValues = arr.getElementValuesArray.collect {
          case sv: SimpleElementValue => simpleToBytes(sv)
        }
        Array.concat(fromSimpleValues: _*)
    }
  }

  private def parseScalaSig(entry: AnnotationEntry) = {
    val bytesValue = entry.getElementValuePairs.find(_.getNameString == BYTES_VALUE)
    bytesValue match {
      case Some(v) =>
        val bytes = toBytes(v.getValue)
        // ByteCodecs.decode decodes in place and returns the decoded length
        val length = ByteCodecs.decode(bytes)
        Some(ScalaSigAttributeParsers.parse(ByteCode(bytes.take(length))))
      case _ => None
    }
  }

  /**
   * Decompiles a class file if it carries a Scala signature annotation.
   *
   * @param fileName name of the .class file (also used for package-object detection)
   * @param bytes    raw class file content
   * @return (source file name from the class file, decompiled source text),
   *         or None when no ScalaSig marker/annotation is present
   */
  def decompile(fileName: String, bytes: Array[Byte]): Option[(String, String)] = {
    if (!hasScalaSigBytes(bytes)) return None
    val parsed = new ClassParser(new ByteArrayInputStream(bytes), fileName).parse()
    val scalaSig =
      parsed.getAnnotationEntries
        .find(isScalaSignatureAnnotation)
        .flatMap(parseScalaSig)
    scalaSig.map { sig =>
      val decompiledSourceText = decompiledText(fileName, sig)
      val sourceFileName = parsed.getSourceFileName
      (sourceFileName, decompiledSourceText)
    }
  }

  private def decompiledText(fileName: String, scalaSig: ScalaSig) = {
    val printer = new ScalaSigPrinter(new StringBuilder, false)
    val syms = scalaSig.topLevelClasses ::: scalaSig.topLevelObjects
    // Print package with special treatment for package objects
    syms.head.parent match {
      //Partial match
      case Some(p) if p.name != "<empty>" =>
        val path = p.path
        val isPackageObject = fileName == "package.class"
        if (!isPackageObject) {
          printer.print("package ")
          printer.print(ScalaSigPrinter.processName(path))
          printer.print("\\n")
        } else {
          // For a package object the declared package is the parent of its own path
          val i = path.lastIndexOf(".")
          if (i > 0) {
            printer.print("package ")
            printer.print(ScalaSigPrinter.processName(path.substring(0, i)))
            printer.print("\\n")
          }
        }
      case _ =>
    }
    // Print classes
    for (c <- syms) {
      printer.printSymbol(c)
    }
    printer.result
  }

  // Naive O(n*m) subsequence search; class files are small enough that this is fine here
  private def containsSubArray(text: Array[Byte], word: Array[Byte]): Boolean = {
    if (text.length < word.length || word.length == 0) return false
    var wordStartIdx = 0
    var innerIdx = 0
    while (wordStartIdx <= text.length - word.length) {
      while(innerIdx < word.length && text(wordStartIdx + innerIdx) == word(innerIdx)) {
        innerIdx += 1
      }
      if (innerIdx == word.length) return true
      else {
        wordStartIdx += 1
        innerIdx = 0
      }
    }
    false
  }
}
| loskutov/intellij-scala | scalap/src/scala/tools/scalap/Decompiler.scala | Scala | apache-2.0 | 3,948 |
package akka.persistence.pg.query
import java.util.concurrent.TimeUnit
import akka.NotUsed
import akka.actor.ActorRef
import akka.persistence.pg._
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{RunnableGraph, Sink}
import akka.util.Timeout
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.time.{Milliseconds, Seconds, Span}
import scala.util.Random
/**
 * uses the RowIdUpdating write strategy and will use the "rowid" column of the journal
 * table for queries
 */
class EventStoreQueryNotificationTest extends AbstractEventStoreTest
  with PgConfig
  with Eventually
  with ScalaFutures {

  override lazy val config: Config = ConfigFactory.load("pg-eventstore-rowid.conf")

  // Generous patience: events arrive asynchronously via the journal notification stream
  override implicit val patienceConfig = PatienceConfig(timeout = Span(20, Seconds), interval = Span(100, Milliseconds))

  implicit val materializer = ActorMaterializer()
  implicit val timeOut = Timeout(1, TimeUnit.MINUTES)

  // Total commands per test, spread randomly over `numActors` persistent actors
  val expected = 2000
  val numActors = 100
  // persistenceId -> actor; populated in beforeAll
  var actors: Map[String, ActorRef] = Map.empty

  test("query tagged events tagged with 'Altered'") {
    var events = List[E]()
    // Accumulates every streamed event into the local list
    val sink = Sink.foreach[E] { e =>
      events = events :+ e
    }
    val graph: RunnableGraph[NotUsed] = startSource[E](Set(TestTags.alteredTag), 0).to(sink)
    1 to expected foreach { i =>
      // Send each command to a randomly chosen actor
      actors.values.toSeq(Random.nextInt(actors.size)) ! alterCommand(i)
    }
    graph.run()
    println(s"query tagged events, expecting $expected events")
    eventually {
      println(events.size)
      // Near completion, print any gaps in the received sequence for debugging
      if (events.size >= expected - 5) checkConsecutive(events)
      events.size shouldBe expected
    }
  }

  test("query all events") {
    var events = List[E]()
    val sink = Sink.foreach[E] { e =>
      events = events :+ e
    }
    // No tag filter: stream every event from offset 0
    val graph: RunnableGraph[NotUsed] = startSource[E](0).to(sink)
    1 to expected foreach { i =>
      actors.values.toSeq(Random.nextInt(actors.size)) ! alterCommand(i)
    }
    graph.run()
    println(s"query all events, expecting $expected events")
    eventually {
      println(events.size)
      if (events.size >= expected - 5) checkConsecutive(events)
      events.size shouldBe expected
    }
  }

  test("query persistenceId events") {
    var events = List[E]()
    val sink = Sink.foreach[E] { e =>
      events = events :+ e
    }
    // Track how many of the randomly routed commands hit the chosen actor
    var expectedForPersistenceId = 0
    val index = Random.nextInt(actors.size)
    val persistenceId = actors.keys.toSeq(index)
    val graph: RunnableGraph[NotUsed] = startSource[E](persistenceId, 0).to(sink)
    1 to expected foreach { i =>
      val chosen = Random.nextInt(actors.size)
      if (chosen == index) expectedForPersistenceId += 1
      actors.values.toSeq(chosen) ! alterCommand(i)
    }
    graph.run()
    println(s"query persistenceId events, expecting $expectedForPersistenceId events")
    eventually {
      println(events.size)
      events should have size expectedForPersistenceId
    }
    // Cross-check the stream against a direct count in the journal table
    database.run(countEvents(persistenceId)).futureValue shouldEqual expectedForPersistenceId
  }

  override def beforeAll(): Unit = {
    super.beforeAll()
    // Spin up the pool of persistent actors, keyed by persistenceId
    actors = (1 to numActors map { i: Int =>
      val pid = s"TestActor-$i"
      pid -> createActor(pid)
    }).toMap
  }

  type E = TestActor.Event

  def alterCommand(i: Int) = TestActor.Alter(i.toString)

  def createActor(pid: String): ActorRef = system.actorOf(TestActor.props(testProbe.ref, Some(pid)))

  // Debug helper: prints every adjacent pair of Altered ids that is not consecutive
  def checkConsecutive(events: List[E]): Unit = {
    events
      .collect { case TestActor.Altered(id, _) => id.toInt }
      .sorted
      .sliding(2)
      .find(l => if (l.size == 1) false else l.head+1 != l(1))
      .foreach(println)
  }
}
| kwark/akka-persistence-postgresql | modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/query/EventStoreQueryNotificationTest.scala | Scala | mit | 3,722 |
package ml.combust.mleap.core.feature
import ml.combust.mleap.core.Model
import ml.combust.mleap.core.types.{BasicType, ListType, StructType}
/**
 * Created by mikhail on 9/29/16.
 */
case class NGramModel(n: Int) extends Model {

  /** Slides a window of `n` tokens over the input and joins each complete window with spaces;
    * partial trailing windows are dropped. */
  def apply(value: Seq[String]): Seq[String] = {
    val fullWindows = value.iterator.sliding(n).withPartial(false)
    fullWindows.map(window => window.mkString(" ")).toSeq
  }

  override def inputSchema: StructType = StructType("input" -> ListType(BasicType.String)).get

  override def outputSchema: StructType = StructType("output" -> ListType(BasicType.String)).get
}
| combust/mleap | mleap-core/src/main/scala/ml/combust/mleap/core/feature/NGramModel.scala | Scala | apache-2.0 | 559 |
package net.surguy.olympic
import scala.collection.mutable._
import java.io.FileInputStream
import java.awt.{Frame, BorderLayout}
import processing.core.PApplet
import processing.core.PConstants
import com.hp.hpl.jena.ontology._
import com.hp.hpl.jena.rdf.model._
import pimpmyjena.Conversions._
/**
 * Visualize the medal position of countries versus another numeric measure (e.g. population, area, GDP).
 * This is using the Processing library, and heavily based on an example from the book Visualizing Data by Ben Fry.
 */
class Processing extends PApplet with ModelUser {
  // Background color (hex literal wraps to a negative Int, as Processing expects for ARGB)
  val bg = 0xEEEEEEEE
  val SIDE_PADDING = 30
  val ROW_SEPARATION = 18
  // Switch predicate/title pairs together to chart a different measure
  // val predicate = "areaKm"
  // val predicate = "hdi"
  val predicate = "gdpNominalPerCapita"
  // val title = "Medal position versus size of country (sq. km)"
  // val title = "Medal position versus human development index"
  val title = "Medal position versus nominal GDP per capita"

  val model = getFullModel()
  // Same country list, ranked two ways: by medal count and by the chosen measure
  val countriesByMedal = getCountriesWithMedals().sort( (c1,c2) => c1.medalCount > c2.medalCount )
  val countriesByValue = getCountriesWithMedals().sort( (c1,c2) => c1.population > c2.population )

  // String formatting doesn't seem to be nice in the Scala standard library at the moment - this is from the mailing list
  def format(s: String, x: Any*) = String.format(s, x.asInstanceOf[scala.runtime.BoxedObjectArray].unbox(x.getClass).asInstanceOf[Array[Object]])

  // Draws the whole slope chart once and saves it to "<predicate>.png"
  override def setup() {
    size(600, (countriesByMedal.size * ROW_SEPARATION) + 50)
    fill(128)
    background(bg)
    smooth()
    textFont(createFont("Georgia", 14))
    textAlign(PConstants.CENTER, PConstants.TOP)
    text(title, 300, 5)
    textFont(createFont("Georgia", 12))
    translate(SIDE_PADDING, SIDE_PADDING)
    (0 until countriesByMedal.size).foreach( pos => {
      val current = countriesByMedal(pos)
      val other = countriesByValue(pos)
      // Where the medal-ranked country sits in the value ranking
      val otherPos = countriesByValue.findIndexOf( c => c.name == current.name )
      val medalsY = pos*ROW_SEPARATION
      val otherY = otherPos*ROW_SEPARATION
      textAlign(PConstants.LEFT, PConstants.CENTER)
      text(current.name, 20, medalsY)
      textAlign(PConstants.RIGHT, PConstants.CENTER)
      text(current.medalCount, 130, medalsY)
      textAlign(PConstants.LEFT, PConstants.CENTER)
      val valDisplay = format("%,.0f", other.population)
      text(valDisplay, 350, medalsY)
      textAlign(PConstants.RIGHT, PConstants.CENTER)
      text(other.name, 500, medalsY)
      // Line thickness grows with how far the country moves between the two rankings
      val weight = (Math.abs(pos - otherPos) / countriesByMedal.size.asInstanceOf[Float]) * 3 + 1
      strokeWeight(weight)
      // Blue when the country ranks better on medals than on the measure, red otherwise
      if (pos<otherPos) stroke(33,85,156) else stroke(206,0,82)
      line(140, medalsY+4, 340, otherY+4)
    })
    save(predicate+".png")
  }

  // HashMap that returns defaultFn(key) instead of throwing for missing keys
  class DefaultDict[K, V](defaultFn: (K)=>V) extends HashMap[K, V] {
    override def default(key: K): V = return defaultFn(key)
  }

  // NOTE(review): `population` actually holds whatever measure `predicate` selected
  // (GDP, area, HDI, ...), not necessarily population — see getCountriesWithMedals
  case class Country(name:String, medalCount:Int, population: Float)

  // Builds one Country per medal-winning country, skipping those with no value for `predicate`
  def getCountriesWithMedals() = {
    val medals = new DefaultDict[String, Int](K => 0)
    model.individuals("medal").foreach( ind => {
      val c = ind("country").toString
      medals( c ) = medals( c ) + 1
    })
    var countries = List[Country]()
    var i = 0
    medals.foreach( item => {
      val (countryName,medalCount) = item
      val value = getValue(countryName, predicate)
      if (value!=0) {
        countries = Country(countryName, medalCount, value) :: countries
      }
    })
    countries
  }

  // Looks up `predicate` for the named country in the RDF model; 0.0f when absent
  def getValue(countryName : String, predicate: String) : Float = {
    val value = model.individuals("Country").
      filter(ind => ind("name").toString == countryName && ind(predicate)!=null).firstOption
    value match {
      case Some(ind) => println(ind(predicate).toString); java.lang.Float.parseFloat(ind(predicate).toString)
      case None => 0.0f
    }
  }
}
// Entry point launching the Processing sketch.
// Fix: replaced the deprecated `Application` trait (which runs user code inside the
// object initializer, with known initialization-order and JIT pitfalls) with an
// explicit main method.
object ProcessingTest {
  def main(args: Array[String]): Unit = {
    PApplet.main(Array[String]("net.surguy.olympic.Processing"))
  }
}
| inigo/olympic-mashup | src/main/scala/net/surguy/olympic/ProcessingVisualization.scala | Scala | gpl-3.0 | 3,994 |
import at.logic.gapt.language.fol._
import at.logic.gapt.proofs.lk._
object inductionExamples {

  // Variables and constants
  val (x, y, z) = (FOLVar("x"), FOLVar("y"), FOLVar("z"))
  // Eigenvariables used in the proof below
  val (a, b, c) = (FOLVar("α"), FOLVar("β"), FOLVar("γ"))
  val zero = FOLConst("0")

  // Successor and addition
  def S(x: FOLTerm) = FOLFunction("S", List(x))
  def plus(x: FOLTerm, y: FOLTerm) = FOLFunction("+", List(x, y))

  // Instances of addition axioms
  // add0: v + 0 = v
  def add0(v: FOLTerm) = Eq(plus(v, zero), v)
  // addS: u + S(v) = S(u + v)
  def addS(u: FOLTerm, v: FOLTerm) =
    Eq(
      plus(u, S(v)),
      S(plus(u, v))
    )

  // Instances of associativity and reflexivity
  def assoc(x: FOLTerm, y: FOLTerm, z: FOLTerm) = Eq(plus(plus(x, y), z), plus(x, plus(y, z)))
  def ref(t: FOLTerm) = Eq(t, t)

  // Universally quantified equations
  val ForAllAssoc = AllBlock(List(x, y, z), assoc(x, y, z))
  val ForAllAdd0 = All(x, add0(x))
  val ForAllAddS = AllBlock(List(x, y), addS(x, y))

  // --- Base case of the induction: derive assoc(α, β, 0) from the add0 axiom ---

  // Start from the tautology α + β = α + β
  val inductionBase1 =
    Axiom(
      Nil,
      List(ref(plus(a, b)))
    )

  // Rewrite the right-hand side using β + 0 = β
  val inductionBase2 =
    EquationRightRule(
      inductionBase1,
      inductionBase1.root.succedent.head,
      add0(b),
      Eq(plus(a, b), plus(a, plus(b, zero)))
    )

  // Rewrite the left-hand side using (α + β) + 0 = α + β, yielding assoc(α, β, 0)
  val inductionBase3 =
    EquationRightRule(
      inductionBase2,
      inductionBase2.root.succedent.head,
      add0(plus(a,b)),
      assoc(a, b, zero)
    )

  // Quantify the used add0 instances back to the universal axiom
  val inductionBase4 =
    ForallLeftRule(
      inductionBase3,
      inductionBase3.root.antecedent.head,
      ForAllAdd0,
      plus(a,b)
    )

  // NOTE(review): inductionBase5 appears unused — `inductionBase` below repeats the
  // same construction; confirm before removing
  val inductionBase5 = ContractionMacroRule(
    ForallLeftRule(
      inductionBase4,
      inductionBase4.root.antecedent.head,
      ForAllAdd0,
      b
    ))

  val inductionBase =
    ContractionMacroRule(
      ForallLeftRule(
        inductionBase4,
        inductionBase4.root.antecedent.head,
        ForAllAdd0,
        b
      )
    )

  // --- Step case: from assoc(α, β, γ) derive assoc(α, β, S(γ)) via the addS axiom ---

  // Start from the tautology (α + β) + S(γ) = (α + β) + S(γ)
  val inductionStep1 =
    Axiom(
      Nil,
      List(ref(plus(plus(a,b), S(c))))
    )

  // Rewrite with (α + β) + S(γ) = S((α + β) + γ)
  val inductionStep2 =
    ForallLeftBlock(
      EquationRightRule(
        inductionStep1,
        inductionStep1.root.succedent(0),
        addS(plus(a,b), c),
        Eq(plus(plus(a,b), S(c)), S(plus(plus(a,b),c)))
      ),
      ForAllAddS,
      List(plus(a,b), c)
    )

  // Apply the induction hypothesis assoc(α, β, γ) under the successor
  val inductionStep3 =
    EquationRightRule(
      inductionStep2,
      inductionStep2.root.succedent(0),
      assoc(a,b,c),
      Eq(plus(plus(a,b), S(c)), S(plus(a, plus(b,c))))
    )

  // Fold the successor back: S(α + (β + γ)) = α + S(β + γ)
  val inductionStep4 =
    ForallLeftBlock(
      EquationRightRule(
        inductionStep3,
        inductionStep3.root.succedent(0),
        addS(a, plus(b,c)),
        Eq(plus(plus(a,b), S(c)), plus(a, S(plus(b,c))))
      ),
      ForAllAddS,
      List(a, plus(b,c))
    )

  // And once more: α + S(β + γ) = α + (β + S(γ)), completing assoc(α, β, S(γ))
  val inductionStep5 =
    ForallLeftBlock(
      EquationRightRule(
        inductionStep4,
        inductionStep4.root.succedent(0),
        addS(b,c),
        Eq(plus(plus(a,b), S(c)), plus(a, plus(b,S(c))))
      ),
      ForAllAddS,
      List(b,c)
    )

  val inductionStep = ContractionMacroRule(inductionStep5)

  // Combine base and step with the induction rule, then universally close over α, β, γ
  val inductionProof =
    ForallRightBlock(
      InductionRule(
        inductionBase,
        inductionStep,
        assoc(a,b,c)
      ),
      ForAllAssoc,
      List(a,b,c)
    )
}
| gisellemnr/gapt | examples/simple/induction.scala | Scala | gpl-3.0 | 3,121 |
package services
import javax.inject.Singleton
import java.util.UUID
import com.google.inject.ImplementedBy
/**
 * A type declaring the interface that will be injectable.
 */
// @ImplementedBy gives Guice a default binding: injections of UUIDGenerator resolve to
// SimpleUUIDGenerator unless a module overrides it
@ImplementedBy(classOf[SimpleUUIDGenerator])
abstract class UUIDGenerator() {
  // Produces a UUID; implementations define the generation strategy
  def generate: UUID
}
/**
 * A simple implementation of UUIDGenerator that we will inject.
 */
@Singleton
class SimpleUUIDGenerator extends UUIDGenerator {
  // Delegates to java.util.UUID's type-4 (random) UUID generation
  def generate: UUID = UUID.randomUUID()
}
/*
* Random Access list.
* Copyright (C) 2014 Michael Thorsley
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see [http://www.gnu.org/licenses/].
*/
package com.eigenvektor.collections.immutable
import com.eigenvektor.collections.RandomAccessList.CompleteBinaryTree
/** An implementation of the Random Access List that is guaranteed to be immutable. */
final class RandomAccessList[+A] private[collections] (trees:List[CompleteBinaryTree[A]])
  extends com.eigenvektor.collections.RandomAccessList[A](trees) {

  // Factory hook for the base class: any list derived from an immutable list is
  // again an immutable RandomAccessList
  def newInstance[B](trees:List[CompleteBinaryTree[B]]) = new RandomAccessList[B](trees)
}
/*
* Copyright 2014 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s
package blazecore
import org.http4s.blaze.pipeline.MidStage
import org.http4s.blaze.util.Cancelable
import org.http4s.blaze.util.TickWheelExecutor
import java.util.concurrent.TimeoutException
import java.util.concurrent.atomic.AtomicReference
import scala.annotation.tailrec
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
/** Pipeline stage that fails the request with a TimeoutException if response headers
  * do not arrive within `timeout` of the first request write.
  */
private[http4s] final class ResponseHeaderTimeoutStage[A](
    timeout: FiniteDuration,
    exec: TickWheelExecutor,
    ec: ExecutionContext,
) extends MidStage[A, A] { stage =>
  // Completion callback installed via init(); invoked with the timeout error when the timer fires
  @volatile private[this] var cb: Callback[TimeoutException] = null

  // Currently scheduled timer; NoOpCancelable means "not armed yet"
  private val timeoutState = new AtomicReference[Cancelable](NoOpCancelable)

  override def name: String = "ResponseHeaderTimeoutStage"

  // Fired by the tick wheel when the timeout elapses: fail the callback and
  // remove this stage from the pipeline
  private val killSwitch = new Runnable {
    override def run(): Unit = {
      val t = new TimeoutException(s"Response header timeout after ${timeout.toMillis} ms.")
      logger.debug(t.getMessage)
      cb(Left(t))
      removeStage()
    }
  }

  // Reads pass straight through to the channel
  override def readRequest(size: Int): Future[A] =
    channelRead(size)

  // Writing (part of) the request arms the timer, if not already armed
  override def writeRequest(data: A): Future[Unit] = {
    setTimeout()
    channelWrite(data)
  }

  override def writeRequest(data: collection.Seq[A]): Future[Unit] = {
    setTimeout()
    channelWrite(data)
  }

  override protected def stageShutdown(): Unit = {
    cancelTimeout()
    logger.debug(s"Shutting down response header timeout stage")
    super.stageShutdown()
  }

  override def stageStartup(): Unit = {
    super.stageStartup()
    logger.debug(s"Starting response header timeout stage with timeout of ${timeout}")
  }

  // Must be called before the stage is used; wires in the error callback
  def init(cb: Callback[TimeoutException]): Unit = {
    this.cb = cb
    stageStartup()
  }

  // Arms the timer at most once via a CAS loop: only the transition away from
  // NoOpCancelable schedules the kill switch; a thread that loses the race
  // cancels its freshly created timer and retries
  private def setTimeout(): Unit = {
    @tailrec
    def go(): Unit = {
      val prev = timeoutState.get()
      if (prev == NoOpCancelable) {
        val next = exec.schedule(killSwitch, ec, timeout)
        if (!timeoutState.compareAndSet(prev, next)) {
          next.cancel()
          go()
        } else
          prev.cancel()
      }
    }
    go()
  }

  // Swap back to the unarmed sentinel and cancel whatever timer was pending
  private def cancelTimeout(): Unit =
    timeoutState.getAndSet(NoOpCancelable).cancel()
}
| http4s/http4s | blaze-core/src/main/scala/org/http4s/blazecore/ResponseHeaderTimeoutStage.scala | Scala | apache-2.0 | 2,812 |
/* Code Pulse: a real-time code coverage tool, for more information, see <http://code-pulse.com/>
*
* Copyright (C) 2014-2017 Code Dx, Inc. <https://codedx.com/>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.codedx.codepulse.hq.protocol
import java.io.DataOutputStream
import com.codedx.codepulse.agent.common.connect.Connection
import com.codedx.codepulse.agent.common.message.MessageProtocol
/** A Trait that describes an object that can send ControlMessages to a connection.
  *
  * One of the motivating purposes for this trait is to hide the fact that messages
  * might be buffered in the underlying stream, so that callers don't need to know
  * that they would normally need to explicitly `flush` the stream.
  */
trait ControlMessageSender {

  /** Send any number of control messages via the given `connection`, ensuring
    * synchronized access to the connection's output stream, and making sure
    * to flush the stream afterward.
    */
  def sendMessages(connection: Connection)(messages: ControlMessage*): Unit = {
    val out = connection.output
    // Synchronize on the stream so interleaved writers cannot corrupt the message framing
    out.synchronized {
      try {
        //write each message
        for (m <- messages) writeMessage(out, m)
      } finally {
        //flush the stream: control messages shouldn't need to wait for any buffering
        // Fix: flush() is side-effecting, so call it with parentheses per Scala convention
        out.flush()
      }
    }
  }

  /** Write an individual message to the given `out` stream. This method must be implemented
    * by concrete implementations of `ControlMessageSender`, and will generally have a close
    * coupling with a specific [[MessageProtocol]].
    * This method is called from within [[sendMessages]].
    */
  protected def writeMessage(out: DataOutputStream, message: ControlMessage): Unit
}
package extruder.core
import cats.data.NonEmptyList
import extruder.core.ValidationErrorsToThrowable.defaultValidationErrorsThrowable
import extruder.data.{ValidationError, ValidationErrors}
import org.scalacheck.{Arbitrary, Gen}
import org.scalatest.funsuite.AnyFunSuite
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks
class ValidationErrorsToThrowableSuite extends AnyFunSuite with ScalaCheckDrivenPropertyChecks {
  import ValidationErrorsToThrowableSuite._

  test("Converts a non-empty list of validation errors to throwables") {
    forAll { li: ValidationErrors =>
      val th = defaultValidationErrorsThrowable.convertErrors(li)
      // The head error becomes the throwable itself and the tail its suppressed
      // exceptions, so the messages must line up one-to-one and in order
      assert(li.map(_.message).toList === (th :: th.getSuppressed.toList).map(_.getMessage))
    }
  }
}
object ValidationErrorsToThrowableSuite {
  // Generator: each alphanumeric message is expanded into three ValidationError
  // variants (failure, missing, exception), so every generated non-empty list has
  // 3 * (1 + tail.size) entries with messages in a known order
  implicit val validationErrorsGen: Arbitrary[ValidationErrors] =
    Arbitrary(
      for {
        head <- Gen.alphaNumStr
        tail <- Gen.listOf(Gen.alphaNumStr)
      } yield
        NonEmptyList
          .of(head, tail: _*)
          .flatMap(
            msg =>
              NonEmptyList.of(
                ValidationError.failure(msg),
                ValidationError.missing(msg),
                ValidationError.exception(msg, new RuntimeException(msg))
              )
          )
    )
}
| janstenpickle/extruder | core/src/test/scala/extruder/core/ValidationErrorsToThrowableSuite.scala | Scala | mit | 1,311 |
package com.socrata.http.common.util
import java.io.InputStream
/** InputStream wrapper that throws TooMuchDataWithoutAcknowledgement once more than
  * `limit` bytes have been consumed since the last acknowledge().
  */
class AcknowledgeableInputStream(underlying: InputStream, limit: Long) extends InputStream with Acknowledgeable {
  // Bytes consumed since the last acknowledge(); the check happens before each
  // operation, so a single read may overshoot the limit before the next one throws
  private var readSoFar: Long = 0L

  // The `< 0` clause guards against Long overflow of the running total
  private def checkSize(): Unit = {
    if (readSoFar > limit || readSoFar < 0) throw new TooMuchDataWithoutAcknowledgement(limit)
  }

  /** Resets the unacknowledged byte count, allowing another `limit` bytes. */
  def acknowledge(): Unit = {
    readSoFar = 0L
  }

  def read(): Int = {
    checkSize()
    val byteRead = underlying.read()
    if (byteRead >= 0) readSoFar += 1
    byteRead
  }

  override def read(buf: Array[Byte], off: Int, len: Int): Int = {
    checkSize()
    val count = underlying.read(buf, off, len)
    if (count >= 0) readSoFar += count
    count
  }

  override def close(): Unit = underlying.close()

  override def skip(n: Long): Long = {
    checkSize()
    // Skipped bytes count toward the limit too; the request is capped at Int.MaxValue
    val skipped = underlying.skip(Math.min(n, Int.MaxValue))
    readSoFar += skipped
    skipped
  }

  // mark/reset not supported
}
| socrata-platform/socrata-http | socrata-http-common/src/main/scala/com/socrata/http/common/util/AcknowledgeableInputStream.scala | Scala | apache-2.0 | 940 |
/*
* Copyright © 2014 TU Berlin (emma@dima.tu-berlin.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.emmalanguage
package examples.ml.classification
import api.Meta.Projections._
import api._
import examples.ml.model._
import breeze.linalg.{Vector => Vec}
class SparkNaiveBayesIntegrationSpec extends BaseNaiveBayesIntegrationSpec with SparkAware {

  // Runs the shared NaiveBayes example through the `emma.onSpark` macro on a default
  // Spark session; the base spec drives this with concrete inputs
  def naiveBayes(input: String, lambda: Double, modelType: MType): Set[Model] =
    withDefaultSparkSession(implicit spark => emma.onSpark {
      // read the input: one CSV line per labeled vector, label first
      val data = for (line <- DataBag.readText(input)) yield {
        val record = line.split(",").map(_.toDouble)
        LVector(record.head, Vec(record.slice(1, record.length)))
      }
      // classification
      val result = NaiveBayes(lambda, modelType)(data)
      // collect the result locally
      result.collect().toSet[Model]
    })
}
| aalexandrov/emma | emma-examples/emma-examples-spark/src/test/scala/org/emmalanguage/examples/ml/classification/SparkNaiveBayesIntegrationSpec.scala | Scala | apache-2.0 | 1,400 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.expressions
import org.apache.spark.annotation.InterfaceStability
import org.apache.spark.sql.catalyst.expressions.ScalaUDF
import org.apache.spark.sql.Column
import org.apache.spark.sql.types.DataType
/**
* A user-defined function. To create one, use the `udf` functions in `functions`.
*
* As an example:
* {{{
* // Defined a UDF that returns true or false based on some numeric score.
* val predict = udf((score: Double) => if (score > 0.5) true else false)
*
* // Projects a column that adds a prediction column based on the score column.
* df.select( predict(df("score")) )
* }}}
*
* @since 1.3.0
*/
@InterfaceStability.Stable
case class UserDefinedFunction protected[sql] (
    f: AnyRef,
    dataType: DataType,
    inputTypes: Option[Seq[DataType]]) {

  // Extra metadata that is not part of the case-class constructor; it is
  // propagated manually by `copyAll` because the generated `copy` only
  // clones the constructor fields.
  private var _nameOption: Option[String] = None
  private var _nullable: Boolean = true
  private var _deterministic: Boolean = true

  /**
   * Returns true when the UDF can return a nullable value.
   *
   * @since 2.3.0
   */
  def nullable: Boolean = _nullable

  /**
   * Returns true iff the UDF is deterministic, i.e. the UDF produces the same output given the same
   * input.
   *
   * @since 2.3.0
   */
  def deterministic: Boolean = _deterministic

  /**
   * Returns an expression that invokes the UDF, using the given arguments.
   *
   * @since 1.3.0
   */
  def apply(exprs: Column*): Column = {
    Column(ScalaUDF(
      f,
      dataType,
      exprs.map(_.expr),
      inputTypes.getOrElse(Nil),
      udfName = _nameOption,
      nullable = _nullable,
      udfDeterministic = _deterministic))
  }

  /** Clones this UDF including the metadata the generated `copy` would drop. */
  private def copyAll(): UserDefinedFunction = {
    val udf = copy()
    udf._nameOption = _nameOption
    udf._nullable = _nullable
    udf._deterministic = _deterministic
    udf
  }

  /**
   * Updates UserDefinedFunction with a given name.
   *
   * @since 2.3.0
   */
  def withName(name: String): UserDefinedFunction = {
    val udf = copyAll()
    udf._nameOption = Option(name)
    udf
  }

  /**
   * Updates UserDefinedFunction to non-nullable.
   *
   * @since 2.3.0
   */
  def asNonNullable(): UserDefinedFunction = {
    if (!nullable) {
      this
    } else {
      val udf = copyAll()
      udf._nullable = false
      udf
    }
  }

  /**
   * Misspelled alias of [[asNonNullable]], kept so existing callers of the
   * original (misspelled) API keep compiling.
   *
   * @since 2.3.0
   */
  def asNonNullabe(): UserDefinedFunction = asNonNullable()

  /**
   * Updates UserDefinedFunction to nondeterministic.
   *
   * @since 2.3.0
   */
  def asNondeterministic(): UserDefinedFunction = {
    if (!_deterministic) {
      this
    } else {
      val udf = copyAll()
      udf._deterministic = false
      udf
    }
  }
}
| mike0sv/spark | sql/core/src/main/scala/org/apache/spark/sql/expressions/UserDefinedFunction.scala | Scala | apache-2.0 | 3,345 |
/*
Copyright 2013 Ilya Lakhin (Илья Александрович Лахин)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package name.lakhin.eliah.projects
package papacarlo.syntax.rules
import name.lakhin.eliah.projects.papacarlo.syntax._
import name.lakhin.eliah.projects.papacarlo.utils.Bounds
import name.lakhin.eliah.projects.papacarlo.syntax.Result._
/**
 * A rule that refers to another named rule of the grammar, with packrat
 * memoization and node-cache reuse.
 *
 * @param name name of the referred rule in `session.syntax.rules`
 * @param tag  optional product tag under which the produced node is recorded
 */
final case class ReferentialRule(name: String, tag: Option[String] = None)
  extends Rule {

  def apply(session: Session) = {
    session.syntax.onRuleEnter.trigger(this, session.state)

    // Memoization key: position + rule name + optional tag.
    val packratKey =
      session.relativeIndexOf(session.state.virtualPosition) +
        name +
        tag.map(":" + _).getOrElse("")

    val result = session.packrat.lift(packratKey) match {
      case Some(packrat) =>
        // Packrat hit: restore the memoized state, jump past the match.
        session.state =
          packrat.state.copy(virtualPosition = packrat.range.until)
        packrat.result

      case None =>
        val initialPosition = session.state.virtualPosition

        // Try the incremental node cache before actually running the rule.
        val result = session.getCache(initialPosition, name) match {
          case Some(cache) =>
            // Cache hit: skip to the end of the cached node and record it
            // as a product under the tag (if any).
            session.state = session.state.copy(
              virtualPosition = session.virtualIndexOf(
                cache.end.index -
                  session.sourceTokensOffset) + 1,
              products = tag
                .map(tag => (tag, cache) :: session.state.products)
                .getOrElse(session.state.products)
            )

            Successful

          case None => performReferredRule(session)
        }

        // Memoize the outcome for this position/rule/tag combination.
        session.packrat += Tuple2(
          packratKey,
          Packrat(
            name,
            Bounds(initialPosition, session.state.virtualPosition),
            result,
            session.state
          ))

        result
    }

    session.syntax.onRuleLeave.trigger(this, session.state, result)

    result
  }

  /** Runs the referred rule's body and, on success, builds (or reuses) the
   *  syntax node and merges state back into the caller's state. */
  private def performReferredRule(session: Session) = {
    var result = Failed

    val initialState = session.state

    for (rule <- session.syntax.rules.get(name)) {
      // The referred rule starts from a fresh state at the same position.
      session.state = State(virtualPosition = session.state.virtualPosition)

      result = rule.body(session)

      tag match {
        case Some(tag: String) =>
          if (result != Failed) {
            // Reuse the single produced node when possible (no caching, no
            // captures, exactly one "result" product); otherwise build a
            // new Node covering the matched token range.
            var node =
              (if (!rule.cachingFlag
                && session.state.captures.isEmpty
                && session.state.products.size == 1)
                session.state.products.headOption
                  .flatMap { product =>
                    if (product._1 == ReferentialRule.Result) Some(product._2)
                    else None
                  } else None).getOrElse {
                val begin = session.reference(
                  session
                    .relativeIndexOf(initialState.virtualPosition))
                val end = session.reference(
                  session
                    .relativeIndexOf(session.state.virtualPosition - 1))
                val node = new Node(rule.productKind, begin, end)
                node.cachable = rule.cachingFlag
                // Branch products are grouped by tag; order is reversed
                // because products were prepended during parsing.
                node.branches = session.state.products
                  .groupBy(_._1)
                  .mapValues(_.map(_._2).reverse)
                  .toMap
                node.references = session.state.captures
                  .groupBy(_._1)
                  .mapValues(_.map(_._2.iterator
                    .map(session.reference)).flatten)
                  .toMap
                node.producer = Some(rule.body)
                node
              }

            // Apply optional node transformers before recording the product.
            for (transformer <- rule.transformer) node = transformer(node)

            session.state = initialState.copy(
              virtualPosition = session.state.virtualPosition,
              products = (tag, node) :: initialState.products,
              issues = session.state.issues ::: initialState.issues
            )
          } else
            // Failure: keep the initial state but retain collected issues.
            session.state = initialState.copy(
              issues = session.state.issues :::
                initialState.issues)

        case None =>
          // Untagged reference: only the position and issues propagate.
          session.state = initialState.copy(
            virtualPosition = session.state.virtualPosition,
            issues = session.state.issues ::: initialState.issues
          )
      }
    }

    result
  }

  // Pretty-printed form: "tag -> @name" when tagged differently, "@name"
  // otherwise; the Int is a precedence marker.
  override val show = {
    val atom = "@" + name

    tag match {
      case Some(branch) if branch != name => branch + " -> " + atom -> 1
      case _ => atom -> Int.MaxValue
    }
  }

  override val captures = Set.empty[String]

  override val branches =
    tag.map(tag => Map(tag -> Set(name))).getOrElse(Map.empty)
}
object ReferentialRule {
  // Product tag used to detect a reusable single "result" node in
  // performReferredRule.
  val Result = "result"
}
| Eliah-Lakhin/papa-carlo | src/main/scala/name.lakhin.eliah.projects/papacarlo/syntax/rules/ReferentialRule.scala | Scala | apache-2.0 | 5,130 |
package com.github.basp1.pulsar
import akka.actor.{ActorRef, ActorSystem}
object Main {
  /**
   * Explicit entry point instead of `extends App`: the `App` trait relies on
   * `DelayedInit`, which is deprecated and has surprising initialization
   * order (vals are only initialized when `main` runs). Behavior at runtime
   * is identical: start the system, create a worker, send it two messages.
   */
  def main(args: Array[String]): Unit = {
    val system: ActorSystem = ActorSystem("main")
    val worker: ActorRef = system.actorOf(Worker.props())

    worker ! new Worker.Append(new Track(1L, 1000, 42, 0))
    worker ! new Worker.Time(1000)
  }
} | basp1/pulsar | src/main/scala/Main.scala | Scala | mit | 301 |
/*
* Copyright 2016 rdbc contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.rdbc.pgsql.core.internal.typeconv
import java.time._
import io.rdbc.pgsql.core.internal.typeconv.extractors._
import io.rdbc.pgsql.core.typeconv.PartialTypeConverter
private[typeconv] object LocalTimeTypeConverter
  extends PartialTypeConverter[LocalTime] {

  /** Runtime class this converter produces. */
  val cls = classOf[LocalTime]

  /** Extracts a [[LocalTime]] from `any` when the `LocalTimeVal` extractor
   *  matches; yields `None` for any other value. */
  def convert(any: Any): Option[LocalTime] =
    PartialFunction.condOpt(any) {
      case LocalTimeVal(lt) => lt
    }
}
| rdbc-io/rdbc-pgsql | rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/typeconv/LocalTimeTypeConverter.scala | Scala | apache-2.0 | 1,043 |
package domala.internal.macros.meta.args
import scala.meta._
/**
 * Parsed arguments of a `@TableGenerator` annotation. Each field keeps the
 * raw `scala.meta` tree (`Term.Arg`) so it can be spliced back into the
 * generated code unchanged; `implementer` is the generator implementation
 * type.
 */
case class TableGeneratorArgs(
  catalog: Term.Arg,
  schema: Term.Arg,
  table: Term.Arg,
  pkColumnName: Term.Arg,
  valueColumnName: Term.Arg,
  pkColumnValue: Term.Arg,
  initialValue: Term.Arg,
  allocationSize: Term.Arg,
  implementer: Type
)
object TableGeneratorArgs {
  /**
   * Scans `mods` for a `@TableGenerator(...)` annotation and extracts its
   * named arguments, falling back to defaults for the optional ones.
   * Returns `None` when the annotation is absent.
   *
   * NOTE(review): `className` is currently unused in this method.
   * NOTE(review): `pkColumnValue` has no default and is read with `.get`,
   * so an annotation without it fails with `NoSuchElementException` during
   * macro expansion — confirm this is the intended error reporting.
   */
  def of(mods: Seq[Mod], className: String): Option[TableGeneratorArgs] = {
    // Default for string-valued settings that were not supplied.
    val blank = q""" "" """
    mods.collectFirst {
      case mod"@TableGenerator(..$args)" =>
        // Each setting falls back to its default when absent.
        val catalog = args.collectFirst { case arg"catalog = $x" => x }.getOrElse(blank)
        val schema = args.collectFirst { case arg"schema = $x" => x }.getOrElse(blank)
        val table = args.collectFirst { case arg"table = $x" => x }.getOrElse(q""""ID_GENERATOR"""")
        val pkColumnName = args.collectFirst { case arg"pkColumnName = $x" => x }.getOrElse(q""""PK"""")
        val valueColumnName = args.collectFirst { case arg"valueColumnName = $x" => x }.getOrElse(q""""VALUE"""")
        val pkColumnValue = args.collectFirst { case arg"pkColumnValue = $x" => x }.get
        val initialValue = args.collectFirst { case arg"initialValue = $x" => x }.getOrElse(q"1")
        val allocationSize = args.collectFirst { case arg"allocationSize = $x" => x }.getOrElse(q"1")
        val implementer = args.collectFirst { case arg"implementer = classOf[$x]" => x }.getOrElse(t"org.seasar.doma.jdbc.id.BuiltinTableIdGenerator")
        TableGeneratorArgs(catalog, schema, table, pkColumnName, valueColumnName, pkColumnValue, initialValue, allocationSize, implementer)
    }
  }
}
| bakenezumi/domala | meta/src/main/scala/domala/internal/macros/meta/args/TableGeneratorArgs.scala | Scala | apache-2.0 | 1,600 |
package com.scalaenthusiasts
import java.io.File
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
case class WikipediaArticle(title: String, text: String) {
  /**
   * Whether the article's text mentions `lang` as a space-delimited word
   * (exact token match; substrings such as "Scalable" do not count).
   *
   * @param lang Language to look for (e.g. "Scala")
   */
  def mentionsLanguage(lang: String): Boolean =
    text.split(' ').exists(_ == lang)
}
object RDD_03 {

  /**
   * Actions vs Transformations
   * - transformation:
   *    - creates a "new" rdd from an existing one, e.g. map
   *    - lazily evaluated
   * - action:
   *    - returns a value to the driver program after running all transformations, e.g. collect
   * - intermediate RDD's are not cached unless Spark is told to do so
   * - Spark keeps track of the DAG of RDD dependencies
   * - actions:
   *    - count
   *    - collect
   *    - reduce
   * - transformations:
   *    - map
   *    - filter
   *    - join
   * - key/value RDDs
   *    - sortByKey
   *    - groupByKey
   *    - reduceByKey
   * - moving code (JVM)
   * - moving data (shuffling)
   *
   * Links
   * - http://spark.apache.org/docs/latest/rdd-programming-guide.html
   */
  // Local-mode Spark configuration; these vals run on object initialization.
  val conf: SparkConf = new SparkConf()
    .setAppName("Spark_03")
    .setMaster("local[*]")
    .set("spark.executor.memory","4g")

  // The entry point to programming Spark with the RDD API.
  val sc: SparkContext = new SparkContext(conf)

  // Languages whose popularity is ranked by article mentions.
  val langs = List(
    "JavaScript", "Java", "PHP", "Python", "C#", "C++", "Ruby", "CSS",
    "Objective-C", "Perl", "Scala", "Haskell", "MATLAB", "Clojure", "Groovy"
  )

  // RDD of parsed Wikipedia articles loaded from the classpath dataset.
  val wikiRdd: RDD[WikipediaArticle] = {
    def filePath = {
      val resource = this.getClass.getClassLoader.getResource("wikipedia/wikipedia.dat")
      if (resource == null) sys.error("Please download the dataset as explained in the assignment instructions")
      new File(resource.toURI).getPath
    }

    // Parses one raw "<page><title>...</title><text>...</text></page>" line.
    def parse(line: String): WikipediaArticle = {
      val subs = "</title><text>"
      val i = line.indexOf(subs)
      // Skip over "<page><title>
      val title = line.substring(14, i)
      // Then extract skipping over </title><text> and up to </text></page>",
      val text = line.substring(i + subs.length, line.length-16)
      WikipediaArticle(title, text)
    }

    // Load and parse the data using our helpers
    sc.textFile(filePath).map(parse)
  }

  /**
   * Returns the number of articles on which the language `lang` occurs.
   */
  def occurrencesOfLang(lang: String, rdd: RDD[WikipediaArticle]): Int = {
    // aggregate sums the per-article (article, 1) pairs into a single count
    rdd.filter(_.mentionsLanguage(lang)).map((_,1)).aggregate(0)(
      (acc, value) => acc + value._2,
      (acc1, acc2) => acc1 + acc2
    )
  }

  /* (1) Use `occurrencesOfLang` to compute the ranking of the languages
   *     (`val langs`) by determining the number of Wikipedia articles that
   *     mention each language at least once. Don't forget to sort the
   *     languages by their occurrence, in decreasing order!
   *
   *   Note: this operation is long-running. It can potentially run for
   *   several seconds.
   */
  def rankLangs(langs: List[String], rdd: RDD[WikipediaArticle]): List[(String, Int)] = {
    // One full pass over the RDD per language — the naive approach.
    val countLangs = langs.map(lang => (lang, occurrencesOfLang(lang, rdd)))
    val sorted = countLangs.sortBy { case (lang, count) => count } (Ordering[Int].reverse)
    sorted
  }

  /* Compute an inverted index of the set of articles, mapping each language
   * to the Wikipedia pages in which it occurs.
   */
  def makeIndex(langs: List[String], rdd: RDD[WikipediaArticle]): RDD[(String, Iterable[WikipediaArticle])] = {
    rdd.flatMap { article =>
      langs.filter(lang => article.mentionsLanguage(lang)).map(lang => (lang, article))
    }.groupByKey()
  }

  /* (2) Compute the language ranking again, but now using the inverted index. Can you notice
   *     a performance improvement?
   *
   *   Note: this operation is long-running. It can potentially run for
   *   several seconds.
   */
  def rankLangsUsingIndex(index: RDD[(String, Iterable[WikipediaArticle])]): List[(String, Int)] = {
    index
      .map { case (lang, articles) =>
        (lang,articles.size)
      }
      .sortBy(_._2, ascending = false)
      .collect()
      .toList
  }

  /* (3) Use `reduceByKey` so that the computation of the index and the ranking are combined.
   *     Can you notice an improvement in performance compared to measuring *both* the computation of the index
   *     and the computation of the ranking? If so, can you think of a reason?
   *
   *   Note: this operation is long-running. It can potentially run for
   *   several seconds.
   */
  def rankLangsReduceByKey(langs: List[String], rdd: RDD[WikipediaArticle]): List[(String, Int)] = {
    rdd
      .flatMap { article =>
        // Emit (lang, 1) for every language the article mentions.
        val mentionedLangs = langs.filter { lang => article.mentionsLanguage(lang) }
        val a = mentionedLangs.map { lang =>
          (lang, 1)
        }
        a
      }
      .reduceByKey(_ + _)
      .sortBy(_._2, ascending = false)
      .collect()
      .toList
  }

  /** Main function */
  def main(args: Array[String]): Unit = {

    /* Languages ranked according to (1) */
    val langsRanked: List[(String, Int)] = timed("Part 1: naive ranking", rankLangs(langs, wikiRdd))

    /* An inverted index mapping languages to wikipedia pages on which they appear */
    def index: RDD[(String, Iterable[WikipediaArticle])] = makeIndex(langs, wikiRdd)

    /* Languages ranked according to (2), using the inverted index */
    val langsRanked2: List[(String, Int)] = timed("Part 2: ranking using inverted index", rankLangsUsingIndex(index))

    /* Languages ranked according to (3) */
    val langsRanked3: List[(String, Int)] = timed("Part 3: ranking using reduceByKey", rankLangsReduceByKey(langs, wikiRdd))

    /* Output the speed of each ranking */
    println(timing)
    sc.stop()
  }

  // Accumulates the timing report lines printed at the end of main.
  val timing = new StringBuffer

  /** Runs `code`, records its wall-clock duration under `label`, and
   *  returns its result. */
  def timed[T](label: String, code: => T): T = {
    val start = System.currentTimeMillis()
    val result = code
    val stop = System.currentTimeMillis()
    timing.append(s"Processing $label took ${stop - start} ms.\\n")
    result
  }
}
| littlenag/scala-enthusiasts | spark-intro/src/main/scala/com/scalaenthusiasts/RDD_03.scala | Scala | mit | 6,226 |
package com.twitter.scalding
// Sealed ADT describing one documented command-line argument; the subtype
// determines how the key is rendered in usage/help output.
sealed trait DescribedArg {
  def key: String
  def description: String
}
// A key that must be supplied: rendered as "--key VALUE".
final case class RequiredArg(key: String, description: String) extends DescribedArg
// An optional key: rendered as "[--key VALUE]".
final case class OptionalArg(key: String, description: String) extends DescribedArg
// A key taking multiple values: rendered as "[--key VALUE VALUE2]".
final case class ListArg(key: String, description: String) extends DescribedArg
// A boolean flag: rendered as "[--key]".
final case class BooleanArg(key: String, description: String) extends DescribedArg

// Thrown after printing help when the user passes --help.
class HelpException extends RuntimeException("User asked for help")
// Thrown when validation finds supplied arg keys that were not described.
class DescriptionValidationException(msg: String) extends RuntimeException(msg)
trait ArgHelper {

  /**
   * Similar to describe but validate all args are described
   *
   * @param describedArgs List of Argument Descriptions
   * @param ex Input Execution
   * @return Output Execution
   */
  def validatedDescribe[T](describedArgs: Seq[DescribedArg], ex: Execution[T]): Execution[T] =
    Execution.getArgs.flatMap { args =>
      validatedDescribe(describedArgs, args)
      ex
    }

  /**
   * Describe a set of Args given Descriptions and validate all Args are described
   * @param describedArgs List of Argument Descriptions
   * @param args Job Arguments
   */
  def validatedDescribe(describedArgs: Seq[DescribedArg], args: Args): Unit = {
    describe(describedArgs, args)
    val documented = describedArgs.map(_.key).toSet
    val undocumented = args.m.keySet.filter(_.nonEmpty).diff(documented)
    if (undocumented.nonEmpty) {
      val msg = undocumented.mkString(", ")
      throw new DescriptionValidationException(s"Must describe missing keys : $msg")
    }
  }

  /**
   * Describe the Arguments of this Execution. By running --help the args will output
   * and the execution will end
   *
   * @param describedArgs List of Argument Descriptions
   * @param ex Input Execution
   * @return Output Execution
   */
  def describe[T](describedArgs: Seq[DescribedArg], ex: Execution[T]): Execution[T] =
    Execution.getArgs.flatMap { args =>
      describe(describedArgs, args)
      ex
    }

  /**
   * Describe a set of Args given Descriptions
   *
   * @param describedArgs List of Argument Descriptions
   * @param args Job Arguments
   */
  def describe(describedArgs: Seq[DescribedArg], args: Args): Unit =
    if (args.boolean("help")) helpRequest(describedArgs)

  /** Prints the usage banner plus detailed help, then aborts via HelpException. */
  def helpRequest(describedArgs: Seq[DescribedArg]): Nothing = {
    val top = "\\n###########################################################################\\n\\n"
    val usage = s"Command Line Args :: ${argString(describedArgs)}\\n\\n\\n"
    val bottom = "\\n\\n###########################################################################\\n"
    println(top + usage + help(describedArgs) + bottom)
    throw new HelpException()
  }

  /**
   * Command line arg string given the Described Args
   *
   * @param describedArgs List of Argument Descriptions
   * @return Command Line Parameters
   */
  private[this] def argString(describedArgs: Seq[DescribedArg]): String =
    // DescribedArg is sealed with exactly these four cases, so the match
    // is exhaustive; mkString concatenates with no separator.
    describedArgs.map {
      case RequiredArg(key, _) => s"--$key VALUE "
      case OptionalArg(key, _) => s"[--$key VALUE] "
      case ListArg(key, _) => s"[--$key VALUE VALUE2] "
      case BooleanArg(key, _) => s"[--$key] "
    }.mkString + "[--help]"

  /**
   * More detailed help command for these described arguments
   *
   * @param describedArgs List of Argument Descriptions
   * @return Detailed Help for the Args
   */
  private[this] def help(describedArgs: Seq[DescribedArg]): String =
    describedArgs.map {
      case RequiredArg(key, description) => s"--$key(Required) :: $description \\n"
      case OptionalArg(key, description) => s"--$key(Optional) :: $description \\n"
      case ListArg(key, description) => s"--$key(List) :: $description \\n"
      case BooleanArg(key, description) => s"--$key(Boolean) :: $description \\n"
    }.mkString + "--help :: Show this help message."
}
/** Ready-to-use singleton instance of [[ArgHelper]]. */
object ArgHelp extends ArgHelper
| tdyas/scalding | scalding-core/src/main/scala/com/twitter/scalding/ArgHelp.scala | Scala | apache-2.0 | 4,180 |
package at.logic.gapt.examples
import at.logic.gapt.expr._
import at.logic.gapt.expr.hol.universalClosure
import at.logic.gapt.proofs.{ Context, Sequent }
import at.logic.gapt.proofs.gaptic._
import at.logic.gapt.proofs.lk.LKProof
/**
* Monoid cancellation benchmark from
* Gregory Malecha and Jesper Bengtson: Extensible and Efficient Automation Through Reflective Tactics, ESOP 2016.
*/
object MonoidCancellation extends TacticsProof {
  // Monoid signature: a sort m, a binary operation *, and a unit 1,
  // with associativity, commutativity, and left-unit axioms.
  ctx += Context.Sort( "m" )
  ctx += hoc"'*': m>m>m"
  ctx += hoc"1: m"
  ctx += "mul_assoc" -> hcl":- (x*y)*z = x*(y*z)"
  ctx += "mul_comm" -> hcl":- x*y = y*x"
  ctx += "one_mul" -> hcl":- 1*x = x"

  // Proves the auxiliary cancellation lemmas and registers them as named
  // hypotheses available to the tactics below.
  val setup: Tactical[Unit] = {
    // Proves the universal closure of `formula` from the monoid axioms.
    def mkAux( formula: Formula ) =
      Lemma( Sequent() :+ ( "goal" -> universalClosure( formula ) ) ) {
        decompose
        foTheory
      }

    val plus_unit_p = mkAux( hof"a = b -> 1 * a = b" )
    val plus_assoc_p1 = mkAux( hof"a * (b * c) = d -> (a * b) * c = d" )
    val plus_assoc_p2 = mkAux( hof"b * (a * c) = d -> (a * b) * c = d" )
    val plus_comm_p = mkAux( hof"a * b = c -> b * a = c" )
    val plus_unit_c = mkAux( hof"a = b -> a = 1 * b" )
    val plus_assoc_c1 = mkAux( hof"d = a * (b * c) -> (a * b) * c = d" )
    val plus_assoc_c2 = mkAux( hof"d = b * (a * c) -> (a * b) * c = d" )
    val plus_comm_c = mkAux( hof"c = a * b -> c = b * a" )
    val plus_cancel = mkAux( hof"a = c -> b = d -> a * b = c * d" )

    Tactical {
      include( "plus_unit_p", plus_unit_p ) andThen
        include( "plus_assoc_p1", plus_assoc_p1 ) andThen
        include( "plus_assoc_p2", plus_assoc_p2 ) andThen
        include( "plus_comm_p", plus_comm_p ) andThen
        include( "plus_unit_c", plus_unit_c ) andThen
        include( "plus_assoc_c1", plus_assoc_c1 ) andThen
        include( "plus_assoc_c2", plus_assoc_c2 ) andThen
        include( "plus_comm_c", plus_comm_c ) andThen
        include( "plus_cancel", plus_cancel ) andThen
        skip
    }
  }

  // Normalizes the right-hand side of the goal equation.
  lazy val iterRight: Tactical[Unit] = Tactical {
    chain( "plus_unit_c" ) orElse
      chain( "plus_assoc_c1" ).andThen( iterRight ) orElse
      chain( "plus_assoc_c2" ).andThen( iterRight ) orElse
      chain( "plus_cancel" ).andThen( refl )
  }

  // Normalizes the left-hand side, falling back to commutativity.
  lazy val iterLeft: Tactical[Unit] = Tactical {
    chain( "plus_unit_p" ) orElse
      chain( "plus_assoc_p1" ).andThen( iterRight ) orElse
      chain( "plus_assoc_p2" ).andThen( iterRight ) orElse
      iterRight orElse chain( "plus_comm_p" ).andThen( iterRight )
  }

  // One cancellation step, trying the goal as-is and then commuted.
  lazy val cancel: Tactical[Unit] = Tactical {
    iterLeft orElse chain( "plus_comm_c" ).andThen( iterLeft )
  }

  // Full decision procedure: install the lemmas, then cancel until closed.
  val solve: Tactical[Unit] = Tactical {
    setup andThen
      repeat( refl orElse cancel )
  }

  // Sanity check that the procedure closes a simple rearrangement goal.
  Lemma( hols":- a*(b*c) = (b*a)*c" ) { solve }

  /** Builds the size-`n` benchmark goal: a right-nested product of
   *  x_n..x_0 equated against a differently-associated arrangement. */
  def benchmarkFormula( n: Int ): Formula = {
    def buildL( n: Int ): Expr = {
      val x = Var( s"x$n", TBase( "m" ) )
      if ( n == 0 ) x else le"$x * ${buildL( n - 1 )}"
    }
    def buildR( n: Int ): Expr = {
      val x = Var( s"x$n", TBase( "m" ) )
      if ( n == 0 ) x else le"${buildL( n - 1 )} * $x"
    }
    hof"${buildL( n )} = ${buildR( n )}"
  }

  /** Proves the size-`n` benchmark goal with `solve`. */
  def proveBenchmark( n: Int ): LKProof =
    Lemma( hols":- ${benchmarkFormula( n )}" ) { solve }

  /** Proves and context-checks the size-`n` benchmark. */
  def runBenchmark( n: Int ): Unit =
    ctx.check( proveBenchmark( n ) )
} | gebner/gapt | examples/gaptic/MonoidCancellation.scala | Scala | gpl-3.0 | 3,292 |
package org.ugr.sci2s.mllib.test
import org.apache.spark.mllib.classification.ClassificationModel
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.classification.SVMModel
import org.apache.spark.mllib.linalg._
// Adapter abstraction over MLlib classifiers so different algorithms can be
// driven through one interface; Serializable because instances are shipped
// to Spark executors.
trait ClassifierAdapter extends Serializable {

  /** Builds a classification model from `train` using the given
   *  string-keyed parameter map. */
  def classify (
      train: RDD[LabeledPoint],
      parameters: Map[String, String]): ClassificationModelAdapter

  /** Human-readable summary of the algorithm for the given parameters. */
  def algorithmInfo (parameters: Map[String, String]): String
}
/**
* Copyright (C) 2015-2016 Duncan DeVore. <https://github.com/ironfish/>
*/
package akka.persistence.mongo.journal
import akka.actor.ActorLogging
import akka.persistence.PersistentRepr
import akka.persistence.mongo.{CasbahCommon, CasbahRoot, CasbahJournalCommon}
import com.mongodb.casbah.Imports._
import scala.collection.immutable
import scala.util.{Try, Failure, Success}
object CasbahJournalRoot {
  import CasbahCommon._

  /**
   * Deserializes a stored journal entry into a PersistentRepr.
   * Entries carrying the "D" (deleted) marker yield None, as do entries
   * whose payload fails to deserialize.
   *
   * NOTE(review): the deserialization error is discarded here with no
   * logging — confirm silent drop is intended.
   */
  def dbObjectToPersistentRepr(dbObject: DBObject, f: (DBObject, String) =>
      Try[PersistentRepr]): Option[PersistentRepr] = {

    if (dbObject.as[String](markerKey) == "D") return None

    f(dbObject, messageKey) match {
      case Success(pr) =>
        Some(pr)
      case Failure(error) =>
        None
    }
  }

  /**
   * Serializes a PersistentRepr into the journal's DBObject layout
   * (persistenceId, sequenceNr, marker, payload bytes).
   * When serialization fails: fails the Try if
   * `rejectNonSerializableObjects` is set, otherwise stores an empty
   * payload so the entry is still written.
   */
  def persistentReprToDBObjectExecute(persistentRepr: PersistentRepr, f: PersistentRepr => Try[Array[Byte]])
      (implicit rejectNonSerializableObjects: Boolean): Try[DBObject] = {

    val errorMsg: String = "Unable to serialize payload for"
    val pidMsg: String = s"$persistenceIdKey: ${persistentRepr.persistenceId}"
    val snrMsg: String = s"$sequenceNrKey: ${persistentRepr.sequenceNr}"

    // "D" marks a deleted entry; empty marker means a live one.
    def marker(): String = if (persistentRepr.deleted) "D" else ""

    def toDBObject(data: Array[Byte]): DBObject = {
      val builder = MongoDBObject.newBuilder
      builder += persistenceIdKey -> persistentRepr.persistenceId
      builder += sequenceNrKey -> persistentRepr.sequenceNr
      builder += markerKey -> marker()
      builder += messageKey -> data
      builder.result()
    }

    f(persistentRepr) match {
      case Failure(error) if rejectNonSerializableObjects =>
        Failure(new Exception(s"$errorMsg $pidMsg, $snrMsg", error))
      case Failure(error) =>
        Success(toDBObject(Array.empty[Byte]))
      case Success(value) =>
        Success(toDBObject(value))
    }
  }

  /**
   * Deletes all entries of `persistenceId` up to and including
   * `toSequenceNr`. When the deletion reaches the highest stored sequence
   * number, a deleted-marker entry is re-inserted so the highest sequence
   * number survives the removal.
   */
  def deleteToExecute(collection: MongoCollection, concern: WriteConcern, persistenceId: String,
      toSequenceNr: Long, f: PersistentRepr => Try[Array[Byte]]): Unit = {

    val sequenceNbr = highestSequenceNrExecute(collection, persistenceId)

    collection.remove(MongoDBObject(
      persistenceIdKey -> persistenceId,
      sequenceNrKey -> MongoDBObject(lteKey -> toSequenceNr)), concern)

    if (toSequenceNr >= sequenceNbr) {
      val retainHighestSequenceNbr = PersistentRepr("D", sequenceNbr, persistenceId, deleted = true)
      val dbObject: DBObject =
        persistentReprToDBObjectExecute(retainHighestSequenceNbr, f)(rejectNonSerializableObjects = false).get
      persistExecute(collection, immutable.Seq(dbObject))
    }
  }

  /** Highest stored sequence number for `persistenceId`, or 0 when none. */
  def highestSequenceNrExecute(collection: MongoCollection, persistenceId: String): Long = {
    // Sort descending by sequenceNr and take the first entry.
    val cursor: MongoCursor = collection
      .find(MongoDBObject(persistenceIdKey -> persistenceId))
      .sort(MongoDBObject(sequenceNrKey -> -1)).limit(1)

    if (cursor.hasNext) cursor.next().getAs[Long](sequenceNrKey).get else 0L
  }

  /** Inserts the given journal entries into the collection. */
  def persistExecute(collection: MongoCollection, objects: immutable.Seq[DBObject]): WriteResult = {
    collection.insert(objects:_ *)
  }

  /**
   * Streams entries of `persistenceId` in [fromSequenceNr, toSequenceNr],
   * ordered by sequence number, limited to `maxNumberOfMessages`,
   * deserialized via `f` (deleted/broken entries are skipped).
   */
  def replayCursorExecute(collection: MongoCollection, persistenceId: String, fromSequenceNr: Long,
      toSequenceNr: Long, maxNumberOfMessages: Int, f: (DBObject, String) =>
      Try[PersistentRepr]): Iterator[PersistentRepr] = {

    val cursor: MongoCursor = collection.find(MongoDBObject(
      persistenceIdKey -> persistenceId,
      sequenceNrKey -> MongoDBObject(gteKey -> fromSequenceNr, lteKey -> toSequenceNr)))
      .sort(MongoDBObject(
        persistenceIdKey -> 1,
        sequenceNrKey -> 1))
      .limit(maxNumberOfMessages)

    cursor.flatMap(dbObject => dbObjectToPersistentRepr(dbObject, f))
  }
}
// Journal-side mixin: binds the static helpers above to this journal's
// serialization (toBytes/fromBytes) and sets up the required indexes.
trait CasbahJournalRoot extends CasbahRoot
  with CasbahJournalCommon { mixin : ActorLogging =>

  import CasbahJournalRoot._
  import CasbahCommon._

  private val replayDispatcherKey: String = "replay-dispatcher"

  // Dispatcher id used for replay, read from this plugin's config.
  protected lazy val replayDispatcherId: String = config.getString(replayDispatcherKey)

  // Ensures the (persistenceId, sequenceNr) and sequenceNr indexes exist.
  override protected def initialize(): Unit = {
    val indexOne: MongoDBObject = MongoDBObject(persistenceIdKey -> 1, sequenceNrKey -> 1)
    val indexTwo: MongoDBObject = MongoDBObject(sequenceNrKey -> 1)
    ensure(indexOne, indexOptions)(mongoCollection)
    ensure(indexTwo)(mongoCollection)
  }

  /** Serializes via this journal's `toBytes`. */
  protected def persistentReprToDBObject(persistentRepr: PersistentRepr)
      (implicit rejectNonSerializableObjects: Boolean): Try[DBObject] =
    persistentReprToDBObjectExecute(persistentRepr, toBytes)

  /** Deletes entries up to `toSequenceNr`, keeping the highest seq nr. */
  protected def deleteTo(collection: MongoCollection,concern: WriteConcern, persistenceId: String,
      toSequenceNr: Long): Unit =
    deleteToExecute(collection, concern, persistenceId, toSequenceNr, toBytes)

  /** Replays stored messages, deserializing via `fromBytes`. */
  def replayCursor(collection: MongoCollection, persistenceId: String, fromSequenceNr: Long,
      toSequenceNr: Long, maxNumberOfMessages: Int): Iterator[PersistentRepr] =
    replayCursorExecute(collection, persistenceId, fromSequenceNr, toSequenceNr, maxNumberOfMessages,
      fromBytes[PersistentRepr])
}
| ironfish/akka-persistence-mongo | src/main/scala/akka/persistence/mongo/journal/CasbahJournalRoot.scala | Scala | apache-2.0 | 5,044 |
package spire
package algebra
import java.lang.Double.{ isInfinite, isNaN, doubleToLongBits }
import java.lang.Long.{ numberOfTrailingZeros }
trait Field[@sp(Byte, Short, Int, Long, Float, Double) A] extends Any with EuclideanRing[A] with MultiplicativeAbGroup[A] {

  /**
   * This is implemented in terms of basic Field ops. However, this is
   * probably significantly less efficient than can be done with a specific
   * type. So, it is recommended that this method is overriden.
   *
   * This is possible because a Double is a rational number.
   */
  def fromDouble(a: Double): A = if (a == 0.0) {
    fromInt(0)
  } else {
    // Infinities and NaN have no exact rational representation.
    require(!isInfinite(a) && !isNaN(a),
      "Double must be representable as a fraction.")

    val bits = doubleToLongBits(a)
    // Mantissa with the implicit leading 1 bit restored.
    val m = bits & 0x000FFFFFFFFFFFFFL | 0x0010000000000000L
    // Strip trailing zero bits from the mantissa, compensating in the
    // exponent, so `value * 2^exp == |a|` with `value` odd.
    val zeros = numberOfTrailingZeros(m)
    val value = m >>> zeros
    val exp = ((bits >> 52) & 0x7FF).toInt - 1075 + zeros // 1023 + 52

    // `value` can exceed Int range, so split it into two 30-bit chunks and
    // recombine with field operations: high * 2^30 + low.
    val high = times(fromInt((value >>> 30).toInt), fromInt(1 << 30))
    val low = fromInt((value & 0x3FFFFFFF).toInt)
    val num = plus(high, low)
    // Scale by 2^exp using multiplication or division as appropriate.
    val unsigned = if (exp > 0) {
      times(num, pow(fromInt(2), exp))
    } else if (exp < 0) {
      div(num, pow(fromInt(2), -exp))
    } else {
      num
    }

    // Restore the sign at the end.
    if (a < 0) negate(unsigned) else unsigned
  }
}
object Field {
  /** Summons the implicit `Field[A]` instance. */
  @inline final def apply[A](implicit f: Field[A]): Field[A] = f
}
| tixxit/spire | core/shared/src/main/scala/spire/algebra/Field.scala | Scala | mit | 1,423 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.sql.catalyst.rules
import org.apache.spark.util.Utils
/**
* A collection of generators that build custom bytecode at runtime for performing the evaluation
* of catalyst expression.
*/
package object codegen {

  /**
   * A lock to protect invoking the scala compiler at runtime, since it is not thread safe in Scala
   * 2.10.
   */
  protected[codegen] val globalLock = org.apache.spark.sql.catalyst.ScalaReflectionLock

  /** Canonicalizes an expression so those that differ only by names can reuse the same code. */
  object ExpressionCanonicalizer extends rules.RuleExecutor[Expression] {
    val batches =
      Batch("CleanExpressions", FixedPoint(20), CleanExpressions) :: Nil

    // Strips aliases, which carry names but no evaluation semantics.
    object CleanExpressions extends rules.Rule[Expression] {
      def apply(e: Expression): Expression = e transform {
        case Alias(c, _) => c
      }
    }
  }

  /**
   * :: DeveloperApi ::
   * Dumps the bytecode from a class to the screen using javap.
   */
  @DeveloperApi
  object DumpByteCode {
    import scala.sys.process._

    // Temp directory the class files are written into before javap runs.
    val dumpDirectory = Utils.createTempDir()
    dumpDirectory.mkdir()

    def apply(obj: Any): Unit = {
      val generatedClass = obj.getClass
      // Generated classes live in the REPL's in-memory class loader, so the
      // bytes must be fetched from it rather than from disk.
      val classLoader =
        generatedClass
          .getClassLoader
          .asInstanceOf[scala.tools.nsc.interpreter.AbstractFileClassLoader]
      val generatedBytes = classLoader.classBytes(generatedClass.getName)

      val packageDir = new java.io.File(dumpDirectory, generatedClass.getPackage.getName)
      if (!packageDir.exists()) { packageDir.mkdir() }

      val classFile =
        new java.io.File(packageDir, generatedClass.getName.split("\\\\.").last + ".class")

      // NOTE(review): the stream is not closed in a finally block, so it
      // leaks if write() throws — debug-only utility, but worth confirming.
      val outfile = new java.io.FileOutputStream(classFile)
      outfile.write(generatedBytes)
      outfile.close()

      println(
        s"javap -p -v -classpath ${dumpDirectory.getCanonicalPath} ${generatedClass.getName}".!!)
    }
  }
}
| andrewor14/iolap | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/package.scala | Scala | apache-2.0 | 2,829 |
package org.jetbrains.plugins.scala.codeInsight.intention.types
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.{ScBindingPattern, ScTypedPattern, ScWildcardPattern}
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScParameter
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScFunctionDefinition, ScPatternDefinition, ScVariableDefinition}
/**
* Pavel.Fatin, 28.04.2010
*/
/**
 * Strategy implementation that, instead of changing code, reports the
 * message-bundle key describing what the corresponding action would do.
 */
class Description(message: String => Unit) extends Strategy {

  // Every callback funnels its bundle key into the single message sink.
  private def report(bundleKey: String): Unit = message(bundleKey)

  def addToFunction(function: ScFunctionDefinition): Unit =
    report("intention.type.annotation.function.add.text")

  def removeFromFunction(function: ScFunctionDefinition): Unit =
    report("intention.type.annotation.function.remove.text")

  def addToValue(value: ScPatternDefinition): Unit =
    report("intention.type.annotation.value.add.text")

  def removeFromValue(value: ScPatternDefinition): Unit =
    report("intention.type.annotation.value.remove.text")

  def addToVariable(variable: ScVariableDefinition): Unit =
    report("intention.type.annotation.variable.add.text")

  def removeFromVariable(variable: ScVariableDefinition): Unit =
    report("intention.type.annotation.variable.remove.text")

  def addToPattern(pattern: ScBindingPattern): Unit =
    report("intention.type.annotation.pattern.add.text")

  def addToWildcardPattern(pattern: ScWildcardPattern): Unit =
    report("intention.type.annotation.pattern.add.text")

  def removeFromPattern(pattern: ScTypedPattern): Unit =
    report("intention.type.annotation.pattern.remove.text")

  def addToParameter(param: ScParameter): Unit =
    report("intention.type.annotation.parameter.add.text")

  def removeFromParameter(param: ScParameter): Unit =
    report("intention.type.annotation.parameter.remove.text")
}
| LPTK/intellij-scala | src/org/jetbrains/plugins/scala/codeInsight/intention/types/Description.scala | Scala | apache-2.0 | 1,768 |
package hyperion
import scala.concurrent.duration.DurationInt
import scala.concurrent.ExecutionContext.Implicits.global
import akka.actor.{ActorSystem, DeadLetter, Props}
import akka.event.Logging
import akka.http.scaladsl.Http
import akka.pattern.{BackoffOpts, BackoffSupervisor}
import akka.stream.{ActorMaterializer, Materializer}
import hyperion.database.DatabaseActor
import hyperion.rest.HttpApi
/**
* Core is type containing the ``system: ActorSystem`` member. This enables us to use it in our
* apps as well as in our tests.
*/
trait Core {
protected implicit def system: ActorSystem
protected implicit def settings: AppSettingsImpl = AppSettings(system)
}
/**
* This trait implements ``Core`` by starting the required ``ActorSystem`` and registering the
* termination handler to stop the system when the JVM exits.
*/
trait BootedCore extends Core with HttpApi {
override protected implicit def system: ActorSystem = ActorSystem("hyperion")
private[this] implicit val materializer: Materializer = ActorMaterializer()
private[this] val log = Logging(system, getClass.getName)
log.info(s"Starting Hyperion at port ${settings.api.port}")
private[this] val bindingFuture = Http().newServerAt("0.0.0.0", settings.api.port).bindFlow(routes)
sys.addShutdownHook({
log.info("Shutting down Hyperion")
bindingFuture
.flatMap(_.unbind())
.onComplete(_ => system.terminate())
})
}
/**
* This trait contains the actors that make up the application; it can be mixed in with
* ``BootedCore`` for running code or ``TestKit`` for unit and integration tests.
*/
trait HyperionActors { this: Core =>
val deadLetterLogger = system.actorOf(DeadLetterLoggingActor.props(), "dead-letter-logging")
system.eventStream.subscribe(deadLetterLogger, classOf[DeadLetter])
val messageDistributor = system.actorOf(Props(new MessageDistributor()), "receiver")
val collectingActor = system.actorOf(Props(new CollectingActor(messageDistributor)), "collecting-actor")
val meterAgent = system.actorOf(Props(new MeterAgent(collectingActor)), "meter-agent")
val recentHistoryActor = system.actorOf(Props(new RecentHistoryActor(messageDistributor)), "recent-history")
val supervisor = BackoffSupervisor.props(
BackoffOpts.onStop(
childProps = Props(new DatabaseActor()),
childName = "database",
minBackoff = 5 seconds,
maxBackoff = 60 seconds,
randomFactor = 0.2
)
)
val databaseActor = system.actorOf(supervisor, "database-supervisor")
val dailyHistoryActor = system.actorOf(Props(new DailyHistoryActor(messageDistributor, databaseActor)), "daily-history")
val usageCalculationActor = system.actorOf(Props(new UsageCalculationActor(databaseActor)), "usage-calculation")
}
| mthmulders/hyperion | app/src/main/scala/hyperion/core.scala | Scala | mit | 2,768 |
/*
* @author Genc Mazlami
*
* Copyright 2013 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.dcop.vertices.id
class VariableId(num : Int) extends MaxSumId{
//Variable nodes have ids of the form V1, V2, ....
val id = "V" + num
} | gmazlami/dcop-maxsum | src/main/scala/com/signalcollect/dcop/vertices/id/VariableId.scala | Scala | apache-2.0 | 815 |
package com.freshsoft.matterbridge.routing
import akka.http.scaladsl.server.Directives.{path, _}
import akka.http.scaladsl.server.Route
import com.freshsoft.matterbridge.service.database.CodingLoveService
import model.{CodingLoveUpload, DatabaseEntityJsonSupport}
import scala.concurrent.ExecutionContext
/**
* The bot specific service routes
*/
class CodingLoveRoute(service: CodingLoveService)(implicit executionContext: ExecutionContext)
extends DatabaseEntityJsonSupport {
val route: Route = pathPrefix("codingLove") {
path("count") {
get {
complete {
service.count map (_.toString)
}
}
} ~
path("add") {
post {
entity(as[CodingLoveUpload]) { entity =>
complete {
service.add(entity.name, entity.gifUrl) map (_.toString)
}
}
}
} ~
path("exists" / Remaining) { p =>
get {
complete(service.exists(p) map (_.toString))
}
} ~
path(JavaUUID) { uuid =>
get {
complete(service.byId(uuid))
}
} ~
path("last") {
get {
complete(service.last)
}
} ~
path(Remaining) { name =>
get {
complete(service.byName(name))
}
}
}
}
| Freshwood/matterbridge | src/main/scala/com/freshsoft/matterbridge/routing/CodingLoveRoute.scala | Scala | mit | 1,314 |
package coffee.cypher.scalaexamples
object HelloWorld {
def main(args: Array[String]) {
println("Hello World (Scala)")
}
}
| Cypher121/CSCodeExamples | src/coffee/cypher/scalaexamples/HelloWorld.scala | Scala | mit | 140 |
import sbt._
object TestBuild extends Build {
val k1 = TaskKey[Unit]("k1")
val k2 = TaskKey[Unit]("k2")
val k3 = TaskKey[Unit]("k3")
val k4 = TaskKey[Unit]("k4")
val k5 = TaskKey[Unit]("k4")
lazy val root = Project("root", file("."))
}
| jkinkead/sbt | sbt/src/sbt-test/project/session-save/project/Build.scala | Scala | bsd-3-clause | 247 |
import com.typesafe.sbt.SbtSite.site
import sbt._
import Keys._
import com.typesafe.sbt._
object LibExifBuild extends Build {
val dependencies = Seq(
"org.scalatest" %% "scalatest" % "3.0.1" % "test" cross CrossVersion.binary
)
override lazy val settings = super.settings ++ Seq(
libraryDependencies ++= dependencies,
organization := "net.n12n.exif",
scalaVersion in ThisBuild := "2.11.4",
scalacOptions ++= Seq("-deprecation", "-unchecked", "-feature"),
scalacOptions in doc ++= Seq("-diagrams", "-doc-title Scala Exif Library", "-implicits"),
testOptions ++= Seq(Tests.Argument("-oSDW"))
)
lazy val slibexifSettings = Defaults.coreDefaultSettings ++ Seq(
name := "slibexif",
crossScalaVersions := Seq("2.10.4", "2.11.4", "2.12.1"),
publishTo := {
val nexus = "https://oss.sonatype.org/"
if (isSnapshot.value)
Some("snapshots" at nexus + "content/repositories/snapshots")
else
Some("releases" at nexus + "service/local/staging/deploy/maven2")
},
credentials += Credentials(Path.userHome / ".sbt" / "sonatype"),
pomExtra := <url>https://github.com/ngrossmann/slibexif</url>
<licenses>
<license>
<name>GNU LESSER GENERAL PUBLIC LICENSE, Version 3</name>
<url>http://www.gnu.org/licenses/lgpl.txt</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<url>https://github.com/ngrossmann/slibexif</url>
</scm>
<developers>
<developer>
<id>ngrossmann</id>
<name>Niklas Grossmann</name>
<url>https://github.com/ngrossmann</url>
</developer>
</developers>
)
lazy val slibexif = Project(id = "slibexif", base = file("."), settings = slibexifSettings).
settings(site.settings : _*).settings(site.includeScaladoc() : _*)
lazy val examples = Project(id = "examples", base = file("examples")).dependsOn(slibexif)
}
| ngrossmann/slibexif | project/Build.scala | Scala | gpl-3.0 | 1,984 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.template.similarproduct
import org.apache.predictionio.controller.P2LAlgorithm
import org.apache.predictionio.controller.Params
import org.apache.predictionio.data.storage.BiMap
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.mllib.recommendation.ALS
import org.apache.spark.mllib.recommendation.{Rating => MLlibRating}
import grizzled.slf4j.Logger
import scala.collection.mutable.PriorityQueue
case class ALSAlgorithmParams(
rank: Int,
numIterations: Int,
lambda: Double,
seed: Option[Long]) extends Params
class ALSModel(
val productFeatures: Map[Int, Array[Double]],
val itemStringIntMap: BiMap[String, Int],
val items: Map[Int, Item]
) extends Serializable {
@transient lazy val itemIntStringMap = itemStringIntMap.inverse
override def toString = {
s" productFeatures: [${productFeatures.size}]" +
s"(${productFeatures.take(2).toList}...)" +
s" itemStringIntMap: [${itemStringIntMap.size}]" +
s"(${itemStringIntMap.take(2).toString}...)]" +
s" items: [${items.size}]" +
s"(${items.take(2).toString}...)]"
}
}
/**
* Use ALS to build item x feature matrix
*/
class ALSAlgorithm(val ap: ALSAlgorithmParams)
extends P2LAlgorithm[PreparedData, ALSModel, Query, PredictedResult] {
@transient lazy val logger = Logger[this.type]
def train(sc: SparkContext, data: PreparedData): ALSModel = {
require(!data.viewEvents.take(1).isEmpty,
s"viewEvents in PreparedData cannot be empty." +
" Please check if DataSource generates TrainingData" +
" and Preprator generates PreparedData correctly.")
require(!data.items.take(1).isEmpty,
s"items in PreparedData cannot be empty." +
" Please check if DataSource generates TrainingData" +
" and Preprator generates PreparedData correctly.")
// create User and item's String ID to integer index BiMap
val userStringIntMap = BiMap.stringInt(data.viewEvents.map(_.user))
val itemStringIntMap = BiMap.stringInt(data.items.keys)
// collect Item as Map and convert ID to Int index
val items: Map[Int, Item] = data.items.map { case (id, item) =>
(itemStringIntMap(id), item)
}.collectAsMap.toMap
val mllibRatings = data.viewEvents
.map { r =>
// Convert user and item String IDs to Int index for MLlib
val uindex = userStringIntMap.getOrElse(r.user, -1)
val iindex = itemStringIntMap.getOrElse(r.item, -1)
if (uindex == -1)
logger.info(s"Couldn't convert nonexistent user ID ${r.user}"
+ " to Int index.")
if (iindex == -1)
logger.info(s"Couldn't convert nonexistent item ID ${r.item}"
+ " to Int index.")
((uindex, iindex), 1)
}.filter { case ((u, i), v) =>
// keep events with valid user and item index
(u != -1) && (i != -1)
}.reduceByKey(_ + _) // aggregate all view events of same user-item pair
.map { case ((u, i), v) =>
// MLlibRating requires integer index for user and item
MLlibRating(u, i, v)
}
.cache()
// MLLib ALS cannot handle empty training data.
require(!mllibRatings.take(1).isEmpty,
s"mllibRatings cannot be empty." +
" Please check if your events contain valid user and item ID.")
// seed for MLlib ALS
val seed = ap.seed.getOrElse(System.nanoTime)
val m = ALS.trainImplicit(
ratings = mllibRatings,
rank = ap.rank,
iterations = ap.numIterations,
lambda = ap.lambda,
blocks = -1,
alpha = 1.0,
seed = seed)
new ALSModel(
productFeatures = m.productFeatures.collectAsMap.toMap,
itemStringIntMap = itemStringIntMap,
items = items
)
}
def predict(model: ALSModel, query: Query): PredictedResult = {
val productFeatures = model.productFeatures
// convert items to Int index
val queryList: Set[Int] = query.items.map(model.itemStringIntMap.get(_))
.flatten.toSet
val queryFeatures: Vector[Array[Double]] = queryList.toVector
// productFeatures may not contain the requested item
.map { item => productFeatures.get(item) }
.flatten
val whiteList: Option[Set[Int]] = query.whiteList.map( set =>
set.map(model.itemStringIntMap.get(_)).flatten
)
val blackList: Option[Set[Int]] = query.blackList.map ( set =>
set.map(model.itemStringIntMap.get(_)).flatten
)
val ord = Ordering.by[(Int, Double), Double](_._2).reverse
val indexScores: Array[(Int, Double)] = if (queryFeatures.isEmpty) {
logger.info(s"No productFeatures vector for query items ${query.items}.")
Array[(Int, Double)]()
} else {
productFeatures.par // convert to parallel collection
.mapValues { f =>
queryFeatures.map{ qf =>
cosine(qf, f)
}.reduce(_ + _)
}
.filter(_._2 > 0) // keep items with score > 0
.seq // convert back to sequential collection
.toArray
}
val filteredScore = indexScores.view.filter { case (i, v) =>
isCandidateItem(
i = i,
items = model.items,
categories = query.categories,
queryList = queryList,
whiteList = whiteList,
blackList = blackList
)
}
val topScores = getTopN(filteredScore, query.num)(ord).toArray
val itemScores = topScores.map { case (i, s) =>
new ItemScore(
item = model.itemIntStringMap(i),
score = s
)
}
new PredictedResult(itemScores)
}
private
def getTopN[T](s: Seq[T], n: Int)(implicit ord: Ordering[T]): Seq[T] = {
val q = PriorityQueue()
for (x <- s) {
if (q.size < n)
q.enqueue(x)
else {
// q is full
if (ord.compare(x, q.head) < 0) {
q.dequeue()
q.enqueue(x)
}
}
}
q.dequeueAll.toSeq.reverse
}
private
def cosine(v1: Array[Double], v2: Array[Double]): Double = {
val size = v1.size
var i = 0
var n1: Double = 0
var n2: Double = 0
var d: Double = 0
while (i < size) {
n1 += v1(i) * v1(i)
n2 += v2(i) * v2(i)
d += v1(i) * v2(i)
i += 1
}
val n1n2 = (math.sqrt(n1) * math.sqrt(n2))
if (n1n2 == 0) 0 else (d / n1n2)
}
private
def isCandidateItem(
i: Int,
items: Map[Int, Item],
categories: Option[Set[String]],
queryList: Set[Int],
whiteList: Option[Set[Int]],
blackList: Option[Set[Int]]
): Boolean = {
whiteList.map(_.contains(i)).getOrElse(true) &&
blackList.map(!_.contains(i)).getOrElse(true) &&
// discard items in query as well
(!queryList.contains(i)) &&
// filter categories
categories.map { cat =>
items(i).categories.map { itemCat =>
// keep this item if has ovelap categories with the query
!(itemCat.toSet.intersect(cat).isEmpty)
}.getOrElse(false) // discard this item if it has no categories
}.getOrElse(true)
}
}
| himanshudhami/PredictionIO | examples/scala-parallel-similarproduct/no-set-user/src/main/scala/ALSAlgorithm.scala | Scala | apache-2.0 | 7,806 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.classification
import com.github.fommil.netlib.BLAS.{getInstance => blas}
import org.json4s.{DefaultFormats, JObject}
import org.json4s.JsonDSL._
import org.apache.spark.annotation.Since
import org.apache.spark.internal.Logging
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.linalg.{DenseVector, SparseVector, Vector, Vectors}
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.regression.DecisionTreeRegressionModel
import org.apache.spark.ml.tree._
import org.apache.spark.ml.tree.impl.GradientBoostedTrees
import org.apache.spark.ml.util._
import org.apache.spark.ml.util.DefaultParamsReader.Metadata
import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo}
import org.apache.spark.mllib.tree.model.{GradientBoostedTreesModel => OldGBTModel}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row}
import org.apache.spark.sql.functions._
/**
* Gradient-Boosted Trees (GBTs) (http://en.wikipedia.org/wiki/Gradient_boosting)
* learning algorithm for classification.
* It supports binary labels, as well as both continuous and categorical features.
*
* The implementation is based upon: J.H. Friedman. "Stochastic Gradient Boosting." 1999.
*
* Notes on Gradient Boosting vs. TreeBoost:
* - This implementation is for Stochastic Gradient Boosting, not for TreeBoost.
* - Both algorithms learn tree ensembles by minimizing loss functions.
* - TreeBoost (Friedman, 1999) additionally modifies the outputs at tree leaf nodes
* based on the loss function, whereas the original gradient boosting method does not.
* - We expect to implement TreeBoost in the future:
* [https://issues.apache.org/jira/browse/SPARK-4240]
*
* @note Multiclass labels are not currently supported.
*/
@Since("1.4.0")
class GBTClassifier @Since("1.4.0") (
@Since("1.4.0") override val uid: String)
extends ProbabilisticClassifier[Vector, GBTClassifier, GBTClassificationModel]
with GBTClassifierParams with DefaultParamsWritable with Logging {
@Since("1.4.0")
def this() = this(Identifiable.randomUID("gbtc"))
// Override parameter setters from parent trait for Java API compatibility.
// Parameters from TreeClassifierParams:
/** @group setParam */
@Since("1.4.0")
override def setMaxDepth(value: Int): this.type = set(maxDepth, value)
/** @group setParam */
@Since("1.4.0")
override def setMaxBins(value: Int): this.type = set(maxBins, value)
/** @group setParam */
@Since("1.4.0")
override def setMinInstancesPerNode(value: Int): this.type = set(minInstancesPerNode, value)
/** @group setParam */
@Since("1.4.0")
override def setMinInfoGain(value: Double): this.type = set(minInfoGain, value)
/** @group expertSetParam */
@Since("1.4.0")
override def setMaxMemoryInMB(value: Int): this.type = set(maxMemoryInMB, value)
/** @group expertSetParam */
@Since("1.4.0")
override def setCacheNodeIds(value: Boolean): this.type = set(cacheNodeIds, value)
/**
* Specifies how often to checkpoint the cached node IDs.
* E.g. 10 means that the cache will get checkpointed every 10 iterations.
* This is only used if cacheNodeIds is true and if the checkpoint directory is set in
* [[org.apache.spark.SparkContext]].
* Must be at least 1.
* (default = 10)
* @group setParam
*/
@Since("1.4.0")
override def setCheckpointInterval(value: Int): this.type = set(checkpointInterval, value)
/**
* The impurity setting is ignored for GBT models.
* Individual trees are built using impurity "Variance."
*
* @group setParam
*/
@Since("1.4.0")
override def setImpurity(value: String): this.type = {
logWarning("GBTClassifier.setImpurity should NOT be used")
this
}
// Parameters from TreeEnsembleParams:
/** @group setParam */
@Since("1.4.0")
override def setSubsamplingRate(value: Double): this.type = set(subsamplingRate, value)
/** @group setParam */
@Since("1.4.0")
override def setSeed(value: Long): this.type = set(seed, value)
// Parameters from GBTParams:
/** @group setParam */
@Since("1.4.0")
override def setMaxIter(value: Int): this.type = set(maxIter, value)
/** @group setParam */
@Since("1.4.0")
override def setStepSize(value: Double): this.type = set(stepSize, value)
/** @group setParam */
@Since("2.3.0")
override def setFeatureSubsetStrategy(value: String): this.type =
set(featureSubsetStrategy, value)
// Parameters from GBTClassifierParams:
/** @group setParam */
@Since("1.4.0")
def setLossType(value: String): this.type = set(lossType, value)
/** @group setParam */
@Since("2.4.0")
def setValidationIndicatorCol(value: String): this.type = {
set(validationIndicatorCol, value)
}
override protected def train(dataset: Dataset[_]): GBTClassificationModel = {
val categoricalFeatures: Map[Int, Int] =
MetadataUtils.getCategoricalFeatures(dataset.schema($(featuresCol)))
val withValidation = isDefined(validationIndicatorCol) && $(validationIndicatorCol).nonEmpty
// We copy and modify this from Classifier.extractLabeledPoints since GBT only supports
// 2 classes now. This lets us provide a more precise error message.
val convert2LabeledPoint = (dataset: Dataset[_]) => {
dataset.select(col($(labelCol)), col($(featuresCol))).rdd.map {
case Row(label: Double, features: Vector) =>
require(label == 0 || label == 1, s"GBTClassifier was given" +
s" dataset with invalid label $label. Labels must be in {0,1}; note that" +
s" GBTClassifier currently only supports binary classification.")
LabeledPoint(label, features)
}
}
val (trainDataset, validationDataset) = if (withValidation) {
(
convert2LabeledPoint(dataset.filter(not(col($(validationIndicatorCol))))),
convert2LabeledPoint(dataset.filter(col($(validationIndicatorCol))))
)
} else {
(convert2LabeledPoint(dataset), null)
}
val numFeatures = trainDataset.first().features.size
val boostingStrategy = super.getOldBoostingStrategy(categoricalFeatures, OldAlgo.Classification)
val numClasses = 2
if (isDefined(thresholds)) {
require($(thresholds).length == numClasses, this.getClass.getSimpleName +
".train() called with non-matching numClasses and thresholds.length." +
s" numClasses=$numClasses, but thresholds has length ${$(thresholds).length}")
}
val instr = Instrumentation.create(this, dataset)
instr.logParams(labelCol, featuresCol, predictionCol, impurity, lossType,
maxDepth, maxBins, maxIter, maxMemoryInMB, minInfoGain, minInstancesPerNode,
seed, stepSize, subsamplingRate, cacheNodeIds, checkpointInterval, featureSubsetStrategy,
validationIndicatorCol)
instr.logNumFeatures(numFeatures)
instr.logNumClasses(numClasses)
val (baseLearners, learnerWeights) = if (withValidation) {
GradientBoostedTrees.runWithValidation(trainDataset, validationDataset, boostingStrategy,
$(seed), $(featureSubsetStrategy))
} else {
GradientBoostedTrees.run(trainDataset, boostingStrategy, $(seed), $(featureSubsetStrategy))
}
val m = new GBTClassificationModel(uid, baseLearners, learnerWeights, numFeatures)
instr.logSuccess(m)
m
}
@Since("1.4.1")
override def copy(extra: ParamMap): GBTClassifier = defaultCopy(extra)
}
@Since("1.4.0")
object GBTClassifier extends DefaultParamsReadable[GBTClassifier] {
/** Accessor for supported loss settings: logistic */
@Since("1.4.0")
final val supportedLossTypes: Array[String] = GBTClassifierParams.supportedLossTypes
@Since("2.0.0")
override def load(path: String): GBTClassifier = super.load(path)
}
/**
* Gradient-Boosted Trees (GBTs) (http://en.wikipedia.org/wiki/Gradient_boosting)
* model for classification.
* It supports binary labels, as well as both continuous and categorical features.
*
* @param _trees Decision trees in the ensemble.
* @param _treeWeights Weights for the decision trees in the ensemble.
*
* @note Multiclass labels are not currently supported.
*/
@Since("1.6.0")
class GBTClassificationModel private[ml](
@Since("1.6.0") override val uid: String,
private val _trees: Array[DecisionTreeRegressionModel],
private val _treeWeights: Array[Double],
@Since("1.6.0") override val numFeatures: Int,
@Since("2.2.0") override val numClasses: Int)
extends ProbabilisticClassificationModel[Vector, GBTClassificationModel]
with GBTClassifierParams with TreeEnsembleModel[DecisionTreeRegressionModel]
with MLWritable with Serializable {
require(_trees.nonEmpty, "GBTClassificationModel requires at least 1 tree.")
require(_trees.length == _treeWeights.length, "GBTClassificationModel given trees, treeWeights" +
s" of non-matching lengths (${_trees.length}, ${_treeWeights.length}, respectively).")
/**
* Construct a GBTClassificationModel
*
* @param _trees Decision trees in the ensemble.
* @param _treeWeights Weights for the decision trees in the ensemble.
* @param numFeatures The number of features.
*/
private[ml] def this(
uid: String,
_trees: Array[DecisionTreeRegressionModel],
_treeWeights: Array[Double],
numFeatures: Int) =
this(uid, _trees, _treeWeights, numFeatures, 2)
/**
* Construct a GBTClassificationModel
*
* @param _trees Decision trees in the ensemble.
* @param _treeWeights Weights for the decision trees in the ensemble.
*/
@Since("1.6.0")
def this(uid: String, _trees: Array[DecisionTreeRegressionModel], _treeWeights: Array[Double]) =
this(uid, _trees, _treeWeights, -1, 2)
@Since("1.4.0")
override def trees: Array[DecisionTreeRegressionModel] = _trees
/**
* Number of trees in ensemble
*/
@Since("2.0.0")
val getNumTrees: Int = trees.length
@Since("1.4.0")
override def treeWeights: Array[Double] = _treeWeights
override protected def transformImpl(dataset: Dataset[_]): DataFrame = {
val bcastModel = dataset.sparkSession.sparkContext.broadcast(this)
val predictUDF = udf { (features: Any) =>
bcastModel.value.predict(features.asInstanceOf[Vector])
}
dataset.withColumn($(predictionCol), predictUDF(col($(featuresCol))))
}
override def predict(features: Vector): Double = {
// If thresholds defined, use predictRaw to get probabilities, otherwise use optimization
if (isDefined(thresholds)) {
super.predict(features)
} else {
if (margin(features) > 0.0) 1.0 else 0.0
}
}
override protected def predictRaw(features: Vector): Vector = {
val prediction: Double = margin(features)
Vectors.dense(Array(-prediction, prediction))
}
override protected def raw2probabilityInPlace(rawPrediction: Vector): Vector = {
rawPrediction match {
case dv: DenseVector =>
dv.values(0) = loss.computeProbability(dv.values(0))
dv.values(1) = 1.0 - dv.values(0)
dv
case sv: SparseVector =>
throw new RuntimeException("Unexpected error in GBTClassificationModel:" +
" raw2probabilityInPlace encountered SparseVector")
}
}
/** Number of trees in ensemble */
val numTrees: Int = trees.length
@Since("1.4.0")
override def copy(extra: ParamMap): GBTClassificationModel = {
copyValues(new GBTClassificationModel(uid, _trees, _treeWeights, numFeatures, numClasses),
extra).setParent(parent)
}
@Since("1.4.0")
override def toString: String = {
s"GBTClassificationModel (uid=$uid) with $numTrees trees"
}
/**
* Estimate of the importance of each feature.
*
* Each feature's importance is the average of its importance across all trees in the ensemble
* The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
* (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
* and follows the implementation from scikit-learn.
* See `DecisionTreeClassificationModel.featureImportances`
*/
@Since("2.0.0")
lazy val featureImportances: Vector = TreeEnsembleModel.featureImportances(trees, numFeatures)
/** Raw prediction for the positive class. */
private def margin(features: Vector): Double = {
val treePredictions = _trees.map(_.rootNode.predictImpl(features).prediction)
blas.ddot(numTrees, treePredictions, 1, _treeWeights, 1)
}
/** (private[ml]) Convert to a model in the old API */
private[ml] def toOld: OldGBTModel = {
new OldGBTModel(OldAlgo.Classification, _trees.map(_.toOld), _treeWeights)
}
// hard coded loss, which is not meant to be changed in the model
private val loss = getOldLossType
/**
* Method to compute error or loss for every iteration of gradient boosting.
*
* @param dataset Dataset for validation.
*/
@Since("2.4.0")
def evaluateEachIteration(dataset: Dataset[_]): Array[Double] = {
val data = dataset.select(col($(labelCol)), col($(featuresCol))).rdd.map {
case Row(label: Double, features: Vector) => LabeledPoint(label, features)
}
GradientBoostedTrees.evaluateEachIteration(data, trees, treeWeights, loss,
OldAlgo.Classification
)
}
@Since("2.0.0")
override def write: MLWriter = new GBTClassificationModel.GBTClassificationModelWriter(this)
}
@Since("2.0.0")
object GBTClassificationModel extends MLReadable[GBTClassificationModel] {
private val numFeaturesKey: String = "numFeatures"
private val numTreesKey: String = "numTrees"
@Since("2.0.0")
override def read: MLReader[GBTClassificationModel] = new GBTClassificationModelReader
@Since("2.0.0")
override def load(path: String): GBTClassificationModel = super.load(path)
private[GBTClassificationModel]
class GBTClassificationModelWriter(instance: GBTClassificationModel) extends MLWriter {
override protected def saveImpl(path: String): Unit = {
val extraMetadata: JObject = Map(
numFeaturesKey -> instance.numFeatures,
numTreesKey -> instance.getNumTrees)
EnsembleModelReadWrite.saveImpl(instance, path, sparkSession, extraMetadata)
}
}
private class GBTClassificationModelReader extends MLReader[GBTClassificationModel] {
/** Checked against metadata when loading model */
private val className = classOf[GBTClassificationModel].getName
private val treeClassName = classOf[DecisionTreeRegressionModel].getName
override def load(path: String): GBTClassificationModel = {
implicit val format = DefaultFormats
val (metadata: Metadata, treesData: Array[(Metadata, Node)], treeWeights: Array[Double]) =
EnsembleModelReadWrite.loadImpl(path, sparkSession, className, treeClassName, false)
val numFeatures = (metadata.metadata \\ numFeaturesKey).extract[Int]
val numTrees = (metadata.metadata \\ numTreesKey).extract[Int]
val trees: Array[DecisionTreeRegressionModel] = treesData.map {
case (treeMetadata, root) =>
val tree = new DecisionTreeRegressionModel(treeMetadata.uid,
root.asInstanceOf[RegressionNode], numFeatures)
treeMetadata.getAndSetParams(tree)
tree
}
require(numTrees == trees.length, s"GBTClassificationModel.load expected $numTrees" +
s" trees based on metadata but found ${trees.length} trees.")
val model = new GBTClassificationModel(metadata.uid,
trees, treeWeights, numFeatures)
metadata.getAndSetParams(model)
model
}
}
/** Convert a model from the old API */
private[ml] def fromOld(
oldModel: OldGBTModel,
parent: GBTClassifier,
categoricalFeatures: Map[Int, Int],
numFeatures: Int = -1,
numClasses: Int = 2): GBTClassificationModel = {
require(oldModel.algo == OldAlgo.Classification, "Cannot convert GradientBoostedTreesModel" +
s" with algo=${oldModel.algo} (old API) to GBTClassificationModel (new API).")
val newTrees = oldModel.trees.map { tree =>
// parent for each tree is null since there is no good way to set this.
DecisionTreeRegressionModel.fromOld(tree, null, categoricalFeatures)
}
val uid = if (parent != null) parent.uid else Identifiable.randomUID("gbtc")
new GBTClassificationModel(uid, newTrees, oldModel.treeWeights, numFeatures, numClasses)
}
}
| lxsmnv/spark | mllib/src/main/scala/org/apache/spark/ml/classification/GBTClassifier.scala | Scala | apache-2.0 | 17,177 |
package tastytest
object Logarithms {
opaque type Logarithm = Double
object Logarithm {
// These are the two ways to lift to the Logarithm type
private[Logarithms] def apply(d: Double): Logarithm = math.log(d)
def of(d: Double): Option[Logarithm] =
if (d > 0.0) Some(math.log(d)) else None
}
// implicit define cross compatible public APIs for opaque types
final implicit class LogarithmOps(val logarithm: Logarithm) extends AnyVal {
def toDouble: Double = math.exp(logarithm)
def + (other: Logarithm): Logarithm = Logarithm(math.exp(logarithm) + math.exp(other))
def * (other: Logarithm): Logarithm = logarithm + other
}
}
| lrytz/scala | test/tasty/run/src-3/tastytest/Logarithms.scala | Scala | apache-2.0 | 674 |
package lila.analyse
package actorApi
import lila.game.Game
case class AnalysisReady(game: Game, analysis: Analysis)
case class AnalysisProgress(
game: Game,
variant: chess.variant.Variant,
initialFen: chess.format.FEN,
analysis: Analysis
)
case class StudyAnalysisProgress(analysis: Analysis, complete: Boolean)
| luanlv/lila | modules/analyse/src/main/actorApi.scala | Scala | mit | 333 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.keras
import com.intel.analytics.bigdl.nn.{Cropping2D, _}
import com.intel.analytics.bigdl.nn.abstractnn.DataFormat
/**
 * Checks BigDL's Cropping2D layer against the Keras reference implementation:
 * the embedded Python snippet builds the equivalent Keras model, and
 * checkOutputAndGrad compares forward outputs and gradients of the two.
 */
class Cropping2DSpec extends KerasBaseSpec {
  "Cropping2D" should "with NCHW work properly" in {
    // dim_ordering='th' is Keras' channel-first layout, matching DataFormat.NCHW.
    val kerasCode =
      """
        |input_tensor = Input(shape=[3, 4, 5])
        |input = np.random.uniform(-1, 1, [2, 3, 4, 5])
        |output_tensor = Cropping2D(cropping=((1, 1), (1, 1)), dim_ordering='th')(input_tensor)
        |model = Model(input=input_tensor, output=output_tensor)
      """.stripMargin
    val model = Cropping2D[Float](Array(1, 1), Array(1, 1), DataFormat.NCHW)
    checkOutputAndGrad(model, kerasCode)
  }
  "Cropping2D" should "with NHWC work properly" in {
    // dim_ordering='tf' is Keras' channel-last layout, matching DataFormat.NHWC.
    val kerasCode =
      """
        |input_tensor = Input(shape=[3, 4, 5])
        |input = np.random.uniform(-1, 1, [2, 3, 4, 5])
        |output_tensor = Cropping2D(cropping=((1, 1), (1, 1)), dim_ordering='tf')(input_tensor)
        |model = Model(input=input_tensor, output=output_tensor)
      """.stripMargin
    val model = Cropping2D[Float](Array(1, 1), Array(1, 1), DataFormat.NHWC)
    checkOutputAndGrad(model, kerasCode)
  }
}
| qiuxin2012/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/keras/Cropping2DSpec.scala | Scala | apache-2.0 | 1,786 |
/*
* Copyright 2013 websudos ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.websudos.phantom.tables
import com.websudos.phantom.Implicits._
import com.websudos.phantom.PhantomCassandraConnector
// Row type materialized from the `mytest` Cassandra table.
case class MyTestRow(
  key: String,
  optionA: Option[Int],
  stringlist: List[String]
)
/**
 * phantom table mapping for `mytest`: the column objects below define the
 * schema, and fromRow reassembles a MyTestRow from a driver Row.
 */
sealed class MyTest extends CassandraTable[MyTest, MyTestRow] {
  def fromRow(r: Row): MyTestRow = {
    MyTestRow(key(r), optionA(r), stringlist(r))
  }
  // Partition key column.
  object key extends StringColumn(this) with PartitionKey[String]
  object stringlist extends ListColumn[MyTest, MyTestRow, String](this)
  // Nullable int column, surfaced as Option[Int].
  object optionA extends OptionalIntColumn(this)
}
// Singleton bound to the test Cassandra connection.
object MyTest extends MyTest with PhantomCassandraConnector {
  override val tableName = "mytest"
}
| nosheenzaza/phantom-data-centric | phantom-dsl/src/test/scala/com/websudos/phantom/tables/MyTest.scala | Scala | gpl-2.0 | 1,273 |
package org.jetbrains.bsp.project.test.environment
import com.intellij.execution.application.ApplicationConfiguration
import com.intellij.execution.configurations.RunConfiguration
/**
 * BSP environment extension for plain JVM application run configurations:
 * it contributes the configured main class and accepts only
 * [[ApplicationConfiguration]] instances.
 */
class BspEnvironmentApplicationRunnerExtension extends BspEnvironmentRunnerExtension {

  /** The single main class of an application configuration, if this is one. */
  override def classes(config: RunConfiguration): Option[List[String]] =
    config match {
      case appConfig: ApplicationConfiguration => Some(appConfig.getMainClassName :: Nil)
      case _                                   => None
    }

  /** Only plain application run configurations are handled here. */
  override def runConfigurationSupported(config: RunConfiguration): Boolean =
    config match {
      case _: ApplicationConfiguration => true
      case _                           => false
    }

  /** This extension applies to the RUN environment (as opposed to TEST). */
  override def environmentType: ExecutionEnvironmentType = ExecutionEnvironmentType.RUN
}
| JetBrains/intellij-scala | bsp/src/org/jetbrains/bsp/project/test/environment/BspEnvironmentApplicationRunnerExtension.scala | Scala | apache-2.0 | 718 |
// https://www.hackerrank.com/challenges/order-exercises
// see https://en.wikipedia.org/wiki/Maximum_subarray_problem
// https://www.hackerrank.com/challenges/order-exercises
// see https://en.wikipedia.org/wiki/Maximum_subarray_problem
object OrderExercises extends App {

  /**
   * Kadane's algorithm expressed functionally: the scan tracks the best
   * subarray sum ending at each position (clamped at 0), and the answer is
   * the largest value seen. The leading 0 supplied by scanLeft makes the
   * result 0 for an empty array, exactly like the imperative
   * two-accumulator version this replaces.
   */
  def max(A: Array[Int]): Int =
    A.scanLeft(0)((endingHere, x) => math.max(0, endingHere + x)).max

  // To solve the full "order exercises" problem one would run Kadane to get
  // (max, start_idx, end_idx), recurse on both remaining sides (0 to
  // start_idx and end_idx to end), and keep the k largest sums.

  // Input format: first line "n k", second line the n array elements.
  // NOTE: n and k are parsed but unused by this partial solution.
  val lines = io.Source.stdin.getLines
  val Array(n, k) = lines.take(1).toList(0).split(" ").map(_.toInt)
  val A = lines.take(1).toList(0).split(" ").map(_.toInt)
  println(max(A))
}
| flopezlasanta/hackerrank | src/functional_programming/functional_structures/OrderExercises.scala | Scala | mit | 920 |
import java.time.{LocalDate, LocalDateTime}
import java.util.Date
import org.joda.time.DateTime
/**
 * Type class abstracting over "the current time" for a given time type,
 * letting callers inject a clock (e.g. a fixed one in tests).
 */
trait Now[T] {
  def apply(): T
}
object Now {
  // Summoner: Now[Date] returns the implicit instance in scope.
  def apply[T : Now]: Now[T] = implicitly
  // Lift a by-name expression into an instance; `value` is re-evaluated on every call.
  def apply[T](value: => T): Now[T] = new Now[T] {
    override def apply(): T = value
  }
  // Default instance for java.util.Date.
  implicit object DateNow extends Now[Date] {
    override def apply(): Date = new Date()
  }
  // Default instance for Joda-Time DateTime.
  implicit object JodaDateTimeNow extends Now[DateTime] {
    override def apply(): DateTime = DateTime.now()
  }
  // Default instance for java.time.LocalDate.
  implicit object LocalDateNow extends Now[LocalDate] {
    override def apply(): LocalDate = LocalDate.now()
  }
  // Default instance for java.time.LocalDateTime.
  implicit object LocalDateTimeNow extends Now[LocalDateTime] {
    override def apply(): LocalDateTime = LocalDateTime.now()
  }
}
| PeterPerhac/cheat-sheets | scala-scratches/now.scala | Scala | unlicense | 742 |
package uk.gov.dvla.vehicles.presentation.common.mappings
import java.util.Locale
import org.joda.time.{DateTime, DateTimeZone}
import org.joda.time.format.DateTimeFormat
/**
 * Formats times of day for display in the UK locale, e.g. "3 pm" on the
 * exact hour and "3:07 pm" otherwise (always lower-cased).
 */
object Time {
  val format = "h:mm a"
  val formatHour = "h a"

  /** Formats a duration given in minutes, converted to milliseconds. */
  def fromMinutes(mins: Long) = {
    val millisPerMinute = 60000
    fromHourMillis(mins * millisPerMinute)
  }

  /** Formats a time of day expressed in milliseconds since midnight. */
  def fromHourMillis(hourMillis: Long) =
    print(new DateTime(hourMillis, DateTimeZone.forID("UTC"))) // Must use UTC as we only want to format the hour

  def fromDateTime(date: DateTime) = print(date)

  /** Renders with the short pattern on the exact hour, the full one otherwise. */
  def print(date: DateTime) = {
    val pattern = if (date.minuteOfHour().get() == 0) formatHour else format
    DateTimeFormat.forPattern(pattern).withLocale(Locale.UK).print(date).toLowerCase
  }
}
| dvla/vehicles-presentation-common | app/uk/gov/dvla/vehicles/presentation/common/mappings/Time.scala | Scala | mit | 869 |
package net.kwas.impatient.ch6
import org.scalatest._
/**
 * Smoke test: merely invokes ArgumentReverser.main and passes as long as no
 * exception is thrown — the printed output itself is not asserted on.
 */
class ArgumentReverserSpec extends FlatSpec with Matchers {
  "ArgumentReverser" should "print out reversed arguments" in {
    ArgumentReverser.main(Array("1", "2", "3"))
  }
}
| dkwasny/ScalaImpatient | src/test/scala/net/kwas/impatient/ch6/ArgumentReverserSpec.scala | Scala | mit | 235 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models.businessdetails
import jto.validation.forms._
import jto.validation.{From, Rule, Write}
import play.api.libs.json.Json
/** Answer to the "is the registered office in the UK?" question on the business details form. */
case class RegisteredOfficeIsUK(isUK: Boolean)
object RegisteredOfficeIsUK {
  // JSON (de)serialization via play-json macro.
  implicit val formats = Json.format[RegisteredOfficeIsUK]
  import utils.MappingUtils.Implicits._
  // Form binding: reads the "isUK" field, failing with the messages key below
  // when it is missing or not a boolean.
  implicit val formRule: Rule[UrlFormEncoded, RegisteredOfficeIsUK] =
    From[UrlFormEncoded] { __ =>
      import jto.validation.forms.Rules._
      (__ \\ "isUK").read[Boolean].withMessage("error.required.atb.registered.office.uk.or.overseas") map RegisteredOfficeIsUK.apply
    }
  // Form unbinding: writes the flag back as the single "isUK" form field.
  implicit val formWrites: Write[RegisteredOfficeIsUK, UrlFormEncoded] =
    Write {
      case RegisteredOfficeIsUK(b) =>
        Map("isUK" -> Seq(b.toString))
    }
} | hmrc/amls-frontend | app/models/businessdetails/RegisteredOfficeIsUK.scala | Scala | apache-2.0 | 1,373 |
package com.kubukoz.adventofcode2016
import org.scalatest.{FlatSpec, Matchers}
/**
 * Advent of Code 2016 day 12: runs the sample assembunny program and checks
 * that register 'a' ends up holding 42 (41 from the cpy plus one surviving inc).
 */
class Day12Tests extends FlatSpec with Matchers {
  "transformInput" should "work" in {
    Day12.transformInput(
      """cpy 41 a
        |inc a
        |inc a
        |dec a
        |jnz a 2
        |dec a""".stripMargin.split("\\n").toList, Map('a' -> 0))('a') shouldBe 42
  }
}
| kubukoz/advent-of-code-2016 | src/test/scala/com/kubukoz/adventofcode2016/Day12Tests.scala | Scala | apache-2.0 | 363 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.utils.geotools
import com.typesafe.scalalogging.LazyLogging
import com.vividsolutions.jts.geom._
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.geotools.Conversions.toRichSimpleFeatureIterator
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
/**
 * Specs for GridSnap, which snaps real-world coordinates in a bounding box
 * onto the centres of a fixed cols x rows grid (x/y map index -> coordinate,
 * i/j map coordinate -> index).
 */
@RunWith(classOf[JUnitRunner])
class GridSnapTest extends Specification with LazyLogging {
  "GridSnap" should {
    "create a gridsnap around a given bbox" in {
      val bbox = new Envelope(0.0, 10.0, 0.0, 10.0)
      val gridSnap = new GridSnap(bbox, 100, 100)
      gridSnap must not(beNull)
    }
    "snap to the middle of a grid cell" in {
      // 4x4 grid over a 4x4 box: each cell is 1 unit, centres at *.5 offsets.
      val bbox = new Envelope(0.0, 4.0, -4.0, 0.0)
      val gridSnap = new GridSnap(bbox, 4, 4)
      gridSnap.x(0) mustEqual 0.5
      gridSnap.x(1) mustEqual 1.5
      gridSnap.x(2) mustEqual 2.5
      gridSnap.x(3) mustEqual 3.5
      gridSnap.y(0) mustEqual -3.5
      gridSnap.y(1) mustEqual -2.5
      gridSnap.y(2) mustEqual -1.5
      gridSnap.y(3) mustEqual -0.5
      // Points anywhere inside a cell snap to that cell's centre; the far
      // max corner belongs to the last cell.
      gridSnap.snap(0, -4.0) mustEqual (0.5, -3.5)
      gridSnap.snap(0.1, -3.9) mustEqual (0.5, -3.5)
      gridSnap.snap(0.9, -3.1) mustEqual (0.5, -3.5)
      gridSnap.snap(1.0, -3.0) mustEqual (1.5, -2.5)
      gridSnap.snap(1.1, -2.9) mustEqual (1.5, -2.5)
      gridSnap.snap(1.9, -2.1) mustEqual (1.5, -2.5)
      gridSnap.snap(3.0, -1.0) mustEqual (3.5, -0.5)
      gridSnap.snap(3.1, -0.9) mustEqual (3.5, -0.5)
      gridSnap.snap(3.9, -0.1) mustEqual (3.5, -0.5)
      gridSnap.snap(4.0, 0.0) mustEqual (3.5, -0.5)
    }
    "handle min/max" >> {
      val bbox = new Envelope(0.0, 10.0, 0.0, 10.0)
      val gridSnap = new GridSnap(bbox, 100, 10)
      gridSnap.i(0.0) mustEqual 0
      gridSnap.j(0.0) mustEqual 0
      gridSnap.i(10.0) mustEqual 99
      gridSnap.j(10.0) mustEqual 9
    }
    "handle out of bounds points" >> {
      // Coordinates outside the envelope clamp to the first/last cell.
      val bbox = new Envelope(0.0, 10.0, 0.0, 10.0)
      val gridSnap = new GridSnap(bbox, 100, 10)
      gridSnap.i(-1.0) mustEqual 0
      gridSnap.j(-1.0) mustEqual 0
      gridSnap.i(11.0) mustEqual 99
      gridSnap.j(11.0) mustEqual 9
    }
    "compute a SimpleFeatureSource Grid over the bbox" in {
      val bbox = new Envelope(0.0, 10.0, 0.0, 10.0)
      val gridSnap = new GridSnap(bbox, 10, 10)
      val grid = gridSnap.generateCoverageGrid
      grid must not(beNull)
      // 10x10 grid -> one feature per cell.
      val featureIterator = grid.getFeatures.features.toList
      featureIterator must haveLength(100)
    }
    "compute a sequence of points between various sets of coordinates" in {
      val bbox = new Envelope(0.0, 10.0, 0.0, 10.0)
      val gridSnap = new GridSnap(bbox, 10, 10)
      val resultDiagonal = gridSnap.genBresenhamCoordSet(0, 0, 9, 9).toList
      resultDiagonal must haveLength(9)
      val resultVertical = gridSnap.genBresenhamCoordSet(0, 0, 0, 9).toList
      resultVertical must haveLength(9)
      val resultHorizontal = gridSnap.genBresenhamCoordSet(0, 0, 9, 0).toList
      resultHorizontal must haveLength(9)
      val resultSamePoint = gridSnap.genBresenhamCoordSet(0, 0, 0, 0).toList
      resultSamePoint must haveLength(1)
      val resultInverse = gridSnap.genBresenhamCoordSet(9, 9, 0, 0).toList
      resultInverse must haveLength(9)
    }
    "not have floating point errors" >> {
      // Round-tripping index -> coordinate -> index must be exact.
      val bbox = new Envelope(0.0, 10.0, 0.0, 10.0)
      val cols = 100
      val rows = 100
      val gridSnap = new GridSnap(bbox, cols, rows)
      "columns" >> {
        forall(0 until cols) { i =>
          gridSnap.x(gridSnap.i(gridSnap.x(i))) mustEqual gridSnap.x(i)
        }
      }
      "rows" >> {
        forall(0 until rows) { j =>
          gridSnap.y(gridSnap.j(gridSnap.y(j))) mustEqual gridSnap.y(j)
        }
      }
    }
  }
}
| mdzimmerman/geomesa | geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/geotools/GridSnapTest.scala | Scala | apache-2.0 | 4,236 |
package edu.berkeley.nlp.entity.wiki
import edu.berkeley.nlp.futile.fig.basic.Indexer
import scala.collection.mutable.HashMap
import scala.collection.mutable.ArrayBuffer
import edu.berkeley.nlp.futile.fig.basic.IOUtils
import scala.collection.mutable.HashSet
import edu.berkeley.nlp.futile.util.Logger
import edu.berkeley.nlp.entity.GUtil
import edu.berkeley.nlp.entity.coref.MentionPropertyComputer
import edu.berkeley.nlp.entity.coref.CorefDocAssembler
import edu.berkeley.nlp.entity.ConllDocReader
import edu.berkeley.nlp.entity.lang.Language
import edu.berkeley.nlp.entity.wiki._
/**
 * Serializable view of the Wikipedia link graph over a set of pages.
 * Out-links are stored per page title as indices into `pageNameIndex`.
 * `inLinksMap` is carried for symmetry but not consulted by any method here.
 */
@SerialVersionUID(9084163557546777842L)
class WikipediaLinkDB(val pageNameIndex: Indexer[String],
                      val inLinksMap: HashMap[String,Array[Int]],
                      val outLinksMap: HashMap[String,Array[Int]]) extends Serializable {
  // Bounded cache of out-link Sets; initialized lazily (and therefore empty
  // after deserialization, since null is the serialized default).
  var outLinksSetCache: HashMap[String,Set[Int]] = null;

  /** Out-links of `title` as page-name indices; empty if the page is unknown. */
  def getOutLinks(title: String) = {
    if (outLinksMap.contains(title)) {
      outLinksMap(title);
    } else {
      Array[Int]();
    }
  }

  /** Out-links of `title` as a Set, memoized in a cache capped at ~1000 entries. */
  def getOutLinksSetUseCache(title: String) = {
    if (outLinksMap.contains(title)) {
      if (outLinksSetCache == null) {
        outLinksSetCache = new HashMap[String,Set[Int]];
      }
      if (!outLinksSetCache.contains(title)) {
        if (outLinksSetCache.size > 1000) {
          // BUG FIX: the original called outLinksSetCache.dropRight(1) and
          // discarded the result. dropRight on a mutable HashMap returns a
          // *new* collection and never mutates the receiver, so the cache
          // was never trimmed and grew without bound. Evict an entry for real.
          outLinksSetCache.remove(outLinksSetCache.keysIterator.next());
        }
        outLinksSetCache.put(title, outLinksMap(title).toSet);
      }
      outLinksSetCache(title);
    } else {
      Set[Int]();
    }
  }

  /** Returns (shared out-links, |out(title1)|, |out(title2)|). */
  def computeOutLinkSuffStats(title1: String, title2: String): (Int, Int, Int) = {
    val outLinksTitle1 = getOutLinksSetUseCache(title1);
    val outLinksTitle2 = getOutLinksSetUseCache(title2);
    val intersection = (outLinksTitle1 & outLinksTitle2);
    (intersection.size, outLinksTitle1.size, outLinksTitle2.size);
  }

  /** True iff the two pages have at least one out-link in common. */
  def doPagesShareOutLink(title1: String, title2: String): Boolean = {
    computeOutLinkSuffStats(title1, title2)._1 > 0;
  }

  /** True iff either page links directly to the other. */
  def doesOneLinkToOther(title1: String, title2: String): Boolean = {
    val outLinksTitle1 = getOutLinks(title1);
    val outLinksTitle2 = getOutLinks(title2);
    outLinksTitle1.contains(pageNameIndex.indexOf(title2)) || outLinksTitle2.contains(pageNameIndex.indexOf(title1))
  }
}
object WikipediaLinkDB {
  /**
   * Streams a Wikipedia XML dump and builds a WikipediaLinkDB containing the
   * out-links ([[...]] targets) of every page whose lower-cased title is in
   * pageTitleSetLc. Redirect pages and pages outside the set are skipped.
   * NOTE: inLinksMap is accumulated into but never populated here, so the
   * resulting DB's in-link map is empty.
   */
  def processWikipedia(wikipediaPath: String, pageTitleSetLc: Set[String]): WikipediaLinkDB = {
    val pageNamesIndex = new Indexer[String];
    val inLinksMap = new HashMap[String,HashSet[Int]];
    val outLinksMap = new HashMap[String,HashSet[Int]];
    val lines = IOUtils.lineIterator(IOUtils.openInHard(wikipediaPath));
    var currentPageTitle = "";
    // NOTE(review): linksThisPage, isInText, categoryMap, infoboxMap and
    // appositiveMap are declared but never used below.
    var linksThisPage = new StringBuilder();
    var doneWithThisPage = false;
    var numPagesSeen = 0;
    var lineIdx = 0;
    var isInText = false;
    val categoryMap = new HashMap[String,ArrayBuffer[String]];
    val infoboxMap = new HashMap[String,String];
    val appositiveMap = new HashMap[String,String];
    // Extract first line that's not in brackets
    while (lines.hasNext) {
      val line = lines.next;
      // Progress logging every 100k lines.
      if (lineIdx % 100000 == 0) {
        println("Line: " + lineIdx + ", processed " + numPagesSeen + " pages");
      }
      lineIdx += 1;
      // 8 because all page lines look like " <page>" so we just need to catch the next one and skip
      // longer lines
      if (line.size > 8 && doneWithThisPage) {
        // Do nothing
      } else {
        if (line.contains("<page>")) {
          doneWithThisPage = false;
          numPagesSeen += 1;
        } else if (line.contains("<title>")) {
          // 7 = "<title>".length()
          currentPageTitle = line.substring(line.indexOf("<title>") + 7, line.indexOf("</title>"));
          // Skip pages whose title is not in the requested set.
          if (!pageTitleSetLc.contains(currentPageTitle.toLowerCase)) {
            doneWithThisPage = true;
          }
        } else if (line.contains("<redirect title")) {
          doneWithThisPage = true;
        }
        // Scan the line for wiki links [[target]] or [[target|anchor]].
        var startIdx = line.indexOf("[[");
        while (startIdx >= 0 ) {
          val endIdx = line.indexOf("]]", startIdx);
          val pipeIdx = line.indexOf("|", startIdx);
          // With a pipe before the closing brackets, the target is before the pipe.
          val linkDest: String = if (pipeIdx >= 0 && pipeIdx < endIdx) {
            line.substring(startIdx + 2, pipeIdx);
          } else if (endIdx >= startIdx + 2) {
            line.substring(startIdx + 2, endIdx);
          } else {
            ""
          }
          if (linkDest != "") {
            val idx = pageNamesIndex.getIndex(linkDest);
            if (!outLinksMap.contains(currentPageTitle)) {
              outLinksMap.put(currentPageTitle, new HashSet[Int]);
            }
            outLinksMap(currentPageTitle) += idx;
          }
          startIdx = line.indexOf("[[", startIdx + 2);
        }
      }
    }
    // Freeze the mutable sets into arrays for the serializable DB.
    val inLinksMapArrs = inLinksMap.map(entry => entry._1 -> entry._2.toArray);
    val outLinksMapArrs = outLinksMap.map(entry => entry._1 -> entry._2.toArray);
    // Histogram of out-degree by order of magnitude, for diagnostics.
    val sizes = Array.tabulate(10)(i => 0);
    for (key <- outLinksMapArrs.keySet) {
      val size = outLinksMapArrs(key).size;
      // Logger.logss(size);
      val exponent = Math.floor(Math.log(size)/Math.log(10)).toInt;
      sizes(exponent) += 1;
    }
    Logger.logss("SIZES: " + sizes.toSeq);
    new WikipediaLinkDB(pageNamesIndex, inLinksMapArrs, outLinksMapArrs);
  }
  /**
   * Diagnostic driver: loads a serialized WikipediaInterface, prints link/
   * category statistics for a few hand-picked titles, then tabulates 2x2
   * contingency tables (coreferent vs. not x link-feature fires vs. not)
   * over gold mentions of the first 10 ACE05 dev documents.
   */
  def main(args: Array[String]) {
    val wi = GUtil.load("data/wikipedia/wiki-model-ace-links.ser.gz").asInstanceOf[WikipediaInterface];
    val linkDB = wi.linksDB;
    val categoryDB = wi.categoryDB;
    val set = Set("Bill Clinton", "President", "White House", "Hospital", "Prime Minister", "Judge")
    for (title1 <- set) {
      for (title2 <- set) {
        if (title1 != title2) {
          Logger.logss(linkDB.computeOutLinkSuffStats(title1, title2));
          Logger.logss(categoryDB.getCategories(title1).toSet.toString + " " + categoryDB.getCategories(title2).toSet.toString);
          Logger.logss((categoryDB.getCategories(title1).toSet & categoryDB.getCategories(title2).toSet).toString);
        }
      }
    }
    val basicWikifier = new BasicWikifier(wi);
    val mentionPropertyComputer = new MentionPropertyComputer(None);
    // NOTE(review): pmAssembler is built but unused; only the gold-mention
    // assembler is applied below.
    val pmAssembler = CorefDocAssembler(Language.ENGLISH, useGoldMentions = false);
    val gmAssembler = CorefDocAssembler(Language.ENGLISH, useGoldMentions = true);
    val corefDocs = ConllDocReader.loadRawConllDocsWithSuffix("data/ace05/dev", -1, "", Language.ENGLISH).map(doc => gmAssembler.createCorefDoc(doc, mentionPropertyComputer));
    Logger.logss("Loaded docs");
    // Rows: 0 = mention pair is coreferent, 1 = not. Columns: 0 = feature
    // fires, 1 = not. The *Ne tables exclude pairs with equal Wikification.
    var countsMat = Array.tabulate(2, 2)((i, j) => 0);
    var countsMatNe = Array.tabulate(2, 2)((i, j) => 0);
    var linksToOtherMatNe = Array.tabulate(2, 2)((i, j) => 0);
    var linksToOtherAndOverlapMatNe = Array.tabulate(2, 2)((i, j) => 0);
    var categoryMatchMatNe = Array.tabulate(2, 2)((i, j) => 0);
    for (corefDoc <- corefDocs.slice(0, 10)) {
      val goldClusters = corefDoc.getOraclePredClustering;
      val wiks = (0 until corefDoc.predMentions.size).map(i => basicWikifier.wikify(corefDoc.rawDoc.docID, corefDoc.predMentions(i)));
      for (mentIdx <- 0 until corefDoc.predMentions.size) {
        for (antIdx <- 0 until mentIdx) {
          // Only consider pairs where both mentions received a real Wikipedia link.
          if (wiks(mentIdx) != NilToken && wiks(antIdx) != NilToken) {
            var firstIdx = if (goldClusters.areInSameCluster(mentIdx, antIdx)) 0 else 1;
            var secondIdx = if (linkDB.computeOutLinkSuffStats(wiks(mentIdx), wiks(antIdx))._1 > 0) 0 else 1;
            countsMat(firstIdx)(secondIdx) += 1;
            if (wiks(mentIdx) != wiks(antIdx)) {
              countsMatNe(firstIdx)(secondIdx) += 1;
              linksToOtherMatNe(firstIdx)(if (linkDB.doesOneLinkToOther(wiks(mentIdx), wiks(antIdx))) 0 else 1) += 1;
              linksToOtherAndOverlapMatNe(firstIdx)(if (linkDB.doesOneLinkToOther(wiks(mentIdx), wiks(antIdx)) && secondIdx == 0) 0 else 1) += 1;
              categoryMatchMatNe(firstIdx)(if ((categoryDB.getCategories(wiks(mentIdx)).toSet & categoryDB.getCategories(wiks(antIdx)).toSet).size > 0) 0 else 1) += 1;
            }
          }
        }
      }
    }
    Logger.logss(countsMat(0)(0) + "\\t" + countsMat(0)(1))
    Logger.logss(countsMat(1)(0) + "\\t" + countsMat(1)(1))
    Logger.logss("Restricted to pairs with unequal Wikification");
    Logger.logss(countsMatNe(0)(0) + "\\t" + countsMatNe(0)(1))
    Logger.logss(countsMatNe(1)(0) + "\\t" + countsMatNe(1)(1))
    Logger.logss("Links to other");
    Logger.logss(linksToOtherMatNe(0)(0) + "\\t" + linksToOtherMatNe(0)(1))
    Logger.logss(linksToOtherMatNe(1)(0) + "\\t" + linksToOtherMatNe(1)(1))
    Logger.logss("Links to other and overlap");
    Logger.logss(linksToOtherAndOverlapMatNe(0)(0) + "\\t" + linksToOtherAndOverlapMatNe(0)(1))
    Logger.logss(linksToOtherAndOverlapMatNe(1)(0) + "\\t" + linksToOtherAndOverlapMatNe(1)(1))
    Logger.logss("Category overlap");
    Logger.logss(categoryMatchMatNe(0)(0) + "\\t" + categoryMatchMatNe(0)(1))
    Logger.logss(categoryMatchMatNe(1)(0) + "\\t" + categoryMatchMatNe(1)(1))
  }
}
| malcolmgreaves/berkeley-entity | src/main/java/edu/berkeley/nlp/entity/wiki/WikipediaLinkDB.scala | Scala | gpl-3.0 | 9,022 |
/**
* @author Yuuto
*/
package yuuto.inventorytools.gui.slot
import net.minecraft.inventory.Slot
import yuuto.inventorytools.tiles.TileToolBench
import net.minecraft.item.ItemStack
import net.minecraft.entity.player.EntityPlayer
class SlotTool(tile:TileToolBench, slotIndex:Int, x:Int, y:Int) extends Slot(tile.inv, slotIndex, x, y){
override def isItemValid(stack:ItemStack):Boolean={
return this.inventory.isItemValidForSlot(this.slotIndex, stack);
}
override def canTakeStack(player:EntityPlayer):Boolean={
return tile.isToolBoxOpen();
}
} | AnimeniacYuuto/InventoryTools | src/main/scala/yuuto/inventorytools/gui/slot/SlotTool.scala | Scala | gpl-3.0 | 575 |
package com.github.eyce9000.spark.bes
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.Partition
import org.apache.spark.TaskContext
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.catalyst.expressions.SpecificMutableRow
import org.apache.spark.sql.catalyst.expressions.MutableRow
import scala.collection.JavaConverters._
import org.apache.spark.sql.types.IntegerType
import org.apache.spark.sql.types.StringType
import java.util.Date
import org.apache.spark.sql.types.TimestampType
import java.net.URI
import com.github.eyce9000.spark.bes.client.BigfixAPI
import com.github.eyce9000.spark.bes.client.xml.QueryResult
import org.apache.spark.sql.catalyst.InternalRow
/**
 * Single-partition RDD whose rows come from one BigFix relevance query.
 * The SparkContext is @transient so the RDD can be serialized to executors.
 * NOTE(review): `schema` is accepted but not used in this class body.
 */
class BigfixRDD(@transient sc: SparkContext, config:BigfixConfig, schema: StructType)
    extends RDD[BigfixRow](sc, Nil) {
  val relevanceQuery:String = config.relevanceQuery
  // The whole query result is produced by one API call, so one partition suffices.
  object SinglePartition extends Partition{
    val index:Int=0
  }
  override def getPartitions: Array[Partition] = Array(SinglePartition)
  // Built lazily on the executor side inside compute (the client is not serialized).
  private def getApi(config:BigfixConfig):BigfixAPI = {
    new BigfixAPI(config)
  }
  // Runs the relevance query and wraps each result row.
  override def compute(thePart: Partition, context: TaskContext) = {
    getApi(config).query(config.relevanceQuery).map { row => new BigfixRow(row) }
  }
} | eyce9000/bigfix-relevance-spark | src/main/scala/com/github/eyce9000/spark/bes/BigfixRDD.scala | Scala | apache-2.0 | 1,337 |
/* Copyright 2009-2016 EPFL, Lausanne */
import leon.annotation._
import leon.lang._
import leon.lang.StaticChecks._
import leon.io.{
FileInputStream => FIS,
FileOutputStream => FOS,
StdOut
}
import leon.util.{ TimePoint }
import scala.annotation.tailrec
/**
* Some basic image processing; this version uses VLA and is designed for benchmarks.
*
* General NOTEs
* -------------
*
* Byte ranges from -128 to 127, not 0 to 255. It is important to remember
* that when manipulating individual component as Byte.
*
* The BMP format uses little endian.
*
* See https://msdn.microsoft.com/en-us/library/dd183391(v=vs.85).aspx
* for the full documentation.
*
* For benchmarks, the each images are loaded once, then for each filter
* the image is processed `BenchmarkRuns` times. Each runs prints:
*
* input;kernel;runtime[ms]
*
* This program is expected to be run from `run.fish`.
*/
object ImageProcessingVLABenchmark {
/**********************************************************************
* Constants *
**********************************************************************/
@inline
val BenchmarkRuns = 50
// Sizes in bytes of several Windows numerical types
@inline
val WordSize = 2 // 16 bits, unsigned
@inline
val DwordSize = 4 // 32 bits, unsigned
@inline
val LongSize = 4 // 32 bits, signed
/**********************************************************************
* Basic Algorithms *
**********************************************************************/
def inRange(x: Int, min: Int, max: Int): Boolean = {
min <= x && x <= max
}
def min(x: Int, y: Int): Int = {
if (x <= y) x else y
}
def max(x: Int, y: Int): Int = {
if (x < y) y else x
}
def clamp(x: Int, down: Int, up: Int): Int = {
max(down, min(x, up))
}
/**********************************************************************
* Status *
**********************************************************************/
  // Outcome of each processing stage; sealed so matches are exhaustive.
  sealed abstract class Status {
    def isSuccess: Boolean = this.isInstanceOf[Success]
  }
  case class Success() extends Status
  case class OpenError() extends Status
  case class ReadError() extends Status
  case class DomainError() extends Status
  case class InvalidFileHeaderError() extends Status
  case class InvalidBitmapHeaderError() extends Status
  case class CorruptedDataError() extends Status
  case class ImageTooBigError() extends Status
  case class NoImageError() extends Status
  case class WriteError() extends Status
  case class NotImplementedError() extends Status
  // Prints a human-readable message for the status and returns the process
  // exit code (0 = success, 99 = not implemented).
  def statusCode(s: Status): Int = s match {
    case Success() => StdOut.println("success"); 0
    case OpenError() => StdOut.println("couldn't open file"); 1
    case ReadError() => StdOut.println("couldn't read some expected data"); 2
    case DomainError() => StdOut.println("integer out of range"); 3
    case InvalidFileHeaderError() => StdOut.println("file format unsupported"); 4
    case InvalidBitmapHeaderError() => StdOut.println("bitmap format unsupported"); 5
    case CorruptedDataError() => StdOut.println("the file appears to be corrupted"); 6
    case ImageTooBigError() => StdOut.println("the image is too big"); 7
    case NoImageError() => StdOut.println("the image is empty"); 8
    case WriteError() => StdOut.println("couldn't write image"); 9
    case NotImplementedError() => StdOut.println("not yet implemented"); 99
  }
/**********************************************************************
* MaybeResult *
**********************************************************************/
  // Basically, MaybeResult[A] is Either[A, B] where B is Status
  abstract class MaybeResult[A] {
    def isDefined = this match {
      case Result(_) => true
      case _ => false
    }
    // Partial: only valid when this is a Result — check isDefined first.
    def getResult: A = {
      this.asInstanceOf[Result[A]].result
    }
    // Partial: only valid when this is a Failure — check !isDefined first.
    def getStatus: Status = {
      this.asInstanceOf[Failure[A]].status
    }
    // Collapses to a Status, mapping any Result to Success().
    def toStatus: Status = {
      if (isDefined) Success()
      else getStatus
    }
  }
  case class Result[A](result: A) extends MaybeResult[A]
  case class Failure[A](status: Status) extends MaybeResult[A] {
  }
  // Extra operations for MaybeResult[Int].
  implicit class MaybeResultIntOps(val result: MaybeResult[Int]) {
    // Keeps the result only if it equals `value`, otherwise DomainError.
    def expect(value: Int): MaybeResult[Int] = result match {
      case Result(res) if res == value => result
      case Result(_) => Failure[Int](DomainError())
      case _ => result // a Failure remains a Failure
    }
  }
  // Combine two, three or four MaybeResult to a MaybeResult of tuple.
  // On multiple failures the first (leftmost) failure's status wins.
  def combine[A, B](a: MaybeResult[A], b: MaybeResult[B]): MaybeResult[(A, B)] = {
    if (a.isDefined) {
      if (b.isDefined) {
        Result((a.getResult, b.getResult))
      } else Failure[(A, B)](b.getStatus)
    } else Failure[(A, B)](a.getStatus)
  }
  // Three-way variant, built by flattening the nested pair from the two-way one.
  def combine[A, B, C](a: MaybeResult[A], b: MaybeResult[B],
                       c: MaybeResult[C]): MaybeResult[(A, B, C)] = {
    val tmp = combine(combine(a, b), c)
    tmp match {
      case Result(((a, b), c)) => Result((a, b, c))
      case Failure(status) => Failure[(A, B, C)](status)
    }
  }
  // Four-way variant, built on top of the three-way one.
  def combine[A, B, C, D](a: MaybeResult[A], b: MaybeResult[B],
                          c: MaybeResult[C], d: MaybeResult[D]): MaybeResult[(A, B, C, D)] = {
    val tmp = combine(combine(a, b, c), d)
    tmp match {
      case Result(((a, b, c), d)) => Result((a, b, c, d))
      case Failure(status) => Failure[(A, B, C, D)](status)
    }
  }
  // Convert an Option to a MaybeResult, using failStatus for None.
  def maybe[A](opt: Option[A], failStatus: Status): MaybeResult[A] = {
    opt match {
      case Some(result) => Result(result)
      case None() => Failure(failStatus)
    }
  }
  // Special DSL for Option: opt.toResultOr(status) reads better at call sites.
  implicit class OptionOps[A](val opt: Option[A]) {
    def toResultOr(failStatus: Status) = {
      maybe(opt, failStatus)
    }
  }
/**********************************************************************
* Data Structures *
**********************************************************************/
  /*
   * Hold (some) information about the general file structure;
   * The file header is 14 bytes, the offset refers to the beginning of the file header.
   */
  case class FileHeader(size: Int, offset: Int) {
  }
  /*
   * Hold basic information about the bitmap.
   *
   * See https://msdn.microsoft.com/en-us/library/dd183376(v=vs.85).aspx
   *
   * NOTE We assume that
   * - The number of bits-per-pixel is 24 (RGB format, 8-bit channels);
   * - No compression is used;
   * - The palette is empty.
   */
  case class BitmapHeader(width: Int, height: Int) {
  }
  /*
   * Represent an Image, using the usual RGB channels.
   *
   * Channels are flat row-major arrays of length w * h.
   *
   * NOTE use createImage to create a new instance of this class easily.
   */
  case class Image(r: Array[Byte], g: Array[Byte], b: Array[Byte], w: Int, h: Int) {
  }
  // Allocates a zero-filled width x height image.
  @inline // <- in order to "return" the image
  def createImage(width: Int, height: Int) = {
    // NOTE: this variant of the case study uses VLA!
    val surface = width * height
    Image(
      Array.fill[Byte](surface)(0),
      Array.fill[Byte](surface)(0),
      Array.fill[Byte](surface)(0),
      width, height
    )
  }
/**********************************************************************
* I/O functions for WORD, DWORD, LONG, and other helpers *
**********************************************************************/
  // Skip a given number of bytes, returning true on success.
  // Reads one byte at a time; stops early if a read fails (EOF or error).
  def skipBytes(fis: FIS, count: Int)(implicit state: leon.io.State): Boolean = {
    var i = 0
    var success = true
    while (success && i < count) {
      val opt = fis.tryReadByte()
      success = opt.isDefined
      i += 1
    }
    success
  }
  // Fill the output with copies of the given byte.
  // Returns false as soon as a write fails (short-circuit on &&).
  @tailrec // <- a good indicator that the C compiler could optimise out the recursion.
  def writeBytes(fos: FOS, byte: Byte, count: Int): Boolean = {
    if (count == 0) true
    else fos.write(byte) && writeBytes(fos, byte, count - 1)
  }
  // Attempt to read a WORD (16-bit unsigned integer).
  // The result is represented using an Int.
  def maybeReadWord(fis: FIS)(implicit state: leon.io.State): MaybeResult[Int] = {
    // From little to big endian
    val byte2 = fis.tryReadByte
    val byte1 = fis.tryReadByte
    if (byte1.isDefined && byte2.isDefined) Result(constructWord(byte1.get, byte2.get))
    else Failure[Int](ReadError())
  }
  // Assembles two bytes (byte1 = high, byte2 = low) into an unsigned 16-bit value.
  private def constructWord(byte1: Byte, byte2: Byte): Int = {
    // Shift range appropriately to respect unsigned numbers representation
    val signed = (byte1 << 8) | (byte2 & 0xff) // has Int type
    val unsigned = if (signed < 0) signed + (2 * 32768) else signed
    unsigned
  }
  // Write a WORD
  def writeWord(fos: FOS, word: Int): Boolean = {
    val (b1, b2) = destructWord(word)
    // From big endian to little endian
    fos.write(b2) && fos.write(b1)
  }
  // Splits an unsigned 16-bit value into (high byte, low byte).
  private def destructWord(word: Int): (Byte, Byte) = {
    // Shift range appropriately to respect integer representation
    val signed = if (word >= 32768) word - (2 * 32768) else word
    val b1 = (signed >>> 8).toByte
    val b2 = signed.toByte
    (b1, b2)
  }
  // Leon proof obligation: destructWord is the inverse of constructWord.
  private def lemmaWord(byte1: Byte, byte2: Byte): Boolean = {
    val word = constructWord(byte1, byte2)
    val (b1, b2) = destructWord(word)
    b1 == byte1 && b2 == byte2
  }.holds
// Attempt to read a DWORD (32-bit unsigned integer).
// The result is represented using an Int, and values bigger than 2^31 - 1 results in DomainError.
def maybeReadDword(fis: FIS)(implicit state: leon.io.State): MaybeResult[Int] = {
// From little to big endian
def buildInt(b1: Byte, b2: Byte, b3: Byte, b4: Byte): Int = {
(b4 << 24) | ((b3 & 0xff) << 16) | ((b2 & 0xff) << 8) | (b1 & 0xff)
}
val byte1 = fis.tryReadByte
val byte2 = fis.tryReadByte
val byte3 = fis.tryReadByte
val byte4 = fis.tryReadByte // the most significant byte
if (byte1.isDefined && byte2.isDefined && byte3.isDefined && byte4.isDefined) {
if (byte4.get >= 0) {
val dword = buildInt(byte1.get, byte2.get, byte3.get, byte4.get)
Result(dword)
} else Failure[Int](DomainError())
} else Failure[Int](ReadError())
}
// Write a DWORD
def writeDword(fos: FOS, dword: Int): Boolean = {
val b4 = (dword >>> 24).toByte
val b3 = (dword >>> 16).toByte
val b2 = (dword >>> 8).toByte
val b1 = dword.toByte
// Big endian to little endian conversion
fos.write(b1) && fos.write(b2) && fos.write(b3) && fos.write(b4)
}
// Attempt to read a LONG (32-bit signed integer).
// The result is represented using an Int.
def maybeReadLong(fis: FIS)(implicit state: leon.io.State): MaybeResult[Int] = {
  // From little to big endian
  // Reassemble four bytes (b1 = least significant) into a single Int.
  def buildInt(b1: Byte, b2: Byte, b3: Byte, b4: Byte): Int = {
    (b4 << 24) | ((b3 & 0xff) << 16) | ((b2 & 0xff) << 8) | (b1 & 0xff)
  }
  // The four reads must stay in this order: each consumes one byte of the stream.
  val byte1 = fis.tryReadByte
  val byte2 = fis.tryReadByte
  val byte3 = fis.tryReadByte
  val byte4 = fis.tryReadByte // the most significant byte
  if (byte1.isDefined && byte2.isDefined && byte3.isDefined && byte4.isDefined) {
    // Signed values are allowed here, so no domain check is needed
    // (unlike maybeReadDword).
    val long = buildInt(byte1.get, byte2.get, byte3.get, byte4.get)
    Result(long)
  } else Failure[Int](ReadError())
}
// Write a LONG
def writeLong(fos: FOS, long: Int): Boolean = {
  // Extract the four bytes, least significant first.
  val lo = long.toByte
  val midLo = (long >>> 8).toByte
  val midHi = (long >>> 16).toByte
  val hi = (long >>> 24).toByte
  // The file format is little endian: emit least significant byte first.
  fos.write(lo) && fos.write(midLo) && fos.write(midHi) && fos.write(hi)
}
/**********************************************************************
* I/O functions for the BMP format *
**********************************************************************/
// Attempt to read the file header.
// Upon success, 14 bytes have been read.
def maybeReadFileHeader(fis: FIS)(implicit state: leon.io.State): MaybeResult[FileHeader] = {
  // Layout consumed here: skip WordSize bytes (presumably the "BM" signature --
  // TODO confirm against the BMP spec), read the file size (DWORD), skip the
  // two reserved WORDs, read the pixel-data offset (DWORD).
  // The skip/read sequence is order-dependent: each call advances the stream.
  var skipSuccess = skipBytes(fis, WordSize)
  val sizeRes = maybeReadDword(fis)
  skipSuccess = skipSuccess && skipBytes(fis, WordSize * 2)
  val offsetRes = maybeReadDword(fis)
  combine(sizeRes, offsetRes) match {
    case _ if !skipSuccess => Failure[FileHeader](ReadError())
    case Failure(status) => Failure[FileHeader](status)
    case Result((size, offset)) => {
      // Sanity check: the file must at least contain the 14-byte file header,
      // and the pixel data must start after both headers but inside the file.
      if (14 <= size && 14 + 40 <= offset && offset <= size) Result(FileHeader(size, offset))
      else Failure[FileHeader](InvalidFileHeaderError())
    }
  }
}
// Attempt to read the bitmap header (minimal version).
// Upon success, 18 bytes have been read.
def maybeReadBitmapHeader(fis: FIS)(implicit state: leon.io.State): MaybeResult[BitmapHeader] = {
  // Layout consumed here: skip the header-size DWORD, read width and height
  // (signed LONGs), skip one WORD (presumably the planes field -- TODO confirm),
  // then read bits-per-pixel and compression as WORDs.
  // The skip/read sequence is order-dependent: each call advances the stream.
  var skipSuccess = skipBytes(fis, DwordSize)
  val widthRes = maybeReadLong(fis)
  val heightRes = maybeReadLong(fis)
  skipSuccess = skipSuccess && skipBytes(fis, WordSize)
  val bppRes = maybeReadWord(fis)
  val compressionRes = maybeReadWord(fis)
  combine(widthRes, heightRes, bppRes, compressionRes) match {
    case _ if !skipSuccess => Failure[BitmapHeader](ReadError())
    case Failure(status) => Failure[BitmapHeader](status)
    case Result((w, h, bpp, compression)) =>
      // Only uncompressed, 24-bit images with non-negative dimensions are supported.
      if (w < 0 || h < 0 || bpp != 24 || compression != 0) {
        Failure(InvalidBitmapHeaderError())
      } else Result(BitmapHeader(w, h))
  }
}
// Read w*h pixels (3 bytes each) from the stream into the image's channels.
// Returns Success, or ReadError if the stream ends early.
// NOTE(review): the three bytes are stored into r, g, b in that order, and no
// per-row padding is skipped; standard BMPs use BGR order and pad rows to a
// multiple of 4 bytes. This matches saveImage below, so files written by this
// program round-trip -- confirm before loading arbitrary BMPs.
def loadImageData(fis: FIS, image: Image)(implicit state: leon.io.State): Status = {
  val size = image.w * image.h
  var i = 0
  var status: Status = Success()
  while (status.isSuccess && i < size) {
    // The three reads must happen in this order (the stream is sequential).
    val rOpt = fis.tryReadByte()
    val gOpt = fis.tryReadByte()
    val bOpt = fis.tryReadByte()
    if (rOpt.isEmpty || gOpt.isEmpty || bOpt.isEmpty) {
      status = ReadError()
    } else {
      image.r(i) = rOpt.get
      image.g(i) = gOpt.get
      image.b(i) = bOpt.get
    }
    i += 1
  }
  status
}
// Write the image to the stream as a 24-bit, uncompressed BMP.
// Returns Success, or WriteError if any write fails.
def saveImage(fos: FOS, image: Image): Status = {
  // Emit the 14-byte BMP file header.
  def writeFileHeader(): Boolean = {
    // Size: the headers and 3 channels per pixel, 1 byte per pixel component.
    val size = 14 + 40 + image.w * image.h * 3
    val reserved = 0 // two WORDs are reserved
    val offset = 14 + 40 // after the two headers
    fos.write(0x42.toByte) && fos.write(0x4d.toByte) && // the signature "BM"
    writeDword(fos, size) &&
    writeWord(fos, reserved) && writeWord(fos, reserved) &&
    writeDword(fos, offset)
  }
  // Emit the 40-byte bitmap info header (1 plane, 24 bpp, no compression).
  def writeBitmapHeader(): Boolean = {
    val size = 40
    val w = image.w
    val h = image.h
    val planes = 1
    val bpp = 24
    val comp = 0
    writeDword(fos, size) &&
    writeLong(fos, w) && writeLong(fos, h) &&
    writeWord(fos, planes) &&
    writeWord(fos, bpp) &&
    writeWord(fos, comp) &&
    writeBytes(fos, 0, 22) // the last 22 bytes are all not relevant for us and are set to 0
  }
  // Emit the pixel data: 3 bytes per pixel in the same channel order that
  // loadImageData reads them back, with no row padding.
  // NOTE(review): standard BMP readers expect BGR order and 4-byte row
  // alignment -- confirm before feeding the output to external tools.
  def writeImage(): Boolean = {
    val count = image.w * image.h
    var i = 0
    var success = true
    while (success && i < count) {
      success = fos.write(image.r(i)) && fos.write(image.g(i)) && fos.write(image.b(i))
      i += 1
    }
    success
  }
  if (writeFileHeader() && writeBitmapHeader() && writeImage()) Success()
  else WriteError()
}
/**********************************************************************
* Kernel & Image Processing Algorithm *
**********************************************************************/
/**
 * A square convolution kernel.
 *
 * @param size   side length of the (square) kernel; expected to be odd so
 *               that it has a well-defined center element
 * @param scale  divisor applied to the weighted sum before clamping
 * @param kernel the size*size weights, stored row-major
 */
case class Kernel(size: Int, scale: Int, kernel: Array[Int]) {
  /*
   * Apply the kernel on the given channel. Return the new value for pixel component
   * at the given index.
   */
  private def apply(channel: Array[Byte], width: Int, height: Int, index: Int): Byte = {
    // Clamping helper: coordinates outside the image replicate the edge pixel.
    def fix(x: Int, side: Int): Int = {
      clamp(x, 0, side - 1)
    }
    // Get the color component at the given position in the range [0, 255]
    def at(col: Int, row: Int): Int = {
      val c = fix(col, width)
      val r = fix(row, height)
      val component = channel(r * width + c) // stored as a signed byte
      // Recover the unsigned value: a negative signed byte b represents b + 256.
      // (Bug fix: the previous code added 255, mapping e.g. -1 to 254 instead
      // of 255; compare with the word-level conversion which adds 2 * 32768.)
      if (component < 0) component + 256 else component
    }
    val mid = size / 2
    val i = index % width
    val j = index / width
    var res = 0
    var p = -mid
    while (p <= mid) {
      var q = -mid
      val oldP = p // Fix p for the inner loop (the invariant is not automatically inferred)
      while (q <= mid) {
        val kcol = p + mid
        val krow = q + mid
        val kidx = krow * size + kcol
        // Here, the += and * operation could overflow
        res += at(i + p, j + q) * kernel(kidx)
        q += 1
      }
      p += 1
    }
    // Normalize and clamp the accumulated sum back into the component range.
    res = clamp(res / scale, 0, 255)
    res.toByte
  }
  /**
   * Convolve every pixel of `src` and store the result in `dest`.
   * Both images are assumed to have the same dimensions.
   */
  def apply(src: Image, dest: Image): Unit = {
    val size = src.w * src.h
    var i = 0
    while (i < size) {
      dest.r(i) = apply(src.r, src.w, src.h, i)
      dest.g(i) = apply(src.g, src.w, src.h, i)
      dest.b(i) = apply(src.b, src.w, src.h, i)
      i += 1
    }
  }
}
/**********************************************************************
* Main Program *
**********************************************************************/
// JVM entry point. Marked @extern so Leon/GenC does not verify it; it only
// forwards to the verified _main and discards its status code.
@extern
def main(args: Array[String]): Unit = _main()
// Run the full benchmark suite; print "ERROR" when any benchmark failed.
// Returns the accumulated status (0 on success).
def _main(): Int = {
  val res = benchmark()
  if (res != 0) StdOut.println("ERROR")
  res
}
// Run the kernel benchmarks over every input image. Each call returns 0 on
// success, so the sum is 0 iff every benchmark succeeded.
def benchmark(): Int = {
  // Note: GenC currently doesn't support string manipulations so
  // we need to hard code some literals.
  implicit val state = leon.io.newState
  benchmarkKernels("../../../input1.bmp", "output1id.bmp", "output1smooth.bmp", "output1emboss.bmp", "output1blur.bmp", "output1edges.bmp", "output1sharpen.bmp") +
  benchmarkKernels("../../../input2.bmp", "output2id.bmp", "output2smooth.bmp", "output2emboss.bmp", "output2blur.bmp", "output2edges.bmp", "output2sharpen.bmp") +
  benchmarkKernels("../../../input3.bmp", "output3id.bmp", "output3smooth.bmp", "output3emboss.bmp", "output3blur.bmp", "output3edges.bmp", "output3sharpen.bmp") +
  benchmarkKernels("../../../input4.bmp", "output4id.bmp", "output4smooth.bmp", "output4emboss.bmp", "output4blur.bmp", "output4edges.bmp", "output4sharpen.bmp") +
  benchmarkKernels("../../../input5.bmp", "output5id.bmp", "output5smooth.bmp", "output5emboss.bmp", "output5blur.bmp", "output5edges.bmp", "output5sharpen.bmp") +
  benchmarkKernels("../../../input6.bmp", "output6id.bmp", "output6smooth.bmp", "output6emboss.bmp", "output6blur.bmp", "output6edges.bmp", "output6sharpen.bmp") +
  benchmarkKernels("../../../input7.bmp", "output7id.bmp", "output7smooth.bmp", "output7emboss.bmp", "output7blur.bmp", "output7edges.bmp", "output7sharpen.bmp") +
  benchmarkKernels("../../../input7big.bmp", "output7bigid.bmp", "output7bigsmooth.bmp", "output7bigemboss.bmp", "output7bigblur.bmp", "output7bigedges.bmp", "output7bigsharpen.bmp")
}
// return 0 on success
// Open the input image and six output files, then benchmark six convolution
// kernels (identity, smooth, emboss, blur, edge detection, sharpen) against
// the input, writing each filtered image to its output file.
// Returns 0 on success, non-zero on any failure.
// NOTE(review): if only some of the seven open() calls succeed, the opened
// streams are never closed (the `else 1` branch closes nothing) -- confirm
// whether FIS/FOS require explicit close in that case.
def benchmarkKernels(in: String,
  outId: String, outSmooth: String, outEmboss: String,
  outBlur: String, outEdges: String, outSharpen: String
)(implicit state: leon.io.State): Int = {
  val input = FIS.open(in)
  val outputId = FOS.open(outId)
  val outputSmooth = FOS.open(outSmooth)
  val outputEmboss = FOS.open(outEmboss)
  val outputBlur = FOS.open(outBlur)
  val outputEdges = FOS.open(outEdges)
  val outputSharpen = FOS.open(outSharpen)
  // 1x1 identity kernel: leaves the image unchanged.
  val kIdentity = Kernel(1, 1, Array(
    1
  ))
  // 3x3 weighted average: mild smoothing.
  val kSmooth = Kernel(3, 10, Array(
    1, 1, 1,
    1, 2, 1,
    1, 1, 1
  ))
  // 3x3 directional gradient: emboss effect.
  val kEmboss = Kernel(3, 1, Array(
    -2, -1, 0,
    -1, 1, 1,
    0, 1, 2
  ))
  // 5x5 box filter: stronger blur.
  val kBlur = Kernel(5, 25, Array(
    1, 1, 1, 1, 1,
    1, 1, 1, 1, 1,
    1, 1, 1, 1, 1,
    1, 1, 1, 1, 1,
    1, 1, 1, 1, 1
  ))
  // 5x5 Laplacian-like kernel: edge detection.
  val kEdges = Kernel(5, 1, Array(
    0, 0, -1, 0, 0,
    0, 0, -1, 0, 0,
    -1, -1, 8, -1, -1,
    0, 0, -1, 0, 0,
    0, 0, -1, 0, 0
  ))
  // 5x5 sharpening kernel.
  val kSharpen = Kernel(5, 8, Array(
    -1, -1, -1, -1, -1,
    -1, 2, 2, 2, -1,
    -1, 2, 8, 2, -1,
    -1, 2, 2, 2, -1,
    -1, -1, -1, -1, -1
  ))
  if (input.isOpen && outputId.isOpen && outputSmooth.isOpen && outputEmboss.isOpen && outputBlur.isOpen && outputEdges.isOpen && outputSharpen.isOpen) {
    val res = loadHeaders(input) match {
      case Failure(_) => 1
      case Result((w, h)) =>
        val image = createImage(w, h)
        val status = loadImageData(input, image)
        if (status.isSuccess) {
          // Image loaded successfully, ready for benchmark!
          // Each process() call returns 0 on success, so the sum is 0 iff
          // every filter succeeded.
          process(in, image, "id", kIdentity, outputId) +
          process(in, image, "smooth", kSmooth, outputSmooth) +
          process(in, image, "emboss", kEmboss, outputEmboss) +
          process(in, image, "blur", kBlur, outputBlur) +
          process(in, image, "edges", kEdges, outputEdges) +
          process(in, image, "sharpen", kSharpen, outputSharpen)
        } else 1
    }
    // Close all streams before reporting the result.
    outputSharpen.close()
    outputEdges.close()
    outputBlur.close()
    outputEmboss.close()
    outputSmooth.close()
    outputId.close()
    input.close()
    res
  } else 1
}
// return 0 on success
// Run `kernel` over `src` BenchmarkRuns times, printing one CSV record
// ("input;filter;millis") per run; the filtered image produced by the last
// run is written to `fos`. Returns 0 on success, 1 if saving the image failed.
def process(in: String, src: Image, filter: String, kernel: Kernel, fos: FOS)(implicit state: leon.io.State): Int = {
  var status = 0
  var i = 0
  while (i < BenchmarkRuns) {
    // Compute the processing time, without I/Os
    val t1 = TimePoint.now()
    val dest = createImage(src.w, src.h)
    kernel.apply(src, dest)
    val t2 = TimePoint.now()
    val ms = TimePoint.elapsedMillis(t1, t2)
    // CSV output
    StdOut.print(in)
    StdOut.print(";")
    StdOut.print(filter)
    StdOut.print(";")
    StdOut.println(ms)
    i += 1
    // Save the image on the last run
    if (i == BenchmarkRuns) {
      status = if (saveImage(fos, dest).isSuccess) 0 else 1
    }
  }
  status
}
// Read and validate both BMP headers, then position the stream at the start
// of the pixel data. Returns the image (width, height) on success.
def loadHeaders(fis: FIS)(implicit state: leon.io.State): MaybeResult[(Int, Int)] = {
  // The two reads are sequential: the file header (14 bytes) precedes the
  // bitmap header (18 bytes consumed) in the stream.
  val fileHeaderRes = maybeReadFileHeader(fis)
  val bitmapHeaderRes = maybeReadBitmapHeader(fis)
  combine(fileHeaderRes, bitmapHeaderRes) match {
    case Failure(status) =>
      Failure(status)
    /*
     * Report an error when the file is corrupted, i.e. it's too small.
     * 40 is the minimal bitmap header size, 14 is the file header size.
     * Note that more sanity check could be done but that's not the main
     * point of this example.
     */
    case Result((fh, bh)) if fh.size <= 14 + 40 =>
      Failure(CorruptedDataError())
    case Result((fh, bh)) =>
      // Skip bytes until the start of the bitmap data
      val toSkip = fh.offset - (14 + 18) // some bytes were already eaten
      val success = skipBytes(fis, toSkip)
      // Break test of size so we avoid overflows.
      if (!success) Failure(CorruptedDataError())
      else if (bh.width == 0 || bh.height == 0) Failure(NoImageError())
      else Result((bh.width, bh.height))
  }
}
}
| mantognini/GenC | 3 master thesis project report/case_studies/Image Processing/benchmark/ImageProcessingVLABenchmark.vc_removed.scala | Scala | mit | 24,231 |
package com.cloudray.scalapress.plugin.ecommerce.tag
import org.scalatest.{OneInstancePerTest, FunSuite}
import org.scalatest.mock.MockitoSugar
import com.cloudray.scalapress.plugin.ecommerce.tags.InvoiceNumberTag
import com.cloudray.scalapress.plugin.ecommerce.domain.Order
import javax.servlet.http.HttpServletRequest
import com.cloudray.scalapress.framework.{ScalapressRequest, ScalapressContext}
/** @author Stephen Samuel */
class InvoiceNumberTagTest extends FunSuite with MockitoSugar with OneInstancePerTest {

  // Fixture: a VAT-able order with a known id.
  val order: Order = {
    val o = new Order
    o.id = 51
    o.vatable = true
    o
  }

  val tag = new InvoiceNumberTag()
  val req = mock[HttpServletRequest]
  val context = mock[ScalapressContext]
  val sreq = new ScalapressRequest(req, context).withOrder(order)

  test("tag renders order id") {
    // The tag should render the order's id as a string.
    val rendered = tag.render(sreq, Map.empty)
    assert("51" === rendered.get)
  }
}
| vidyacraghav/scalapress | src/test/scala/com/cloudray/scalapress/plugin/ecommerce/tag/InvoiceNumberTagTest.scala | Scala | apache-2.0 | 907 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2014 Heiko Blobner
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package de.windelknecht.stup.utils.ui.fxml
import de.windelknecht.stup.utils.ui.BaseResourceLoader
import java.io.{ByteArrayInputStream, File}
import javafx.fxml.FXMLLoader
import javafx.scene.Node
import scala.util.control.NonFatal
/**
 * This object loads fxml files via the FXMLLoader and returns both the
 * controller and the view node.
 * Don't forget to set the base path. Otherwise all fxml files
 * will be prefixed with '.'.
 *
 * @author Heiko Blobner <windelknecht@gmail.com>
 * @version %I%, %G%
 * @since 0.1
 */
object ResourceFxmlLoader
  extends BaseResourceLoader {
  /**
   * This method loads a fxml file and pass the given controller object.
   *
   * @param fileName is the filename to the fxml file
   * @param controller is the controller object for to new created ui fxml node
   * @param isAResource if true, the image is loaded from internal resource path
   * @param isRelativePath is the filename relative or absolute
   * @tparam N is the type parameter from the loaded ui node
   * @tparam C is the type parameter from the controller
   * @return the loaded ui node and the (loaded or passed) controller
   *
   * @throws IOException if there is an I/O error
   */
  def loadFile[N <: Node, C <: AnyRef](
    fileName: String,
    controller: Option[C] = None,
    isAResource: Boolean = true,
    isRelativePath: Boolean = true
  ): (N, C) = {
    trace(s"loading file $fileName (controller=$controller, isAResource=$isAResource, isRelativePath=$isRelativePath)")
    // Dispatch on the source of the file: classpath resource vs. filesystem.
    if(isAResource)
      loadFromResource[N,C](controller = controller, filePath = fileName, isRelativePath = isRelativePath)
    else
      loadFromFile[N,C](controller = controller, filePath = fileName, isRelativePath = isRelativePath)
  }
  /**
   * This method loads a fxml string and pass the given controller object.
   *
   * @param fxml is the fxml content
   * @param controller is the controller object for to new created ui fxml node
   * @tparam N is the type parameter from the loaded ui node
   * @tparam C is the type parameter from the controller
   * @return the loaded ui node and the (loaded or passed) controller
   */
  def loadString[N <: Node, C <: AnyRef](
    fxml: String,
    controller: Option[C] = None
  ): (N, C) = {
    trace(s"loading fxml string (controller=$controller)")
    val fxmlLoader = new FXMLLoader()
    // Only set a controller when the caller supplied one; otherwise the
    // fx:controller attribute in the fxml (if any) is used.
    controller match {
      case Some(x) => fxmlLoader.setController(x)
      case None =>
    }
    fxmlLoader.load(new ByteArrayInputStream(fxml.getBytes))
    (fxmlLoader.getRoot.asInstanceOf[N], fxmlLoader.getController.asInstanceOf[C])
  }
  /**
   * This method loads a local fxml file and returns the ui node and the controller object.
   *
   * @param controller is an eventually existing controller object
   * @param filePath is the filename of the fxml file
   * @param isRelativePath says if the filename is relative or absolute
   * @tparam N is the type of the ui node
   * @tparam C is the type of the controller
   * @return ui node and (loaded or passed) controller
   *
   * @throws IOException if there is an I/O error
   */
  private def loadFromFile[N <: Node, C <: AnyRef](
    controller: Option[C] = None,
    filePath: String,
    isRelativePath: Boolean = true
  ): (N, C) = loadFromURL(controller, new File(if(isRelativePath) buildPath(filePath) else filePath).toURI.toURL)
  /**
   * This method loads a fxml resource file and returns the ui node and the controller object.
   *
   * @param controller is an eventually existing controller object
   * @param filePath is the filename of the fxml file
   * @param isRelativePath says if the filename is relative or absolute
   * @tparam N is the type of the ui node
   * @tparam C is the type of the controller
   * @return ui node and (loaded or passed) controller
   *
   * @throws IOException if there is an I/O error
   */
  private def loadFromResource[N <: Node, C <: AnyRef](
    controller: Option[C] = None,
    filePath: String,
    isRelativePath: Boolean = true
  ): (N, C) = loadFromURL(controller, ClassLoader.getSystemResource(if(isRelativePath) buildPath(filePath) else filePath))
  /**
   * This method loads a fxml file from given url and returns the ui node and the controller object.
   *
   * @param controller is an eventually existing controller object
   * @param fileURL file url location
   * @tparam N is the type of the ui node
   * @tparam C is the type of the controller
   * @return ui node and (loaded or passed) controller
   *
   * @throws IOException if there is an I/O error
   */
  private def loadFromURL[N <: Node, C <: AnyRef](
    controller: Option[C] = None,
    fileURL: java.net.URL
  ): (N, C) = {
    val fxmlLoader = new FXMLLoader(fileURL)
    controller match {
      case Some(x) => fxmlLoader.setController(x)
      case None =>
    }
    try {
      fxmlLoader.load()
    } catch {
      // Bug fix: the previous code caught every Exception, printed a message
      // and continued, so getRoot returned null and callers failed later with
      // no diagnostics. Log with context and rethrow, which also matches the
      // documented @throws contract. NonFatal keeps fatal errors propagating
      // untouched.
      case NonFatal(e) =>
        println(s"failed to load ${fileURL.toString} (err: ${e.getMessage})")
        throw e
    }
    (fxmlLoader.getRoot.asInstanceOf[N], fxmlLoader.getController.asInstanceOf[C])
  }
}
| windelknecht/stup-utils | src/main/scala/de/windelknecht/stup/utils/ui/fxml/ResourceFxmlLoader.scala | Scala | mit | 6,162 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters the dataset and retrieves a sample of Scala code snippets matching specific criteria, giving a basic overview of the dataset's content without revealing deeper insights.