code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1
value | license stringclasses 15
values | size int64 5 1M |
|---|---|---|---|---|---|
package org.jetbrains.plugins.scala.lang.parser.parsing.builder
import com.intellij.lang.PsiBuilder
import collection.mutable.Stack
import org.jetbrains.plugins.scala.lang.parser.util.ParserUtils
import com.intellij.lang.impl.PsiBuilderAdapter
import com.intellij.openapi.util.text.StringUtil
import org.jetbrains.plugins.scala.lang.TokenSets
/**
* @author Alexander Podkhalyuzin
*/
class ScalaPsiBuilderImpl(builder: PsiBuilder)
  extends PsiBuilderAdapter(builder) with ScalaPsiBuilder {
  // Stack of "newlines enabled" flags: the top entry governs whether line
  // breaks before the current token are significant. An empty stack means
  // newlines are enabled (see countNewlineBeforeCurrentToken).
  private final val newlinesEnabled: Stack[Boolean] = new Stack[Boolean]
/** True if at least one line break separates the previous token from the current one. */
def newlineBeforeCurrentToken: Boolean = {
  countNewlineBeforeCurrentToken() > 0
}

/** True if a blank line (i.e. two or more line breaks) precedes the current token. */
def twoNewlinesBeforeCurrentToken: Boolean = {
  countNewlineBeforeCurrentToken() > 1
}
/**
 * Classifies the line breaks between the previous token and the current one,
 * honouring the `newlinesEnabled` stack.
 *
 * @return 0 if new line is disabled here, or there is no '\n' chars between tokens
 *         1 if there is no blank lines between tokens
 *         2 otherwise
 */
private def countNewlineBeforeCurrentToken(): Int = {
  if (!newlinesEnabled.isEmpty && !newlinesEnabled.top) return 0
  if (eof) return 0
  if (!ParserUtils.elementCanStartStatement(getTokenType, this)) return 0
  // Walk backwards over the whitespace/comment tokens that immediately
  // precede the current token.
  var i = 1
  while (i < getCurrentOffset && TokenSets.WHITESPACE_OR_COMMENT_SET.contains(rawLookup(-i))) i += 1
  val textBefore = getOriginalText.subSequence(rawTokenTypeStart(-i + 1), rawTokenTypeStart(0)).toString
  if (!textBefore.contains('\n')) return 0
  // Pad with non-blank markers so that leading/trailing newlines produce
  // detectable empty lines after splitting; a fully-whitespace line in the
  // middle indicates a blank line between the tokens.
  val lines = s"start $textBefore end".split('\n')
  if (lines.exists(_.forall(StringUtil.isWhiteSpace))) 2
  else 1
}
/** Pushes a state in which newlines do not act as statement separators. */
def disableNewlines: Unit = {
  newlinesEnabled.push(false)
}

/** Pushes a state in which newlines act as statement separators. */
def enableNewlines: Unit = {
  newlinesEnabled.push(true)
}

/** Pops the most recently pushed newline state; a matching push must have happened before. */
def restoreNewlinesState: Unit = {
  assert(newlinesEnabled.size >= 1)
  newlinesEnabled.pop()
}
} | consulo/consulo-scala | src/org/jetbrains/plugins/scala/lang/parser/parsing/builder/ScalaPsiBuilderImpl.scala | Scala | apache-2.0 | 1,770 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package views.declaration
import forms.{Form2, InvalidForm, ValidForm}
import generators.ResponsiblePersonGenerator
import jto.validation.{Path, ValidationError}
import models.declaration.WhoIsRegistering
import models.responsiblepeople.{PersonName, ResponsiblePerson}
import org.scalacheck.Gen
import org.scalatest.MustMatchers
import play.api.i18n.Messages
import utils.AmlsViewSpec
import views.Fixture
import views.html.declaration.who_is_registering_this_update
/**
 * View tests for the "who is registering this update" declaration page:
 * verifies page furniture (title, heading, back link), one radio option per
 * responsible person, and error rendering for an invalid form.
 */
class who_is_registering_this_updateSpec extends AmlsViewSpec with MustMatchers with ResponsiblePersonGenerator {

  trait ViewFixture extends Fixture {
    lazy val who_is_registering_this_update = app.injector.instanceOf[who_is_registering_this_update]
    implicit val requestWithToken = addTokenForView()
  }

  "who_is_registering_this_update view" must {
    "have correct title, heading and required fields" in new ViewFixture {
      val form2: ValidForm[WhoIsRegistering] = Form2(WhoIsRegistering("PersonName"))
      val people = Gen.listOfN(2, responsiblePersonGen).sample.get

      def view = who_is_registering_this_update(form2, people)

      doc.title mustBe s"${Messages("declaration.who.is.registering.amendment.title")} - ${Messages("title.amls")} - ${Messages("title.gov")}"
      heading.html must be(Messages("declaration.who.is.registering.amendment.title"))
      subHeading.html must include(Messages("submit.amendment.application"))
      doc.getElementsByAttributeValue("class", "link-back") must not be empty

      // Each responsible person is rendered as a radio option whose value is
      // its index and whose label contains the person's full name.
      people.zipWithIndex.foreach { case (p, i) =>
        val id = s"person-$i"
        doc.getElementById(id).`val`() must be(i.toString)
        doc.getElementById(id).parent.text must include(p.personName.get.fullName)
      }

      // One radio per person plus one extra option.
      doc.select("input[type=radio]").size mustBe people.size + 1
      doc.getElementsContainingOwnText(Messages("declaration.who.is.registering.text")).hasText must be(true)
    }

    "show errors in the correct locations" in new ViewFixture {
      val form2: InvalidForm = InvalidForm(Map.empty,
        Seq(
          // Fixed: jto.validation's path accessor is `\` (the `\\` in the
          // original was an escape artifact).
          (Path \ "person") -> Seq(ValidationError("not a message Key"))
        ))

      def view = who_is_registering_this_update(form2, Seq(ResponsiblePerson()))

      errorSummary.html() must include("not a message Key")
      doc.getElementById("person")
        .getElementsByClass("error-notification").first().html() must include("not a message Key")
    }
  }
}
| hmrc/amls-frontend | test/views/declaration/who_is_registering_this_updateSpec.scala | Scala | apache-2.0 | 3,036 |
package org.scaladebugger.api.profiles.java.requests.vm
import java.util.concurrent.atomic.AtomicBoolean
import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiConstants, ApiTestUtilities, VirtualMachineFixtures}
// Integration test: verifies that a pending VM death request registered on a
// dummy VM fires once the target JVM process is destroyed.
class JavaVMDeathRequestIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("JavaVMDeathRequest") {
    it("should trigger when a virtual machine dies", ApiConstants.NoWindows) {
      val testClass = "org.scaladebugger.test.misc.MainUsingApp"

      // Flipped to true by the death-event handler below.
      val detectedDeath = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()

      // Mark that we want to receive vm death events and watch for one
      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateVMDeathRequest()
        .foreach(_ => detectedDeath.set(true))

      // Start our VM with the pending request attached
      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        // Kill the JVM process so we get a disconnect event
        // NOTE: This does not seem to trigger the VMDeathEvent on Windows
        s.underlyingVirtualMachine.process().destroy()

        // Eventually, we should observe the death event
        logTimeTaken(eventually {
          detectedDeath.get() should be (true)
        })
      }
    }
  }
}
| ensime/scala-debugger | scala-debugger-api/src/it/scala/org/scaladebugger/api/profiles/java/requests/vm/JavaVMDeathRequestIntegrationSpec.scala | Scala | apache-2.0 | 1,471 |
/**
* Copyright 2015 ICT.
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.ac.ict.acs.netflow.util
import cn.ac.ict.acs.netflow.Logging
/**
 * Handler for exceptions that escape NetFlow threads: logs the error and
 * terminates the JVM with an exit code describing the failure, unless we are
 * already inside a shutdown hook (where System.exit would deadlock).
 */
object NetFlowUncaughtExceptionHandler extends Thread.UncaughtExceptionHandler with Logging {
  override def uncaughtException(thread: Thread, exception: Throwable): Unit = {
    try {
      logError("Uncaught exception in thread " + thread, exception)

      // We may have been called from a shutdown hook. If so, we must not call System.exit().
      // (If we do, we will deadlock.)
      if (!Utils.inShutdown()) {
        exception match {
          case _: OutOfMemoryError => System.exit(NetFlowExitCode.OOM)
          case _ => System.exit(NetFlowExitCode.UNCAUGHT_EXCEPTION)
        }
      }
    } catch {
      // Logging or exiting itself failed; halt immediately without running
      // shutdown hooks so we cannot fail a third time.
      case oom: OutOfMemoryError => Runtime.getRuntime.halt(NetFlowExitCode.OOM)
      case t: Throwable => Runtime.getRuntime.halt(NetFlowExitCode.UNCAUGHT_EXCEPTION_TWICE)
    }
  }

  /** Convenience overload that reports an exception for the current thread. */
  def uncaughtException(exception: Throwable): Unit = {
    uncaughtException(Thread.currentThread(), exception)
  }
}
| DataSysLab/netflow | common/src/main/scala/cn/ac/ict/acs/netflow/util/NetFlowUncaughtExceptionHandler.scala | Scala | apache-2.0 | 1,817 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.aggregate
import org.apache.spark.TaskContext
import org.apache.spark.rdd.{MapPartitionsWithPreparationRDD, RDD}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.errors._
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression2
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.execution.{UnaryNode, SparkPlan}
import org.apache.spark.sql.execution.metric.SQLMetrics
/**
 * Physical aggregation operator built on Tungsten's unsafe-row machinery.
 * Delegates the per-partition work to [[TungstenAggregationIterator]], which
 * can fall back to sort-based aggregation (forced early in tests via
 * `spark.sql.TungstenAggregate.testFallbackStartsAt`).
 */
case class TungstenAggregate(
    requiredChildDistributionExpressions: Option[Seq[Expression]],
    groupingExpressions: Seq[NamedExpression],
    nonCompleteAggregateExpressions: Seq[AggregateExpression2],
    completeAggregateExpressions: Seq[AggregateExpression2],
    initialInputBufferOffset: Int,
    resultExpressions: Seq[NamedExpression],
    child: SparkPlan)
  extends UnaryNode {

  // Row counters surfaced as SQL metrics.
  override private[sql] lazy val metrics = Map(
    "numInputRows" -> SQLMetrics.createLongMetric(sparkContext, "number of input rows"),
    "numOutputRows" -> SQLMetrics.createLongMetric(sparkContext, "number of output rows"))

  override def outputsUnsafeRows: Boolean = true

  override def canProcessUnsafeRows: Boolean = true

  override def canProcessSafeRows: Boolean = true

  override def output: Seq[Attribute] = resultExpressions.map(_.toAttribute)

  // An explicitly-required empty expression list means all tuples must be in
  // one partition; a non-empty list clusters by those expressions.
  override def requiredChildDistribution: List[Distribution] = {
    requiredChildDistributionExpressions match {
      case Some(exprs) if exprs.length == 0 => AllTuples :: Nil
      case Some(exprs) if exprs.length > 0 => ClusteredDistribution(exprs) :: Nil
      case None => UnspecifiedDistribution :: Nil
    }
  }

  // This is for testing. We force TungstenAggregationIterator to fall back to sort-based
  // aggregation once it has processed a given number of input rows.
  private val testFallbackStartsAt: Option[Int] = {
    sqlContext.getConf("spark.sql.TungstenAggregate.testFallbackStartsAt", null) match {
      case null | "" => None
      case fallbackStartsAt => Some(fallbackStartsAt.toInt)
    }
  }

  protected override def doExecute(): RDD[InternalRow] = attachTree(this, "execute") {
    val numInputRows = longMetric("numInputRows")
    val numOutputRows = longMetric("numOutputRows")

    /**
     * Set up the underlying unsafe data structures used before computing the parent partition.
     * This makes sure our iterator is not starved by other operators in the same task.
     */
    def preparePartition(): TungstenAggregationIterator = {
      new TungstenAggregationIterator(
        groupingExpressions,
        nonCompleteAggregateExpressions,
        completeAggregateExpressions,
        initialInputBufferOffset,
        resultExpressions,
        newMutableProjection,
        child.output,
        testFallbackStartsAt,
        numInputRows,
        numOutputRows)
    }

    /** Compute a partition using the iterator already set up previously. */
    def executePartition(
        context: TaskContext,
        partitionIndex: Int,
        aggregationIterator: TungstenAggregationIterator,
        parentIterator: Iterator[InternalRow]): Iterator[UnsafeRow] = {
      val hasInput = parentIterator.hasNext
      if (!hasInput) {
        // We're not using the underlying map, so we just can free it here
        aggregationIterator.free()
        if (groupingExpressions.isEmpty) {
          // Non-grouped aggregate over empty input still yields one row
          // (the output for the empty grouping key).
          numOutputRows += 1
          Iterator.single[UnsafeRow](aggregationIterator.outputForEmptyGroupingKeyWithoutInput())
        } else {
          // This is a grouped aggregate and the input iterator is empty,
          // so return an empty iterator.
          Iterator.empty
        }
      } else {
        aggregationIterator.start(parentIterator)
        aggregationIterator
      }
    }

    // Note: we need to set up the iterator in each partition before computing the
    // parent partition, so we cannot simply use `mapPartitions` here (SPARK-9747).
    val resultRdd = {
      new MapPartitionsWithPreparationRDD[UnsafeRow, InternalRow, TungstenAggregationIterator](
        child.execute(), preparePartition, executePartition, preservesPartitioning = true)
    }
    resultRdd.asInstanceOf[RDD[InternalRow]]
  }

  override def simpleString: String = {
    val allAggregateExpressions = nonCompleteAggregateExpressions ++ completeAggregateExpressions

    testFallbackStartsAt match {
      case None =>
        val keyString = groupingExpressions.mkString("[", ",", "]")
        val functionString = allAggregateExpressions.mkString("[", ",", "]")
        val outputString = output.mkString("[", ",", "]")
        s"TungstenAggregate(key=$keyString, functions=$functionString, output=$outputString)"
      case Some(fallbackStartsAt) =>
        s"TungstenAggregateWithControlledFallback $groupingExpressions " +
          s"$allAggregateExpressions $resultExpressions fallbackStartsAt=$fallbackStartsAt"
    }
  }
}
| tophua/spark1.52 | sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregate.scala | Scala | apache-2.0 | 6,254 |
package cakesolutions.kafka
import java.util.concurrent.TimeUnit
import cakesolutions.kafka.TypesafeConfigExtensions._
import com.typesafe.config.Config
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.clients.producer.{Callback, ProducerConfig, ProducerRecord, RecordMetadata, KafkaProducer => JKafkaProducer, Producer => JProducer}
import org.apache.kafka.common.{PartitionInfo, TopicPartition}
import org.apache.kafka.common.serialization.Serializer
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Future, Promise}
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}
/**
 * Scala-friendly view of a Kafka producer: send results are surfaced as Scala
 * `Future`s or `Try`-based callbacks rather than Java futures/callbacks, and
 * transactional operations of the underlying client are exposed directly.
 */
trait KafkaProducerLike[K, V] {
  /**
   * Asynchronously send a record to a topic.
   *
   * @param record `ProducerRecord` to send
   * @return the result of the sent record as a `Future`
   */
  def send(record: ProducerRecord[K, V]): Future[RecordMetadata]

  /**
   * Asynchronously send a record to a topic and invoke the provided callback when the send has been acknowledged.
   *
   * @param record `ProducerRecord` to send
   * @param callback callback that is called when the send has been acknowledged
   */
  def sendWithCallback(record: ProducerRecord[K, V])(callback: Try[RecordMetadata] => Unit): Unit

  /**
   * Make all buffered records immediately available to send and wait until records have been sent.
   *
   * @see Java `KafkaProducer` [[http://kafka.apache.org/0110/javadoc/org/apache/kafka/clients/producer/KafkaProducer.html#flush() flush]] method
   */
  def flush(): Unit

  /**
   * Get the partition metadata for the give topic.
   *
   * @see Java `KafkaProducer` [[http://kafka.apache.org/0110/javadoc/org/apache/kafka/clients/producer/KafkaProducer.html#partitionsFor(java.lang.String) partitionsFor]] method
   */
  def partitionsFor(topic: String): List[PartitionInfo]

  /**
   * Initialise a transaction.
   *
   * @see Java `KafkaProducer` [[http://kafka.apache.org/0110/javadoc/org/apache/kafka/clients/producer/KafkaProducer.html#initTransactions() initTransactions]] method
   */
  def initTransactions(): Unit

  /**
   * Begin the transaction.
   *
   * @see Java `KafkaProducer` [[http://kafka.apache.org/0110/javadoc/org/apache/kafka/clients/producer/KafkaProducer.html#beginTransaction() beginTransactions]] method
   */
  def beginTransaction(): Unit

  /**
   * Sends a list of consumed offsets to the consumer group coordinator, and also marks those offsets as part of the current transaction.
   *
   * @see Java `KafkaProducer` [[http://kafka.apache.org/0110/javadoc/org/apache/kafka/clients/producer/KafkaProducer.html#sendOffsetsToTransaction(java.util.Map,%20java.lang.String) sendOffsetsToTransaction]] method
   */
  def sendOffsetsToTransaction(offsets: Map[TopicPartition, OffsetAndMetadata], consumerGroupId: String): Unit

  /**
   * Commits the transaction.
   *
   * @see Java `KafkaProducer` [[http://kafka.apache.org/0110/javadoc/org/apache/kafka/clients/producer/KafkaProducer.html#commitTransaction() commitTransaction]] method
   */
  def commitTransaction(): Unit

  /**
   * Aborts the transaction.
   *
   * @see Java `KafkaProducer` [[http://kafka.apache.org/0110/javadoc/org/apache/kafka/clients/producer/KafkaProducer.html#abortTransaction() abortTransaction]] method
   */
  def abortTransaction(): Unit

  /**
   * Close this producer.
   *
   * @see Java `KafkaProducer` [[http://kafka.apache.org/0110/javadoc/org/apache/kafka/clients/producer/KafkaProducer.html#close() close]] method
   */
  def close(): Unit

  /**
   * Close this producer, waiting at most the given timeout for outstanding sends.
   *
   * @see Java `KafkaProducer` [[http://kafka.apache.org/0110/javadoc/org/apache/kafka/clients/producer/Producer.html#close(long,%20java.util.concurrent.TimeUnit) close]] method
   */
  def close(timeout: FiniteDuration): Unit
}
/**
 * Utilities for creating a Kafka producer.
 *
 * This companion object provides tools for creating Kafka producers.
 */
object KafkaProducer {

  /**
   * Utilities for creating Kafka producer configurations.
   */
  object Conf {

    /**
     * Kafka producer configuration constructor with common configurations as parameters.
     * For more detailed configuration, use the other [[Conf]] constructors.
     *
     * @param keySerializer the serialiser for the key
     * @param valueSerializer the serialiser for the value
     * @param bootstrapServers a list of host/port pairs to use for establishing the initial connection to the Kafka cluster.
     * @param acks the number of acknowledgments the producer requires the leader to have received before considering a request complete
     * @param retries how many times sending is retried
     * @param batchSize the size of the batch of sent messages in bytes
     * @param lingerMs how long will the producer wait for additional messages before it sends a batch
     * @param bufferMemory the total bytes of memory the producer can use to buffer records waiting to be sent to the server
     * @param enableIdempotence when set to true, the producer will ensure that exactly one copy of each message is written in the stream.
     * @param transactionalId transactional id to set on the producer, if any
     * @tparam K key serialiser type
     * @tparam V value serialiser type
     * @return producer configuration consisting of all the given values
     */
    def apply[K, V](
      keySerializer: Serializer[K],
      valueSerializer: Serializer[V],
      bootstrapServers: String = "localhost:9092",
      acks: String = "all",
      retries: Int = 0,
      batchSize: Int = 16384,
      lingerMs: Int = 1,
      bufferMemory: Int = 33554432,
      enableIdempotence: Boolean = false,
      transactionalId: Option[String] = None
    ): Conf[K, V] = {

      val configMap = mutable.Map[String, AnyRef](
        ProducerConfig.BOOTSTRAP_SERVERS_CONFIG -> bootstrapServers,
        ProducerConfig.ACKS_CONFIG -> acks,
        ProducerConfig.BATCH_SIZE_CONFIG -> batchSize.toString,
        ProducerConfig.LINGER_MS_CONFIG -> lingerMs.toString,
        ProducerConfig.BUFFER_MEMORY_CONFIG -> bufferMemory.toString,
        ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG -> enableIdempotence.toString
      )

      // Must only explicitly set if differs from default
      if (retries != 0) {
        configMap.put(ProducerConfig.RETRIES_CONFIG, retries.toString)
      }

      // tid is already a String, so no conversion is needed.
      transactionalId.foreach(tid =>
        configMap.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, tid)
      )

      apply(configMap.toMap, keySerializer, valueSerializer)
    }

    /**
     * Creates a Kafka producer configuration from a Typesafe config.
     *
     * The configuration names and values must match the Kafka's `ProducerConfig` style.
     *
     * @param config a Typesafe config to build configuration from
     * @param keySerializer serialiser for the key
     * @param valueSerializer serialiser for the value
     * @tparam K key serialiser type
     * @tparam V value serialiser type
     * @return consumer configuration
     */
    def apply[K, V](config: Config, keySerializer: Serializer[K], valueSerializer: Serializer[V]): Conf[K, V] =
      Conf(config.toPropertyMap, keySerializer, valueSerializer)
  }

  /**
   * Configuration object for the Kafka producer.
   *
   * The config is compatible with Kafka's `ProducerConfig`.
   * All the key-value properties are specified in the given map, except the serializers.
   * The key and value serialiser instances are provided explicitly to ensure type-safety.
   *
   * @param props map of `ProducerConfig` properties
   * @tparam K key serializer type
   * @tparam V value serializer type
   */
  final case class Conf[K, V](
    props: Map[String, AnyRef],
    keySerializer: Serializer[K],
    valueSerializer: Serializer[V]
  ) {

    /**
     * Extend the config with additional Typesafe config.
     * The supplied config overrides existing properties.
     */
    def withConf(config: Config): Conf[K, V] =
      copy(props = props ++ config.toPropertyMap)

    /**
     * Extend the configuration with a single key-value pair.
     */
    def withProperty(key: String, value: AnyRef): Conf[K, V] =
      copy(props = props + (key -> value))
  }

  /**
   * Create [[KafkaProducer]] from given configurations.
   *
   * @param conf the configurations for the producer
   * @tparam K type of the key that the producer accepts
   * @tparam V type of the value that the producer accepts
   * @return Kafka producer instance
   */
  def apply[K, V](conf: Conf[K, V]): KafkaProducer[K, V] = {
    // Serializers are configured explicitly because they are passed to the
    // Java producer as instances rather than via the property map.
    conf.keySerializer.configure(conf.props.asJava, true)
    conf.valueSerializer.configure(conf.props.asJava, false)
    apply(new JKafkaProducer[K, V](conf.props.asJava, conf.keySerializer, conf.valueSerializer))
  }

  /**
   * Create [[KafkaProducer]] from a given Java `KafkaProducer` object.
   *
   * @param producer Java `KafkaProducer` object
   * @tparam K type of the key that the producer accepts
   * @tparam V type of the value that the producer accepts
   * @return Kafka producer instance
   */
  def apply[K, V](producer: JProducer[K, V]): KafkaProducer[K, V] =
    new KafkaProducer(producer)
}
/**
* Wraps the Java `KafkaProducer`
* providing send operations that indicate the result of the operation with either
* a Scala `Future` or a Function callback.
*
* @param producer the underlying Java `KafkaProducer`
* @tparam K type of the key that the producer accepts
* @tparam V type of the value that the producer accepts
*/
final class KafkaProducer[K, V](val producer: JProducer[K, V]) extends KafkaProducerLike[K, V] {

  override def send(record: ProducerRecord[K, V]): Future[RecordMetadata] = {
    val promise = Promise[RecordMetadata]()
    try {
      // The Java client can also throw synchronously from send(); route such
      // failures through the same Future as asynchronous ones.
      producer.send(record, producerCallback(promise))
    } catch {
      case NonFatal(e) => promise.failure(e)
    }

    promise.future
  }

  override def sendWithCallback(record: ProducerRecord[K, V])(callback: Try[RecordMetadata] => Unit): Unit =
    producer.send(record, producerCallback(callback))

  override def flush(): Unit =
    producer.flush()

  override def partitionsFor(topic: String): List[PartitionInfo] =
    producer.partitionsFor(topic).asScala.toList

  override def initTransactions(): Unit =
    producer.initTransactions()

  override def beginTransaction(): Unit =
    producer.beginTransaction()

  override def sendOffsetsToTransaction(offsets: Map[TopicPartition, OffsetAndMetadata], consumerGroupId: String): Unit =
    producer.sendOffsetsToTransaction(offsets.asJava, consumerGroupId)

  override def commitTransaction(): Unit =
    producer.commitTransaction()

  override def abortTransaction(): Unit =
    producer.abortTransaction()

  override def close(): Unit =
    producer.close()

  override def close(timeout: FiniteDuration): Unit =
    producer.close(timeout.toMillis, TimeUnit.MILLISECONDS)

  // Adapts a Promise into the Java client's Callback interface.
  private def producerCallback(promise: Promise[RecordMetadata]): Callback =
    producerCallback(result => promise.complete(result))

  // Adapts a Try-consuming function into the Java client's Callback interface.
  private def producerCallback(callback: Try[RecordMetadata] => Unit): Callback =
    new Callback {
      override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit = {
        // Per the Kafka Callback contract, a null exception means success.
        val result =
          if (exception == null) Success(metadata)
          else Failure(exception)
        callback(result)
      }
    }
} | simonsouter/scala-kafka-client | client/src/main/scala/cakesolutions/kafka/KafkaProducer.scala | Scala | mit | 11,514 |
package com.twitter.finatra.http.tests.integration.startup
import com.twitter.finagle.http.Request
import com.twitter.finatra.http.routing.HttpRouter
import com.twitter.finatra.http.EmbeddedHttpServer
import com.twitter.finatra.http.{Controller, HttpServer}
import com.twitter.inject.Test
class HttpServerStartupIntegrationTest extends Test {
"Duplicate route paths fails server startup" in {
val server = new EmbeddedHttpServer(
twitterServer = new HttpServer {
override def configureHttp(router: HttpRouter): Unit = {
router.add(new Controller {
get("/foo") { request: Request =>
}
get("/foo") { request: Request =>
}
})
}
})
try {
val e = intercept[AssertionError] {
server.start()
}
e.getMessage should be("assertion failed: Found non-unique routes GET /foo")
}
finally {
server.close()
}
}
"Empty callbacks fails server startup" in {
val server = new EmbeddedHttpServer(
twitterServer = new HttpServer {
override def configureHttp(router: HttpRouter): Unit = {
router.add(new Controller {
get("/nothing") {
"nothing"
}
})
}
})
intercept[Exception] {
server.start()
}
server.close()
}
"Callback with parameter of type Int fails server startup" in {
val server = new EmbeddedHttpServer(
twitterServer = new HttpServer {
override def configureHttp(router: HttpRouter): Unit = {
router.add(new Controller {
get("/int") { r: Int =>
"int"
}
})
}
})
intercept[Exception] {
server.start()
}
server.close()
}
}
| syamantm/finatra | http/src/test/scala/com/twitter/finatra/http/tests/integration/startup/HttpServerStartupIntegrationTest.scala | Scala | apache-2.0 | 1,798 |
package com.github.yoskhdia.sqscala
case class MessageId(id: String)
| yoskhdia/sqscala | src/main/scala/com/github/yoskhdia/sqscala/MessageId.scala | Scala | mit | 70 |
/*
Webscripter - a simple html creator
Copyright (C) 2010 Davide Inglima
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Contact: limacat@gmail.com
*/
package net.limacat.webscripter
import java.util.List
import java.util.LinkedList
import java.util.NoSuchElementException
abstract class Mapper[T] {
protected def getValueFrom(line: String): String = {
if (line.indexOf(";") > -1) {
line.substring(0, line.indexOf(";"))
} else {
line
}
}
def readFrom(field: String): T
def isBlank(read: T): Boolean = {
read == null;
}
}
object StringMapper extends Mapper[String] {
def readFrom(field: String): String = {
getValueFrom(field).trim()
}
override def isBlank(read: String): Boolean = {
read == null || "".equals(read.trim());
}
}
case object CategoriaMapper extends MadMapper[Categoria](Map("C" -> CASUAL, "H" -> HARDCORE, "P" -> PSN, "U" -> SCONOSCIUTO, "Z" -> SHOVELWARE))
case object ControllerMapper extends MadMapper[Controller](Map("1" -> C1, "2" -> C2, "N" -> C1N, "U" -> CNA))
case object SupportoMapper extends MadMapper[Supporto](Map("C" -> SC, "P" -> SP, "F" -> SF, "U" -> SU))
class MadMapper[T >: Null <: AnyRef](collection: Map[String, T]) extends Mapper[T] {
def readFrom(field: String): T = {
try {
collection(getValueFrom(field).trim)
} catch {
case exc: NoSuchElementException => null
}
}
}
case object MetacriticLinkMapper extends LinkMapper("metacritic");
case object TrailerLinkMapper extends LinkMapper("trailer");
class LinkMapper(val linkName: String) extends Mapper[Link] {
def readFrom(field: String): Link = {
new Link(linkName, getValueFrom(field).trim())
}
}
object ArrayLinkMapper extends Mapper[Array[Link]] {
def readNextLink(index: Int, field: String): scala.List[Link] = {
var tail: scala.List[Link] = Nil
var xB: String = field;
if (field.indexOf(",") > -1) {
val subString = field.substring(field.indexOf(",") + 1)
tail = readNextLink(index + 1, subString)
xB = field.substring(0, field.indexOf(","))
}
xB = xB.trim
if (xB.length == 0) {
Nil
} else {
new Link(index.toString(), xB.trim()) :: tail;
}
}
def mySubString(theField: String): String = {
var myField = theField
if (myField.indexOf(";") > -1) {
myField = myField.substring(0, myField.indexOf(";"));
}
myField
}
def readFrom(field: String): Array[Link] = {
val x: scala.List[Link] = readNextLink(1, mySubString(field));
val size = x.length;
val y: Array[Link] = new Array[Link](size);
for (i <- 0 until size) {
y(i) = x(i)
}
y
}
}
class LicenseReader() {
def readLine(line: String): License = {
new License(line.substring("@LICENSE".length).trim)
}
}
class GameLineReader() {
private def readField[T](line: String, mapper: Mapper[T]): (T, String) = {
readField(line, mapper, "", null);
}
private def getNext(line: String): String = {
if (line.indexOf(";") > -1) {
line.substring(line.indexOf(";") + 1);
} else {
line;
}
}
private def readField[T](line: String, mapper: Mapper[T], name: String, fieldInErrors: List[String]): (T, String) = {
var next = getNext(line);
var value = mapper.readFrom(line);
if (fieldInErrors != null && mapper.isBlank(value)) {
fieldInErrors.add(name);
}
(value, next);
}
def readLine(line: String): Game = {
val fieldInErrors: List[String] = new LinkedList[String]
val title = readField(line, StringMapper, "title", fieldInErrors)
val cathegory = readField(title._2, CategoriaMapper, "cathegory", fieldInErrors)
val exit = readField(cathegory._2, StringMapper, "exitDate", fieldInErrors)
val controller = readField(exit._2, ControllerMapper, "controller", fieldInErrors)
val supporto = readField(controller._2, SupportoMapper, "supporto", fieldInErrors)
val metacritic = readField(supporto._2, MetacriticLinkMapper);
val trailer = readField(metacritic._2, TrailerLinkMapper);
val links = readField(trailer._2, ArrayLinkMapper);
val note = readField(links._2, StringMapper);
if (fieldInErrors.size > 0) {
throw new FormatException(fieldInErrors);
}
new Game(title._1, cathegory._1, exit._1, controller._1, supporto._1, metacritic._1, trailer._1, links._1, note._1)
}
}
class FormatException(val fieldsInError: List[String]) extends RuntimeException() {
}
| BackupTheBerlios/webscripter | src/net/limacat/webscripter/Reader.scala | Scala | agpl-3.0 | 5,266 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package lagom
import sbt._
import sbtunidoc.BaseUnidocPlugin
import sbtunidoc.GenJavadocPlugin
import sbtunidoc.JavaUnidocPlugin
import sbtunidoc.ScalaUnidocPlugin
import sbtunidoc.BaseUnidocPlugin.autoImport._
import sbtunidoc.JavaUnidocPlugin.autoImport._
import sbtunidoc.GenJavadocPlugin.autoImport._
import sbtunidoc.ScalaUnidocPlugin.autoImport._
import sbt.Keys._
import sbt.File
import sbt.ScopeFilter.ProjectFilter
object Scaladoc extends AutoPlugin {
object CliOptions {
val scaladocAutoAPI = CliOption("lagom.scaladoc.autoapi", true)
}
override def trigger = allRequirements
override def requires = plugins.JvmPlugin
override lazy val projectSettings = {
inTask(doc)(
Seq(
scalacOptions in Compile ++= scaladocOptions(version.value, (baseDirectory in ThisBuild).value),
autoAPIMappings := CliOptions.scaladocAutoAPI.get
)
)
}
def scaladocOptions(ver: String, base: File): List[String] = {
val urlString = GitHub.url(ver) + "/€{FILE_PATH}.scala"
val opts = List("-implicits", "-doc-source-url", urlString, "-sourcepath", base.getAbsolutePath)
opts
}
}
/**
* Unidoc settings for root project. Adds unidoc command.
*/
/**
 * Unidoc settings for root project. Adds unidoc command.
 */
object UnidocRoot extends AutoPlugin {
  // Opt-in only: the root build enables this explicitly via UnidocRoot.settings(...).
  override def trigger = noTrigger
  override def requires = ScalaUnidocPlugin && JavaUnidocPlugin
  /** Builds a project filter selecting exactly the given projects
    * (transitive-dependency traversal is deliberately disabled, see commented line). */
  private def projectsAndDependencies(projects: Seq[ProjectReference]): ProjectFilter = {
    //projects.map(p => inDependencies(p, transitive = true, includeRoot = true)).reduce(_ || _)
    projects.map(p => inProjects(p)).reduce(_ || _)
  }
  /**
   * @param javadslProjects javadsl Projects (will only appear on the javadocs)
   * @param scaladslProjects scaladsl Projects (will only appear in scaladocs)
   * @param otherProjects a random list of other projects (mostly ***-core) added in both scaladocs
   *                      and javadocs. This is meant for projects which implement code that's public
   *                      API in both javadsl and scaladsl such as `projections-core`
   * @return The unidoc-specific setting enabling scaladoc or javadoc generation for each of the provided
   *         ProjectReference in the arguments.
   */
  def settings(
      javadslProjects: Seq[ProjectReference],
      scaladslProjects: Seq[ProjectReference],
      otherProjects: ProjectReference*
  ) = {
    inTask(unidoc)(
      Seq(
        unidocProjectFilter in ScalaUnidoc := projectsAndDependencies(scaladslProjects ++ otherProjects),
        unidocProjectFilter in JavaUnidoc := projectsAndDependencies(javadslProjects ++ otherProjects),
        autoAPIMappings in ScalaUnidoc := true
      )
    )
  }
  // Path fragments: any genjavadoc-generated file whose path contains one of these
  // is excluded from the aggregated javadoc.
  def excludeJavadoc = Set("internal", "protobuf", "scaladsl")
  // All genjavadoc-generated sources across the projects selected for the Java unidoc scope.
  private val allGenjavadocSources = Def.taskDyn {
    (sources in (Genjavadoc, doc)).all((unidocScopeFilter in (JavaUnidoc, unidoc)).value)
  }
  /**
   * This ensures that we can link to the frames version of a page (ie, instead of api/foo/Bar.html,
   * link to api/index.html?foo/Bar.html), while still being able to also link to a specific method.
   *
   * It checks whether the current window is a class frame (rather than the package frame, or
   * the top level window if not using frames), and if the top window has a hash, takes the
   * current frame to that hash.
   *
   * I'm not sure exactly how this string is processed by what and where, but it seems escaping
   * newlines and double quotes makes it work with javadoc.
   */
  private val framesHashScrollingCode =
    """<script type="text/javascript">
      |  if (window.name == "classFrame" && window.top.location.hash) {
      |    window.location.href = window.top.location.hash;
      |  }
      |</script>""".stripMargin.replaceAll("\\n", "\\\\\\\\n").replaceAll("\\"", "\\\\\\\\\\"")
  /**
   * JDK 1.8.0_121 introduced a restriction that prevents the inclusion of JS inside generated
   * javadoc HTML files. That check can be disabled but requires an extra argument.
   */
  private val JavaBuildVersion = """1\\.8\\.0_(\\d+)""".r
  private val enableScriptsArgs = sys.props.get("java.version") match {
    case Some(JavaBuildVersion(build)) if build.toInt < 121 => Nil
    case _                                                  => Seq("--allow-script-in-comments")
  }
  override lazy val projectSettings = Seq(
    unidocAllSources in (JavaUnidoc, unidoc) ++= allGenjavadocSources.value,
    // Drop internal/protobuf/scaladsl sources from the javadoc after aggregation.
    unidocAllSources in (JavaUnidoc, unidoc) := {
      (unidocAllSources in (JavaUnidoc, unidoc)).value
        .map(_.filterNot(f => excludeJavadoc.exists(f.getCanonicalPath.contains)))
    },
    // Override the Scala unidoc target to *not* include the Scala version, since we don't cross-build docs
    target in (ScalaUnidoc, unidoc) := target.value / "unidoc",
    scalacOptions in (ScalaUnidoc, unidoc) ++= Seq("-skip-packages", "com.lightbend.lagom.internal"),
    javacOptions in doc := Seq(
      "-windowtitle",
      "Lagom Services API",
      // Adding a user agent when we run `javadoc` is necessary to create link docs
      // with Akka (at least, maybe play too) because doc.akka.io is served by Cloudflare
      // which blocks requests without a User-Agent header.
      "-J-Dhttp.agent=Lagom-Unidoc-Javadoc",
      "-link",
      "https://docs.oracle.com/javase/8/docs/api/",
      "-link",
      "https://doc.akka.io/japi/akka/current/",
      "-link",
      "https://doc.akka.io/japi/akka-http/current/",
      "-link",
      "https://www.playframework.com/documentation/latest/api/java/",
      "-public",
      "-group",
      "Services API",
      packageList(
        "com.lightbend.lagom.javadsl",
        "com.lightbend.lagom.javadsl.api",
        "com.lightbend.lagom.javadsl.client",
        "com.lightbend.lagom.javadsl.server",
        "com.lightbend.lagom.javadsl.api.deser",
        "com.lightbend.lagom.javadsl.api.paging"
      ),
      "-group",
      "Persistence",
      packageList(
        "com.lightbend.lagom.javadsl.persistence",
        "com.lightbend.lagom.javadsl.persistence.cassandra",
        "com.lightbend.lagom.javadsl.persistence.cassandra.testkit",
        "com.lightbend.lagom.javadsl.persistence.jdbc",
        "com.lightbend.lagom.javadsl.persistence.jdbc.testkit",
        "com.lightbend.lagom.javadsl.persistence.jpa",
        "com.lightbend.lagom.javadsl.persistence.testkit"
      ),
      "-group",
      "Cluster",
      packageList(
        "com.lightbend.lagom.javadsl.pubsub",
        "com.lightbend.lagom.javadsl.cluster"
      ),
      "-group",
      "Projection",
      packageList(
        "com.lightbend.lagom.javadsl.projection",
        "com.lightbend.lagom.projection"
      ),
      "-group",
      "Message Broker",
      packageList(
        "com.lightbend.lagom.javadsl.api.broker",
        "com.lightbend.lagom.javadsl.api.broker.kafka",
        "com.lightbend.lagom.javadsl.broker",
        "com.lightbend.lagom.javadsl.broker.kafka"
      ),
      "-noqualifier",
      "java.lang",
      "-encoding",
      "UTF-8",
      "-source",
      "1.8",
      "-notimestamp",
      "-footer",
      framesHashScrollingCode
    )
      ++ enableScriptsArgs
  )
  /** Joins package names into the colon-separated list expected by javadoc's -group option. */
  def packageList(names: String*): String =
    names.mkString(":")
}
}
/**
* Unidoc settings for every multi-project. Adds genjavadoc specific settings.
*/
/**
 * Unidoc settings for every multi-project. Adds genjavadoc specific settings.
 */
object Unidoc extends AutoPlugin {
  // Hidden ivy configuration used only to resolve the genjavadoc compiler plugin jar.
  lazy val GenjavadocCompilerPlugin = config("genjavadocplugin") hide
  override def requires = plugins.JvmPlugin
  override def projectConfigurations: Seq[Configuration] = Seq(Genjavadoc)
  // Define a new compile task in the genjavadoc configuration that enables genjavadoc
  // This is so that we don't generate the javadoc code on every Scala compile, but only when we actually want to
  // build the javadocs.
  // This means scalac actually will be invoked 3 times any time a publishLocal is done - this can probably be optimised
  // down to two assuming https://github.com/typesafehub/genjavadoc/issues/66 is possible.
  override lazy val projectSettings = inConfig(Genjavadoc)(Defaults.configSettings) ++ Seq(
    ivyConfigurations += GenjavadocCompilerPlugin,
    // The plugin is cross-versioned against the full Scala version (compiler plugin requirement).
    libraryDependencies += ("com.typesafe.genjavadoc" % "genjavadoc-plugin" % "0.16" % "genjavadocplugin->default(compile)")
      .cross(CrossVersion.full),
    scalacOptions in Genjavadoc ++= Seq(
      "-P:genjavadoc:out=" + (target.value / "java"),
      "-P:genjavadoc:fabricateParams=false"
    ),
    // Point scalac at the resolved genjavadoc plugin jar(s).
    scalacOptions in Genjavadoc ++=
      update.value
        .matching(configurationFilter(GenjavadocCompilerPlugin.name))
        .filter(_.getName.contains("genjavadoc"))
        .map("-Xplugin:" + _.getAbsolutePath),
    sources in Genjavadoc := (sources in Compile).value,
    // The doc sources are the .java files genjavadoc emitted during the Genjavadoc compile.
    sources in (Genjavadoc, doc) := {
      val _ = (compile in Genjavadoc).value
      (target.value / "java" ** "*.java").get
    },
    dependencyClasspath in Genjavadoc := (dependencyClasspath in Compile).value
  )
}
| lagom/lagom | project/Doc.scala | Scala | apache-2.0 | 9,003 |
package at.logic.gapt.formats.tptp
import at.logic.gapt.expr.{ Bottom, FOLFormula }
import at.logic.gapt.expr.hol.{ univclosure, CNFn, CNFp }
import at.logic.gapt.proofs.{ Sequent, FOLClause }
import at.logic.gapt.proofs.sketch.{ SketchAxiom, SketchInference, RefutationSketch }
import scala.collection.mutable
/** Parser-combinator grammar for TPTP proof output: annotated cnf/fof steps
  * plus the "general term" annotation language used for justifications. */
class TptpProofParser extends TPTPParser {
  /** A parsed proof: for each step label, its language ("cnf"/"fof"), role,
    * formula, and justification/annotation terms. */
  type StepList = Seq[( String, ( String, String, FOLFormula, List[GeneralTerm] ) )]
  // Lines starting with # or % are comments and are discarded.
  def comment: Parser[Unit] = """[#%](.*)\\n""".r ^^ { _ => () }
  /** One annotated formula, e.g. cnf(label, role, formula, annotations...). */
  def step: Parser[( String, ( String, String, FOLFormula, List[GeneralTerm] ) )] = ( "cnf" | "fof" ) ~ "(" ~ name ~ "," ~ name ~ "," ~ formula ~ ( "," ~> general_term ).* ~ ")." ^^ {
    case lang ~ _ ~ num ~ _ ~ name ~ _ ~ clause ~ just ~ _ =>
      num -> ( lang, name, clause, just )
  }
  // Minimal AST for TPTP "general terms" (annotations): lists, applied names, integers.
  sealed trait GeneralTerm
  case class GTList( elements: Seq[GeneralTerm] ) extends GeneralTerm
  case class GTFun( name: String, args: Seq[GeneralTerm] ) extends GeneralTerm
  case class GTInt( int: Int ) extends GeneralTerm
  // Embedded $fot/$cnf terms are parsed but their payload is discarded.
  // NOTE(review): the "$cnf" alternative also produces GTFun("$fot", ...) — looks like a
  // copy-paste from the "$fot" branch; confirm whether "$cnf" was intended.
  def general_term: Parser[GeneralTerm] = "[" ~> repsep( general_term, "," ).^^ { GTList( _ ) } <~ "]" |
    ( name.^^ { GTFun( _, Seq() ) } <~ ":" <~ general_term ) | variable.^^ { v => GTFun( v.name, Seq() ) } |
    ( "$fot" ~ "(" ~ term ~ ")" ^^ { _ => GTFun( "$fot", Seq() ) } ) |
    ( "$cnf" ~ "(" ~ formula ~ ")" ^^ { _ => GTFun( "$fot", Seq() ) } ) |
    ( name ~ opt( "(" ~> repsep( general_term, "," ) <~ ")" ) ^^ { case ( f ~ a ) => GTFun( f, a.getOrElse( Nil ) ) } ) | integer.^^ { GTInt }
  /** A whole proof: any interleaving of comments and steps. */
  def tptpProof: Parser[StepList] = ( comment ^^ { _ => Seq() } | step ^^ { Seq( _ ) } ).* ^^ { _.flatten }
}
}
object TptpProofParser extends TptpProofParser {
  /** Parses TPTP proof output into a refutation sketch.
    *
    * @param out the TPTP text to parse
    * @param labelledCNF maps each input-formula source label to its clausified form
    * @throws IllegalArgumentException if `out` cannot be parsed as TPTP proof output
    */
  def parse( out: String, labelledCNF: Map[String, Seq[FOLClause]] ): RefutationSketch =
    parseAll( tptpProof, out ) match {
      case Success( result, _ ) =>
        parseSteps( result, labelledCNF )
      case failure: NoSuccess =>
        // Previously an unhandled case (MatchError); fail with the parser's own diagnostic instead.
        throw new IllegalArgumentException( s"Cannot parse TPTP proof: ${failure.msg}" )
    }
  /** Parses TPTP proof output, additionally reconstructing the end-sequent and the
    * labelled CNF from the proof's own axiom/conjecture steps.
    *
    * @throws IllegalArgumentException if `out` cannot be parsed as TPTP proof output
    */
  def parse( out: String ): ( Sequent[FOLFormula], RefutationSketch ) =
    parseAll( tptpProof, out ) match {
      case Success( stepList_, _ ) =>
        val stepList = inventSources( stepList_ )
        val ( endSequent, labelledCNF ) = extractEndSequentAndCNF( stepList )
        endSequent -> parseSteps( stepList, labelledCNF )
      case failure: NoSuccess =>
        throw new IllegalArgumentException( s"Cannot parse TPTP proof: ${failure.msg}" )
    }
  /** Gives input steps without a usable file/label source an invented label
    * (`source_<step>`) so that extractEndSequentAndCNF and parseSteps can key on it. */
  def inventSources( stepList: StepList ): StepList = stepList map {
    case ( label, ( lang, role @ ( "axiom" | "hypothesis" | "conjecture" | "negated_conjecture" ), formula, Seq() ) ) =>
      label -> ( lang, role, formula, List( GTFun( "file", List( GTFun( "", List() ), GTFun( s"source_$label", List() ) ) ) ) )
    case ( label, ( lang, role @ ( "axiom" | "hypothesis" | "conjecture" | "negated_conjecture" ), formula, GTFun( "file", List( _, GTFun( "unknown", _ ) ) ) +: _ ) ) =>
      label -> ( lang, role, formula, List( GTFun( "file", List( GTFun( "", List() ), GTFun( s"source_$label", List() ) ) ) ) )
    case other => other
  }
  /** Rebuilds the end-sequent and a label->clauses map from the labelled input steps:
    * fof conjectures go to the succedent (clausified with CNFn), everything else to the
    * antecedent (cnf formulas are universally closed first, clausified with CNFp). */
  def extractEndSequentAndCNF( stepList: StepList ): ( Sequent[FOLFormula], Map[String, Seq[FOLClause]] ) = {
    var endSequent = Sequent[FOLFormula]()
    var labelledCNF = Map[String, Seq[FOLClause]]()
    stepList.map( _._2 ) foreach {
      case ( "fof", "conjecture", formula, List( GTFun( "file", List( _, GTFun( label, List() ) ) ) ) ) =>
        endSequent :+= formula
        labelledCNF += label -> CNFn.toClauseList( formula )
      case ( lang, _, formula, List( GTFun( "file", List( _, GTFun( label, List() ) ) ) ) ) =>
        endSequent +:= ( if ( lang == "cnf" ) univclosure( formula ) else formula )
        labelledCNF += label -> CNFp.toClauseList( formula )
      case _ =>
    }
    endSequent -> labelledCNF
  }
  /** Converts the parsed step list into a refutation sketch rooted at the empty clause.
    *
    * @throws IllegalArgumentException if no step derives the empty clause
    */
  def parseSteps( stepList: StepList, labelledCNF: Map[String, Seq[FOLClause]] ): RefutationSketch = {
    val steps = stepList.toMap
    // Labels of the parent steps referenced by a justification term; tautologies and
    // equality-theory steps have no parents.
    def getParents( justification: GeneralTerm ): Seq[String] = justification match {
      case GTFun( "inference", List( _, _, GTList( parents ) ) ) => parents flatMap getParents
      case GTFun( "introduced", List( GTFun( "tautology", _ ), _ ) ) => Seq()
      case GTFun( "theory", GTFun( "equality", _ ) +: _ ) => Seq()
      case GTFun( parent, List() ) => Seq( parent )
    }
    // Memoised so shared sub-derivations are converted once.
    val memo = mutable.Map[String, Seq[RefutationSketch]]()
    def convert( stepName: String ): Seq[RefutationSketch] = memo.getOrElseUpdate( stepName, steps( stepName ) match {
      case ( "fof", "conjecture", _, GTFun( "file", List( _, GTFun( label, _ ) ) ) +: _ ) =>
        labelledCNF( label ) map SketchAxiom
      case ( _, _, axiom, GTFun( "file", List( _, GTFun( label, _ ) ) ) +: _ ) =>
        CNFp.toClauseList( axiom ) match {
          case Seq( axiomClause ) =>
            Seq( SketchInference(
              axiomClause,
              labelledCNF( label ) map SketchAxiom
            ) )
          case clauses => labelledCNF( label ) map SketchAxiom
        }
      case ( _, _, conclusion, justification +: _ ) =>
        CNFp.toClauseList( conclusion ) match {
          case Seq( conclusionClause ) =>
            val sketchParents = getParents( justification ) flatMap convert
            Seq( SketchInference( conclusionClause, sketchParents ) )
          case clauses => getParents( justification ) flatMap convert
        }
    } )
    // Root the sketch at the step deriving Bottom; fail loudly if there is none
    // (previously a bare .get, which threw an uninformative NoSuchElementException).
    val emptyClauseStep = stepList.find( _._2._3 == Bottom() ).getOrElse(
      throw new IllegalArgumentException( "TPTP proof does not derive the empty clause" ) )
    convert( emptyClauseStep._1 ).head
  }
}
}
| loewenheim/gapt | src/main/scala/at/logic/gapt/formats/tptp/tptpProofParser.scala | Scala | gpl-3.0 | 5,329 |
package offGridOrcs
/** Immutable 3-component double vector, used both for geometry and for RGB
  * colour values (via the r/g/b channel aliases). */
case class Vec3(x: Double, y: Double, z: Double) {
  // Colour-channel aliases for the spatial components.
  def r = x
  def g = y
  def b = z

  /** Component-wise sum. */
  def +(other: Vec3) = copy(x = x + other.x, y = y + other.y, z = z + other.z)

  /** Component-wise difference. */
  def -(other: Vec3) = copy(x = x - other.x, y = y - other.y, z = z - other.z)

  /** Component-wise (Hadamard) product. */
  def *(other: Vec3) = copy(x = x * other.x, y = y * other.y, z = z * other.z)

  /** Uniform scaling by a scalar. */
  def *(scale: Double) = copy(x = x * scale, y = y * scale, z = z * scale)

  /** Uniform division by a scalar. */
  def /(scale: Double) = copy(x = x / scale, y = y / scale, z = z / scale)

  /** Dot product. */
  def dot(other: Vec3) = x * other.x + y * other.y + z * other.z

  /** Squared Euclidean length (no square root). */
  def lengthSquared = x * x + y * y + z * z

  /** Euclidean length. */
  def length = Math.sqrt(lengthSquared)

  /** Scales up by `value`, e.g. lighten(0.25) brightens a colour by 25%. */
  def lighten(value: Double) = this * (1 + value)

  /** Scales down by `value`. */
  def darken(value: Double) = this * (1 - value)

  /** Linear interpolation: blends `otherAmount` of `other` into this vector. */
  def mix(other: Vec3, otherAmount: Double) =
    this * (1 - otherAmount) + other * otherAmount
}

object Vec3 {
  /** The zero vector / black. */
  val Zero = Vec3(0, 0, 0)

  /** The all-ones vector / white. */
  val One = Vec3(1, 1, 1)

  /** Builds a colour from a 24-bit 0xRRGGBB code, mapping each channel to [0, 1]. */
  def hexRGB(hexCode: Int): Vec3 = {
    val red = (hexCode >>> 16) & 0xFF
    val green = (hexCode >>> 8) & 0xFF
    val blue = hexCode & 0xFF
    Vec3(red / 255.0, green / 255.0, blue / 255.0)
  }
}
| dcecile/off-grid-orcs | src/Vec3.scala | Scala | mit | 1,162 |
/*
* Copyright © 2015 Lukas Rosenthaler, Benjamin Geer, Ivan Subotic,
* Tobias Schweizer, André Kilchenmann, and Sepideh Alassi.
* This file is part of Knora.
* Knora is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* Knora is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
* You should have received a copy of the GNU Affero General Public
* License along with Knora. If not, see <http://www.gnu.org/licenses/>.
*/
package org.knora.webapi
import akka.actor.ActorSystem
import akka.event.LoggingAdapter
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.{BeforeAndAfterAll, Suite}
import scala.languageFeature.postfixOps
object ITKnoraFakeSpec {
  // Base Typesafe config loaded from the classpath; every constructor overload of the
  // companion class falls back to this.
  val defaultConfig: Config = ConfigFactory.load()
}
/**
* This class can be used in End-to-End testing. It starts a Fake Knora server and
* provides access to settings and logging.
*/
/**
  * This class can be used in End-to-End testing. It starts a Fake Knora server and
  * provides access to settings and logging.
  */
class ITKnoraFakeSpec(_system: ActorSystem) extends Core with KnoraFakeService with Suite with BeforeAndAfterAll {
  // Auxiliary constructors: any combination of system name and config, each falling
  // back to ITKnoraFakeSpec.defaultConfig.
  def this(name: String, config: Config) = this(ActorSystem(name, config.withFallback(ITKnoraFakeSpec.defaultConfig)))
  def this(config: Config) = this(ActorSystem("IntegrationTests", config.withFallback(ITKnoraFakeSpec.defaultConfig)))
  def this(name: String) = this(ActorSystem(name, ITKnoraFakeSpec.defaultConfig))
  def this() = this(ActorSystem("IntegrationTests", ITKnoraFakeSpec.defaultConfig))
  /* needed by the core trait */
  implicit lazy val system: ActorSystem = _system
  /* needed by the core trait */
  implicit lazy val settings: SettingsImpl = Settings(system)
  /* needed by the core trait */
  implicit lazy val log: LoggingAdapter = akka.event.Logging(system, "ITSpec")
  // These tests drive the fake server over plain HTTP only.
  if (!settings.knoraApiUseHttp) throw HttpConfigurationException("Integration tests currently require HTTP")
  // Base URLs for the Knora API and Sipi media server, derived from settings.
  protected val baseApiUrl: String = settings.knoraApiHttpBaseUrl
  protected val baseSipiUrl: String = s"${settings.sipiBaseUrl}:${settings.sipiPort}"
  override def beforeAll: Unit = {
    /* Set the startup flags and start the Knora Server */
    log.debug(s"Starting Knora Service")
    startService()
  }
  override def afterAll: Unit = {
    /* Stop the server when everything else has finished */
    log.debug(s"Stopping Knora Service")
    stopService()
  }
}
| nie-ine/Knora | webapi/src/it/scala/org/knora/webapi/ITKnoraFakeSpec.scala | Scala | agpl-3.0 | 2,716 |
package com.benkolera.slick.joda
import java.sql.Timestamp
import scala.util.parsing.combinator.RegexParsers
/** Parses PostgreSQL timestamp-range literals (e.g. `["2014-01-01 00:00:00","2014-02-01 00:00:00")`
  * or `empty`) into [[InstantRange]] / [[ValidityRange]] values, using `f` to lift raw
  * SQL timestamps into the instant type `A`. */
case class InstantRangeParser[A:Instant](
  f:Timestamp => InfiniteInstant[A]
) extends RegexParsers {
  // "infinity" / "-infinity" are Postgres's unbounded endpoints.
  def pinf = "infinity".r ^^ { _ => PosInfinity[A]() }
  def ninf = "-infinity".r ^^ { _ => NegInfinity[A]() }
  // Optional numeric timezone offset suffix; parsed but discarded.
  def tz = """[+-]?\\d{0,2}""".r
  // Quoted timestamp; the timezone suffix and quotes are stripped before Timestamp.valueOf.
  def date = "\\"" ~> """\\d{4}-\\d{1,2}-\\d{1,2} \\d\\d:\\d\\d:\\d\\d\\.?\\d*""".r <~ (tz ~ "\\"") ^^ {
    d => f( Timestamp.valueOf(d) )
  }
  // An endpoint: either infinity marker or a concrete timestamp.
  def pgdt = pinf | ninf | date
  // '[' / '(' open a range inclusively / exclusively; ']' / ')' close it likewise.
  def open = """[\\(\\[]""".r
  def close = """[\\)\\]]""".r
  def incOpen = open ^^ { _ == "[" }
  def emptyOpen = open ^^ { _ => EmptyStart[A]() }
  def incClose = close ^^ { _ == "]" }
  def emptyClose = close ^^ { _ => EmptyEnd[A]() }
  // Start endpoint with its inclusivity.
  def fst =
    ( incOpen ~ pgdt ) ^^ { case inc ~ date =>
      if (inc) InclusiveStart[A](date) else ExclusiveStart[A](date)
    }
  // Validity ranges are canonically '[start, end)': anything else is rejected.
  def validityFst =
    ( incOpen ~ pgdt ) ^^ { case inc ~ date =>
      if (inc) date else throw new Exception("Exclusive start not possible in validity")
    }
  def scd =
    ( pgdt ~ incClose ) ^^ { case date ~ inc =>
      if (inc) InclusiveEnd[A](date) else ExclusiveEnd[A](date)
    }
  def validityScd =
    ( pgdt ~ incClose ) ^^ { case date ~ inc =>
      if (!inc) date else throw new Exception("Inclusive end not possible in validity")
    }
  // A bracketed range; a missing endpoint (bare bracket) becomes an Empty endpoint.
  def defined =
    ( (fst | emptyOpen) ~ "," ~ (scd | emptyClose) ) ^^ {
      case fst ~ _ ~ scd => InstantRange[A]( fst, scd )
    }
  def validityDefined =
    ( validityFst ~ "," ~ validityScd ) ^^ {
      case fst ~ _ ~ scd => ValidityRange[A]( fst, scd )
    }
  def empty = "empty".r ^^ { _ => EmptyRange[A]() }
  def range = defined | empty
  def validityRange = validityDefined
  /** Parses a Postgres range literal, throwing on malformed input. */
  def fromSql( sql:String ): InstantRange[A] = {
    parseAll(range, sql) match {
      case Success(result, _) => result
      case n: NoSuccess => throw new Exception( s"Cannot parse $sql as InstantRange: ${n.msg}" )
    }
  }
  /** Parses a Postgres range literal into the stricter validity form, throwing on malformed input. */
  def validityFromSql( sql:String ): ValidityRange[A] = {
    parseAll(validityRange, sql) match {
      case Success(result, _) => result
      case n: NoSuccess => throw new Exception( s"Cannot parse $sql as ValidityRange: ${n.msg}" )
    }
  }
}
| benkolera/scala-slick-extras | src/main/scala/joda/InstantRangeParser.scala | Scala | mit | 2,222 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package views.notifications.v4m0
import models.notifications.NotificationParams
import org.scalatest.MustMatchers
import play.api.i18n.Messages
import utils.AmlsViewSpec
import views.Fixture
import views.html.notifications.v4m0.revocation_reasons
/** View spec for the v4m0 "supervision revoked" notification page: checks title,
  * headings, interpolated content, and the presence of a back link. */
class revocation_reasonsSpec extends AmlsViewSpec with MustMatchers {
  trait ViewFixture extends Fixture {
    lazy val revocation_reasons = app.injector.instanceOf[revocation_reasons]
    implicit val requestWithToken = addTokenForView()
    // Sample notification values; each is expected to be rendered into the page.
    val notificationParams = NotificationParams(businessName = Some("Fake Name Ltd."), msgContent = "msgContent", amlsRefNo = Some("amlsRegNo"), endDate = Some("endDate"))
  }
  "revocation_reasons view" must {
    "have correct title" in new ViewFixture {
      def view = revocation_reasons(notificationParams)
      doc.title must be("Your supervision has been revoked" +
        " - " + "Your registration" +
        " - " + Messages("title.amls") +
        " - " + Messages("title.gov"))
    }
    "have correct headings" in new ViewFixture {
      def view = revocation_reasons(notificationParams)
      heading.html must be("Your supervision has been revoked")
      subHeading.html must include("Your registration")
    }
    "have correct content, businessName, endDate and reference displayed" in new ViewFixture {
      def view = revocation_reasons(notificationParams)
      doc.html must (include("msgContent") and include("Fake Name Ltd.") and include("amlsRegNo") and include("endDate"))
    }
    "have a back link" in new ViewFixture {
      def view = revocation_reasons(notificationParams)
      doc.getElementsByAttributeValue("class", "link-back") must not be empty
    }
  }
}
| hmrc/amls-frontend | test/views/notifications/v4m0/revocation_reasonsSpec.scala | Scala | apache-2.0 | 2,310 |
package me.rexim.issuestant.mock
import scala.collection.mutable.ListBuffer
import scala.util._
import org.http4s._
import org.http4s.client._
import scalaz.concurrent._
import scalaz.stream._
import scodec.bits._
// TODO(#55): Make ETagLogging less stateful
//
// Something like
// ```scala
// val (requests, responses) = EtagLogging { client =>
// new EtagPolling(client, Uri(path = "/rexim")).responses.take(5)
// }
// ```
// or similar
/** Mock http4s [[Client]] that answers every request with 200 OK, a freshly generated
  * random ETag (sent both as the ETag header and, quoted, as the body), while recording
  * each request for later inspection via [[log]]. */
class EtagLogging {
  // Every request served is appended here. NOTE(review): mutable and unsynchronised —
  // assumes single-threaded test use; confirm before concurrent use.
  private val requestLog: ListBuffer[Request] = ListBuffer.empty
  // Hex-encodes ten random characters to fabricate an ETag value.
  private def randomEtag(): String =
    Random.nextString(10).toList.map(_.toInt.toHexString).mkString
  lazy val client: Client = Client(
    open = HttpService.lift { (request) =>
      Task {
        val etag = randomEtag()
        requestLog += request
        Response(
          status = Status.Ok,
          headers = Headers(
            Header("ETag", etag)
          ),
          // Body is the ETag wrapped in literal double quotes, as servers emit it.
          body = Process.emit(ByteVector(s"""\\"$etag\\"""".getBytes))
        )
      }
    }.map((r) => DisposableResponse(r, Task({}))),
    shutdown = Task({})
  )
  /** Immutable snapshot of all requests served so far. */
  def log: List[Request] = requestLog.toList
}
| tsoding/Issuestant | src/test/scala/me/rexim/mock/EtagLogging.scala | Scala | mit | 1,128 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.runtime.stream.sql
import org.apache.flink.api.scala._
import org.apache.flink.table.api.TableEnvironment
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.table.planner.runtime.FileSystemITCaseBase
import org.apache.flink.table.planner.runtime.utils.{StreamingTestBase, TestSinkUtil, TestingAppendSink}
import org.apache.flink.types.Row
import org.junit.Assert.assertEquals
import org.junit.{Before, Test}
import scala.collection.Seq
/**
* Streaming [[FileSystemITCaseBase]].
*/
abstract class StreamFileSystemITCaseBase extends StreamingTestBase with FileSystemITCaseBase {
  @Before
  override def before(): Unit = {
    super.before()
    super.open()
  }
  override def tableEnv: TableEnvironment = {
    tEnv
  }
  /** Runs the query as an append stream, collects the sink's rows, and compares them
    * with the expectation order-insensitively (both sides sorted as strings). */
  override def check(sqlQuery: String, expectedResult: Seq[Row]): Unit = {
    val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row]
    val sink = new TestingAppendSink()
    result.addSink(sink)
    env.execute()
    assertEquals(
      expectedResult.map(TestSinkUtil.rowToString(_)).sorted,
      sink.getAppendResults.sorted)
  }
  // INSERT OVERWRITE is not supported in streaming mode, so the inherited test is disabled.
  @Test
  override def testInsertOverwrite(): Unit = {}
}
| tillrohrmann/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/runtime/stream/sql/StreamFileSystemITCaseBase.scala | Scala | apache-2.0 | 2,039 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.testingUtils
import java.time.Duration
import java.util
import java.util.Collections
import java.util.concurrent._
import akka.actor.{ActorRef, Kill}
import com.typesafe.config.ConfigFactory
import org.apache.flink.api.common.time.Time
import org.apache.flink.runtime.akka.AkkaUtils
import org.apache.flink.runtime.concurrent.{ScheduledExecutor, ScheduledExecutorServiceAdapter}
import scala.concurrent.{ExecutionContext, ExecutionContextExecutor}
import scala.language.postfixOps
/**
* Convenience functions to test actor based components.
*/
/**
 * Convenience functions to test actor based components.
 */
object TestingUtils {
  // Lazily (re)created by defaultExecutor; guarded by TestingUtils' monitor.
  private var sharedExecutorInstance: ScheduledExecutorService = _
  val testConfig = ConfigFactory.parseString(getDefaultTestingActorSystemConfigString)
  // Common timeouts used across the runtime tests.
  val TESTING_DURATION = Duration.ofMinutes(2L);
  val TESTING_TIMEOUT = Duration.ofMinutes(1L);
  val TIMEOUT = Time.minutes(1L)
  val DEFAULT_AKKA_ASK_TIMEOUT = "200 s"
  /** Akka config for test actor systems: daemonic, slf4j logging at the harness's log level. */
  def getDefaultTestingActorSystemConfigString: String = {
    val logLevel = AkkaUtils.getLogLevel
    s"""akka.daemonic = on
      |akka.test.timefactor = 10
      |akka.loggers = ["akka.event.slf4j.Slf4jLogger"]
      |akka.loglevel = $logLevel
      |akka.stdout-loglevel = OFF
      |akka.jvm-exit-on-fatal-error = off
      |akka.log-config-on-start = off
      |akka.logger-startup-timeout = 30s
      """.stripMargin
  }
  def getDefaultTestingActorSystemConfig = testConfig
  // "Infinite" for test purposes: Integer.MAX_VALUE milliseconds (~24.8 days).
  def infiniteTime: Time = {
    Time.milliseconds(Integer.MAX_VALUE);
  }
  /**
   * Gets the shared global testing execution context
   */
  def defaultExecutionContext: ExecutionContextExecutor = {
    ExecutionContext.fromExecutor(defaultExecutor)
  }
  /**
   * Gets the shared global testing scheduled executor
   */
  def defaultExecutor: ScheduledExecutorService = {
    // Synchronized so concurrent callers see a single live instance; recreated after shutdown.
    synchronized {
      if (sharedExecutorInstance == null || sharedExecutorInstance.isShutdown) {
        sharedExecutorInstance = Executors.newSingleThreadScheduledExecutor();
      }
      sharedExecutorInstance
    }
  }
  def defaultScheduledExecutor: ScheduledExecutor = {
    val scheduledExecutorService = defaultExecutor
    new ScheduledExecutorServiceAdapter(scheduledExecutorService)
  }
  /** Returns an [[ExecutionContext]] which uses the current thread to execute the runnable.
    *
    * @return Direct [[ExecutionContext]] which executes runnables directly
    */
  def directExecutionContext = ExecutionContext
    .fromExecutor(org.apache.flink.runtime.concurrent.Executors.directExecutor())
  /** @return A new [[QueuedActionExecutionContext]] */
  def queuedActionExecutionContext = {
    new QueuedActionExecutionContext(new ActionQueue())
  }
  /** [[ExecutionContext]] which queues [[Runnable]] up in an [[ActionQueue]] instead of
    * execution them. If the automatic execution mode is activated, then the [[Runnable]] are
    * executed.
    */
  class QueuedActionExecutionContext private[testingUtils] (val actionQueue: ActionQueue)
    extends AbstractExecutorService with ExecutionContext with ScheduledExecutorService {
    // When true, runnables execute immediately instead of being queued.
    var automaticExecution = false
    def toggleAutomaticExecution() = {
      automaticExecution = !automaticExecution
    }
    override def execute(runnable: Runnable): Unit = {
      if(automaticExecution){
        runnable.run()
      }else {
        actionQueue.queueAction(runnable)
      }
    }
    override def reportFailure(t: Throwable): Unit = {
      t.printStackTrace()
    }
    // Scheduling is intentionally unsupported in this deterministic test context.
    override def scheduleAtFixedRate(
        command: Runnable,
        initialDelay: Long,
        period: Long,
        unit: TimeUnit): ScheduledFuture[_] = {
      throw new UnsupportedOperationException()
    }
    override def schedule(command: Runnable, delay: Long, unit: TimeUnit): ScheduledFuture[_] = {
      throw new UnsupportedOperationException()
    }
    override def schedule[V](callable: Callable[V], delay: Long, unit: TimeUnit)
      : ScheduledFuture[V] = {
      throw new UnsupportedOperationException()
    }
    override def scheduleWithFixedDelay(
        command: Runnable,
        initialDelay: Long,
        delay: Long,
        unit: TimeUnit): ScheduledFuture[_] = {
      throw new UnsupportedOperationException()
    }
    // Lifecycle methods are no-ops/constants: this context never really shuts down.
    override def shutdown(): Unit = ()
    override def isTerminated: Boolean = false
    override def awaitTermination(timeout: Long, unit: TimeUnit): Boolean = false
    override def shutdownNow(): util.List[Runnable] = Collections.emptyList()
    override def isShutdown: Boolean = false
  }
  /** Queue which stores [[Runnable]] */
  class ActionQueue {
    private val runnables = scala.collection.mutable.Queue[Runnable]()
    // Dequeues and runs the next action; throws NoSuchElementException when empty.
    def triggerNextAction() {
      val r = runnables.dequeue
      r.run()
    }
    def popNextAction(): Runnable = {
      runnables.dequeue()
    }
    def queueAction(r: Runnable) {
      runnables.enqueue(r)
    }
    def isEmpty: Boolean = {
      runnables.isEmpty
    }
  }
  /** Stops the given actor by sending it a Kill message
    *
    * @param actor
    */
  def stopActor(actor: ActorRef): Unit = {
    if (actor != null) {
      actor ! Kill
    }
  }
}
| rmetzger/flink | flink-runtime/src/test/scala/org/apache/flink/runtime/testingUtils/TestingUtils.scala | Scala | apache-2.0 | 5,938 |
package eu.shiftforward.Elements
import eu.shiftforward.{Bus, Wire}
import scala.collection.mutable.ArrayBuffer
/** Mux/demux implementations that compute their outputs directly (one scheduled update
  * per input change) instead of composing primitive gates. */
trait OptimizedControlFlow extends ControlFlow {
  /** 2-way multiplexer: output follows `b` when `s` is high, otherwise `a`. */
  override def mux(a: Wire, b: Wire, s: Wire) = {
    val output = new Wire
    // Re-evaluated whenever any of the three inputs changes.
    def action() {
      val inputA = a.getSignal
      val inputB = b.getSignal
      val selector = s.getSignal
      schedule(GenericGateDelay) {
        output <~ (if (selector) inputB else inputA)
      }
    }
    a addAction action
    b addAction action
    s addAction action
    output
  }
  /** 1-to-2 demultiplexer: routes `a` to the first output when `s` is low,
    * to the second when `s` is high; the unselected output is driven low. */
  override def demux(a: Wire, s: Wire) = {
    val outA, outB = new Wire
    def action() {
      val input = a.getSignal
      val selector = s.getSignal
      schedule(GenericGateDelay) {
        outA <~ (if (!selector) input else false)
        outB <~ (if (selector) input else false)
      }
    }
    a addAction action
    s addAction action
    (outA, outB)
  }
}
/** Arithmetic elements computed via lookup instead of gate composition. */
trait OptimizedArithmetic extends Arithmetic {
  /** One-bit full adder returning (sum, carry-out) wires. */
  override def fullAdder(a: Wire, b: Wire, cin: Wire) = {
    val sum, cout = new Wire
    def action() {
      // Full truth table for (a, b, cin) -> (sum, carry-out).
      val (outSum, outCarry) = (a.getSignal, b.getSignal, cin.getSignal) match {
        case (false, false, false) => (false, false)
        case (false, false, true) => (true, false)
        case (false, true, false) => (true, false)
        case (false, true, true) => (false, true)
        case (true, false, false) => (true, false)
        case (true, false, true) => (false, true)
        case (true, true, false) => (false, true)
        case (true, true, true) => (true, true)
      }
      schedule(GenericGateDelay) {
        sum <~ outSum
        cout <~ outCarry
      }
    }
    a addAction action
    b addAction action
    cin addAction action
    (sum, cout)
  }
}
/** RAM modelled as a plain Int array instead of flip-flop networks. */
trait OptimizedMemory extends Memory {
  /** Clocked RAM with 2^address.size cells: while the clock is high, writes `data` to the
    * addressed cell when `load` is high, and drives `out` from the addressed cell. */
  override def ram(data: Bus, address: Bus, load: Wire)(implicit clock: Wire): Bus = {
    // Backing store: one Int per address, all cells initialised to 0.
    val state = ArrayBuffer.fill(math.pow(2, address.size).toInt)(0)
    val out = new Bus(data.size)
    // Only the clock triggers re-evaluation; data/address/load are sampled at that moment.
    def action() {
      val clockIn = clock.getSignal
      val loadIn = load.getSignal
      val inputA = data.toInt
      schedule(ClockedGateDelay) {
        if (clockIn && loadIn) state(address.toInt) = inputA
        if (clockIn) out <~ state(address.toInt)
      }
    }
    clock addAction action
    out
  }
}
/** Convenience mixin bundling all optimized circuit-element implementations. */
trait OptimizedElements extends OptimizedControlFlow with OptimizedArithmetic with OptimizedMemory
package odfi.server.manager.modules.run
import org.odfi.wsb.fwapp.SiteApp
import org.odfi.indesign.core.harvest.HarvestedResource
/** Wraps a [[SiteApp]] subclass as a harvested resource, identified by the class's canonical name. */
class SiteAppClass[T <: SiteApp](cl: Class[T]) extends HarvestedResource {
  def getId = cl.getCanonicalName
}
package fastparsers.tools
/**
* Created by Eric on 10.04.14.
see
http://stackoverflow.com/questions/5260298/how-can-i-obtain-the-default-value-for-a-type-in-scala
http://missingfaktor.blogspot.ch/2011/08/emulating-cs-default-keyword-in-scala.html
*/
/** Type class carrying the "zero"/default value for a type `A`. Covariance lets a
  * `Default[Any]` act as a last resort while specific instances still win. */
class Default[+A](val default: A)

/** Lower-priority fallback so the `null` default for reference types never competes
  * with the `AnyVal` instances declared directly on [[Default]]. */
trait LowerPriorityImplicits {
  // Reference types default to null, mirroring JVM field initialisation.
  implicit def defaultNull[A <: AnyRef]: Default[A] = new Default[A](null.asInstanceOf[A])
}

/** Instances for the primitive types plus `value[A]` to summon a default,
  * emulating C#'s `default` keyword. */
object Default extends LowerPriorityImplicits {
  implicit object DefaultBoolean extends Default[Boolean](false)
  implicit object DefaultByte extends Default[Byte](0)
  implicit object DefaultChar extends Default[Char]('\u0000')
  implicit object DefaultShort extends Default[Short](0)
  implicit object DefaultInt extends Default[Int](0)
  implicit object DefaultLong extends Default[Long](0L)
  implicit object DefaultFloat extends Default[Float](0.0F)
  implicit object DefaultDouble extends Default[Double](0.0)
  implicit object DefaultUnit extends Default[Unit](())
  implicit object DefaultAny extends Default[Any](null)

  /** Summons the default value for `A`, e.g. `Default.value[Int] == 0`. */
  def value[A](implicit value: Default[A]): A = value.default
}
} | begeric/FastParsers | FastParsers/src/main/scala/fastparsers/tools/DefaultValue.scala | Scala | mit | 1,143 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v3
import uk.gov.hmrc.ct.box.ValidatableBox._
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.ct600.v3.retriever.RepaymentsBoxRetriever
/**
 * CT600 v3 box B935: the repayment bank account name.
 *
 * Validation: all bank-detail fields must be filled together (or all left
 * empty), the name must be 2-28 characters long, and it must match the
 * non-foreign "less restrictive" character set.
 */
case class B935(value: String) extends CtBoxIdentifier("account name")
  with CtString with Input with ValidatableBox[RepaymentsBoxRetriever] {

  override def validate(boxRetriever: RepaymentsBoxRetriever): Set[CtValidation] = {
    validateAllFilledOrEmptyStringsForBankDetails(boxRetriever,"B935") ++
      validateStringByLength("B935", this, 2, 28) ++
      validateStringByRegex("B935", this, ValidNonForeignLessRestrictiveCharacters)
  }
}
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/v3/B935.scala | Scala | apache-2.0 | 1,219 |
package dao.organization
import javax.inject.Inject
import javax.inject.Singleton
import dao.ColumnTypeMappings
import dao.organization.table.{CourseTables, OrganizationTables}
import dao.quiz.QuizDAO
import dao.user.UserDAO
import dao.user.table.UserTables
import models._
import models.organization.{Course, Course2Quiz, Organization, User2Course}
import models.quiz.Quiz
import models.user.User
import org.joda.time.DateTime
import play.api.db.slick.{DatabaseConfigProvider, HasDatabaseConfigProvider}
import play.api.mvc.Result
import play.api.mvc.Results.NotFound
import slick.lifted
import slick.lifted.PrimaryKey
import models.organization.Course2Quiz
import scala.concurrent.{ExecutionContext, Future}
// ====
import slick.driver.JdbcProfile
//import slick.jdbc.JdbcProfile // Use this after upgrading slick
// ====
/**
 * Slick data-access object for [[Course]] rows and the user-to-course access
 * links ([[User2Course]]).
 *
 * Several lookup variants return `Future[Either[Result, _]]` where the `Left`
 * is a ready-to-serve NotFound page, so controllers can short-circuit on a
 * missing row.
 */
@Singleton
class CourseDAO @Inject()(protected val dbConfigProvider: DatabaseConfigProvider, protected val userTables: UserTables, protected val organizationTables: OrganizationTables, protected val courseTables: CourseTables)(implicit executionContext: ExecutionContext) extends HasDatabaseConfigProvider[JdbcProfile] with ColumnTypeMappings {

  // ====
  // import profile.api._ // Use this after upgrading slick
  import dbConfig.driver.api._
  // ====

  // * ====== TABLE INSTANCES ====== *
  val Courses = courseTables.Courses
  val User2Courses = courseTables.User2Courses

  // * ====== QUERIES ====== *

  // ====== FIND ======
  /** All courses in the system. */
  def all(): Future[Seq[Course]] = db.run(Courses.result)

  /** Course by primary key, if any. */
  def byId(id : CourseId): Future[Option[Course]] = db.run(Courses.filter(_.id === id).result.headOption)

  /** Course by id, or a NotFound [[Result]] suitable for returning directly. */
  def apply(courseId: CourseId): Future[Either[Result, Course]] = byId(courseId).map { _ match {
    case None => Left(NotFound(views.html.errors.notFoundPage("There was no course for id=["+courseId+"]")))
    case Some(course) => Right(course)
  } }

  /** Course by id, restricted to the given organization. */
  def byIds(organizationId: OrganizationId, id : CourseId): Future[Option[Course]] = db.run(Courses.filter(c => c.id === id && c.organizationId === organizationId).result.headOption)

  /** Like [[byIds]], but wraps the miss in a NotFound [[Result]]. */
  def apply(organizationId: OrganizationId, courseId: CourseId): Future[Either[Result, Course]] = byIds(organizationId, courseId).map { _ match {
    case None => Left(NotFound(views.html.errors.notFoundPage("There was no Course for id=["+courseId+"] which also had Organization Id [" + organizationId + "]")))
    case Some(course) => Right(course)
  } }

  /** All courses belonging to an organization. */
  def coursesFor(organizationId: OrganizationId) : Future[Seq[Course]] = db.run(Courses.filter(_.organizationId === organizationId).result)

  /** The user, if they are enrolled in the course with view (student) access. */
  def studentOf(courseId : CourseId, studentId: UserId): Future[Option[User]] = db.run({
    (for(u2c <- User2Courses; u <- userTables.Users
         if u2c.courseId === courseId && u2c.userId === studentId && u.id === studentId && u2c.access === Access.view
    ) yield u).result.headOption
  })

  /** Like [[studentOf]], but wraps the miss in a NotFound [[Result]]. */
  def apply(courseId : CourseId, studentId: UserId) : Future[Either[Result, User]] = studentOf(courseId, studentId).map{ _ match {
    case None => Left(NotFound(views.html.errors.notFoundPage("There was no User for id=["+studentId+"] which was also a student in [" + courseId + "]")))
    case Some(student) => Right(student)
  } }

  // ====== Access ======
  /**
   * Effective access of a user on a course: the max of ownership access
   * (Own when the user is the course owner) and any directly granted access
   * (Non when neither applies).
   */
  def access(userId: UserId, courseId : CourseId): Future[Access] = db.run {
    val ownerAccess = (for(c <- Courses if c.ownerId === userId && c.id === courseId) yield c).result.headOption.map(_ match { case Some(_) => Own case None => Non})
    val directAccess = (for(u2c <- User2Courses if u2c.userId === userId && u2c.courseId === courseId) yield u2c.access).result.headOption.map(_.getOrElse(Non))
    ownerAccess.flatMap(oa => directAccess.map( da => oa max da))
  }

  /** Inserts a direct user-to-course access link. */
  def grantAccess(user: User, course: Course, access: Access) =
    db.run(User2Courses += User2Course(user.id, course.id, access)).map { _ => () }

  /** Deletes the direct user-to-course access link, if any. */
  def revokeAccess(user: User, course: Course) =
    db.run(User2Courses.filter(u2c => u2c.userId === user.id && u2c.courseId === course.id).delete)

  // ====== Courses <-> Users ======
  /** Courses the user has a direct link to, unioned with courses they own. */
  def coursesFor(user: User): Future[Seq[Course]] =
    db.run({
      (for(u2c <- User2Courses; c <- Courses if u2c.userId === user.id && u2c.courseId === c.id) yield c)
        .union(
          Courses.filter(_.ownerId === user.id)
        ).result })

  /** Same as [[coursesFor]] but pairs each course with the user's access level. */
  def coursesAndAccessFor(user: User): Future[Seq[(Course, Access)]] =
    db.run({
      (for(u2c <- User2Courses; c <- Courses if u2c.userId === user.id && u2c.courseId === c.id) yield (c, u2c.access))
        .union(Courses.filter(_.ownerId === user.id).map( (_, models.Own))
        ).result})

  /** All users enrolled in the course with view (student) access, sorted by name. */
  def studentsIn(course: Course): Future[Seq[User]] = db.run({
    (for (u2c <- User2Courses; u <- userTables.Users
          if u2c.courseId === course.id && u2c.access === Access.view && u2c.userId === u.id
    ) yield u).sortBy(_.name).result
  })

  // ====== Create ======
  /** Inserts the course and returns it with its database-generated id filled in. */
  def insert(course: Course): Future[Course] =
    db.run({ (Courses returning Courses.map(_.id) into ((needsId, id) => needsId.copy(id = id))) += course })
}
| kristiankime/calc-tutor | app/dao/organization/CourseDAO.scala | Scala | mit | 5,026 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import scala.collection.mutable
import org.apache.spark.sql.catalyst.expressions.SubqueryExpression
import org.apache.spark.sql.catalyst.plans.logical.{Command, CTERelationDef, CTERelationRef, InsertIntoDir, LogicalPlan, ParsedStatement, SubqueryAlias, UnresolvedWith, WithCTE}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.catalyst.trees.TreePattern._
import org.apache.spark.sql.errors.QueryCompilationErrors
import org.apache.spark.sql.internal.SQLConf.{LEGACY_CTE_PRECEDENCE_POLICY, LegacyBehaviorPolicy}
/**
 * Analyze WITH nodes and substitute child plan with CTE references or CTE definitions depending
 * on the conditions below:
 * 1. If in legacy mode, or if the query is a SQL command or DML statement, replace with CTE
 *    definitions, i.e., inline CTEs.
 * 2. Otherwise, replace with CTE references `CTERelationRef`s. The decision to inline or not
 *    inline will be made later by the rule `InlineCTE` after query analysis.
 *
 * All the CTE definitions that are not inlined after this substitution will be grouped together
 * under one `WithCTE` node for each of the main query and the subqueries. Any of the main query
 * or the subqueries that do not contain CTEs or have had all CTEs inlined will obviously not have
 * any `WithCTE` nodes. If any though, the `WithCTE` node will be in the same place as where the
 * outermost `With` node once was.
 *
 * The CTE definitions in a `WithCTE` node are kept in the order of how they have been resolved.
 * That means the CTE definitions are guaranteed to be in topological order base on their
 * dependency for any valid CTE query (i.e., given CTE definitions A and B with B referencing A,
 * A is guaranteed to appear before B). Otherwise, it must be an invalid user query, and an
 * analysis exception will be thrown later by relation resolving rules.
 */
object CTESubstitution extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = {
    if (!plan.containsPattern(UNRESOLVED_WITH)) {
      return plan
    }
    // Commands/DML statements force inlining of CTE definitions (see class doc, case 1).
    val isCommand = plan.find {
      case _: Command | _: ParsedStatement | _: InsertIntoDir => true
      case _ => false
    }.isDefined
    // Accumulates, in resolution order, every CTE definition that is not inlined.
    val cteDefs = mutable.ArrayBuffer.empty[CTERelationDef]
    val (substituted, lastSubstituted) =
      LegacyBehaviorPolicy.withName(conf.getConf(LEGACY_CTE_PRECEDENCE_POLICY)) match {
        case LegacyBehaviorPolicy.EXCEPTION =>
          assertNoNameConflictsInCTE(plan)
          traverseAndSubstituteCTE(plan, isCommand, cteDefs)
        case LegacyBehaviorPolicy.LEGACY =>
          (legacyTraverseAndSubstituteCTE(plan, cteDefs), None)
        case LegacyBehaviorPolicy.CORRECTED =>
          traverseAndSubstituteCTE(plan, isCommand, cteDefs)
      }
    if (cteDefs.isEmpty) {
      substituted
    } else if (substituted eq lastSubstituted.get) {
      WithCTE(substituted, cteDefs.toSeq)
    } else {
      // Put the WithCTE node where the outermost substituted `With` node once was.
      var done = false
      substituted.resolveOperatorsWithPruning(_ => !done) {
        case p if p eq lastSubstituted.get =>
          done = true
          WithCTE(p, cteDefs.toSeq)
      }
    }
  }

  /**
   * Spark 3.0 changes the CTE relations resolution, and inner relations take precedence. This is
   * correct but we need to warn users about this behavior change under EXCEPTION mode, when we see
   * CTE relations with conflicting names.
   *
   * Note that, before Spark 3.0 the parser didn't support CTE in the FROM clause. For example,
   * `WITH ... SELECT * FROM (WITH ... SELECT ...)` was not supported. We should not fail for this
   * case, as Spark versions before 3.0 can't run it anyway. The parameter `startOfQuery` is used
   * to indicate where we can define CTE relations before Spark 3.0, and we should only check
   * name conflicts when `startOfQuery` is true.
   */
  private def assertNoNameConflictsInCTE(
      plan: LogicalPlan,
      outerCTERelationNames: Seq[String] = Nil,
      startOfQuery: Boolean = true): Unit = {
    val resolver = conf.resolver
    plan match {
      case UnresolvedWith(child, relations) =>
        val newNames = mutable.ArrayBuffer.empty[String]
        newNames ++= outerCTERelationNames
        relations.foreach {
          case (name, relation) =>
            if (startOfQuery && outerCTERelationNames.exists(resolver(_, name))) {
              throw QueryCompilationErrors.ambiguousRelationAliasNameInNestedCTEError(name)
            }
            // CTE relation is defined as `SubqueryAlias`. Here we skip it and check the child
            // directly, so that `startOfQuery` is set correctly.
            assertNoNameConflictsInCTE(relation.child, newNames.toSeq)
            newNames += name
        }
        assertNoNameConflictsInCTE(child, newNames.toSeq, startOfQuery = false)
      case other =>
        other.subqueries.foreach(assertNoNameConflictsInCTE(_, outerCTERelationNames))
        other.children.foreach(
          assertNoNameConflictsInCTE(_, outerCTERelationNames, startOfQuery = false))
    }
  }

  /**
   * Legacy (pre-3.0) substitution: always inlines CTE definitions, and leaves inner
   * `With` nodes unresolved so that outer CTE relations take precedence over inner
   * ones across repeated analyzer runs (see the comment in `resolveCTERelations`).
   */
  private def legacyTraverseAndSubstituteCTE(
      plan: LogicalPlan,
      cteDefs: mutable.ArrayBuffer[CTERelationDef]): LogicalPlan = {
    plan.resolveOperatorsUp {
      case UnresolvedWith(child, relations) =>
        val resolvedCTERelations =
          resolveCTERelations(relations, isLegacy = true, isCommand = false, cteDefs)
        substituteCTE(child, alwaysInline = true, resolvedCTERelations)
    }
  }

  /**
   * Traverse the plan and expression nodes as a tree and replace matching references with CTE
   * references if `isCommand` is false, otherwise with the query plans of the corresponding
   * CTE definitions.
   * - If the rule encounters a WITH node then it substitutes the child of the node with CTE
   *   definitions of the node right-to-left order as a definition can reference to a previous
   *   one.
   *   For example the following query is valid:
   *   WITH
   *     t AS (SELECT 1),
   *     t2 AS (SELECT * FROM t)
   *   SELECT * FROM t2
   * - If a CTE definition contains an inner WITH node then substitution of inner should take
   *   precedence because it can shadow an outer CTE definition.
   *   For example the following query should return 2:
   *   WITH
   *     t AS (SELECT 1),
   *     t2 AS (
   *       WITH t AS (SELECT 2)
   *       SELECT * FROM t
   *     )
   *   SELECT * FROM t2
   * - If a CTE definition contains a subquery that contains an inner WITH node then substitution
   *   of inner should take precedence because it can shadow an outer CTE definition.
   *   For example the following query should return 2:
   *   WITH t AS (SELECT 1 AS c)
   *   SELECT max(c) FROM (
   *     WITH t AS (SELECT 2 AS c)
   *     SELECT * FROM t
   *   )
   * - If a CTE definition contains a subquery expression that contains an inner WITH node then
   *   substitution of inner should take precedence because it can shadow an outer CTE
   *   definition.
   *   For example the following query should return 2:
   *   WITH t AS (SELECT 1)
   *   SELECT (
   *     WITH t AS (SELECT 2)
   *     SELECT * FROM t
   *   )
   * @param plan the plan to be traversed
   * @return the plan where CTE substitution is applied
   */
  private def traverseAndSubstituteCTE(
      plan: LogicalPlan,
      isCommand: Boolean,
      cteDefs: mutable.ArrayBuffer[CTERelationDef]): (LogicalPlan, Option[LogicalPlan]) = {
    // Tracks the outermost node whose `With` was substituted, so `apply` can place WithCTE there.
    var lastSubstituted: Option[LogicalPlan] = None
    val newPlan = plan.resolveOperatorsUpWithPruning(
      _.containsAnyPattern(UNRESOLVED_RELATION, PLAN_EXPRESSION)) {
      case UnresolvedWith(child: LogicalPlan, relations) =>
        val resolvedCTERelations =
          resolveCTERelations(relations, isLegacy = false, isCommand, cteDefs)
        if (!isCommand) {
          cteDefs ++= resolvedCTERelations.map(_._2)
        }
        lastSubstituted = Some(substituteCTE(child, isCommand, resolvedCTERelations))
        lastSubstituted.get
      case other =>
        other.transformExpressionsWithPruning(_.containsPattern(PLAN_EXPRESSION)) {
          case e: SubqueryExpression => e.withNewPlan(apply(e.plan))
        }
    }
    (newPlan, lastSubstituted)
  }

  /**
   * Resolves the given CTE relations left to right, substituting references to previously
   * defined relations (and, in non-legacy mode, resolving inner CTEs first so they can shadow
   * outer ones). Returns each relation name paired with its `CTERelationDef`.
   */
  private def resolveCTERelations(
      relations: Seq[(String, SubqueryAlias)],
      isLegacy: Boolean,
      isCommand: Boolean,
      cteDefs: mutable.ArrayBuffer[CTERelationDef]): Seq[(String, CTERelationDef)] = {
    val resolvedCTERelations = new mutable.ArrayBuffer[(String, CTERelationDef)](relations.size)
    for ((name, relation) <- relations) {
      val innerCTEResolved = if (isLegacy) {
        // In legacy mode, outer CTE relations take precedence. Here we don't resolve the inner
        // `With` nodes, later we will substitute `UnresolvedRelation`s with outer CTE relations.
        // Analyzer will run this rule multiple times until all `With` nodes are resolved.
        relation
      } else {
        // A CTE definition might contain an inner CTE that has a higher priority, so traverse and
        // substitute CTE defined in `relation` first.
        traverseAndSubstituteCTE(relation, isCommand, cteDefs)._1
      }
      // CTE definition can reference a previous one
      val substituted =
        substituteCTE(innerCTEResolved, isLegacy || isCommand, resolvedCTERelations.toSeq)
      val cteRelation = CTERelationDef(substituted)
      resolvedCTERelations += (name -> cteRelation)
    }
    resolvedCTERelations.toSeq
  }

  /**
   * Replaces each single-part `UnresolvedRelation` that matches a resolved CTE name with either
   * the CTE's plan (when `alwaysInline` is true) or a `CTERelationRef`, and applies the same
   * substitution recursively inside subquery expressions.
   */
  private def substituteCTE(
      plan: LogicalPlan,
      alwaysInline: Boolean,
      cteRelations: Seq[(String, CTERelationDef)]): LogicalPlan =
    plan.resolveOperatorsUpWithPruning(_.containsAnyPattern(UNRESOLVED_RELATION, PLAN_EXPRESSION)) {
      case u @ UnresolvedRelation(Seq(table), _, _, _) =>
        cteRelations.find(r => plan.conf.resolver(r._1, table)).map { case (_, d) =>
          if (alwaysInline) {
            d.child
          } else {
            // Add a `SubqueryAlias` for hint-resolving rules to match relation names.
            SubqueryAlias(table, CTERelationRef(d.id, d.resolved, d.output))
          }
        }.getOrElse(u)

      case other =>
        // This cannot be done in ResolveSubquery because ResolveSubquery does not know the CTE.
        other.transformExpressionsWithPruning(_.containsPattern(PLAN_EXPRESSION)) {
          case e: SubqueryExpression =>
            e.withNewPlan(apply(substituteCTE(e.plan, alwaysInline, cteRelations)))
        }
    }
}
| nchammas/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CTESubstitution.scala | Scala | apache-2.0 | 11,247 |
package dotty.tools.scaladoc
import scala.scalajs.js
import scala.scalajs.js.annotation.JSGlobalScope
/**
 * Scala.js facade for globals the code-snippet integration expects on the
 * page's global scope: the Scastie embedding API object and its configuration
 * string.
 */
@js.native
@JSGlobalScope
object CodeSnippetsGlobals extends js.Object {
  val scastie: Scastie = js.native
  val scastieConfiguration: String = js.native
}
// code-examples/BasicOOP/scoping/private-type-wont-compile.scala
// WON'T COMPILE
// Intentionally NON-COMPILING examples: every line marked ERROR violates a
// private[X] access qualifier and is rejected by the compiler.
package scopeA {
  class PrivateClass1(private[PrivateClass1] val privateField1: Int) {
    private[PrivateClass1] val privateField2 = 1

    // OK: private[PrivateClass1] members are visible within PrivateClass1,
    // including on another instance.
    def equalFields(other: PrivateClass1) =
      (privateField1  == other.privateField1) &&
      (privateField2 == other.privateField2) &&
      (nested  == other.nested)

    class Nested {
      private[Nested] val nestedField = 1
    }

    private[PrivateClass1] val nested = new Nested

    val nestedNested = nested.nestedField  // ERROR: nestedField is private[Nested]
  }

  class PrivateClass2 extends PrivateClass1(1) {
    val field1 = privateField1  // ERROR: private[PrivateClass1] hides it from subclasses
    val field2 = privateField2  // ERROR: private[PrivateClass1] hides it from subclasses
    val nField = new Nested().nestedField // ERROR: nestedField is private[Nested]
  }

  class PrivateClass3 {
    val privateClass1 = new PrivateClass1(1)
    val privateField1 = privateClass1.privateField1  // ERROR: not visible outside PrivateClass1
    val privateField2 = privateClass1.privateField2  // ERROR: not visible outside PrivateClass1
    val privateNField = privateClass1.nested.nestedField  // ERROR: nested and nestedField both inaccessible
  }
}
| XClouded/t4f-core | scala/src/tmp/BasicOOP/scoping/private-type-wont-compile.scala | Scala | apache-2.0 | 1,025 |
//https://www.hackerrank.com/challenges/valid-bst
import java.io._;
object Solution {

  /** Mutable binary-tree node holding a single byte value. */
  class Node(v: Byte) {
    var value: Byte = v
    var left: Node = null
    var right: Node = null
  }

  /**
   * Entry point. Reads T test cases; each case consists of a node count line
   * followed by a line of space-separated byte values (a candidate preorder
   * traversal), and prints YES when the sequence is a valid BST preorder,
   * NO otherwise.
   */
  def main(args: Array[String]): Unit = {
    val cases = scala.io.StdIn.readLine().toByte
    (0 until cases).foreach { _ =>
      scala.io.StdIn.readLine() // node count; the actual size comes from the values line
      val nodes = scala.io.StdIn.readLine().split(" ").map(tok => new Node(tok.toByte))
      println(if (isBSTPreorder(nodes)) "YES" else "NO")
    }
  }

  /**
   * Checks whether `nodes`, taken in order, is the preorder traversal of some
   * binary search tree. The tree is built incrementally and the check fails
   * as soon as a value cannot legally be inserted. (Construction and
   * validation are deliberately fused — this is specific to the challenge;
   * normally they would be separate steps.)
   */
  def isBSTPreorder(nodes: Array[Node]): Boolean =
    if (nodes.isEmpty) true
    else {
      val root = nodes.head
      nodes.drop(1).forall(add(root, _))
    }

  /**
   * Tries to insert `node` into the preorder-built tree rooted at `parent`.
   * In a preorder traversal the entire left subtree precedes the right one,
   * so a value smaller than `parent` is only legal while `parent` has no
   * right child yet; otherwise the sequence is rejected.
   */
  def add(parent: Node, node: Node): Boolean =
    if (node.value >= parent.value) {
      // Values not smaller than the parent always descend to the right.
      if (parent.right == null) {
        parent.right = node
        true
      } else add(parent.right, node)
    } else if (parent.right != null) {
      // A smaller value arriving after the right subtree started: invalid.
      false
    } else if (parent.left == null) {
      parent.left = node
      true
    } else add(parent.left, node)
}
/*
* Copyright 2012-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package laika.render.epub
import cats.effect.IO
import laika.io.model.RenderedTreeRoot
import munit.FunSuite
/**
 * Tests for `HtmlNavRenderer`: renders the EPUB XHTML navigation document for
 * various input trees and compares the generated nav items (plus optional CSS
 * link, title document and cover document entries) against expected markup.
 */
class HTMLNavRendererSpec extends FunSuite {

  val renderer = new HtmlNavRenderer
  val title = "Tree 1"

  /** Wraps the given nav items in the full nav document using the shared title and no CSS. */
  def result (navItems: String): String = renderer.fileContent(title, "", navItems)

  /** Renders the navigation for the given tree down to the given depth. */
  def render (input: RenderedTreeRoot[IO], depth: Int = 1): String =
    renderer.render(input, title, Some(depth))

  /** Asserts that rendering `input` at `depth` produces exactly the expected nav items. */
  def run (input: RenderedTreeRoot[IO], expectedNavItems: String, depth: Int = 1)(implicit loc: munit.Location): Unit =
    assertEquals(render(input, depth), result(expectedNavItems))

  test("render an empty tree") {
    run(EmptyTree.input, "")
  }

  test("render a tree with a single document") {
    val expected =
      """    <ol class="toc">
        |      <li id="toc-li-0">
        |        <a href="content/foo.epub.xhtml">Title 2</a>
        |
        |      </li>
        |    </ol>""".stripMargin
    run(SingleDocument.input, expected)
  }

  test("render a tree with two documents") {
    val expected =
      """    <ol class="toc">
        |      <li id="toc-li-0">
        |        <a href="content/foo.epub.xhtml">Title 2</a>
        |
        |      </li>
        |      <li id="toc-li-1">
        |        <a href="content/bar.epub.xhtml">Title 3</a>
        |
        |      </li>
        |    </ol>""".stripMargin
    run(TwoDocuments.input, expected)
  }

  test("render a tree with a single document and a CSS file") {
    val html =
      """    <ol class="toc">
        |      <li id="toc-li-0">
        |        <a href="content/foo.epub.xhtml">Title 2</a>
        |
        |      </li>
        |    </ol>""".stripMargin
    val cssLink = """<link rel="stylesheet" type="text/css" href="content/test-style.css" />"""
    val expected = renderer.fileContent(title, cssLink, html)
    assertEquals(render(DocumentPlusStyle.input), expected)
  }

  test("render a tree with a title document") {
    val html =
      """    <ol class="toc">
        |      <li id="toc-li-0">
        |        <a href="content/bar.epub.xhtml">Title 3</a>
        |
        |      </li>
        |    </ol>""".stripMargin
    // The document title comes from the title document in this case.
    val title = "From TitleDoc"
    val expected = renderer.fileContent(title, "", html, titleDoc = Some("content/title.epub.xhtml"))
    val actual = renderer.render(DocumentPlusTitle.input, "From TitleDoc", Some(1))
    assertEquals(actual, expected)
  }

  test("render a tree with a cover image") {
    val html =
      """    <ol class="toc">
        |      <li id="toc-li-0">
        |        <a href="content/foo.epub.xhtml">Title 2</a>
        |      </li>
        |      <li id="toc-li-1">
        |        <a href="content/bar.epub.xhtml">Title 3</a>
        |      </li>
        |    </ol>""".stripMargin
    val expected = renderer.fileContent(title, "", html, coverDoc = Some("content/cover.epub.xhtml"))
    assertEquals(render(DocumentPlusCover.input), expected)
  }

  test("render a tree with a nested tree") {
    val expected =
      """    <ol class="toc">
        |      <li id="toc-li-0">
        |        <a href="content/foo.epub.xhtml">Title 2</a>
        |
        |      </li>
        |      <li id="toc-li-1">
        |        <a href="content/sub/bar.epub.xhtml">Tree 4</a>
        |      <ol class="toc">
        |        <li id="toc-li-2">
        |          <a href="content/sub/bar.epub.xhtml">Title 3</a>
        |
        |        </li>
        |      </ol>
        |      </li>
        |    </ol>""".stripMargin
    run(NestedTree.input, expected, depth = 2)
  }

  test("render a tree with a nested tree with a title document") {
    val expected =
      """    <ol class="toc">
        |      <li id="toc-li-0">
        |        <a href="content/foo.epub.xhtml">Title 2</a>
        |
        |      </li>
        |      <li id="toc-li-1">
        |        <a href="content/sub/title.epub.xhtml">From TitleDoc</a>
        |      <ol class="toc">
        |        <li id="toc-li-2">
        |          <a href="content/sub/bar.epub.xhtml">Title 3</a>
        |
        |        </li>
        |      </ol>
        |      </li>
        |    </ol>""".stripMargin
    run(NestedTreeWithTitleDoc.input, expected, depth = 2)
  }

  test("not render a nested tree if the depth is 1") {
    val expected =
      """    <ol class="toc">
        |      <li id="toc-li-0">
        |        <a href="content/foo.epub.xhtml">Title 2</a>
        |
        |      </li>
        |    </ol>""".stripMargin
    run(NestedTree.input, expected)
  }

  test("render a document with sections when the depth is 2") {
    val expected =
      """    <ol class="toc">
        |      <li id="toc-li-0">
        |        <a href="content/foo.epub.xhtml">Title 2</a>
        |        <ol class="toc">
        |        <li id="toc-li-1">
        |          <a href="content/foo.epub.xhtml#A">Section A</a>
        |
        |        </li>
        |        <li id="toc-li-2">
        |          <a href="content/foo.epub.xhtml#B">Section B</a>
        |
        |        </li>
        |        </ol>
        |      </li>
        |      <li id="toc-li-3">
        |        <a href="content/bar.epub.xhtml">Title 3</a>
        |        <ol class="toc">
        |        <li id="toc-li-4">
        |          <a href="content/bar.epub.xhtml#A">Section A</a>
        |
        |        </li>
        |        <li id="toc-li-5">
        |          <a href="content/bar.epub.xhtml#B">Section B</a>
        |
        |        </li>
        |        </ol>
        |      </li>
        |    </ol>""".stripMargin
    run(DocumentsWithSections.input, expected, depth = 2)
  }

  test("not render a document with sections when the depth is 1") {
    val expected = """    <ol class="toc">
                     |      <li id="toc-li-0">
                     |        <a href="content/foo.epub.xhtml">Title 2</a>
                     |
                     |      </li>
                     |      <li id="toc-li-1">
                     |        <a href="content/bar.epub.xhtml">Title 3</a>
                     |
                     |      </li>
                     |    </ol>""".stripMargin
    run(DocumentsWithSections.input, expected)
  }

  test("escape special characters in titles") {
    val expected =
      """    <ol class="toc">
        |      <li id="toc-li-0">
        |        <a href="content/foo.epub.xhtml">This &amp; That</a>
        |
        |      </li>
        |    </ol>""".stripMargin
    run(DocumentWithSpecialChars.input, expected)
  }
}
| planet42/Laika | io/src/test/scala/laika/render/epub/HTMLNavRendererSpec.scala | Scala | apache-2.0 | 7,123 |
package ch4
import org.learningconcurrency._
/**
 * `Future` is a container type standing for the eventual result of type T of
 * a piece of code. The code may fail or time out, so a completed Future need
 * not hold a successful result at all — it can hold an exception instead.
 * A Future represents the result of an asynchronous task that may not have
 * finished yet; callbacks can be attached to react once the task succeeds or
 * fails.
 */
object FuturesComputation extends App {
  /**
   * Running a computation inside a Future:
   * 1. Import the global execution context first, so the Future body is
   *    guaranteed to run on the global context.
   * The order of the two `log` calls is nondeterministic. The block following
   * the `Future` companion object is syntactic sugar for calling its apply
   * method.
   */
  import scala.concurrent._
  import ExecutionContext.Implicits.global

  Future {
    log(s"the future is here")
  }
  log(s"the future is coming")
}
/**
 * Reads the contents of the build.sbt file with Source.fromFile inside a
 * Future computation.
 * 1. Import the global execution context first, so the Future body is
 *    guaranteed to run on the global context.
 */
object FuturesDataType extends App {
  import scala.concurrent._
  import ExecutionContext.Implicits.global
  import scala.io.Source

  // Asynchronously read build.sbt inside a Future computation.
  val buildFile: Future[String] = Future {
    val f = Source.fromFile("build.sbt")
    try f.getLines.mkString("\\n") finally f.close()
  }
  log(s"started reading build file asynchronously")

  /**
   * The main thread polls `isCompleted` on the Future behind `buildFile`.
   * Reading build.sbt has most likely not finished yet, so the first call
   * returns false; after sleeping 250 ms the second call should return true,
   * and finally `value` yields the contents of the file.
   */
  log(s"status: ${buildFile.isCompleted}") // true only if the async read already completed
  Thread.sleep(250)
  log(s"status: ${buildFile.isCompleted}") // false while the async read has not completed
  log(s"status: ${buildFile.value}")       // the resulting value
}
/**
 * Callbacks on Future objects: finds every occurrence of the word "telnet"
 * in the URL specification fetched from the W3 site.
 */
object FuturesCallbacks extends App {
  import scala.concurrent._
  import ExecutionContext.Implicits.global
  import scala.io.Source

  /**
   * Fetches the URL specification document from the W3 site, holding its text
   * in a Source. getUrlSpec performs the HTTP request asynchronously inside a
   * Future: it first calls fromURL to obtain the Source containing the text
   * document, then getLines to turn the document into a list of lines.
   */
  def getUrlSpec(): Future[Seq[String]] = Future {
    // Perform the HTTP request asynchronously.
    val f = Source.fromURL("http://www.w3.org/Addressing/URL/url-spec.txt")
    try {
      f.getLines.toList
    }finally{
      f.close()
    }
  }
  val urlSpec: Future[Seq[String]] = getUrlSpec()

  /**
   * To locate the lines of urlSpec containing the word "telnet" we use the
   * find method, which takes the list of lines plus the word to search for
   * and returns the matches as a string. find expects a Seq parameter, while
   * urlSpec is a Future[Seq[String]], so the Future cannot be passed to find
   * directly — and at the moment find would be called, the Future may not
   * even be available yet.
   */
  def find(lines: Seq[String], word: String) = lines.zipWithIndex.collect {
    // zipWithIndex pairs each element with its index
    case (line, n) if line.contains(word) => (n, line)
  }.mkString("\\n") // render this iterator as a single string

  /**
   * Register a callback on the Future with foreach. Note that onSuccess is
   * equivalent to foreach but may be deprecated after Scala 2.11; foreach
   * takes a partial function as its argument.
   */
  urlSpec.foreach {// foreach takes a partial function
    case lines => log(s"Found occurrences of 'telnet'\\n${find(lines, "telnet")}\\n")
  }

  /**
   * Thread.sleep
   * The key point: registering a callback is a non-blocking operation. Once
   * the callback is registered, the log statement in the main thread runs
   * immediately, whereas the log statement inside the callback may run much
   * later. A callback need not be invoked as soon as the Future is completed;
   * most execution contexts invoke callbacks asynchronously via scheduled
   * tasks.
   */
  Thread.sleep(2000) // without this sleep the async callbacks may never get to print
  log("callbacks registered, continuing with other work")
  /**
   * ForkJoinPool-1-worker-5: Found occurrences of 'telnet'
   * (207, telnet , rlogin and tn3270 )
   * (745, nntpaddress | prosperoaddress | telnetaddress)
   * (806, telnetaddress t e l n e t : / / login )
   * (931, for a given protocol (for example, CR and LF characters for telnet)
     ForkJoinPool-1-worker-5: Found occurrences of 'password'
   * (107, servers). The password, is present, follows)
   * (109, the user name and optional password are)
   * (111, user of user name and passwords which are)
   * (222, User name and password)
   * (225, password for those systems which do not use the anonymous FTP)
   * (226, convention. The default, however, if no user or password is)
   * (234, is "anonymous" and the password the user's Internet-style mail)
   * (240, currently vary in their treatment of the anonymous password. )
   * (816, login [ user [ : password ] @ ] hostport )
   * (844, password alphanum2 [ password ] )
   * (938, The use of URLs containing passwords is clearly unwise. )
   */

  /**
   * Multiple callbacks may be registered on the same Future; to also find all
   * occurrences of the word "password" in the document, simply add another.
   */
  urlSpec.foreach {
    lines => log(s"Found occurrences of 'password'\\n${find(lines, "password")}\\n")
  }
  Thread.sleep(1000)
  log("callbacks installed, continuing with other work")
}
/**
 * Failed computations and exception handling.
 */
object FuturesFailure extends App {
  import scala.concurrent._
  import ExecutionContext.Implicits.global
  import scala.io.Source

  /**
   * When the computation completing a Future is executed, it may either
   * complete the Future successfully or fail it.
   */
  val urlSpec: Future[String] = Future {
    /**
     * The HTTP request targets a non-existent URL: fromURL throws an
     * exception and the Future urlSpec fails.
     */
    Source.fromURL("http://www.w3.org/non-existent-url-spec.txt").mkString
  }

  /**
   * foreach registers callbacks handling a successfully completed Future; the
   * `failed` projection accepts callbacks handling the failure case. It
   * returns a Future[Throwable] holding the exception the Future failed with,
   * so combining failed with foreach gives access to that exception.
   */
  urlSpec.failed.foreach {
    case t => {
      log(s"exception occurred - $t")
    }
  }
  Thread.sleep(2000)
}
/**
 * To keep the code concise, success and failure are sometimes handled in the
 * same callback using the Try type. Try has two subtypes: Success, holding
 * the result of a successfully executed operation, and Failure, holding the
 * exception of a failed one; pattern matching tells which subtype a given
 * Try value is.
 */
object FuturesExceptions extends App {
  import scala.concurrent._
  import ExecutionContext.Implicits.global
  import scala.io.Source

  val file = Future { Source.fromFile(".gitignore-SAMPLE").getLines.mkString("\\n") }
  // success callback
  file.foreach {
    text => log(text)
  }
  // failure callback
  file.failed foreach {// exception handling; a FileNotFoundException is the expected failure
    case fnfe: java.io.FileNotFoundException => log(s"Cannot find file - $fnfe")
    case t => log(s"Failed due to $t")
  }
  import scala.util.{Try, Success, Failure}
  file.onComplete {// this callback is a partial function matching Success[T] and Failure[T] values
    case Success(text) => log(text)
    case Failure(t) => log(s"Failed due to $t")
  }
  Thread.sleep(2000)
}
/**
 * The Try type has two subtypes: Success, holding the result of a successful
 * operation, and Failure, holding the exception of a failed one; pattern
 * matching determines which subtype a Try value is.
 */
object FuturesTry extends App {
  import scala.util._

  /**
   * A Try[String] is an immutable object used synchronously: unlike a Future,
   * it holds a value or an exception from the moment it is created — in that
   * respect it behaves less like a Future and more like a collection.
   */
  val threadName: Try[String] = Try(Thread.currentThread.getName)
  val someText: Try[String] = Try("Try objects are created synchronously")
  val message: Try[String] = for {
    tn <- threadName
    st <- someText
  } yield s"$st, t = $tn"
  message match {
    case Success(msg) => log(msg)
    case Failure(error) => log(s"There should be no $error here.")
  }
}
/**
 * Fatal exceptions: earlier examples showed how Futures fail; some Throwable
 * objects cannot be caught by a Future at all. NOTE(review): observed here,
 * only one of the two failure callbacks fires — presumably because
 * InterruptedException is excluded from scala.util.control.NonFatal while
 * IllegalArgumentException is not; confirm against the Scala version in use.
 */
object FuturesNonFatal extends App {
  import scala.concurrent._
  import ExecutionContext.Implicits.global

  val f = Future { throw new InterruptedException }
  val g = Future { throw new IllegalArgumentException }
  f.failed foreach { case t => log(s"error - $t") }
  g.failed foreach { case t => log(s"error - $t") }
  Thread.sleep(2000)
}
/**
 * Function composition on Future objects. Introducing Futures moves the
 * responsibility for blocking threads from the API over to the calling
 * thread.
 */
object FuturesClumsyCallback extends App {
  import scala.concurrent._
  import ExecutionContext.Implicits.global
  import org.apache.commons.io.FileUtils._
  import java.io._
  import scala.io.Source
  import scala.collection.convert.decorateAsScala._

  /**
   * blacklistFile reads the contents of the named file, dropping blank lines
   * and every comment line starting with '#'. Combined with findFiles (which
   * searches recursively for matching file names), the returned Future
   * eventually holds a list of strings — e.g. the SBT-project files matching
   * the .gitignore patterns.
   *
   * The file contents are read asynchronously.
   */
  def blacklistFile(filename: String) = Future {
    val lines = Source.fromFile(filename).getLines
    lines.filter(x => !x.startsWith("#") && !x.isEmpty()).toList
  }

  /**
   * Reads file paths asynchronously: scans every file under the project
   * directory and matches it against the given pattern list, returning the
   * canonical paths of all files in the current directory matching any of
   * those patterns.
   */
  def findFiles(patterns: List[String]): List[String] = {
    val root = new File(".")
    //println(root.getAbsolutePath+"|||||"+root.getCanonicalPath)
    for {
      // iterateFiles (from the commons-io package) returns a Java iterator
      // over the project files; calling asScala converts it to a Scala
      // iterator so all matching file paths can be collected.
      f <- iterateFiles(root, null, true).asScala.toList
      pat <- patterns
      abspat = root.getCanonicalPath + File.separator + pat
      if f.getCanonicalPath.contains(abspat)
    } yield
    {
      println(">>>>>>>."+f.getCanonicalPath)
      f.getCanonicalPath
    }
  }

  /**
   * Function composition lets Futures be used inside for-comprehensions,
   * which is usually clearer than using callbacks directly; using foreach
   * here avoids blocking entirely.
   */
  //val lines = Source.fromFile(".gitignore").getLines
  // lines.filter(x => !x.startsWith("#") && !x.isEmpty()).foreach { x => println(">>>>>>>>>>"+x) }
  blacklistFile(".gitignore").foreach {
    case lines =>
      val files = findFiles(lines)
      files.foreach { x => println("|||||||||"+x) }
      log(s"matches: ${files.mkString("\\n")}")
  }

  /**
   * The same pipeline simplified with map: map takes a function f and returns
   * a new Future object.
   */
  def blacklistFiles(filename: String):Future[List[String]]=blacklistFile(filename).map(patterns => findFiles(patterns))
  blacklistFiles(".gitignore").foreach { x => log(s"matches: ${x.mkString("\\n")}") }
  Thread.sleep(2000)
  //System.exit(0);
}
/**
 * Demonstrates Future.map: derives the longest line of build.sbt without
 * blocking, and shows failure handling via the `failed` projection for a
 * file that does not exist.
 */
object FuturesMap extends App {
  import scala.concurrent._
  import ExecutionContext.Implicits.global
  import scala.io.Source
  import scala.util.{Failure, Success}
  // Read the build definition asynchronously.
  val buildFile = Future { Source.fromFile("build.sbt").getLines }
  // This file is expected to be missing, to exercise the failure path below.
  val gitignoreFile = Future { Source.fromFile(".gitignore-SAMPLE").getLines }
  /**
   * Future.map maps the value inside one Future into another Future without
   * blocking: the returned Future[S] is completed with f(x) once the original
   * Future is completed with x.
   */
  val longestBuildLine = buildFile.map(lines => lines.maxBy(_.length))
  // Fix: the original callback was a partial function matching only Success,
  // so a failed future raised a MatchError inside the callback. onComplete
  // receives a Try, so both outcomes must be handled.
  longestBuildLine.onComplete {
    case Success(line) => log(s"the longest line is '$line'")
    case Failure(t) => log(s"could not determine the longest line: ${t.getMessage}")
  }
  // Same transformation via a for-comprehension; errors are observed through
  // the failed projection instead.
  val longestGitignoreLine = for (lines <- gitignoreFile) yield lines.maxBy(_.length)
  longestGitignoreLine.failed.foreach {
    case t => log(s"no longest line, because ${t.getMessage}")
  }
  Thread.sleep(2000)
}
/**
 * "Raw" flatMap composition: nests map inside flatMap by hand. Compare with
 * FuturesFlatMap below, which expresses the same pipeline as a
 * for-comprehension.
 */
object FuturesFlatMapRaw extends App {
  import scala.concurrent._
  import ExecutionContext.Implicits.global
  import scala.io.Source
  // Fetch the netiquette RFC asynchronously.
  val netiquette = Future { Source.fromURL("http://www.ietf.org/rfc/rfc1855.txt").mkString }
  // Fetch the URL specification asynchronously.
  val urlSpec = Future { Source.fromURL("http://www.w3.org/Addressing/URL/url-spec.txt").mkString }
  // Combine the eventual results; both downloads were started above, so they
  // run concurrently even though the combination is expressed sequentially.
  val answer = netiquette.flatMap { nettext =>
    urlSpec.map { urltext =>
      "First, read this: " + nettext + ". Now, try this: " + urltext
    }
  }
  answer foreach {
    case contents => log(contents)
  }
  Thread.sleep(2000)
}
object FuturesFlatMap extends App {
  import scala.concurrent._
  import ExecutionContext.Implicits.global
  import scala.io.Source
  /**
   * Uses flatMap implicitly through a for-comprehension: the compiler
   * desugars the `for` below into the flatMap/map chain of the previous
   * example, which reads much more clearly.
   */
  val netiquette = Future { Source.fromURL("http://www.ietf.org/rfc/rfc1855.txt").mkString }
  val urlSpec = Future { Source.fromURL("http://www.w3.org/Addressing/URL/url-spec.txt").mkString }
  val answer = for {
    nettext <- netiquette
    urltext <- urlSpec
  } yield {
    "First of all, read this: " + nettext + " Once you're done, try this: " + urltext
  }
  /**
   * `answer` is a new Future produced by concatenating the nettext value of
   * the netiquette Future with the urltext value of the urlSpec Future; both
   * downloads were already started above, so they proceed concurrently.
   */
  answer.foreach {
    case contents => log(contents)
  }
  Thread.sleep(2000)
}
/**
 * Variant of FuturesFlatMap where the second Future is created only after the
 * first completes: inside a for-comprehension each generator's right-hand side
 * is evaluated when the previous Future finishes, so the two downloads here
 * run sequentially. This pattern only makes sense when the second computation
 * actually depends on the first result.
 */
object FuturesDifferentFlatMap extends App {
  import scala.concurrent._
  import ExecutionContext.Implicits.global
  import scala.io.Source
  val answer = for {
    nettext <- Future { Source.fromURL("http://www.ietf.org/rfc/rfc1855.txt").mkString }
    urltext <- Future { Source.fromURL("http://www.w3.org/Addressing/URL/url-spec.txt").mkString }
  } yield {
    "First of all, read this: " + nettext + " Once you're done, try this: " + urltext
  }
  answer foreach {
    case contents => log(contents)
  }
  // Fix: keep the main thread alive like the sibling demos do; the global
  // ExecutionContext runs on daemon threads, so without this the JVM may exit
  // before the callback gets to log anything.
  Thread.sleep(2000)
}
/**
 * Demonstrates Future.recover: supplies a fallback value when the original
 * computation fails with a matching exception type.
 */
object FuturesRecover extends App {
  import scala.concurrent._
  import ExecutionContext.Implicits.global
  import scala.io.Source
  // The ".doc" resource is expected not to exist, so fromURL should fail and
  // the recover block supplies the default message instead.
  val netiquetteUrl = "http://www.ietf.org/rfc/rfc1855.doc"
  val netiquette = Future { Source.fromURL(netiquetteUrl).mkString } recover {
    case f: java.io.FileNotFoundException =>
      "Dear boss, thank you for your e-mail." +
      "You might be interested to know that ftp links " +
      "can also point to regular files we keep on our servers."
  }
  netiquette foreach {
    case contents => log(contents)
  }
  // Fix: wait like the sibling demos do; the global ExecutionContext uses
  // daemon threads, so the JVM could otherwise exit before the callback runs.
  Thread.sleep(2000)
}
/**
 * Demonstrates Future.reduce: folds a collection of Futures into a single
 * Future holding the combined result (here the sum of the squares 0..9).
 */
object FuturesReduce extends App {
  import scala.concurrent._
  import ExecutionContext.Implicits.global
  // Ten independent Futures, each computing one square.
  val squares = for (i <- 0 until 10) yield Future { i * i }
  val sumOfSquares = Future.reduce(squares)(_ + _)
  sumOfSquares foreach {
    case sum => log(s"Sum of squares = $sum")
  }
  // Fix: keep the main thread alive (consistent with the other demos in this
  // file) so the daemon-threaded global ExecutionContext can run the callback
  // before the JVM exits.
  Thread.sleep(2000)
}
| tophua/spark1.52 | examples/src/main/scala/scalaDemo/threadConcurrency/ch4/Futures.scala | Scala | apache-2.0 | 16,735 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import java.text.SimpleDateFormat
import java.util.Date
import java.io.EOFException
import scala.collection.immutable.Map
import scala.reflect.ClassTag
import scala.collection.mutable.ListBuffer
import org.apache.hadoop.conf.{Configurable, Configuration}
import org.apache.hadoop.mapred.FileSplit
import org.apache.hadoop.mapred.InputFormat
import org.apache.hadoop.mapred.InputSplit
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.mapred.RecordReader
import org.apache.hadoop.mapred.Reporter
import org.apache.hadoop.mapred.JobID
import org.apache.hadoop.mapred.TaskAttemptID
import org.apache.hadoop.mapred.TaskID
import org.apache.hadoop.mapred.lib.CombineFileSplit
import org.apache.hadoop.util.ReflectionUtils
import org.apache.spark._
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.executor.DataReadMethod
import org.apache.spark.rdd.HadoopRDD.HadoopMapPartitionsWithSplitRDD
import org.apache.spark.util.{SerializableConfiguration, ShutdownHookManager, NextIterator, Utils}
import org.apache.spark.scheduler.{HostTaskLocation, HDFSCacheTaskLocation}
import org.apache.spark.storage.StorageLevel
/**
 * A Spark split class that wraps around a Hadoop InputSplit.
 */
private[spark] class HadoopPartition(rddId: Int, idx: Int, @transient s: InputSplit)
  extends Partition {

  // Hadoop InputSplits are Writable but not Serializable; wrap so the
  // partition can be shipped with the task.
  val inputSplit = new SerializableWritable[InputSplit](s)

  // Identity is (rddId, idx); keep stable across serialization round-trips.
  override def hashCode(): Int = 41 * (41 + rddId) + idx

  override val index: Int = idx

  /**
   * Get any environment variables that should be added to the users environment when running pipes
   * @return a Map with the environment variables and corresponding values, it could be empty
   */
  def getPipeEnvVars(): Map[String, String] = {
    val envVars: Map[String, String] = if (inputSplit.value.isInstanceOf[FileSplit]) {
      val is: FileSplit = inputSplit.value.asInstanceOf[FileSplit]
      // map_input_file is deprecated in favor of mapreduce_map_input_file but set both
      // since its not removed yet
      Map("map_input_file" -> is.getPath().toString(),
        "mapreduce_map_input_file" -> is.getPath().toString())
    } else {
      // Non-file splits (e.g. HBase regions) expose no input-file variable.
      Map()
    }
    envVars
  }
}
/**
 * :: DeveloperApi ::
 * An RDD that provides core functionality for reading data stored in Hadoop (e.g., files in HDFS,
 * sources in HBase, or S3), using the older MapReduce API (`org.apache.hadoop.mapred`).
 *
 * Note: Instantiating this class directly is not recommended, please use
 * [[org.apache.spark.SparkContext.hadoopRDD()]]
 *
 * @param sc The SparkContext to associate the RDD with.
 * @param broadcastedConf A general Hadoop Configuration, or a subclass of it. If the enclosed
 *   variable references an instance of JobConf, then that JobConf will be used for the Hadoop job.
 *   Otherwise, a new JobConf will be created on each slave using the enclosed Configuration.
 * @param initLocalJobConfFuncOpt Optional closure used to initialize any JobConf that HadoopRDD
 *   creates.
 * @param inputFormatClass Storage format of the data to be read.
 * @param keyClass Class of the key associated with the inputFormatClass.
 * @param valueClass Class of the value associated with the inputFormatClass.
 * @param minPartitions Minimum number of HadoopRDD partitions (Hadoop Splits) to generate.
 */
@DeveloperApi
class HadoopRDD[K, V](
    @transient sc: SparkContext,
    broadcastedConf: Broadcast[SerializableConfiguration],
    initLocalJobConfFuncOpt: Option[JobConf => Unit],
    inputFormatClass: Class[_ <: InputFormat[K, V]],
    keyClass: Class[K],
    valueClass: Class[V],
    minPartitions: Int)
  extends RDD[(K, V)](sc, Nil) with Logging {

  // Clean the closure eagerly so a non-serializable capture fails fast on the
  // driver instead of at task-serialization time.
  if (initLocalJobConfFuncOpt.isDefined) {
    sc.clean(initLocalJobConfFuncOpt.get)
  }

  /** Convenience constructor that broadcasts an existing JobConf. */
  def this(
      sc: SparkContext,
      conf: JobConf,
      inputFormatClass: Class[_ <: InputFormat[K, V]],
      keyClass: Class[K],
      valueClass: Class[V],
      minPartitions: Int) = {
    this(
      sc,
      sc.broadcast(new SerializableConfiguration(conf))
        .asInstanceOf[Broadcast[SerializableConfiguration]],
      None /* initLocalJobConfFuncOpt */,
      inputFormatClass,
      keyClass,
      valueClass,
      minPartitions)
  }

  // Per-RDD keys into the process-local metadata cache (see object HadoopRDD).
  protected val jobConfCacheKey = "rdd_%d_job_conf".format(id)

  protected val inputFormatCacheKey = "rdd_%d_input_format".format(id)

  // used to build JobTracker ID
  private val createTime = new Date()

  private val shouldCloneJobConf = sc.conf.getBoolean("spark.hadoop.cloneConf", false)

  // Returns a JobConf that will be used on slaves to obtain input splits for Hadoop reads.
  protected def getJobConf(): JobConf = {
    val conf: Configuration = broadcastedConf.value.value
    if (shouldCloneJobConf) {
      // Hadoop Configuration objects are not thread-safe, which may lead to various problems if
      // one job modifies a configuration while another reads it (SPARK-2546). This problem occurs
      // somewhat rarely because most jobs treat the configuration as though it's immutable. One
      // solution, implemented here, is to clone the Configuration object. Unfortunately, this
      // clone can be very expensive. To avoid unexpected performance regressions for workloads and
      // Hadoop versions that do not suffer from these thread-safety issues, this cloning is
      // disabled by default.
      HadoopRDD.CONFIGURATION_INSTANTIATION_LOCK.synchronized {
        logDebug("Cloning Hadoop Configuration")
        val newJobConf = new JobConf(conf)
        if (!conf.isInstanceOf[JobConf]) {
          initLocalJobConfFuncOpt.map(f => f(newJobConf))
        }
        newJobConf
      }
    } else {
      if (conf.isInstanceOf[JobConf]) {
        logDebug("Re-using user-broadcasted JobConf")
        conf.asInstanceOf[JobConf]
      } else if (HadoopRDD.containsCachedMetadata(jobConfCacheKey)) {
        logDebug("Re-using cached JobConf")
        HadoopRDD.getCachedMetadata(jobConfCacheKey).asInstanceOf[JobConf]
      } else {
        // Create a JobConf that will be cached and used across this RDD's getJobConf() calls in the
        // local process. The local cache is accessed through HadoopRDD.putCachedMetadata().
        // The caching helps minimize GC, since a JobConf can contain ~10KB of temporary objects.
        // Synchronize to prevent ConcurrentModificationException (SPARK-1097, HADOOP-10456).
        HadoopRDD.CONFIGURATION_INSTANTIATION_LOCK.synchronized {
          logDebug("Creating new JobConf and caching it for later re-use")
          val newJobConf = new JobConf(conf)
          initLocalJobConfFuncOpt.map(f => f(newJobConf))
          HadoopRDD.putCachedMetadata(jobConfCacheKey, newJobConf)
          newJobConf
        }
      }
    }
  }

  protected def getInputFormat(conf: JobConf): InputFormat[K, V] = {
    if (HadoopRDD.containsCachedMetadata(inputFormatCacheKey)) {
      return HadoopRDD.getCachedMetadata(inputFormatCacheKey).asInstanceOf[InputFormat[K, V]]
    }
    // Once an InputFormat for this RDD is created, cache it so that only one reflection call is
    // done in each local process.
    val newInputFormat = ReflectionUtils.newInstance(inputFormatClass.asInstanceOf[Class[_]], conf)
      .asInstanceOf[InputFormat[K, V]]
    if (newInputFormat.isInstanceOf[Configurable]) {
      newInputFormat.asInstanceOf[Configurable].setConf(conf)
    }
    HadoopRDD.putCachedMetadata(inputFormatCacheKey, newInputFormat)
    newInputFormat
  }

  // One HadoopPartition per Hadoop InputSplit, computed on the driver.
  override def getPartitions: Array[Partition] = {
    val jobConf = getJobConf()
    // add the credentials here as this can be called before SparkContext initialized
    SparkHadoopUtil.get.addCredentials(jobConf)
    val inputFormat = getInputFormat(jobConf)
    if (inputFormat.isInstanceOf[Configurable]) {
      inputFormat.asInstanceOf[Configurable].setConf(jobConf)
    }
    val inputSplits = inputFormat.getSplits(jobConf, minPartitions)
    val array = new Array[Partition](inputSplits.size)
    for (i <- 0 until inputSplits.size) {
      array(i) = new HadoopPartition(id, i, inputSplits(i))
    }
    array
  }

  // Opens a RecordReader over the partition's split and exposes it as an
  // iterator; closing/metrics bookkeeping is handled by NextIterator.close().
  override def compute(theSplit: Partition, context: TaskContext): InterruptibleIterator[(K, V)] = {
    val iter = new NextIterator[(K, V)] {
      val split = theSplit.asInstanceOf[HadoopPartition]
      logInfo("Input split: " + split.inputSplit)
      val jobConf = getJobConf()

      val inputMetrics = context.taskMetrics.getInputMetricsForReadMethod(DataReadMethod.Hadoop)

      // Find a function that will return the FileSystem bytes read by this thread. Do this before
      // creating RecordReader, because RecordReader's constructor might read some bytes
      val bytesReadCallback = inputMetrics.bytesReadCallback.orElse {
        split.inputSplit.value match {
          case _: FileSplit | _: CombineFileSplit =>
            SparkHadoopUtil.get.getFSBytesReadOnThreadCallback()
          case _ => None
        }
      }
      inputMetrics.setBytesReadCallback(bytesReadCallback)

      var reader: RecordReader[K, V] = null
      val inputFormat = getInputFormat(jobConf)
      HadoopRDD.addLocalConfiguration(new SimpleDateFormat("yyyyMMddHHmm").format(createTime),
        context.stageId, theSplit.index, context.attemptNumber, jobConf)
      reader = inputFormat.getRecordReader(split.inputSplit.value, jobConf, Reporter.NULL)

      // Register an on-task-completion callback to close the input stream.
      context.addTaskCompletionListener{ context => closeIfNeeded() }
      // NOTE: the old mapred API reuses these two Writable instances for every
      // record; callers must copy if they cache records (see persist below).
      val key: K = reader.createKey()
      val value: V = reader.createValue()

      override def getNext(): (K, V) = {
        try {
          finished = !reader.next(key, value)
        } catch {
          // Some input formats signal end-of-stream via EOFException.
          case eof: EOFException =>
            finished = true
        }
        if (!finished) {
          inputMetrics.incRecordsRead(1)
        }
        (key, value)
      }

      override def close() {
        if (reader != null) {
          // Close the reader and release it. Note: it's very important that we don't close the
          // reader more than once, since that exposes us to MAPREDUCE-5918 when running against
          // Hadoop 1.x and older Hadoop 2.x releases. That bug can lead to non-deterministic
          // corruption issues when reading compressed input.
          try {
            reader.close()
          } catch {
            case e: Exception =>
              if (!ShutdownHookManager.inShutdown()) {
                logWarning("Exception in RecordReader.close()", e)
              }
          } finally {
            reader = null
          }
          if (bytesReadCallback.isDefined) {
            inputMetrics.updateBytesRead()
          } else if (split.inputSplit.value.isInstanceOf[FileSplit] ||
                     split.inputSplit.value.isInstanceOf[CombineFileSplit]) {
            // If we can't get the bytes read from the FS stats, fall back to the split size,
            // which may be inaccurate.
            try {
              inputMetrics.incBytesRead(split.inputSplit.value.getLength)
            } catch {
              case e: java.io.IOException =>
                logWarning("Unable to get input size to set InputMetrics for task", e)
            }
          }
        }
      }
    }
    new InterruptibleIterator[(K, V)](context, iter)
  }

  /** Maps over a partition, providing the InputSplit that was used as the base of the partition. */
  @DeveloperApi
  def mapPartitionsWithInputSplit[U: ClassTag](
      f: (InputSplit, Iterator[(K, V)]) => Iterator[U],
      preservesPartitioning: Boolean = false): RDD[U] = {
    new HadoopMapPartitionsWithSplitRDD(this, f, preservesPartitioning)
  }

  // Prefers the richer InputSplitWithLocationInfo API (via reflection, when
  // available) so HDFS-cached replicas can be preferred; otherwise falls back
  // to the plain getLocations() of the split.
  override def getPreferredLocations(split: Partition): Seq[String] = {
    val hsplit = split.asInstanceOf[HadoopPartition].inputSplit.value
    val locs: Option[Seq[String]] = HadoopRDD.SPLIT_INFO_REFLECTIONS match {
      case Some(c) =>
        try {
          val lsplit = c.inputSplitWithLocationInfo.cast(hsplit)
          val infos = c.getLocationInfo.invoke(lsplit).asInstanceOf[Array[AnyRef]]
          Some(HadoopRDD.convertSplitLocationInfo(infos))
        } catch {
          case e: Exception =>
            logDebug("Failed to use InputSplitWithLocations.", e)
            None
        }
      case None => None
    }
    locs.getOrElse(hsplit.getLocations.filter(_ != "localhost"))
  }

  override def checkpoint() {
    // Do nothing. Hadoop RDD should not be checkpointed.
  }

  override def persist(storageLevel: StorageLevel): this.type = {
    if (storageLevel.deserialized) {
      logWarning("Caching NewHadoopRDDs as deserialized objects usually leads to undesired" +
        " behavior because Hadoop's RecordReader reuses the same Writable object for all records." +
        " Use a map transformation to make copies of the records.")
    }
    super.persist(storageLevel)
  }

  def getConf: Configuration = getJobConf()
}
private[spark] object HadoopRDD extends Logging {
  /**
   * Configuration's constructor is not threadsafe (see SPARK-1097 and HADOOP-10456).
   * Therefore, we synchronize on this lock before calling new JobConf() or new Configuration().
   */
  val CONFIGURATION_INSTANTIATION_LOCK = new Object()

  /** Update the input bytes read metric each time this number of records has been read */
  val RECORDS_BETWEEN_BYTES_READ_METRIC_UPDATES = 256

  /**
   * The three methods below are helpers for accessing the local map, a property of the SparkEnv of
   * the local process.
   */
  def getCachedMetadata(key: String): Any = SparkEnv.get.hadoopJobMetadata.get(key)

  def containsCachedMetadata(key: String): Boolean = SparkEnv.get.hadoopJobMetadata.containsKey(key)

  private def putCachedMetadata(key: String, value: Any): Unit =
    SparkEnv.get.hadoopJobMetadata.put(key, value)

  /** Add Hadoop configuration specific to a single partition and attempt. */
  def addLocalConfiguration(jobTrackerId: String, jobId: Int, splitId: Int, attemptId: Int,
    conf: JobConf) {
    val jobID = new JobID(jobTrackerId, jobId)
    // `true` marks this as a map task in the old mapred TaskID scheme.
    val taId = new TaskAttemptID(new TaskID(jobID, true, splitId), attemptId)
    conf.set("mapred.tip.id", taId.getTaskID.toString)
    conf.set("mapred.task.id", taId.toString)
    conf.setBoolean("mapred.task.is.map", true)
    conf.setInt("mapred.task.partition", splitId)
    conf.set("mapred.job.id", jobID.toString)
  }

  /**
   * Analogous to [[org.apache.spark.rdd.MapPartitionsRDD]], but passes in an InputSplit to
   * the given function rather than the index of the partition.
   */
  private[spark] class HadoopMapPartitionsWithSplitRDD[U: ClassTag, T: ClassTag](
      prev: RDD[T],
      f: (InputSplit, Iterator[T]) => Iterator[U],
      preservesPartitioning: Boolean = false)
    extends RDD[U](prev) {

    override val partitioner = if (preservesPartitioning) firstParent[T].partitioner else None

    override def getPartitions: Array[Partition] = firstParent[T].partitions

    override def compute(split: Partition, context: TaskContext): Iterator[U] = {
      val partition = split.asInstanceOf[HadoopPartition]
      val inputSplit = partition.inputSplit.value
      f(inputSplit, firstParent[T].iterator(split, context))
    }
  }

  // Reflection handles for location-info APIs that only exist in newer Hadoop
  // versions; resolved once and reused (see SPLIT_INFO_REFLECTIONS below).
  private[spark] class SplitInfoReflections {
    val inputSplitWithLocationInfo =
      Utils.classForName("org.apache.hadoop.mapred.InputSplitWithLocationInfo")
    val getLocationInfo = inputSplitWithLocationInfo.getMethod("getLocationInfo")
    val newInputSplit = Utils.classForName("org.apache.hadoop.mapreduce.InputSplit")
    val newGetLocationInfo = newInputSplit.getMethod("getLocationInfo")
    val splitLocationInfo = Utils.classForName("org.apache.hadoop.mapred.SplitLocationInfo")
    val isInMemory = splitLocationInfo.getMethod("isInMemory")
    val getLocation = splitLocationInfo.getMethod("getLocation")
  }

  // None when running against an older Hadoop that lacks these classes.
  private[spark] val SPLIT_INFO_REFLECTIONS: Option[SplitInfoReflections] = try {
    Some(new SplitInfoReflections)
  } catch {
    case e: Exception =>
      logDebug("SplitLocationInfo and other new Hadoop classes are " +
        "unavailable. Using the older Hadoop location info code.", e)
      None
  }

  // Translates Hadoop SplitLocationInfo objects (accessed reflectively) into
  // Spark task-location strings, preferring HDFS-cached replicas and dropping
  // the useless "localhost" entries.
  private[spark] def convertSplitLocationInfo(infos: Array[AnyRef]): Seq[String] = {
    val out = ListBuffer[String]()
    infos.foreach { loc => {
      val locationStr = HadoopRDD.SPLIT_INFO_REFLECTIONS.get.
        getLocation.invoke(loc).asInstanceOf[String]
      if (locationStr != "localhost") {
        if (HadoopRDD.SPLIT_INFO_REFLECTIONS.get.isInMemory.
          invoke(loc).asInstanceOf[Boolean]) {
          logDebug("Partition " + locationStr + " is cached by Hadoop.")
          out += new HDFSCacheTaskLocation(locationStr).toString
        } else {
          out += new HostTaskLocation(locationStr).toString
        }
      }
    }}
    out.seq
  }
}
| practice-vishnoi/dev-spark-1 | core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala | Scala | apache-2.0 | 17,728 |
package org.apache.mesos.chronos.scheduler.state
import org.apache.mesos.chronos.scheduler.jobs._
import org.specs2.mock._
import org.specs2.mutable._
// Round-trip tests: a job persisted to the store should come back with the
// same field values when read by name.
class PersistenceStoreSpec extends SpecificationWithJUnit with Mockito {
  "MesosStatePersistenceStore" should {
    "Writing and reading ScheduledBasedJob a job works" in {
      // NOTE(review): the store is built with null state/config arguments --
      // presumably MesosStatePersistenceStore tolerates this in tests; confirm
      // before reusing this pattern elsewhere.
      val store = new MesosStatePersistenceStore(null, null)
      val startTime = "R1/2012-01-01T00:00:01.000Z/PT1M"
      val job = ScheduleBasedJob(schedule = startTime, name = "sample-name",
        command = "sample-command", successCount = 1L,
        executor = "fooexecutor", executorFlags = "args", taskInfoData = "SomeData")
      store.persistJob(job)
      // Reading back by name must preserve every persisted field.
      val job2 = store.getJob(job.name)
      job2.name must_== job.name
      job2.executor must_== job.executor
      job2.taskInfoData must_== job.taskInfoData
      job2.successCount must_== job.successCount
      job2.command must_== job.command
    }

    "Writing and reading DependencyBasedJob a job works" in {
      val store = new MesosStatePersistenceStore(null, null)
      val startTime = "R1/2012-01-01T00:00:01.000Z/PT1M"
      // Parent job referenced by the dependency below; not persisted itself.
      val schedJob = ScheduleBasedJob(schedule = startTime, name = "sample-name",
        command = "sample-command")
      val job = DependencyBasedJob(parents = Set("sample-name"),
        name = "sample-dep", command = "sample-command",
        softError = true,
        successCount = 1L, errorCount = 0L,
        executor = "fooexecutor", executorFlags = "-w", taskInfoData = "SomeData",
        retries = 1, disabled = false)
      store.persistJob(job)
      val job2 = store.getJob(job.name)
      job2.name must_== job.name
      job2.command must_== job.command
      job2.softError must_== job.softError
      job2.successCount must_== job.successCount
      job2.errorCount must_== job.errorCount
      job2.executor must_== job.executor
      job2.executorFlags must_== job.executorFlags
      job2.taskInfoData must_== job.taskInfoData
      job2.retries must_== job.retries
      job2.disabled must_== job.disabled
    }
  }
}
| vixns/chronos | src/test/scala/org/apache/mesos/chronos/scheduler/state/PersistenceStoreSpec.scala | Scala | apache-2.0 | 2,078 |
object Solution {
  /**
   * ACM ICPC Team: reads N binary topic strings, then prints the maximum
   * number of topics any pair of people knows together (popcount of the
   * bitwise OR) followed by the number of pairs achieving that maximum.
   * The second header value (topic count M) is implied by the string length.
   */
  def main(args: Array[String]) {
    val header = readLine.split(" ").map(_.toInt)
    val n = header(0)
    // Each person's known topics as a bitmask parsed from the binary string.
    val people = Seq.fill(n)(BigInt(readLine, 2))
    // Popcount of the OR for every unordered pair of people.
    val pairTopicCounts =
      people.combinations(2).map { case Seq(a, b) => (a | b).bitCount }.toSeq
    val best = pairTopicCounts.max
    println(best)
    println(pairTopicCounts.count(_ == best))
  }
}
| advancedxy/hackerrank | algorithms/warmup/ACMICPCTeam.scala | Scala | mit | 424 |
import akka.actor.{ActorLogging, Actor}
/**
 * Actor stub that is intended to consume queued lines and append them to a
 * CSV file. Currently it only logs; the planned pipeline is sketched below.
 */
class CSVWriter extends Actor with ActorLogging {
  override def receive: Receive = {
    case "read" =>
      // Planned implementation:
      //  - loop over html files in the src folder
      //  - build an HTMLfile model per file (html, text, ... attributes)
      //  - ScraperActor ! scrape(HTMLfile, xpath)
      //  - send a reply to the requester
      log.info("read line from queue and write it to file")
  }
}
package is.hail.expr.ir
import is.hail.TestUtils._
import is.hail.expr.ir.TestUtils.IRCall
import is.hail.types.virtual.{TArray, TBoolean, TCall, TInt32}
import is.hail.variant._
import is.hail.{ExecStrategy, HailSuite}
import org.testng.annotations.{DataProvider, Test}
/**
 * Tests the Call IR functions (phasing, zygosity, allele access, downcoding)
 * against the reference implementations in [[is.hail.variant.Call]].
 * Each IR invocation must agree with the corresponding Call method, and a
 * null Call must map to a null result.
 */
class CallFunctionsSuite extends HailSuite {
  implicit val execStrats = ExecStrategy.javaOnly

  // Fix: removed the no-op `assert(true)` statements that were dead code in
  // the three data providers below.

  /** Calls of various ploidies and phasings exercised by most tests. */
  @DataProvider(name = "basic")
  def basicData(): Array[Array[Any]] = {
    Array(
      Array(Call0()),
      Array(Call1(0, false)),
      Array(Call1(1, true)),
      Array(Call2(1, 0, true)),
      Array(Call2(0, 1, false)),
      Array(CallN(Array(1, 1), false)),
      Array(Call.parse("0|1"))
    )
  }

  /** Unphased diploid calls, for unphasedDiploidGtIndex. */
  @DataProvider(name = "diploid")
  def uphasedDiploidData(): Array[Array[Any]] = {
    Array(
      Array(Call2(0, 0, false)),
      Array(Call2(1, 0, false)),
      Array(Call2(0, 1, false)),
      Array(Call2(3, 1, false)),
      Array(Call2(3, 3, false))
    )
  }

  /** (call, allele index) pairs for the index-based functions. */
  @DataProvider(name = "basicWithIndex")
  def basicDataWIndex(): Array[Array[Any]] = {
    Array(
      Array(Call1(0, false), 0),
      Array(Call1(1, true), 0),
      Array(Call2(1, 0, true), 0),
      Array(Call2(1, 0, true), 1),
      Array(Call2(0, 1, false), 0),
      Array(Call2(0, 1, false), 1),
      Array(CallN(Array(1, 1), false), 0),
      Array(CallN(Array(1, 1), false), 1),
      Array(Call.parse("0|1"), 0),
      Array(Call.parse("0|1"), 1)
    )
  }

  @Test def constructors() {
    assertEvalsTo(invoke("Call", TCall, False()), Call0())
    assertEvalsTo(invoke("Call", TCall, I32(0), True()), Call1(0, true))
    assertEvalsTo(invoke("Call", TCall, I32(1), False()), Call1(1, false))
    assertEvalsTo(invoke("Call", TCall, I32(0), I32(0), False()), Call2(0, 0, false))
    assertEvalsTo(invoke("Call", TCall, TestUtils.IRArray(0, 1), False()), CallN(Array(0, 1), false))
    assertEvalsTo(invoke("Call", TCall, Str("0|1")), Call2(0, 1, true))
  }

  @Test(dataProvider = "basic")
  def isPhased(c: Call) {
    assertEvalsTo(invoke("isPhased", TBoolean, IRCall(c)),
      Option(c).map(Call.isPhased).orNull)
  }

  @Test(dataProvider = "basic")
  def isHomRef(c: Call) {
    assertEvalsTo(invoke("isHomRef", TBoolean, IRCall(c)),
      Option(c).map(Call.isHomRef).orNull)
  }

  @Test(dataProvider = "basic")
  def isHet(c: Call) {
    assertEvalsTo(invoke("isHet", TBoolean, IRCall(c)),
      Option(c).map(Call.isHet).orNull)
  }

  @Test(dataProvider = "basic")
  def isHomVar(c: Call) {
    assertEvalsTo(invoke("isHomVar", TBoolean,IRCall(c)),
      Option(c).map(Call.isHomVar).orNull)
  }

  @Test(dataProvider = "basic")
  def isNonRef(c: Call) {
    assertEvalsTo(invoke("isNonRef", TBoolean, IRCall(c)),
      Option(c).map(Call.isNonRef).orNull)
  }

  @Test(dataProvider = "basic")
  def isHetNonRef(c: Call) {
    assertEvalsTo(invoke("isHetNonRef", TBoolean, IRCall(c)),
      Option(c).map(Call.isHetNonRef).orNull)
  }

  @Test(dataProvider = "basic")
  def isHetRef(c: Call) {
    assertEvalsTo(invoke("isHetRef", TBoolean, IRCall(c)),
      Option(c).map(Call.isHetRef).orNull)
  }

  @Test(dataProvider = "basic")
  def nNonRefAlleles(c: Call) {
    assertEvalsTo(invoke("nNonRefAlleles", TInt32, IRCall(c)),
      Option(c).map(Call.nNonRefAlleles).orNull)
  }

  @Test(dataProvider = "basicWithIndex")
  def alleleByIndex(c: Call, idx: Int) {
    assertEvalsTo(invoke("index", TInt32, IRCall(c), I32(idx)),
      Option(c).map(c => Call.alleleByIndex(c, idx)).orNull)
  }

  @Test(dataProvider = "basicWithIndex")
  def downcode(c: Call, idx: Int) {
    assertEvalsTo(invoke("downcode", TCall, IRCall(c), I32(idx)),
      Option(c).map(c => Call.downcode(c, idx)).orNull)
  }

  @Test(dataProvider = "diploid")
  def unphasedDiploidGtIndex(c: Call) {
    assertEvalsTo(invoke("unphasedDiploidGtIndex", TInt32, IRCall(c)),
      Option(c).map(c => Call.unphasedDiploidGtIndex(c)).orNull)
  }

  @Test(dataProvider = "basic")
  def oneHotAlleles(c: Call) {
    assertEvalsTo(invoke("oneHotAlleles", TArray(TInt32), IRCall(c), I32(2)),
      Option(c).map(c => Call.oneHotAlleles(c, 2)).orNull)
  }
}
| hail-is/hail | hail/src/test/scala/is/hail/expr/ir/CallFunctionsSuite.scala | Scala | mit | 4,149 |
package org.map
import org.scalatest.{BeforeAndAfter, FunSuite, Matchers}
/** Behavioural tests for the kata Map implementation. */
class MapTests extends FunSuite with Matchers with BeforeAndAfter {

  // Mutable fixture, replaced with a fresh empty map before every test.
  // NOTE(review): the initial `Nil` value for a Map-typed var looks
  // suspicious -- confirm it compiles as intended against org.map.Map.
  var map: Map = Nil

  before {
    map = new Map
  }

  test("create a new empty map") {
    map.size shouldBe 0
    map.isEmpty shouldBe true
  }

  test("size of map should increase when put into it") {
    val sizeBefore = map.size
    map.put(1, 1)
    map.isEmpty shouldBe false
    map.size shouldBe sizeBefore + 1
  }

  test("size of map should decrease when remove from it") {
    map.put(1, 1)
    val sizeBefore = map.size
    map.remove(1)
    map.size shouldBe sizeBefore - 1
  }

  test("find put value inside map") {
    map.put(1, 1)
    map.contains(1) shouldBe true
    map.put(2, 2)
    map.contains(2) shouldBe true
  }

  test("map should contains all put key into it") {
    for (key <- 1 to 3) map.put(key, 1)
    for (key <- 1 to 3) map.contains(key) shouldBe true
  }

  test("get put value into map") {
    map.put(1, 1)
    map.get(1) shouldBe Some(1)
    map.get(3) shouldBe None
  }
}
| Alex-Diez/Scala-TDD-Katas | old-katas/map-kata/day-1/src/test/scala/org/map/MapTests.scala | Scala | mit | 1,242 |
package org.bitcoins.core.p2p
import org.bitcoins.crypto.DoubleSha256Digest
import org.bitcoins.testkitcore.gen.p2p.DataMessageGenerator
import org.bitcoins.testkitcore.util.BitcoinSUnitTest
/** Property tests for GetDataMessage parsing and construction. */
class GetDataMessageTest extends BitcoinSUnitTest {

  it must "have serialization symmetry" in {
    // Parsing a message's hex must reproduce the original message.
    forAll(DataMessageGenerator.getDataMessages) { original =>
      assert(GetDataMessage(original.hex) == original)
    }
  }

  it must "be constructable from inventories" in {
    // Rebuilding from the inventory list must reproduce the original.
    forAll(DataMessageGenerator.getDataMessages) { original =>
      assert(GetDataMessage(original.inventories) == original)
    }
  }

  it must "be constructable from a single inventory" in {
    val inv = Inventory(TypeIdentifier.MsgBlock, DoubleSha256Digest.empty)
    assert(GetDataMessage(inv) == GetDataMessage(Seq(inv)))
  }

  it must "have a meaningful toString" in {
    // Guard against dumping full payloads in log output.
    forAll(DataMessageGenerator.getDataMessages) { msg =>
      assert(msg.toString.length() < 200)
    }
  }
}
| bitcoin-s/bitcoin-s | core-test/src/test/scala/org/bitcoins/core/p2p/GetDataMessageTest.scala | Scala | mit | 982 |
package org.apache.mesos.chronos.scheduler.config
import java.net.InetSocketAddress
import org.rogach.scallop.ScallopConf
/**
* Configuration values that may be parsed from a YAML file.
* @author Florian Leibert (flo@leibert.de)
*/
trait SchedulerConfiguration extends ScallopConf {
lazy val master = opt[String]("master",
descr = "The URL of the Mesos master",
default = Some("local"),
required = true,
noshort = true)
lazy val user = opt[String]("user",
descr = "The chronos user to run the processes under",
default = Some("root"))
//TODO(FL): Be consistent and do everything in millis
lazy val failoverTimeoutSeconds = opt[Int]("failover_timeout",
descr = "The failover timeout in seconds for Mesos",
default = Some(604800))
lazy val scheduleHorizonSeconds = opt[Int]("schedule_horizon",
descr = "The look-ahead time for scheduling tasks in seconds",
default = Some(60))
lazy val clusterName = opt[String]("cluster_name",
descr = "The name of the cluster where Chronos is run",
default = None)
lazy val zookeeperServersString = opt[String]("zk_hosts",
descr = "The list of ZooKeeper servers for storing state",
default = Some("localhost:2181"))
lazy val hostname = opt[String]("hostname",
descr = "The advertised hostname of this Chronos instance for network communication. This is used by other" +
"Chronos instances and the Mesos master to communicate with this instance",
default = Some(java.net.InetAddress.getLocalHost.getHostName))
lazy val leaderMaxIdleTimeMs = opt[Int]("leader_max_idle_time",
descr = "The look-ahead time for scheduling tasks in milliseconds",
default = Some(5000))
lazy val zooKeeperTimeout = opt[Long]("zk_timeout",
descr = "The timeout for ZooKeeper in milliseconds",
default = Some(10000L))
lazy val zooKeeperPath = opt[String]("zk_path",
descr = "Path in ZooKeeper for storing state",
default = Some("/chronos/state"))
lazy val mailServer = opt[String]("mail_server",
descr = "Address of the mailserver in server:port format",
default = None)
lazy val mailUser = opt[String]("mail_user",
descr = "Mail user (for auth)",
default = None)
lazy val mailPassword = opt[String]("mail_password",
descr = "Mail password (for auth)",
default = None)
  // Command-line option declarations. `opt` is inherited from the enclosing
  // configuration trait (defined outside this chunk) — presumably Scallop's
  // `ScallopConf.opt`; TODO confirm against the enclosing declaration.

  // --- notification settings (mail / Raven / Slack / HipChat / HTTP) ---
  lazy val mailFrom = opt[String]("mail_from",
    descr = "Mail from field",
    default = None)
  lazy val mailSslOn = opt[Boolean]("mail_ssl",
    descr = "Mail SSL",
    default = Some(false))
  lazy val ravenDsn = opt[String]("raven_dsn",
    descr = "Raven DSN for connecting to a raven or sentry event service",
    default = None)
  lazy val slackWebhookUrl = opt[String]("slack_url",
    descr = "Webhook URL for posting to Slack",
    default = None)
  lazy val hipChatUrl = opt[String]("hip_chat_url",
    descr = "HipChat URL, e.g. http://company.hipchat.com/",
    default = None)
  lazy val hipChatToken = opt[String]("hip_chat_token",
    descr = "HipChat API token",
    default = None)
  lazy val httpNotificationUrl = opt[String]("http_notification_url",
    descr = "Http URL for notifying failures",
    default = None)
  lazy val httpNotificationCredentials = opt[String]("http_notification_credentials",
    descr = "Http notification URL credentials in format username:password",
    default = None)

  // --- job failure handling ---
  lazy val failureRetryDelayMs = opt[Long]("failure_retry",
    descr = "Number of ms between retries",
    default = Some(60000))
  lazy val disableAfterFailures = opt[Long]("disable_after_failures",
    descr = "Disables a job after this many failures have occurred",
    default = Some(0))

  // --- per-task Mesos resource requests and framework settings ---
  lazy val mesosTaskMem = opt[Double]("mesos_task_mem",
    descr = "Amount of memory to request from Mesos for each task (MB)",
    default = Some(128.0))
  lazy val mesosTaskCpu = opt[Double]("mesos_task_cpu",
    descr = "Number of CPUs to request from Mesos for each task",
    default = Some(0.1))
  lazy val mesosTaskDisk = opt[Double]("mesos_task_disk",
    descr = "Amount of disk capacity to request from Mesos for each task (MB)",
    default = Some(256.0))
  lazy val mesosCheckpoint = opt[Boolean]("mesos_checkpoint",
    descr = "Enable checkpointing in Mesos",
    default = Some(true))
  lazy val mesosRole = opt[String]("mesos_role",
    descr = "The Mesos role to run tasks under",
    default = Some("*"))
  lazy val taskEpsilon = opt[Int]("task_epsilon",
    descr = "The default epsilon value for tasks, in seconds",
    default = Some(60))
  // Chronos version, read from the jar manifest ("unknown" outside a packaged jar)
  lazy val version =
    Option(classOf[SchedulerConfiguration].getPackage.getImplementationVersion).getOrElse("unknown")
  lazy val mesosFrameworkName = opt[String]("mesos_framework_name",
    descr = "The framework name",
    default = Some("chronos-" + version))
  lazy val webuiUrl = opt[String]("webui_url",
    descr = "The http(s) url of the web ui, defaulting to the advertised hostname",
    noshort = true,
    default = None)
  lazy val reconciliationInterval = opt[Int]("reconciliation_interval",
    descr = "Reconciliation interval in seconds",
    default = Some(600))
  lazy val mesosAuthenticationPrincipal = opt[String]("mesos_authentication_principal",
    descr = "Mesos Authentication Principal",
    noshort = true)
  lazy val mesosAuthenticationSecretFile = opt[String]("mesos_authentication_secret_file",
    descr = "Mesos Authentication Secret",
    noshort = true)

  // --- offer handling tuning ---
  lazy val reviveOffersForNewJobs = opt[Boolean]("revive_offers_for_new_jobs",
    descr = "Whether to call reviveOffers for new or changed jobs. (Default: do not use reviveOffers) ",
    default = Some(false))
  lazy val declineOfferDuration = opt[Long]("decline_offer_duration",
    descr = "(Default: Use mesos default of 5 seconds) " +
      "The duration (milliseconds) for which to decline offers by default",
    default = None)
  lazy val minReviveOffersInterval = opt[Long]("min_revive_offers_interval",
    descr = "Do not ask for all offers (also already seen ones) more often than this interval (ms). (Default: 5000)",
    default = Some(5000))
  /**
   * Parses the configured ZooKeeper server list (comma-separated `host:port`
   * entries) into socket addresses. Fails with IllegalArgumentException if an
   * entry is not exactly `host:port`, and with NumberFormatException if the
   * port is not numeric.
   */
  def zooKeeperHostAddresses: Seq[InetSocketAddress] =
    for (s <- zookeeperServers().split(",")) yield {
      val splits = s.split(":")
      require(splits.length == 2, "expected host:port for zk servers")
      new InetSocketAddress(splits(0), splits(1).toInt)
    }
def zookeeperServers(): String = {
if (zookeeperServersString().startsWith("zk://")) {
return zookeeperServersString().replace("zk://", "").replaceAll("/.*", "")
}
zookeeperServersString()
}
def zooKeeperStatePath = "%s/state".format(zooKeeperPath())
def zooKeeperCandidatePath = "%s/candidate".format(zooKeeperPath())
}
| rickfast/chronos | src/main/scala/org/apache/mesos/chronos/scheduler/config/SchedulerConfiguration.scala | Scala | apache-2.0 | 6,706 |
package com.obecto.gattakka.genetics.operators
import com.obecto.gattakka.IndividualDescriptor
trait SelectionStrategy {
  /**
   * Selects the "best" individual from `from`; what counts as best is left
   * to the implementation (e.g. highest fitness — confirm per implementor).
   */
  def selectBest(from: Seq[IndividualDescriptor]): IndividualDescriptor

  /** Selects the "worst" individual from `from`, per the same criteria. */
  def selectWorst(from: Seq[IndividualDescriptor]): IndividualDescriptor
}
}
| obecto/gattakka | src/main/scala/com/obecto/gattakka/genetics/operators/SelectionStrategy.scala | Scala | mit | 270 |
/* NSC -- new Scala compiler
* Copyright 2007-2013 LAMP/EPFL
* @author Pedro Furlanetto
*/
package reflectdoc
package tools
package nsc
package doc
package html
package page
import doc.model._
class ReferenceIndex(letter: Char, index: doc.Index, universe: Universe) extends HtmlPage {

  /** Location of this page within the generated documentation tree. */
  def path = List("index-"+letter+".html", "index")

  /** Page title: configured doc title plus version, each only when set. */
  def title = {
    val s = universe.settings
    ( if (!s.doctitle.isDefault) s.doctitle.value else "" ) +
    ( if (!s.docversion.isDefault) (" " + s.docversion.value) else "" )
  }

  def headers =
    <xml:group>
      <link href={ relativeLinkTo(List("ref-index.css", "lib")) } media="screen" type="text/css" rel="stylesheet"/>
      <script type="text/javascript" src={ relativeLinkTo{List("jquery.js", "lib")} }></script>
    </xml:group>

  /**
   * Renders one index entry: the member name followed by the distinct
   * templates it occurs in. Deprecated occurrences are struck through, and
   * the name itself is struck through only when every occurrence is
   * deprecated.
   */
  private def entry(name: String, methods: Iterable[MemberEntity]) = {
    val occurrences = methods.map(method => {
      val html = templateToHtml(method.inDefinitionTemplates.head)
      if (method.deprecation.isDefined) <strike>{ html }</strike> else html
    }).toList.distinct

    <div class="entry">
      <div class="name">{
        // idiomatic replacement for `find { ! _.deprecation.isDefined } != None`
        if (methods.exists(_.deprecation.isEmpty))
          name
        else
          <strike>{ name }</strike>
      }</div>
      <div class="occurrences">{
        for (owner <- occurrences) yield owner ++ scala.xml.Text(" ")
      }</div>
    </div>
  }

  def body =
    <body>{
      for ((name, members) <- index.firstLetterIndex(letter)) yield
        entry(name, members.view)
    }</body>
}
| VladUreche/reflectdoc | components/core/src/reflectdoc/tools/nsc/doc/html/page/ReferenceIndex.scala | Scala | bsd-3-clause | 1,580 |
package com.typesafe.slick.docs
import scala.concurrent.ExecutionContext.Implicits.global
import slick.jdbc.H2Profile.api._
import slick.jdbc.H2Profile
// NOTE(review): the paired `//#name` markers below delimit snippets that are
// included verbatim into the documentation (this file lives under
// src/sphinx/code — presumably Sphinx includes; confirm). Do not reformat or
// add comments inside the marker regions.
object CodeGenerator extends App {
  // Parameters referenced by the documentation snippets below.
  val uri = "#slick.db.default"
  val profile = "slick.jdbc.H2Profile"
  val jdbcDriver = "org.h2.Driver"
  val url = "jdbc:postgresql://localhost/test"
  val outputFolder = ""
  val pkg = "demo"
  val user = ""
  val password = ""
  // Guarded by if(false): the snippets are compiled (keeping the docs in sync
  // with the API) but never executed when this App runs.
  if(false){
    val db = Database.forURL("jdbc:h2:mem:test1;DB_CLOSE_DELAY=-1", driver="org.h2.Driver")
    //#default-runner-uri
    slick.codegen.SourceCodeGenerator.main(
      Array(uri, outputFolder)
    )
    //#default-runner-uri
    //#default-runner
    slick.codegen.SourceCodeGenerator.main(
      Array(profile, jdbcDriver, url, outputFolder, pkg)
    )
    //#default-runner
    //#default-runner-with-auth
    slick.codegen.SourceCodeGenerator.main(
      Array(profile, jdbcDriver, url, outputFolder, pkg, user, password)
    )
    //#default-runner-with-auth
    //#customization
    import slick.codegen.SourceCodeGenerator
    // fetch data model
    val modelAction = H2Profile.createModel(Some(H2Profile.defaultTables)) // you can filter specific tables here
    val modelFuture = db.run(modelAction)
    // customize code generator
    val codegenFuture = modelFuture.map(model => new SourceCodeGenerator(model) {
      // override mapped table and class name
      override def entityName =
        dbTableName => dbTableName.dropRight(1).toLowerCase.toCamelCase
      override def tableName =
        dbTableName => dbTableName.toLowerCase.toCamelCase
      // add some custom import
      override def code = "import foo.{MyCustomType,MyCustomTypeMapper}" + "\\n" + super.code
      // override table generator
      override def Table = new Table(_){
        // disable entity class generation and mapping
        override def EntityType = new EntityType{
          override def classEnabled = false
        }
        // override contained column generator
        override def Column = new Column(_){
          // use the data model member of this column to change the Scala type,
          // e.g. to a custom enum or anything else
          override def rawType =
            if(model.name == "SOME_SPECIAL_COLUMN_NAME") "MyCustomType" else super.rawType
        }
      }
    })
    codegenFuture.onSuccess { case codegen =>
      codegen.writeToFile(
        "slick.jdbc.H2Profile","some/folder/","some.packag","Tables","Tables.scala"
      )
    }
    //#customization
  }
}
| AtkinsChang/slick | slick/src/sphinx/code/CodeGenerator.scala | Scala | bsd-2-clause | 2,531 |
package com.gu.pandomainauth
import com.amazonaws.auth.{DefaultAWSCredentialsProviderChain, AWSCredentialsProvider}
import com.amazonaws.regions.{Regions, Region}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.language.postfixOps
import akka.actor.{ActorRef, Props, Actor, ActorSystem}
import akka.agent.Agent
import akka.event.Logging
import com.gu.pandomainauth.model.PanDomainAuthSettings
import com.gu.pandomainauth.service.{ProxyConfiguration, S3Bucket}
import scala.concurrent.duration.FiniteDuration
trait PanDomainAuth {

  // Actor system used solely to schedule the periodic settings refresh below.
  lazy val actorSystem = ActorSystem()

  /**
   * the domain you are authenticating against
   */
  def domain: String

  /**
   * the identifier for your app, typically the same as the subdomain your app runs on
   */
  def system: String

  /**
   * the AWS credentials provider used to access the configuration bucket;
   * defaults to the default provider chain (so instance credentials are used)
   */
  def awsCredentialsProvider: AWSCredentialsProvider = new DefaultAWSCredentialsProviderChain()

  /**
   * the aws region the configuration bucket is in, defaults to eu-west-1 as that's where the guardian tends to run stuff
   */
  def awsRegion: Option[Region] = Option( Region getRegion Regions.EU_WEST_1 )

  /**
   * the proxy configuration to use when connecting to aws
   */
  def proxyConfiguration: Option[ProxyConfiguration] = None

  lazy val bucket = new S3Bucket(awsCredentialsProvider, awsRegion, proxyConfiguration)

  // Initial read of the per-domain settings from S3; kept fresh afterwards by
  // the refresh actor below, which updates the Agent.
  lazy val settingsMap = bucket.readDomainSettings(domain)
  lazy val authSettings: Agent[PanDomainAuthSettings] = Agent(PanDomainAuthSettings(settingsMap))

  lazy val domainSettingsRefreshActor = actorSystem.actorOf(Props(classOf[DomainSettingsRefreshActor], domain, bucket, authSettings), "PanDomainAuthSettingsRefresher")

  // First refresh fires one minute after start-up; the actor reschedules itself.
  actorSystem.scheduler.scheduleOnce(1 minute, domainSettingsRefreshActor, Refresh)

  def shutdown = actorSystem.shutdown

  // Current snapshot of the (periodically refreshed) auth settings.
  def settings = authSettings.get()
}
/**
 * Actor that periodically re-reads the per-domain auth settings from S3 and
 * pushes them into the shared Agent. Schedules its own next tick after each
 * refresh attempt, whether it succeeded or failed.
 */
class DomainSettingsRefreshActor(domain: String, bucket: S3Bucket, authSettings: Agent[PanDomainAuthSettings]) extends Actor {

  // Interval between refresh attempts.
  val frequency: FiniteDuration = 1 minute

  val log = Logging(context.system, this)

  override def receive: Receive = {
    case Refresh => {
      try {
        val settingsMap = bucket.readDomainSettings(domain)
        val settings = PanDomainAuthSettings(settingsMap)
        // publish the fresh settings to all readers of the Agent
        authSettings send settings
        log.debug("reloaded settings for {}", domain)
      } catch {
        // a failed refresh is logged and retried on the next tick
        case e: Exception => log.error(e, "failed to refresh domain {} settings", domain)
      }
      reschedule
    }
  }

  override def postRestart(reason: Throwable) {
    // keep the refresh loop alive after a crash/restart
    reschedule
  }

  def reschedule {
    context.system.scheduler.scheduleOnce(frequency, self, Refresh)
  }
}

// Message that triggers a settings reload.
case object Refresh
| m4tx/pan-domain-authentication | pan-domain-auth-core/src/main/scala/com/gu/pandomainauth/PanDomainAuth.scala | Scala | apache-2.0 | 2,861 |
package org.openurp.edu.eams.teach.grade.course.service
import org.openurp.base.Semester
import org.openurp.edu.base.Project
import org.openurp.edu.eams.teach.grade.lesson.model.GradeInputSwitch
trait GradeInputSwitchService {
def getSwitch(project: Project, semester: Semester): GradeInputSwitch
def getOpenedSemesters(project: Project): List[Semester]
}
| openurp/edu-eams-webapp | grade/src/main/scala/org/openurp/edu/eams/teach/grade/course/service/GradeInputSwitchService.scala | Scala | gpl-3.0 | 367 |
package com.sfxcode.sapphire.core.demo.form.controller
import com.sfxcode.sapphire.core.controller.ViewController
import com.sfxcode.sapphire.core.value.{FXBeanAdapter, KeyBindings}
import com.sfxcode.sapphire.core.demo.form.model.Person
import com.sfxcode.sapphire.core.Includes._
class FormController extends ViewController {

  // Adapter binding a Person bean to the form controls of this view.
  lazy val formAdapter = FXBeanAdapter[Person](this)
  val person = Person()

  override def didGainVisibility() {
    super.didGainVisibility()
    // The same bean properties are bound twice, under the "form1_" and
    // "form2_" control-name prefixes, so both forms edit one Person instance.
    val bindingList = List("name", "age", "description", "active")
    val bindings = KeyBindings(bindingList, "form1_")
    bindings.add(bindingList, "form2_")
    // "person" is bound to an expression that renders a summary of the bean
    bindings.add("person", "Person ${_self.name()} with age of ${_self.age()} is active: ${_self.active()}")
    formAdapter.addBindings(bindings)
    // converters translate between control text and the typed bean properties
    formAdapter.addConverter("form1_age", "IntegerStringConverter")
    formAdapter.addConverter("form2_age", "IntegerStringConverter")
    formAdapter.addConverter("form2_active", "BooleanStringConverter")
    formAdapter.set(person)
  }
}
| swhgoon/sapphire-demo | form/src/main/scala/com/sfxcode/sapphire/core/demo/form/controller/FormController.scala | Scala | apache-2.0 | 1,026 |
import scala.reflect.runtime.universe._
import scala.reflect.runtime.{currentMirror => cm}
import scala.tools.reflect.{ToolBox, ToolBoxError}
import scala.tools.reflect.Eval
// Compiler regression test: evaluating a reified extractor pattern should be
// idempotent under typecheck -> untypecheck -> eval (see scala/bug#5465).
object Test extends App {
  object Extractor { def unapply(x: Int): Option[Int] = Some(x) }

  // Reified pattern match using the custom extractor above; evaluates to 2.
  val extractor = reify {
    2 match { case Extractor(x) => x }
  }
  println(extractor.eval)

  // Typecheck the tree, erase the types again, and try to re-evaluate it.
  val tb = cm.mkToolBox()
  val textractor = tb.typecheck(extractor.tree)
  println(textractor)
  val rtextractor = tb.untypecheck(textractor)
  try {
    // should print 2 without error
    println(tb.eval(rtextractor))
  } catch {
    // this is the old behaviour, rather than the desired behavior; see scala/bug#5465
    case _: ToolBoxError => println("error!")
  }
}
| scala/scala | test/files/run/idempotency-extractors.scala | Scala | apache-2.0 | 734 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.carbondata.spark.testsuite.filterexpr
import java.io.File
import org.apache.spark.sql.Row
import org.apache.spark.sql.common.util.CarbonHiveContext._
import org.apache.spark.sql.common.util.QueryTest
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.scalatest.BeforeAndAfterAll
/**
* Test Class for filter expression query on String datatypes
*
* @author N00902756
*
*/
class CountStarTestCase extends QueryTest with BeforeAndAfterAll {

  override def beforeAll {
    // start from a clean slate
    sql("drop table if exists filtertestTables")
    sql("drop table if exists filterTimestampDataType")

    sql("CREATE TABLE filterTimestampDataType (ID int, date Timestamp, country String, " +
      "name String, phonetype String, serialname String, salary int) " +
      "STORED BY 'org.apache.carbondata.format'"
    )
    // the test CSV encodes timestamps as yyyy/mm/dd
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/mm/dd")
    val currentDirectory = new File(this.getClass.getResource("/").getPath + "/../../")
      .getCanonicalPath
    var csvFilePath = currentDirectory + "/src/test/resources/datanullmeasurecol.csv";
    sql(
      s"LOAD DATA LOCAL INPATH '" + csvFilePath + "' INTO TABLE " +
      s"filterTimestampDataType " +
      s"OPTIONS('DELIMITER'= ',', " +
      s"'FILEHEADER'= '')"
    )
  }

  test("select count ") {
    // count(*) with an equality filter; the fixture contains 2 'china' rows
    checkAnswer(
      sql("select count(*) from filterTimestampDataType where country='china'"),
      Seq(Row(2))
    )
  }

  override def afterAll {
    sql("drop table if exists filtertestTables")
    sql("drop table if exists filterTimestampDataType")
    // restore the timestamp format for subsequent suites
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
  }
}
package edu.berkeley.velox.datamodel.api.operation
import scala.concurrent.Future
import edu.berkeley.velox.datamodel.ResultSet
trait Operation {
  /** Executes this data-model operation asynchronously, yielding its result set. */
  def execute() : Future[ResultSet]
}
| pbailis/fast-tpcc-repo | core/src/main/scala/edu/berkeley/velox/frontend/api/operation/Operation.scala | Scala | apache-2.0 | 186 |
package com.plasmaconduit.edge
import io.netty.channel.nio.NioEventLoopGroup
object EdgeEventLoop {
  // Shared Netty NIO event loop group (application-wide singleton).
  val loop = new NioEventLoopGroup()
}
| plasmaconduit/edge | src/main/scala/com/plasmaconduit/edge/EdgeEventLoop.scala | Scala | mit | 143 |
package com.todesking.platebuilder
trait Distributions {
  import DSL.GeneratorSyntax

  // Each helper builds a stochastic generator via the `stochastic` string
  // interpolator (from GeneratorSyntax); the interpolated variables become
  // the distribution's parameters in the generated model.

  /** Dirichlet distribution over a real vector indexed by `I`. */
  def dirichlet[I <: String](param: Var[Type.Vec[I, Type.Real]]): Generator[Type.Vec[I, Type.Real]] =
    stochastic"Dirichlet($param)"

  /** Categorical distribution drawing a category with index type `I`. */
  def categorical[I <: String](param: Var[Type.Vec[I, Type.Real]]): Generator[Type.Category[I]] =
    stochastic"Categorical($param)"

  /** Normal distribution with mean `mu`; `s2` is presumably the variance — confirm. */
  def normal(mu: Var[Type.Real], s2: Var[Type.Real]): Generator[Type.Real] =
    stochastic"Normal($mu, $s2)"
}
| todesking/platebuilder | core/src/main/scala/Distributions.scala | Scala | mit | 472 |
// IntelliJ Scala plugin type-inference testdata (SCL6091): the
// /*start*/.../*end*/ markers select the expression under test and the
// trailing //Int line is the expected type — do not alter them.
type op = PartialFunction[Char, (Int, Int) => Int]

val operators:List[op] = List(
  {case '+' => (x ,y) => x+y},
  {case '-' => (x,y) => /*start*/x-y/*end*/}
)
//Int
package io.boontadata.spark.job1
import com.datastax.spark.connector.streaming._
import com.datastax.spark.connector.SomeColumns
import kafka.serializer.StringDecoder
import org.apache.spark.streaming._
import org.apache.spark.streaming.kafka._
import org.apache.spark.SparkConf
// Immutable IoT event record as consumed from Kafka.
class IotEvent(
  val messageId: String,
  val deviceId: String,
  val timestamp: String,
  val category: String,
  val measure1: Int,
  val measure2: Float) extends Serializable {

  // Renders the event as one delimited line. NOTE(review): as written the
  // literal contains escaped backslashes ("\\t"/"\\n"), so the output holds
  // the two-character sequences \t and \n rather than real tab/newline —
  // possibly a storage/escaping artifact; verify against the original source.
  override def toString(): String = {
    "%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n".format(messageId, deviceId, timestamp, category, measure1, measure2)
  }
}
object IotEventFactory {

  // Parses a serialized event; fields are expected in the order
  // messageId|deviceId|timestamp|category|measure1|measure2.
  // NOTE(review): as written the split pattern "\\\\|" contains doubled
  // backslashes — likely an escaping artifact of the regex "\\|" (literal
  // pipe); verify against the original source. Throws on too few fields or
  // non-numeric measures.
  def fromString(in: String): IotEvent = {
    val parts = in.split("\\\\|")
    new IotEvent(parts(0), parts(1), parts(2), parts(3), parts(4).toInt, parts(5).toFloat)
  }

  // Convenience constructor mirroring IotEvent's own parameters.
  def fromParts(
    messageId: String,
    deviceId: String,
    timestamp: String,
    category: String,
    measure1: Int,
    measure2: Float): IotEvent = {
    new IotEvent(messageId, deviceId, timestamp, category, measure1, measure2)
  }
}
// Per-batch aggregate of measures for one (device, category) pair, stamped
// with the batch window time.
class Aggregate(
  val window_time: String,
  val device_id: String,
  val category: String,
  val m1_sum_downstream: Int,
  val m2_sum_downstream: Float) extends Serializable {

  // Delimited rendering; same escaped-backslash caveat as IotEvent.toString —
  // "\\t"/"\\n" as written emit literal \t/\n character pairs, not tab/newline.
  override def toString(): String = {
    "%s\\t%s\\t%s\\t%s\\t%s\\n".format(window_time, device_id, category, m1_sum_downstream, m2_sum_downstream)
  }
}
/**
 * Spark Streaming job: consumes IoT events from Kafka, de-duplicates them by
 * message id within each batch, sums the measures per (device, category),
 * and writes the aggregates to Cassandra.
 */
object DirectKafkaAggregateEvents {
  def main(args: Array[String]) {
    if (args.length < 2) {
      System.err.println(s"""
        |Usage: DirectKafkaAggregateEvents <brokers> <topics>
        |  <brokers> is a list of one or more Kafka brokers
        |  <topics> is a list of one or more kafka topics to consume from
        |
        """.stripMargin)
      System.exit(1)
    }

    val Array(brokers, topics) = args
    val windowTimeFormat = new java.text.SimpleDateFormat("yyyy-MM-dd HH:mm:ss");

    // Create context with 2 second batch interval
    // NOTE(review): the code actually uses Seconds(5) — the comment above is stale.
    val sparkConf = new SparkConf()
      .setAppName("boontadata-DirectKafkaAggregateEvents")
      .set("spark.cassandra.connection.host", "cassandra1,cassandra2,cassandra3")
    val ssc = new StreamingContext(sparkConf, Seconds(5))

    // Create direct kafka stream with brokers and topics
    val topicsSet = topics.split(",").toSet
    val kafkaParams = Map[String, String]("metadata.broker.list" -> brokers)
    val messages = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
      ssc, kafkaParams, topicsSet)

    val lines = messages.map(tuple => tuple._2) // Spark receives Kafka payload as the second field of the tuple
    val parsed = lines.map(IotEventFactory.fromString(_))

    // De-duplicate by messageId within the batch, keeping the last occurrence.
    val parsedDeduplicated = parsed
      .map(e => (e.messageId, e))
      .reduceByKey((vN, vNplus1) => vNplus1)
      .map(tuple => tuple._2)

    // Sum measure1/measure2 per (deviceId, category), then stamp each
    // aggregate with the batch time as the window time.
    val aggregated = parsedDeduplicated
      .map(e => ((e.deviceId, e.category), e))
      .reduceByKey(
        (vN, vNplus1)
        =>
        IotEventFactory.fromParts("", vN.deviceId, "", vN.category,
          vN.measure1 + vNplus1.measure1,
          vN.measure2 + vNplus1.measure2))
      .transform((rdd, time) => rdd
        .map({ case(k, e) => new Aggregate(
          windowTimeFormat.format(new java.util.Date(time.milliseconds)),
          e.deviceId,
          e.category,
          e.measure1,
          e.measure2)}))

    aggregated.print()
    aggregated.saveToCassandra("boontadata", "agg_events",
      SomeColumns("device_id", "category", "window_time", "m1_sum_downstream", "m2_sum_downstream"))

    // Start the computation
    ssc.start()
    ssc.awaitTermination()
  }
}
| boontadata/boontadata-streams | code/spark/master/code/StreamingJob.scala | Scala | mit | 3,626 |
/*
* Copyright 2015 2lemetry, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package io.m2m.mqtt
import org.mashupbots.socko.routes.{Path, HttpRequest, GET, Routes}
import akka.actor.{ActorLogging, Actor, Props}
import org.mashupbots.socko.events.HttpRequestEvent
import org.mashupbots.socko.webserver
import org.mashupbots.socko.webserver.WebServerConfig
/** Socko-based HTTP endpoint exposing the load-test's latest report. */
object WebServer {
  import LoadTest.system

  // Actor that handles the requests routed below.
  val webServer = system.actorOf(Props[WebServer])

  // Single route: GET /current is forwarded to the actor.
  val routes = Routes {
    case HttpRequest(request) => request match {
      case (GET(Path("/current"))) =>
        webServer ! Current(request)
    }
  }

  // Server settings come from the "http" section of the actor-system config.
  val akkaConfig = new WebServerConfig(system.settings.config, "http")
  val server = new webserver.WebServer(akkaConfig, routes, system)
  println(akkaConfig.hostname) // NOTE(review): looks like leftover debug output

  // Endpoint is on unless `http.enabled = false` is configured.
  def enabled =
    if (system.settings.config.hasPath("http.enabled"))
      system.settings.config.getBoolean("http.enabled")
    else
      true

  def start() {
    if (!enabled) return
    server.start()
    // stop the server cleanly on JVM shutdown
    Runtime.getRuntime.addShutdownHook(new Thread() {
      override def run() { server.stop() }
    })
  }
}
// Message wrapping an incoming request for the current report.
case class Current(request: HttpRequestEvent)

class WebServer extends Actor with ActorLogging {
  def receive = {
    case Current(request) =>
      // reply with the last report as JSON, or an empty object if none yet
      request.response.write(Reporter.lastReport.map(_.json).getOrElse("{}"))
  }
}
| m2mIO/mqtt-loadtest | src/main/scala/io/m2m/mqtt/WebServer.scala | Scala | apache-2.0 | 1,873 |
/*
* MIT License
*
* Copyright (c) 2018 Gonçalo Marques
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.byteslounge.slickrepo.annotation
import scala.annotation.StaticAnnotation
/**
 * Marker annotation for entity lifecycle callbacks: presumably a method
 * annotated with `@postPersist` is invoked by the repository after the
 * entity has been persisted — confirm against the slick-repo documentation.
 */
class postPersist extends StaticAnnotation
package com.blinkbox.books.slick
import com.codahale.metrics.health.HealthCheck
import com.codahale.metrics.health.HealthCheck.Result
import scala.slick.jdbc.{GetResult, StaticQuery}
import scala.util.control.NonFatal
/**
 * Codahale-metrics health check that probes the database with a trivial
 * "SELECT 1"; any non-fatal failure marks the check unhealthy with the cause.
 */
class DatabaseHealthCheck[DB <: DatabaseSupport](db: DB#Database) extends HealthCheck {
  override def check(): Result =
    try {
      db.withSession { implicit session =>
        // foreach forces the query to actually run; the value is discarded
        StaticQuery.queryNA("SELECT 1")(GetResult.GetInt) foreach { c => }
      }
      HealthCheck.Result.healthy()
    } catch {
      // fatal errors (OOM etc.) are deliberately allowed to propagate
      case NonFatal(e) => HealthCheck.Result.unhealthy(e)
    }
}
| blinkboxbooks/common-slick.scala | src/main/scala/com/blinkbox/books/slick/DatabaseHealthCheck.scala | Scala | mit | 593 |
package aoc.day16
import io.IO
object Part2 extends App {
  /*
  --- Part Two ---
  The MFCSAM output isn't exact for every compound: the `cats` and `trees`
  readings are lower bounds (there are greater than that many), while the
  `pomeranians` and `goldfish` readings are upper bounds (there are fewer
  than that many). Every other reading is an exact value.
  What is the number of the real Aunt Sue?
  */

  import Part1.{ Aunt, strToInfo }

  // The MFCSAM readout to match against.
  private val checkValues = Map(
    "children" -> 3, "cats" -> 7, "samoyeds" -> 2, "pomeranians" -> 3,
    "akitas" -> 0, "vizslas" -> 0, "goldfish" -> 5, "trees" -> 3,
    "cars" -> 2, "perfumes" -> 1)

  /**
   * True when the aunt's remembered facts are compatible with the readout
   * under the part-two range semantics. A fact the aunt did not report can
   * never rule her out.
   */
  def matchingAunt2(a: Aunt): Boolean =
    checkValues.forall { case (compound, reading) =>
      a.info.get(compound) match {
        case None => true // unavailable info: compatible by default
        case Some(value) =>
          compound match {
            case "cats" | "trees"           => value > reading
            case "pomeranians" | "goldfish" => value < reading
            case _                          => value == reading
          }
      }
    }

  val input = IO.getLines(strToInfo)
  val aunts2 = input.filter(matchingAunt2)

  // headOption instead of aunts2(0): report rather than throw when no aunt matches.
  aunts2.headOption match {
    case Some(aunt) => println(s"The number of the real Aunt Sue is: ${aunt.n}")
    case None       => println("No matching Aunt Sue found")
  }
}
package im.actor.push
import akka.actor.{ ActorSystem, Props }
import akka.event.Logging
import akka.http.scaladsl._
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import com.spingo.op_rabbit._
import im.actor.push.resource.{ MessageResource, SubscriptionResource }
import org.flywaydb.core.Flyway
import slick.driver.PostgresDriver.api._
import slick.jdbc.hikaricp.HikariCPJdbcDataSource
import scala.concurrent.Await
import scala.concurrent.duration.Duration
/**
 * Entry point: runs database migrations, wires the RabbitMQ control actor
 * and the HTTP resources, and binds the service on 0.0.0.0:9000.
 */
object PushServer extends App {
  implicit val system = ActorSystem("push-server")
  implicit val mat = ActorMaterializer()
  implicit val ec = system.dispatcher

  val log = Logging(system, getClass)

  // Force the root logback logger to INFO programmatically.
  val rootLogger = org.slf4j.LoggerFactory.getLogger(org.slf4j.Logger.ROOT_LOGGER_NAME).asInstanceOf[ch.qos.logback.classic.Logger]
  rootLogger.setLevel(ch.qos.logback.classic.Level.INFO)

  try {
    // Hikari data source built from the "sql" config section.
    val ds =
      HikariCPJdbcDataSource.forConfig(
        system.settings.config.getConfig("sql"),
        null,
        "postgres",
        getClass.getClassLoader
      )

    // Apply pending Flyway migrations before serving traffic.
    val flyway = new Flyway()
    flyway.setDataSource(ds.ds)
    flyway.setBaselineOnMigrate(true)
    flyway.migrate()

    val db = Database.forDataSource(ds.ds)

    // RabbitMQ control actor shared by both resources.
    val rabbitControl = system.actorOf(Props[RabbitControl])

    val subsRes = new SubscriptionResource(system, rabbitControl, db).route
    val msgRes = new MessageResource(system, rabbitControl, db).route
    val route = msgRes ~ subsRes

    val bindFuture = Http(system).bindAndHandle(route, "0.0.0.0", 9000)
    bindFuture onFailure {
      // failure to bind is fatal: log and shut the actor system down
      case e ⇒
        system.terminate()
        log.error(e, "Failed to bind")
    }
  } catch {
    case e: Throwable ⇒
      log.error(e, "Failed to start")
      system.terminate()
  }

  // Block the main thread until the actor system terminates.
  Await.result(system.whenTerminated, Duration.Inf)
}
| actorapp/actor-push | push-server/src/main/scala/im/actor/push/PushServer.scala | Scala | apache-2.0 | 1,836 |
package com.sksamuel.elastic4s
import org.elasticsearch.search.highlight.HighlightBuilder
/** @author Stephen Samuel */
trait HighlightDsl {

  // Lets a bare field name be used where a highlight definition is expected.
  implicit def string2highlightfield(name: String) = new HighlightDefinition(name)

  def highlight = new HighlightExpectsField

  // Intermediate step for the `highlight field "x"` DSL form.
  class HighlightExpectsField {
    def field(name: String) = new HighlightDefinition(name)
  }

  def highlight(field: String) = new HighlightDefinition(field)

  def options = new HighlightOptionsDefinition

  // Mutable builder collecting request-level highlight options; every setter
  // mutates the builder and returns `this` so calls can be chained.
  class HighlightOptionsDefinition {
    var _preTags: Seq[String] = Nil
    var _postTags: Seq[String] = Nil
    var _encoder: Option[HighlightEncoder] = None
    var _order: Option[HighlightOrder] = None
    var _tagSchema: Option[TagSchema] = None
    var _requireFieldMatch: Boolean = false
    var _boundary_chars: Option[String] = None
    var _boundary_max_scan: Int = 20

    def boundaryMaxScan(max: Int): HighlightOptionsDefinition = {
      _boundary_max_scan = max
      this
    }

    def boundaryChars(chars: String): HighlightOptionsDefinition = {
      _boundary_chars = Option(chars)
      this
    }

    def requireFieldMatch(requireFieldMatch: Boolean): HighlightOptionsDefinition = {
      _requireFieldMatch = requireFieldMatch
      this
    }

    def tagSchema(tagSchema: TagSchema): HighlightOptionsDefinition = {
      _tagSchema = Option(tagSchema)
      this
    }

    def order(order: HighlightOrder): HighlightOptionsDefinition = {
      _order = Option(order)
      this
    }

    def encoder(encoder: HighlightEncoder): HighlightOptionsDefinition = {
      this._encoder = Option(encoder)
      this
    }

    def postTags(iterable: Iterable[String]): HighlightOptionsDefinition = postTags(iterable.toSeq: _*)
    def postTags(tags: String*): HighlightOptionsDefinition = {
      this._postTags = tags
      this
    }

    def preTags(iterable: Iterable[String]): HighlightOptionsDefinition = preTags(iterable.toSeq: _*)
    def preTags(tags: String*): HighlightOptionsDefinition = {
      this._preTags = tags
      this
    }
  }
}
// The `elastic` strings below are the literal values sent to Elasticsearch.

abstract class HighlightOrder(val elastic: String)
object HighlightOrder {
  case object Score extends HighlightOrder("score")
}

abstract class TagSchema(val elastic: String)
object TagSchema {
  case object Styled extends TagSchema("styled")
}

abstract class HighlightEncoder(val elastic: String)
object HighlightEncoder {
  case object Default extends HighlightEncoder("default")
  case object Html extends HighlightEncoder("html")
}
// Per-field highlight settings; thin chaining facade over Elasticsearch's
// HighlightBuilder.Field — each setter delegates and returns `this`.
class HighlightDefinition(field: String) {

  val builder = new HighlightBuilder.Field(field)

  def fragmentSize(f: Int) = {
    builder.fragmentSize(f)
    this
  }

  def numberOfFragments(n: Int) = {
    builder.numOfFragments(n)
    this
  }

  def fragmentOffset(n: Int) = {
    builder.fragmentOffset(n)
    this
  }

  def highlighterType(`type`: String) = {
    builder.highlighterType(`type`)
    this
  }
}
// Compiler regression test: the CanBuildFrom summoned for Array[Nothing]
// should build an actual Array[Nothing] — all three prints must show the
// same runtime class.
object Test extends App {
  import scala.collection.generic.CanBuildFrom
  val cbf = implicitly[CanBuildFrom[Nothing, Nothing, Array[Nothing]]]
  println(cbf().result.getClass)
  println(new Array[Nothing](0).getClass)
  println(Array[Nothing]().getClass)
}
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import java.util
import kafka.server.KafkaConfig
import kafka.utils.{JaasTestUtils, TestUtils, ZkUtils}
import org.apache.kafka.clients.admin.{AdminClient, AdminClientConfig}
import org.apache.kafka.common.config.SaslConfigs
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.apache.kafka.common.security.scram.internals.ScramMechanism
import org.apache.kafka.common.security.token.delegation.DelegationToken
import org.junit.Before
import scala.collection.JavaConverters._
/**
 * End-to-end authorization test where clients authenticate with a delegation
 * token: SCRAM credentials are created, a token is issued via the admin
 * client, and producer/consumer JAAS configs are switched to token login.
 */
class DelegationTokenEndToEndAuthorizationTest extends EndToEndAuthorizationTest {
  val kafkaClientSaslMechanism = "SCRAM-SHA-256"
  val kafkaServerSaslMechanisms = ScramMechanism.mechanismNames.asScala.toList

  override protected def securityProtocol = SecurityProtocol.SASL_SSL
  override protected val serverSaslProperties = Some(kafkaServerSaslProperties(kafkaServerSaslMechanisms, kafkaClientSaslMechanism))
  override protected val clientSaslProperties = Some(kafkaClientSaslProperties(kafkaClientSaslMechanism))

  override val clientPrincipal = JaasTestUtils.KafkaScramUser
  private val clientPassword = JaasTestUtils.KafkaScramPassword

  override val kafkaPrincipal = JaasTestUtils.KafkaScramAdmin
  private val kafkaPassword = JaasTestUtils.KafkaScramAdminPassword

  // Master key brokers use to sign and validate delegation tokens.
  this.serverConfig.setProperty(KafkaConfig.DelegationTokenMasterKeyProp, "testKey")

  override def configureSecurityBeforeServersStart() {
    super.configureSecurityBeforeServersStart()
    zkClient.makeSurePersistentPathExists(ZkUtils.ConfigChangesPath)
    // Create broker admin credentials before starting brokers
    createScramCredentials(zkConnect, kafkaPrincipal, kafkaPassword)
  }

  override def configureSecurityAfterServersStart() {
    super.configureSecurityAfterServersStart()
    // create scram credential for user "scram-user"
    createScramCredentials(zkConnect, clientPrincipal, clientPassword)

    //create a token with "scram-user" credentials
    val token = createDelegationToken()

    // pass token to client jaas config
    val clientLoginContext = JaasTestUtils.tokenClientLoginModule(token.tokenInfo().tokenId(), token.hmacAsBase64String())
    producerConfig.put(SaslConfigs.SASL_JAAS_CONFIG, clientLoginContext)
    consumerConfig.put(SaslConfigs.SASL_JAAS_CONFIG, clientLoginContext)
  }

  @Before
  override def setUp() {
    startSasl(jaasSections(kafkaServerSaslMechanisms, Option(kafkaClientSaslMechanism), Both))
    super.setUp()
  }

  // Issues a delegation token via an admin client authenticated as the SCRAM
  // client user, then waits until every broker's token cache has seen it.
  private def createDelegationToken(): DelegationToken = {
    val config = new util.HashMap[String, Object]
    config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
    val securityProps: util.Map[Object, Object] =
      TestUtils.adminClientSecurityConfigs(securityProtocol, trustStoreFile, clientSaslProperties)
    securityProps.asScala.foreach { case (key, value) => config.put(key.asInstanceOf[String], value) }
    val clientLoginContext = jaasClientLoginModule(kafkaClientSaslMechanism)
    config.put(SaslConfigs.SASL_JAAS_CONFIG, clientLoginContext)
    val adminClient = AdminClient.create(config)

    val token = adminClient.createDelegationToken().delegationToken().get()

    //wait for token to reach all the brokers
    TestUtils.waitUntilTrue(() => servers.forall(server => !server.tokenCache.tokens().isEmpty),
      "Timed out waiting for token to propagate to all servers")

    adminClient.close()
    token
  }
}
| ollie314/kafka | core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationTest.scala | Scala | apache-2.0 | 4,242 |
/*
* Copyright 2017 Kailuo Wang
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package mainecoon
package laws
import cats.~>
import cats.laws._
import cats.data.Tuple2K
/**
 * Laws that must hold for an [[ApplyK]] instance, extending the semigroupal and
 * functor laws with higher-kinded associativity of `productK`.
 */
trait ApplyKLaws[F[_[_]]] extends SemigroupalKLaws[F] with FunctorKLaws[F] {
  implicit def F: ApplyK[F]

  /**
   * `productK` must be associative up to re-nesting of the `Tuple2K` pairs:
   * `af x (ag x ah)` equals `(af x ag) x ah` after re-associating with `mapK`.
   */
  def applyKAssociativity[A[_], B[_], C[_]](af: F[A], ag: F[B], ah: F[C]): IsEq[F[Tuple2K[A, Tuple2K[B, C, ?], ?]]] = {
    F.productK(af, F.productK(ag, ah)) <->
      // Re-associate ((A, B), C) into (A, (B, C)) with a natural transformation.
      F.mapK(F.productK(F.productK(af, ag), ah))(new (Tuple2K[Tuple2K[A, B, ?], C, ?] ~> Tuple2K[A, Tuple2K[B, C, ?], ?]) {
        def apply[X](fa: Tuple2K[Tuple2K[A, B, ?], C, X]): Tuple2K[A, Tuple2K[B, C, ?], X] =
          Tuple2K(fa.first.first, Tuple2K(fa.first.second, fa.second))
      })
  }
}
/** Companion providing a summoner-style constructor for [[ApplyKLaws]]. */
object ApplyKLaws {
  /** Builds the law set from the implicitly available `ApplyK[F]` instance. */
  def apply[F[_[_]]](implicit ev: ApplyK[F]): ApplyKLaws[F] = {
    val instance = ev
    new ApplyKLaws[F] {
      val F = instance
    }
  }
}
| kailuowang/mainecoon | laws/src/main/scala/mainecoon/laws/ApplyKLaws.scala | Scala | apache-2.0 | 1,384 |
/**
* Generated by apidoc - http://www.apidoc.me
* Service version: 0.11.20
* apidoc:0.11.21 http://www.apidoc.me/bryzek/apidoc-common/0.11.20/play_2_x_json
*/
package com.bryzek.apidoc.common.v0.models {

  // NOTE: this file is generated by apidoc; models mirror the service's wire format.

  /** Standard audit block: who created/updated a record and when. */
  case class Audit(
    createdAt: _root_.org.joda.time.DateTime,
    createdBy: com.bryzek.apidoc.common.v0.models.ReferenceGuid,
    updatedAt: _root_.org.joda.time.DateTime,
    updatedBy: com.bryzek.apidoc.common.v0.models.ReferenceGuid
  )

  /** Simple service health report; `status` is the raw status string. */
  case class Healthcheck(
    status: String
  )

  /**
   * Represents a reference to another model.
   */
  case class Reference(
    guid: _root_.java.util.UUID,
    key: String
  )

  /** Reference to another model identified only by its guid. */
  case class ReferenceGuid(
    guid: _root_.java.util.UUID
  )

}
package com.bryzek.apidoc.common.v0.models {

  /**
   * Play-JSON Reads/Writes for the generated models. `jsObject*` helpers build the
   * raw JsObject so callers can merge/extend before serialization.
   */
  package object json {
    import play.api.libs.json.__
    import play.api.libs.json.JsString
    import play.api.libs.json.Writes
    import play.api.libs.functional.syntax._
    import com.bryzek.apidoc.common.v0.models.json._

    // UUIDs travel as plain strings on the wire.
    private[v0] implicit val jsonReadsUUID = __.read[String].map(java.util.UUID.fromString)

    private[v0] implicit val jsonWritesUUID = new Writes[java.util.UUID] {
      def writes(x: java.util.UUID) = JsString(x.toString)
    }

    // Date-times travel as ISO-8601 strings.
    private[v0] implicit val jsonReadsJodaDateTime = __.read[String].map { str =>
      import org.joda.time.format.ISODateTimeFormat.dateTimeParser
      dateTimeParser.parseDateTime(str)
    }

    private[v0] implicit val jsonWritesJodaDateTime = new Writes[org.joda.time.DateTime] {
      def writes(x: org.joda.time.DateTime) = {
        import org.joda.time.format.ISODateTimeFormat.dateTime
        val str = dateTime.print(x)
        JsString(str)
      }
    }

    // --- Audit ---
    implicit def jsonReadsApidoccommonAudit: play.api.libs.json.Reads[Audit] = {
      (
        (__ \\ "created_at").read[_root_.org.joda.time.DateTime] and
        (__ \\ "created_by").read[com.bryzek.apidoc.common.v0.models.ReferenceGuid] and
        (__ \\ "updated_at").read[_root_.org.joda.time.DateTime] and
        (__ \\ "updated_by").read[com.bryzek.apidoc.common.v0.models.ReferenceGuid]
      )(Audit.apply _)
    }

    def jsObjectAudit(obj: com.bryzek.apidoc.common.v0.models.Audit) = {
      play.api.libs.json.Json.obj(
        "created_at" -> play.api.libs.json.JsString(_root_.org.joda.time.format.ISODateTimeFormat.dateTime.print(obj.createdAt)),
        "created_by" -> jsObjectReferenceGuid(obj.createdBy),
        "updated_at" -> play.api.libs.json.JsString(_root_.org.joda.time.format.ISODateTimeFormat.dateTime.print(obj.updatedAt)),
        "updated_by" -> jsObjectReferenceGuid(obj.updatedBy)
      )
    }

    implicit def jsonWritesApidoccommonAudit: play.api.libs.json.Writes[Audit] = {
      new play.api.libs.json.Writes[com.bryzek.apidoc.common.v0.models.Audit] {
        def writes(obj: com.bryzek.apidoc.common.v0.models.Audit) = {
          jsObjectAudit(obj)
        }
      }
    }

    // --- Healthcheck ---
    implicit def jsonReadsApidoccommonHealthcheck: play.api.libs.json.Reads[Healthcheck] = {
      (__ \\ "status").read[String].map { x => new Healthcheck(status = x) }
    }

    def jsObjectHealthcheck(obj: com.bryzek.apidoc.common.v0.models.Healthcheck) = {
      play.api.libs.json.Json.obj(
        "status" -> play.api.libs.json.JsString(obj.status)
      )
    }

    implicit def jsonWritesApidoccommonHealthcheck: play.api.libs.json.Writes[Healthcheck] = {
      new play.api.libs.json.Writes[com.bryzek.apidoc.common.v0.models.Healthcheck] {
        def writes(obj: com.bryzek.apidoc.common.v0.models.Healthcheck) = {
          jsObjectHealthcheck(obj)
        }
      }
    }

    // --- Reference ---
    implicit def jsonReadsApidoccommonReference: play.api.libs.json.Reads[Reference] = {
      (
        (__ \\ "guid").read[_root_.java.util.UUID] and
        (__ \\ "key").read[String]
      )(Reference.apply _)
    }

    def jsObjectReference(obj: com.bryzek.apidoc.common.v0.models.Reference) = {
      play.api.libs.json.Json.obj(
        "guid" -> play.api.libs.json.JsString(obj.guid.toString),
        "key" -> play.api.libs.json.JsString(obj.key)
      )
    }

    implicit def jsonWritesApidoccommonReference: play.api.libs.json.Writes[Reference] = {
      new play.api.libs.json.Writes[com.bryzek.apidoc.common.v0.models.Reference] {
        def writes(obj: com.bryzek.apidoc.common.v0.models.Reference) = {
          jsObjectReference(obj)
        }
      }
    }

    // --- ReferenceGuid ---
    implicit def jsonReadsApidoccommonReferenceGuid: play.api.libs.json.Reads[ReferenceGuid] = {
      (__ \\ "guid").read[_root_.java.util.UUID].map { x => new ReferenceGuid(guid = x) }
    }

    def jsObjectReferenceGuid(obj: com.bryzek.apidoc.common.v0.models.ReferenceGuid) = {
      play.api.libs.json.Json.obj(
        "guid" -> play.api.libs.json.JsString(obj.guid.toString)
      )
    }

    implicit def jsonWritesApidoccommonReferenceGuid: play.api.libs.json.Writes[ReferenceGuid] = {
      new play.api.libs.json.Writes[com.bryzek.apidoc.common.v0.models.ReferenceGuid] {
        def writes(obj: com.bryzek.apidoc.common.v0.models.ReferenceGuid) = {
          jsObjectReferenceGuid(obj)
        }
      }
    }
  }
}
package com.bryzek.apidoc.common.v0 {

  /**
   * Play route binders for the date types used by the generated API: they parse
   * path/query parameters as ISO-8601 date-times and dates, with a helpful error
   * message including an example value.
   */
  object Bindables {

    import play.api.mvc.{PathBindable, QueryStringBindable}
    import org.joda.time.{DateTime, LocalDate}
    import org.joda.time.format.ISODateTimeFormat
    import com.bryzek.apidoc.common.v0.models._

    // Type: date-time-iso8601
    implicit val pathBindableTypeDateTimeIso8601 = new PathBindable.Parsing[org.joda.time.DateTime](
      ISODateTimeFormat.dateTimeParser.parseDateTime(_), _.toString, (key: String, e: Exception) => s"Error parsing date time $key. Example: 2014-04-29T11:56:52Z"
    )

    implicit val queryStringBindableTypeDateTimeIso8601 = new QueryStringBindable.Parsing[org.joda.time.DateTime](
      ISODateTimeFormat.dateTimeParser.parseDateTime(_), _.toString, (key: String, e: Exception) => s"Error parsing date time $key. Example: 2014-04-29T11:56:52Z"
    )

    // Type: date-iso8601
    implicit val pathBindableTypeDateIso8601 = new PathBindable.Parsing[org.joda.time.LocalDate](
      ISODateTimeFormat.yearMonthDay.parseLocalDate(_), _.toString, (key: String, e: Exception) => s"Error parsing date $key. Example: 2014-04-29"
    )

    implicit val queryStringBindableTypeDateIso8601 = new QueryStringBindable.Parsing[org.joda.time.LocalDate](
      ISODateTimeFormat.yearMonthDay.parseLocalDate(_), _.toString, (key: String, e: Exception) => s"Error parsing date $key. Example: 2014-04-29"
    )

  }
}
| movio/movio-apidoc-generator | generated/app/BryzekApidocCommonV0Models.scala | Scala | mit | 6,513 |
package is.hail.expr.ir.functions
import is.hail.annotations.{Region, StagedRegionValueBuilder}
import is.hail.asm4s._
import is.hail.expr.ir.{EmitMethodBuilder, _}
import is.hail.{asm4s, types}
import is.hail.types.physical._
import is.hail.types.physical.stypes.concrete.{SCanonicalLocusPointer, SCanonicalLocusPointerCode}
import is.hail.types.virtual._
import is.hail.utils._
import is.hail.variant._
object LocusFunctions extends RegistryFunctions {
/** Stages a lookup of the given reference genome on the method being compiled. */
def rgCode(mb: EmitMethodBuilder[_], rg: ReferenceGenome): Code[ReferenceGenome] =
  mb.getReferenceGenome(rg)

/** Type variable constrained to locus types, for registration signatures. */
def tlocus(name: String): Type = tv(name, "locus")
/** Virtual struct of a (locus, alleles) pair for the given locus type variable. */
def tvariant(name: String): TStruct = TStruct("locus" -> tlocus(name), "alleles" -> TArray(TString))
/** Virtual interval type whose point type is the given locus type variable. */
def tinterval(name: String): TInterval = TInterval(tlocus(name))

/** Writes `locus` into a fresh region value of type `rt` and yields its offset. */
def emitLocus(r: EmitRegion, locus: Code[Locus], rt: PLocus): Code[Long] = {
  val srvb = new StagedRegionValueBuilder(r, rt)
  Code(emitLocus(srvb, locus), srvb.offset)
}

/** Appends the (contig, position) fields of a runtime `Locus` to the builder. */
def emitLocus(srvb: StagedRegionValueBuilder, locus: Code[Locus]): Code[Unit] = {
  val llocal = srvb.mb.newLocal[Locus]()
  // Memoize so `locus` is evaluated once even though both fields are read.
  Code.memoize(locus, "locus_funs_emit_locus_locus") { locus =>
    Code(
      llocal := locus,
      srvb.start(),
      srvb.addString(locus.invoke[String]("contig")),
      srvb.advance(),
      srvb.addInt(locus.invoke[Int]("position")),
      srvb.advance())
  }
}

/**
 * Writes a (Locus, alleles) pair into a fresh region value of struct type `rt`
 * (fields "locus" and "alleles") and yields its offset.
 */
def emitVariant(r: EmitRegion, variant: Code[(Locus, IndexedSeq[String])], rt: PStruct): Code[Long] = {
  val vlocal = r.mb.newLocal[(Locus, IndexedSeq[String])]()
  val alocal = r.mb.newLocal[IndexedSeq[String]]()
  val len = r.mb.newLocal[Int]()
  val srvb = new StagedRegionValueBuilder(r, rt)
  // Tuple fields come back as java.lang.Object; checkcast recovers the types.
  val addLocus = { srvb: StagedRegionValueBuilder =>
    emitLocus(srvb, Code.checkcast[Locus](vlocal.getField[java.lang.Object]("_1")))
  }
  val addAlleles = { srvb: StagedRegionValueBuilder =>
    Code(
      srvb.start(len),
      Code.whileLoop(srvb.arrayIdx < len,
        srvb.addString(alocal.invoke[Int, String]("apply", srvb.arrayIdx)),
        srvb.advance()))
  }
  Code(
    vlocal := variant,
    alocal := Code.checkcast[IndexedSeq[String]](vlocal.getField[java.lang.Object]("_2")),
    len := alocal.invoke[Int]("size"),
    srvb.start(),
    srvb.addBaseStruct(types.coerce[PStruct](rt.field("locus").typ.fundamentalType), addLocus),
    srvb.advance(),
    srvb.addArray(rt.field("alleles").typ.asInstanceOf[PArray], addAlleles),
    srvb.advance(),
    srvb.offset)
}

/** Writes `interval` into a fresh region value of type `pt` and yields its offset. */
def emitInterval(r: EmitRegion, interval: Code[Interval], pt: PInterval): Code[Long] = {
  val srvb = new StagedRegionValueBuilder(r, pt)
  Code(emitInterval(srvb, interval, pt), srvb.offset)
}

/**
 * Appends a runtime `Interval` of loci to the builder: start locus, end locus,
 * then the includesStart/includesEnd flags.
 */
def emitInterval(srvb: StagedRegionValueBuilder, interval: Code[Interval], pt: PInterval): Code[Unit] = {
  val ilocal = srvb.mb.newLocal[Interval]()
  // `point` selects the "start" or "end" accessor on the Interval object.
  val addLocus = { (srvb: StagedRegionValueBuilder, point: String) =>
    emitLocus(srvb, Code.checkcast[Locus](ilocal.invoke[java.lang.Object](point)))
  }

  val lt = pt.pointType.fundamentalType.asInstanceOf[PBaseStruct]
  Code(FastIndexedSeq(
    ilocal := interval,
    srvb.start(),
    srvb.addBaseStruct(types.coerce[PBaseStruct](lt), addLocus(_, "start")),
    srvb.advance(),
    srvb.addBaseStruct(types.coerce[PBaseStruct](lt), addLocus(_, "end")),
    srvb.advance(),
    srvb.addBoolean(ilocal.invoke[Boolean]("includesStart")),
    srvb.advance(),
    srvb.addBoolean(ilocal.invoke[Boolean]("includesEnd")),
    srvb.advance()))
}

/**
 * Writes a liftover result (new Locus, negative-strand flag) into a fresh region
 * value of struct type `rt` (fields "result" and "is_negative_strand").
 */
def emitLiftoverLocus(r: EmitRegion, result: Code[(Locus, Boolean)], rt: PStruct): Code[Long] = {
  val rlocal = r.mb.newLocal[(Locus, Boolean)]()
  val blocal = r.mb.newLocal[Boolean]()
  val srvb = new StagedRegionValueBuilder(r, rt)
  val addLocus = { srvb: StagedRegionValueBuilder =>
    emitLocus(srvb, Code.checkcast[Locus](rlocal.getField[java.lang.Object]("_1")))
  }
  Code(
    rlocal := result,
    // The boxed Boolean from the tuple must be unboxed explicitly.
    blocal := Code.checkcast[java.lang.Boolean](rlocal.getField[java.lang.Object]("_2")).invoke[Boolean]("booleanValue"),
    srvb.start(),
    srvb.addBaseStruct(types.coerce[PStruct](rt.field("result").typ.fundamentalType), addLocus),
    srvb.advance(),
    srvb.addBoolean(blocal),
    srvb.advance(),
    srvb.offset)
}

/**
 * Writes an interval liftover result (new Interval, negative-strand flag) into a
 * fresh region value of struct type `pt` (fields "result" and "is_negative_strand").
 */
def emitLiftoverLocusInterval(r: EmitRegion, result: Code[(Interval, Boolean)], pt: PStruct): Code[Long] = {
  val rlocal = r.mb.newLocal[(Interval, Boolean)]()
  val ilocal = r.mb.newLocal[Interval]()
  val blocal = r.mb.newLocal[Boolean]()
  val srvb = new StagedRegionValueBuilder(r, pt)
  val pinterval = pt.field("result").typ.asInstanceOf[PInterval]
  val addInterval = { srvb: StagedRegionValueBuilder =>
    emitInterval(srvb, ilocal, pinterval)
  }
  Code(
    rlocal := result,
    ilocal := Code.checkcast[Interval](rlocal.getField[java.lang.Object]("_1")),
    blocal := Code.checkcast[java.lang.Boolean](rlocal.getField[java.lang.Object]("_2")).invoke[Boolean]("booleanValue"),
    srvb.start(),
    srvb.addBaseStruct(types.coerce[PStruct](pinterval.fundamentalType), addInterval),
    srvb.advance(),
    srvb.addBoolean(blocal),
    srvb.advance(),
    srvb.offset)
}

/** Registers a unary locus -> boolean predicate built from IR (not staged JVM code). */
def registerLocusCode(methodName: String)(f: IR => IR): Unit =
  registerIR1(methodName, tlocus("T"), TBoolean)((_, a) => f(a))

/** IR predicate: the locus's contig is one of the reference genome's X contigs. */
def inX(locus: IR): IR = {
  val xContigs = Literal(TSet(TString), locus.typ.asInstanceOf[TLocus].rg.xContigs)
  invoke("contains", TBoolean, xContigs, invoke("contig", TString, locus))
}

/** IR predicate: the locus's contig is one of the reference genome's Y contigs. */
def inY(locus: IR): IR = {
  val yContigs = Literal(TSet(TString), locus.typ.asInstanceOf[TLocus].rg.yContigs)
  invoke("contains", TBoolean, yContigs, invoke("contig", TString, locus))
}

/** IR predicate: the locus lies in a pseudoautosomal region of its reference genome. */
def inPar(locus: IR): IR = {
  val t = locus.typ.asInstanceOf[TLocus]
  val par = Literal(TArray(TInterval(t)), t.rg.par.toFastIndexedSeq)
  ArrayFunctions.exists(par, interval => invoke("contains", TBoolean, interval, locus))
}

/** IR predicate: the locus's contig is a mitochondrial contig. */
def isMitochondrial(locus: IR): IR = {
  val mtContigs = Literal(TSet(TString), locus.typ.asInstanceOf[TLocus].rg.mtContigs)
  invoke("contains", TBoolean, mtContigs, invoke("contig", TString, locus))
}
/** IR predicate: autosomal = not X, not Y, not mitochondrial. */
def isAutosomal(locus: IR): IR = !(inX(locus) || inY(locus) || isMitochondrial(locus))
def registerAll() {
val locusClass = Locus.getClass
registerPCode1("contig", tlocus("T"), TString,
(_: Type, x: PType) => x.asInstanceOf[PLocus].contigType) {
case (r, cb, rt, locus: PLocusCode) =>
locus.contig(cb).asPCode
}
registerCode1("position", tlocus("T"), TInt32, (_: Type, x: PType) => x.asInstanceOf[PLocus].positionType) {
case (r, rt, (locusT: PLocus, locus: Code[Long])) =>
locusT.position(locus)
}
registerLocusCode("isAutosomalOrPseudoAutosomal") { locus =>
isAutosomal(locus) || ((inX(locus) || inY(locus)) && inPar(locus))
}
registerLocusCode("isAutosomal")(isAutosomal)
registerLocusCode("isMitochondrial")(isMitochondrial)
registerLocusCode("inXPar") { locus => inX(locus) && inPar(locus) }
registerLocusCode("inYPar") { locus => inY(locus) && inPar(locus) }
registerLocusCode("inXNonPar") { locus => inX(locus) && !inPar(locus) }
registerLocusCode("inYNonPar") { locus => inY(locus) && !inPar(locus) }
registerPCode2("min_rep", tlocus("T"), TArray(TString), TStruct("locus" -> tv("T"), "alleles" -> TArray(TString)), {
(returnType: Type, _: PType, _: PType) => {
val locusPT = PCanonicalLocus(returnType.asInstanceOf[TStruct].field("locus").typ.asInstanceOf[TLocus].rg, true)
PCanonicalStruct("locus" -> locusPT, "alleles" -> PCanonicalArray(PCanonicalString(true), true))
}
}) {
case (r, cb, rt: PStruct, locus: PLocusCode, alleles: PIndexableCode) =>
val tuple = EmitCodeBuilder.scopedCode(r.mb) { cb => Code.invokeScalaObject2[Locus, IndexedSeq[String], (Locus, IndexedSeq[String])](
VariantMethods.getClass, "minRep",
locus.getLocusObj(cb),
Code.checkcast[IndexedSeq[String]](wrapArg(r, alleles.pt)(alleles.code).asInstanceOf[Code[AnyRef]]))}
val code = Code.memoize(tuple, "min_rep_tuple") { tuple =>
Code.memoize(
Code.checkcast[Locus](tuple.getField[java.lang.Object]("_1")), "min_rep_new_locus",
Code.checkcast[IndexedSeq[String]](tuple.getField[java.lang.Object]("_2")), "min_rep_new_alleles"
) { (newLocus, newAlleles) =>
val newLocusT = rt.field("locus").typ
val newAllelesT = rt.field("alleles").typ.asInstanceOf[PArray]
val srvb = new StagedRegionValueBuilder(r, rt)
Code(
srvb.start(),
srvb.addBaseStruct(newLocusT.fundamentalType.asInstanceOf[PStruct], { locusBuilder =>
Code(
locusBuilder.start(),
locusBuilder.addString(newLocus.invoke[String]("contig")),
locusBuilder.advance(),
locusBuilder.addInt(newLocus.invoke[Int]("position")))
}),
srvb.advance(),
srvb.addArray(newAllelesT, { allelesBuilder =>
Code(
allelesBuilder.start(newAlleles.invoke[Int]("size")),
Code.whileLoop(allelesBuilder.arrayIdx < newAlleles.invoke[Int]("size"),
allelesBuilder.addString(Code.checkcast[String](newAlleles.invoke[Int, java.lang.Object]("apply", allelesBuilder.arrayIdx))),
allelesBuilder.advance()))
}),
srvb.offset)
}
}
PCode(rt, code)
}
registerCode2("locus_windows_per_contig", TArray(TArray(TFloat64)), TFloat64, TTuple(TArray(TInt32), TArray(TInt32)), {
(_: Type, _: PType, _: PType) =>
PCanonicalTuple(false, PCanonicalArray(PInt32(true), true), PCanonicalArray(PInt32(true), true))
}) {
case (r: EmitRegion, rt: PTuple, (groupedT: PArray, _coords: Code[Long]), (radiusT: PFloat64, _radius: Code[Double])) =>
val coordT = types.coerce[PArray](groupedT.elementType)
val coords = r.mb.newLocal[Long]("coords")
val radius = r.mb.newLocal[Double]("radius")
val ncontigs = r.mb.newLocal[Int]("ncontigs")
val totalLen = r.mb.newLocal[Int]("l")
val iContig = r.mb.newLocal[Int]()
val len = r.mb.newLocal[Int]("l")
val i = r.mb.newLocal[Int]("i")
val idx = r.mb.newLocal[Int]("i")
val coordsPerContig = r.mb.newLocal[Long]("coords")
val offset = r.mb.newLocal[Int]("offset")
val lastCoord = r.mb.newLocal[Double]("coord")
val getCoord = { i: Code[Int] =>
asm4s.coerce[Double](Region.loadIRIntermediate(coordT.elementType)(coordT.elementOffset(coordsPerContig, len, i)))
}
def forAllContigs(c: Code[Unit]): Code[Unit] = {
Code(iContig := 0,
Code.whileLoop(iContig < ncontigs,
coordsPerContig := asm4s.coerce[Long](
Region.loadIRIntermediate(coordT)(
groupedT.elementOffset(coords, ncontigs, iContig))),
c,
iContig += 1))
}
val startCond = getCoord(i) > (getCoord(idx) + radius)
val endCond = getCoord(i) >= (getCoord(idx) - radius)
val addIdxWithCondition = { (cond: Code[Boolean], sab: StagedRegionValueBuilder) =>
Code(
sab.start(totalLen),
offset := 0,
forAllContigs(
Code(
i := 0,
idx := 0,
len := coordT.loadLength(coordsPerContig),
len.ceq(0).mux(lastCoord := 0.0, lastCoord := getCoord(0)),
Code.whileLoop(i < len,
coordT.isElementMissing( coordsPerContig, i).mux(
Code._fatal[Unit](
const("locus_windows: missing value for 'coord_expr' at row ")
.concat((offset + i).toS)),
(lastCoord > getCoord(i)).mux(
Code._fatal[Unit]("locus_windows: 'coord_expr' must be in ascending order within each contig."),
lastCoord := getCoord(i))),
Code.whileLoop((idx < len) && cond, idx += 1),
sab.addInt(offset + idx),
sab.advance(),
i += 1),
offset := offset + len)))
}
val srvb = new StagedRegionValueBuilder(r, rt)
Code(Code(FastIndexedSeq(
coords := _coords,
radius := _radius,
ncontigs := groupedT.loadLength(coords),
totalLen := 0,
forAllContigs(totalLen := totalLen + coordT.loadLength(coordsPerContig)),
srvb.start(),
srvb.addArray(PCanonicalArray(PInt32()), addIdxWithCondition(startCond, _)),
srvb.advance(),
srvb.addArray(PCanonicalArray(PInt32()), addIdxWithCondition(endCond, _)))),
srvb.end())
}
registerCode1("Locus", TString, tlocus("T"), {
(returnType: Type, _: PType) => PCanonicalLocus(returnType.asInstanceOf[TLocus].rg)
}) {
case (r, rt: PLocus, (strT, locusoff: Code[Long])) =>
val slocus = asm4s.coerce[String](wrapArg(r, strT)(locusoff))
val locus = Code
.invokeScalaObject2[String, ReferenceGenome, Locus](
locusClass, "parse", slocus, rgCode(r.mb, rt.rg))
emitLocus(r, locus, rt)
}
registerPCode2("Locus", TString, TInt32, tlocus("T"), {
(returnType: Type, _: PType, _: PType) => PCanonicalLocus(returnType.asInstanceOf[TLocus].rg)
}) {
case (r, cb, rt: PCanonicalLocus, contig, pos) =>
val contigMemo = contig.memoize(cb, "locus_contig")
val posMemo = pos.memoize(cb, "locus_pos")
val srvb = new StagedRegionValueBuilder(r, rt)
cb += rgCode(r.mb, rt.rg).invoke[String, Int, Unit]("checkLocus", contigMemo.asString.loadString(), posMemo.asInt.intCode(cb))
cb += srvb.start()
cb += srvb.addIRIntermediate(contigMemo)
cb += srvb.advance()
cb += srvb.addInt(posMemo.asInt.intCode(cb))
new SCanonicalLocusPointerCode(SCanonicalLocusPointer(rt), srvb.offset)
}
registerCode1("LocusAlleles", TString, tvariant("T"), {
(returnType: Type, _: PType) => {
val lTyp = returnType.asInstanceOf[TStruct].field("locus").typ.asInstanceOf[TLocus]
PCanonicalStruct("locus" -> PCanonicalLocus(lTyp.rg, true), "alleles" -> PCanonicalArray(PCanonicalString(true), true))
}
}) {
case (r, rt: PStruct, (strT, variantoff: Code[Long])) =>
val plocus = rt.types(0).asInstanceOf[PLocus]
val svar = asm4s.coerce[String](wrapArg(r, strT)(variantoff))
val variant = Code
.invokeScalaObject2[String, ReferenceGenome, (Locus, IndexedSeq[String])](
VariantMethods.getClass, "parse", svar, rgCode(r.mb, plocus.rg))
emitVariant(r, variant, rt)
}
registerEmitCode2("LocusInterval", TString, TBoolean, tinterval("T"), {
(returnType: Type, _: PType, _: PType) => {
val lPTyp = returnType.asInstanceOf[TInterval].pointType.asInstanceOf[TLocus]
PCanonicalInterval(PCanonicalLocus(lPTyp.asInstanceOf[TLocus].rg))
}
}) {
case (r: EmitRegion, rt: PInterval, ioff: EmitCode, invalidMissing: EmitCode) =>
val plocus = rt.pointType.asInstanceOf[PLocus]
val sinterval = asm4s.coerce[String](wrapArg(r, ioff.pt)(ioff.value[Long]))
val intervalLocal = r.mb.newLocal[Interval](name="intervalObject")
val interval = Code.invokeScalaObject3[String, ReferenceGenome, Boolean, Interval](
locusClass, "parseInterval", sinterval, rgCode(r.mb, plocus.rg), invalidMissing.value[Boolean])
EmitCode(
Code(ioff.setup, invalidMissing.setup),
ioff.m || invalidMissing.m || Code(intervalLocal := interval, intervalLocal.isNull),
PCode(rt, emitInterval(r, intervalLocal, rt))
)
}
registerEmitCode6("LocusInterval", TString, TInt32, TInt32, TBoolean, TBoolean, TBoolean, tinterval("T"), {
(returnType: Type, _: PType, _: PType, _: PType, _: PType, _: PType, _: PType) => {
val lPTyp = returnType.asInstanceOf[TInterval].pointType.asInstanceOf[TLocus]
PCanonicalInterval(PCanonicalLocus(lPTyp.rg))
}
}) {
case (r: EmitRegion, rt: PInterval,
locoff: EmitCode,
pos1: EmitCode,
pos2: EmitCode,
include1: EmitCode,
include2: EmitCode,
invalidMissing: EmitCode) =>
val plocus = rt.pointType.asInstanceOf[PLocus]
val sloc = asm4s.coerce[String](wrapArg(r, locoff.pt)(locoff.value[Long]))
val intervalLocal = r.mb.newLocal[Interval]("intervalObject")
val interval = Code.invokeScalaObject7[String, Int, Int, Boolean, Boolean, ReferenceGenome, Boolean, Interval](
locusClass, "makeInterval", sloc, pos1.value[Int], pos2.value[Int], include1.value[Boolean], include2.value[Boolean], rgCode(r.mb, plocus.rg), invalidMissing.value[Boolean])
EmitCode(
Code(locoff.setup, pos1.setup, pos2.setup, include1.setup, include2.setup, invalidMissing.setup),
locoff.m || pos1.m || pos2.m || include1.m || include2.m || invalidMissing.m || Code(intervalLocal := interval, intervalLocal.isNull),
PCode(rt, emitInterval(r, intervalLocal, rt))
)
}
registerCode1("globalPosToLocus", TInt64, tlocus("T"), {
(returnType: Type, _: PType) =>
PCanonicalLocus(returnType.asInstanceOf[TLocus].rg)
}) {
case (r, rt: PLocus, (globalPositionT, globalPosition: Code[Long])) =>
val locus = rgCode(r.mb, rt.rg).invoke[Long, Locus]("globalPosToLocus", globalPosition)
emitLocus(r, locus, rt)
}
registerCode1("locusToGlobalPos", tlocus("T"), TInt64, (_: Type, _: PType) => PInt64()) {
case (r, rt, (locusT: PLocus, locus: Code[Long])) =>
val locusObject = Code.checkcast[Locus](wrapArg(r, locusT)(locus).asInstanceOf[Code[AnyRef]])
unwrapReturn(r, rt)(rgCode(r.mb, locusT.rg).invoke[Locus, Long]("locusToGlobalPos", locusObject))
}
registerEmitCode2("liftoverLocus", tlocus("T"), TFloat64, TStruct("result" -> tv("U", "locus"), "is_negative_strand" -> TBoolean), {
(returnType: Type, _: PType, _: PType) => {
val lTyp = returnType.asInstanceOf[TStruct].field("result").typ.asInstanceOf[TLocus]
PCanonicalStruct("result" -> PCanonicalLocus(lTyp.rg, true), "is_negative_strand" -> PBoolean(true))
}
}) {
case (r, rt: PStruct, loc, minMatch) =>
val locT = loc.pt.asInstanceOf[PLocus]
val srcRG = locT.rg
val destRG = rt.types(0).asInstanceOf[PLocus].rg
val locus = Code.checkcast[Locus](asm4s.coerce[AnyRef](wrapArg(r, locT)(loc.value[Long])))
val tlocal = r.mb.newLocal[(Locus, Boolean)]()
val lifted = rgCode(r.mb, srcRG).invoke[String, Locus, Double, (Locus, Boolean)]("liftoverLocus", destRG.name, locus, minMatch.value[Double])
EmitCode(
Code(loc.setup, minMatch.setup, tlocal := Code._null),
loc.m || minMatch.m || Code(tlocal := lifted, tlocal.isNull),
PCode(rt, emitLiftoverLocus(r, tlocal, rt))
)
}
registerEmitCode2("liftoverLocusInterval", tinterval("T"), TFloat64, TStruct("result" -> tinterval("U"), "is_negative_strand" -> TBoolean), {
(returnType: Type, _: PType, _: PType) => {
val lTyp = returnType.asInstanceOf[TStruct].field("result").typ.asInstanceOf[TInterval].pointType.asInstanceOf[TLocus]
PCanonicalStruct("result" -> PCanonicalInterval(PCanonicalLocus(lTyp.rg, true), true), "is_negative_strand" -> PBoolean(true))
}
}) {
case (r, rt: PStruct, i, minMatch) =>
val iT = i.pt.asInstanceOf[PInterval]
val srcRG = iT.pointType.asInstanceOf[PLocus].rg
val destRG = rt.types(0).asInstanceOf[PInterval].pointType.asInstanceOf[PLocus].rg
val interval = Code.checkcast[Interval](asm4s.coerce[AnyRef](wrapArg(r, iT)(i.value[Long])))
val tlocal = r.mb.newLocal[(Interval, Boolean)]()
val lifted = rgCode(r.mb, srcRG).invoke[String, Interval, Double, (Interval, Boolean)]("liftoverLocusInterval", destRG.name, interval, minMatch.value[Double])
EmitCode(
Code(i.setup, minMatch.setup, tlocal := Code._null),
i.m || minMatch.m || Code(tlocal := lifted, tlocal.isNull),
PCode(rt, emitLiftoverLocusInterval(r, tlocal, rt))
)
}
}
}
| danking/hail | hail/src/main/scala/is/hail/expr/ir/functions/LocusFunctions.scala | Scala | mit | 20,629 |
import scala.reflect.runtime.universe._
import scala.reflect.runtime.{currentMirror => cm}
/** Fixture: a method annotated with a Java annotation carrying a nested-enum value, inspected reflectively below. */
class Bean {
  @JavaAnnotationWithNestedEnum_1(JavaAnnotationWithNestedEnum_1.Value.VALUE)
  def value = 1
}
/** Reflection test: prints whether Bean is a case class and the annotations on its `value` member. */
object Test extends dotty.runtime.LegacyApp {
  println(cm.staticClass("Bean").isCaseClass)
  println(typeOf[Bean].decl(TermName("value")).annotations)
}
| folone/dotty | tests/pending/run/t6548/Test_2.scala | Scala | bsd-3-clause | 356 |
/*
* Copyright (c) 2021, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
package com.krux.hyperion.resource
import com.krux.hyperion.aws.{AdpEmrConfiguration, AdpRef}
import com.krux.hyperion.common.{BaseFields, PipelineObjectId, NamedPipelineObject}
/**
 * An EMR configuration pipeline object (classification plus nested properties and
 * sub-configurations). Immutable: all `with*` methods return an updated copy.
 * Construct via the companion's `apply(classification)`.
 */
case class EmrConfiguration private (
  baseFields: BaseFields,
  classification: Option[String],
  properties: Seq[Property],
  configurations: Seq[EmrConfiguration]
) extends NamedPipelineObject {

  type Self = EmrConfiguration

  def updateBaseFields(fields: BaseFields) = copy(baseFields = fields)

  def withClassification(classification: String) = copy(classification = Option(classification))
  // Varargs appenders: existing entries are kept, new ones are appended.
  def withProperty(property: Property*) = copy(properties = this.properties ++ property)
  def withConfiguration(configuration: EmrConfiguration*) = copy(configurations = this.configurations ++ configuration)

  // All pipeline objects this configuration transitively owns.
  def objects = configurations ++ properties

  lazy val serialize = AdpEmrConfiguration(
    id = id,
    name = name,
    classification = classification,
    property = properties.map(_.ref),
    configuration = configurations.map(_.ref)
  )

  def ref: AdpRef[AdpEmrConfiguration] = AdpRef(serialize)
}
/** Factory for [[EmrConfiguration]] instances with fresh pipeline object ids. */
object EmrConfiguration {

  @deprecated("Use apply(classification: String) instead", "5.0.0")
  def apply(): EmrConfiguration = build(None)

  def apply(classification: String): EmrConfiguration = build(Option(classification))

  // Shared construction path: fresh id, no properties or nested configurations.
  private def build(classification: Option[String]): EmrConfiguration = EmrConfiguration(
    baseFields = BaseFields(PipelineObjectId(EmrConfiguration.getClass)),
    classification = classification,
    properties = Seq.empty,
    configurations = Seq.empty
  )

}
| realstraw/hyperion | core/src/main/scala/com/krux/hyperion/resource/EmrConfiguration.scala | Scala | bsd-3-clause | 1,902 |
package im.actor.server.persist
import im.actor.server.model.Sex
import slick.driver.PostgresDriver.api._
/** Slick column mapping that stores [[Sex]] as its integer representation. */
object SexColumnType {
  implicit val sexColumnType =
    MappedColumnType.base[Sex, Int](_.toInt, Sex.fromInt)
}
| ljshj/actor-platform | actor-server/actor-persist/src/main/scala/im/actor/server/persist/SexColumnType.scala | Scala | mit | 222 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.app.nlp
import cc.factorie.app.nlp.coref.WithinDocCoref
import cc.factorie.util.{Attr, UniqueId}
import cc.factorie.variable.CategoricalVar
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
/** A portion of the string contents of a Document.
    @author Andrew McCallum */
trait DocumentSubstring {
  /** The Document of which this DocumentSubstring is a part. */
  def document: Document
  /** The character offset into the Document.string at which this DocumentSubstring begins. */
  def stringStart: Int
  /** The character offset into the Document.string at which this DocumentSubstring ends (exclusive).
      In other words, the last character of the DocumentSubstring is Document.string(this.stringEnd-1). */
  def stringEnd: Int
  /** The substring of the Document encompassed by this DocumentSubstring. */
  def string: String
}
/** A Document holds a String containing the original raw string contents
of a natural language document to be processed. The Document also holds
a sequence of Sections, each of which is delineated by character offsets
into the Document's string, and each of which contains a sequence of Tokens,
Sentences and other TokenSpans which may be annotated.
Documents may be constructed with their full string contents, or they may
have their string contents augmented by the appendString method.
Documents also have an optional "name" which can be set by Document.setName.
This is typically used to hold a filename in the file system, or some other similar identifier.
The Document.stringLength method may be a faster alternative to Document.string.length
when you are in the middle of multiple appendString calls because it will
efficiently use the underlying string buffer length, rather than flushing the buffer
to create a string.
The canonical sequence of Sections in the Document is available through
the Document.sections method.
By default the canonical sequence of Sections holds a single Section that covers the
entire string contents of the Document (even as the Document grows). This canonical sequence
of Sections may be modified by the user, but this special all-encompassing Section
instance will always be available as Document.asSection.
Even though Tokens, Sentences and TokenSpans are really stored in the Sections,
Document has basic convenience methods for obtaining iterable collections of these
by concatenating them from the canonical sequence of Sections. These iterable
collections are of type Iterable[Token], not Seq[Token], however.
If you need the Tokens as a Seq[Token] rather than an Iterable[Token], or you need
more advanced queries for TokenSpan types, you should use methods on a Section,
not on the Document. In this case typical processing looks like:
"for (section <- document.sections) section.tokens.someMethodOnSeq()...".
@author Andrew McCallum */
class Document extends DocumentSubstring with Attr with UniqueId with Serializable {
  /** Create a new Document, initializing it to have contents given by the argument. */
  def this(stringContents:String) = { this(); _string = stringContents }
  /** Return the "name" assigned to this Document by the 'setName' method.
      This may be any String, but is typically a filename or other similar identifier. */
  def name: String = { val dn = this.attr[DocumentName]; if (dn ne null) dn.string else null }
  /** Set the value that will be returned by the 'name' method.
      It accomplishes this by setting the DocumentName attr on Document.
      If the String argument is null, it will remove DocumentName attr if present. */
  def setName(s:String): this.type = { if (s ne null) this.attr += DocumentName(s) else this.attr.remove[DocumentName]; this }
  /** The unique identifier for this Document, e.g. used for database lookup, etc.
      Defined to be the Document's name; we are relying on the user to set the name to a unique value. */
  def uniqueId = name
  // One of the following two is always null, the other non-null. The latter is used while multiple appendString() method calls are made.
  private var _string: String = ""
  private var _stringbuf: StringBuffer = null
  /** Append the string 's' to this Document.
      @return the length of the Document's string before string 's' was appended. */
  def appendString(s:String): Int = this.synchronized {
    if (_stringbuf eq null) _stringbuf = new StringBuffer(_string)
    val result = _stringbuf.length
    _stringbuf.append(s)
    _string = null
    result
  }
  /** The string contents of this Document. */
  def string: String = {
    this.synchronized {
      if (_string eq null) _string = _stringbuf.toString
      _stringbuf = null
    }
    _string
  }
  /** The number of characters in this Document's string.
      Use this instead of Document.string.length because it is more efficient when the Document's string is growing with appendString. */
  def stringLength: Int = if (_string ne null) _string.length else _stringbuf.length
  // For the DocumentSubstring trait
  /** A method required by the DocumentSubstring trait, which in this case simply returns this Document itself. */
  def document: Document = this
  /** A method required by the DocumentSubstring trait, which in this case simply returns 0. */
  def stringStart: Int = 0
  /** A method required by the DocumentSubstring trait, which in this case simply returns Document.stringLength. */
  def stringEnd: Int = stringLength
  // Managing sections.  These are the canonical Sections, but alternative Sections can be attached as Attr's.
  /** A predefined Section that covers the entirety of the Document string, and even grows as the length of this Document may grow.
      If the user does not explicitly add Sections to the document, this Section is the only one returned by the "sections" method. */
  lazy val asSection: Section = new Section { def document: Document = Document.this; def stringStart = 0; def stringEnd = document.stringEnd }
  private lazy val _sections: mutable.Buffer[Section] = new ArrayBuffer[Section] += asSection
  /** The canonical list of Sections containing the tokens of the document.
      The user may create and add Sections covering various substrings within the Document.
      If the user does not explicitly add any Sections, by default there will be one Section that covers the entire Document string;
      this one Section is the one returned by "Document.asSection".
      Note that Sections may overlap with each other, representing alternative tokenizations or annotations. */
  def sections: Seq[Section] = _sections // if (_sections.length == 0) Seq(asSection) else _sections
  /** Add a new Section to this Document's canonical list of Sections.
      If the only previously existing Section is the default (asSection), then remove it before adding the argument. */
  def +=(s: Section) = { if (_sections.length == 1 && _sections(0) == asSection) _sections.clear(); _sections += s }
  /** Remove a Section from this Document's canonical list of Sections. */
  def -=(s: Section) = _sections -= s
  /** Remove all Section from this Document's canonical list of Sections. */
  def clearSections(): Unit = _sections.clear()
  // A few iterators that combine the results from the Sections
  /** Return an Iterable collection of all Tokens in all canonical Sections of this Document. */
  def tokens: Iterable[Token] = if (sections.length == 1) sections.head.tokens else new Iterable[Token] { def iterator = for (section <- sections.iterator; token <- section.tokens.iterator) yield token }
  /** Return an Iterable collection of all Sentences in all canonical Sections of this Document. */
  def sentences: Iterable[Sentence] = if (sections.length == 1) sections.head.sentences else new Iterable[Sentence] { def iterator = for (section <- sections.iterator; sentence <- section.sentences.iterator) yield sentence }
  /** An efficient way to get the total number of Tokens in the canonical Sections of this Document. */
  // FIX: the single-Section fast path previously tested "sections.length == 0", which would call
  // .head on an empty Seq and throw; the intended guard (as in "tokens" above) is length == 1.
  // With an empty section list the foldLeft branch correctly yields 0.
  def tokenCount: Int = if (sections.length == 1) sections.head.length else sections.foldLeft(0)((result, section) => result + section.length)
  /** An efficient way to get the total number of Sentences in the canonical Sections of this Document. */
  // FIX: same wrong guard as tokenCount (was "== 0", throwing on an empty section list).
  def sentenceCount: Int = if (sections.length == 1) sections.head.sentences.length else sections.foldLeft(0)((result, section) => result + section.sentences.length)
  /** The collection of DocumentAnnotators that have been run on this Document,
      For keeping records of which DocumentAnnotators have been run on this document, producing which annotations.
      A Map from the annotation class to the DocumentAnnotator that produced it,
      for example from classOf[cc.factorie.app.nlp.pos.PennPos] to classOf[cc.factorie.app.nlp.pos.ChainPosTagger].
      Note that this map records annotations placed not just on the Document itself, but also its constituents,
      such as NounPhraseNumberLabel on NounPhrase, PennPos on Token, ParseTree on Sentence, etc. */
  lazy val annotators = new collection.mutable.LinkedHashMap[Class[_], Class[_]]
  /** Return true if an annotation of class 'c' been placed somewhere within this Document. */
  def hasAnnotation(c:Class[_]): Boolean = annotators.keys.exists(k => c.isAssignableFrom(k))
  /** Optionally return the DocumentAnnotator that produced the annotation of class 'c' within this Document. */
  def annotatorFor(c:Class[_]): Option[Class[_]] = annotators.keys.find(k => c.isAssignableFrom(k)).collect({case k:Class[_] => annotators(k)})
  //  /** Return a String containing the Token strings in the document, with sentence and span boundaries indicated with SGML. */
  //  def sgmlString(spanLists:SpanList[_,_,_]*): String = {
  //    val buf = new StringBuffer
  //    for (section <- sections; token <- section.tokens) {
  //      if (token.isSentenceStart) buf.append("<sentence>")
  //      token.startsSpans.foreach(span => buf.append("<"+span.name+">"))
  //      buf.append(token.string)
  //      token.endsSpans.foreach(span => buf.append("</"+span.name+">"))
  //      if (token.isSentenceEnd) buf.append("</sentence>")
  //      buf.append(" ")
  //    }
  //    buf.toString
  //  }
  // Common attributes, will return null if not present
  def coref: WithinDocCoref = this.attr[WithinDocCoref]
  def targetCoref: WithinDocCoref = { val coref = this.attr[WithinDocCoref]; if (coref eq null) null else coref.target }
  /** Return the WithinDocCoref solution for this Document.  If not already present create it. */
  def getCoref: WithinDocCoref = this.attr.getOrElseUpdate[WithinDocCoref](new WithinDocCoref(this))
  /** Return the gold-standard WithinDocCoref.target solution for this Document.  If not already present create it. */
  def getTargetCoref: WithinDocCoref = { val coref = this.getCoref; if (coref.target eq null) coref.target = new WithinDocCoref(this); coref.target }
  /** Return a String containing the Token strings in the document, formatted with one-word-per-line
      and various tab-separated attributes appended on each line, generated as specified by the argument. */
  def owplString(attributes:Iterable[(Token)=>Any]): String = {
    val buf = new StringBuffer
    for (section <- sections; token <- section.tokens) {
      if (token.isSentenceStart) buf.append("\n")
      buf.append("%d\t%d\t%s\t".format(token.position+1, token.positionInSentence+1, token.string))
      //buf.append(token.stringStart); buf.append("\t")
      //buf.append(token.stringEnd)
      for (af <- attributes) {
        buf.append("\t")
        af(token) match {
          case cv:CategoricalVar[String @unchecked] => buf.append(cv.categoryValue.toString)
          case null => {}
          case v:Any => buf.append(v.toString)
        }
      }
      buf.append("\n")
    }
    buf.toString
  }
  /** Return a String containing the Token strings in the document, formatted with one-word-per-line
      and various tab-separated attributes appended on each line, generated from the 'annotator.tokenAnnotationString' method. */
  def owplString(annotator:DocumentAnnotator): String = annotator match {
    case pipeline:DocumentAnnotationPipeline => owplString(pipeline.annotators.map(a => a.tokenAnnotationString(_)))
    case annotator:DocumentAnnotator => owplString(Seq(annotator.tokenAnnotationString(_)))
  }
  /** Return the Section that contains the pair of string offsets into the document. */
  def getSectionByOffsets(strStart:Int, strEnd:Int):Option[Section] =
    this.sections.map(sec => (sec.stringStart, sec.stringEnd, sec)).sortBy(_._1)
      .find{case(start, end, _) => start <= strStart && end >= strEnd}.map(_._3)
}
/** Used as an attribute on Document to hold the document's name. */
case class DocumentName(string: String) {
  // A DocumentName renders as the bare name string itself.
  override def toString: String = string
}
// TODO Consider removing DocumentCubbie because this implementation is inefficient,
// and it isn't sensible that everyone would want the same selection of saved items.
/** A Cubbie for serializing a Document, with separate slots for the Tokens, Sentences, and TokenSpans.
Note that it does not yet serialize Sections, and relies on Document.asSection being the only Section. */
//class DocumentCubbie[TC<:TokenCubbie,SC<:SentenceCubbie,TSC<:TokenSpanCubbie](val tc:()=>TC, val sc:()=>SC, val tsc:()=>TSC) extends Cubbie with AttrCubbieSlots {
// val name = StringSlot("name")
// val string = StringSlot("string")
// val tokens = CubbieListSlot("tokens", tc)
// val sentences = CubbieListSlot("sentences", sc)
// val spans = CubbieListSlot("spans", tsc)
// def storeDocument(doc:Document): this.type = {
// name := doc.name
// string := doc.string
// if (doc.asSection.length > 0) tokens := doc.tokens.toSeq.map(t => tokens.constructor().storeToken(t))
//// if (doc.spans.length > 0) spans := doc.spans.map(s => spans.constructor().store(s))
// if (doc.asSection.sentences.length > 0) sentences := doc.sentences.toSeq.map(s => sentences.constructor().storeSentence(s))
// storeAttr(doc)
// this
// }
// def fetchDocument: Document = {
// val doc = new Document(string.value).setName(name.value)
// if (tokens.value ne null) tokens.value.foreach(tc => doc.asSection += tc.fetchToken)
// //if (spans.value ne null) spans.value.foreach(sc => doc += sc.fetch(doc))
// if (sentences.value ne null) sentences.value.foreach(sc => sc.fetchSentence(doc.asSection))
// fetchAttr(doc)
// doc
// }
//}
// TODO Consider moving this to file util/Attr.scala
//trait AttrCubbieSlots extends Cubbie {
// val storeHooks = new cc.factorie.util.Hooks1[Attr]
// val fetchHooks = new cc.factorie.util.Hooks1[AnyRef]
// def storeAttr(a:Attr): this.type = { storeHooks(a); this }
// def fetchAttr(a:Attr): Attr = { fetchHooks(a); a }
//}
//
//trait DateAttrCubbieSlot extends AttrCubbieSlots {
// val date = DateSlot("date")
// storeHooks += ((a:Attr) => date := a.attr[java.util.Date])
// //fetchHooks += ((a:Attr) => a.attr += date.value)
// fetchHooks += { case a:Attr => a.attr += date.value }
//}
| patverga/factorie | src/main/scala/cc/factorie/app/nlp/Document.scala | Scala | apache-2.0 | 15,957 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import org.apache.hadoop.hive.serde.serdeConstants
import org.scalatest.BeforeAndAfterAll
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.hive.client.{ExternalTable, HiveColumn, HiveTable, ManagedTable}
/** Verifies that HiveQl parses CREATE TABLE ... AS SELECT (CTAS) statements into the
  * expected HiveTable descriptions, and that invalid interval literals are rejected. */
class HiveQlSuite extends SparkFunSuite with BeforeAndAfterAll {
  /** Parse `sql` and return the (table description, allowExisting flag) of the first
    * CreateTableAsSelect node in the resulting plan.
    * Note: `.head` throws if the plan contains no CTAS node, failing the test. */
  private def extractTableDesc(sql: String): (HiveTable, Boolean) = {
    HiveQl.createPlan(sql).collect {
      case CreateTableAsSelect(desc, _, allowExisting) => (desc, allowExisting)
    }.head
  }

  test("Test CTAS #1") {
    val s1 =
      """CREATE EXTERNAL TABLE IF NOT EXISTS mydb.page_view
        |(viewTime INT,
        |userid BIGINT,
        |page_url STRING,
        |referrer_url STRING,
        |ip STRING COMMENT 'IP Address of the User',
        |country STRING COMMENT 'country of origination')
        |COMMENT 'This is the staging page view table'
        |PARTITIONED BY (dt STRING COMMENT 'date type', hour STRING COMMENT 'hour of the day')
        |ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\054' STORED AS RCFILE
        |LOCATION '/user/external/page_view'
        |TBLPROPERTIES ('p1'='v1', 'p2'='v2')
        |AS SELECT * FROM src""".stripMargin

    val (desc, exists) = extractTableDesc(s1)
    // IF NOT EXISTS must be captured as the allowExisting flag.
    assert(exists)
    assert(desc.specifiedDatabase == Some("mydb"))
    assert(desc.name == "page_view")
    assert(desc.tableType == ExternalTable)
    assert(desc.location == Some("/user/external/page_view"))
    assert(desc.schema ==
      HiveColumn("viewtime", "int", null) ::
      HiveColumn("userid", "bigint", null) ::
      HiveColumn("page_url", "string", null) ::
      HiveColumn("referrer_url", "string", null) ::
      HiveColumn("ip", "string", "IP Address of the User") ::
      HiveColumn("country", "string", "country of origination") :: Nil)
    // TODO will be SQLText
    assert(desc.viewText == Option("This is the staging page view table"))
    assert(desc.partitionColumns ==
      HiveColumn("dt", "string", "date type") ::
      HiveColumn("hour", "string", "hour of the day") :: Nil)
    assert(desc.serdeProperties ==
      Map((serdeConstants.SERIALIZATION_FORMAT, "\\054"), (serdeConstants.FIELD_DELIM, "\\054")))
    assert(desc.inputFormat == Option("org.apache.hadoop.hive.ql.io.RCFileInputFormat"))
    assert(desc.outputFormat == Option("org.apache.hadoop.hive.ql.io.RCFileOutputFormat"))
    assert(desc.serde == Option("org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe"))
    assert(desc.properties == Map(("p1", "v1"), ("p2", "v2")))
  }

  test("Test CTAS #2") {
    val s2 =
      """CREATE EXTERNAL TABLE IF NOT EXISTS mydb.page_view
        |(viewTime INT,
        |userid BIGINT,
        |page_url STRING,
        |referrer_url STRING,
        |ip STRING COMMENT 'IP Address of the User',
        |country STRING COMMENT 'country of origination')
        |COMMENT 'This is the staging page view table'
        |PARTITIONED BY (dt STRING COMMENT 'date type', hour STRING COMMENT 'hour of the day')
        |ROW FORMAT SERDE 'parquet.hive.serde.ParquetHiveSerDe'
        | STORED AS
        | INPUTFORMAT 'parquet.hive.DeprecatedParquetInputFormat'
        | OUTPUTFORMAT 'parquet.hive.DeprecatedParquetOutputFormat'
        |LOCATION '/user/external/page_view'
        |TBLPROPERTIES ('p1'='v1', 'p2'='v2')
        |AS SELECT * FROM src""".stripMargin

    val (desc, exists) = extractTableDesc(s2)
    assert(exists)
    assert(desc.specifiedDatabase == Some("mydb"))
    assert(desc.name == "page_view")
    assert(desc.tableType == ExternalTable)
    assert(desc.location == Some("/user/external/page_view"))
    assert(desc.schema ==
      HiveColumn("viewtime", "int", null) ::
      HiveColumn("userid", "bigint", null) ::
      HiveColumn("page_url", "string", null) ::
      HiveColumn("referrer_url", "string", null) ::
      HiveColumn("ip", "string", "IP Address of the User") ::
      HiveColumn("country", "string", "country of origination") :: Nil)
    // TODO will be SQLText
    assert(desc.viewText == Option("This is the staging page view table"))
    assert(desc.partitionColumns ==
      HiveColumn("dt", "string", "date type") ::
      HiveColumn("hour", "string", "hour of the day") :: Nil)
    // Explicit SERDE without SERDEPROPERTIES: no serde properties expected.
    assert(desc.serdeProperties == Map())
    assert(desc.inputFormat == Option("parquet.hive.DeprecatedParquetInputFormat"))
    assert(desc.outputFormat == Option("parquet.hive.DeprecatedParquetOutputFormat"))
    assert(desc.serde == Option("parquet.hive.serde.ParquetHiveSerDe"))
    assert(desc.properties == Map(("p1", "v1"), ("p2", "v2")))
  }

  test("Test CTAS #3") {
    // Minimal CTAS: everything falls back to managed-table defaults.
    val s3 = """CREATE TABLE page_view AS SELECT * FROM src"""
    val (desc, exists) = extractTableDesc(s3)
    assert(!exists)
    assert(desc.specifiedDatabase.isEmpty)
    assert(desc.name == "page_view")
    assert(desc.tableType == ManagedTable)
    assert(desc.location.isEmpty)
    assert(desc.schema == Seq.empty[HiveColumn])
    assert(desc.viewText.isEmpty) // TODO will be SQLText
    assert(desc.serdeProperties == Map())
    assert(desc.inputFormat == Option("org.apache.hadoop.mapred.TextInputFormat"))
    assert(desc.outputFormat == Option("org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat"))
    assert(desc.serde.isEmpty)
    assert(desc.properties == Map())
  }

  test("Test CTAS #4") {
    // STORED BY (storage handlers) is unsupported for CTAS and must be rejected.
    val s4 =
      """CREATE TABLE page_view
        |STORED BY 'storage.handler.class.name' AS SELECT * FROM src""".stripMargin
    intercept[AnalysisException] {
      extractTableDesc(s4)
    }
  }

  test("Test CTAS #5") {
    val s5 = """CREATE TABLE ctas2
               | ROW FORMAT SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"
               | WITH SERDEPROPERTIES("serde_p1"="p1","serde_p2"="p2")
               | STORED AS RCFile
               | TBLPROPERTIES("tbl_p1"="p11", "tbl_p2"="p22")
               | AS
               |   SELECT key, value
               |   FROM src
               |   ORDER BY key, value""".stripMargin
    val (desc, exists) = extractTableDesc(s5)
    assert(!exists)
    assert(desc.specifiedDatabase.isEmpty)
    assert(desc.name == "ctas2")
    assert(desc.tableType == ManagedTable)
    assert(desc.location.isEmpty)
    assert(desc.schema == Seq.empty[HiveColumn])
    assert(desc.viewText.isEmpty) // TODO will be SQLText
    assert(desc.serdeProperties == Map(("serde_p1" -> "p1"), ("serde_p2" -> "p2")))
    assert(desc.inputFormat == Option("org.apache.hadoop.hive.ql.io.RCFileInputFormat"))
    assert(desc.outputFormat == Option("org.apache.hadoop.hive.ql.io.RCFileOutputFormat"))
    assert(desc.serde == Option("org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"))
    assert(desc.properties == Map(("tbl_p1" -> "p11"), ("tbl_p2" -> "p22")))
  }

  test("Invalid interval term should throw AnalysisException") {
    /** Expect parsing of `sql` to fail with an AnalysisException containing `errorMessage`. */
    def assertError(sql: String, errorMessage: String): Unit = {
      val e = intercept[AnalysisException] {
        HiveQl.parseSql(sql)
      }
      assert(e.getMessage.contains(errorMessage))
    }
    assertError("select interval '42-32' year to month",
      "month 32 outside range [0, 11]")
    assertError("select interval '5 49:12:15' day to second",
      "hour 49 outside range [0, 23]")
    assertError("select interval '.1111111111' second",
      "nanosecond 1111111111 outside range")
  }
}
| pronix/spark | sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveQlSuite.scala | Scala | apache-2.0 | 8,209 |
/*
* Copyright 2018 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package unit.controllers
import scala.concurrent.Future
import org.joda.time.LocalDate
import org.mockito.Matchers
import org.mockito.Mockito._
import org.scalatest.mockito.MockitoSugar
import connectors.{HodsApiConnector, LockConnector}
import controllers._
import play.api.mvc.Results._
import play.api.test.FakeRequest
import schedule.SchedulingService
import uk.gov.hmrc.http.HeaderCarrier
import uk.gov.hmrc.play.test.UnitSpec
/** Unit tests for ConsoleController; all collaborators are Mockito mocks. */
class ConsoleControllerSpec extends UnitSpec with MockitoSugar {
  // Fixed ISO (yyyy-MM-dd) date string fed to the endpoint under test.
  val December242015 = "2015-12-24"
  /** Per-test fixture: fresh mocks and a ConsoleController wired with them. */
  trait Setup {
    val schedulingService: SchedulingService = mock[SchedulingService]
    val lockConnector: LockConnector = mock[LockConnector]
    val hodsApiConnector: HodsApiConnector = mock[HodsApiConnector]
    val emisReportService: EmisReportService = mock[EmisReportService]
    val emisReportController: EmisReportController = mock[EmisReportController]
    val consoleController = new ConsoleController(
      schedulingService,
      lockConnector,
      emisReportService,
      hodsApiConnector,
      emisReportController
    )
  }
  "The downloadAndProcessWithLock" should {
    "use the supplied date" in new Setup {
      // Stub succeeds only for the parsed 2015-12-24 LocalDate with force = false,
      // so the await below implicitly verifies the date is passed through correctly.
      when(emisReportController.triggerEmisDownloadFor(downloadDate = Matchers.eq(new LocalDate(2015,12,24)), chunkSize = Matchers.any(), force = Matchers.eq(false))(Matchers.any())).thenReturn(Future.successful(Ok("")))
      // Lock create/release stubs both report success for the "worldpay-downloader" lock name.
      when(lockConnector.create(Matchers.eq("worldpay-downloader"))(Matchers.any[HeaderCarrier])).thenReturn(Future.successful(true))
      when(lockConnector.release(Matchers.eq("worldpay-downloader"))(Matchers.any[HeaderCarrier])).thenReturn(Future.successful(true))
      val req = FakeRequest()
      await(consoleController.downloadAndProcessWithLock(December242015)(req))
    }
  }
}
| hmrc/worldpay-downloader | test/unit/controllers/ConsoleControllerSpec.scala | Scala | apache-2.0 | 2,391 |
// P24 Lotto: Draw N different random numbers from the set 1..M.
import util.Random
// The obvious answer is P23.f1(n, (1 to m).toList)
// Let's use "distinct" for a different solution
/** P24 (Lotto): draws `n` random numbers from the set 1..`m`, without repetition.
  * NOTE(review): the type parameter `T` is never used — presumably leftover; confirm before removing.
  * NOTE(review): if n > m there are fewer than n distinct values available, so
  * `distinct.take(n)` can never complete and this call will not terminate;
  * consider guarding with `require(n <= m)`. */
def f1[T](n: Int, m: Int): List[Int] =
  Stream.continually(1 + Random.nextInt(m)).distinct.take(n).toList | pavelfatin/ninety-nine | scala/P24.scala | Scala | gpl-3.0 | 293 |
/*
* Copyright 2016-2018 Michal Harish, michal.harish@gmail.com
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.amient.affinity.example
import akka.http.scaladsl.model.HttpMethods
import akka.util.Timeout
import io.amient.affinity.avro.record.{AvroRecord, Fixed}
import io.amient.affinity.core.ack
import io.amient.affinity.core.actor.{GatewayHttp, GatewayStream, Partition, Routed}
import io.amient.affinity.core.http.RequestMatchers.{HTTP, INT, PATH, QUERY}
import io.amient.affinity.core.storage.Record
import io.amient.affinity.core.util._
import scala.concurrent.duration._
import scala.language.postfixOps
/** Bank account identifier: branch sort code plus account number (Avro-serializable). */
case class Account(sortcode: String, number: Int) extends AvroRecord
/** One monetary transaction; `timestamp` provides the event time used by time-range queries. */
case class Transaction(id: Long, amount: Double, timestamp: Long) extends AvroRecord with EventTime {
  // EventTime contract: report the transaction's own timestamp as the record's event time.
  override def eventTimeUnix() = timestamp
}
/** Gateway for the example bank: ingests transactions from "input-stream" into the
    "default" keyspace and serves read-side queries over HTTP. */
class ExampleBank extends GatewayStream with GatewayHttp {
  implicit val executor = context.dispatcher
  implicit val scheduler = context.system.scheduler
  // Timeout applied to the keyspace ask operations (?! / ??) below.
  implicit val timeout = Timeout(5 seconds)
  val defaultKeyspace = keyspace("default")
  // Ingest path: every record on "input-stream" becomes a StoreTransaction routed to its partition.
  input[Account, Transaction]("input-stream") { record: Record[Account, Transaction] =>
    defaultKeyspace ?! StoreTransaction(record.key, record.value)
  }
  override def handle: Receive = {
    // GET /transactions/<sortcode>/<number> -> transactions of a single account
    case HTTP(HttpMethods.GET, PATH("transactions", sortcode, INT(number)), _, response) =>
      defaultKeyspace ?! GetAccountTransactions(Account(sortcode, number)) map (handleAsJson(response, _))
    // GET /transactions/<sortcode>?before=yyyy-mm-dd -> branch transactions with event time
    // strictly before midnight UTC of the given date (the "+00:00" offset is appended here).
    case HTTP(HttpMethods.GET, PATH("transactions", sortcode), QUERY(("before", before)), response) =>
      defaultKeyspace ?? GetBranchTransactions(sortcode, EventTime.unix(before+"T00:00:00+00:00")) map (handleAsJson(response, _))
    // GET /transactions/<sortcode> -> all branch transactions (no time bound)
    case HTTP(HttpMethods.GET, PATH("transactions", sortcode), _, response) =>
      defaultKeyspace ?? GetBranchTransactions(sortcode) map (handleAsJson(response, _))
  }
}
/** Command routed by `key`: persist transaction `t`; declared reply type is Option[Transaction]. */
case class StoreTransaction(key: Account, t: Transaction) extends AvroRecord with Routed with Reply[Option[Transaction]]
/** Storage key; fixed-width sortcode/account fields form the prefix used for range lookups. */
case class StorageKey(@Fixed(8) sortcode: String, @Fixed account: Int, txn: Long) extends AvroRecord
/** Query routed by account key: all transactions of a single account. */
case class GetAccountTransactions(key: Account) extends AvroRecord with Routed with Reply[Seq[Transaction]]
/** Scatter query across partitions: branch transactions, optionally bounded by event time. */
case class GetBranchTransactions(sortcode: String, beforeUnixTs: Long = Long.MaxValue) extends AvroRecord with ScatterIterable[Transaction]
/** Partition actor for the "default" keyspace: stores transactions keyed by
    (sortcode, account, txn id) and answers the queries defined above. */
class DefaultPartition extends Partition {
  // State store; the StorageKey's sortcode/account components act as range-scan prefixes below.
  val transactions = state[StorageKey, Transaction]("transactions")
  implicit val executor = context.dispatcher
  override def handle: Receive = {
    case request@StoreTransaction(Account(sortcode, number), transaction) => request(sender) ! {
      transactions.replace(StorageKey(sortcode, number, transaction.id), transaction)
    } map {
      _ => context.system.eventStream.publish(request) //this is only to have determinist way of testing all data was processed
    }
    // Branch query: everything under the sortcode prefix, up to the requested event time.
    case request@GetBranchTransactions(sortcode, before) =>
      request(sender) ! transactions.range(TimeRange.until(before), sortcode).values.toList
    // Account query: narrower prefix (sortcode + account number), unbounded in time.
    case request@GetAccountTransactions(account) => request(sender) ! {
      transactions.range(TimeRange.UNBOUNDED, account.sortcode, account.number).values.toList
    }
  }
} | amient/affinity | examples/example-range-lookups/src/test/scala/io/amient/affinity/example/ExampleBank.scala | Scala | apache-2.0 | 3,978 |
package org.jetbrains.plugins.scala.worksheet.ui
import java.util.regex.Pattern
import com.intellij.codeInsight.daemon.DaemonCodeAnalyzer
import com.intellij.openapi.command.WriteCommandAction
import com.intellij.openapi.editor.{Editor, LogicalPosition}
import com.intellij.openapi.module.Module
import com.intellij.openapi.util.TextRange
import com.intellij.openapi.util.text.StringUtil
import com.intellij.psi.scope.PsiScopeProcessor
import com.intellij.psi._
import org.jetbrains.plugins.scala.extensions
import org.jetbrains.plugins.scala.extensions.implementation.iterator.PrevSiblignsIterator
import org.jetbrains.plugins.scala.lang.completion.ScalaCompletionUtil
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.{FileDeclarationsHolder, ScalaFile}
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScExpression
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScPatternDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScClass, ScObject}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.worksheet.actions.{RunWorksheetAction, WorksheetGotoResNHandler}
import org.jetbrains.plugins.scala.worksheet.interactive.WorksheetAutoRunner
import org.jetbrains.plugins.scala.worksheet.processor.{WorksheetCompiler, WorksheetInterpretExprsIterator, WorksheetPsiGlue}
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
/**
* User: Dmitry.Naydanov
* Date: 07.02.17.
*/
class WorksheetIncrementalEditorPrinter(editor: Editor, viewer: Editor, file: ScalaFile)
extends WorksheetEditorPrinterBase(editor, viewer) {
import WorksheetIncrementalEditorPrinter._
private var lastProcessed: Option[Int] = None
private var currentFile = file
private var hasErrors = false
private var hasMessages = false
private val outputBuffer = new StringBuilder
private val messagesBuffer = new StringBuilder
private val psiToProcess = mutable.Queue[QueuedPsi]()
private val inputToOutputMapping = mutable.ListBuffer[(Int, Int)]()
  /** Deletes viewer (output editor) content from line `ln` to the end.
      For ln == 0 the whole viewer document is reset asynchronously;
      otherwise lines [ln, lastLine] are removed inside a write command action. */
  private def cleanViewerFrom(ln: Int) {
    if (ln == 0) {
      extensions.invokeLater {
        extensions.inWriteAction {
          simpleUpdate("", viewerDocument)
        }
      }
      return
    }
    WriteCommandAction.runWriteCommandAction(project, new Runnable {
      override def run(): Unit =
        viewerDocument.deleteString (
          viewerDocument.getLineStartOffset(ln),
          viewerDocument.getLineEndOffset(viewerDocument.getLineCount - 1)
        )
    })
  }
  /** Rebuilds the queue of psi chunks still to be evaluated.
      First trims viewer output and the input->output mapping back to the viewer line
      corresponding to the last processed input line, then collects all expressions
      after that line into psiToProcess. */
  private def fetchNewPsi() {
    lastProcessed match {
      case Some(lineNumber) =>
        // Find the viewer line the last processed input line maps to; drop everything after it.
        val i = inputToOutputMapping.lastIndexWhere(_._1 == lineNumber)
        if (i == -1) cleanViewerFrom(0) else {
          val j = inputToOutputMapping.apply(i)._2
          if (j + 1 < viewerDocument.getLineCount) cleanViewerFrom(j + 1)
          if (inputToOutputMapping.length > j + 1) inputToOutputMapping.remove(j + 1, inputToOutputMapping.length - j - 1)
        }
      case _ => cleanViewerFrom(0)
    }
    psiToProcess.clear()
    // Re-collect the expressions to interpret, starting after lastProcessed.
    val buffer = mutable.ListBuffer[QueuedPsi]()
    val glue = new WorksheetPsiGlue(buffer)
    new WorksheetInterpretExprsIterator(getScalaFile, Option(originalEditor), lastProcessed).collectAll(
      glue.processPsi, None
    )
    psiToProcess.enqueue(buffer: _*)
  }
private def clearMessages() {
hasMessages = false
hasErrors = false
}
private def clearBuffer() {
outputBuffer.clear()
messagesBuffer.clear()
}
  /** The ScalaFile currently backing this printer; replaced via updateScalaFile. */
  override def getScalaFile: ScalaFile = currentFile
  /** Consumes one raw line of REPL output.
      Recognizes the control markers emitted by the REPL process (run start, chunk end,
      last-chunk-processed, compiler messages) and accumulates ordinary output lines.
      @return true when processing should stop (all chunks done, or a fatal error). */
  override def processLine(line: String): Boolean = {
    if (!isInited) init()
    line.trim match {
      case REPL_START =>
        // A new evaluation run begins: recompute the psi chunk queue and reset state.
        fetchNewPsi()
        if (lastProcessed.isEmpty) cleanFoldings()
        clearMessages()
        clearBuffer()
        false
      case REPL_LAST_CHUNK_PROCESSED =>
        flushBuffer()
        refreshLastMarker()
        true
      case REPL_CHUNK_END =>
        // Flush the finished chunk; stop further processing if it produced errors.
        if (hasErrors) refreshLastMarker()
        flushBuffer()
        hasErrors
      case ReplMessage(info) =>
        // Start of a compiler message block; subsequent lines go to messagesBuffer.
        messagesBuffer.append(info.msg).append("\\n")
        hasMessages = true
        false
      case "" => //do nothing
        false
      case outputLine =>
        if (hasMessages) {
          // Inside a message block; a lone "^" (caret line) terminates it.
          messagesBuffer.append(line).append("\\n")
          outputLine == "^" && {hasMessages = false; processMessage()}
        } else {
          outputBuffer.append(augmentLine(outputLine)).append("\\n")
          false
        }
    }
  }
  /** Appends the accumulated output of the just-finished chunk to the viewer,
      aligning it vertically with the corresponding input lines, updating the
      input->output line mapping and (when the output is taller than the input)
      the code foldings. */
  override def flushBuffer(): Unit = {
    if (psiToProcess.isEmpty) return // empty output is possible see SCL-11720
    val str = outputBuffer.toString().trim
    outputBuffer.clear()
    val queuedPsi: QueuedPsi = psiToProcess.dequeue()
    if (!queuedPsi.isValid) return //warning here?
    // Line counts of this chunk's output text and its source text.
    val linesOutput = countNewLines(str) + 1
    val linesInput = countNewLines(queuedPsi.getText) + 1
    @inline def originalLn(offset: Int) = originalDocument getLineNumber offset
    val originalTextRange = queuedPsi.getWholeTextRange
    val processedStartLine = originalLn(queuedPsi.getFirstProcessedOffset)
    val processedStartEndLine = originalLn(queuedPsi.getLastProcessedOffset)
    val processedEndLine = originalLn(originalTextRange.getEndOffset)
    // After the very first chunk, one extra newline separates consecutive outputs.
    val firstOffsetFix = if (lastProcessed.isEmpty) 0 else 1
    lastProcessed = Some(processedStartEndLine)
    WorksheetAutoRunner.getInstance(project).replExecuted(originalDocument, originalTextRange.getEndOffset)
    extensions.invokeLater {
      WriteCommandAction.runWriteCommandAction(project, new Runnable {
        override def run(): Unit = {
          val oldLinesCount = viewerDocument.getLineCount
          // Blank lines needed to bring the viewer down to the input's start line.
          val baseDiff = Math.max(processedStartLine - viewerDocument.getLineCount - 1, 0) + queuedPsi.getBaseDiff
          val prefix = getNewLines(baseDiff + firstOffsetFix)
          simpleAppend(prefix, viewerDocument)
          var addedDiff = 0
          // Emit each output piece, padding with newlines to match its source position.
          queuedPsi.getPrintStartOffset(str) foreach {
            case (absoluteOffset, relativeOffset, outputChunk) =>
              val df = originalLn(absoluteOffset) - originalLn(absoluteOffset - relativeOffset)
              addedDiff += df
              val currentPrefix = getNewLines(df)
              simpleAppend(currentPrefix + outputChunk, viewerDocument)
          }
          inputToOutputMapping.append((processedStartEndLine, linesOutput + baseDiff + addedDiff - 1 + viewerDocument.getLineCount))
          saveEvaluationResult(viewerDocument.getText)
          if (linesOutput > linesInput) {
            // Output taller than input: fold the excess so both sides stay aligned.
            val lineCount = viewerDocument.getLineCount
            updateFoldings(Seq((oldLinesCount + baseDiff + firstOffsetFix - 1, viewerDocument.getLineEndOffset(lineCount - 1), linesInput, processedEndLine)))
          }
        }
      })
    }
  }
  /*
   Looks like we don't need any flushing here: output is pushed to the viewer
   eagerly by processLine/flushBuffer, so there is nothing to schedule.
   */
  override def scheduleWorksheetUpdate(): Unit = {}
/**
 * @return number of the last input line processed by the REPL,
 *         or None if nothing has been processed yet
 */
def getLastProcessedLine: Option[Int] = lastProcessed
/** Overrides the recorded number of the last processed input line. */
def setLastProcessedLine(i: Option[Int]): Unit = {
  lastProcessed = i
}
/** Replaces the Scala file this printer operates on. */
def updateScalaFile(file: ScalaFile): Unit = {
  currentFile = file
}
/**
 * Replaces a JVM-generated "$Lambda$..." class name in a REPL output line
 * with the readable placeholder "<function>"; lines without the marker are
 * returned unchanged.
 */
private def augmentLine(inputLine: String): String = {
  val lambdaStart = inputLine.indexOf("$Lambda$")
  if (lambdaStart < 0) inputLine
  else {
    val prefix = inputLine.substring(0, Math.max(lambdaStart - 1, 0))
    val suffixFrom = Math.min(inputLine.length, lambdaStart + LAMBDA_LENGTH + 1)
    prefix + "<function>" + inputLine.substring(suffixFrom)
  }
}
/**
 * Reports the next pending compiler/interpreter message for the worksheet.
 *
 * @return true if a fatal error was encountered and evaluation should stop
 */
private def processMessage(): Boolean = {
  if (psiToProcess.isEmpty) return false

  val currentPsi = psiToProcess.head
  val offset = currentPsi.getWholeTextRange.getStartOffset
  val str = messagesBuffer.toString().trim
  messagesBuffer.clear()

  // Bug fix: the fallback used to be a Tuple4, which cannot match the
  // irrefutable MessageInfo extractor below and caused a MatchError at
  // runtime whenever extractInfoFromAllText failed to parse the text.
  val MessageInfo(msg, vertOffset, horizontalOffset, severity) =
    extractInfoFromAllText(str).getOrElse(MessageInfo(str, 0, 0, WorksheetCompiler.InfoSeverity))

  // Translate the input offset plus the message-relative deltas into an
  // editor position.
  val position = {
    val p = extensions.inReadAction { originalEditor.offsetToLogicalPosition(offset) }
    new LogicalPosition(p.line + vertOffset, p.column + horizontalOffset)
  }

  val isFatal = severity.isFatal
  // Only fatal errors move the caret to the reported position.
  val onError = if (isFatal) () => {originalEditor.getCaretModel moveToLogicalPosition position} else () => {}
  WorksheetCompiler.showCompilationMessage(
    getScalaFile.getVirtualFile, severity, position.line, position.column, project, onError,
    msg.split('\\n').map(_.trim).filter(_.length > 0))

  if (isFatal) {
    hasErrors = true
    psiToProcess.dequeue()
  }

  hasErrors
}
/**
 * Parses a raw console message into a MessageInfo: strips the trailing
 * caret line, the leading "<line>:" console prefix and the
 * "error: "/"warning: " severity tag.
 *
 * @param toMatch raw message text (must contain at least one newline)
 * @return parsed message, or None if the text has no newline or no
 *         recognizable severity prefix
 */
def extractInfoFromAllText(toMatch: String): Option[MessageInfo] = {
  val indexOfNl = toMatch.lastIndexOf('\\n')
  if (indexOfNl == -1) return None
  // Horizontal offset is taken from the position of the '^' caret on the
  // last line, when present.
  val indexOfC = toMatch.lastIndexOf('^')
  val horOffset = if (indexOfC < indexOfNl) 0 else indexOfC - indexOfNl
  val allMessageStrings = toMatch.substring(0, indexOfNl)
  // Strip the "<number>:" console prefix, remembering the console line number.
  val matcher = CONSOLE_MESSAGE_PATTERN matcher allMessageStrings
  val (textWoConsoleLine, lineNumStr) = if (matcher.find()) (allMessageStrings.substring(matcher.end()), matcher.group(1)) else (allMessageStrings, "0")
  val (textWoSeverity, severity) = textWoConsoleLine match {
    case error if error.startsWith("error: ") =>
      (error.substring("error: ".length), WorksheetCompiler.ErrorSeverity)
    case warning if warning.startsWith("warning: ") =>
      (warning.substring("warning: ".length), WorksheetCompiler.WarningSeverity)
    case _ => return None
  }
  val (finalText, vertOffset) = {
    splitLineNumberFromRepl(textWoSeverity) match {
      case Some(a) => a
      case _ => // we still have a fall back variant here as some errors aren't raised from the text of our input
        (textWoSeverity, Integer.parseInt(lineNumStr) - getConsoleHeaderLines(RunWorksheetAction getModuleFor getScalaFile))
    }
  }
  Option(MessageInfo(finalText, vertOffset, horOffset, severity))
}
/** Re-runs the daemon highlighting pass for the whole worksheet file. */
private def refreshLastMarker() {
  rehighlight(getScalaFile)
}
}
object WorksheetIncrementalEditorPrinter {
  // Marker tokens the patched REPL emits to delimit worksheet chunks.
  private val REPL_START = "$$worksheet$$repl$$start$$"
  private val REPL_CHUNK_END = "$$worksheet$$repl$$chunk$$end$$"
  private val REPL_LAST_CHUNK_PROCESSED = "$$worksheet$$repl$$last$$chunk$$processed$$"

  // Prefix of error/warning lines printed by the Scala console.
  private val CONSOLE_ERROR_START = "<console>:"

  // Matches the leading "<number>:" part of a console message, capturing the
  // line number in group 1.
  private val CONSOLE_MESSAGE_PATTERN = {
    val regex = "\\\\s*(\\\\d+)" + Pattern.quote(":") + "\\\\s*"
    Pattern.compile(regex)
  }

  // Length of the generated "$Lambda$..." suffix replaced by augmentLine.
  private val LAMBDA_LENGTH = 32

  // Number of synthetic header lines the Scala console prepends before user
  // code; depends on the module SDK's Scala version/language level.
  private def getConsoleHeaderLines(module: Module): Int = {
    import org.jetbrains.plugins.scala.project._
    import org.jetbrains.plugins.scala.project.ScalaLanguageLevel._
    val before = 7
    val after = 11
    module.scalaSdk.map(
      sdk => (sdk.compilerVersion, sdk.languageLevel)
    ) map {
      case (v, l) => l match {
        case Scala_2_8 | Scala_2_9 | Scala_2_10 => before
        // NOTE(review): only compiler versions literally starting with
        // "2.11.8" get `after` — confirm whether later 2.11.x should too.
        case Scala_2_11 => if (v.exists(_ startsWith "2.11.8")) after else before
        case _ => after
      }
    } getOrElse after
  }

  // Feeds synthetic `val resN = <expr>` declarations (one per top-level
  // expression child without the dummy completion identifier) to the resolve
  // processor, so `resN` references resolve in the worksheet.
  // Returns false as soon as the processor stops the traversal.
  def executeResNDeclarations(processor: PsiScopeProcessor, file: FileDeclarationsHolder, state: ResolveState): Boolean = {
    var ind = 0
    file.getChildren foreach {
      case expr: ScExpression =>
        val text = expr.getText
        if (!text.contains(ScalaCompletionUtil.DUMMY_IDENTIFIER)) {
          val name = s"res$ind"
          // Collect all occurrences of "resN" inside the expression text.
          val inds = ArrayBuffer[Int]()
          val m = name.r.pattern.matcher(text)
          while (m.find()) {
            inds += m.start()
          }
          // Skip the synthetic declaration if the expression itself already
          // uses an identifier token named resN.
          val skip = inds exists {
            idx => file.findElementAt(expr.getTextRange.getStartOffset + idx + 1) match {
              case psi: PsiElement if psi.getNode.getElementType == ScalaTokenTypes.tIDENTIFIER =>
                true
              case _ => false
            }
          }
          if (!skip) ScalaPsiElementFactory.createDefinitionWithContext(s" val res$ind = $text", file, expr) match {
            case patternDef: ScPatternDefinition =>
              patternDef.declaredElements foreach {
                declared =>
                  // Allow "go to declaration" to jump back to the original expr.
                  declared.putUserData(WorksheetGotoResNHandler.WORKSHEET_GOTO_PSI_KEY, expr)
                  if (!processor.execute(declared, state)) return false
              }
            case _ =>
          }
          ind += 1
        }
      case _ =>
    }
    true
  }

  /** Counts '\\n' characters in `str` (delegates to IntelliJ's StringUtil). */
  def countNewLines(str: String): Int = StringUtil countNewLines str

  /** Restarts the daemon code analyzer (re-highlighting) for `file`. */
  def rehighlight(file: PsiFile) {
    DaemonCodeAnalyzer.getInstance(file.getProject).restart(file)
  }

  case class MessageStart(msg: String)

  // text: message body; verOffset/horOffset: caret deltas relative to the
  // originating input position; severity: error/warning/info.
  case class MessageInfo(text: String, verOffset: Int, horOffset: Int, severity: WorksheetCompiler.CompilationMessageSeverity)

  object ReplMessage {
    /** Extracts the payload of a "<console>:"-prefixed line, if it is one. */
    def unapply(arg: String): Option[MessageStart] =
      if (arg startsWith CONSOLE_ERROR_START) Option(MessageStart(arg substring CONSOLE_ERROR_START.length)) else None
  }

  // Splits the trailing " //<lineNumber>" marker (appended by
  // storeLineInfoRepl) from a REPL line; None if absent or not a number.
  def splitLineNumberFromRepl(line: String): Option[(String, Int)] = {
    val i = line.lastIndexOf("//")
    if (i == -1) return None
    try {
      Option((line.substring(0, i), Integer parseInt line.substring(i + 2).trim))
    } catch {
      case _: NumberFormatException => None
    }
  }

  /** One queued unit of worksheet input awaiting REPL output. */
  trait QueuedPsi {
    /**
     * @return whether the underlying psi element(s) are still valid
     */
    final def isValid: Boolean = extensions.inReadAction{ isValidImpl }

    protected def isValidImpl: Boolean

    /**
     * @return the whole corresponding input text
     */
    final def getText: String = extensions.inReadAction{ getTextImpl }

    protected def getTextImpl: String

    /**
     * @return input text range
     */
    def getWholeTextRange: TextRange

    /**
     * @param output the whole trimmed output from the interpreter
     * @return sequence of split output (absolute offset in input document, offset from the end of previous output token or from rel zero, output token text)
     */
    def getPrintStartOffset(output: String): Seq[(Int, Int, String)]

    def getFirstProcessedOffset: Int
    def getLastProcessedOffset: Int = getFirstProcessedOffset

    def getBaseDiff: Int

    // Skips over a leading comment (and any following comments/whitespace)
    // so processing starts at the first real code element.
    protected def computeStartPsi(psi: PsiElement): PsiElement = {
      val actualStart = psi.getFirstChild match {
        case comment: PsiComment =>
          var c = comment.getNextSibling
          while (c.isInstanceOf[PsiComment] || c.isInstanceOf[PsiWhiteSpace]) c = c.getNextSibling
          if (c != null) c else psi
        case _ => psi
      }

      actualStart
    }

    protected def psiToStartOffset(psi: PsiElement): Int = psi.getTextRange.getStartOffset

    // Counts newlines contributed by the whitespace/comment siblings directly
    // preceding `nextFrom`; stops at the first non-trivial sibling (with
    // baseline adjustments of -1/-2 depending on how the scan terminates).
    protected def countLinesWoCode(nextFrom: PsiElement): Int = {
      val it = new PrevSiblignsIterator(nextFrom)
      var counter = 1

      while (it.hasNext) it.next() match {
        case ws: PsiWhiteSpace => counter += countNewLines(ws.getText)
        case com: PsiComment => counter += countNewLines(com.getText)
        case null => return counter - 1
        case _ => return counter - 2
      }

      counter - 1
    }

    protected def startPsiOffset(psi: PsiElement): Int = psiToStartOffset(computeStartPsi(psi))

    // Appends " //<index>" to every line of the element's text so REPL output
    // can be mapped back to input lines (see splitLineNumberFromRepl).
    protected def getPsiTextWithCommentLine(psi: PsiElement): String =
      storeLineInfoRepl(StringUtil.splitByLines(psi.getText, false))

    protected def storeLineInfoRepl(lines: Array[String]): String = {
      lines.zipWithIndex.map {
        case (line, index) => line + s" //$index\\n"
      }.mkString("")
    }
  }

  /** A single top-level element evaluated as one REPL chunk. */
  case class SingleQueuedPsi(psi: PsiElement) extends QueuedPsi {
    override protected def isValidImpl: Boolean = psi.isValid

    override protected def getTextImpl: String = getPsiTextWithCommentLine(psi)

    override def getWholeTextRange: TextRange = psi.getTextRange

    override def getPrintStartOffset(output: String): Seq[(Int, Int, String)] = Seq((startPsiOffset(psi), 0, output))

    override def getBaseDiff: Int = countLinesWoCode(psi)

    override def getFirstProcessedOffset: Int = startPsiOffset(psi)
  }

  // A companion class/object pair evaluated together; `mid` is the text
  // between the two definitions and `isClazzFirst` records their order.
  case class ClassObjectPsi(clazz: ScClass, obj: ScObject, mid: String, isClazzFirst: Boolean) extends QueuedPsi {
    private val (first, second) = if (isClazzFirst) (clazz, obj) else (obj, clazz)

    override protected def isValidImpl: Boolean = clazz.isValid && obj.isValid

    override protected def getTextImpl: String = getPsiTextWithCommentLine(first) + mid + getPsiTextWithCommentLine(second)

    override def getWholeTextRange: TextRange = new TextRange(psiToStartOffset(first), second.getTextRange.getEndOffset)

    override def getPrintStartOffset(output: String): Seq[(Int, Int, String)] = {
      //we assume output is class A defined \\n class B defined
      val i = output.indexOf('\\n')
      val (one, two) = if (i == -1) (output, "") else output.splitAt(i)

      val firstOffset = startPsiOffset(first)
      val secondOffset = startPsiOffset(second)
      Seq((firstOffset, 0, one), (secondOffset, secondOffset - firstOffset, two.trim))
    }

    override def getBaseDiff: Int = countLinesWoCode(first)

    override def getFirstProcessedOffset: Int = startPsiOffset(first)

    override def getLastProcessedOffset: Int = startPsiOffset(second)
  }
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/worksheet/ui/WorksheetIncrementalEditorPrinter.scala | Scala | apache-2.0 | 17,518 |
package com.teambytes.awsleader.test.util
import akka.actor.Props
/**
 * Test helper that hands out the given Props one at a time, in order.
 * Not thread-safe; calling next() more times than there are entries fails
 * with an IndexOutOfBoundsException (List.apply out of range).
 */
class SeqRefsFactor(props: List[Props]) {

  // Position of the next Props to hand out.
  private var cursor = 0

  /** Returns the next Props; `batchSize` must equal 10 (asserted). */
  def next(batchSize: Int): Props = {
    assert(batchSize == 10)
    next()
  }

  /** Returns the next Props in sequence and advances the cursor. */
  def next(): Props = {
    val current = props(cursor)
    cursor += 1
    current
  }
}
| grahamar/aws-leader-election | src/multi-jvm/scala/com/teambytes/awsleader/test/util/SeqRefsFactor.scala | Scala | apache-2.0 | 305 |
package com.olvind
package mui
object MuiTypeMapper extends TypeMapper {
  // Generic placeholder type "T": once as a Scala generic, once as a JS generic.
  val typeT = Normal("T").generic("T")
  val typeTJs = Normal("T").genericJs("T")

  /**
   * Maps a Material-UI prop's documented type string to the Scala type used
   * by the generated facade, special-casing many (component, prop) pairs.
   */
  def apply(compName: CompName, fieldName: PropName, typeString: String): Type = {
    // True when the prop name contains `s` (case-insensitive).
    def is(s: String) =
      fieldName.value.toLowerCase contains s.toLowerCase

    // Tokenizes a oneOf/enum type string on quotes, brackets, commas and
    // whitespace, dropping the first `drop` tokens.
    def split(drop: Int, s: String) =
      s.split("[\\'\\"\\\\(\\\\)\\\\[\\\\],\\\\s]").map(_.trim).filterNot(_.isEmpty).drop(drop)

    //Please note that the following match statement is incredibly order-sensitive, if you change order around you can break things
    (compName.value, fieldName.value, typeString) match {
      // Special case (GridList): cellHeight is mapped loosely to Int.
      case (_, "cellHeight", _) => Normal("Int")
      // oneOfType: map each alternative recursively and join with "|".
      case (_, _, e) if e.contains("oneOfType") =>
        Normal(split(1, e) map (t => apply(compName, fieldName, t)) map (_.name) mkString " | ")
      // oneOf: generate a dedicated enum type for the component.
      case (_, _, enum) if enum.startsWith("Mui.oneOf") =>
        Enum(compName, split(1, enum))
      /* Double => Int */
      case (_, "autoHideDuration", "number") => Normal("Int")
      case (_, "cols", "number") => Normal("Int")
      case (_, "columnNumber", "number") => Normal("Int")
      case (_, "columnId", "number") => Normal("Int")
      case (_, "initialSelectedIndex", "number") => Normal("Int")
      case (_, "left", "number") => Normal("Int")
      case (_, "maxHeight", "number") => Normal("Int")
      case (_, "nestedLevel", "number") => Normal("Int")
      case (_, "padding", "number") => Normal("Int")
      case (_, "rowNumber", "number") => Normal("Int")
      case (_, "rows", "number") => Normal("Int")
      case (_, "rowsMax", "number") => Normal("Int")
      case (_, "selectedIndex", "number") => Normal("Int")
      case ("Avatar", "size", "number") => Normal("Int")
      case ("RefreshIndicator", "size", "number") => Normal("Int")
      case (_, "top", "number") => Normal("Int")
      case (_, "touchTapCloseDelay", "number") => Normal("Int")
      case (_, _, e) if e.toLowerCase.contains("index") => Normal("Int")
      /* specific */
      case ("AutoComplete", "dataSource", "Mui.array") => Normal("js.Array[String]")
      case ("DatePicker", "value", _) => Normal("js.Date")
      case ("DatePicker", "defaultDate", "Mui.object") => Normal("js.Date")
      case ("DatePicker", "maxDate", "Mui.object") => Normal("js.Date")
      case ("DatePicker", "minDate", "Mui.object") => Normal("js.Date")
      case ("DatePicker", "initialDate", "Mui.object") => Normal("js.Date")
      case ("Dialog", "width", "Mui.any") => Normal("Int")
      case ("DropDownMenu", "value", "Mui.any") => typeT
      case ("EnhancedSwitch", "value", "Mui.any") => typeT
      case ("RadioButtonGroup", "defaultSelected", "Mui.any") => typeT
      case ("RadioButtonGroup", "valueSelected", "Mui.any") => typeT
      case ("RadioButton", "value", "Mui.any") => typeT
      case ("Tab", "index", "Mui.any") => Normal("js.Any")
      case ("ListItem", "nestedItems", "Mui.arrayOf(Mui.element)") =>
        Normal("js.Array[VdomElement]")
      case ("Menu", "value", "Mui.any") => Normal("T | js.Array[T]").generic("T")
      case ("MenuItem", "value", "Mui.any") => typeT
      case ("SelectField", "selectFieldRoot", "Mui.object") => Normal("CssProperties")
      case ("SelectField", "value", "Mui.any") => typeT
      case ("Slider", "defaultValue", "valueInRangePropType") => Normal("Double")
      case ("Slider", "max", "minMaxPropType") => Normal("Double")
      case ("Slider", "min", "minMaxPropType") => Normal("Double")
      case ("Slider", "value", "valueInRangePropType") => Normal("Double")
      case ("Step", "controlButtonsGroup", "Mui.arrayOf(Mui.node)") => Normal("js.Array[VdomNode]")
      case ("Step", "actions", "Mui.arrayOf(Mui.node)") => Normal("js.Array[VdomNode]")
      case ("Tab", "value", "Mui.any") => typeTJs
      case ("Tabs", "value", "Mui.any") => typeTJs
      case ("TextField", "value", "Mui.any") => Normal("String")
      case ("TextField", "defaultValue", "Mui.any") => Normal("String")
      case ("TimePicker", "defaultTime", "Mui.object") => Normal("js.Date")
      case ("TimePicker", "value", "Mui.object") => Normal("js.Date")
      case ("DatePicker", "utils", "Mui.object") => Normal("DatePickerUtils") //TODO ???
      case ("SelectField", "dropDownMenuProps", "Mui.object") =>
        Normal("DropDownMenuProps") //TODO ???
      /* TODO: dubious */
      case ("EnhancedTextarea", "defaultValue", "Mui.any") => Normal("js.Any")
      case ("GridTile", "rootClass", "Mui.object") => Normal("js.Any")
      case ("Popover", "anchorEl", "Mui.object") => Normal("js.Any")
      case ("Stepper", "createIcon", "Mui.func") => Normal("js.Function")
      case ("Stepper", "updateAvatarBackgroundColor", "Mui.func") => Normal("js.Function")
      case (_, "valueLink", "Mui.object") => Normal("js.Any")
      /* mui general */
      case (_, _, "Mui.string") if is("color") => Normal("MuiColor")
      case (_, _, "Mui.object") if is("style") => Normal("CssProperties")
      case (_, _, "Mui.object") if is("muiTheme") => Normal("MuiTheme")
      case (_, _, "Mui.object") => Normal("js.Object")
      case (_, _, "Mui.any") => Normal("js.Any")
      case (_, "label", "validateLabel") => Normal("String")
      case (_, "zDepth", _) => Normal("ZDepth")
      case (_, _, "Mui.origin") => Normal("Origin")
      case (_, _, "_propTypes4.default.origin") => Normal("Origin") //TODO???
      case (_, _, "_propTypes4.default.cornersAndCenter") => Normal("CornersAndCenter") //TODO???
      case (_, _, "_propTypes4.default.stringOrNumber") => Normal("String | Double") //TODO???
      case (_, _, "Mui.cornersAndCenter") => Normal("CornersAndCenter")
      case (_, _, "Mui.corners") => Normal("Corners")
      case (_, _, "Mui.stringOrNumber") => Normal("String | Double")
      /* general */
      case (_, _, "Mui.string") => Normal("String")
      case (_, _, "Mui.bool") => Normal("Boolean")
      case (_, "children", "Mui.element") => Normal("VdomElement")
      case (_, _, "Mui.element") => Normal("VdomElement")
      case (_, "children", "Mui.node") => Normal("VdomNode")
      case (_, _, "Mui.node") => Normal("VdomNode")
      case (_, _, "Mui.number") => Normal("Double")
      case (_, "children", "Mui.arrayOf(Mui.element)") => Normal("js.Array[VdomElement]")
      case ("Stepper", "children", "Mui.arrayOf(Mui.node)") => Normal("js.Any")
      // Plain functions become opaque js.Function wrappers resolved elsewhere.
      case (_, _, "Mui.func") =>
        Normal(MuiTypeMapperFunction(compName, fieldName))
    }
  }
}
| chandu0101/scalajs-react-components | gen/src/main/scala/com/olvind/mui/MuiTypeMapper.scala | Scala | apache-2.0 | 7,723 |
package com.eevolution.context.dictionary.domain.model
import ai.x.play.json.Jsonx
import com.eevolution.context.dictionary.api.{ActiveEnabled, DomainModel, Identifiable, Traceable}
import org.joda.time.DateTime
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: emeris.hernandez@e-evolution.com, http://www.e-evolution.com , http://github.com/e-Evolution
* Created by emeris.hernandez@e-evolution.com , www.e-evolution.com on 12/10/2017.
*/
/**
 * Process Parameter Trl Entity — translation row for a process parameter
 * (maps to table AD_Process_Para_Trl).
 * @param processParameterId Process Parameter ID
 * @param language Language
 * @param tenantId Tenant ID
 * @param organizationId Organization ID
 * @param isActive Is Active
 * @param created Created
 * @param createdBy Created By
 * @param updated Updated
 * @param updatedBy Updated By
 * @param name Name
 * @param description Description
 * @param help Help
 * @param isTranslated Is Translated
 * @param uuid UUID
 */
case class ProcessParameterTrl(processParameterId: Int,
                               language: String,
                               tenantId: Int,
                               organizationId: Int,
                               isActive : Boolean = true,
                               created : DateTime = DateTime.now,
                               createdBy : Int ,
                               updated : DateTime = DateTime.now,
                               updatedBy : Int,
                               name : String,
                               description: Option[String],
                               help: Option[String],
                               isTranslated: Boolean = false,
                               uuid: String
                              ) extends DomainModel
  with ActiveEnabled
  with Identifiable
  with Traceable {
  override type ActiveEnabled = this.type
  override type Identifiable = this.type
  override type Traceable = this.type
  // Entity identity is the translated process parameter's id.
  override def Id: Int = processParameterId
  override val entityName: String = "AD_Process_Para_Trl"
  // NOTE(review): identifier is null for this entity — confirm that all
  // consumers of `identifier` handle null safely.
  override val identifier: String = null
}
object ProcessParameterTrl {
  // Play-JSON format derived for the case class (supports > 22 fields).
  implicit lazy val jsonFormat = Jsonx.formatCaseClass[ProcessParameterTrl]

  /**
   * Factory used by persistence code that works with plain (possibly null)
   * strings for the optional columns.
   *
   * Bug fix: `description` and `help` used to be silently discarded — the
   * constructor was always called with (None, None). They are now wrapped
   * with Option(...), so non-null values are preserved and nulls still map
   * to None.
   */
  def create(processParameterId: Int,
             language: String,
             tenantId: Int,
             organizationId: Int,
             isActive : Boolean,
             created : DateTime,
             createdBy : Int ,
             updated : DateTime,
             updatedBy : Int,
             name : String,
             description: String,
             help: String,
             isTranslated: Boolean,
             uuid: String) = ProcessParameterTrl(processParameterId, language, tenantId, organizationId,
    isActive, created, createdBy, updated, updatedBy, name, Option(description), Option(help), isTranslated, uuid)
} | adempiere/ADReactiveSystem | dictionary-api/src/main/scala/com/eevolution/context/dictionary/domain/model/ProcessParameterTrl.scala | Scala | gpl-3.0 | 3,485 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hbase.spark
import org.apache.spark.sql.test._
import org.scalatest.{BeforeAndAfterAll, FunSuite}
import org.apache.hadoop.hbase.HBaseTestingUtility
/* Implicits */
import org.apache.spark.sql.test.TestSQLContext._
/** Integration tests reading an HBase table through Spark SQL, backed by a mini cluster. */
class HBaseSuite
  extends FunSuite
  with BeforeAndAfterAll {

  // Mini HBase cluster shared by all tests in this suite.
  val util = new HBaseTestingUtility()

  override def beforeAll() {
    util.startMiniCluster()
  }

  override def afterAll() {
    util.shutdownMiniCluster()
  }

  test("dsl test") {
    val tableName = "spark_read_test"
    val schemaDefine = "rowkey string, cf:qual1 int, cf:qual2 double"
    val hbaseSite = "/tmp/hbase-site.xml"
    // NOTE(review): 'title is not a column of schemaDefine above — confirm
    // the selected column and the expected row count of 8 against the fixture.
    val results = TestSQLContext
      .hbaseTable(tableName, schemaDefine, hbaseSite)
      .select('title)
      .collect()
    assert(results.size === 8)
  }

  test("sql test") {
    val tableName = "spark_read_test"
    val schemaDefine = "rowkey string, cf:qual1 int, cf:qual2 double"
    val hbaseSite = "/tmp/hbase-site.xml"
    // NOTE(review): the trailing comma after the last OPTIONS entry may be
    // rejected by the SQL parser — verify against the target Spark version.
    sql(
      s"""
|CREATE TEMPORARY TABLE hbaseTable
|USING org.apache.hbase.spark
|OPTIONS (
| table "$tableName",
| schema "$schemaDefine",
| hbase-site "$hbaseSite",
|)
""".stripMargin.replaceAll("\\n", " "))
    assert(sql("SELECT * FROM hbaseTable").collect().size === 8)
  }
} | haosdent/spark-hbase | src/test/scala/org/apache/hbase/spark/HBaseSuite.scala | Scala | apache-2.0 | 2,135 |
/* Copyright 2009-2018 EPFL, Lausanne */
package inox
package solvers
package theories
import utils._
// A program transformer whose target program is guaranteed to share the
// source program's tree type (the dependent `trees` member ties them together).
trait TheoryEncoder extends transformers.ProgramTransformer { self =>
  val targetProgram: Program { val trees: sourceProgram.trees.type }
  // Convenience alias: both source and target use these trees.
  lazy val trees: sourceProgram.trees.type = sourceProgram.trees
}
// TheoryEncoder that is also a ProgramEncoder; its target tree type `t` is
// fixed to be exactly the source program's trees.
trait SimpleEncoder extends TheoryEncoder with transformers.ProgramEncoder {
  val t: sourceProgram.trees.type = sourceProgram.trees
}
object NoEncoder {
  // Builds an identity TheoryEncoder for `p`: source and target programs are
  // the same program and both directions are identity tree transformers.
  def apply(p: Program, ctx: Context): TheoryEncoder {
    val sourceProgram: p.type
    val targetProgram: Program { val trees: p.trees.type }
  } = new TheoryEncoder {
    val sourceProgram: p.type = p
    // The cast only refines the structural type; the program value is unchanged.
    val targetProgram: Program { val trees: p.trees.type } =
      p.asInstanceOf[Program { val trees: p.trees.type }]
    import trees._
    protected object encoder extends IdentityTreeTransformer
    protected object decoder extends IdentityTreeTransformer
  }
}
| romac/inox | src/main/scala/inox/solvers/theories/TheoryEncoder.scala | Scala | apache-2.0 | 939 |
/*
* Copyright 2013 Maurício Linhares
*
* Maurício Linhares licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.github.mauricio.async.db.general
import com.github.mauricio.async.db.RowData
/**
 * RowData implementation backed by a plain array of column values plus a
 * name-to-position mapping.
 *
 * @param row     zero-based number of this row in the result set
 * @param mapping column name -> column position within `columns`
 * @param columns the column values, ordered as in the originating query
 */
class ArrayRowData(
  row: Int,
  val mapping: Map[String, Int],
  val columns: Array[Any]
) extends RowData {

  /** Value of the column at the given position in the originating query. */
  def apply(columnNumber: Int): Any = columns(columnNumber)

  /** Value of the column with the given name, resolved through `mapping`. */
  def apply(columnName: String): Any = {
    val position = mapping(columnName)
    columns(position)
  }

  /** Number of this row in the query results. Counts start at 0. */
  def rowNumber: Int = row

  /** Number of columns in this row. */
  def length: Int = columns.length
}
| dripower/postgresql-async | db-async-common/src/main/scala/com/github/mauricio/async/db/general/ArrayRowData.scala | Scala | apache-2.0 | 1,377 |
package com.peterpotts.logging
/**
 * Base for level-specific logger adapters: exposes whether the level is
 * enabled plus two log methods (message-only and message + Throwable) that
 * concrete sub-traits bind to a single SLF4J level.
 */
trait SLF4JLevelLogger {
  // Underlying SLF4J logger that all calls delegate to.
  val logger: org.slf4j.Logger
  def enabled: Boolean
  def log(message: String)
  def log(message: String, error: Throwable)
}
/** Binds SLF4JLevelLogger to the TRACE level. */
trait SLF4JTraceLogger extends SLF4JLevelLogger {
  def enabled = logger.isTraceEnabled
  def log(message: String) = logger.trace(message)
  def log(message: String, error: Throwable) = logger.trace(message, error)
}

/** Binds SLF4JLevelLogger to the DEBUG level. */
trait SLF4JDebugLogger extends SLF4JLevelLogger {
  def enabled = logger.isDebugEnabled
  def log(message: String) = logger.debug(message)
  def log(message: String, error: Throwable) = logger.debug(message, error)
}

/** Binds SLF4JLevelLogger to the INFO level. */
trait SLF4JInfoLogger extends SLF4JLevelLogger {
  def enabled = logger.isInfoEnabled
  def log(message: String) = logger.info(message)
  def log(message: String, error: Throwable) = logger.info(message, error)
}

/** Binds SLF4JLevelLogger to the WARN level. */
trait SLF4JWarnLogger extends SLF4JLevelLogger {
  def enabled = logger.isWarnEnabled
  def log(message: String) = logger.warn(message)
  def log(message: String, error: Throwable) = logger.warn(message, error)
}

/** Binds SLF4JLevelLogger to the ERROR level. */
trait SLF4JErrorLogger extends SLF4JLevelLogger {
  def enabled = logger.isErrorEnabled
  def log(message: String) = logger.error(message)
  def log(message: String, error: Throwable) = logger.error(message, error)
}
| peterpotts/logging | src/main/scala/com/peterpotts/logging/SLF4JLevelLogger.scala | Scala | mit | 1,280 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.util
import org.apache.spark.sql.catalyst.expressions.UnsafeRow
import org.apache.spark.sql.types._
object UnsafeRowUtils {
  /**
   * Use the following rules to check the integrity of the UnsafeRow:
   * - schema.fields.length == row.numFields should always be true
   * - UnsafeRow.calculateBitSetWidthInBytes(row.numFields) < row.getSizeInBytes should always be
   *   true if the expectedSchema contains at least one field.
   * - For variable-length fields: if null bit says it's null then don't do anything, else extract
   *   offset and size:
   *   1) 0 <= size < row.getSizeInBytes should always be true. We can be even more precise than
   *      this, where the upper bound of size can only be as big as the variable length part of
   *      the row.
   *   2) offset should be >= fixed sized part of the row.
   *   3) offset + size should be within the row bounds.
   * - For fixed-length fields that are narrower than 8 bytes (boolean/byte/short/int/float), if
   *   null bit says it's null then don't do anything, else:
   *   check if the unused bits in the field are all zeros. The UnsafeRowWriter's write() methods
   *   make this guarantee.
   * - Check the total length of the row.
   */
  def validateStructuralIntegrity(row: UnsafeRow, expectedSchema: StructType): Boolean = {
    if (expectedSchema.fields.length != row.numFields) {
      return false
    }
    val bitSetWidthInBytes = UnsafeRow.calculateBitSetWidthInBytes(row.numFields)
    val rowSizeInBytes = row.getSizeInBytes
    if (expectedSchema.fields.length > 0 && bitSetWidthInBytes >= rowSizeInBytes) {
      return false
    }
    // Accumulates the sizes of all non-null variable-length fields, for the
    // final total-length check below.
    var varLenFieldsSizeInBytes = 0
    expectedSchema.fields.zipWithIndex.foreach {
      // Non-null variable-length field: validate its (offset, size) word.
      case (field, index) if !UnsafeRow.isFixedLength(field.dataType) && !row.isNullAt(index) =>
        val offsetAndSize = row.getLong(index)
        // Upper 32 bits: offset from the row start; lower 32 bits: size.
        val offset = (offsetAndSize >> 32).toInt
        val size = offsetAndSize.toInt
        if (size < 0 ||
            offset < bitSetWidthInBytes + 8 * row.numFields || offset + size > rowSizeInBytes) {
          return false
        }
        varLenFieldsSizeInBytes += size
      // Non-null fixed-length field narrower than 8 bytes: unused high bits
      // must be zero (guaranteed by UnsafeRowWriter's write() methods).
      case (field, index) if UnsafeRow.isFixedLength(field.dataType) && !row.isNullAt(index) =>
        field.dataType match {
          case BooleanType =>
            if ((row.getLong(index) >> 1) != 0L) return false
          case ByteType =>
            if ((row.getLong(index) >> 8) != 0L) return false
          case ShortType =>
            if ((row.getLong(index) >> 16) != 0L) return false
          case IntegerType =>
            if ((row.getLong(index) >> 32) != 0L) return false
          case FloatType =>
            if ((row.getLong(index) >> 32) != 0L) return false
          case _ =>
        }
      // Null field: the whole 8-byte word must be zero.
      case (_, index) if row.isNullAt(index) =>
        if (row.getLong(index) != 0L) return false
      case _ =>
    }
    // Final consistency check on the total row length.
    if (bitSetWidthInBytes + 8 * row.numFields + varLenFieldsSizeInBytes > rowSizeInBytes) {
      return false
    }
    true
  }
}
| maropu/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/UnsafeRowUtils.scala | Scala | apache-2.0 | 3,844 |
package com.github.jt.api.swagger
import org.scalatra.ScalatraServlet
import org.scalatra.swagger.{Swagger, ApiInfo, JacksonSwaggerBase}
/**
 * Serves the generated Swagger resource listing (JSON) for the API.
 */
class SwaggerServlet extends ScalatraServlet with JacksonSwaggerBase {
  // Shared Swagger registry, required implicitly by JacksonSwaggerBase.
  implicit val swagger = SwaggerSetup.swagger
}
object SwaggerSetup {
  private val apiVersion = "1.0"
  // NOTE(review): "tosUrl" and "contact" look like placeholder values —
  // confirm real values before publishing the API docs.
  private val apiInfo = ApiInfo(
    "Scalatra REST API",
    "Example project",
    "tosUrl",
    "contact",
    "Apache License",
    "http://choosealicense.com/licenses/apache-2.0/"
  )
  // Single Swagger registry shared by all documented servlets.
  val swagger = new Swagger(Swagger.SpecVersion, apiVersion, apiInfo)
}
| joeytsai/scalatra-starter | src/main/scala/com/github/jt/api/swagger/SwaggerServlet.scala | Scala | apache-2.0 | 603 |
package funsets
/**
 * 2. Purely Functional Sets.
 *
 * A set is represented by its characteristic function: an `Int => Boolean`
 * predicate that returns true exactly for the set's members. All previously
 * unimplemented (`???`) operations are filled in below; `???` stubs threw
 * NotImplementedError at runtime.
 */
object FunSets {
  /**
   * We represent a set by its characteristic function, i.e.
   * its `contains` predicate.
   */
  type Set = Int => Boolean

  /**
   * Indicates whether a set contains a given element.
   */
  def contains(s: Set, elem: Int): Boolean = s(elem)

  /**
   * Returns the set of the one given element.
   */
  def singletonSet(elem: Int): Set = x => x == elem

  /**
   * Returns the union of the two given sets,
   * the sets of all elements that are in either `s` or `t`.
   */
  def union(s: Set, t: Set): Set = x => s(x) || t(x)

  /**
   * Returns the intersection of the two given sets,
   * the set of all elements that are both in `s` and `t`.
   */
  def intersect(s: Set, t: Set): Set = x => s(x) && t(x)

  /**
   * Returns the difference of the two given sets,
   * the set of all elements of `s` that are not in `t`.
   */
  def diff(s: Set, t: Set): Set = x => s(x) && !t(x)

  /**
   * Returns the subset of `s` for which `p` holds.
   */
  def filter(s: Set, p: Int => Boolean): Set = x => s(x) && p(x)

  /**
   * The bounds for `forall` and `exists` are +/- 1000.
   */
  val bound = 1000

  /**
   * Returns whether all bounded integers within `s` satisfy `p`.
   */
  def forall(s: Set, p: Int => Boolean): Boolean = {
    def iter(a: Int): Boolean = {
      if (a > bound) true                       // whole range scanned, no counterexample
      else if (contains(s, a) && !p(a)) false   // member of s violating p found
      else iter(a + 1)
    }
    iter(-bound)
  }

  /**
   * Returns whether there exists a bounded integer within `s`
   * that satisfies `p`. Implemented via De Morgan: not all members fail `p`.
   */
  def exists(s: Set, p: Int => Boolean): Boolean = !forall(s, x => !p(x))

  /**
   * Returns a set transformed by applying `f` to each element of `s`:
   * y is in the image iff some x in `s` maps to it.
   */
  def map(s: Set, f: Int => Int): Set = y => exists(s, x => f(x) == y)

  /**
   * Displays the contents of a set
   */
  def toString(s: Set): String = {
    val xs = for (i <- -bound to bound if contains(s, i)) yield i
    xs.mkString("{", ",", "}")
  }

  /**
   * Prints the contents of a set on the console.
   */
  def printSet(s: Set) {
    println(toString(s))
  }
}
| matticala/functional-programming-principles-in-scala | week2-funsets/src/main/scala/funsets/FunSets.scala | Scala | apache-2.0 | 1,993 |
package filodb.kafka
import scala.concurrent.Await
import scala.concurrent.duration._
import com.typesafe.config.ConfigFactory
import monix.execution.Scheduler
import filodb.coordinator.{FilodbSettings, IngestionStreamFactory, StoreFactory}
import filodb.core.memstore.SomeData
import filodb.core.store.IngestionConfig
/**
* A simple app which uses a sourceconfig of your choice to test reading
* data from Kafka (or whatever configured source factory) and test reading from certain offsets.
* It reads dataset definition from MetaStore, so please pass the server.conf with Cassandra/metastore details.
*
* To launch: java -Xmx4G -Dconfig.file=conf/timeseries-filodb-server.conf \\
* -cp <path>/standalone-assembly-0.7.0.jar filodb.kafka.TestConsumer \\
* my-kafka-sourceconfig.conf <partition#>
* It will keep reading records, printing out the offsets of each record.
* Optional: pass in a second arg which is the offset to seek to.
* Optional: third arg which is key=value, allows filtering output by contents of any stringColumn
*/
object TestConsumer extends App {
  val settings = new FilodbSettings()
  val storeFactory = StoreFactory(settings, Scheduler.io())

  // CLI arguments: <sourceconfig path> [offset/shard] [filter key=value]
  val sourceConfPath = args(0)
  val offsetOpt = args.drop(1).headOption.map(_.toLong)
  // NOTE(review): args(1) is parsed both as offsetOpt (Long) and as the shard
  // number here — confirm the intended argument order matches the doc comment.
  val shard = if (args.length > 1) args(1).toInt else 0
  val filterArg = if (args.length > 2) Some(args(2)) else None

  val sourceConf = ConfigFactory.parseFile(new java.io.File(sourceConfPath))
  //scalastyle:off
  println(s"TestConsumer starting with shard $shard, config $sourceConf\\nand offset $offsetOpt")

  import monix.execution.Scheduler.Implicits.global

  val ingestConf = IngestionConfig(sourceConf, classOf[KafkaIngestionStreamFactory].getClass.getName).get
  val dataset = settings.datasetFromStream(sourceConf)

  // Instantiate the configured stream factory reflectively.
  val ctor = Class.forName(ingestConf.streamFactoryClass).getConstructors.head
  val streamFactory = ctor.newInstance().asInstanceOf[IngestionStreamFactory]

  // Figure out filter. What field # in BinaryRecord to filter by?
  // (-1, "") disables filtering.
  val (filterField, filterVal) =
    filterArg.map { filt =>
      val parts = filt.split('=')
      if (parts.size == 2) {
        val partColIndex = dataset.partitionColumns.indexWhere(_.name == parts(0))
        if (partColIndex >= 0) { (dataset.ingestionSchema.partitionFieldStart.get + partColIndex, parts(1)) }
        else { (-1, "") }
      } else {
        (-1, "")
      }
    }.getOrElse((-1, ""))

  val stream = streamFactory.create(sourceConf, settings.schemas, shard, offsetOpt)
  val fut = stream.get//.take(10)
    .foreach { case SomeData(container, offset) =>
      println(s"\\n----- Offset $offset -----")
      // Use record reader to filter? Or maybe just use ingestionSchema getString etc.
      if (filterField >= 0)
        container.foreach { case (base, offset) =>
          if (dataset.ingestionSchema.asJavaString(base, offset, filterField) == filterVal)
            println(s" ${dataset.ingestionSchema.stringify(base, offset)}")
        }
      else
        container.foreach { case (base, offset) =>
          println(s" ${dataset.ingestionSchema.stringify(base, offset)}")
        }
    }
  Await.result(fut, 10.minutes)
} | tuplejump/FiloDB | kafka/src/main/scala/filodb/kafka/TestConsumer.scala | Scala | apache-2.0 | 3,419 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import org.mockito.Mockito.when
import org.scalatestplus.mockito.MockitoSugar
import org.scalatest.{Matchers, WordSpec}
import uk.gov.hmrc.ct.BoxValidationFixture
import uk.gov.hmrc.ct.box.CtValidation
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
import uk.gov.hmrc.ct.version.calculations.ComputationsBoxRetrieverForTest
class CP666Spec extends WordSpec with MockitoSugar with Matchers with BoxValidationFixture[ComputationsBoxRetriever] {

  // Mocked retriever handed to the shared BoxValidationFixture test helpers.
  val boxRetriever = mock[ComputationsBoxRetriever]

  // Stubs the sibling boxes the fixture reads while validating CP666.
  // The concrete values (CPQ8=false, CP78=31000) presumably just put the
  // retriever in a valid state for the zero-or-positive check -- confirm
  // against BoxValidationFixture if they matter.
  override def setUpMocks = {
    when(boxRetriever.cpQ8()).thenReturn(CPQ8(Some(false)))
    when(boxRetriever.cp78()).thenReturn(CP78(Some(31000)))
  }

  // Shared fixture test: CP666 must accept only zero or positive values.
  testBoxIsZeroOrPositive("CP666", CP666.apply)
}
| hmrc/ct-calculations | src/test/scala/uk/gov/hmrc/ct/computations/CP666Spec.scala | Scala | apache-2.0 | 1,371 |
package core.raytracing
import core.main._, core.pieces._, core.shapes.dim0._, core.shapes.dim1._, core.shapes.dim2._, core.shapes.forces._, core.forces._
import wrapper._
/**
 * Sentinel terminator for a Trace list: it has no head, its tail points back
 * to itself, and rendering it onto a surface is a no-op.
 */
object TraceNil extends Trace {
  val head = null
  val tail = this
  // Explicit `: Unit = ()` replaces the original's procedure syntax
  // (`def render(s: Surface) {}`), which is deprecated in Scala 2.13 and
  // removed in Scala 3.  Behavior is unchanged: do nothing.
  def render(s: Surface): Unit = ()
} | radiotech/FlatLand | src/core/raytracing/TraceNil.scala | Scala | mit | 271 |
/*
* Copyright 2013-2015 James Shade
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.shade.time
import org.scalatest.WordSpec
import org.scalatest.Matchers
import org.joda.time.chrono.ISOChronology
import org.joda.time.DateTimeZone
/**
 * Verifies the package-level IsoUtc chronology constant: it must be the ISO
 * chronology pinned to the UTC time zone.
 */
class TimePackageSpec extends WordSpec with Matchers {

  // FIX: corrected typos in the (report-visible) test descriptions:
  // "isoUtc contstant" -> "IsoUtc constant", "chronolgy" -> "chronology".
  "The IsoUtc constant" should {

    "have ISO chronology in the UTC timezone" in {
      IsoUtc.isInstanceOf[ISOChronology] shouldBe true
      IsoUtc.getZone shouldBe DateTimeZone.UTC
    }
  }
}
| jamesshade/time | src/test/scala/org/shade/time/TimePackageSpec.scala | Scala | apache-2.0 | 1,025 |
package org.jetbrains.plugins.scala.failed.typeInference
import org.jetbrains.plugins.scala.PerfCycleTests
import org.jetbrains.plugins.scala.base.libraryLoaders.{ScalaZCoreLoader, Specs2Loader, ThirdPartyLibraryLoader}
import org.jetbrains.plugins.scala.lang.typeInference.TypeInferenceTestBase
import org.junit.experimental.categories.Category
/**
* @author Nikolay.Tropin
*/
@Category(Array(classOf[PerfCycleTests]))
class Specs2ToScalaCheckImplicitTest extends TypeInferenceTestBase {

  // Pulls in the specs2 2.4.15 and scalaz-core jars the fixture code compiles against.
  override protected def additionalLibraries(): Array[ThirdPartyLibraryLoader] =
    Array(Specs2Loader("2.4.15")(module), ScalaZCoreLoader()(module))

  // SCL-8864: the marked expression inside `prop { ... }` should infer via the
  // specs2->ScalaCheck implicit conversion; the trailing "//Prop" line is the
  // expected-type marker consumed by doTest.  (Currently in the failing
  // PerfCycleTests category -- see the class annotation.)
  def testSCL8864(): Unit = doTest {
    s"""object Main extends App {
       |  import org.specs2.ScalaCheck
       |  import org.specs2.mutable.Specification
       |
       |  class Foo extends Specification with ScalaCheck {
       |    prop { (numbers: Seq[Int]) =>
       |      numbers.nonEmpty ==> {
       |        ${START}numbers.sum / numbers.size must be_>(1)$END
       |      }
       |    }
       |  }
       |}
       |//Prop
      """.stripMargin
  }
}
| loskutov/intellij-scala | test/org/jetbrains/plugins/scala/failed/typeInference/Specs2ToScalaCheckImplicitTest.scala | Scala | apache-2.0 | 1,117 |
package de.tu_berlin.impro3.flink.spatio_temporal_dynamics.io
import de.tu_berlin.impro3.flink.spatio_temporal_dynamics._
import model.Tweet
import parsers.JaxParser
import org.apache.flink.api.common.io.DelimitedInputFormat
import annotation.tailrec
/**
 * Delimited input format that parses each line into a Tweet with JaxParser,
 * silently skipping lines that fail to parse.
 */
class JaxInputFormat extends DelimitedInputFormat[Tweet] {

  val parser = new JaxParser

  /**
   * Reads the next parsable record, retrying on any parse failure.
   * Equivalent to the original tail-recursive retry loop: keep advancing
   * until super.nextRecord succeeds without throwing.
   */
  override def nextRecord(record: Tweet): Tweet = {
    var parsed: Tweet = null
    var succeeded = false
    while (!succeeded) {
      try {
        parsed = super.nextRecord(record)
        succeeded = true
      } catch {
        case _: Exception => () // malformed line: skip it and try the next one
      }
    }
    parsed
  }

  /** Decodes the raw bytes of one line and parses them into a Tweet. */
  def readRecord(reuse: Tweet, bytes: Array[Byte],
                 offset: Int, numBytes: Int) = {
    val line = new String(bytes, offset, numBytes)
    parser.parse(line).get
  }
}
| joroKr21/spatio-temporal-dynamics | impro3-ws14-flink/src/main/scala/de/tu_berlin/impro3/flink/spatio_temporal_dynamics/io/JaxInputFormat.scala | Scala | apache-2.0 | 683 |
/*
* Copyright 2015 ligaDATA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ligadata.pmml.transforms.xmltoraw.common
import org.apache.logging.log4j.{ Logger, LogManager }
import org.xml.sax.Attributes
import com.ligadata.pmml.compiler._
import com.ligadata.pmml.traits._
import com.ligadata.pmml.support._
import com.ligadata.pmml.syntaxtree.raw.common._
import com.ligadata.pmml.transforms.xmltoraw.common._
import com.ligadata.pmml.transforms.xmltoraw.ruleset._
class PmmlNodeGeneratorDispatcher(ctx : PmmlContext) extends PmmlNodeGeneratorDispatch with LogTrait {

	/**
		Select the appropriate PmmlNode generator for the supplied xml values, locate its
		PmmlNodeGenerator (the 'qName' is the key), and dispatch it.  The returned node is added to the
		syntax tree owned by (or addressable by) the PmmlNodeGeneratorDispatch implementation.

 	    @param namespaceURI: String
 	    @param localName: String
 	    @param qName:String
 	    @param atts: Attributes
 	    @param lineNumber : Int
 	    @param columnNumber : Int
 	    @return Unit
	 */
	def dispatch(namespaceURI: String, localName: String , qName:String , atts: Attributes, lineNumber : Int, columnNumber : Int) : Unit = {
		// Look up the generator keyed by element name; a missing entry is an
		// error unless the element is deliberately ignored (see ignoreList).
		val generator : PmmlNodeGenerator = if (qName != null) {
			val gen : PmmlNodeGenerator = pmmlNodeDispatchMap.getOrElse(qName, null)
			if (gen == null) {
				if (! ignoreList.contains(qName)) {
					PmmlError.logError(ctx, s"there is no PmmlNode generator for node type $qName ... check dispatch map initialization")
				}
			}
			gen
		}
		else {
			PmmlError.logError(ctx, s"This is a fine howdy-doo! No node supplied to the CodePrinterDispatch")
			null
		}
		if (generator != null) {
			val node : PmmlNode = generator.make(namespaceURI, localName, qName, atts, lineNumber, columnNumber)
			if (node != null) {
				/** update the parent on the stack if appropriate */
				if (! ctx.pmmlNodeStack.isEmpty) {
					ctx.pmmlNodeStack.top.addChild(node)
				}
				/** push the newly established node to the stack */
				ctx.pmmlNodeStack.push(node)
			}
		}
	}

	/** In the event it is desirable to ignore certain missing handlers, add the qName here.
	    Note the case difference: "PMML" is ignored here while the map below keys "Pmml". */
	val ignoreList : Set[String] = Set[String]("PMML")

	// Element-name -> generator table driving dispatch() above.
	// NOTE(review): "Pmml" maps to ApplicationPmmlNodeGenerator, the same
	// generator as "Application" -- looks intentional (header handling?) but
	// worth confirming against the generator implementations.
	val pmmlNodeDispatchMap = Map[String, PmmlNodeGenerator](
	      ("Pmml" -> new ApplicationPmmlNodeGenerator)
	    , ("Application" -> new ApplicationPmmlNodeGenerator)
	    , ("Apply" -> new ApplyPmmlNodeGenerator)
	    , ("Array" -> new ArrayPmmlNodeGenerator)
	    , ("CompoundPredicate" -> new CompoundPredicatePmmlNodeGenerator)
	    , ("Constant" -> new ConstantPmmlNodeGenerator)
	    , ("DataDictionary" -> new DataDictionaryPmmlNodeGenerator)
	    , ("DataField" -> new DataFieldPmmlNodeGenerator)
	    , ("DefineFunction" -> new DefineFunctionPmmlNodeGenerator)
	    , ("DerivedField" -> new DerivedFieldPmmlNodeGenerator)
	    , ("FieldColumnPair" -> new FieldColumnPairPmmlNodeGenerator)
	    , ("FieldRef" -> new FieldRefPmmlNodeGenerator)
	    , ("InlineTable" -> new InlineTablePmmlNodeGenerator)
	    , ("Interval" -> new IntervalPmmlNodeGenerator)
	    , ("MapValues" -> new MapValuesPmmlNodeGenerator)
	    , ("MiningField" -> new MiningFieldPmmlNodeGenerator)
	    , ("MiningSchema" -> new MiningSchemaPmmlNodeGenerator)
	    , ("ParameterField" -> new ParameterFieldPmmlNodeGenerator)
	    , ("Header" -> new PmmlHeaderPmmlNodeGenerator)
	    , ("row" -> new RowPmmlNodeGenerator)
	    , ("RowTuple" -> new RowTuplePmmlNodeGenerator)
	    , ("RuleSelectionMethod" -> new RuleSelectionMethodPmmlNodeGenerator)
	    , ("RuleSetModel" -> new RuleSetModelPmmlNodeGenerator)
	    , ("RuleSet" -> new RuleSetPmmlNodeGenerator)
	    , ("ScoreDistribution" -> new ScoreDistributionPmmlNodeGenerator)
	    , ("SimplePredicate" -> new SimplePredicatePmmlNodeGenerator)
	    , ("SimpleRule" -> new SimpleRulePmmlNodeGenerator)
	    , ("SimpleSetPredicate" -> new SimpleSetPredicatePmmlNodeGenerator)
	    , ("TableLocator" -> new TableLocatorPmmlNodeGenerator)
	    , ("TransformationDictionary" -> new TransformationDictionaryPmmlNodeGenerator)
	    , ("Value" -> new ValuePmmlNodeGenerator))
}
| traytonwhite/Kamanja | trunk/Pmml/PmmlCompiler/src/main/scala/com/ligadata/pmml/transforms/xmltoraw/common/PmmlNodeGeneratorDispatcher.scala | Scala | apache-2.0 | 4,684 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.io._
import scala.util.parsing.combinator.RegexParsers
import com.fasterxml.jackson.core._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.expressions.codegen.CodegenFallback
import org.apache.spark.sql.catalyst.json._
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.Utils
// One token of a parsed JSON path expression (e.g. "$.a[0].*"); produced by
// JsonPathParser and interpreted by GetJsonObject.evaluatePath.
private[this] sealed trait PathInstruction
private[this] object PathInstruction {
  // '[' ... ']' subscript marker; always followed by Wildcard or Index.
  private[expressions] case object Subscript extends PathInstruction
  // '*' -- matches every element/field at this level.
  private[expressions] case object Wildcard extends PathInstruction
  // Child-access marker ('.name' or ['name']); always followed by Named.
  private[expressions] case object Key extends PathInstruction
  // A concrete array index, e.g. "[123]".
  private[expressions] case class Index(index: Long) extends PathInstruction
  // A concrete field name, e.g. ".name" or "['name']".
  private[expressions] case class Named(name: String) extends PathInstruction
}
// Controls how matched values are emitted by GetJsonObject.evaluatePath.
private[this] sealed trait WriteStyle
private[this] object WriteStyle {
  // Emit string values without surrounding quotes (top-level scalar result).
  private[expressions] case object RawStyle extends WriteStyle
  // Emit values as regular quoted JSON.
  private[expressions] case object QuotedStyle extends WriteStyle
  // Flatten nested arrays into the parent array while writing.
  private[expressions] case object FlattenStyle extends WriteStyle
}
// Combinator parser turning a JSON path string (e.g. "$.a[0].*") into a flat
// list of PathInstruction tokens.
private[this] object JsonPathParser extends RegexParsers {
  import PathInstruction._
  // Every path must begin with the root symbol '$'.
  def root: Parser[Char] = '$'
  // A run of digits interpreted as a Long array index.
  def long: Parser[Long] = "\\\\d+".r ^? {
    case x => x.toLong
  }
  // parse `[*]` and `[123]` subscripts
  def subscript: Parser[List[PathInstruction]] =
    for {
      operand <- '[' ~> ('*' ^^^ Wildcard | long ^^ Index) <~ ']'
    } yield {
      Subscript :: operand :: Nil
    }
  // parse `.name` or `['name']` child expressions
  def named: Parser[List[PathInstruction]] =
    for {
      name <- '.' ~> "[^\\\\.\\\\[]+".r | "['" ~> "[^\\\\'\\\\?]+".r <~ "']"
    } yield {
      Key :: Named(name) :: Nil
    }
  // child wildcards: `..`, `.*` or `['*']`
  def wildcard: Parser[List[PathInstruction]] =
    (".*" | "['*']") ^^^ List(Wildcard)
  // A single path segment; wildcard is tried first so ".*" is not consumed by `named`.
  def node: Parser[List[PathInstruction]] =
    wildcard |
    named |
    subscript
  // Full path: root followed by zero or more segments, flattened into one list.
  val expression: Parser[List[PathInstruction]] = {
    phrase(root ~> rep(node) ^^ (x => x.flatten))
  }
  /** Parses `str` into instructions; returns None if the path is malformed. */
  def parse(str: String): Option[List[PathInstruction]] = {
    this.parseAll(expression, str) match {
      case Success(result, _) =>
        Some(result)
      case NoSuccess(msg, next) =>
        None
    }
  }
}
// Single Jackson JsonFactory shared by all JSON expressions in this file.
private[this] object SharedFactory {
  val jsonFactory = new JsonFactory()
  // Enabled for Hive compatibility
  jsonFactory.enable(JsonParser.Feature.ALLOW_UNQUOTED_CONTROL_CHARS)
}
/**
* Extracts json object from a json string based on json path specified, and returns json string
* of the extracted json object. It will return null if the input json string is invalid.
*/
@ExpressionDescription(
usage = "_FUNC_(json_txt, path) - Extracts a json object from `path`.",
examples = """
Examples:
> SELECT _FUNC_('{"a":"b"}', '$.a');
b
""")
case class GetJsonObject(json: Expression, path: Expression)
  extends BinaryExpression with ExpectsInputTypes with CodegenFallback {
  import com.fasterxml.jackson.core.JsonToken._
  import PathInstruction._
  import SharedFactory._
  import WriteStyle._
  override def left: Expression = json
  override def right: Expression = path
  override def inputTypes: Seq[DataType] = Seq(StringType, StringType)
  override def dataType: DataType = StringType
  override def nullable: Boolean = true
  override def prettyName: String = "get_json_object"
  // Pre-parsed path for the common case where `path` is a literal (foldable).
  @transient private lazy val parsedPath = parsePath(path.eval().asInstanceOf[UTF8String])
  /**
   * Streams through the JSON input with Jackson, writing the portion matched
   * by the path into a byte buffer.  Returns null for null input, an
   * unparsable path, no match, or malformed JSON.
   */
  override def eval(input: InternalRow): Any = {
    val jsonStr = json.eval(input).asInstanceOf[UTF8String]
    if (jsonStr == null) {
      return null
    }
    val parsed = if (path.foldable) {
      parsedPath
    } else {
      parsePath(path.eval(input).asInstanceOf[UTF8String])
    }
    if (parsed.isDefined) {
      try {
        /* We know the bytes are UTF-8 encoded. Pass a Reader to avoid having Jackson
          detect character encoding which could fail for some malformed strings */
        Utils.tryWithResource(CreateJacksonParser.utf8String(jsonFactory, jsonStr)) { parser =>
          val output = new ByteArrayOutputStream()
          val matched = Utils.tryWithResource(
            jsonFactory.createGenerator(output, JsonEncoding.UTF8)) { generator =>
            parser.nextToken()
            evaluatePath(parser, generator, RawStyle, parsed.get)
          }
          if (matched) {
            UTF8String.fromBytes(output.toByteArray)
          } else {
            null
          }
        }
      } catch {
        case _: JsonProcessingException => null
      }
    } else {
      null
    }
  }
  /** Parses the path expression; None for null or syntactically invalid paths. */
  private def parsePath(path: UTF8String): Option[List[PathInstruction]] = {
    if (path != null) {
      JsonPathParser.parse(path.toString)
    } else {
      None
    }
  }
  // advance to the desired array index, assumes to start at the START_ARRAY token
  // Returns a function of the remaining index distance: runs `f` at index 0,
  // then drains the rest of the array so the parser lands past END_ARRAY.
  private def arrayIndex(p: JsonParser, f: () => Boolean): Long => Boolean = {
    case _ if p.getCurrentToken == END_ARRAY =>
      // terminate, nothing has been written
      false
    case 0 =>
      // we've reached the desired index
      val dirty = f()
      while (p.nextToken() != END_ARRAY) {
        // advance the token stream to the end of the array
        p.skipChildren()
      }
      dirty
    case i if i > 0 =>
      // skip this token and evaluate the next
      p.skipChildren()
      p.nextToken()
      arrayIndex(p, f)(i - 1)
  }
  /**
   * Evaluate a list of JsonPath instructions, returning a bool that indicates if any leaf nodes
   * have been written to the generator
   */
  private def evaluatePath(
      p: JsonParser,
      g: JsonGenerator,
      style: WriteStyle,
      path: List[PathInstruction]): Boolean = {
    (p.getCurrentToken, path) match {
      case (VALUE_STRING, Nil) if style == RawStyle =>
        // there is no array wildcard or slice parent, emit this string without quotes
        if (p.hasTextCharacters) {
          g.writeRaw(p.getTextCharacters, p.getTextOffset, p.getTextLength)
        } else {
          g.writeRaw(p.getText)
        }
        true
      case (START_ARRAY, Nil) if style == FlattenStyle =>
        // flatten this array into the parent
        var dirty = false
        while (p.nextToken() != END_ARRAY) {
          dirty |= evaluatePath(p, g, style, Nil)
        }
        dirty
      case (_, Nil) =>
        // general case: just copy the child tree verbatim
        g.copyCurrentStructure(p)
        true
      case (START_OBJECT, Key :: xs) =>
        var dirty = false
        while (p.nextToken() != END_OBJECT) {
          if (dirty) {
            // once a match has been found we can skip other fields
            p.skipChildren()
          } else {
            dirty = evaluatePath(p, g, style, xs)
          }
        }
        dirty
      case (START_ARRAY, Subscript :: Wildcard :: Subscript :: Wildcard :: xs) =>
        // special handling for the non-structure preserving double wildcard behavior in Hive
        var dirty = false
        g.writeStartArray()
        while (p.nextToken() != END_ARRAY) {
          dirty |= evaluatePath(p, g, FlattenStyle, xs)
        }
        g.writeEndArray()
        dirty
      case (START_ARRAY, Subscript :: Wildcard :: xs) if style != QuotedStyle =>
        // retain Flatten, otherwise use Quoted... cannot use Raw within an array
        val nextStyle = style match {
          case RawStyle => QuotedStyle
          case FlattenStyle => FlattenStyle
          case QuotedStyle => throw new IllegalStateException()
        }
        // temporarily buffer child matches, the emitted json will need to be
        // modified slightly if there is only a single element written
        val buffer = new StringWriter()
        var dirty = 0
        Utils.tryWithResource(jsonFactory.createGenerator(buffer)) { flattenGenerator =>
          flattenGenerator.writeStartArray()
          while (p.nextToken() != END_ARRAY) {
            // track the number of array elements and only emit an outer array if
            // we've written more than one element, this matches Hive's behavior
            dirty += (if (evaluatePath(p, flattenGenerator, nextStyle, xs)) 1 else 0)
          }
          flattenGenerator.writeEndArray()
        }
        val buf = buffer.getBuffer
        if (dirty > 1) {
          g.writeRawValue(buf.toString)
        } else if (dirty == 1) {
          // remove outer array tokens
          g.writeRawValue(buf.substring(1, buf.length()-1))
        }  // else do not write anything
        dirty > 0
      case (START_ARRAY, Subscript :: Wildcard :: xs) =>
        var dirty = false
        g.writeStartArray()
        while (p.nextToken() != END_ARRAY) {
          // wildcards can have multiple matches, continually update the dirty count
          dirty |= evaluatePath(p, g, QuotedStyle, xs)
        }
        g.writeEndArray()
        dirty
      case (START_ARRAY, Subscript :: Index(idx) :: (xs@Subscript :: Wildcard :: _)) =>
        p.nextToken()
        // we're going to have 1 or more results, switch to QuotedStyle
        arrayIndex(p, () => evaluatePath(p, g, QuotedStyle, xs))(idx)
      case (START_ARRAY, Subscript :: Index(idx) :: xs) =>
        p.nextToken()
        arrayIndex(p, () => evaluatePath(p, g, style, xs))(idx)
      case (FIELD_NAME, Named(name) :: xs) if p.getCurrentName == name =>
        // exact field match
        if (p.nextToken() != JsonToken.VALUE_NULL) {
          evaluatePath(p, g, style, xs)
        } else {
          false
        }
      case (FIELD_NAME, Wildcard :: xs) =>
        // wildcard field match
        p.nextToken()
        evaluatePath(p, g, style, xs)
      case _ =>
        // no instruction matches the current token: skip this subtree entirely
        p.skipChildren()
        false
    }
  }
}
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(jsonStr, p1, p2, ..., pn) - Returns a tuple like the function get_json_object, but it takes multiple names. All the input parameters and output column types are string.",
examples = """
Examples:
> SELECT _FUNC_('{"a":1, "b":2}', 'a', 'b');
1 2
""")
// scalastyle:on line.size.limit
case class JsonTuple(children: Seq[Expression])
  extends Generator with CodegenFallback {
  import SharedFactory._
  override def nullable: Boolean = {
    // a row is always returned
    false
  }
  // if processing fails this shared value will be returned
  @transient private lazy val nullRow: Seq[InternalRow] =
    new GenericInternalRow(Array.ofDim[Any](fieldExpressions.length)) :: Nil
  // the json body is the first child
  @transient private lazy val jsonExpr: Expression = children.head
  // the fields to query are the remaining children
  @transient private lazy val fieldExpressions: Seq[Expression] = children.tail
  // eagerly evaluate any foldable the field names
  @transient private lazy val foldableFieldNames: IndexedSeq[Option[String]] = {
    fieldExpressions.map {
      case expr if expr.foldable => Option(expr.eval()).map(_.asInstanceOf[UTF8String].toString)
      case _ => null
    }.toIndexedSeq
  }
  // and count the number of foldable fields, we'll use this later to optimize evaluation
  @transient private lazy val constantFields: Int = foldableFieldNames.count(_ != null)
  // Output schema: one nullable string column per requested field, named c0..cN.
  override def elementSchema: StructType = StructType(fieldExpressions.zipWithIndex.map {
    case (_, idx) => StructField(s"c$idx", StringType, nullable = true)
  })
  override def prettyName: String = "json_tuple"
  override def checkInputDataTypes(): TypeCheckResult = {
    if (children.length < 2) {
      TypeCheckResult.TypeCheckFailure(s"$prettyName requires at least two arguments")
    } else if (children.forall(child => StringType.acceptsType(child.dataType))) {
      TypeCheckResult.TypeCheckSuccess
    } else {
      TypeCheckResult.TypeCheckFailure(s"$prettyName requires that all arguments are strings")
    }
  }
  /** Returns a single row of extracted values; `nullRow` on null/malformed input. */
  override def eval(input: InternalRow): TraversableOnce[InternalRow] = {
    val json = jsonExpr.eval(input).asInstanceOf[UTF8String]
    if (json == null) {
      return nullRow
    }
    try {
      /* We know the bytes are UTF-8 encoded. Pass a Reader to avoid having Jackson
        detect character encoding which could fail for some malformed strings */
      Utils.tryWithResource(CreateJacksonParser.utf8String(jsonFactory, json)) { parser =>
        parseRow(parser, input)
      }
    } catch {
      case _: JsonProcessingException =>
        nullRow
    }
  }
  /** Walks the top-level JSON object once, filling in each requested field. */
  private def parseRow(parser: JsonParser, input: InternalRow): Seq[InternalRow] = {
    // only objects are supported
    if (parser.nextToken() != JsonToken.START_OBJECT) {
      return nullRow
    }
    // evaluate the field names as String rather than UTF8String to
    // optimize lookups from the json token, which is also a String
    val fieldNames = if (constantFields == fieldExpressions.length) {
      // typically the user will provide the field names as foldable expressions
      // so we can use the cached copy
      foldableFieldNames.map(_.orNull)
    } else if (constantFields == 0) {
      // none are foldable so all field names need to be evaluated from the input row
      fieldExpressions.map(_.eval(input).asInstanceOf[UTF8String].toString)
    } else {
      // if there is a mix of constant and non-constant expressions
      // prefer the cached copy when available
      foldableFieldNames.zip(fieldExpressions).map {
        case (null, expr) => expr.eval(input).asInstanceOf[UTF8String].toString
        case (fieldName, _) => fieldName.orNull
      }
    }
    val row = Array.ofDim[Any](fieldNames.length)
    // start reading through the token stream, looking for any requested field names
    while (parser.nextToken() != JsonToken.END_OBJECT) {
      if (parser.getCurrentToken == JsonToken.FIELD_NAME) {
        // check to see if this field is desired in the output
        val jsonField = parser.getCurrentName
        var idx = fieldNames.indexOf(jsonField)
        if (idx >= 0) {
          // it is, copy the child tree to the correct location in the output row
          val output = new ByteArrayOutputStream()
          // write the output directly to UTF8 encoded byte array
          if (parser.nextToken() != JsonToken.VALUE_NULL) {
            Utils.tryWithResource(jsonFactory.createGenerator(output, JsonEncoding.UTF8)) {
              generator => copyCurrentStructure(generator, parser)
            }
            val jsonValue = UTF8String.fromBytes(output.toByteArray)
            // SPARK-21804: json_tuple returns null values within repeated columns
            // except the first one; so that we need to check the remaining fields.
            do {
              row(idx) = jsonValue
              idx = fieldNames.indexOf(jsonField, idx + 1)
            } while (idx >= 0)
          }
        }
      }
      // always skip children, it's cheap enough to do even if copyCurrentStructure was called
      parser.skipChildren()
    }
    new GenericInternalRow(row) :: Nil
  }
  /** Copies the current JSON value to `generator`, unquoting top-level strings. */
  private def copyCurrentStructure(generator: JsonGenerator, parser: JsonParser): Unit = {
    parser.getCurrentToken match {
      // if the user requests a string field it needs to be returned without enclosing
      // quotes which is accomplished via JsonGenerator.writeRaw instead of JsonGenerator.write
      case JsonToken.VALUE_STRING if parser.hasTextCharacters =>
        // slight optimization to avoid allocating a String instance, though the characters
        // still have to be decoded... Jackson doesn't have a way to access the raw bytes
        generator.writeRaw(parser.getTextCharacters, parser.getTextOffset, parser.getTextLength)
      case JsonToken.VALUE_STRING =>
        // the normal String case, pass it through to the output without enclosing quotes
        generator.writeRaw(parser.getText)
      case JsonToken.VALUE_NULL =>
        // a special case that needs to be handled outside of this method.
        // if a requested field is null, the result must be null. the easiest
        // way to achieve this is just by ignoring null tokens entirely
        throw new IllegalStateException("Do not attempt to copy a null field")
      case _ =>
        // handle other types including objects, arrays, booleans and numbers
        generator.copyCurrentStructure(parser)
    }
  }
}
/**
* Converts an json input string to a [[StructType]], [[ArrayType]] or [[MapType]]
* with the specified schema.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(jsonStr, schema[, options]) - Returns a struct value with the given `jsonStr` and `schema`.",
examples = """
Examples:
> SELECT _FUNC_('{"a":1, "b":0.8}', 'a INT, b DOUBLE');
{"a":1, "b":0.8}
> SELECT _FUNC_('{"time":"26/08/2015"}', 'time Timestamp', map('timestampFormat', 'dd/MM/yyyy'));
{"time":"2015-08-26 00:00:00.0"}
""",
since = "2.2.0")
// scalastyle:on line.size.limit
case class JsonToStructs(
    schema: DataType,
    options: Map[String, String],
    child: Expression,
    timeZoneId: Option[String] = None)
  extends UnaryExpression with TimeZoneAwareExpression with CodegenFallback with ExpectsInputTypes {
  val forceNullableSchema = SQLConf.get.getConf(SQLConf.FROM_JSON_FORCE_NULLABLE_SCHEMA)
  // The JSON input data might be missing certain fields. We force the nullability
  // of the user-provided schema to avoid data corruptions. In particular, the parquet-mr encoder
  // can generate incorrect files if values are missing in columns declared as non-nullable.
  val nullableSchema = if (forceNullableSchema) schema.asNullable else schema
  override def nullable: Boolean = true
  // Used in `FunctionRegistry`
  def this(child: Expression, schema: Expression, options: Map[String, String]) =
    this(
      schema = ExprUtils.evalTypeExpr(schema),
      options = options,
      child = child,
      timeZoneId = None)
  def this(child: Expression, schema: Expression) = this(child, schema, Map.empty[String, String])
  def this(child: Expression, schema: Expression, options: Expression) =
    this(
      schema = ExprUtils.evalTypeExpr(schema),
      options = ExprUtils.convertToMapData(options),
      child = child,
      timeZoneId = None)
  // Only struct, array and map target schemas are supported by from_json.
  override def checkInputDataTypes(): TypeCheckResult = nullableSchema match {
    case _: StructType | _: ArrayType | _: MapType =>
      super.checkInputDataTypes()
    case _ => TypeCheckResult.TypeCheckFailure(
      s"Input schema ${nullableSchema.catalogString} must be a struct, an array or a map.")
  }
  // Unwraps the parser's single-column row for array/map schemas; the parser
  // below always produces rows (wrapping non-struct values in a "value" column).
  @transient
  private lazy val castRow = nullableSchema match {
    case _: StructType => (row: InternalRow) => row
    case _: ArrayType => (row: InternalRow) => row.getArray(0)
    case _: MapType => (row: InternalRow) => row.getMap(0)
  }
  // This converts parsed rows to the desired output by the given schema.
  private def convertRow(rows: Iterator[InternalRow]) = {
    if (rows.hasNext) {
      val result = rows.next()
      // JSON's parser produces one record only.
      assert(!rows.hasNext)
      castRow(result)
    } else {
      throw new IllegalArgumentException("Expected one row from JSON parser.")
    }
  }
  val nameOfCorruptRecord = SQLConf.get.getConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD)
  // Failure-safe Jackson parser; only PERMISSIVE and FAILFAST modes are allowed.
  @transient lazy val parser = {
    val parsedOptions = new JSONOptions(options, timeZoneId.get, nameOfCorruptRecord)
    val mode = parsedOptions.parseMode
    if (mode != PermissiveMode && mode != FailFastMode) {
      throw new IllegalArgumentException(s"from_json() doesn't support the ${mode.name} mode. " +
        s"Acceptable modes are ${PermissiveMode.name} and ${FailFastMode.name}.")
    }
    val (parserSchema, actualSchema) = nullableSchema match {
      case s: StructType =>
        ExprUtils.verifyColumnNameOfCorruptRecord(s, parsedOptions.columnNameOfCorruptRecord)
        (s, StructType(s.filterNot(_.name == parsedOptions.columnNameOfCorruptRecord)))
      case other =>
        (StructType(StructField("value", other) :: Nil), other)
    }
    val rawParser = new JacksonParser(actualSchema, parsedOptions, allowArrayAsStructs = false)
    val createParser = CreateJacksonParser.utf8String _
    new FailureSafeParser[UTF8String](
      input => rawParser.parse(input, createParser, identity[UTF8String]),
      mode,
      parserSchema,
      parsedOptions.columnNameOfCorruptRecord,
      parsedOptions.multiLine)
  }
  override def dataType: DataType = nullableSchema
  override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression =
    copy(timeZoneId = Option(timeZoneId))
  override def nullSafeEval(json: Any): Any = {
    convertRow(parser.parse(json.asInstanceOf[UTF8String]))
  }
  override def inputTypes: Seq[AbstractDataType] = StringType :: Nil
  override def sql: String = schema match {
    case _: MapType => "entries"
    case _ => super.sql
  }
  override def prettyName: String = "from_json"
}
/**
* Converts a [[StructType]], [[ArrayType]] or [[MapType]] to a JSON output string.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(expr[, options]) - Returns a JSON string with a given struct value",
examples = """
Examples:
> SELECT _FUNC_(named_struct('a', 1, 'b', 2));
{"a":1,"b":2}
> SELECT _FUNC_(named_struct('time', to_timestamp('2015-08-26', 'yyyy-MM-dd')), map('timestampFormat', 'dd/MM/yyyy'));
{"time":"26/08/2015"}
> SELECT _FUNC_(array(named_struct('a', 1, 'b', 2)));
[{"a":1,"b":2}]
> SELECT _FUNC_(map('a', named_struct('b', 1)));
{"a":{"b":1}}
> SELECT _FUNC_(map(named_struct('a', 1),named_struct('b', 2)));
{"[1]":{"b":2}}
> SELECT _FUNC_(map('a', 1));
{"a":1}
> SELECT _FUNC_(array((map('a', 1))));
[{"a":1}]
""",
since = "2.2.0")
// scalastyle:on line.size.limit
case class StructsToJson(
    options: Map[String, String],
    child: Expression,
    timeZoneId: Option[String] = None)
  extends UnaryExpression with TimeZoneAwareExpression with CodegenFallback with ExpectsInputTypes {
  override def nullable: Boolean = true
  def this(options: Map[String, String], child: Expression) = this(options, child, None)
  // Used in `FunctionRegistry`
  def this(child: Expression) = this(Map.empty, child, None)
  def this(child: Expression, options: Expression) =
    this(
      options = ExprUtils.convertToMapData(options),
      child = child,
      timeZoneId = None)
  // Buffer the generator writes into; reset after each value is converted.
  @transient
  lazy val writer = new CharArrayWriter()
  @transient
  lazy val gen = new JacksonGenerator(
    inputSchema, writer, new JSONOptions(options, timeZoneId.get))
  @transient
  lazy val inputSchema = child.dataType
  // This converts rows to the JSON output according to the given schema.
  @transient
  lazy val converter: Any => UTF8String = {
    // Flushes the shared generator and drains the buffer into a UTF8String.
    def getAndReset(): UTF8String = {
      gen.flush()
      val json = writer.toString
      writer.reset()
      UTF8String.fromString(json)
    }
    inputSchema match {
      case _: StructType =>
        (row: Any) =>
          gen.write(row.asInstanceOf[InternalRow])
          getAndReset()
      case _: ArrayType =>
        (arr: Any) =>
          gen.write(arr.asInstanceOf[ArrayData])
          getAndReset()
      case _: MapType =>
        (map: Any) =>
          gen.write(map.asInstanceOf[MapData])
          getAndReset()
    }
  }
  override def dataType: DataType = StringType
  // Accepts structs, maps and arrays whose element types Jackson can serialize.
  override def checkInputDataTypes(): TypeCheckResult = inputSchema match {
    case struct: StructType =>
      try {
        JacksonUtils.verifySchema(struct)
        TypeCheckResult.TypeCheckSuccess
      } catch {
        case e: UnsupportedOperationException =>
          TypeCheckResult.TypeCheckFailure(e.getMessage)
      }
    case map: MapType =>
      // TODO: let `JacksonUtils.verifySchema` verify a `MapType`
      try {
        val st = StructType(StructField("a", map) :: Nil)
        JacksonUtils.verifySchema(st)
        TypeCheckResult.TypeCheckSuccess
      } catch {
        case e: UnsupportedOperationException =>
          TypeCheckResult.TypeCheckFailure(e.getMessage)
      }
    case array: ArrayType =>
      try {
        JacksonUtils.verifyType(prettyName, array)
        TypeCheckResult.TypeCheckSuccess
      } catch {
        case e: UnsupportedOperationException =>
          TypeCheckResult.TypeCheckFailure(e.getMessage)
      }
    case _ => TypeCheckResult.TypeCheckFailure(
      s"Input type ${child.dataType.catalogString} must be a struct, array of structs or " +
        "a map or array of map.")
  }
  override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression =
    copy(timeZoneId = Option(timeZoneId))
  override def nullSafeEval(value: Any): Any = converter(value)
  override def inputTypes: Seq[AbstractDataType] = TypeCollection(ArrayType, StructType) :: Nil
  override def prettyName: String = "to_json"
}
/**
 * A function infers schema of JSON string.
 */
@ExpressionDescription(
  usage = "_FUNC_(json[, options]) - Returns schema in the DDL format of JSON string.",
  examples = """
    Examples:
      > SELECT _FUNC_('[{"col":0}]');
       array<struct<col:bigint>>
      > SELECT _FUNC_('[{"col":01}]', map('allowNumericLeadingZeros', 'true'));
       array<struct<col:bigint>>
  """,
  since = "2.4.0")
case class SchemaOfJson(
    child: Expression,
    options: Map[String, String])
  extends UnaryExpression with CodegenFallback {

  def this(child: Expression) = this(child, Map.empty[String, String])

  // SQL-facing constructor: options passed as a map expression literal.
  def this(child: Expression, options: Expression) = this(
      child = child,
      options = ExprUtils.convertToMapData(options))

  override def dataType: DataType = StringType

  // The input is required to be a non-null string literal (checked below),
  // so the result is never null.
  override def nullable: Boolean = false

  // Schema inference is timezone-insensitive here; UTC is a fixed placeholder.
  @transient
  private lazy val jsonOptions = new JSONOptions(options, "UTC")

  @transient
  private lazy val jsonFactory = {
    val factory = new JsonFactory()
    jsonOptions.setJacksonOptions(factory)
    factory
  }

  @transient
  private lazy val jsonInferSchema = new JsonInferSchema(jsonOptions)

  // Evaluated once: the child must be a foldable string literal, so its value
  // is fixed at analysis time.
  @transient
  private lazy val json = child.eval().asInstanceOf[UTF8String]

  override def checkInputDataTypes(): TypeCheckResult = child match {
    case Literal(s, StringType) if s != null => super.checkInputDataTypes()
    case _ => TypeCheckResult.TypeCheckFailure(
      s"The input json should be a string literal and not null; however, got ${child.sql}.")
  }

  // Note: the incoming row `v` is intentionally ignored — the result depends
  // only on the literal `json` captured above.
  override def eval(v: InternalRow): Any = {
    val dt = Utils.tryWithResource(CreateJacksonParser.utf8String(jsonFactory, json)) { parser =>
      parser.nextToken()
      jsonInferSchema.inferField(parser)
    }

    UTF8String.fromString(dt.catalogString)
  }

  override def prettyName: String = "schema_of_json"
}
| guoxiaolongzte/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/jsonExpressions.scala | Scala | apache-2.0 | 27,737 |
package com.rackspace.prefs.model
import scala.slick.driver.JdbcDriver.simple._
/**
 * Slick table definitions for the preferences service schema.
 *
 * Three tables are modeled:
 *  - RESOURCE_TYPES: catalog of resource types, keyed by slug.
 *  - RESOURCE_ATTRIBUTES: per-type attribute metadata, FK to RESOURCE_TYPES.
 *  - RESOURCES: stored payloads, keyed by (resource type, id), FK to RESOURCE_TYPES.
 */
object DBTables {

  /** Catalog of resource types; `slug` is the natural primary key. */
  class ResourceTypes(tag: Tag)
      extends Table[(String, String, String)](tag, "RESOURCE_TYPES") {
    def slug = column[String]("SLUG", O.PrimaryKey)
    def name = column[String]("NAME")
    def identifier = column[String]("IDENTIFIER")
    def * = (slug, name, identifier)
  }
  val resourceTypes = TableQuery[ResourceTypes]

  /** Attribute metadata describing the shape/validation of a resource type. */
  class ResourceAttributes(tag: Tag) extends Table[(Int, String, String, String, String, String)](tag, "RESOURCE_ATTRIBUTES") {
    def id = column[Int]("ID", O.PrimaryKey)
    def resourceType = column[String]("RESOURCE_TYPE")
    def key = column[String]("KEY")
    def valueType = column[String]("VALUE_TYPE")
    def use = column[String]("USE")
    def validation = column[String]("VALIDATION")
    def * = (id, resourceType, key, valueType, use, validation)
    // Every attribute row must reference an existing resource type.
    def slug = foreignKey("ATTRIBUTE_SLUG_FK", resourceType, resourceTypes)(_.slug)
  }
  val resourceAttributes = TableQuery[ResourceAttributes]

  /** Stored resource payloads; a resource id is unique within its type. */
  class Resources(tag: Tag) extends Table[(String, String, String)](tag, "RESOURCES") {
    def resourceType = column[String]("RESOURCE_TYPE")
    def id = column[String]("ID")
    def payload = column[String]("PAYLOAD")
    def * = (resourceType, id, payload)
    // Composite primary key makes (resourceType, id) unique, resolving the
    // previous TODO about uniqueness on that pair.
    def pk = primaryKey("RESOURCES_PK", (resourceType, id))
    def slug = foreignKey("RESOURCE_SLUG_FK", resourceType, resourceTypes)(_.slug)
  }
  val resources = TableQuery[Resources]
}
| ChandraAddala/preferences-service | src/main/scala/com/rackspace/prefs/model/DBTables.scala | Scala | apache-2.0 | 1,502 |
package com.gx.factorykit
/**
* Copyright 2017 josephguan
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/** Marker trait shared by every weapon the factory kit can produce. */
trait Weapon

/** An axe; `toString` yields its display name. */
class Axe extends Weapon {
  override def toString = "Axe"
}

/** A sword; `toString` yields its display name. */
class Sword extends Weapon {
  override def toString = "Sword"
}

/** A bow; `toString` yields its display name. */
class Bow extends Weapon {
  override def toString = "Bow"
}
| josephguan/scala-design-patterns | creational/factory-kit/src/main/scala/com/gx/factorykit/Weapon.scala | Scala | apache-2.0 | 863 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mxnet
import org.apache.mxnet.CheckUtils._
import org.scalatest.prop.GeneratorDrivenPropertyChecks
import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}
import org.scalacheck.Gen
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
class OperatorSuite extends FunSuite with BeforeAndAfterAll
with Matchers with GeneratorDrivenPropertyChecks {
  // Builds `n` symbolic inputs, sums them with ElementWiseSum, and checks the
  // forward output against the NDArray-level sum, plus that every input's
  // gradient equals the upstream gradient (d(sum)/d(x_i) == 1).
  private def checkElementwiseSumWithShape(shape: Shape, n: Int) = {
    // forward
    val inputs = (0 until n).map(i => Symbol.Variable(s"arg $i"))
    val out = Symbol.ElementWiseSum(name = "esum")(inputs: _*)()
    val arr = (0 until n).map(_ => Random.uniform(-10, 10, shape))
    val arrGrad = (0 until n).map(_ => NDArray.empty(shape))
    val exec = out.bind(Context.cpu(), args = arr, argsGrad = arrGrad)
    exec.forward()
    val forwardOutput = exec.outputs(0)
    val forwardOutputExpected = arr.reduce(_ + _)
    assert(reldiff(forwardOutput, forwardOutputExpected) < 5e-5)
    // backward: each addend's gradient is the out-gradient itself
    val outGrad = Random.uniform(-10, 10, shape)
    exec.backward(outGrad)
    arrGrad.foreach(grad => assert(grad === outGrad))
  }

  test("elementwise sum") {
    checkElementwiseSumWithShape(Shape(5, 5, 3), 4)
    // property-based sweep over random ranks (1..4), dims and input counts
    forAll (Gen.choose(1, 4), Gen.choose(1, 8)) { (dim, n) =>
      forAll (Gen.listOfN(dim, Gen.choose(1, Math.pow(1000, 1.0 / dim).toInt))) { shape =>
        checkElementwiseSumWithShape(Shape(shape), n)
      }
    }
  }
  // TODO: checkSliceChannel
  // Concatenates `shapes.size` inputs along `dimension`, filling each input
  // with its own size along that dimension, then checks forward output and
  // per-input gradients (gradient of concat routes out-grad slices back).
  private def checkConcatWithShape(shapes: Seq[Shape], dimension: Int, skipSecond: Boolean) = {
    // if skipSecond is true, second argument will not have gradient.
    // it is to test #1130
    // forward
    // (targetDim is computed but currently unused)
    val targetDim = shapes.map(_(dimension)).sum

    val inputs = (0 until shapes.size).map(i => Symbol.Variable(s"arg$i"))
    val out = Symbol.Concat(name = "conc")(inputs: _*)(Map("dim" -> dimension))
    val arr = shapes.map { shape =>
      val nd = NDArray.empty(shape)
      nd.set(shape(dimension))
    }
    val arrNp = arr.map(_.copy())
    val arrGrad = shapes.map(NDArray.empty(_))
    val argNames = out.listArguments()
    // When skipSecond, "arg1" is excluded from the gradient dict entirely.
    val dictGrad =
      (argNames zip arrGrad).filter { case (name, d) =>
        !skipSecond || name != "arg1"
      }.toMap

    val args = out.listArguments()
    val (argShapes, outShapes, auxShapes) = out.inferShape(args.zip(shapes).toMap)
    val outGrad = NDArray.empty(outShapes(0))
    val exec1 = out.bind(Context.cpu(), arr, dictGrad)
    exec1.forward()
    val out1 = exec1.outputs(0)
    // FIXME: only support concatenate at axis0
    val ret = NDArray.concatenate(arr)
    assert(out1 === ret)

    // backward: out-grad = forward output + 1, so each input's gradient
    // should be its original value + 1.
    out1.copyTo(outGrad)
    outGrad += 1
    exec1.backward(outGrad)
    argNames.zipWithIndex.foreach { case (name, i) =>
      if (!skipSecond || name != "arg1") {
        val grad = dictGrad(name)
        val npGrad = arrNp(i)
        assert(grad === npGrad + 1)
      }
    }
  }

  test("concat") {
    val merge = Array(2, 3, 4, 5, 6)
    forAll (Gen.choose(2, 5)) { dim =>
      val shapes = mutable.ArrayBuffer.empty[Shape]
      for (i <- 0 until dim) {
        shapes += Shape(merge(i), 2)
      }
      // TODO: check dimension > 0
      checkConcatWithShape(shapes, 0, skipSecond = true)
      checkConcatWithShape(shapes, 0, skipSecond = false)
    }
  }
  // Generic regression-output check: `forward` is the scalar activation
  // expected from the op, `backward` the expected gradient given (pred, label).
  private def checkRegression(model: Symbol,
                              forward: Float => Float,
                              backward: (Float, Float) => Float) = {
    val shape = Shape(3, 1)
    val arrData = Random.uniform(-1, 1, shape)
    val arrLabel = Random.uniform(0, 1, Shape(shape.head))
    val arrGrad = NDArray.empty(shape)
    val exec1 = model.bind(Context.cpu(),
      args = Array(arrData, arrLabel), argsGrad = Map("data" -> arrGrad))
    exec1.forward()
    assert(exec1.outputs(0).shape === shape)
    val out1 = exec1.outputs(0).toArray
    val npout = arrData.toArray.map(forward(_))
    assert(CheckUtils.reldiff(npout, out1) < 1e-6f)

    exec1.backward()
    // arrData shape: Vector(3, 1)
    // arrLabel shape: Vector(3)
    val npoutBack = (npout zip arrLabel.toArray).map { case (data, label) =>
      backward(data, label)
    }
    assert(CheckUtils.reldiff(npoutBack, arrGrad.toArray) < 1e-6f)
  }

  test("regression") {
    // Logistic regression: sigmoid forward, (pred - label) gradient.
    checkRegression(Symbol.LogisticRegressionOutput()()(
      Map("data" -> Symbol.Variable("data"), "label" -> Symbol.Variable("label"))),
      (x: Float) => 1.0f / (1.0f + Math.exp(-x).toFloat),
      (x: Float, y: Float) => x - y)
    // Linear regression: identity forward, (pred - label) gradient.
    checkRegression(Symbol.LinearRegressionOutput()()(
      Map("data" -> Symbol.Variable("data"), "label" -> Symbol.Variable("label"))),
      (x: Float) => x,
      (x: Float, y: Float) => x - y)
  }
  // TODO: test softmax
  // Applies SwapAxis twice ((0,2) then (1,2)) to a (2,3,4) tensor whose first
  // axis-0 slice is all 1s and second all 2s, then verifies shape and values.
  test("swap axes") {
    val data = Symbol.Variable("data")
    val shape = Shape(2, 3, 4)
    val arrData = NDArray.ones(shape)
    arrData.slice(0).set(1f)
    arrData.slice(1).set(2f)
    // arrData =
    //
    // [[[ 1., 1., 1., 1.],
    //   [ 1., 1., 1., 1.],
    //   [ 1., 1., 1., 1.]],
    //
    //  [[ 2., 2., 2., 2.],
    //   [ 2., 2., 2., 2.],
    //   [ 2., 2., 2., 2.]]]
    val swap0 = Symbol.SwapAxis()()(Map("data" -> data, "dim1" -> 0, "dim2" -> 2))
    val swap = Symbol.SwapAxis()()(Map("data" -> swap0, "dim1" -> 1, "dim2" -> 2))
    val exec = swap.bind(Context.cpu(), args = Array(arrData))
    exec.forward()
    val out = exec.outputs(0)

    // After swapaxes(swapaxes(arrData, 0, 2), 1, 2)
    // out should be
    // [[[ 1., 1., 1.],
    //   [ 2., 2., 2.]],
    //
    //  [[ 1., 1., 1.],
    //   [ 2., 2., 2.]],
    //
    //  [[ 1., 1., 1.],
    //   [ 2., 2., 2.]],
    //
    //  [[ 1., 1., 1.],
    //   [ 2., 2., 2.]]]
    assert(out.shape === Shape(4, 2, 3))
    for (i <- 0 until 4) {
      val axis0 = out.slice(i)
      assert(CheckUtils.reldiff(axis0.toArray, Array(1f, 1f, 1f, 2f, 2f, 2f)) < 1e-6f)
    }
  }
  // Mixed scalar/symbol arithmetic: builds the same expression symbolically
  // and on NDArrays, then compares forward values and gradients.
  test("scalar op") {
    val data = Symbol.Variable("data")
    val shape = Shape(3, 4)
    val dataTmp = NDArray.ones(shape) * 5

    val test = {
      import org.apache.mxnet.SymbolConversions._
      2 / (4 - ((1 + data + 1) * 2 / 5) - 0.2)
    }

    val (npout1, npout) = {
      import org.apache.mxnet.NDArrayConversions._
      val npout1 = 4 - ((1 + dataTmp + 1) * 2 / 5) - 0.2f
      val npout = 2 / npout1
      (npout1, npout)
    }

    checkSymbolicForward(test, Array(dataTmp), Array(npout))

    // chain rule for 2 / f(x) with f'(x) = -2*2/5, scaled by out-grad of 2
    val npoutGrad = new NDArrayConversions(2f * (2f * 2f / 5f)) / (npout1 * npout1)

    checkSymbolicBackward(test, Array(dataTmp), Array(NDArray.ones(shape) * 2), Array(npoutGrad))
  }

  test("ones") {
    val ones = Symbol.ones(shape = Shape(2, 2))
    val exe = ones.simpleBind(ctx = Context.cpu(), gradReq = "write", shapeDict = Map())
    exe.forward(isTrain = false)
    assert(CheckUtils.reldiff(Array(1f, 1f, 1f, 1f), exe.outputs.head.toArray) <= 1e-5f)
  }

  test("zeros") {
    val zeros = Symbol.zeros(shape = Shape(2, 2))
    val exe = zeros.simpleBind(ctx = Context.cpu(), gradReq = "write", shapeDict = Map())
    exe.forward(isTrain = false)
    assert(Array(0f, 0f, 0f, 0f) === exe.outputs.head.toArray)
  }

  // Compares Symbol.arange against a Scala-side Range with random bounds/step.
  test("arange") {
    for (i <- 0 until 5) {
      val start = scala.util.Random.nextFloat() * 5
      val stop = start + scala.util.Random.nextFloat() * 100
      val step = scala.util.Random.nextFloat() * 4
      val repeat = 1
      val result = (start.toDouble until stop.toDouble by step.toDouble)
        .flatMap(x => Array.fill[Float](repeat)(x.toFloat))
      val x = Symbol.arange(start = start, stop = Some(stop), step = step, repeat = repeat)
      var exe = x.simpleBind(ctx = Context.cpu(), gradReq = "write", shapeDict = Map())
      exe.forward(isTrain = false)
      // arange produces no differentiable inputs, hence no gradient arrays
      assert(exe.gradArrays.length == 0)
      assert(CheckUtils.reldiff(result.toArray, exe.outputs.head.toArray) <= 1e-4f)
    }
  }
  // data ** scalar: forward 3**2 == 9, gradient 2*x with unit out-grad.
  test("scalar pow") {
    val data = Symbol.Variable("data")
    val shape = Shape(1, 1)
    val dataTmp = NDArray.ones(shape) * 3
    val dataTmpPowered = NDArray.ones(shape) * 9
    val test = data ** 2
    // TODO: check numeric gradient
    checkSymbolicForward(test, Array(dataTmp), Array(dataTmpPowered))
    checkSymbolicBackward(test, Array(dataTmp), Array(NDArray.ones(shape)), Array(dataTmp * 2))
  }

  // data ** exp with both symbolic: checks value and both partial derivatives.
  test("symbol pow") {
    val shape = Shape(1, 1)

    val data = Symbol.Variable("data")
    val dataTmp = NDArray.ones(shape) * 2

    val exp = Symbol.Variable("exp")
    val expTmp = NDArray.ones(shape) * 3

    val test = data ** exp

    // TODO: check numeric gradient
    checkSymbolicForward(test, Seq(dataTmp, expTmp), Seq(NDArray.ones(shape) * 8))

    val dataDir = NDArray.ones(shape) * 4 * expTmp // dataTmp**(expTmp - 1) * expTmp
    // expDir = dataTmp**(expTmp) * log(dataTmp)
    val expDir = NDArray.ones(shape) * 8 * (NDArray.ones(shape) * Math.log(2).toFloat)
    checkSymbolicBackward(test, Seq(dataTmp, expTmp),
      Seq(NDArray.ones(shape)), Seq(dataDir, expDir))
  }

  // scalar ** symbol: 2**x forward, log(2)*2**x gradient.
  test("pow fn") {
    val shape = Shape(3, 4)
    val exp = Symbol.Variable("exp")
    import SymbolConversions._
    val y = 2 ** exp
    val x = NDArray.ones(shape) * 3
    // TODO: check numeric gradient
    checkSymbolicForward(y, Seq(x), Seq(NDArray.ones(shape) * 8)) // 2**x
    checkSymbolicBackward(y, Seq(x), Seq(NDArray.ones(shape)),
      // log(2) * 2**x
      Seq(NDArray.ones(shape) * 8 * Math.log(2).toFloat))
  }
  // --- Comparison-operator tests -------------------------------------------
  // Pattern shared by all tests below: forward of a comparison yields a 0/1
  // mask; comparison ops are non-differentiable, so backward must produce
  // all-zero gradients for every input. Variants cover symbol-vs-scalar,
  // symbol-vs-symbol, and scalar-vs-symbol (the "... 2" tests) orderings.
  test("scalar equal") {
    val data = Symbol.Variable("datas")
    val shape = Shape(2, 2)
    val dataTmpExpected = NDArray.array(Array(0f, 1f, 0f, 0f), shape)
    val test = Symbol.equal(data, 2f)

    val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape))
    exec.argDict("datas").set(Array(1f, 2f, 3f, 4f))

    exec.forward()
    assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f)

    exec.backward(NDArray.ones(shape))
    assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f))
  }

  test("symbol equal") {
    val data = Symbol.Variable("datas")
    val data2 = Symbol.Variable("datas2")
    val shape = Shape(2, 2)
    val dataTmpExpected = NDArray.array(Array(1f, 0f, 0f, 0f), shape)
    val test = Symbol.equal(data, data2)

    val exec = test.simpleBind(Context.cpu(), gradReq = "write",
      shapeDict = Map("datas" -> shape, "datas2" -> shape))
    exec.argDict("datas").set(Array(1f, 2f, 3f, 4f))
    exec.argDict("datas2").set(Array(1f, 3f, 2f, 6f))

    exec.forward()
    assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f)

    exec.backward(NDArray.ones(shape))
    assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f))
    assert(exec.gradDict("datas2").toArray === Array.fill[Float](shape.product)(0f))
  }

  test("scalar equal 2") {
    val data = Symbol.Variable("datas")
    val shape = Shape(2, 2)
    val dataTmpExpected = NDArray.array(Array(0f, 1f, 0f, 0f), shape)
    val test = Symbol.equal(2f, data)

    val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape))
    exec.argDict("datas").set(Array(1f, 2f, 3f, 4f))

    exec.forward()
    assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f)

    exec.backward(NDArray.ones(shape))
    assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f))
  }

  test("scalar not_equal") {
    val data = Symbol.Variable("datas")
    val shape = Shape(2, 2)
    val dataTmpExpected = NDArray.array(Array(1f, 0f, 1f, 1f), shape)
    val test = Symbol.notEqual(data, 2f)

    val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape))
    exec.argDict("datas").set(Array(1f, 2f, 3f, 4f))

    exec.forward()
    assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f)

    exec.backward(NDArray.ones(shape))
    assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f))
  }

  test("symbol not_equal") {
    val data = Symbol.Variable("datas")
    val data2 = Symbol.Variable("datas2")
    val shape = Shape(2, 2)
    val dataTmpExpected = NDArray.array(Array(0f, 1f, 1f, 1f), shape)
    val test = Symbol.notEqual(data, data2)

    val exec = test.simpleBind(Context.cpu(), gradReq = "write",
      shapeDict = Map("datas" -> shape, "datas2" -> shape))
    exec.argDict("datas").set(Array(1f, 2f, 3f, 4f))
    exec.argDict("datas2").set(Array(1f, 3f, 2f, 6f))

    exec.forward()
    assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f)

    exec.backward(NDArray.ones(shape))
    assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f))
    assert(exec.gradDict("datas2").toArray === Array.fill[Float](shape.product)(0f))
  }

  test("scalar not_equal 2") {
    val data = Symbol.Variable("datas")
    val shape = Shape(2, 2)
    val dataTmpExpected = NDArray.array(Array(1f, 0f, 1f, 1f), shape)
    val test = Symbol.notEqual(2f, data)

    val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape))
    exec.argDict("datas").set(Array(1f, 2f, 3f, 4f))

    exec.forward()
    assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f)

    exec.backward(NDArray.ones(shape))
    assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f))
  }

  // The `>`/`>=`/`<`/`<=` operators come from Symbol's operator overloads;
  // the "... 2" variants exercise the scalar-on-the-left SymbolConversions.
  test("scalar greater") {
    val data = Symbol.Variable("datas")
    val shape = Shape(2, 2)
    val dataTmpExpected = NDArray.array(Array(0f, 0f, 1f, 1f), shape)
    val test = data > 2f

    val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape))
    exec.argDict("datas").set(Array(1f, 2f, 3f, 4f))

    exec.forward()
    assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f)

    exec.backward(NDArray.ones(shape))
    assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f))
  }

  test("symbol greater") {
    val data = Symbol.Variable("datas")
    val data2 = Symbol.Variable("datas2")
    val shape = Shape(2, 2)
    val dataTmpExpected = NDArray.array(Array(0f, 0f, 1f, 0f), shape)
    val test = data > data2

    val exec = test.simpleBind(Context.cpu(), gradReq = "write",
      shapeDict = Map("datas" -> shape, "datas2" -> shape))
    exec.argDict("datas").set(Array(1f, 2f, 3f, 4f))
    exec.argDict("datas2").set(Array(1f, 3f, 2f, 6f))

    exec.forward()
    assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f)

    exec.backward(NDArray.ones(shape))
    assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f))
    assert(exec.gradDict("datas2").toArray === Array.fill[Float](shape.product)(0f))
  }

  test("scalar greater 2") {
    val data = Symbol.Variable("datas")
    val shape = Shape(2, 2)
    val dataTmpExpected = NDArray.array(Array(1f, 0f, 0f, 0f), shape)
    import SymbolConversions._
    val test = 2f > data

    val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape))
    exec.argDict("datas").set(Array(1f, 2f, 3f, 4f))

    exec.forward()
    assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f)

    exec.backward(NDArray.ones(shape))
    assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f))
  }

  test("scalar greater_equal") {
    val data = Symbol.Variable("datas")
    val shape = Shape(2, 2)
    val dataTmpExpected = NDArray.array(Array(0f, 1f, 1f, 1f), shape)
    val test = data >= 2f

    val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape))
    exec.argDict("datas").set(Array(1f, 2f, 3f, 4f))

    exec.forward()
    assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f)

    exec.backward(NDArray.ones(shape))
    assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f))
  }

  test("symbol greater_equal") {
    val data = Symbol.Variable("datas")
    val data2 = Symbol.Variable("datas2")
    val shape = Shape(2, 2)
    val dataTmpExpected = NDArray.array(Array(1f, 0f, 1f, 0f), shape)
    val test = data >= data2

    val exec = test.simpleBind(Context.cpu(), gradReq = "write",
      shapeDict = Map("datas" -> shape, "datas2" -> shape))
    exec.argDict("datas").set(Array(1f, 2f, 3f, 4f))
    exec.argDict("datas2").set(Array(1f, 3f, 2f, 6f))

    exec.forward()
    assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f)

    exec.backward(NDArray.ones(shape))
    assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f))
    assert(exec.gradDict("datas2").toArray === Array.fill[Float](shape.product)(0f))
  }

  test("scalar greater_equal 2") {
    val data = Symbol.Variable("datas")
    val shape = Shape(2, 2)
    val dataTmpExpected = NDArray.array(Array(1f, 1f, 0f, 0f), shape)
    import SymbolConversions._
    val test = 2f >= data

    val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape))
    exec.argDict("datas").set(Array(1f, 2f, 3f, 4f))

    exec.forward()
    assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f)

    exec.backward(NDArray.ones(shape))
    assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f))
  }

  test("scalar lesser") {
    val data = Symbol.Variable("datas")
    val shape = Shape(2, 2)
    val dataTmpExpected = NDArray.array(Array(1f, 0f, 0f, 0f), shape)
    val test = data < 2f

    val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape))
    exec.argDict("datas").set(Array(1f, 2f, 3f, 4f))

    exec.forward()
    assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f)

    exec.backward(NDArray.ones(shape))
    assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f))
  }

  test("symbol lesser") {
    val data = Symbol.Variable("datas")
    val data2 = Symbol.Variable("datas2")
    val shape = Shape(2, 2)
    val dataTmpExpected = NDArray.array(Array(0f, 1f, 0f, 1f), shape)
    val test = data < data2

    val exec = test.simpleBind(Context.cpu(), gradReq = "write",
      shapeDict = Map("datas" -> shape, "datas2" -> shape))
    exec.argDict("datas").set(Array(1f, 2f, 3f, 4f))
    exec.argDict("datas2").set(Array(1f, 3f, 2f, 6f))

    exec.forward()
    assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f)

    exec.backward(NDArray.ones(shape))
    assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f))
    assert(exec.gradDict("datas2").toArray === Array.fill[Float](shape.product)(0f))
  }

  test("scalar lesser 2") {
    val data = Symbol.Variable("datas")
    val shape = Shape(2, 2)
    val dataTmpExpected = NDArray.array(Array(0f, 0f, 1f, 1f), shape)
    import SymbolConversions._
    val test = 2f < data

    val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape))
    exec.argDict("datas").set(Array(1f, 2f, 3f, 4f))

    exec.forward()
    assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f)

    exec.backward(NDArray.ones(shape))
    assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f))
  }

  test("scalar lesser_equal") {
    val data = Symbol.Variable("datas")
    val shape = Shape(2, 2)
    val dataTmpExpected = NDArray.array(Array(1f, 1f, 0f, 0f), shape)
    val test = data <= 2f

    val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape))
    exec.argDict("datas").set(Array(1f, 2f, 3f, 4f))

    exec.forward()
    assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f)

    exec.backward(NDArray.ones(shape))
    assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f))
  }

  test("symbol lesser_equal") {
    val data = Symbol.Variable("datas")
    val data2 = Symbol.Variable("datas2")
    val shape = Shape(2, 2)
    val dataTmpExpected = NDArray.array(Array(1f, 1f, 0f, 1f), shape)
    val test = data <= data2

    val exec = test.simpleBind(Context.cpu(), gradReq = "write",
      shapeDict = Map("datas" -> shape, "datas2" -> shape))
    exec.argDict("datas").set(Array(1f, 2f, 3f, 4f))
    exec.argDict("datas2").set(Array(1f, 3f, 2f, 6f))

    exec.forward()
    assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f)

    exec.backward(NDArray.ones(shape))
    assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f))
    assert(exec.gradDict("datas2").toArray === Array.fill[Float](shape.product)(0f))
  }

  test("scalar lesser_equal 2") {
    val data = Symbol.Variable("datas")
    val shape = Shape(2, 2)
    val dataTmpExpected = NDArray.array(Array(0f, 1f, 1f, 1f), shape)
    import SymbolConversions._
    val test = 2f <= data

    val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape))
    exec.argDict("datas").set(Array(1f, 2f, 3f, 4f))

    exec.forward()
    assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f)

    exec.backward(NDArray.ones(shape))
    assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f))
  }
  // NOTE(review): this test only builds the Embedding symbol and prints its
  // JSON — it asserts nothing (see the TODO). Consider checking forward output.
  test("embedding") {
    val inDim = 10
    val outDim = 4
    val batch = 24
    val data = Symbol.Variable("data")
    val embed = Symbol.Embedding(name = "embed")()(
      Map("data" -> data, "input_dim" -> inDim, "output_dim" -> outDim))
    // TODO
    // scalastyle:off println
    println(s"Embeded symbol: ${embed.toJson}")
    // scalastyle:on println
  }
  // check ops handle duplicate input correctly.
  // data * data: forward x^2, and the gradient must accumulate both uses
  // of the single input, yielding 2x (times the out-grad of ones).
  test("binary op duplicate input") {
    val data = Symbol.Variable("data")
    val shape = Shape(3, 4)
    val dataTmp = NDArray.ones(shape) * 5
    val arrData = dataTmp.copy()
    val arrGrad = NDArray.ones(shape) * 3
    val outGrad = NDArray.ones(shape)
    val square = data * data
    val exeSquare = square.bind(Context.cpu(), args = Array(arrData), argsGrad = Array(arrGrad))
    exeSquare.forward()
    assert(reldiff(exeSquare.outputs.head, dataTmp * dataTmp) < 1e-6f)
    exeSquare.backward(outGrad)
    assert(reldiff(arrGrad, dataTmp * 2f) < 1e-6f)
  }
  // sign(x): forward matches NDArray.sign; gradient is zero everywhere.
  test("sign") {
    val data = Symbol.Variable("data")
    val shape = Shape(3, 4)
    val dataTmp = NDArray.ones(shape) * 5
    val arrData = dataTmp.copy()
    val arrGrad = NDArray.ones(shape) * 3

    val test = Symbol.sign()(data)()
    val exeTest = test.bind(Context.cpu(), args = Array(arrData), argsGrad = Array(arrGrad))
    exeTest.forward()
    val out = exeTest.outputs.head
    val npout = NDArray.sign(dataTmp)
    assert(reldiff(out, npout) < 1e-6)

    val outGrad = NDArray.ones(shape) * 2
    exeTest.backward(outGrad)
    arrGrad.toArray.foreach(elem => assert(elem === 0f +- 1e-3f))
  }

  // Forward-only check of round/ceil/floor summed together.
  // (arrGrad is allocated but unused — no backward pass here.)
  test("round, ceil, floor") {
    val data = Symbol.Variable("data")
    val shape = Shape(3, 4)
    val dataTmp = NDArray.ones(shape) * 5.543f
    val arrData = dataTmp.copy()
    val arrGrad = NDArray.ones(shape) * 2

    val test = Symbol.round()(data)() + Symbol.ceil()(data)() + Symbol.floor()(data)()
    val exeTest = test.bind(Context.cpu(), args = Array(arrData))
    exeTest.forward()
    val out = exeTest.outputs.head
    val npout = NDArray.round(dataTmp) + NDArray.ceil(dataTmp) + NDArray.floor(dataTmp)
    assert(reldiff(out, npout) < 1e-6)
  }

  // rsqrt + cos + sin, forward and analytic backward.
  test("rsqrt, cos, sin") {
    val data = Symbol.Variable("data")
    val shape = Shape(3, 4)
    val dataTmp = NDArray.ones(shape) * 5
    val arrData = dataTmp.copy()
    val arrGrad = NDArray.ones(shape) * 3

    val test = Symbol.rsqrt()(data)() + Symbol.cos()(data)() + Symbol.sin()(data)()
    val exeTest = test.bind(Context.cpu(), args = Array(arrData), argsGrad = Array(arrGrad))
    exeTest.forward()
    val out = exeTest.outputs.head
    val npout = {
      import org.apache.mxnet.NDArrayConversions._
      1 / NDArray.sqrt(dataTmp) + NDArray.cos(dataTmp) + NDArray.sin(dataTmp)
    }
    assert(reldiff(out, npout) < 1e-6)

    val outGrad = NDArray.ones(shape) * 2
    // d/dx: -1/(2*x*sqrt(x)) - sin(x) + cos(x), each scaled by outGrad
    val npoutGrad = {
      import org.apache.mxnet.NDArrayConversions._
      outGrad * -(1 / (2 * dataTmp * NDArray.sqrt(dataTmp))) +
        outGrad * -1 * NDArray.sin(dataTmp) + outGrad * NDArray.cos(dataTmp)
    }
    exeTest.backward(outGrad)
    assert(reldiff(arrGrad, npoutGrad) < 1e-6)
  }
  // Elementwise max of two symbols against Math.max on the raw arrays.
  // NOTE(review): both variables are named "data"; binding is positional
  // (args = Array(...)) so this works, but distinct names would be clearer.
  test("maximum") {
    val data1 = Symbol.Variable("data")
    val data2 = Symbol.Variable("data")
    val shape = Shape(3, 4)
    val dataTmp1 = Random.uniform(0, 100, shape)
    val dataTmp2 = Random.uniform(0, 100, shape)

    val arrData1 = dataTmp1.copy()
    val arrData2 = dataTmp2.copy()

    val test = Symbol.max(data1, data2)
    val exeTest = test.bind(Context.cpu(), args = Array(arrData1, arrData2))
    exeTest.forward()
    val out = exeTest.outputs.head
    val expected = (dataTmp1.toArray zip dataTmp2.toArray).map { case (a, b) => Math.max(a, b) }
    assert(reldiff(out.toArray, expected) < 1e-6)
  }

  // Elementwise min, mirroring the maximum test above.
  test("minimum") {
    val data1 = Symbol.Variable("data")
    val data2 = Symbol.Variable("data")
    val shape = Shape(3, 4)
    val dataTmp1 = Random.uniform(0, 100, shape)
    val dataTmp2 = Random.uniform(0, 100, shape)

    val arrData1 = dataTmp1.copy()
    val arrData2 = dataTmp2.copy()

    val test = Symbol.min(data1, data2)
    val exeTest = test.bind(Context.cpu(), args = Array(arrData1, arrData2))
    exeTest.forward()
    val out = exeTest.outputs.head
    val expected = (dataTmp1.toArray zip dataTmp2.toArray).map { case (a, b) => Math.min(a, b) }
    assert(reldiff(out.toArray, expected) < 1e-6)
  }
  // Transposes a 3x4 matrix and compares with a hand-built row/column swap.
  // The reference construction is hard-coded to shape (3, 4).
  test("transpose") {
    val data = Symbol.Variable("data")
    val test = Symbol.transpose()(data)()

    val shape = Shape(3, 4)
    val ctx = Context.cpu()
    val arrData = Random.uniform(0, 100, shape, ctx)

    // Build the expected transpose: column i of the 3x4 input becomes row i.
    val trans: Array[Float] = {
      val tmp = arrData.toArray.toList.grouped(4).toList
      for (i <- 0 until 4) yield {
        List(tmp(0)(i), tmp(1)(i), tmp(2)(i))
      }
    }.flatten.toArray

    val exeTest = test.bind(ctx, args = Map("data" -> arrData))
    exeTest.forward(isTrain = false)
    val out = exeTest.outputs.head
    assert(out.shape == Shape(4, 3))
    assert(reldiff(out.toArray, trans) < 1e-6)
  }
  // smooth_l1 wrapped in MakeLoss: checks the piecewise-quadratic forward
  // values and the clipped-to-[-1,1] gradient against hand-computed tables.
  test("smooth_l1 & makeloss") {
    val data = Symbol.Variable("data")
    val smoothL1 = Symbol.smooth_l1()()(Map("data" -> data, "scalar" -> 1.0f))
    val loss = Symbol.MakeLoss()()(Map("data" -> smoothL1))

    val shape = Shape(2, 6)
    val ctx = Context.cpu()
    val input = NDArray.empty(ctx, shape.toArray: _*)
    val grad = NDArray.empty(ctx, shape.toArray: _*)
    val array = Array[Float](
      -3.5f, -2.5f, -1.5f, -0.5f, -0.3f, -0.1f,
      0.1f, 0.3f, 0.5f, 1.5f, 2.5f, 3.5f)
    input.set(array)
    // Expected smooth-L1: 0.5*x^2 for |x| < 1, |x| - 0.5 otherwise.
    val arrTmp = Array[Float](
      3.0f, 2.0f, 1.0f, 0.125f, 0.045f, 0.005f,
      0.005f, 0.045f, 0.125f, 1.0f, 2.0f, 3.0f)
    // Expected gradient: x for |x| < 1, sign(x) otherwise.
    val gradTmp = Array[Float](
      -1.0f, -1.0f, -1.0f, -0.5f, -0.3f, -0.1f,
      0.1f, 0.3f, 0.5f, 1.0f, 1.0f, 1.0f)

    val exeTest =
      loss.bind(ctx, args = Map("data" -> input), argsGrad = Map("data" -> grad))
    exeTest.forward(isTrain = true)
    val out = exeTest.outputs.head

    assert(reldiff(out.toArray, arrTmp) < 1e-6)
    exeTest.backward()
    assert(reldiff(grad.toArray, gradTmp) < 1e-6)
  }
  // max/min against scalars on both sides; with data == 2 the four terms
  // evaluate to 3, 9, 2 and 2 respectively.
  test("maximum minimum scalar") {
    val data = Symbol.Variable("data")
    val shape = Shape(3, 4)
    val dataTmp = NDArray.ones(shape) * 2

    val arrData = dataTmp.copy()

    val test = Symbol.max(data, 3) + Symbol.max(9, data) + Symbol.min(5, data) + Symbol.min(data, 4)
    val exeTest = test.bind(Context.cpu(), args = Array(arrData))
    exeTest.forward()
    val out = exeTest.outputs.head
    // 3 + 9 + 2 + 2
    assert(reldiff(out, NDArray.ones(shape) * 16) < 1e-6)
  }

  // abs(x): forward matches NDArray.abs; gradient is outGrad * sign(x).
  test("abs") {
    val data = Symbol.Variable("data")
    val shape = Shape(3, 4)
    val dataTmp = NDArray.ones(shape) * 5
    val arrData = dataTmp.copy()
    val arrGrad = NDArray.ones(shape) * 3

    val test = Symbol.abs()(data)()
    val exeTest = test.bind(Context.cpu(), args = Array(arrData), argsGrad = Array(arrGrad))
    exeTest.forward()
    val out = exeTest.outputs.head
    val npout = NDArray.abs(dataTmp)
    assert(reldiff(out, npout) < 1e-6)

    val outGrad = NDArray.ones(shape) * 2
    val npoutGrad = outGrad * NDArray.sign(dataTmp)
    exeTest.backward(outGrad)
    assert(reldiff(arrGrad, npoutGrad) < 1e-6)
  }
  // configure A: input --> conv --> deconv --> output.
  // The convolution and deconvolution use matching parameters which ensure
  // the input shape is the same as the output shape, and share the same
  // weights between conv and deconv.
  // If the input value of forward() and backward() is the same, then
  // the output value of them should also be the same.
  private def checkDeconvolutionForwardBackward(inputShape: Shape,
                                                numFilter: Int,
                                                kernel: (Int, Int),
                                                stride: (Int, Int),
                                                pad: (Int, Int)): Unit = {
    // Channel count must equal numFilter so conv output feeds deconv directly.
    require(inputShape(1) == numFilter)
    val data = Symbol.Variable(name = "data")
    val conv = Symbol.Convolution(name = "conv")()(Map(
      "data" -> data, "kernel" -> kernel, "stride" -> stride, "pad" -> pad,
      "num_filter" -> numFilter, "no_bias" -> "true"))
    val deconv = Symbol.Deconvolution(name = "deconv")()(Map(
      "data" -> conv, "kernel" -> kernel, "stride" -> stride, "pad" -> pad,
      "num_filter" -> numFilter, "no_bias" -> "true"))
    val argNames = deconv.listArguments()
    val (argShapes, outShapes, _) = deconv.inferShape(Map("data" -> inputShape))
    val inputData = Random.uniform(-5, 5, inputShape)
    // Feed the same tensor forward and backward; outputs must then agree.
    val outGrad = inputData
    val convWeight = Random.normal(0, 1, Shape(numFilter, inputShape(1), kernel._1, kernel._2))
    val args: Map[String, NDArray] =
      Map("data" -> inputData, "conv_weight" -> convWeight, "deconv_weight" -> convWeight)
    val argsGrad: Seq[NDArray] = argShapes.map(NDArray.empty(_))
    val exe = deconv.bind(Context.cpu(), args = args, argsGrad = argsGrad)
    exe.forward()
    val out = exe.outputs.head
    exe.backward(outGrad)
    // Forward output must equal the gradient w.r.t. "data" (first arg).
    assert(reldiff(out, argsGrad.head) < 1e-6)
  }
test("deconvolution forward & backward") {
checkDeconvolutionForwardBackward(
inputShape = Shape(1, 1, 5, 5),
numFilter = 1,
kernel = (3, 3),
stride = (1, 1),
pad = (1, 1)
)
checkDeconvolutionForwardBackward(
inputShape = Shape(32, 3, 28, 28),
numFilter = 3,
kernel = (3, 3),
stride = (1, 1),
pad = (1, 1)
)
checkDeconvolutionForwardBackward(
inputShape = Shape(10, 3, 403, 403),
numFilter = 3,
kernel = (7, 7),
stride = (5, 5),
pad = (2, 2)
)
}
  // configure A: input --> conv --> output.
  // configure B: input --> deconv --> output
  // The convolution and deconvolution use matching parameters which ensure
  // the input shape is the same as the output shape.
  // During backward(), if the input of A equals the output of B, and the output
  // of A equals the input of B, then the weight gradients should be the same.
  private def checkDeconvolutionGradient(inputShape: Shape,
                                         numFilter: Int,
                                         pad: (Int, Int)): Unit = {
    val stride = (1, 1)
    // "Same" geometry: kernel sized so output shape equals input shape.
    val kernel = (2 * pad._1 + 1, 2 * pad._2 + 1)
    val dataConv = Symbol.Variable(name = "data_conv")
    val conv = Symbol.Convolution(name = "conv")()(Map(
      "data" -> dataConv, "kernel" -> kernel, "stride" -> stride, "pad" -> pad,
      "num_filter" -> numFilter, "no_bias" -> "true"))
    val dataDeconv = Symbol.Variable(name = "data_deconv")
    val deconv = Symbol.Deconvolution(name = "deconv")()(Map(
      "data" -> dataDeconv, "kernel" -> kernel, "stride" -> stride, "pad" -> pad,
      "num_filter" -> numFilter, "no_bias" -> "true"))
    val convData = Random.uniform(-5, 5, inputShape)
    val convArgs = Map("data_conv" -> convData,
      "conv_weight" -> Random.normal(0, 1, Shape(numFilter, inputShape(1), kernel._1, kernel._2)))
    val convArgsGrad = Seq(NDArray.zeros(convData.shape),
      NDArray.zeros(Shape(numFilter, inputShape(1), kernel._1, kernel._2)))
    val exeConv = conv.bind(Context.cpu(), args = convArgs, argsGrad = convArgsGrad)
    val convOutGrad = Random.normal(0, 2, exeConv.outputs.head.shape)
    exeConv.forward()
    exeConv.backward(convOutGrad)
    // Mirror the data flow: deconv input = conv's out-grad, shared weights.
    val deconvData = convOutGrad
    val deconvArgs = Map("data_deconv" -> deconvData, "deconv_weight" -> convArgs("conv_weight"))
    val deconvArgsGrad = Seq(NDArray.zeros(deconvData.shape),
      NDArray.zeros(Shape(numFilter, inputShape(1), kernel._1, kernel._2)))
    val exeDeconv = deconv.bind(Context.cpu(), args = deconvArgs, argsGrad = deconvArgsGrad)
    val deconvOutGrad = convData
    exeDeconv.forward()
    exeDeconv.backward(deconvOutGrad)
    // Weight gradients (index 1) of the mirrored graphs must match.
    assert(reldiff(convArgsGrad(1), deconvArgsGrad(1)) < 1e-5)
  }
test("deconvolution gradient") {
checkDeconvolutionGradient(
inputShape = Shape(1, 3, 5, 5),
numFilter = 3,
pad = (1, 1)
)
checkDeconvolutionGradient(
inputShape = Shape(5, 3, 100, 100),
numFilter = 3,
pad = (3, 3)
)
}
  // Binds an UpSampling symbol over several randomly-filled inputs, runs
  // forward then backward with the forward outputs as out-grads, and checks
  // each input gradient against the analytically expected scaling.
  private def checkNearestUpSamplingWithShape(shapes: Seq[Shape],
                                              scale: Int,
                                              rootScale: Int): Unit = {
    val arr = shapes.zipWithIndex.map { case (shape, i) =>
      (s"arg_$i", Random.uniform(-10, 10, shape))
    }.toMap
    val arrGrad = shapes.zipWithIndex.map { case (shape, i) =>
      (s"arg_$i", NDArray.zeros(shape))
    }.toMap
    val upArgs = (0 until shapes.size).map(i => Symbol.Variable(s"arg_$i"))
    val up = Symbol.UpSampling()(upArgs: _*)(Map("sample_type" -> "nearest", "scale" -> rootScale))
    val exe = up.bind(Context.cpu(), args = arr, argsGrad = arrGrad)
    exe.forward(isTrain = true)
    exe.backward(exe.outputs)
    for (k <- 0 until shapes.size) {
      val name = s"arg_$k"
      // Expected grad: each input element replicated rootScale^2 times, and
      // input k additionally upsampled by scale^(2k) relative to the largest
      // input — hence the rootScale^2 * scale^(2k) factor.
      val expected =
        arr(name).toArray.map(_ * Math.pow(rootScale, 2).toFloat * Math.pow(scale, 2 * k).toFloat)
      val real = arrGrad(name).toArray
      (expected zip real) foreach { case (e, r) =>
        assert(r === e +- 0.1f)
      }
    }
  }
test("nearest upsampling") {
for (rootScale <- 1 to 3) {
for (scale <- 1 to 3) {
for (numShape <- 1 to 3) {
for (base <- 1 to 3) {
val shapes = (0 until numShape).map(i =>
Shape(1, 3, base * rootScale * Math.pow(scale, numShape - 1 - i).toInt,
base * rootScale * Math.pow(scale, numShape - 1 - i).toInt))
checkNearestUpSamplingWithShape(shapes, scale, rootScale)
}
}
}
}
}
  test("batch norm") {
    // Only exercises symbol construction and JSON serialization of BatchNorm;
    // it makes no numerical assertions (see TODO below).
    val data = Symbol.Variable("data")
    val test = Symbol.BatchNorm(name = "bn")()(Map("data" -> data, "fix_gamma" -> "False"))
    // scalastyle:off println
    println(s"BatchNorm: ${test.toJson}")
    // scalastyle:on println
    // TODO: check numeric gradient
  }
/**
* Compare forward call to expected value.
* @param sym output symbol
* @param location list of numpy arrays corresponding to sym.list_arguments
* @param expected list of arrays corresponding to sym.outputs
* @param checkEps relative error to check to
*/
private def checkSymbolicForward(sym: Symbol,
location: Seq[NDArray],
expected: Seq[NDArray],
checkEps: Float = 1e-5f): Unit = {
val arrData = location.map(_.copy())
val arrGrad = location.map(array => NDArray.empty(array.shape))
val executor = sym.bind(Context.cpu(), args = arrData, argsGrad = arrGrad)
val inps = executor.argArrays
assert(inps.size === location.size,
s"Executor argArrays and and location len do not match." +
s"Got ${inps.size} inputs and ${location.size} locations")
for ((inp, source) <- location zip executor.argArrays) {
source.set(inp)
}
for (g <- executor.gradArrays) {
if (g != null) {
g.set(0f)
}
}
assert(executor.outputs.length === 1)
executor.forward()
for ((expect, output) <- expected zip executor.outputs) {
assert(reldiff(expect, output) <= checkEps)
}
}
/**
* Compare backwards call to expected value.
* @param sym output symbol
* @param location list of numpy arrays corresponding to sym.list_arguments
* @param grad list of numpy arrays corresponding to sym.outputs for incoming gradient
* @param expected list of arrays corresponding to sym.outputs
* @param checkEps relative error to check to
*/
private def checkSymbolicBackward(sym: Symbol,
location: Seq[NDArray],
grad: Seq[NDArray],
expected: Seq[NDArray],
checkEps: Float = 1e-5f): Unit = {
val arrData = location.map(_.copy())
val arrGrad = location.map(array => NDArray.empty(array.shape))
val outGrad = grad.map(_.copy()).toArray
val executor = sym.bind(Context.cpu(), args = arrData, argsGrad = arrGrad)
val inps = executor.argArrays
assert(inps.size === location.size,
s"Executor argArrays and and location len do not match." +
s"Got ${inps.size} inputs and ${location.size} locations")
for ((inp, source) <- location zip executor.argArrays) {
source.set(inp)
}
for (g <- executor.gradArrays) {
if (g != null) {
g.set(0f)
}
}
executor.forward()
executor.backward(outGrad)
for ((expect, grad) <- expected zip executor.gradArrays) {
assert(reldiff(expect, grad) <= checkEps)
}
}
}
| indhub/mxnet | scala-package/core/src/test/scala/org/apache/mxnet/OperatorSuite.scala | Scala | apache-2.0 | 38,523 |
package bhoot
import javax.servlet.http._
import Utils._
import UtilsServlet._
object Admin {
  val errorStr = "{success:false}"

  /** Handles the admin purge POST. When the "key" parameter equals
   *  "pluckAll", asynchronously kicks off bad-credential cleanup via
   *  AdminWorker and returns a success JSON string; otherwise returns
   *  the failure JSON. */
  def postPurge (request:Request, response:HttpServletResponse):String = {
    val keyIn = request.getParamOpt("key").getOrElse("")
    println(keyIn)
    keyIn match {
      case "pluckAll" =>
        AdminWorker ! AdminWorker.PurgeBadCreds
        "{success:true}"
      case _ =>
        errorStr
    }
  }
}
import scala.actors._
// Background actor that walks every stored Twitter credential, verifies it
// against the verify_credentials endpoint, and removes credentials that
// repeatedly come back 401. Results are reported to Notifier.
object AdminWorker extends Actor {
  case object PurgeBadCreds
  start
  import dispatch.{Http, StatusCode}
  import dispatch.twitter.Twitter
  import dispatch.oauth.OAuth._
  import dispatch.json.JsHttp._
  val http = new Http
  import Actor._
  def act() = {
    loop {
      react {
        case PurgeBadCreds => {
          import dispatch.{Request}
          // Accumulates ids (key._1) of credentials that were removed.
          var allPurged:List[Int]=Nil
          val dbKeys = dbHelper.getAllKeys
          dbKeys foreach {key =>
            // check if valid: signed request using the stored OAuth token pair
            val request = (new Request(Twitter.host / "1/account/verify_credentials.json")) <@
                    (Common.consumer, dispatch.oauth.Token(key._2,key._3))
            var done = false
            var retryCount = 0
            // Retry up to 5 attempts: only persistent 401s cause removal;
            // other errors (e.g. rate-limit 400s) are eventually ignored.
            while (!done) {
              if (retryCount > 0) {
                println("--------- Retrying ------- [%d]" format retryCount)
              }
              retryCount += 1
              try {
                http(request ># (obj)) // >> {response => println("Got back " + response)})
                done = true
              } catch {
                case StatusCode(401,contents) =>
                  // Unauthorized 5 times in a row => credential is dead; purge it.
                  if (retryCount > 4) {
                    dbHelper.removeKey(key._1,key._2, key._3)
                    allPurged ::= key._1
                    done = true
                  }
                case _ =>
                  // ignore other errors for now
                  // Note that verify-credentials is now rate limited and can cause 400 errors.
                  if (retryCount > 4) {
                    done = true
                  }
              }
            }
          }
          Notifier ! Notifier.Purged(allPurged)
        }
      } // react
    } // loop
  } // act
}
| hrj/tdash | framework/src/main/scala/admin.scala | Scala | gpl-3.0 | 2,184 |
package filodb.standalone
import scala.collection.JavaConverters._
import scala.concurrent.duration._
import akka.actor.ActorRef
import akka.remote.testkit.{MultiNodeConfig, MultiNodeSpec}
import akka.testkit.ImplicitSender
import com.softwaremill.sttp._
import com.softwaremill.sttp.akkahttp.AkkaHttpBackend
import com.softwaremill.sttp.circe._
import com.typesafe.config.ConfigFactory
import com.typesafe.scalalogging.StrictLogging
import net.ceedubs.ficus.Ficus._
import org.scalatest._
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import org.xerial.snappy.Snappy
import remote.RemoteStorage.{LabelMatcher, Query, ReadRequest, ReadResponse}
import filodb.coordinator._
import filodb.coordinator.NodeClusterActor.{DatasetResourceSpec, IngestionSource}
import filodb.coordinator.client.LocalClient
import filodb.core.DatasetRef
import filodb.core.store.StoreConfig
import filodb.prometheus.ast.TimeStepParams
import filodb.prometheus.parse.Parser
import filodb.query.{QueryError, Sampl, QueryResult => QueryResult2}
import filodb.query.PromCirceSupport
import filodb.query.Sampl
import filodb.query.SuccessResponse
import org.scalatest.flatspec.AnyFlatSpecLike
/**
* A trait used for MultiJVM tests based on starting the standalone FiloServer using timeseries-dev config
* (ie pretty much the same as deployed setup)
*/
abstract class StandaloneMultiJvmSpec(config: MultiNodeConfig) extends MultiNodeSpec(config)
with Suite with StrictLogging
with ScalaFutures with AnyFlatSpecLike with ImplicitSender
with matchers.should.Matchers with BeforeAndAfterAll {
  // One participant per role defined by the MultiNodeConfig.
  override def initialParticipants: Int = roles.size
  import akka.testkit._
  // Generous polling budget: multi-JVM cluster convergence is slow.
  override implicit val patienceConfig = PatienceConfig(timeout = Span(30, Seconds), interval = Span(250, Millis))
  lazy val watcher = TestProbe()
  // `dilated` scales with akka.test.timefactor for slow CI machines.
  val duration = 10.seconds.dilated
  val longDuration = 60.seconds
  val removedDuration = longDuration * 8
  // Ingestion Source section: everything below is parsed from the standalone
  // test source config so the spec mirrors a deployed setup.
  val source = ConfigFactory.parseFile(new java.io.File("conf/timeseries-standalonetest-source.conf"))
  val dataset = DatasetRef(source.as[String]("dataset"))
  val numShards = source.as[Int]("num-shards")
  val resourceSpec = DatasetResourceSpec(numShards, source.as[Int]("min-num-nodes"))
  val sourceconfig = source.getConfig("sourceconfig")
  val storeConf = StoreConfig(sourceconfig.getConfig("store"))
  // .get is intentional: the test config must declare a sourcefactory.
  val ingestionSource = source.as[Option[String]]("sourcefactory").map { factory =>
    IngestionSource(factory, sourceconfig)
  }.get
  val chunkDuration = storeConf.flushInterval
  val numGroupsPerShard = storeConf.groupsPerShard
  override def beforeAll(): Unit = multiNodeSpecBeforeAll()
  override def afterAll(): Unit = multiNodeSpecAfterAll()
def awaitNodeUp(server: FiloServer, within: FiniteDuration = duration): Unit = {
server.start()
awaitCond(server.cluster.isInitialized, within)
}
/** Execute within a `runOn`. */
def awaitNodeDown(server: FiloServer, within: FiniteDuration = longDuration * 2): Unit = {
server.shutdown()
awaitCond(server.cluster.isTerminated, within)
}
  // Consumes CurrentShardSnapshot messages (sent to the implicit test sender)
  // until a snapshot shows every shard active; each receive is bounded by
  // `duration`, so the loop fails fast if snapshots stop arriving.
  def waitAllShardsIngestionActive(): Unit = {
    var activeShards = 0
    while (activeShards < numShards) {
      expectMsgPF(duration) {
        case CurrentShardSnapshot(ref, map) =>
          activeShards = map.activeShards(0 until map.numShards).length
      }
    }
  }
  /**
   * Fetches the shard map for the spec's dataset and asserts node/shard
   * invariants, then applies `statusValidator` to shard statuses.
   * Fails the test if no shard map can be obtained.
   *
   * @param shards use when some are up and some down, to test different shard status;
   *               empty means "validate every shard"
   */
  def validateShardStatus(client: LocalClient,
                          coordinator: Option[ActorRef] = None,
                          shards: Seq[Int] = Seq.empty)
                         (statusValidator: ShardStatus => Boolean): Unit = {
    client.getShardMapper(dataset) match {
      case Some(map) =>
        info(s"Shard map: $map")
        info(s"Shard map nodes: ${map.allNodes}")
        if (coordinator.nonEmpty) coordinator forall (c => map.allNodes contains c) shouldEqual true
        map.allNodes.size shouldEqual 2 // only two nodes assigned
        map.shardValues.size shouldBe numShards
        shards match {
          case Seq() =>
            map.shardValues.forall { case (_, status) => statusValidator(status) } shouldEqual true
          case _ =>
            shards forall(shard => statusValidator(map.statusForShard(shard))) shouldEqual true
        }
      case _ =>
        fail(s"Unable to obtain status for dataset $dataset")
    }
  }
def validateShardAssignments(client: LocalClient,
nodeCount: Int,
assignments: Seq[Int],
coordinator: akka.actor.ActorRef): Unit =
client.getShardMapper(dataset) match {
case Some(mapper) =>
mapper.allNodes.size shouldEqual nodeCount
mapper.assignedShards shouldEqual Seq(0, 1, 2, 3)
mapper.unassignedShards shouldEqual Seq.empty
val shards = mapper.shardsForCoord(coordinator)
shards shouldEqual assignments
for {
shard <- shards
} info(s"shard($shard) ${mapper.statusForShard(shard)} $coordinator")
case _ =>
}
  // Logs the top index values for `tagKey` on each of the given shards;
  // diagnostic only, makes no assertions.
  def topValuesInShards(client: LocalClient, tagKey: String, shards: Seq[Int]): Unit = {
    shards.foreach { shard =>
      val values = client.getIndexValues(dataset, tagKey, shard)
      info(s"Top values for shard=$shard tag=$tagKey is: $values")
    }
  }
  // Range-vector query (with [1m] lookback) and its instant-vector twin.
  val query = "heap_usage{dc=\\"DC0\\",_ws_=\\"demo\\",_ns_=\\"App-2\\"}[1m]"
  val query1 = "heap_usage{dc=\\"DC0\\",_ws_=\\"demo\\",_ns_=\\"App-2\\"}"
  // queryTimestamp is in millis
  // Runs `query` through the embedded (CLI-style) client and returns the sum
  // of all returned sample values; fails the test if the result is empty.
  def runCliQuery(client: LocalClient, queryTimestamp: Long): Double = {
    val logicalPlan = Parser.queryToLogicalPlan(query, queryTimestamp/1000, 1000)
    val curTime = System.currentTimeMillis
    val result = client.logicalPlan2Query(dataset, logicalPlan) match {
      case r: QueryResult2 =>
        // Pair each sample with its offset from "now" (only the value is used).
        val vals = r.result.flatMap(_.rows.map { r => (r.getLong(0) - curTime, r.getDouble(1)) })
        // info(s"result values were $vals")
        vals.length should be > 0
        vals.map(_._2).sum
      case e: QueryError => fail(e.t)
    }
    info(s"CLI Query Result for $query at $queryTimestamp was $result")
    result
  }
  // Get a point for every minute of an interval for multiple time series for comparing missing data
  // TODO: maybe do a sum_over_time() as current windowing just gets last data point
  // Returns a map of time-series key (stringified) -> per-minute sample values.
  // startTime/endTime are in millis; the step is fixed at 60 seconds.
  def runRangeQuery(client: LocalClient, startTime: Long, endTime: Long): Map[String, Array[Double]] = {
    val logicalPlan = Parser.queryRangeToLogicalPlan(query1, TimeStepParams(startTime/1000, 60, endTime/1000))
    var totalSamples = 0
    client.logicalPlan2Query(dataset, logicalPlan) match {
      case r: QueryResult2 =>
        // Transform range query vectors
        val map = r.result.map { rv =>
          val sampleArray = rv.rows.map(_.getDouble(1)).toArray
          totalSamples += sampleArray.size
          rv.key.toString -> sampleArray
        }.toMap
        info(s"Range query result for interval [$startTime, $endTime]: ${map.size} rows, $totalSamples samples")
        map
      case e: QueryError => fail(e.t)
    }
  }
def compareRangeResults(map1: Map[String, Array[Double]], map2: Map[String, Array[Double]]): Unit = {
map1.keySet shouldEqual map2.keySet
map1.foreach { case (key, samples) =>
samples.toList shouldEqual map2(key).toList
}
}
  // Dumps chunk metadata for the test metric to stdout via the special
  // _filodb_chunkmeta_all pseudo-query; diagnostic only.
  def printChunkMeta(client: LocalClient): Unit = {
    val chunkMetaQuery = "_filodb_chunkmeta_all(heap_usage{dc=\\"DC0\\",_ws_=\\"demo\\",_ns_=\\"App-2\\"})"
    val logicalPlan = Parser.queryRangeToLogicalPlan(chunkMetaQuery, TimeStepParams(0, 60, Int.MaxValue))
    client.logicalPlan2Query(dataset, logicalPlan) match {
      case QueryResult2(_, schema, result) => result.foreach(rv => println(rv.prettyPrint()))
      case e: QueryError => fail(e.t)
    }
  }
  // Runs `query` against the HTTP PromQL endpoint on localhost:8080 and
  // returns the sum of all returned sample values. Should match runCliQuery
  // for the same timestamp. queryTimestamp is in millis.
  def runHttpQuery(queryTimestamp: Long): Double = {
    import io.circe.generic.auto._
    import PromCirceSupport._
    implicit val sttpBackend = AkkaHttpBackend()
    val url = uri"http://localhost:8080/promql/prometheus/api/v1/query?query=$query&time=${queryTimestamp/1000}"
    info(s"Querying: $url")
    // .unsafeBody / .right.get: any transport or decode failure fails the test.
    val result1 = sttp.get(url).response(asJson[SuccessResponse]).send().futureValue.unsafeBody.right.get.data.result
    val result = result1.flatMap(_.values.get.collect { case d: Sampl => (d.timestamp, d.value) })
    info(s"result values were $result")
    result.length should be > 0
    val sum = result.map(_._2).sum
    info(s"HTTP Query Result for $query at $queryTimestamp was $sum")
    sum
  }
  // Issues a Prometheus remote-read request (snappy-compressed protobuf) for
  // the same series as `query` and returns the sum of all sample values.
  // The 1-minute subtraction aligns the window with the [1m] lookback used by
  // the CLI/HTTP query paths so results are comparable.
  def runRemoteReadQuery(queryTimestamp: Long): Double = {
    implicit val sttpBackend = AkkaHttpBackend()
    val start = queryTimestamp / 1000 * 1000 - 1.minutes.toMillis // needed to make it equivalent to http/cli queries
    val end = queryTimestamp / 1000 * 1000
    val nameMatcher = LabelMatcher.newBuilder().setName("__name__").setValue("heap_usage")
    val dcMatcher = LabelMatcher.newBuilder().setName("dc").setValue("DC0")
    val wsMatcher = LabelMatcher.newBuilder().setName("_ws_").setValue("demo")
    val jobMatcher = LabelMatcher.newBuilder().setName("_ns_").setValue("App-2")
    val query = Query.newBuilder().addMatchers(wsMatcher)
      .addMatchers(nameMatcher)
      .addMatchers(dcMatcher)
      .addMatchers(jobMatcher)
      .setStartTimestampMs(start)
      .setEndTimestampMs(queryTimestamp / 1000 * 1000)
    // Remote-read payloads are snappy-compressed on the wire, both directions.
    val rr = Snappy.compress(ReadRequest.newBuilder().addQueries(query).build().toByteArray())
    val url = uri"http://localhost:8080/promql/prometheus/api/v1/read"
    info(s"Querying: $url")
    val result1 = sttp.post(url).body(rr).response(asByteArray).send().futureValue.unsafeBody
    val result = ReadResponse.parseFrom(Snappy.uncompress(result1))
    val values = result.getResultsList().asScala
      .flatMap(_.getTimeseriesList.asScala).flatMap(_.getSamplesList().asScala.map(_.getValue()))
    info(s"result values were $values")
    info(s"Remote Read Query Result for $query at $queryTimestamp was ${values.sum}")
    values.sum
  }
}
| tuplejump/FiloDB | standalone/src/multi-jvm/scala/filodb/standalone/StandaloneMultiJvmSpec.scala | Scala | apache-2.0 | 10,268 |
package aws.daleks.eager
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.regions.Region
import scala.collection.JavaConverters._
import com.amazonaws.services.sqs.AmazonSQSClient
import com.amazonaws.services.sqs.model.DeleteQueueRequest
import aws.daleks.util.Humid
/** Dalek that deletes every SQS queue visible in the configured region. */
class EagerSQSDalek(implicit region: Region, credentials: AWSCredentialsProvider) extends Dalek {
  val sqs = withRegion(new AmazonSQSClient(credentials), region)

  def exterminate = {
    for (queueUrl <- sqs.listQueues.getQueueUrls.asScala) {
      // Log first, then delete. The destructive call is wrapped in Humid —
      // presumably a dry-run guard; confirm in aws.daleks.util.Humid.
      println("Esterminating SQS Queue " + queueUrl)
      Humid {
        sqs.deleteQueue(new DeleteQueueRequest().withQueueUrl(queueUrl))
      }
    }
  }
}
package org.niohiki.quark.util
import java.awt.Graphics2D
import java.awt.geom.AffineTransform
import org.niohiki.quark.core.BBox
import org.niohiki.quark.core.Collidable
import org.niohiki.quark.core.Environment
import org.niohiki.quark.core.Renderable
import org.niohiki.quark.core.Spatial
/**
 * A renderable sprite backed by a named image resource.
 *
 * @param image_name key used to look the image up in `resources`
 * @param init_transform optional hook that mutates the transform at
 *        construction time, before the centering translation is applied
 * @param resources by-name resource provider; the image is re-fetched from
 *        it on every access (assumes the provider caches — TODO confirm)
 */
class Image(image_name: String, init_transform: AffineTransform => Unit = null,
  resources: => Resources = DefaultResources)
  extends Spatial with Renderable {
  // Re-resolved on each call via the by-name `resources` parameter.
  def image = resources.getImage(image_name)
  val transform = new AffineTransform
  if (init_transform != null) {
    init_transform(transform)
  }
  // Centering translation is appended AFTER the custom init_transform,
  // so the image is drawn centered on the transform's origin.
  transform.translate(-image.getWidth / 2, -image.getHeight / 2)
  def render(env: Environment, g: Graphics2D) {
    g.drawImage(image, transform, null)
  }
  // Rotates in place around the image center.
  def rotate(angle: Double) {
    transform.rotate(angle, image.getWidth / 2, image.getHeight / 2)
  }
}
/**
 * An [[Image]] that also participates in collision detection; its bounding
 * box is fixed at construction to the image's full width and height.
 */
class Tile(image_name: String, init_transform: AffineTransform => Unit = null,
  resources: => Resources = DefaultResources)
  extends Image(image_name, init_transform, resources) with Collidable {
  // Computed once; does not track later transform changes (e.g. rotate).
  private val b_box = new BBox(image.getWidth, image.getHeight)
  def bBox = b_box
}
package knot.core.emitters
import knot.core._
import knot.core.cell.Cell
import knot.core.dispatch.Dispatcher
/**
 * Execution context handed to a [[Cell]], exposing its lifecycle, messaging
 * peers, behavior switching and child-cell creation.
 */
trait CellContext {
  /** Whether this cell has been started. */
  def isStarted: Boolean
  /** Stops this cell. */
  def stop(): Unit
  /** Signals a failure with the given cause — presumably propagated to the
   *  supervising structure; confirm against Cell implementations. */
  def error(cause: Throwable): Unit
  /** Adapter for the emitter of the message currently being processed. */
  def sender: EmitterAdapter
  /** Replaces the cell's current message-handling behavior. */
  def become(behavior: Cell.Behavior): Unit
  /** Reverts to the previous behavior set via `become`. */
  def unbecome(): Unit
  /** Adapter for this cell itself; implicit so it is picked up as the
   *  default sender when emitting messages. */
  implicit def self: EmitterAdapter
  /** Dispatcher this cell runs on. */
  def dispatcher: Dispatcher
  /** Adapter for this cell's parent. */
  def parent: EmitterAdapter
  /** The structure (topology) this cell belongs to. */
  def structure: Structure
  /** Creates a child cell from the given provider. */
  def cellOf[T <: Cell](provider: Provider[T]): EmitterAdapter
  /** Creates a named child cell, optionally decorated. */
  def cellOf[T <: Cell](provider: Provider[T], name: String, decorations: Decorations = Decorations.empty): EmitterAdapter
}
| defvar/knot | knot-core/src/main/scala/knot/core/emitters/CellContext.scala | Scala | mit | 629 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.sql.{Connection, DriverManager, PreparedStatement}
import org.apache.spark.{Logging, Partition}
import org.apache.spark.sql._
import org.apache.spark.sql.sources.LogicalRelation
import org.apache.spark.sql.jdbc.{JDBCPartitioningInfo, JDBCRelation, JDBCPartition}
import org.apache.spark.sql.types._
package object jdbc {
private[sql] object JDBCWriteDetails extends Logging {
/**
* Returns a PreparedStatement that inserts a row into table via conn.
*/
def insertStatement(conn: Connection, table: String, rddSchema: StructType):
PreparedStatement = {
val sql = new StringBuilder(s"INSERT INTO $table VALUES (")
var fieldsLeft = rddSchema.fields.length
while (fieldsLeft > 0) {
sql.append("?")
if (fieldsLeft > 1) sql.append(", ") else sql.append(")")
fieldsLeft = fieldsLeft - 1
}
conn.prepareStatement(sql.toString)
}
    /**
     * Saves a partition of a DataFrame to the JDBC database. This is done in
     * a single database transaction in order to avoid repeatedly inserting
     * data as much as possible.
     *
     * It is still theoretically possible for rows in a DataFrame to be
     * inserted into the database more than once if a stage somehow fails after
     * the commit occurs but before the stage can return successfully.
     *
     * This is not a closure inside saveTable() because apparently cosmetic
     * implementation changes elsewhere might easily render such a closure
     * non-Serializable. Instead, we explicitly close over all variables that
     * are used.
     *
     * @param nullTypes JDBC SQL type code to use per column when a value is null
     * @return an empty iterator (required shape for use with mapPartitions-style APIs)
     */
    def savePartition(url: String, table: String, iterator: Iterator[Row],
        rddSchema: StructType, nullTypes: Array[Int]): Iterator[Byte] = {
      val conn = DriverManager.getConnection(url)
      var committed = false
      try {
        conn.setAutoCommit(false) // Everything in the same db transaction.
        val stmt = insertStatement(conn, table, rddSchema)
        try {
          while (iterator.hasNext) {
            val row = iterator.next()
            val numFields = rddSchema.fields.length
            var i = 0
            // Bind each column by its Spark SQL type; JDBC parameters are 1-based.
            while (i < numFields) {
              if (row.isNullAt(i)) {
                stmt.setNull(i + 1, nullTypes(i))
              } else {
                rddSchema.fields(i).dataType match {
                  case IntegerType => stmt.setInt(i + 1, row.getInt(i))
                  case LongType => stmt.setLong(i + 1, row.getLong(i))
                  case DoubleType => stmt.setDouble(i + 1, row.getDouble(i))
                  case FloatType => stmt.setFloat(i + 1, row.getFloat(i))
                  case ShortType => stmt.setInt(i + 1, row.getShort(i))
                  case ByteType => stmt.setInt(i + 1, row.getByte(i))
                  case BooleanType => stmt.setBoolean(i + 1, row.getBoolean(i))
                  case StringType => stmt.setString(i + 1, row.getString(i))
                  case BinaryType => stmt.setBytes(i + 1, row.getAs[Array[Byte]](i))
                  case TimestampType => stmt.setTimestamp(i + 1, row.getAs[java.sql.Timestamp](i))
                  case DateType => stmt.setDate(i + 1, row.getAs[java.sql.Date](i))
                  case DecimalType.Unlimited => stmt.setBigDecimal(i + 1,
                      row.getAs[java.math.BigDecimal](i))
                  case _ => throw new IllegalArgumentException(
                      s"Can't translate non-null value for field $i")
                }
              }
              i = i + 1
            }
            stmt.executeUpdate()
          }
        } finally {
          stmt.close()
        }
        conn.commit()
        committed = true
      } finally {
        if (!committed) {
          // The stage must fail. We got here through an exception path, so
          // let the exception through unless rollback() or close() want to
          // tell the user about another problem.
          conn.rollback()
          conn.close()
        } else {
          // The stage must succeed. We cannot propagate any exception close() might throw.
          try {
            conn.close()
          } catch {
            case e: Exception => logWarning("Transaction succeeded, but closing failed", e)
          }
        }
      }
      Array[Byte]().iterator
    }
/**
* Compute the schema string for this RDD.
*/
def schemaString(df: DataFrame, url: String): String = {
val sb = new StringBuilder()
val quirks = DriverQuirks.get(url)
df.schema.fields foreach { field => {
val name = field.name
var typ: String = quirks.getJDBCType(field.dataType)._1
if (typ == null) typ = field.dataType match {
case IntegerType => "INTEGER"
case LongType => "BIGINT"
case DoubleType => "DOUBLE PRECISION"
case FloatType => "REAL"
case ShortType => "INTEGER"
case ByteType => "BYTE"
case BooleanType => "BIT(1)"
case StringType => "TEXT"
case BinaryType => "BLOB"
case TimestampType => "TIMESTAMP"
case DateType => "DATE"
case DecimalType.Unlimited => "DECIMAL(40,20)"
case _ => throw new IllegalArgumentException(s"Don't know how to save $field to JDBC")
}
val nullable = if (field.nullable) "" else "NOT NULL"
sb.append(s", $name $typ $nullable")
}}
if (sb.length < 2) "" else sb.substring(2)
}
/**
* Saves the RDD to the database in a single transaction.
*/
def saveTable(df: DataFrame, url: String, table: String) {
val quirks = DriverQuirks.get(url)
var nullTypes: Array[Int] = df.schema.fields.map(field => {
var nullType: Option[Int] = quirks.getJDBCType(field.dataType)._2
if (nullType.isEmpty) {
field.dataType match {
case IntegerType => java.sql.Types.INTEGER
case LongType => java.sql.Types.BIGINT
case DoubleType => java.sql.Types.DOUBLE
case FloatType => java.sql.Types.REAL
case ShortType => java.sql.Types.INTEGER
case ByteType => java.sql.Types.INTEGER
case BooleanType => java.sql.Types.BIT
case StringType => java.sql.Types.CLOB
case BinaryType => java.sql.Types.BLOB
case TimestampType => java.sql.Types.TIMESTAMP
case DateType => java.sql.Types.DATE
case DecimalType.Unlimited => java.sql.Types.DECIMAL
case _ => throw new IllegalArgumentException(
s"Can't translate null value for field $field")
}
} else nullType.get
}).toArray
val rddSchema = df.schema
df.foreachPartition { iterator =>
JDBCWriteDetails.savePartition(url, table, iterator, rddSchema, nullTypes)
}
}
}
} // package object jdbc
| hengyicai/OnlineAggregationUCAS | sql/core/src/main/scala/org/apache/spark/sql/jdbc/jdbc.scala | Scala | apache-2.0 | 7,698 |
/**
* Copyright 2015 Otto (GmbH & Co KG)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.schedoscope.scheduler.messages
import akka.actor.ActorRef
import org.joda.time.LocalDateTime
import org.schedoscope.dsl.View
import org.schedoscope.dsl.transformations.Transformation
import org.schedoscope.scheduler.driver._
import org.schedoscope.scheduler.messages.MaterializeViewMode.MaterializeViewMode
import org.schedoscope.scheduler.states.{PartyInterestedInViewSchedulingStateChange, ViewSchedulingAction, ViewSchedulingState}
import scala.util.Try
/**
 * Envelope routing a message to a specific view actor, optionally carrying
 * the sending view for correlation.
 */
case class CommandForView(sender: Option[View], receiver: View, anyRef: AnyRef)
/**
 * Superclass for failure messages.
 */
sealed class Failure
/**
 * View actor signals its materialization failure to a waiting view actor.
 */
case class ViewFailed(view: View) extends Failure
/**
 * Driver actor signaling a failure of a transformation that requires a retry by the receiving view actor.
 */
case class TransformationFailure[T <: Transformation](driverRunHandle: DriverRunHandle[T], driverRunState: DriverRunFailed[T]) extends Failure
/**
 * Superclass for commands sent to actors.
 */
sealed class CommandRequest
/**
 * Instructs the partition creator actor to verify or create all partitions that are specified in any
 * of the views in parameter 'views'.
 */
case class AddPartitions(views: List[View]) extends CommandRequest
/**
 * Instructs the partition creator actor to verify or create all tables defined in the specified list of views.
 */
case class CheckOrCreateTables(views: List[View]) extends CommandRequest
/**
 * Instructs the metadata logger actor to record the version information of the given view
 * in the metastore.
 */
case class SetViewVersion(view: View) extends CommandRequest
/**
 * Instructs the metadata logger actor to record the timestamp of the last transformation of a
 * given view in the metastore.
 */
case class LogTransformationTimestamp(view: View, timestamp: Long) extends CommandRequest
/**
 * Command to kill a running transformation. Actual outcome depends on the ability of the
 * associated driver to be able to do that.
 */
case class KillCommand() extends CommandRequest
/**
 * Command to driver actors to instruct their drivers to deploy their resources (e.g. UDFs) to the distributed file system.
 */
case class DeployCommand() extends CommandRequest
/**
 * Tells a driver actor to execute a transformation.
 *
 * @param transformation to execute
 * @param view to transform
 */
case class TransformView(transformation: Transformation, view: View) extends CommandRequest
/**
 * Instructs a driver actor to perform a command, such as a transformation. It comes along
 * with the reference to the actor that requested the action. The driver actor can then
 * notify the sender about the outcome.
 */
case class DriverCommand(command: AnyRef, sender: ActorRef) extends CommandRequest
/**
 * Request to the transformation manager to generate a summary of currently running actions.
 */
case class GetTransformations() extends CommandRequest
/**
 * Request to the transformation manager to return the state of all driver actors.
 */
case class GetTransformationStatusList(statusRequester: ActorRef, transformationQueueStatus: Map[String, List[String]], driverActors: Seq[ActorRef]) extends CommandRequest
/**
 * Request to the view manager actor to retrieve information of the currently instantiated views.
 *
 * @param views A list of views to retrieve information from, may be empty
 * @param status filter the result by view status
 * @param issueFilter filter the result by issue (semantics defined by the view manager — confirm there)
 * @param filter filter the result by regular expression on the view name
 * @param dependencies also return all dependent views
 */
case class GetViews(views: Option[List[View]], status: Option[String], issueFilter: Option[String], filter: Option[String], dependencies: Boolean = false)
/**
 * Request to view manager to send a message to a specific view.
 *
 * @param view target view
 * @param message payload
 */
case class DelegateMessageToView(view: View, message: AnyRef) extends CommandRequest
/**
 * Request to the view manager to return the state of all views.
 */
case class GetViewStatusList(statusRequester: ActorRef, viewActors: Iterable[ActorRef]) extends CommandRequest
/**
 * Flags for the MaterializeView command.
 * NOTE: value declaration order is significant — Enumeration assigns ids in order.
 */
object MaterializeViewMode extends Enumeration {
  type MaterializeViewMode = Value
  val DEFAULT, // no special mode
  RESET_TRANSFORMATION_CHECKSUMS, // do not consider version checksum changes when making transformation decisions
  RESET_TRANSFORMATION_CHECKSUMS_AND_TIMESTAMPS, // perform a transformation dry run, only update checksums and timestamps
  TRANSFORM_ONLY, // directly transform a view without materializing its dependencies
  SET_ONLY // set the view to materialized without materializing its dependencies and without transforming itself
  = Value
}
/**
 * Instructs a table actor to initialize a list of views, if not yet initialized.
 *
 * @param vs list of views to initialize
 */
case class InitializeViews(vs: List[View]) extends CommandRequest
/**
 * Instructs a view actor to materialize itself.
 *
 * @param mode materialization mode (see [[MaterializeViewMode]])
 */
case class MaterializeView(mode: MaterializeViewMode.MaterializeViewMode = MaterializeViewMode.DEFAULT) extends CommandRequest
/**
 * Special [[MaterializeView]] command which will refresh the metadata of a view before materializing it.
 * Used for external views.
 *
 * @param mode materialization mode
 */
case class MaterializeExternalView(mode: MaterializeViewMode.MaterializeViewMode = MaterializeViewMode.DEFAULT)
  extends CommandRequest
/**
 * Special [[MaterializeView]] command which will stub a view. This is done by copying the data
 * from a different environment.
 * Used for development.
 */
case class MaterializeViewAsStub()
  extends CommandRequest
/**
 * Request for the SchemaManager to retrieve partition / table metadata for a view.
 *
 * @param view the view to be materialized
 * @param mode materialization mode
 * @param materializeSource sender of the materialize command
 */
case class GetMetaDataForMaterialize(view: View,
                                     mode: MaterializeViewMode = MaterializeViewMode.DEFAULT,
                                     materializeSource: PartyInterestedInViewSchedulingStateChange) extends CommandRequest
/**
 * Instructs a view actor to assume that its data needs to be recomputed.
 */
case class InvalidateView() extends CommandRequest
/**
 * Instructs a view actor to retry a transformation after a failure.
 */
case class Retry() extends CommandRequest
/**
 * Base class for responses to commands.
 */
sealed class CommandResponse
/**
 * Driver actor notifying the transformation manager actor of successful resource deployment.
 */
case class DeployCommandSuccess() extends CommandResponse
/**
 * Notification for a view actor about a new table actor reference for a view.
 *
 * @param view the view the actor reference relates to
 * @param viewRef the actor reference associated with that view
 */
case class NewTableActorRef(view: View, viewRef: ActorRef) extends CommandResponse
/**
 * Schema actor or metadata logger notifying view manager actor or view actor of successful schema action.
 */
case class SchemaActionSuccess() extends CommandResponse
/**
 * Schema actor notifying view actor about the metadata of the view.
 *
 * @param metadata the view paired with its (version checksum, timestamp) metadata
 * @param mode transformation mode
 * @param materializeSource sender of the [[MaterializeView]] command
 */
case class MetaDataForMaterialize(metadata: (View, (String, Long)),
                                  mode: MaterializeViewMode,
                                  materializeSource: PartyInterestedInViewSchedulingStateChange) extends CommandResponse
/**
 * Driver actor notifying view actor of successful transformation.
 *
 * @param driverRunHandle run handle of the executing driver
 * @param driverRunState return state of the driver
 * @param viewHasData whether the transformed view produced data
 */
case class TransformationSuccess[T <: Transformation](driverRunHandle: DriverRunHandle[T], driverRunState: DriverRunSucceeded[T], viewHasData: Boolean) extends CommandResponse
/**
 * Response message of transformation manager actor with the state of actions.
 *
 * @param transformationStatusList list of TransformationStatusResponse entries
 * @see TransformationStatusResponse
 */
case class TransformationStatusListResponse(transformationStatusList: List[TransformationStatusResponse[_]]) extends CommandResponse
/**
 * Response message of view manager actor with the state of view actors.
 *
 * @param viewStatusList list of view metadata, or a failure if the views for the actors could not be initialized
 * @see ViewStatusResponse
 */
case class ViewStatusListResponse(viewStatusList: Try[List[ViewStatusResponse]]) extends CommandResponse
/**
 * Driver actor responding to the transformation manager actor with the state of the running transformation.
 *
 * @param message textual description of the state
 * @param actor reference to the driver actor
 * @param driver the driver executing the transformation
 * @param driverRunHandle run handle of a running transformation
 * @param driverRunStatus state of a running transformation
 */
case class TransformationStatusResponse[T <: Transformation](message: String, actor: ActorRef, driver: Driver[T], driverRunHandle: DriverRunHandle[T], driverRunStatus: DriverRunState[T]) extends CommandResponse
/**
 * View actor responding to the view manager actor with the state of the view.
 *
 * @param status textual description of the status
 * @param view reference to the corresponding view
 * @param actor actor reference to the ViewActor
 * @param errors true if some transformations in that subtree have been failing
 * @param incomplete true if not all transitive dependencies had data available
 */
case class ViewStatusResponse(status: String, view: View, actor: ActorRef, errors: Option[Boolean] = None, incomplete: Option[Boolean] = None) extends CommandResponse
/**
 * Schema actor returning the stored transformation metadata (version checksum, timestamp) retrieved from the metadata store.
 *
 * @param metadata contains metadata for a set of views
 */
case class TransformationMetadata(metadata: Map[View, (String, Long)]) extends CommandResponse
/**
 * A view actor notifying a depending view that it has no data available.
 */
case class ViewHasNoData(view: View) extends CommandResponse
/**
 * A view actor notifying a depending view that it has materialized.
 *
 * @param view view that has been changed
 * @param incomplete true if not all transitive dependencies had data available
 * @param transformationTimestamp transformation timestamp propagated through the dependency tree
 *                                (the original comment was unsure whether this is the oldest one -- confirm)
 * @param errors true if some transformations in that subtree have been failing
 */
case class ViewMaterialized(view: View, incomplete: Boolean, transformationTimestamp: Long, errors: Boolean) extends CommandResponse
/**
 * Superclass for all view scheduling monitoring messages exchanged by actors.
 */
sealed class ViewSchedulingMonitoring
/**
 * Message exchanged between view actors and the view scheduling manager actor
 * to find out whether any handler classes are instantiated.
 *
 * @param handlerClassName name of the view scheduling handler class
 */
case class CollectViewSchedulingStatus(handlerClassName: String) extends ViewSchedulingMonitoring
/**
 * Message sent from ViewSchedulingListener actors on
 * RetryableViewSchedulingListenerException to register
 * on ViewSchedulingManagerActor to recover the latest event
 * per view on post-restart.
 *
 * @param handlerClassName name of the handler class whose listener failed
 */
case class RegisterFailedListener(handlerClassName: String) extends ViewSchedulingMonitoring
/**
 * Snapshot of a view scheduling state transition.
 * (NOTE(review): the original scaladoc here was copy-pasted from
 * [[CollectViewSchedulingStatus]] and did not describe this message.)
 *
 * @param prevState scheduling state before the transition
 * @param newState scheduling state after the transition
 * @param actions scheduling actions associated with the transition
 * @param eventTime time at which the transition occurred
 */
case class ViewSchedulingMonitoringEvent(prevState: ViewSchedulingState,
                                         newState: ViewSchedulingState,
                                         actions: Set[ViewSchedulingAction],
                                         eventTime: LocalDateTime) extends ViewSchedulingMonitoring | christianrichter/schedoscope | schedoscope-core/src/main/scala/org/schedoscope/scheduler/messages/Messages.scala | Scala | apache-2.0 | 12,630
//
// MessagePack for Java
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package org.msgpack.core
import java.io._
import java.nio.ByteBuffer
import java.util.Collections
import org.msgpack.core.buffer._
import org.msgpack.value.ValueType
import xerial.core.io.IOUtil._
import scala.collection.JavaConverters._
import scala.util.Random
object MessageUnpackerTest {

  /**
   * A [[MessageBufferInput]] that serves a fixed sequence of byte-array chunks
   * one at a time and signals end-of-input by returning null once all chunks
   * have been consumed. Used to simulate values straddling buffer boundaries.
   */
  class SplitMessageBufferInput(array: Array[Array[Byte]]) extends MessageBufferInput {
    // index of the next chunk to hand out
    var cursor = 0

    override def next(): MessageBuffer =
      if (cursor >= array.length) {
        null // no more chunks
      } else {
        val chunk = array(cursor)
        cursor += 1
        MessageBuffer.wrap(chunk)
      }

    override def close(): Unit = ()
  }
}
import MessageUnpackerTest._
/**
 * Tests for [[MessageUnpacker]]: round-trip unpacking, skipping, buffer-boundary
 * handling, unpacker reset behavior, and performance comparisons against the
 * legacy msgpack v6 implementation.
 */
class MessageUnpackerTest extends MessagePackSpec {
  // true when MessageBuffer.allocate returns the universal (MessageBufferU)
  // implementation; DirectByteBuffer-backed unpackers are skipped in that mode.
  val universal = MessageBuffer.allocate(0).isInstanceOf[MessageBufferU]
  // Packs two [Int, String] arrays; used by the basic unpack/skip tests.
  def testData: Array[Byte] = {
    val out = new ByteArrayOutputStream()
    val packer = MessagePack.newDefaultPacker(out)
    packer
      .packArrayHeader(2)
      .packInt(1)
      .packString("leo")
      .packArrayHeader(2)
      .packInt(5)
      .packString("aina")
    packer.close()
    val arr = out.toByteArray
    debug(s"packed: ${toHex(arr)}, size:${arr.length}")
    arr
  }
  // 100 random ints used as the reference payload for testData2.
  val intSeq = (for (i <- 0 until 100) yield Random.nextInt()).toArray[Int]
  // Packs two booleans followed by all values of intSeq.
  def testData2: Array[Byte] = {
    val out = new ByteArrayOutputStream()
    val packer = MessagePack.newDefaultPacker(out);
    packer
      .packBoolean(true)
      .packBoolean(false)
    intSeq.foreach(packer.packInt)
    packer.close()
    val arr = out.toByteArray
    debug(s"packed: ${toHex(arr)}")
    arr
  }
  // Packs one random value whose message format is drawn from all formats
  // except NEVER_USED; arrays and maps recurse into nested random values.
  // NOTE(review): procedure syntax (no `=` before the body) is deprecated.
  def write(packer: MessagePacker, r: Random) {
    val tpeIndex = Iterator
      .continually(r.nextInt(MessageFormat.values().length))
      .find(_ != MessageFormat.NEVER_USED.ordinal())
      .get
    val tpe = MessageFormat.values()(tpeIndex)
    tpe.getValueType match {
      case ValueType.INTEGER =>
        val v = r.nextInt(Int.MaxValue)
        trace(s"int: $v")
        packer.packInt(v)
      case ValueType.FLOAT =>
        val v = r.nextFloat()
        trace(s"float $v")
        packer.packFloat(v)
      case ValueType.BOOLEAN =>
        val v = r.nextBoolean()
        trace(s"boolean $v")
        packer.packBoolean(v)
      case ValueType.STRING =>
        val v = r.alphanumeric.take(r.nextInt(100)).mkString
        trace(s"string $v")
        packer.packString(v)
      case ValueType.BINARY =>
        val len = r.nextInt(100)
        val b = new Array[Byte](len)
        r.nextBytes(b)
        trace(s"binary: ${toHex(b)}")
        packer.packBinaryHeader(b.length)
        packer.writePayload(b)
      case ValueType.ARRAY =>
        val len = r.nextInt(5)
        trace(s"array len: $len")
        packer.packArrayHeader(len)
        var i = 0
        while (i < len) {
          write(packer, r)
          i += 1
        }
      case ValueType.MAP =>
        val len = r.nextInt(5) + 1
        packer.packMapHeader(len)
        trace(s"map len: ${len}")
        var i = 0
        while (i < len * 2) {
          write(packer, r)
          i += 1
        }
      case _ =>
        val v = r.nextInt(Int.MaxValue)
        trace(s"int: $v")
        packer.packInt(v)
    }
  }
  // Packs N random top-level values (fixed seed, so the data is reproducible).
  def testData3(N: Int): Array[Byte] = {
    val out = new ByteArrayOutputStream()
    val packer = MessagePack.newDefaultPacker(out)
    val r = new Random(0)
    (0 until N).foreach { i =>
      write(packer, r)
    }
    packer.close()
    val arr = out.toByteArray
    trace(s"packed: ${toHex(arr)}")
    debug(s"size:${arr.length}")
    arr
  }
  // Reads one value from the unpacker, dispatching on the next format;
  // for arrays/maps only the header is consumed here.
  def readValue(unpacker: MessageUnpacker) {
    val f = unpacker.getNextFormat()
    f.getValueType match {
      case ValueType.ARRAY =>
        val arrLen = unpacker.unpackArrayHeader()
        debug(s"arr size: $arrLen")
      case ValueType.MAP =>
        val mapLen = unpacker.unpackMapHeader()
        debug(s"map size: $mapLen")
      case ValueType.INTEGER =>
        val i = unpacker.unpackLong()
        debug(s"int value: $i")
      case ValueType.STRING =>
        val s = unpacker.unpackString()
        debug(s"str value: $s")
      case other =>
        unpacker.skipValue()
        debug(s"unknown type: $f")
    }
  }
  // Creates a temp file containing a single packed int (99); used by the reset tests.
  def createTempFile = {
    val f = File.createTempFile("msgpackTest", "msgpack")
    f.deleteOnExit
    val p = MessagePack.newDefaultPacker(new FileOutputStream(f))
    p.packInt(99)
    p.close
    f
  }
  // Verifies that an unpacker over a createTempFile file yields exactly one int 99.
  def checkFile(u: MessageUnpacker) = {
    u.unpackInt shouldBe 99
    u.hasNext shouldBe false
  }
  // Builds unpackers over the same bytes backed by a byte array, a heap
  // ByteBuffer, and (when supported) a direct ByteBuffer.
  def unpackers(data: Array[Byte]): Seq[MessageUnpacker] = {
    val bb = ByteBuffer.allocate(data.length)
    val db = ByteBuffer.allocateDirect(data.length)
    bb.put(data).flip()
    db.put(data).flip()
    val builder = Seq.newBuilder[MessageUnpacker]
    builder += MessagePack.newDefaultUnpacker(data)
    builder += MessagePack.newDefaultUnpacker(bb)
    if (!universal) {
      builder += MessagePack.newDefaultUnpacker(db)
    }
    builder.result()
  }
  // Splits the data into fixed-size chunks and builds one unpacker per backing
  // kind (arrays, heap buffers, direct buffers), each fed through a
  // SequenceMessageBufferInput so values can straddle chunk boundaries.
  def unpackerCollectionWithVariousBuffers(data: Array[Byte], chunkSize: Int): Seq[MessageUnpacker] = {
    val seqBytes = Seq.newBuilder[MessageBufferInput]
    val seqByteBuffers = Seq.newBuilder[MessageBufferInput]
    val seqDirectBuffers = Seq.newBuilder[MessageBufferInput]
    var left = data.length
    var position = 0
    while (left > 0) {
      val length = Math.min(chunkSize, left)
      seqBytes += new ArrayBufferInput(data, position, length)
      val bb = ByteBuffer.allocate(length)
      val db = ByteBuffer.allocateDirect(length)
      bb.put(data, position, length).flip()
      db.put(data, position, length).flip()
      seqByteBuffers += new ByteBufferInput(bb)
      seqDirectBuffers += new ByteBufferInput(db)
      left -= length
      position += length
    }
    val builder = Seq.newBuilder[MessageUnpacker]
    builder += MessagePack.newDefaultUnpacker(new SequenceMessageBufferInput(Collections.enumeration(seqBytes.result().asJava)))
    builder += MessagePack.newDefaultUnpacker(new SequenceMessageBufferInput(Collections.enumeration(seqByteBuffers.result().asJava)))
    if (!universal) {
      builder += MessagePack.newDefaultUnpacker(new SequenceMessageBufferInput(Collections.enumeration(seqDirectBuffers.result().asJava)))
    }
    builder.result()
  }
  "MessageUnpacker" should {
    "parse message packed data" taggedAs ("unpack") in {
      val arr = testData
      for (unpacker <- unpackers(arr)) {
        var count = 0
        while (unpacker.hasNext) {
          count += 1
          readValue(unpacker)
        }
        count shouldBe 6
        unpacker.getTotalReadBytes shouldBe arr.length
      }
    }
    "skip reading values" in {
      for (unpacker <- unpackers(testData)) {
        var skipCount = 0
        while (unpacker.hasNext) {
          unpacker.skipValue()
          skipCount += 1
        }
        skipCount shouldBe 2
        unpacker.getTotalReadBytes shouldBe testData.length
      }
    }
    "compare skip performance" taggedAs ("skip") in {
      val N = 10000
      val data = testData3(N)
      time("skip performance", repeat = 100) {
        block("switch") {
          for (unpacker <- unpackers(data)) {
            var skipCount = 0
            while (unpacker.hasNext) {
              unpacker.skipValue()
              skipCount += 1
            }
            skipCount shouldBe N
          }
        }
      }
      time("bulk skip performance", repeat = 100) {
        block("switch") {
          for (unpacker <- unpackers(data)) {
            unpacker.skipValue(N)
            unpacker.hasNext shouldBe false
          }
        }
      }
    }
    "parse int data" in {
      debug(intSeq.mkString(", "))
      for (unpacker <- unpackers(testData2)) {
        val ib = Seq.newBuilder[Int]
        while (unpacker.hasNext) {
          val f = unpacker.getNextFormat
          f.getValueType match {
            case ValueType.INTEGER =>
              val i = unpacker.unpackInt()
              trace(f"read int: $i%,d")
              ib += i
            case ValueType.BOOLEAN =>
              val b = unpacker.unpackBoolean()
              trace(s"read boolean: $b")
            case other =>
              unpacker.skipValue()
          }
        }
        ib.result shouldBe intSeq
        unpacker.getTotalReadBytes shouldBe testData2.length
      }
    }
    "read data at the buffer boundary" taggedAs ("boundary") in {
      // Splits the data at every possible position and checks the same number
      // of values is read regardless of where the boundary falls.
      trait SplitTest {
        val data: Array[Byte]
        def run {
          for (unpacker <- unpackers(data)) {
            val numElems = {
              var c = 0
              while (unpacker.hasNext) {
                readValue(unpacker)
                c += 1
              }
              c
            }
            for (splitPoint <- 1 until data.length - 1) {
              debug(s"split at $splitPoint")
              val (h, t) = data.splitAt(splitPoint)
              val bin = new SplitMessageBufferInput(Array(h, t))
              val unpacker = MessagePack.newDefaultUnpacker(bin)
              var count = 0
              while (unpacker.hasNext) {
                count += 1
                val f = unpacker.getNextFormat
                readValue(unpacker)
              }
              count shouldBe numElems
              unpacker.getTotalReadBytes shouldBe data.length
            }
          }
        }
      }
      new SplitTest { val data = testData }.run
      new SplitTest { val data = testData3(30) }.run
    }
    "read integer at MessageBuffer boundaries" taggedAs ("integer-buffer-boundary") in {
      val packer = MessagePack.newDefaultBufferPacker()
      (0 until 1170).foreach { i =>
        packer.packLong(0x0011223344556677L)
      }
      packer.close
      val data = packer.toByteArray
      // Boundary test
      withResource(MessagePack.newDefaultUnpacker(new InputStreamBufferInput(new ByteArrayInputStream(data), 8192))) { unpacker =>
        (0 until 1170).foreach { i =>
          unpacker.unpackLong() shouldBe 0x0011223344556677L
        }
      }
      // Boundary test for sequences of ByteBuffer, DirectByteBuffer backed MessageInput.
      for (unpacker <- unpackerCollectionWithVariousBuffers(data, 32)) {
        (0 until 1170).foreach { i =>
          unpacker.unpackLong() shouldBe 0x0011223344556677L
        }
      }
    }
    "read string at MessageBuffer boundaries" taggedAs ("string-buffer-boundary") in {
      val packer = MessagePack.newDefaultBufferPacker()
      (0 until 1170).foreach { i =>
        packer.packString("hello world")
      }
      packer.close
      val data = packer.toByteArray
      // Boundary test
      withResource(MessagePack.newDefaultUnpacker(new InputStreamBufferInput(new ByteArrayInputStream(data), 8192))) { unpacker =>
        (0 until 1170).foreach { i =>
          unpacker.unpackString() shouldBe "hello world"
        }
      }
      // Boundary test for sequences of ByteBuffer, DirectByteBuffer backed MessageInput.
      for (unpacker <- unpackerCollectionWithVariousBuffers(data, 32)) {
        (0 until 1170).foreach { i =>
          unpacker.unpackString() shouldBe "hello world"
        }
      }
    }
    "be faster than msgpack-v6 skip" taggedAs ("cmp-skip") in {
      trait Fixture {
        val unpacker: MessageUnpacker
        def run {
          var count = 0
          try {
            while (unpacker.hasNext) {
              unpacker.skipValue()
              count += 1
            }
          } finally {
            unpacker.close()
          }
        }
      }
      val data = testData3(10000)
      val N = 100
      val bb = ByteBuffer.allocate(data.length)
      bb.put(data).flip()
      val db = ByteBuffer.allocateDirect(data.length)
      db.put(data).flip()
      val t = time("skip performance", repeat = N) {
        block("v6") {
          import org.msgpack.`type`.{ValueType => ValueTypeV6}
          val v6 = new org.msgpack.MessagePack()
          val unpacker = new org.msgpack.unpacker.MessagePackUnpacker(v6, new ByteArrayInputStream(data))
          var count = 0
          try {
            while (true) {
              unpacker.skip()
              count += 1
            }
          } catch {
            // v6 signals end-of-input via EOFException
            case e: EOFException =>
          } finally unpacker.close()
        }
        block("v7-array") {
          new Fixture {
            override val unpacker = MessagePack.newDefaultUnpacker(data)
          }.run
        }
        block("v7-array-buffer") {
          new Fixture {
            override val unpacker = MessagePack.newDefaultUnpacker(bb)
          }.run
        }
        if (!universal) block("v7-direct-buffer") {
          new Fixture {
            override val unpacker = MessagePack.newDefaultUnpacker(db)
          }.run
        }
      }
      t("v7-array").averageWithoutMinMax should be <= t("v6").averageWithoutMinMax
      t("v7-array-buffer").averageWithoutMinMax should be <= t("v6").averageWithoutMinMax
      if (!universal)
        t("v7-direct-buffer").averageWithoutMinMax should be <= t("v6").averageWithoutMinMax
    }
    import org.msgpack.`type`.{ValueType => ValueTypeV6}
    "be faster than msgpack-v6 read value" taggedAs ("cmp-unpack") in {
      // Fully reads one value (recursing into arrays/maps) using the v6 API.
      def readValueV6(unpacker: org.msgpack.unpacker.MessagePackUnpacker) {
        val vt = unpacker.getNextType()
        vt match {
          case ValueTypeV6.ARRAY =>
            val len = unpacker.readArrayBegin()
            var i = 0
            while (i < len) { readValueV6(unpacker); i += 1 }
            unpacker.readArrayEnd()
          case ValueTypeV6.MAP =>
            val len = unpacker.readMapBegin()
            var i = 0
            while (i < len) {
              readValueV6(unpacker); readValueV6(unpacker); i += 1
            }
            unpacker.readMapEnd()
          case ValueTypeV6.NIL =>
            unpacker.readNil()
          case ValueTypeV6.INTEGER =>
            unpacker.readLong()
          case ValueTypeV6.BOOLEAN =>
            unpacker.readBoolean()
          case ValueTypeV6.FLOAT =>
            unpacker.readDouble()
          case ValueTypeV6.RAW =>
            unpacker.readByteArray()
          case _ =>
            unpacker.skip()
        }
      }
      // Shared scratch buffer for raw string/binary payloads (avoids
      // per-value allocation during the benchmark).
      val buf = new Array[Byte](8192)
      // Fully reads one value (recursing into arrays/maps) using the v7 API.
      def readValue(unpacker: MessageUnpacker) {
        val f = unpacker.getNextFormat
        val vt = f.getValueType
        vt match {
          case ValueType.ARRAY =>
            val len = unpacker.unpackArrayHeader()
            var i = 0
            while (i < len) { readValue(unpacker); i += 1 }
          case ValueType.MAP =>
            val len = unpacker.unpackMapHeader()
            var i = 0
            while (i < len) { readValue(unpacker); readValue(unpacker); i += 1 }
          case ValueType.NIL =>
            unpacker.unpackNil()
          case ValueType.INTEGER =>
            unpacker.unpackLong()
          case ValueType.BOOLEAN =>
            unpacker.unpackBoolean()
          case ValueType.FLOAT =>
            unpacker.unpackDouble()
          case ValueType.STRING =>
            val len = unpacker.unpackRawStringHeader()
            unpacker.readPayload(buf, 0, len)
          case ValueType.BINARY =>
            val len = unpacker.unpackBinaryHeader()
            unpacker.readPayload(buf, 0, len)
          case _ =>
            unpacker.skipValue()
        }
      }
      trait Fixture {
        val unpacker: MessageUnpacker
        def run {
          var count = 0
          try {
            while (unpacker.hasNext) {
              readValue(unpacker)
              count += 1
            }
          } finally unpacker.close()
        }
      }
      val data = testData3(10000)
      val N = 100
      val bb = ByteBuffer.allocate(data.length)
      bb.put(data).flip()
      val db = ByteBuffer.allocateDirect(data.length)
      db.put(data).flip()
      val t = time("unpack performance", repeat = N) {
        block("v6") {
          val v6 = new org.msgpack.MessagePack()
          val unpacker = new org.msgpack.unpacker.MessagePackUnpacker(v6, new ByteArrayInputStream(data))
          var count = 0
          try {
            while (true) {
              readValueV6(unpacker)
              count += 1
            }
          } catch {
            case e: EOFException =>
          } finally unpacker.close()
        }
        block("v7-array") {
          new Fixture {
            override val unpacker = MessagePack.newDefaultUnpacker(data)
          }.run
        }
        block("v7-array-buffer") {
          new Fixture {
            override val unpacker = MessagePack.newDefaultUnpacker(bb)
          }.run
        }
        if (!universal) block("v7-direct-buffer") {
          new Fixture {
            override val unpacker = MessagePack.newDefaultUnpacker(db)
          }.run
        }
      }
      // Array-backed comparisons only warn (not fail) because the margin is small.
      if (t("v7-array").averageWithoutMinMax > t("v6").averageWithoutMinMax) {
        warn(s"v7-array ${t("v7-array").averageWithoutMinMax} is slower than v6 ${t("v6").averageWithoutMinMax}")
      }
      if (t("v7-array-buffer").averageWithoutMinMax > t("v6").averageWithoutMinMax) {
        warn(s"v7-array-buffer ${t("v7-array-buffer").averageWithoutMinMax} is slower than v6 ${t("v6").averageWithoutMinMax}")
      }
      if (!universal)
        t("v7-direct-buffer").averageWithoutMinMax should be <= t("v6").averageWithoutMinMax
    }
    "be faster for reading binary than v6" taggedAs ("cmp-binary") in {
      val bos = new ByteArrayOutputStream()
      val packer = MessagePack.newDefaultPacker(bos)
      val L = 10000
      val R = 100
      (0 until R).foreach { i =>
        packer.packBinaryHeader(L)
        packer.writePayload(new Array[Byte](L))
      }
      packer.close()
      trait Fixture {
        val unpacker: MessageUnpacker
        val loop: Int
        // reads each binary value into a fresh array
        def run {
          var i = 0
          try {
            while (i < loop) {
              val len = unpacker.unpackBinaryHeader()
              val out = new Array[Byte](len)
              unpacker.readPayload(out, 0, len)
              i += 1
            }
          } finally unpacker.close()
        }
        // reads each binary value as a zero-copy reference
        def runRef {
          var i = 0
          try {
            while (i < loop) {
              val len = unpacker.unpackBinaryHeader()
              val out = unpacker.readPayloadAsReference(len)
              i += 1
            }
          } finally unpacker.close()
        }
      }
      val b = bos.toByteArray
      val bb = ByteBuffer.allocate(b.length)
      bb.put(b).flip()
      val db = ByteBuffer.allocateDirect(b.length)
      db.put(b).flip()
      time("unpackBinary", repeat = 100) {
        block("v6") {
          val v6 = new org.msgpack.MessagePack()
          val unpacker = new org.msgpack.unpacker.MessagePackUnpacker(v6, new ByteArrayInputStream(b))
          var i = 0
          while (i < R) {
            val out = unpacker.readByteArray()
            i += 1
          }
          unpacker.close()
        }
        block("v7-array") {
          new Fixture {
            override val unpacker = MessagePack.newDefaultUnpacker(b)
            override val loop = R
          }.run
        }
        block("v7-array-buffer") {
          new Fixture {
            override val unpacker = MessagePack.newDefaultUnpacker(bb)
            override val loop = R
          }.run
        }
        if (!universal) block("v7-direct-buffer") {
          new Fixture {
            override val unpacker = MessagePack.newDefaultUnpacker(db)
            override val loop = R
          }.run
        }
        block("v7-ref-array") {
          new Fixture {
            override val unpacker = MessagePack.newDefaultUnpacker(b)
            override val loop = R
          }.runRef
        }
        block("v7-ref-array-buffer") {
          new Fixture {
            override val unpacker = MessagePack.newDefaultUnpacker(bb)
            override val loop = R
          }.runRef
        }
        if (!universal) block("v7-ref-direct-buffer") {
          new Fixture {
            override val unpacker = MessagePack.newDefaultUnpacker(db)
            override val loop = R
          }.runRef
        }
      }
    }
    "read payload as a reference" taggedAs ("ref") in {
      val dataSizes =
        Seq(0, 1, 5, 8, 16, 32, 128, 256, 1024, 2000, 10000, 100000)
      for (s <- dataSizes) {
        When(f"data size is $s%,d")
        val data = new Array[Byte](s)
        Random.nextBytes(data)
        val b = new ByteArrayOutputStream()
        val packer = MessagePack.newDefaultPacker(b)
        packer.packBinaryHeader(s)
        packer.writePayload(data)
        packer.close()
        for (unpacker <- unpackers(b.toByteArray)) {
          val len = unpacker.unpackBinaryHeader()
          len shouldBe s
          val ref = unpacker.readPayloadAsReference(len)
          unpacker.close()
          ref.size() shouldBe s
          val stored = new Array[Byte](len)
          ref.getBytes(0, stored, 0, len)
          stored shouldBe data
        }
      }
    }
    "reset the internal states" taggedAs ("reset") in {
      val data = intSeq
      val b = createMessagePackData(packer => data foreach packer.packInt)
      for (unpacker <- unpackers(b)) {
        val unpacked = Array.newBuilder[Int]
        while (unpacker.hasNext) {
          unpacked += unpacker.unpackInt()
        }
        unpacker.close
        unpacked.result shouldBe data
        val data2 = intSeq
        val b2 = createMessagePackData(packer => data2 foreach packer.packInt)
        val bi = new ArrayBufferInput(b2)
        unpacker.reset(bi)
        val unpacked2 = Array.newBuilder[Int]
        while (unpacker.hasNext) {
          unpacked2 += unpacker.unpackInt()
        }
        unpacker.close
        unpacked2.result shouldBe data2
        // reused the buffer input instance
        bi.reset(b2)
        unpacker.reset(bi)
        val unpacked3 = Array.newBuilder[Int]
        while (unpacker.hasNext) {
          unpacked3 += unpacker.unpackInt()
        }
        unpacker.close
        unpacked3.result shouldBe data2
      }
    }
    "improve the performance via reset method" taggedAs ("reset-arr") in {
      val out = new ByteArrayOutputStream
      val packer = MessagePack.newDefaultPacker(out)
      packer.packInt(0)
      packer.flush
      val arr = out.toByteArray
      val mb = MessageBuffer.wrap(arr)
      val N = 1000
      val t = time("unpacker", repeat = 10) {
        block("no-buffer-reset") {
          withResource(MessagePack.newDefaultUnpacker(arr)) { unpacker =>
            for (i <- 0 until N) {
              val buf = new ArrayBufferInput(arr)
              unpacker.reset(buf)
              unpacker.unpackInt
              unpacker.close
            }
          }
        }
        block("reuse-array-input") {
          withResource(MessagePack.newDefaultUnpacker(arr)) { unpacker =>
            val buf = new ArrayBufferInput(arr)
            for (i <- 0 until N) {
              buf.reset(arr)
              unpacker.reset(buf)
              unpacker.unpackInt
              unpacker.close
            }
          }
        }
        block("reuse-message-buffer") {
          withResource(MessagePack.newDefaultUnpacker(arr)) { unpacker =>
            val buf = new ArrayBufferInput(arr)
            for (i <- 0 until N) {
              buf.reset(mb)
              unpacker.reset(buf)
              unpacker.unpackInt
              unpacker.close
            }
          }
        }
      }
      // This performance comparison is too close, so we disabled it
      // t("reuse-message-buffer").averageWithoutMinMax should be <= t("no-buffer-reset").averageWithoutMinMax
      // t("reuse-array-input").averageWithoutMinMax should be <= t("no-buffer-reset").averageWithoutMinMax
    }
    "reset ChannelBufferInput" in {
      val f0 = createTempFile
      val u = MessagePack.newDefaultUnpacker(new FileInputStream(f0).getChannel)
      checkFile(u)
      val f1 = createTempFile
      val ch = new FileInputStream(f1).getChannel
      u.reset(new ChannelBufferInput(ch))
      checkFile(u)
      u.close
    }
    "reset InputStreamBufferInput" in {
      val f0 = createTempFile
      val u = MessagePack.newDefaultUnpacker(new FileInputStream(f0))
      checkFile(u)
      val f1 = createTempFile
      val in = new FileInputStream(f1)
      u.reset(new InputStreamBufferInput(in))
      checkFile(u)
      u.close
    }
    "unpack large string data" taggedAs ("large-string") in {
      def createLargeData(stringLength: Int): Array[Byte] = {
        val out = new ByteArrayOutputStream()
        val packer = MessagePack.newDefaultPacker(out)
        packer
          .packArrayHeader(2)
          .packString("l" * stringLength)
          .packInt(1)
        packer.close()
        out.toByteArray
      }
      // String lengths around the 8KB / 16KB internal buffer sizes.
      Seq(8191, 8192, 8193, 16383, 16384, 16385).foreach { n =>
        val arr = createLargeData(n)
        for (unpacker <- unpackers(arr)) {
          unpacker.unpackArrayHeader shouldBe 2
          unpacker.unpackString.length shouldBe n
          unpacker.unpackInt shouldBe 1
          unpacker.getTotalReadBytes shouldBe arr.length
        }
      }
    }
    "unpack string crossing end of buffer" in {
      // Packs `strLen` padding bytes before the string so the string itself
      // starts near the end of an internal buffer.
      def check(expected: String, strLen: Int) = {
        val bytes = new Array[Byte](strLen)
        val out = new ByteArrayOutputStream
        val packer = MessagePack.newDefaultPacker(out)
        packer.packBinaryHeader(bytes.length)
        packer.writePayload(bytes)
        packer.packString(expected)
        packer.close
        val unpacker = MessagePack.newDefaultUnpacker(new InputStreamBufferInput(new ByteArrayInputStream(out.toByteArray)))
        val len = unpacker.unpackBinaryHeader
        unpacker.readPayload(len)
        val got = unpacker.unpackString
        unpacker.close
        got shouldBe expected
      }
      Seq("\\u3042", "a\\u3042", "\\u3042a", "\\u3042\\u3044\\u3046\\u3048\\u304A\\u304B\\u304D\\u304F\\u3051\\u3053\\u3055\\u3057\\u3059\\u305B\\u305D")
        .foreach { s =>
          Seq(8185, 8186, 8187, 8188, 16377, 16378, 16379, 16380).foreach { n =>
            check(s, n)
          }
        }
    }
    // Drains all values from the given input; used by the boundary tests below.
    def readTest(input: MessageBufferInput): Unit = {
      withResource(MessagePack.newDefaultUnpacker(input)) { unpacker =>
        while (unpacker.hasNext) {
          unpacker.unpackValue()
        }
      }
    }
    "read value length at buffer boundary" taggedAs ("number-boundary") in {
      val input = new SplitMessageBufferInput(
        Array(Array[Byte](MessagePack.Code.STR16),
              Array[Byte](0x00),
              Array[Byte](0x05), // STR16 length at the boundary
              "hello".getBytes(MessagePack.UTF8)))
      readTest(input)
      val input2 = new SplitMessageBufferInput(
        Array(
          Array[Byte](MessagePack.Code.STR32),
          Array[Byte](0x00),
          Array[Byte](0x00, 0x00),
          Array[Byte](0x05), // STR32 length at the boundary
          "hello".getBytes(MessagePack.UTF8)
        ))
      readTest(input2)
    }
  }
}
| xuwei-k/msgpack-java | msgpack-core/src/test/scala/org/msgpack/core/MessageUnpackerTest.scala | Scala | apache-2.0 | 27,833 |
import collection._
// Regression test for SI-6467: `aggregate` with a mutable accumulator
// (java.lang.StringBuffer) must produce the same result for sequential
// and parallel collections.
object Test extends App {
  // Asserts s1 == s2, printing both values on failure.
  // NOTE(review): procedure syntax (no `=` before the body) is deprecated;
  // modern code would declare this as `def compare(s1: String, s2: String): Unit = ...`.
  def compare(s1: String, s2: String) {
    assert(s1 == s2, s1 + "\\nvs.\\n" + s2)
  }
  // Sequential and parallel aggregation over the same input must agree.
  compare(List(1, 2, 3, 4).aggregate(new java.lang.StringBuffer)(_ append _, _ append _).toString, "1234")
  compare(List(1, 2, 3, 4).par.aggregate(new java.lang.StringBuffer)(_ append _, _ append _).toString, "1234")
  compare(Seq(0 until 100: _*).aggregate(new java.lang.StringBuffer)(_ append _, _ append _).toString, (0 until 100).mkString)
  compare(Seq(0 until 100: _*).par.aggregate(new java.lang.StringBuffer)(_ append _, _ append _).toString, (0 until 100).mkString)
} | felixmulder/scala | test/files/run/t6467.scala | Scala | bsd-3-clause | 619
Subsets and Splits
Filtered Scala Code Snippets
The query filters the dataset down to Scala code snippets matching specific criteria and returns a small sample of them, giving a quick overview of the kinds of code the dataset contains.